author     monster <monster@ydb.tech>  2022-07-07 14:41:37 +0300
committer  monster <monster@ydb.tech>  2022-07-07 14:41:37 +0300
commit     06e5c21a835c0e923506c4ff27929f34e00761c2 (patch)
tree       75efcbc6854ef9bd476eb8bf00cc5c900da436a2 /contrib/libs/llvm12/lib
parent     03f024c4412e3aa613bb543cf1660176320ba8f4 (diff)
download   ydb-06e5c21a835c0e923506c4ff27929f34e00761c2.tar.gz
fix ya.make
Diffstat (limited to 'contrib/libs/llvm12/lib')
-rw-r--r--  contrib/libs/llvm12/lib/Analysis/.yandex_meta/licenses.list.txt | 303
-rw-r--r--  contrib/libs/llvm12/lib/AsmParser/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/BinaryFormat/.yandex_meta/licenses.list.txt | 303
-rw-r--r--  contrib/libs/llvm12/lib/Bitcode/Reader/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Bitcode/Writer/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Bitstream/Reader/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/.yandex_meta/licenses.list.txt | 312
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/AsmPrinter/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/GlobalISel/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/MIRParser/.yandex_meta/licenses.list.txt | 12
-rw-r--r--  contrib/libs/llvm12/lib/CodeGen/SelectionDAG/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/DWARFLinker/DWARFLinker.cpp | 2637
-rw-r--r--  contrib/libs/llvm12/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp | 152
-rw-r--r--  contrib/libs/llvm12/lib/DWARFLinker/DWARFLinkerDeclContext.cpp | 215
-rw-r--r--  contrib/libs/llvm12/lib/DWARFLinker/DWARFStreamer.cpp | 800
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/CodeView/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/DWARF/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/DwarfTransformer.cpp | 572
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/FileWriter.cpp | 78
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/FunctionInfo.cpp | 254
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/GsymCreator.cpp | 320
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/GsymReader.cpp | 406
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/Header.cpp | 109
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/InlineInfo.cpp | 265
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/LineTable.cpp | 293
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/LookupResult.cpp | 74
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/ObjectFileTransformer.cpp | 116
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/GSYM/Range.cpp | 124
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/MSF/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/PDB/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/DebugInfo/Symbolize/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Demangle/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Execution.cpp | 2168
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp | 509
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Interpreter.cpp | 102
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Interpreter.h | 235
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/BasicGOTAndStubsBuilder.h | 107
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp | 781
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h | 122
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/ELF.cpp | 91
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp | 813
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLink.cpp | 350
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp | 497
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkGeneric.h | 180
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp | 133
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO.cpp | 91
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp | 583
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h | 223
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO_arm64.cpp | 747
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp | 746
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/MCJIT/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp | 379
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/CompileUtils.cpp | 94
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/Core.cpp | 2777
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/DebugUtils.cpp | 349
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/ExecutionUtils.cpp | 387
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/IRCompileLayer.cpp | 48
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/IRTransformLayer.cpp | 33
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/IndirectionUtils.cpp | 375
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp | 141
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/LLJIT.cpp | 1230
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/Layer.cpp | 212
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/LazyReexports.cpp | 234
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/MachOPlatform.cpp | 489
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/Mangling.cpp | 160
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp | 652
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp | 40
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/OrcABISupport.cpp | 910
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp | 529
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp | 351
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/OrcError.cpp | 120
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/RPCError.cpp | 58
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.cpp | 44
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp | 306
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/Speculation.cpp | 143
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCDynamicLibrarySearchGenerator.cpp | 70
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCEHFrameRegistrar.cpp | 80
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCIndirectionUtils.cpp | 423
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp | 208
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp | 43
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcessControl.cpp | 153
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp | 64
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/CMakeLists.txt | 2
-rw-r--r--  contrib/libs/llvm12/lib/ExecutionEngine/RuntimeDyld/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Extensions/Extensions.cpp | 15
-rw-r--r--  contrib/libs/llvm12/lib/FileCheck/FileCheck.cpp | 2754
-rw-r--r--  contrib/libs/llvm12/lib/FileCheck/FileCheckImpl.h | 859
-rw-r--r--  contrib/libs/llvm12/lib/Frontend/OpenMP/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/FuzzMutate/FuzzerCLI.cpp | 209
-rw-r--r--  contrib/libs/llvm12/lib/FuzzMutate/IRMutator.cpp | 242
-rw-r--r--  contrib/libs/llvm12/lib/FuzzMutate/OpDescriptor.cpp | 37
-rw-r--r--  contrib/libs/llvm12/lib/FuzzMutate/Operations.cpp | 322
-rw-r--r--  contrib/libs/llvm12/lib/FuzzMutate/RandomIRBuilder.cpp | 156
-rw-r--r--  contrib/libs/llvm12/lib/IR/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/IRReader/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/InterfaceStub/ELFObjHandler.cpp | 680
-rw-r--r--  contrib/libs/llvm12/lib/InterfaceStub/ELFStub.cpp | 28
-rw-r--r--  contrib/libs/llvm12/lib/InterfaceStub/TBEHandler.cpp | 143
-rw-r--r--  contrib/libs/llvm12/lib/LTO/Caching.cpp | 151
-rw-r--r--  contrib/libs/llvm12/lib/LTO/LTO.cpp | 1544
-rw-r--r--  contrib/libs/llvm12/lib/LTO/LTOBackend.cpp | 746
-rw-r--r--  contrib/libs/llvm12/lib/LTO/LTOCodeGenerator.cpp | 733
-rw-r--r--  contrib/libs/llvm12/lib/LTO/LTOModule.cpp | 689
-rw-r--r--  contrib/libs/llvm12/lib/LTO/SummaryBasedOptimizations.cpp | 86
-rw-r--r--  contrib/libs/llvm12/lib/LTO/ThinLTOCodeGenerator.cpp | 1168
-rw-r--r--  contrib/libs/llvm12/lib/LTO/UpdateCompilerUsed.cpp | 133
-rw-r--r--  contrib/libs/llvm12/lib/LineEditor/LineEditor.cpp | 324
-rw-r--r--  contrib/libs/llvm12/lib/Linker/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/MC/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/MC/MCDisassembler/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/MC/MCParser/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/MCA/CodeEmitter.cpp | 37
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Context.cpp | 68
-rw-r--r--  contrib/libs/llvm12/lib/MCA/HWEventListener.cpp | 22
-rw-r--r--  contrib/libs/llvm12/lib/MCA/HardwareUnits/HardwareUnit.cpp | 24
-rw-r--r--  contrib/libs/llvm12/lib/MCA/HardwareUnits/LSUnit.cpp | 252
-rw-r--r--  contrib/libs/llvm12/lib/MCA/HardwareUnits/RegisterFile.cpp | 491
-rw-r--r--  contrib/libs/llvm12/lib/MCA/HardwareUnits/ResourceManager.cpp | 364
-rw-r--r--  contrib/libs/llvm12/lib/MCA/HardwareUnits/RetireControlUnit.cpp | 100
-rw-r--r--  contrib/libs/llvm12/lib/MCA/HardwareUnits/Scheduler.cpp | 341
-rw-r--r--  contrib/libs/llvm12/lib/MCA/InstrBuilder.cpp | 712
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Instruction.cpp | 254
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Pipeline.cpp | 97
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Stages/DispatchStage.cpp | 187
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Stages/EntryStage.cpp | 77
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Stages/ExecuteStage.cpp | 296
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Stages/InstructionTables.cpp | 68
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Stages/MicroOpQueueStage.cpp | 70
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Stages/RetireStage.cpp | 65
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Stages/Stage.cpp | 28
-rw-r--r--  contrib/libs/llvm12/lib/MCA/Support.cpp | 110
-rw-r--r--  contrib/libs/llvm12/lib/Object/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/ArchiveEmitter.cpp | 51
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/ArchiveYAML.cpp | 58
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/COFFEmitter.cpp | 628
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/COFFYAML.cpp | 600
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLDebugSections.cpp | 957
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLSymbols.cpp | 659
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLTypeHashing.cpp | 87
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLTypes.cpp | 817
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/DWARFEmitter.cpp | 1080
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/DWARFYAML.cpp | 329
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/ELFEmitter.cpp | 1953
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/ELFYAML.cpp | 1719
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/MachOEmitter.cpp | 646
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/MachOYAML.cpp | 589
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/MinidumpEmitter.cpp | 247
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/MinidumpYAML.cpp | 563
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/ObjectYAML.cpp | 70
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/WasmEmitter.cpp | 672
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/WasmYAML.cpp | 621
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/XCOFFYAML.cpp | 109
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/YAML.cpp | 64
-rw-r--r--  contrib/libs/llvm12/lib/ObjectYAML/yaml2obj.cpp | 79
-rw-r--r--  contrib/libs/llvm12/lib/Option/Arg.cpp | 125
-rw-r--r--  contrib/libs/llvm12/lib/Option/ArgList.cpp | 274
-rw-r--r--  contrib/libs/llvm12/lib/Option/OptTable.cpp | 672
-rw-r--r--  contrib/libs/llvm12/lib/Option/Option.cpp | 291
-rw-r--r--  contrib/libs/llvm12/lib/Passes/PassBuilder.cpp | 3062
-rw-r--r--  contrib/libs/llvm12/lib/Passes/PassPlugin.cpp | 51
-rw-r--r--  contrib/libs/llvm12/lib/Passes/PassRegistry.def | 421
-rw-r--r--  contrib/libs/llvm12/lib/Passes/StandardInstrumentations.cpp | 895
-rw-r--r--  contrib/libs/llvm12/lib/ProfileData/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/ProfileData/Coverage/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Remarks/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Support/.yandex_meta/licenses.list.txt | 491
-rw-r--r--  contrib/libs/llvm12/lib/TableGen/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/AArch64/.yandex_meta/licenses.list.txt | 303
-rw-r--r--  contrib/libs/llvm12/lib/Target/AArch64/AsmParser/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/AArch64/Disassembler/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/AArch64/MCTargetDesc/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/AArch64/TargetInfo/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/AArch64/Utils/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/ARM/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/ARM/AsmParser/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/ARM/Disassembler/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/ARM/MCTargetDesc/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/ARM/TargetInfo/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/ARM/Utils/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/BPF/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/BPF/AsmParser/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/BPF/Disassembler/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/BPF/MCTargetDesc/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/BPF/TargetInfo/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/NVPTX/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/NVPTX/MCTargetDesc/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/NVPTX/TargetInfo/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/PowerPC/.yandex_meta/licenses.list.txt | 16
-rw-r--r--  contrib/libs/llvm12/lib/Target/PowerPC/AsmParser/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/PowerPC/Disassembler/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/PowerPC/MCTargetDesc/.yandex_meta/licenses.list.txt | 303
-rw-r--r--  contrib/libs/llvm12/lib/Target/PowerPC/TargetInfo/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/X86/.yandex_meta/licenses.list.txt | 303
-rw-r--r--  contrib/libs/llvm12/lib/Target/X86/AsmParser/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/X86/Disassembler/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/X86/MCTargetDesc/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Target/X86/TargetInfo/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/TextAPI/MachO/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/ToolDrivers/llvm-dlltool/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/ToolDrivers/llvm-lib/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/CFGuard/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Coroutines/CoroCleanup.cpp | 150
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Coroutines/CoroEarly.cpp | 285
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Coroutines/CoroElide.cpp | 458
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Coroutines/CoroFrame.cpp | 2373
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Coroutines/CoroInstr.h | 691
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Coroutines/CoroInternal.h | 283
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Coroutines/CoroSplit.cpp | 2203
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Coroutines/Coroutines.cpp | 755
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/HelloNew/HelloWorld.cpp | 17
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/IPO/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/InstCombine/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Instrumentation/.yandex_meta/licenses.list.txt | 303
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/ObjCARC/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Scalar/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Utils/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/Transforms/Vectorize/.yandex_meta/licenses.list.txt | 7
-rw-r--r--  contrib/libs/llvm12/lib/WindowsManifest/WindowsManifestMerger.cpp | 729
-rw-r--r--  contrib/libs/llvm12/lib/XRay/BlockIndexer.cpp | 97
-rw-r--r--  contrib/libs/llvm12/lib/XRay/BlockPrinter.cpp | 113
-rw-r--r--  contrib/libs/llvm12/lib/XRay/BlockVerifier.cpp | 204
-rw-r--r--  contrib/libs/llvm12/lib/XRay/FDRRecordProducer.cpp | 198
-rw-r--r--  contrib/libs/llvm12/lib/XRay/FDRRecords.cpp | 66
-rw-r--r--  contrib/libs/llvm12/lib/XRay/FDRTraceExpander.cpp | 131
-rw-r--r--  contrib/libs/llvm12/lib/XRay/FDRTraceWriter.cpp | 151
-rw-r--r--  contrib/libs/llvm12/lib/XRay/FileHeaderReader.cpp | 73
-rw-r--r--  contrib/libs/llvm12/lib/XRay/InstrumentationMap.cpp | 293
-rw-r--r--  contrib/libs/llvm12/lib/XRay/LogBuilderConsumer.cpp | 37
-rw-r--r--  contrib/libs/llvm12/lib/XRay/Profile.cpp | 403
-rw-r--r--  contrib/libs/llvm12/lib/XRay/RecordInitializer.cpp | 431
-rw-r--r--  contrib/libs/llvm12/lib/XRay/RecordPrinter.cpp | 108
-rw-r--r--  contrib/libs/llvm12/lib/XRay/Trace.cpp | 477
234 files changed, 69793 insertions, 3105 deletions
diff --git a/contrib/libs/llvm12/lib/Analysis/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Analysis/.yandex_meta/licenses.list.txt
deleted file mode 100644
index ad3879fc450..00000000000
--- a/contrib/libs/llvm12/lib/Analysis/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,303 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================File: LICENSE.TXT====================
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
----- LLVM Exceptions to the Apache 2.0 License ----
-
-As an exception, if, as a result of your compiling your source code, portions
-of this Software are embedded into an Object form of such source code, you
-may redistribute such embedded portions in such Object form without complying
-with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
-
-In addition, if you combine or link compiled forms of this Software with
-software that is licensed under the GPLv2 ("Combined Software") and if a
-court of competent jurisdiction determines that the patent provision (Section
-3), the indemnity provision (Section 9) or other Section of the License
-conflicts with the conditions of the GPLv2, you may retroactively and
-prospectively choose to deem waived or otherwise exclude such Section(s) of
-the License, but only in their entirety and only with respect to the Combined
-Software.
-
-==============================================================================
-Software from third parties included in the LLVM Project:
-==============================================================================
-The LLVM Project contains third party software which is under different license
-terms. All such code will be identified clearly using at least one of two
-mechanisms:
-1) It will be in a separate directory tree with its own `LICENSE.txt` or
- `LICENSE` file at the top containing the specific license and restrictions
- which apply to that software, or
-2) It will contain specific license and restriction terms at the top of every
- file.
-
-==============================================================================
-Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
- LLVM Team
-
- University of Illinois at Urbana-Champaign
-
- http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimers.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimers in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the names of the LLVM Team, University of Illinois at
- Urbana-Champaign, nor the names of its contributors may be used to
- endorse or promote products derived from this Software without specific
- prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-
-
-====================File: include/llvm/Support/LICENSE.TXT====================
-LLVM System Interface Library
--------------------------------------------------------------------------------
-The LLVM System Interface Library is licensed under the Illinois Open Source
-License and has the following additional copyright:
-
-Copyright (C) 2004 eXtensible Systems, Inc.
-
-
-====================NCSA====================
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm12/lib/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/AsmParser/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/AsmParser/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/BinaryFormat/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/BinaryFormat/.yandex_meta/licenses.list.txt
deleted file mode 100644
index ad3879fc450..00000000000
--- a/contrib/libs/llvm12/lib/BinaryFormat/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,303 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================File: LICENSE.TXT====================
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
----- LLVM Exceptions to the Apache 2.0 License ----
-
-As an exception, if, as a result of your compiling your source code, portions
-of this Software are embedded into an Object form of such source code, you
-may redistribute such embedded portions in such Object form without complying
-with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
-
-In addition, if you combine or link compiled forms of this Software with
-software that is licensed under the GPLv2 ("Combined Software") and if a
-court of competent jurisdiction determines that the patent provision (Section
-3), the indemnity provision (Section 9) or other Section of the License
-conflicts with the conditions of the GPLv2, you may retroactively and
-prospectively choose to deem waived or otherwise exclude such Section(s) of
-the License, but only in their entirety and only with respect to the Combined
-Software.
-
-==============================================================================
-Software from third parties included in the LLVM Project:
-==============================================================================
-The LLVM Project contains third party software which is under different license
-terms. All such code will be identified clearly using at least one of two
-mechanisms:
-1) It will be in a separate directory tree with its own `LICENSE.txt` or
- `LICENSE` file at the top containing the specific license and restrictions
- which apply to that software, or
-2) It will contain specific license and restriction terms at the top of every
- file.
-
-==============================================================================
-Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
- LLVM Team
-
- University of Illinois at Urbana-Champaign
-
- http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimers.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimers in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the names of the LLVM Team, University of Illinois at
- Urbana-Champaign, nor the names of its contributors may be used to
- endorse or promote products derived from this Software without specific
- prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-
-
-====================File: include/llvm/Support/LICENSE.TXT====================
-LLVM System Interface Library
--------------------------------------------------------------------------------
-The LLVM System Interface Library is licensed under the Illinois Open Source
-License and has the following additional copyright:
-
-Copyright (C) 2004 eXtensible Systems, Inc.
-
-
-====================NCSA====================
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm12/lib/Bitcode/Reader/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Bitcode/Reader/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Bitcode/Reader/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Bitcode/Writer/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Bitcode/Writer/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Bitcode/Writer/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Bitstream/Reader/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Bitstream/Reader/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Bitstream/Reader/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/CodeGen/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/CodeGen/.yandex_meta/licenses.list.txt
deleted file mode 100644
index 36adcc85d19..00000000000
--- a/contrib/libs/llvm12/lib/CodeGen/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,312 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-/// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-/// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-/// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================File: LICENSE.TXT====================
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
----- LLVM Exceptions to the Apache 2.0 License ----
-
-As an exception, if, as a result of your compiling your source code, portions
-of this Software are embedded into an Object form of such source code, you
-may redistribute such embedded portions in such Object form without complying
-with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
-
-In addition, if you combine or link compiled forms of this Software with
-software that is licensed under the GPLv2 ("Combined Software") and if a
-court of competent jurisdiction determines that the patent provision (Section
-3), the indemnity provision (Section 9) or other Section of the License
-conflicts with the conditions of the GPLv2, you may retroactively and
-prospectively choose to deem waived or otherwise exclude such Section(s) of
-the License, but only in their entirety and only with respect to the Combined
-Software.
-
-==============================================================================
-Software from third parties included in the LLVM Project:
-==============================================================================
-The LLVM Project contains third party software which is under different license
-terms. All such code will be identified clearly using at least one of two
-mechanisms:
-1) It will be in a separate directory tree with its own `LICENSE.txt` or
- `LICENSE` file at the top containing the specific license and restrictions
- which apply to that software, or
-2) It will contain specific license and restriction terms at the top of every
- file.
-
-==============================================================================
-Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
- LLVM Team
-
- University of Illinois at Urbana-Champaign
-
- http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimers.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimers in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the names of the LLVM Team, University of Illinois at
- Urbana-Champaign, nor the names of its contributors may be used to
- endorse or promote products derived from this Software without specific
- prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-
-
-====================File: include/llvm/Support/LICENSE.TXT====================
-LLVM System Interface Library
--------------------------------------------------------------------------------
-The LLVM System Interface Library is licensed under the Illinois Open Source
-License and has the following additional copyright:
-
-Copyright (C) 2004 eXtensible Systems, Inc.
-
-
-====================NCSA====================
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm12/lib/CodeGen/AsmPrinter/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/CodeGen/AsmPrinter/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/CodeGen/AsmPrinter/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/CodeGen/GlobalISel/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/CodeGen/GlobalISel/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/CodeGen/MIRParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/CodeGen/MIRParser/.yandex_meta/licenses.list.txt
deleted file mode 100644
index de1a495b4f3..00000000000
--- a/contrib/libs/llvm12/lib/CodeGen/MIRParser/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================COPYRIGHT====================
- return isalpha(C) || isdigit(C) || C == '_' || C == '-' || C == '.' ||
- C == '$';
diff --git a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/CodeGen/SelectionDAG/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinker.cpp b/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinker.cpp
new file mode 100644
index 00000000000..d20f6dd8f33
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinker.cpp
@@ -0,0 +1,2637 @@
+//=== DWARFLinker.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DWARFLinker/DWARFLinker.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/CodeGen/NonRelocatableStringpool.h"
+#include "llvm/DWARFLinker/DWARFLinkerDeclContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFAbbreviationDeclaration.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFDataExtractor.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"
+#include "llvm/DebugInfo/DWARF/DWARFDebugRangeList.h"
+#include "llvm/DebugInfo/DWARF/DWARFDie.h"
+#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
+#include "llvm/DebugInfo/DWARF/DWARFSection.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/ThreadPool.h"
+#include <vector>
+
+namespace llvm {
+
+/// Hold the input and output debug info sizes in bytes.
+struct DebugInfoSize {
+ uint64_t Input;
+ uint64_t Output;
+};
+
+/// Compute the total size of the debug info.
+static uint64_t getDebugInfoSize(DWARFContext &Dwarf) {
+ uint64_t Size = 0;
+ for (auto &Unit : Dwarf.compile_units()) {
+ Size += Unit->getLength();
+ }
+ return Size;
+}
+
+/// Similar to DWARFUnitSection::getUnitForOffset(), but returning our
+/// CompileUnit object instead.
+static CompileUnit *getUnitForOffset(const UnitListTy &Units, uint64_t Offset) {
+ auto CU = llvm::upper_bound(
+ Units, Offset, [](uint64_t LHS, const std::unique_ptr<CompileUnit> &RHS) {
+ return LHS < RHS->getOrigUnit().getNextUnitOffset();
+ });
+ return CU != Units.end() ? CU->get() : nullptr;
+}
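+
+// For example, with units covering the .debug_info ranges [0x0, 0x40) and
+// [0x40, 0x90), an offset of 0x44 resolves to the second unit, while an
+// offset of 0x90 or beyond yields nullptr.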
+
+/// Resolve the DIE attribute reference that has been extracted in \p RefValue.
+/// The resulting DIE might be in another CompileUnit which is stored into \p
+/// RefCU. \returns null if resolving fails for any reason.
+DWARFDie DWARFLinker::resolveDIEReference(const DWARFFile &File,
+ const UnitListTy &Units,
+ const DWARFFormValue &RefValue,
+ const DWARFDie &DIE,
+ CompileUnit *&RefCU) {
+ assert(RefValue.isFormClass(DWARFFormValue::FC_Reference));
+ uint64_t RefOffset = *RefValue.getAsReference();
+ if ((RefCU = getUnitForOffset(Units, RefOffset)))
+ if (const auto RefDie = RefCU->getOrigUnit().getDIEForOffset(RefOffset)) {
+ // In a file with broken references, an attribute might point to a NULL
+ // DIE.
+ if (!RefDie.isNULL())
+ return RefDie;
+ }
+
+ reportWarning("could not find referenced DIE", File, &DIE);
+ return DWARFDie();
+}
+
+/// \returns whether the passed \a Attr type might contain a DIE reference
+/// suitable for ODR uniquing.
+static bool isODRAttribute(uint16_t Attr) {
+ switch (Attr) {
+ default:
+ return false;
+ case dwarf::DW_AT_type:
+ case dwarf::DW_AT_containing_type:
+ case dwarf::DW_AT_specification:
+ case dwarf::DW_AT_abstract_origin:
+ case dwarf::DW_AT_import:
+ return true;
+ }
+ llvm_unreachable("Improper attribute.");
+}
+
+static bool isTypeTag(uint16_t Tag) {
+ switch (Tag) {
+ case dwarf::DW_TAG_array_type:
+ case dwarf::DW_TAG_class_type:
+ case dwarf::DW_TAG_enumeration_type:
+ case dwarf::DW_TAG_pointer_type:
+ case dwarf::DW_TAG_reference_type:
+ case dwarf::DW_TAG_string_type:
+ case dwarf::DW_TAG_structure_type:
+ case dwarf::DW_TAG_subroutine_type:
+ case dwarf::DW_TAG_typedef:
+ case dwarf::DW_TAG_union_type:
+ case dwarf::DW_TAG_ptr_to_member_type:
+ case dwarf::DW_TAG_set_type:
+ case dwarf::DW_TAG_subrange_type:
+ case dwarf::DW_TAG_base_type:
+ case dwarf::DW_TAG_const_type:
+ case dwarf::DW_TAG_constant:
+ case dwarf::DW_TAG_file_type:
+ case dwarf::DW_TAG_namelist:
+ case dwarf::DW_TAG_packed_type:
+ case dwarf::DW_TAG_volatile_type:
+ case dwarf::DW_TAG_restrict_type:
+ case dwarf::DW_TAG_atomic_type:
+ case dwarf::DW_TAG_interface_type:
+ case dwarf::DW_TAG_unspecified_type:
+ case dwarf::DW_TAG_shared_type:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+AddressesMap::~AddressesMap() {}
+
+DwarfEmitter::~DwarfEmitter() {}
+
+static Optional<StringRef> StripTemplateParameters(StringRef Name) {
+ // We are looking for template parameters to strip from Name. e.g.
+ //
+ // operator<<B>
+ //
+  // We look for a trailing > but, if the name does not contain any <, we
+  // have something like operator>>. We also check for the operator<=> case.
+ if (!Name.endswith(">") || Name.count("<") == 0 || Name.endswith("<=>"))
+ return {};
+
+ // How many < until we have the start of the template parameters.
+ size_t NumLeftAnglesToSkip = 1;
+
+ // If we have operator<=> then we need to skip its < as well.
+ NumLeftAnglesToSkip += Name.count("<=>");
+
+ size_t RightAngleCount = Name.count('>');
+ size_t LeftAngleCount = Name.count('<');
+
+  // If we have more < than >, we have operator< or operator<<, so we need
+  // to account for their < as well.
+ if (LeftAngleCount > RightAngleCount)
+ NumLeftAnglesToSkip += LeftAngleCount - RightAngleCount;
+
+ size_t StartOfTemplate = 0;
+ while (NumLeftAnglesToSkip--)
+ StartOfTemplate = Name.find('<', StartOfTemplate) + 1;
+
+ return Name.substr(0, StartOfTemplate - 1);
+}
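+
+// Illustrative inputs and results for StripTemplateParameters:
+//   "foo<int>"       -> "foo"
+//   "operator<<B>"   -> "operator<"    (one '<' is part of the name itself)
+//   "operator<=><T>" -> "operator<=>"  (the '<' inside "<=>" is skipped)
+//   "operator>>"     -> None           (no '<' at all)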
+
+bool DWARFLinker::DIECloner::getDIENames(const DWARFDie &Die,
+ AttributesInfo &Info,
+ OffsetsStringPool &StringPool,
+ bool StripTemplate) {
+ // This function will be called on DIEs having low_pcs and
+  // ranges. As getting the name might be more expensive, filter out
+ // blocks directly.
+ if (Die.getTag() == dwarf::DW_TAG_lexical_block)
+ return false;
+
+ if (!Info.MangledName)
+ if (const char *MangledName = Die.getLinkageName())
+ Info.MangledName = StringPool.getEntry(MangledName);
+
+ if (!Info.Name)
+ if (const char *Name = Die.getShortName())
+ Info.Name = StringPool.getEntry(Name);
+
+ if (!Info.MangledName)
+ Info.MangledName = Info.Name;
+
+ if (StripTemplate && Info.Name && Info.MangledName != Info.Name) {
+ StringRef Name = Info.Name.getString();
+ if (Optional<StringRef> StrippedName = StripTemplateParameters(Name))
+ Info.NameWithoutTemplate = StringPool.getEntry(*StrippedName);
+ }
+
+ return Info.Name || Info.MangledName;
+}
+
+/// Resolve the relative path to a build artifact referenced by DWARF by
+/// applying DW_AT_comp_dir.
+static void resolveRelativeObjectPath(SmallVectorImpl<char> &Buf, DWARFDie CU) {
+ sys::path::append(Buf, dwarf::toString(CU.find(dwarf::DW_AT_comp_dir), ""));
+}
+
+/// Collect references to parseable Swift interfaces in imported
+/// DW_TAG_module blocks.
+static void analyzeImportedModule(
+ const DWARFDie &DIE, CompileUnit &CU,
+ swiftInterfacesMap *ParseableSwiftInterfaces,
+ std::function<void(const Twine &, const DWARFDie &)> ReportWarning) {
+ if (CU.getLanguage() != dwarf::DW_LANG_Swift)
+ return;
+
+ if (!ParseableSwiftInterfaces)
+ return;
+
+ StringRef Path = dwarf::toStringRef(DIE.find(dwarf::DW_AT_LLVM_include_path));
+ if (!Path.endswith(".swiftinterface"))
+ return;
+ // Don't track interfaces that are part of the SDK.
+ StringRef SysRoot = dwarf::toStringRef(DIE.find(dwarf::DW_AT_LLVM_sysroot));
+ if (SysRoot.empty())
+ SysRoot = CU.getSysRoot();
+ if (!SysRoot.empty() && Path.startswith(SysRoot))
+ return;
+ if (Optional<DWARFFormValue> Val = DIE.find(dwarf::DW_AT_name))
+ if (Optional<const char *> Name = Val->getAsCString()) {
+ auto &Entry = (*ParseableSwiftInterfaces)[*Name];
+ // The prepend path is applied later when copying.
+ DWARFDie CUDie = CU.getOrigUnit().getUnitDIE();
+ SmallString<128> ResolvedPath;
+ if (sys::path::is_relative(Path))
+ resolveRelativeObjectPath(ResolvedPath, CUDie);
+ sys::path::append(ResolvedPath, Path);
+ if (!Entry.empty() && Entry != ResolvedPath)
+ ReportWarning(
+ Twine("Conflicting parseable interfaces for Swift Module ") +
+ *Name + ": " + Entry + " and " + Path,
+ DIE);
+ Entry = std::string(ResolvedPath.str());
+ }
+}
+
+/// The distinct types of work performed by the work loop in
+/// analyzeContextInfo.
+enum class ContextWorklistItemType : uint8_t {
+ AnalyzeContextInfo,
+ UpdateChildPruning,
+ UpdatePruning,
+};
+
+/// This class represents an item in the work list. The type defines what kind
+/// of work needs to be performed when processing the current item. Everything
+/// but the Type and Die fields are optional based on the type.
+struct ContextWorklistItem {
+ DWARFDie Die;
+ unsigned ParentIdx;
+ union {
+ CompileUnit::DIEInfo *OtherInfo;
+ DeclContext *Context;
+ };
+ ContextWorklistItemType Type;
+ bool InImportedModule;
+
+ ContextWorklistItem(DWARFDie Die, ContextWorklistItemType T,
+ CompileUnit::DIEInfo *OtherInfo = nullptr)
+ : Die(Die), ParentIdx(0), OtherInfo(OtherInfo), Type(T),
+ InImportedModule(false) {}
+
+ ContextWorklistItem(DWARFDie Die, DeclContext *Context, unsigned ParentIdx,
+ bool InImportedModule)
+ : Die(Die), ParentIdx(ParentIdx), Context(Context),
+ Type(ContextWorklistItemType::AnalyzeContextInfo),
+ InImportedModule(InImportedModule) {}
+};
+
+static bool updatePruning(const DWARFDie &Die, CompileUnit &CU,
+ uint64_t ModulesEndOffset) {
+ CompileUnit::DIEInfo &Info = CU.getInfo(Die);
+
+ // Prune this DIE if it is either a forward declaration inside a
+ // DW_TAG_module or a DW_TAG_module that contains nothing but
+ // forward declarations.
+ Info.Prune &= (Die.getTag() == dwarf::DW_TAG_module) ||
+ (isTypeTag(Die.getTag()) &&
+ dwarf::toUnsigned(Die.find(dwarf::DW_AT_declaration), 0));
+
+ // Only prune forward declarations inside a DW_TAG_module for which a
+ // definition exists elsewhere.
+ if (ModulesEndOffset == 0)
+ Info.Prune &= Info.Ctxt && Info.Ctxt->getCanonicalDIEOffset();
+ else
+ Info.Prune &= Info.Ctxt && Info.Ctxt->getCanonicalDIEOffset() > 0 &&
+ Info.Ctxt->getCanonicalDIEOffset() <= ModulesEndOffset;
+
+ return Info.Prune;
+}
+
+static void updateChildPruning(const DWARFDie &Die, CompileUnit &CU,
+ CompileUnit::DIEInfo &ChildInfo) {
+ CompileUnit::DIEInfo &Info = CU.getInfo(Die);
+ Info.Prune &= ChildInfo.Prune;
+}
+
+/// Recursive helper to build the global DeclContext information and
+/// gather the child->parent relationships in the original compile unit.
+///
+/// This function uses the same work list approach as lookForDIEsToKeep.
+///
+/// \return true when this DIE and all of its children are only
+/// forward declarations to types defined in external clang modules
+/// (i.e., forward declarations that are children of a DW_TAG_module).
+static bool analyzeContextInfo(
+ const DWARFDie &DIE, unsigned ParentIdx, CompileUnit &CU,
+ DeclContext *CurrentDeclContext, DeclContextTree &Contexts,
+ uint64_t ModulesEndOffset, swiftInterfacesMap *ParseableSwiftInterfaces,
+ std::function<void(const Twine &, const DWARFDie &)> ReportWarning,
+ bool InImportedModule = false) {
+ // LIFO work list.
+ std::vector<ContextWorklistItem> Worklist;
+ Worklist.emplace_back(DIE, CurrentDeclContext, ParentIdx, InImportedModule);
+
+ while (!Worklist.empty()) {
+ ContextWorklistItem Current = Worklist.back();
+ Worklist.pop_back();
+
+ switch (Current.Type) {
+ case ContextWorklistItemType::UpdatePruning:
+ updatePruning(Current.Die, CU, ModulesEndOffset);
+ continue;
+ case ContextWorklistItemType::UpdateChildPruning:
+ updateChildPruning(Current.Die, CU, *Current.OtherInfo);
+ continue;
+ case ContextWorklistItemType::AnalyzeContextInfo:
+ break;
+ }
+
+ unsigned Idx = CU.getOrigUnit().getDIEIndex(Current.Die);
+ CompileUnit::DIEInfo &Info = CU.getInfo(Idx);
+
+ // Clang imposes an ODR on modules(!) regardless of the language:
+ // "The module-id should consist of only a single identifier,
+ // which provides the name of the module being defined. Each
+ // module shall have a single definition."
+ //
+ // This does not extend to the types inside the modules:
+ // "[I]n C, this implies that if two structs are defined in
+ // different submodules with the same name, those two types are
+ // distinct types (but may be compatible types if their
+ // definitions match)."
+ //
+ // We treat non-C++ modules like namespaces for this reason.
+ if (Current.Die.getTag() == dwarf::DW_TAG_module &&
+ Current.ParentIdx == 0 &&
+ dwarf::toString(Current.Die.find(dwarf::DW_AT_name), "") !=
+ CU.getClangModuleName()) {
+ Current.InImportedModule = true;
+ analyzeImportedModule(Current.Die, CU, ParseableSwiftInterfaces,
+ ReportWarning);
+ }
+
+ Info.ParentIdx = Current.ParentIdx;
+ bool InClangModule = CU.isClangModule() || Current.InImportedModule;
+ if (CU.hasODR() || InClangModule) {
+ if (Current.Context) {
+ auto PtrInvalidPair = Contexts.getChildDeclContext(
+ *Current.Context, Current.Die, CU, InClangModule);
+ Current.Context = PtrInvalidPair.getPointer();
+ Info.Ctxt =
+ PtrInvalidPair.getInt() ? nullptr : PtrInvalidPair.getPointer();
+ if (Info.Ctxt)
+ Info.Ctxt->setDefinedInClangModule(InClangModule);
+ } else
+ Info.Ctxt = Current.Context = nullptr;
+ }
+
+ Info.Prune = Current.InImportedModule;
+ // Add children in reverse order to the worklist to effectively process
+ // them in order.
+ Worklist.emplace_back(Current.Die, ContextWorklistItemType::UpdatePruning);
+ for (auto Child : reverse(Current.Die.children())) {
+ CompileUnit::DIEInfo &ChildInfo = CU.getInfo(Child);
+ Worklist.emplace_back(
+ Current.Die, ContextWorklistItemType::UpdateChildPruning, &ChildInfo);
+ Worklist.emplace_back(Child, Current.Context, Idx,
+ Current.InImportedModule);
+ }
+ }
+
+ return CU.getInfo(DIE).Prune;
+}
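+
+// Pop-order sketch for a parent P with children {A, B}: the loop above pushes
+// UpdatePruning(P) first and then, walking the children in reverse, the pairs
+// UpdateChildPruning(P, B), AnalyzeContextInfo(B), UpdateChildPruning(P, A),
+// AnalyzeContextInfo(A). Popping from the LIFO therefore analyzes A's whole
+// subtree, folds A's Prune bit into P, does the same for B, and only then
+// finalizes P's own pruning, matching what direct recursion would compute.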
+
+static bool dieNeedsChildrenToBeMeaningful(uint32_t Tag) {
+ switch (Tag) {
+ default:
+ return false;
+ case dwarf::DW_TAG_class_type:
+ case dwarf::DW_TAG_common_block:
+ case dwarf::DW_TAG_lexical_block:
+ case dwarf::DW_TAG_structure_type:
+ case dwarf::DW_TAG_subprogram:
+ case dwarf::DW_TAG_subroutine_type:
+ case dwarf::DW_TAG_union_type:
+ return true;
+ }
+ llvm_unreachable("Invalid Tag");
+}
+
+void DWARFLinker::cleanupAuxiliarryData(LinkContext &Context) {
+ Context.clear();
+
+ for (auto I = DIEBlocks.begin(), E = DIEBlocks.end(); I != E; ++I)
+ (*I)->~DIEBlock();
+ for (auto I = DIELocs.begin(), E = DIELocs.end(); I != E; ++I)
+ (*I)->~DIELoc();
+
+ DIEBlocks.clear();
+ DIELocs.clear();
+ DIEAlloc.Reset();
+}
+
+/// Check if a variable describing DIE should be kept.
+/// \returns updated TraversalFlags.
+unsigned DWARFLinker::shouldKeepVariableDIE(AddressesMap &RelocMgr,
+ const DWARFDie &DIE,
+ CompileUnit::DIEInfo &MyInfo,
+ unsigned Flags) {
+ const auto *Abbrev = DIE.getAbbreviationDeclarationPtr();
+
+ // Global variables with constant value can always be kept.
+ if (!(Flags & TF_InFunctionScope) &&
+ Abbrev->findAttributeIndex(dwarf::DW_AT_const_value)) {
+ MyInfo.InDebugMap = true;
+ return Flags | TF_Keep;
+ }
+
+ // See if there is a relocation to a valid debug map entry inside
+ // this variable's location. The order is important here. We want to
+ // always check if the variable has a valid relocation, so that the
+ // DIEInfo is filled. However, we don't want a static variable in a
+ // function to force us to keep the enclosing function.
+ if (!RelocMgr.hasLiveMemoryLocation(DIE, MyInfo) ||
+ (Flags & TF_InFunctionScope))
+ return Flags;
+
+ if (Options.Verbose) {
+ outs() << "Keeping variable DIE:";
+ DIDumpOptions DumpOpts;
+ DumpOpts.ChildRecurseDepth = 0;
+ DumpOpts.Verbose = Options.Verbose;
+ DIE.dump(outs(), 8 /* Indent */, DumpOpts);
+ }
+
+ return Flags | TF_Keep;
+}
+
+/// Check if a function describing DIE should be kept.
+/// \returns updated TraversalFlags.
+unsigned DWARFLinker::shouldKeepSubprogramDIE(
+ AddressesMap &RelocMgr, RangesTy &Ranges, const DWARFDie &DIE,
+ const DWARFFile &File, CompileUnit &Unit, CompileUnit::DIEInfo &MyInfo,
+ unsigned Flags) {
+ Flags |= TF_InFunctionScope;
+
+ auto LowPc = dwarf::toAddress(DIE.find(dwarf::DW_AT_low_pc));
+ if (!LowPc)
+ return Flags;
+
+ assert(LowPc.hasValue() && "low_pc attribute is not an address.");
+ if (!RelocMgr.hasLiveAddressRange(DIE, MyInfo))
+ return Flags;
+
+ if (Options.Verbose) {
+ outs() << "Keeping subprogram DIE:";
+ DIDumpOptions DumpOpts;
+ DumpOpts.ChildRecurseDepth = 0;
+ DumpOpts.Verbose = Options.Verbose;
+ DIE.dump(outs(), 8 /* Indent */, DumpOpts);
+ }
+
+ if (DIE.getTag() == dwarf::DW_TAG_label) {
+ if (Unit.hasLabelAt(*LowPc))
+ return Flags;
+
+ DWARFUnit &OrigUnit = Unit.getOrigUnit();
+ // FIXME: dsymutil-classic compat. dsymutil-classic doesn't consider labels
+ // that don't fall into the CU's aranges. This is wrong IMO. Debug info
+ // generation bugs aside, this is really wrong in the case of labels, where
+ // a label marking the end of a function will have a PC == CU's high_pc.
+ if (dwarf::toAddress(OrigUnit.getUnitDIE().find(dwarf::DW_AT_high_pc))
+ .getValueOr(UINT64_MAX) <= LowPc)
+ return Flags;
+ Unit.addLabelLowPc(*LowPc, MyInfo.AddrAdjust);
+ return Flags | TF_Keep;
+ }
+
+ Flags |= TF_Keep;
+
+ Optional<uint64_t> HighPc = DIE.getHighPC(*LowPc);
+ if (!HighPc) {
+ reportWarning("Function without high_pc. Range will be discarded.\n", File,
+ &DIE);
+ return Flags;
+ }
+
+ // Replace the debug map range with a more accurate one.
+ Ranges[*LowPc] = ObjFileAddressRange(*HighPc, MyInfo.AddrAdjust);
+ Unit.addFunctionRange(*LowPc, *HighPc, MyInfo.AddrAdjust);
+ return Flags;
+}
+
+/// Check if a DIE should be kept.
+/// \returns updated TraversalFlags.
+unsigned DWARFLinker::shouldKeepDIE(AddressesMap &RelocMgr, RangesTy &Ranges,
+ const DWARFDie &DIE, const DWARFFile &File,
+ CompileUnit &Unit,
+ CompileUnit::DIEInfo &MyInfo,
+ unsigned Flags) {
+ switch (DIE.getTag()) {
+ case dwarf::DW_TAG_constant:
+ case dwarf::DW_TAG_variable:
+ return shouldKeepVariableDIE(RelocMgr, DIE, MyInfo, Flags);
+ case dwarf::DW_TAG_subprogram:
+ case dwarf::DW_TAG_label:
+ return shouldKeepSubprogramDIE(RelocMgr, Ranges, DIE, File, Unit, MyInfo,
+ Flags);
+ case dwarf::DW_TAG_base_type:
+ // DWARF Expressions may reference basic types, but scanning them
+ // is expensive. Basic types are tiny, so just keep all of them.
+ case dwarf::DW_TAG_imported_module:
+ case dwarf::DW_TAG_imported_declaration:
+ case dwarf::DW_TAG_imported_unit:
+ // We always want to keep these.
+ return Flags | TF_Keep;
+ default:
+ break;
+ }
+
+ return Flags;
+}
+
+/// Helper that updates the completeness of the current DIE based on the
+/// completeness of one of its children. It depends on the incompleteness of
+/// the children already being computed.
+static void updateChildIncompleteness(const DWARFDie &Die, CompileUnit &CU,
+ CompileUnit::DIEInfo &ChildInfo) {
+ switch (Die.getTag()) {
+ case dwarf::DW_TAG_structure_type:
+ case dwarf::DW_TAG_class_type:
+ break;
+ default:
+ return;
+ }
+
+ CompileUnit::DIEInfo &MyInfo = CU.getInfo(Die);
+
+ if (ChildInfo.Incomplete || ChildInfo.Prune)
+ MyInfo.Incomplete = true;
+}
+
+/// Helper that updates the completeness of the current DIE based on the
+/// completeness of the DIEs it references. It depends on the incompleteness of
+/// the referenced DIE already being computed.
+static void updateRefIncompleteness(const DWARFDie &Die, CompileUnit &CU,
+ CompileUnit::DIEInfo &RefInfo) {
+ switch (Die.getTag()) {
+ case dwarf::DW_TAG_typedef:
+ case dwarf::DW_TAG_member:
+ case dwarf::DW_TAG_reference_type:
+ case dwarf::DW_TAG_ptr_to_member_type:
+ case dwarf::DW_TAG_pointer_type:
+ break;
+ default:
+ return;
+ }
+
+ CompileUnit::DIEInfo &MyInfo = CU.getInfo(Die);
+
+ if (MyInfo.Incomplete)
+ return;
+
+ if (RefInfo.Incomplete)
+ MyInfo.Incomplete = true;
+}
+
+/// Look at the children of the given DIE and decide whether they should be
+/// kept.
+void DWARFLinker::lookForChildDIEsToKeep(
+ const DWARFDie &Die, CompileUnit &CU, unsigned Flags,
+ SmallVectorImpl<WorklistItem> &Worklist) {
+ // The TF_ParentWalk flag tells us that we are currently walking up the
+ // parent chain of a required DIE, and we don't want to mark all the children
+ // of the parents as kept (consider for example a DW_TAG_namespace node in
+ // the parent chain). There are however a set of DIE types for which we want
+ // to ignore that directive and still walk their children.
+ if (dieNeedsChildrenToBeMeaningful(Die.getTag()))
+ Flags &= ~DWARFLinker::TF_ParentWalk;
+
+ // We're finished if this DIE has no children or we're walking the parent
+ // chain.
+ if (!Die.hasChildren() || (Flags & DWARFLinker::TF_ParentWalk))
+ return;
+
+ // Add children in reverse order to the worklist to effectively process them
+ // in order.
+ for (auto Child : reverse(Die.children())) {
+ // Add a worklist item before every child to calculate incompleteness right
+ // after the current child is processed.
+ CompileUnit::DIEInfo &ChildInfo = CU.getInfo(Child);
+ Worklist.emplace_back(Die, CU, WorklistItemType::UpdateChildIncompleteness,
+ &ChildInfo);
+ Worklist.emplace_back(Child, CU, Flags);
+ }
+}
+
+/// Look at DIEs referenced by the given DIE and decide whether they should be
+/// kept. All DIEs referenced through attributes should be kept.
+void DWARFLinker::lookForRefDIEsToKeep(
+ const DWARFDie &Die, CompileUnit &CU, unsigned Flags,
+ const UnitListTy &Units, const DWARFFile &File,
+ SmallVectorImpl<WorklistItem> &Worklist) {
+ bool UseOdr = (Flags & DWARFLinker::TF_DependencyWalk)
+ ? (Flags & DWARFLinker::TF_ODR)
+ : CU.hasODR();
+ DWARFUnit &Unit = CU.getOrigUnit();
+ DWARFDataExtractor Data = Unit.getDebugInfoExtractor();
+ const auto *Abbrev = Die.getAbbreviationDeclarationPtr();
+ uint64_t Offset = Die.getOffset() + getULEB128Size(Abbrev->getCode());
+
+ SmallVector<std::pair<DWARFDie, CompileUnit &>, 4> ReferencedDIEs;
+ for (const auto &AttrSpec : Abbrev->attributes()) {
+ DWARFFormValue Val(AttrSpec.Form);
+ if (!Val.isFormClass(DWARFFormValue::FC_Reference) ||
+ AttrSpec.Attr == dwarf::DW_AT_sibling) {
+ DWARFFormValue::skipValue(AttrSpec.Form, Data, &Offset,
+ Unit.getFormParams());
+ continue;
+ }
+
+ Val.extractValue(Data, &Offset, Unit.getFormParams(), &Unit);
+ CompileUnit *ReferencedCU;
+ if (auto RefDie =
+ resolveDIEReference(File, Units, Val, Die, ReferencedCU)) {
+ CompileUnit::DIEInfo &Info = ReferencedCU->getInfo(RefDie);
+ bool IsModuleRef = Info.Ctxt && Info.Ctxt->getCanonicalDIEOffset() &&
+ Info.Ctxt->isDefinedInClangModule();
+ // If the referenced DIE has a DeclContext that has already been
+ // emitted, then do not keep the one in this CU. We'll link to
+ // the canonical DIE in cloneDieReferenceAttribute.
+ //
+ // FIXME: compatibility with dsymutil-classic. UseODR shouldn't
+ // be necessary and could be advantageously replaced by
+ // ReferencedCU->hasODR() && CU.hasODR().
+ //
+ // FIXME: compatibility with dsymutil-classic. There is no
+ // reason not to unique ref_addr references.
+ if (AttrSpec.Form != dwarf::DW_FORM_ref_addr && (UseOdr || IsModuleRef) &&
+ Info.Ctxt &&
+ Info.Ctxt != ReferencedCU->getInfo(Info.ParentIdx).Ctxt &&
+ Info.Ctxt->getCanonicalDIEOffset() && isODRAttribute(AttrSpec.Attr))
+ continue;
+
+ // Keep a module forward declaration if there is no definition.
+ if (!(isODRAttribute(AttrSpec.Attr) && Info.Ctxt &&
+ Info.Ctxt->getCanonicalDIEOffset()))
+ Info.Prune = false;
+ ReferencedDIEs.emplace_back(RefDie, *ReferencedCU);
+ }
+ }
+
+ unsigned ODRFlag = UseOdr ? DWARFLinker::TF_ODR : 0;
+
+ // Add referenced DIEs in reverse order to the worklist to effectively
+ // process them in order.
+ for (auto &P : reverse(ReferencedDIEs)) {
+ // Add a worklist item before every child to calculate incompleteness right
+ // after the current child is processed.
+ CompileUnit::DIEInfo &Info = P.second.getInfo(P.first);
+ Worklist.emplace_back(Die, CU, WorklistItemType::UpdateRefIncompleteness,
+ &Info);
+ Worklist.emplace_back(P.first, P.second,
+ DWARFLinker::TF_Keep |
+ DWARFLinker::TF_DependencyWalk | ODRFlag);
+ }
+}
+
+/// Look at the parent of the given DIE and decide whether they should be kept.
+void DWARFLinker::lookForParentDIEsToKeep(
+ unsigned AncestorIdx, CompileUnit &CU, unsigned Flags,
+ SmallVectorImpl<WorklistItem> &Worklist) {
+ // Stop if we encounter an ancestor that's already marked as kept.
+ if (CU.getInfo(AncestorIdx).Keep)
+ return;
+
+ DWARFUnit &Unit = CU.getOrigUnit();
+ DWARFDie ParentDIE = Unit.getDIEAtIndex(AncestorIdx);
+ Worklist.emplace_back(CU.getInfo(AncestorIdx).ParentIdx, CU, Flags);
+ Worklist.emplace_back(ParentDIE, CU, Flags);
+}
+
+/// Recursively walk the \p DIE tree and look for DIEs to keep. Store that
+/// information in \p CU's DIEInfo.
+///
+/// This function is the entry point of the DIE selection algorithm. It is
+/// expected to walk the DIE tree in file order and (through the mediation of
+/// its helpers) call the AddressesMap liveness checks on each DIE that might
+/// be a 'root DIE' (see the DWARFLinker class comment).
+///
+/// While walking the dependencies of root DIEs, this function is also called,
+/// but during these dependency walks the file order is not respected. The
+/// TF_DependencyWalk flag tells us which kind of traversal we are currently
+/// doing.
+///
+/// The recursive algorithm is implemented iteratively as a work list because
+/// very deep recursion could exhaust the stack for large projects. The work
+/// list acts as a scheduler for different types of work that need to be
+/// performed.
+///
+/// The recursive nature of the algorithm is simulated by running the "main"
+/// algorithm (LookForDIEsToKeep) followed by either looking at more DIEs
+/// (LookForChildDIEsToKeep, LookForRefDIEsToKeep, LookForParentDIEsToKeep) or
+/// fixing up a computed property (UpdateChildIncompleteness,
+/// UpdateRefIncompleteness).
+///
+/// Whether a DIE is incomplete is recorded in \p Cu's DIEInfo rather than
+/// being returned from this function.
+void DWARFLinker::lookForDIEsToKeep(AddressesMap &AddressesMap,
+ RangesTy &Ranges, const UnitListTy &Units,
+ const DWARFDie &Die, const DWARFFile &File,
+ CompileUnit &Cu, unsigned Flags) {
+ // LIFO work list.
+ SmallVector<WorklistItem, 4> Worklist;
+ Worklist.emplace_back(Die, Cu, Flags);
+
+ while (!Worklist.empty()) {
+ WorklistItem Current = Worklist.pop_back_val();
+
+ // Look at the worklist type to decide what kind of work to perform.
+ switch (Current.Type) {
+ case WorklistItemType::UpdateChildIncompleteness:
+ updateChildIncompleteness(Current.Die, Current.CU, *Current.OtherInfo);
+ continue;
+ case WorklistItemType::UpdateRefIncompleteness:
+ updateRefIncompleteness(Current.Die, Current.CU, *Current.OtherInfo);
+ continue;
+ case WorklistItemType::LookForChildDIEsToKeep:
+ lookForChildDIEsToKeep(Current.Die, Current.CU, Current.Flags, Worklist);
+ continue;
+ case WorklistItemType::LookForRefDIEsToKeep:
+ lookForRefDIEsToKeep(Current.Die, Current.CU, Current.Flags, Units, File,
+ Worklist);
+ continue;
+ case WorklistItemType::LookForParentDIEsToKeep:
+ lookForParentDIEsToKeep(Current.AncestorIdx, Current.CU, Current.Flags,
+ Worklist);
+ continue;
+ case WorklistItemType::LookForDIEsToKeep:
+ break;
+ }
+
+ unsigned Idx = Current.CU.getOrigUnit().getDIEIndex(Current.Die);
+ CompileUnit::DIEInfo &MyInfo = Current.CU.getInfo(Idx);
+
+ if (MyInfo.Prune)
+ continue;
+
+ // If the Keep flag is set, we are marking a required DIE's dependencies.
+ // If our target is already marked as kept, we're all set.
+ bool AlreadyKept = MyInfo.Keep;
+ if ((Current.Flags & TF_DependencyWalk) && AlreadyKept)
+ continue;
+
+    // We must not call shouldKeepDIE during a dependency walk, because it
+    // would screw up the relocation finding logic.
+ if (!(Current.Flags & TF_DependencyWalk))
+ Current.Flags = shouldKeepDIE(AddressesMap, Ranges, Current.Die, File,
+ Current.CU, MyInfo, Current.Flags);
+
+ // Finish by looking for child DIEs. Because of the LIFO worklist we need
+ // to schedule that work before any subsequent items are added to the
+ // worklist.
+ Worklist.emplace_back(Current.Die, Current.CU, Current.Flags,
+ WorklistItemType::LookForChildDIEsToKeep);
+
+ if (AlreadyKept || !(Current.Flags & TF_Keep))
+ continue;
+
+ // If it is a newly kept DIE mark it as well as all its dependencies as
+ // kept.
+ MyInfo.Keep = true;
+
+ // We're looking for incomplete types.
+ MyInfo.Incomplete =
+ Current.Die.getTag() != dwarf::DW_TAG_subprogram &&
+ Current.Die.getTag() != dwarf::DW_TAG_member &&
+ dwarf::toUnsigned(Current.Die.find(dwarf::DW_AT_declaration), 0);
+
+ // After looking at the parent chain, look for referenced DIEs. Because of
+ // the LIFO worklist we need to schedule that work before any subsequent
+ // items are added to the worklist.
+ Worklist.emplace_back(Current.Die, Current.CU, Current.Flags,
+ WorklistItemType::LookForRefDIEsToKeep);
+
+ bool UseOdr = (Current.Flags & TF_DependencyWalk) ? (Current.Flags & TF_ODR)
+ : Current.CU.hasODR();
+ unsigned ODRFlag = UseOdr ? TF_ODR : 0;
+ unsigned ParFlags = TF_ParentWalk | TF_Keep | TF_DependencyWalk | ODRFlag;
+
+ // Now schedule the parent walk.
+ Worklist.emplace_back(MyInfo.ParentIdx, Current.CU, ParFlags);
+ }
+}
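+
+// Scheduling note: for a newly kept DIE the loop above pushes the child walk,
+// then the reference walk, then the parent walk. Because the worklist is
+// LIFO, the DIE's parent chain is marked first, its referenced DIEs second,
+// and its children last.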
+
+/// Assign an abbreviation number to \p Abbrev.
+///
+/// Our DIEs get freed after every DebugMapObject has been processed,
+/// thus the FoldingSet we use to unique DIEAbbrevs cannot refer to
+/// the instances held by the DIEs. When we encounter an abbreviation
+/// that we don't know, we create a permanent copy of it.
+void DWARFLinker::assignAbbrev(DIEAbbrev &Abbrev) {
+ // Check the set for priors.
+ FoldingSetNodeID ID;
+ Abbrev.Profile(ID);
+ void *InsertToken;
+ DIEAbbrev *InSet = AbbreviationsSet.FindNodeOrInsertPos(ID, InsertToken);
+
+  // If a matching abbreviation already exists, reuse its number.
+ if (InSet) {
+ // Assign existing abbreviation number.
+ Abbrev.setNumber(InSet->getNumber());
+ } else {
+ // Add to abbreviation list.
+ Abbreviations.push_back(
+ std::make_unique<DIEAbbrev>(Abbrev.getTag(), Abbrev.hasChildren()));
+ for (const auto &Attr : Abbrev.getData())
+ Abbreviations.back()->AddAttribute(Attr.getAttribute(), Attr.getForm());
+ AbbreviationsSet.InsertNode(Abbreviations.back().get(), InsertToken);
+ // Assign the unique abbreviation number.
+ Abbrev.setNumber(Abbreviations.size());
+ Abbreviations.back()->setNumber(Abbreviations.size());
+ }
+}
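+
+// For example, the first abbreviation seen with the shape DW_TAG_variable
+// { DW_AT_name/DW_FORM_strp, DW_AT_type/DW_FORM_ref4 } is copied into
+// Abbreviations and assigned the next free number; any later abbreviation
+// with an identical profile reuses that number instead of growing the list.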
+
+unsigned DWARFLinker::DIECloner::cloneStringAttribute(
+ DIE &Die, AttributeSpec AttrSpec, const DWARFFormValue &Val,
+ const DWARFUnit &U, OffsetsStringPool &StringPool, AttributesInfo &Info) {
+ Optional<const char *> String = Val.getAsCString();
+ if (!String)
+ return 0;
+
+ // Switch everything to out of line strings.
+ auto StringEntry = StringPool.getEntry(*String);
+
+ // Update attributes info.
+ if (AttrSpec.Attr == dwarf::DW_AT_name)
+ Info.Name = StringEntry;
+ else if (AttrSpec.Attr == dwarf::DW_AT_MIPS_linkage_name ||
+ AttrSpec.Attr == dwarf::DW_AT_linkage_name)
+ Info.MangledName = StringEntry;
+
+ Die.addValue(DIEAlloc, dwarf::Attribute(AttrSpec.Attr), dwarf::DW_FORM_strp,
+ DIEInteger(StringEntry.getOffset()));
+
+ return 4;
+}
+
+unsigned DWARFLinker::DIECloner::cloneDieReferenceAttribute(
+ DIE &Die, const DWARFDie &InputDIE, AttributeSpec AttrSpec,
+ unsigned AttrSize, const DWARFFormValue &Val, const DWARFFile &File,
+ CompileUnit &Unit) {
+ const DWARFUnit &U = Unit.getOrigUnit();
+ uint64_t Ref = *Val.getAsReference();
+
+ DIE *NewRefDie = nullptr;
+ CompileUnit *RefUnit = nullptr;
+ DeclContext *Ctxt = nullptr;
+
+ DWARFDie RefDie =
+ Linker.resolveDIEReference(File, CompileUnits, Val, InputDIE, RefUnit);
+
+ // If the referenced DIE is not found, drop the attribute.
+ if (!RefDie || AttrSpec.Attr == dwarf::DW_AT_sibling)
+ return 0;
+
+ CompileUnit::DIEInfo &RefInfo = RefUnit->getInfo(RefDie);
+
+ // If we already have emitted an equivalent DeclContext, just point
+ // at it.
+ if (isODRAttribute(AttrSpec.Attr)) {
+ Ctxt = RefInfo.Ctxt;
+ if (Ctxt && Ctxt->getCanonicalDIEOffset()) {
+ DIEInteger Attr(Ctxt->getCanonicalDIEOffset());
+ Die.addValue(DIEAlloc, dwarf::Attribute(AttrSpec.Attr),
+ dwarf::DW_FORM_ref_addr, Attr);
+ return U.getRefAddrByteSize();
+ }
+ }
+
+ if (!RefInfo.Clone) {
+ assert(Ref > InputDIE.getOffset());
+ // We haven't cloned this DIE yet. Just create an empty one and
+ // store it. It'll get really cloned when we process it.
+ RefInfo.Clone = DIE::get(DIEAlloc, dwarf::Tag(RefDie.getTag()));
+ }
+ NewRefDie = RefInfo.Clone;
+
+ if (AttrSpec.Form == dwarf::DW_FORM_ref_addr ||
+ (Unit.hasODR() && isODRAttribute(AttrSpec.Attr))) {
+ // We cannot currently rely on a DIEEntry to emit ref_addr
+ // references, because the implementation calls back to DwarfDebug
+ // to find the unit offset. (We don't have a DwarfDebug)
+ // FIXME: we should be able to design DIEEntry reliance on
+ // DwarfDebug away.
+ uint64_t Attr;
+ if (Ref < InputDIE.getOffset()) {
+ // We must have already cloned that DIE.
+ uint32_t NewRefOffset =
+ RefUnit->getStartOffset() + NewRefDie->getOffset();
+ Attr = NewRefOffset;
+ Die.addValue(DIEAlloc, dwarf::Attribute(AttrSpec.Attr),
+ dwarf::DW_FORM_ref_addr, DIEInteger(Attr));
+ } else {
+ // A forward reference. Note and fixup later.
+ Attr = 0xBADDEF;
+ Unit.noteForwardReference(
+ NewRefDie, RefUnit, Ctxt,
+ Die.addValue(DIEAlloc, dwarf::Attribute(AttrSpec.Attr),
+ dwarf::DW_FORM_ref_addr, DIEInteger(Attr)));
+ }
+ return U.getRefAddrByteSize();
+ }
+
+ Die.addValue(DIEAlloc, dwarf::Attribute(AttrSpec.Attr),
+ dwarf::Form(AttrSpec.Form), DIEEntry(*NewRefDie));
+
+ return AttrSize;
+}
+
+void DWARFLinker::DIECloner::cloneExpression(
+ DataExtractor &Data, DWARFExpression Expression, const DWARFFile &File,
+ CompileUnit &Unit, SmallVectorImpl<uint8_t> &OutputBuffer) {
+ using Encoding = DWARFExpression::Operation::Encoding;
+
+ uint64_t OpOffset = 0;
+ for (auto &Op : Expression) {
+ auto Description = Op.getDescription();
+ // DW_OP_const_type is variable-length and has 3
+ // operands. DWARFExpression thus far only supports 2.
+ auto Op0 = Description.Op[0];
+ auto Op1 = Description.Op[1];
+ if ((Op0 == Encoding::BaseTypeRef && Op1 != Encoding::SizeNA) ||
+ (Op1 == Encoding::BaseTypeRef && Op0 != Encoding::Size1))
+ Linker.reportWarning("Unsupported DW_OP encoding.", File);
+
+ if ((Op0 == Encoding::BaseTypeRef && Op1 == Encoding::SizeNA) ||
+ (Op1 == Encoding::BaseTypeRef && Op0 == Encoding::Size1)) {
+ // This code assumes that the other non-typeref operand fits into 1 byte.
+ assert(OpOffset < Op.getEndOffset());
+ uint32_t ULEBsize = Op.getEndOffset() - OpOffset - 1;
+ assert(ULEBsize <= 16);
+
+ // Copy over the operation.
+ OutputBuffer.push_back(Op.getCode());
+ uint64_t RefOffset;
+ if (Op1 == Encoding::SizeNA) {
+ RefOffset = Op.getRawOperand(0);
+ } else {
+ OutputBuffer.push_back(Op.getRawOperand(0));
+ RefOffset = Op.getRawOperand(1);
+ }
+ uint32_t Offset = 0;
+ // Look up the base type. For DW_OP_convert, the operand may be 0 to
+ // instead indicate the generic type. The same holds for
+ // DW_OP_reinterpret, which is currently not supported.
+ if (RefOffset > 0 || Op.getCode() != dwarf::DW_OP_convert) {
+ auto RefDie = Unit.getOrigUnit().getDIEForOffset(RefOffset);
+ CompileUnit::DIEInfo &Info = Unit.getInfo(RefDie);
+ if (DIE *Clone = Info.Clone)
+ Offset = Clone->getOffset();
+ else
+ Linker.reportWarning(
+ "base type ref doesn't point to DW_TAG_base_type.", File);
+ }
+ uint8_t ULEB[16];
+ unsigned RealSize = encodeULEB128(Offset, ULEB, ULEBsize);
+ if (RealSize > ULEBsize) {
+ // Emit the generic type as a fallback.
+ RealSize = encodeULEB128(0, ULEB, ULEBsize);
+ Linker.reportWarning("base type ref doesn't fit.", File);
+ }
+ assert(RealSize == ULEBsize && "padding failed");
+ ArrayRef<uint8_t> ULEBbytes(ULEB, ULEBsize);
+ OutputBuffer.append(ULEBbytes.begin(), ULEBbytes.end());
+ } else {
+ // Copy over everything else unmodified.
+ StringRef Bytes = Data.getData().slice(OpOffset, Op.getEndOffset());
+ OutputBuffer.append(Bytes.begin(), Bytes.end());
+ }
+ OpOffset = Op.getEndOffset();
+ }
+}
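+
+// Example of the padding above: a remapped base type offset of 0x3b written
+// into a 2-byte operand slot is emitted as the padded ULEB128 sequence
+// {0xbb, 0x00}, keeping the expression the same size as the input so that
+// subsequent operation offsets do not shift.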
+
+unsigned DWARFLinker::DIECloner::cloneBlockAttribute(
+ DIE &Die, const DWARFFile &File, CompileUnit &Unit, AttributeSpec AttrSpec,
+ const DWARFFormValue &Val, unsigned AttrSize, bool IsLittleEndian) {
+ DIEValueList *Attr;
+ DIEValue Value;
+ DIELoc *Loc = nullptr;
+ DIEBlock *Block = nullptr;
+ if (AttrSpec.Form == dwarf::DW_FORM_exprloc) {
+ Loc = new (DIEAlloc) DIELoc;
+ Linker.DIELocs.push_back(Loc);
+ } else {
+ Block = new (DIEAlloc) DIEBlock;
+ Linker.DIEBlocks.push_back(Block);
+ }
+ Attr = Loc ? static_cast<DIEValueList *>(Loc)
+ : static_cast<DIEValueList *>(Block);
+
+ if (Loc)
+ Value = DIEValue(dwarf::Attribute(AttrSpec.Attr),
+ dwarf::Form(AttrSpec.Form), Loc);
+ else
+ Value = DIEValue(dwarf::Attribute(AttrSpec.Attr),
+ dwarf::Form(AttrSpec.Form), Block);
+
+ // If the block is a DWARF Expression, clone it into the temporary
+ // buffer using cloneExpression(), otherwise copy the data directly.
+ SmallVector<uint8_t, 32> Buffer;
+ ArrayRef<uint8_t> Bytes = *Val.getAsBlock();
+ if (DWARFAttribute::mayHaveLocationDescription(AttrSpec.Attr) &&
+ (Val.isFormClass(DWARFFormValue::FC_Block) ||
+ Val.isFormClass(DWARFFormValue::FC_Exprloc))) {
+ DWARFUnit &OrigUnit = Unit.getOrigUnit();
+ DataExtractor Data(StringRef((const char *)Bytes.data(), Bytes.size()),
+ IsLittleEndian, OrigUnit.getAddressByteSize());
+ DWARFExpression Expr(Data, OrigUnit.getAddressByteSize(),
+ OrigUnit.getFormParams().Format);
+ cloneExpression(Data, Expr, File, Unit, Buffer);
+ Bytes = Buffer;
+ }
+ for (auto Byte : Bytes)
+ Attr->addValue(DIEAlloc, static_cast<dwarf::Attribute>(0),
+ dwarf::DW_FORM_data1, DIEInteger(Byte));
+
+ // FIXME: If DIEBlock and DIELoc just reuses the Size field of
+ // the DIE class, this "if" could be replaced by
+ // Attr->setSize(Bytes.size()).
+ if (Loc)
+ Loc->setSize(Bytes.size());
+ else
+ Block->setSize(Bytes.size());
+
+ Die.addValue(DIEAlloc, Value);
+ return AttrSize;
+}
+
+unsigned DWARFLinker::DIECloner::cloneAddressAttribute(
+ DIE &Die, AttributeSpec AttrSpec, const DWARFFormValue &Val,
+ const CompileUnit &Unit, AttributesInfo &Info) {
+ dwarf::Form Form = AttrSpec.Form;
+ uint64_t Addr = *Val.getAsAddress();
+
+ if (LLVM_UNLIKELY(Linker.Options.Update)) {
+ if (AttrSpec.Attr == dwarf::DW_AT_low_pc)
+ Info.HasLowPc = true;
+ Die.addValue(DIEAlloc, dwarf::Attribute(AttrSpec.Attr),
+ dwarf::Form(AttrSpec.Form), DIEInteger(Addr));
+ return Unit.getOrigUnit().getAddressByteSize();
+ }
+
+ if (AttrSpec.Attr == dwarf::DW_AT_low_pc) {
+ if (Die.getTag() == dwarf::DW_TAG_inlined_subroutine ||
+ Die.getTag() == dwarf::DW_TAG_lexical_block)
+ // The low_pc of a block or inline subroutine might get
+ // relocated because it happens to match the low_pc of the
+ // enclosing subprogram. To prevent issues with that, always use
+ // the low_pc from the input DIE if relocations have been applied.
+ Addr = (Info.OrigLowPc != std::numeric_limits<uint64_t>::max()
+ ? Info.OrigLowPc
+ : Addr) +
+ Info.PCOffset;
+ else if (Die.getTag() == dwarf::DW_TAG_compile_unit) {
+ Addr = Unit.getLowPc();
+ if (Addr == std::numeric_limits<uint64_t>::max())
+ return 0;
+ }
+ Info.HasLowPc = true;
+ } else if (AttrSpec.Attr == dwarf::DW_AT_high_pc) {
+ if (Die.getTag() == dwarf::DW_TAG_compile_unit) {
+ if (uint64_t HighPc = Unit.getHighPc())
+ Addr = HighPc;
+ else
+ return 0;
+ } else
+ // If we have a high_pc recorded for the input DIE, use
+      // it. Otherwise (when no relocations were applied) just use the
+ // one we just decoded.
+ Addr = (Info.OrigHighPc ? Info.OrigHighPc : Addr) + Info.PCOffset;
+ } else if (AttrSpec.Attr == dwarf::DW_AT_call_return_pc) {
+ // Relocate a return PC address within a call site entry.
+ if (Die.getTag() == dwarf::DW_TAG_call_site)
+ Addr = (Info.OrigCallReturnPc ? Info.OrigCallReturnPc : Addr) +
+ Info.PCOffset;
+ } else if (AttrSpec.Attr == dwarf::DW_AT_call_pc) {
+ // Relocate the address of a branch instruction within a call site entry.
+ if (Die.getTag() == dwarf::DW_TAG_call_site)
+ Addr = (Info.OrigCallPc ? Info.OrigCallPc : Addr) + Info.PCOffset;
+ }
+
+ // If this is an indexed address emit the relocated address.
+ if (Form == dwarf::DW_FORM_addrx) {
+ if (llvm::Expected<uint64_t> RelocAddr =
+ ObjFile.Addresses->relocateIndexedAddr(Addr)) {
+ Addr = *RelocAddr;
+ Form = dwarf::DW_FORM_addr;
+ } else {
+ Linker.reportWarning(toString(RelocAddr.takeError()), ObjFile);
+ }
+ }
+
+ Die.addValue(DIEAlloc, static_cast<dwarf::Attribute>(AttrSpec.Attr),
+ static_cast<dwarf::Form>(Form), DIEInteger(Addr));
+ return Unit.getOrigUnit().getAddressByteSize();
+}
+
+unsigned DWARFLinker::DIECloner::cloneScalarAttribute(
+ DIE &Die, const DWARFDie &InputDIE, const DWARFFile &File,
+ CompileUnit &Unit, AttributeSpec AttrSpec, const DWARFFormValue &Val,
+ unsigned AttrSize, AttributesInfo &Info) {
+ uint64_t Value;
+
+ if (LLVM_UNLIKELY(Linker.Options.Update)) {
+ if (auto OptionalValue = Val.getAsUnsignedConstant())
+ Value = *OptionalValue;
+ else if (auto OptionalValue = Val.getAsSignedConstant())
+ Value = *OptionalValue;
+ else if (auto OptionalValue = Val.getAsSectionOffset())
+ Value = *OptionalValue;
+ else {
+ Linker.reportWarning(
+ "Unsupported scalar attribute form. Dropping attribute.", File,
+ &InputDIE);
+ return 0;
+ }
+ if (AttrSpec.Attr == dwarf::DW_AT_declaration && Value)
+ Info.IsDeclaration = true;
+ Die.addValue(DIEAlloc, dwarf::Attribute(AttrSpec.Attr),
+ dwarf::Form(AttrSpec.Form), DIEInteger(Value));
+ return AttrSize;
+ }
+
+ if (AttrSpec.Attr == dwarf::DW_AT_high_pc &&
+ Die.getTag() == dwarf::DW_TAG_compile_unit) {
+ if (Unit.getLowPc() == -1ULL)
+ return 0;
+    // DWARF >= 4 high_pc is a size, not an address.
+ Value = Unit.getHighPc() - Unit.getLowPc();
+ } else if (AttrSpec.Form == dwarf::DW_FORM_sec_offset)
+ Value = *Val.getAsSectionOffset();
+ else if (AttrSpec.Form == dwarf::DW_FORM_sdata)
+ Value = *Val.getAsSignedConstant();
+ else if (auto OptionalValue = Val.getAsUnsignedConstant())
+ Value = *OptionalValue;
+ else {
+ Linker.reportWarning(
+ "Unsupported scalar attribute form. Dropping attribute.", File,
+ &InputDIE);
+ return 0;
+ }
+ PatchLocation Patch =
+ Die.addValue(DIEAlloc, dwarf::Attribute(AttrSpec.Attr),
+ dwarf::Form(AttrSpec.Form), DIEInteger(Value));
+ if (AttrSpec.Attr == dwarf::DW_AT_ranges) {
+ Unit.noteRangeAttribute(Die, Patch);
+ Info.HasRanges = true;
+ }
+
+ // A more generic way to check for location attributes would be
+ // nice, but it's very unlikely that any other attribute needs a
+ // location list.
+ // FIXME: use DWARFAttribute::mayHaveLocationDescription().
+ else if (AttrSpec.Attr == dwarf::DW_AT_location ||
+ AttrSpec.Attr == dwarf::DW_AT_frame_base) {
+ Unit.noteLocationAttribute(Patch, Info.PCOffset);
+ } else if (AttrSpec.Attr == dwarf::DW_AT_declaration && Value)
+ Info.IsDeclaration = true;
+
+ return AttrSize;
+}
+
+/// Clone \p InputDIE's attribute described by \p AttrSpec with
+/// value \p Val, and add it to \p Die.
+/// \returns the size of the cloned attribute.
+unsigned DWARFLinker::DIECloner::cloneAttribute(
+ DIE &Die, const DWARFDie &InputDIE, const DWARFFile &File,
+ CompileUnit &Unit, OffsetsStringPool &StringPool, const DWARFFormValue &Val,
+ const AttributeSpec AttrSpec, unsigned AttrSize, AttributesInfo &Info,
+ bool IsLittleEndian) {
+ const DWARFUnit &U = Unit.getOrigUnit();
+
+ switch (AttrSpec.Form) {
+ case dwarf::DW_FORM_strp:
+ case dwarf::DW_FORM_string:
+ case dwarf::DW_FORM_strx:
+ case dwarf::DW_FORM_strx1:
+ case dwarf::DW_FORM_strx2:
+ case dwarf::DW_FORM_strx3:
+ case dwarf::DW_FORM_strx4:
+ return cloneStringAttribute(Die, AttrSpec, Val, U, StringPool, Info);
+ case dwarf::DW_FORM_ref_addr:
+ case dwarf::DW_FORM_ref1:
+ case dwarf::DW_FORM_ref2:
+ case dwarf::DW_FORM_ref4:
+ case dwarf::DW_FORM_ref8:
+ return cloneDieReferenceAttribute(Die, InputDIE, AttrSpec, AttrSize, Val,
+ File, Unit);
+ case dwarf::DW_FORM_block:
+ case dwarf::DW_FORM_block1:
+ case dwarf::DW_FORM_block2:
+ case dwarf::DW_FORM_block4:
+ case dwarf::DW_FORM_exprloc:
+ return cloneBlockAttribute(Die, File, Unit, AttrSpec, Val, AttrSize,
+ IsLittleEndian);
+ case dwarf::DW_FORM_addr:
+ case dwarf::DW_FORM_addrx:
+ return cloneAddressAttribute(Die, AttrSpec, Val, Unit, Info);
+ case dwarf::DW_FORM_data1:
+ case dwarf::DW_FORM_data2:
+ case dwarf::DW_FORM_data4:
+ case dwarf::DW_FORM_data8:
+ case dwarf::DW_FORM_udata:
+ case dwarf::DW_FORM_sdata:
+ case dwarf::DW_FORM_sec_offset:
+ case dwarf::DW_FORM_flag:
+ case dwarf::DW_FORM_flag_present:
+ return cloneScalarAttribute(Die, InputDIE, File, Unit, AttrSpec, Val,
+ AttrSize, Info);
+ default:
+ Linker.reportWarning("Unsupported attribute form " +
+ dwarf::FormEncodingString(AttrSpec.Form) +
+ " in cloneAttribute. Dropping.",
+ File, &InputDIE);
+ }
+
+ return 0;
+}
+
+static bool isObjCSelector(StringRef Name) {
+ return Name.size() > 2 && (Name[0] == '-' || Name[0] == '+') &&
+ (Name[1] == '[');
+}
+
+void DWARFLinker::DIECloner::addObjCAccelerator(CompileUnit &Unit,
+ const DIE *Die,
+ DwarfStringPoolEntryRef Name,
+ OffsetsStringPool &StringPool,
+ bool SkipPubSection) {
+ assert(isObjCSelector(Name.getString()) && "not an objc selector");
+ // Objective C method or class function.
+ // "- [Class(Category) selector :withArg ...]"
+ StringRef ClassNameStart(Name.getString().drop_front(2));
+ size_t FirstSpace = ClassNameStart.find(' ');
+ if (FirstSpace == StringRef::npos)
+ return;
+
+ StringRef SelectorStart(ClassNameStart.data() + FirstSpace + 1);
+ if (!SelectorStart.size())
+ return;
+
+ StringRef Selector(SelectorStart.data(), SelectorStart.size() - 1);
+ Unit.addNameAccelerator(Die, StringPool.getEntry(Selector), SkipPubSection);
+
+ // Add an entry for the class name that points to this
+ // method/class function.
+ StringRef ClassName(ClassNameStart.data(), FirstSpace);
+ Unit.addObjCAccelerator(Die, StringPool.getEntry(ClassName), SkipPubSection);
+
+ if (ClassName[ClassName.size() - 1] == ')') {
+ size_t OpenParens = ClassName.find('(');
+ if (OpenParens != StringRef::npos) {
+ StringRef ClassNameNoCategory(ClassName.data(), OpenParens);
+ Unit.addObjCAccelerator(Die, StringPool.getEntry(ClassNameNoCategory),
+ SkipPubSection);
+
+ std::string MethodNameNoCategory(Name.getString().data(), OpenParens + 2);
+ // FIXME: The missing space here may be a bug, but
+ // dsymutil-classic also does it this way.
+ MethodNameNoCategory.append(std::string(SelectorStart));
+ Unit.addNameAccelerator(Die, StringPool.getEntry(MethodNameNoCategory),
+ SkipPubSection);
+ }
+ }
+}
+
+static bool
+shouldSkipAttribute(DWARFAbbreviationDeclaration::AttributeSpec AttrSpec,
+ uint16_t Tag, bool InDebugMap, bool SkipPC,
+ bool InFunctionScope) {
+ switch (AttrSpec.Attr) {
+ default:
+ return false;
+ case dwarf::DW_AT_low_pc:
+ case dwarf::DW_AT_high_pc:
+ case dwarf::DW_AT_ranges:
+ return SkipPC;
+ case dwarf::DW_AT_str_offsets_base:
+ // FIXME: Use the string offset table with Dwarf 5.
+ return true;
+ case dwarf::DW_AT_location:
+ case dwarf::DW_AT_frame_base:
+ // FIXME: for some reason dsymutil-classic keeps the location attributes
+ // when they are of block type (i.e. not location lists). This is totally
+ // wrong for globals where we will keep a wrong address. It is mostly
+ // harmless for locals, but there is no point in keeping these anyway when
+ // the function wasn't linked.
+ return (SkipPC || (!InFunctionScope && Tag == dwarf::DW_TAG_variable &&
+ !InDebugMap)) &&
+ !DWARFFormValue(AttrSpec.Form).isFormClass(DWARFFormValue::FC_Block);
+ }
+}
+
+DIE *DWARFLinker::DIECloner::cloneDIE(const DWARFDie &InputDIE,
+ const DWARFFile &File, CompileUnit &Unit,
+ OffsetsStringPool &StringPool,
+ int64_t PCOffset, uint32_t OutOffset,
+ unsigned Flags, bool IsLittleEndian,
+ DIE *Die) {
+ DWARFUnit &U = Unit.getOrigUnit();
+ unsigned Idx = U.getDIEIndex(InputDIE);
+ CompileUnit::DIEInfo &Info = Unit.getInfo(Idx);
+
+ // Should the DIE appear in the output?
+ if (!Unit.getInfo(Idx).Keep)
+ return nullptr;
+
+ uint64_t Offset = InputDIE.getOffset();
+ assert(!(Die && Info.Clone) && "Can't supply a DIE and a cloned DIE");
+ if (!Die) {
+ // The DIE might have been already created by a forward reference
+ // (see cloneDieReferenceAttribute()).
+ if (!Info.Clone)
+ Info.Clone = DIE::get(DIEAlloc, dwarf::Tag(InputDIE.getTag()));
+ Die = Info.Clone;
+ }
+
+ assert(Die->getTag() == InputDIE.getTag());
+ Die->setOffset(OutOffset);
+ if ((Unit.hasODR() || Unit.isClangModule()) && !Info.Incomplete &&
+ Die->getTag() != dwarf::DW_TAG_namespace && Info.Ctxt &&
+ Info.Ctxt != Unit.getInfo(Info.ParentIdx).Ctxt &&
+ !Info.Ctxt->getCanonicalDIEOffset()) {
+ // We are about to emit a DIE that is the root of its own valid
+ // DeclContext tree. Make the current offset the canonical offset
+ // for this context.
+ Info.Ctxt->setCanonicalDIEOffset(OutOffset + Unit.getStartOffset());
+ }
+
+ // Extract and clone every attribute.
+ DWARFDataExtractor Data = U.getDebugInfoExtractor();
+ // Point to the next DIE (generally there is always at least a NULL
+ // entry after the current one). If this is a lone
+ // DW_TAG_compile_unit without any children, point to the next unit.
+ uint64_t NextOffset = (Idx + 1 < U.getNumDIEs())
+ ? U.getDIEAtIndex(Idx + 1).getOffset()
+ : U.getNextUnitOffset();
+ AttributesInfo AttrInfo;
+
+ // We could copy the data only if we need to apply a relocation to it. After
+ // testing, it seems there is no performance downside to doing the copy
+ // unconditionally, and it makes the code simpler.
+ SmallString<40> DIECopy(Data.getData().substr(Offset, NextOffset - Offset));
+ Data =
+ DWARFDataExtractor(DIECopy, Data.isLittleEndian(), Data.getAddressSize());
+
+ // Modify the copy with relocated addresses.
+ if (ObjFile.Addresses->areRelocationsResolved() &&
+ ObjFile.Addresses->applyValidRelocs(DIECopy, Offset,
+ Data.isLittleEndian())) {
+ // If we applied relocations, we store the value of high_pc that was
+ // potentially stored in the input DIE. If high_pc is an address
+ // (Dwarf version == 2), then it might have been relocated to a
+ // totally unrelated value (because the end address in the object
+ // file might be start address of another function which got moved
+ // independently by the linker). The computation of the actual
+ // high_pc value is done in cloneAddressAttribute().
+ AttrInfo.OrigHighPc =
+ dwarf::toAddress(InputDIE.find(dwarf::DW_AT_high_pc), 0);
+ // Also store the low_pc. It might get relocated in an
+ // inline_subprogram that happens at the beginning of its
+ // inlining function.
+ AttrInfo.OrigLowPc = dwarf::toAddress(InputDIE.find(dwarf::DW_AT_low_pc),
+ std::numeric_limits<uint64_t>::max());
+ AttrInfo.OrigCallReturnPc =
+ dwarf::toAddress(InputDIE.find(dwarf::DW_AT_call_return_pc), 0);
+ AttrInfo.OrigCallPc =
+ dwarf::toAddress(InputDIE.find(dwarf::DW_AT_call_pc), 0);
+ }
+
+ // Reset the Offset to 0 as we will be working on the local copy of
+ // the data.
+ Offset = 0;
+
+ const auto *Abbrev = InputDIE.getAbbreviationDeclarationPtr();
+ Offset += getULEB128Size(Abbrev->getCode());
+
+ // We are entering a subprogram. Get and propagate the PCOffset.
+ if (Die->getTag() == dwarf::DW_TAG_subprogram)
+ PCOffset = Info.AddrAdjust;
+ AttrInfo.PCOffset = PCOffset;
+
+ if (Abbrev->getTag() == dwarf::DW_TAG_subprogram) {
+ Flags |= TF_InFunctionScope;
+ if (!Info.InDebugMap && LLVM_LIKELY(!Update))
+ Flags |= TF_SkipPC;
+ }
+
+ bool Copied = false;
+ for (const auto &AttrSpec : Abbrev->attributes()) {
+ if (LLVM_LIKELY(!Update) &&
+ shouldSkipAttribute(AttrSpec, Die->getTag(), Info.InDebugMap,
+ Flags & TF_SkipPC, Flags & TF_InFunctionScope)) {
+ DWARFFormValue::skipValue(AttrSpec.Form, Data, &Offset,
+ U.getFormParams());
+ // FIXME: dsymutil-classic keeps the old abbreviation around
+ // even if it's not used. We can remove this (and the copyAbbrev
+ // helper) as soon as bit-for-bit compatibility is not a goal anymore.
+ if (!Copied) {
+ copyAbbrev(*InputDIE.getAbbreviationDeclarationPtr(), Unit.hasODR());
+ Copied = true;
+ }
+ continue;
+ }
+
+ DWARFFormValue Val(AttrSpec.Form);
+ uint64_t AttrSize = Offset;
+ Val.extractValue(Data, &Offset, U.getFormParams(), &U);
+ AttrSize = Offset - AttrSize;
+
+ OutOffset += cloneAttribute(*Die, InputDIE, File, Unit, StringPool, Val,
+ AttrSpec, AttrSize, AttrInfo, IsLittleEndian);
+ }
+
+ // Look for accelerator entries.
+ uint16_t Tag = InputDIE.getTag();
+ // FIXME: This is slightly wrong. An inline_subroutine without a
+ // low_pc, but with AT_ranges might be interesting to get into the
+ // accelerator tables too. For now stick with dsymutil's behavior.
+ if ((Info.InDebugMap || AttrInfo.HasLowPc || AttrInfo.HasRanges) &&
+ Tag != dwarf::DW_TAG_compile_unit &&
+ getDIENames(InputDIE, AttrInfo, StringPool,
+ Tag != dwarf::DW_TAG_inlined_subroutine)) {
+ if (AttrInfo.MangledName && AttrInfo.MangledName != AttrInfo.Name)
+ Unit.addNameAccelerator(Die, AttrInfo.MangledName,
+ Tag == dwarf::DW_TAG_inlined_subroutine);
+ if (AttrInfo.Name) {
+ if (AttrInfo.NameWithoutTemplate)
+ Unit.addNameAccelerator(Die, AttrInfo.NameWithoutTemplate,
+ /* SkipPubSection */ true);
+ Unit.addNameAccelerator(Die, AttrInfo.Name,
+ Tag == dwarf::DW_TAG_inlined_subroutine);
+ }
+ if (AttrInfo.Name && isObjCSelector(AttrInfo.Name.getString()))
+ addObjCAccelerator(Unit, Die, AttrInfo.Name, StringPool,
+ /* SkipPubSection =*/true);
+
+ } else if (Tag == dwarf::DW_TAG_namespace) {
+ if (!AttrInfo.Name)
+ AttrInfo.Name = StringPool.getEntry("(anonymous namespace)");
+ Unit.addNamespaceAccelerator(Die, AttrInfo.Name);
+ } else if (isTypeTag(Tag) && !AttrInfo.IsDeclaration &&
+ getDIENames(InputDIE, AttrInfo, StringPool) && AttrInfo.Name &&
+ AttrInfo.Name.getString()[0]) {
+ uint32_t Hash = hashFullyQualifiedName(InputDIE, Unit, File);
+ uint64_t RuntimeLang =
+ dwarf::toUnsigned(InputDIE.find(dwarf::DW_AT_APPLE_runtime_class))
+ .getValueOr(0);
+ bool ObjCClassIsImplementation =
+ (RuntimeLang == dwarf::DW_LANG_ObjC ||
+ RuntimeLang == dwarf::DW_LANG_ObjC_plus_plus) &&
+ dwarf::toUnsigned(InputDIE.find(dwarf::DW_AT_APPLE_objc_complete_type))
+ .getValueOr(0);
+ Unit.addTypeAccelerator(Die, AttrInfo.Name, ObjCClassIsImplementation,
+ Hash);
+ }
+
+ // Determine whether there are any children that we want to keep.
+ bool HasChildren = false;
+ for (auto Child : InputDIE.children()) {
+ unsigned Idx = U.getDIEIndex(Child);
+ if (Unit.getInfo(Idx).Keep) {
+ HasChildren = true;
+ break;
+ }
+ }
+
+ DIEAbbrev NewAbbrev = Die->generateAbbrev();
+ if (HasChildren)
+ NewAbbrev.setChildrenFlag(dwarf::DW_CHILDREN_yes);
+ // Assign a permanent abbrev number
+ Linker.assignAbbrev(NewAbbrev);
+ Die->setAbbrevNumber(NewAbbrev.getNumber());
+
+ // Add the size of the abbreviation number to the output offset.
+ OutOffset += getULEB128Size(Die->getAbbrevNumber());
+
+ if (!HasChildren) {
+ // Update our size.
+ Die->setSize(OutOffset - Die->getOffset());
+ return Die;
+ }
+
+ // Recursively clone children.
+ for (auto Child : InputDIE.children()) {
+ if (DIE *Clone = cloneDIE(Child, File, Unit, StringPool, PCOffset,
+ OutOffset, Flags, IsLittleEndian)) {
+ Die->addChild(Clone);
+ OutOffset = Clone->getOffset() + Clone->getSize();
+ }
+ }
+
+ // Account for the end of children marker.
+ OutOffset += sizeof(int8_t);
+ // Update our size.
+ Die->setSize(OutOffset - Die->getOffset());
+ return Die;
+}
+
+/// Patch the debug_ranges entries of the input object file that are
+/// relevant to \p Unit and emit them in the output file. Update the
+/// relevant attributes to point at the new entries.
+void DWARFLinker::patchRangesForUnit(const CompileUnit &Unit,
+ DWARFContext &OrigDwarf,
+ const DWARFFile &File) const {
+ DWARFDebugRangeList RangeList;
+ const auto &FunctionRanges = Unit.getFunctionRanges();
+ unsigned AddressSize = Unit.getOrigUnit().getAddressByteSize();
+ DWARFDataExtractor RangeExtractor(OrigDwarf.getDWARFObj(),
+ OrigDwarf.getDWARFObj().getRangesSection(),
+ OrigDwarf.isLittleEndian(), AddressSize);
+ auto InvalidRange = FunctionRanges.end(), CurrRange = InvalidRange;
+ DWARFUnit &OrigUnit = Unit.getOrigUnit();
+ auto OrigUnitDie = OrigUnit.getUnitDIE(false);
+ uint64_t OrigLowPc =
+ dwarf::toAddress(OrigUnitDie.find(dwarf::DW_AT_low_pc), -1ULL);
+  // Range addresses are based on the unit's low_pc. Compute the
+  // offset we need to apply to adapt to the new unit's low_pc.
+ int64_t UnitPcOffset = 0;
+ if (OrigLowPc != -1ULL)
+ UnitPcOffset = int64_t(OrigLowPc) - Unit.getLowPc();
+
+ for (const auto &RangeAttribute : Unit.getRangesAttributes()) {
+ uint64_t Offset = RangeAttribute.get();
+ RangeAttribute.set(TheDwarfEmitter->getRangesSectionSize());
+ if (Error E = RangeList.extract(RangeExtractor, &Offset)) {
+ llvm::consumeError(std::move(E));
+ reportWarning("invalid range list ignored.", File);
+ RangeList.clear();
+ }
+ const auto &Entries = RangeList.getEntries();
+ if (!Entries.empty()) {
+ const DWARFDebugRangeList::RangeListEntry &First = Entries.front();
+
+ if (CurrRange == InvalidRange ||
+ First.StartAddress + OrigLowPc < CurrRange.start() ||
+ First.StartAddress + OrigLowPc >= CurrRange.stop()) {
+ CurrRange = FunctionRanges.find(First.StartAddress + OrigLowPc);
+ if (CurrRange == InvalidRange ||
+ CurrRange.start() > First.StartAddress + OrigLowPc) {
+ reportWarning("no mapping for range.", File);
+ continue;
+ }
+ }
+ }
+
+ TheDwarfEmitter->emitRangesEntries(UnitPcOffset, OrigLowPc, CurrRange,
+ Entries, AddressSize);
+ }
+}
+
+/// Generate the debug_aranges entries for \p Unit and if the
+/// unit has a DW_AT_ranges attribute, also emit the debug_ranges
+/// contribution for this attribute.
+/// FIXME: this could actually be done right in patchRangesForUnit,
+/// but for the sake of initial bit-for-bit compatibility with legacy
+/// dsymutil, we have to do it in a delayed pass.
+void DWARFLinker::generateUnitRanges(CompileUnit &Unit) const {
+ auto Attr = Unit.getUnitRangesAttribute();
+ if (Attr)
+ Attr->set(TheDwarfEmitter->getRangesSectionSize());
+ TheDwarfEmitter->emitUnitRangesEntries(Unit, static_cast<bool>(Attr));
+}
+
+/// Insert the new line info sequence \p Seq into the current
+/// set of already linked line info \p Rows.
+static void insertLineSequence(std::vector<DWARFDebugLine::Row> &Seq,
+ std::vector<DWARFDebugLine::Row> &Rows) {
+ if (Seq.empty())
+ return;
+
+ if (!Rows.empty() && Rows.back().Address < Seq.front().Address) {
+ llvm::append_range(Rows, Seq);
+ Seq.clear();
+ return;
+ }
+
+ object::SectionedAddress Front = Seq.front().Address;
+ auto InsertPoint = partition_point(
+ Rows, [=](const DWARFDebugLine::Row &O) { return O.Address < Front; });
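+  // InsertPoint is the first already-linked row whose address is >= Front.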
+
+  // FIXME: this only removes the unneeded end_sequence if the
+  // sequences have been inserted in order. By using a global sort as
+  // described in patchLineTableForUnit() and delaying the end_sequence
+  // elimination to emitLineTableForUnit(), we could get rid of all of them.
+ if (InsertPoint != Rows.end() && InsertPoint->Address == Front &&
+ InsertPoint->EndSequence) {
+ *InsertPoint = Seq.front();
+ Rows.insert(InsertPoint + 1, Seq.begin() + 1, Seq.end());
+ } else {
+ Rows.insert(InsertPoint, Seq.begin(), Seq.end());
+ }
+
+ Seq.clear();
+}
+
+static void patchStmtList(DIE &Die, DIEInteger Offset) {
+ for (auto &V : Die.values())
+ if (V.getAttribute() == dwarf::DW_AT_stmt_list) {
+ V = DIEValue(V.getAttribute(), V.getForm(), Offset);
+ return;
+ }
+
+ llvm_unreachable("Didn't find DW_AT_stmt_list in cloned DIE!");
+}
+
+/// Extract the line table for \p Unit from \p OrigDwarf, and
+/// recreate a relocated version of these for the address ranges that
+/// are present in the binary.
+void DWARFLinker::patchLineTableForUnit(CompileUnit &Unit,
+ DWARFContext &OrigDwarf,
+ const DWARFFile &File) {
+ DWARFDie CUDie = Unit.getOrigUnit().getUnitDIE();
+ auto StmtList = dwarf::toSectionOffset(CUDie.find(dwarf::DW_AT_stmt_list));
+ if (!StmtList)
+ return;
+
+ // Update the cloned DW_AT_stmt_list with the correct debug_line offset.
+ if (auto *OutputDIE = Unit.getOutputUnitDIE())
+ patchStmtList(*OutputDIE,
+ DIEInteger(TheDwarfEmitter->getLineSectionSize()));
+
+ RangesTy &Ranges = File.Addresses->getValidAddressRanges();
+
+ // Parse the original line info for the unit.
+ DWARFDebugLine::LineTable LineTable;
+ uint64_t StmtOffset = *StmtList;
+ DWARFDataExtractor LineExtractor(
+ OrigDwarf.getDWARFObj(), OrigDwarf.getDWARFObj().getLineSection(),
+ OrigDwarf.isLittleEndian(), Unit.getOrigUnit().getAddressByteSize());
+ if (needToTranslateStrings())
+ return TheDwarfEmitter->translateLineTable(LineExtractor, StmtOffset);
+
+ if (Error Err =
+ LineTable.parse(LineExtractor, &StmtOffset, OrigDwarf,
+ &Unit.getOrigUnit(), OrigDwarf.getWarningHandler()))
+ OrigDwarf.getWarningHandler()(std::move(Err));
+
+ // This vector is the output line table.
+ std::vector<DWARFDebugLine::Row> NewRows;
+ NewRows.reserve(LineTable.Rows.size());
+
+ // Current sequence of rows being extracted, before being inserted
+ // in NewRows.
+ std::vector<DWARFDebugLine::Row> Seq;
+ const auto &FunctionRanges = Unit.getFunctionRanges();
+ auto InvalidRange = FunctionRanges.end(), CurrRange = InvalidRange;
+
+  // FIXME: This logic is meant to generate exactly the same output as
+  // Darwin's classic dsymutil. There is a nicer way to implement this:
+  // simply put all the relocated line info in NewRows and sort NewRows
+  // before passing it to emitLineTableForUnit. This should be correct,
+  // as sequences for a function should stay together in the sorted
+  // output. There are a few corner cases that look suspicious though,
+  // and they required implementing the logic this way. Revisit this
+  // once initial validation is finished.
+
+ // Iterate over the object file line info and extract the sequences
+ // that correspond to linked functions.
+ for (auto &Row : LineTable.Rows) {
+    // Check whether we stepped out of the range. The range is
+    // half-open, but accept the end address of the range if it is
+    // marked as end_sequence in the input (because in that case the
+    // relocation offset is accurate and that entry won't serve as the
+    // start of another function).
+ if (CurrRange == InvalidRange || Row.Address.Address < CurrRange.start() ||
+ Row.Address.Address > CurrRange.stop() ||
+ (Row.Address.Address == CurrRange.stop() && !Row.EndSequence)) {
+      // We just stepped out of a known range. Insert an end_sequence
+      // corresponding to the end of the range.
+ uint64_t StopAddress = CurrRange != InvalidRange
+ ? CurrRange.stop() + CurrRange.value()
+ : -1ULL;
+ CurrRange = FunctionRanges.find(Row.Address.Address);
+ bool CurrRangeValid =
+ CurrRange != InvalidRange && CurrRange.start() <= Row.Address.Address;
+ if (!CurrRangeValid) {
+ CurrRange = InvalidRange;
+ if (StopAddress != -1ULL) {
+          // Try harder by looking in the address ranges map (Ranges).
+          // There are corner cases where this finds a valid entry.
+          // It's unclear whether this is right or wrong, but for now
+          // mimic dsymutil's behavior.
+ // FIXME: Understand exactly what cases this addresses and
+ // potentially remove it along with the Ranges map.
+ auto Range = Ranges.lower_bound(Row.Address.Address);
+ if (Range != Ranges.begin() && Range != Ranges.end())
+ --Range;
+
+ if (Range != Ranges.end() && Range->first <= Row.Address.Address &&
+ Range->second.HighPC >= Row.Address.Address) {
+ StopAddress = Row.Address.Address + Range->second.Offset;
+ }
+ }
+ }
+ if (StopAddress != -1ULL && !Seq.empty()) {
+ // Insert end sequence row with the computed end address, but
+ // the same line as the previous one.
+ auto NextLine = Seq.back();
+ NextLine.Address.Address = StopAddress;
+ NextLine.EndSequence = 1;
+ NextLine.PrologueEnd = 0;
+ NextLine.BasicBlock = 0;
+ NextLine.EpilogueBegin = 0;
+ Seq.push_back(NextLine);
+ insertLineSequence(Seq, NewRows);
+ }
+
+ if (!CurrRangeValid)
+ continue;
+ }
+
+ // Ignore empty sequences.
+ if (Row.EndSequence && Seq.empty())
+ continue;
+
+ // Relocate row address and add it to the current sequence.
+ Row.Address.Address += CurrRange.value();
+ Seq.emplace_back(Row);
+
+ if (Row.EndSequence)
+ insertLineSequence(Seq, NewRows);
+ }
+
+ // Finished extracting, now emit the line tables.
+ // FIXME: LLVM hard-codes its prologue values. We just copy the
+ // prologue over and that works because we act as both producer and
+ // consumer. It would be nicer to have a real configurable line
+ // table emitter.
+ if (LineTable.Prologue.getVersion() < 2 ||
+ LineTable.Prologue.getVersion() > 5 ||
+ LineTable.Prologue.DefaultIsStmt != DWARF2_LINE_DEFAULT_IS_STMT ||
+ LineTable.Prologue.OpcodeBase > 13)
+ reportWarning("line table parameters mismatch. Cannot emit.", File);
+ else {
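+    // *StmtList + 10 skips the DWARF32 v2-v4 line header fields that precede
+    // header_length: unit_length (4), version (2) and the header_length
+    // field itself (4). Prologue.PrologueLength counts from just past
+    // header_length, so PrologueEnd lands on the first byte of the line
+    // program. (The slice below starts at *StmtList + 4, past unit_length.)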
+ uint32_t PrologueEnd = *StmtList + 10 + LineTable.Prologue.PrologueLength;
+ // DWARF v5 has an extra 2 bytes of information before the header_length
+ // field.
+ if (LineTable.Prologue.getVersion() == 5)
+ PrologueEnd += 2;
+ StringRef LineData = OrigDwarf.getDWARFObj().getLineSection().Data;
+ MCDwarfLineTableParams Params;
+ Params.DWARF2LineOpcodeBase = LineTable.Prologue.OpcodeBase;
+ Params.DWARF2LineBase = LineTable.Prologue.LineBase;
+ Params.DWARF2LineRange = LineTable.Prologue.LineRange;
+ TheDwarfEmitter->emitLineTableForUnit(
+ Params, LineData.slice(*StmtList + 4, PrologueEnd),
+ LineTable.Prologue.MinInstLength, NewRows,
+ Unit.getOrigUnit().getAddressByteSize());
+ }
+}
+
+void DWARFLinker::emitAcceleratorEntriesForUnit(CompileUnit &Unit) {
+ switch (Options.TheAccelTableKind) {
+ case AccelTableKind::Apple:
+ emitAppleAcceleratorEntriesForUnit(Unit);
+ break;
+ case AccelTableKind::Dwarf:
+ emitDwarfAcceleratorEntriesForUnit(Unit);
+ break;
+ case AccelTableKind::Default:
+ llvm_unreachable("The default must be updated to a concrete value.");
+ break;
+ }
+}
+
+void DWARFLinker::emitAppleAcceleratorEntriesForUnit(CompileUnit &Unit) {
+ // Add namespaces.
+ for (const auto &Namespace : Unit.getNamespaces())
+ AppleNamespaces.addName(Namespace.Name,
+ Namespace.Die->getOffset() + Unit.getStartOffset());
+
+  // Add names.
+ TheDwarfEmitter->emitPubNamesForUnit(Unit);
+ for (const auto &Pubname : Unit.getPubnames())
+ AppleNames.addName(Pubname.Name,
+ Pubname.Die->getOffset() + Unit.getStartOffset());
+
+  // Add types.
+ TheDwarfEmitter->emitPubTypesForUnit(Unit);
+ for (const auto &Pubtype : Unit.getPubtypes())
+ AppleTypes.addName(
+ Pubtype.Name, Pubtype.Die->getOffset() + Unit.getStartOffset(),
+ Pubtype.Die->getTag(),
+ Pubtype.ObjcClassImplementation ? dwarf::DW_FLAG_type_implementation
+ : 0,
+ Pubtype.QualifiedNameHash);
+
+  // Add ObjC names.
+ for (const auto &ObjC : Unit.getObjC())
+ AppleObjc.addName(ObjC.Name, ObjC.Die->getOffset() + Unit.getStartOffset());
+}
+
+void DWARFLinker::emitDwarfAcceleratorEntriesForUnit(CompileUnit &Unit) {
+ for (const auto &Namespace : Unit.getNamespaces())
+ DebugNames.addName(Namespace.Name, Namespace.Die->getOffset(),
+ Namespace.Die->getTag(), Unit.getUniqueID());
+ for (const auto &Pubname : Unit.getPubnames())
+ DebugNames.addName(Pubname.Name, Pubname.Die->getOffset(),
+ Pubname.Die->getTag(), Unit.getUniqueID());
+ for (const auto &Pubtype : Unit.getPubtypes())
+ DebugNames.addName(Pubtype.Name, Pubtype.Die->getOffset(),
+ Pubtype.Die->getTag(), Unit.getUniqueID());
+}
+
+/// Read the frame info stored in the object, and emit the
+/// patched frame descriptions for the resulting file.
+///
+/// This is actually pretty easy as the data of the CIEs and FDEs can
+/// be considered as black boxes and moved as is. The only thing to do
+/// is to patch the addresses in the headers.
+void DWARFLinker::patchFrameInfoForObject(const DWARFFile &File,
+ RangesTy &Ranges,
+ DWARFContext &OrigDwarf,
+ unsigned AddrSize) {
+ StringRef FrameData = OrigDwarf.getDWARFObj().getFrameSection().Data;
+ if (FrameData.empty())
+ return;
+
+ DataExtractor Data(FrameData, OrigDwarf.isLittleEndian(), 0);
+ uint64_t InputOffset = 0;
+
+ // Store the data of the CIEs defined in this object, keyed by their
+ // offsets.
+ DenseMap<uint64_t, StringRef> LocalCIES;
+
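+  // Each DWARF32 debug_frame entry starts with a 4-byte length followed by a
+  // 4-byte ID: 0xFFFFFFFF marks a CIE, anything else is an FDE whose ID is
+  // the section offset of its parent CIE.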
+ while (Data.isValidOffset(InputOffset)) {
+ uint64_t EntryOffset = InputOffset;
+ uint32_t InitialLength = Data.getU32(&InputOffset);
+ if (InitialLength == 0xFFFFFFFF)
+ return reportWarning("Dwarf64 bits no supported", File);
+
+ uint32_t CIEId = Data.getU32(&InputOffset);
+ if (CIEId == 0xFFFFFFFF) {
+ // This is a CIE, store it.
+ StringRef CIEData = FrameData.substr(EntryOffset, InitialLength + 4);
+ LocalCIES[EntryOffset] = CIEData;
+ // The -4 is to account for the CIEId we just read.
+ InputOffset += InitialLength - 4;
+ continue;
+ }
+
+ uint32_t Loc = Data.getUnsigned(&InputOffset, AddrSize);
+
+ // Some compilers seem to emit frame info that doesn't start at
+    // the function entry point, thus we can't just look up the address
+ // in the debug map. Use the AddressInfo's range map to see if the FDE
+ // describes something that we can relocate.
+ auto Range = Ranges.upper_bound(Loc);
+ if (Range != Ranges.begin())
+ --Range;
+ if (Range == Ranges.end() || Range->first > Loc ||
+ Range->second.HighPC <= Loc) {
+ // The +4 is to account for the size of the InitialLength field itself.
+ InputOffset = EntryOffset + InitialLength + 4;
+ continue;
+ }
+
+ // This is an FDE, and we have a mapping.
+ // Have we already emitted a corresponding CIE?
+ StringRef CIEData = LocalCIES[CIEId];
+ if (CIEData.empty())
+ return reportWarning("Inconsistent debug_frame content. Dropping.", File);
+
+    // Check whether we already emitted a CIE that corresponds to the
+    // referenced one (the CIE data is the key of that lookup).
+ auto IteratorInserted = EmittedCIEs.insert(
+ std::make_pair(CIEData, TheDwarfEmitter->getFrameSectionSize()));
+ // If there is no CIE yet for this ID, emit it.
+ if (IteratorInserted.second ||
+ // FIXME: dsymutil-classic only caches the last used CIE for
+ // reuse. Mimic that behavior for now. Just removing that
+ // second half of the condition and the LastCIEOffset variable
+ // makes the code DTRT.
+ LastCIEOffset != IteratorInserted.first->getValue()) {
+ LastCIEOffset = TheDwarfEmitter->getFrameSectionSize();
+ IteratorInserted.first->getValue() = LastCIEOffset;
+ TheDwarfEmitter->emitCIE(CIEData);
+ }
+
+ // Emit the FDE with updated address and CIE pointer.
+ // (4 + AddrSize) is the size of the CIEId + initial_location
+ // fields that will get reconstructed by emitFDE().
+ unsigned FDERemainingBytes = InitialLength - (4 + AddrSize);
+ TheDwarfEmitter->emitFDE(IteratorInserted.first->getValue(), AddrSize,
+ Loc + Range->second.Offset,
+ FrameData.substr(InputOffset, FDERemainingBytes));
+ InputOffset += FDERemainingBytes;
+ }
+}
+
+void DWARFLinker::DIECloner::copyAbbrev(
+ const DWARFAbbreviationDeclaration &Abbrev, bool HasODR) {
+ DIEAbbrev Copy(dwarf::Tag(Abbrev.getTag()),
+ dwarf::Form(Abbrev.hasChildren()));
+
+ for (const auto &Attr : Abbrev.attributes()) {
+ uint16_t Form = Attr.Form;
+ if (HasODR && isODRAttribute(Attr.Attr))
+ Form = dwarf::DW_FORM_ref_addr;
+ Copy.AddAttribute(dwarf::Attribute(Attr.Attr), dwarf::Form(Form));
+ }
+
+ Linker.assignAbbrev(Copy);
+}
+
+uint32_t DWARFLinker::DIECloner::hashFullyQualifiedName(DWARFDie DIE,
+ CompileUnit &U,
+ const DWARFFile &File,
+ int ChildRecurseDepth) {
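+  // This follows DW_AT_specification / DW_AT_abstract_origin links to hash
+  // the fully qualified name. For a (hypothetical) DIE "bar" inside a
+  // top-level namespace "foo", the result is
+  //   djbHash("bar", djbHash("::", djbHash("foo", djbHash("")))).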
+ const char *Name = nullptr;
+ DWARFUnit *OrigUnit = &U.getOrigUnit();
+ CompileUnit *CU = &U;
+ Optional<DWARFFormValue> Ref;
+
+ while (1) {
+ if (const char *CurrentName = DIE.getName(DINameKind::ShortName))
+ Name = CurrentName;
+
+ if (!(Ref = DIE.find(dwarf::DW_AT_specification)) &&
+ !(Ref = DIE.find(dwarf::DW_AT_abstract_origin)))
+ break;
+
+ if (!Ref->isFormClass(DWARFFormValue::FC_Reference))
+ break;
+
+ CompileUnit *RefCU;
+ if (auto RefDIE =
+ Linker.resolveDIEReference(File, CompileUnits, *Ref, DIE, RefCU)) {
+ CU = RefCU;
+ OrigUnit = &RefCU->getOrigUnit();
+ DIE = RefDIE;
+ }
+ }
+
+ unsigned Idx = OrigUnit->getDIEIndex(DIE);
+ if (!Name && DIE.getTag() == dwarf::DW_TAG_namespace)
+ Name = "(anonymous namespace)";
+
+ if (CU->getInfo(Idx).ParentIdx == 0 ||
+ // FIXME: dsymutil-classic compatibility. Ignore modules.
+ CU->getOrigUnit().getDIEAtIndex(CU->getInfo(Idx).ParentIdx).getTag() ==
+ dwarf::DW_TAG_module)
+ return djbHash(Name ? Name : "", djbHash(ChildRecurseDepth ? "" : "::"));
+
+ DWARFDie Die = OrigUnit->getDIEAtIndex(CU->getInfo(Idx).ParentIdx);
+ return djbHash(
+ (Name ? Name : ""),
+ djbHash((Name ? "::" : ""),
+ hashFullyQualifiedName(Die, *CU, File, ++ChildRecurseDepth)));
+}
+
+static uint64_t getDwoId(const DWARFDie &CUDie, const DWARFUnit &Unit) {
+ auto DwoId = dwarf::toUnsigned(
+ CUDie.find({dwarf::DW_AT_dwo_id, dwarf::DW_AT_GNU_dwo_id}));
+ if (DwoId)
+ return *DwoId;
+ return 0;
+}
+
+static std::string remapPath(StringRef Path,
+ const objectPrefixMap &ObjectPrefixMap) {
+ if (ObjectPrefixMap.empty())
+ return Path.str();
+
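+  // E.g. a prefix map entry {"/Builds", "/Sources"} (hypothetical paths)
+  // rewrites "/Builds/Foo/Bar.pcm" to "/Sources/Foo/Bar.pcm".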
+ SmallString<256> p = Path;
+ for (const auto &Entry : ObjectPrefixMap)
+ if (llvm::sys::path::replace_path_prefix(p, Entry.first, Entry.second))
+ break;
+ return p.str().str();
+}
+
+bool DWARFLinker::registerModuleReference(DWARFDie CUDie, const DWARFUnit &Unit,
+ const DWARFFile &File,
+ OffsetsStringPool &StringPool,
+ DeclContextTree &ODRContexts,
+ uint64_t ModulesEndOffset,
+ unsigned &UnitID, bool IsLittleEndian,
+ unsigned Indent, bool Quiet) {
+ std::string PCMfile = dwarf::toString(
+ CUDie.find({dwarf::DW_AT_dwo_name, dwarf::DW_AT_GNU_dwo_name}), "");
+ if (PCMfile.empty())
+ return false;
+ if (Options.ObjectPrefixMap)
+ PCMfile = remapPath(PCMfile, *Options.ObjectPrefixMap);
+
+ // Clang module DWARF skeleton CUs abuse this for the path to the module.
+ uint64_t DwoId = getDwoId(CUDie, Unit);
+
+ std::string Name = dwarf::toString(CUDie.find(dwarf::DW_AT_name), "");
+ if (Name.empty()) {
+ if (!Quiet)
+ reportWarning("Anonymous module skeleton CU for " + PCMfile, File);
+ return true;
+ }
+
+ if (!Quiet && Options.Verbose) {
+ outs().indent(Indent);
+ outs() << "Found clang module reference " << PCMfile;
+ }
+
+ auto Cached = ClangModules.find(PCMfile);
+ if (Cached != ClangModules.end()) {
+ // FIXME: Until PR27449 (https://llvm.org/bugs/show_bug.cgi?id=27449) is
+ // fixed in clang, only warn about DWO_id mismatches in verbose mode.
+ // ASTFileSignatures will change randomly when a module is rebuilt.
+ if (!Quiet && Options.Verbose && (Cached->second != DwoId))
+ reportWarning(Twine("hash mismatch: this object file was built against a "
+ "different version of the module ") +
+ PCMfile,
+ File);
+ if (!Quiet && Options.Verbose)
+ outs() << " [cached].\n";
+ return true;
+ }
+ if (!Quiet && Options.Verbose)
+ outs() << " ...\n";
+
+ // Cyclic dependencies are disallowed by Clang, but we still
+ // shouldn't run into an infinite loop, so mark it as processed now.
+ ClangModules.insert({PCMfile, DwoId});
+
+ if (Error E = loadClangModule(CUDie, PCMfile, Name, DwoId, File, StringPool,
+ ODRContexts, ModulesEndOffset, UnitID,
+ IsLittleEndian, Indent + 2, Quiet)) {
+ consumeError(std::move(E));
+ return false;
+ }
+ return true;
+}
+
+Error DWARFLinker::loadClangModule(
+ DWARFDie CUDie, StringRef Filename, StringRef ModuleName, uint64_t DwoId,
+ const DWARFFile &File, OffsetsStringPool &StringPool,
+ DeclContextTree &ODRContexts, uint64_t ModulesEndOffset, unsigned &UnitID,
+ bool IsLittleEndian, unsigned Indent, bool Quiet) {
+  // Use a SmallString<0> because loadClangModule() is recursive.
+ SmallString<0> Path(Options.PrependPath);
+ if (sys::path::is_relative(Filename))
+ resolveRelativeObjectPath(Path, CUDie);
+ sys::path::append(Path, Filename);
+ // Don't use the cached binary holder because we have no thread-safety
+ // guarantee and the lifetime is limited.
+
+ if (Options.ObjFileLoader == nullptr)
+ return Error::success();
+
+ auto ErrOrObj = Options.ObjFileLoader(File.FileName, Path);
+ if (!ErrOrObj)
+ return Error::success();
+
+ std::unique_ptr<CompileUnit> Unit;
+
+ for (const auto &CU : ErrOrObj->Dwarf->compile_units()) {
+ updateDwarfVersion(CU->getVersion());
+ // Recursively get all modules imported by this one.
+ auto CUDie = CU->getUnitDIE(false);
+ if (!CUDie)
+ continue;
+ if (!registerModuleReference(CUDie, *CU, File, StringPool, ODRContexts,
+ ModulesEndOffset, UnitID, IsLittleEndian,
+ Indent, Quiet)) {
+ if (Unit) {
+ std::string Err =
+ (Filename +
+ ": Clang modules are expected to have exactly 1 compile unit.\n")
+ .str();
+ reportError(Err, File);
+ return make_error<StringError>(Err, inconvertibleErrorCode());
+ }
+ // FIXME: Until PR27449 (https://llvm.org/bugs/show_bug.cgi?id=27449) is
+ // fixed in clang, only warn about DWO_id mismatches in verbose mode.
+ // ASTFileSignatures will change randomly when a module is rebuilt.
+ uint64_t PCMDwoId = getDwoId(CUDie, *CU);
+ if (PCMDwoId != DwoId) {
+ if (!Quiet && Options.Verbose)
+ reportWarning(
+ Twine("hash mismatch: this object file was built against a "
+ "different version of the module ") +
+ Filename,
+ File);
+ // Update the cache entry with the DwoId of the module loaded from disk.
+ ClangModules[Filename] = PCMDwoId;
+ }
+
+ // Add this module.
+ Unit = std::make_unique<CompileUnit>(*CU, UnitID++, !Options.NoODR,
+ ModuleName);
+ Unit->setHasInterestingContent();
+ analyzeContextInfo(CUDie, 0, *Unit, &ODRContexts.getRoot(), ODRContexts,
+ ModulesEndOffset, Options.ParseableSwiftInterfaces,
+ [&](const Twine &Warning, const DWARFDie &DIE) {
+ reportWarning(Warning, File, &DIE);
+ });
+ // Keep everything.
+ Unit->markEverythingAsKept();
+ }
+ }
+  if (!Unit || !Unit->getOrigUnit().getUnitDIE().hasChildren())
+ return Error::success();
+ if (!Quiet && Options.Verbose) {
+ outs().indent(Indent);
+ outs() << "cloning .debug_info from " << Filename << "\n";
+ }
+
+ UnitListTy CompileUnits;
+ CompileUnits.push_back(std::move(Unit));
+ assert(TheDwarfEmitter);
+ DIECloner(*this, TheDwarfEmitter, *ErrOrObj, DIEAlloc, CompileUnits,
+ Options.Update)
+ .cloneAllCompileUnits(*(ErrOrObj->Dwarf), File, StringPool,
+ IsLittleEndian);
+ return Error::success();
+}
+
+uint64_t DWARFLinker::DIECloner::cloneAllCompileUnits(
+ DWARFContext &DwarfContext, const DWARFFile &File,
+ OffsetsStringPool &StringPool, bool IsLittleEndian) {
+ uint64_t OutputDebugInfoSize =
+ Linker.Options.NoOutput ? 0 : Emitter->getDebugInfoSectionSize();
+ const uint64_t StartOutputDebugInfoSize = OutputDebugInfoSize;
+
+ for (auto &CurrentUnit : CompileUnits) {
+ const uint16_t DwarfVersion = CurrentUnit->getOrigUnit().getVersion();
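+    // DWARF32 v5 unit headers are 12 bytes: unit_length (4), version (2),
+    // unit_type (1), address_size (1) and debug_abbrev_offset (4); v2-v4
+    // headers lack the unit_type byte, hence 11.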
+ const uint32_t UnitHeaderSize = DwarfVersion >= 5 ? 12 : 11;
+ auto InputDIE = CurrentUnit->getOrigUnit().getUnitDIE();
+ CurrentUnit->setStartOffset(OutputDebugInfoSize);
+ if (!InputDIE) {
+ OutputDebugInfoSize = CurrentUnit->computeNextUnitOffset(DwarfVersion);
+ continue;
+ }
+ if (CurrentUnit->getInfo(0).Keep) {
+      // Clone the InputDIE into the output unit DIE of our compile unit,
+      // since the latter already has a DIE created inside of it.
+ CurrentUnit->createOutputDIE();
+ cloneDIE(InputDIE, File, *CurrentUnit, StringPool, 0 /* PC offset */,
+ UnitHeaderSize, 0, IsLittleEndian,
+ CurrentUnit->getOutputUnitDIE());
+ }
+
+ OutputDebugInfoSize = CurrentUnit->computeNextUnitOffset(DwarfVersion);
+
+ if (!Linker.Options.NoOutput) {
+ assert(Emitter);
+
+ if (LLVM_LIKELY(!Linker.Options.Update) ||
+ Linker.needToTranslateStrings())
+ Linker.patchLineTableForUnit(*CurrentUnit, DwarfContext, File);
+
+ Linker.emitAcceleratorEntriesForUnit(*CurrentUnit);
+
+ if (LLVM_UNLIKELY(Linker.Options.Update))
+ continue;
+
+ Linker.patchRangesForUnit(*CurrentUnit, DwarfContext, File);
+ auto ProcessExpr = [&](StringRef Bytes,
+ SmallVectorImpl<uint8_t> &Buffer) {
+ DWARFUnit &OrigUnit = CurrentUnit->getOrigUnit();
+ DataExtractor Data(Bytes, IsLittleEndian,
+ OrigUnit.getAddressByteSize());
+ cloneExpression(Data,
+ DWARFExpression(Data, OrigUnit.getAddressByteSize(),
+ OrigUnit.getFormParams().Format),
+ File, *CurrentUnit, Buffer);
+ };
+ Emitter->emitLocationsForUnit(*CurrentUnit, DwarfContext, ProcessExpr);
+ }
+ }
+
+ if (!Linker.Options.NoOutput) {
+ assert(Emitter);
+ // Emit all the compile unit's debug information.
+ for (auto &CurrentUnit : CompileUnits) {
+ if (LLVM_LIKELY(!Linker.Options.Update))
+ Linker.generateUnitRanges(*CurrentUnit);
+
+ CurrentUnit->fixupForwardReferences();
+
+ if (!CurrentUnit->getOutputUnitDIE())
+ continue;
+
+ unsigned DwarfVersion = CurrentUnit->getOrigUnit().getVersion();
+
+ assert(Emitter->getDebugInfoSectionSize() ==
+ CurrentUnit->getStartOffset());
+ Emitter->emitCompileUnitHeader(*CurrentUnit, DwarfVersion);
+ Emitter->emitDIE(*CurrentUnit->getOutputUnitDIE());
+ assert(Emitter->getDebugInfoSectionSize() ==
+ CurrentUnit->computeNextUnitOffset(DwarfVersion));
+ }
+ }
+
+ return OutputDebugInfoSize - StartOutputDebugInfoSize;
+}
+
+void DWARFLinker::updateAccelKind(DWARFContext &Dwarf) {
+ if (Options.TheAccelTableKind != AccelTableKind::Default)
+ return;
+
+ auto &DwarfObj = Dwarf.getDWARFObj();
+
+ if (!AtLeastOneDwarfAccelTable &&
+ (!DwarfObj.getAppleNamesSection().Data.empty() ||
+ !DwarfObj.getAppleTypesSection().Data.empty() ||
+ !DwarfObj.getAppleNamespacesSection().Data.empty() ||
+ !DwarfObj.getAppleObjCSection().Data.empty())) {
+ AtLeastOneAppleAccelTable = true;
+ }
+
+ if (!AtLeastOneDwarfAccelTable && !DwarfObj.getNamesSection().Data.empty()) {
+ AtLeastOneDwarfAccelTable = true;
+ }
+}
+
+bool DWARFLinker::emitPaperTrailWarnings(const DWARFFile &File,
+ OffsetsStringPool &StringPool) {
+
+ if (File.Warnings.empty())
+ return false;
+
+ DIE *CUDie = DIE::get(DIEAlloc, dwarf::DW_TAG_compile_unit);
+ CUDie->setOffset(11);
+ StringRef Producer;
+ StringRef WarningHeader;
+
+ switch (DwarfLinkerClientID) {
+ case DwarfLinkerClient::Dsymutil:
+ Producer = StringPool.internString("dsymutil");
+ WarningHeader = "dsymutil_warning";
+ break;
+
+ default:
+ Producer = StringPool.internString("dwarfopt");
+ WarningHeader = "dwarfopt_warning";
+ break;
+ }
+
+ StringRef FileName = StringPool.internString(File.FileName);
+ CUDie->addValue(DIEAlloc, dwarf::DW_AT_producer, dwarf::DW_FORM_strp,
+ DIEInteger(StringPool.getStringOffset(Producer)));
+ DIEBlock *String = new (DIEAlloc) DIEBlock();
+ DIEBlocks.push_back(String);
+ for (auto &C : FileName)
+ String->addValue(DIEAlloc, dwarf::Attribute(0), dwarf::DW_FORM_data1,
+ DIEInteger(C));
+ String->addValue(DIEAlloc, dwarf::Attribute(0), dwarf::DW_FORM_data1,
+ DIEInteger(0));
+
+ CUDie->addValue(DIEAlloc, dwarf::DW_AT_name, dwarf::DW_FORM_string, String);
+ for (const auto &Warning : File.Warnings) {
+ DIE &ConstDie = CUDie->addChild(DIE::get(DIEAlloc, dwarf::DW_TAG_constant));
+ ConstDie.addValue(DIEAlloc, dwarf::DW_AT_name, dwarf::DW_FORM_strp,
+ DIEInteger(StringPool.getStringOffset(WarningHeader)));
+ ConstDie.addValue(DIEAlloc, dwarf::DW_AT_artificial, dwarf::DW_FORM_flag,
+ DIEInteger(1));
+ ConstDie.addValue(DIEAlloc, dwarf::DW_AT_const_value, dwarf::DW_FORM_strp,
+ DIEInteger(StringPool.getStringOffset(Warning)));
+ }
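+  // Size accounting, mirroring the attributes added above: a 4-byte strp for
+  // DW_AT_producer, the inlined file name (its bytes plus a NUL), and per
+  // warning a name strp (4), an artificial flag (1) and a const_value strp
+  // (4), plus the end-of-children marker.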
+ unsigned Size = 4 /* FORM_strp */ + FileName.size() + 1 +
+ File.Warnings.size() * (4 + 1 + 4) + 1 /* End of children */;
+ DIEAbbrev Abbrev = CUDie->generateAbbrev();
+ assignAbbrev(Abbrev);
+ CUDie->setAbbrevNumber(Abbrev.getNumber());
+ Size += getULEB128Size(Abbrev.getNumber());
+ // Abbreviation ordering needed for classic compatibility.
+ for (auto &Child : CUDie->children()) {
+ Abbrev = Child.generateAbbrev();
+ assignAbbrev(Abbrev);
+ Child.setAbbrevNumber(Abbrev.getNumber());
+ Size += getULEB128Size(Abbrev.getNumber());
+ }
+ CUDie->setSize(Size);
+ TheDwarfEmitter->emitPaperTrailWarningsDie(*CUDie);
+
+ return true;
+}
+
+void DWARFLinker::copyInvariantDebugSection(DWARFContext &Dwarf) {
+ if (!needToTranslateStrings())
+ TheDwarfEmitter->emitSectionContents(
+ Dwarf.getDWARFObj().getLineSection().Data, "debug_line");
+ TheDwarfEmitter->emitSectionContents(Dwarf.getDWARFObj().getLocSection().Data,
+ "debug_loc");
+ TheDwarfEmitter->emitSectionContents(
+ Dwarf.getDWARFObj().getRangesSection().Data, "debug_ranges");
+ TheDwarfEmitter->emitSectionContents(
+ Dwarf.getDWARFObj().getFrameSection().Data, "debug_frame");
+ TheDwarfEmitter->emitSectionContents(Dwarf.getDWARFObj().getArangesSection(),
+ "debug_aranges");
+}
+
+void DWARFLinker::addObjectFile(DWARFFile &File) {
+ ObjectContexts.emplace_back(LinkContext(File));
+
+ if (ObjectContexts.back().File.Dwarf)
+ updateAccelKind(*ObjectContexts.back().File.Dwarf);
+}
+
+bool DWARFLinker::link() {
+ assert(Options.NoOutput || TheDwarfEmitter);
+
+ // A unique ID that identifies each compile unit.
+ unsigned UnitID = 0;
+
+ // First populate the data structure we need for each iteration of the
+ // parallel loop.
+ unsigned NumObjects = ObjectContexts.size();
+
+  // This DWARF string pool is used for emission. It must be used serially,
+  // as the order of getStringOffset() calls matters for reproducibility.
+ OffsetsStringPool OffsetsStringPool(StringsTranslator, true);
+
+  // ODR contexts for the optimization.
+ DeclContextTree ODRContexts;
+
+ // If we haven't decided on an accelerator table kind yet, we base ourselves
+ // on the DWARF we have seen so far. At this point we haven't pulled in debug
+ // information from modules yet, so it is technically possible that they
+ // would affect the decision. However, as they're built with the same
+ // compiler and flags, it is safe to assume that they will follow the
+ // decision made here.
+ if (Options.TheAccelTableKind == AccelTableKind::Default) {
+ if (AtLeastOneDwarfAccelTable && !AtLeastOneAppleAccelTable)
+ Options.TheAccelTableKind = AccelTableKind::Dwarf;
+ else
+ Options.TheAccelTableKind = AccelTableKind::Apple;
+ }
+
+ for (LinkContext &OptContext : ObjectContexts) {
+ if (Options.Verbose) {
+ if (DwarfLinkerClientID == DwarfLinkerClient::Dsymutil)
+ outs() << "DEBUG MAP OBJECT: " << OptContext.File.FileName << "\n";
+ else
+ outs() << "OBJECT FILE: " << OptContext.File.FileName << "\n";
+ }
+
+ if (emitPaperTrailWarnings(OptContext.File, OffsetsStringPool))
+ continue;
+
+ if (!OptContext.File.Dwarf)
+ continue;
+    // Look for relocations that correspond to address map entries.
+
+    // Note: findValidRelocations() was previously invoked here; the
+    // relocation info probably needs to be gathered at this point.
+ if (LLVM_LIKELY(!Options.Update) &&
+ !OptContext.File.Addresses->hasValidRelocs()) {
+ if (Options.Verbose)
+ outs() << "No valid relocations found. Skipping.\n";
+
+ // Set "Skip" flag as a signal to other loops that we should not
+ // process this iteration.
+ OptContext.Skip = true;
+ continue;
+ }
+
+ // Setup access to the debug info.
+ if (!OptContext.File.Dwarf)
+ continue;
+
+ // In a first phase, just read in the debug info and load all clang modules.
+ OptContext.CompileUnits.reserve(
+ OptContext.File.Dwarf->getNumCompileUnits());
+
+ for (const auto &CU : OptContext.File.Dwarf->compile_units()) {
+ updateDwarfVersion(CU->getVersion());
+ auto CUDie = CU->getUnitDIE(false);
+ if (Options.Verbose) {
+ outs() << "Input compilation unit:";
+ DIDumpOptions DumpOpts;
+ DumpOpts.ChildRecurseDepth = 0;
+ DumpOpts.Verbose = Options.Verbose;
+ CUDie.dump(outs(), 0, DumpOpts);
+ }
+ if (CUDie && !LLVM_UNLIKELY(Options.Update))
+ registerModuleReference(CUDie, *CU, OptContext.File, OffsetsStringPool,
+ ODRContexts, 0, UnitID,
+ OptContext.File.Dwarf->isLittleEndian());
+ }
+ }
+
+ // If we haven't seen any CUs, pick an arbitrary valid Dwarf version anyway.
+ if (MaxDwarfVersion == 0)
+ MaxDwarfVersion = 3;
+
+  // At this point we know how much data we have emitted. We use this value to
+  // compare canonical DIE offsets in analyzeContextInfo to see if a definition
+  // is already emitted, without being affected by canonical DIE offsets set
+  // later. This prevents non-determinism when analyze and clone execute
+  // concurrently, as clone sets the canonical DIE offset and analyze reads it.
+ const uint64_t ModulesEndOffset =
+ Options.NoOutput ? 0 : TheDwarfEmitter->getDebugInfoSectionSize();
+
+ // These variables manage the list of processed object files.
+ // The mutex and condition variable are to ensure that this is thread safe.
+ std::mutex ProcessedFilesMutex;
+ std::condition_variable ProcessedFilesConditionVariable;
+ BitVector ProcessedFiles(NumObjects, false);
+
+ // Analyzing the context info is particularly expensive so it is executed in
+ // parallel with emitting the previous compile unit.
+ auto AnalyzeLambda = [&](size_t I) {
+ auto &Context = ObjectContexts[I];
+
+ if (Context.Skip || !Context.File.Dwarf)
+ return;
+
+ for (const auto &CU : Context.File.Dwarf->compile_units()) {
+ updateDwarfVersion(CU->getVersion());
+ // The !registerModuleReference() condition effectively skips
+ // over fully resolved skeleton units. This second pass of
+ // registerModuleReferences doesn't do any new work, but it
+ // will collect top-level errors, which are suppressed. Module
+ // warnings were already displayed in the first iteration.
+ bool Quiet = true;
+ auto CUDie = CU->getUnitDIE(false);
+ if (!CUDie || LLVM_UNLIKELY(Options.Update) ||
+          !registerModuleReference(CUDie, *CU, Context.File, OffsetsStringPool,
+                                   ODRContexts, ModulesEndOffset, UnitID,
+                                   Context.File.Dwarf->isLittleEndian(),
+                                   /*Indent=*/0, Quiet)) {
+ Context.CompileUnits.push_back(std::make_unique<CompileUnit>(
+ *CU, UnitID++, !Options.NoODR && !Options.Update, ""));
+ }
+ }
+
+ // Now build the DIE parent links that we will use during the next phase.
+ for (auto &CurrentUnit : Context.CompileUnits) {
+ auto CUDie = CurrentUnit->getOrigUnit().getUnitDIE();
+ if (!CUDie)
+ continue;
+ analyzeContextInfo(CurrentUnit->getOrigUnit().getUnitDIE(), 0,
+ *CurrentUnit, &ODRContexts.getRoot(), ODRContexts,
+ ModulesEndOffset, Options.ParseableSwiftInterfaces,
+ [&](const Twine &Warning, const DWARFDie &DIE) {
+ reportWarning(Warning, Context.File, &DIE);
+ });
+ }
+ };
+
+  // For each object file, record how many bytes were emitted.
+ StringMap<DebugInfoSize> SizeByObject;
+
+  // And then the remaining work in serial again.
+  // Note, although this loop runs in serial, it can run in parallel with
+  // the analyzeContextInfo loop so long as we only process files whose
+  // indices have already been processed by analyzeContextInfo.
+ auto CloneLambda = [&](size_t I) {
+ auto &OptContext = ObjectContexts[I];
+ if (OptContext.Skip || !OptContext.File.Dwarf)
+ return;
+
+    // Then mark all the DIEs that need to be present in the generated output
+    // and collect some information about them.
+    // Note that this loop cannot be merged with the previous one because
+    // cross-CU references require the ParentIdx to be set up for every CU in
+    // the object file before calling this.
+ if (LLVM_UNLIKELY(Options.Update)) {
+ for (auto &CurrentUnit : OptContext.CompileUnits)
+ CurrentUnit->markEverythingAsKept();
+ copyInvariantDebugSection(*OptContext.File.Dwarf);
+ } else {
+ for (auto &CurrentUnit : OptContext.CompileUnits)
+ lookForDIEsToKeep(*OptContext.File.Addresses,
+ OptContext.File.Addresses->getValidAddressRanges(),
+ OptContext.CompileUnits,
+ CurrentUnit->getOrigUnit().getUnitDIE(),
+ OptContext.File, *CurrentUnit, 0);
+ }
+
+ // The calls to applyValidRelocs inside cloneDIE will walk the reloc
+ // array again (in the same way findValidRelocsInDebugInfo() did). We
+ // need to reset the NextValidReloc index to the beginning.
+ if (OptContext.File.Addresses->hasValidRelocs() ||
+ LLVM_UNLIKELY(Options.Update)) {
+ SizeByObject[OptContext.File.FileName].Input =
+ getDebugInfoSize(*OptContext.File.Dwarf);
+ SizeByObject[OptContext.File.FileName].Output =
+ DIECloner(*this, TheDwarfEmitter, OptContext.File, DIEAlloc,
+ OptContext.CompileUnits, Options.Update)
+ .cloneAllCompileUnits(*OptContext.File.Dwarf, OptContext.File,
+ OffsetsStringPool,
+ OptContext.File.Dwarf->isLittleEndian());
+ }
+ if (!Options.NoOutput && !OptContext.CompileUnits.empty() &&
+ LLVM_LIKELY(!Options.Update))
+ patchFrameInfoForObject(
+ OptContext.File, OptContext.File.Addresses->getValidAddressRanges(),
+ *OptContext.File.Dwarf,
+ OptContext.CompileUnits[0]->getOrigUnit().getAddressByteSize());
+
+    // Clean up before starting work on the next object.
+ cleanupAuxiliarryData(OptContext);
+ };
+
+ auto EmitLambda = [&]() {
+ // Emit everything that's global.
+ if (!Options.NoOutput) {
+ TheDwarfEmitter->emitAbbrevs(Abbreviations, MaxDwarfVersion);
+ TheDwarfEmitter->emitStrings(OffsetsStringPool);
+ switch (Options.TheAccelTableKind) {
+ case AccelTableKind::Apple:
+ TheDwarfEmitter->emitAppleNames(AppleNames);
+ TheDwarfEmitter->emitAppleNamespaces(AppleNamespaces);
+ TheDwarfEmitter->emitAppleTypes(AppleTypes);
+ TheDwarfEmitter->emitAppleObjc(AppleObjc);
+ break;
+ case AccelTableKind::Dwarf:
+ TheDwarfEmitter->emitDebugNames(DebugNames);
+ break;
+ case AccelTableKind::Default:
+ llvm_unreachable("Default should have already been resolved.");
+ break;
+ }
+ }
+ };
+
+ auto AnalyzeAll = [&]() {
+ for (unsigned I = 0, E = NumObjects; I != E; ++I) {
+ AnalyzeLambda(I);
+
+ std::unique_lock<std::mutex> LockGuard(ProcessedFilesMutex);
+ ProcessedFiles.set(I);
+ ProcessedFilesConditionVariable.notify_one();
+ }
+ };
+
+ auto CloneAll = [&]() {
+ for (unsigned I = 0, E = NumObjects; I != E; ++I) {
+ {
+ std::unique_lock<std::mutex> LockGuard(ProcessedFilesMutex);
+ if (!ProcessedFiles[I]) {
+ ProcessedFilesConditionVariable.wait(
+ LockGuard, [&]() { return ProcessedFiles[I]; });
+ }
+ }
+
+ CloneLambda(I);
+ }
+ EmitLambda();
+ };
+
+  // To limit memory usage in the single-threaded case, analyze and clone are
+  // run sequentially so that the OptContext is freed after processing each
+  // object in endDebugObject.
+ if (Options.Threads == 1) {
+ for (unsigned I = 0, E = NumObjects; I != E; ++I) {
+ AnalyzeLambda(I);
+ CloneLambda(I);
+ }
+ EmitLambda();
+ } else {
+ ThreadPool Pool(hardware_concurrency(2));
+ Pool.async(AnalyzeAll);
+ Pool.async(CloneAll);
+ Pool.wait();
+ }
+
+ if (Options.Statistics) {
+ // Create a vector sorted in descending order by output size.
+ std::vector<std::pair<StringRef, DebugInfoSize>> Sorted;
+ for (auto &E : SizeByObject)
+ Sorted.emplace_back(E.first(), E.second);
+ llvm::sort(Sorted, [](auto &LHS, auto &RHS) {
+ return LHS.second.Output > RHS.second.Output;
+ });
+
+    auto ComputePercentage = [](int64_t Input, int64_t Output) -> float {
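+      // Relative change normalized by the midpoint of the two sizes,
+      // i.e. (Output - Input) / ((Input + Output) / 2); the {3,8:P} spec in
+      // FormatStr below renders it as a percentage.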
+ const float Difference = Output - Input;
+ const float Sum = Input + Output;
+ if (Sum == 0)
+ return 0;
+ return (Difference / (Sum / 2));
+ };
+
+ int64_t InputTotal = 0;
+ int64_t OutputTotal = 0;
+ const char *FormatStr = "{0,-45} {1,10}b {2,10}b {3,8:P}\n";
+
+ // Print header.
+ outs() << ".debug_info section size (in bytes)\n";
+ outs() << "----------------------------------------------------------------"
+ "---------------\n";
+ outs() << "Filename Object "
+ " dSYM Change\n";
+ outs() << "----------------------------------------------------------------"
+ "---------------\n";
+
+ // Print body.
+ for (auto &E : Sorted) {
+ InputTotal += E.second.Input;
+ OutputTotal += E.second.Output;
+      llvm::outs() << formatv(
+          FormatStr, sys::path::filename(E.first).take_back(45), E.second.Input,
+          E.second.Output, ComputePercentage(E.second.Input, E.second.Output));
+ }
+ // Print total and footer.
+ outs() << "----------------------------------------------------------------"
+ "---------------\n";
+    llvm::outs() << formatv(FormatStr, "Total", InputTotal, OutputTotal,
+                            ComputePercentage(InputTotal, OutputTotal));
+ outs() << "----------------------------------------------------------------"
+ "---------------\n\n";
+ }
+
+ return true;
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp b/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp
new file mode 100644
index 00000000000..925ab3d295c
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinkerCompileUnit.cpp
@@ -0,0 +1,152 @@
+//===- DWARFLinkerCompileUnit.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DWARFLinker/DWARFLinkerCompileUnit.h"
+#include "llvm/DWARFLinker/DWARFLinkerDeclContext.h"
+
+namespace llvm {
+
+/// Check if the DIE at \p Idx is in the scope of a function.
+static bool inFunctionScope(CompileUnit &U, unsigned Idx) {
+ while (Idx) {
+ if (U.getOrigUnit().getDIEAtIndex(Idx).getTag() == dwarf::DW_TAG_subprogram)
+ return true;
+ Idx = U.getInfo(Idx).ParentIdx;
+ }
+ return false;
+}
+
+uint16_t CompileUnit::getLanguage() {
+ if (!Language) {
+ DWARFDie CU = getOrigUnit().getUnitDIE();
+ Language = dwarf::toUnsigned(CU.find(dwarf::DW_AT_language), 0);
+ }
+ return Language;
+}
+
+StringRef CompileUnit::getSysRoot() {
+ if (SysRoot.empty()) {
+ DWARFDie CU = getOrigUnit().getUnitDIE();
+ SysRoot = dwarf::toStringRef(CU.find(dwarf::DW_AT_LLVM_sysroot)).str();
+ }
+ return SysRoot;
+}
+
+void CompileUnit::markEverythingAsKept() {
+ unsigned Idx = 0;
+
+ setHasInterestingContent();
+
+ for (auto &I : Info) {
+    // Mark everything that wasn't explicitly marked for pruning.
+ I.Keep = !I.Prune;
+ auto DIE = OrigUnit.getDIEAtIndex(Idx++);
+
+ // Try to guess which DIEs must go to the accelerator tables. We do that
+ // just for variables, because functions will be handled depending on
+ // whether they carry a DW_AT_low_pc attribute or not.
+ if (DIE.getTag() != dwarf::DW_TAG_variable &&
+ DIE.getTag() != dwarf::DW_TAG_constant)
+ continue;
+
+ Optional<DWARFFormValue> Value;
+ if (!(Value = DIE.find(dwarf::DW_AT_location))) {
+ if ((Value = DIE.find(dwarf::DW_AT_const_value)) &&
+ !inFunctionScope(*this, I.ParentIdx))
+ I.InDebugMap = true;
+ continue;
+ }
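+    // A location expression that starts with DW_OP_addr (the opcode followed
+    // by an address-size operand, hence the size check) describes a
+    // statically allocated variable, i.e. one that shows up in the debug map.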
+ if (auto Block = Value->getAsBlock()) {
+ if (Block->size() > OrigUnit.getAddressByteSize() &&
+ (*Block)[0] == dwarf::DW_OP_addr)
+ I.InDebugMap = true;
+ }
+ }
+}
+
+uint64_t CompileUnit::computeNextUnitOffset(uint16_t DwarfVersion) {
+ NextUnitOffset = StartOffset;
+ if (NewUnit) {
+ NextUnitOffset += (DwarfVersion >= 5) ? 12 : 11; // Header size
+ NextUnitOffset += NewUnit->getUnitDie().getSize();
+ }
+ return NextUnitOffset;
+}
+
+/// Keep track of a forward cross-cu reference from this unit
+/// to \p Die that lives in \p RefUnit.
+void CompileUnit::noteForwardReference(DIE *Die, const CompileUnit *RefUnit,
+ DeclContext *Ctxt, PatchLocation Attr) {
+ ForwardDIEReferences.emplace_back(Die, RefUnit, Ctxt, Attr);
+}
+
+void CompileUnit::fixupForwardReferences() {
+ for (const auto &Ref : ForwardDIEReferences) {
+ DIE *RefDie;
+ const CompileUnit *RefUnit;
+ PatchLocation Attr;
+ DeclContext *Ctxt;
+ std::tie(RefDie, RefUnit, Ctxt, Attr) = Ref;
+ if (Ctxt && Ctxt->getCanonicalDIEOffset())
+ Attr.set(Ctxt->getCanonicalDIEOffset());
+ else
+ Attr.set(RefDie->getOffset() + RefUnit->getStartOffset());
+ }
+}
+
+void CompileUnit::addLabelLowPc(uint64_t LabelLowPc, int64_t PcOffset) {
+ Labels.insert({LabelLowPc, PcOffset});
+}
+
+void CompileUnit::addFunctionRange(uint64_t FuncLowPc, uint64_t FuncHighPc,
+ int64_t PcOffset) {
+ // Don't add empty ranges to the interval map. They are a problem because
+ // the interval map expects half open intervals. This is safe because they
+ // are empty anyway.
+ if (FuncHighPc != FuncLowPc)
+ Ranges.insert(FuncLowPc, FuncHighPc, PcOffset);
+ this->LowPc = std::min(LowPc, FuncLowPc + PcOffset);
+ this->HighPc = std::max(HighPc, FuncHighPc + PcOffset);
+}
+
+void CompileUnit::noteRangeAttribute(const DIE &Die, PatchLocation Attr) {
+ if (Die.getTag() != dwarf::DW_TAG_compile_unit)
+ RangeAttributes.push_back(Attr);
+ else
+ UnitRangeAttribute = Attr;
+}
+
+void CompileUnit::noteLocationAttribute(PatchLocation Attr, int64_t PcOffset) {
+ LocationAttributes.emplace_back(Attr, PcOffset);
+}
+
+void CompileUnit::addNamespaceAccelerator(const DIE *Die,
+ DwarfStringPoolEntryRef Name) {
+ Namespaces.emplace_back(Name, Die);
+}
+
+void CompileUnit::addObjCAccelerator(const DIE *Die,
+ DwarfStringPoolEntryRef Name,
+ bool SkipPubSection) {
+ ObjC.emplace_back(Name, Die, SkipPubSection);
+}
+
+void CompileUnit::addNameAccelerator(const DIE *Die,
+ DwarfStringPoolEntryRef Name,
+ bool SkipPubSection) {
+ Pubnames.emplace_back(Name, Die, SkipPubSection);
+}
+
+void CompileUnit::addTypeAccelerator(const DIE *Die,
+ DwarfStringPoolEntryRef Name,
+ bool ObjcClassImplementation,
+ uint32_t QualifiedNameHash) {
+ Pubtypes.emplace_back(Name, Die, QualifiedNameHash, ObjcClassImplementation);
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinkerDeclContext.cpp b/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
new file mode 100644
index 00000000000..d9b3c4235b4
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DWARFLinker/DWARFLinkerDeclContext.cpp
@@ -0,0 +1,215 @@
+//===- DWARFLinkerDeclContext.cpp -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DWARFLinker/DWARFLinkerDeclContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFDie.h"
+#include "llvm/DebugInfo/DWARF/DWARFUnit.h"
+
+namespace llvm {
+
+/// Set the last DIE/CU a context was seen in and possibly invalidate the
+/// context if it is ambiguous.
+///
+/// In the current implementation, we don't handle overloaded functions well,
+/// because the argument types are not taken into account when computing the
+/// DeclContext tree.
+///
+/// Some of this is mitigated by using mangled names that do contain the
+/// argument types, but sometimes (e.g. with function templates) we don't have
+/// that. In that case, just do not unique anything that refers to the contexts
+/// we are not able to distinguish.
+///
+/// If a context that is not a namespace appears twice in the same CU, we know
+/// it is ambiguous. Make it invalid.
+bool DeclContext::setLastSeenDIE(CompileUnit &U, const DWARFDie &Die) {
+ if (LastSeenCompileUnitID == U.getUniqueID()) {
+ DWARFUnit &OrigUnit = U.getOrigUnit();
+ uint32_t FirstIdx = OrigUnit.getDIEIndex(LastSeenDIE);
+ U.getInfo(FirstIdx).Ctxt = nullptr;
+ return false;
+ }
+
+ LastSeenCompileUnitID = U.getUniqueID();
+ LastSeenDIE = Die;
+ return true;
+}
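+
+// A minimal sketch of the ambiguity rule above (hypothetical DIEs): if the
+// CU with unique ID 7 maps two distinct non-namespace DIEs to the same
+// DeclContext, the second setLastSeenDIE(U, Die) call observes
+// LastSeenCompileUnitID == 7, clears the first DIE's cached Ctxt, and
+// returns false so the caller can mark the context invalid.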
+
+PointerIntPair<DeclContext *, 1>
+DeclContextTree::getChildDeclContext(DeclContext &Context, const DWARFDie &DIE,
+ CompileUnit &U, bool InClangModule) {
+ unsigned Tag = DIE.getTag();
+
+ // FIXME: dsymutil-classic compat: We should bail out here if we
+ // have a specification or an abstract_origin. We will get the
+ // parent context wrong here.
+
+ switch (Tag) {
+ default:
+ // By default stop gathering child contexts.
+ return PointerIntPair<DeclContext *, 1>(nullptr);
+ case dwarf::DW_TAG_module:
+ break;
+ case dwarf::DW_TAG_compile_unit:
+ return PointerIntPair<DeclContext *, 1>(&Context);
+ case dwarf::DW_TAG_subprogram:
+ // Do not unique anything inside CU local functions.
+ if ((Context.getTag() == dwarf::DW_TAG_namespace ||
+ Context.getTag() == dwarf::DW_TAG_compile_unit) &&
+ !dwarf::toUnsigned(DIE.find(dwarf::DW_AT_external), 0))
+ return PointerIntPair<DeclContext *, 1>(nullptr);
+ LLVM_FALLTHROUGH;
+ case dwarf::DW_TAG_member:
+ case dwarf::DW_TAG_namespace:
+ case dwarf::DW_TAG_structure_type:
+ case dwarf::DW_TAG_class_type:
+ case dwarf::DW_TAG_union_type:
+ case dwarf::DW_TAG_enumeration_type:
+ case dwarf::DW_TAG_typedef:
+ // Artificial things might be ambiguous, because they might be created on
+ // demand. For example, implicitly defined constructors are ambiguous
+ // because of the way we identify contexts, and they won't be generated
+ // every time everywhere.
+ if (dwarf::toUnsigned(DIE.find(dwarf::DW_AT_artificial), 0))
+ return PointerIntPair<DeclContext *, 1>(nullptr);
+ break;
+ }
+
+ StringRef NameRef;
+ StringRef FileRef;
+
+ if (const char *LinkageName = DIE.getLinkageName())
+ NameRef = StringPool.internString(LinkageName);
+ else if (const char *ShortName = DIE.getShortName())
+ NameRef = StringPool.internString(ShortName);
+
+ bool IsAnonymousNamespace = NameRef.empty() && Tag == dwarf::DW_TAG_namespace;
+ if (IsAnonymousNamespace) {
+ // FIXME: For dsymutil-classic compatibility. I think uniquing within
+ // anonymous namespaces is wrong. There is no ODR guarantee there.
+ NameRef = "(anonymous namespace)";
+ }
+
+ if (Tag != dwarf::DW_TAG_class_type && Tag != dwarf::DW_TAG_structure_type &&
+ Tag != dwarf::DW_TAG_union_type &&
+ Tag != dwarf::DW_TAG_enumeration_type && NameRef.empty())
+ return PointerIntPair<DeclContext *, 1>(nullptr);
+
+ unsigned Line = 0;
+ unsigned ByteSize = std::numeric_limits<uint32_t>::max();
+
+ if (!InClangModule) {
+ // Gather some discriminating data about the DeclContext we will be
+ // creating: File, line number and byte size. This shouldn't be necessary,
+ // because the ODR is just about names, but given that we do some
+ // approximations with overloaded functions and anonymous namespaces, use
+ // these additional data points to make the process safer.
+ //
+ // This is disabled for clang modules, because forward declarations of
+ // module-defined types do not have a file and line.
+ ByteSize = dwarf::toUnsigned(DIE.find(dwarf::DW_AT_byte_size),
+ std::numeric_limits<uint64_t>::max());
+ if (Tag != dwarf::DW_TAG_namespace || IsAnonymousNamespace) {
+ if (unsigned FileNum =
+ dwarf::toUnsigned(DIE.find(dwarf::DW_AT_decl_file), 0)) {
+ if (const auto *LT = U.getOrigUnit().getContext().getLineTableForUnit(
+ &U.getOrigUnit())) {
+ // FIXME: dsymutil-classic compatibility. I'd rather not
+ // unique anything in anonymous namespaces, but if we do, then
+ // verify that the file and line correspond.
+ if (IsAnonymousNamespace)
+ FileNum = 1;
+
+ if (LT->hasFileAtIndex(FileNum)) {
+ Line = dwarf::toUnsigned(DIE.find(dwarf::DW_AT_decl_line), 0);
+ // Cache the resolved paths based on the index in the line table,
+ // because calling realpath is expensive.
+ FileRef = getResolvedPath(U, FileNum, *LT);
+ }
+ }
+ }
+ }
+ }
+
+ if (!Line && NameRef.empty())
+ return PointerIntPair<DeclContext *, 1>(nullptr);
+
+ // We hash NameRef, which is the mangled name, so that most overloaded
+ // functions resolve correctly.
+ //
+ // Strictly speaking, hashing the Tag is only necessary for a
+ // DW_TAG_module, to prevent uniquing of a module and a namespace
+ // with the same name.
+ //
+ // FIXME: dsymutil-classic won't unique the same type presented
+ // once as a struct and once as a class. We use the Tag in the fully
+ // qualified name hash to get the same effect.
+ unsigned Hash = hash_combine(Context.getQualifiedNameHash(), Tag, NameRef);
+
+ // FIXME: dsymutil-classic compatibility: when we don't have a name,
+ // use the filename.
+ if (IsAnonymousNamespace)
+ Hash = hash_combine(Hash, FileRef);
+
+ // Now check whether this context already exists.
+ DeclContext Key(Hash, Line, ByteSize, Tag, NameRef, FileRef, Context);
+ auto ContextIter = Contexts.find(&Key);
+
+ if (ContextIter == Contexts.end()) {
+ // The context wasn't found.
+ bool Inserted;
+ DeclContext *NewContext =
+ new (Allocator) DeclContext(Hash, Line, ByteSize, Tag, NameRef, FileRef,
+ Context, DIE, U.getUniqueID());
+ std::tie(ContextIter, Inserted) = Contexts.insert(NewContext);
+ assert(Inserted && "Failed to insert DeclContext");
+ (void)Inserted;
+ } else if (Tag != dwarf::DW_TAG_namespace &&
+ !(*ContextIter)->setLastSeenDIE(U, DIE)) {
+ // The context was found, but it is ambiguous with another context
+ // in the same file. Mark it invalid.
+ return PointerIntPair<DeclContext *, 1>(*ContextIter, /* Invalid= */ 1);
+ }
+
+ assert(ContextIter != Contexts.end());
+ // FIXME: dsymutil-classic compatibility. Union types aren't
+ // uniqued, but their children might be.
+ if ((Tag == dwarf::DW_TAG_subprogram &&
+ Context.getTag() != dwarf::DW_TAG_structure_type &&
+ Context.getTag() != dwarf::DW_TAG_class_type) ||
+ (Tag == dwarf::DW_TAG_union_type))
+ return PointerIntPair<DeclContext *, 1>(*ContextIter, /* Invalid= */ 1);
+
+ return PointerIntPair<DeclContext *, 1>(*ContextIter);
+}
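+
+// Worked example of the uniquing key above (hypothetical input): a struct
+// named "S" declared at line 12 of "a.h" with byte size 8 under context C
+// hashes as hash_combine(C.getQualifiedNameHash(), DW_TAG_structure_type,
+// "S"), and the lookup key is DeclContext(Hash, 12, 8,
+// DW_TAG_structure_type, "S", "a.h", C); two translation units declaring
+// the same struct therefore share a single DeclContext.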
+
+StringRef
+DeclContextTree::getResolvedPath(CompileUnit &CU, unsigned FileNum,
+ const DWARFDebugLine::LineTable &LineTable) {
+ std::pair<unsigned, unsigned> Key = {CU.getUniqueID(), FileNum};
+
+ ResolvedPathsMap::const_iterator It = ResolvedPaths.find(Key);
+ if (It == ResolvedPaths.end()) {
+ std::string FileName;
+ bool FoundFileName = LineTable.getFileNameByIndex(
+ FileNum, CU.getOrigUnit().getCompilationDir(),
+ DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath, FileName);
+ (void)FoundFileName;
+ assert(FoundFileName && "Must get file name from line table");
+
+ // Second level of caching, this time based on the file's parent
+ // path.
+ StringRef ResolvedPath = PathResolver.resolve(FileName, StringPool);
+
+ It = ResolvedPaths.insert(std::make_pair(Key, ResolvedPath)).first;
+ }
+
+ return It->second;
+}
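+
+// Usage sketch (hypothetical indices): the cache key is the pair
+// (CU unique ID, file index), so resolving file 3 of CU 0 twice performs a
+// single getFileNameByIndex()/realpath round trip; the second lookup
+// returns the StringRef already stored in ResolvedPaths.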
+
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/DWARFLinker/DWARFStreamer.cpp b/contrib/libs/llvm12/lib/DWARFLinker/DWARFStreamer.cpp
new file mode 100644
index 00000000000..c0043ae39ef
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DWARFLinker/DWARFStreamer.cpp
@@ -0,0 +1,800 @@
+//===- DwarfStreamer.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DWARFLinker/DWARFStreamer.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/CodeGen/NonRelocatableStringpool.h"
+#include "llvm/DWARFLinker/DWARFLinkerCompileUnit.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCDwarf.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/MCTargetOptions.h"
+#include "llvm/MC/MCTargetOptionsCommandFlags.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetOptions.h"
+
+namespace llvm {
+
+bool DwarfStreamer::init(Triple TheTriple) {
+ std::string ErrorStr;
+ std::string TripleName;
+ StringRef Context = "dwarf streamer init";
+
+ // Get the target.
+ const Target *TheTarget =
+ TargetRegistry::lookupTarget(TripleName, TheTriple, ErrorStr);
+ if (!TheTarget)
+ return error(ErrorStr, Context), false;
+ TripleName = TheTriple.getTriple();
+
+ // Create all the MC Objects.
+ MRI.reset(TheTarget->createMCRegInfo(TripleName));
+ if (!MRI)
+ return error(Twine("no register info for target ") + TripleName, Context),
+ false;
+
+ MCTargetOptions MCOptions = mc::InitMCTargetOptionsFromFlags();
+ MAI.reset(TheTarget->createMCAsmInfo(*MRI, TripleName, MCOptions));
+ if (!MAI)
+ return error("no asm info for target " + TripleName, Context), false;
+
+ MOFI.reset(new MCObjectFileInfo);
+ MC.reset(new MCContext(MAI.get(), MRI.get(), MOFI.get()));
+ MOFI->InitMCObjectFileInfo(TheTriple, /*PIC*/ false, *MC);
+
+ MSTI.reset(TheTarget->createMCSubtargetInfo(TripleName, "", ""));
+ if (!MSTI)
+ return error("no subtarget info for target " + TripleName, Context), false;
+
+ MAB = TheTarget->createMCAsmBackend(*MSTI, *MRI, MCOptions);
+ if (!MAB)
+ return error("no asm backend for target " + TripleName, Context), false;
+
+ MII.reset(TheTarget->createMCInstrInfo());
+ if (!MII)
+ return error("no instr info info for target " + TripleName, Context), false;
+
+ MCE = TheTarget->createMCCodeEmitter(*MII, *MRI, *MC);
+ if (!MCE)
+ return error("no code emitter for target " + TripleName, Context), false;
+
+ switch (OutFileType) {
+ case OutputFileType::Assembly: {
+ MIP = TheTarget->createMCInstPrinter(TheTriple, MAI->getAssemblerDialect(),
+ *MAI, *MII, *MRI);
+ MS = TheTarget->createAsmStreamer(
+ *MC, std::make_unique<formatted_raw_ostream>(OutFile), true, true, MIP,
+ std::unique_ptr<MCCodeEmitter>(MCE), std::unique_ptr<MCAsmBackend>(MAB),
+ true);
+ break;
+ }
+ case OutputFileType::Object: {
+ MS = TheTarget->createMCObjectStreamer(
+ TheTriple, *MC, std::unique_ptr<MCAsmBackend>(MAB),
+ MAB->createObjectWriter(OutFile), std::unique_ptr<MCCodeEmitter>(MCE),
+ *MSTI, MCOptions.MCRelaxAll, MCOptions.MCIncrementalLinkerCompatible,
+ /*DWARFMustBeAtTheEnd*/ false);
+ break;
+ }
+ }
+
+ if (!MS)
+ return error("no object streamer for target " + TripleName, Context), false;
+
+ // Finally create the AsmPrinter we'll use to emit the DIEs.
+ TM.reset(TheTarget->createTargetMachine(TripleName, "", "", TargetOptions(),
+ None));
+ if (!TM)
+ return error("no target machine for target " + TripleName, Context), false;
+
+ Asm.reset(TheTarget->createAsmPrinter(*TM, std::unique_ptr<MCStreamer>(MS)));
+ if (!Asm)
+ return error("no asm printer for target " + TripleName, Context), false;
+
+ RangesSectionSize = 0;
+ LocSectionSize = 0;
+ LineSectionSize = 0;
+ FrameSectionSize = 0;
+ DebugInfoSectionSize = 0;
+
+ return true;
+}
+
+void DwarfStreamer::finish() { MS->Finish(); }
+
+void DwarfStreamer::switchToDebugInfoSection(unsigned DwarfVersion) {
+ MS->SwitchSection(MOFI->getDwarfInfoSection());
+ MC->setDwarfVersion(DwarfVersion);
+}
+
+/// Emit the compilation unit header for \p Unit in the debug_info section.
+///
+/// A Dwarf 4 section header is encoded as:
+/// uint32_t Unit length (omitting this field)
+/// uint16_t Version
+/// uint32_t Abbreviation table offset
+/// uint8_t Address size
+/// Leading to a total of 11 bytes.
+///
+/// A Dwarf 5 section header is encoded as:
+/// uint32_t Unit length (omitting this field)
+/// uint16_t Version
+/// uint8_t Unit type
+/// uint8_t Address size
+/// uint32_t Abbreviation table offset
+/// Leading to a total of 12 bytes.
+void DwarfStreamer::emitCompileUnitHeader(CompileUnit &Unit,
+ unsigned DwarfVersion) {
+ switchToDebugInfoSection(DwarfVersion);
+
+ /// The start of the unit within its section.
+ Unit.setLabelBegin(Asm->createTempSymbol("cu_begin"));
+ Asm->OutStreamer->emitLabel(Unit.getLabelBegin());
+
+ // Emit size of content not including length itself. The size has already
+ // been computed in CompileUnit::computeOffsets(). Subtract 4 from that size to
+ // account for the length field.
+ Asm->emitInt32(Unit.getNextUnitOffset() - Unit.getStartOffset() - 4);
+ Asm->emitInt16(DwarfVersion);
+
+ if (DwarfVersion >= 5) {
+ Asm->emitInt8(dwarf::DW_UT_compile);
+ Asm->emitInt8(Unit.getOrigUnit().getAddressByteSize());
+ // We share one abbreviations table across all units so it's always at the
+ // start of the section.
+ Asm->emitInt32(0);
+ DebugInfoSectionSize += 12;
+ } else {
+ // We share one abbreviations table across all units so it's always at the
+ // start of the section.
+ Asm->emitInt32(0);
+ Asm->emitInt8(Unit.getOrigUnit().getAddressByteSize());
+ DebugInfoSectionSize += 11;
+ }
+
+ // Remember this CU.
+ EmittedUnits.push_back({Unit.getUniqueID(), Unit.getLabelBegin()});
+}
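+
+// Byte-level sketch of the v4 branch above (hypothetical offsets): for a
+// unit with StartOffset 0x0 and NextUnitOffset 0x30 this emits
+//   .long 0x2C   // unit length: 0x30 - 0x0 - 4
+//   .short 4     // DWARF version
+//   .long 0      // abbrev offset (shared table at section start)
+//   .byte 8      // address size
+// accounting for the 11 header bytes added to DebugInfoSectionSize.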
+
+/// Emit the \p Abbrevs array as the shared abbreviation table
+/// for the linked Dwarf file.
+void DwarfStreamer::emitAbbrevs(
+ const std::vector<std::unique_ptr<DIEAbbrev>> &Abbrevs,
+ unsigned DwarfVersion) {
+ MS->SwitchSection(MOFI->getDwarfAbbrevSection());
+ MC->setDwarfVersion(DwarfVersion);
+ Asm->emitDwarfAbbrevs(Abbrevs);
+}
+
+/// Recursively emit the DIE tree rooted at \p Die.
+void DwarfStreamer::emitDIE(DIE &Die) {
+ MS->SwitchSection(MOFI->getDwarfInfoSection());
+ Asm->emitDwarfDIE(Die);
+ DebugInfoSectionSize += Die.getSize();
+}
+
+/// Emit the contents of section \p SecName, provided in \p SecData.
+void DwarfStreamer::emitSectionContents(StringRef SecData, StringRef SecName) {
+ MCSection *Section =
+ StringSwitch<MCSection *>(SecName)
+ .Case("debug_line", MC->getObjectFileInfo()->getDwarfLineSection())
+ .Case("debug_loc", MC->getObjectFileInfo()->getDwarfLocSection())
+ .Case("debug_ranges",
+ MC->getObjectFileInfo()->getDwarfRangesSection())
+ .Case("debug_frame", MC->getObjectFileInfo()->getDwarfFrameSection())
+ .Case("debug_aranges",
+ MC->getObjectFileInfo()->getDwarfARangesSection())
+ .Default(nullptr);
+
+ if (Section) {
+ MS->SwitchSection(Section);
+
+ MS->emitBytes(SecData);
+ }
+}
+
+/// Emit DIE containing warnings.
+void DwarfStreamer::emitPaperTrailWarningsDie(DIE &Die) {
+ switchToDebugInfoSection(/* Version */ 2);
+ auto &Asm = getAsmPrinter();
+ Asm.emitInt32(11 + Die.getSize() - 4);
+ Asm.emitInt16(2);
+ Asm.emitInt32(0);
+ Asm.emitInt8(MOFI->getTargetTriple().isArch64Bit() ? 8 : 4);
+ DebugInfoSectionSize += 11;
+ emitDIE(Die);
+}
+
+/// Emit the debug_str section stored in \p Pool.
+void DwarfStreamer::emitStrings(const NonRelocatableStringpool &Pool) {
+ Asm->OutStreamer->SwitchSection(MOFI->getDwarfStrSection());
+ std::vector<DwarfStringPoolEntryRef> Entries = Pool.getEntriesForEmission();
+ for (auto Entry : Entries) {
+ // Emit the string itself.
+ Asm->OutStreamer->emitBytes(Entry.getString());
+ // Emit a null terminator.
+ Asm->emitInt8(0);
+ }
+
+#if 0
+ if (DwarfVersion >= 5) {
+ // Emit an empty string offset section.
+ Asm->OutStreamer->SwitchSection(MOFI->getDwarfStrOffSection());
+ Asm->emitDwarfUnitLength(4, "Length of String Offsets Set");
+ Asm->emitInt16(DwarfVersion);
+ Asm->emitInt16(0);
+ }
+#endif
+}
+
+void DwarfStreamer::emitDebugNames(
+ AccelTable<DWARF5AccelTableStaticData> &Table) {
+ if (EmittedUnits.empty())
+ return;
+
+ // Build up data structures needed to emit this section.
+ std::vector<MCSymbol *> CompUnits;
+ DenseMap<unsigned, size_t> UniqueIdToCuMap;
+ unsigned Id = 0;
+ for (auto &CU : EmittedUnits) {
+ CompUnits.push_back(CU.LabelBegin);
+ // We might be omitting CUs, so we need to remap them.
+ UniqueIdToCuMap[CU.ID] = Id++;
+ }
+
+ Asm->OutStreamer->SwitchSection(MOFI->getDwarfDebugNamesSection());
+ emitDWARF5AccelTable(
+ Asm.get(), Table, CompUnits,
+ [&UniqueIdToCuMap](const DWARF5AccelTableStaticData &Entry) {
+ return UniqueIdToCuMap[Entry.getCUIndex()];
+ });
+}
+
+void DwarfStreamer::emitAppleNamespaces(
+ AccelTable<AppleAccelTableStaticOffsetData> &Table) {
+ Asm->OutStreamer->SwitchSection(MOFI->getDwarfAccelNamespaceSection());
+ auto *SectionBegin = Asm->createTempSymbol("namespac_begin");
+ Asm->OutStreamer->emitLabel(SectionBegin);
+ emitAppleAccelTable(Asm.get(), Table, "namespac", SectionBegin);
+}
+
+void DwarfStreamer::emitAppleNames(
+ AccelTable<AppleAccelTableStaticOffsetData> &Table) {
+ Asm->OutStreamer->SwitchSection(MOFI->getDwarfAccelNamesSection());
+ auto *SectionBegin = Asm->createTempSymbol("names_begin");
+ Asm->OutStreamer->emitLabel(SectionBegin);
+ emitAppleAccelTable(Asm.get(), Table, "names", SectionBegin);
+}
+
+void DwarfStreamer::emitAppleObjc(
+ AccelTable<AppleAccelTableStaticOffsetData> &Table) {
+ Asm->OutStreamer->SwitchSection(MOFI->getDwarfAccelObjCSection());
+ auto *SectionBegin = Asm->createTempSymbol("objc_begin");
+ Asm->OutStreamer->emitLabel(SectionBegin);
+ emitAppleAccelTable(Asm.get(), Table, "objc", SectionBegin);
+}
+
+void DwarfStreamer::emitAppleTypes(
+ AccelTable<AppleAccelTableStaticTypeData> &Table) {
+ Asm->OutStreamer->SwitchSection(MOFI->getDwarfAccelTypesSection());
+ auto *SectionBegin = Asm->createTempSymbol("types_begin");
+ Asm->OutStreamer->emitLabel(SectionBegin);
+ emitAppleAccelTable(Asm.get(), Table, "types", SectionBegin);
+}
+
+/// Emit the swift_ast section stored in \p Buffer.
+void DwarfStreamer::emitSwiftAST(StringRef Buffer) {
+ MCSection *SwiftASTSection = MOFI->getDwarfSwiftASTSection();
+ SwiftASTSection->setAlignment(Align(32));
+ MS->SwitchSection(SwiftASTSection);
+ MS->emitBytes(Buffer);
+}
+
+/// Emit the debug_range section contents for \p FuncRange by
+/// translating the original \p Entries. The debug_range section
+/// format is trivial: it consists just of pairs of address-sized values
+/// describing the ranges.
+void DwarfStreamer::emitRangesEntries(
+ int64_t UnitPcOffset, uint64_t OrigLowPc,
+ const FunctionIntervals::const_iterator &FuncRange,
+ const std::vector<DWARFDebugRangeList::RangeListEntry> &Entries,
+ unsigned AddressSize) {
+ MS->SwitchSection(MC->getObjectFileInfo()->getDwarfRangesSection());
+
+ // Offset each range by the right amount.
+ int64_t PcOffset = Entries.empty() ? 0 : FuncRange.value() + UnitPcOffset;
+ for (const auto &Range : Entries) {
+ if (Range.isBaseAddressSelectionEntry(AddressSize)) {
+ warn("unsupported base address selection operation",
+ "emitting debug_ranges");
+ break;
+ }
+ // Do not emit empty ranges.
+ if (Range.StartAddress == Range.EndAddress)
+ continue;
+
+ // All range entries should lie in the function range.
+ if (!(Range.StartAddress + OrigLowPc >= FuncRange.start() &&
+ Range.EndAddress + OrigLowPc <= FuncRange.stop()))
+ warn("inconsistent range data.", "emitting debug_ranges");
+ MS->emitIntValue(Range.StartAddress + PcOffset, AddressSize);
+ MS->emitIntValue(Range.EndAddress + PcOffset, AddressSize);
+ RangesSectionSize += 2 * AddressSize;
+ }
+
+ // Add the terminator entry.
+ MS->emitIntValue(0, AddressSize);
+ MS->emitIntValue(0, AddressSize);
+ RangesSectionSize += 2 * AddressSize;
+}
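+
+// Encoding sketch (hypothetical numbers): with AddressSize == 8 and
+// PcOffset == 0x1000, an input entry [0x100, 0x180) is re-emitted as the
+// two address-sized values 0x1100 and 0x1180; the 0/0 pair written above
+// terminates the list.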
+
+/// Emit the debug_aranges contribution of a unit and
+/// if \p DoDebugRanges is true the debug_range contents for a
+/// compile_unit level DW_AT_ranges attribute (which are basically the
+/// same thing with a different base address).
+/// Just aggregate all the ranges gathered inside that unit.
+void DwarfStreamer::emitUnitRangesEntries(CompileUnit &Unit,
+ bool DoDebugRanges) {
+ unsigned AddressSize = Unit.getOrigUnit().getAddressByteSize();
+ // Gather the ranges in a vector, so that we can simplify them. The
+ // IntervalMap will have coalesced the non-linked ranges, but here
+ // we want to coalesce the linked addresses.
+ std::vector<std::pair<uint64_t, uint64_t>> Ranges;
+ const auto &FunctionRanges = Unit.getFunctionRanges();
+ for (auto Range = FunctionRanges.begin(), End = FunctionRanges.end();
+ Range != End; ++Range)
+ Ranges.push_back(std::make_pair(Range.start() + Range.value(),
+ Range.stop() + Range.value()));
+
+ // The object addresses were sorted, but again, the linked
+ // addresses might end up in a different order.
+ llvm::sort(Ranges);
+
+ if (!Ranges.empty()) {
+ MS->SwitchSection(MC->getObjectFileInfo()->getDwarfARangesSection());
+
+ MCSymbol *BeginLabel = Asm->createTempSymbol("Barange");
+ MCSymbol *EndLabel = Asm->createTempSymbol("Earange");
+
+ unsigned HeaderSize =
+ sizeof(int32_t) + // Size of contents (w/o this field)
+ sizeof(int16_t) + // DWARF ARange version number
+ sizeof(int32_t) + // Offset of CU in the .debug_info section
+ sizeof(int8_t) + // Pointer Size (in bytes)
+ sizeof(int8_t); // Segment Size (in bytes)
+
+ unsigned TupleSize = AddressSize * 2;
+ unsigned Padding = offsetToAlignment(HeaderSize, Align(TupleSize));
+
+ Asm->emitLabelDifference(EndLabel, BeginLabel, 4); // Arange length
+ Asm->OutStreamer->emitLabel(BeginLabel);
+ Asm->emitInt16(dwarf::DW_ARANGES_VERSION); // Version number
+ Asm->emitInt32(Unit.getStartOffset()); // Corresponding unit's offset
+ Asm->emitInt8(AddressSize); // Address size
+ Asm->emitInt8(0); // Segment size
+
+ Asm->OutStreamer->emitFill(Padding, 0x0);
+
+ for (auto Range = Ranges.begin(), End = Ranges.end(); Range != End;
+ ++Range) {
+ uint64_t RangeStart = Range->first;
+ MS->emitIntValue(RangeStart, AddressSize);
+ while ((Range + 1) != End && Range->second == (Range + 1)->first)
+ ++Range;
+ MS->emitIntValue(Range->second - RangeStart, AddressSize);
+ }
+
+ // Emit terminator
+ Asm->OutStreamer->emitIntValue(0, AddressSize);
+ Asm->OutStreamer->emitIntValue(0, AddressSize);
+ Asm->OutStreamer->emitLabel(EndLabel);
+ }
+
+ if (!DoDebugRanges)
+ return;
+
+ MS->SwitchSection(MC->getObjectFileInfo()->getDwarfRangesSection());
+ // Offset each range by the right amount.
+ int64_t PcOffset = -Unit.getLowPc();
+ // Emit coalesced ranges.
+ for (auto Range = Ranges.begin(), End = Ranges.end(); Range != End; ++Range) {
+ MS->emitIntValue(Range->first + PcOffset, AddressSize);
+ while (Range + 1 != End && Range->second == (Range + 1)->first)
+ ++Range;
+ MS->emitIntValue(Range->second + PcOffset, AddressSize);
+ RangesSectionSize += 2 * AddressSize;
+ }
+
+ // Add the terminator entry.
+ MS->emitIntValue(0, AddressSize);
+ MS->emitIntValue(0, AddressSize);
+ RangesSectionSize += 2 * AddressSize;
+}
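+
+// Worked padding example for the aranges header above (assuming 8-byte
+// addresses): HeaderSize is 4 + 2 + 4 + 1 + 1 = 12 bytes and TupleSize is
+// 16, so offsetToAlignment(12, Align(16)) inserts 4 zero bytes before the
+// first (address, size) tuple, keeping the tuples naturally aligned.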
+
+/// Emit location lists for \p Unit and update attributes to point to the new
+/// entries.
+void DwarfStreamer::emitLocationsForUnit(
+ const CompileUnit &Unit, DWARFContext &Dwarf,
+ std::function<void(StringRef, SmallVectorImpl<uint8_t> &)> ProcessExpr) {
+ const auto &Attributes = Unit.getLocationAttributes();
+
+ if (Attributes.empty())
+ return;
+
+ MS->SwitchSection(MC->getObjectFileInfo()->getDwarfLocSection());
+
+ unsigned AddressSize = Unit.getOrigUnit().getAddressByteSize();
+ uint64_t BaseAddressMarker = (AddressSize == 8)
+ ? std::numeric_limits<uint64_t>::max()
+ : std::numeric_limits<uint32_t>::max();
+ const DWARFSection &InputSec = Dwarf.getDWARFObj().getLocSection();
+ DataExtractor Data(InputSec.Data, Dwarf.isLittleEndian(), AddressSize);
+ DWARFUnit &OrigUnit = Unit.getOrigUnit();
+ auto OrigUnitDie = OrigUnit.getUnitDIE(false);
+ int64_t UnitPcOffset = 0;
+ if (auto OrigLowPc = dwarf::toAddress(OrigUnitDie.find(dwarf::DW_AT_low_pc)))
+ UnitPcOffset = int64_t(*OrigLowPc) - Unit.getLowPc();
+
+ SmallVector<uint8_t, 32> Buffer;
+ for (const auto &Attr : Attributes) {
+ uint64_t Offset = Attr.first.get();
+ Attr.first.set(LocSectionSize);
+ // This is the quantity to add to the old location address to get
+ // the correct address for the new one.
+ int64_t LocPcOffset = Attr.second + UnitPcOffset;
+ while (Data.isValidOffset(Offset)) {
+ uint64_t Low = Data.getUnsigned(&Offset, AddressSize);
+ uint64_t High = Data.getUnsigned(&Offset, AddressSize);
+ LocSectionSize += 2 * AddressSize;
+ // End of list entry.
+ if (Low == 0 && High == 0) {
+ Asm->OutStreamer->emitIntValue(0, AddressSize);
+ Asm->OutStreamer->emitIntValue(0, AddressSize);
+ break;
+ }
+ // Base address selection entry.
+ if (Low == BaseAddressMarker) {
+ Asm->OutStreamer->emitIntValue(BaseAddressMarker, AddressSize);
+ Asm->OutStreamer->emitIntValue(High + Attr.second, AddressSize);
+ LocPcOffset = 0;
+ continue;
+ }
+ // Location list entry.
+ Asm->OutStreamer->emitIntValue(Low + LocPcOffset, AddressSize);
+ Asm->OutStreamer->emitIntValue(High + LocPcOffset, AddressSize);
+ uint64_t Length = Data.getU16(&Offset);
+ Asm->OutStreamer->emitIntValue(Length, 2);
+ // Copy the bytes into the buffer, process them, emit them.
+ Buffer.reserve(Length);
+ Buffer.resize(0);
+ StringRef Input = InputSec.Data.substr(Offset, Length);
+ ProcessExpr(Input, Buffer);
+ Asm->OutStreamer->emitBytes(
+ StringRef((const char *)Buffer.data(), Length));
+ Offset += Length;
+ LocSectionSize += Length + 2;
+ }
+ }
+}
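+
+// Entry-rewriting sketch (hypothetical values): with AddressSize == 8 and
+// LocPcOffset == -0x1000, an input entry (Low 0x2000, High 0x2010, 5
+// expression bytes) is re-emitted as (0x1000, 0x1010), the 2-byte length 5,
+// and the 5 expression bytes as rewritten by ProcessExpr; LocSectionSize
+// grows by 2 * 8 + 2 + 5 bytes.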
+
+void DwarfStreamer::emitLineTableForUnit(MCDwarfLineTableParams Params,
+ StringRef PrologueBytes,
+ unsigned MinInstLength,
+ std::vector<DWARFDebugLine::Row> &Rows,
+ unsigned PointerSize) {
+ // Switch to the section where the table will be emitted into.
+ MS->SwitchSection(MC->getObjectFileInfo()->getDwarfLineSection());
+ MCSymbol *LineStartSym = MC->createTempSymbol();
+ MCSymbol *LineEndSym = MC->createTempSymbol();
+
+ // The first 4 bytes are the total length of the information for this
+ // compilation unit (not including these 4 bytes for the length).
+ Asm->emitLabelDifference(LineEndSym, LineStartSym, 4);
+ Asm->OutStreamer->emitLabel(LineStartSym);
+ // Copy Prologue.
+ MS->emitBytes(PrologueBytes);
+ LineSectionSize += PrologueBytes.size() + 4;
+
+ SmallString<128> EncodingBuffer;
+ raw_svector_ostream EncodingOS(EncodingBuffer);
+
+ if (Rows.empty()) {
+ // We only have the dummy entry; dsymutil emits an entry with a 0
+ // address in that case.
+ MCDwarfLineAddr::Encode(*MC, Params, std::numeric_limits<int64_t>::max(), 0,
+ EncodingOS);
+ MS->emitBytes(EncodingOS.str());
+ LineSectionSize += EncodingBuffer.size();
+ MS->emitLabel(LineEndSym);
+ return;
+ }
+
+ // Line table state machine fields
+ unsigned FileNum = 1;
+ unsigned LastLine = 1;
+ unsigned Column = 0;
+ unsigned IsStatement = 1;
+ unsigned Isa = 0;
+ uint64_t Address = -1ULL;
+
+ unsigned RowsSinceLastSequence = 0;
+
+ for (unsigned Idx = 0; Idx < Rows.size(); ++Idx) {
+ auto &Row = Rows[Idx];
+
+ int64_t AddressDelta;
+ if (Address == -1ULL) {
+ MS->emitIntValue(dwarf::DW_LNS_extended_op, 1);
+ MS->emitULEB128IntValue(PointerSize + 1);
+ MS->emitIntValue(dwarf::DW_LNE_set_address, 1);
+ MS->emitIntValue(Row.Address.Address, PointerSize);
+ LineSectionSize += 2 + PointerSize + getULEB128Size(PointerSize + 1);
+ AddressDelta = 0;
+ } else {
+ AddressDelta = (Row.Address.Address - Address) / MinInstLength;
+ }
+
+ // FIXME: code copied and transformed from MCDwarf.cpp::EmitDwarfLineTable.
+ // We should find a way to share this code, but the current compatibility
+ // requirement with classic dsymutil makes it hard. Revisit that once this
+ // requirement is dropped.
+
+ if (FileNum != Row.File) {
+ FileNum = Row.File;
+ MS->emitIntValue(dwarf::DW_LNS_set_file, 1);
+ MS->emitULEB128IntValue(FileNum);
+ LineSectionSize += 1 + getULEB128Size(FileNum);
+ }
+ if (Column != Row.Column) {
+ Column = Row.Column;
+ MS->emitIntValue(dwarf::DW_LNS_set_column, 1);
+ MS->emitULEB128IntValue(Column);
+ LineSectionSize += 1 + getULEB128Size(Column);
+ }
+
+ // FIXME: We should handle the discriminator here, but dsymutil doesn't
+ // consider it, thus ignore it for now.
+
+ if (Isa != Row.Isa) {
+ Isa = Row.Isa;
+ MS->emitIntValue(dwarf::DW_LNS_set_isa, 1);
+ MS->emitULEB128IntValue(Isa);
+ LineSectionSize += 1 + getULEB128Size(Isa);
+ }
+ if (IsStatement != Row.IsStmt) {
+ IsStatement = Row.IsStmt;
+ MS->emitIntValue(dwarf::DW_LNS_negate_stmt, 1);
+ LineSectionSize += 1;
+ }
+ if (Row.BasicBlock) {
+ MS->emitIntValue(dwarf::DW_LNS_set_basic_block, 1);
+ LineSectionSize += 1;
+ }
+
+ if (Row.PrologueEnd) {
+ MS->emitIntValue(dwarf::DW_LNS_set_prologue_end, 1);
+ LineSectionSize += 1;
+ }
+
+ if (Row.EpilogueBegin) {
+ MS->emitIntValue(dwarf::DW_LNS_set_epilogue_begin, 1);
+ LineSectionSize += 1;
+ }
+
+ int64_t LineDelta = int64_t(Row.Line) - LastLine;
+ if (!Row.EndSequence) {
+ MCDwarfLineAddr::Encode(*MC, Params, LineDelta, AddressDelta, EncodingOS);
+ MS->emitBytes(EncodingOS.str());
+ LineSectionSize += EncodingBuffer.size();
+ EncodingBuffer.resize(0);
+ Address = Row.Address.Address;
+ LastLine = Row.Line;
+ RowsSinceLastSequence++;
+ } else {
+ if (LineDelta) {
+ MS->emitIntValue(dwarf::DW_LNS_advance_line, 1);
+ MS->emitSLEB128IntValue(LineDelta);
+ LineSectionSize += 1 + getSLEB128Size(LineDelta);
+ }
+ if (AddressDelta) {
+ MS->emitIntValue(dwarf::DW_LNS_advance_pc, 1);
+ MS->emitULEB128IntValue(AddressDelta);
+ LineSectionSize += 1 + getULEB128Size(AddressDelta);
+ }
+ MCDwarfLineAddr::Encode(*MC, Params, std::numeric_limits<int64_t>::max(),
+ 0, EncodingOS);
+ MS->emitBytes(EncodingOS.str());
+ LineSectionSize += EncodingBuffer.size();
+ EncodingBuffer.resize(0);
+ Address = -1ULL;
+ LastLine = FileNum = IsStatement = 1;
+ RowsSinceLastSequence = Column = Isa = 0;
+ }
+ }
+
+ if (RowsSinceLastSequence) {
+ MCDwarfLineAddr::Encode(*MC, Params, std::numeric_limits<int64_t>::max(), 0,
+ EncodingOS);
+ MS->emitBytes(EncodingOS.str());
+ LineSectionSize += EncodingBuffer.size();
+ EncodingBuffer.resize(0);
+ }
+
+ MS->emitLabel(LineEndSym);
+}
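+
+// Row-encoding sketch (hypothetical rows): advancing from address 0x1000 /
+// line 10 to address 0x1008 / line 12 with MinInstLength == 1 gives
+// AddressDelta == 8 and LineDelta == 2, which MCDwarfLineAddr::Encode turns
+// into a single special opcode (or a short standard-opcode sequence) that
+// is appended to EncodingBuffer and counted in LineSectionSize.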
+
+/// Copy the debug_line over to the updated binary while unobfuscating the file
+/// names and directories.
+void DwarfStreamer::translateLineTable(DataExtractor Data, uint64_t Offset) {
+ MS->SwitchSection(MC->getObjectFileInfo()->getDwarfLineSection());
+ StringRef Contents = Data.getData();
+
+ // We have to deconstruct the line table header, because it contains two
+ // length fields that will need to be updated when we change the length of
+ // the files and directories in there.
+ unsigned UnitLength = Data.getU32(&Offset);
+ uint64_t UnitEnd = Offset + UnitLength;
+ MCSymbol *BeginLabel = MC->createTempSymbol();
+ MCSymbol *EndLabel = MC->createTempSymbol();
+ unsigned Version = Data.getU16(&Offset);
+
+ if (Version > 5) {
+ warn("Unsupported line table version: dropping contents and not "
+ "unobfsucating line table.");
+ return;
+ }
+
+ Asm->emitLabelDifference(EndLabel, BeginLabel, 4);
+ Asm->OutStreamer->emitLabel(BeginLabel);
+ Asm->emitInt16(Version);
+ LineSectionSize += 6;
+
+ MCSymbol *HeaderBeginLabel = MC->createTempSymbol();
+ MCSymbol *HeaderEndLabel = MC->createTempSymbol();
+ Asm->emitLabelDifference(HeaderEndLabel, HeaderBeginLabel, 4);
+ Asm->OutStreamer->emitLabel(HeaderBeginLabel);
+ Offset += 4;
+ LineSectionSize += 4;
+
+ uint64_t AfterHeaderLengthOffset = Offset;
+ // Skip to the directories.
+ Offset += (Version >= 4) ? 5 : 4;
+ unsigned OpcodeBase = Data.getU8(&Offset);
+ Offset += OpcodeBase - 1;
+ Asm->OutStreamer->emitBytes(Contents.slice(AfterHeaderLengthOffset, Offset));
+ LineSectionSize += Offset - AfterHeaderLengthOffset;
+
+ // Offset points to the first directory.
+ while (const char *Dir = Data.getCStr(&Offset)) {
+ if (Dir[0] == 0)
+ break;
+
+ StringRef Translated = Translator(Dir);
+ Asm->OutStreamer->emitBytes(Translated);
+ Asm->emitInt8(0);
+ LineSectionSize += Translated.size() + 1;
+ }
+ Asm->emitInt8(0);
+ LineSectionSize += 1;
+
+ while (const char *File = Data.getCStr(&Offset)) {
+ if (File[0] == 0)
+ break;
+
+ StringRef Translated = Translator(File);
+ Asm->OutStreamer->emitBytes(Translated);
+ Asm->emitInt8(0);
+ LineSectionSize += Translated.size() + 1;
+
+ uint64_t OffsetBeforeLEBs = Offset;
+ Asm->emitULEB128(Data.getULEB128(&Offset));
+ Asm->emitULEB128(Data.getULEB128(&Offset));
+ Asm->emitULEB128(Data.getULEB128(&Offset));
+ LineSectionSize += Offset - OffsetBeforeLEBs;
+ }
+ Asm->emitInt8(0);
+ LineSectionSize += 1;
+
+ Asm->OutStreamer->emitLabel(HeaderEndLabel);
+
+ // Copy the actual line table program over.
+ Asm->OutStreamer->emitBytes(Contents.slice(Offset, UnitEnd));
+ LineSectionSize += UnitEnd - Offset;
+
+ Asm->OutStreamer->emitLabel(EndLabel);
+ Offset = UnitEnd;
+}
+
+/// Emit the pubnames or pubtypes section contribution for \p
+/// Unit into \p Sec. The data is provided in \p Names.
+void DwarfStreamer::emitPubSectionForUnit(
+ MCSection *Sec, StringRef SecName, const CompileUnit &Unit,
+ const std::vector<CompileUnit::AccelInfo> &Names) {
+ if (Names.empty())
+ return;
+
+ // Start the dwarf pubnames section.
+ Asm->OutStreamer->SwitchSection(Sec);
+ MCSymbol *BeginLabel = Asm->createTempSymbol("pub" + SecName + "_begin");
+ MCSymbol *EndLabel = Asm->createTempSymbol("pub" + SecName + "_end");
+
+ bool HeaderEmitted = false;
+ // Emit the pubnames for this compilation unit.
+ for (const auto &Name : Names) {
+ if (Name.SkipPubSection)
+ continue;
+
+ if (!HeaderEmitted) {
+ // Emit the header.
+ Asm->emitLabelDifference(EndLabel, BeginLabel, 4); // Length
+ Asm->OutStreamer->emitLabel(BeginLabel);
+ Asm->emitInt16(dwarf::DW_PUBNAMES_VERSION); // Version
+ Asm->emitInt32(Unit.getStartOffset()); // Unit offset
+ Asm->emitInt32(Unit.getNextUnitOffset() - Unit.getStartOffset()); // Size
+ HeaderEmitted = true;
+ }
+ Asm->emitInt32(Name.Die->getOffset());
+
+ // Emit the string itself.
+ Asm->OutStreamer->emitBytes(Name.Name.getString());
+ // Emit a null terminator.
+ Asm->emitInt8(0);
+ }
+
+ if (!HeaderEmitted)
+ return;
+ Asm->emitInt32(0); // End marker.
+ Asm->OutStreamer->emitLabel(EndLabel);
+}
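+
+// Layout sketch of one contribution (hypothetical offsets and name):
+//   .long  pub_end - pub_begin   // length
+//   .short DW_PUBNAMES_VERSION
+//   .long  0x40                  // unit offset in .debug_info
+//   .long  0xA0                  // unit size
+//   .long  0x58; .asciz "foo"    // one (DIE offset, name) pair
+//   .long  0                     // end marker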
+
+/// Emit .debug_pubnames for \p Unit.
+void DwarfStreamer::emitPubNamesForUnit(const CompileUnit &Unit) {
+ if (Minimize)
+ return;
+ emitPubSectionForUnit(MC->getObjectFileInfo()->getDwarfPubNamesSection(),
+ "names", Unit, Unit.getPubnames());
+}
+
+/// Emit .debug_pubtypes for \p Unit.
+void DwarfStreamer::emitPubTypesForUnit(const CompileUnit &Unit) {
+ if (Minimize)
+ return;
+ emitPubSectionForUnit(MC->getObjectFileInfo()->getDwarfPubTypesSection(),
+ "types", Unit, Unit.getPubtypes());
+}
+
+/// Emit a CIE into the debug_frame section.
+void DwarfStreamer::emitCIE(StringRef CIEBytes) {
+ MS->SwitchSection(MC->getObjectFileInfo()->getDwarfFrameSection());
+
+ MS->emitBytes(CIEBytes);
+ FrameSectionSize += CIEBytes.size();
+}
+
+/// Emit an FDE into the debug_frame section. \p FDEBytes
+/// contains the FDE data without the length, CIE offset and address
+/// which will be replaced with the parameter values.
+void DwarfStreamer::emitFDE(uint32_t CIEOffset, uint32_t AddrSize,
+ uint32_t Address, StringRef FDEBytes) {
+ MS->SwitchSection(MC->getObjectFileInfo()->getDwarfFrameSection());
+
+ MS->emitIntValue(FDEBytes.size() + 4 + AddrSize, 4);
+ MS->emitIntValue(CIEOffset, 4);
+ MS->emitIntValue(Address, AddrSize);
+ MS->emitBytes(FDEBytes);
+ FrameSectionSize += FDEBytes.size() + 8 + AddrSize;
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/DebugInfo/CodeView/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/DebugInfo/CodeView/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/DebugInfo/CodeView/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/DebugInfo/DWARF/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/DebugInfo/DWARF/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/DebugInfo/DWARF/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/DwarfTransformer.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/DwarfTransformer.cpp
new file mode 100644
index 00000000000..1e527ab3916
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/DwarfTransformer.cpp
@@ -0,0 +1,572 @@
+//===- DwarfTransformer.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <thread>
+#include <unordered_set>
+
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ThreadPool.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "llvm/DebugInfo/GSYM/DwarfTransformer.h"
+#include "llvm/DebugInfo/GSYM/FunctionInfo.h"
+#include "llvm/DebugInfo/GSYM/GsymCreator.h"
+#include "llvm/DebugInfo/GSYM/GsymReader.h"
+#include "llvm/DebugInfo/GSYM/InlineInfo.h"
+
+using namespace llvm;
+using namespace gsym;
+
+struct llvm::gsym::CUInfo {
+ const DWARFDebugLine::LineTable *LineTable;
+ const char *CompDir;
+ std::vector<uint32_t> FileCache;
+ uint64_t Language = 0;
+ uint8_t AddrSize = 0;
+
+ CUInfo(DWARFContext &DICtx, DWARFCompileUnit *CU) {
+ LineTable = DICtx.getLineTableForUnit(CU);
+ CompDir = CU->getCompilationDir();
+ FileCache.clear();
+ if (LineTable)
+ FileCache.assign(LineTable->Prologue.FileNames.size() + 1, UINT32_MAX);
+ DWARFDie Die = CU->getUnitDIE();
+ Language = dwarf::toUnsigned(Die.find(dwarf::DW_AT_language), 0);
+ AddrSize = CU->getAddressByteSize();
+ }
+
+ /// Return true if Addr is the highest address for a given compile unit. The
+ /// highest address is encoded as -1, i.e. all ones in the address. These high
+ /// addresses are used by some linkers to indicate that a function has been
+ /// dead stripped or didn't end up in the linked executable.
+ bool isHighestAddress(uint64_t Addr) const {
+ if (AddrSize == 4)
+ return Addr == UINT32_MAX;
+ else if (AddrSize == 8)
+ return Addr == UINT64_MAX;
+ return false;
+ }
+
+ /// Convert a DWARF compile unit file index into a GSYM global file index.
+ ///
+ /// Each compile unit in DWARF has its own file table in the line table
+ /// prologue. GSYM has a single large file table that applies to all files
+ /// from all of the info in a GSYM file. This function converts between the
+ /// two and caches and DWARF CU file index that has already been converted so
+ /// the first client that asks for a compile unit file index will end up
+ /// doing the conversion, and subsequent clients will get the cached GSYM
+ /// index.
+ uint32_t DWARFToGSYMFileIndex(GsymCreator &Gsym, uint32_t DwarfFileIdx) {
+ if (!LineTable)
+ return 0;
+ assert(DwarfFileIdx < FileCache.size());
+ uint32_t &GsymFileIdx = FileCache[DwarfFileIdx];
+ if (GsymFileIdx != UINT32_MAX)
+ return GsymFileIdx;
+ std::string File;
+ if (LineTable->getFileNameByIndex(
+ DwarfFileIdx, CompDir,
+ DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath, File))
+ GsymFileIdx = Gsym.insertFile(File);
+ else
+ GsymFileIdx = 0;
+ return GsymFileIdx;
+ }
+};
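+
+// Usage sketch (hypothetical index): calling DWARFToGSYMFileIndex(Gsym, 2)
+// on a fresh CUInfo resolves file 2 through the CU's line-table prologue
+// and stores the resulting GSYM file index in FileCache[2]; a second call
+// with the same index is a plain cached-array load.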
+
+
+static DWARFDie GetParentDeclContextDIE(DWARFDie &Die) {
+ if (DWARFDie SpecDie =
+ Die.getAttributeValueAsReferencedDie(dwarf::DW_AT_specification)) {
+ if (DWARFDie SpecParent = GetParentDeclContextDIE(SpecDie))
+ return SpecParent;
+ }
+ if (DWARFDie AbstDie =
+ Die.getAttributeValueAsReferencedDie(dwarf::DW_AT_abstract_origin)) {
+ if (DWARFDie AbstParent = GetParentDeclContextDIE(AbstDie))
+ return AbstParent;
+ }
+
+ // We never want to follow the parent for an inlined subroutine - that would
+ // give us information about where the function is inlined, not what
+ // function is inlined.
+ if (Die.getTag() == dwarf::DW_TAG_inlined_subroutine)
+ return DWARFDie();
+
+ DWARFDie ParentDie = Die.getParent();
+ if (!ParentDie)
+ return DWARFDie();
+
+ switch (ParentDie.getTag()) {
+ case dwarf::DW_TAG_namespace:
+ case dwarf::DW_TAG_structure_type:
+ case dwarf::DW_TAG_union_type:
+ case dwarf::DW_TAG_class_type:
+ case dwarf::DW_TAG_subprogram:
+ return ParentDie; // Found parent decl context DIE
+ case dwarf::DW_TAG_lexical_block:
+ return GetParentDeclContextDIE(ParentDie);
+ default:
+ break;
+ }
+
+ return DWARFDie();
+}
+
+/// Get the GsymCreator string table offset for the qualified name for the
+/// DIE passed in. This function will avoid making copies of any strings in
+/// the GsymCreator when possible. We don't need to copy a string when the
+/// string comes from our .debug_str section or is an inlined string in the
+/// .debug_info. If we create a qualified name string in this function by
+/// combining multiple strings in the DWARF string table or info, we will make
+/// a copy of the string when we add it to the string table.
+static Optional<uint32_t> getQualifiedNameIndex(DWARFDie &Die,
+ uint64_t Language,
+ GsymCreator &Gsym) {
+ // If the DWARF has a mangled name, use the mangled name
+ if (auto LinkageName =
+ dwarf::toString(Die.findRecursively({dwarf::DW_AT_MIPS_linkage_name,
+ dwarf::DW_AT_linkage_name}),
+ nullptr))
+ return Gsym.insertString(LinkageName, /* Copy */ false);
+
+ StringRef ShortName(Die.getName(DINameKind::ShortName));
+ if (ShortName.empty())
+ return llvm::None;
+
+ // For C++ and ObjC, prepend names of all parent declaration contexts
+ if (!(Language == dwarf::DW_LANG_C_plus_plus ||
+ Language == dwarf::DW_LANG_C_plus_plus_03 ||
+ Language == dwarf::DW_LANG_C_plus_plus_11 ||
+ Language == dwarf::DW_LANG_C_plus_plus_14 ||
+ Language == dwarf::DW_LANG_ObjC_plus_plus ||
+ // This should not be needed for C, but we see C++ code marked as C
+ // in some binaries. This shouldn't hurt, so let's do it for C as well
+ Language == dwarf::DW_LANG_C))
+ return Gsym.insertString(ShortName, /* Copy */ false);
+
+ // Some GCC optimizations create functions with names ending with .isra.<num>
+ // or .part.<num> and those names are just DW_AT_name, not DW_AT_linkage_name
+ // If it looks like it could be the case, don't add any prefix
+ if (ShortName.startswith("_Z") &&
+ (ShortName.contains(".isra.") || ShortName.contains(".part.")))
+ return Gsym.insertString(ShortName, /* Copy */ false);
+
+ DWARFDie ParentDeclCtxDie = GetParentDeclContextDIE(Die);
+ if (ParentDeclCtxDie) {
+ std::string Name = ShortName.str();
+ while (ParentDeclCtxDie) {
+ StringRef ParentName(ParentDeclCtxDie.getName(DINameKind::ShortName));
+ if (!ParentName.empty()) {
+ // "lambda" names are wrapped in < >. Replace with { }
+ // to be consistent with demangled names and to avoid confusion with
+ // templates.
+ if (ParentName.front() == '<' && ParentName.back() == '>')
+ Name = "{" + ParentName.substr(1, ParentName.size() - 2).str() + "}" +
+ "::" + Name;
+ else
+ Name = ParentName.str() + "::" + Name;
+ }
+ ParentDeclCtxDie = GetParentDeclContextDIE(ParentDeclCtxDie);
+ }
+ // Copy the name since we created a new name in a std::string.
+ return Gsym.insertString(Name, /* Copy */ true);
+ }
+ // Don't copy the name since it exists in the DWARF object file.
+ return Gsym.insertString(ShortName, /* Copy */ false);
+}
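+
+// Name-building sketch (hypothetical DIEs): a subprogram DIE named "run"
+// with no linkage name, whose parent decl-context chain is class "Worker"
+// inside namespace "ns", yields "ns::Worker::run", inserted with
+// Copy == true because the string was materialized in a temporary
+// std::string.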
+
+static bool hasInlineInfo(DWARFDie Die, uint32_t Depth) {
+ bool CheckChildren = true;
+ switch (Die.getTag()) {
+ case dwarf::DW_TAG_subprogram:
+ // Don't look into functions within functions.
+ CheckChildren = Depth == 0;
+ break;
+ case dwarf::DW_TAG_inlined_subroutine:
+ return true;
+ default:
+ break;
+ }
+ if (!CheckChildren)
+ return false;
+ for (DWARFDie ChildDie : Die.children()) {
+ if (hasInlineInfo(ChildDie, Depth + 1))
+ return true;
+ }
+ return false;
+}
+
+static void parseInlineInfo(GsymCreator &Gsym, CUInfo &CUI, DWARFDie Die,
+ uint32_t Depth, FunctionInfo &FI,
+ InlineInfo &parent) {
+ if (!hasInlineInfo(Die, Depth))
+ return;
+
+ dwarf::Tag Tag = Die.getTag();
+ if (Tag == dwarf::DW_TAG_inlined_subroutine) {
+ // create new InlineInfo and append to parent.children
+ InlineInfo II;
+ DWARFAddressRange FuncRange =
+ DWARFAddressRange(FI.startAddress(), FI.endAddress());
+ Expected<DWARFAddressRangesVector> RangesOrError = Die.getAddressRanges();
+ if (RangesOrError) {
+ for (const DWARFAddressRange &Range : RangesOrError.get()) {
+ // Check that the inlined function is within the range of the function
+ // info; it might not be in the case of split functions
+ if (FuncRange.LowPC <= Range.LowPC && Range.HighPC <= FuncRange.HighPC)
+ II.Ranges.insert(AddressRange(Range.LowPC, Range.HighPC));
+ }
+ }
+ if (II.Ranges.empty())
+ return;
+
+ if (auto NameIndex = getQualifiedNameIndex(Die, CUI.Language, Gsym))
+ II.Name = *NameIndex;
+ II.CallFile = CUI.DWARFToGSYMFileIndex(
+ Gsym, dwarf::toUnsigned(Die.find(dwarf::DW_AT_call_file), 0));
+ II.CallLine = dwarf::toUnsigned(Die.find(dwarf::DW_AT_call_line), 0);
+ // parse all children and append to parent
+ for (DWARFDie ChildDie : Die.children())
+ parseInlineInfo(Gsym, CUI, ChildDie, Depth + 1, FI, II);
+ parent.Children.emplace_back(std::move(II));
+ return;
+ }
+ if (Tag == dwarf::DW_TAG_subprogram || Tag == dwarf::DW_TAG_lexical_block) {
+ // skip this Die and just recurse down
+ for (DWARFDie ChildDie : Die.children())
+ parseInlineInfo(Gsym, CUI, ChildDie, Depth + 1, FI, parent);
+ }
+}
+
+static void convertFunctionLineTable(raw_ostream &Log, CUInfo &CUI,
+ DWARFDie Die, GsymCreator &Gsym,
+ FunctionInfo &FI) {
+ std::vector<uint32_t> RowVector;
+ const uint64_t StartAddress = FI.startAddress();
+ const uint64_t EndAddress = FI.endAddress();
+ const uint64_t RangeSize = EndAddress - StartAddress;
+ const object::SectionedAddress SecAddress{
+ StartAddress, object::SectionedAddress::UndefSection};
+
+
+ if (!CUI.LineTable->lookupAddressRange(SecAddress, RangeSize, RowVector)) {
+ // If we have a DW_TAG_subprogram but no line entries, fall back to using
+ // the DW_AT_decl_file and DW_AT_decl_line if we have both attributes.
+ if (auto FileIdx =
+ dwarf::toUnsigned(Die.findRecursively({dwarf::DW_AT_decl_file}))) {
+ if (auto Line =
+ dwarf::toUnsigned(Die.findRecursively({dwarf::DW_AT_decl_line}))) {
+ LineEntry LE(StartAddress, CUI.DWARFToGSYMFileIndex(Gsym, *FileIdx),
+ *Line);
+ FI.OptLineTable = LineTable();
+ FI.OptLineTable->push(LE);
+ // LE.Addr = EndAddress;
+ // FI.OptLineTable->push(LE);
+ }
+ }
+ return;
+ }
+
+ FI.OptLineTable = LineTable();
+ DWARFDebugLine::Row PrevRow;
+ for (uint32_t RowIndex : RowVector) {
+ // Take file number and line/column from the row.
+ const DWARFDebugLine::Row &Row = CUI.LineTable->Rows[RowIndex];
+ const uint32_t FileIdx = CUI.DWARFToGSYMFileIndex(Gsym, Row.File);
+ uint64_t RowAddress = Row.Address.Address;
+ // Watch out for a RowAddress that is in the middle of a line table entry
+ // in the DWARF. If we pass an address in between two line table entries
+ // we will get a RowIndex for the previous valid line table row which won't
+ // be contained in our function. This is usually a bug in the DWARF due to
+ // linker problems or LTO or other DWARF re-linking, so it is worth emitting
+ // an error, but not worth stopping the creation of the GSYM.
+ if (!FI.Range.contains(RowAddress)) {
+ if (RowAddress < FI.Range.Start) {
+ Log << "error: DIE has a start address whose LowPC is between the "
+ "line table Row[" << RowIndex << "] with address "
+ << HEX64(RowAddress) << " and the next one.\n";
+ Die.dump(Log, 0, DIDumpOptions::getForSingleDIE());
+ RowAddress = FI.Range.Start;
+ } else {
+ continue;
+ }
+ }
+
+ LineEntry LE(RowAddress, FileIdx, Row.Line);
+ if (RowIndex != RowVector[0] && Row.Address < PrevRow.Address) {
+ // We have seen full duplicate line tables for functions in some
+ // DWARF files. Watch for those here by checking that the last
+ // row was the function's end address (HighPC) and that the
+ // current line table entry's address is the same as the first
+ // line entry we already have in our "function_info.Lines". If
+ // so break out after printing a warning.
+ auto FirstLE = FI.OptLineTable->first();
+ if (FirstLE && *FirstLE == LE) {
+ Log << "warning: duplicate line table detected for DIE:\n";
+ Die.dump(Log, 0, DIDumpOptions::getForSingleDIE());
+ } else {
+ // Print out (ignore if the output stream is nulls(), as this is expensive)
+ Log << "error: line table has addresses that do not "
+ << "monotonically increase:\n";
+ for (uint32_t RowIndex2 : RowVector) {
+ CUI.LineTable->Rows[RowIndex2].dump(Log);
+ }
+ Die.dump(Log, 0, DIDumpOptions::getForSingleDIE());
+ }
+ break;
+ }
+
+ // Skip multiple line entries for the same file and line.
+ auto LastLE = FI.OptLineTable->last();
+ if (LastLE && LastLE->File == FileIdx && LastLE->Line == Row.Line)
+ continue;
+ // Only push a row if it isn't an end sequence. End sequence markers are
+ // included for the last address in a function or the last contiguous
+ // address in a sequence.
+ if (Row.EndSequence) {
+ // End sequence means that the next line entry could have a lower address
+ // than the previous entries. So we clear the previous row so we don't
+ // trigger the line table error about addresses that do not monotonically
+ // increase.
+ PrevRow = DWARFDebugLine::Row();
+ } else {
+ FI.OptLineTable->push(LE);
+ PrevRow = Row;
+ }
+ }
+ // If no line table rows were added, clear the line table so we don't encode
+ // one in the GSYM file.
+ if (FI.OptLineTable->empty())
+ FI.OptLineTable = llvm::None;
+}
+
+void DwarfTransformer::handleDie(raw_ostream &OS, CUInfo &CUI, DWARFDie Die) {
+ switch (Die.getTag()) {
+ case dwarf::DW_TAG_subprogram: {
+ Expected<DWARFAddressRangesVector> RangesOrError = Die.getAddressRanges();
+ if (!RangesOrError) {
+ consumeError(RangesOrError.takeError());
+ break;
+ }
+ const DWARFAddressRangesVector &Ranges = RangesOrError.get();
+ if (Ranges.empty())
+ break;
+ auto NameIndex = getQualifiedNameIndex(Die, CUI.Language, Gsym);
+ if (!NameIndex) {
+ OS << "error: function at " << HEX64(Die.getOffset())
+ << " has no name\n ";
+ Die.dump(OS, 0, DIDumpOptions::getForSingleDIE());
+ break;
+ }
+
+ // Create a function_info for each range
+ for (const DWARFAddressRange &Range : Ranges) {
+ // The low PC must be less than the high PC. Many linkers don't remove
+ // DWARF for functions that don't get linked into the final executable.
+ // If both the high and low pc have relocations, linkers will often set
+ // the address values for both to the same value to indicate the function
+ // has been removed. Other linkers have been known to set one or both
+ // PC values to a UINT32_MAX for 4 byte addresses and UINT64_MAX for 8
+ // byte addresses to indicate the function isn't valid. The check below
+ // tries to watch for these cases and abort if it runs into them.
+ if (Range.LowPC >= Range.HighPC || CUI.isHighestAddress(Range.LowPC))
+ break;
+
+ // Many linkers can't remove DWARF and might set the LowPC to zero. Since
+ // high PC can be an offset from the low PC in more recent DWARF versions,
+ // we need to watch for a zeroed low PC, which we do using
+ // ValidTextRanges below.
+ if (!Gsym.IsValidTextAddress(Range.LowPC)) {
+ // We expect zero and -1 to be invalid addresses in DWARF depending
+ // on the linker of the DWARF. This indicates a function was stripped
+ // and the debug info wasn't able to be stripped from the DWARF. If
+ // the LowPC isn't zero or -1, then we should emit an error.
+ if (Range.LowPC != 0) {
+ // Unexpected invalid address, emit an error
+ Log << "warning: DIE has an address range whose start address is "
+ "not in any executable sections (" <<
+ *Gsym.GetValidTextRanges() << ") and will not be processed:\n";
+ Die.dump(Log, 0, DIDumpOptions::getForSingleDIE());
+ }
+ break;
+ }
+
+ FunctionInfo FI;
+ FI.setStartAddress(Range.LowPC);
+ FI.setEndAddress(Range.HighPC);
+ FI.Name = *NameIndex;
+ if (CUI.LineTable) {
+ convertFunctionLineTable(OS, CUI, Die, Gsym, FI);
+ }
+ if (hasInlineInfo(Die, 0)) {
+ FI.Inline = InlineInfo();
+ FI.Inline->Name = *NameIndex;
+ FI.Inline->Ranges.insert(FI.Range);
+ parseInlineInfo(Gsym, CUI, Die, 0, FI, *FI.Inline);
+ }
+ Gsym.addFunctionInfo(std::move(FI));
+ }
+ } break;
+ default:
+ break;
+ }
+ for (DWARFDie ChildDie : Die.children())
+ handleDie(OS, CUI, ChildDie);
+}
+
+Error DwarfTransformer::convert(uint32_t NumThreads) {
+ size_t NumBefore = Gsym.getNumFunctionInfos();
+ if (NumThreads == 1) {
+ // Parse all DWARF data from this thread, using the same string/file
+ // table for everything.
+ for (const auto &CU : DICtx.compile_units()) {
+ DWARFDie Die = CU->getUnitDIE(false);
+ CUInfo CUI(DICtx, dyn_cast<DWARFCompileUnit>(CU.get()));
+ handleDie(Log, CUI, Die);
+ }
+ } else {
+ // The LLVM DWARF parser is not thread-safe, so we need to parse all DWARF up
+ // front before we start accessing any DIEs since there might be
+ // cross compile unit references in the DWARF. If we don't do this we can
+ // end up crashing.
+
+ // We need to call getAbbreviations sequentially first so that getUnitDIE()
+ // only works with its local data.
+ for (const auto &CU : DICtx.compile_units())
+ CU->getAbbreviations();
+
+ // Now parse all DIEs in case we have cross compile unit references in a
+ // thread pool.
+ ThreadPool pool(hardware_concurrency(NumThreads));
+ for (const auto &CU : DICtx.compile_units())
+ pool.async([&CU]() { CU->getUnitDIE(false /*CUDieOnly*/); });
+ pool.wait();
+
+ // Now convert all DWARF to GSYM in a thread pool.
+ std::mutex LogMutex;
+ for (const auto &CU : DICtx.compile_units()) {
+ DWARFDie Die = CU->getUnitDIE(false /*CUDieOnly*/);
+ if (Die) {
+ CUInfo CUI(DICtx, dyn_cast<DWARFCompileUnit>(CU.get()));
+ pool.async([this, CUI, &LogMutex, Die]() mutable {
+ std::string ThreadLogStorage;
+ raw_string_ostream ThreadOS(ThreadLogStorage);
+ handleDie(ThreadOS, CUI, Die);
+ ThreadOS.flush();
+ if (!ThreadLogStorage.empty()) {
+ // Print ThreadLogStorage lines into an actual stream under a lock
+ std::lock_guard<std::mutex> guard(LogMutex);
+ Log << ThreadLogStorage;
+ }
+ });
+ }
+ }
+ pool.wait();
+ }
+ size_t FunctionsAddedCount = Gsym.getNumFunctionInfos() - NumBefore;
+ Log << "Loaded " << FunctionsAddedCount << " functions from DWARF.\n";
+ return Error::success();
+}
+
+llvm::Error DwarfTransformer::verify(StringRef GsymPath) {
+ Log << "Verifying GSYM file \"" << GsymPath << "\":\n";
+
+ auto Gsym = GsymReader::openFile(GsymPath);
+ if (!Gsym)
+ return Gsym.takeError();
+
+ auto NumAddrs = Gsym->getNumAddresses();
+ DILineInfoSpecifier DLIS(
+ DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath,
+ DILineInfoSpecifier::FunctionNameKind::LinkageName);
+ std::string gsymFilename;
+ for (uint32_t I = 0; I < NumAddrs; ++I) {
+ auto FuncAddr = Gsym->getAddress(I);
+ if (!FuncAddr)
+ return createStringError(std::errc::invalid_argument,
+ "failed to extract address[%i]", I);
+
+ auto FI = Gsym->getFunctionInfo(*FuncAddr);
+ if (!FI)
+ return createStringError(std::errc::invalid_argument,
+ "failed to extract function info for address 0x%"
+ PRIu64, *FuncAddr);
+
+ for (auto Addr = *FuncAddr; Addr < *FuncAddr + FI->size(); ++Addr) {
+ const object::SectionedAddress SectAddr{
+ Addr, object::SectionedAddress::UndefSection};
+ auto LR = Gsym->lookup(Addr);
+ if (!LR)
+ return LR.takeError();
+
+ auto DwarfInlineInfos =
+ DICtx.getInliningInfoForAddress(SectAddr, DLIS);
+ uint32_t NumDwarfInlineInfos = DwarfInlineInfos.getNumberOfFrames();
+ if (NumDwarfInlineInfos == 0) {
+ DwarfInlineInfos.addFrame(
+ DICtx.getLineInfoForAddress(SectAddr, DLIS));
+ }
+
+ // Check for 1 entry that has no file and line info
+ if (NumDwarfInlineInfos == 1 &&
+ DwarfInlineInfos.getFrame(0).FileName == "<invalid>") {
+ DwarfInlineInfos = DIInliningInfo();
+ NumDwarfInlineInfos = 0;
+ }
+ if (NumDwarfInlineInfos > 0 &&
+ NumDwarfInlineInfos != LR->Locations.size()) {
+ Log << "error: address " << HEX64(Addr) << " has "
+ << NumDwarfInlineInfos << " DWARF inline frames and GSYM has "
+ << LR->Locations.size() << "\n";
+ Log << " " << NumDwarfInlineInfos << " DWARF frames:\n";
+ for (size_t Idx = 0; Idx < NumDwarfInlineInfos; ++Idx) {
+ const auto dii = DwarfInlineInfos.getFrame(Idx);
+ Log << " [" << Idx << "]: " << dii.FunctionName << " @ "
+ << dii.FileName << ':' << dii.Line << '\n';
+ }
+ Log << " " << LR->Locations.size() << " GSYM frames:\n";
+ for (size_t Idx = 0, count = LR->Locations.size();
+ Idx < count; ++Idx) {
+ const auto &gii = LR->Locations[Idx];
+ Log << " [" << Idx << "]: " << gii.Name << " @ " << gii.Dir
+ << '/' << gii.Base << ':' << gii.Line << '\n';
+ }
+ DwarfInlineInfos = DICtx.getInliningInfoForAddress(SectAddr, DLIS);
+ Gsym->dump(Log, *FI);
+ continue;
+ }
+
+ for (size_t Idx = 0, count = LR->Locations.size(); Idx < count;
+ ++Idx) {
+ const auto &gii = LR->Locations[Idx];
+ if (Idx < NumDwarfInlineInfos) {
+ const auto dii = DwarfInlineInfos.getFrame(Idx);
+ gsymFilename = LR->getSourceFile(Idx);
+ // Verify function name
+ if (dii.FunctionName.find(gii.Name.str()) != 0)
+ Log << "error: address " << HEX64(Addr) << " DWARF function \""
+ << dii.FunctionName.c_str()
+ << "\" doesn't match GSYM function \"" << gii.Name << "\"\n";
+ // Verify source file path
+ if (dii.FileName != gsymFilename)
+ Log << "error: address " << HEX64(Addr) << " DWARF path \""
+ << dii.FileName.c_str() << "\" doesn't match GSYM path \""
+ << gsymFilename.c_str() << "\"\n";
+ // Verify source file line
+ if (dii.Line != gii.Line)
+ Log << "error: address " << HEX64(Addr) << " DWARF line "
+ << dii.Line << " != GSYM line " << gii.Line << "\n";
+ }
+ }
+ }
+ }
+ return Error::success();
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/FileWriter.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/FileWriter.cpp
new file mode 100644
index 00000000000..4b30dcb60a7
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/FileWriter.cpp
@@ -0,0 +1,78 @@
+//===- FileWriter.cpp -------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/FileWriter.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+using namespace llvm;
+using namespace gsym;
+
+FileWriter::~FileWriter() { OS.flush(); }
+
+void FileWriter::writeSLEB(int64_t S) {
+ uint8_t Bytes[32];
+ auto Length = encodeSLEB128(S, Bytes);
+ assert(Length < sizeof(Bytes));
+ OS.write(reinterpret_cast<const char *>(Bytes), Length);
+}
+
+void FileWriter::writeULEB(uint64_t U) {
+ uint8_t Bytes[32];
+ auto Length = encodeULEB128(U, Bytes);
+ assert(Length < sizeof(Bytes));
+ OS.write(reinterpret_cast<const char *>(Bytes), Length);
+}
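+
+// Illustrative sketch (not part of the upstream source): LEB128 stores a
+// value 7 bits per byte, least-significant group first, with the high bit
+// of each byte marking "more bytes follow". A 64-bit value therefore needs
+// at most 10 bytes, so the 32-byte scratch buffers above are more than
+// ample. The name encodeULEB128Sketch is made up for illustration.
+//
+//   static size_t encodeULEB128Sketch(uint64_t V, uint8_t *Out) {
+//     size_t N = 0;
+//     do {
+//       uint8_t Byte = V & 0x7f; // Low 7 bits of the remaining value.
+//       V >>= 7;
+//       if (V != 0)
+//         Byte |= 0x80;          // Continuation bit.
+//       Out[N++] = Byte;
+//     } while (V != 0);
+//     return N;                  // e.g. 624485 encodes as e5 8e 26.
+//   }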
+
+void FileWriter::writeU8(uint8_t U) {
+ OS.write(reinterpret_cast<const char *>(&U), sizeof(U));
+}
+
+void FileWriter::writeU16(uint16_t U) {
+ const uint16_t Swapped = support::endian::byte_swap(U, ByteOrder);
+ OS.write(reinterpret_cast<const char *>(&Swapped), sizeof(Swapped));
+}
+
+void FileWriter::writeU32(uint32_t U) {
+ const uint32_t Swapped = support::endian::byte_swap(U, ByteOrder);
+ OS.write(reinterpret_cast<const char *>(&Swapped), sizeof(Swapped));
+}
+
+void FileWriter::writeU64(uint64_t U) {
+ const uint64_t Swapped = support::endian::byte_swap(U, ByteOrder);
+ OS.write(reinterpret_cast<const char *>(&Swapped), sizeof(Swapped));
+}
+
+void FileWriter::fixup32(uint32_t U, uint64_t Offset) {
+ const uint32_t Swapped = support::endian::byte_swap(U, ByteOrder);
+ OS.pwrite(reinterpret_cast<const char *>(&Swapped), sizeof(Swapped),
+ Offset);
+}
+
+void FileWriter::writeData(llvm::ArrayRef<uint8_t> Data) {
+ OS.write(reinterpret_cast<const char *>(Data.data()), Data.size());
+}
+
+void FileWriter::writeNullTerminated(llvm::StringRef Str) {
+ OS << Str << '\0';
+}
+
+uint64_t FileWriter::tell() {
+ return OS.tell();
+}
+
+void FileWriter::alignTo(size_t Align) {
+ off_t Offset = OS.tell();
+ off_t AlignedOffset = (Offset + Align - 1) / Align * Align;
+ if (AlignedOffset == Offset)
+ return;
+ off_t PadCount = AlignedOffset - Offset;
+ OS.write_zeros(PadCount);
+}
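+
+// Worked example (illustrative only): with Offset == 10 and Align == 4,
+// AlignedOffset == (10 + 3) / 4 * 4 == 12, so two zero bytes are written.
+// If Offset is already a multiple of Align, nothing is written.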
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/FunctionInfo.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/FunctionInfo.cpp
new file mode 100644
index 00000000000..cef1b9498c5
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/FunctionInfo.cpp
@@ -0,0 +1,254 @@
+//===- FunctionInfo.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/FunctionInfo.h"
+#include "llvm/DebugInfo/GSYM/FileWriter.h"
+#include "llvm/DebugInfo/GSYM/GsymReader.h"
+#include "llvm/DebugInfo/GSYM/LineTable.h"
+#include "llvm/DebugInfo/GSYM/InlineInfo.h"
+#include "llvm/Support/DataExtractor.h"
+
+using namespace llvm;
+using namespace gsym;
+
+/// FunctionInfo information type that is used to encode the optional data
+/// that is associated with a FunctionInfo object.
+enum InfoType : uint32_t {
+ EndOfList = 0u,
+ LineTableInfo = 1u,
+ InlineInfo = 2u
+};
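+
+/// Illustrative on-disk layout of a FunctionInfo, sketched from the
+/// encode()/decode() pair below (not normative documentation):
+///
+///   uint32_t Size;        // Function size in bytes; may be zero.
+///   uint32_t Name;        // String table offset; must be non-zero.
+///   struct {              // Repeated until IT == EndOfList.
+///     uint32_t IT;        // One of the InfoType values above.
+///     uint32_t Length;    // Byte size of Payload (0 for EndOfList).
+///     uint8_t  Payload[]; // Length bytes of LineTable or InlineInfo data.
+///   } Chunks[];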
+
+raw_ostream &llvm::gsym::operator<<(raw_ostream &OS, const FunctionInfo &FI) {
+ OS << FI.Range << ": " << "Name=" << HEX32(FI.Name) << '\n';
+ if (FI.OptLineTable)
+ OS << FI.OptLineTable << '\n';
+ if (FI.Inline)
+ OS << FI.Inline << '\n';
+ return OS;
+}
+
+llvm::Expected<FunctionInfo> FunctionInfo::decode(DataExtractor &Data,
+ uint64_t BaseAddr) {
+ FunctionInfo FI;
+ FI.Range.Start = BaseAddr;
+ uint64_t Offset = 0;
+ if (!Data.isValidOffsetForDataOfSize(Offset, 4))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing FunctionInfo Size", Offset);
+ FI.Range.End = FI.Range.Start + Data.getU32(&Offset);
+ if (!Data.isValidOffsetForDataOfSize(Offset, 4))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing FunctionInfo Name", Offset);
+ FI.Name = Data.getU32(&Offset);
+ if (FI.Name == 0)
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": invalid FunctionInfo Name value 0x%8.8x",
+ Offset - 4, FI.Name);
+ bool Done = false;
+ while (!Done) {
+ if (!Data.isValidOffsetForDataOfSize(Offset, 4))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing FunctionInfo InfoType value", Offset);
+ const uint32_t IT = Data.getU32(&Offset);
+ if (!Data.isValidOffsetForDataOfSize(Offset, 4))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing FunctionInfo InfoType length", Offset);
+ const uint32_t InfoLength = Data.getU32(&Offset);
+ if (!Data.isValidOffsetForDataOfSize(Offset, InfoLength))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing FunctionInfo data for InfoType %u",
+ Offset, IT);
+ DataExtractor InfoData(Data.getData().substr(Offset, InfoLength),
+ Data.isLittleEndian(),
+ Data.getAddressSize());
+ switch (IT) {
+ case InfoType::EndOfList:
+ Done = true;
+ break;
+
+ case InfoType::LineTableInfo:
+ if (Expected<LineTable> LT = LineTable::decode(InfoData, BaseAddr))
+ FI.OptLineTable = std::move(LT.get());
+ else
+ return LT.takeError();
+ break;
+
+ case InfoType::InlineInfo:
+ if (Expected<InlineInfo> II = InlineInfo::decode(InfoData, BaseAddr))
+ FI.Inline = std::move(II.get());
+ else
+ return II.takeError();
+ break;
+
+ default:
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": unsupported InfoType %u",
+ Offset-8, IT);
+ }
+ Offset += InfoLength;
+ }
+ return std::move(FI);
+}
+
+llvm::Expected<uint64_t> FunctionInfo::encode(FileWriter &O) const {
+ if (!isValid())
+ return createStringError(std::errc::invalid_argument,
+ "attempted to encode invalid FunctionInfo object");
+ // Align FunctionInfo data to a 4 byte alignment.
+ O.alignTo(4);
+ const uint64_t FuncInfoOffset = O.tell();
+ // Write the size in bytes of this function as a uint32_t. This can be zero
+ // if we just have a symbol from a symbol table and that symbol has no size.
+ O.writeU32(size());
+ // Write the name of this function as a uint32_t string table offset.
+ O.writeU32(Name);
+
+ if (OptLineTable.hasValue()) {
+ O.writeU32(InfoType::LineTableInfo);
+ // Write a uint32_t length as zero for now, we will fix this up after
+ // writing the LineTable out with the number of bytes that were written.
+ O.writeU32(0);
+ const auto StartOffset = O.tell();
+ llvm::Error err = OptLineTable->encode(O, Range.Start);
+ if (err)
+ return std::move(err);
+ const auto Length = O.tell() - StartOffset;
+ if (Length > UINT32_MAX)
+ return createStringError(std::errc::invalid_argument,
+ "LineTable length is greater than UINT32_MAX");
+ // Fixup the size of the LineTable data with the correct size.
+ O.fixup32(static_cast<uint32_t>(Length), StartOffset - 4);
+ }
+
+ // Write out the inline function info if we have any and if it is valid.
+ if (Inline.hasValue()) {
+ O.writeU32(InfoType::InlineInfo);
+    // Write a uint32_t length as zero for now, we will fix this up after
+    // writing the InlineInfo out with the number of bytes that were written.
+ O.writeU32(0);
+ const auto StartOffset = O.tell();
+ llvm::Error err = Inline->encode(O, Range.Start);
+ if (err)
+ return std::move(err);
+ const auto Length = O.tell() - StartOffset;
+ if (Length > UINT32_MAX)
+ return createStringError(std::errc::invalid_argument,
+ "InlineInfo length is greater than UINT32_MAX");
+ // Fixup the size of the InlineInfo data with the correct size.
+ O.fixup32(static_cast<uint32_t>(Length), StartOffset - 4);
+ }
+
+  // Terminate the data chunks with an end of list marker with zero size.
+ O.writeU32(InfoType::EndOfList);
+ O.writeU32(0);
+ return FuncInfoOffset;
+}
+
+
+llvm::Expected<LookupResult> FunctionInfo::lookup(DataExtractor &Data,
+ const GsymReader &GR,
+ uint64_t FuncAddr,
+ uint64_t Addr) {
+ LookupResult LR;
+ LR.LookupAddr = Addr;
+ LR.FuncRange.Start = FuncAddr;
+ uint64_t Offset = 0;
+ LR.FuncRange.End = FuncAddr + Data.getU32(&Offset);
+ uint32_t NameOffset = Data.getU32(&Offset);
+ // The "lookup" functions doesn't report errors as accurately as the "decode"
+ // function as it is meant to be fast. For more accurage errors we could call
+ // "decode".
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "FunctionInfo data is truncated");
+  // This function will be called with the result of a binary search of the
+  // address table; we must still make sure the address does not fall into a
+  // gap between functions or after the last function.
+ if (LR.FuncRange.size() > 0 && !LR.FuncRange.contains(Addr))
+ return createStringError(std::errc::io_error,
+ "address 0x%" PRIx64 " is not in GSYM", Addr);
+
+ if (NameOffset == 0)
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": invalid FunctionInfo Name value 0x00000000",
+ Offset - 4);
+ LR.FuncName = GR.getString(NameOffset);
+ bool Done = false;
+ Optional<LineEntry> LineEntry;
+ Optional<DataExtractor> InlineInfoData;
+ while (!Done) {
+ if (!Data.isValidOffsetForDataOfSize(Offset, 8))
+ return createStringError(std::errc::io_error,
+ "FunctionInfo data is truncated");
+ const uint32_t IT = Data.getU32(&Offset);
+ const uint32_t InfoLength = Data.getU32(&Offset);
+ const StringRef InfoBytes = Data.getData().substr(Offset, InfoLength);
+ if (InfoLength != InfoBytes.size())
+ return createStringError(std::errc::io_error,
+ "FunctionInfo data is truncated");
+ DataExtractor InfoData(InfoBytes, Data.isLittleEndian(),
+ Data.getAddressSize());
+ switch (IT) {
+ case InfoType::EndOfList:
+ Done = true;
+ break;
+
+ case InfoType::LineTableInfo:
+ if (auto ExpectedLE = LineTable::lookup(InfoData, FuncAddr, Addr))
+ LineEntry = ExpectedLE.get();
+ else
+ return ExpectedLE.takeError();
+ break;
+
+ case InfoType::InlineInfo:
+ // We will parse the inline info after our line table, but only if
+ // we have a line entry.
+ InlineInfoData = InfoData;
+ break;
+
+ default:
+ break;
+ }
+ Offset += InfoLength;
+ }
+
+ if (!LineEntry) {
+ // We don't have a valid line entry for our address, fill in our source
+ // location as best we can and return.
+ SourceLocation SrcLoc;
+ SrcLoc.Name = LR.FuncName;
+ SrcLoc.Offset = Addr - FuncAddr;
+ LR.Locations.push_back(SrcLoc);
+ return LR;
+ }
+
+ Optional<FileEntry> LineEntryFile = GR.getFile(LineEntry->File);
+ if (!LineEntryFile)
+ return createStringError(std::errc::invalid_argument,
+ "failed to extract file[%" PRIu32 "]",
+ LineEntry->File);
+
+ SourceLocation SrcLoc;
+ SrcLoc.Name = LR.FuncName;
+ SrcLoc.Offset = Addr - FuncAddr;
+ SrcLoc.Dir = GR.getString(LineEntryFile->Dir);
+ SrcLoc.Base = GR.getString(LineEntryFile->Base);
+ SrcLoc.Line = LineEntry->Line;
+ LR.Locations.push_back(SrcLoc);
+ // If we don't have inline information, we are done.
+ if (!InlineInfoData)
+ return LR;
+ // We have inline information. Try to augment the lookup result with this
+ // data.
+ llvm::Error Err = InlineInfo::lookup(GR, *InlineInfoData, FuncAddr, Addr,
+ LR.Locations);
+ if (Err)
+ return std::move(Err);
+ return LR;
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/GsymCreator.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/GsymCreator.cpp
new file mode 100644
index 00000000000..2001478e804
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/GsymCreator.cpp
@@ -0,0 +1,320 @@
+//===- GsymCreator.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/GsymCreator.h"
+#include "llvm/DebugInfo/GSYM/FileWriter.h"
+#include "llvm/DebugInfo/GSYM/Header.h"
+#include "llvm/DebugInfo/GSYM/LineTable.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <algorithm>
+#include <cassert>
+#include <functional>
+#include <vector>
+
+using namespace llvm;
+using namespace gsym;
+
+
+GsymCreator::GsymCreator() : StrTab(StringTableBuilder::ELF) {
+ insertFile(StringRef());
+}
+
+uint32_t GsymCreator::insertFile(StringRef Path,
+ llvm::sys::path::Style Style) {
+ llvm::StringRef directory = llvm::sys::path::parent_path(Path, Style);
+ llvm::StringRef filename = llvm::sys::path::filename(Path, Style);
+ // We must insert the strings first, then call the FileEntry constructor.
+ // If we inline the insertString() function call into the constructor, the
+  // call order is undefined because C++ doesn't specify the evaluation
+  // order of function arguments.
+ const uint32_t Dir = insertString(directory);
+ const uint32_t Base = insertString(filename);
+ FileEntry FE(Dir, Base);
+
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ const auto NextIndex = Files.size();
+ // Find FE in hash map and insert if not present.
+ auto R = FileEntryToIndex.insert(std::make_pair(FE, NextIndex));
+ if (R.second)
+ Files.emplace_back(FE);
+ return R.first->second;
+}
+
+llvm::Error GsymCreator::save(StringRef Path,
+ llvm::support::endianness ByteOrder) const {
+ std::error_code EC;
+ raw_fd_ostream OutStrm(Path, EC);
+ if (EC)
+ return llvm::errorCodeToError(EC);
+ FileWriter O(OutStrm, ByteOrder);
+ return encode(O);
+}
+
+llvm::Error GsymCreator::encode(FileWriter &O) const {
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ if (Funcs.empty())
+ return createStringError(std::errc::invalid_argument,
+ "no functions to encode");
+ if (!Finalized)
+ return createStringError(std::errc::invalid_argument,
+ "GsymCreator wasn't finalized prior to encoding");
+
+ if (Funcs.size() > UINT32_MAX)
+ return createStringError(std::errc::invalid_argument,
+ "too many FunctionInfos");
+
+ const uint64_t MinAddr = BaseAddress ? *BaseAddress : Funcs.front().startAddress();
+ const uint64_t MaxAddr = Funcs.back().startAddress();
+ const uint64_t AddrDelta = MaxAddr - MinAddr;
+ Header Hdr;
+ Hdr.Magic = GSYM_MAGIC;
+ Hdr.Version = GSYM_VERSION;
+ Hdr.AddrOffSize = 0;
+ Hdr.UUIDSize = static_cast<uint8_t>(UUID.size());
+ Hdr.BaseAddress = MinAddr;
+ Hdr.NumAddresses = static_cast<uint32_t>(Funcs.size());
+ Hdr.StrtabOffset = 0; // We will fix this up later.
+ Hdr.StrtabSize = 0; // We will fix this up later.
+ memset(Hdr.UUID, 0, sizeof(Hdr.UUID));
+ if (UUID.size() > sizeof(Hdr.UUID))
+ return createStringError(std::errc::invalid_argument,
+ "invalid UUID size %u", (uint32_t)UUID.size());
+ // Set the address offset size correctly in the GSYM header.
+ if (AddrDelta <= UINT8_MAX)
+ Hdr.AddrOffSize = 1;
+ else if (AddrDelta <= UINT16_MAX)
+ Hdr.AddrOffSize = 2;
+ else if (AddrDelta <= UINT32_MAX)
+ Hdr.AddrOffSize = 4;
+ else
+ Hdr.AddrOffSize = 8;
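+  // Worked example (illustrative only): with MinAddr == 0x100000000 and
+  // MaxAddr == 0x100004000, AddrDelta == 0x4000, which is <= UINT16_MAX but
+  // > UINT8_MAX, so each entry is stored as a 2 byte offset from BaseAddress.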
+ // Copy the UUID value if we have one.
+ if (UUID.size() > 0)
+ memcpy(Hdr.UUID, UUID.data(), UUID.size());
+ // Write out the header.
+ llvm::Error Err = Hdr.encode(O);
+ if (Err)
+ return Err;
+
+ // Write out the address offsets.
+ O.alignTo(Hdr.AddrOffSize);
+ for (const auto &FuncInfo : Funcs) {
+ uint64_t AddrOffset = FuncInfo.startAddress() - Hdr.BaseAddress;
+ switch(Hdr.AddrOffSize) {
+ case 1: O.writeU8(static_cast<uint8_t>(AddrOffset)); break;
+ case 2: O.writeU16(static_cast<uint16_t>(AddrOffset)); break;
+ case 4: O.writeU32(static_cast<uint32_t>(AddrOffset)); break;
+ case 8: O.writeU64(AddrOffset); break;
+ }
+ }
+
+ // Write out all zeros for the AddrInfoOffsets.
+ O.alignTo(4);
+ const off_t AddrInfoOffsetsOffset = O.tell();
+ for (size_t i = 0, n = Funcs.size(); i < n; ++i)
+ O.writeU32(0);
+
+ // Write out the file table
+ O.alignTo(4);
+ assert(!Files.empty());
+ assert(Files[0].Dir == 0);
+ assert(Files[0].Base == 0);
+ size_t NumFiles = Files.size();
+ if (NumFiles > UINT32_MAX)
+ return createStringError(std::errc::invalid_argument,
+ "too many files");
+ O.writeU32(static_cast<uint32_t>(NumFiles));
+ for (auto File: Files) {
+ O.writeU32(File.Dir);
+ O.writeU32(File.Base);
+ }
+
+  // Write out the string table.
+ const off_t StrtabOffset = O.tell();
+ StrTab.write(O.get_stream());
+ const off_t StrtabSize = O.tell() - StrtabOffset;
+ std::vector<uint32_t> AddrInfoOffsets;
+
+ // Write out the address infos for each function info.
+ for (const auto &FuncInfo : Funcs) {
+ if (Expected<uint64_t> OffsetOrErr = FuncInfo.encode(O))
+ AddrInfoOffsets.push_back(OffsetOrErr.get());
+ else
+ return OffsetOrErr.takeError();
+ }
+ // Fixup the string table offset and size in the header
+ O.fixup32((uint32_t)StrtabOffset, offsetof(Header, StrtabOffset));
+ O.fixup32((uint32_t)StrtabSize, offsetof(Header, StrtabSize));
+
+ // Fixup all address info offsets
+ uint64_t Offset = 0;
+ for (auto AddrInfoOffset: AddrInfoOffsets) {
+ O.fixup32(AddrInfoOffset, AddrInfoOffsetsOffset + Offset);
+ Offset += 4;
+ }
+ return ErrorSuccess();
+}
+
+llvm::Error GsymCreator::finalize(llvm::raw_ostream &OS) {
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ if (Finalized)
+ return createStringError(std::errc::invalid_argument,
+ "already finalized");
+ Finalized = true;
+
+ // Sort function infos so we can emit sorted functions.
+ llvm::sort(Funcs);
+
+ // Don't let the string table indexes change by finalizing in order.
+ StrTab.finalizeInOrder();
+
+  // Remove duplicate function infos that have both entries from debug info
+  // (DWARF or Breakpad) and entries from the SymbolTable.
+  //
+  // Also handle overlapping functions. Usually there shouldn't be any, but
+  // they can and do happen in some rare cases.
+ //
+ // (a) (b) (c)
+ // ^ ^ ^ ^
+ // |X |Y |X ^ |X
+ // | | | |Y | ^
+ // | | | v v |Y
+ // v v v v
+ //
+ // In (a) and (b), Y is ignored and X will be reported for the full range.
+ // In (c), both functions will be included in the result and lookups for an
+ // address in the intersection will return Y because of binary search.
+ //
+  // Note that in case of (b), we cannot include Y in the result, because
+  // then we wouldn't find any function for the range (end of Y, end of X)
+  // with binary search.
+ auto NumBefore = Funcs.size();
+ auto Curr = Funcs.begin();
+ auto Prev = Funcs.end();
+ while (Curr != Funcs.end()) {
+ // Can't check for overlaps or same address ranges if we don't have a
+ // previous entry
+ if (Prev != Funcs.end()) {
+ if (Prev->Range.intersects(Curr->Range)) {
+ // Overlapping address ranges.
+ if (Prev->Range == Curr->Range) {
+ // Same address range. Check if one is from debug info and the other
+ // is from a symbol table. If so, then keep the one with debug info.
+ // Our sorting guarantees that entries with matching address ranges
+ // that have debug info are last in the sort.
+ if (*Prev == *Curr) {
+ // FunctionInfo entries match exactly (range, lines, inlines)
+ OS << "warning: duplicate function info entries for range: "
+ << Curr->Range << '\n';
+ Curr = Funcs.erase(Prev);
+ } else {
+ if (!Prev->hasRichInfo() && Curr->hasRichInfo()) {
+ // Same address range, one with no debug info (symbol) and the
+ // next with debug info. Keep the latter.
+ Curr = Funcs.erase(Prev);
+ } else {
+ OS << "warning: same address range contains different debug "
+ << "info. Removing:\n"
+ << *Prev << "\nIn favor of this one:\n"
+ << *Curr << "\n";
+ Curr = Funcs.erase(Prev);
+ }
+ }
+ } else {
+        // Print a warning about the overlapping ranges.
+ OS << "warning: function ranges overlap:\n"
+ << *Prev << "\n"
+ << *Curr << "\n";
+ }
+ } else if (Prev->Range.size() == 0 &&
+ Curr->Range.contains(Prev->Range.Start)) {
+ OS << "warning: removing symbol:\n"
+ << *Prev << "\nKeeping:\n"
+ << *Curr << "\n";
+ Curr = Funcs.erase(Prev);
+ }
+ }
+ if (Curr == Funcs.end())
+ break;
+ Prev = Curr++;
+ }
+
+ // If our last function info entry doesn't have a size and if we have valid
+ // text ranges, we should set the size of the last entry since any search for
+  // a high address might match our last entry. By fixing up this size, we
+  // help ensure that lookups don't always return a trailing symbol that has
+  // no size.
+ if (!Funcs.empty() && Funcs.back().Range.size() == 0 && ValidTextRanges) {
+ if (auto Range = ValidTextRanges->getRangeThatContains(
+ Funcs.back().Range.Start)) {
+ Funcs.back().Range.End = Range->End;
+ }
+ }
+ OS << "Pruned " << NumBefore - Funcs.size() << " functions, ended with "
+ << Funcs.size() << " total\n";
+ return Error::success();
+}
+
+uint32_t GsymCreator::insertString(StringRef S, bool Copy) {
+ if (S.empty())
+ return 0;
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ if (Copy) {
+ // We need to provide backing storage for the string if requested
+ // since StringTableBuilder stores references to strings. Any string
+ // that comes from a section in an object file doesn't need to be
+ // copied, but any string created by code will need to be copied.
+ // This allows GsymCreator to be really fast when parsing DWARF and
+ // other object files as most strings don't need to be copied.
+ CachedHashStringRef CHStr(S);
+ if (!StrTab.contains(CHStr))
+ S = StringStorage.insert(S).first->getKey();
+ }
+ return StrTab.add(S);
+}
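+
+// Example usage (hypothetical caller, for illustration only): strings that
+// point into a mapped object file can skip the copy, while transient
+// strings must be copied so the reference held by StringTableBuilder stays
+// valid after the temporary dies. makeTemporaryName is a made-up helper.
+//
+//   uint32_t DirIdx = Creator.insertString(StrInObjectFile, /*Copy=*/false);
+//   std::string Tmp = makeTemporaryName();
+//   uint32_t TmpIdx = Creator.insertString(Tmp, /*Copy=*/true);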
+
+void GsymCreator::addFunctionInfo(FunctionInfo &&FI) {
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ Ranges.insert(FI.Range);
+ Funcs.emplace_back(FI);
+}
+
+void GsymCreator::forEachFunctionInfo(
+ std::function<bool(FunctionInfo &)> const &Callback) {
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ for (auto &FI : Funcs) {
+ if (!Callback(FI))
+ break;
+ }
+}
+
+void GsymCreator::forEachFunctionInfo(
+ std::function<bool(const FunctionInfo &)> const &Callback) const {
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ for (const auto &FI : Funcs) {
+ if (!Callback(FI))
+ break;
+ }
+}
+
+size_t GsymCreator::getNumFunctionInfos() const {
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ return Funcs.size();
+}
+
+bool GsymCreator::IsValidTextAddress(uint64_t Addr) const {
+ if (ValidTextRanges)
+ return ValidTextRanges->contains(Addr);
+  return true; // No valid text ranges have been set, so accept all ranges.
+}
+
+bool GsymCreator::hasFunctionInfoForAddress(uint64_t Addr) const {
+ std::lock_guard<std::recursive_mutex> Guard(Mutex);
+ return Ranges.contains(Addr);
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/GsymReader.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/GsymReader.cpp
new file mode 100644
index 00000000000..2ad18bf63d5
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/GsymReader.cpp
@@ -0,0 +1,406 @@
+//===- GsymReader.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/GsymReader.h"
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "llvm/DebugInfo/GSYM/GsymCreator.h"
+#include "llvm/DebugInfo/GSYM/InlineInfo.h"
+#include "llvm/DebugInfo/GSYM/LineTable.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace llvm;
+using namespace gsym;
+
+GsymReader::GsymReader(std::unique_ptr<MemoryBuffer> Buffer) :
+ MemBuffer(std::move(Buffer)),
+ Endian(support::endian::system_endianness()) {}
+
+GsymReader::GsymReader(GsymReader &&RHS) = default;
+
+GsymReader::~GsymReader() = default;
+
+llvm::Expected<GsymReader> GsymReader::openFile(StringRef Filename) {
+ // Open the input file and return an appropriate error if needed.
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BuffOrErr =
+ MemoryBuffer::getFileOrSTDIN(Filename);
+ auto Err = BuffOrErr.getError();
+ if (Err)
+ return llvm::errorCodeToError(Err);
+ return create(BuffOrErr.get());
+}
+
+llvm::Expected<GsymReader> GsymReader::copyBuffer(StringRef Bytes) {
+ auto MemBuffer = MemoryBuffer::getMemBufferCopy(Bytes, "GSYM bytes");
+ return create(MemBuffer);
+}
+
+llvm::Expected<llvm::gsym::GsymReader>
+GsymReader::create(std::unique_ptr<MemoryBuffer> &MemBuffer) {
+ if (!MemBuffer.get())
+ return createStringError(std::errc::invalid_argument,
+ "invalid memory buffer");
+ GsymReader GR(std::move(MemBuffer));
+ llvm::Error Err = GR.parse();
+ if (Err)
+ return std::move(Err);
+ return std::move(GR);
+}
+
+llvm::Error
+GsymReader::parse() {
+ BinaryStreamReader FileData(MemBuffer->getBuffer(),
+ support::endian::system_endianness());
+ // Check for the magic bytes. This file format is designed to be mmap'ed
+ // into a process and accessed as read only. This is done for performance
+ // and efficiency for symbolicating and parsing GSYM data.
+ if (FileData.readObject(Hdr))
+ return createStringError(std::errc::invalid_argument,
+ "not enough data for a GSYM header");
+
+ const auto HostByteOrder = support::endian::system_endianness();
+ switch (Hdr->Magic) {
+ case GSYM_MAGIC:
+ Endian = HostByteOrder;
+ break;
+ case GSYM_CIGAM:
+ // This is a GSYM file, but not native endianness.
+ Endian = sys::IsBigEndianHost ? support::little : support::big;
+ Swap.reset(new SwappedData);
+ break;
+ default:
+ return createStringError(std::errc::invalid_argument,
+ "not a GSYM file");
+ }
+
+ bool DataIsLittleEndian = HostByteOrder != support::little;
+ // Read a correctly byte swapped header if we need to.
+ if (Swap) {
+ DataExtractor Data(MemBuffer->getBuffer(), DataIsLittleEndian, 4);
+ if (auto ExpectedHdr = Header::decode(Data))
+ Swap->Hdr = ExpectedHdr.get();
+ else
+ return ExpectedHdr.takeError();
+ Hdr = &Swap->Hdr;
+ }
+
+ // Detect errors in the header and report any that are found. If we make it
+ // past this without errors, we know we have a good magic value, a supported
+ // version number, verified address offset size and a valid UUID size.
+ if (Error Err = Hdr->checkForError())
+ return Err;
+
+ if (!Swap) {
+ // This is the native endianness case that is most common and optimized for
+ // efficient lookups. Here we just grab pointers to the native data and
+ // use ArrayRef objects to allow efficient read only access.
+
+ // Read the address offsets.
+ if (FileData.padToAlignment(Hdr->AddrOffSize) ||
+ FileData.readArray(AddrOffsets,
+ Hdr->NumAddresses * Hdr->AddrOffSize))
+ return createStringError(std::errc::invalid_argument,
+ "failed to read address table");
+
+ // Read the address info offsets.
+ if (FileData.padToAlignment(4) ||
+ FileData.readArray(AddrInfoOffsets, Hdr->NumAddresses))
+ return createStringError(std::errc::invalid_argument,
+ "failed to read address info offsets table");
+
+ // Read the file table.
+ uint32_t NumFiles = 0;
+ if (FileData.readInteger(NumFiles) || FileData.readArray(Files, NumFiles))
+ return createStringError(std::errc::invalid_argument,
+ "failed to read file table");
+
+ // Get the string table.
+ FileData.setOffset(Hdr->StrtabOffset);
+ if (FileData.readFixedString(StrTab.Data, Hdr->StrtabSize))
+ return createStringError(std::errc::invalid_argument,
+ "failed to read string table");
+  } else {
+ // This is the non native endianness case that is not common and not
+ // optimized for lookups. Here we decode the important tables into local
+ // storage and then set the ArrayRef objects to point to these swapped
+ // copies of the read only data so lookups can be as efficient as possible.
+ DataExtractor Data(MemBuffer->getBuffer(), DataIsLittleEndian, 4);
+
+ // Read the address offsets.
+ uint64_t Offset = alignTo(sizeof(Header), Hdr->AddrOffSize);
+ Swap->AddrOffsets.resize(Hdr->NumAddresses * Hdr->AddrOffSize);
+ switch (Hdr->AddrOffSize) {
+ case 1:
+ if (!Data.getU8(&Offset, Swap->AddrOffsets.data(), Hdr->NumAddresses))
+ return createStringError(std::errc::invalid_argument,
+ "failed to read address table");
+ break;
+ case 2:
+ if (!Data.getU16(&Offset,
+ reinterpret_cast<uint16_t *>(Swap->AddrOffsets.data()),
+ Hdr->NumAddresses))
+ return createStringError(std::errc::invalid_argument,
+ "failed to read address table");
+ break;
+ case 4:
+ if (!Data.getU32(&Offset,
+ reinterpret_cast<uint32_t *>(Swap->AddrOffsets.data()),
+ Hdr->NumAddresses))
+ return createStringError(std::errc::invalid_argument,
+ "failed to read address table");
+ break;
+ case 8:
+ if (!Data.getU64(&Offset,
+ reinterpret_cast<uint64_t *>(Swap->AddrOffsets.data()),
+ Hdr->NumAddresses))
+ return createStringError(std::errc::invalid_argument,
+ "failed to read address table");
+ }
+ AddrOffsets = ArrayRef<uint8_t>(Swap->AddrOffsets);
+
+ // Read the address info offsets.
+ Offset = alignTo(Offset, 4);
+ Swap->AddrInfoOffsets.resize(Hdr->NumAddresses);
+ if (Data.getU32(&Offset, Swap->AddrInfoOffsets.data(), Hdr->NumAddresses))
+ AddrInfoOffsets = ArrayRef<uint32_t>(Swap->AddrInfoOffsets);
+ else
+ return createStringError(std::errc::invalid_argument,
+ "failed to read address table");
+ // Read the file table.
+ const uint32_t NumFiles = Data.getU32(&Offset);
+ if (NumFiles > 0) {
+ Swap->Files.resize(NumFiles);
+ if (Data.getU32(&Offset, &Swap->Files[0].Dir, NumFiles*2))
+ Files = ArrayRef<FileEntry>(Swap->Files);
+ else
+ return createStringError(std::errc::invalid_argument,
+ "failed to read file table");
+ }
+ // Get the string table.
+ StrTab.Data = MemBuffer->getBuffer().substr(Hdr->StrtabOffset,
+ Hdr->StrtabSize);
+ if (StrTab.Data.empty())
+ return createStringError(std::errc::invalid_argument,
+ "failed to read string table");
+ }
+  return Error::success();
+}
+
+const Header &GsymReader::getHeader() const {
+  // The only way to get a GsymReader is from GsymReader::openFile(...) or
+  // GsymReader::copyBuffer(), both of which parse and validate the header,
+  // so Hdr must be a valid pointer and the assert below should not trigger.
+ assert(Hdr);
+ return *Hdr;
+}
+
+Optional<uint64_t> GsymReader::getAddress(size_t Index) const {
+ switch (Hdr->AddrOffSize) {
+ case 1: return addressForIndex<uint8_t>(Index);
+ case 2: return addressForIndex<uint16_t>(Index);
+ case 4: return addressForIndex<uint32_t>(Index);
+ case 8: return addressForIndex<uint64_t>(Index);
+ }
+ return llvm::None;
+}
+
+Optional<uint64_t> GsymReader::getAddressInfoOffset(size_t Index) const {
+ const auto NumAddrInfoOffsets = AddrInfoOffsets.size();
+ if (Index < NumAddrInfoOffsets)
+ return AddrInfoOffsets[Index];
+ return llvm::None;
+}
+
+Expected<uint64_t>
+GsymReader::getAddressIndex(const uint64_t Addr) const {
+ if (Addr >= Hdr->BaseAddress) {
+ const uint64_t AddrOffset = Addr - Hdr->BaseAddress;
+ Optional<uint64_t> AddrOffsetIndex;
+ switch (Hdr->AddrOffSize) {
+ case 1:
+ AddrOffsetIndex = getAddressOffsetIndex<uint8_t>(AddrOffset);
+ break;
+ case 2:
+ AddrOffsetIndex = getAddressOffsetIndex<uint16_t>(AddrOffset);
+ break;
+ case 4:
+ AddrOffsetIndex = getAddressOffsetIndex<uint32_t>(AddrOffset);
+ break;
+ case 8:
+ AddrOffsetIndex = getAddressOffsetIndex<uint64_t>(AddrOffset);
+ break;
+ default:
+ return createStringError(std::errc::invalid_argument,
+ "unsupported address offset size %u",
+ Hdr->AddrOffSize);
+ }
+ if (AddrOffsetIndex)
+ return *AddrOffsetIndex;
+ }
+ return createStringError(std::errc::invalid_argument,
+ "address 0x%" PRIx64 " is not in GSYM", Addr);
+
+}
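+
+// Illustrative lookup flow (a sketch, not upstream documentation): with
+// Hdr->BaseAddress == 0x1000 and AddrOffSize == 2, a query for 0x1234
+// computes AddrOffset == 0x234 and binary searches the uint16_t offset
+// table for the entry with the greatest offset <= 0x234; that entry's
+// index is then used with AddrInfoOffsets to find the FunctionInfo data.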
+
+llvm::Expected<FunctionInfo> GsymReader::getFunctionInfo(uint64_t Addr) const {
+ Expected<uint64_t> AddressIndex = getAddressIndex(Addr);
+ if (!AddressIndex)
+ return AddressIndex.takeError();
+ // Address info offsets size should have been checked in parse().
+ assert(*AddressIndex < AddrInfoOffsets.size());
+ auto AddrInfoOffset = AddrInfoOffsets[*AddressIndex];
+ DataExtractor Data(MemBuffer->getBuffer().substr(AddrInfoOffset), Endian, 4);
+ if (Optional<uint64_t> OptAddr = getAddress(*AddressIndex)) {
+ auto ExpectedFI = FunctionInfo::decode(Data, *OptAddr);
+ if (ExpectedFI) {
+ if (ExpectedFI->Range.contains(Addr) || ExpectedFI->Range.size() == 0)
+ return ExpectedFI;
+ return createStringError(std::errc::invalid_argument,
+ "address 0x%" PRIx64 " is not in GSYM", Addr);
+ }
+ }
+ return createStringError(std::errc::invalid_argument,
+ "failed to extract address[%" PRIu64 "]",
+ *AddressIndex);
+}
+
+llvm::Expected<LookupResult> GsymReader::lookup(uint64_t Addr) const {
+ Expected<uint64_t> AddressIndex = getAddressIndex(Addr);
+ if (!AddressIndex)
+ return AddressIndex.takeError();
+ // Address info offsets size should have been checked in parse().
+ assert(*AddressIndex < AddrInfoOffsets.size());
+ auto AddrInfoOffset = AddrInfoOffsets[*AddressIndex];
+ DataExtractor Data(MemBuffer->getBuffer().substr(AddrInfoOffset), Endian, 4);
+ if (Optional<uint64_t> OptAddr = getAddress(*AddressIndex))
+ return FunctionInfo::lookup(Data, *this, *OptAddr, Addr);
+ return createStringError(std::errc::invalid_argument,
+ "failed to extract address[%" PRIu64 "]",
+ *AddressIndex);
+}
+
+void GsymReader::dump(raw_ostream &OS) {
+ const auto &Header = getHeader();
+ // Dump the GSYM header.
+ OS << Header << "\n";
+ // Dump the address table.
+ OS << "Address Table:\n";
+ OS << "INDEX OFFSET";
+
+ switch (Hdr->AddrOffSize) {
+ case 1: OS << "8 "; break;
+ case 2: OS << "16"; break;
+ case 4: OS << "32"; break;
+ case 8: OS << "64"; break;
+ default: OS << "??"; break;
+ }
+ OS << " (ADDRESS)\n";
+ OS << "====== =============================== \n";
+ for (uint32_t I = 0; I < Header.NumAddresses; ++I) {
+ OS << format("[%4u] ", I);
+ switch (Hdr->AddrOffSize) {
+ case 1: OS << HEX8(getAddrOffsets<uint8_t>()[I]); break;
+ case 2: OS << HEX16(getAddrOffsets<uint16_t>()[I]); break;
+ case 4: OS << HEX32(getAddrOffsets<uint32_t>()[I]); break;
+    case 8: OS << HEX64(getAddrOffsets<uint64_t>()[I]); break;
+ default: break;
+ }
+ OS << " (" << HEX64(*getAddress(I)) << ")\n";
+ }
+ // Dump the address info offsets table.
+ OS << "\nAddress Info Offsets:\n";
+ OS << "INDEX Offset\n";
+ OS << "====== ==========\n";
+ for (uint32_t I = 0; I < Header.NumAddresses; ++I)
+ OS << format("[%4u] ", I) << HEX32(AddrInfoOffsets[I]) << "\n";
+ // Dump the file table.
+ OS << "\nFiles:\n";
+ OS << "INDEX DIRECTORY BASENAME PATH\n";
+ OS << "====== ========== ========== ==============================\n";
+ for (uint32_t I = 0; I < Files.size(); ++I) {
+ OS << format("[%4u] ", I) << HEX32(Files[I].Dir) << ' '
+ << HEX32(Files[I].Base) << ' ';
+ dump(OS, getFile(I));
+ OS << "\n";
+ }
+ OS << "\n" << StrTab << "\n";
+
+ for (uint32_t I = 0; I < Header.NumAddresses; ++I) {
+ OS << "FunctionInfo @ " << HEX32(AddrInfoOffsets[I]) << ": ";
+ if (auto FI = getFunctionInfo(*getAddress(I)))
+ dump(OS, *FI);
+ else
+ logAllUnhandledErrors(FI.takeError(), OS, "FunctionInfo:");
+ }
+}
+
+void GsymReader::dump(raw_ostream &OS, const FunctionInfo &FI) {
+ OS << FI.Range << " \"" << getString(FI.Name) << "\"\n";
+ if (FI.OptLineTable)
+ dump(OS, *FI.OptLineTable);
+ if (FI.Inline)
+ dump(OS, *FI.Inline);
+}
+
+void GsymReader::dump(raw_ostream &OS, const LineTable &LT) {
+ OS << "LineTable:\n";
+ for (auto &LE: LT) {
+ OS << " " << HEX64(LE.Addr) << ' ';
+ if (LE.File)
+ dump(OS, getFile(LE.File));
+ OS << ':' << LE.Line << '\n';
+ }
+}
+
+void GsymReader::dump(raw_ostream &OS, const InlineInfo &II, uint32_t Indent) {
+ if (Indent == 0)
+ OS << "InlineInfo:\n";
+ else
+ OS.indent(Indent);
+ OS << II.Ranges << ' ' << getString(II.Name);
+ if (II.CallFile != 0) {
+ if (auto File = getFile(II.CallFile)) {
+ OS << " called from ";
+ dump(OS, File);
+ OS << ':' << II.CallLine;
+ }
+ }
+ OS << '\n';
+ for (const auto &ChildII: II.Children)
+ dump(OS, ChildII, Indent + 2);
+}
+
+void GsymReader::dump(raw_ostream &OS, Optional<FileEntry> FE) {
+ if (FE) {
+    // If we have the file from index 0, then don't print anything.
+ if (FE->Dir == 0 && FE->Base == 0)
+ return;
+ StringRef Dir = getString(FE->Dir);
+ StringRef Base = getString(FE->Base);
+ if (!Dir.empty()) {
+ OS << Dir;
+ if (Dir.contains('\\') && !Dir.contains('/'))
+ OS << '\\';
+ else
+ OS << '/';
+ }
+ if (!Base.empty()) {
+ OS << Base;
+ }
+ if (!Dir.empty() || !Base.empty())
+ return;
+ }
+ OS << "<invalid-file>";
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/Header.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/Header.cpp
new file mode 100644
index 00000000000..0b3fb9c4989
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/Header.cpp
@@ -0,0 +1,109 @@
+//===- Header.cpp -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/Header.h"
+#include "llvm/DebugInfo/GSYM/FileWriter.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define HEX8(v) llvm::format_hex(v, 4)
+#define HEX16(v) llvm::format_hex(v, 6)
+#define HEX32(v) llvm::format_hex(v, 10)
+#define HEX64(v) llvm::format_hex(v, 18)
+
+using namespace llvm;
+using namespace gsym;
+
+raw_ostream &llvm::gsym::operator<<(raw_ostream &OS, const Header &H) {
+ OS << "Header:\n";
+ OS << " Magic = " << HEX32(H.Magic) << "\n";
+ OS << " Version = " << HEX16(H.Version) << '\n';
+ OS << " AddrOffSize = " << HEX8(H.AddrOffSize) << '\n';
+ OS << " UUIDSize = " << HEX8(H.UUIDSize) << '\n';
+ OS << " BaseAddress = " << HEX64(H.BaseAddress) << '\n';
+ OS << " NumAddresses = " << HEX32(H.NumAddresses) << '\n';
+ OS << " StrtabOffset = " << HEX32(H.StrtabOffset) << '\n';
+ OS << " StrtabSize = " << HEX32(H.StrtabSize) << '\n';
+ OS << " UUID = ";
+ for (uint8_t I = 0; I < H.UUIDSize; ++I)
+ OS << format_hex_no_prefix(H.UUID[I], 2);
+ OS << '\n';
+ return OS;
+}
+
+/// Check the header and detect any errors.
+llvm::Error Header::checkForError() const {
+ if (Magic != GSYM_MAGIC)
+ return createStringError(std::errc::invalid_argument,
+ "invalid GSYM magic 0x%8.8x", Magic);
+ if (Version != GSYM_VERSION)
+ return createStringError(std::errc::invalid_argument,
+ "unsupported GSYM version %u", Version);
+ switch (AddrOffSize) {
+ case 1: break;
+ case 2: break;
+ case 4: break;
+ case 8: break;
+ default:
+ return createStringError(std::errc::invalid_argument,
+ "invalid address offset size %u",
+ AddrOffSize);
+ }
+ if (UUIDSize > GSYM_MAX_UUID_SIZE)
+ return createStringError(std::errc::invalid_argument,
+ "invalid UUID size %u", UUIDSize);
+ return Error::success();
+}
+
+llvm::Expected<Header> Header::decode(DataExtractor &Data) {
+ uint64_t Offset = 0;
+ // The header is stored as a single blob of data that has a fixed byte size.
+ if (!Data.isValidOffsetForDataOfSize(Offset, sizeof(Header)))
+ return createStringError(std::errc::invalid_argument,
+ "not enough data for a gsym::Header");
+ Header H;
+ H.Magic = Data.getU32(&Offset);
+ H.Version = Data.getU16(&Offset);
+ H.AddrOffSize = Data.getU8(&Offset);
+ H.UUIDSize = Data.getU8(&Offset);
+ H.BaseAddress = Data.getU64(&Offset);
+ H.NumAddresses = Data.getU32(&Offset);
+ H.StrtabOffset = Data.getU32(&Offset);
+ H.StrtabSize = Data.getU32(&Offset);
+ Data.getU8(&Offset, H.UUID, GSYM_MAX_UUID_SIZE);
+ if (llvm::Error Err = H.checkForError())
+ return std::move(Err);
+ return H;
+}
+
+llvm::Error Header::encode(FileWriter &O) const {
+  // Users must verify the Header is valid prior to calling this function.
+ if (llvm::Error Err = checkForError())
+ return Err;
+ O.writeU32(Magic);
+ O.writeU16(Version);
+ O.writeU8(AddrOffSize);
+ O.writeU8(UUIDSize);
+ O.writeU64(BaseAddress);
+ O.writeU32(NumAddresses);
+ O.writeU32(StrtabOffset);
+ O.writeU32(StrtabSize);
+ O.writeData(llvm::ArrayRef<uint8_t>(UUID));
+ return Error::success();
+}
+
+bool llvm::gsym::operator==(const Header &LHS, const Header &RHS) {
+ return LHS.Magic == RHS.Magic && LHS.Version == RHS.Version &&
+ LHS.AddrOffSize == RHS.AddrOffSize && LHS.UUIDSize == RHS.UUIDSize &&
+ LHS.BaseAddress == RHS.BaseAddress &&
+ LHS.NumAddresses == RHS.NumAddresses &&
+ LHS.StrtabOffset == RHS.StrtabOffset &&
+ LHS.StrtabSize == RHS.StrtabSize &&
+ memcmp(LHS.UUID, RHS.UUID, LHS.UUIDSize) == 0;
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/InlineInfo.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/InlineInfo.cpp
new file mode 100644
index 00000000000..21679b1b78a
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/InlineInfo.cpp
@@ -0,0 +1,265 @@
+//===- InlineInfo.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/FileEntry.h"
+#include "llvm/DebugInfo/GSYM/FileWriter.h"
+#include "llvm/DebugInfo/GSYM/GsymReader.h"
+#include "llvm/DebugInfo/GSYM/InlineInfo.h"
+#include "llvm/Support/DataExtractor.h"
+#include <algorithm>
+#include <inttypes.h>
+
+using namespace llvm;
+using namespace gsym;
+
+
+raw_ostream &llvm::gsym::operator<<(raw_ostream &OS, const InlineInfo &II) {
+ if (!II.isValid())
+ return OS;
+ bool First = true;
+ for (auto Range : II.Ranges) {
+ if (First)
+ First = false;
+ else
+ OS << ' ';
+ OS << Range;
+ }
+ OS << " Name = " << HEX32(II.Name) << ", CallFile = " << II.CallFile
+ << ", CallLine = " << II.CallFile << '\n';
+ for (const auto &Child : II.Children)
+ OS << Child;
+ return OS;
+}
+
+static bool getInlineStackHelper(const InlineInfo &II, uint64_t Addr,
+ std::vector<const InlineInfo *> &InlineStack) {
+ if (II.Ranges.contains(Addr)) {
+ // If this is the top level that represents the concrete function,
+    // there will be no name and we should clear the inline stack. Otherwise
+ // we have found an inline call stack that we need to insert.
+ if (II.Name != 0)
+ InlineStack.insert(InlineStack.begin(), &II);
+ for (const auto &Child : II.Children) {
+ if (::getInlineStackHelper(Child, Addr, InlineStack))
+ break;
+ }
+ return !InlineStack.empty();
+ }
+ return false;
+}
+
+llvm::Optional<InlineInfo::InlineArray> InlineInfo::getInlineStack(uint64_t Addr) const {
+ InlineArray Result;
+ if (getInlineStackHelper(*this, Addr, Result))
+ return Result;
+ return llvm::None;
+}
+
+/// Skip an InlineInfo object in the specified data at the specified offset.
+///
+/// Used during the InlineInfo::lookup() call to quickly skip child InlineInfo
+/// objects when the address being looked up isn't contained in the InlineInfo object
+/// or its children. This avoids allocations by not appending child InlineInfo
+/// objects to the InlineInfo::Children array.
+///
+/// \param Data The binary stream to read the data from.
+///
+/// \param Offset The byte offset within \a Data.
+///
+/// \param SkippedRanges If true, address ranges have already been skipped.
+static bool skip(DataExtractor &Data, uint64_t &Offset, bool SkippedRanges) {
+ if (!SkippedRanges) {
+ if (AddressRanges::skip(Data, Offset) == 0)
+ return false;
+ }
+ bool HasChildren = Data.getU8(&Offset) != 0;
+ Data.getU32(&Offset); // Skip Inline.Name.
+ Data.getULEB128(&Offset); // Skip Inline.CallFile.
+ Data.getULEB128(&Offset); // Skip Inline.CallLine.
+ if (HasChildren) {
+ while (skip(Data, Offset, false /* SkippedRanges */))
+ /* Do nothing */;
+ }
+ // We skipped a valid InlineInfo.
+ return true;
+}
+
+/// A lookup helper function.
+///
+/// Used during the InlineInfo::lookup() call to quickly only parse an
+/// InlineInfo object if the address falls within this object. This avoids
+/// allocations by not appending child InlineInfo objects to the
+/// InlineInfo::Children array and also skips any InlineInfo objects that do
+/// not contain the address we are looking up.
+///
+/// \param Data The binary stream to read the data from.
+///
+/// \param Offset The byte offset within \a Data.
+///
+/// \param BaseAddr The address that the relative address range offsets are
+/// relative to.
+static bool lookup(const GsymReader &GR, DataExtractor &Data, uint64_t &Offset,
+ uint64_t BaseAddr, uint64_t Addr, SourceLocations &SrcLocs,
+ llvm::Error &Err) {
+ InlineInfo Inline;
+ Inline.Ranges.decode(Data, BaseAddr, Offset);
+ if (Inline.Ranges.empty())
+ return true;
+ // Check if the address is contained within the inline information, and if
+ // not, quickly skip this InlineInfo object and all its children.
+ if (!Inline.Ranges.contains(Addr)) {
+ skip(Data, Offset, true /* SkippedRanges */);
+ return false;
+ }
+
+ // The address range is contained within this InlineInfo, add the source
+ // location for this InlineInfo and any children that contain the address.
+ bool HasChildren = Data.getU8(&Offset) != 0;
+ Inline.Name = Data.getU32(&Offset);
+ Inline.CallFile = (uint32_t)Data.getULEB128(&Offset);
+ Inline.CallLine = (uint32_t)Data.getULEB128(&Offset);
+ if (HasChildren) {
+ // Child address ranges are encoded relative to the first address in the
+ // parent InlineInfo object.
+ const auto ChildBaseAddr = Inline.Ranges[0].Start;
+ bool Done = false;
+ while (!Done)
+ Done = lookup(GR, Data, Offset, ChildBaseAddr, Addr, SrcLocs, Err);
+ }
+
+ Optional<FileEntry> CallFile = GR.getFile(Inline.CallFile);
+ if (!CallFile) {
+ Err = createStringError(std::errc::invalid_argument,
+ "failed to extract file[%" PRIu32 "]",
+ Inline.CallFile);
+ return false;
+ }
+
+ if (CallFile->Dir || CallFile->Base) {
+ SourceLocation SrcLoc;
+ SrcLoc.Name = SrcLocs.back().Name;
+ SrcLoc.Offset = SrcLocs.back().Offset;
+ SrcLoc.Dir = GR.getString(CallFile->Dir);
+ SrcLoc.Base = GR.getString(CallFile->Base);
+ SrcLoc.Line = Inline.CallLine;
+ SrcLocs.back().Name = GR.getString(Inline.Name);
+ SrcLocs.back().Offset = Addr - Inline.Ranges[0].Start;
+ SrcLocs.push_back(SrcLoc);
+ }
+ return true;
+}
+
+llvm::Error InlineInfo::lookup(const GsymReader &GR, DataExtractor &Data,
+ uint64_t BaseAddr, uint64_t Addr,
+ SourceLocations &SrcLocs) {
+ // Call our recursive helper function starting at offset zero.
+ uint64_t Offset = 0;
+ llvm::Error Err = Error::success();
+ ::lookup(GR, Data, Offset, BaseAddr, Addr, SrcLocs, Err);
+ return Err;
+}
+
+/// Decode an InlineInfo in Data at the specified offset.
+///
+/// A local helper function to decode InlineInfo objects. This function is
+/// called recursively when parsing child InlineInfo objects.
+///
+/// \param Data The data extractor to decode from.
+/// \param Offset The offset within \a Data to decode from.
+/// \param BaseAddr The base address to use when decoding address ranges.
+/// \returns An InlineInfo or an error describing the issue that was
+/// encountered during decoding.
+static llvm::Expected<InlineInfo> decode(DataExtractor &Data, uint64_t &Offset,
+ uint64_t BaseAddr) {
+ InlineInfo Inline;
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing InlineInfo address ranges data", Offset);
+ Inline.Ranges.decode(Data, BaseAddr, Offset);
+ if (Inline.Ranges.empty())
+ return Inline;
+ if (!Data.isValidOffsetForDataOfSize(Offset, 1))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing InlineInfo uint8_t indicating children",
+ Offset);
+ bool HasChildren = Data.getU8(&Offset) != 0;
+ if (!Data.isValidOffsetForDataOfSize(Offset, 4))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing InlineInfo uint32_t for name", Offset);
+ Inline.Name = Data.getU32(&Offset);
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing ULEB128 for InlineInfo call file", Offset);
+ Inline.CallFile = (uint32_t)Data.getULEB128(&Offset);
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing ULEB128 for InlineInfo call line", Offset);
+ Inline.CallLine = (uint32_t)Data.getULEB128(&Offset);
+ if (HasChildren) {
+ // Child address ranges are encoded relative to the first address in the
+ // parent InlineInfo object.
+ const auto ChildBaseAddr = Inline.Ranges[0].Start;
+ while (true) {
+ llvm::Expected<InlineInfo> Child = decode(Data, Offset, ChildBaseAddr);
+ if (!Child)
+ return Child.takeError();
+      // An InlineInfo with empty Ranges terminates a child sibling chain.
+ if (Child.get().Ranges.empty())
+ break;
+ Inline.Children.emplace_back(std::move(*Child));
+ }
+ }
+ return Inline;
+}
+
+llvm::Expected<InlineInfo> InlineInfo::decode(DataExtractor &Data,
+ uint64_t BaseAddr) {
+ uint64_t Offset = 0;
+ return ::decode(Data, Offset, BaseAddr);
+}
+
+llvm::Error InlineInfo::encode(FileWriter &O, uint64_t BaseAddr) const {
+  // Users must verify the InlineInfo is valid prior to calling this function.
+ // We don't want to emit any InlineInfo objects if they are not valid since
+ // it will waste space in the GSYM file.
+ if (!isValid())
+ return createStringError(std::errc::invalid_argument,
+ "attempted to encode invalid InlineInfo object");
+ Ranges.encode(O, BaseAddr);
+ bool HasChildren = !Children.empty();
+ O.writeU8(HasChildren);
+ O.writeU32(Name);
+ O.writeULEB(CallFile);
+ O.writeULEB(CallLine);
+ if (HasChildren) {
+ // Child address ranges are encoded as relative to the first
+ // address in the Ranges for this object. This keeps the offsets
+ // small and allows for efficient encoding using ULEB offsets.
+ const uint64_t ChildBaseAddr = Ranges[0].Start;
+ for (const auto &Child : Children) {
+ // Make sure all child address ranges are contained in the parent address
+ // ranges.
+ for (const auto &ChildRange: Child.Ranges) {
+ if (!Ranges.contains(ChildRange))
+ return createStringError(std::errc::invalid_argument,
+ "child range not contained in parent");
+ }
+ llvm::Error Err = Child.encode(O, ChildBaseAddr);
+ if (Err)
+ return Err;
+ }
+
+    // Terminate the child sibling chain by emitting a zero. This zero will
+    // cause the recursive decode() above to produce an InlineInfo with empty
+    // Ranges, which stops the decoding of sibling InlineInfo objects.
+ O.writeULEB(0);
+ }
+ return Error::success();
+}
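+
+// Illustrative byte stream for one InlineInfo node, sketched from the
+// encode()/decode() pair above (not normative documentation):
+//
+//   AddressRanges Ranges;     // Offsets encoded relative to BaseAddr.
+//   uint8_t       HasChildren;
+//   uint32_t      Name;       // String table offset.
+//   ULEB128       CallFile;
+//   ULEB128       CallLine;
+//   InlineInfo    Children[]; // Only if HasChildren; each child's ranges
+//                             // are relative to Ranges[0].Start, followed
+//                             // by a single ULEB128 0 terminator.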
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/LineTable.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/LineTable.cpp
new file mode 100644
index 00000000000..a49a3ba9bf2
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/LineTable.cpp
@@ -0,0 +1,293 @@
+//===- LineTable.cpp --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/LineTable.h"
+#include "llvm/DebugInfo/GSYM/FileWriter.h"
+#include "llvm/Support/DataExtractor.h"
+
+using namespace llvm;
+using namespace gsym;
+
+enum LineTableOpCode {
+ EndSequence = 0x00, ///< End of the line table.
+ SetFile = 0x01, ///< Set LineTableRow.file_idx, don't push a row.
+ AdvancePC = 0x02, ///< Increment LineTableRow.address, and push a row.
+ AdvanceLine = 0x03, ///< Set LineTableRow.file_line, don't push a row.
+ FirstSpecial = 0x04, ///< All special opcodes push a row.
+};
+
+struct DeltaInfo {
+ int64_t Delta;
+ uint32_t Count;
+ DeltaInfo(int64_t D, uint32_t C) : Delta(D), Count(C) {}
+};
+
+inline bool operator<(const DeltaInfo &LHS, int64_t Delta) {
+ return LHS.Delta < Delta;
+}
+
+static bool encodeSpecial(int64_t MinLineDelta, int64_t MaxLineDelta,
+ int64_t LineDelta, uint64_t AddrDelta,
+ uint8_t &SpecialOp) {
+ if (LineDelta < MinLineDelta)
+ return false;
+ if (LineDelta > MaxLineDelta)
+ return false;
+ int64_t LineRange = MaxLineDelta - MinLineDelta + 1;
+ int64_t AdjustedOp = ((LineDelta - MinLineDelta) + AddrDelta * LineRange);
+ int64_t Op = AdjustedOp + FirstSpecial;
+ if (Op < 0)
+ return false;
+ if (Op > 255)
+ return false;
+ SpecialOp = (uint8_t)Op;
+ return true;
+}
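+
+// Worked example (illustrative only): with MinLineDelta == -4 and
+// MaxLineDelta == 10, LineRange == 15. Encoding LineDelta == 3 with
+// AddrDelta == 2 gives AdjustedOp == (3 - (-4)) + 2 * 15 == 37 and
+// SpecialOp == 37 + FirstSpecial == 41. The parser below inverts this:
+// LineDelta == -4 + (37 % 15) == 3 and AddrDelta == 37 / 15 == 2.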
+
+typedef std::function<bool(const LineEntry &Row)> LineEntryCallback;
+
+static llvm::Error parse(DataExtractor &Data, uint64_t BaseAddr,
+ LineEntryCallback const &Callback) {
+ uint64_t Offset = 0;
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing LineTable MinDelta", Offset);
+ int64_t MinDelta = Data.getSLEB128(&Offset);
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing LineTable MaxDelta", Offset);
+ int64_t MaxDelta = Data.getSLEB128(&Offset);
+ int64_t LineRange = MaxDelta - MinDelta + 1;
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": missing LineTable FirstLine", Offset);
+ const uint32_t FirstLine = (uint32_t)Data.getULEB128(&Offset);
+ LineEntry Row(BaseAddr, 1, FirstLine);
+ bool Done = false;
+ while (!Done) {
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": EOF found before EndSequence", Offset);
+ uint8_t Op = Data.getU8(&Offset);
+ switch (Op) {
+ case EndSequence:
+ Done = true;
+ break;
+ case SetFile:
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": EOF found before SetFile value",
+ Offset);
+ Row.File = (uint32_t)Data.getULEB128(&Offset);
+ break;
+ case AdvancePC:
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": EOF found before AdvancePC value",
+ Offset);
+ Row.Addr += Data.getULEB128(&Offset);
+ // If the function callback returns false, we stop parsing.
+ if (!Callback(Row))
+ return Error::success();
+ break;
+ case AdvanceLine:
+ if (!Data.isValidOffset(Offset))
+ return createStringError(std::errc::io_error,
+ "0x%8.8" PRIx64 ": EOF found before AdvanceLine value",
+ Offset);
+ Row.Line += Data.getSLEB128(&Offset);
+ break;
+ default: {
+ // A special opcode: a single byte that encodes both an address
+ // increment and a line increment.
+ uint8_t AdjustedOp = Op - FirstSpecial;
+ int64_t LineDelta = MinDelta + (AdjustedOp % LineRange);
+ uint64_t AddrDelta = (AdjustedOp / LineRange);
+ Row.Line += LineDelta;
+ Row.Addr += AddrDelta;
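+ // Continuing the worked example from encodeSpecial(): with
+ // MinDelta = -4 and LineRange = 15, Op = 55 yields AdjustedOp = 51,
+ // so LineDelta = -4 + (51 % 15) = 2 and AddrDelta = 51 / 15 = 3.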
+ // If the function callback returns false, we stop parsing.
+ if (!Callback(Row))
+ return Error::success();
+ break;
+ }
+ }
+ }
+ return Error::success();
+}
+
+llvm::Error LineTable::encode(FileWriter &Out, uint64_t BaseAddr) const {
+ // Users must verify the LineTable is valid prior to calling this function.
+ // We don't want to emit any LineTable objects if they are not valid since
+ // it will waste space in the GSYM file.
+ if (!isValid())
+ return createStringError(std::errc::invalid_argument,
+ "attempted to encode invalid LineTable object");
+
+ int64_t MinLineDelta = INT64_MAX;
+ int64_t MaxLineDelta = INT64_MIN;
+ std::vector<DeltaInfo> DeltaInfos;
+ if (Lines.size() == 1) {
+ MinLineDelta = 0;
+ MaxLineDelta = 0;
+ } else {
+ int64_t PrevLine = 1;
+ bool First = true;
+ for (const auto &line_entry : Lines) {
+ if (First)
+ First = false;
+ else {
+ int64_t LineDelta = (int64_t)line_entry.Line - PrevLine;
+ auto End = DeltaInfos.end();
+ auto Pos = std::lower_bound(DeltaInfos.begin(), End, LineDelta);
+ if (Pos != End && Pos->Delta == LineDelta)
+ ++Pos->Count;
+ else
+ DeltaInfos.insert(Pos, DeltaInfo(LineDelta, 1));
+ if (LineDelta < MinLineDelta)
+ MinLineDelta = LineDelta;
+ if (LineDelta > MaxLineDelta)
+ MaxLineDelta = LineDelta;
+ }
+ PrevLine = (int64_t)line_entry.Line;
+ }
+ assert(MinLineDelta <= MaxLineDelta);
+ }
+ // Set the min and max line delta intelligently based on the counts of
+ // the line deltas if our range is too large.
+ const int64_t MaxLineRange = 14;
+ if (MaxLineDelta - MinLineDelta > MaxLineRange) {
+ uint32_t BestIndex = 0;
+ uint32_t BestEndIndex = 0;
+ uint32_t BestCount = 0;
+ const size_t NumDeltaInfos = DeltaInfos.size();
+ for (uint32_t I = 0; I < NumDeltaInfos; ++I) {
+ const int64_t FirstDelta = DeltaInfos[I].Delta;
+ uint32_t CurrCount = 0;
+ uint32_t J;
+ for (J = I; J < NumDeltaInfos; ++J) {
+ auto LineRange = DeltaInfos[J].Delta - FirstDelta;
+ if (LineRange > MaxLineRange)
+ break;
+ CurrCount += DeltaInfos[J].Count;
+ }
+ if (CurrCount > BestCount) {
+ BestIndex = I;
+ BestEndIndex = J - 1;
+ BestCount = CurrCount;
+ }
+ }
+ MinLineDelta = DeltaInfos[BestIndex].Delta;
+ MaxLineDelta = DeltaInfos[BestEndIndex].Delta;
+ }
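+ // For example (hypothetical counts), with DeltaInfos of
+ // {-20:x1, 1:x50, 2:x30, 40:x2} the densest window within MaxLineRange
+ // is [1, 2], so the rare deltas -20 and 40 fall back to AdvanceLine
+ // opcodes instead of widening the special-opcode range.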
+ if (MinLineDelta == MaxLineDelta && MinLineDelta > 0 &&
+ MinLineDelta < MaxLineRange)
+ MinLineDelta = 0;
+ assert(MinLineDelta <= MaxLineDelta);
+
+ // Initialize the line entry state as a starting point. All line entries
+ // will be deltas from this.
+ LineEntry Prev(BaseAddr, 1, Lines.front().Line);
+
+ // Write out the min and max line delta as signed LEB128.
+ Out.writeSLEB(MinLineDelta);
+ Out.writeSLEB(MaxLineDelta);
+ // Write out the starting line number as an unsigned LEB128.
+ Out.writeULEB(Prev.Line);
+
+ for (const auto &Curr : Lines) {
+ if (Curr.Addr < BaseAddr)
+ return createStringError(std::errc::invalid_argument,
+ "LineEntry has address 0x%" PRIx64 " which is "
+ "less than the function start address 0x%"
+ PRIx64, Curr.Addr, BaseAddr);
+ if (Curr.Addr < Prev.Addr)
+ return createStringError(std::errc::invalid_argument,
+ "LineEntry in LineTable not in ascending order");
+ const uint64_t AddrDelta = Curr.Addr - Prev.Addr;
+ int64_t LineDelta = 0;
+ if (Curr.Line > Prev.Line)
+ LineDelta = Curr.Line - Prev.Line;
+ else if (Prev.Line > Curr.Line)
+ LineDelta = -((int32_t)(Prev.Line - Curr.Line));
+
+ // Set the file if it doesn't match the current one.
+ if (Curr.File != Prev.File) {
+ Out.writeU8(SetFile);
+ Out.writeULEB(Curr.File);
+ }
+
+ uint8_t SpecialOp;
+ if (encodeSpecial(MinLineDelta, MaxLineDelta, LineDelta, AddrDelta,
+ SpecialOp)) {
+ // Advance the PC and line and push a row.
+ Out.writeU8(SpecialOp);
+ } else {
+ // We can't encode the address delta and line delta into
+ // a single special opcode, we must do them separately.
+
+ // Advance the line.
+ if (LineDelta != 0) {
+ Out.writeU8(AdvanceLine);
+ Out.writeSLEB(LineDelta);
+ }
+
+ // Advance the PC and push a row.
+ Out.writeU8(AdvancePC);
+ Out.writeULEB(AddrDelta);
+ }
+ Prev = Curr;
+ }
+ Out.writeU8(EndSequence);
+ return Error::success();
+}
+
+// Parse all line table entries into the "LineTable" vector. We can
+// cache the results of this if needed, or we can call LineTable::lookup()
+// below.
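+//
+// Illustrative use of decode() (dump() below is a hypothetical helper):
+//   Expected<LineTable> LT = LineTable::decode(Data, FuncStartAddr);
+//   if (!LT)
+//     return LT.takeError();
+//   for (const LineEntry &Entry : *LT)
+//     dump(Entry);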
+llvm::Expected<LineTable> LineTable::decode(DataExtractor &Data,
+ uint64_t BaseAddr) {
+ LineTable LT;
+ llvm::Error Err = parse(Data, BaseAddr, [&](const LineEntry &Row) -> bool {
+ LT.Lines.push_back(Row);
+ return true; // Keep parsing by returning true.
+ });
+ if (Err)
+ return std::move(Err);
+ return LT;
+}
+
+// Parse the line table on the fly and find the row we are looking for.
+// Callers must decide whether to cache the line table by calling
+// LineTable::decode(...) above, or to just call this function each time;
+// there is a CPU vs. memory tradeoff to be determined.
+Expected<LineEntry> LineTable::lookup(DataExtractor &Data, uint64_t BaseAddr,
+ uint64_t Addr) {
+ LineEntry Result;
+ llvm::Error Err = parse(Data, BaseAddr,
+ [Addr, &Result](const LineEntry &Row) -> bool {
+ if (Addr < Row.Addr)
+ return false; // Stop parsing; Result (the previous row) is the match.
+ Result = Row;
+ if (Addr == Row.Addr) {
+ // Stop parsing, this is the row we are looking for since the address
+ // matches.
+ return false;
+ }
+ return true; // Keep parsing till we find the right row.
+ });
+ if (Err)
+ return std::move(Err);
+ if (Result.isValid())
+ return Result;
+ return createStringError(std::errc::invalid_argument,
+ "address 0x%" PRIx64 " is not in the line table",
+ Addr);
+}
+
+raw_ostream &llvm::gsym::operator<<(raw_ostream &OS, const LineTable &LT) {
+ for (const auto &LineEntry : LT)
+ OS << LineEntry << '\n';
+ return OS;
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/LookupResult.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/LookupResult.cpp
new file mode 100644
index 00000000000..8a624226b1d
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/LookupResult.cpp
@@ -0,0 +1,74 @@
+//===- LookupResult.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/LookupResult.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace gsym;
+
+std::string LookupResult::getSourceFile(uint32_t Index) const {
+ std::string Fullpath;
+ if (Index < Locations.size()) {
+ if (!Locations[Index].Dir.empty()) {
+ if (Locations[Index].Base.empty()) {
+ Fullpath = std::string(Locations[Index].Dir);
+ } else {
+ llvm::SmallString<64> Storage;
+ llvm::sys::path::append(Storage, Locations[Index].Dir,
+ Locations[Index].Base);
+ Fullpath.assign(Storage.begin(), Storage.end());
+ }
+ } else if (!Locations[Index].Base.empty())
+ Fullpath = std::string(Locations[Index].Base);
+ }
+ return Fullpath;
+}
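+
+// For example, with Locations[Index].Dir = "/usr/src" and Base = "foo.c"
+// (hypothetical values), getSourceFile() returns "/usr/src/foo.c"; with an
+// empty Dir it returns just "foo.c".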
+
+raw_ostream &llvm::gsym::operator<<(raw_ostream &OS, const SourceLocation &SL) {
+ OS << SL.Name;
+ if (SL.Offset > 0)
+ OS << " + " << SL.Offset;
+ if (SL.Dir.size() || SL.Base.size()) {
+ OS << " @ ";
+ if (!SL.Dir.empty()) {
+ OS << SL.Dir;
+ if (SL.Dir.contains('\\') && !SL.Dir.contains('/'))
+ OS << '\\';
+ else
+ OS << '/';
+ }
+ if (SL.Base.empty())
+ OS << "<invalid-file>";
+ else
+ OS << SL.Base;
+ OS << ':' << SL.Line;
+ }
+ return OS;
+}
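+
+// With the above, a SourceLocation of {Name = "main", Offset = 12,
+// Dir = "/usr/src", Base = "foo.c", Line = 42} (illustrative values)
+// prints as "main + 12 @ /usr/src/foo.c:42".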
+
+raw_ostream &llvm::gsym::operator<<(raw_ostream &OS, const LookupResult &LR) {
+ OS << HEX64(LR.LookupAddr) << ": ";
+ auto NumLocations = LR.Locations.size();
+ for (size_t I = 0; I < NumLocations; ++I) {
+ if (I > 0) {
+ OS << '\n';
+ OS.indent(20);
+ }
+ const bool IsInlined = I + 1 != NumLocations;
+ OS << LR.Locations[I];
+ if (IsInlined)
+ OS << " [inlined]";
+ }
+ OS << '\n';
+ return OS;
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/ObjectFileTransformer.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/ObjectFileTransformer.cpp
new file mode 100644
index 00000000000..ad35aefe777
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/ObjectFileTransformer.cpp
@@ -0,0 +1,116 @@
+//===- ObjectFileTransformer.cpp --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <unordered_set>
+
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/MachOUniversal.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "llvm/DebugInfo/GSYM/ObjectFileTransformer.h"
+#include "llvm/DebugInfo/GSYM/GsymCreator.h"
+
+using namespace llvm;
+using namespace gsym;
+
+constexpr uint32_t NT_GNU_BUILD_ID_TAG = 0x03;
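+
+// The .note.gnu.build-id section parsed below is a standard ELF note. For
+// example (hypothetical bytes), a 20-byte SHA-1 build ID is laid out as:
+//   namesz = 4, descsz = 20, type = 3 (NT_GNU_BUILD_ID),
+//   name = "GNU\0", then the 20 ID bytes, each field 4-byte aligned.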
+
+static std::vector<uint8_t> getUUID(const object::ObjectFile &Obj) {
+ // Extract the UUID from the object file
+ std::vector<uint8_t> UUID;
+ if (auto *MachO = dyn_cast<object::MachOObjectFile>(&Obj)) {
+ const ArrayRef<uint8_t> MachUUID = MachO->getUuid();
+ if (!MachUUID.empty())
+ UUID.assign(MachUUID.data(), MachUUID.data() + MachUUID.size());
+ } else if (isa<object::ELFObjectFileBase>(&Obj)) {
+ const StringRef GNUBuildID(".note.gnu.build-id");
+ for (const object::SectionRef &Sect : Obj.sections()) {
+ Expected<StringRef> SectNameOrErr = Sect.getName();
+ if (!SectNameOrErr) {
+ consumeError(SectNameOrErr.takeError());
+ continue;
+ }
+ StringRef SectName(*SectNameOrErr);
+ if (SectName != GNUBuildID)
+ continue;
+ StringRef BuildIDData;
+ Expected<StringRef> E = Sect.getContents();
+ if (E)
+ BuildIDData = *E;
+ else {
+ consumeError(E.takeError());
+ continue;
+ }
+ DataExtractor Decoder(BuildIDData, Obj.makeTriple().isLittleEndian(), 8);
+ uint64_t Offset = 0;
+ const uint32_t NameSize = Decoder.getU32(&Offset);
+ const uint32_t PayloadSize = Decoder.getU32(&Offset);
+ const uint32_t PayloadType = Decoder.getU32(&Offset);
+ StringRef Name(Decoder.getFixedLengthString(&Offset, NameSize));
+ if (Name == "GNU" && PayloadType == NT_GNU_BUILD_ID_TAG) {
+ Offset = alignTo(Offset, 4);
+ StringRef UUIDBytes(Decoder.getBytes(&Offset, PayloadSize));
+ if (!UUIDBytes.empty()) {
+ auto Ptr = reinterpret_cast<const uint8_t *>(UUIDBytes.data());
+ UUID.assign(Ptr, Ptr + UUIDBytes.size());
+ }
+ }
+ }
+ }
+ return UUID;
+}
+
+llvm::Error ObjectFileTransformer::convert(const object::ObjectFile &Obj,
+ raw_ostream &Log,
+ GsymCreator &Gsym) {
+ using namespace llvm::object;
+
+ const bool IsMachO = isa<MachOObjectFile>(&Obj);
+ const bool IsELF = isa<ELFObjectFileBase>(&Obj);
+
+ // Read build ID.
+ Gsym.setUUID(getUUID(Obj));
+
+ // Parse the symbol table.
+ size_t NumBefore = Gsym.getNumFunctionInfos();
+ for (const object::SymbolRef &Sym : Obj.symbols()) {
+ Expected<SymbolRef::Type> SymType = Sym.getType();
+ if (!SymType) {
+ consumeError(SymType.takeError());
+ continue;
+ }
+ Expected<uint64_t> AddrOrErr = Sym.getValue();
+ if (!AddrOrErr)
+ // TODO: Test this error.
+ return AddrOrErr.takeError();
+
+ if (SymType.get() != SymbolRef::Type::ST_Function ||
+ !Gsym.IsValidTextAddress(*AddrOrErr) ||
+ Gsym.hasFunctionInfoForAddress(*AddrOrErr))
+ continue;
+ // Function sizes for Mach-O files will be 0, since Mach-O symbol
+ // tables do not record a size.
+ constexpr bool NoCopy = false;
+ const uint64_t size = IsELF ? ELFSymbolRef(Sym).getSize() : 0;
+ Expected<StringRef> Name = Sym.getName();
+ if (!Name) {
+ logAllUnhandledErrors(Name.takeError(), Log, "ObjectFileTransformer: ");
+ continue;
+ }
+ // For Mach-O files, strip the leading '_' from the symbol name if
+ // present.
+ if (IsMachO)
+ Name->consume_front("_");
+ Gsym.addFunctionInfo(
+ FunctionInfo(*AddrOrErr, size, Gsym.insertString(*Name, NoCopy)));
+ }
+ size_t FunctionsAddedCount = Gsym.getNumFunctionInfos() - NumBefore;
+ Log << "Loaded " << FunctionsAddedCount << " functions from symbol table.\n";
+ return Error::success();
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/GSYM/Range.cpp b/contrib/libs/llvm12/lib/DebugInfo/GSYM/Range.cpp
new file mode 100644
index 00000000000..044ddb8ba1b
--- /dev/null
+++ b/contrib/libs/llvm12/lib/DebugInfo/GSYM/Range.cpp
@@ -0,0 +1,124 @@
+//===- Range.cpp ------------------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/GSYM/Range.h"
+#include "llvm/DebugInfo/GSYM/FileWriter.h"
+#include "llvm/Support/DataExtractor.h"
+#include <algorithm>
+#include <inttypes.h>
+
+using namespace llvm;
+using namespace gsym;
+
+
+void AddressRanges::insert(AddressRange Range) {
+ if (Range.size() == 0)
+ return;
+
+ auto It = llvm::upper_bound(Ranges, Range);
+ auto It2 = It;
+ while (It2 != Ranges.end() && It2->Start < Range.End)
+ ++It2;
+ if (It != It2) {
+ Range.End = std::max(Range.End, It2[-1].End);
+ It = Ranges.erase(It, It2);
+ }
+ if (It != Ranges.begin() && Range.Start < It[-1].End)
+ It[-1].End = std::max(It[-1].End, Range.End);
+ else
+ Ranges.insert(It, Range);
+}
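+
+// For example (hypothetical ranges): inserting [0x1000, 0x2000) and then
+// [0x1800, 0x3000) leaves a single coalesced entry [0x1000, 0x3000), while
+// a later insert of [0x4000, 0x5000) is kept as a separate, sorted entry.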
+
+bool AddressRanges::contains(uint64_t Addr) const {
+ auto It = std::partition_point(
+ Ranges.begin(), Ranges.end(),
+ [=](const AddressRange &R) { return R.Start <= Addr; });
+ return It != Ranges.begin() && Addr < It[-1].End;
+}
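+
+// E.g. with Ranges = {[0x10, 0x20), [0x40, 0x50)} (hypothetical),
+// contains(0x15) is true while contains(0x20) and contains(0x30) are
+// false, since End addresses are exclusive.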
+
+bool AddressRanges::contains(AddressRange Range) const {
+ if (Range.size() == 0)
+ return false;
+ auto It = std::partition_point(
+ Ranges.begin(), Ranges.end(),
+ [=](const AddressRange &R) { return R.Start <= Range.Start; });
+ if (It == Ranges.begin())
+ return false;
+ return Range.End <= It[-1].End;
+}
+
+Optional<AddressRange>
+AddressRanges::getRangeThatContains(uint64_t Addr) const {
+ auto It = std::partition_point(
+ Ranges.begin(), Ranges.end(),
+ [=](const AddressRange &R) { return R.Start <= Addr; });
+ if (It != Ranges.begin() && Addr < It[-1].End)
+ return It[-1];
+ return llvm::None;
+}
+
+raw_ostream &llvm::gsym::operator<<(raw_ostream &OS, const AddressRange &R) {
+ return OS << '[' << HEX64(R.Start) << " - " << HEX64(R.End) << ")";
+}
+
+raw_ostream &llvm::gsym::operator<<(raw_ostream &OS, const AddressRanges &AR) {
+ size_t Size = AR.size();
+ for (size_t I = 0; I < Size; ++I) {
+ if (I)
+ OS << ' ';
+ OS << AR[I];
+ }
+ return OS;
+}
+
+void AddressRange::encode(FileWriter &O, uint64_t BaseAddr) const {
+ assert(Start >= BaseAddr);
+ O.writeULEB(Start - BaseAddr);
+ O.writeULEB(size());
+}
+
+void AddressRange::decode(DataExtractor &Data, uint64_t BaseAddr,
+ uint64_t &Offset) {
+ const uint64_t AddrOffset = Data.getULEB128(&Offset);
+ const uint64_t Size = Data.getULEB128(&Offset);
+ const uint64_t StartAddr = BaseAddr + AddrOffset;
+ Start = StartAddr;
+ End = StartAddr + Size;
+}
+
+void AddressRanges::encode(FileWriter &O, uint64_t BaseAddr) const {
+ O.writeULEB(Ranges.size());
+ if (Ranges.empty())
+ return;
+ for (auto Range : Ranges)
+ Range.encode(O, BaseAddr);
+}
+
+void AddressRanges::decode(DataExtractor &Data, uint64_t BaseAddr,
+ uint64_t &Offset) {
+ clear();
+ uint64_t NumRanges = Data.getULEB128(&Offset);
+ if (NumRanges == 0)
+ return;
+ Ranges.resize(NumRanges);
+ for (auto &Range : Ranges)
+ Range.decode(Data, BaseAddr, Offset);
+}
+
+void AddressRange::skip(DataExtractor &Data, uint64_t &Offset) {
+ Data.getULEB128(&Offset);
+ Data.getULEB128(&Offset);
+}
+
+uint64_t AddressRanges::skip(DataExtractor &Data, uint64_t &Offset) {
+ uint64_t NumRanges = Data.getULEB128(&Offset);
+ for (uint64_t I = 0; I < NumRanges; ++I)
+ AddressRange::skip(Data, Offset);
+ return NumRanges;
+}
diff --git a/contrib/libs/llvm12/lib/DebugInfo/MSF/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/DebugInfo/MSF/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/DebugInfo/MSF/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/DebugInfo/PDB/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/DebugInfo/PDB/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/DebugInfo/PDB/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/DebugInfo/Symbolize/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/DebugInfo/Symbolize/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/DebugInfo/Symbolize/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Demangle/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Demangle/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Demangle/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Execution.cpp
new file mode 100644
index 00000000000..62e1ea6e0f0
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -0,0 +1,2168 @@
+//===-- Execution.cpp - Implement code to simulate the program ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the actual instruction interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cmath>
+using namespace llvm;
+
+#define DEBUG_TYPE "interpreter"
+
+STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
+
+static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
+ cl::desc("make the interpreter print every volatile load and store"));
+
+//===----------------------------------------------------------------------===//
+// Various Helper Functions
+//===----------------------------------------------------------------------===//
+
+static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
+ SF.Values[V] = Val;
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::FloatTyID:
+ Dest.FloatVal = -Src.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = -Src.DoubleVal;
+ break;
+ default:
+ llvm_unreachable("Unhandled type for FNeg instruction");
+ }
+}
+
+void Interpreter::visitUnaryOperator(UnaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src = getOperandValue(I.getOperand(0), SF);
+ GenericValue R; // Result
+
+ // First process vector operation
+ if (Ty->isVectorTy()) {
+ R.AggregateVal.resize(Src.AggregateVal.size());
+
+ switch(I.getOpcode()) {
+ default:
+ llvm_unreachable("Don't know how to handle this unary operator");
+ break;
+ case Instruction::FNeg:
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal;
+ } else if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) {
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal;
+ } else {
+ llvm_unreachable("Unhandled type for FNeg instruction");
+ }
+ break;
+ }
+ } else {
+ switch (I.getOpcode()) {
+ default:
+ llvm_unreachable("Don't know how to handle this unary operator");
+ break;
+ case Instruction::FNeg: executeFNegInst(R, Src, Ty); break;
+ }
+ }
+ SetValue(&I, R, SF);
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
+ case Type::TY##TyID: \
+ Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
+ break
+
+static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(+, Float);
+ IMPLEMENT_BINARY_OPERATOR(+, Double);
+ default:
+ dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(-, Float);
+ IMPLEMENT_BINARY_OPERATOR(-, Double);
+ default:
+ dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(*, Float);
+ IMPLEMENT_BINARY_OPERATOR(*, Double);
+ default:
+ dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_BINARY_OPERATOR(/, Float);
+ IMPLEMENT_BINARY_OPERATOR(/, Double);
+ default:
+ dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::FloatTyID:
+ Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
+ break;
+ default:
+ dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
+ case Type::IntegerTyID: \
+ Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
+ break;
+
+#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY) \
+ case Type::FixedVectorTyID: \
+ case Type::ScalableVectorTyID: { \
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
+ Dest.AggregateVal.resize(Src1.AggregateVal.size()); \
+ for (uint32_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
+ Dest.AggregateVal[_i].IntVal = APInt( \
+ 1, Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal)); \
+ } break;
+
+// Handle pointers specially because they must be compared with only as much
+// width as the host has. We _do not_ want to be comparing 64 bit values when
+// running on a 32-bit target, otherwise the upper 32 bits might mess up
+// comparisons if they contain garbage.
+#define IMPLEMENT_POINTER_ICMP(OP) \
+ case Type::PointerTyID: \
+ Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
+ (void*)(intptr_t)Src2.PointerVal); \
+ break;
+
+static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(eq,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
+ IMPLEMENT_POINTER_ICMP(==);
+ default:
+ dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ne,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
+ IMPLEMENT_POINTER_ICMP(!=);
+ default:
+ dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ult,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
+ IMPLEMENT_POINTER_ICMP(<);
+ default:
+ dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(slt,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
+ IMPLEMENT_POINTER_ICMP(<);
+ default:
+ dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ugt,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
+ IMPLEMENT_POINTER_ICMP(>);
+ default:
+ dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sgt,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
+ IMPLEMENT_POINTER_ICMP(>);
+ default:
+ dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(ule,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
+ IMPLEMENT_POINTER_ICMP(<=);
+ default:
+ dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sle,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
+ IMPLEMENT_POINTER_ICMP(<=);
+ default:
+ dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(uge,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
+ IMPLEMENT_POINTER_ICMP(>=);
+ default:
+ dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_INTEGER_ICMP(sge,Ty);
+ IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
+ IMPLEMENT_POINTER_ICMP(>=);
+ default:
+ dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+void Interpreter::visitICmpInst(ICmpInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ switch (I.getPredicate()) {
+ case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
+ case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
+ default:
+ dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
+ llvm_unreachable(nullptr);
+ }
+
+ SetValue(&I, R, SF);
+}
+
+#define IMPLEMENT_FCMP(OP, TY) \
+ case Type::TY##TyID: \
+ Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
+ break
+
+#define IMPLEMENT_VECTOR_FCMP_T(OP, TY) \
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size()); \
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() ); \
+ for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++) \
+ Dest.AggregateVal[_i].IntVal = APInt(1, \
+ Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
+ break;
+
+#define IMPLEMENT_VECTOR_FCMP(OP) \
+ case Type::FixedVectorTyID: \
+ case Type::ScalableVectorTyID: \
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) { \
+ IMPLEMENT_VECTOR_FCMP_T(OP, Float); \
+ } else { \
+ IMPLEMENT_VECTOR_FCMP_T(OP, Double); \
+ }
+
+static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(==, Float);
+ IMPLEMENT_FCMP(==, Double);
+ IMPLEMENT_VECTOR_FCMP(==);
+ default:
+ dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+#define IMPLEMENT_SCALAR_NANS(TY, X,Y) \
+ if (TY->isFloatTy()) { \
+ if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
+ Dest.IntVal = APInt(1,false); \
+ return Dest; \
+ } \
+ } else { \
+ if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
+ Dest.IntVal = APInt(1,false); \
+ return Dest; \
+ } \
+ }
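+
+// Note: per IEEE-754 a NaN compares unequal to itself, so the X != X
+// tests in these macros are NaN checks.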
+
+#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG) \
+ assert(X.AggregateVal.size() == Y.AggregateVal.size()); \
+ Dest.AggregateVal.resize( X.AggregateVal.size() ); \
+ for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) { \
+ if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val || \
+ Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val) \
+ Dest.AggregateVal[_i].IntVal = APInt(1,FLAG); \
+ else { \
+ Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG); \
+ } \
+ }
+
+#define MASK_VECTOR_NANS(TY, X,Y, FLAG) \
+ if (TY->isVectorTy()) { \
+ if (cast<VectorType>(TY)->getElementType()->isFloatTy()) { \
+ MASK_VECTOR_NANS_T(X, Y, Float, FLAG) \
+ } else { \
+ MASK_VECTOR_NANS_T(X, Y, Double, FLAG) \
+ } \
+ } \
+
+
+
+static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
+ Type *Ty)
+{
+ GenericValue Dest;
+ // if input is scalar value and Src1 or Src2 is NaN return false
+ IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
+ // if vector input detect NaNs and fill mask
+ MASK_VECTOR_NANS(Ty, Src1, Src2, false)
+ GenericValue DestMask = Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(!=, Float);
+ IMPLEMENT_FCMP(!=, Double);
+ IMPLEMENT_VECTOR_FCMP(!=);
+ default:
+ dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ // in vector case mask out NaN elements
+ if (Ty->isVectorTy())
+ for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+ if (DestMask.AggregateVal[_i].IntVal == false)
+ Dest.AggregateVal[_i].IntVal = APInt(1,false);
+
+ return Dest;
+}
+
+static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(<=, Float);
+ IMPLEMENT_FCMP(<=, Double);
+ IMPLEMENT_VECTOR_FCMP(<=);
+ default:
+ dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(>=, Float);
+ IMPLEMENT_FCMP(>=, Double);
+ IMPLEMENT_VECTOR_FCMP(>=);
+ default:
+ dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(<, Float);
+ IMPLEMENT_FCMP(<, Double);
+ IMPLEMENT_VECTOR_FCMP(<);
+ default:
+ dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ switch (Ty->getTypeID()) {
+ IMPLEMENT_FCMP(>, Float);
+ IMPLEMENT_FCMP(>, Double);
+ IMPLEMENT_VECTOR_FCMP(>);
+ default:
+ dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ return Dest;
+}
+
+#define IMPLEMENT_UNORDERED(TY, X,Y) \
+ if (TY->isFloatTy()) { \
+ if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) { \
+ Dest.IntVal = APInt(1,true); \
+ return Dest; \
+ } \
+ } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
+ Dest.IntVal = APInt(1,true); \
+ return Dest; \
+ }
+
+#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC) \
+ if (TY->isVectorTy()) { \
+ GenericValue DestMask = Dest; \
+ Dest = FUNC(Src1, Src2, Ty); \
+ for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++) \
+ if (DestMask.AggregateVal[_i].IntVal == true) \
+ Dest.AggregateVal[_i].IntVal = APInt(1, true); \
+ return Dest; \
+ }
+
+static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
+ return executeFCMP_OEQ(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
+ return executeFCMP_ONE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
+ return executeFCMP_OLE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
+ return executeFCMP_OGE(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
+ return executeFCMP_OLT(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ IMPLEMENT_UNORDERED(Ty, Src1, Src2)
+ MASK_VECTOR_NANS(Ty, Src1, Src2, true)
+ IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
+ return executeFCMP_OGT(Src1, Src2, Ty);
+}
+
+static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+ for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,
+ ( (Src1.AggregateVal[_i].FloatVal ==
+ Src1.AggregateVal[_i].FloatVal) &&
+ (Src2.AggregateVal[_i].FloatVal ==
+ Src2.AggregateVal[_i].FloatVal)));
+ } else {
+ for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,
+ ( (Src1.AggregateVal[_i].DoubleVal ==
+ Src1.AggregateVal[_i].DoubleVal) &&
+ (Src2.AggregateVal[_i].DoubleVal ==
+ Src2.AggregateVal[_i].DoubleVal)));
+ }
+ } else if (Ty->isFloatTy())
+ Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
+ Src2.FloatVal == Src2.FloatVal));
+ else {
+ Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
+ Src2.DoubleVal == Src2.DoubleVal));
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
+ Type *Ty) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
+ for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,
+ ( (Src1.AggregateVal[_i].FloatVal !=
+ Src1.AggregateVal[_i].FloatVal) ||
+ (Src2.AggregateVal[_i].FloatVal !=
+ Src2.AggregateVal[_i].FloatVal)));
+ } else {
+ for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,
+ ( (Src1.AggregateVal[_i].DoubleVal !=
+ Src1.AggregateVal[_i].DoubleVal) ||
+ (Src2.AggregateVal[_i].DoubleVal !=
+ Src2.AggregateVal[_i].DoubleVal)));
+ }
+ } else if (Ty->isFloatTy())
+ Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
+ Src2.FloatVal != Src2.FloatVal));
+ else {
+ Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
+ Src2.DoubleVal != Src2.DoubleVal));
+ }
+ return Dest;
+}
+
+static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
+ Type *Ty, const bool val) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,val);
+ } else {
+ Dest.IntVal = APInt(1, val);
+ }
+
+ return Dest;
+}
+
+void Interpreter::visitFCmpInst(FCmpInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ switch (I.getPredicate()) {
+ default:
+ dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
+ llvm_unreachable(nullptr);
+ break;
+ case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
+ break;
+ case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
+ break;
+ case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UEQ: R = executeFCMP_UEQ(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OEQ: R = executeFCMP_OEQ(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UNE: R = executeFCMP_UNE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ONE: R = executeFCMP_ONE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ULT: R = executeFCMP_ULT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OLT: R = executeFCMP_OLT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UGT: R = executeFCMP_UGT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OGT: R = executeFCMP_OGT(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_ULE: R = executeFCMP_ULE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OLE: R = executeFCMP_OLE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
+ case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
+ }
+
+ SetValue(&I, R, SF);
+}
+
+static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
+ GenericValue Src2, Type *Ty) {
+ GenericValue Result;
+ switch (predicate) {
+ case ICmpInst::ICMP_EQ: return executeICMP_EQ(Src1, Src2, Ty);
+ case ICmpInst::ICMP_NE: return executeICMP_NE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_UGT: return executeICMP_UGT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SGT: return executeICMP_SGT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_ULT: return executeICMP_ULT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SLT: return executeICMP_SLT(Src1, Src2, Ty);
+ case ICmpInst::ICMP_UGE: return executeICMP_UGE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SGE: return executeICMP_SGE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_ULE: return executeICMP_ULE(Src1, Src2, Ty);
+ case ICmpInst::ICMP_SLE: return executeICMP_SLE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ORD: return executeFCMP_ORD(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UNO: return executeFCMP_UNO(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OEQ: return executeFCMP_OEQ(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UEQ: return executeFCMP_UEQ(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ONE: return executeFCMP_ONE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UNE: return executeFCMP_UNE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OLT: return executeFCMP_OLT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ULT: return executeFCMP_ULT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OGT: return executeFCMP_OGT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UGT: return executeFCMP_UGT(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OLE: return executeFCMP_OLE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_ULE: return executeFCMP_ULE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_OGE: return executeFCMP_OGE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_UGE: return executeFCMP_UGE(Src1, Src2, Ty);
+ case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
+ case FCmpInst::FCMP_TRUE: return executeFCMP_BOOL(Src1, Src2, Ty, true);
+ default:
+ dbgs() << "Unhandled Cmp predicate\n";
+ llvm_unreachable(nullptr);
+ }
+}
+
+void Interpreter::visitBinaryOperator(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue R; // Result
+
+ // First process vector operation
+ if (Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ R.AggregateVal.resize(Src1.AggregateVal.size());
+
+ // Macros to execute binary operation 'OP' over integer vectors
+#define INTEGER_VECTOR_OPERATION(OP) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].IntVal = \
+ Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;
+
+ // Additional macros to execute binary operations udiv/sdiv/urem/srem since
+ // they have different notation.
+#define INTEGER_VECTOR_FUNCTION(OP) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].IntVal = \
+ Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);
+
+ // Macros to execute binary operation 'OP' over floating point type TY
+ // (float or double) vectors
+#define FLOAT_VECTOR_FUNCTION(OP, TY) \
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i) \
+ R.AggregateVal[i].TY = \
+ Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;
+
+ // Macros to choose appropriate TY: float or double and run operation
+ // execution
+#define FLOAT_VECTOR_OP(OP) { \
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) \
+ FLOAT_VECTOR_FUNCTION(OP, FloatVal) \
+ else { \
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) \
+ FLOAT_VECTOR_FUNCTION(OP, DoubleVal) \
+ else { \
+ dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
+ llvm_unreachable(0); \
+ } \
+ } \
+}
+
+ switch(I.getOpcode()){
+ default:
+ dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+ llvm_unreachable(nullptr);
+ break;
+ case Instruction::Add: INTEGER_VECTOR_OPERATION(+) break;
+ case Instruction::Sub: INTEGER_VECTOR_OPERATION(-) break;
+ case Instruction::Mul: INTEGER_VECTOR_OPERATION(*) break;
+ case Instruction::UDiv: INTEGER_VECTOR_FUNCTION(udiv) break;
+ case Instruction::SDiv: INTEGER_VECTOR_FUNCTION(sdiv) break;
+ case Instruction::URem: INTEGER_VECTOR_FUNCTION(urem) break;
+ case Instruction::SRem: INTEGER_VECTOR_FUNCTION(srem) break;
+ case Instruction::And: INTEGER_VECTOR_OPERATION(&) break;
+ case Instruction::Or: INTEGER_VECTOR_OPERATION(|) break;
+ case Instruction::Xor: INTEGER_VECTOR_OPERATION(^) break;
+ case Instruction::FAdd: FLOAT_VECTOR_OP(+) break;
+ case Instruction::FSub: FLOAT_VECTOR_OP(-) break;
+ case Instruction::FMul: FLOAT_VECTOR_OP(*) break;
+ case Instruction::FDiv: FLOAT_VECTOR_OP(/) break;
+ case Instruction::FRem:
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].FloatVal =
+ fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
+ else {
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+ for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+ R.AggregateVal[i].DoubleVal =
+ fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
+ else {
+ dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+ }
+ break;
+ }
+ } else {
+ switch (I.getOpcode()) {
+ default:
+ dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+ llvm_unreachable(nullptr);
+ break;
+ case Instruction::Add: R.IntVal = Src1.IntVal + Src2.IntVal; break;
+ case Instruction::Sub: R.IntVal = Src1.IntVal - Src2.IntVal; break;
+ case Instruction::Mul: R.IntVal = Src1.IntVal * Src2.IntVal; break;
+ case Instruction::FAdd: executeFAddInst(R, Src1, Src2, Ty); break;
+ case Instruction::FSub: executeFSubInst(R, Src1, Src2, Ty); break;
+ case Instruction::FMul: executeFMulInst(R, Src1, Src2, Ty); break;
+ case Instruction::FDiv: executeFDivInst(R, Src1, Src2, Ty); break;
+ case Instruction::FRem: executeFRemInst(R, Src1, Src2, Ty); break;
+ case Instruction::UDiv: R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
+ case Instruction::SDiv: R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
+ case Instruction::URem: R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
+ case Instruction::SRem: R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
+ case Instruction::And: R.IntVal = Src1.IntVal & Src2.IntVal; break;
+ case Instruction::Or: R.IntVal = Src1.IntVal | Src2.IntVal; break;
+ case Instruction::Xor: R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
+ }
+ }
+ SetValue(&I, R, SF);
+}
+
+static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
+ GenericValue Src3, Type *Ty) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ for (size_t i = 0; i < Src1.AggregateVal.size(); ++i)
+ Dest.AggregateVal[i] = (Src1.AggregateVal[i].IntVal == 0) ?
+ Src3.AggregateVal[i] : Src2.AggregateVal[i];
+ } else {
+ Dest = (Src1.IntVal == 0) ? Src3 : Src2;
+ }
+ return Dest;
+}
+
+void Interpreter::visitSelectInst(SelectInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type * Ty = I.getOperand(0)->getType();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+ GenericValue R = executeSelectInst(Src1, Src2, Src3, Ty);
+ SetValue(&I, R, SF);
+}
+
+//===----------------------------------------------------------------------===//
+// Terminator Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::exitCalled(GenericValue GV) {
+ // runAtExitHandlers() assumes there are no stack frames, but
+ // if exit() was called, then it had a stack frame. Blow away
+ // the stack before interpreting atexit handlers.
+ ECStack.clear();
+ runAtExitHandlers();
+ exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
+}
+
+/// Pop the last stack frame off of ECStack and then copy the result
+/// back into the result variable if we are not returning void. The
+/// result variable may be the ExitValue, or the Value of the calling
+/// CallInst if there was a previous stack frame. This method may
+/// invalidate any ECStack iterators you have. This method also takes
+/// care of switching to the normal destination BB, if we are returning
+/// from an invoke.
+///
+void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
+ GenericValue Result) {
+ // Pop the current stack frame.
+ ECStack.pop_back();
+
+ if (ECStack.empty()) { // Finished main. Put result into exit code...
+ if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
+ ExitValue = Result; // Capture the exit value of the program
+ } else {
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ }
+ } else {
+ // If we have a previous stack frame, and we have a previous call,
+ // fill in the return value...
+ ExecutionContext &CallingSF = ECStack.back();
+ if (CallingSF.Caller) {
+ // Save result...
+ if (!CallingSF.Caller->getType()->isVoidTy())
+ SetValue(CallingSF.Caller, Result, CallingSF);
+ if (InvokeInst *II = dyn_cast<InvokeInst>(CallingSF.Caller))
+ SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
+ CallingSF.Caller = nullptr; // We returned from the call...
+ }
+ }
+}
+
+void Interpreter::visitReturnInst(ReturnInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Type *RetTy = Type::getVoidTy(I.getContext());
+ GenericValue Result;
+
+ // Save away the return value... (if we are not 'ret void')
+ if (I.getNumOperands()) {
+ RetTy = I.getReturnValue()->getType();
+ Result = getOperandValue(I.getReturnValue(), SF);
+ }
+
+ popStackAndReturnValueToCaller(RetTy, Result);
+}
+
+void Interpreter::visitUnreachableInst(UnreachableInst &I) {
+ report_fatal_error("Program executed an 'unreachable' instruction!");
+}
+
+void Interpreter::visitBranchInst(BranchInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ BasicBlock *Dest;
+
+ Dest = I.getSuccessor(0); // Uncond branches have a fixed dest...
+ if (!I.isUnconditional()) {
+ Value *Cond = I.getCondition();
+ if (getOperandValue(Cond, SF).IntVal == 0) // If false cond...
+ Dest = I.getSuccessor(1);
+ }
+ SwitchToNewBasicBlock(Dest, SF);
+}
+
+void Interpreter::visitSwitchInst(SwitchInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Value* Cond = I.getCondition();
+ Type *ElTy = Cond->getType();
+ GenericValue CondVal = getOperandValue(Cond, SF);
+
+ // Check to see if any of the cases match...
+ BasicBlock *Dest = nullptr;
+ for (auto Case : I.cases()) {
+ GenericValue CaseVal = getOperandValue(Case.getCaseValue(), SF);
+ if (executeICMP_EQ(CondVal, CaseVal, ElTy).IntVal != 0) {
+ Dest = cast<BasicBlock>(Case.getCaseSuccessor());
+ break;
+ }
+ }
+ if (!Dest) Dest = I.getDefaultDest(); // No cases matched: use default
+ SwitchToNewBasicBlock(Dest, SF);
+}
+
+void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ void *Dest = GVTOP(getOperandValue(I.getAddress(), SF));
+ SwitchToNewBasicBlock((BasicBlock*)Dest, SF);
+}
+
+
+// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
+// This function handles the actual updating of block and instruction iterators
+// as well as execution of all of the PHI nodes in the destination block.
+//
+// This method does this because all of the PHI nodes must be executed
+// atomically, reading their inputs before any of the results are updated. Not
+// doing this can cause problems if the PHI nodes depend on other PHI nodes for
+// their inputs. If the input PHI node is updated before it is read, incorrect
+// results can happen. Thus we use a two phase approach.
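+//
+// For example, a block that swaps two values through PHI nodes
+// (illustrative IR):
+//   %a = phi i32 [ %b, %pred ]
+//   %b = phi i32 [ %a, %pred ]
+// must read both incoming values before writing either, or the swap is
+// lost.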
+//
+void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
+ BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
+ SF.CurBB = Dest; // Update CurBB to branch destination
+ SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
+
+ if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
+
+ // Loop over all of the PHI nodes in the current block, reading their inputs.
+ std::vector<GenericValue> ResultValues;
+
+ for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
+ // Search for the value corresponding to this previous bb...
+ int i = PN->getBasicBlockIndex(PrevBB);
+ assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
+ Value *IncomingValue = PN->getIncomingValue(i);
+
+ // Save the incoming value for this PHI node...
+ ResultValues.push_back(getOperandValue(IncomingValue, SF));
+ }
+
+ // Now loop over all of the PHI nodes setting their values...
+ SF.CurInst = SF.CurBB->begin();
+ for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
+ PHINode *PN = cast<PHINode>(SF.CurInst);
+ SetValue(PN, ResultValues[i], SF);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Memory Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::visitAllocaInst(AllocaInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ Type *Ty = I.getType()->getElementType(); // Type to be allocated
+
+ // Get the number of elements being allocated by the array...
+ unsigned NumElements =
+ getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
+
+ unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
+
+ // Avoid malloc-ing zero bytes, use max()...
+ unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
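+
+ // E.g. for the hypothetical IR "%p = alloca i32, i32 4" with a 4-byte
+ // i32: NumElements = 4, TypeSize = 4, and MemToAlloc = 16 bytes.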
+
+ // Allocate enough memory to hold the type...
+ void *Memory = safe_malloc(MemToAlloc);
+
+ LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
+ << " bytes) x " << NumElements << " (Total: " << MemToAlloc
+ << ") at " << uintptr_t(Memory) << '\n');
+
+ GenericValue Result = PTOGV(Memory);
+ assert(Result.PointerVal && "Null pointer returned by malloc!");
+ SetValue(&I, Result, SF);
+
+ if (I.getOpcode() == Instruction::Alloca)
+ ECStack.back().Allocas.add(Memory);
+}
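+
+// Illustrative worked example (not part of the original source): for
+//   %p = alloca i32, i32 4
+// NumElements is 4 and TypeSize is 4, so MemToAlloc = max(1, 16) = 16 bytes
+// are malloc'ed, and the block is registered with the frame's AllocaHolder
+// so it is freed when the frame is popped.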
+
+ // executeGEPOperation - The workhorse for getelementptr.
+//
+GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E,
+ ExecutionContext &SF) {
+ assert(Ptr->getType()->isPointerTy() &&
+ "Cannot getElementOffset of a nonpointer type!");
+
+ uint64_t Total = 0;
+
+ for (; I != E; ++I) {
+ if (StructType *STy = I.getStructTypeOrNull()) {
+ const StructLayout *SLO = getDataLayout().getStructLayout(STy);
+
+ const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
+ unsigned Index = unsigned(CPU->getZExtValue());
+
+ Total += SLO->getElementOffset(Index);
+ } else {
+ // Get the index for the array; it must be a 32- or 64-bit integer...
+ GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
+
+ int64_t Idx;
+ unsigned BitWidth =
+ cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
+ if (BitWidth == 32)
+ Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
+ else {
+ assert(BitWidth == 64 && "Invalid index type for getelementptr");
+ Idx = (int64_t)IdxGV.IntVal.getZExtValue();
+ }
+ Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
+ }
+ }
+
+ GenericValue Result;
+ Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
+ LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
+ return Result;
+}
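+
+// Illustrative worked example (not part of the original source), assuming a
+// struct %s = type { i8, i32 } laid out with the i32 at offset 4:
+//   getelementptr %s, %s* %p, i64 1, i32 1
+// adds 1 * getTypeAllocSize(%s) = 8 bytes for the array index, plus
+// getElementOffset(1) = 4 for the struct index, so Total = 12.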
+
+void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeGEPOperation(I.getPointerOperand(),
+ gep_type_begin(I), gep_type_end(I), SF), SF);
+}
+
+void Interpreter::visitLoadInst(LoadInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
+ GenericValue *Ptr = (GenericValue*)GVTOP(SRC);
+ GenericValue Result;
+ LoadValueFromMemory(Result, Ptr, I.getType());
+ SetValue(&I, Result, SF);
+ if (I.isVolatile() && PrintVolatile)
+ dbgs() << "Volatile load " << I;
+}
+
+void Interpreter::visitStoreInst(StoreInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Val = getOperandValue(I.getOperand(0), SF);
+ GenericValue SRC = getOperandValue(I.getPointerOperand(), SF);
+ StoreValueToMemory(Val, (GenericValue *)GVTOP(SRC),
+ I.getOperand(0)->getType());
+ if (I.isVolatile() && PrintVolatile)
+ dbgs() << "Volatile store: " << I;
+}
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+void Interpreter::visitVAStartInst(VAStartInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue ArgIndex;
+ ArgIndex.UIntPairVal.first = ECStack.size() - 1;
+ ArgIndex.UIntPairVal.second = 0;
+ SetValue(&I, ArgIndex, SF);
+}
+
+void Interpreter::visitVAEndInst(VAEndInst &I) {
+ // va_end is a noop for the interpreter
+}
+
+void Interpreter::visitVACopyInst(VACopyInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, getOperandValue(*I.arg_begin(), SF), SF);
+}
+
+void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ // If it is an unknown intrinsic function, use the intrinsic lowering
+ // class to transform it into hopefully tasty LLVM code.
+ //
+ BasicBlock::iterator Me(&I);
+ BasicBlock *Parent = I.getParent();
+ bool atBegin(Parent->begin() == Me);
+ if (!atBegin)
+ --Me;
+ IL->LowerIntrinsicCall(&I);
+
+ // Restore the CurInst pointer to the first instruction newly inserted, if
+ // any.
+ if (atBegin) {
+ SF.CurInst = Parent->begin();
+ } else {
+ SF.CurInst = Me;
+ ++SF.CurInst;
+ }
+}
+
+void Interpreter::visitCallBase(CallBase &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ SF.Caller = &I;
+ std::vector<GenericValue> ArgVals;
+ const unsigned NumArgs = SF.Caller->arg_size();
+ ArgVals.reserve(NumArgs);
+ for (Value *V : SF.Caller->args())
+ ArgVals.push_back(getOperandValue(V, SF));
+
+ // To handle indirect calls, we must get the pointer value from the argument
+ // and treat it as a function pointer.
+ GenericValue SRC = getOperandValue(SF.Caller->getCalledOperand(), SF);
+ callFunction((Function*)GVTOP(SRC), ArgVals);
+}
+
+// auxiliary function for shift operations
+static unsigned getShiftAmount(uint64_t orgShiftAmount,
+ llvm::APInt valueToShift) {
+ unsigned valueWidth = valueToShift.getBitWidth();
+ if (orgShiftAmount < (uint64_t)valueWidth)
+ return orgShiftAmount;
+ // According to the LLVM documentation, if orgShiftAmount >= valueWidth,
+ // the result is undefined, but we do the shift by this rule instead:
+ return (NextPowerOf2(valueWidth-1) - 1) & orgShiftAmount;
+}
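+
+// Illustrative example (not part of the original source): for a 32-bit
+// value, NextPowerOf2(31) - 1 = 31, so an oversized shift amount such as
+// 33 is masked to 33 & 31 = 1, while in-range amounts are returned as-is.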
+
+
+void Interpreter::visitShl(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.shl(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitLShr(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ uint32_t src1Size = uint32_t(Src1.AggregateVal.size());
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.lshr(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitAShr(BinaryOperator &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+ Type *Ty = I.getType();
+
+ if (Ty->isVectorTy()) {
+ size_t src1Size = Src1.AggregateVal.size();
+ assert(src1Size == Src2.AggregateVal.size());
+ for (unsigned i = 0; i < src1Size; i++) {
+ GenericValue Result;
+ uint64_t shiftAmount = Src2.AggregateVal[i].IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.AggregateVal[i].IntVal;
+ Result.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
+ Dest.AggregateVal.push_back(Result);
+ }
+ } else {
+ // scalar
+ uint64_t shiftAmount = Src2.IntVal.getZExtValue();
+ llvm::APInt valueToShift = Src1.IntVal;
+ Dest.IntVal = valueToShift.ashr(getShiftAmount(shiftAmount, valueToShift));
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ Type *SrcTy = SrcVal->getType();
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned NumElts = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(NumElts);
+ for (unsigned i = 0; i < NumElts; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.trunc(DBitWidth);
+ } else {
+ IntegerType *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.trunc(DBitWidth);
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.sext(DBitWidth);
+ } else {
+ auto *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.sext(DBitWidth);
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ if (SrcTy->isVectorTy()) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = Src.AggregateVal[i].IntVal.zext(DBitWidth);
+ } else {
+ auto *DITy = cast<IntegerType>(DstTy);
+ unsigned DBitWidth = DITy->getBitWidth();
+ Dest.IntVal = Src.IntVal.zext(DBitWidth);
+ }
+ return Dest;
+}
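+
+// Illustrative example (not part of the original source) of the three
+// integer conversions above, for an i8 source value 0xF0:
+//   trunc to i4  -> 0x0    (only the low bits are kept)
+//   sext  to i16 -> 0xFFF0 (the sign bit is replicated)
+//   zext  to i16 -> 0x00F0 (the high bits are zero-filled)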
+
+GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcVal->getType())) {
+ assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
+ DstTy->getScalarType()->isFloatTy() &&
+ "Invalid FPTrunc instruction");
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal = (float)Src.AggregateVal[i].DoubleVal;
+ } else {
+ assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
+ "Invalid FPTrunc instruction");
+ Dest.FloatVal = (float)Src.DoubleVal;
+ }
+
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcVal->getType())) {
+ assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
+ DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
+
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal = (double)Src.AggregateVal[i].FloatVal;
+ } else {
+ assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
+ "Invalid FPExt instruction");
+ Dest.DoubleVal = (double)Src.FloatVal;
+ }
+
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcTy)) {
+ Type *DstVecTy = DstTy->getScalarType();
+ Type *SrcVecTy = SrcTy->getScalarType();
+ uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal.
+ Dest.AggregateVal.resize(size);
+
+ if (SrcVecTy->getTypeID() == Type::FloatTyID) {
+ assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
+ Src.AggregateVal[i].FloatVal, DBitWidth);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
+ Src.AggregateVal[i].DoubleVal, DBitWidth);
+ }
+ } else {
+ // scalar
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
+
+ if (SrcTy->getTypeID() == Type::FloatTyID)
+ Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+ else {
+ Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+ }
+ }
+
+ return Dest;
+}
+
+GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcTy)) {
+ Type *DstVecTy = DstTy->getScalarType();
+ Type *SrcVecTy = SrcTy->getScalarType();
+ uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (SrcVecTy->getTypeID() == Type::FloatTyID) {
+ assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
+ Src.AggregateVal[i].FloatVal, DBitWidth);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
+ Src.AggregateVal[i].DoubleVal, DBitWidth);
+ }
+ } else {
+ // scalar
+ unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
+
+ if (SrcTy->getTypeID() == Type::FloatTyID)
+ Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+ else {
+ Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+ }
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcVal->getType())) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (DstVecTy->getTypeID() == Type::FloatTyID) {
+ assert(DstVecTy->isFloatingPointTy() && "Invalid UIToFP instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal =
+ APIntOps::RoundAPIntToFloat(Src.AggregateVal[i].IntVal);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ APIntOps::RoundAPIntToDouble(Src.AggregateVal[i].IntVal);
+ }
+ } else {
+ // scalar
+ assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
+ if (DstTy->getTypeID() == Type::FloatTyID)
+ Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
+ else {
+ Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
+ }
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcVal->getType())) {
+ Type *DstVecTy = DstTy->getScalarType();
+ unsigned size = Src.AggregateVal.size();
+ // the sizes of src and dst vectors must be equal
+ Dest.AggregateVal.resize(size);
+
+ if (DstVecTy->getTypeID() == Type::FloatTyID) {
+ assert(DstVecTy->isFloatingPointTy() && "Invalid SIToFP instruction");
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].FloatVal =
+ APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[i].IntVal);
+ } else {
+ for (unsigned i = 0; i < size; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[i].IntVal);
+ }
+ } else {
+ // scalar
+ assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
+
+ if (DstTy->getTypeID() == Type::FloatTyID)
+ Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
+ else {
+ Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
+ }
+ }
+
+ return Dest;
+}
+
+GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
+
+ Dest.IntVal = APInt(DBitWidth, (intptr_t) Src.PointerVal);
+ return Dest;
+}
+
+GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+ assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
+
+ uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
+ if (PtrSize != Src.IntVal.getBitWidth())
+ Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
+
+ Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
+ return Dest;
+}
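+
+// Illustrative note (not part of the original source): on a target with
+// 64-bit pointers, "inttoptr i32 %x to i8*" first widens %x to 64 bits via
+// zextOrTrunc, so PointerVal carries no stray high bits; an i128 source
+// would be truncated to pointer width the same way.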
+
+GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+
+ // This instruction supports bitwise conversion of vectors to integers and
+ // to vectors of other types (as long as they have the same size)
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcTy) || isa<VectorType>(DstTy)) {
+ // vector src bitcast to vector dst or vector src bitcast to scalar dst or
+ // scalar src bitcast to vector dst
+ bool isLittleEndian = getDataLayout().isLittleEndian();
+ GenericValue TempDst, TempSrc, SrcVec;
+ Type *SrcElemTy;
+ Type *DstElemTy;
+ unsigned SrcBitSize;
+ unsigned DstBitSize;
+ unsigned SrcNum;
+ unsigned DstNum;
+
+ if (isa<VectorType>(SrcTy)) {
+ SrcElemTy = SrcTy->getScalarType();
+ SrcBitSize = SrcTy->getScalarSizeInBits();
+ SrcNum = Src.AggregateVal.size();
+ SrcVec = Src;
+ } else {
+ // if src is scalar value, make it vector <1 x type>
+ SrcElemTy = SrcTy;
+ SrcBitSize = SrcTy->getPrimitiveSizeInBits();
+ SrcNum = 1;
+ SrcVec.AggregateVal.push_back(Src);
+ }
+
+ if (isa<VectorType>(DstTy)) {
+ DstElemTy = DstTy->getScalarType();
+ DstBitSize = DstTy->getScalarSizeInBits();
+ DstNum = (SrcNum * SrcBitSize) / DstBitSize;
+ } else {
+ DstElemTy = DstTy;
+ DstBitSize = DstTy->getPrimitiveSizeInBits();
+ DstNum = 1;
+ }
+
+ if (SrcNum * SrcBitSize != DstNum * DstBitSize)
+ llvm_unreachable("Invalid BitCast");
+
+ // If src is floating point, cast to integer first.
+ TempSrc.AggregateVal.resize(SrcNum);
+ if (SrcElemTy->isFloatTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal =
+ APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
+
+ } else if (SrcElemTy->isDoubleTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal =
+ APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
+ } else if (SrcElemTy->isIntegerTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
+ } else {
+ // Pointers are not allowed as the element type of a vector.
+ llvm_unreachable("Invalid Bitcast");
+ }
+
+ // now TempSrc is integer type vector
+ if (DstNum < SrcNum) {
+ // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
+ unsigned Ratio = SrcNum / DstNum;
+ unsigned SrcElt = 0;
+ for (unsigned i = 0; i < DstNum; i++) {
+ GenericValue Elt;
+ Elt.IntVal = 0;
+ Elt.IntVal = Elt.IntVal.zext(DstBitSize);
+ unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
+ for (unsigned j = 0; j < Ratio; j++) {
+ APInt Tmp;
+ Tmp = Tmp.zext(SrcBitSize);
+ Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
+ Tmp = Tmp.zext(DstBitSize);
+ Tmp <<= ShiftAmt;
+ ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
+ Elt.IntVal |= Tmp;
+ }
+ TempDst.AggregateVal.push_back(Elt);
+ }
+ } else {
+ // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
+ unsigned Ratio = DstNum / SrcNum;
+ for (unsigned i = 0; i < SrcNum; i++) {
+ unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
+ for (unsigned j = 0; j < Ratio; j++) {
+ GenericValue Elt;
+ Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
+ Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
+ Elt.IntVal.lshrInPlace(ShiftAmt);
+ // it could be DstBitSize == SrcBitSize, so check it
+ if (DstBitSize < SrcBitSize)
+ Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
+ ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
+ TempDst.AggregateVal.push_back(Elt);
+ }
+ }
+ }
+
+ // convert result from integer to specified type
+ if (isa<VectorType>(DstTy)) {
+ if (DstElemTy->isDoubleTy()) {
+ Dest.AggregateVal.resize(DstNum);
+ for (unsigned i = 0; i < DstNum; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ TempDst.AggregateVal[i].IntVal.bitsToDouble();
+ } else if (DstElemTy->isFloatTy()) {
+ Dest.AggregateVal.resize(DstNum);
+ for (unsigned i = 0; i < DstNum; i++)
+ Dest.AggregateVal[i].FloatVal =
+ TempDst.AggregateVal[i].IntVal.bitsToFloat();
+ } else {
+ Dest = TempDst;
+ }
+ } else {
+ if (DstElemTy->isDoubleTy())
+ Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
+ else if (DstElemTy->isFloatTy()) {
+ Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
+ } else {
+ Dest.IntVal = TempDst.AggregateVal[0].IntVal;
+ }
+ }
+ } else { // !(isa<VectorType>(SrcTy) || isa<VectorType>(DstTy))
+
+ // scalar src bitcast to scalar dst
+ if (DstTy->isPointerTy()) {
+ assert(SrcTy->isPointerTy() && "Invalid BitCast");
+ Dest.PointerVal = Src.PointerVal;
+ } else if (DstTy->isIntegerTy()) {
+ if (SrcTy->isFloatTy())
+ Dest.IntVal = APInt::floatToBits(Src.FloatVal);
+ else if (SrcTy->isDoubleTy()) {
+ Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
+ } else if (SrcTy->isIntegerTy()) {
+ Dest.IntVal = Src.IntVal;
+ } else {
+ llvm_unreachable("Invalid BitCast");
+ }
+ } else if (DstTy->isFloatTy()) {
+ if (SrcTy->isIntegerTy())
+ Dest.FloatVal = Src.IntVal.bitsToFloat();
+ else {
+ Dest.FloatVal = Src.FloatVal;
+ }
+ } else if (DstTy->isDoubleTy()) {
+ if (SrcTy->isIntegerTy())
+ Dest.DoubleVal = Src.IntVal.bitsToDouble();
+ else {
+ Dest.DoubleVal = Src.DoubleVal;
+ }
+ } else {
+ llvm_unreachable("Invalid Bitcast");
+ }
+ }
+
+ return Dest;
+}
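+
+// Illustrative worked example (not part of the original source) of the
+// vector repacking above. On a little-endian target,
+//   bitcast <4 x i8> <i8 1, i8 2, i8 3, i8 4> to <2 x i16>
+// takes the DstNum < SrcNum branch with Ratio = 2, yielding elements
+// 1 | (2 << 8) = 0x0201 and 3 | (4 << 8) = 0x0403.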
+
+void Interpreter::visitTruncInst(TruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitSExtInst(SExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitZExtInst(ZExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPTruncInst(FPTruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPExtInst(FPExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitUIToFPInst(UIToFPInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitSIToFPInst(SIToFPInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPToUIInst(FPToUIInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPToSIInst(FPToSIInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitBitCastInst(BitCastInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+#define IMPLEMENT_VAARG(TY) \
+ case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
+
+void Interpreter::visitVAArgInst(VAArgInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ // Get the incoming valist parameter. LLI treats the valist as a
+ // (ec-stack-depth var-arg-index) pair.
+ GenericValue VAList = getOperandValue(I.getOperand(0), SF);
+ GenericValue Dest;
+ GenericValue Src = ECStack[VAList.UIntPairVal.first]
+ .VarArgs[VAList.UIntPairVal.second];
+ Type *Ty = I.getType();
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ Dest.IntVal = Src.IntVal;
+ break;
+ IMPLEMENT_VAARG(Pointer);
+ IMPLEMENT_VAARG(Float);
+ IMPLEMENT_VAARG(Double);
+ default:
+ dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+
+ // Set the Value of this Instruction.
+ SetValue(&I, Dest, SF);
+
+ // Move the pointer to the next vararg.
+ ++VAList.UIntPairVal.second;
+}
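+
+// Illustrative note (not part of the original source): the valist is
+// modeled as the pair (execution-stack depth, vararg index), so a valist
+// of (3, 2) reads ECStack[3].VarArgs[2], the third variadic argument of
+// the frame at depth 3.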
+
+void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+
+ Type *Ty = I.getType();
+ const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
+
+ if(Src1.AggregateVal.size() > indx) {
+ switch (Ty->getTypeID()) {
+ default:
+ dbgs() << "Unhandled destination type for extractelement instruction: "
+ << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ break;
+ case Type::IntegerTyID:
+ Dest.IntVal = Src1.AggregateVal[indx].IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
+ break;
+ }
+ } else {
+ dbgs() << "Invalid index in extractelement instruction\n";
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitInsertElementInst(InsertElementInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ VectorType *Ty = cast<VectorType>(I.getType());
+
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+ GenericValue Dest;
+
+ Type *TyContained = Ty->getElementType();
+
+ const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
+ Dest.AggregateVal = Src1.AggregateVal;
+
+ if(Src1.AggregateVal.size() <= indx)
+ llvm_unreachable("Invalid index in insertelement instruction");
+ switch (TyContained->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ case Type::IntegerTyID:
+ Dest.AggregateVal[indx].IntVal = Src2.IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
+ break;
+ }
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
+ ExecutionContext &SF = ECStack.back();
+
+ VectorType *Ty = cast<VectorType>(I.getType());
+
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+
+ // There is no need to check types of src1 and src2, because the compiled
+ // bytecode can't contain different types for src1 and src2 for a
+ // shufflevector instruction.
+
+ Type *TyContained = Ty->getElementType();
+ unsigned src1Size = (unsigned)Src1.AggregateVal.size();
+ unsigned src2Size = (unsigned)Src2.AggregateVal.size();
+ unsigned src3Size = I.getShuffleMask().size();
+
+ Dest.AggregateVal.resize(src3Size);
+
+ switch (TyContained->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ break;
+ case Type::IntegerTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ unsigned j = std::max(0, I.getMaskValue(i));
+ if(j < src1Size)
+ Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
+ else
+ // The selector may not exceed the sum of the lengths of the first and
+ // second operands, and llvm-as should not accept IR like
+ // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
+ // <2 x i32> < i32 0, i32 5 >,
+ // where i32 5 is out of range, but keep this as an additional check:
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ case Type::FloatTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ unsigned j = std::max(0, I.getMaskValue(i));
+ if(j < src1Size)
+ Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
+ else
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ case Type::DoubleTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ unsigned j = std::max(0, I.getMaskValue(i));
+ if(j < src1Size)
+ Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].DoubleVal =
+ Src2.AggregateVal[j-src1Size].DoubleVal;
+ else
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ }
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Value *Agg = I.getAggregateOperand();
+ GenericValue Dest;
+ GenericValue Src = getOperandValue(Agg, SF);
+
+ ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
+ unsigned Num = I.getNumIndices();
+ GenericValue *pSrc = &Src;
+
+ for (unsigned i = 0 ; i < Num; ++i) {
+ pSrc = &pSrc->AggregateVal[*IdxBegin];
+ ++IdxBegin;
+ }
+
+ Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
+ switch (IndexedType->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for extractelement instruction");
+ break;
+ case Type::IntegerTyID:
+ Dest.IntVal = pSrc->IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.FloatVal = pSrc->FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = pSrc->DoubleVal;
+ break;
+ case Type::ArrayTyID:
+ case Type::StructTyID:
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID:
+ Dest.AggregateVal = pSrc->AggregateVal;
+ break;
+ case Type::PointerTyID:
+ Dest.PointerVal = pSrc->PointerVal;
+ break;
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+void Interpreter::visitInsertValueInst(InsertValueInst &I) {
+
+ ExecutionContext &SF = ECStack.back();
+ Value *Agg = I.getAggregateOperand();
+
+ GenericValue Src1 = getOperandValue(Agg, SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest = Src1; // Dest is a slightly changed Src1
+
+ ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
+ unsigned Num = I.getNumIndices();
+
+ GenericValue *pDest = &Dest;
+ for (unsigned i = 0 ; i < Num; ++i) {
+ pDest = &pDest->AggregateVal[*IdxBegin];
+ ++IdxBegin;
+ }
+ // pDest points to the target value in the Dest now
+
+ Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
+
+ switch (IndexedType->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ break;
+ case Type::IntegerTyID:
+ pDest->IntVal = Src2.IntVal;
+ break;
+ case Type::FloatTyID:
+ pDest->FloatVal = Src2.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ pDest->DoubleVal = Src2.DoubleVal;
+ break;
+ case Type::ArrayTyID:
+ case Type::StructTyID:
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID:
+ pDest->AggregateVal = Src2.AggregateVal;
+ break;
+ case Type::PointerTyID:
+ pDest->PointerVal = Src2.PointerVal;
+ break;
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
+ ExecutionContext &SF) {
+ switch (CE->getOpcode()) {
+ case Instruction::Trunc:
+ return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::ZExt:
+ return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::SExt:
+ return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPTrunc:
+ return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPExt:
+ return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::UIToFP:
+ return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::SIToFP:
+ return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPToUI:
+ return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPToSI:
+ return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::PtrToInt:
+ return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::IntToPtr:
+ return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::BitCast:
+ return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::GetElementPtr:
+ return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
+ gep_type_end(CE), SF);
+ case Instruction::FCmp:
+ case Instruction::ICmp:
+ return executeCmpInst(CE->getPredicate(),
+ getOperandValue(CE->getOperand(0), SF),
+ getOperandValue(CE->getOperand(1), SF),
+ CE->getOperand(0)->getType());
+ case Instruction::Select:
+ return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
+ getOperandValue(CE->getOperand(1), SF),
+ getOperandValue(CE->getOperand(2), SF),
+ CE->getOperand(0)->getType());
+ default :
+ break;
+ }
+
+ // The cases below here require a GenericValue parameter for the result
+ // so we initialize one, compute it and then return it.
+ GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
+ GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
+ GenericValue Dest;
+ Type * Ty = CE->getOperand(0)->getType();
+ switch (CE->getOpcode()) {
+ case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
+ case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
+ case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
+ case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
+ case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
+ case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
+ case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
+ case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
+ case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
+ case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
+ case Instruction::Shl:
+ Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
+ break;
+ case Instruction::LShr:
+ Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
+ break;
+ case Instruction::AShr:
+ Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
+ break;
+ default:
+ dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
+ llvm_unreachable("Unhandled ConstantExpr");
+ }
+ return Dest;
+}
+
+GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ return getConstantExprValue(CE, SF);
+ } else if (Constant *CPV = dyn_cast<Constant>(V)) {
+ return getConstantValue(CPV);
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ return PTOGV(getPointerToGlobal(GV));
+ } else {
+ return SF.Values[V];
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Dispatch and Execution Code
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// callFunction - Execute the specified function...
+//
+void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
+ assert((ECStack.empty() || !ECStack.back().Caller ||
+ ECStack.back().Caller->arg_size() == ArgVals.size()) &&
+ "Incorrect number of arguments passed into function call!");
+ // Make a new stack frame... and fill it in.
+ ECStack.emplace_back();
+ ExecutionContext &StackFrame = ECStack.back();
+ StackFrame.CurFunction = F;
+
+ // Special handling for external functions.
+ if (F->isDeclaration()) {
+ GenericValue Result = callExternalFunction (F, ArgVals);
+ // Simulate a 'ret' instruction of the appropriate type.
+ popStackAndReturnValueToCaller (F->getReturnType (), Result);
+ return;
+ }
+
+ // Get pointers to first LLVM BB & Instruction in function.
+ StackFrame.CurBB = &F->front();
+ StackFrame.CurInst = StackFrame.CurBB->begin();
+
+ // Run through the function arguments and initialize their values...
+ assert((ArgVals.size() == F->arg_size() ||
+ (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
+ "Invalid number of values passed to function invocation!");
+
+ // Handle non-varargs arguments...
+ unsigned i = 0;
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+ AI != E; ++AI, ++i)
+ SetValue(&*AI, ArgVals[i], StackFrame);
+
+ // Handle varargs arguments...
+ StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
+}
+
+
+void Interpreter::run() {
+ while (!ECStack.empty()) {
+ // Interpret a single instruction & increment the "PC".
+ ExecutionContext &SF = ECStack.back(); // Current stack frame
+ Instruction &I = *SF.CurInst++; // Increment before execute
+
+ // Track the number of dynamic instructions executed.
+ ++NumDynamicInsts;
+
+ LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n");
+ visit(I); // Dispatch to one of the visit* methods...
+ }
+}
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
new file mode 100644
index 00000000000..3aa77557862
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -0,0 +1,509 @@
+//===-- ExternalFunctions.cpp - Implement External Functions --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains both the code that invokes "external" functions and the
+// code that implements "exported" external functions.
+//
+// There are currently two mechanisms for handling external functions in the
+// Interpreter. The first is to implement lle_* wrapper functions that are
+// specific to well-known library functions which manually translate the
+// arguments from GenericValues and make the call. If such a wrapper does
+// not exist, and libffi is available, then the Interpreter will attempt to
+// invoke the function using libffi, after finding its address.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Config/config.h" // Detect libffi
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cmath>
+#include <csignal>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <map>
+#include <mutex>
+#include <string>
+#include <utility>
+#include <vector>
+
+#ifdef HAVE_FFI_CALL
+#ifdef HAVE_FFI_H
+#include <ffi.h>
+#define USE_LIBFFI
+#elif HAVE_FFI_FFI_H
+#include <ffi/ffi.h>
+#define USE_LIBFFI
+#endif
+#endif
+
+using namespace llvm;
+
+static ManagedStatic<sys::Mutex> FunctionsLock;
+
+typedef GenericValue (*ExFunc)(FunctionType *, ArrayRef<GenericValue>);
+static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
+static ManagedStatic<std::map<std::string, ExFunc> > FuncNames;
+
+#ifdef USE_LIBFFI
+typedef void (*RawFunc)();
+static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions;
+#endif
+
+static Interpreter *TheInterpreter;
+
+static char getTypeID(Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return 'V';
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 1: return 'o';
+ case 8: return 'B';
+ case 16: return 'S';
+ case 32: return 'I';
+ case 64: return 'L';
+ default: return 'N';
+ }
+ case Type::FloatTyID: return 'F';
+ case Type::DoubleTyID: return 'D';
+ case Type::PointerTyID: return 'P';
+ case Type::FunctionTyID:return 'M';
+ case Type::StructTyID: return 'T';
+ case Type::ArrayTyID: return 'A';
+ default: return 'U';
+ }
+}
+
+// Try to find the address of an external function given a Function object.
+// Note that the interpreter does not know how to assemble a real call in the
+// general case (that is the JIT's job), so it assumes that all external
+// functions have the same (and fairly generic) signature. Typical examples
+// of such functions are the "lle_X_" ones.
+static ExFunc lookupFunction(const Function *F) {
+ // Function not found, look it up... start by figuring out what the
+ // composite function name should be.
+ std::string ExtName = "lle_";
+ FunctionType *FT = F->getFunctionType();
+ ExtName += getTypeID(FT->getReturnType());
+ for (Type *T : FT->params())
+ ExtName += getTypeID(T);
+ ExtName += ("_" + F->getName()).str();
+
+ sys::ScopedLock Writer(*FunctionsLock);
+ ExFunc FnPtr = (*FuncNames)[ExtName];
+ if (!FnPtr)
+ FnPtr = (*FuncNames)[("lle_X_" + F->getName()).str()];
+ if (!FnPtr) // Try calling a generic function... if it exists...
+ FnPtr = (ExFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
+ ("lle_X_" + F->getName()).str());
+ if (FnPtr)
+ ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
+ return FnPtr;
+}
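+
+// Illustrative example (not part of the original source) of the naming
+// scheme above: for "int putchar(int)" the composite name is
+// "lle_II_putchar" ('I' for each i32); if that is absent, the lookup falls
+// back to "lle_X_putchar" in FuncNames and then to a process-wide symbol
+// search via DynamicLibrary.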
+
+#ifdef USE_LIBFFI
+static ffi_type *ffiTypeFor(Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return &ffi_type_void;
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 8: return &ffi_type_sint8;
+ case 16: return &ffi_type_sint16;
+ case 32: return &ffi_type_sint32;
+ case 64: return &ffi_type_sint64;
+ }
+ break; // Unhandled integer widths fall through to the fatal error below.
+ case Type::FloatTyID: return &ffi_type_float;
+ case Type::DoubleTyID: return &ffi_type_double;
+ case Type::PointerTyID: return &ffi_type_pointer;
+ default: break;
+ }
+ // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+ report_fatal_error("Type could not be mapped for use with libffi.");
+ return NULL;
+}
+
+static void *ffiValueFor(Type *Ty, const GenericValue &AV,
+ void *ArgDataPtr) {
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 8: {
+ int8_t *I8Ptr = (int8_t *) ArgDataPtr;
+ *I8Ptr = (int8_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 16: {
+ int16_t *I16Ptr = (int16_t *) ArgDataPtr;
+ *I16Ptr = (int16_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 32: {
+ int32_t *I32Ptr = (int32_t *) ArgDataPtr;
+ *I32Ptr = (int32_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 64: {
+ int64_t *I64Ptr = (int64_t *) ArgDataPtr;
+ *I64Ptr = (int64_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ }
+ break; // Unhandled integer widths fall through to the fatal error below.
+ case Type::FloatTyID: {
+ float *FloatPtr = (float *) ArgDataPtr;
+ *FloatPtr = AV.FloatVal;
+ return ArgDataPtr;
+ }
+ case Type::DoubleTyID: {
+ double *DoublePtr = (double *) ArgDataPtr;
+ *DoublePtr = AV.DoubleVal;
+ return ArgDataPtr;
+ }
+ case Type::PointerTyID: {
+ void **PtrPtr = (void **) ArgDataPtr;
+ *PtrPtr = GVTOP(AV);
+ return ArgDataPtr;
+ }
+ default: break;
+ }
+ // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+ report_fatal_error("Type value could not be mapped for use with libffi.");
+ return NULL;
+}
+
+static bool ffiInvoke(RawFunc Fn, Function *F, ArrayRef<GenericValue> ArgVals,
+ const DataLayout &TD, GenericValue &Result) {
+ ffi_cif cif;
+ FunctionType *FTy = F->getFunctionType();
+ const unsigned NumArgs = F->arg_size();
+
+ // TODO: We don't have type information about the remaining arguments, because
+ // this information is never passed into ExecutionEngine::runFunction().
+ if (ArgVals.size() > NumArgs && F->isVarArg()) {
+ report_fatal_error("Calling external var arg function '" + F->getName()
+ + "' is not supported by the Interpreter.");
+ }
+
+ unsigned ArgBytes = 0;
+
+ std::vector<ffi_type*> args(NumArgs);
+ for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ const unsigned ArgNo = A->getArgNo();
+ Type *ArgTy = FTy->getParamType(ArgNo);
+ args[ArgNo] = ffiTypeFor(ArgTy);
+ ArgBytes += TD.getTypeStoreSize(ArgTy);
+ }
+
+ SmallVector<uint8_t, 128> ArgData;
+ ArgData.resize(ArgBytes);
+ uint8_t *ArgDataPtr = ArgData.data();
+ SmallVector<void*, 16> values(NumArgs);
+ for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ const unsigned ArgNo = A->getArgNo();
+ Type *ArgTy = FTy->getParamType(ArgNo);
+ values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
+ ArgDataPtr += TD.getTypeStoreSize(ArgTy);
+ }
+
+ Type *RetTy = FTy->getReturnType();
+ ffi_type *rtype = ffiTypeFor(RetTy);
+
+ if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, args.data()) ==
+ FFI_OK) {
+ SmallVector<uint8_t, 128> ret;
+ if (RetTy->getTypeID() != Type::VoidTyID)
+ ret.resize(TD.getTypeStoreSize(RetTy));
+ ffi_call(&cif, Fn, ret.data(), values.data());
+ switch (RetTy->getTypeID()) {
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(RetTy)->getBitWidth()) {
+ case 8: Result.IntVal = APInt(8 , *(int8_t *) ret.data()); break;
+ case 16: Result.IntVal = APInt(16, *(int16_t*) ret.data()); break;
+ case 32: Result.IntVal = APInt(32, *(int32_t*) ret.data()); break;
+ case 64: Result.IntVal = APInt(64, *(int64_t*) ret.data()); break;
+ }
+ break;
+ case Type::FloatTyID: Result.FloatVal = *(float *) ret.data(); break;
+ case Type::DoubleTyID: Result.DoubleVal = *(double*) ret.data(); break;
+ case Type::PointerTyID: Result.PointerVal = *(void **) ret.data(); break;
+ default: break;
+ }
+ return true;
+ }
+
+ return false;
+}
+#endif // USE_LIBFFI
+
+GenericValue Interpreter::callExternalFunction(Function *F,
+ ArrayRef<GenericValue> ArgVals) {
+ TheInterpreter = this;
+
+ std::unique_lock<sys::Mutex> Guard(*FunctionsLock);
+
+ // Do a lookup to see if the function is in our cache... this should just be a
+ // deferred annotation!
+ std::map<const Function *, ExFunc>::iterator FI = ExportedFunctions->find(F);
+ if (ExFunc Fn = (FI == ExportedFunctions->end()) ? lookupFunction(F)
+ : FI->second) {
+ Guard.unlock();
+ return Fn(F->getFunctionType(), ArgVals);
+ }
+
+#ifdef USE_LIBFFI
+ std::map<const Function *, RawFunc>::iterator RF = RawFunctions->find(F);
+ RawFunc RawFn;
+ if (RF == RawFunctions->end()) {
+ RawFn = (RawFunc)(intptr_t)
+ sys::DynamicLibrary::SearchForAddressOfSymbol(std::string(F->getName()));
+ if (!RawFn)
+ RawFn = (RawFunc)(intptr_t)getPointerToGlobalIfAvailable(F);
+ if (RawFn != 0)
+ RawFunctions->insert(std::make_pair(F, RawFn)); // Cache for later
+ } else {
+ RawFn = RF->second;
+ }
+
+ Guard.unlock();
+
+ GenericValue Result;
+ if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result))
+ return Result;
+#endif // USE_LIBFFI
+
+ if (F->getName() == "__main")
+ errs() << "Tried to execute an unknown external function: "
+ << *F->getType() << " __main\n";
+ else
+ report_fatal_error("Tried to execute an unknown external function: " +
+ F->getName());
+#ifndef USE_LIBFFI
+ errs() << "Recompiling LLVM with --enable-libffi might help.\n";
+#endif
+ return GenericValue();
+}
+
+//===----------------------------------------------------------------------===//
+// Functions "exported" to the running application...
+//
+
+// void atexit(Function*)
+static GenericValue lle_X_atexit(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ assert(Args.size() == 1);
+ TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+// void exit(int)
+static GenericValue lle_X_exit(FunctionType *FT, ArrayRef<GenericValue> Args) {
+ TheInterpreter->exitCalled(Args[0]);
+ return GenericValue();
+}
+
+// void abort(void)
+static GenericValue lle_X_abort(FunctionType *FT, ArrayRef<GenericValue> Args) {
+ //FIXME: should we report or raise here?
+ //report_fatal_error("Interpreted program raised SIGABRT");
+ raise (SIGABRT);
+ return GenericValue();
+}
+
+// int sprintf(char *, const char *, ...) - a very rough implementation to make
+// output useful.
+static GenericValue lle_X_sprintf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ char *OutputBuffer = (char *)GVTOP(Args[0]);
+ const char *FmtStr = (const char *)GVTOP(Args[1]);
+ unsigned ArgNo = 2;
+
+ // printf should return # chars printed. This is completely incorrect, but
+ // close enough for now.
+ GenericValue GV;
+ GV.IntVal = APInt(32, strlen(FmtStr));
+ while (true) {
+ switch (*FmtStr) {
+ case 0: return GV; // Null terminator...
+ default: // Normal nonspecial character
+ sprintf(OutputBuffer++, "%c", *FmtStr++);
+ break;
+ case '\\': { // Handle escape codes
+ sprintf(OutputBuffer, "%c%c", *FmtStr, *(FmtStr+1));
+ FmtStr += 2; OutputBuffer += 2;
+ break;
+ }
+ case '%': { // Handle format specifiers
+ char FmtBuf[100] = "", Buffer[1000] = "";
+ char *FB = FmtBuf;
+ *FB++ = *FmtStr++;
+ char Last = *FB++ = *FmtStr++;
+ unsigned HowLong = 0;
+ while (Last != 'c' && Last != 'd' && Last != 'i' && Last != 'u' &&
+ Last != 'o' && Last != 'x' && Last != 'X' && Last != 'e' &&
+ Last != 'E' && Last != 'g' && Last != 'G' && Last != 'f' &&
+ Last != 'p' && Last != 's' && Last != '%') {
+ if (Last == 'l' || Last == 'L') HowLong++; // Keep track of l's
+ Last = *FB++ = *FmtStr++;
+ }
+ *FB = 0;
+
+ switch (Last) {
+ case '%':
+ memcpy(Buffer, "%", 2); break;
+ case 'c':
+ sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+ break;
+ case 'd': case 'i':
+ case 'u': case 'o':
+ case 'x': case 'X':
+ if (HowLong >= 1) {
+ if (HowLong == 1 &&
+ TheInterpreter->getDataLayout().getPointerSizeInBits() == 64 &&
+ sizeof(long) < sizeof(int64_t)) {
+ // Make sure we use %lld with a 64 bit argument because we might be
+ // compiling LLI on a 32 bit compiler.
+ unsigned Size = strlen(FmtBuf);
+ FmtBuf[Size] = FmtBuf[Size-1];
+ FmtBuf[Size+1] = 0;
+ FmtBuf[Size-1] = 'l';
+ }
+ sprintf(Buffer, FmtBuf, Args[ArgNo++].IntVal.getZExtValue());
+ } else
+ sprintf(Buffer, FmtBuf,uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+ break;
+ case 'e': case 'E': case 'g': case 'G': case 'f':
+ sprintf(Buffer, FmtBuf, Args[ArgNo++].DoubleVal); break;
+ case 'p':
+ sprintf(Buffer, FmtBuf, (void*)GVTOP(Args[ArgNo++])); break;
+ case 's':
+ sprintf(Buffer, FmtBuf, (char*)GVTOP(Args[ArgNo++])); break;
+ default:
+ errs() << "<unknown printf code '" << *FmtStr << "'!>";
+ ArgNo++; break;
+ }
+ size_t Len = strlen(Buffer);
+ memcpy(OutputBuffer, Buffer, Len + 1);
+ OutputBuffer += Len;
+ }
+ break;
+ }
+ }
+ return GV;
+}
+
+// int printf(const char *, ...) - a very rough implementation to make output
+// useful.
+static GenericValue lle_X_printf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ char Buffer[10000];
+ std::vector<GenericValue> NewArgs;
+ NewArgs.push_back(PTOGV((void*)&Buffer[0]));
+ llvm::append_range(NewArgs, Args);
+ GenericValue GV = lle_X_sprintf(FT, NewArgs);
+ outs() << Buffer;
+ return GV;
+}
+
+// int sscanf(const char *str, const char *format, ...);
+static GenericValue lle_X_sscanf(FunctionType *FT,
+ ArrayRef<GenericValue> args) {
+ assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
+
+ char *Args[10];
+ for (unsigned i = 0; i < args.size(); ++i)
+ Args[i] = (char*)GVTOP(args[i]);
+
+ GenericValue GV;
+ GV.IntVal = APInt(32, sscanf(Args[0], Args[1], Args[2], Args[3], Args[4],
+ Args[5], Args[6], Args[7], Args[8], Args[9]));
+ return GV;
+}
+
+// int scanf(const char *format, ...);
+static GenericValue lle_X_scanf(FunctionType *FT, ArrayRef<GenericValue> args) {
+ assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
+
+ char *Args[10];
+ for (unsigned i = 0; i < args.size(); ++i)
+ Args[i] = (char*)GVTOP(args[i]);
+
+ GenericValue GV;
+ GV.IntVal = APInt(32, scanf( Args[0], Args[1], Args[2], Args[3], Args[4],
+ Args[5], Args[6], Args[7], Args[8], Args[9]));
+ return GV;
+}
+
+// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
+// output useful.
+static GenericValue lle_X_fprintf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ assert(Args.size() >= 2);
+ char Buffer[10000];
+ std::vector<GenericValue> NewArgs;
+ NewArgs.push_back(PTOGV(Buffer));
+ NewArgs.insert(NewArgs.end(), Args.begin()+1, Args.end());
+ GenericValue GV = lle_X_sprintf(FT, NewArgs);
+
+ fputs(Buffer, (FILE *) GVTOP(Args[0]));
+ return GV;
+}
+
+static GenericValue lle_X_memset(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ int val = (int)Args[1].IntVal.getSExtValue();
+ size_t len = (size_t)Args[2].IntVal.getZExtValue();
+ memset((void *)GVTOP(Args[0]), val, len);
+ // llvm.memset.* returns void, lle_X_* returns GenericValue,
+ // so here we return GenericValue with IntVal set to zero
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+static GenericValue lle_X_memcpy(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ memcpy(GVTOP(Args[0]), GVTOP(Args[1]),
+ (size_t)(Args[2].IntVal.getLimitedValue()));
+
+ // llvm.memcpy* returns void, lle_X_* returns GenericValue,
+ // so here we return GenericValue with IntVal set to zero
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+void Interpreter::initializeExternalFunctions() {
+ sys::ScopedLock Writer(*FunctionsLock);
+ (*FuncNames)["lle_X_atexit"] = lle_X_atexit;
+ (*FuncNames)["lle_X_exit"] = lle_X_exit;
+ (*FuncNames)["lle_X_abort"] = lle_X_abort;
+
+ (*FuncNames)["lle_X_printf"] = lle_X_printf;
+ (*FuncNames)["lle_X_sprintf"] = lle_X_sprintf;
+ (*FuncNames)["lle_X_sscanf"] = lle_X_sscanf;
+ (*FuncNames)["lle_X_scanf"] = lle_X_scanf;
+ (*FuncNames)["lle_X_fprintf"] = lle_X_fprintf;
+ (*FuncNames)["lle_X_memset"] = lle_X_memset;
+ (*FuncNames)["lle_X_memcpy"] = lle_X_memcpy;
+}
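+
+// A minimal hedged sketch (not part of the original source) of how a
+// host-provided handler could be registered through the same table; the
+// name lle_X_myhook and its body are hypothetical.
+//
+//   static GenericValue lle_X_myhook(FunctionType *FT,
+//                                    ArrayRef<GenericValue> Args) {
+//     GenericValue GV;
+//     GV.IntVal = APInt(32, 0); // behave like an int function returning 0
+//     return GV;
+//   }
+//   ...
+//   (*FuncNames)["lle_X_myhook"] = lle_X_myhook;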
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Interpreter.cpp
new file mode 100644
index 00000000000..5727f7adb49
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -0,0 +1,102 @@
+//===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top-level functionality for the LLVM interpreter.
+// This interpreter is designed to be a very simple, portable, inefficient
+// interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
+#include <cstring>
+using namespace llvm;
+
+namespace {
+
+static struct RegisterInterp {
+ RegisterInterp() { Interpreter::Register(); }
+} InterpRegistrator;
+
+}
+
+extern "C" void LLVMLinkInInterpreter() { }
+
+/// Create a new interpreter object.
+///
+ExecutionEngine *Interpreter::create(std::unique_ptr<Module> M,
+ std::string *ErrStr) {
+ // Tell this Module to materialize everything and release the GVMaterializer.
+ if (Error Err = M->materializeAll()) {
+ std::string Msg;
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ Msg = EIB.message();
+ });
+ if (ErrStr)
+ *ErrStr = Msg;
+ // We got an error; just return nullptr.
+ return nullptr;
+ }
+
+ return new Interpreter(std::move(M));
+}
+
+//===----------------------------------------------------------------------===//
+// Interpreter ctor - Initialize stuff
+//
+Interpreter::Interpreter(std::unique_ptr<Module> M)
+ : ExecutionEngine(std::move(M)) {
+
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ // Initialize the "backend"
+ initializeExecutionEngine();
+ initializeExternalFunctions();
+ emitGlobals();
+
+ IL = new IntrinsicLowering(getDataLayout());
+}
+
+Interpreter::~Interpreter() {
+ delete IL;
+}
+
+void Interpreter::runAtExitHandlers () {
+ while (!AtExitHandlers.empty()) {
+ callFunction(AtExitHandlers.back(), None);
+ AtExitHandlers.pop_back();
+ run();
+ }
+}
+
+/// run - Start execution with the specified function and arguments.
+///
+GenericValue Interpreter::runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) {
+ assert (F && "Function *F was null at entry to run()");
+
+ // Try extra hard not to pass extra args to a function that isn't
+ // expecting them. C programmers frequently bend the rules and
+ // declare main() with fewer parameters than it actually gets
+ // passed, and the interpreter barfs if you pass a function more
+ // parameters than it is declared to take. This does not attempt to
+ // take into account gratuitous differences in declared types,
+ // though.
+ const size_t ArgCount = F->getFunctionType()->getNumParams();
+ ArrayRef<GenericValue> ActualArgs =
+ ArgValues.slice(0, std::min(ArgValues.size(), ArgCount));
+
+ // Set up the function call.
+ callFunction(F, ActualArgs);
+
+ // Start executing the function.
+ run();
+
+ return ExitValue;
+}
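+
+// Usage sketch (editorial): clients normally reach runFunction() through
+// the ExecutionEngine interface rather than constructing an Interpreter
+// directly, e.g.:
+//
+//   EngineBuilder EB(std::move(M));
+//   EB.setEngineKind(EngineKind::Interpreter).setErrorStr(&ErrStr);
+//   std::unique_ptr<ExecutionEngine> EE(EB.create());
+//   if (EE)
+//     GenericValue Result = EE->runFunction(MainFn, Args);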
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Interpreter.h b/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Interpreter.h
new file mode 100644
index 00000000000..fd7fa21df19
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -0,0 +1,235 @@
+//===-- Interpreter.h ------------------------------------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines the interpreter structure
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
+#define LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+namespace llvm {
+
+class IntrinsicLowering;
+template<typename T> class generic_gep_type_iterator;
+class ConstantExpr;
+typedef generic_gep_type_iterator<User::const_op_iterator> gep_type_iterator;
+
+
+// AllocaHolder - Object to track all of the blocks of memory allocated by
+// alloca. When the function returns, this object is popped off the execution
+// stack, which causes the dtor to be run, which frees all the alloca'd memory.
+//
+class AllocaHolder {
+ std::vector<void *> Allocations;
+
+public:
+ AllocaHolder() {}
+
+ // Make this type move-only.
+ AllocaHolder(AllocaHolder &&) = default;
+ AllocaHolder &operator=(AllocaHolder &&RHS) = default;
+
+ ~AllocaHolder() {
+ for (void *Allocation : Allocations)
+ free(Allocation);
+ }
+
+ void add(void *Mem) { Allocations.push_back(Mem); }
+};
+
+typedef std::vector<GenericValue> ValuePlaneTy;
+
+// ExecutionContext struct - This struct represents one stack frame currently
+// executing.
+//
+struct ExecutionContext {
+ Function *CurFunction;// The currently executing function
+ BasicBlock *CurBB; // The currently executing BB
+ BasicBlock::iterator CurInst; // The next instruction to execute
+ CallBase *Caller; // Holds the call that called subframes.
+ // NULL if main func or debugger invoked fn
+ std::map<Value *, GenericValue> Values; // LLVM values used in this invocation
+ std::vector<GenericValue> VarArgs; // Values passed through an ellipsis
+ AllocaHolder Allocas; // Track memory allocated by alloca
+
+ ExecutionContext() : CurFunction(nullptr), CurBB(nullptr), CurInst(nullptr) {}
+};
+
+// Interpreter - This class represents the entirety of the interpreter.
+//
+class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
+ GenericValue ExitValue; // The return value of the called function
+ IntrinsicLowering *IL;
+
+ // The runtime stack of executing code. The top of the stack is the current
+ // function record.
+ std::vector<ExecutionContext> ECStack;
+
+ // AtExitHandlers - List of functions to call when the program exits,
+ // registered with the atexit() library function.
+ std::vector<Function*> AtExitHandlers;
+
+public:
+ explicit Interpreter(std::unique_ptr<Module> M);
+ ~Interpreter() override;
+
+ /// runAtExitHandlers - Run any functions registered by the program's calls to
+ /// atexit(3), which we intercept and store in AtExitHandlers.
+ ///
+ void runAtExitHandlers();
+
+ static void Register() {
+ InterpCtor = create;
+ }
+
+ /// Create an interpreter ExecutionEngine.
+ ///
+ static ExecutionEngine *create(std::unique_ptr<Module> M,
+ std::string *ErrorStr = nullptr);
+
+ /// run - Start execution with the specified function and arguments.
+ ///
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override {
+ // FIXME: not implemented.
+ return nullptr;
+ }
+
+ // Methods used to execute code:
+ // Place a call on the stack
+ void callFunction(Function *F, ArrayRef<GenericValue> ArgVals);
+ void run(); // Execute instructions until nothing left to do
+
+ // Opcode Implementations
+ void visitReturnInst(ReturnInst &I);
+ void visitBranchInst(BranchInst &I);
+ void visitSwitchInst(SwitchInst &I);
+ void visitIndirectBrInst(IndirectBrInst &I);
+
+ void visitUnaryOperator(UnaryOperator &I);
+ void visitBinaryOperator(BinaryOperator &I);
+ void visitICmpInst(ICmpInst &I);
+ void visitFCmpInst(FCmpInst &I);
+ void visitAllocaInst(AllocaInst &I);
+ void visitLoadInst(LoadInst &I);
+ void visitStoreInst(StoreInst &I);
+ void visitGetElementPtrInst(GetElementPtrInst &I);
+ void visitPHINode(PHINode &PN) {
+ llvm_unreachable("PHI nodes already handled!");
+ }
+ void visitTruncInst(TruncInst &I);
+ void visitZExtInst(ZExtInst &I);
+ void visitSExtInst(SExtInst &I);
+ void visitFPTruncInst(FPTruncInst &I);
+ void visitFPExtInst(FPExtInst &I);
+ void visitUIToFPInst(UIToFPInst &I);
+ void visitSIToFPInst(SIToFPInst &I);
+ void visitFPToUIInst(FPToUIInst &I);
+ void visitFPToSIInst(FPToSIInst &I);
+ void visitPtrToIntInst(PtrToIntInst &I);
+ void visitIntToPtrInst(IntToPtrInst &I);
+ void visitBitCastInst(BitCastInst &I);
+ void visitSelectInst(SelectInst &I);
+
+ void visitVAStartInst(VAStartInst &I);
+ void visitVAEndInst(VAEndInst &I);
+ void visitVACopyInst(VACopyInst &I);
+ void visitIntrinsicInst(IntrinsicInst &I);
+ void visitCallBase(CallBase &I);
+ void visitUnreachableInst(UnreachableInst &I);
+
+ void visitShl(BinaryOperator &I);
+ void visitLShr(BinaryOperator &I);
+ void visitAShr(BinaryOperator &I);
+
+ void visitVAArgInst(VAArgInst &I);
+ void visitExtractElementInst(ExtractElementInst &I);
+ void visitInsertElementInst(InsertElementInst &I);
+ void visitShuffleVectorInst(ShuffleVectorInst &I);
+
+ void visitExtractValueInst(ExtractValueInst &I);
+ void visitInsertValueInst(InsertValueInst &I);
+
+ void visitInstruction(Instruction &I) {
+ errs() << I << "\n";
+ llvm_unreachable("Instruction not interpretable yet!");
+ }
+
+ GenericValue callExternalFunction(Function *F,
+ ArrayRef<GenericValue> ArgVals);
+ void exitCalled(GenericValue GV);
+
+ void addAtExitHandler(Function *F) {
+ AtExitHandlers.push_back(F);
+ }
+
+ GenericValue *getFirstVarArg () {
+ return &(ECStack.back ().VarArgs[0]);
+ }
+
+private: // Helper functions
+ GenericValue executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E, ExecutionContext &SF);
+
+ // SwitchToNewBasicBlock - Start execution in a new basic block and run any
+ // PHI nodes in the top of the block. This is used for intraprocedural
+ // control flow.
+ //
+ void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
+
+ void *getPointerToFunction(Function *F) override { return (void*)F; }
+
+ void initializeExecutionEngine() { }
+ void initializeExternalFunctions();
+ GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
+ GenericValue getOperandValue(Value *V, ExecutionContext &SF);
+ GenericValue executeTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeZExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToUIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToSIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeUIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executePtrToIntInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeIntToPtrInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
+ Type *Ty, ExecutionContext &SF);
+ void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);
+
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/BasicGOTAndStubsBuilder.h b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/BasicGOTAndStubsBuilder.h
new file mode 100644
index 00000000000..82258a35a67
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/BasicGOTAndStubsBuilder.h
@@ -0,0 +1,107 @@
+//===--- BasicGOTAndStubsBuilder.h - Generic GOT/Stub creation --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A base for simple GOT and stub creation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_JITLINK_BASICGOTANDSTUBSBUILDER_H
+#define LLVM_LIB_EXECUTIONENGINE_JITLINK_BASICGOTANDSTUBSBUILDER_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+template <typename BuilderImpl> class BasicGOTAndStubsBuilder {
+public:
+ BasicGOTAndStubsBuilder(LinkGraph &G) : G(G) {}
+
+ void run() {
+ // We're going to be adding new blocks, but we don't want to iterate over
+ // the newly added ones, so just copy the existing blocks out.
+ std::vector<Block *> Blocks(G.blocks().begin(), G.blocks().end());
+
+ LLVM_DEBUG(dbgs() << "Creating GOT entries and stubs:\n");
+
+ for (auto *B : Blocks)
+ for (auto &E : B->edges())
+ if (impl().isGOTEdge(E)) {
+ LLVM_DEBUG({
+ dbgs() << " Updating GOT edge ";
+ printEdge(dbgs(), *B, E, "<target GOT>");
+ dbgs() << "\n";
+ });
+ impl().fixGOTEdge(E, getGOTEntrySymbol(E.getTarget()));
+ } else if (impl().isExternalBranchEdge(E)) {
+ LLVM_DEBUG({
+ dbgs() << " Updating external branch edge ";
+ printEdge(dbgs(), *B, E, "<target PC-rel>");
+ dbgs() << "\n";
+ });
+ impl().fixExternalBranchEdge(E, getStubSymbol(E.getTarget()));
+ }
+ }
+
+protected:
+ Symbol &getGOTEntrySymbol(Symbol &Target) {
+ assert(Target.hasName() && "GOT edge cannot point to anonymous target");
+
+ auto GOTEntryI = GOTEntries.find(Target.getName());
+
+ // Build the entry if it doesn't exist.
+ if (GOTEntryI == GOTEntries.end()) {
+ auto &GOTEntry = impl().createGOTEntry(Target);
+ LLVM_DEBUG({
+ dbgs() << " Created GOT entry for " << Target.getName() << ": "
+ << GOTEntry << "\n";
+ });
+ GOTEntryI =
+ GOTEntries.insert(std::make_pair(Target.getName(), &GOTEntry)).first;
+ }
+
+ assert(GOTEntryI != GOTEntries.end() && "Could not get GOT entry symbol");
+ LLVM_DEBUG(
+ { dbgs() << " Using GOT entry " << *GOTEntryI->second << "\n"; });
+ return *GOTEntryI->second;
+ }
+
+ Symbol &getStubSymbol(Symbol &Target) {
+ assert(Target.hasName() &&
+ "External branch edge can not point to an anonymous target");
+ auto StubI = Stubs.find(Target.getName());
+
+ if (StubI == Stubs.end()) {
+ auto &StubSymbol = impl().createStub(Target);
+ LLVM_DEBUG({
+ dbgs() << " Created stub for " << Target.getName() << ": "
+ << StubSymbol << "\n";
+ });
+ StubI = Stubs.insert(std::make_pair(Target.getName(), &StubSymbol)).first;
+ }
+
+ assert(StubI != Stubs.end() && "Count not get stub symbol");
+ LLVM_DEBUG({ dbgs() << " Using stub " << *StubI->second << "\n"; });
+ return *StubI->second;
+ }
+
+ LinkGraph &G;
+
+private:
+ BuilderImpl &impl() { return static_cast<BuilderImpl &>(*this); }
+
+ DenseMap<StringRef, Symbol *> GOTEntries;
+ DenseMap<StringRef, Symbol *> Stubs;
+};
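+
+// Editorial sketch of the CRTP contract implied above (the
+// ELF_x86_64_GOTAndStubsBuilder later in this patch is a concrete
+// instance of it):
+//
+//   class MyBuilder : public BasicGOTAndStubsBuilder<MyBuilder> {
+//   public:
+//     MyBuilder(LinkGraph &G) : BasicGOTAndStubsBuilder<MyBuilder>(G) {}
+//     bool isGOTEdge(Edge &E) const;
+//     Symbol &createGOTEntry(Symbol &Target);
+//     void fixGOTEdge(Edge &E, Symbol &GOTEntry);
+//     bool isExternalBranchEdge(Edge &E);
+//     Symbol &createStub(Symbol &Target);
+//     void fixExternalBranchEdge(Edge &E, Symbol &Stub);
+//   };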
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_JITLINK_BASICGOTANDSTUBSBUILDER_H
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
new file mode 100644
index 00000000000..3602601287f
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
@@ -0,0 +1,781 @@
+//===-------- JITLink_EHFrameSupport.cpp - JITLink eh-frame utils ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "EHFrameSupportImpl.h"
+
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+#include "llvm/Support/DynamicLibrary.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+EHFrameSplitter::EHFrameSplitter(StringRef EHFrameSectionName)
+ : EHFrameSectionName(EHFrameSectionName) {}
+
+Error EHFrameSplitter::operator()(LinkGraph &G) {
+ auto *EHFrame = G.findSectionByName(EHFrameSectionName);
+
+ if (!EHFrame) {
+ LLVM_DEBUG({
+ dbgs() << "EHFrameSplitter: No " << EHFrameSectionName
+ << " section. Nothing to do\n";
+ });
+ return Error::success();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "EHFrameSplitter: Processing " << EHFrameSectionName << "...\n";
+ });
+
+ DenseMap<Block *, LinkGraph::SplitBlockCache> Caches;
+
+ {
+ // Pre-build the split caches.
+ for (auto *B : EHFrame->blocks())
+ Caches[B] = LinkGraph::SplitBlockCache::value_type();
+ for (auto *Sym : EHFrame->symbols())
+ Caches[&Sym->getBlock()]->push_back(Sym);
+ for (auto *B : EHFrame->blocks())
+ llvm::sort(*Caches[B], [](const Symbol *LHS, const Symbol *RHS) {
+ return LHS->getOffset() > RHS->getOffset();
+ });
+ }
+
+ // Iterate over blocks (we do this by iterating over Caches entries rather
+ // than EHFrame->blocks(), as we will be inserting new blocks along the way,
+ // which would invalidate iterators in the latter sequence).
+ for (auto &KV : Caches) {
+ auto &B = *KV.first;
+ auto &BCache = KV.second;
+ if (auto Err = processBlock(G, B, BCache))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+Error EHFrameSplitter::processBlock(LinkGraph &G, Block &B,
+ LinkGraph::SplitBlockCache &Cache) {
+ LLVM_DEBUG({
+ dbgs() << " Processing block at " << formatv("{0:x16}", B.getAddress())
+ << "\n";
+ });
+
+ // eh-frame should not contain zero-fill blocks.
+ if (B.isZeroFill())
+ return make_error<JITLinkError>("Unexpected zero-fill block in " +
+ EHFrameSectionName + " section");
+
+ if (B.getSize() == 0) {
+ LLVM_DEBUG(dbgs() << " Block is empty. Skipping.\n");
+ return Error::success();
+ }
+
+ BinaryStreamReader BlockReader(B.getContent(), G.getEndianness());
+
+ while (true) {
+ uint64_t RecordStartOffset = BlockReader.getOffset();
+
+ LLVM_DEBUG({
+ dbgs() << " Processing CFI record at "
+ << formatv("{0:x16}", B.getAddress()) << "\n";
+ });
+
+ uint32_t Length;
+ if (auto Err = BlockReader.readInteger(Length))
+ return Err;
+ if (Length != 0xffffffff) {
+ if (auto Err = BlockReader.skip(Length))
+ return Err;
+ } else {
+ uint64_t ExtendedLength;
+ if (auto Err = BlockReader.readInteger(ExtendedLength))
+ return Err;
+ if (auto Err = BlockReader.skip(ExtendedLength))
+ return Err;
+ }
+
+ // If this was the last record then there's nothing left to split.
+ if (BlockReader.empty()) {
+ LLVM_DEBUG(dbgs() << " Extracted " << B << "\n");
+ return Error::success();
+ }
+
+ uint64_t BlockSize = BlockReader.getOffset() - RecordStartOffset;
+ auto &NewBlock = G.splitBlock(B, BlockSize);
+ (void)NewBlock;
+ LLVM_DEBUG(dbgs() << " Extracted " << NewBlock << "\n");
+ }
+}
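+
+// Editorial note: the loop above walks standard DWARF CFI framing, in
+// which each record is laid out as
+//
+//   uint32 Length;            // 0xffffffff selects the extended form
+//   [uint64 ExtendedLength;]  // present only in the extended form
+//   <Length bytes of CIE or FDE payload>
+//
+// so G.splitBlock is invoked once per record boundary.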
+
+EHFrameEdgeFixer::EHFrameEdgeFixer(StringRef EHFrameSectionName,
+ unsigned PointerSize, Edge::Kind Delta64,
+ Edge::Kind Delta32, Edge::Kind NegDelta32)
+ : EHFrameSectionName(EHFrameSectionName), PointerSize(PointerSize),
+ Delta64(Delta64), Delta32(Delta32), NegDelta32(NegDelta32) {}
+
+Error EHFrameEdgeFixer::operator()(LinkGraph &G) {
+ auto *EHFrame = G.findSectionByName(EHFrameSectionName);
+
+ if (!EHFrame) {
+ LLVM_DEBUG({
+ dbgs() << "EHFrameEdgeFixer: No " << EHFrameSectionName
+ << " section. Nothing to do\n";
+ });
+ return Error::success();
+ }
+
+ // Check that we support the graph's pointer size.
+ if (G.getPointerSize() != 4 && G.getPointerSize() != 8)
+ return make_error<JITLinkError>(
+ "EHFrameEdgeFixer only supports 32 and 64 bit targets");
+
+ LLVM_DEBUG({
+ dbgs() << "EHFrameEdgeFixer: Processing " << EHFrameSectionName << "...\n";
+ });
+
+ ParseContext PC(G);
+
+ // Build a map of all blocks and symbols in the text sections. We will use
+ // these for finding / building edge targets when processing FDEs.
+ for (auto &Sec : G.sections()) {
+ PC.AddrToSyms.addSymbols(Sec.symbols());
+ if (auto Err = PC.AddrToBlock.addBlocks(Sec.blocks(),
+ BlockAddressMap::includeNonNull))
+ return Err;
+ }
+
+ // Sort eh-frame blocks into address order to ensure we visit CIEs before
+ // their child FDEs.
+ std::vector<Block *> EHFrameBlocks;
+ for (auto *B : EHFrame->blocks())
+ EHFrameBlocks.push_back(B);
+ llvm::sort(EHFrameBlocks, [](const Block *LHS, const Block *RHS) {
+ return LHS->getAddress() < RHS->getAddress();
+ });
+
+ // Loop over the blocks in address order.
+ for (auto *B : EHFrameBlocks)
+ if (auto Err = processBlock(PC, *B))
+ return Err;
+
+ return Error::success();
+}
+
+Error EHFrameEdgeFixer::processBlock(ParseContext &PC, Block &B) {
+
+ LLVM_DEBUG({
+ dbgs() << " Processing block at " << formatv("{0:x16}", B.getAddress())
+ << "\n";
+ });
+
+ // eh-frame should not contain zero-fill blocks.
+ if (B.isZeroFill())
+ return make_error<JITLinkError>("Unexpected zero-fill block in " +
+ EHFrameSectionName + " section");
+
+ if (B.getSize() == 0) {
+ LLVM_DEBUG(dbgs() << " Block is empty. Skipping.\n");
+ return Error::success();
+ }
+
+ // Find the offsets of any existing edges from this block.
+ BlockEdgeMap BlockEdges;
+ for (auto &E : B.edges())
+ if (E.isRelocation()) {
+ if (BlockEdges.count(E.getOffset()))
+ return make_error<JITLinkError>(
+ "Multiple relocations at offset " +
+ formatv("{0:x16}", E.getOffset()) + " in " + EHFrameSectionName +
+ " block at address " + formatv("{0:x16}", B.getAddress()));
+
+ BlockEdges[E.getOffset()] = EdgeTarget(E);
+ }
+
+ CIEInfosMap CIEInfos;
+ BinaryStreamReader BlockReader(B.getContent(), PC.G.getEndianness());
+ while (!BlockReader.empty()) {
+ size_t RecordStartOffset = BlockReader.getOffset();
+
+ LLVM_DEBUG({
+ dbgs() << " Processing CFI record at "
+ << formatv("{0:x16}", B.getAddress() + RecordStartOffset) << "\n";
+ });
+
+ // Get the record length.
+ size_t RecordRemaining;
+ {
+ uint32_t Length;
+ if (auto Err = BlockReader.readInteger(Length))
+ return Err;
+ // If Length < 0xffffffff then use the regular length field, otherwise
+ // read the extended length field.
+ if (Length != 0xffffffff)
+ RecordRemaining = Length;
+ else {
+ uint64_t ExtendedLength;
+ if (auto Err = BlockReader.readInteger(ExtendedLength))
+ return Err;
+ RecordRemaining = ExtendedLength;
+ }
+ }
+
+ if (BlockReader.bytesRemaining() < RecordRemaining)
+ return make_error<JITLinkError>(
+ "Incomplete CFI record at " +
+ formatv("{0:x16}", B.getAddress() + RecordStartOffset));
+
+ // Read the CIE delta for this record.
+ uint64_t CIEDeltaFieldOffset = BlockReader.getOffset() - RecordStartOffset;
+ uint32_t CIEDelta;
+ if (auto Err = BlockReader.readInteger(CIEDelta))
+ return Err;
+
+ if (CIEDelta == 0) {
+ if (auto Err = processCIE(PC, B, RecordStartOffset,
+ CIEDeltaFieldOffset + RecordRemaining,
+ CIEDeltaFieldOffset))
+ return Err;
+ } else {
+ if (auto Err = processFDE(PC, B, RecordStartOffset,
+ CIEDeltaFieldOffset + RecordRemaining,
+ CIEDeltaFieldOffset, CIEDelta, BlockEdges))
+ return Err;
+ }
+
+ // Move to the next record.
+ BlockReader.setOffset(RecordStartOffset + CIEDeltaFieldOffset +
+ RecordRemaining);
+ }
+
+ return Error::success();
+}
+
+Error EHFrameEdgeFixer::processCIE(ParseContext &PC, Block &B,
+ size_t RecordOffset, size_t RecordLength,
+ size_t CIEDeltaFieldOffset) {
+
+ LLVM_DEBUG(dbgs() << " Record is CIE\n");
+
+ auto RecordContent = B.getContent().substr(RecordOffset, RecordLength);
+ BinaryStreamReader RecordReader(RecordContent, PC.G.getEndianness());
+
+ // Skip past the CIE delta field: we've already processed this far.
+ RecordReader.setOffset(CIEDeltaFieldOffset + 4);
+
+ auto &CIESymbol =
+ PC.G.addAnonymousSymbol(B, RecordOffset, RecordLength, false, false);
+ CIEInformation CIEInfo(CIESymbol);
+
+ uint8_t Version = 0;
+ if (auto Err = RecordReader.readInteger(Version))
+ return Err;
+
+ if (Version != 0x01)
+ return make_error<JITLinkError>("Bad CIE version " + Twine(Version) +
+ " (should be 0x01) in eh-frame");
+
+ auto AugInfo = parseAugmentationString(RecordReader);
+ if (!AugInfo)
+ return AugInfo.takeError();
+
+ // Skip the EH Data field if present.
+ if (AugInfo->EHDataFieldPresent)
+ if (auto Err = RecordReader.skip(PC.G.getPointerSize()))
+ return Err;
+
+ // Read and sanity check the code alignment factor.
+ {
+ uint64_t CodeAlignmentFactor = 0;
+ if (auto Err = RecordReader.readULEB128(CodeAlignmentFactor))
+ return Err;
+ if (CodeAlignmentFactor != 1)
+ return make_error<JITLinkError>("Unsupported CIE code alignment factor " +
+ Twine(CodeAlignmentFactor) +
+ " (expected 1)");
+ }
+
+ // Read and sanity check the data alignment factor.
+ {
+ int64_t DataAlignmentFactor = 0;
+ if (auto Err = RecordReader.readSLEB128(DataAlignmentFactor))
+ return Err;
+ if (DataAlignmentFactor != -8)
+ return make_error<JITLinkError>("Unsupported CIE data alignment factor " +
+ Twine(DataAlignmentFactor) +
+ " (expected -8)");
+ }
+
+ // Skip the return address register field.
+ if (auto Err = RecordReader.skip(1))
+ return Err;
+
+ uint64_t AugmentationDataLength = 0;
+ if (auto Err = RecordReader.readULEB128(AugmentationDataLength))
+ return Err;
+
+ uint32_t AugmentationDataStartOffset = RecordReader.getOffset();
+
+ uint8_t *NextField = &AugInfo->Fields[0];
+ while (uint8_t Field = *NextField++) {
+ switch (Field) {
+ case 'L': {
+ CIEInfo.FDEsHaveLSDAField = true;
+ uint8_t LSDAPointerEncoding;
+ if (auto Err = RecordReader.readInteger(LSDAPointerEncoding))
+ return Err;
+ if (!isSupportedPointerEncoding(LSDAPointerEncoding))
+ return make_error<JITLinkError>(
+ "Unsupported LSDA pointer encoding " +
+ formatv("{0:x2}", LSDAPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ CIEInfo.LSDAPointerEncoding = LSDAPointerEncoding;
+ break;
+ }
+ case 'P': {
+ uint8_t PersonalityPointerEncoding = 0;
+ if (auto Err = RecordReader.readInteger(PersonalityPointerEncoding))
+ return Err;
+ if (PersonalityPointerEncoding !=
+ (dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
+ dwarf::DW_EH_PE_sdata4))
+ return make_error<JITLinkError>(
+ "Unspported personality pointer "
+ "encoding " +
+ formatv("{0:x2}", PersonalityPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ uint32_t PersonalityPointerAddress;
+ if (auto Err = RecordReader.readInteger(PersonalityPointerAddress))
+ return Err;
+ break;
+ }
+ case 'R': {
+ uint8_t FDEPointerEncoding;
+ if (auto Err = RecordReader.readInteger(FDEPointerEncoding))
+ return Err;
+ if (!isSupportedPointerEncoding(FDEPointerEncoding))
+ return make_error<JITLinkError>(
+ "Unsupported FDE pointer encoding " +
+ formatv("{0:x2}", FDEPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ CIEInfo.FDEPointerEncoding = FDEPointerEncoding;
+ break;
+ }
+ default:
+ llvm_unreachable("Invalid augmentation string field");
+ }
+ }
+
+ if (RecordReader.getOffset() - AugmentationDataStartOffset >
+ AugmentationDataLength)
+ return make_error<JITLinkError>("Read past the end of the augmentation "
+ "data while parsing fields");
+
+ assert(!PC.CIEInfos.count(CIESymbol.getAddress()) &&
+ "Multiple CIEs recorded at the same address?");
+ PC.CIEInfos[CIESymbol.getAddress()] = std::move(CIEInfo);
+
+ return Error::success();
+}
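+
+// Editorial summary of the CIE fields consumed above (.eh_frame flavor):
+//
+//   uint8   Version;                // must be 0x01 here
+//   char    Augmentation[];         // NUL-terminated, e.g. "zPLR"
+//   [pointer EHData;]               // only if the augmentation had "eh"
+//   ULEB128 CodeAlignmentFactor;    // must be 1 here
+//   SLEB128 DataAlignmentFactor;    // must be -8 here
+//   uint8   ReturnAddressRegister;  // skipped
+//   ULEB128 AugmentationDataLength;
+//   uint8   AugmentationData[];     // 'L'/'P'/'R' pointer encodings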
+
+Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
+ size_t RecordOffset, size_t RecordLength,
+ size_t CIEDeltaFieldOffset,
+ uint32_t CIEDelta,
+ BlockEdgeMap &BlockEdges) {
+ LLVM_DEBUG(dbgs() << " Record is FDE\n");
+
+ JITTargetAddress RecordAddress = B.getAddress() + RecordOffset;
+
+ auto RecordContent = B.getContent().substr(RecordOffset, RecordLength);
+ BinaryStreamReader RecordReader(RecordContent, PC.G.getEndianness());
+
+ // Skip past the CIE delta field: we've already read this far.
+ RecordReader.setOffset(CIEDeltaFieldOffset + 4);
+
+ auto &FDESymbol =
+ PC.G.addAnonymousSymbol(B, RecordOffset, RecordLength, false, false);
+
+ CIEInformation *CIEInfo = nullptr;
+
+ {
+ // Process the CIE pointer field.
+ auto CIEEdgeItr = BlockEdges.find(RecordOffset + CIEDeltaFieldOffset);
+ JITTargetAddress CIEAddress =
+ RecordAddress + CIEDeltaFieldOffset - CIEDelta;
+ if (CIEEdgeItr == BlockEdges.end()) {
+
+ LLVM_DEBUG({
+ dbgs() << " Adding edge at "
+ << formatv("{0:x16}", RecordAddress + CIEDeltaFieldOffset)
+ << " to CIE at: " << formatv("{0:x16}", CIEAddress) << "\n";
+ });
+ if (auto CIEInfoOrErr = PC.findCIEInfo(CIEAddress))
+ CIEInfo = *CIEInfoOrErr;
+ else
+ return CIEInfoOrErr.takeError();
+ assert(CIEInfo->CIESymbol && "CIEInfo has no CIE symbol set");
+ B.addEdge(NegDelta32, RecordOffset + CIEDeltaFieldOffset,
+ *CIEInfo->CIESymbol, 0);
+ } else {
+ LLVM_DEBUG({
+ dbgs() << " Already has edge at "
+ << formatv("{0:x16}", RecordAddress + CIEDeltaFieldOffset)
+ << " to CIE at " << formatv("{0:x16}", CIEAddress) << "\n";
+ });
+ auto &EI = CIEEdgeItr->second;
+ if (EI.Addend)
+ return make_error<JITLinkError>(
+ "CIE edge at " +
+ formatv("{0:x16}", RecordAddress + CIEDeltaFieldOffset) +
+ " has non-zero addend");
+ if (auto CIEInfoOrErr = PC.findCIEInfo(EI.Target->getAddress()))
+ CIEInfo = *CIEInfoOrErr;
+ else
+ return CIEInfoOrErr.takeError();
+ }
+ }
+
+ {
+ // Process the PC-Begin field.
+ Block *PCBeginBlock = nullptr;
+ JITTargetAddress PCBeginFieldOffset = RecordReader.getOffset();
+ auto PCEdgeItr = BlockEdges.find(RecordOffset + PCBeginFieldOffset);
+ if (PCEdgeItr == BlockEdges.end()) {
+ auto PCBeginPtrInfo =
+ readEncodedPointer(CIEInfo->FDEPointerEncoding,
+ RecordAddress + PCBeginFieldOffset, RecordReader);
+ if (!PCBeginPtrInfo)
+ return PCBeginPtrInfo.takeError();
+ JITTargetAddress PCBegin = PCBeginPtrInfo->first;
+ Edge::Kind PCBeginEdgeKind = PCBeginPtrInfo->second;
+ LLVM_DEBUG({
+ dbgs() << " Adding edge at "
+ << formatv("{0:x16}", RecordAddress + PCBeginFieldOffset)
+ << " to PC at " << formatv("{0:x16}", PCBegin) << "\n";
+ });
+ auto PCBeginSym = getOrCreateSymbol(PC, PCBegin);
+ if (!PCBeginSym)
+ return PCBeginSym.takeError();
+ B.addEdge(PCBeginEdgeKind, RecordOffset + PCBeginFieldOffset, *PCBeginSym,
+ 0);
+ PCBeginBlock = &PCBeginSym->getBlock();
+ } else {
+ auto &EI = PCEdgeItr->second;
+ LLVM_DEBUG({
+ dbgs() << " Already has edge at "
+ << formatv("{0:x16}", RecordAddress + PCBeginFieldOffset)
+ << " to PC at " << formatv("{0:x16}", EI.Target->getAddress());
+ if (EI.Addend)
+ dbgs() << " + " << formatv("{0:x16}", EI.Addend);
+ dbgs() << "\n";
+ });
+
+ // Make sure the existing edge points at a defined block.
+ if (!EI.Target->isDefined()) {
+ auto EdgeAddr = RecordAddress + PCBeginFieldOffset;
+ return make_error<JITLinkError>("FDE edge at " +
+ formatv("{0:x16}", EdgeAddr) +
+ " points at external block");
+ }
+ PCBeginBlock = &EI.Target->getBlock();
+ if (auto Err = RecordReader.skip(
+ getPointerEncodingDataSize(CIEInfo->FDEPointerEncoding)))
+ return Err;
+ }
+
+ // Add a keep-alive edge from the FDE target to the FDE to ensure that the
+ // FDE is kept alive if its target is.
+ assert(PCBeginBlock && "PC-begin block not recorded");
+ LLVM_DEBUG({
+ dbgs() << " Adding keep-alive edge from target at "
+ << formatv("{0:x16}", PCBeginBlock->getAddress()) << " to FDE at "
+ << formatv("{0:x16}", RecordAddress) << "\n";
+ });
+ PCBeginBlock->addEdge(Edge::KeepAlive, 0, FDESymbol, 0);
+ }
+
+ // Skip over the PC range size field.
+ if (auto Err = RecordReader.skip(
+ getPointerEncodingDataSize(CIEInfo->FDEPointerEncoding)))
+ return Err;
+
+ if (CIEInfo->FDEsHaveLSDAField) {
+ uint64_t AugmentationDataSize;
+ if (auto Err = RecordReader.readULEB128(AugmentationDataSize))
+ return Err;
+
+ JITTargetAddress LSDAFieldOffset = RecordReader.getOffset();
+ auto LSDAEdgeItr = BlockEdges.find(RecordOffset + LSDAFieldOffset);
+ if (LSDAEdgeItr == BlockEdges.end()) {
+ auto LSDAPointerInfo =
+ readEncodedPointer(CIEInfo->LSDAPointerEncoding,
+ RecordAddress + LSDAFieldOffset, RecordReader);
+ if (!LSDAPointerInfo)
+ return LSDAPointerInfo.takeError();
+ JITTargetAddress LSDA = LSDAPointerInfo->first;
+ Edge::Kind LSDAEdgeKind = LSDAPointerInfo->second;
+ auto LSDASym = getOrCreateSymbol(PC, LSDA);
+ if (!LSDASym)
+ return LSDASym.takeError();
+ LLVM_DEBUG({
+ dbgs() << " Adding edge at "
+ << formatv("{0:x16}", RecordAddress + LSDAFieldOffset)
+ << " to LSDA at " << formatv("{0:x16}", LSDA) << "\n";
+ });
+ B.addEdge(LSDAEdgeKind, RecordOffset + LSDAFieldOffset, *LSDASym, 0);
+ } else {
+ LLVM_DEBUG({
+ auto &EI = LSDAEdgeItr->second;
+ dbgs() << " Already has edge at "
+ << formatv("{0:x16}", RecordAddress + LSDAFieldOffset)
+ << " to LSDA at " << formatv("{0:x16}", EI.Target->getAddress());
+ if (EI.Addend)
+ dbgs() << " + " << formatv("{0:x16}", EI.Addend);
+ dbgs() << "\n";
+ });
+ if (auto Err = RecordReader.skip(AugmentationDataSize))
+ return Err;
+ }
+ } else {
+ LLVM_DEBUG(dbgs() << " Record does not have LSDA field.\n");
+ }
+
+ return Error::success();
+}
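+
+// Editorial summary of the FDE fields consumed above:
+//
+//   uint32  CIEDelta;      // offset back to the owning CIE (non-zero)
+//   encoded PCBegin;       // function start, per the CIE's 'R' encoding
+//   encoded PCRange;       // function size; skipped here
+//   ULEB128 AugmentationDataSize;  // read here only when the CIE had 'L'
+//   encoded LSDA;                  // likewise only with 'L'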
+
+Expected<EHFrameEdgeFixer::AugmentationInfo>
+EHFrameEdgeFixer::parseAugmentationString(BinaryStreamReader &RecordReader) {
+ AugmentationInfo AugInfo;
+ uint8_t NextChar;
+ uint8_t *NextField = &AugInfo.Fields[0];
+
+ if (auto Err = RecordReader.readInteger(NextChar))
+ return std::move(Err);
+
+ while (NextChar != 0) {
+ switch (NextChar) {
+ case 'z':
+ AugInfo.AugmentationDataPresent = true;
+ break;
+ case 'e':
+ if (auto Err = RecordReader.readInteger(NextChar))
+ return std::move(Err);
+ if (NextChar != 'h')
+ return make_error<JITLinkError>("Unrecognized substring e" +
+ Twine(NextChar) +
+ " in augmentation string");
+ AugInfo.EHDataFieldPresent = true;
+ break;
+ case 'L':
+ case 'P':
+ case 'R':
+ *NextField++ = NextChar;
+ break;
+ default:
+ return make_error<JITLinkError>("Unrecognized character " +
+ Twine(NextChar) +
+ " in augmentation string");
+ }
+
+ if (auto Err = RecordReader.readInteger(NextChar))
+ return std::move(Err);
+ }
+
+ return std::move(AugInfo);
+}
+
+bool EHFrameEdgeFixer::isSupportedPointerEncoding(uint8_t PointerEncoding) {
+ using namespace dwarf;
+
+ // We only support PC-rel for now.
+ if ((PointerEncoding & 0x70) != DW_EH_PE_pcrel)
+ return false;
+
+ // readEncodedPointer does not handle indirect.
+ if (PointerEncoding & DW_EH_PE_indirect)
+ return false;
+
+ // Supported datatypes.
+ switch (PointerEncoding & 0xf) {
+ case DW_EH_PE_absptr:
+ case DW_EH_PE_udata4:
+ case DW_EH_PE_udata8:
+ case DW_EH_PE_sdata4:
+ case DW_EH_PE_sdata8:
+ return true;
+ }
+
+ return false;
+}
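+
+// Editorial note on the encoding byte tested above: bits 0x0f select the
+// value format (absptr/udata4/sdata4/...), bits 0x70 select how the value
+// is applied (DW_EH_PE_pcrel means relative to the field's own address),
+// and bit 0x80 (DW_EH_PE_indirect) requests an extra dereference, which
+// readEncodedPointer below does not implement.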
+
+unsigned EHFrameEdgeFixer::getPointerEncodingDataSize(uint8_t PointerEncoding) {
+ using namespace dwarf;
+
+ assert(isSupportedPointerEncoding(PointerEncoding) &&
+ "Unsupported pointer encoding");
+ switch (PointerEncoding & 0xf) {
+ case DW_EH_PE_absptr:
+ return PointerSize;
+ case DW_EH_PE_udata4:
+ case DW_EH_PE_sdata4:
+ return 4;
+ case DW_EH_PE_udata8:
+ case DW_EH_PE_sdata8:
+ return 8;
+ default:
+ llvm_unreachable("Unsupported encoding");
+ }
+}
+
+Expected<std::pair<JITTargetAddress, Edge::Kind>>
+EHFrameEdgeFixer::readEncodedPointer(uint8_t PointerEncoding,
+ JITTargetAddress PointerFieldAddress,
+ BinaryStreamReader &RecordReader) {
+ static_assert(sizeof(JITTargetAddress) == sizeof(uint64_t),
+ "Result must be able to hold a uint64_t");
+ assert(isSupportedPointerEncoding(PointerEncoding) &&
+ "Unsupported pointer encoding");
+
+ using namespace dwarf;
+
+ // Isolate data type, remap absptr to udata4 or udata8. This relies on us
+ // having verified that the graph uses 32-bit or 64-bit pointers only at the
+ // start of this pass.
+ uint8_t EffectiveType = PointerEncoding & 0xf;
+ if (EffectiveType == DW_EH_PE_absptr)
+ EffectiveType = (PointerSize == 8) ? DW_EH_PE_udata8 : DW_EH_PE_udata4;
+
+ JITTargetAddress Addr;
+ Edge::Kind PointerEdgeKind = Edge::Invalid;
+ switch (EffectiveType) {
+ case DW_EH_PE_udata4: {
+ uint32_t Val;
+ if (auto Err = RecordReader.readInteger(Val))
+ return std::move(Err);
+ Addr = PointerFieldAddress + Val;
+ PointerEdgeKind = Delta32;
+ break;
+ }
+ case DW_EH_PE_udata8: {
+ uint64_t Val;
+ if (auto Err = RecordReader.readInteger(Val))
+ return std::move(Err);
+ Addr = PointerFieldAddress + Val;
+ PointerEdgeKind = Delta64;
+ break;
+ }
+ case DW_EH_PE_sdata4: {
+ int32_t Val;
+ if (auto Err = RecordReader.readInteger(Val))
+ return std::move(Err);
+ Addr = PointerFieldAddress + Val;
+ PointerEdgeKind = Delta32;
+ break;
+ }
+ case DW_EH_PE_sdata8: {
+ int64_t Val;
+ if (auto Err = RecordReader.readInteger(Val))
+ return std::move(Err);
+ Addr = PointerFieldAddress + Val;
+ PointerEdgeKind = Delta64;
+ break;
+ }
+ }
+
+ if (PointerEdgeKind == Edge::Invalid)
+ return make_error<JITLinkError>(
+ "Unspported edge kind for encoded pointer at " +
+ formatv("{0:x}", PointerFieldAddress));
+
+ return std::make_pair(Addr, PointerEdgeKind);
+}
+
+Expected<Symbol &> EHFrameEdgeFixer::getOrCreateSymbol(ParseContext &PC,
+ JITTargetAddress Addr) {
+ Symbol *CanonicalSym = nullptr;
+
+ auto UpdateCanonicalSym = [&](Symbol *Sym) {
+ if (!CanonicalSym || Sym->getLinkage() < CanonicalSym->getLinkage() ||
+ Sym->getScope() < CanonicalSym->getScope() ||
+ (Sym->hasName() && !CanonicalSym->hasName()) ||
+ Sym->getName() < CanonicalSym->getName())
+ CanonicalSym = Sym;
+ };
+
+ if (auto *SymbolsAtAddr = PC.AddrToSyms.getSymbolsAt(Addr))
+ for (auto *Sym : *SymbolsAtAddr)
+ UpdateCanonicalSym(Sym);
+
+ // If we found an existing symbol at the given address then use it.
+ if (CanonicalSym)
+ return *CanonicalSym;
+
+ // Otherwise search for a block covering the address and create a new symbol.
+ auto *B = PC.AddrToBlock.getBlockCovering(Addr);
+ if (!B)
+ return make_error<JITLinkError>("No symbol or block covering address " +
+ formatv("{0:x16}", Addr));
+
+ return PC.G.addAnonymousSymbol(*B, Addr - B->getAddress(), 0, false, false);
+}
+
+EHFrameRegistrar::~EHFrameRegistrar() {}
+
+Error InProcessEHFrameRegistrar::registerEHFrames(
+ JITTargetAddress EHFrameSectionAddr, size_t EHFrameSectionSize) {
+ return orc::registerEHFrameSection(
+ jitTargetAddressToPointer<void *>(EHFrameSectionAddr),
+ EHFrameSectionSize);
+}
+
+Error InProcessEHFrameRegistrar::deregisterEHFrames(
+ JITTargetAddress EHFrameSectionAddr, size_t EHFrameSectionSize) {
+ return orc::deregisterEHFrameSection(
+ jitTargetAddressToPointer<void *>(EHFrameSectionAddr),
+ EHFrameSectionSize);
+}
+
+LinkGraphPassFunction
+createEHFrameRecorderPass(const Triple &TT,
+ StoreFrameRangeFunction StoreRangeAddress) {
+ const char *EHFrameSectionName = nullptr;
+ if (TT.getObjectFormat() == Triple::MachO)
+ EHFrameSectionName = "__eh_frame";
+ else
+ EHFrameSectionName = ".eh_frame";
+
+ auto RecordEHFrame =
+ [EHFrameSectionName,
+ StoreFrameRange = std::move(StoreRangeAddress)](LinkGraph &G) -> Error {
+ // Search for a non-empty eh-frame and record the address of the first
+ // symbol in it.
+ JITTargetAddress Addr = 0;
+ size_t Size = 0;
+ if (auto *S = G.findSectionByName(EHFrameSectionName)) {
+ auto R = SectionRange(*S);
+ Addr = R.getStart();
+ Size = R.getSize();
+ }
+ if (Addr == 0 && Size != 0)
+ return make_error<JITLinkError>("__eh_frame section can not have zero "
+ "address with non-zero size");
+ StoreFrameRange(Addr, Size);
+ return Error::success();
+ };
+
+ return RecordEHFrame;
+}
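+
+// Usage sketch (editorial): a typical client installs this pass in a
+// JITLink pass configuration and forwards the recorded range to an
+// EHFrameRegistrar; the names below are illustrative only:
+//
+//   Config.PostFixupPasses.push_back(createEHFrameRecorderPass(
+//       G.getTargetTriple(),
+//       [&Registrar](JITTargetAddress Addr, size_t Size) {
+//         if (Addr)
+//           cantFail(Registrar.registerEHFrames(Addr, Size));
+//       }));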
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
new file mode 100644
index 00000000000..5e68e72ba18
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
@@ -0,0 +1,122 @@
+//===------- EHFrameSupportImpl.h - JITLink eh-frame utils ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// EHFrame registration support for JITLink.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
+
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/BinaryStreamReader.h"
+
+namespace llvm {
+namespace jitlink {
+
+/// A LinkGraph pass that splits blocks in an eh-frame section into sub-blocks
+/// representing individual eh-frames.
+/// EHFrameSplitter should not be run without EHFrameEdgeFixer, which is
+/// responsible for adding FDE-to-CIE edges.
+class EHFrameSplitter {
+public:
+ EHFrameSplitter(StringRef EHFrameSectionName);
+ Error operator()(LinkGraph &G);
+
+private:
+ Error processBlock(LinkGraph &G, Block &B, LinkGraph::SplitBlockCache &Cache);
+
+ StringRef EHFrameSectionName;
+};
+
+/// A LinkGraph pass that adds missing FDE-to-CIE, FDE-to-PC and FDE-to-LSDA
+/// edges.
+class EHFrameEdgeFixer {
+public:
+ EHFrameEdgeFixer(StringRef EHFrameSectionName, unsigned PointerSize,
+ Edge::Kind Delta64, Edge::Kind Delta32,
+ Edge::Kind NegDelta32);
+ Error operator()(LinkGraph &G);
+
+private:
+
+ struct AugmentationInfo {
+ bool AugmentationDataPresent = false;
+ bool EHDataFieldPresent = false;
+ uint8_t Fields[4] = {0x0, 0x0, 0x0, 0x0};
+ };
+
+ struct CIEInformation {
+ CIEInformation() = default;
+ CIEInformation(Symbol &CIESymbol) : CIESymbol(&CIESymbol) {}
+ Symbol *CIESymbol = nullptr;
+ bool FDEsHaveLSDAField = false;
+ uint8_t FDEPointerEncoding = 0;
+ uint8_t LSDAPointerEncoding = 0;
+ };
+
+ struct EdgeTarget {
+ EdgeTarget() = default;
+ EdgeTarget(const Edge &E) : Target(&E.getTarget()), Addend(E.getAddend()) {}
+
+ Symbol *Target = nullptr;
+ Edge::AddendT Addend = 0;
+ };
+
+ using BlockEdgeMap = DenseMap<Edge::OffsetT, EdgeTarget>;
+ using CIEInfosMap = DenseMap<JITTargetAddress, CIEInformation>;
+
+ struct ParseContext {
+ ParseContext(LinkGraph &G) : G(G) {}
+
+ Expected<CIEInformation *> findCIEInfo(JITTargetAddress Address) {
+ auto I = CIEInfos.find(Address);
+ if (I == CIEInfos.end())
+ return make_error<JITLinkError>("No CIE found at address " +
+ formatv("{0:x16}", Address));
+ return &I->second;
+ }
+
+ LinkGraph &G;
+ CIEInfosMap CIEInfos;
+ BlockAddressMap AddrToBlock;
+ SymbolAddressMap AddrToSyms;
+ };
+
+ Error processBlock(ParseContext &PC, Block &B);
+ Error processCIE(ParseContext &PC, Block &B, size_t RecordOffset,
+ size_t RecordLength, size_t CIEDeltaFieldOffset);
+ Error processFDE(ParseContext &PC, Block &B, size_t RecordOffset,
+ size_t RecordLength, size_t CIEDeltaFieldOffset,
+ uint32_t CIEDelta, BlockEdgeMap &BlockEdges);
+
+ Expected<AugmentationInfo>
+ parseAugmentationString(BinaryStreamReader &RecordReader);
+
+ static bool isSupportedPointerEncoding(uint8_t PointerEncoding);
+ unsigned getPointerEncodingDataSize(uint8_t PointerEncoding);
+ Expected<std::pair<JITTargetAddress, Edge::Kind>>
+ readEncodedPointer(uint8_t PointerEncoding,
+ JITTargetAddress PointerFieldAddress,
+ BinaryStreamReader &RecordReader);
+
+ Expected<Symbol &> getOrCreateSymbol(ParseContext &PC, JITTargetAddress Addr);
+
+ StringRef EHFrameSectionName;
+ unsigned PointerSize;
+ Edge::Kind Delta64;
+ Edge::Kind Delta32;
+ Edge::Kind NegDelta32;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/ELF.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/ELF.cpp
new file mode 100644
index 00000000000..27eb7d576e2
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/ELF.cpp
@@ -0,0 +1,91 @@
+//===-------------- ELF.cpp - JIT linker function for ELF -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF jit-link function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF.h"
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_x86_64.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+Expected<uint16_t> readTargetMachineArch(StringRef Buffer) {
+ const char *Data = Buffer.data();
+
+ if (Data[ELF::EI_DATA] == ELF::ELFDATA2LSB) {
+ if (Data[ELF::EI_CLASS] == ELF::ELFCLASS64) {
+ if (auto File = llvm::object::ELF64LEFile::create(Buffer)) {
+ return File->getHeader().e_machine;
+ } else {
+ return File.takeError();
+ }
+ } else if (Data[ELF::EI_CLASS] == ELF::ELFCLASS32) {
+ if (auto File = llvm::object::ELF32LEFile::create(Buffer)) {
+ return File->getHeader().e_machine;
+ } else {
+ return File.takeError();
+ }
+ }
+ }
+
+ return ELF::EM_NONE;
+}
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject(MemoryBufferRef ObjectBuffer) {
+ StringRef Buffer = ObjectBuffer.getBuffer();
+ if (Buffer.size() < ELF::EI_MAG3 + 1)
+ return make_error<JITLinkError>("Truncated ELF buffer");
+
+ if (memcmp(Buffer.data(), ELF::ElfMagic, strlen(ELF::ElfMagic)) != 0)
+ return make_error<JITLinkError>("ELF magic not valid");
+
+ Expected<uint16_t> TargetMachineArch = readTargetMachineArch(Buffer);
+ if (!TargetMachineArch)
+ return TargetMachineArch.takeError();
+
+ switch (*TargetMachineArch) {
+ case ELF::EM_X86_64:
+ return createLinkGraphFromELFObject_x86_64(std::move(ObjectBuffer));
+ default:
+ return make_error<JITLinkError>(
+ "Unsupported target machine architecture in ELF object " +
+ ObjectBuffer.getBufferIdentifier());
+ }
+}
+
+void link_ELF(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ switch (G->getTargetTriple().getArch()) {
+ case Triple::x86_64:
+ link_ELF_x86_64(std::move(G), std::move(Ctx));
+ return;
+ default:
+ Ctx->notifyFailed(make_error<JITLinkError>(
+ "Unsupported target machine architecture in ELF link graph " +
+ G->getName()));
+ return;
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
new file mode 100644
index 00000000000..2a6b3eb19de
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
@@ -0,0 +1,813 @@
+//===---- ELF_x86_64.cpp - JIT linker implementation for ELF/x86-64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/x86-64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_x86_64.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Endian.h"
+
+#include "BasicGOTAndStubsBuilder.h"
+#include "EHFrameSupportImpl.h"
+#include "JITLinkGeneric.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::ELF_x86_64_Edges;
+
+namespace {
+
+class ELF_x86_64_GOTAndStubsBuilder
+ : public BasicGOTAndStubsBuilder<ELF_x86_64_GOTAndStubsBuilder> {
+public:
+ static const uint8_t NullGOTEntryContent[8];
+ static const uint8_t StubContent[6];
+
+ ELF_x86_64_GOTAndStubsBuilder(LinkGraph &G)
+ : BasicGOTAndStubsBuilder<ELF_x86_64_GOTAndStubsBuilder>(G) {}
+
+ bool isGOTEdge(Edge &E) const {
+ return E.getKind() == PCRel32GOT || E.getKind() == PCRel32GOTLoad;
+ }
+
+ Symbol &createGOTEntry(Symbol &Target) {
+ auto &GOTEntryBlock = G.createContentBlock(
+ getGOTSection(), getGOTEntryBlockContent(), 0, 8, 0);
+ GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
+ return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
+ }
+
+ void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+ assert((E.getKind() == PCRel32GOT || E.getKind() == PCRel32GOTLoad) &&
+ "Not a GOT edge?");
+ // If this is a PCRel32GOT then change it to an ordinary PCRel32. If it is
+ // a PCRel32GOTLoad then leave it as-is for now. We will use the kind to
+ // check for GOT optimization opportunities in the
+ // optimizeELF_x86_64_GOTAndStubs pass below.
+ if (E.getKind() == PCRel32GOT)
+ E.setKind(PCRel32);
+
+ E.setTarget(GOTEntry);
+ // Leave the edge addend as-is.
+ }
+
+ bool isExternalBranchEdge(Edge &E) {
+ return E.getKind() == Branch32 && !E.getTarget().isDefined();
+ }
+
+ Symbol &createStub(Symbol &Target) {
+ auto &StubContentBlock =
+ G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 1, 0);
+ // Re-use GOT entries for stub targets.
+ auto &GOTEntrySymbol = getGOTEntrySymbol(Target);
+ StubContentBlock.addEdge(PCRel32, 2, GOTEntrySymbol, -4);
+ return G.addAnonymousSymbol(StubContentBlock, 0, 6, true, false);
+ }
+
+ void fixExternalBranchEdge(Edge &E, Symbol &Stub) {
+ assert(E.getKind() == Branch32 && "Not a Branch32 edge?");
+
+ // Set the edge kind to Branch32ToStub. We will use this to check for stub
+ // optimization opportunities in the optimize ELF_x86_64_GOTAndStubs pass
+ // below.
+ E.setKind(Branch32ToStub);
+ E.setTarget(Stub);
+ }
+
+private:
+ Section &getGOTSection() {
+ if (!GOTSection)
+ GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
+ return *GOTSection;
+ }
+
+ Section &getStubsSection() {
+ if (!StubsSection) {
+ auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ StubsSection = &G.createSection("$__STUBS", StubsProt);
+ }
+ return *StubsSection;
+ }
+
+ StringRef getGOTEntryBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(NullGOTEntryContent),
+ sizeof(NullGOTEntryContent));
+ }
+
+ StringRef getStubBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(StubContent),
+ sizeof(StubContent));
+ }
+
+ Section *GOTSection = nullptr;
+ Section *StubsSection = nullptr;
+};
+
+const char *const DwarfSectionNames[] = {
+#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION) \
+ ELF_NAME,
+#include "llvm/BinaryFormat/Dwarf.def"
+#undef HANDLE_DWARF_SECTION
+};
+
+} // namespace
+
+const uint8_t ELF_x86_64_GOTAndStubsBuilder::NullGOTEntryContent[8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+const uint8_t ELF_x86_64_GOTAndStubsBuilder::StubContent[6] = {
+ 0xFF, 0x25, 0x00, 0x00, 0x00, 0x00};
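+
+// Editorial note: StubContent above encodes "jmp *0x0(%rip)" (FF 25
+// <disp32>); createStub attaches a PCRel32 edge at offset 2 with addend
+// -4 so that the 32-bit displacement is fixed up to reach the re-used
+// GOT entry for the stub's target.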
+
+static const char *CommonSectionName = "__common";
+static Error optimizeELF_x86_64_GOTAndStubs(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");
+
+ for (auto *B : G.blocks())
+ for (auto &E : B->edges())
+ if (E.getKind() == PCRel32GOTLoad) {
+ // Replace GOT load with LEA only for MOVQ instructions.
+ constexpr uint8_t MOVQRIPRel[] = {0x48, 0x8b};
+ if (E.getOffset() < 3 ||
+ strncmp(B->getContent().data() + E.getOffset() - 3,
+ reinterpret_cast<const char *>(MOVQRIPRel), 2) != 0)
+ continue;
+
+ auto &GOTBlock = E.getTarget().getBlock();
+ assert(GOTBlock.getSize() == G.getPointerSize() &&
+ "GOT entry block should be pointer sized");
+ assert(GOTBlock.edges_size() == 1 &&
+ "GOT entry should only have one outgoing edge");
+
+ auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
+ JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
+ JITTargetAddress TargetAddr = GOTTarget.getAddress();
+
+ int64_t Displacement = TargetAddr - EdgeAddr + 4;
+ if (Displacement >= std::numeric_limits<int32_t>::min() &&
+ Displacement <= std::numeric_limits<int32_t>::max()) {
+ // Change the edge kind as we don't go through GOT anymore. This is
+ // for formal correctness only. Technically, the two relocation kinds
+ // are resolved the same way.
+ E.setKind(PCRel32);
+ E.setTarget(GOTTarget);
+ auto *BlockData = reinterpret_cast<uint8_t *>(
+ const_cast<char *>(B->getContent().data()));
+ BlockData[E.getOffset() - 2] = 0x8d;
+ LLVM_DEBUG({
+ dbgs() << " Replaced GOT load wih LEA:\n ";
+ printEdge(dbgs(), *B, E, getELFX86RelocationKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ }
+ } else if (E.getKind() == Branch32ToStub) {
+ auto &StubBlock = E.getTarget().getBlock();
+ assert(StubBlock.getSize() ==
+ sizeof(ELF_x86_64_GOTAndStubsBuilder::StubContent) &&
+ "Stub block should be stub sized");
+ assert(StubBlock.edges_size() == 1 &&
+ "Stub block should only have one outgoing edge");
+
+ auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock();
+ assert(GOTBlock.getSize() == G.getPointerSize() &&
+ "GOT block should be pointer sized");
+ assert(GOTBlock.edges_size() == 1 &&
+ "GOT block should only have one outgoing edge");
+
+ auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
+ JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
+ JITTargetAddress TargetAddr = GOTTarget.getAddress();
+
+ int64_t Displacement = TargetAddr - EdgeAddr + 4;
+ if (Displacement >= std::numeric_limits<int32_t>::min() &&
+ Displacement <= std::numeric_limits<int32_t>::max()) {
+ E.setKind(Branch32);
+ E.setTarget(GOTTarget);
+ LLVM_DEBUG({
+ dbgs() << " Replaced stub branch with direct branch:\n ";
+ printEdge(dbgs(), *B, E, getELFX86RelocationKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ }
+ }
+
+ return Error::success();
+}
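+
+// Editorial worked example for the GOT-load rewrite above: a load such as
+//
+//   48 8b 05 XX XX XX XX    movq target@GOTPCREL(%rip), %rax
+//
+// becomes, once the target is proven to fit a 32-bit displacement,
+//
+//   48 8d 05 XX XX XX XX    leaq target(%rip), %rax
+//
+// by overwriting the 0x8b opcode byte (at E.getOffset() - 2) with 0x8d;
+// the disp32 itself is then resolved through the ordinary PCRel32 fixup.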
+
+static bool isDwarfSection(StringRef SectionName) {
+ for (auto &DwarfSectionName : DwarfSectionNames)
+ if (SectionName == DwarfSectionName)
+ return true;
+ return false;
+}
+
+namespace llvm {
+namespace jitlink {
+
+// This should become a template, as ELFFile is one, so a lot of this
+// could become generic.
+class ELFLinkGraphBuilder_x86_64 {
+
+private:
+ Section *CommonSection = nullptr;
+ // TODO: hack to get this working; find a better way.
+ using SymbolTable = object::ELFFile<object::ELF64LE>::Elf_Shdr;
+ // For now we just assume
+ using SymbolMap = std::map<int32_t, Symbol *>;
+ SymbolMap JITSymbolTable;
+
+ Section &getCommonSection() {
+ if (!CommonSection) {
+ auto Prot = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE);
+ CommonSection = &G->createSection(CommonSectionName, Prot);
+ }
+ return *CommonSection;
+ }
+
+ static Expected<ELF_x86_64_Edges::ELFX86RelocationKind>
+ getRelocationKind(const uint32_t Type) {
+ switch (Type) {
+ case ELF::R_X86_64_PC32:
+ return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel32;
+ case ELF::R_X86_64_PC64:
+ return ELF_x86_64_Edges::ELFX86RelocationKind::Delta64;
+ case ELF::R_X86_64_64:
+ return ELF_x86_64_Edges::ELFX86RelocationKind::Pointer64;
+ case ELF::R_X86_64_GOTPCREL:
+ case ELF::R_X86_64_GOTPCRELX:
+ case ELF::R_X86_64_REX_GOTPCRELX:
+ return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel32GOTLoad;
+ case ELF::R_X86_64_PLT32:
+ return ELF_x86_64_Edges::ELFX86RelocationKind::Branch32;
+ }
+ return make_error<JITLinkError>("Unsupported x86-64 relocation:" +
+ formatv("{0:d}", Type));
+ }
+
+ std::unique_ptr<LinkGraph> G;
+ // This could be a template
+ const object::ELFFile<object::ELF64LE> &Obj;
+ object::ELFFile<object::ELF64LE>::Elf_Shdr_Range sections;
+ SymbolTable SymTab;
+
+ bool isRelocatable() { return Obj.getHeader().e_type == llvm::ELF::ET_REL; }
+
+ support::endianness
+ getEndianness(const object::ELFFile<object::ELF64LE> &Obj) {
+ return Obj.isLE() ? support::little : support::big;
+ }
+
+ // This could also just become part of a template
+ unsigned getPointerSize(const object::ELFFile<object::ELF64LE> &Obj) {
+ return Obj.getHeader().getFileClass() == ELF::ELFCLASS64 ? 8 : 4;
+ }
+
+ // We don't technically need this right now, but it is kept for now
+ // because it helps with debugging.
+
+ Error createNormalizedSymbols() {
+ LLVM_DEBUG(dbgs() << "Creating normalized symbols...\n");
+
+ for (auto SecRef : sections) {
+ if (SecRef.sh_type != ELF::SHT_SYMTAB &&
+ SecRef.sh_type != ELF::SHT_DYNSYM)
+ continue;
+
+ auto Symbols = Obj.symbols(&SecRef);
+ // TODO: Currently this function is used for testing. It is also kept
+ // around to see if it is common between MachO and ELF, so for now we
+ // just continue even if there is an error.
+ if (errorToBool(Symbols.takeError()))
+ continue;
+
+ auto StrTabSec = Obj.getSection(SecRef.sh_link);
+ if (!StrTabSec)
+ return StrTabSec.takeError();
+ auto StringTable = Obj.getStringTable(**StrTabSec);
+ if (!StringTable)
+ return StringTable.takeError();
+
+ for (auto SymRef : *Symbols) {
+ Optional<StringRef> Name;
+
+ if (auto NameOrErr = SymRef.getName(*StringTable))
+ Name = *NameOrErr;
+ else
+ return NameOrErr.takeError();
+
+ LLVM_DEBUG({
+ dbgs() << " value = " << formatv("{0:x16}", SymRef.getValue())
+ << ", type = " << formatv("{0:x2}", SymRef.getType())
+ << ", binding = " << formatv("{0:x2}", SymRef.getBinding())
+ << ", size = "
+ << formatv("{0:x16}", static_cast<uint64_t>(SymRef.st_size))
+ << ", info = " << formatv("{0:x2}", SymRef.st_info)
+ << " :" << (Name ? *Name : "<anonymous symbol>") << "\n";
+ });
+ }
+ }
+ return Error::success();
+ }
+
+ Error createNormalizedSections() {
+ LLVM_DEBUG(dbgs() << "Creating normalized sections...\n");
+ for (auto &SecRef : sections) {
+ auto Name = Obj.getSectionName(SecRef);
+ if (!Name)
+ return Name.takeError();
+
+ // Skip Dwarf sections.
+ if (isDwarfSection(*Name)) {
+ LLVM_DEBUG({
+ dbgs() << *Name
+ << " is a debug section: No graph section will be created.\n";
+ });
+ continue;
+ }
+
+ sys::Memory::ProtectionFlags Prot;
+ if (SecRef.sh_flags & ELF::SHF_EXECINSTR) {
+ Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ } else {
+ Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE);
+ }
+ uint64_t Address = SecRef.sh_addr;
+ uint64_t Size = SecRef.sh_size;
+ uint64_t Flags = SecRef.sh_flags;
+ uint64_t Alignment = SecRef.sh_addralign;
+ const char *Data = nullptr;
+ // For now we just use this to skip the "undefined" section; probably
+ // need to revisit.
+ if (Size == 0)
+ continue;
+
+ // FIXME: Use flags.
+ (void)Flags;
+
+ LLVM_DEBUG({
+ dbgs() << " " << *Name << ": " << formatv("{0:x16}", Address) << " -- "
+ << formatv("{0:x16}", Address + Size) << ", align: " << Alignment
+ << " Flags: " << formatv("{0:x}", Flags) << "\n";
+ });
+
+ if (SecRef.sh_type != ELF::SHT_NOBITS) {
+ // .sections() already checks that the data is not beyond the end of
+ // file
+ auto contents = Obj.getSectionContentsAsArray<char>(SecRef);
+ if (!contents)
+ return contents.takeError();
+
+ Data = contents->data();
+        // TODO: Honor the section protection flags properly; for now
+        // everything uses the coarse read/exec vs. read/write flags computed
+        // above.
+ auto &section = G->createSection(*Name, Prot);
+ // Do this here because we have it, but move it into graphify later
+ G->createContentBlock(section, StringRef(Data, Size), Address,
+ Alignment, 0);
+ if (SecRef.sh_type == ELF::SHT_SYMTAB)
+ // TODO: Dynamic?
+ SymTab = SecRef;
+ } else {
+ auto &Section = G->createSection(*Name, Prot);
+ G->createZeroFillBlock(Section, Size, Address, Alignment, 0);
+ }
+ }
+
+ return Error::success();
+ }
+
+ Error addRelocations() {
+ LLVM_DEBUG(dbgs() << "Adding relocations\n");
+    // TODO: A pattern is forming of "iterate over some sections, but only
+    // give me the ones I am interested in"; I should abstract that concept
+    // somewhere.
+ for (auto &SecRef : sections) {
+ if (SecRef.sh_type != ELF::SHT_RELA && SecRef.sh_type != ELF::SHT_REL)
+ continue;
+      // TODO: Can the ELF object file do this check for me?
+ if (SecRef.sh_type == ELF::SHT_REL)
+ return make_error<llvm::StringError>("Shouldn't have REL in x64",
+ llvm::inconvertibleErrorCode());
+
+ auto RelSectName = Obj.getSectionName(SecRef);
+ if (!RelSectName)
+ return RelSectName.takeError();
+
+ LLVM_DEBUG({
+ dbgs() << "Adding relocations from section " << *RelSectName << "\n";
+ });
+
+ auto UpdateSection = Obj.getSection(SecRef.sh_info);
+ if (!UpdateSection)
+ return UpdateSection.takeError();
+
+ auto UpdateSectionName = Obj.getSectionName(**UpdateSection);
+ if (!UpdateSectionName)
+ return UpdateSectionName.takeError();
+
+ // Don't process relocations for debug sections.
+ if (isDwarfSection(*UpdateSectionName)) {
+ LLVM_DEBUG({
+ dbgs() << " Target is dwarf section " << *UpdateSectionName
+ << ". Skipping.\n";
+ });
+ continue;
+ } else
+ LLVM_DEBUG({
+ dbgs() << " For target section " << *UpdateSectionName << "\n";
+ });
+
+ auto JITSection = G->findSectionByName(*UpdateSectionName);
+ if (!JITSection)
+ return make_error<llvm::StringError>(
+ "Refencing a a section that wasn't added to graph" +
+ *UpdateSectionName,
+ llvm::inconvertibleErrorCode());
+
+ auto Relocations = Obj.relas(SecRef);
+ if (!Relocations)
+ return Relocations.takeError();
+
+ for (const auto &Rela : *Relocations) {
+ auto Type = Rela.getType(false);
+
+ LLVM_DEBUG({
+ dbgs() << "Relocation Type: " << Type << "\n"
+ << "Name: " << Obj.getRelocationTypeName(Type) << "\n";
+ });
+ auto SymbolIndex = Rela.getSymbol(false);
+ auto Symbol = Obj.getRelocationSymbol(Rela, &SymTab);
+ if (!Symbol)
+ return Symbol.takeError();
+
+ auto BlockToFix = *(JITSection->blocks().begin());
+ auto *TargetSymbol = JITSymbolTable[SymbolIndex];
+
+ if (!TargetSymbol) {
+ return make_error<llvm::StringError>(
+ "Could not find symbol at given index, did you add it to "
+ "JITSymbolTable? index: " + std::to_string(SymbolIndex)
+ + ", shndx: " + std::to_string((*Symbol)->st_shndx) +
+ " Size of table: " + std::to_string(JITSymbolTable.size()),
+ llvm::inconvertibleErrorCode());
+ }
+ uint64_t Addend = Rela.r_addend;
+ JITTargetAddress FixupAddress =
+ (*UpdateSection)->sh_addr + Rela.r_offset;
+
+ LLVM_DEBUG({
+ dbgs() << "Processing relocation at "
+ << format("0x%016" PRIx64, FixupAddress) << "\n";
+ });
+ auto Kind = getRelocationKind(Type);
+ if (!Kind)
+ return Kind.takeError();
+
+ LLVM_DEBUG({
+ Edge GE(*Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE,
+ getELFX86RelocationKindName(*Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(*Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+
+ Error graphifyRegularSymbols() {
+
+ // TODO: ELF supports beyond SHN_LORESERVE,
+ // need to perf test how a vector vs map handles those cases
+
+ std::vector<std::vector<object::ELFFile<object::ELF64LE>::Elf_Shdr_Range *>>
+ SecIndexToSymbols;
+
+ LLVM_DEBUG(dbgs() << "Creating graph symbols...\n");
+
+ for (auto SecRef : sections) {
+
+ if (SecRef.sh_type != ELF::SHT_SYMTAB &&
+ SecRef.sh_type != ELF::SHT_DYNSYM)
+ continue;
+ auto Symbols = Obj.symbols(&SecRef);
+ if (!Symbols)
+ return Symbols.takeError();
+
+ auto StrTabSec = Obj.getSection(SecRef.sh_link);
+ if (!StrTabSec)
+ return StrTabSec.takeError();
+ auto StringTable = Obj.getStringTable(**StrTabSec);
+ if (!StringTable)
+ return StringTable.takeError();
+ auto Name = Obj.getSectionName(SecRef);
+ if (!Name)
+ return Name.takeError();
+
+ LLVM_DEBUG(dbgs() << "Processing symbol section " << *Name << ":\n");
+
+ auto Section = G->findSectionByName(*Name);
+ if (!Section)
+ return make_error<llvm::StringError>("Could not find a section " +
+ *Name,
+ llvm::inconvertibleErrorCode());
+      // We only have one block per section for now.
+ auto blocks = Section->blocks();
+ if (blocks.empty())
+ return make_error<llvm::StringError>("Section has no block",
+ llvm::inconvertibleErrorCode());
+ int SymbolIndex = -1;
+ for (auto SymRef : *Symbols) {
+ ++SymbolIndex;
+ auto Type = SymRef.getType();
+
+ if (Type == ELF::STT_FILE || SymbolIndex == 0)
+ continue;
+ // these should do it for now
+ // if(Type != ELF::STT_NOTYPE &&
+ // Type != ELF::STT_OBJECT &&
+ // Type != ELF::STT_FUNC &&
+ // Type != ELF::STT_SECTION &&
+ // Type != ELF::STT_COMMON) {
+ // continue;
+ // }
+ auto Name = SymRef.getName(*StringTable);
+        // I am not sure if this is going to hold as an invariant. Revisit.
+ if (!Name)
+ return Name.takeError();
+
+ if (SymRef.isCommon()) {
+ // Symbols in SHN_COMMON refer to uninitialized data. The st_value
+ // field holds alignment constraints.
+ Symbol &S =
+ G->addCommonSymbol(*Name, Scope::Default, getCommonSection(), 0,
+ SymRef.st_size, SymRef.getValue(), false);
+ JITSymbolTable[SymbolIndex] = &S;
+ continue;
+ }
+
+ // Map Visibility and Binding to Scope and Linkage:
+ Linkage L = Linkage::Strong;
+ Scope S = Scope::Default;
+
+ switch (SymRef.getBinding()) {
+ case ELF::STB_LOCAL:
+ S = Scope::Local;
+ break;
+ case ELF::STB_GLOBAL:
+ // Nothing to do here.
+ break;
+ case ELF::STB_WEAK:
+ L = Linkage::Weak;
+ break;
+ default:
+ return make_error<StringError>("Unrecognized symbol binding for " +
+ *Name,
+ inconvertibleErrorCode());
+ }
+
+ switch (SymRef.getVisibility()) {
+ case ELF::STV_DEFAULT:
+ case ELF::STV_PROTECTED:
+ // FIXME: Make STV_DEFAULT symbols pre-emptible? This probably needs
+ // Orc support.
+ // Otherwise nothing to do here.
+ break;
+ case ELF::STV_HIDDEN:
+ // Default scope -> Hidden scope. No effect on local scope.
+ if (S == Scope::Default)
+ S = Scope::Hidden;
+ break;
+ case ELF::STV_INTERNAL:
+ return make_error<StringError>("Unrecognized symbol visibility for " +
+ *Name,
+ inconvertibleErrorCode());
+ }
+
+ if (SymRef.isDefined() &&
+ (Type == ELF::STT_FUNC || Type == ELF::STT_OBJECT ||
+ Type == ELF::STT_SECTION)) {
+
+ auto DefinedSection = Obj.getSection(SymRef.st_shndx);
+ if (!DefinedSection)
+ return DefinedSection.takeError();
+ auto sectName = Obj.getSectionName(**DefinedSection);
+ if (!sectName)
+            return sectName.takeError();
+
+ // Skip debug section symbols.
+ if (isDwarfSection(*sectName))
+ continue;
+
+ auto JitSection = G->findSectionByName(*sectName);
+ if (!JitSection)
+ return make_error<llvm::StringError>(
+ "Could not find the JitSection " + *sectName,
+ llvm::inconvertibleErrorCode());
+ auto bs = JitSection->blocks();
+ if (bs.empty())
+ return make_error<llvm::StringError>(
+ "Section has no block", llvm::inconvertibleErrorCode());
+
+ auto *B = *bs.begin();
+          LLVM_DEBUG({
+            dbgs() << "  " << *Name << " at index " << SymbolIndex << "\n";
+          });
+ if (SymRef.getType() == ELF::STT_SECTION)
+ *Name = *sectName;
+ auto &Sym = G->addDefinedSymbol(
+ *B, SymRef.getValue(), *Name, SymRef.st_size, L, S,
+ SymRef.getType() == ELF::STT_FUNC, false);
+ JITSymbolTable[SymbolIndex] = &Sym;
+ } else if (SymRef.isUndefined() && SymRef.isExternal()) {
+ auto &Sym = G->addExternalSymbol(*Name, SymRef.st_size, L);
+ JITSymbolTable[SymbolIndex] = &Sym;
+ } else
+ LLVM_DEBUG({
+ dbgs()
+ << "Not creating graph symbol for normalized symbol at index "
+ << SymbolIndex << ", \"" << *Name << "\"\n";
+ });
+
+        // TODO: The following has to be implemented.
+        // Leaving it commented out to save time for future patches.
+ /*
+ G->addAbsoluteSymbol(*Name, SymRef.getValue(), SymRef.st_size,
+ Linkage::Strong, Scope::Default, false);
+ */
+ }
+ }
+ return Error::success();
+ }
+
+public:
+ ELFLinkGraphBuilder_x86_64(StringRef FileName,
+ const object::ELFFile<object::ELF64LE> &Obj)
+ : G(std::make_unique<LinkGraph>(FileName.str(),
+ Triple("x86_64-unknown-linux"),
+ getPointerSize(Obj), getEndianness(Obj))),
+ Obj(Obj) {}
+
+ Expected<std::unique_ptr<LinkGraph>> buildGraph() {
+ // Sanity check: we only operate on relocatable objects.
+ if (!isRelocatable())
+ return make_error<JITLinkError>("Object is not a relocatable ELF");
+
+ auto Secs = Obj.sections();
+
+ if (!Secs) {
+ return Secs.takeError();
+ }
+ sections = *Secs;
+
+ if (auto Err = createNormalizedSections())
+ return std::move(Err);
+
+ if (auto Err = createNormalizedSymbols())
+ return std::move(Err);
+
+ if (auto Err = graphifyRegularSymbols())
+ return std::move(Err);
+
+ if (auto Err = addRelocations())
+ return std::move(Err);
+
+ return std::move(G);
+ }
+};
+
+class ELFJITLinker_x86_64 : public JITLinker<ELFJITLinker_x86_64> {
+ friend class JITLinker<ELFJITLinker_x86_64>;
+
+public:
+ ELFJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ StringRef getEdgeKindName(Edge::Kind R) const override {
+ return getELFX86RelocationKindName(R);
+ }
+
+ static Error targetOutOfRangeError(const Block &B, const Edge &E) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Relocation target out of range: ";
+ printEdge(ErrStream, B, E, getELFX86RelocationKindName(E.getKind()));
+ ErrStream << "\n";
+ }
+ return make_error<JITLinkError>(std::move(ErrMsg));
+ }
+
+ Error applyFixup(Block &B, const Edge &E, char *BlockWorkingMem) const {
+ using namespace ELF_x86_64_Edges;
+ using namespace llvm::support;
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
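+
+    // For example, a Branch32 edge with addend -4 targeting address 0x2000,
+    // fixed up at address 0x1000, stores 0x2000 + (-4) - 0x1000 = 0xffc as a
+    // little-endian 32-bit value.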
+ switch (E.getKind()) {
+ case ELFX86RelocationKind::Branch32:
+ case ELFX86RelocationKind::Branch32ToStub:
+ case ELFX86RelocationKind::PCRel32:
+ case ELFX86RelocationKind::PCRel32GOTLoad: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ break;
+ }
+ case ELFX86RelocationKind::Pointer64: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend();
+ *(ulittle64_t *)FixupPtr = Value;
+ break;
+ }
+ case ELFX86RelocationKind::Delta64: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+ *(little64_t *)FixupPtr = Value;
+ break;
+ }
+ }
+ return Error::success();
+ }
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_x86_64(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_x86_64((*ELFObj)->getFileName(),
+ ELFObjFile.getELFFile())
+ .buildGraph();
+}
+
+void link_ELF_x86_64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+
+ if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+
+ Config.PrePrunePasses.push_back(EHFrameSplitter(".eh_frame"));
+ Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
+ ".eh_frame", G->getPointerSize(), Delta64, Delta32, NegDelta32));
+
+ // Construct a JITLinker and run the link function.
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back([](LinkGraph &G) -> Error {
+ ELF_x86_64_GOTAndStubsBuilder(G).run();
+ return Error::success();
+ });
+
+ // Add GOT/Stubs optimizer pass.
+ Config.PreFixupPasses.push_back(optimizeELF_x86_64_GOTAndStubs);
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(G->getTargetTriple(), Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ ELFJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+StringRef getELFX86RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case PCRel32:
+ return "PCRel32";
+ case Pointer64:
+ return "Pointer64";
+ case PCRel32GOTLoad:
+ return "PCRel32GOTLoad";
+ case Branch32:
+ return "Branch32";
+ case Branch32ToStub:
+ return "Branch32ToStub";
+ }
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+}
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLink.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLink.cpp
new file mode 100644
index 00000000000..93dfba9c759
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLink.cpp
@@ -0,0 +1,350 @@
+//===------------- JITLink.cpp - Core Run-time JIT linker APIs ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/ExecutionEngine/JITLink/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace {
+
+enum JITLinkErrorCode { GenericJITLinkError = 1 };
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class JITLinkerErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "runtimedyld"; }
+
+ std::string message(int Condition) const override {
+ switch (static_cast<JITLinkErrorCode>(Condition)) {
+ case GenericJITLinkError:
+ return "Generic JITLink error";
+ }
+ llvm_unreachable("Unrecognized JITLinkErrorCode");
+ }
+};
+
+static ManagedStatic<JITLinkerErrorCategory> JITLinkerErrorCategory;
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+char JITLinkError::ID = 0;
+
+void JITLinkError::log(raw_ostream &OS) const { OS << ErrMsg << "\n"; }
+
+std::error_code JITLinkError::convertToErrorCode() const {
+ return std::error_code(GenericJITLinkError, *JITLinkerErrorCategory);
+}
+
+const char *getGenericEdgeKindName(Edge::Kind K) {
+ switch (K) {
+ case Edge::Invalid:
+ return "INVALID RELOCATION";
+ case Edge::KeepAlive:
+ return "Keep-Alive";
+ default:
+ return "<Unrecognized edge kind>";
+ }
+}
+
+const char *getLinkageName(Linkage L) {
+ switch (L) {
+ case Linkage::Strong:
+ return "strong";
+ case Linkage::Weak:
+ return "weak";
+ }
+ llvm_unreachable("Unrecognized llvm.jitlink.Linkage enum");
+}
+
+const char *getScopeName(Scope S) {
+ switch (S) {
+ case Scope::Default:
+ return "default";
+ case Scope::Hidden:
+ return "hidden";
+ case Scope::Local:
+ return "local";
+ }
+ llvm_unreachable("Unrecognized llvm.jitlink.Scope enum");
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const Block &B) {
+ return OS << formatv("{0:x16}", B.getAddress()) << " -- "
+ << formatv("{0:x16}", B.getAddress() + B.getSize()) << ": "
+ << "size = " << formatv("{0:x}", B.getSize()) << ", "
+ << (B.isZeroFill() ? "zero-fill" : "content")
+ << ", align = " << B.getAlignment()
+ << ", align-ofs = " << B.getAlignmentOffset()
+ << ", section = " << B.getSection().getName();
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const Symbol &Sym) {
+ OS << "<";
+ if (Sym.getName().empty())
+ OS << "*anon*";
+ else
+ OS << Sym.getName();
+ OS << ": flags = ";
+ switch (Sym.getLinkage()) {
+ case Linkage::Strong:
+ OS << 'S';
+ break;
+ case Linkage::Weak:
+ OS << 'W';
+ break;
+ }
+ switch (Sym.getScope()) {
+ case Scope::Default:
+ OS << 'D';
+ break;
+ case Scope::Hidden:
+ OS << 'H';
+ break;
+ case Scope::Local:
+ OS << 'L';
+ break;
+ }
+ OS << (Sym.isLive() ? '+' : '-')
+ << ", size = " << formatv("{0:x}", Sym.getSize())
+ << ", addr = " << formatv("{0:x16}", Sym.getAddress()) << " ("
+ << formatv("{0:x16}", Sym.getAddressable().getAddress()) << " + "
+ << formatv("{0:x}", Sym.getOffset());
+ if (Sym.isDefined())
+ OS << " " << Sym.getBlock().getSection().getName();
+ OS << ")>";
+ return OS;
+}
+
+void printEdge(raw_ostream &OS, const Block &B, const Edge &E,
+ StringRef EdgeKindName) {
+ OS << "edge@" << formatv("{0:x16}", B.getAddress() + E.getOffset()) << ": "
+ << formatv("{0:x16}", B.getAddress()) << " + "
+ << formatv("{0:x}", E.getOffset()) << " -- " << EdgeKindName << " -> ";
+
+ auto &TargetSym = E.getTarget();
+ if (TargetSym.hasName())
+ OS << TargetSym.getName();
+ else {
+ auto &TargetBlock = TargetSym.getBlock();
+ auto &TargetSec = TargetBlock.getSection();
+ JITTargetAddress SecAddress = ~JITTargetAddress(0);
+ for (auto *B : TargetSec.blocks())
+ if (B->getAddress() < SecAddress)
+ SecAddress = B->getAddress();
+
+ JITTargetAddress SecDelta = TargetSym.getAddress() - SecAddress;
+ OS << formatv("{0:x16}", TargetSym.getAddress()) << " (section "
+ << TargetSec.getName();
+ if (SecDelta)
+ OS << " + " << formatv("{0:x}", SecDelta);
+ OS << " / block " << formatv("{0:x16}", TargetBlock.getAddress());
+ if (TargetSym.getOffset())
+ OS << " + " << formatv("{0:x}", TargetSym.getOffset());
+ OS << ")";
+ }
+
+ if (E.getAddend() != 0)
+ OS << " + " << E.getAddend();
+}
+
+Section::~Section() {
+ for (auto *Sym : Symbols)
+ Sym->~Symbol();
+ for (auto *B : Blocks)
+ B->~Block();
+}
+
+Block &LinkGraph::splitBlock(Block &B, size_t SplitIndex,
+ SplitBlockCache *Cache) {
+
+ assert(SplitIndex > 0 && "splitBlock can not be called with SplitIndex == 0");
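+
+  // For example, splitting a 16-byte content block B at SplitIndex == 8
+  // creates a new 8-byte block covering B's original address; B keeps the
+  // upper 8 bytes, its address advances by 8, and edges and symbols with
+  // offsets below 8 migrate to the new block.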
+
+ // If the split point covers all of B then just return B.
+ if (SplitIndex == B.getSize())
+ return B;
+
+ assert(SplitIndex < B.getSize() && "SplitIndex out of range");
+
+ // Create the new block covering [ 0, SplitIndex ).
+ auto &NewBlock =
+ B.isZeroFill()
+ ? createZeroFillBlock(B.getSection(), SplitIndex, B.getAddress(),
+ B.getAlignment(), B.getAlignmentOffset())
+ : createContentBlock(
+ B.getSection(), B.getContent().substr(0, SplitIndex),
+ B.getAddress(), B.getAlignment(), B.getAlignmentOffset());
+
+ // Modify B to cover [ SplitIndex, B.size() ).
+ B.setAddress(B.getAddress() + SplitIndex);
+ B.setContent(B.getContent().substr(SplitIndex));
+ B.setAlignmentOffset((B.getAlignmentOffset() + SplitIndex) %
+ B.getAlignment());
+
+ // Handle edge transfer/update.
+ {
+ // Copy edges to NewBlock (recording their iterators so that we can remove
+    // them from B), and update the offsets of edges remaining on B.
+ std::vector<Block::edge_iterator> EdgesToRemove;
+ for (auto I = B.edges().begin(); I != B.edges().end();) {
+ if (I->getOffset() < SplitIndex) {
+ NewBlock.addEdge(*I);
+ I = B.removeEdge(I);
+ } else {
+ I->setOffset(I->getOffset() - SplitIndex);
+ ++I;
+ }
+ }
+ }
+
+ // Handle symbol transfer/update.
+ {
+ // Initialize the symbols cache if necessary.
+ SplitBlockCache LocalBlockSymbolsCache;
+ if (!Cache)
+ Cache = &LocalBlockSymbolsCache;
+ if (*Cache == None) {
+ *Cache = SplitBlockCache::value_type();
+ for (auto *Sym : B.getSection().symbols())
+ if (&Sym->getBlock() == &B)
+ (*Cache)->push_back(Sym);
+
+ llvm::sort(**Cache, [](const Symbol *LHS, const Symbol *RHS) {
+ return LHS->getOffset() > RHS->getOffset();
+ });
+ }
+ auto &BlockSymbols = **Cache;
+
+ // Transfer all symbols with offset less than SplitIndex to NewBlock.
+ while (!BlockSymbols.empty() &&
+ BlockSymbols.back()->getOffset() < SplitIndex) {
+ BlockSymbols.back()->setBlock(NewBlock);
+ BlockSymbols.pop_back();
+ }
+
+ // Update offsets for all remaining symbols in B.
+ for (auto *Sym : BlockSymbols)
+ Sym->setOffset(Sym->getOffset() - SplitIndex);
+ }
+
+ return NewBlock;
+}
+
+void LinkGraph::dump(raw_ostream &OS,
+ std::function<StringRef(Edge::Kind)> EdgeKindToName) {
+ if (!EdgeKindToName)
+ EdgeKindToName = [](Edge::Kind K) { return StringRef(); };
+
+ OS << "Symbols:\n";
+ for (auto *Sym : defined_symbols()) {
+ OS << " " << format("0x%016" PRIx64, Sym->getAddress()) << ": " << *Sym
+ << "\n";
+ if (Sym->isDefined()) {
+ for (auto &E : Sym->getBlock().edges()) {
+ OS << " ";
+ StringRef EdgeName = (E.getKind() < Edge::FirstRelocation
+ ? getGenericEdgeKindName(E.getKind())
+ : EdgeKindToName(E.getKind()));
+
+ if (!EdgeName.empty())
+ printEdge(OS, Sym->getBlock(), E, EdgeName);
+ else {
+ auto EdgeNumberString = std::to_string(E.getKind());
+ printEdge(OS, Sym->getBlock(), E, EdgeNumberString);
+ }
+ OS << "\n";
+ }
+ }
+ }
+
+ OS << "Absolute symbols:\n";
+ for (auto *Sym : absolute_symbols())
+ OS << " " << format("0x%016" PRIx64, Sym->getAddress()) << ": " << *Sym
+ << "\n";
+
+ OS << "External symbols:\n";
+ for (auto *Sym : external_symbols())
+ OS << " " << format("0x%016" PRIx64, Sym->getAddress()) << ": " << *Sym
+ << "\n";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LF) {
+ switch (LF) {
+ case SymbolLookupFlags::RequiredSymbol:
+ return OS << "RequiredSymbol";
+ case SymbolLookupFlags::WeaklyReferencedSymbol:
+ return OS << "WeaklyReferencedSymbol";
+ }
+ llvm_unreachable("Unrecognized lookup flags");
+}
+
+void JITLinkAsyncLookupContinuation::anchor() {}
+
+JITLinkContext::~JITLinkContext() {}
+
+bool JITLinkContext::shouldAddDefaultTargetPasses(const Triple &TT) const {
+ return true;
+}
+
+LinkGraphPassFunction JITLinkContext::getMarkLivePass(const Triple &TT) const {
+ return LinkGraphPassFunction();
+}
+
+Error JITLinkContext::modifyPassConfig(const Triple &TT,
+ PassConfiguration &Config) {
+ return Error::success();
+}
+
+Error markAllSymbolsLive(LinkGraph &G) {
+ for (auto *Sym : G.defined_symbols())
+ Sym->setLive(true);
+ return Error::success();
+}
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromObject(MemoryBufferRef ObjectBuffer) {
+ auto Magic = identify_magic(ObjectBuffer.getBuffer());
+ switch (Magic) {
+ case file_magic::macho_object:
+ return createLinkGraphFromMachOObject(std::move(ObjectBuffer));
+ case file_magic::elf_relocatable:
+ return createLinkGraphFromELFObject(std::move(ObjectBuffer));
+ default:
+ return make_error<JITLinkError>("Unsupported file format");
+  }
+}
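+
+// A minimal usage sketch (assuming a JITLinkContext implementation Ctx and an
+// object buffer ObjBuffer are in scope):
+//
+//   auto G = createLinkGraphFromObject(ObjBuffer);
+//   if (!G)
+//     return G.takeError();
+//   link(std::move(*G), std::move(Ctx));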
+
+void link(std::unique_ptr<LinkGraph> G, std::unique_ptr<JITLinkContext> Ctx) {
+ switch (G->getTargetTriple().getObjectFormat()) {
+ case Triple::MachO:
+ return link_MachO(std::move(G), std::move(Ctx));
+ case Triple::ELF:
+ return link_ELF(std::move(G), std::move(Ctx));
+ default:
+ Ctx->notifyFailed(make_error<JITLinkError>("Unsupported object format"));
+  }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
new file mode 100644
index 00000000000..7a5e014f223
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
@@ -0,0 +1,497 @@
+//===--------- JITLinkGeneric.cpp - Generic JIT linker utilities ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic JITLinker utility class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JITLinkGeneric.h"
+
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+JITLinkerBase::~JITLinkerBase() {}
+
+void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
+
+ LLVM_DEBUG({
+ dbgs() << "Starting link phase 1 for graph " << G->getName() << "\n";
+ });
+
+ // Prune and optimize the graph.
+ if (auto Err = runPasses(Passes.PrePrunePasses))
+ return Ctx->notifyFailed(std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" pre-pruning:\n";
+ dumpGraph(dbgs());
+ });
+
+ prune(*G);
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" post-pruning:\n";
+ dumpGraph(dbgs());
+ });
+
+ // Run post-pruning passes.
+ if (auto Err = runPasses(Passes.PostPrunePasses))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Sort blocks into segments.
+ auto Layout = layOutBlocks();
+
+ // Allocate memory for segments.
+ if (auto Err = allocateSegments(Layout))
+ return Ctx->notifyFailed(std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName()
+ << "\" before post-allocation passes:\n";
+ dumpGraph(dbgs());
+ });
+
+ // Run post-allocation passes.
+ if (auto Err = runPasses(Passes.PostAllocationPasses))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Notify client that the defined symbols have been assigned addresses.
+ LLVM_DEBUG(
+ { dbgs() << "Resolving symbols defined in " << G->getName() << "\n"; });
+
+ if (auto Err = Ctx->notifyResolved(*G))
+ return Ctx->notifyFailed(std::move(Err));
+
+ auto ExternalSymbols = getExternalSymbolNames();
+
+ LLVM_DEBUG({
+ dbgs() << "Issuing lookup for external symbols for " << G->getName()
+ << " (may trigger materialization/linking of other graphs)...\n";
+ });
+
+ // We're about to hand off ownership of ourself to the continuation. Grab a
+ // pointer to the context so that we can call it to initiate the lookup.
+ //
+ // FIXME: Once callee expressions are defined to be sequenced before argument
+ // expressions (c++17) we can simplify all this to:
+ //
+ // Ctx->lookup(std::move(UnresolvedExternals),
+ // [Self=std::move(Self)](Expected<AsyncLookupResult> Result) {
+ // Self->linkPhase2(std::move(Self), std::move(Result));
+ // });
+ auto *TmpCtx = Ctx.get();
+ TmpCtx->lookup(std::move(ExternalSymbols),
+ createLookupContinuation(
+ [S = std::move(Self), L = std::move(Layout)](
+ Expected<AsyncLookupResult> LookupResult) mutable {
+ auto &TmpSelf = *S;
+ TmpSelf.linkPhase2(std::move(S), std::move(LookupResult),
+ std::move(L));
+ }));
+}
+
+void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
+ Expected<AsyncLookupResult> LR,
+ SegmentLayoutMap Layout) {
+
+ LLVM_DEBUG({
+ dbgs() << "Starting link phase 2 for graph " << G->getName() << "\n";
+ });
+
+ // If the lookup failed, bail out.
+ if (!LR)
+ return deallocateAndBailOut(LR.takeError());
+
+ // Assign addresses to external addressables.
+ applyLookupResult(*LR);
+
+ // Copy block content to working memory.
+ copyBlockContentToWorkingMemory(Layout, *Alloc);
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName()
+ << "\" before pre-fixup passes:\n";
+ dumpGraph(dbgs());
+ });
+
+ if (auto Err = runPasses(Passes.PreFixupPasses))
+ return deallocateAndBailOut(std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" before copy-and-fixup:\n";
+ dumpGraph(dbgs());
+ });
+
+ // Fix up block content.
+ if (auto Err = fixUpBlocks(*G))
+ return deallocateAndBailOut(std::move(Err));
+
+ LLVM_DEBUG({
+ dbgs() << "Link graph \"" << G->getName() << "\" after copy-and-fixup:\n";
+ dumpGraph(dbgs());
+ });
+
+ if (auto Err = runPasses(Passes.PostFixupPasses))
+ return deallocateAndBailOut(std::move(Err));
+
+ // FIXME: Use move capture once we have c++14.
+ auto *UnownedSelf = Self.release();
+ auto Phase3Continuation = [UnownedSelf](Error Err) {
+ std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
+ UnownedSelf->linkPhase3(std::move(Self), std::move(Err));
+ };
+
+ Alloc->finalizeAsync(std::move(Phase3Continuation));
+}
+
+void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err) {
+
+ LLVM_DEBUG({
+ dbgs() << "Starting link phase 3 for graph " << G->getName() << "\n";
+ });
+
+ if (Err)
+ return deallocateAndBailOut(std::move(Err));
+ Ctx->notifyFinalized(std::move(Alloc));
+
+ LLVM_DEBUG({ dbgs() << "Link of graph " << G->getName() << " complete\n"; });
+}
+
+Error JITLinkerBase::runPasses(LinkGraphPassList &Passes) {
+ for (auto &P : Passes)
+ if (auto Err = P(*G))
+ return Err;
+ return Error::success();
+}
+
+JITLinkerBase::SegmentLayoutMap JITLinkerBase::layOutBlocks() {
+
+ SegmentLayoutMap Layout;
+
+  // Partition blocks based on permissions and content vs. zero-fill.
+ for (auto *B : G->blocks()) {
+ auto &SegLists = Layout[B->getSection().getProtectionFlags()];
+ if (!B->isZeroFill())
+ SegLists.ContentBlocks.push_back(B);
+ else
+ SegLists.ZeroFillBlocks.push_back(B);
+ }
+
+  // Sort blocks within each list.
+ for (auto &KV : Layout) {
+
+ auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
+ // Sort by section, address and size
+ if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
+ return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
+ if (LHS->getAddress() != RHS->getAddress())
+ return LHS->getAddress() < RHS->getAddress();
+ return LHS->getSize() < RHS->getSize();
+ };
+
+ auto &SegLists = KV.second;
+ llvm::sort(SegLists.ContentBlocks, CompareBlocks);
+ llvm::sort(SegLists.ZeroFillBlocks, CompareBlocks);
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Computed segment ordering:\n";
+ for (auto &KV : Layout) {
+ dbgs() << " Segment "
+ << static_cast<sys::Memory::ProtectionFlags>(KV.first) << ":\n";
+ auto &SL = KV.second;
+ for (auto &SIEntry :
+ {std::make_pair(&SL.ContentBlocks, "content block"),
+ std::make_pair(&SL.ZeroFillBlocks, "zero-fill block")}) {
+ dbgs() << " " << SIEntry.second << ":\n";
+ for (auto *B : *SIEntry.first)
+ dbgs() << " " << *B << "\n";
+ }
+ }
+ });
+
+ return Layout;
+}
+
+Error JITLinkerBase::allocateSegments(const SegmentLayoutMap &Layout) {
+
+ // Compute segment sizes and allocate memory.
+ LLVM_DEBUG(dbgs() << "JIT linker requesting: { ");
+ JITLinkMemoryManager::SegmentsRequestMap Segments;
+ for (auto &KV : Layout) {
+ auto &Prot = KV.first;
+ auto &SegLists = KV.second;
+
+ uint64_t SegAlign = 1;
+
+ // Calculate segment content size.
+ size_t SegContentSize = 0;
+ for (auto *B : SegLists.ContentBlocks) {
+ SegAlign = std::max(SegAlign, B->getAlignment());
+ SegContentSize = alignToBlock(SegContentSize, *B);
+ SegContentSize += B->getSize();
+ }
+
+ uint64_t SegZeroFillStart = SegContentSize;
+ uint64_t SegZeroFillEnd = SegZeroFillStart;
+
+ for (auto *B : SegLists.ZeroFillBlocks) {
+ SegAlign = std::max(SegAlign, B->getAlignment());
+ SegZeroFillEnd = alignToBlock(SegZeroFillEnd, *B);
+ SegZeroFillEnd += B->getSize();
+ }
+
+ Segments[Prot] = {SegAlign, SegContentSize,
+ SegZeroFillEnd - SegZeroFillStart};
+
+ LLVM_DEBUG({
+ dbgs() << (&KV == &*Layout.begin() ? "" : "; ")
+ << static_cast<sys::Memory::ProtectionFlags>(Prot)
+ << ": alignment = " << SegAlign
+ << ", content size = " << SegContentSize
+ << ", zero-fill size = " << (SegZeroFillEnd - SegZeroFillStart);
+ });
+ }
+ LLVM_DEBUG(dbgs() << " }\n");
+
+ if (auto AllocOrErr =
+ Ctx->getMemoryManager().allocate(Ctx->getJITLinkDylib(), Segments))
+ Alloc = std::move(*AllocOrErr);
+ else
+ return AllocOrErr.takeError();
+
+ LLVM_DEBUG({
+ dbgs() << "JIT linker got memory (working -> target):\n";
+ for (auto &KV : Layout) {
+ auto Prot = static_cast<sys::Memory::ProtectionFlags>(KV.first);
+ dbgs() << " " << Prot << ": "
+ << (const void *)Alloc->getWorkingMemory(Prot).data() << " -> "
+ << formatv("{0:x16}", Alloc->getTargetMemory(Prot)) << "\n";
+ }
+ });
+
+ // Update block target addresses.
+ for (auto &KV : Layout) {
+ auto &Prot = KV.first;
+ auto &SL = KV.second;
+
+ JITTargetAddress NextBlockAddr =
+ Alloc->getTargetMemory(static_cast<sys::Memory::ProtectionFlags>(Prot));
+
+ for (auto *SIList : {&SL.ContentBlocks, &SL.ZeroFillBlocks})
+ for (auto *B : *SIList) {
+ NextBlockAddr = alignToBlock(NextBlockAddr, *B);
+ B->setAddress(NextBlockAddr);
+ NextBlockAddr += B->getSize();
+ }
+ }
+
+ return Error::success();
+}
+
+JITLinkContext::LookupMap JITLinkerBase::getExternalSymbolNames() const {
+ // Identify unresolved external symbols.
+ JITLinkContext::LookupMap UnresolvedExternals;
+ for (auto *Sym : G->external_symbols()) {
+ assert(Sym->getAddress() == 0 &&
+ "External has already been assigned an address");
+ assert(Sym->getName() != StringRef() && Sym->getName() != "" &&
+ "Externals must be named");
+ SymbolLookupFlags LookupFlags =
+ Sym->getLinkage() == Linkage::Weak
+ ? SymbolLookupFlags::WeaklyReferencedSymbol
+ : SymbolLookupFlags::RequiredSymbol;
+ UnresolvedExternals[Sym->getName()] = LookupFlags;
+ }
+ return UnresolvedExternals;
+}
+
+void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
+ for (auto *Sym : G->external_symbols()) {
+ assert(Sym->getOffset() == 0 &&
+ "External symbol is not at the start of its addressable block");
+ assert(Sym->getAddress() == 0 && "Symbol already resolved");
+ assert(!Sym->isDefined() && "Symbol being resolved is already defined");
+ auto ResultI = Result.find(Sym->getName());
+ if (ResultI != Result.end())
+ Sym->getAddressable().setAddress(ResultI->second.getAddress());
+ else
+ assert(Sym->getLinkage() == Linkage::Weak &&
+ "Failed to resolve non-weak reference");
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Externals after applying lookup result:\n";
+ for (auto *Sym : G->external_symbols())
+ dbgs() << " " << Sym->getName() << ": "
+ << formatv("{0:x16}", Sym->getAddress()) << "\n";
+ });
+}
+
+void JITLinkerBase::copyBlockContentToWorkingMemory(
+ const SegmentLayoutMap &Layout, JITLinkMemoryManager::Allocation &Alloc) {
+
+ LLVM_DEBUG(dbgs() << "Copying block content:\n");
+ for (auto &KV : Layout) {
+ auto &Prot = KV.first;
+ auto &SegLayout = KV.second;
+
+ auto SegMem =
+ Alloc.getWorkingMemory(static_cast<sys::Memory::ProtectionFlags>(Prot));
+ char *LastBlockEnd = SegMem.data();
+ char *BlockDataPtr = LastBlockEnd;
+
+ LLVM_DEBUG({
+ dbgs() << " Processing segment "
+ << static_cast<sys::Memory::ProtectionFlags>(Prot) << " [ "
+ << (const void *)SegMem.data() << " .. "
+ << (const void *)((char *)SegMem.data() + SegMem.size())
+ << " ]\n Processing content sections:\n";
+ });
+
+ for (auto *B : SegLayout.ContentBlocks) {
+ LLVM_DEBUG(dbgs() << " " << *B << ":\n");
+
+ // Pad to alignment/alignment-offset.
+ BlockDataPtr = alignToBlock(BlockDataPtr, *B);
+
+ LLVM_DEBUG({
+ dbgs() << " Bumped block pointer to " << (const void *)BlockDataPtr
+ << " to meet block alignment " << B->getAlignment()
+ << " and alignment offset " << B->getAlignmentOffset() << "\n";
+ });
+
+ // Zero pad up to alignment.
+ LLVM_DEBUG({
+ if (LastBlockEnd != BlockDataPtr)
+ dbgs() << " Zero padding from " << (const void *)LastBlockEnd
+ << " to " << (const void *)BlockDataPtr << "\n";
+ });
+
+ while (LastBlockEnd != BlockDataPtr)
+ *LastBlockEnd++ = 0;
+
+ // Copy initial block content.
+ LLVM_DEBUG({
+ dbgs() << " Copying block " << *B << " content, "
+ << B->getContent().size() << " bytes, from "
+ << (const void *)B->getContent().data() << " to "
+ << (const void *)BlockDataPtr << "\n";
+ });
+ memcpy(BlockDataPtr, B->getContent().data(), B->getContent().size());
+
+ // Point the block's content to the fixed up buffer.
+ B->setContent(StringRef(BlockDataPtr, B->getContent().size()));
+
+ // Update block end pointer.
+ LastBlockEnd = BlockDataPtr + B->getContent().size();
+ BlockDataPtr = LastBlockEnd;
+ }
+
+ // Zero pad the rest of the segment.
+ LLVM_DEBUG({
+ dbgs() << " Zero padding end of segment from "
+ << (const void *)LastBlockEnd << " to "
+ << (const void *)((char *)SegMem.data() + SegMem.size()) << "\n";
+ });
+ while (LastBlockEnd != SegMem.data() + SegMem.size())
+ *LastBlockEnd++ = 0;
+ }
+}
+
+void JITLinkerBase::deallocateAndBailOut(Error Err) {
+ assert(Err && "Should not be bailing out on success value");
+ assert(Alloc && "can not call deallocateAndBailOut before allocation");
+ Ctx->notifyFailed(joinErrors(std::move(Err), Alloc->deallocate()));
+}
+
+void JITLinkerBase::dumpGraph(raw_ostream &OS) {
+ assert(G && "Graph is not set yet");
+ G->dump(dbgs(), [this](Edge::Kind K) { return getEdgeKindName(K); });
+}
+
+void prune(LinkGraph &G) {
+ std::vector<Symbol *> Worklist;
+ DenseSet<Block *> VisitedBlocks;
+
+ // Build the initial worklist from all symbols initially live.
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->isLive())
+ Worklist.push_back(Sym);
+
+ // Propagate live flags to all symbols reachable from the initial live set.
+ while (!Worklist.empty()) {
+ auto *Sym = Worklist.back();
+ Worklist.pop_back();
+
+ auto &B = Sym->getBlock();
+
+ // Skip addressables that we've visited before.
+ if (VisitedBlocks.count(&B))
+ continue;
+
+ VisitedBlocks.insert(&B);
+
+ for (auto &E : Sym->getBlock().edges()) {
+ // If the edge target is a defined symbol that is being newly marked live
+ // then add it to the worklist.
+ if (E.getTarget().isDefined() && !E.getTarget().isLive())
+ Worklist.push_back(&E.getTarget());
+
+ // Mark the target live.
+ E.getTarget().setLive(true);
+ }
+ }
+
+ // Collect all defined symbols to remove, then remove them.
+ {
+ LLVM_DEBUG(dbgs() << "Dead-stripping defined symbols:\n");
+ std::vector<Symbol *> SymbolsToRemove;
+ for (auto *Sym : G.defined_symbols())
+ if (!Sym->isLive())
+ SymbolsToRemove.push_back(Sym);
+ for (auto *Sym : SymbolsToRemove) {
+ LLVM_DEBUG(dbgs() << " " << *Sym << "...\n");
+ G.removeDefinedSymbol(*Sym);
+ }
+ }
+
+ // Delete any unused blocks.
+ {
+ LLVM_DEBUG(dbgs() << "Dead-stripping blocks:\n");
+ std::vector<Block *> BlocksToRemove;
+ for (auto *B : G.blocks())
+ if (!VisitedBlocks.count(B))
+ BlocksToRemove.push_back(B);
+ for (auto *B : BlocksToRemove) {
+ LLVM_DEBUG(dbgs() << " " << *B << "...\n");
+ G.removeBlock(*B);
+ }
+ }
+
+ // Collect all external symbols to remove, then remove them.
+ {
+ LLVM_DEBUG(dbgs() << "Removing unused external symbols:\n");
+ std::vector<Symbol *> SymbolsToRemove;
+ for (auto *Sym : G.external_symbols())
+ if (!Sym->isLive())
+ SymbolsToRemove.push_back(Sym);
+ for (auto *Sym : SymbolsToRemove) {
+ LLVM_DEBUG(dbgs() << " " << *Sym << "...\n");
+ G.removeExternalSymbol(*Sym);
+ }
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkGeneric.h b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
new file mode 100644
index 00000000000..1d28f5006b2
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
@@ -0,0 +1,180 @@
+//===------ JITLinkGeneric.h - Generic JIT linker utilities -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic JITLinker utilities. E.g. graph pruning, eh-frame parsing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+#define LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+
+class MemoryBufferRef;
+
+namespace jitlink {
+
+/// Base class for a JIT linker.
+///
+/// A JITLinkerBase instance links one object file into an ongoing JIT
+/// session. Symbol resolution and finalization operations are pluggable,
+/// and called using continuation passing (passing a continuation for the
+/// remaining linker work) to allow them to be performed asynchronously.
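+///
+/// Linking proceeds in three phases: linkPhase1 prunes the graph, lays out
+/// and allocates memory, and kicks off the asynchronous external-symbol
+/// lookup; linkPhase2 applies the lookup result, fixes up block content, and
+/// starts asynchronous finalization; linkPhase3 hands the finalized
+/// allocation back to the context.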
+class JITLinkerBase {
+public:
+ JITLinkerBase(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G, PassConfiguration Passes)
+ : Ctx(std::move(Ctx)), G(std::move(G)), Passes(std::move(Passes)) {
+ assert(this->Ctx && "Ctx can not be null");
+ assert(this->G && "G can not be null");
+ }
+
+ virtual ~JITLinkerBase();
+
+protected:
+ struct SegmentLayout {
+ using BlocksList = std::vector<Block *>;
+
+ BlocksList ContentBlocks;
+ BlocksList ZeroFillBlocks;
+ };
+
+ using SegmentLayoutMap = DenseMap<unsigned, SegmentLayout>;
+
+ // Phase 1:
+ // 1.1: Run pre-prune passes
+ // 1.2: Prune graph
+ // 1.3: Run post-prune passes
+ // 1.4: Sort blocks into segments
+ // 1.5: Allocate segment memory
+ // 1.6: Identify externals and make an async call to resolve function
+ void linkPhase1(std::unique_ptr<JITLinkerBase> Self);
+
+ // Phase 2:
+ // 2.1: Apply resolution results
+ // 2.2: Fix up block contents
+ // 2.3: Call OnResolved callback
+ // 2.3: Make an async call to transfer and finalize memory.
+ void linkPhase2(std::unique_ptr<JITLinkerBase> Self,
+ Expected<AsyncLookupResult> LookupResult,
+ SegmentLayoutMap Layout);
+
+ // Phase 3:
+ // 3.1: Call OnFinalized callback, handing off allocation.
+ void linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err);
+
+ // For debug dumping of the link graph.
+ virtual StringRef getEdgeKindName(Edge::Kind K) const = 0;
+
+ // Align a JITTargetAddress to conform with block alignment requirements.
+ static JITTargetAddress alignToBlock(JITTargetAddress Addr, Block &B) {
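+    // For example, with Addr = 0x1003, alignment 0x10, and alignment offset
+    // 0x8, Delta = (0x8 - 0x1003) % 0x10 = 0x5 in unsigned arithmetic, so
+    // the block lands at 0x1008, i.e. 0x8 past a 0x10 boundary.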
+ uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
+ return Addr + Delta;
+ }
+
+ // Align a pointer to conform with block alignment requirements.
+ static char *alignToBlock(char *P, Block &B) {
+ uint64_t PAddr = static_cast<uint64_t>(reinterpret_cast<uintptr_t>(P));
+ uint64_t Delta = (B.getAlignmentOffset() - PAddr) % B.getAlignment();
+ return P + Delta;
+ }
+
+private:
+ // Run all passes in the given pass list, bailing out immediately if any pass
+ // returns an error.
+ Error runPasses(LinkGraphPassList &Passes);
+
+ // Copy block contents and apply relocations.
+ // Implemented in JITLinker.
+ virtual Error fixUpBlocks(LinkGraph &G) const = 0;
+
+ SegmentLayoutMap layOutBlocks();
+ Error allocateSegments(const SegmentLayoutMap &Layout);
+ JITLinkContext::LookupMap getExternalSymbolNames() const;
+ void applyLookupResult(AsyncLookupResult LR);
+ void copyBlockContentToWorkingMemory(const SegmentLayoutMap &Layout,
+ JITLinkMemoryManager::Allocation &Alloc);
+ void deallocateAndBailOut(Error Err);
+
+ void dumpGraph(raw_ostream &OS);
+
+ std::unique_ptr<JITLinkContext> Ctx;
+ std::unique_ptr<LinkGraph> G;
+ PassConfiguration Passes;
+ std::unique_ptr<JITLinkMemoryManager::Allocation> Alloc;
+};
+
+template <typename LinkerImpl> class JITLinker : public JITLinkerBase {
+public:
+ using JITLinkerBase::JITLinkerBase;
+
+ /// Link constructs a LinkerImpl instance and calls linkPhase1.
+ /// Link should be called with the constructor arguments for LinkerImpl, which
+ /// will be forwarded to the constructor.
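+  /// For example, ELFJITLinker_x86_64::link(std::move(Ctx), std::move(G),
+  /// std::move(Config)) in ELF_x86_64.cpp constructs and runs that linker.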
+ template <typename... ArgTs> static void link(ArgTs &&... Args) {
+ auto L = std::make_unique<LinkerImpl>(std::forward<ArgTs>(Args)...);
+
+ // Ownership of the linker is passed into the linker's doLink function to
+ // allow it to be passed on to async continuations.
+ //
+ // FIXME: Remove LTmp once we have c++17.
+ // C++17 sequencing rules guarantee that function name expressions are
+ // sequenced before arguments, so L->linkPhase1(std::move(L), ...) will be
+ // well formed.
+ auto &LTmp = *L;
+ LTmp.linkPhase1(std::move(L));
+ }
+
+private:
+ const LinkerImpl &impl() const {
+ return static_cast<const LinkerImpl &>(*this);
+ }
+
+ Error fixUpBlocks(LinkGraph &G) const override {
+ LLVM_DEBUG(dbgs() << "Fixing up blocks:\n");
+
+ for (auto *B : G.blocks()) {
+ LLVM_DEBUG(dbgs() << " " << *B << ":\n");
+
+ // Copy Block data and apply fixups.
+ LLVM_DEBUG(dbgs() << " Applying fixups.\n");
+ for (auto &E : B->edges()) {
+
+ // Skip non-relocation edges.
+ if (!E.isRelocation())
+ continue;
+
+ // Dispatch to LinkerImpl for fixup.
+ auto *BlockData = const_cast<char *>(B->getContent().data());
+ if (auto Err = impl().applyFixup(*B, E, BlockData))
+ return Err;
+ }
+ }
+
+ return Error::success();
+ }
+};
+
+/// Removes dead symbols/blocks/addressables.
+///
+/// Finds the set of symbols and addressables reachable from any symbol
+/// initially marked live. All symbols/addressables not marked live at the end
+/// of this process are removed.
+void prune(LinkGraph &G);
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE // "jitlink"
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
new file mode 100644
index 00000000000..fbbb29e9164
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
@@ -0,0 +1,133 @@
+//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/Support/Process.h"
+
+namespace llvm {
+namespace jitlink {
+
+JITLinkMemoryManager::~JITLinkMemoryManager() = default;
+JITLinkMemoryManager::Allocation::~Allocation() = default;
+
+Expected<std::unique_ptr<JITLinkMemoryManager::Allocation>>
+InProcessMemoryManager::allocate(const JITLinkDylib *JD,
+ const SegmentsRequestMap &Request) {
+
+ using AllocationMap = DenseMap<unsigned, sys::MemoryBlock>;
+
+ // Local class for allocation.
+ class IPMMAlloc : public Allocation {
+ public:
+ IPMMAlloc(AllocationMap SegBlocks) : SegBlocks(std::move(SegBlocks)) {}
+ MutableArrayRef<char> getWorkingMemory(ProtectionFlags Seg) override {
+ assert(SegBlocks.count(Seg) && "No allocation for segment");
+ return {static_cast<char *>(SegBlocks[Seg].base()),
+ SegBlocks[Seg].allocatedSize()};
+ }
+ JITTargetAddress getTargetMemory(ProtectionFlags Seg) override {
+ assert(SegBlocks.count(Seg) && "No allocation for segment");
+ return pointerToJITTargetAddress(SegBlocks[Seg].base());
+ }
+ void finalizeAsync(FinalizeContinuation OnFinalize) override {
+ OnFinalize(applyProtections());
+ }
+ Error deallocate() override {
+ if (SegBlocks.empty())
+ return Error::success();
+ void *SlabStart = SegBlocks.begin()->second.base();
+ char *SlabEnd = (char *)SlabStart;
+ for (auto &KV : SegBlocks) {
+ SlabStart = std::min(SlabStart, KV.second.base());
+ SlabEnd = std::max(SlabEnd, (char *)(KV.second.base()) +
+ KV.second.allocatedSize());
+ }
+ size_t SlabSize = SlabEnd - (char *)SlabStart;
+ assert((SlabSize % sys::Process::getPageSizeEstimate()) == 0 &&
+ "Slab size is not a multiple of page size");
+ sys::MemoryBlock Slab(SlabStart, SlabSize);
+ if (auto EC = sys::Memory::releaseMappedMemory(Slab))
+ return errorCodeToError(EC);
+ return Error::success();
+ }
+
+ private:
+ Error applyProtections() {
+ for (auto &KV : SegBlocks) {
+ auto &Prot = KV.first;
+ auto &Block = KV.second;
+ if (auto EC = sys::Memory::protectMappedMemory(Block, Prot))
+ return errorCodeToError(EC);
+ if (Prot & sys::Memory::MF_EXEC)
+ sys::Memory::InvalidateInstructionCache(Block.base(),
+ Block.allocatedSize());
+ }
+ return Error::success();
+ }
+
+ AllocationMap SegBlocks;
+ };
+
+ if (!isPowerOf2_64((uint64_t)sys::Process::getPageSizeEstimate()))
+ return make_error<StringError>("Page size is not a power of 2",
+ inconvertibleErrorCode());
+
+ AllocationMap Blocks;
+ const sys::Memory::ProtectionFlags ReadWrite =
+ static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE);
+
+ // Compute the total number of pages to allocate.
+ size_t TotalSize = 0;
+ for (auto &KV : Request) {
+ const auto &Seg = KV.second;
+
+ if (Seg.getAlignment() > sys::Process::getPageSizeEstimate())
+ return make_error<StringError>("Cannot request higher than page "
+ "alignment",
+ inconvertibleErrorCode());
+
+ TotalSize = alignTo(TotalSize, sys::Process::getPageSizeEstimate());
+ TotalSize += Seg.getContentSize();
+ TotalSize += Seg.getZeroFillSize();
+ }
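+
+  // For example, two segments needing 0x1800 and 0x400 bytes with a 0x1000
+  // byte page size give TotalSize = 0x2000 + 0x400 = 0x2400. Since
+  // allocateMappedMemory below works in whole pages (0x3000 here), the
+  // page-aligned per-segment carving further down still fits in the slab.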
+
+ // Allocate one slab to cover all the segments.
+ std::error_code EC;
+ auto SlabRemaining =
+ sys::Memory::allocateMappedMemory(TotalSize, nullptr, ReadWrite, EC);
+
+ if (EC)
+ return errorCodeToError(EC);
+
+ // Allocate segment memory from the slab.
+ for (auto &KV : Request) {
+
+ const auto &Seg = KV.second;
+
+ uint64_t SegmentSize = alignTo(Seg.getContentSize() + Seg.getZeroFillSize(),
+ sys::Process::getPageSizeEstimate());
+
+ sys::MemoryBlock SegMem(SlabRemaining.base(), SegmentSize);
+    SlabRemaining =
+        sys::MemoryBlock((char *)SlabRemaining.base() + SegmentSize,
+                         SlabRemaining.allocatedSize() - SegmentSize);
+
+ // Zero out the zero-fill memory.
+ memset(static_cast<char *>(SegMem.base()) + Seg.getContentSize(), 0,
+ Seg.getZeroFillSize());
+
+ // Record the block for this segment.
+ Blocks[KV.first] = std::move(SegMem);
+ }
+ return std::unique_ptr<InProcessMemoryManager::Allocation>(
+ new IPMMAlloc(std::move(Blocks)));
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO.cpp
new file mode 100644
index 00000000000..e9327df6da4
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO.cpp
@@ -0,0 +1,91 @@
+//===-------------- MachO.cpp - JIT linker function for MachO -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO jit-link function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SwapByteOrder.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject(MemoryBufferRef ObjectBuffer) {
+ StringRef Data = ObjectBuffer.getBuffer();
+ if (Data.size() < 4)
+ return make_error<JITLinkError>("Truncated MachO buffer \"" +
+ ObjectBuffer.getBufferIdentifier() + "\"");
+
+ uint32_t Magic;
+ memcpy(&Magic, Data.data(), sizeof(uint32_t));
+ LLVM_DEBUG({
+ dbgs() << "jitLink_MachO: magic = " << format("0x%08" PRIx32, Magic)
+ << ", identifier = \"" << ObjectBuffer.getBufferIdentifier()
+ << "\"\n";
+ });
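+
+  // MH_MAGIC / MH_CIGAM identify 32-bit MachO files, and MH_MAGIC_64
+  // (0xfeedfacf) / MH_CIGAM_64 (0xcffaedfe) their 64-bit counterparts; the
+  // CIGAM forms indicate header fields byte-swapped relative to the host.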
+
+ if (Magic == MachO::MH_MAGIC || Magic == MachO::MH_CIGAM)
+ return make_error<JITLinkError>("MachO 32-bit platforms not supported");
+ else if (Magic == MachO::MH_MAGIC_64 || Magic == MachO::MH_CIGAM_64) {
+
+ if (Data.size() < sizeof(MachO::mach_header_64))
+ return make_error<JITLinkError>("Truncated MachO buffer \"" +
+ ObjectBuffer.getBufferIdentifier() +
+ "\"");
+
+ // Read the CPU type from the header.
+ uint32_t CPUType;
+ memcpy(&CPUType, Data.data() + 4, sizeof(uint32_t));
+ if (Magic == MachO::MH_CIGAM_64)
+ CPUType = ByteSwap_32(CPUType);
+
+ LLVM_DEBUG({
+ dbgs() << "jitLink_MachO: cputype = " << format("0x%08" PRIx32, CPUType)
+ << "\n";
+ });
+
+ switch (CPUType) {
+ case MachO::CPU_TYPE_ARM64:
+ return createLinkGraphFromMachOObject_arm64(std::move(ObjectBuffer));
+ case MachO::CPU_TYPE_X86_64:
+ return createLinkGraphFromMachOObject_x86_64(std::move(ObjectBuffer));
+ }
+ return make_error<JITLinkError>("MachO-64 CPU type not valid");
+ } else
+ return make_error<JITLinkError>("Unrecognized MachO magic value");
+}
+
+void link_MachO(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+
+ switch (G->getTargetTriple().getArch()) {
+ case Triple::aarch64:
+ return link_MachO_arm64(std::move(G), std::move(Ctx));
+ case Triple::x86_64:
+ return link_MachO_x86_64(std::move(G), std::move(Ctx));
+ default:
+ Ctx->notifyFailed(make_error<JITLinkError>("MachO-64 CPU type not valid"));
+ return;
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
new file mode 100644
index 00000000000..4602154eb57
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
@@ -0,0 +1,583 @@
+//=--------- MachOLinkGraphBuilder.cpp - MachO LinkGraph builder ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic MachO LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+static const char *CommonSectionName = "__common";
+
+namespace llvm {
+namespace jitlink {
+
+MachOLinkGraphBuilder::~MachOLinkGraphBuilder() {}
+
+Expected<std::unique_ptr<LinkGraph>> MachOLinkGraphBuilder::buildGraph() {
+
+ // Sanity check: we only operate on relocatable objects.
+ if (!Obj.isRelocatableObject())
+ return make_error<JITLinkError>("Object is not a relocatable MachO");
+
+ if (auto Err = createNormalizedSections())
+ return std::move(Err);
+
+ if (auto Err = createNormalizedSymbols())
+ return std::move(Err);
+
+ if (auto Err = graphifyRegularSymbols())
+ return std::move(Err);
+
+ if (auto Err = graphifySectionsWithCustomParsers())
+ return std::move(Err);
+
+ if (auto Err = addRelocations())
+ return std::move(Err);
+
+ return std::move(G);
+}
+
+MachOLinkGraphBuilder::MachOLinkGraphBuilder(const object::MachOObjectFile &Obj,
+ Triple TT)
+ : Obj(Obj),
+ G(std::make_unique<LinkGraph>(std::string(Obj.getFileName()),
+ std::move(TT), getPointerSize(Obj),
+ getEndianness(Obj))) {}
+
+void MachOLinkGraphBuilder::addCustomSectionParser(
+ StringRef SectionName, SectionParserFunction Parser) {
+ assert(!CustomSectionParserFunctions.count(SectionName) &&
+ "Custom parser for this section already exists");
+ CustomSectionParserFunctions[SectionName] = std::move(Parser);
+}
+
+Linkage MachOLinkGraphBuilder::getLinkage(uint16_t Desc) {
+ if ((Desc & MachO::N_WEAK_DEF) || (Desc & MachO::N_WEAK_REF))
+ return Linkage::Weak;
+ return Linkage::Strong;
+}
+
+Scope MachOLinkGraphBuilder::getScope(StringRef Name, uint8_t Type) {
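+  // N_PEXT marks a private-extern symbol, and on Darwin names beginning with
+  // "l" are linker-private; both are modeled as hidden scope.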
+ if (Type & MachO::N_EXT) {
+ if ((Type & MachO::N_PEXT) || Name.startswith("l"))
+ return Scope::Hidden;
+ else
+ return Scope::Default;
+ }
+ return Scope::Local;
+}
+
+bool MachOLinkGraphBuilder::isAltEntry(const NormalizedSymbol &NSym) {
+ return NSym.Desc & MachO::N_ALT_ENTRY;
+}
+
+bool MachOLinkGraphBuilder::isDebugSection(const NormalizedSection &NSec) {
+ return (NSec.Flags & MachO::S_ATTR_DEBUG &&
+ strcmp(NSec.SegName, "__DWARF") == 0);
+}
+
+unsigned
+MachOLinkGraphBuilder::getPointerSize(const object::MachOObjectFile &Obj) {
+ return Obj.is64Bit() ? 8 : 4;
+}
+
+support::endianness
+MachOLinkGraphBuilder::getEndianness(const object::MachOObjectFile &Obj) {
+ return Obj.isLittleEndian() ? support::little : support::big;
+}
+
+Section &MachOLinkGraphBuilder::getCommonSection() {
+ if (!CommonSection) {
+ auto Prot = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_WRITE);
+ CommonSection = &G->createSection(CommonSectionName, Prot);
+ }
+ return *CommonSection;
+}
+
+Error MachOLinkGraphBuilder::createNormalizedSections() {
+ // Build normalized sections. Verifies that section data is in-range (for
+ // sections with content) and that address ranges are non-overlapping.
+
+ LLVM_DEBUG(dbgs() << "Creating normalized sections...\n");
+
+ for (auto &SecRef : Obj.sections()) {
+ NormalizedSection NSec;
+ uint32_t DataOffset = 0;
+
+ auto SecIndex = Obj.getSectionIndex(SecRef.getRawDataRefImpl());
+
+ auto Name = SecRef.getName();
+ if (!Name)
+ return Name.takeError();
+
+ if (Obj.is64Bit()) {
+ const MachO::section_64 &Sec64 =
+ Obj.getSection64(SecRef.getRawDataRefImpl());
+
+ memcpy(&NSec.SectName, &Sec64.sectname, 16);
+ NSec.SectName[16] = '\0';
+ memcpy(&NSec.SegName, Sec64.segname, 16);
+ NSec.SegName[16] = '\0';
+
+ NSec.Address = Sec64.addr;
+ NSec.Size = Sec64.size;
+ NSec.Alignment = 1ULL << Sec64.align;
+ NSec.Flags = Sec64.flags;
+ DataOffset = Sec64.offset;
+ } else {
+ const MachO::section &Sec32 = Obj.getSection(SecRef.getRawDataRefImpl());
+
+ memcpy(&NSec.SectName, &Sec32.sectname, 16);
+ NSec.SectName[16] = '\0';
+ memcpy(&NSec.SegName, Sec32.segname, 16);
+ NSec.SegName[16] = '\0';
+
+ NSec.Address = Sec32.addr;
+ NSec.Size = Sec32.size;
+ NSec.Alignment = 1ULL << Sec32.align;
+ NSec.Flags = Sec32.flags;
+ DataOffset = Sec32.offset;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " " << *Name << ": " << formatv("{0:x16}", NSec.Address)
+ << " -- " << formatv("{0:x16}", NSec.Address + NSec.Size)
+ << ", align: " << NSec.Alignment << ", index: " << SecIndex
+ << "\n";
+ });
+
+ // Get the section data if any.
+ {
+ unsigned SectionType = NSec.Flags & MachO::SECTION_TYPE;
+ if (SectionType != MachO::S_ZEROFILL &&
+ SectionType != MachO::S_GB_ZEROFILL) {
+
+ if (DataOffset + NSec.Size > Obj.getData().size())
+ return make_error<JITLinkError>(
+ "Section data extends past end of file");
+
+ NSec.Data = Obj.getData().data() + DataOffset;
+ }
+ }
+
+ // Get prot flags.
+ // FIXME: Make sure this test is correct (it's probably missing cases
+ // as-is).
+ sys::Memory::ProtectionFlags Prot;
+ if (NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS)
+ Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ else
+ Prot = static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE);
+
+ if (!isDebugSection(NSec))
+ NSec.GraphSection = &G->createSection(*Name, Prot);
+ else
+ LLVM_DEBUG({
+ dbgs() << " " << *Name
+ << " is a debug section: No graph section will be created.\n";
+ });
+
+ IndexToSection.insert(std::make_pair(SecIndex, std::move(NSec)));
+ }
+
+ std::vector<NormalizedSection *> Sections;
+ Sections.reserve(IndexToSection.size());
+ for (auto &KV : IndexToSection)
+ Sections.push_back(&KV.second);
+
+ // If we didn't end up creating any sections then bail out. The code below
+ // assumes that we have at least one section.
+ if (Sections.empty())
+ return Error::success();
+
+ llvm::sort(Sections,
+ [](const NormalizedSection *LHS, const NormalizedSection *RHS) {
+ assert(LHS && RHS && "Null section?");
+ if (LHS->Address != RHS->Address)
+ return LHS->Address < RHS->Address;
+ return LHS->Size < RHS->Size;
+ });
+
+ for (unsigned I = 0, E = Sections.size() - 1; I != E; ++I) {
+ auto &Cur = *Sections[I];
+ auto &Next = *Sections[I + 1];
+ if (Next.Address < Cur.Address + Cur.Size)
+ return make_error<JITLinkError>(
+ "Address range for section " +
+ formatv("\"{0}/{1}\" [ {2:x16} -- {3:x16} ] ", Cur.SegName,
+ Cur.SectName, Cur.Address, Cur.Address + Cur.Size) +
+ "overlaps section \"" + Next.SegName + "/" + Next.SectName + "\"" +
+ formatv("\"{0}/{1}\" [ {2:x16} -- {3:x16} ] ", Next.SegName,
+ Next.SectName, Next.Address, Next.Address + Next.Size));
+ }
+
+ return Error::success();
+}
+
+Error MachOLinkGraphBuilder::createNormalizedSymbols() {
+ LLVM_DEBUG(dbgs() << "Creating normalized symbols...\n");
+
+ for (auto &SymRef : Obj.symbols()) {
+
+ unsigned SymbolIndex = Obj.getSymbolIndex(SymRef.getRawDataRefImpl());
+ uint64_t Value;
+ uint32_t NStrX;
+ uint8_t Type;
+ uint8_t Sect;
+ uint16_t Desc;
+
+ if (Obj.is64Bit()) {
+ const MachO::nlist_64 &NL64 =
+ Obj.getSymbol64TableEntry(SymRef.getRawDataRefImpl());
+ Value = NL64.n_value;
+ NStrX = NL64.n_strx;
+ Type = NL64.n_type;
+ Sect = NL64.n_sect;
+ Desc = NL64.n_desc;
+ } else {
+ const MachO::nlist &NL32 =
+ Obj.getSymbolTableEntry(SymRef.getRawDataRefImpl());
+ Value = NL32.n_value;
+ NStrX = NL32.n_strx;
+ Type = NL32.n_type;
+ Sect = NL32.n_sect;
+ Desc = NL32.n_desc;
+ }
+
+ // Skip stabs.
+ // FIXME: Are there other symbols we should be skipping?
+ if (Type & MachO::N_STAB)
+ continue;
+
+ Optional<StringRef> Name;
+ if (NStrX) {
+ if (auto NameOrErr = SymRef.getName())
+ Name = *NameOrErr;
+ else
+ return NameOrErr.takeError();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " ";
+ if (!Name)
+ dbgs() << "<anonymous symbol>";
+ else
+ dbgs() << *Name;
+ dbgs() << ": value = " << formatv("{0:x16}", Value)
+ << ", type = " << formatv("{0:x2}", Type)
+ << ", desc = " << formatv("{0:x4}", Desc) << ", sect = ";
+ if (Sect)
+ dbgs() << static_cast<unsigned>(Sect - 1);
+ else
+ dbgs() << "none";
+ dbgs() << "\n";
+ });
+
+ // If this symbol has a section, sanity check that the addresses line up.
+ if (Sect != 0) {
+ auto NSec = findSectionByIndex(Sect - 1);
+ if (!NSec)
+ return NSec.takeError();
+
+ if (Value < NSec->Address || Value > NSec->Address + NSec->Size)
+ return make_error<JITLinkError>("Symbol address does not fall within "
+ "section");
+
+ if (!NSec->GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping: Symbol is in section " << NSec->SegName << "/"
+ << NSec->SectName
+ << " which has no associated graph section.\n";
+ });
+ continue;
+ }
+ }
+
+ IndexToSymbol[SymbolIndex] =
+ &createNormalizedSymbol(*Name, Value, Type, Sect, Desc,
+ getLinkage(Desc), getScope(*Name, Type));
+ }
+
+ return Error::success();
+}
+
+void MachOLinkGraphBuilder::addSectionStartSymAndBlock(
+ Section &GraphSec, uint64_t Address, const char *Data, uint64_t Size,
+ uint32_t Alignment, bool IsLive) {
+ Block &B =
+ Data ? G->createContentBlock(GraphSec, StringRef(Data, Size), Address,
+ Alignment, 0)
+ : G->createZeroFillBlock(GraphSec, Size, Address, Alignment, 0);
+ auto &Sym = G->addAnonymousSymbol(B, 0, Size, false, IsLive);
+ assert(!AddrToCanonicalSymbol.count(Sym.getAddress()) &&
+ "Anonymous block start symbol clashes with existing symbol address");
+ AddrToCanonicalSymbol[Sym.getAddress()] = &Sym;
+}
+
+Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
+
+ LLVM_DEBUG(dbgs() << "Creating graph symbols...\n");
+
+ /// We only have 256 section indexes: Use a vector rather than a map.
+ std::vector<std::vector<NormalizedSymbol *>> SecIndexToSymbols;
+ SecIndexToSymbols.resize(256);
+
+ // Create commons, externs, and absolutes, and partition all other symbols by
+ // section.
+ for (auto &KV : IndexToSymbol) {
+ auto &NSym = *KV.second;
+
+ switch (NSym.Type & MachO::N_TYPE) {
+ case MachO::N_UNDF:
+ if (NSym.Value) {
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous common symbol at index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addCommonSymbol(
+ *NSym.Name, NSym.S, getCommonSection(), 0, NSym.Value,
+ 1ull << MachO::GET_COMM_ALIGN(NSym.Desc),
+ NSym.Desc & MachO::N_NO_DEAD_STRIP);
+ } else {
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous external symbol at "
+ "index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addExternalSymbol(
+ *NSym.Name, 0,
+ NSym.Desc & MachO::N_WEAK_REF ? Linkage::Weak : Linkage::Strong);
+ }
+ break;
+ case MachO::N_ABS:
+ if (!NSym.Name)
+ return make_error<JITLinkError>("Anonymous absolute symbol at index " +
+ Twine(KV.first));
+ NSym.GraphSymbol = &G->addAbsoluteSymbol(
+ *NSym.Name, NSym.Value, 0, Linkage::Strong, Scope::Default,
+ NSym.Desc & MachO::N_NO_DEAD_STRIP);
+ break;
+ case MachO::N_SECT:
+ SecIndexToSymbols[NSym.Sect - 1].push_back(&NSym);
+ break;
+ case MachO::N_PBUD:
+ return make_error<JITLinkError>(
+ "Unupported N_PBUD symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ case MachO::N_INDR:
+ return make_error<JITLinkError>(
+ "Unupported N_INDR symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ default:
+ return make_error<JITLinkError>(
+ "Unrecognized symbol type " + Twine(NSym.Type & MachO::N_TYPE) +
+ " for symbol " +
+ (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+ " at index " + Twine(KV.first));
+ }
+ }
+
+ // Loop over sections performing regular graphification for those that
+ // don't have custom parsers.
+ for (auto &KV : IndexToSection) {
+ auto SecIndex = KV.first;
+ auto &NSec = KV.second;
+
+ if (!NSec.GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " " << NSec.SegName << "/" << NSec.SectName
+ << " has no graph section. Skipping.\n";
+ });
+ continue;
+ }
+
+ // Skip sections with custom parsers.
+ if (CustomSectionParserFunctions.count(NSec.GraphSection->getName())) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping section " << NSec.GraphSection->getName()
+ << " as it has a custom parser.\n";
+ });
+ continue;
+ } else
+ LLVM_DEBUG({
+ dbgs() << " Processing section " << NSec.GraphSection->getName()
+ << "...\n";
+ });
+
+ bool SectionIsNoDeadStrip = NSec.Flags & MachO::S_ATTR_NO_DEAD_STRIP;
+ bool SectionIsText = NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS;
+
+ auto &SecNSymStack = SecIndexToSymbols[SecIndex];
+
+ // If this section is non-empty but there are no symbols covering it then
+ // create one block and anonymous symbol to cover the entire section.
+ if (SecNSymStack.empty()) {
+ if (NSec.Size > 0) {
+ LLVM_DEBUG({
+ dbgs() << " Section non-empty, but contains no symbols. "
+ "Creating anonymous block to cover "
+ << formatv("{0:x16}", NSec.Address) << " -- "
+ << formatv("{0:x16}", NSec.Address + NSec.Size) << "\n";
+ });
+ addSectionStartSymAndBlock(*NSec.GraphSection, NSec.Address, NSec.Data,
+ NSec.Size, NSec.Alignment,
+ SectionIsNoDeadStrip);
+ } else
+ LLVM_DEBUG({
+ dbgs() << " Section empty and contains no symbols. Skipping.\n";
+ });
+ continue;
+ }
+
+    // Sort the symbol stack by address, alt-entry status, scope, and name.
+ // We sort in reverse order so that symbols will be visited in the right
+ // order when we pop off the stack below.
+ llvm::sort(SecNSymStack, [](const NormalizedSymbol *LHS,
+ const NormalizedSymbol *RHS) {
+ if (LHS->Value != RHS->Value)
+ return LHS->Value > RHS->Value;
+ if (isAltEntry(*LHS) != isAltEntry(*RHS))
+ return isAltEntry(*RHS);
+ if (LHS->S != RHS->S)
+ return static_cast<uint8_t>(LHS->S) < static_cast<uint8_t>(RHS->S);
+ return LHS->Name < RHS->Name;
+ });
+
+    // The first symbol in a section cannot be an alt-entry symbol.
+ if (!SecNSymStack.empty() && isAltEntry(*SecNSymStack.back()))
+ return make_error<JITLinkError>(
+ "First symbol in " + NSec.GraphSection->getName() + " is alt-entry");
+
+ // If the section is non-empty but there is no symbol covering the start
+ // address then add an anonymous one.
+ if (SecNSymStack.back()->Value != NSec.Address) {
+ auto AnonBlockSize = SecNSymStack.back()->Value - NSec.Address;
+ LLVM_DEBUG({
+ dbgs() << " Section start not covered by symbol. "
+ << "Creating anonymous block to cover [ "
+ << formatv("{0:x16}", NSec.Address) << " -- "
+ << formatv("{0:x16}", NSec.Address + AnonBlockSize) << " ]\n";
+ });
+ addSectionStartSymAndBlock(*NSec.GraphSection, NSec.Address, NSec.Data,
+ AnonBlockSize, NSec.Alignment,
+ SectionIsNoDeadStrip);
+ }
+
+ // Visit section symbols in order by popping off the reverse-sorted stack,
+ // building blocks for each alt-entry chain and creating symbols as we go.
+ while (!SecNSymStack.empty()) {
+ SmallVector<NormalizedSymbol *, 8> BlockSyms;
+
+ BlockSyms.push_back(SecNSymStack.back());
+ SecNSymStack.pop_back();
+ while (!SecNSymStack.empty() &&
+ (isAltEntry(*SecNSymStack.back()) ||
+ SecNSymStack.back()->Value == BlockSyms.back()->Value)) {
+ BlockSyms.push_back(SecNSymStack.back());
+ SecNSymStack.pop_back();
+ }
+
+      // BlockSyms now contains the block symbols in reverse canonical order.
+ JITTargetAddress BlockStart = BlockSyms.front()->Value;
+ JITTargetAddress BlockEnd = SecNSymStack.empty()
+ ? NSec.Address + NSec.Size
+ : SecNSymStack.back()->Value;
+ JITTargetAddress BlockOffset = BlockStart - NSec.Address;
+ JITTargetAddress BlockSize = BlockEnd - BlockStart;
+
+ LLVM_DEBUG({
+ dbgs() << " Creating block for " << formatv("{0:x16}", BlockStart)
+ << " -- " << formatv("{0:x16}", BlockEnd) << ": "
+ << NSec.GraphSection->getName() << " + "
+ << formatv("{0:x16}", BlockOffset) << " with "
+ << BlockSyms.size() << " symbol(s)...\n";
+ });
+
+ Block &B =
+ NSec.Data
+ ? G->createContentBlock(
+ *NSec.GraphSection,
+ StringRef(NSec.Data + BlockOffset, BlockSize), BlockStart,
+ NSec.Alignment, BlockStart % NSec.Alignment)
+ : G->createZeroFillBlock(*NSec.GraphSection, BlockSize,
+ BlockStart, NSec.Alignment,
+ BlockStart % NSec.Alignment);
+
+ Optional<JITTargetAddress> LastCanonicalAddr;
+ JITTargetAddress SymEnd = BlockEnd;
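+      // BlockSyms holds this block's symbols in ascending address order, so
+      // popping from the back visits them from the highest address down.
+      // SymEnd starts at the block end and, as the walk moves toward the
+      // block start, shrinks to the canonical address visited just before.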
+ while (!BlockSyms.empty()) {
+ auto &NSym = *BlockSyms.back();
+ BlockSyms.pop_back();
+
+ bool SymLive =
+ (NSym.Desc & MachO::N_NO_DEAD_STRIP) || SectionIsNoDeadStrip;
+
+ LLVM_DEBUG({
+ dbgs() << " " << formatv("{0:x16}", NSym.Value) << " -- "
+ << formatv("{0:x16}", SymEnd) << ": ";
+ if (!NSym.Name)
+ dbgs() << "<anonymous symbol>";
+ else
+ dbgs() << NSym.Name;
+ if (SymLive)
+ dbgs() << " [no-dead-strip]";
+ if (LastCanonicalAddr == NSym.Value)
+ dbgs() << " [non-canonical]";
+ dbgs() << "\n";
+ });
+
+ auto &Sym =
+ NSym.Name
+ ? G->addDefinedSymbol(B, NSym.Value - BlockStart, *NSym.Name,
+ SymEnd - NSym.Value, NSym.L, NSym.S,
+ SectionIsText, SymLive)
+ : G->addAnonymousSymbol(B, NSym.Value - BlockStart,
+ SymEnd - NSym.Value, SectionIsText,
+ SymLive);
+ NSym.GraphSymbol = &Sym;
+ if (LastCanonicalAddr != Sym.getAddress()) {
+ if (LastCanonicalAddr)
+ SymEnd = *LastCanonicalAddr;
+ LastCanonicalAddr = Sym.getAddress();
+ setCanonicalSymbol(Sym);
+ }
+ }
+ }
+ }
+
+ return Error::success();
+}
+
+Error MachOLinkGraphBuilder::graphifySectionsWithCustomParsers() {
+ // Graphify special sections.
+ for (auto &KV : IndexToSection) {
+ auto &NSec = KV.second;
+
+ // Skip non-graph sections.
+ if (!NSec.GraphSection)
+ continue;
+
+ auto HI = CustomSectionParserFunctions.find(NSec.GraphSection->getName());
+ if (HI != CustomSectionParserFunctions.end()) {
+ auto &Parse = HI->second;
+ if (auto Err = Parse(NSec))
+ return Err;
+ }
+ }
+
+ return Error::success();
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
new file mode 100644
index 00000000000..26e6859de91
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
@@ -0,0 +1,223 @@
+//===----- MachOLinkGraphBuilder.h - MachO LinkGraph builder ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic MachO LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
+#define LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Object/MachO.h"
+
+#include "EHFrameSupportImpl.h"
+#include "JITLinkGeneric.h"
+
+#include <list>
+
+namespace llvm {
+namespace jitlink {
+
+class MachOLinkGraphBuilder {
+public:
+ virtual ~MachOLinkGraphBuilder();
+ Expected<std::unique_ptr<LinkGraph>> buildGraph();
+
+protected:
+
+ struct NormalizedSymbol {
+ friend class MachOLinkGraphBuilder;
+
+ private:
+ NormalizedSymbol(Optional<StringRef> Name, uint64_t Value, uint8_t Type,
+ uint8_t Sect, uint16_t Desc, Linkage L, Scope S)
+ : Name(Name), Value(Value), Type(Type), Sect(Sect), Desc(Desc), L(L),
+ S(S) {
+ assert((!Name || !Name->empty()) && "Name must be none or non-empty");
+ }
+
+ public:
+ NormalizedSymbol(const NormalizedSymbol &) = delete;
+ NormalizedSymbol &operator=(const NormalizedSymbol &) = delete;
+ NormalizedSymbol(NormalizedSymbol &&) = delete;
+ NormalizedSymbol &operator=(NormalizedSymbol &&) = delete;
+
+ Optional<StringRef> Name;
+ uint64_t Value = 0;
+ uint8_t Type = 0;
+ uint8_t Sect = 0;
+ uint16_t Desc = 0;
+ Linkage L = Linkage::Strong;
+ Scope S = Scope::Default;
+ Symbol *GraphSymbol = nullptr;
+ };
+
+ // Normalized section representation. Section and segment names are guaranteed
+ // to be null-terminated, hence the extra bytes on SegName and SectName.
+ class NormalizedSection {
+ friend class MachOLinkGraphBuilder;
+
+ private:
+ NormalizedSection() = default;
+
+ public:
+ char SectName[17];
+ char SegName[17];
+ uint64_t Address = 0;
+ uint64_t Size = 0;
+ uint64_t Alignment = 0;
+ uint32_t Flags = 0;
+ const char *Data = nullptr;
+ Section *GraphSection = nullptr;
+ };
+
+ using SectionParserFunction = std::function<Error(NormalizedSection &S)>;
+
+ MachOLinkGraphBuilder(const object::MachOObjectFile &Obj, Triple TT);
+
+ LinkGraph &getGraph() const { return *G; }
+
+ const object::MachOObjectFile &getObject() const { return Obj; }
+
+ void addCustomSectionParser(StringRef SectionName,
+ SectionParserFunction Parse);
+
+ virtual Error addRelocations() = 0;
+
+ /// Create a symbol.
+ template <typename... ArgTs>
+ NormalizedSymbol &createNormalizedSymbol(ArgTs &&... Args) {
+ NormalizedSymbol *Sym = reinterpret_cast<NormalizedSymbol *>(
+ Allocator.Allocate<NormalizedSymbol>());
+ new (Sym) NormalizedSymbol(std::forward<ArgTs>(Args)...);
+ return *Sym;
+ }
+
+ /// Index is zero-based (MachO section indexes are usually one-based) and
+ /// assumed to be in-range. Client is responsible for checking.
+ NormalizedSection &getSectionByIndex(unsigned Index) {
+ auto I = IndexToSection.find(Index);
+ assert(I != IndexToSection.end() && "No section recorded at index");
+ return I->second;
+ }
+
+ /// Try to get the section at the given index. Will return an error if the
+ /// given index is out of range, or if no section has been added for the given
+ /// index.
+ Expected<NormalizedSection &> findSectionByIndex(unsigned Index) {
+ auto I = IndexToSection.find(Index);
+ if (I == IndexToSection.end())
+ return make_error<JITLinkError>("No section recorded for index " +
+ formatv("{0:d}", Index));
+ return I->second;
+ }
+
+ /// Try to get the symbol at the given index. Will return an error if the
+ /// given index is out of range, or if no symbol has been added for the given
+ /// index.
+ Expected<NormalizedSymbol &> findSymbolByIndex(uint64_t Index) {
+ if (Index >= IndexToSymbol.size())
+ return make_error<JITLinkError>("Symbol index out of range");
+ auto *Sym = IndexToSymbol[Index];
+ if (!Sym)
+ return make_error<JITLinkError>("No symbol at index " +
+ formatv("{0:d}", Index));
+ return *Sym;
+ }
+
+ /// Returns the symbol with the highest address not greater than the search
+ /// address, or null if no such symbol exists.
+ Symbol *getSymbolByAddress(JITTargetAddress Address) {
+ auto I = AddrToCanonicalSymbol.upper_bound(Address);
+ if (I == AddrToCanonicalSymbol.begin())
+ return nullptr;
+ return std::prev(I)->second;
+ }
+
+ /// Returns the symbol with the highest address not greater than the search
+ /// address, or an error if no such symbol exists.
+ Expected<Symbol &> findSymbolByAddress(JITTargetAddress Address) {
+ auto *Sym = getSymbolByAddress(Address);
+ if (Sym)
+ if (Address < Sym->getAddress() + Sym->getSize())
+ return *Sym;
+ return make_error<JITLinkError>("No symbol covering address " +
+ formatv("{0:x16}", Address));
+ }
+
+ static Linkage getLinkage(uint16_t Desc);
+ static Scope getScope(StringRef Name, uint8_t Type);
+ static bool isAltEntry(const NormalizedSymbol &NSym);
+
+ static bool isDebugSection(const NormalizedSection &NSec);
+
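+  /// Unpack the raw any_relocation_info words into the classic
+  /// relocation_info layout: a 24-bit symbol number plus the pcrel, length,
+  /// extern and type bitfields packed into the second word.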
+ MachO::relocation_info
+ getRelocationInfo(const object::relocation_iterator RelItr) {
+ MachO::any_relocation_info ARI =
+ getObject().getRelocation(RelItr->getRawDataRefImpl());
+ MachO::relocation_info RI;
+ RI.r_address = ARI.r_word0;
+ RI.r_symbolnum = ARI.r_word1 & 0xffffff;
+ RI.r_pcrel = (ARI.r_word1 >> 24) & 1;
+ RI.r_length = (ARI.r_word1 >> 25) & 3;
+ RI.r_extern = (ARI.r_word1 >> 27) & 1;
+ RI.r_type = (ARI.r_word1 >> 28);
+ return RI;
+ }
+
+private:
+ static unsigned getPointerSize(const object::MachOObjectFile &Obj);
+ static support::endianness getEndianness(const object::MachOObjectFile &Obj);
+
+ void setCanonicalSymbol(Symbol &Sym) {
+ auto *&CanonicalSymEntry = AddrToCanonicalSymbol[Sym.getAddress()];
+ // There should be no symbol at this address, or, if there is,
+ // it should be a zero-sized symbol from an empty section (which
+ // we can safely override).
+ assert((!CanonicalSymEntry || CanonicalSymEntry->getSize() == 0) &&
+ "Duplicate canonical symbol at address");
+ CanonicalSymEntry = &Sym;
+ }
+
+ Section &getCommonSection();
+ void addSectionStartSymAndBlock(Section &GraphSec, uint64_t Address,
+ const char *Data, uint64_t Size,
+ uint32_t Alignment, bool IsLive);
+
+ Error createNormalizedSections();
+ Error createNormalizedSymbols();
+
+ /// Create graph blocks and symbols for externals, absolutes, commons and
+ /// all defined symbols in sections without custom parsers.
+ Error graphifyRegularSymbols();
+
+ /// Create graph blocks and symbols for all sections.
+ Error graphifySectionsWithCustomParsers();
+
+ // Put the BumpPtrAllocator first so that we don't free any of the underlying
+ // memory until the Symbol/Addressable destructors have been run.
+ BumpPtrAllocator Allocator;
+
+ const object::MachOObjectFile &Obj;
+ std::unique_ptr<LinkGraph> G;
+
+ DenseMap<unsigned, NormalizedSection> IndexToSection;
+ Section *CommonSection = nullptr;
+
+ DenseMap<uint32_t, NormalizedSymbol *> IndexToSymbol;
+ std::map<JITTargetAddress, Symbol *> AddrToCanonicalSymbol;
+ StringMap<SectionParserFunction> CustomSectionParserFunctions;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
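The header above is the extension point for new MachO targets: a concrete
builder supplies a Triple, implements addRelocations(), and can register
custom section parsers. A minimal sketch of the pattern follows; the target
name, triple, and section name are hypothetical, not part of this commit:

class MachOLinkGraphBuilder_myarch : public MachOLinkGraphBuilder {
public:
  MachOLinkGraphBuilder_myarch(const object::MachOObjectFile &Obj)
      : MachOLinkGraphBuilder(Obj, Triple("myarch-apple-darwin")) {
    // Optionally take over graphification of a special section.
    addCustomSectionParser("__my_section", [](NormalizedSection &S) {
      // Build blocks and symbols for S by hand here.
      return Error::success();
    });
  }

private:
  Error addRelocations() override {
    // Walk getObject().sections() and their relocation iterators, decode
    // each relocation_info, and add an edge to the graph via
    // Block::addEdge(), as the arm64 and x86-64 builders below do.
    return Error::success();
  }
};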
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
new file mode 100644
index 00000000000..8366e965853
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
@@ -0,0 +1,747 @@
+//===---- MachO_arm64.cpp - JIT linker implementation for MachO/arm64 -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/arm64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+
+#include "BasicGOTAndStubsBuilder.h"
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::MachO_arm64_Edges;
+
+namespace {
+
+class MachOLinkGraphBuilder_arm64 : public MachOLinkGraphBuilder {
+public:
+ MachOLinkGraphBuilder_arm64(const object::MachOObjectFile &Obj)
+ : MachOLinkGraphBuilder(Obj, Triple("arm64-apple-darwin")),
+ NumSymbols(Obj.getSymtabLoadCommand().nsyms) {}
+
+private:
+ static Expected<MachOARM64RelocationKind>
+ getRelocationKind(const MachO::relocation_info &RI) {
+ switch (RI.r_type) {
+ case MachO::ARM64_RELOC_UNSIGNED:
+ if (!RI.r_pcrel) {
+ if (RI.r_length == 3)
+ return RI.r_extern ? Pointer64 : Pointer64Anon;
+ else if (RI.r_length == 2)
+ return Pointer32;
+ }
+ break;
+ case MachO::ARM64_RELOC_SUBTRACTOR:
+ // SUBTRACTOR must be non-pc-rel, extern, with length 2 or 3.
+ // Initially represent SUBTRACTOR relocations with 'Delta<W>'.
+ // They may be turned into NegDelta<W> by parsePairRelocation.
+ if (!RI.r_pcrel && RI.r_extern) {
+ if (RI.r_length == 2)
+ return Delta32;
+ else if (RI.r_length == 3)
+ return Delta64;
+ }
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Branch26;
+ break;
+ case MachO::ARM64_RELOC_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Page21;
+ break;
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PageOffset12;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return GOTPage21;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return GOTPageOffset12;
+ break;
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PointerToGOT;
+ break;
+ case MachO::ARM64_RELOC_ADDEND:
+ if (!RI.r_pcrel && !RI.r_extern && RI.r_length == 2)
+ return PairedAddend;
+ break;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported arm64 relocation: address=" +
+ formatv("{0:x8}", RI.r_address) +
+ ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+ ", kind=" + formatv("{0:x1}", RI.r_type) +
+ ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+ ", extern=" + (RI.r_extern ? "true" : "false") +
+ ", length=" + formatv("{0:d}", RI.r_length));
+ }
+
+ using PairRelocInfo =
+ std::tuple<MachOARM64RelocationKind, Symbol *, uint64_t>;
+
+ // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+ // returns the edge kind and addend to be used.
+ Expected<PairRelocInfo>
+ parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
+ const MachO::relocation_info &SubRI,
+ JITTargetAddress FixupAddress, const char *FixupContent,
+ object::relocation_iterator &UnsignedRelItr,
+ object::relocation_iterator &RelEnd) {
+ using namespace support;
+
+ assert(((SubtractorKind == Delta32 && SubRI.r_length == 2) ||
+ (SubtractorKind == Delta64 && SubRI.r_length == 3)) &&
+ "Subtractor kind should match length");
+ assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+ assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+ if (UnsignedRelItr == RelEnd)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR without paired "
+ "UNSIGNED relocation");
+
+ auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+ if (SubRI.r_address != UnsignedRI.r_address)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR and paired UNSIGNED "
+ "point to different addresses");
+
+ if (SubRI.r_length != UnsignedRI.r_length)
+ return make_error<JITLinkError>("length of arm64 SUBTRACTOR and paired "
+ "UNSIGNED reloc must match");
+
+ Symbol *FromSymbol;
+ if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+ FromSymbol = FromSymbolOrErr->GraphSymbol;
+ else
+ return FromSymbolOrErr.takeError();
+
+ // Read the current fixup value.
+ uint64_t FixupValue = 0;
+ if (SubRI.r_length == 3)
+ FixupValue = *(const little64_t *)FixupContent;
+ else
+ FixupValue = *(const little32_t *)FixupContent;
+
+ // Find 'ToSymbol' using symbol number or address, depending on whether the
+ // paired UNSIGNED relocation is extern.
+ Symbol *ToSymbol = nullptr;
+ if (UnsignedRI.r_extern) {
+ // Find target symbol by symbol index.
+ if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+ ToSymbol = ToSymbolOrErr->GraphSymbol;
+ else
+ return ToSymbolOrErr.takeError();
+ } else {
+ auto ToSymbolSec = findSectionByIndex(UnsignedRI.r_symbolnum - 1);
+ if (!ToSymbolSec)
+ return ToSymbolSec.takeError();
+ ToSymbol = getSymbolByAddress(ToSymbolSec->Address);
+ assert(ToSymbol && "No symbol for section");
+ FixupValue -= ToSymbol->getAddress();
+ }
+
+ MachOARM64RelocationKind DeltaKind;
+ Symbol *TargetSymbol;
+ uint64_t Addend;
+ if (&BlockToFix == &FromSymbol->getAddressable()) {
+ TargetSymbol = ToSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? Delta64 : Delta32;
+ Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+ // FIXME: handle extern 'from'.
+ } else if (&BlockToFix == &ToSymbol->getAddressable()) {
+ TargetSymbol = &*FromSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? NegDelta64 : NegDelta32;
+ Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+ } else {
+ // BlockToFix was neither FromSymbol nor ToSymbol.
+ return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+ "either 'A' or 'B' (or a symbol in one "
+ "of their alt-entry groups)");
+ }
+
+ return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+ }
+
+ Error addRelocations() override {
+ using namespace support;
+ auto &Obj = getObject();
+
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ for (auto &S : Obj.sections()) {
+
+ JITTargetAddress SectionAddress = S.getAddress();
+
+      // Skip relocations in virtual sections.
+ if (S.isVirtual()) {
+ if (S.relocation_begin() != S.relocation_end())
+ return make_error<JITLinkError>("Virtual section contains "
+ "relocations");
+ continue;
+ }
+
+      // Skip relocations for debug sections.
+ {
+ auto &NSec =
+ getSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
+ if (!NSec.GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping relocations for MachO section "
+ << NSec.SegName << "/" << NSec.SectName
+ << " which has no associated graph section\n";
+ });
+ continue;
+ }
+ }
+
+ for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+ RelItr != RelEnd; ++RelItr) {
+
+ MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+ // Sanity check the relocation kind.
+ auto Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ // Find the address of the value to fix up.
+ JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+
+ LLVM_DEBUG({
+ auto &NSec =
+ getSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
+ dbgs() << " " << NSec.SectName << " + "
+ << formatv("{0:x8}", RI.r_address) << ":\n";
+ });
+
+ // Find the block that the fixup points to.
+ Block *BlockToFix = nullptr;
+ {
+ auto SymbolToFixOrErr = findSymbolByAddress(FixupAddress);
+ if (!SymbolToFixOrErr)
+ return SymbolToFixOrErr.takeError();
+ BlockToFix = &SymbolToFixOrErr->getBlock();
+ }
+
+ if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
+ BlockToFix->getAddress() + BlockToFix->getContent().size())
+ return make_error<JITLinkError>(
+ "Relocation content extends past end of fixup block");
+
+ // Get a pointer to the fixup content.
+ const char *FixupContent = BlockToFix->getContent().data() +
+ (FixupAddress - BlockToFix->getAddress());
+
+ // The target symbol and addend will be populated by the switch below.
+ Symbol *TargetSymbol = nullptr;
+ uint64_t Addend = 0;
+
+ if (*Kind == PairedAddend) {
+ // If this is an Addend relocation then process it and move to the
+ // paired reloc.
+
+ Addend = SignExtend64(RI.r_symbolnum, 24);
+
+ if (RelItr == RelEnd)
+ return make_error<JITLinkError>("Unpaired Addend reloc at " +
+ formatv("{0:x16}", FixupAddress));
+ ++RelItr;
+ RI = getRelocationInfo(RelItr);
+
+ Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ if (*Kind != Branch26 && *Kind != Page21 && *Kind != PageOffset12)
+ return make_error<JITLinkError>(
+ "Invalid relocation pair: Addend + " +
+ getMachOARM64RelocationKindName(*Kind));
+
+ LLVM_DEBUG({
+ dbgs() << " Addend: value = " << formatv("{0:x6}", Addend)
+ << ", pair is " << getMachOARM64RelocationKindName(*Kind)
+ << "\n";
+ });
+
+ // Find the address of the value to fix up.
+ JITTargetAddress PairedFixupAddress =
+ SectionAddress + (uint32_t)RI.r_address;
+ if (PairedFixupAddress != FixupAddress)
+ return make_error<JITLinkError>("Paired relocation points at "
+ "different target");
+ }
+
+ switch (*Kind) {
+ case Branch26: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0x7fffffff) != 0x14000000)
+ return make_error<JITLinkError>("BRANCH26 target is not a B or BL "
+ "instruction with a zero addend");
+ break;
+ }
+ case Pointer32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ break;
+ case Pointer64:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle64_t *)FixupContent;
+ break;
+ case Pointer64Anon: {
+ JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case Page21:
+ case GOTPage21: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xffffffe0) != 0x90000000)
+ return make_error<JITLinkError>("PAGE21/GOTPAGE21 target is not an "
+ "ADRP instruction with a zero "
+ "addend");
+ break;
+ }
+ case PageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ uint32_t EncodedAddend = (Instr & 0x003FFC00) >> 10;
+ if (EncodedAddend != 0)
+ return make_error<JITLinkError>("GOTPAGEOFF12 target has non-zero "
+ "encoded addend");
+ break;
+ }
+ case GOTPageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xfffffc00) != 0xf9400000)
+ return make_error<JITLinkError>("GOTPAGEOFF12 target is not an LDR "
+ "immediate instruction with a zero "
+ "addend");
+ break;
+ }
+ case PointerToGOT:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ break;
+ case Delta32:
+ case Delta64: {
+ // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+ // parsePairRelocation handles the paired reloc, and returns the
+ // edge kind to be used (either Delta32/Delta64, or
+ // NegDelta32/NegDelta64, depending on the direction of the
+ // subtraction) along with the addend.
+ auto PairInfo =
+ parsePairRelocation(*BlockToFix, *Kind, RI, FixupAddress,
+ FixupContent, ++RelItr, RelEnd);
+ if (!PairInfo)
+ return PairInfo.takeError();
+ std::tie(*Kind, TargetSymbol, Addend) = *PairInfo;
+ assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+ break;
+ }
+ default:
+ llvm_unreachable("Special relocation kind should not appear in "
+ "mach-o file");
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " ";
+ Edge GE(*Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE,
+ getMachOARM64RelocationKindName(*Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(*Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+
+ unsigned NumSymbols = 0;
+};
+
+class MachO_arm64_GOTAndStubsBuilder
+ : public BasicGOTAndStubsBuilder<MachO_arm64_GOTAndStubsBuilder> {
+public:
+ MachO_arm64_GOTAndStubsBuilder(LinkGraph &G)
+ : BasicGOTAndStubsBuilder<MachO_arm64_GOTAndStubsBuilder>(G) {}
+
+ bool isGOTEdge(Edge &E) const {
+ return E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12 ||
+ E.getKind() == PointerToGOT;
+ }
+
+ Symbol &createGOTEntry(Symbol &Target) {
+ auto &GOTEntryBlock = G.createContentBlock(
+ getGOTSection(), getGOTEntryBlockContent(), 0, 8, 0);
+ GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
+ return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
+ }
+
+ void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+ if (E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12) {
+ // Update the target, but leave the edge addend as-is.
+ E.setTarget(GOTEntry);
+ } else if (E.getKind() == PointerToGOT) {
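+      // Rewrite PointerToGOT as a 32-bit delta from the fixup address to
+      // the GOT entry.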
+ E.setTarget(GOTEntry);
+ E.setKind(Delta32);
+ } else
+ llvm_unreachable("Not a GOT edge?");
+ }
+
+ bool isExternalBranchEdge(Edge &E) {
+ return E.getKind() == Branch26 && !E.getTarget().isDefined();
+ }
+
+ Symbol &createStub(Symbol &Target) {
+ auto &StubContentBlock =
+ G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 1, 0);
+ // Re-use GOT entries for stub targets.
+ auto &GOTEntrySymbol = getGOTEntrySymbol(Target);
+ StubContentBlock.addEdge(LDRLiteral19, 0, GOTEntrySymbol, 0);
+ return G.addAnonymousSymbol(StubContentBlock, 0, 8, true, false);
+ }
+
+ void fixExternalBranchEdge(Edge &E, Symbol &Stub) {
+ assert(E.getKind() == Branch26 && "Not a Branch32 edge?");
+ assert(E.getAddend() == 0 && "Branch32 edge has non-zero addend?");
+ E.setTarget(Stub);
+ }
+
+private:
+ Section &getGOTSection() {
+ if (!GOTSection)
+ GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
+ return *GOTSection;
+ }
+
+ Section &getStubsSection() {
+ if (!StubsSection) {
+ auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ StubsSection = &G.createSection("$__STUBS", StubsProt);
+ }
+ return *StubsSection;
+ }
+
+ StringRef getGOTEntryBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(NullGOTEntryContent),
+ sizeof(NullGOTEntryContent));
+ }
+
+ StringRef getStubBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(StubContent),
+ sizeof(StubContent));
+ }
+
+ static const uint8_t NullGOTEntryContent[8];
+ static const uint8_t StubContent[8];
+ Section *GOTSection = nullptr;
+ Section *StubsSection = nullptr;
+};
+
+const uint8_t MachO_arm64_GOTAndStubsBuilder::NullGOTEntryContent[8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+const uint8_t MachO_arm64_GOTAndStubsBuilder::StubContent[8] = {
+ 0x10, 0x00, 0x00, 0x58, // LDR x16, <literal>
+ 0x00, 0x02, 0x1f, 0xd6 // BR x16
+};
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+class MachOJITLinker_arm64 : public JITLinker<MachOJITLinker_arm64> {
+ friend class JITLinker<MachOJITLinker_arm64>;
+
+public:
+ MachOJITLinker_arm64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ StringRef getEdgeKindName(Edge::Kind R) const override {
+ return getMachOARM64RelocationKindName(R);
+ }
+
+ static Error targetOutOfRangeError(const Block &B, const Edge &E) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Relocation target out of range: ";
+ printEdge(ErrStream, B, E, getMachOARM64RelocationKindName(E.getKind()));
+ ErrStream << "\n";
+ }
+ return make_error<JITLinkError>(std::move(ErrMsg));
+ }
+
+ static unsigned getPageOffset12Shift(uint32_t Instr) {
+ constexpr uint32_t LoadStoreImm12Mask = 0x3b000000;
+ constexpr uint32_t Vec128Mask = 0x04800000;
+
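+    // Load/store (unsigned immediate) instructions scale their 12-bit
+    // immediate by the access size: the shift is the size field in bits
+    // 31:30, except that the 128-bit vector forms use an implicit shift
+    // of 4.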
+ if ((Instr & LoadStoreImm12Mask) == 0x39000000) {
+ uint32_t ImplicitShift = Instr >> 30;
+ if (ImplicitShift == 0)
+ if ((Instr & Vec128Mask) == Vec128Mask)
+ ImplicitShift = 4;
+
+ return ImplicitShift;
+ }
+
+ return 0;
+ }
+
+ Error applyFixup(Block &B, const Edge &E, char *BlockWorkingMem) const {
+ using namespace support;
+
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+
+ switch (E.getKind()) {
+ case Branch26: {
+ assert((FixupAddress & 0x3) == 0 && "Branch-inst is not 32-bit aligned");
+
+ int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+
+ if (static_cast<uint64_t>(Value) & 0x3)
+ return make_error<JITLinkError>("Branch26 target is not 32-bit "
+ "aligned");
+
+ if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
+ return targetOutOfRangeError(B, E);
+
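+      // B/BL encode the signed 28-bit byte offset as a 26-bit word offset
+      // (imm26 = offset >> 2) in the low bits of the instruction.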
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ assert((RawInstr & 0x7fffffff) == 0x14000000 &&
+ "RawInstr isn't a B or BR immediate instruction");
+ uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
+ uint32_t FixedInstr = RawInstr | Imm;
+ *(little32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case Pointer32: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ if (Value > std::numeric_limits<uint32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(ulittle32_t *)FixupPtr = Value;
+ break;
+ }
+ case Pointer64:
+ case Pointer64Anon: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ *(ulittle64_t *)FixupPtr = Value;
+ break;
+ }
+ case Page21:
+ case GOTPage21: {
+ assert((E.getKind() != GOTPage21 || E.getAddend() == 0) &&
+ "GOTPAGE21 with non-zero addend");
+ uint64_t TargetPage =
+ (E.getTarget().getAddress() + E.getAddend()) &
+ ~static_cast<uint64_t>(4096 - 1);
+ uint64_t PCPage = FixupAddress & ~static_cast<uint64_t>(4096 - 1);
+
+ int64_t PageDelta = TargetPage - PCPage;
+ if (PageDelta < -(1 << 30) || PageDelta > ((1 << 30) - 1))
+ return targetOutOfRangeError(B, E);
+
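+      // ADRP encodes the signed 21-bit page delta as immlo (bits 30:29)
+      // and immhi (bits 23:5).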
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert((RawInstr & 0xffffffe0) == 0x90000000 &&
+ "RawInstr isn't an ADRP instruction");
+ uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
+ uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
+ uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case PageOffset12: {
+ uint64_t TargetOffset =
+ (E.getTarget().getAddress() + E.getAddend()) & 0xfff;
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ unsigned ImmShift = getPageOffset12Shift(RawInstr);
+
+ if (TargetOffset & ((1 << ImmShift) - 1))
+ return make_error<JITLinkError>("PAGEOFF12 target is not aligned");
+
+ uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case GOTPageOffset12: {
+ assert(E.getAddend() == 0 && "GOTPAGEOF12 with non-zero addend");
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
+ "RawInstr isn't a 64-bit LDR immediate");
+
+ uint32_t TargetOffset = E.getTarget().getAddress() & 0xfff;
+ assert((TargetOffset & 0x7) == 0 && "GOT entry is not 8-byte aligned");
+ uint32_t EncodedImm = (TargetOffset >> 3) << 10;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case LDRLiteral19: {
+ assert((FixupAddress & 0x3) == 0 && "LDR is not 32-bit aligned");
+ assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
+ int64_t Delta = E.getTarget().getAddress() - FixupAddress;
+ if (Delta & 0x3)
+ return make_error<JITLinkError>("LDR literal target is not 32-bit "
+ "aligned");
+ if (Delta < -(1 << 20) || Delta > ((1 << 20) - 1))
+ return targetOutOfRangeError(B, E);
+
+ uint32_t EncodedImm = (static_cast<uint32_t>(Delta) >> 2) << 5;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case Delta32:
+ case Delta64:
+ case NegDelta32:
+ case NegDelta64: {
+ int64_t Value;
+ if (E.getKind() == Delta32 || E.getKind() == Delta64)
+ Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+ else
+ Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
+
+ if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ } else
+ *(little64_t *)FixupPtr = Value;
+ break;
+ }
+ default:
+ llvm_unreachable("Unrecognized edge kind");
+ }
+
+ return Error::success();
+ }
+
+ uint64_t NullValue = 0;
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject_arm64(MemoryBufferRef ObjectBuffer) {
+ auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjectBuffer);
+ if (!MachOObj)
+ return MachOObj.takeError();
+ return MachOLinkGraphBuilder_arm64(**MachOObj).buildGraph();
+}
+
+void link_MachO_arm64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+
+ PassConfiguration Config;
+
+ if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back([](LinkGraph &G) -> Error {
+ MachO_arm64_GOTAndStubsBuilder(G).run();
+ return Error::success();
+ });
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(G->getTargetTriple(), Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Construct a JITLinker and run the link function.
+ MachOJITLinker_arm64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+StringRef getMachOARM64RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case Branch26:
+ return "Branch26";
+ case Pointer64:
+ return "Pointer64";
+ case Pointer64Anon:
+ return "Pointer64Anon";
+ case Page21:
+ return "Page21";
+ case PageOffset12:
+ return "PageOffset12";
+ case GOTPage21:
+ return "GOTPage21";
+ case GOTPageOffset12:
+ return "GOTPageOffset12";
+ case PointerToGOT:
+ return "PointerToGOT";
+ case PairedAddend:
+ return "PairedAddend";
+ case LDRLiteral19:
+ return "LDRLiteral19";
+ case Delta32:
+ return "Delta32";
+ case Delta64:
+ return "Delta64";
+ case NegDelta32:
+ return "NegDelta32";
+ case NegDelta64:
+ return "NegDelta64";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
new file mode 100644
index 00000000000..bde4a19e71b
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
@@ -0,0 +1,746 @@
+//===---- MachO_x86_64.cpp -JIT linker implementation for MachO/x86-64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/x86-64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
+
+#include "BasicGOTAndStubsBuilder.h"
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::MachO_x86_64_Edges;
+
+namespace {
+
+class MachOLinkGraphBuilder_x86_64 : public MachOLinkGraphBuilder {
+public:
+ MachOLinkGraphBuilder_x86_64(const object::MachOObjectFile &Obj)
+ : MachOLinkGraphBuilder(Obj, Triple("x86_64-apple-darwin")) {}
+
+private:
+ static Expected<MachOX86RelocationKind>
+ getRelocationKind(const MachO::relocation_info &RI) {
+ switch (RI.r_type) {
+ case MachO::X86_64_RELOC_UNSIGNED:
+ if (!RI.r_pcrel) {
+ if (RI.r_length == 3)
+ return RI.r_extern ? Pointer64 : Pointer64Anon;
+ else if (RI.r_extern && RI.r_length == 2)
+ return Pointer32;
+ }
+ break;
+ case MachO::X86_64_RELOC_SIGNED:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? PCRel32 : PCRel32Anon;
+ break;
+ case MachO::X86_64_RELOC_BRANCH:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Branch32;
+ break;
+ case MachO::X86_64_RELOC_GOT_LOAD:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PCRel32GOTLoad;
+ break;
+ case MachO::X86_64_RELOC_GOT:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PCRel32GOT;
+ break;
+ case MachO::X86_64_RELOC_SUBTRACTOR:
+ // SUBTRACTOR must be non-pc-rel, extern, with length 2 or 3.
+ // Initially represent SUBTRACTOR relocations with 'Delta<W>'. They may
+ // be turned into NegDelta<W> by parsePairRelocation.
+ if (!RI.r_pcrel && RI.r_extern) {
+ if (RI.r_length == 2)
+ return Delta32;
+ else if (RI.r_length == 3)
+ return Delta64;
+ }
+ break;
+ case MachO::X86_64_RELOC_SIGNED_1:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? PCRel32Minus1 : PCRel32Minus1Anon;
+ break;
+ case MachO::X86_64_RELOC_SIGNED_2:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? PCRel32Minus2 : PCRel32Minus2Anon;
+ break;
+ case MachO::X86_64_RELOC_SIGNED_4:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? PCRel32Minus4 : PCRel32Minus4Anon;
+ break;
+ case MachO::X86_64_RELOC_TLV:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PCRel32TLV;
+ break;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported x86-64 relocation: address=" +
+ formatv("{0:x8}", RI.r_address) +
+ ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+ ", kind=" + formatv("{0:x1}", RI.r_type) +
+ ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+ ", extern=" + (RI.r_extern ? "true" : "false") +
+ ", length=" + formatv("{0:d}", RI.r_length));
+ }
+
+ using PairRelocInfo = std::tuple<MachOX86RelocationKind, Symbol *, uint64_t>;
+
+ // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+ // returns the edge kind and addend to be used.
+ Expected<PairRelocInfo>
+ parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
+ const MachO::relocation_info &SubRI,
+ JITTargetAddress FixupAddress, const char *FixupContent,
+ object::relocation_iterator &UnsignedRelItr,
+ object::relocation_iterator &RelEnd) {
+ using namespace support;
+
+ assert(((SubtractorKind == Delta32 && SubRI.r_length == 2) ||
+ (SubtractorKind == Delta64 && SubRI.r_length == 3)) &&
+ "Subtractor kind should match length");
+ assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+ assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+ if (UnsignedRelItr == RelEnd)
+ return make_error<JITLinkError>("x86_64 SUBTRACTOR without paired "
+ "UNSIGNED relocation");
+
+ auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+ if (SubRI.r_address != UnsignedRI.r_address)
+ return make_error<JITLinkError>("x86_64 SUBTRACTOR and paired UNSIGNED "
+ "point to different addresses");
+
+ if (SubRI.r_length != UnsignedRI.r_length)
+ return make_error<JITLinkError>("length of x86_64 SUBTRACTOR and paired "
+ "UNSIGNED reloc must match");
+
+ Symbol *FromSymbol;
+ if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+ FromSymbol = FromSymbolOrErr->GraphSymbol;
+ else
+ return FromSymbolOrErr.takeError();
+
+ // Read the current fixup value.
+ uint64_t FixupValue = 0;
+ if (SubRI.r_length == 3)
+ FixupValue = *(const little64_t *)FixupContent;
+ else
+ FixupValue = *(const little32_t *)FixupContent;
+
+ // Find 'ToSymbol' using symbol number or address, depending on whether the
+ // paired UNSIGNED relocation is extern.
+ Symbol *ToSymbol = nullptr;
+ if (UnsignedRI.r_extern) {
+ // Find target symbol by symbol index.
+ if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+ ToSymbol = ToSymbolOrErr->GraphSymbol;
+ else
+ return ToSymbolOrErr.takeError();
+ } else {
+ auto ToSymbolSec = findSectionByIndex(UnsignedRI.r_symbolnum - 1);
+ if (!ToSymbolSec)
+ return ToSymbolSec.takeError();
+ ToSymbol = getSymbolByAddress(ToSymbolSec->Address);
+ assert(ToSymbol && "No symbol for section");
+ FixupValue -= ToSymbol->getAddress();
+ }
+
+ MachOX86RelocationKind DeltaKind;
+ Symbol *TargetSymbol;
+ uint64_t Addend;
+ if (&BlockToFix == &FromSymbol->getAddressable()) {
+ TargetSymbol = ToSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? Delta64 : Delta32;
+ Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+ // FIXME: handle extern 'from'.
+ } else if (&BlockToFix == &ToSymbol->getAddressable()) {
+ TargetSymbol = FromSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? NegDelta64 : NegDelta32;
+ Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+ } else {
+ // BlockToFix was neither FromSymbol nor ToSymbol.
+ return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+ "either 'A' or 'B' (or a symbol in one "
+ "of their alt-entry chains)");
+ }
+
+ return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+ }
+
+ Error addRelocations() override {
+ using namespace support;
+ auto &Obj = getObject();
+
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ for (auto &S : Obj.sections()) {
+
+ JITTargetAddress SectionAddress = S.getAddress();
+
+      // Skip relocations in virtual sections.
+ if (S.isVirtual()) {
+ if (S.relocation_begin() != S.relocation_end())
+ return make_error<JITLinkError>("Virtual section contains "
+ "relocations");
+ continue;
+ }
+
+      // Skip relocations for debug sections.
+ {
+ auto &NSec =
+ getSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
+ if (!NSec.GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping relocations for MachO section "
+ << NSec.SegName << "/" << NSec.SectName
+ << " which has no associated graph section\n";
+ });
+ continue;
+ }
+ }
+
+ // Add relocations for section.
+ for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+ RelItr != RelEnd; ++RelItr) {
+
+ MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+ // Sanity check the relocation kind.
+ auto Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ // Find the address of the value to fix up.
+ JITTargetAddress FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+
+ LLVM_DEBUG({
+ auto &NSec =
+ getSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
+ dbgs() << " " << NSec.SectName << " + "
+ << formatv("{0:x8}", RI.r_address) << ":\n";
+ });
+
+ // Find the block that the fixup points to.
+ Block *BlockToFix = nullptr;
+ {
+ auto SymbolToFixOrErr = findSymbolByAddress(FixupAddress);
+ if (!SymbolToFixOrErr)
+ return SymbolToFixOrErr.takeError();
+ BlockToFix = &SymbolToFixOrErr->getBlock();
+ }
+
+ if (FixupAddress + static_cast<JITTargetAddress>(1ULL << RI.r_length) >
+ BlockToFix->getAddress() + BlockToFix->getContent().size())
+ return make_error<JITLinkError>(
+ "Relocation extends past end of fixup block");
+
+ // Get a pointer to the fixup content.
+ const char *FixupContent = BlockToFix->getContent().data() +
+ (FixupAddress - BlockToFix->getAddress());
+
+ // The target symbol and addend will be populated by the switch below.
+ Symbol *TargetSymbol = nullptr;
+ uint64_t Addend = 0;
+
+ switch (*Kind) {
+ case Branch32:
+ case PCRel32:
+ case PCRel32GOTLoad:
+ case PCRel32GOT:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const little32_t *)FixupContent;
+ break;
+ case Pointer32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ break;
+ case Pointer64:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle64_t *)FixupContent;
+ break;
+ case Pointer64Anon: {
+ JITTargetAddress TargetAddress = *(const ulittle64_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case PCRel32Minus1:
+ case PCRel32Minus2:
+ case PCRel32Minus4:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
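+          // X86_64_RELOC_SIGNED_{1,2,4} store the addend biased by
+          // -1/-2/-4; undo the bias here (this relies on PCRel32Minus1/2/4
+          // being consecutive edge kinds).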
+ Addend = *(const little32_t *)FixupContent +
+ (1 << (*Kind - PCRel32Minus1));
+ break;
+ case PCRel32Anon: {
+ JITTargetAddress TargetAddress =
+ FixupAddress + 4 + *(const little32_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case PCRel32Minus1Anon:
+ case PCRel32Minus2Anon:
+ case PCRel32Minus4Anon: {
+ JITTargetAddress Delta =
+ static_cast<JITTargetAddress>(1ULL << (*Kind - PCRel32Minus1Anon));
+ JITTargetAddress TargetAddress =
+ FixupAddress + 4 + Delta + *(const little32_t *)FixupContent;
+ if (auto TargetSymbolOrErr = findSymbolByAddress(TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case Delta32:
+ case Delta64: {
+ // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+ // parsePairRelocation handles the paired reloc, and returns the
+ // edge kind to be used (either Delta32/Delta64, or
+ // NegDelta32/NegDelta64, depending on the direction of the
+ // subtraction) along with the addend.
+ auto PairInfo =
+ parsePairRelocation(*BlockToFix, *Kind, RI, FixupAddress,
+ FixupContent, ++RelItr, RelEnd);
+ if (!PairInfo)
+ return PairInfo.takeError();
+ std::tie(*Kind, TargetSymbol, Addend) = *PairInfo;
+ assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+ break;
+ }
+ case PCRel32TLV:
+ return make_error<JITLinkError>(
+ "MachO TLV relocations not yet supported");
+ default:
+ llvm_unreachable("Special relocation kind should not appear in "
+ "mach-o file");
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " ";
+ Edge GE(*Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE,
+ getMachOX86RelocationKindName(*Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(*Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+};
+
+class MachO_x86_64_GOTAndStubsBuilder
+ : public BasicGOTAndStubsBuilder<MachO_x86_64_GOTAndStubsBuilder> {
+public:
+ static const uint8_t NullGOTEntryContent[8];
+ static const uint8_t StubContent[6];
+
+ MachO_x86_64_GOTAndStubsBuilder(LinkGraph &G)
+ : BasicGOTAndStubsBuilder<MachO_x86_64_GOTAndStubsBuilder>(G) {}
+
+ bool isGOTEdge(Edge &E) const {
+ return E.getKind() == PCRel32GOT || E.getKind() == PCRel32GOTLoad;
+ }
+
+ Symbol &createGOTEntry(Symbol &Target) {
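+ // Each GOT entry is an 8-byte zero-initialized block; the Pointer64 edge
+ // below patches in Target's final address at fixup time.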
+ auto &GOTEntryBlock = G.createContentBlock(
+ getGOTSection(), getGOTEntryBlockContent(), 0, 8, 0);
+ GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
+ return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
+ }
+
+ void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+ assert((E.getKind() == PCRel32GOT || E.getKind() == PCRel32GOTLoad) &&
+ "Not a GOT edge?");
+ // If this is a PCRel32GOT then change it to an ordinary PCRel32. If it is
+ // a PCRel32GOTLoad then leave it as-is for now. We will use the kind to
+ // check for GOT optimization opportunities in the
+ // optimizeMachO_x86_64_GOTAndStubs pass below.
+ if (E.getKind() == PCRel32GOT)
+ E.setKind(PCRel32);
+
+ E.setTarget(GOTEntry);
+ // Leave the edge addend as-is.
+ }
+
+ bool isExternalBranchEdge(Edge &E) {
+ return E.getKind() == Branch32 && !E.getTarget().isDefined();
+ }
+
+ Symbol &createStub(Symbol &Target) {
+ auto &StubContentBlock =
+ G.createContentBlock(getStubsSection(), getStubBlockContent(), 0, 1, 0);
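+ // Stub bodies are the 6-byte "jmpq *GOTEntry(%rip)" sequence
+ // (FF 25 <disp32>); the PCRel32 edge at offset 2 fills in the displacement.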
+ // Re-use GOT entries for stub targets.
+ auto &GOTEntrySymbol = getGOTEntrySymbol(Target);
+ StubContentBlock.addEdge(PCRel32, 2, GOTEntrySymbol, 0);
+ return G.addAnonymousSymbol(StubContentBlock, 0, 6, true, false);
+ }
+
+ void fixExternalBranchEdge(Edge &E, Symbol &Stub) {
+ assert(E.getKind() == Branch32 && "Not a Branch32 edge?");
+ assert(E.getAddend() == 0 && "Branch32 edge has non-zero addend?");
+
+ // Set the edge kind to Branch32ToStub. We will use this to check for stub
+ // optimization opportunities in the optimizeMachO_x86_64_GOTAndStubs pass
+ // below.
+ E.setKind(Branch32ToStub);
+ E.setTarget(Stub);
+ }
+
+private:
+ Section &getGOTSection() {
+ if (!GOTSection)
+ GOTSection = &G.createSection("$__GOT", sys::Memory::MF_READ);
+ return *GOTSection;
+ }
+
+ Section &getStubsSection() {
+ if (!StubsSection) {
+ auto StubsProt = static_cast<sys::Memory::ProtectionFlags>(
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ StubsSection = &G.createSection("$__STUBS", StubsProt);
+ }
+ return *StubsSection;
+ }
+
+ StringRef getGOTEntryBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(NullGOTEntryContent),
+ sizeof(NullGOTEntryContent));
+ }
+
+ StringRef getStubBlockContent() {
+ return StringRef(reinterpret_cast<const char *>(StubContent),
+ sizeof(StubContent));
+ }
+
+ Section *GOTSection = nullptr;
+ Section *StubsSection = nullptr;
+};
+
+const uint8_t MachO_x86_64_GOTAndStubsBuilder::NullGOTEntryContent[8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+const uint8_t MachO_x86_64_GOTAndStubsBuilder::StubContent[6] = {
+ 0xFF, 0x25, 0x00, 0x00, 0x00, 0x00};
+} // namespace
+
+static Error optimizeMachO_x86_64_GOTAndStubs(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");
+
+ for (auto *B : G.blocks())
+ for (auto &E : B->edges())
+ if (E.getKind() == PCRel32GOTLoad) {
+ assert(E.getOffset() >= 3 && "GOT edge occurs too early in block");
+
+ // Switch the edge kind to PCRel32: whether or not we change the edge
+ // target, this will be the desired kind.
+ E.setKind(PCRel32);
+
+ // Optimize GOT references.
+ auto &GOTBlock = E.getTarget().getBlock();
+ assert(GOTBlock.getSize() == G.getPointerSize() &&
+ "GOT entry block should be pointer sized");
+ assert(GOTBlock.edges_size() == 1 &&
+ "GOT entry should only have one outgoing edge");
+
+ auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
+ JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
+ JITTargetAddress TargetAddr = GOTTarget.getAddress();
+
+ // Check that this is a recognized MOV instruction.
+ // FIXME: Can we assume this?
+ constexpr uint8_t MOVQRIPRel[] = {0x48, 0x8b};
+ if (strncmp(B->getContent().data() + E.getOffset() - 3,
+ reinterpret_cast<const char *>(MOVQRIPRel), 2) != 0)
+ continue;
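+ // The load is a recognized GOT access. If the target turns out to be
+ // directly reachable below, patch the 0x8b (movq) opcode byte to 0x8d,
+ // turning "movq X@GOTPCREL(%rip), %reg" into "leaq X(%rip), %reg".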
+
+ int64_t Displacement = TargetAddr - EdgeAddr + 4;
+ if (Displacement >= std::numeric_limits<int32_t>::min() &&
+ Displacement <= std::numeric_limits<int32_t>::max()) {
+ E.setTarget(GOTTarget);
+ auto *BlockData = reinterpret_cast<uint8_t *>(
+ const_cast<char *>(B->getContent().data()));
+ BlockData[E.getOffset() - 2] = 0x8d;
+ LLVM_DEBUG({
+ dbgs() << " Replaced GOT load wih LEA:\n ";
+ printEdge(dbgs(), *B, E,
+ getMachOX86RelocationKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ }
+ } else if (E.getKind() == Branch32ToStub) {
+
+ // Switch the edge kind to PCRel32: whether or not we change the edge
+ // target, this will be the desired kind.
+ E.setKind(Branch32);
+
+ auto &StubBlock = E.getTarget().getBlock();
+ assert(StubBlock.getSize() ==
+ sizeof(MachO_x86_64_GOTAndStubsBuilder::StubContent) &&
+ "Stub block should be stub sized");
+ assert(StubBlock.edges_size() == 1 &&
+ "Stub block should only have one outgoing edge");
+
+ auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock();
+ assert(GOTBlock.getSize() == G.getPointerSize() &&
+ "GOT block should be pointer sized");
+ assert(GOTBlock.edges_size() == 1 &&
+ "GOT block should only have one outgoing edge");
+
+ auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
+ JITTargetAddress EdgeAddr = B->getAddress() + E.getOffset();
+ JITTargetAddress TargetAddr = GOTTarget.getAddress();
+
+ int64_t Displacement = TargetAddr - EdgeAddr + 4;
+ if (Displacement >= std::numeric_limits<int32_t>::min() &&
+ Displacement <= std::numeric_limits<int32_t>::max()) {
+ E.setTarget(GOTTarget);
+ LLVM_DEBUG({
+ dbgs() << " Replaced stub branch with direct branch:\n ";
+ printEdge(dbgs(), *B, E,
+ getMachOX86RelocationKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ }
+ }
+
+ return Error::success();
+}
+
+namespace llvm {
+namespace jitlink {
+
+class MachOJITLinker_x86_64 : public JITLinker<MachOJITLinker_x86_64> {
+ friend class JITLinker<MachOJITLinker_x86_64>;
+
+public:
+ MachOJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ StringRef getEdgeKindName(Edge::Kind R) const override {
+ return getMachOX86RelocationKindName(R);
+ }
+
+ static Error targetOutOfRangeError(const Block &B, const Edge &E) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Relocation target out of range: ";
+ printEdge(ErrStream, B, E, getMachOX86RelocationKindName(E.getKind()));
+ ErrStream << "\n";
+ }
+ return make_error<JITLinkError>(std::move(ErrMsg));
+ }
+
+ Error applyFixup(Block &B, const Edge &E, char *BlockWorkingMem) const {
+
+ using namespace support;
+
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ JITTargetAddress FixupAddress = B.getAddress() + E.getOffset();
+
+ switch (E.getKind()) {
+ case Branch32:
+ case PCRel32:
+ case PCRel32Anon: {
+ int64_t Value =
+ E.getTarget().getAddress() - (FixupAddress + 4) + E.getAddend();
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ break;
+ }
+ case Pointer64:
+ case Pointer64Anon: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ *(ulittle64_t *)FixupPtr = Value;
+ break;
+ }
+ case PCRel32Minus1:
+ case PCRel32Minus2:
+ case PCRel32Minus4: {
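+ // Delta is 5, 6, or 8: the 4-byte fixup itself plus the 1/2/4 adjustment
+ // implied by the (assumed consecutive) kind values.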
+ int Delta = 4 + (1 << (E.getKind() - PCRel32Minus1));
+ int64_t Value =
+ E.getTarget().getAddress() - (FixupAddress + Delta) + E.getAddend();
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ break;
+ }
+ case PCRel32Minus1Anon:
+ case PCRel32Minus2Anon:
+ case PCRel32Minus4Anon: {
+ int Delta = 4 + (1 << (E.getKind() - PCRel32Minus1Anon));
+ int64_t Value =
+ E.getTarget().getAddress() - (FixupAddress + Delta) + E.getAddend();
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ break;
+ }
+ case Delta32:
+ case Delta64:
+ case NegDelta32:
+ case NegDelta64: {
+ int64_t Value;
+ if (E.getKind() == Delta32 || E.getKind() == Delta64)
+ Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+ else
+ Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
+
+ if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(little32_t *)FixupPtr = Value;
+ } else
+ *(little64_t *)FixupPtr = Value;
+ break;
+ }
+ case Pointer32: {
+ uint64_t Value = E.getTarget().getAddress() + E.getAddend();
+ if (Value > std::numeric_limits<uint32_t>::max())
+ return targetOutOfRangeError(B, E);
+ *(ulittle32_t *)FixupPtr = Value;
+ break;
+ }
+ default:
+ llvm_unreachable("Unrecognized edge kind");
+ }
+
+ return Error::success();
+ }
+
+ uint64_t NullValue = 0;
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject_x86_64(MemoryBufferRef ObjectBuffer) {
+ auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjectBuffer);
+ if (!MachOObj)
+ return MachOObj.takeError();
+ return MachOLinkGraphBuilder_x86_64(**MachOObj).buildGraph();
+}
+
+void link_MachO_x86_64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+
+ PassConfiguration Config;
+
+ if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+ // Add eh-frame passes.
+ Config.PrePrunePasses.push_back(EHFrameSplitter("__eh_frame"));
+ Config.PrePrunePasses.push_back(EHFrameEdgeFixer(
+ "__eh_frame", G->getPointerSize(), Delta64, Delta32, NegDelta32));
+
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back([](LinkGraph &G) -> Error {
+ MachO_x86_64_GOTAndStubsBuilder(G).run();
+ return Error::success();
+ });
+
+ // Add GOT/Stubs optimizer pass.
+ Config.PreFixupPasses.push_back(optimizeMachO_x86_64_GOTAndStubs);
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(G->getTargetTriple(), Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Construct a JITLinker and run the link function.
+ MachOJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+StringRef getMachOX86RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case Branch32:
+ return "Branch32";
+ case Branch32ToStub:
+ return "Branch32ToStub";
+ case Pointer32:
+ return "Pointer32";
+ case Pointer64:
+ return "Pointer64";
+ case Pointer64Anon:
+ return "Pointer64Anon";
+ case PCRel32:
+ return "PCRel32";
+ case PCRel32Minus1:
+ return "PCRel32Minus1";
+ case PCRel32Minus2:
+ return "PCRel32Minus2";
+ case PCRel32Minus4:
+ return "PCRel32Minus4";
+ case PCRel32Anon:
+ return "PCRel32Anon";
+ case PCRel32Minus1Anon:
+ return "PCRel32Minus1Anon";
+ case PCRel32Minus2Anon:
+ return "PCRel32Minus2Anon";
+ case PCRel32Minus4Anon:
+ return "PCRel32Minus4Anon";
+ case PCRel32GOTLoad:
+ return "PCRel32GOTLoad";
+ case PCRel32GOT:
+ return "PCRel32GOT";
+ case PCRel32TLV:
+ return "PCRel32TLV";
+ case Delta32:
+ return "Delta32";
+ case Delta64:
+ return "Delta64";
+ case NegDelta32:
+ return "NegDelta32";
+ case NegDelta64:
+ return "NegDelta64";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/MCJIT/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/ExecutionEngine/MCJIT/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/ExecutionEngine/MCJIT/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
new file mode 100644
index 00000000000..68878f6729e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
@@ -0,0 +1,379 @@
+//===----- CompileOnDemandLayer.cpp - Lazily emit IR on first call --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
+
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/FormatVariadic.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+static ThreadSafeModule extractSubModule(ThreadSafeModule &TSM,
+ StringRef Suffix,
+ GVPredicate ShouldExtract) {
+
+ auto DeleteExtractedDefs = [](GlobalValue &GV) {
+ // Bump the linkage: this global will be provided by the external module.
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+
+ // Delete the definition in the source module.
+ if (isa<Function>(GV)) {
+ auto &F = cast<Function>(GV);
+ F.deleteBody();
+ F.setPersonalityFn(nullptr);
+ } else if (isa<GlobalVariable>(GV)) {
+ cast<GlobalVariable>(GV).setInitializer(nullptr);
+ } else if (isa<GlobalAlias>(GV)) {
+ // We need to turn deleted aliases into function or variable decls based
+ // on the type of their aliasee.
+ auto &A = cast<GlobalAlias>(GV);
+ Constant *Aliasee = A.getAliasee();
+ assert(A.hasName() && "Anonymous alias?");
+ assert(Aliasee->hasName() && "Anonymous aliasee");
+ std::string AliasName = std::string(A.getName());
+
+ if (isa<Function>(Aliasee)) {
+ auto *F = cloneFunctionDecl(*A.getParent(), *cast<Function>(Aliasee));
+ A.replaceAllUsesWith(F);
+ A.eraseFromParent();
+ F->setName(AliasName);
+ } else if (isa<GlobalVariable>(Aliasee)) {
+ auto *G = cloneGlobalVariableDecl(*A.getParent(),
+ *cast<GlobalVariable>(Aliasee));
+ A.replaceAllUsesWith(G);
+ A.eraseFromParent();
+ G->setName(AliasName);
+ } else
+ llvm_unreachable("Alias to unsupported type");
+ } else
+ llvm_unreachable("Unsupported global type");
+ };
+
+ auto NewTSM = cloneToNewContext(TSM, ShouldExtract, DeleteExtractedDefs);
+ NewTSM.withModuleDo([&](Module &M) {
+ M.setModuleIdentifier((M.getModuleIdentifier() + Suffix).str());
+ });
+
+ return NewTSM;
+}
+
+namespace llvm {
+namespace orc {
+
+class PartitioningIRMaterializationUnit : public IRMaterializationUnit {
+public:
+ PartitioningIRMaterializationUnit(ExecutionSession &ES,
+ const IRSymbolMapper::ManglingOptions &MO,
+ ThreadSafeModule TSM,
+ CompileOnDemandLayer &Parent)
+ : IRMaterializationUnit(ES, MO, std::move(TSM)), Parent(Parent) {}
+
+ PartitioningIRMaterializationUnit(
+ ThreadSafeModule TSM, SymbolFlagsMap SymbolFlags,
+ SymbolStringPtr InitSymbol, SymbolNameToDefinitionMap SymbolToDefinition,
+ CompileOnDemandLayer &Parent)
+ : IRMaterializationUnit(std::move(TSM), std::move(SymbolFlags),
+ std::move(InitSymbol),
+ std::move(SymbolToDefinition)),
+ Parent(Parent) {}
+
+private:
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ Parent.emitPartition(std::move(R), std::move(TSM),
+ std::move(SymbolToDefinition));
+ }
+
+ void discard(const JITDylib &V, const SymbolStringPtr &Name) override {
+ // All original symbols were materialized by the CODLayer and should be
+ // final. The function bodies provided by M should never be overridden.
+ llvm_unreachable("Discard should never be called on an "
+ "ExtractingIRMaterializationUnit");
+ }
+
+ mutable std::mutex SourceModuleMutex;
+ CompileOnDemandLayer &Parent;
+};
+
+Optional<CompileOnDemandLayer::GlobalValueSet>
+CompileOnDemandLayer::compileRequested(GlobalValueSet Requested) {
+ return std::move(Requested);
+}
+
+Optional<CompileOnDemandLayer::GlobalValueSet>
+CompileOnDemandLayer::compileWholeModule(GlobalValueSet Requested) {
+ return None;
+}
+
+CompileOnDemandLayer::CompileOnDemandLayer(
+ ExecutionSession &ES, IRLayer &BaseLayer, LazyCallThroughManager &LCTMgr,
+ IndirectStubsManagerBuilder BuildIndirectStubsManager)
+ : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+ LCTMgr(LCTMgr),
+ BuildIndirectStubsManager(std::move(BuildIndirectStubsManager)) {}
+
+void CompileOnDemandLayer::setPartitionFunction(PartitionFunction Partition) {
+ this->Partition = std::move(Partition);
+}
+
+void CompileOnDemandLayer::setImplMap(ImplSymbolMap *Imp) {
+ this->AliaseeImpls = Imp;
+}
+void CompileOnDemandLayer::emit(
+ std::unique_ptr<MaterializationResponsibility> R, ThreadSafeModule TSM) {
+ assert(TSM && "Null module");
+
+ auto &ES = getExecutionSession();
+
+ // Sort the callables and non-callables, build re-exports and lodge the
+ // actual module with the implementation dylib.
+ auto &PDR = getPerDylibResources(R->getTargetJITDylib());
+
+ SymbolAliasMap NonCallables;
+ SymbolAliasMap Callables;
+ TSM.withModuleDo([&](Module &M) {
+ // First, do some cleanup on the module:
+ cleanUpModule(M);
+ });
+
+ for (auto &KV : R->getSymbols()) {
+ auto &Name = KV.first;
+ auto &Flags = KV.second;
+ if (Flags.isCallable())
+ Callables[Name] = SymbolAliasMapEntry(Name, Flags);
+ else
+ NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
+ }
+
+ // Create a partitioning materialization unit and lodge it with the
+ // implementation dylib.
+ if (auto Err = PDR.getImplDylib().define(
+ std::make_unique<PartitioningIRMaterializationUnit>(
+ ES, *getManglingOptions(), std::move(TSM), *this))) {
+ ES.reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+
+ if (!NonCallables.empty())
+ if (auto Err =
+ R->replace(reexports(PDR.getImplDylib(), std::move(NonCallables),
+ JITDylibLookupFlags::MatchAllSymbols))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ if (!Callables.empty()) {
+ if (auto Err = R->replace(
+ lazyReexports(LCTMgr, PDR.getISManager(), PDR.getImplDylib(),
+ std::move(Callables), AliaseeImpls))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ }
+}
+
+CompileOnDemandLayer::PerDylibResources &
+CompileOnDemandLayer::getPerDylibResources(JITDylib &TargetD) {
+ auto I = DylibResources.find(&TargetD);
+ if (I == DylibResources.end()) {
+ auto &ImplD =
+ getExecutionSession().createBareJITDylib(TargetD.getName() + ".impl");
+ JITDylibSearchOrder NewLinkOrder;
+ TargetD.withLinkOrderDo([&](const JITDylibSearchOrder &TargetLinkOrder) {
+ NewLinkOrder = TargetLinkOrder;
+ });
+
+ assert(!NewLinkOrder.empty() && NewLinkOrder.front().first == &TargetD &&
+ NewLinkOrder.front().second ==
+ JITDylibLookupFlags::MatchAllSymbols &&
+ "TargetD must be at the front of its own search order and match "
+ "non-exported symbol");
+ NewLinkOrder.insert(std::next(NewLinkOrder.begin()),
+ {&ImplD, JITDylibLookupFlags::MatchAllSymbols});
+ ImplD.setLinkOrder(NewLinkOrder, false);
+ TargetD.setLinkOrder(std::move(NewLinkOrder), false);
+
+ PerDylibResources PDR(ImplD, BuildIndirectStubsManager());
+ I = DylibResources.insert(std::make_pair(&TargetD, std::move(PDR))).first;
+ }
+
+ return I->second;
+}
+
+void CompileOnDemandLayer::cleanUpModule(Module &M) {
+ for (auto &F : M.functions()) {
+ if (F.isDeclaration())
+ continue;
+
+ if (F.hasAvailableExternallyLinkage()) {
+ F.deleteBody();
+ F.setPersonalityFn(nullptr);
+ continue;
+ }
+ }
+}
+
+void CompileOnDemandLayer::expandPartition(GlobalValueSet &Partition) {
+ // Expands the partition to ensure the following rules hold:
+ // (1) If any alias is in the partition, its aliasee is also in the partition.
+ // (2) If any aliasee is in the partition, its aliases are also in the
+ // partition.
+ // (3) If any global variable is in the partition then all global variables
+ // are in the partition.
+ assert(!Partition.empty() && "Unexpected empty partition");
+
+ const Module &M = *(*Partition.begin())->getParent();
+ bool ContainsGlobalVariables = false;
+ std::vector<const GlobalValue *> GVsToAdd;
+
+ for (auto *GV : Partition)
+ if (isa<GlobalAlias>(GV))
+ GVsToAdd.push_back(
+ cast<GlobalValue>(cast<GlobalAlias>(GV)->getAliasee()));
+ else if (isa<GlobalVariable>(GV))
+ ContainsGlobalVariables = true;
+
+ for (auto &A : M.aliases())
+ if (Partition.count(cast<GlobalValue>(A.getAliasee())))
+ GVsToAdd.push_back(&A);
+
+ if (ContainsGlobalVariables)
+ for (auto &G : M.globals())
+ GVsToAdd.push_back(&G);
+
+ for (auto *GV : GVsToAdd)
+ Partition.insert(GV);
+}
+
+void CompileOnDemandLayer::emitPartition(
+ std::unique_ptr<MaterializationResponsibility> R, ThreadSafeModule TSM,
+ IRMaterializationUnit::SymbolNameToDefinitionMap Defs) {
+
+ // FIXME: Need a 'notify lazy-extracting/emitting' callback to tie the
+ // extracted module key, extracted module, and source module key
+ // together. This could be used, for example, to provide a specific
+ // memory manager instance to the linking layer.
+
+ auto &ES = getExecutionSession();
+ GlobalValueSet RequestedGVs;
+ for (auto &Name : R->getRequestedSymbols()) {
+ if (Name == R->getInitializerSymbol())
+ TSM.withModuleDo([&](Module &M) {
+ for (auto &GV : getStaticInitGVs(M))
+ RequestedGVs.insert(&GV);
+ });
+ else {
+ assert(Defs.count(Name) && "No definition for symbol");
+ RequestedGVs.insert(Defs[Name]);
+ }
+ }
+
+ // Perform partitioning with the context lock held, since the partition
+ // function is allowed to access the globals to compute the partition.
+ auto GVsToExtract =
+ TSM.withModuleDo([&](Module &M) { return Partition(RequestedGVs); });
+
+ // Take a 'None' partition to mean the whole module (as opposed to an empty
+ // partition, which means "materialize nothing"). Emit the whole module
+ // unmodified to the base layer.
+ if (GVsToExtract == None) {
+ Defs.clear();
+ BaseLayer.emit(std::move(R), std::move(TSM));
+ return;
+ }
+
+ // If the partition is empty, return the whole module to the symbol table.
+ if (GVsToExtract->empty()) {
+ if (auto Err =
+ R->replace(std::make_unique<PartitioningIRMaterializationUnit>(
+ std::move(TSM), R->getSymbols(), R->getInitializerSymbol(),
+ std::move(Defs), *this))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ return;
+ }
+
+ // Ok -- we actually need to partition the symbols. Promote the symbol
+ // linkages/names, expand the partition to include any required symbols
+ // (i.e. symbols that can't be separated from our partition), and
+ // then extract the partition.
+ //
+ // FIXME: We apply this promotion once per partitioning. It's safe, but
+ // overkill.
+ auto ExtractedTSM =
+ TSM.withModuleDo([&](Module &M) -> Expected<ThreadSafeModule> {
+ auto PromotedGlobals = PromoteSymbols(M);
+ if (!PromotedGlobals.empty()) {
+
+ MangleAndInterner Mangle(ES, M.getDataLayout());
+ SymbolFlagsMap SymbolFlags;
+ IRSymbolMapper::add(ES, *getManglingOptions(),
+ PromotedGlobals, SymbolFlags);
+
+ if (auto Err = R->defineMaterializing(SymbolFlags))
+ return std::move(Err);
+ }
+
+ expandPartition(*GVsToExtract);
+
+ // Submodule name is given by hashing the names of the globals.
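+ // (e.g. ".submodule.1a2b3c4d5e6f7788.ll" on a 64-bit host; the hash value
+ // here is hypothetical).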
+ std::string SubModuleName;
+ {
+ std::vector<const GlobalValue*> HashGVs;
+ HashGVs.reserve(GVsToExtract->size());
+ for (auto *GV : *GVsToExtract)
+ HashGVs.push_back(GV);
+ llvm::sort(HashGVs, [](const GlobalValue *LHS, const GlobalValue *RHS) {
+ return LHS->getName() < RHS->getName();
+ });
+ hash_code HC(0);
+ for (auto *GV : HashGVs) {
+ assert(GV->hasName() && "All GVs to extract should be named by now");
+ auto GVName = GV->getName();
+ HC = hash_combine(HC, hash_combine_range(GVName.begin(), GVName.end()));
+ }
+ raw_string_ostream(SubModuleName)
+ << ".submodule."
+ << formatv(sizeof(size_t) == 8 ? "{0:x16}" : "{0:x8}",
+ static_cast<size_t>(HC))
+ << ".ll";
+ }
+
+ // Extract the requested partition (plus any necessary aliases) and
+ // put the rest back into the impl dylib.
+ auto ShouldExtract = [&](const GlobalValue &GV) -> bool {
+ return GVsToExtract->count(&GV);
+ };
+
+ return extractSubModule(TSM, SubModuleName, ShouldExtract);
+ });
+
+ if (!ExtractedTSM) {
+ ES.reportError(ExtractedTSM.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ if (auto Err = R->replace(std::make_unique<PartitioningIRMaterializationUnit>(
+ ES, *getManglingOptions(), std::move(TSM), *this))) {
+ ES.reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ BaseLayer.emit(std::move(R), std::move(*ExtractedTSM));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/CompileUtils.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/CompileUtils.cpp
new file mode 100644
index 00000000000..f8efed15ede
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/CompileUtils.cpp
@@ -0,0 +1,94 @@
+//===------ CompileUtils.cpp - Utilities for compiling IR in the JIT ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <algorithm>
+
+namespace llvm {
+namespace orc {
+
+IRSymbolMapper::ManglingOptions
+irManglingOptionsFromTargetOptions(const TargetOptions &Opts) {
+ IRSymbolMapper::ManglingOptions MO;
+
+ MO.EmulatedTLS = Opts.EmulatedTLS;
+
+ return MO;
+}
+
+/// Compile a Module to an ObjectFile.
+Expected<SimpleCompiler::CompileResult> SimpleCompiler::operator()(Module &M) {
+ CompileResult CachedObject = tryToLoadFromObjectCache(M);
+ if (CachedObject)
+ return std::move(CachedObject);
+
+ SmallVector<char, 0> ObjBufferSV;
+
+ {
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ legacy::PassManager PM;
+ MCContext *Ctx;
+ if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
+ return make_error<StringError>("Target does not support MC emission",
+ inconvertibleErrorCode());
+ PM.run(M);
+ }
+
+ auto ObjBuffer = std::make_unique<SmallVectorMemoryBuffer>(
+ std::move(ObjBufferSV), M.getModuleIdentifier() + "-jitted-objectbuffer");
+
+ auto Obj = object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+
+ if (!Obj)
+ return Obj.takeError();
+
+ notifyObjectCompiled(M, *ObjBuffer);
+ return std::move(ObjBuffer);
+}
+
+SimpleCompiler::CompileResult
+SimpleCompiler::tryToLoadFromObjectCache(const Module &M) {
+ if (!ObjCache)
+ return CompileResult();
+
+ return ObjCache->getObject(&M);
+}
+
+void SimpleCompiler::notifyObjectCompiled(const Module &M,
+ const MemoryBuffer &ObjBuffer) {
+ if (ObjCache)
+ ObjCache->notifyObjectCompiled(&M, ObjBuffer.getMemBufferRef());
+}
+
+ConcurrentIRCompiler::ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
+ ObjectCache *ObjCache)
+ : IRCompiler(irManglingOptionsFromTargetOptions(JTMB.getOptions())),
+ JTMB(std::move(JTMB)), ObjCache(ObjCache) {}
+
+Expected<std::unique_ptr<MemoryBuffer>>
+ConcurrentIRCompiler::operator()(Module &M) {
+ auto TM = cantFail(JTMB.createTargetMachine());
+ SimpleCompiler C(*TM, ObjCache);
+ return C(M);
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Core.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Core.cpp
new file mode 100644
index 00000000000..dfb558808c3
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Core.cpp
@@ -0,0 +1,2777 @@
+//===--- Core.cpp - Core ORC APIs (MaterializationUnit, JITDylib, etc.) ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+
+#include <condition_variable>
+#include <future>
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+char ResourceTrackerDefunct::ID = 0;
+char FailedToMaterialize::ID = 0;
+char SymbolsNotFound::ID = 0;
+char SymbolsCouldNotBeRemoved::ID = 0;
+char MissingSymbolDefinitions::ID = 0;
+char UnexpectedSymbolDefinitions::ID = 0;
+
+RegisterDependenciesFunction NoDependenciesToRegister =
+ RegisterDependenciesFunction();
+
+void MaterializationUnit::anchor() {}
+
+ResourceTracker::ResourceTracker(JITDylibSP JD) {
+ assert((reinterpret_cast<uintptr_t>(JD.get()) & 0x1) == 0 &&
+ "JITDylib must be two byte aligned");
+ JD->Retain();
+ JDAndFlag.store(reinterpret_cast<uintptr_t>(JD.get()));
+}
+
+ResourceTracker::~ResourceTracker() {
+ getJITDylib().getExecutionSession().destroyResourceTracker(*this);
+ getJITDylib().Release();
+}
+
+Error ResourceTracker::remove() {
+ return getJITDylib().getExecutionSession().removeResourceTracker(*this);
+}
+
+void ResourceTracker::transferTo(ResourceTracker &DstRT) {
+ getJITDylib().getExecutionSession().transferResourceTracker(DstRT, *this);
+}
+
+void ResourceTracker::makeDefunct() {
+ uintptr_t Val = JDAndFlag.load();
+ Val |= 0x1U;
+ JDAndFlag.store(Val);
+}
+
+ResourceManager::~ResourceManager() {}
+
+ResourceTrackerDefunct::ResourceTrackerDefunct(ResourceTrackerSP RT)
+ : RT(std::move(RT)) {}
+
+std::error_code ResourceTrackerDefunct::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void ResourceTrackerDefunct::log(raw_ostream &OS) const {
+ OS << "Resource tracker " << (void *)RT.get() << " became defunct";
+}
+
+FailedToMaterialize::FailedToMaterialize(
+ std::shared_ptr<SymbolDependenceMap> Symbols)
+ : Symbols(std::move(Symbols)) {
+ assert(!this->Symbols->empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code FailedToMaterialize::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void FailedToMaterialize::log(raw_ostream &OS) const {
+ OS << "Failed to materialize symbols: " << *Symbols;
+}
+
+SymbolsNotFound::SymbolsNotFound(SymbolNameSet Symbols) {
+ for (auto &Sym : Symbols)
+ this->Symbols.push_back(Sym);
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+SymbolsNotFound::SymbolsNotFound(SymbolNameVector Symbols)
+ : Symbols(std::move(Symbols)) {
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code SymbolsNotFound::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void SymbolsNotFound::log(raw_ostream &OS) const {
+ OS << "Symbols not found: " << Symbols;
+}
+
+SymbolsCouldNotBeRemoved::SymbolsCouldNotBeRemoved(SymbolNameSet Symbols)
+ : Symbols(std::move(Symbols)) {
+ assert(!this->Symbols.empty() && "Can not fail to remove an empty set");
+}
+
+std::error_code SymbolsCouldNotBeRemoved::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void SymbolsCouldNotBeRemoved::log(raw_ostream &OS) const {
+ OS << "Symbols could not be removed: " << Symbols;
+}
+
+std::error_code MissingSymbolDefinitions::convertToErrorCode() const {
+ return orcError(OrcErrorCode::MissingSymbolDefinitions);
+}
+
+void MissingSymbolDefinitions::log(raw_ostream &OS) const {
+ OS << "Missing definitions in module " << ModuleName
+ << ": " << Symbols;
+}
+
+std::error_code UnexpectedSymbolDefinitions::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnexpectedSymbolDefinitions);
+}
+
+void UnexpectedSymbolDefinitions::log(raw_ostream &OS) const {
+ OS << "Unexpected definitions in module " << ModuleName
+ << ": " << Symbols;
+}
+
+AsynchronousSymbolQuery::AsynchronousSymbolQuery(
+ const SymbolLookupSet &Symbols, SymbolState RequiredState,
+ SymbolsResolvedCallback NotifyComplete)
+ : NotifyComplete(std::move(NotifyComplete)), RequiredState(RequiredState) {
+ assert(RequiredState >= SymbolState::Resolved &&
+ "Cannot query for a symbols that have not reached the resolve state "
+ "yet");
+
+ OutstandingSymbolsCount = Symbols.size();
+
+ for (auto &KV : Symbols)
+ ResolvedSymbols[KV.first] = nullptr;
+}
+
+void AsynchronousSymbolQuery::notifySymbolMetRequiredState(
+ const SymbolStringPtr &Name, JITEvaluatedSymbol Sym) {
+ auto I = ResolvedSymbols.find(Name);
+ assert(I != ResolvedSymbols.end() &&
+ "Resolving symbol outside the requested set");
+ assert(I->second.getAddress() == 0 && "Redundantly resolving symbol Name");
+
+ // If this is a materialization-side-effects-only symbol then drop it,
+ // otherwise update its map entry with its resolved address.
+ if (Sym.getFlags().hasMaterializationSideEffectsOnly())
+ ResolvedSymbols.erase(I);
+ else
+ I->second = std::move(Sym);
+ --OutstandingSymbolsCount;
+}
+
+void AsynchronousSymbolQuery::handleComplete() {
+ assert(OutstandingSymbolsCount == 0 &&
+ "Symbols remain, handleComplete called prematurely");
+
+ auto TmpNotifyComplete = std::move(NotifyComplete);
+ NotifyComplete = SymbolsResolvedCallback();
+ TmpNotifyComplete(std::move(ResolvedSymbols));
+}
+
+void AsynchronousSymbolQuery::handleFailed(Error Err) {
+ assert(QueryRegistrations.empty() && ResolvedSymbols.empty() &&
+ OutstandingSymbolsCount == 0 &&
+ "Query should already have been abandoned");
+ NotifyComplete(std::move(Err));
+ NotifyComplete = SymbolsResolvedCallback();
+}
+
+void AsynchronousSymbolQuery::addQueryDependence(JITDylib &JD,
+ SymbolStringPtr Name) {
+ bool Added = QueryRegistrations[&JD].insert(std::move(Name)).second;
+ (void)Added;
+ assert(Added && "Duplicate dependence notification?");
+}
+
+void AsynchronousSymbolQuery::removeQueryDependence(
+ JITDylib &JD, const SymbolStringPtr &Name) {
+ auto QRI = QueryRegistrations.find(&JD);
+ assert(QRI != QueryRegistrations.end() &&
+ "No dependencies registered for JD");
+ assert(QRI->second.count(Name) && "No dependency on Name in JD");
+ QRI->second.erase(Name);
+ if (QRI->second.empty())
+ QueryRegistrations.erase(QRI);
+}
+
+void AsynchronousSymbolQuery::dropSymbol(const SymbolStringPtr &Name) {
+ auto I = ResolvedSymbols.find(Name);
+ assert(I != ResolvedSymbols.end() &&
+ "Redundant removal of weakly-referenced symbol");
+ ResolvedSymbols.erase(I);
+ --OutstandingSymbolsCount;
+}
+
+void AsynchronousSymbolQuery::detach() {
+ ResolvedSymbols.clear();
+ OutstandingSymbolsCount = 0;
+ for (auto &KV : QueryRegistrations)
+ KV.first->detachQueryHelper(*this, KV.second);
+ QueryRegistrations.clear();
+}
+
+AbsoluteSymbolsMaterializationUnit::AbsoluteSymbolsMaterializationUnit(
+ SymbolMap Symbols)
+ : MaterializationUnit(extractFlags(Symbols), nullptr),
+ Symbols(std::move(Symbols)) {}
+
+StringRef AbsoluteSymbolsMaterializationUnit::getName() const {
+ return "<Absolute Symbols>";
+}
+
+void AbsoluteSymbolsMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+ // No dependencies, so these calls can't fail.
+ cantFail(R->notifyResolved(Symbols));
+ cantFail(R->notifyEmitted());
+}
+
+void AbsoluteSymbolsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(Symbols.count(Name) && "Symbol is not part of this MU");
+ Symbols.erase(Name);
+}
+
+SymbolFlagsMap
+AbsoluteSymbolsMaterializationUnit::extractFlags(const SymbolMap &Symbols) {
+ SymbolFlagsMap Flags;
+ for (const auto &KV : Symbols)
+ Flags[KV.first] = KV.second.getFlags();
+ return Flags;
+}
+
+ReExportsMaterializationUnit::ReExportsMaterializationUnit(
+ JITDylib *SourceJD, JITDylibLookupFlags SourceJDLookupFlags,
+ SymbolAliasMap Aliases)
+ : MaterializationUnit(extractFlags(Aliases), nullptr), SourceJD(SourceJD),
+ SourceJDLookupFlags(SourceJDLookupFlags), Aliases(std::move(Aliases)) {}
+
+StringRef ReExportsMaterializationUnit::getName() const {
+ return "<Reexports>";
+}
+
+void ReExportsMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+
+ auto &ES = R->getTargetJITDylib().getExecutionSession();
+ JITDylib &TgtJD = R->getTargetJITDylib();
+ JITDylib &SrcJD = SourceJD ? *SourceJD : TgtJD;
+
+ // Find the set of requested aliases and aliasees. Return any unrequested
+ // aliases back to the JITDylib so as to not prematurely materialize any
+ // aliasees.
+ auto RequestedSymbols = R->getRequestedSymbols();
+ SymbolAliasMap RequestedAliases;
+
+ for (auto &Name : RequestedSymbols) {
+ auto I = Aliases.find(Name);
+ assert(I != Aliases.end() && "Symbol not found in aliases map?");
+ RequestedAliases[Name] = std::move(I->second);
+ Aliases.erase(I);
+ }
+
+ LLVM_DEBUG({
+ ES.runSessionLocked([&]() {
+ dbgs() << "materializing reexports: target = " << TgtJD.getName()
+ << ", source = " << SrcJD.getName() << " " << RequestedAliases
+ << "\n";
+ });
+ });
+
+ if (!Aliases.empty()) {
+ auto Err = SourceJD ? R->replace(reexports(*SourceJD, std::move(Aliases),
+ SourceJDLookupFlags))
+ : R->replace(symbolAliases(std::move(Aliases)));
+
+ if (Err) {
+ // FIXME: Should this be reported / treated as failure to materialize?
+ // Or should this be treated as a sanctioned bailing-out?
+ ES.reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ }
+
+ // The OnResolveInfo struct will hold the aliases and responsibility for each
+ // query in the list.
+ struct OnResolveInfo {
+ OnResolveInfo(std::unique_ptr<MaterializationResponsibility> R,
+ SymbolAliasMap Aliases)
+ : R(std::move(R)), Aliases(std::move(Aliases)) {}
+
+ std::unique_ptr<MaterializationResponsibility> R;
+ SymbolAliasMap Aliases;
+ };
+
+ // Build a list of queries to issue. In each round we build a query for the
+ // largest set of aliases that we can resolve without encountering a chain of
+ // aliases (e.g. Foo -> Bar, Bar -> Baz). Such a chain would deadlock as the
+ // query would be waiting on a symbol that it itself had to resolve. Creating
+ // a new query for each link in such a chain eliminates the possibility of
+ // deadlock. In practice chains are likely to be rare, and this algorithm will
+ // usually result in a single query to issue.
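+ //
+ // For example, with Foo -> Bar and Bar -> Baz in the same JITDylib, round
+ // one skips Foo (its aliasee Bar is itself still pending) and queries Baz
+ // on Bar's behalf; round two then queries Bar on Foo's behalf.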
+
+ std::vector<std::pair<SymbolLookupSet, std::shared_ptr<OnResolveInfo>>>
+ QueryInfos;
+ while (!RequestedAliases.empty()) {
+ SymbolNameSet ResponsibilitySymbols;
+ SymbolLookupSet QuerySymbols;
+ SymbolAliasMap QueryAliases;
+
+ // Collect as many aliases as we can without including a chain.
+ for (auto &KV : RequestedAliases) {
+ // Chain detected. Skip this symbol for this round.
+ if (&SrcJD == &TgtJD && (QueryAliases.count(KV.second.Aliasee) ||
+ RequestedAliases.count(KV.second.Aliasee)))
+ continue;
+
+ ResponsibilitySymbols.insert(KV.first);
+ QuerySymbols.add(KV.second.Aliasee,
+ KV.second.AliasFlags.hasMaterializationSideEffectsOnly()
+ ? SymbolLookupFlags::WeaklyReferencedSymbol
+ : SymbolLookupFlags::RequiredSymbol);
+ QueryAliases[KV.first] = std::move(KV.second);
+ }
+
+ // Remove the aliases collected this round from the RequestedAliases map.
+ for (auto &KV : QueryAliases)
+ RequestedAliases.erase(KV.first);
+
+ assert(!QuerySymbols.empty() && "Alias cycle detected!");
+
+ auto NewR = R->delegate(ResponsibilitySymbols);
+ if (!NewR) {
+ ES.reportError(NewR.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ auto QueryInfo = std::make_shared<OnResolveInfo>(std::move(*NewR),
+ std::move(QueryAliases));
+ QueryInfos.push_back(
+ make_pair(std::move(QuerySymbols), std::move(QueryInfo)));
+ }
+
+ // Issue the queries.
+ while (!QueryInfos.empty()) {
+ auto QuerySymbols = std::move(QueryInfos.back().first);
+ auto QueryInfo = std::move(QueryInfos.back().second);
+
+ QueryInfos.pop_back();
+
+ auto RegisterDependencies = [QueryInfo,
+ &SrcJD](const SymbolDependenceMap &Deps) {
+ // If there were no materializing symbols, just bail out.
+ if (Deps.empty())
+ return;
+
+ // Otherwise the only deps should be on SrcJD.
+ assert(Deps.size() == 1 && Deps.count(&SrcJD) &&
+ "Unexpected dependencies for reexports");
+
+ auto &SrcJDDeps = Deps.find(&SrcJD)->second;
+ SymbolDependenceMap PerAliasDepsMap;
+ auto &PerAliasDeps = PerAliasDepsMap[&SrcJD];
+
+ for (auto &KV : QueryInfo->Aliases)
+ if (SrcJDDeps.count(KV.second.Aliasee)) {
+ PerAliasDeps = {KV.second.Aliasee};
+ QueryInfo->R->addDependencies(KV.first, PerAliasDepsMap);
+ }
+ };
+
+ auto OnComplete = [QueryInfo](Expected<SymbolMap> Result) {
+ auto &ES = QueryInfo->R->getTargetJITDylib().getExecutionSession();
+ if (Result) {
+ SymbolMap ResolutionMap;
+ for (auto &KV : QueryInfo->Aliases) {
+ assert((KV.second.AliasFlags.hasMaterializationSideEffectsOnly() ||
+ Result->count(KV.second.Aliasee)) &&
+ "Result map missing entry?");
+ // Don't try to resolve materialization-side-effects-only symbols.
+ if (KV.second.AliasFlags.hasMaterializationSideEffectsOnly())
+ continue;
+
+ ResolutionMap[KV.first] = JITEvaluatedSymbol(
+ (*Result)[KV.second.Aliasee].getAddress(), KV.second.AliasFlags);
+ }
+ if (auto Err = QueryInfo->R->notifyResolved(ResolutionMap)) {
+ ES.reportError(std::move(Err));
+ QueryInfo->R->failMaterialization();
+ return;
+ }
+ if (auto Err = QueryInfo->R->notifyEmitted()) {
+ ES.reportError(std::move(Err));
+ QueryInfo->R->failMaterialization();
+ return;
+ }
+ } else {
+ ES.reportError(Result.takeError());
+ QueryInfo->R->failMaterialization();
+ }
+ };
+
+ ES.lookup(LookupKind::Static,
+ JITDylibSearchOrder({{&SrcJD, SourceJDLookupFlags}}),
+ QuerySymbols, SymbolState::Resolved, std::move(OnComplete),
+ std::move(RegisterDependencies));
+ }
+}
+
+void ReExportsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(Aliases.count(Name) &&
+ "Symbol not covered by this MaterializationUnit");
+ Aliases.erase(Name);
+}
+
+SymbolFlagsMap
+ReExportsMaterializationUnit::extractFlags(const SymbolAliasMap &Aliases) {
+ SymbolFlagsMap SymbolFlags;
+ for (auto &KV : Aliases)
+ SymbolFlags[KV.first] = KV.second.AliasFlags;
+
+ return SymbolFlags;
+}
+
+Expected<SymbolAliasMap> buildSimpleReexportsAliasMap(JITDylib &SourceJD,
+ SymbolNameSet Symbols) {
+ SymbolLookupSet LookupSet(Symbols);
+ auto Flags = SourceJD.getExecutionSession().lookupFlags(
+ LookupKind::Static, {{&SourceJD, JITDylibLookupFlags::MatchAllSymbols}},
+ SymbolLookupSet(std::move(Symbols)));
+
+ if (!Flags)
+ return Flags.takeError();
+
+ SymbolAliasMap Result;
+ for (auto &Name : Symbols) {
+ assert(Flags->count(Name) && "Missing entry in flags map");
+ Result[Name] = SymbolAliasMapEntry(Name, (*Flags)[Name]);
+ }
+
+ return Result;
+}
+
+class InProgressLookupState {
+public:
+ InProgressLookupState(LookupKind K, JITDylibSearchOrder SearchOrder,
+ SymbolLookupSet LookupSet, SymbolState RequiredState)
+ : K(K), SearchOrder(std::move(SearchOrder)),
+ LookupSet(std::move(LookupSet)), RequiredState(RequiredState) {
+ DefGeneratorCandidates = this->LookupSet;
+ }
+ virtual ~InProgressLookupState() {}
+ virtual void complete(std::unique_ptr<InProgressLookupState> IPLS) = 0;
+ virtual void fail(Error Err) = 0;
+
+ LookupKind K;
+ JITDylibSearchOrder SearchOrder;
+ SymbolLookupSet LookupSet;
+ SymbolState RequiredState;
+
+ std::unique_lock<std::mutex> GeneratorLock;
+ size_t CurSearchOrderIndex = 0;
+ bool NewJITDylib = true;
+ SymbolLookupSet DefGeneratorCandidates;
+ SymbolLookupSet DefGeneratorNonCandidates;
+ std::vector<std::weak_ptr<DefinitionGenerator>> CurDefGeneratorStack;
+};
+
+class InProgressLookupFlagsState : public InProgressLookupState {
+public:
+ InProgressLookupFlagsState(
+ LookupKind K, JITDylibSearchOrder SearchOrder, SymbolLookupSet LookupSet,
+ unique_function<void(Expected<SymbolFlagsMap>)> OnComplete)
+ : InProgressLookupState(K, std::move(SearchOrder), std::move(LookupSet),
+ SymbolState::NeverSearched),
+ OnComplete(std::move(OnComplete)) {}
+
+ void complete(std::unique_ptr<InProgressLookupState> IPLS) override {
+ GeneratorLock = {}; // Unlock and release.
+ auto &ES = SearchOrder.front().first->getExecutionSession();
+ ES.OL_completeLookupFlags(std::move(IPLS), std::move(OnComplete));
+ }
+
+ void fail(Error Err) override {
+ GeneratorLock = {}; // Unlock and release.
+ OnComplete(std::move(Err));
+ }
+
+private:
+ unique_function<void(Expected<SymbolFlagsMap>)> OnComplete;
+};
+
+class InProgressFullLookupState : public InProgressLookupState {
+public:
+ InProgressFullLookupState(LookupKind K, JITDylibSearchOrder SearchOrder,
+ SymbolLookupSet LookupSet,
+ SymbolState RequiredState,
+ std::shared_ptr<AsynchronousSymbolQuery> Q,
+ RegisterDependenciesFunction RegisterDependencies)
+ : InProgressLookupState(K, std::move(SearchOrder), std::move(LookupSet),
+ RequiredState),
+ Q(std::move(Q)), RegisterDependencies(std::move(RegisterDependencies)) {
+ }
+
+ void complete(std::unique_ptr<InProgressLookupState> IPLS) override {
+ GeneratorLock = {}; // Unlock and release.
+ auto &ES = SearchOrder.front().first->getExecutionSession();
+ ES.OL_completeLookup(std::move(IPLS), std::move(Q),
+ std::move(RegisterDependencies));
+ }
+
+ void fail(Error Err) override {
+ GeneratorLock = {};
+ Q->detach();
+ Q->handleFailed(std::move(Err));
+ }
+
+private:
+ std::shared_ptr<AsynchronousSymbolQuery> Q;
+ RegisterDependenciesFunction RegisterDependencies;
+};
+
+ReexportsGenerator::ReexportsGenerator(JITDylib &SourceJD,
+ JITDylibLookupFlags SourceJDLookupFlags,
+ SymbolPredicate Allow)
+ : SourceJD(SourceJD), SourceJDLookupFlags(SourceJDLookupFlags),
+ Allow(std::move(Allow)) {}
+
+Error ReexportsGenerator::tryToGenerate(LookupState &LS, LookupKind K,
+ JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags,
+ const SymbolLookupSet &LookupSet) {
+ assert(&JD != &SourceJD && "Cannot re-export from the same dylib");
+
+ // Use lookupFlags to find the subset of symbols that match our lookup.
+ auto Flags = JD.getExecutionSession().lookupFlags(
+ K, {{&SourceJD, JDLookupFlags}}, LookupSet);
+ if (!Flags)
+ return Flags.takeError();
+
+ // Create an alias map.
+ orc::SymbolAliasMap AliasMap;
+ for (auto &KV : *Flags)
+ if (!Allow || Allow(KV.first))
+ AliasMap[KV.first] = SymbolAliasMapEntry(KV.first, KV.second);
+
+ if (AliasMap.empty())
+ return Error::success();
+
+ // Define the re-exports.
+ return JD.define(reexports(SourceJD, AliasMap, SourceJDLookupFlags));
+}
+
+LookupState::LookupState(std::unique_ptr<InProgressLookupState> IPLS)
+ : IPLS(std::move(IPLS)) {}
+
+void LookupState::reset(InProgressLookupState *IPLS) { this->IPLS.reset(IPLS); }
+
+LookupState::LookupState() = default;
+LookupState::LookupState(LookupState &&) = default;
+LookupState &LookupState::operator=(LookupState &&) = default;
+LookupState::~LookupState() = default;
+
+void LookupState::continueLookup(Error Err) {
+ assert(IPLS && "Cannot call continueLookup on empty LookupState");
+ auto &ES = IPLS->SearchOrder.begin()->first->getExecutionSession();
+ ES.OL_applyQueryPhase1(std::move(IPLS), std::move(Err));
+}
+
+DefinitionGenerator::~DefinitionGenerator() {}
+
+Error JITDylib::clear() {
+ std::vector<ResourceTrackerSP> TrackersToRemove;
+ ES.runSessionLocked([&]() {
+ for (auto &KV : TrackerSymbols)
+ TrackersToRemove.push_back(KV.first);
+ TrackersToRemove.push_back(getDefaultResourceTracker());
+ });
+
+ Error Err = Error::success();
+ for (auto &RT : TrackersToRemove)
+ Err = joinErrors(std::move(Err), RT->remove());
+ return Err;
+}
+
+ResourceTrackerSP JITDylib::getDefaultResourceTracker() {
+ return ES.runSessionLocked([this] {
+ if (!DefaultTracker)
+ DefaultTracker = new ResourceTracker(this);
+ return DefaultTracker;
+ });
+}
+
+ResourceTrackerSP JITDylib::createResourceTracker() {
+ return ES.runSessionLocked([this] {
+ ResourceTrackerSP RT = new ResourceTracker(this);
+ return RT;
+ });
+}
+
+void JITDylib::removeGenerator(DefinitionGenerator &G) {
+ std::lock_guard<std::mutex> Lock(GeneratorsMutex);
+ auto I = llvm::find_if(DefGenerators,
+ [&](const std::shared_ptr<DefinitionGenerator> &H) {
+ return H.get() == &G;
+ });
+ assert(I != DefGenerators.end() && "Generator not found");
+ DefGenerators.erase(I);
+}
+
+Expected<SymbolFlagsMap>
+JITDylib::defineMaterializing(SymbolFlagsMap SymbolFlags) {
+
+ return ES.runSessionLocked([&]() -> Expected<SymbolFlagsMap> {
+ std::vector<SymbolTable::iterator> AddedSyms;
+ std::vector<SymbolFlagsMap::iterator> RejectedWeakDefs;
+
+ for (auto SFItr = SymbolFlags.begin(), SFEnd = SymbolFlags.end();
+ SFItr != SFEnd; ++SFItr) {
+
+ auto &Name = SFItr->first;
+ auto &Flags = SFItr->second;
+
+ auto EntryItr = Symbols.find(Name);
+
+ // If the entry already exists...
+ if (EntryItr != Symbols.end()) {
+
+ // If this is a strong definition then error out.
+ if (!Flags.isWeak()) {
+ // Remove any symbols already added.
+ for (auto &SI : AddedSyms)
+ Symbols.erase(SI);
+
+ // FIXME: Return all duplicates.
+ return make_error<DuplicateDefinition>(std::string(*Name));
+ }
+
+ // Otherwise just make a note to discard this symbol after the loop.
+ RejectedWeakDefs.push_back(SFItr);
+ continue;
+ } else
+ EntryItr =
+ Symbols.insert(std::make_pair(Name, SymbolTableEntry(Flags))).first;
+
+ AddedSyms.push_back(EntryItr);
+ EntryItr->second.setState(SymbolState::Materializing);
+ }
+
+ // Remove any rejected weak definitions from the SymbolFlags map.
+ while (!RejectedWeakDefs.empty()) {
+ SymbolFlags.erase(RejectedWeakDefs.back());
+ RejectedWeakDefs.pop_back();
+ }
+
+ return SymbolFlags;
+ });
+}
+
+Error JITDylib::replace(MaterializationResponsibility &FromMR,
+ std::unique_ptr<MaterializationUnit> MU) {
+ assert(MU != nullptr && "Can not replace with a null MaterializationUnit");
+ std::unique_ptr<MaterializationUnit> MustRunMU;
+ std::unique_ptr<MaterializationResponsibility> MustRunMR;
+
+ auto Err =
+ ES.runSessionLocked([&, this]() -> Error {
+ auto RT = getTracker(FromMR);
+
+ if (RT->isDefunct())
+ return make_error<ResourceTrackerDefunct>(std::move(RT));
+
+#ifndef NDEBUG
+ for (auto &KV : MU->getSymbols()) {
+ auto SymI = Symbols.find(KV.first);
+ assert(SymI != Symbols.end() && "Replacing unknown symbol");
+ assert(SymI->second.getState() == SymbolState::Materializing &&
+ "Can not replace a symbol that ha is not materializing");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "Symbol should not have materializer attached already");
+ assert(UnmaterializedInfos.count(KV.first) == 0 &&
+ "Symbol being replaced should have no UnmaterializedInfo");
+ }
+#endif // NDEBUG
+
+ // If any symbol has pending queries against it then we need to
+ // materialize MU immediately.
+ for (auto &KV : MU->getSymbols()) {
+ auto MII = MaterializingInfos.find(KV.first);
+ if (MII != MaterializingInfos.end()) {
+ if (MII->second.hasQueriesPending()) {
+ MustRunMR = ES.createMaterializationResponsibility(
+ *RT, std::move(MU->SymbolFlags), std::move(MU->InitSymbol));
+ MustRunMU = std::move(MU);
+ return Error::success();
+ }
+ }
+ }
+
+ // Otherwise, make MU responsible for all the symbols.
+ auto RTI = MRTrackers.find(&FromMR);
+ assert(RTI != MRTrackers.end() && "No tracker for FromMR");
+ auto UMI =
+ std::make_shared<UnmaterializedInfo>(std::move(MU), RTI->second);
+ for (auto &KV : UMI->MU->getSymbols()) {
+ auto SymI = Symbols.find(KV.first);
+ assert(SymI->second.getState() == SymbolState::Materializing &&
+ "Can not replace a symbol that is not materializing");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "Can not replace a symbol that has a materializer attached");
+ assert(UnmaterializedInfos.count(KV.first) == 0 &&
+ "Unexpected materializer entry in map");
+ SymI->second.setAddress(SymI->second.getAddress());
+ SymI->second.setMaterializerAttached(true);
+
+ auto &UMIEntry = UnmaterializedInfos[KV.first];
+ assert((!UMIEntry || !UMIEntry->MU) &&
+ "Replacing symbol with materializer still attached");
+ UMIEntry = UMI;
+ }
+
+ return Error::success();
+ });
+
+ if (Err)
+ return Err;
+
+ if (MustRunMU) {
+ assert(MustRunMR && "MustRunMU set implies MustRunMR set");
+ ES.dispatchMaterialization(std::move(MustRunMU), std::move(MustRunMR));
+ } else {
+ assert(!MustRunMR && "MustRunMU unset implies MustRunMR unset");
+ }
+
+ return Error::success();
+}
+
+Expected<std::unique_ptr<MaterializationResponsibility>>
+JITDylib::delegate(MaterializationResponsibility &FromMR,
+ SymbolFlagsMap SymbolFlags, SymbolStringPtr InitSymbol) {
+
+ return ES.runSessionLocked(
+ [&]() -> Expected<std::unique_ptr<MaterializationResponsibility>> {
+ auto RT = getTracker(FromMR);
+
+ if (RT->isDefunct())
+ return make_error<ResourceTrackerDefunct>(std::move(RT));
+
+ return ES.createMaterializationResponsibility(
+ *RT, std::move(SymbolFlags), std::move(InitSymbol));
+ });
+}
+
+SymbolNameSet
+JITDylib::getRequestedSymbols(const SymbolFlagsMap &SymbolFlags) const {
+ return ES.runSessionLocked([&]() {
+ SymbolNameSet RequestedSymbols;
+
+ for (auto &KV : SymbolFlags) {
+ assert(Symbols.count(KV.first) && "JITDylib does not cover this symbol?");
+ assert(Symbols.find(KV.first)->second.getState() !=
+ SymbolState::NeverSearched &&
+ Symbols.find(KV.first)->second.getState() != SymbolState::Ready &&
+ "getRequestedSymbols can only be called for symbols that have "
+ "started materializing");
+ auto I = MaterializingInfos.find(KV.first);
+ if (I == MaterializingInfos.end())
+ continue;
+
+ if (I->second.hasQueriesPending())
+ RequestedSymbols.insert(KV.first);
+ }
+
+ return RequestedSymbols;
+ });
+}
+
+void JITDylib::addDependencies(const SymbolStringPtr &Name,
+ const SymbolDependenceMap &Dependencies) {
+ assert(Symbols.count(Name) && "Name not in symbol table");
+ assert(Symbols[Name].getState() < SymbolState::Emitted &&
+ "Can not add dependencies for a symbol that is not materializing");
+
+ LLVM_DEBUG({
+ dbgs() << "In " << getName() << " adding dependencies for "
+ << *Name << ": " << Dependencies << "\n";
+ });
+
+ // If Name is already in an error state then just bail out.
+ if (Symbols[Name].getFlags().hasError())
+ return;
+
+ auto &MI = MaterializingInfos[Name];
+ assert(Symbols[Name].getState() != SymbolState::Emitted &&
+ "Can not add dependencies to an emitted symbol");
+
+ bool DependsOnSymbolInErrorState = false;
+
+ // Register dependencies, record whether any dependency is in the error
+ // state.
+ for (auto &KV : Dependencies) {
+ assert(KV.first && "Null JITDylib in dependency?");
+ auto &OtherJITDylib = *KV.first;
+ auto &DepsOnOtherJITDylib = MI.UnemittedDependencies[&OtherJITDylib];
+
+ for (auto &OtherSymbol : KV.second) {
+
+ // Check the sym entry for the dependency.
+ auto OtherSymI = OtherJITDylib.Symbols.find(OtherSymbol);
+
+ // Assert that this symbol exists and has not reached the ready state
+ // already.
+ assert(OtherSymI != OtherJITDylib.Symbols.end() &&
+ "Dependency on unknown symbol");
+
+ auto &OtherSymEntry = OtherSymI->second;
+
+ // If the other symbol is already in the Ready state then there's no
+ // dependency to add.
+ if (OtherSymEntry.getState() == SymbolState::Ready)
+ continue;
+
+ // If the dependency is in an error state then note this and continue,
+ // we will move this symbol to the error state below.
+ if (OtherSymEntry.getFlags().hasError()) {
+ DependsOnSymbolInErrorState = true;
+ continue;
+ }
+
+ // If the dependency was not in the error state then add it to
+ // our list of dependencies.
+ auto &OtherMI = OtherJITDylib.MaterializingInfos[OtherSymbol];
+
+ if (OtherSymEntry.getState() == SymbolState::Emitted)
+ transferEmittedNodeDependencies(MI, Name, OtherMI);
+ else if (&OtherJITDylib != this || OtherSymbol != Name) {
+ OtherMI.Dependants[this].insert(Name);
+ DepsOnOtherJITDylib.insert(OtherSymbol);
+ }
+ }
+
+ if (DepsOnOtherJITDylib.empty())
+ MI.UnemittedDependencies.erase(&OtherJITDylib);
+ }
+
+ // If this symbol depended on any symbols in the error state then move
+ // this symbol to the error state too.
+ if (DependsOnSymbolInErrorState)
+ Symbols[Name].setFlags(Symbols[Name].getFlags() | JITSymbolFlags::HasError);
+}
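+
+// Editorial sketch: a materializer records that `Foo` (being materialized
+// under `MR`) cannot become Ready until `Bar` in `OtherJD` has been emitted.
+// All names here are hypothetical.
+//
+//   SymbolDependenceMap Deps;
+//   Deps[&OtherJD] = SymbolNameSet({Bar});
+//   MR.addDependencies(Foo, Deps);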
+
+Error JITDylib::resolve(MaterializationResponsibility &MR,
+ const SymbolMap &Resolved) {
+ AsynchronousSymbolQuerySet CompletedQueries;
+
+ if (auto Err = ES.runSessionLocked([&, this]() -> Error {
+ auto RTI = MRTrackers.find(&MR);
+ assert(RTI != MRTrackers.end() && "No resource tracker for MR?");
+ if (RTI->second->isDefunct())
+ return make_error<ResourceTrackerDefunct>(RTI->second);
+
+ struct WorklistEntry {
+ SymbolTable::iterator SymI;
+ JITEvaluatedSymbol ResolvedSym;
+ };
+
+ SymbolNameSet SymbolsInErrorState;
+ std::vector<WorklistEntry> Worklist;
+ Worklist.reserve(Resolved.size());
+
+ // Build worklist and check for any symbols in the error state.
+ for (const auto &KV : Resolved) {
+
+ assert(!KV.second.getFlags().hasError() &&
+ "Resolution result can not have error flag set");
+
+ auto SymI = Symbols.find(KV.first);
+
+ assert(SymI != Symbols.end() && "Symbol not found");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "Resolving symbol with materializer attached?");
+ assert(SymI->second.getState() == SymbolState::Materializing &&
+ "Symbol should be materializing");
+ assert(SymI->second.getAddress() == 0 &&
+ "Symbol has already been resolved");
+
+ if (SymI->second.getFlags().hasError())
+ SymbolsInErrorState.insert(KV.first);
+ else {
+ auto Flags = KV.second.getFlags();
+ Flags &= ~(JITSymbolFlags::Weak | JITSymbolFlags::Common);
+ assert(Flags ==
+ (SymI->second.getFlags() &
+ ~(JITSymbolFlags::Weak | JITSymbolFlags::Common)) &&
+ "Resolved flags should match the declared flags");
+
+ Worklist.push_back(
+ {SymI, JITEvaluatedSymbol(KV.second.getAddress(), Flags)});
+ }
+ }
+
+ // If any symbols were in the error state then bail out.
+ if (!SymbolsInErrorState.empty()) {
+ auto FailedSymbolsDepMap = std::make_shared<SymbolDependenceMap>();
+ (*FailedSymbolsDepMap)[this] = std::move(SymbolsInErrorState);
+ return make_error<FailedToMaterialize>(
+ std::move(FailedSymbolsDepMap));
+ }
+
+ while (!Worklist.empty()) {
+ auto SymI = Worklist.back().SymI;
+ auto ResolvedSym = Worklist.back().ResolvedSym;
+ Worklist.pop_back();
+
+ auto &Name = SymI->first;
+
+ // Resolved symbols can not be weak: discard the weak flag.
+ JITSymbolFlags ResolvedFlags = ResolvedSym.getFlags();
+ SymI->second.setAddress(ResolvedSym.getAddress());
+ SymI->second.setFlags(ResolvedFlags);
+ SymI->second.setState(SymbolState::Resolved);
+
+ auto MII = MaterializingInfos.find(Name);
+ if (MII == MaterializingInfos.end())
+ continue;
+
+ auto &MI = MII->second;
+ for (auto &Q : MI.takeQueriesMeeting(SymbolState::Resolved)) {
+ Q->notifySymbolMetRequiredState(Name, ResolvedSym);
+ Q->removeQueryDependence(*this, Name);
+ if (Q->isComplete())
+ CompletedQueries.insert(std::move(Q));
+ }
+ }
+
+ return Error::success();
+ }))
+ return Err;
+
+ // Otherwise notify all the completed queries.
+ for (auto &Q : CompletedQueries) {
+ assert(Q->isComplete() && "Q not completed");
+ Q->handleComplete();
+ }
+
+ return Error::success();
+}
+
+Error JITDylib::emit(MaterializationResponsibility &MR,
+ const SymbolFlagsMap &Emitted) {
+ AsynchronousSymbolQuerySet CompletedQueries;
+ DenseMap<JITDylib *, SymbolNameVector> ReadySymbols;
+
+ if (auto Err = ES.runSessionLocked([&, this]() -> Error {
+ auto RTI = MRTrackers.find(&MR);
+ assert(RTI != MRTrackers.end() && "No resource tracker for MR?");
+ if (RTI->second->isDefunct())
+ return make_error<ResourceTrackerDefunct>(RTI->second);
+
+ SymbolNameSet SymbolsInErrorState;
+ std::vector<SymbolTable::iterator> Worklist;
+
+ // Scan to build the worklist, recording any symbols in the error state.
+ for (const auto &KV : Emitted) {
+ auto &Name = KV.first;
+
+ auto SymI = Symbols.find(Name);
+ assert(SymI != Symbols.end() && "No symbol table entry for Name");
+
+ if (SymI->second.getFlags().hasError())
+ SymbolsInErrorState.insert(Name);
+ else
+ Worklist.push_back(SymI);
+ }
+
+ // If any symbols were in the error state then bail out.
+ if (!SymbolsInErrorState.empty()) {
+ auto FailedSymbolsDepMap = std::make_shared<SymbolDependenceMap>();
+ (*FailedSymbolsDepMap)[this] = std::move(SymbolsInErrorState);
+ return make_error<FailedToMaterialize>(
+ std::move(FailedSymbolsDepMap));
+ }
+
+ // Otherwise update dependencies and move to the emitted state.
+ while (!Worklist.empty()) {
+ auto SymI = Worklist.back();
+ Worklist.pop_back();
+
+ auto &Name = SymI->first;
+ auto &SymEntry = SymI->second;
+
+ // Move symbol to the emitted state.
+ assert(((SymEntry.getFlags().hasMaterializationSideEffectsOnly() &&
+ SymEntry.getState() == SymbolState::Materializing) ||
+ SymEntry.getState() == SymbolState::Resolved) &&
+ "Emitting from state other than Resolved");
+ SymEntry.setState(SymbolState::Emitted);
+
+ auto MII = MaterializingInfos.find(Name);
+
+ // If this symbol has no MaterializingInfo then it's trivially ready.
+ // Update its state and continue.
+ if (MII == MaterializingInfos.end()) {
+ SymEntry.setState(SymbolState::Ready);
+ continue;
+ }
+
+ auto &MI = MII->second;
+
+ // For each dependant, transfer this node's unemitted dependencies to
+ // it. If the dependant node is ready (i.e. has no unemitted
+ // dependencies left) then notify any pending queries.
+ for (auto &KV : MI.Dependants) {
+ auto &DependantJD = *KV.first;
+ auto &DependantJDReadySymbols = ReadySymbols[&DependantJD];
+ for (auto &DependantName : KV.second) {
+ auto DependantMII =
+ DependantJD.MaterializingInfos.find(DependantName);
+ assert(DependantMII != DependantJD.MaterializingInfos.end() &&
+ "Dependant should have MaterializingInfo");
+
+ auto &DependantMI = DependantMII->second;
+
+ // Remove the dependant's dependency on this node.
+ assert(DependantMI.UnemittedDependencies.count(this) &&
+ "Dependant does not have an unemitted dependencies record "
+ "for "
+ "this JITDylib");
+ assert(DependantMI.UnemittedDependencies[this].count(Name) &&
+ "Dependant does not count this symbol as a dependency?");
+
+ DependantMI.UnemittedDependencies[this].erase(Name);
+ if (DependantMI.UnemittedDependencies[this].empty())
+ DependantMI.UnemittedDependencies.erase(this);
+
+ // Transfer unemitted dependencies from this node to the
+ // dependant.
+ DependantJD.transferEmittedNodeDependencies(DependantMI,
+ DependantName, MI);
+
+ auto DependantSymI = DependantJD.Symbols.find(DependantName);
+ assert(DependantSymI != DependantJD.Symbols.end() &&
+ "Dependant has no entry in the Symbols table");
+ auto &DependantSymEntry = DependantSymI->second;
+
+ // If the dependant is emitted and this node was the last of its
+ // unemitted dependencies then the dependant node is now ready, so
+ // notify any pending queries on the dependant node.
+ if (DependantSymEntry.getState() == SymbolState::Emitted &&
+ DependantMI.UnemittedDependencies.empty()) {
+ assert(DependantMI.Dependants.empty() &&
+ "Dependants should be empty by now");
+
+ // Since this dependant is now ready, we erase its
+ // MaterializingInfo and update its materializing state.
+ DependantSymEntry.setState(SymbolState::Ready);
+ DependantJDReadySymbols.push_back(DependantName);
+
+ for (auto &Q :
+ DependantMI.takeQueriesMeeting(SymbolState::Ready)) {
+ Q->notifySymbolMetRequiredState(
+ DependantName, DependantSymI->second.getSymbol());
+ if (Q->isComplete())
+ CompletedQueries.insert(Q);
+ Q->removeQueryDependence(DependantJD, DependantName);
+ }
+ }
+ }
+ }
+
+ auto &ThisJDReadySymbols = ReadySymbols[this];
+ MI.Dependants.clear();
+ if (MI.UnemittedDependencies.empty()) {
+ SymI->second.setState(SymbolState::Ready);
+ ThisJDReadySymbols.push_back(Name);
+ for (auto &Q : MI.takeQueriesMeeting(SymbolState::Ready)) {
+ Q->notifySymbolMetRequiredState(Name, SymI->second.getSymbol());
+ if (Q->isComplete())
+ CompletedQueries.insert(Q);
+ Q->removeQueryDependence(*this, Name);
+ }
+ }
+ }
+
+ return Error::success();
+ }))
+ return Err;
+
+ // Otherwise notify all the completed queries.
+ for (auto &Q : CompletedQueries) {
+ assert(Q->isComplete() && "Q is not complete");
+ Q->handleComplete();
+ }
+
+ return Error::success();
+}
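+
+// Editorial sketch of the happy path through resolve() and emit() as seen by
+// a materializer (`MR`, `Foo`, `Addr` and `Flags` are hypothetical):
+//
+//   if (auto Err = MR.notifyResolved({{Foo, JITEvaluatedSymbol(Addr, Flags)}}))
+//     return Err; // symbols now have addresses (SymbolState::Resolved)
+//   if (auto Err = MR.notifyEmitted())
+//     return Err; // Emitted, and Ready once all dependencies are emitted too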
+
+void JITDylib::unlinkMaterializationResponsibility(
+ MaterializationResponsibility &MR) {
+ ES.runSessionLocked([&]() {
+ auto I = MRTrackers.find(&MR);
+ assert(I != MRTrackers.end() && "MaterializationResponsibility not linked");
+ MRTrackers.erase(I);
+ });
+}
+
+std::pair<JITDylib::AsynchronousSymbolQuerySet,
+ std::shared_ptr<SymbolDependenceMap>>
+JITDylib::failSymbols(FailedSymbolsWorklist Worklist) {
+ AsynchronousSymbolQuerySet FailedQueries;
+ auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+
+ while (!Worklist.empty()) {
+ assert(Worklist.back().first && "Failed JITDylib can not be null");
+ auto &JD = *Worklist.back().first;
+ auto Name = std::move(Worklist.back().second);
+ Worklist.pop_back();
+
+ (*FailedSymbolsMap)[&JD].insert(Name);
+
+ assert(JD.Symbols.count(Name) && "No symbol table entry for Name");
+ auto &Sym = JD.Symbols[Name];
+
+ // Move the symbol into the error state.
+ // Note that this may be redundant: The symbol might already have been
+ // moved to this state in response to the failure of a dependence.
+ Sym.setFlags(Sym.getFlags() | JITSymbolFlags::HasError);
+
+ // FIXME: Come up with a sane mapping of state to
+ // presence-of-MaterializingInfo so that we can assert presence / absence
+ // here, rather than testing it.
+ auto MII = JD.MaterializingInfos.find(Name);
+
+ if (MII == JD.MaterializingInfos.end())
+ continue;
+
+ auto &MI = MII->second;
+
+ // Move all dependants to the error state and disconnect from them.
+ for (auto &KV : MI.Dependants) {
+ auto &DependantJD = *KV.first;
+ for (auto &DependantName : KV.second) {
+ assert(DependantJD.Symbols.count(DependantName) &&
+ "No symbol table entry for DependantName");
+ auto &DependantSym = DependantJD.Symbols[DependantName];
+ DependantSym.setFlags(DependantSym.getFlags() |
+ JITSymbolFlags::HasError);
+
+ assert(DependantJD.MaterializingInfos.count(DependantName) &&
+ "No MaterializingInfo for dependant");
+ auto &DependantMI = DependantJD.MaterializingInfos[DependantName];
+
+ auto UnemittedDepI = DependantMI.UnemittedDependencies.find(&JD);
+ assert(UnemittedDepI != DependantMI.UnemittedDependencies.end() &&
+ "No UnemittedDependencies entry for this JITDylib");
+ assert(UnemittedDepI->second.count(Name) &&
+ "No UnemittedDependencies entry for this symbol");
+ UnemittedDepI->second.erase(Name);
+ if (UnemittedDepI->second.empty())
+ DependantMI.UnemittedDependencies.erase(UnemittedDepI);
+
+ // If this symbol is already in the emitted state then we need to
+ // take responsibility for failing its queries, so add it to the
+ // worklist.
+ if (DependantSym.getState() == SymbolState::Emitted) {
+ assert(DependantMI.Dependants.empty() &&
+ "Emitted symbol should not have dependants");
+ Worklist.push_back(std::make_pair(&DependantJD, DependantName));
+ }
+ }
+ }
+ MI.Dependants.clear();
+
+ // Disconnect from all unemitted dependencies.
+ for (auto &KV : MI.UnemittedDependencies) {
+ auto &UnemittedDepJD = *KV.first;
+ for (auto &UnemittedDepName : KV.second) {
+ auto UnemittedDepMII =
+ UnemittedDepJD.MaterializingInfos.find(UnemittedDepName);
+ assert(UnemittedDepMII != UnemittedDepJD.MaterializingInfos.end() &&
+ "Missing MII for unemitted dependency");
+ assert(UnemittedDepMII->second.Dependants.count(&JD) &&
+ "JD not listed as a dependant of unemitted dependency");
+ assert(UnemittedDepMII->second.Dependants[&JD].count(Name) &&
+ "Name is not listed as a dependant of unemitted dependency");
+ UnemittedDepMII->second.Dependants[&JD].erase(Name);
+ if (UnemittedDepMII->second.Dependants[&JD].empty())
+ UnemittedDepMII->second.Dependants.erase(&JD);
+ }
+ }
+ MI.UnemittedDependencies.clear();
+
+ // Collect queries to be failed for this MII.
+ AsynchronousSymbolQueryList ToDetach;
+ for (auto &Q : MII->second.pendingQueries()) {
+ // Add the query to the list to be failed and detach it.
+ FailedQueries.insert(Q);
+ ToDetach.push_back(Q);
+ }
+ for (auto &Q : ToDetach)
+ Q->detach();
+
+ assert(MI.Dependants.empty() &&
+ "Can not delete MaterializingInfo with dependants still attached");
+ assert(MI.UnemittedDependencies.empty() &&
+ "Can not delete MaterializingInfo with unemitted dependencies "
+ "still attached");
+ assert(!MI.hasQueriesPending() &&
+ "Can not delete MaterializingInfo with queries pending");
+ JD.MaterializingInfos.erase(MII);
+ }
+
+ return std::make_pair(std::move(FailedQueries), std::move(FailedSymbolsMap));
+}
+
+void JITDylib::setLinkOrder(JITDylibSearchOrder NewLinkOrder,
+ bool LinkAgainstThisJITDylibFirst) {
+ ES.runSessionLocked([&]() {
+ if (LinkAgainstThisJITDylibFirst) {
+ LinkOrder.clear();
+ if (NewLinkOrder.empty() || NewLinkOrder.front().first != this)
+ LinkOrder.push_back(
+ std::make_pair(this, JITDylibLookupFlags::MatchAllSymbols));
+ llvm::append_range(LinkOrder, NewLinkOrder);
+ } else
+ LinkOrder = std::move(NewLinkOrder);
+ });
+}
+
+void JITDylib::addToLinkOrder(JITDylib &JD, JITDylibLookupFlags JDLookupFlags) {
+ ES.runSessionLocked([&]() { LinkOrder.push_back({&JD, JDLookupFlags}); });
+}
+
+void JITDylib::replaceInLinkOrder(JITDylib &OldJD, JITDylib &NewJD,
+ JITDylibLookupFlags JDLookupFlags) {
+ ES.runSessionLocked([&]() {
+ for (auto &KV : LinkOrder)
+ if (KV.first == &OldJD) {
+ KV = {&NewJD, JDLookupFlags};
+ break;
+ }
+ });
+}
+
+void JITDylib::removeFromLinkOrder(JITDylib &JD) {
+ ES.runSessionLocked([&]() {
+ auto I = llvm::find_if(LinkOrder,
+ [&](const JITDylibSearchOrder::value_type &KV) {
+ return KV.first == &JD;
+ });
+ if (I != LinkOrder.end())
+ LinkOrder.erase(I);
+ });
+}
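+
+// Editorial sketch: typical client-side link-order configuration with
+// hypothetical JITDylibs `JD`, `PlatformJD` and `UtilsJD`:
+//
+//   JD.setLinkOrder({{&PlatformJD, JITDylibLookupFlags::MatchExportedSymbolsOnly}});
+//   JD.addToLinkOrder(UtilsJD, JITDylibLookupFlags::MatchExportedSymbolsOnly);
+//   // With LinkAgainstThisJITDylibFirst (the default) lookups still search
+//   // JD itself before PlatformJD and UtilsJD.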
+
+Error JITDylib::remove(const SymbolNameSet &Names) {
+ return ES.runSessionLocked([&]() -> Error {
+ using SymbolMaterializerItrPair =
+ std::pair<SymbolTable::iterator, UnmaterializedInfosMap::iterator>;
+ std::vector<SymbolMaterializerItrPair> SymbolsToRemove;
+ SymbolNameSet Missing;
+ SymbolNameSet Materializing;
+
+ for (auto &Name : Names) {
+ auto I = Symbols.find(Name);
+
+ // Note symbol missing.
+ if (I == Symbols.end()) {
+ Missing.insert(Name);
+ continue;
+ }
+
+ // Note symbol materializing.
+ if (I->second.getState() != SymbolState::NeverSearched &&
+ I->second.getState() != SymbolState::Ready) {
+ Materializing.insert(Name);
+ continue;
+ }
+
+ auto UMII = I->second.hasMaterializerAttached()
+ ? UnmaterializedInfos.find(Name)
+ : UnmaterializedInfos.end();
+ SymbolsToRemove.push_back(std::make_pair(I, UMII));
+ }
+
+ // If any of the symbols are not defined, return an error.
+ if (!Missing.empty())
+ return make_error<SymbolsNotFound>(std::move(Missing));
+
+ // If any of the symbols are currently materializing, return an error.
+ if (!Materializing.empty())
+ return make_error<SymbolsCouldNotBeRemoved>(std::move(Materializing));
+
+ // Remove the symbols.
+ for (auto &SymbolMaterializerItrPair : SymbolsToRemove) {
+ auto UMII = SymbolMaterializerItrPair.second;
+
+ // If there is a materializer attached, call discard.
+ if (UMII != UnmaterializedInfos.end()) {
+ UMII->second->MU->doDiscard(*this, UMII->first);
+ UnmaterializedInfos.erase(UMII);
+ }
+
+ auto SymI = SymbolMaterializerItrPair.first;
+ Symbols.erase(SymI);
+ }
+
+ return Error::success();
+ });
+}
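+
+// Editorial sketch (hypothetical `ES` and `JD`): removing a definition by
+// name. The call fails with SymbolsNotFound or SymbolsCouldNotBeRemoved in
+// the cases rejected above.
+//
+//   if (auto Err = JD.remove({ES.intern("foo")}))
+//     logAllUnhandledErrors(std::move(Err), errs(), "remove failed: ");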
+
+void JITDylib::dump(raw_ostream &OS) {
+ ES.runSessionLocked([&, this]() {
+ OS << "JITDylib \"" << JITDylibName << "\" (ES: "
+ << format("0x%016" PRIx64, reinterpret_cast<uintptr_t>(&ES)) << "):\n"
+ << "Link order: " << LinkOrder << "\n"
+ << "Symbol table:\n";
+
+ for (auto &KV : Symbols) {
+ OS << " \"" << *KV.first << "\": ";
+ if (auto Addr = KV.second.getAddress())
+ OS << format("0x%016" PRIx64, Addr) << ", " << KV.second.getFlags()
+ << " ";
+ else
+ OS << "<not resolved> ";
+
+ OS << KV.second.getFlags() << " " << KV.second.getState();
+
+ if (KV.second.hasMaterializerAttached()) {
+ OS << " (Materializer ";
+ auto I = UnmaterializedInfos.find(KV.first);
+ assert(I != UnmaterializedInfos.end() &&
+ "Lazy symbol should have UnmaterializedInfo");
+ OS << I->second->MU.get() << ", " << I->second->MU->getName() << ")\n";
+ } else
+ OS << "\n";
+ }
+
+ if (!MaterializingInfos.empty())
+ OS << " MaterializingInfos entries:\n";
+ for (auto &KV : MaterializingInfos) {
+ OS << " \"" << *KV.first << "\":\n"
+ << " " << KV.second.pendingQueries().size()
+ << " pending queries: { ";
+ for (const auto &Q : KV.second.pendingQueries())
+ OS << Q.get() << " (" << Q->getRequiredState() << ") ";
+ OS << "}\n Dependants:\n";
+ for (auto &KV2 : KV.second.Dependants)
+ OS << " " << KV2.first->getName() << ": " << KV2.second << "\n";
+ OS << " Unemitted Dependencies:\n";
+ for (auto &KV2 : KV.second.UnemittedDependencies)
+ OS << " " << KV2.first->getName() << ": " << KV2.second << "\n";
+ }
+ });
+}
+
+void JITDylib::MaterializingInfo::addQuery(
+ std::shared_ptr<AsynchronousSymbolQuery> Q) {
+
+ auto I = std::lower_bound(
+ PendingQueries.rbegin(), PendingQueries.rend(), Q->getRequiredState(),
+ [](const std::shared_ptr<AsynchronousSymbolQuery> &V, SymbolState S) {
+ return V->getRequiredState() <= S;
+ });
+ PendingQueries.insert(I.base(), std::move(Q));
+}
+
+void JITDylib::MaterializingInfo::removeQuery(
+ const AsynchronousSymbolQuery &Q) {
+ // FIXME: Implement 'find_as' for shared_ptr<T>/T*.
+ auto I = llvm::find_if(
+ PendingQueries, [&Q](const std::shared_ptr<AsynchronousSymbolQuery> &V) {
+ return V.get() == &Q;
+ });
+ assert(I != PendingQueries.end() &&
+ "Query is not attached to this MaterializingInfo");
+ PendingQueries.erase(I);
+}
+
+JITDylib::AsynchronousSymbolQueryList
+JITDylib::MaterializingInfo::takeQueriesMeeting(SymbolState RequiredState) {
+ AsynchronousSymbolQueryList Result;
+ while (!PendingQueries.empty()) {
+ if (PendingQueries.back()->getRequiredState() > RequiredState)
+ break;
+
+ Result.push_back(std::move(PendingQueries.back()));
+ PendingQueries.pop_back();
+ }
+
+ return Result;
+}
+
+JITDylib::JITDylib(ExecutionSession &ES, std::string Name)
+ : ES(ES), JITDylibName(std::move(Name)) {
+ LinkOrder.push_back({this, JITDylibLookupFlags::MatchAllSymbols});
+}
+
+ResourceTrackerSP JITDylib::getTracker(MaterializationResponsibility &MR) {
+ auto I = MRTrackers.find(&MR);
+ assert(I != MRTrackers.end() && "MR is not linked");
+ assert(I->second && "Linked tracker is null");
+ return I->second;
+}
+
+std::pair<JITDylib::AsynchronousSymbolQuerySet,
+ std::shared_ptr<SymbolDependenceMap>>
+JITDylib::removeTracker(ResourceTracker &RT) {
+ // Note: Should be called under the session lock.
+
+ SymbolNameVector SymbolsToRemove;
+ std::vector<std::pair<JITDylib *, SymbolStringPtr>> SymbolsToFail;
+
+ if (&RT == DefaultTracker.get()) {
+ SymbolNameSet TrackedSymbols;
+ for (auto &KV : TrackerSymbols)
+ for (auto &Sym : KV.second)
+ TrackedSymbols.insert(Sym);
+
+ for (auto &KV : Symbols) {
+ auto &Sym = KV.first;
+ if (!TrackedSymbols.count(Sym))
+ SymbolsToRemove.push_back(Sym);
+ }
+
+ DefaultTracker.reset();
+ } else {
+ // Check for a non-default tracker.
+ auto I = TrackerSymbols.find(&RT);
+ if (I != TrackerSymbols.end()) {
+ SymbolsToRemove = std::move(I->second);
+ TrackerSymbols.erase(I);
+ }
+ // ... if not found, this tracker was already defunct. Nothing to do.
+ }
+
+ for (auto &Sym : SymbolsToRemove) {
+ assert(Symbols.count(Sym) && "Symbol not in symbol table");
+
+ // If there is a MaterializingInfo then collect any queries to fail.
+ auto MII = MaterializingInfos.find(Sym);
+ if (MII != MaterializingInfos.end())
+ SymbolsToFail.push_back({this, Sym});
+ }
+
+ AsynchronousSymbolQuerySet QueriesToFail;
+ auto Result = failSymbols(std::move(SymbolsToFail));
+
+ // Removed symbols should be taken out of the table altogether.
+ for (auto &Sym : SymbolsToRemove) {
+ auto I = Symbols.find(Sym);
+ assert(I != Symbols.end() && "Symbol not present in table");
+
+ // Remove Materializer if present.
+ if (I->second.hasMaterializerAttached()) {
+ // FIXME: Should this discard the symbols?
+ UnmaterializedInfos.erase(Sym);
+ } else {
+ assert(!UnmaterializedInfos.count(Sym) &&
+ "Symbol has materializer attached");
+ }
+
+ Symbols.erase(I);
+ }
+
+ return Result;
+}
+
+void JITDylib::transferTracker(ResourceTracker &DstRT, ResourceTracker &SrcRT) {
+ assert(&DstRT != &SrcRT && "No-op transfers shouldn't call transferTracker");
+ assert(&DstRT.getJITDylib() == this && "DstRT is not for this JITDylib");
+ assert(&SrcRT.getJITDylib() == this && "SrcRT is not for this JITDylib");
+
+ // Update trackers for any not-yet materialized units.
+ for (auto &KV : UnmaterializedInfos) {
+ if (KV.second->RT == &SrcRT)
+ KV.second->RT = &DstRT;
+ }
+
+ // Update trackers for any active materialization responsibilities.
+ for (auto &KV : MRTrackers) {
+ if (KV.second == &SrcRT)
+ KV.second = &DstRT;
+ }
+
+ // If we're transferring to the default tracker we just need to delete the
+ // tracked symbols for the source tracker.
+ if (&DstRT == DefaultTracker.get()) {
+ TrackerSymbols.erase(&SrcRT);
+ return;
+ }
+
+ // If we're transferring from the default tracker we need to find all
+ // currently untracked symbols.
+ if (&SrcRT == DefaultTracker.get()) {
+ assert(!TrackerSymbols.count(&SrcRT) &&
+ "Default tracker should not appear in TrackerSymbols");
+
+ SymbolNameVector SymbolsToTrack;
+
+ SymbolNameSet CurrentlyTrackedSymbols;
+ for (auto &KV : TrackerSymbols)
+ for (auto &Sym : KV.second)
+ CurrentlyTrackedSymbols.insert(Sym);
+
+ for (auto &KV : Symbols) {
+ auto &Sym = KV.first;
+ if (!CurrentlyTrackedSymbols.count(Sym))
+ SymbolsToTrack.push_back(Sym);
+ }
+
+ TrackerSymbols[&DstRT] = std::move(SymbolsToTrack);
+ return;
+ }
+
+ auto &DstTrackedSymbols = TrackerSymbols[&DstRT];
+
+ // Finally, if neither SrcRT nor DstRT is the default tracker then
+ // just append SrcRT's tracked symbols to DstRT's.
+ auto SI = TrackerSymbols.find(&SrcRT);
+ if (SI == TrackerSymbols.end())
+ return;
+
+ DstTrackedSymbols.reserve(DstTrackedSymbols.size() + SI->second.size());
+ for (auto &Sym : SI->second)
+ DstTrackedSymbols.push_back(std::move(Sym));
+ TrackerSymbols.erase(SI);
+}
+
+Error JITDylib::defineImpl(MaterializationUnit &MU) {
+
+ LLVM_DEBUG({ dbgs() << " " << MU.getSymbols() << "\n"; });
+
+ SymbolNameSet Duplicates;
+ std::vector<SymbolStringPtr> ExistingDefsOverridden;
+ std::vector<SymbolStringPtr> MUDefsOverridden;
+
+ for (const auto &KV : MU.getSymbols()) {
+ auto I = Symbols.find(KV.first);
+
+ if (I != Symbols.end()) {
+ if (KV.second.isStrong()) {
+ if (I->second.getFlags().isStrong() ||
+ I->second.getState() > SymbolState::NeverSearched)
+ Duplicates.insert(KV.first);
+ else {
+ assert(I->second.getState() == SymbolState::NeverSearched &&
+ "Overridden existing def should be in the never-searched "
+ "state");
+ ExistingDefsOverridden.push_back(KV.first);
+ }
+ } else
+ MUDefsOverridden.push_back(KV.first);
+ }
+ }
+
+ // If there were any duplicate definitions then bail out.
+ if (!Duplicates.empty()) {
+ LLVM_DEBUG(
+ { dbgs() << " Error: Duplicate symbols " << Duplicates << "\n"; });
+ return make_error<DuplicateDefinition>(std::string(**Duplicates.begin()));
+ }
+
+ // Discard any overridden defs in this MU.
+ LLVM_DEBUG({
+ if (!MUDefsOverridden.empty())
+ dbgs() << " Defs in this MU overridden: " << MUDefsOverridden << "\n";
+ });
+ for (auto &S : MUDefsOverridden)
+ MU.doDiscard(*this, S);
+
+ // Discard existing overridden defs.
+ LLVM_DEBUG({
+ if (!ExistingDefsOverridden.empty())
+ dbgs() << " Existing defs overridden by this MU: " << MUDefsOverridden
+ << "\n";
+ });
+ for (auto &S : ExistingDefsOverridden) {
+
+ auto UMII = UnmaterializedInfos.find(S);
+ assert(UMII != UnmaterializedInfos.end() &&
+ "Overridden existing def should have an UnmaterializedInfo");
+ UMII->second->MU->doDiscard(*this, S);
+ }
+
+ // Finally, add the defs from this MU.
+ for (auto &KV : MU.getSymbols()) {
+ auto &SymEntry = Symbols[KV.first];
+ SymEntry.setFlags(KV.second);
+ SymEntry.setState(SymbolState::NeverSearched);
+ SymEntry.setMaterializerAttached(true);
+ }
+
+ return Error::success();
+}
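+
+// Editorial sketch: defineImpl is reached through JITDylib::define. A
+// client-level definition of an absolute symbol (hypothetical `FooAddr`)
+// looks like:
+//
+//   JITEvaluatedSymbol FooSym(FooAddr, JITSymbolFlags::Exported);
+//   if (auto Err = JD.define(absoluteSymbols({{ES.intern("foo"), FooSym}})))
+//     return Err; // e.g. DuplicateDefinition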
+
+void JITDylib::installMaterializationUnit(
+ std::unique_ptr<MaterializationUnit> MU, ResourceTracker &RT) {
+
+ // defineImpl succeeded.
+ if (&RT != DefaultTracker.get()) {
+ auto &TS = TrackerSymbols[&RT];
+ TS.reserve(TS.size() + MU->getSymbols().size());
+ for (auto &KV : MU->getSymbols())
+ TS.push_back(KV.first);
+ }
+
+ auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU), &RT);
+ for (auto &KV : UMI->MU->getSymbols())
+ UnmaterializedInfos[KV.first] = UMI;
+}
+
+void JITDylib::detachQueryHelper(AsynchronousSymbolQuery &Q,
+ const SymbolNameSet &QuerySymbols) {
+ for (auto &QuerySymbol : QuerySymbols) {
+ assert(MaterializingInfos.count(QuerySymbol) &&
+ "QuerySymbol does not have MaterializingInfo");
+ auto &MI = MaterializingInfos[QuerySymbol];
+ MI.removeQuery(Q);
+ }
+}
+
+void JITDylib::transferEmittedNodeDependencies(
+ MaterializingInfo &DependantMI, const SymbolStringPtr &DependantName,
+ MaterializingInfo &EmittedMI) {
+ for (auto &KV : EmittedMI.UnemittedDependencies) {
+ auto &DependencyJD = *KV.first;
+ SymbolNameSet *UnemittedDependenciesOnDependencyJD = nullptr;
+
+ for (auto &DependencyName : KV.second) {
+ auto &DependencyMI = DependencyJD.MaterializingInfos[DependencyName];
+
+ // Do not add self dependencies.
+ if (&DependencyMI == &DependantMI)
+ continue;
+
+ // If we haven't looked up the dependencies for DependencyJD yet, do it
+ // now and cache the result.
+ if (!UnemittedDependenciesOnDependencyJD)
+ UnemittedDependenciesOnDependencyJD =
+ &DependantMI.UnemittedDependencies[&DependencyJD];
+
+ DependencyMI.Dependants[this].insert(DependantName);
+ UnemittedDependenciesOnDependencyJD->insert(DependencyName);
+ }
+ }
+}
+
+Platform::~Platform() {}
+
+Expected<DenseMap<JITDylib *, SymbolMap>> Platform::lookupInitSymbols(
+ ExecutionSession &ES,
+ const DenseMap<JITDylib *, SymbolLookupSet> &InitSyms) {
+
+ DenseMap<JITDylib *, SymbolMap> CompoundResult;
+ Error CompoundErr = Error::success();
+ std::mutex LookupMutex;
+ std::condition_variable CV;
+ uint64_t Count = InitSyms.size();
+
+ LLVM_DEBUG({
+ dbgs() << "Issuing init-symbol lookup:\n";
+ for (auto &KV : InitSyms)
+ dbgs() << " " << KV.first->getName() << ": " << KV.second << "\n";
+ });
+
+ for (auto &KV : InitSyms) {
+ auto *JD = KV.first;
+ auto Names = std::move(KV.second);
+ ES.lookup(
+ LookupKind::Static,
+ JITDylibSearchOrder({{JD, JITDylibLookupFlags::MatchAllSymbols}}),
+ std::move(Names), SymbolState::Ready,
+ [&, JD](Expected<SymbolMap> Result) {
+ {
+ std::lock_guard<std::mutex> Lock(LookupMutex);
+ --Count;
+ if (Result) {
+ assert(!CompoundResult.count(JD) &&
+ "Duplicate JITDylib in lookup?");
+ CompoundResult[JD] = std::move(*Result);
+ } else
+ CompoundErr =
+ joinErrors(std::move(CompoundErr), Result.takeError());
+ }
+ CV.notify_one();
+ },
+ NoDependenciesToRegister);
+ }
+
+ std::unique_lock<std::mutex> Lock(LookupMutex);
+ CV.wait(Lock, [&] { return Count == 0 || CompoundErr; });
+
+ if (CompoundErr)
+ return std::move(CompoundErr);
+
+ return std::move(CompoundResult);
+}
+
+ExecutionSession::ExecutionSession(std::shared_ptr<SymbolStringPool> SSP)
+ : SSP(SSP ? std::move(SSP) : std::make_shared<SymbolStringPool>()) {}
+
+Error ExecutionSession::endSession() {
+ LLVM_DEBUG(dbgs() << "Ending ExecutionSession " << this << "\n");
+
+ std::vector<JITDylibSP> JITDylibsToClose = runSessionLocked([&] {
+ SessionOpen = false;
+ return std::move(JDs);
+ });
+
+ // TODO: notify platform? run static deinits?
+
+ Error Err = Error::success();
+ for (auto &JD : JITDylibsToClose)
+ Err = joinErrors(std::move(Err), JD->clear());
+ return Err;
+}
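+
+// Editorial sketch: sessions are torn down explicitly, and clearing each
+// JITDylib may surface deferred errors (hypothetical `ES`):
+//
+//   if (auto Err = ES.endSession())
+//     logAllUnhandledErrors(std::move(Err), errs(), "endSession: ");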
+
+void ExecutionSession::registerResourceManager(ResourceManager &RM) {
+ runSessionLocked([&] { ResourceManagers.push_back(&RM); });
+}
+
+void ExecutionSession::deregisterResourceManager(ResourceManager &RM) {
+ runSessionLocked([&] {
+ assert(!ResourceManagers.empty() && "No managers registered");
+ if (ResourceManagers.back() == &RM)
+ ResourceManagers.pop_back();
+ else {
+ auto I = llvm::find(ResourceManagers, &RM);
+ assert(I != ResourceManagers.end() && "RM not registered");
+ ResourceManagers.erase(I);
+ }
+ });
+}
+
+JITDylib *ExecutionSession::getJITDylibByName(StringRef Name) {
+ return runSessionLocked([&, this]() -> JITDylib * {
+ for (auto &JD : JDs)
+ if (JD->getName() == Name)
+ return JD.get();
+ return nullptr;
+ });
+}
+
+JITDylib &ExecutionSession::createBareJITDylib(std::string Name) {
+ assert(!getJITDylibByName(Name) && "JITDylib with that name already exists");
+ return runSessionLocked([&, this]() -> JITDylib & {
+ JDs.push_back(new JITDylib(*this, std::move(Name)));
+ return *JDs.back();
+ });
+}
+
+Expected<JITDylib &> ExecutionSession::createJITDylib(std::string Name) {
+ auto &JD = createBareJITDylib(Name);
+ if (P)
+ if (auto Err = P->setupJITDylib(JD))
+ return std::move(Err);
+ return JD;
+}
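+
+// Editorial sketch: clients normally call createJITDylib (so the Platform,
+// if one is attached, can run its per-dylib setup) rather than
+// createBareJITDylib:
+//
+//   auto ExtraJD = ES.createJITDylib("extra");
+//   if (!ExtraJD)
+//     return ExtraJD.takeError(); // Platform::setupJITDylib failed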
+
+std::vector<JITDylibSP> JITDylib::getDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
+ if (JDs.empty())
+ return {};
+
+ auto &ES = JDs.front()->getExecutionSession();
+ return ES.runSessionLocked([&]() {
+ DenseSet<JITDylib *> Visited;
+ std::vector<JITDylibSP> Result;
+
+ for (auto &JD : JDs) {
+
+ if (Visited.count(JD.get()))
+ continue;
+
+ SmallVector<JITDylibSP, 64> WorkStack;
+ WorkStack.push_back(JD);
+ Visited.insert(JD.get());
+
+ while (!WorkStack.empty()) {
+ Result.push_back(std::move(WorkStack.back()));
+ WorkStack.pop_back();
+
+ for (auto &KV : llvm::reverse(Result.back()->LinkOrder)) {
+ auto &JD = *KV.first;
+ if (Visited.count(&JD))
+ continue;
+ Visited.insert(&JD);
+ WorkStack.push_back(&JD);
+ }
+ }
+ }
+ return Result;
+ });
+}
+
+std::vector<JITDylibSP>
+JITDylib::getReverseDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
+ auto Tmp = getDFSLinkOrder(JDs);
+ std::reverse(Tmp.begin(), Tmp.end());
+ return Tmp;
+}
+
+std::vector<JITDylibSP> JITDylib::getDFSLinkOrder() {
+ return getDFSLinkOrder({this});
+}
+
+std::vector<JITDylibSP> JITDylib::getReverseDFSLinkOrder() {
+ return getReverseDFSLinkOrder({this});
+}
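+
+// Editorial sketch: reverse-DFS order visits dependencies before their
+// dependants, the natural order for running initializers (hypothetical
+// `MainJD`):
+//
+//   for (JITDylibSP JD : MainJD.getReverseDFSLinkOrder())
+//     dbgs() << JD->getName() << "\n"; // least-dependent dylibs print first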
+
+void ExecutionSession::lookupFlags(
+ LookupKind K, JITDylibSearchOrder SearchOrder, SymbolLookupSet LookupSet,
+ unique_function<void(Expected<SymbolFlagsMap>)> OnComplete) {
+
+ OL_applyQueryPhase1(std::make_unique<InProgressLookupFlagsState>(
+ K, std::move(SearchOrder), std::move(LookupSet),
+ std::move(OnComplete)),
+ Error::success());
+}
+
+Expected<SymbolFlagsMap>
+ExecutionSession::lookupFlags(LookupKind K, JITDylibSearchOrder SearchOrder,
+ SymbolLookupSet LookupSet) {
+
+ std::promise<MSVCPExpected<SymbolFlagsMap>> ResultP;
+ OL_applyQueryPhase1(std::make_unique<InProgressLookupFlagsState>(
+ K, std::move(SearchOrder), std::move(LookupSet),
+ [&ResultP](Expected<SymbolFlagsMap> Result) {
+ ResultP.set_value(std::move(Result));
+ }),
+ Error::success());
+
+ auto ResultF = ResultP.get_future();
+ return ResultF.get();
+}
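+
+// Editorial sketch: the blocking lookupFlags form probes symbol flags
+// without materializing the definitions (hypothetical `ES` and `JD`):
+//
+//   SymbolLookupSet LS({ES.intern("foo"), ES.intern("bar")});
+//   auto Flags = ES.lookupFlags(LookupKind::Static,
+//                               makeJITDylibSearchOrder({&JD}), std::move(LS));
+//   if (!Flags)
+//     return Flags.takeError();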
+
+void ExecutionSession::lookup(
+ LookupKind K, const JITDylibSearchOrder &SearchOrder,
+ SymbolLookupSet Symbols, SymbolState RequiredState,
+ SymbolsResolvedCallback NotifyComplete,
+ RegisterDependenciesFunction RegisterDependencies) {
+
+ LLVM_DEBUG({
+ runSessionLocked([&]() {
+ dbgs() << "Looking up " << Symbols << " in " << SearchOrder
+ << " (required state: " << RequiredState << ")\n";
+ });
+ });
+
+ // lookup can be re-entered recursively if running on a single thread. Run any
+ // outstanding MUs in case this query depends on them; otherwise this lookup
+ // will starve waiting for a result from an MU that is stuck in the queue.
+ dispatchOutstandingMUs();
+
+ auto Unresolved = std::move(Symbols);
+ auto Q = std::make_shared<AsynchronousSymbolQuery>(Unresolved, RequiredState,
+ std::move(NotifyComplete));
+
+ auto IPLS = std::make_unique<InProgressFullLookupState>(
+ K, SearchOrder, std::move(Unresolved), RequiredState, std::move(Q),
+ std::move(RegisterDependencies));
+
+ OL_applyQueryPhase1(std::move(IPLS), Error::success());
+}
+
+Expected<SymbolMap>
+ExecutionSession::lookup(const JITDylibSearchOrder &SearchOrder,
+ const SymbolLookupSet &Symbols, LookupKind K,
+ SymbolState RequiredState,
+ RegisterDependenciesFunction RegisterDependencies) {
+#if LLVM_ENABLE_THREADS
+ // In the threaded case we use promises to return the results.
+ std::promise<SymbolMap> PromisedResult;
+ Error ResolutionError = Error::success();
+
+ auto NotifyComplete = [&](Expected<SymbolMap> R) {
+ if (R)
+ PromisedResult.set_value(std::move(*R));
+ else {
+ ErrorAsOutParameter _(&ResolutionError);
+ ResolutionError = R.takeError();
+ PromisedResult.set_value(SymbolMap());
+ }
+ };
+
+#else
+ SymbolMap Result;
+ Error ResolutionError = Error::success();
+
+ auto NotifyComplete = [&](Expected<SymbolMap> R) {
+ ErrorAsOutParameter _(&ResolutionError);
+ if (R)
+ Result = std::move(*R);
+ else
+ ResolutionError = R.takeError();
+ };
+#endif
+
+ // Perform the asynchronous lookup.
+ lookup(K, SearchOrder, Symbols, RequiredState, NotifyComplete,
+ RegisterDependencies);
+
+#if LLVM_ENABLE_THREADS
+ auto ResultFuture = PromisedResult.get_future();
+ auto Result = ResultFuture.get();
+
+ if (ResolutionError)
+ return std::move(ResolutionError);
+
+ return std::move(Result);
+
+#else
+ if (ResolutionError)
+ return std::move(ResolutionError);
+
+ return Result;
+#endif
+}
+
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(const JITDylibSearchOrder &SearchOrder,
+ SymbolStringPtr Name, SymbolState RequiredState) {
+ SymbolLookupSet Names({Name});
+
+ if (auto ResultMap = lookup(SearchOrder, std::move(Names), LookupKind::Static,
+ RequiredState, NoDependenciesToRegister)) {
+ assert(ResultMap->size() == 1 && "Unexpected number of results");
+ assert(ResultMap->count(Name) && "Missing result for symbol");
+ return std::move(ResultMap->begin()->second);
+ } else
+ return ResultMap.takeError();
+}
+
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(ArrayRef<JITDylib *> SearchOrder, SymbolStringPtr Name,
+ SymbolState RequiredState) {
+ return lookup(makeJITDylibSearchOrder(SearchOrder), Name, RequiredState);
+}
+
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(ArrayRef<JITDylib *> SearchOrder, StringRef Name,
+ SymbolState RequiredState) {
+ return lookup(SearchOrder, intern(Name), RequiredState);
+}
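+
+// Editorial sketch: the simplest blocking form, looking up one symbol by
+// string in a single JITDylib (hypothetical `ES` and `JD`):
+//
+//   if (auto Sym = ES.lookup({&JD}, "main")) {
+//     JITTargetAddress MainAddr = Sym->getAddress();
+//     (void)MainAddr;
+//   } else
+//     return Sym.takeError();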
+
+void ExecutionSession::dump(raw_ostream &OS) {
+ runSessionLocked([this, &OS]() {
+ for (auto &JD : JDs)
+ JD->dump(OS);
+ });
+}
+
+void ExecutionSession::dispatchOutstandingMUs() {
+ LLVM_DEBUG(dbgs() << "Dispatching MaterializationUnits...\n");
+ while (true) {
+ Optional<std::pair<std::unique_ptr<MaterializationUnit>,
+ std::unique_ptr<MaterializationResponsibility>>>
+ JMU;
+
+ {
+ std::lock_guard<std::recursive_mutex> Lock(OutstandingMUsMutex);
+ if (!OutstandingMUs.empty()) {
+ JMU.emplace(std::move(OutstandingMUs.back()));
+ OutstandingMUs.pop_back();
+ }
+ }
+
+ if (!JMU)
+ break;
+
+ assert(JMU->first && "No MU?");
+ LLVM_DEBUG(dbgs() << " Dispatching \"" << JMU->first->getName() << "\"\n");
+ dispatchMaterialization(std::move(JMU->first), std::move(JMU->second));
+ }
+ LLVM_DEBUG(dbgs() << "Done dispatching MaterializationUnits.\n");
+}
+
+Error ExecutionSession::removeResourceTracker(ResourceTracker &RT) {
+ LLVM_DEBUG({
+ dbgs() << "In " << RT.getJITDylib().getName() << " removing tracker "
+ << formatv("{0:x}", RT.getKeyUnsafe()) << "\n";
+ });
+ std::vector<ResourceManager *> CurrentResourceManagers;
+
+ JITDylib::AsynchronousSymbolQuerySet QueriesToFail;
+ std::shared_ptr<SymbolDependenceMap> FailedSymbols;
+
+ runSessionLocked([&] {
+ CurrentResourceManagers = ResourceManagers;
+ RT.makeDefunct();
+ std::tie(QueriesToFail, FailedSymbols) = RT.getJITDylib().removeTracker(RT);
+ });
+
+ Error Err = Error::success();
+
+ for (auto *L : reverse(CurrentResourceManagers))
+ Err =
+ joinErrors(std::move(Err), L->handleRemoveResources(RT.getKeyUnsafe()));
+
+ for (auto &Q : QueriesToFail)
+ Q->handleFailed(make_error<FailedToMaterialize>(FailedSymbols));
+
+ return Err;
+}
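+
+// Editorial sketch: per-tracker resource lifetime from the client side
+// (hypothetical `JD` and MaterializationUnit `MU`):
+//
+//   ResourceTrackerSP RT = JD.createResourceTracker();
+//   if (auto Err = JD.define(std::move(MU), RT))
+//     return Err;
+//   // ...later, RT->remove() lands in removeResourceTracker above, removing
+//   // the symbols and failing any still-pending queries against them.
+//   if (auto Err = RT->remove())
+//     return Err;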
+
+void ExecutionSession::transferResourceTracker(ResourceTracker &DstRT,
+ ResourceTracker &SrcRT) {
+ LLVM_DEBUG({
+ dbgs() << "In " << SrcRT.getJITDylib().getName()
+ << " transfering resources from tracker "
+ << formatv("{0:x}", SrcRT.getKeyUnsafe()) << " to tracker "
+ << formatv("{0:x}", DstRT.getKeyUnsafe()) << "\n";
+ });
+
+ // No-op transfers are allowed and do not invalidate the source.
+ if (&DstRT == &SrcRT)
+ return;
+
+ assert(&DstRT.getJITDylib() == &SrcRT.getJITDylib() &&
+ "Can't transfer resources between JITDylibs");
+ runSessionLocked([&]() {
+ SrcRT.makeDefunct();
+ auto &JD = DstRT.getJITDylib();
+ JD.transferTracker(DstRT, SrcRT);
+ for (auto *L : reverse(ResourceManagers))
+ L->handleTransferResources(DstRT.getKeyUnsafe(), SrcRT.getKeyUnsafe());
+ });
+}
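+
+// Editorial sketch: clients reach transferResourceTracker through
+// ResourceTracker::transferTo; afterwards the source tracker is defunct:
+//
+//   SrcRT->transferTo(*DstRT); // DstRT now owns SrcRT's resources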
+
+void ExecutionSession::destroyResourceTracker(ResourceTracker &RT) {
+ runSessionLocked([&]() {
+ LLVM_DEBUG({
+ dbgs() << "In " << RT.getJITDylib().getName() << " destroying tracker "
+ << formatv("{0:x}", RT.getKeyUnsafe()) << "\n";
+ });
+ if (!RT.isDefunct())
+ transferResourceTracker(*RT.getJITDylib().getDefaultResourceTracker(),
+ RT);
+ });
+}
+
+Error ExecutionSession::IL_updateCandidatesFor(
+ JITDylib &JD, JITDylibLookupFlags JDLookupFlags,
+ SymbolLookupSet &Candidates, SymbolLookupSet *NonCandidates) {
+ return Candidates.forEachWithRemoval(
+ [&](const SymbolStringPtr &Name,
+ SymbolLookupFlags SymLookupFlags) -> Expected<bool> {
+ // Search for the symbol. If not found then continue without
+ // removal.
+ auto SymI = JD.Symbols.find(Name);
+ if (SymI == JD.Symbols.end())
+ return false;
+
+ // If this is a non-exported symbol and we're matching exported
+ // symbols only then remove this symbol from the candidates list.
+ //
+ // If we're tracking non-candidates then add this to the non-candidate
+ // list.
+ if (!SymI->second.getFlags().isExported() &&
+ JDLookupFlags == JITDylibLookupFlags::MatchExportedSymbolsOnly) {
+ if (NonCandidates)
+ NonCandidates->add(Name, SymLookupFlags);
+ return true;
+ }
+
+ // If we match against a materialization-side-effects only symbol
+ // then make sure it is weakly-referenced. Otherwise bail out with
+ // an error.
+ // FIXME: Use a "materialization-side-effects-only symbols must be
+ // weakly referenced" specific error here to reduce confusion.
+ if (SymI->second.getFlags().hasMaterializationSideEffectsOnly() &&
+ SymLookupFlags != SymbolLookupFlags::WeaklyReferencedSymbol)
+ return make_error<SymbolsNotFound>(SymbolNameVector({Name}));
+
+ // If we matched against this symbol but it is in the error state
+ // then bail out and treat it as a failure to materialize.
+ if (SymI->second.getFlags().hasError()) {
+ auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+ (*FailedSymbolsMap)[&JD] = {Name};
+ return make_error<FailedToMaterialize>(std::move(FailedSymbolsMap));
+ }
+
+ // Otherwise this is a match. Remove it from the candidate set.
+ return true;
+ });
+}
+
+void ExecutionSession::OL_applyQueryPhase1(
+ std::unique_ptr<InProgressLookupState> IPLS, Error Err) {
+
+ LLVM_DEBUG({
+ dbgs() << "Entering OL_applyQueryPhase1:\n"
+ << " Lookup kind: " << IPLS->K << "\n"
+ << " Search order: " << IPLS->SearchOrder
+ << ", Current index = " << IPLS->CurSearchOrderIndex
+ << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+ << " Lookup set: " << IPLS->LookupSet << "\n"
+ << " Definition generator candidates: "
+ << IPLS->DefGeneratorCandidates << "\n"
+ << " Definition generator non-candidates: "
+ << IPLS->DefGeneratorNonCandidates << "\n";
+ });
+
+ // FIXME: We should attach the query as we go: This provides a result in a
+ // single pass in the common case where all symbols have already reached the
+ // required state. The query could be detached again in the 'fail' method on
+ // IPLS. Phase 2 would be reduced to collecting and dispatching the MUs.
+
+ while (IPLS->CurSearchOrderIndex != IPLS->SearchOrder.size()) {
+
+ // If we've been handed an error or received one back from a generator then
+ // fail the query. We don't need to unlink: At this stage the query hasn't
+ // actually been lodged.
+ if (Err)
+ return IPLS->fail(std::move(Err));
+
+ // Get the next JITDylib and lookup flags.
+ auto &KV = IPLS->SearchOrder[IPLS->CurSearchOrderIndex];
+ auto &JD = *KV.first;
+ auto JDLookupFlags = KV.second;
+
+ LLVM_DEBUG({
+ dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+ << ") with lookup set " << IPLS->LookupSet << ":\n";
+ });
+
+ // If we've just reached a new JITDylib then perform some setup.
+ if (IPLS->NewJITDylib) {
+
+ // Acquire the generator lock for this JITDylib.
+ IPLS->GeneratorLock = std::unique_lock<std::mutex>(JD.GeneratorsMutex);
+
+ // Add any non-candidates from the last JITDylib (if any) back on to the
+ // list of definition candidates for this JITDylib, reset definition
+ // non-candidates to the empty set.
+ SymbolLookupSet Tmp;
+ std::swap(IPLS->DefGeneratorNonCandidates, Tmp);
+ IPLS->DefGeneratorCandidates.append(std::move(Tmp));
+
+ LLVM_DEBUG({
+ dbgs() << " First time visiting " << JD.getName()
+ << ", resetting candidate sets and building generator stack\n";
+ });
+
+ // Build the definition generator stack for this JITDylib.
+ for (auto &DG : reverse(JD.DefGenerators))
+ IPLS->CurDefGeneratorStack.push_back(DG);
+
+ // Flag that we've done our initialization.
+ IPLS->NewJITDylib = false;
+ }
+
+ // Remove any generation candidates that are already defined (and match) in
+ // this JITDylib.
+ runSessionLocked([&] {
+ // Update the list of candidates (and non-candidates) for definition
+ // generation.
+ LLVM_DEBUG(dbgs() << " Updating candidate set...\n");
+ Err = IL_updateCandidatesFor(
+ JD, JDLookupFlags, IPLS->DefGeneratorCandidates,
+ JD.DefGenerators.empty() ? nullptr
+ : &IPLS->DefGeneratorNonCandidates);
+ LLVM_DEBUG({
+ dbgs() << " Remaining candidates = " << IPLS->DefGeneratorCandidates
+ << "\n";
+ });
+ });
+
+ // If we encountered an error while filtering generation candidates then
+ // bail out.
+ if (Err)
+ return IPLS->fail(std::move(Err));
+
+ // Apply any definition generators on the stack.
+ LLVM_DEBUG({
+ if (IPLS->CurDefGeneratorStack.empty())
+ dbgs() << " No generators to run for this JITDylib.\n";
+ else if (IPLS->DefGeneratorCandidates.empty())
+ dbgs() << " No candidates to generate.\n";
+ else
+ dbgs() << " Running " << IPLS->CurDefGeneratorStack.size()
+ << " remaining generators for "
+ << IPLS->DefGeneratorCandidates.size() << " candidates\n";
+ });
+ while (!IPLS->CurDefGeneratorStack.empty() &&
+ !IPLS->DefGeneratorCandidates.empty()) {
+ auto DG = IPLS->CurDefGeneratorStack.back().lock();
+ IPLS->CurDefGeneratorStack.pop_back();
+
+ if (!DG)
+ return IPLS->fail(make_error<StringError>(
+ "DefinitionGenerator removed while lookup in progress",
+ inconvertibleErrorCode()));
+
+ auto K = IPLS->K;
+ auto &LookupSet = IPLS->DefGeneratorCandidates;
+
+ // Run the generator. If the generator takes ownership of QA then this
+ // will break the loop.
+ {
+ LLVM_DEBUG(dbgs() << " Attempting to generate " << LookupSet << "\n");
+ LookupState LS(std::move(IPLS));
+ Err = DG->tryToGenerate(LS, K, JD, JDLookupFlags, LookupSet);
+ IPLS = std::move(LS.IPLS);
+ }
+
+ // If there was an error then fail the query.
+ if (Err) {
+ LLVM_DEBUG({
+ dbgs() << " Error attempting to generate " << LookupSet << "\n";
+ });
+ assert(IPLS && "LS cannot be retained if error is returned");
+ return IPLS->fail(std::move(Err));
+ }
+
+ // Otherwise if QA was captured then break the loop.
+ if (!IPLS) {
+ LLVM_DEBUG(
+ { dbgs() << " LookupState captured. Exiting phase1 for now.\n"; });
+ return;
+ }
+
+ // Otherwise if we're continuing around the loop then update candidates
+ // for the next round.
+ runSessionLocked([&] {
+ LLVM_DEBUG(dbgs() << " Updating candidate set post-generation\n");
+ Err = IL_updateCandidatesFor(
+ JD, JDLookupFlags, IPLS->DefGeneratorCandidates,
+ JD.DefGenerators.empty() ? nullptr
+ : &IPLS->DefGeneratorNonCandidates);
+ });
+
+ // If updating candidates failed then fail the query.
+ if (Err) {
+ LLVM_DEBUG(dbgs() << " Error encountered while updating candidates\n");
+ return IPLS->fail(std::move(Err));
+ }
+ }
+
+ // If we get here then we've moved on to the next JITDylib.
+ LLVM_DEBUG(dbgs() << "Phase 1 moving to next JITDylib.\n");
+ ++IPLS->CurSearchOrderIndex;
+ IPLS->NewJITDylib = true;
+ }
+
+ // Remove any weakly referenced candidates that could not be found/generated.
+ IPLS->DefGeneratorCandidates.remove_if(
+ [](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+ return SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol;
+ });
+
+ // If we get here then we've finished searching all JITDylibs.
+ // If we matched all symbols then move to phase 2, otherwise fail the query
+ // with a SymbolsNotFound error.
+ if (IPLS->DefGeneratorCandidates.empty()) {
+ LLVM_DEBUG(dbgs() << "Phase 1 succeeded.\n");
+ IPLS->complete(std::move(IPLS));
+ } else {
+ LLVM_DEBUG(dbgs() << "Phase 1 failed with unresolved symbols.\n");
+ IPLS->fail(make_error<SymbolsNotFound>(
+ IPLS->DefGeneratorCandidates.getSymbolNames()));
+ }
+}
+
+void ExecutionSession::OL_completeLookup(
+ std::unique_ptr<InProgressLookupState> IPLS,
+ std::shared_ptr<AsynchronousSymbolQuery> Q,
+ RegisterDependenciesFunction RegisterDependencies) {
+
+ LLVM_DEBUG({
+ dbgs() << "Entering OL_completeLookup:\n"
+ << " Lookup kind: " << IPLS->K << "\n"
+ << " Search order: " << IPLS->SearchOrder
+ << ", Current index = " << IPLS->CurSearchOrderIndex
+ << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+ << " Lookup set: " << IPLS->LookupSet << "\n"
+ << " Definition generator candidates: "
+ << IPLS->DefGeneratorCandidates << "\n"
+ << " Definition generator non-candidates: "
+ << IPLS->DefGeneratorNonCandidates << "\n";
+ });
+
+ bool QueryComplete = false;
+ DenseMap<JITDylib *, JITDylib::UnmaterializedInfosList> CollectedUMIs;
+
+ auto LodgingErr = runSessionLocked([&]() -> Error {
+ for (auto &KV : IPLS->SearchOrder) {
+ auto &JD = *KV.first;
+ auto JDLookupFlags = KV.second;
+ LLVM_DEBUG({
+ dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+ << ") with lookup set " << IPLS->LookupSet << ":\n";
+ });
+
+ auto Err = IPLS->LookupSet.forEachWithRemoval(
+ [&](const SymbolStringPtr &Name,
+ SymbolLookupFlags SymLookupFlags) -> Expected<bool> {
+ LLVM_DEBUG({
+ dbgs() << " Attempting to match \"" << Name << "\" ("
+ << SymLookupFlags << ")... ";
+ });
+
+ // Search for the symbol. If not found then continue without
+ // removal.
+ auto SymI = JD.Symbols.find(Name);
+ if (SymI == JD.Symbols.end()) {
+ LLVM_DEBUG(dbgs() << "skipping: not present\n");
+ return false;
+ }
+
+ // If this is a non-exported symbol and we're matching exported
+ // symbols only then skip this symbol without removal.
+ if (!SymI->second.getFlags().isExported() &&
+ JDLookupFlags ==
+ JITDylibLookupFlags::MatchExportedSymbolsOnly) {
+ LLVM_DEBUG(dbgs() << "skipping: not exported\n");
+ return false;
+ }
+
+ // If we match against a materialization-side-effects only symbol
+ // then make sure it is weakly-referenced. Otherwise bail out with
+ // an error.
+ // FIXME: Use a "materialization-side-effects-only symbols must be
+ // weakly referenced" specific error here to reduce confusion.
+ if (SymI->second.getFlags().hasMaterializationSideEffectsOnly() &&
+ SymLookupFlags != SymbolLookupFlags::WeaklyReferencedSymbol) {
+ LLVM_DEBUG({
+ dbgs() << "error: "
+ "required, but symbol is has-side-effects-only\n";
+ });
+ return make_error<SymbolsNotFound>(SymbolNameVector({Name}));
+ }
+
+ // If we matched against this symbol but it is in the error state
+ // then bail out and treat it as a failure to materialize.
+ if (SymI->second.getFlags().hasError()) {
+ LLVM_DEBUG(dbgs() << "error: symbol is in error state\n");
+ auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+ (*FailedSymbolsMap)[&JD] = {Name};
+ return make_error<FailedToMaterialize>(
+ std::move(FailedSymbolsMap));
+ }
+
+ // Otherwise this is a match.
+
+ // If this symbol is already in the required state then notify the
+ // query, remove the symbol and continue.
+ if (SymI->second.getState() >= Q->getRequiredState()) {
+ LLVM_DEBUG(dbgs()
+ << "matched, symbol already in required state\n");
+ Q->notifySymbolMetRequiredState(Name, SymI->second.getSymbol());
+ return true;
+ }
+
+ // Otherwise this symbol does not yet meet the required state. Check
+ // whether it has a materializer attached, and if so prepare to run
+ // it.
+ if (SymI->second.hasMaterializerAttached()) {
+ assert(SymI->second.getAddress() == 0 &&
+ "Symbol not resolved but already has address?");
+ auto UMII = JD.UnmaterializedInfos.find(Name);
+ assert(UMII != JD.UnmaterializedInfos.end() &&
+ "Lazy symbol should have UnmaterializedInfo");
+
+ auto UMI = UMII->second;
+ assert(UMI->MU && "Materializer should not be null");
+ assert(UMI->RT && "Tracker should not be null");
+ LLVM_DEBUG({
+ dbgs() << "matched, preparing to dispatch MU@" << UMI->MU.get()
+ << " (" << UMI->MU->getName() << ")\n";
+ });
+
+ // Move all symbols associated with this MaterializationUnit into
+ // materializing state.
+ for (auto &KV : UMI->MU->getSymbols()) {
+ auto SymK = JD.Symbols.find(KV.first);
+ assert(SymK != JD.Symbols.end() &&
+ "No entry for symbol covered by MaterializationUnit");
+ SymK->second.setMaterializerAttached(false);
+ SymK->second.setState(SymbolState::Materializing);
+ JD.UnmaterializedInfos.erase(KV.first);
+ }
+
+ // Add MU to the list of MaterializationUnits to be materialized.
+ CollectedUMIs[&JD].push_back(std::move(UMI));
+ } else
+ LLVM_DEBUG(dbgs() << "matched, registering query");
+
+ // Add the query to the PendingQueries list and continue, deleting
+ // the element from the lookup set.
+ assert(SymI->second.getState() != SymbolState::NeverSearched &&
+ SymI->second.getState() != SymbolState::Ready &&
+ "By this line the symbol should be materializing");
+ auto &MI = JD.MaterializingInfos[Name];
+ MI.addQuery(Q);
+ Q->addQueryDependence(JD, Name);
+
+ return true;
+ });
+
+ // Handle failure.
+ if (Err) {
+
+ LLVM_DEBUG({
+ dbgs() << "Lookup failed. Detaching query and replacing MUs.\n";
+ });
+
+ // Detach the query.
+ Q->detach();
+
+ // Replace the MUs.
+ for (auto &KV : CollectedUMIs) {
+ auto &JD = *KV.first;
+ for (auto &UMI : KV.second)
+ for (auto &KV2 : UMI->MU->getSymbols()) {
+ assert(!JD.UnmaterializedInfos.count(KV2.first) &&
+ "Unexpected materializer in map");
+ auto SymI = JD.Symbols.find(KV2.first);
+ assert(SymI != JD.Symbols.end() && "Missing symbol entry");
+ assert(SymI->second.getState() == SymbolState::Materializing &&
+ "Can not replace symbol that is not materializing");
+ assert(!SymI->second.hasMaterializerAttached() &&
+ "MaterializerAttached flag should not be set");
+ SymI->second.setMaterializerAttached(true);
+ JD.UnmaterializedInfos[KV2.first] = UMI;
+ }
+ }
+
+ return Err;
+ }
+ }
+
+ LLVM_DEBUG(dbgs() << "Stripping unmatched weakly-refererced symbols\n");
+ IPLS->LookupSet.forEachWithRemoval(
+ [&](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+ if (SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol) {
+ Q->dropSymbol(Name);
+ return true;
+ } else
+ return false;
+ });
+
+ if (!IPLS->LookupSet.empty()) {
+ LLVM_DEBUG(dbgs() << "Failing due to unresolved symbols\n");
+ return make_error<SymbolsNotFound>(IPLS->LookupSet.getSymbolNames());
+ }
+
+ // Record whether the query completed.
+ QueryComplete = Q->isComplete();
+
+ LLVM_DEBUG({
+ dbgs() << "Query successfully "
+ << (QueryComplete ? "completed" : "lodged") << "\n";
+ });
+
+ // Move the collected MUs to the OutstandingMUs list.
+ if (!CollectedUMIs.empty()) {
+ std::lock_guard<std::recursive_mutex> Lock(OutstandingMUsMutex);
+
+ LLVM_DEBUG(dbgs() << "Adding MUs to dispatch:\n");
+ for (auto &KV : CollectedUMIs) {
+ auto &JD = *KV.first;
+ LLVM_DEBUG({
+ dbgs() << " For " << JD.getName() << ": Adding " << KV.second.size()
+ << " MUs.\n";
+ });
+ for (auto &UMI : KV.second) {
+ std::unique_ptr<MaterializationResponsibility> MR(
+ new MaterializationResponsibility(
+ &JD, std::move(UMI->MU->SymbolFlags),
+ std::move(UMI->MU->InitSymbol)));
+ JD.MRTrackers[MR.get()] = UMI->RT;
+ OutstandingMUs.push_back(
+ std::make_pair(std::move(UMI->MU), std::move(MR)));
+ }
+ }
+ } else
+ LLVM_DEBUG(dbgs() << "No MUs to dispatch.\n");
+
+ if (RegisterDependencies && !Q->QueryRegistrations.empty()) {
+ LLVM_DEBUG(dbgs() << "Registering dependencies\n");
+ RegisterDependencies(Q->QueryRegistrations);
+ } else
+ LLVM_DEBUG(dbgs() << "No dependencies to register\n");
+
+ return Error::success();
+ });
+
+ if (LodgingErr) {
+ LLVM_DEBUG(dbgs() << "Failing query\n");
+ Q->detach();
+ Q->handleFailed(std::move(LodgingErr));
+ return;
+ }
+
+ if (QueryComplete) {
+ LLVM_DEBUG(dbgs() << "Completing query\n");
+ Q->handleComplete();
+ }
+
+ dispatchOutstandingMUs();
+}
+
+void ExecutionSession::OL_completeLookupFlags(
+ std::unique_ptr<InProgressLookupState> IPLS,
+ unique_function<void(Expected<SymbolFlagsMap>)> OnComplete) {
+
+ auto Result = runSessionLocked([&]() -> Expected<SymbolFlagsMap> {
+ LLVM_DEBUG({
+ dbgs() << "Entering OL_completeLookupFlags:\n"
+ << " Lookup kind: " << IPLS->K << "\n"
+ << " Search order: " << IPLS->SearchOrder
+ << ", Current index = " << IPLS->CurSearchOrderIndex
+ << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+ << " Lookup set: " << IPLS->LookupSet << "\n"
+ << " Definition generator candidates: "
+ << IPLS->DefGeneratorCandidates << "\n"
+ << " Definition generator non-candidates: "
+ << IPLS->DefGeneratorNonCandidates << "\n";
+ });
+
+ SymbolFlagsMap Result;
+
+ // Attempt to find flags for each symbol.
+ for (auto &KV : IPLS->SearchOrder) {
+ auto &JD = *KV.first;
+ auto JDLookupFlags = KV.second;
+ LLVM_DEBUG({
+ dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+ << ") with lookup set " << IPLS->LookupSet << ":\n";
+ });
+
+ IPLS->LookupSet.forEachWithRemoval([&](const SymbolStringPtr &Name,
+ SymbolLookupFlags SymLookupFlags) {
+ LLVM_DEBUG({
+ dbgs() << " Attempting to match \"" << Name << "\" ("
+ << SymLookupFlags << ")... ";
+ });
+
+ // Search for the symbol. If not found then continue without removing
+ // from the lookup set.
+ auto SymI = JD.Symbols.find(Name);
+ if (SymI == JD.Symbols.end()) {
+ LLVM_DEBUG(dbgs() << "skipping: not present\n");
+ return false;
+ }
+
+ // If this is a non-exported symbol then it doesn't match. Skip it.
+ if (!SymI->second.getFlags().isExported() &&
+ JDLookupFlags == JITDylibLookupFlags::MatchExportedSymbolsOnly) {
+ LLVM_DEBUG(dbgs() << "skipping: not exported\n");
+ return false;
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "matched, \"" << Name << "\" -> " << SymI->second.getFlags()
+ << "\n";
+ });
+ Result[Name] = SymI->second.getFlags();
+ return true;
+ });
+ }
+
+ // Remove any weakly referenced symbols that haven't been resolved.
+ IPLS->LookupSet.remove_if(
+ [](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+ return SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol;
+ });
+
+ if (!IPLS->LookupSet.empty()) {
+ LLVM_DEBUG(dbgs() << "Failing due to unresolved symbols\n");
+ return make_error<SymbolsNotFound>(IPLS->LookupSet.getSymbolNames());
+ }
+
+ LLVM_DEBUG(dbgs() << "Succeded, result = " << Result << "\n");
+ return Result;
+ });
+
+ // Run the callback on the result.
+ LLVM_DEBUG(dbgs() << "Sending result to handler.\n");
+ OnComplete(std::move(Result));
+}
+
+void ExecutionSession::OL_destroyMaterializationResponsibility(
+ MaterializationResponsibility &MR) {
+
+ assert(MR.SymbolFlags.empty() &&
+ "All symbols should have been explicitly materialized or failed");
+ MR.JD->unlinkMaterializationResponsibility(MR);
+}
+
+SymbolNameSet ExecutionSession::OL_getRequestedSymbols(
+ const MaterializationResponsibility &MR) {
+ return MR.JD->getRequestedSymbols(MR.SymbolFlags);
+}
+
+Error ExecutionSession::OL_notifyResolved(MaterializationResponsibility &MR,
+ const SymbolMap &Symbols) {
+ LLVM_DEBUG({
+ dbgs() << "In " << MR.JD->getName() << " resolving " << Symbols << "\n";
+ });
+#ifndef NDEBUG
+ for (auto &KV : Symbols) {
+ auto WeakFlags = JITSymbolFlags::Weak | JITSymbolFlags::Common;
+ auto I = MR.SymbolFlags.find(KV.first);
+ assert(I != MR.SymbolFlags.end() &&
+ "Resolving symbol outside this responsibility set");
+ assert(!I->second.hasMaterializationSideEffectsOnly() &&
+ "Can't resolve materialization-side-effects-only symbol");
+ assert((KV.second.getFlags() & ~WeakFlags) == (I->second & ~WeakFlags) &&
+ "Resolving symbol with incorrect flags");
+ }
+#endif
+
+ return MR.JD->resolve(MR, Symbols);
+}
+
+Error ExecutionSession::OL_notifyEmitted(MaterializationResponsibility &MR) {
+ LLVM_DEBUG({
+ dbgs() << "In " << MR.JD->getName() << " emitting " << MR.SymbolFlags << "\n";
+ });
+
+ if (auto Err = MR.JD->emit(MR, MR.SymbolFlags))
+ return Err;
+
+ MR.SymbolFlags.clear();
+ return Error::success();
+}
+
+Error ExecutionSession::OL_defineMaterializing(
+ MaterializationResponsibility &MR, SymbolFlagsMap NewSymbolFlags) {
+
+ LLVM_DEBUG({
+ dbgs() << "In " << MR.JD->getName() << " defining materializing symbols "
+ << NewSymbolFlags << "\n";
+ });
+ if (auto AcceptedDefs = MR.JD->defineMaterializing(std::move(NewSymbolFlags))) {
+ // Add all newly accepted symbols to this responsibility object.
+ for (auto &KV : *AcceptedDefs)
+ MR.SymbolFlags.insert(KV);
+ return Error::success();
+ } else
+ return AcceptedDefs.takeError();
+}
+
+void ExecutionSession::OL_notifyFailed(MaterializationResponsibility &MR) {
+
+ LLVM_DEBUG({
+ dbgs() << "In " << MR.JD->getName() << " failing materialization for "
+ << MR.SymbolFlags << "\n";
+ });
+
+ JITDylib::FailedSymbolsWorklist Worklist;
+
+ for (auto &KV : MR.SymbolFlags)
+ Worklist.push_back(std::make_pair(MR.JD.get(), KV.first));
+ MR.SymbolFlags.clear();
+
+ if (Worklist.empty())
+ return;
+
+ JITDylib::AsynchronousSymbolQuerySet FailedQueries;
+ std::shared_ptr<SymbolDependenceMap> FailedSymbols;
+
+ runSessionLocked([&]() {
+ auto RTI = MR.JD->MRTrackers.find(&MR);
+ assert(RTI != MR.JD->MRTrackers.end() && "No tracker for this");
+ if (RTI->second->isDefunct())
+ return;
+
+ std::tie(FailedQueries, FailedSymbols) =
+ JITDylib::failSymbols(std::move(Worklist));
+ });
+
+ for (auto &Q : FailedQueries)
+ Q->handleFailed(make_error<FailedToMaterialize>(FailedSymbols));
+}
+
+Error ExecutionSession::OL_replace(MaterializationResponsibility &MR,
+ std::unique_ptr<MaterializationUnit> MU) {
+ for (auto &KV : MU->getSymbols()) {
+ assert(MR.SymbolFlags.count(KV.first) &&
+ "Replacing definition outside this responsibility set");
+ MR.SymbolFlags.erase(KV.first);
+ }
+
+ if (MU->getInitializerSymbol() == MR.InitSymbol)
+ MR.InitSymbol = nullptr;
+
+ LLVM_DEBUG(MR.JD->getExecutionSession().runSessionLocked([&]() {
+ dbgs() << "In " << MR.JD->getName() << " replacing symbols with " << *MU
+ << "\n";
+ }););
+
+ return MR.JD->replace(MR, std::move(MU));
+}
+
+Expected<std::unique_ptr<MaterializationResponsibility>>
+ExecutionSession::OL_delegate(MaterializationResponsibility &MR,
+ const SymbolNameSet &Symbols) {
+
+ SymbolStringPtr DelegatedInitSymbol;
+ SymbolFlagsMap DelegatedFlags;
+
+ for (auto &Name : Symbols) {
+ auto I = MR.SymbolFlags.find(Name);
+ assert(I != MR.SymbolFlags.end() &&
+ "Symbol is not tracked by this MaterializationResponsibility "
+ "instance");
+
+ DelegatedFlags[Name] = std::move(I->second);
+ if (Name == MR.InitSymbol)
+ std::swap(MR.InitSymbol, DelegatedInitSymbol);
+
+ MR.SymbolFlags.erase(I);
+ }
+
+ return MR.JD->delegate(MR, std::move(DelegatedFlags),
+ std::move(DelegatedInitSymbol));
+}
+
+void ExecutionSession::OL_addDependencies(
+ MaterializationResponsibility &MR, const SymbolStringPtr &Name,
+ const SymbolDependenceMap &Dependencies) {
+ LLVM_DEBUG({
+ dbgs() << "Adding dependencies for " << Name << ": " << Dependencies
+ << "\n";
+ });
+ assert(MR.SymbolFlags.count(Name) &&
+ "Symbol not covered by this MaterializationResponsibility instance");
+ MR.JD->addDependencies(Name, Dependencies);
+}
+
+void ExecutionSession::OL_addDependenciesForAll(
+ MaterializationResponsibility &MR,
+ const SymbolDependenceMap &Dependencies) {
+ LLVM_DEBUG({
+ dbgs() << "Adding dependencies for all symbols in " << MR.SymbolFlags << ": "
+ << Dependencies << "\n";
+ });
+ for (auto &KV : MR.SymbolFlags)
+ MR.JD->addDependencies(KV.first, Dependencies);
+}
+
+#ifndef NDEBUG
+void ExecutionSession::dumpDispatchInfo(JITDylib &JD, MaterializationUnit &MU) {
+ runSessionLocked([&]() {
+ dbgs() << "Dispatching " << MU << " for " << JD.getName() << "\n";
+ });
+}
+#endif // NDEBUG
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/DebugUtils.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/DebugUtils.cpp
new file mode 100644
index 00000000000..6247158919f
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/DebugUtils.cpp
@@ -0,0 +1,349 @@
+//===---------- DebugUtils.cpp - Utilities for debugging ORC JITs ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+
+namespace {
+
+#ifndef NDEBUG
+
+cl::opt<bool> PrintHidden("debug-orc-print-hidden", cl::init(true),
+ cl::desc("debug print hidden symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+cl::opt<bool> PrintCallable("debug-orc-print-callable", cl::init(true),
+ cl::desc("debug print callable symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+cl::opt<bool> PrintData("debug-orc-print-data", cl::init(true),
+ cl::desc("debug print data symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+#endif // NDEBUG
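+
+// Usage sketch (illustrative): these options exist only in asserts-enabled
+// builds and take effect together with ORC debug output, e.g.:
+//
+//   some-jit-tool -debug-only=orc -debug-orc-print-hidden=false input.o
+//
+// "some-jit-tool" and "input.o" are placeholders for any tool that links
+// this library and its input.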
+
+// SetPrinter predicate that prints every element.
+template <typename T> struct PrintAll {
+ bool operator()(const T &E) { return true; }
+};
+
+bool anyPrintSymbolOptionSet() {
+#ifndef NDEBUG
+ return PrintHidden || PrintCallable || PrintData;
+#else
+ return false;
+#endif // NDEBUG
+}
+
+bool flagsMatchCLOpts(const JITSymbolFlags &Flags) {
+#ifndef NDEBUG
+ // Bail out early if this is a hidden symbol and we're not printing hiddens.
+ if (!PrintHidden && !Flags.isExported())
+ return false;
+
+ // Return true if this is callable and we're printing callables.
+ if (PrintCallable && Flags.isCallable())
+ return true;
+
+ // Return true if this is data and we're printing data.
+ if (PrintData && !Flags.isCallable())
+ return true;
+
+  // Otherwise return false.
+ return false;
+#else
+ return false;
+#endif // NDEBUG
+}
+
+// Prints a sequence of items, filtered by a user-supplied predicate.
+template <typename Sequence,
+ typename Pred = PrintAll<typename Sequence::value_type>>
+class SequencePrinter {
+public:
+ SequencePrinter(const Sequence &S, char OpenSeq, char CloseSeq,
+ Pred ShouldPrint = Pred())
+ : S(S), OpenSeq(OpenSeq), CloseSeq(CloseSeq),
+ ShouldPrint(std::move(ShouldPrint)) {}
+
+ void printTo(llvm::raw_ostream &OS) const {
+ bool PrintComma = false;
+ OS << OpenSeq;
+ for (auto &E : S) {
+ if (ShouldPrint(E)) {
+ if (PrintComma)
+ OS << ',';
+ OS << ' ' << E;
+ PrintComma = true;
+ }
+ }
+ OS << ' ' << CloseSeq;
+ }
+
+private:
+ const Sequence &S;
+ char OpenSeq;
+ char CloseSeq;
+ mutable Pred ShouldPrint;
+};
+
+template <typename Sequence, typename Pred>
+SequencePrinter<Sequence, Pred> printSequence(const Sequence &S, char OpenSeq,
+ char CloseSeq, Pred P = Pred()) {
+ return SequencePrinter<Sequence, Pred>(S, OpenSeq, CloseSeq, std::move(P));
+}
+
+// Render a SequencePrinter by delegating to its printTo method.
+template <typename Sequence, typename Pred>
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ const SequencePrinter<Sequence, Pred> &Printer) {
+ Printer.printTo(OS);
+ return OS;
+}
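+
+// Usage sketch (illustrative; assumes an ExecutionSession `ES` to intern the
+// symbol names): the operator<< overloads below are all built this way.
+//
+//   SymbolNameVector Names = {ES.intern("foo"), ES.intern("bar")};
+//   dbgs() << printSequence(Names, '[', ']', PrintAll<SymbolStringPtr>());
+//   // Prints: [ foo, bar ]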
+
+struct PrintSymbolFlagsMapElemsMatchingCLOpts {
+ bool operator()(const orc::SymbolFlagsMap::value_type &KV) {
+ return flagsMatchCLOpts(KV.second);
+ }
+};
+
+struct PrintSymbolMapElemsMatchingCLOpts {
+ bool operator()(const orc::SymbolMap::value_type &KV) {
+ return flagsMatchCLOpts(KV.second.getFlags());
+ }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPtr &Sym) {
+ return OS << *Sym;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolNameSet &Symbols) {
+ return OS << printSequence(Symbols, '{', '}', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolNameVector &Symbols) {
+ return OS << printSequence(Symbols, '[', ']', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, ArrayRef<SymbolStringPtr> Symbols) {
+ return OS << printSequence(Symbols, '[', ']', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const JITSymbolFlags &Flags) {
+ if (Flags.hasError())
+ OS << "[*ERROR*]";
+ if (Flags.isCallable())
+ OS << "[Callable]";
+ else
+ OS << "[Data]";
+ if (Flags.isWeak())
+ OS << "[Weak]";
+ else if (Flags.isCommon())
+ OS << "[Common]";
+
+ if (!Flags.isExported())
+ OS << "[Hidden]";
+
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const JITEvaluatedSymbol &Sym) {
+ return OS << format("0x%016" PRIx64, Sym.getAddress()) << " "
+ << Sym.getFlags();
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap::value_type &KV) {
+ return OS << "(\"" << KV.first << "\", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap::value_type &KV) {
+ return OS << "(\"" << KV.first << "\": " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap &SymbolFlags) {
+ return OS << printSequence(SymbolFlags, '{', '}',
+ PrintSymbolFlagsMapElemsMatchingCLOpts());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap &Symbols) {
+ return OS << printSequence(Symbols, '{', '}',
+ PrintSymbolMapElemsMatchingCLOpts());
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const SymbolDependenceMap::value_type &KV) {
+ return OS << "(" << KV.first->getName() << ", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap &Deps) {
+ return OS << printSequence(Deps, '{', '}',
+ PrintAll<SymbolDependenceMap::value_type>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const MaterializationUnit &MU) {
+ OS << "MU@" << &MU << " (\"" << MU.getName() << "\"";
+ if (anyPrintSymbolOptionSet())
+ OS << ", " << MU.getSymbols();
+ return OS << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const LookupKind &K) {
+ switch (K) {
+ case LookupKind::Static:
+ return OS << "Static";
+ case LookupKind::DLSym:
+ return OS << "DLSym";
+ }
+ llvm_unreachable("Invalid lookup kind");
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const JITDylibLookupFlags &JDLookupFlags) {
+ switch (JDLookupFlags) {
+ case JITDylibLookupFlags::MatchExportedSymbolsOnly:
+ return OS << "MatchExportedSymbolsOnly";
+ case JITDylibLookupFlags::MatchAllSymbols:
+ return OS << "MatchAllSymbols";
+ }
+ llvm_unreachable("Invalid JITDylib lookup flags");
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LookupFlags) {
+ switch (LookupFlags) {
+ case SymbolLookupFlags::RequiredSymbol:
+ return OS << "RequiredSymbol";
+ case SymbolLookupFlags::WeaklyReferencedSymbol:
+ return OS << "WeaklyReferencedSymbol";
+ }
+ llvm_unreachable("Invalid symbol lookup flags");
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const SymbolLookupSet::value_type &KV) {
+ return OS << "(" << KV.first << ", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupSet &LookupSet) {
+ return OS << printSequence(LookupSet, '{', '}',
+ PrintAll<SymbolLookupSet::value_type>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const JITDylibSearchOrder &SearchOrder) {
+ OS << "[";
+ if (!SearchOrder.empty()) {
+ assert(SearchOrder.front().first &&
+ "JITDylibList entries must not be null");
+ OS << " (\"" << SearchOrder.front().first->getName() << "\", "
+ << SearchOrder.begin()->second << ")";
+ for (auto &KV :
+ make_range(std::next(SearchOrder.begin(), 1), SearchOrder.end())) {
+ assert(KV.first && "JITDylibList entries must not be null");
+ OS << ", (\"" << KV.first->getName() << "\", " << KV.second << ")";
+ }
+ }
+ OS << " ]";
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolAliasMap &Aliases) {
+ OS << "{";
+ for (auto &KV : Aliases)
+ OS << " " << *KV.first << ": " << KV.second.Aliasee << " "
+ << KV.second.AliasFlags;
+ OS << " }";
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolState &S) {
+ switch (S) {
+ case SymbolState::Invalid:
+ return OS << "Invalid";
+ case SymbolState::NeverSearched:
+ return OS << "Never-Searched";
+ case SymbolState::Materializing:
+ return OS << "Materializing";
+ case SymbolState::Resolved:
+ return OS << "Resolved";
+ case SymbolState::Emitted:
+ return OS << "Emitted";
+ case SymbolState::Ready:
+ return OS << "Ready";
+ }
+ llvm_unreachable("Invalid state");
+}
+
+DumpObjects::DumpObjects(std::string DumpDir, std::string IdentifierOverride)
+ : DumpDir(std::move(DumpDir)),
+ IdentifierOverride(std::move(IdentifierOverride)) {
+
+  // Discard any trailing separators.
+ while (!this->DumpDir.empty() &&
+ sys::path::is_separator(this->DumpDir.back()))
+ this->DumpDir.pop_back();
+}
+
+Expected<std::unique_ptr<MemoryBuffer>>
+DumpObjects::operator()(std::unique_ptr<MemoryBuffer> Obj) {
+ size_t Idx = 1;
+
+ std::string DumpPathStem;
+ raw_string_ostream(DumpPathStem)
+ << DumpDir << (DumpDir.empty() ? "" : "/") << getBufferIdentifier(*Obj);
+
+ std::string DumpPath = DumpPathStem + ".o";
+ while (sys::fs::exists(DumpPath)) {
+ DumpPath.clear();
+ raw_string_ostream(DumpPath) << DumpPathStem << "." << (++Idx) << ".o";
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Dumping object buffer [ " << (const void *)Obj->getBufferStart()
+ << " -- " << (const void *)(Obj->getBufferEnd() - 1) << " ] to "
+ << DumpPath << "\n";
+ });
+
+ std::error_code EC;
+ raw_fd_ostream DumpStream(DumpPath, EC);
+ if (EC)
+ return errorCodeToError(EC);
+ DumpStream.write(Obj->getBufferStart(), Obj->getBufferSize());
+
+ return std::move(Obj);
+}
+
+StringRef DumpObjects::getBufferIdentifier(MemoryBuffer &B) {
+ if (!IdentifierOverride.empty())
+ return IdentifierOverride;
+ StringRef Identifier = B.getBufferIdentifier();
+ Identifier.consume_back(".o");
+ return Identifier;
+}
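+
+// Usage sketch (assumes an LLJIT instance `J`; the dump directory is
+// illustrative):
+//
+//   J->getObjTransformLayer().setTransform(DumpObjects("/tmp/jit-dumps"));
+//
+// Every object buffer passing through the transform layer is then written
+// out as /tmp/jit-dumps/<identifier>.o before linking.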
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ExecutionUtils.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
new file mode 100644
index 00000000000..6a1a41a13a1
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
@@ -0,0 +1,387 @@
+//===---- ExecutionUtils.cpp - Utilities for executing functions in Orc ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/Layer.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/MachOUniversal.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+namespace orc {
+
+CtorDtorIterator::CtorDtorIterator(const GlobalVariable *GV, bool End)
+ : InitList(
+ GV ? dyn_cast_or_null<ConstantArray>(GV->getInitializer()) : nullptr),
+ I((InitList && End) ? InitList->getNumOperands() : 0) {
+}
+
+bool CtorDtorIterator::operator==(const CtorDtorIterator &Other) const {
+ assert(InitList == Other.InitList && "Incomparable iterators.");
+ return I == Other.I;
+}
+
+bool CtorDtorIterator::operator!=(const CtorDtorIterator &Other) const {
+ return !(*this == Other);
+}
+
+CtorDtorIterator& CtorDtorIterator::operator++() {
+ ++I;
+ return *this;
+}
+
+CtorDtorIterator CtorDtorIterator::operator++(int) {
+ CtorDtorIterator Temp = *this;
+ ++I;
+ return Temp;
+}
+
+CtorDtorIterator::Element CtorDtorIterator::operator*() const {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(I));
+ assert(CS && "Unrecognized type in llvm.global_ctors/llvm.global_dtors");
+
+ Constant *FuncC = CS->getOperand(1);
+ Function *Func = nullptr;
+
+ // Extract function pointer, pulling off any casts.
+ while (FuncC) {
+ if (Function *F = dyn_cast_or_null<Function>(FuncC)) {
+ Func = F;
+ break;
+ } else if (ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(FuncC)) {
+ if (CE->isCast())
+ FuncC = dyn_cast_or_null<ConstantExpr>(CE->getOperand(0));
+ else
+ break;
+ } else {
+ // This isn't anything we recognize. Bail out with Func left set to null.
+ break;
+ }
+ }
+
+ auto *Priority = cast<ConstantInt>(CS->getOperand(0));
+ Value *Data = CS->getNumOperands() == 3 ? CS->getOperand(2) : nullptr;
+ if (Data && !isa<GlobalValue>(Data))
+ Data = nullptr;
+ return Element(Priority->getZExtValue(), Func, Data);
+}
+
+iterator_range<CtorDtorIterator> getConstructors(const Module &M) {
+ const GlobalVariable *CtorsList = M.getNamedGlobal("llvm.global_ctors");
+ return make_range(CtorDtorIterator(CtorsList, false),
+ CtorDtorIterator(CtorsList, true));
+}
+
+iterator_range<CtorDtorIterator> getDestructors(const Module &M) {
+ const GlobalVariable *DtorsList = M.getNamedGlobal("llvm.global_dtors");
+ return make_range(CtorDtorIterator(DtorsList, false),
+ CtorDtorIterator(DtorsList, true));
+}
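+
+// Usage sketch (assumes a Module `M`): enumerating static initializers.
+//
+//   for (CtorDtorIterator::Element E : getConstructors(M))
+//     if (E.Func)
+//       dbgs() << "ctor (priority " << E.Priority << "): "
+//              << E.Func->getName() << "\n";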
+
+bool StaticInitGVIterator::isStaticInitGlobal(GlobalValue &GV) {
+ if (GV.isDeclaration())
+ return false;
+
+ if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
+ GV.getName() == "llvm.global_dtors"))
+ return true;
+
+ if (ObjFmt == Triple::MachO) {
+    // FIXME: These section checks are too strict: we should match the first
+    // and second words, split at the comma.
+ if (GV.hasSection() &&
+ (GV.getSection().startswith("__DATA,__objc_classlist") ||
+ GV.getSection().startswith("__DATA,__objc_selrefs")))
+ return true;
+ }
+
+ return false;
+}
+
+void CtorDtorRunner::add(iterator_range<CtorDtorIterator> CtorDtors) {
+ if (CtorDtors.empty())
+ return;
+
+ MangleAndInterner Mangle(
+ JD.getExecutionSession(),
+ (*CtorDtors.begin()).Func->getParent()->getDataLayout());
+
+ for (auto CtorDtor : CtorDtors) {
+ assert(CtorDtor.Func && CtorDtor.Func->hasName() &&
+ "Ctor/Dtor function must be named to be runnable under the JIT");
+
+ // FIXME: Maybe use a symbol promoter here instead.
+ if (CtorDtor.Func->hasLocalLinkage()) {
+ CtorDtor.Func->setLinkage(GlobalValue::ExternalLinkage);
+ CtorDtor.Func->setVisibility(GlobalValue::HiddenVisibility);
+ }
+
+ if (CtorDtor.Data && cast<GlobalValue>(CtorDtor.Data)->isDeclaration()) {
+ dbgs() << " Skipping because why now?\n";
+ continue;
+ }
+
+ CtorDtorsByPriority[CtorDtor.Priority].push_back(
+ Mangle(CtorDtor.Func->getName()));
+ }
+}
+
+Error CtorDtorRunner::run() {
+ using CtorDtorTy = void (*)();
+
+ SymbolLookupSet LookupSet;
+ for (auto &KV : CtorDtorsByPriority)
+ for (auto &Name : KV.second)
+ LookupSet.add(Name);
+ assert(!LookupSet.containsDuplicates() &&
+ "Ctor/Dtor list contains duplicates");
+
+ auto &ES = JD.getExecutionSession();
+ if (auto CtorDtorMap = ES.lookup(
+ makeJITDylibSearchOrder(&JD, JITDylibLookupFlags::MatchAllSymbols),
+ std::move(LookupSet))) {
+ for (auto &KV : CtorDtorsByPriority) {
+ for (auto &Name : KV.second) {
+ assert(CtorDtorMap->count(Name) && "No entry for Name");
+ auto CtorDtor = reinterpret_cast<CtorDtorTy>(
+ static_cast<uintptr_t>((*CtorDtorMap)[Name].getAddress()));
+ CtorDtor();
+ }
+ }
+ CtorDtorsByPriority.clear();
+ return Error::success();
+ } else
+ return CtorDtorMap.takeError();
+}
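+
+// Usage sketch (assumes a JITDylib `JD` and an IR Module `M` that has
+// already been added to the JIT):
+//
+//   CtorDtorRunner CtorRunner(JD);
+//   CtorRunner.add(getConstructors(M));
+//   if (auto Err = CtorRunner.run())
+//     return Err; // Runs the constructors in priority order.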
+
+void LocalCXXRuntimeOverridesBase::runDestructors() {
+  auto &CXXDestructorDataPairs = DSOHandleOverride;
+ for (auto &P : CXXDestructorDataPairs)
+ P.first(P.second);
+ CXXDestructorDataPairs.clear();
+}
+
+int LocalCXXRuntimeOverridesBase::CXAAtExitOverride(DestructorPtr Destructor,
+ void *Arg,
+ void *DSOHandle) {
+  auto &CXXDestructorDataPairs =
+      *reinterpret_cast<CXXDestructorDataPairList *>(DSOHandle);
+ CXXDestructorDataPairs.push_back(std::make_pair(Destructor, Arg));
+ return 0;
+}
+
+Error LocalCXXRuntimeOverrides::enable(JITDylib &JD,
+ MangleAndInterner &Mangle) {
+ SymbolMap RuntimeInterposes;
+ RuntimeInterposes[Mangle("__dso_handle")] =
+ JITEvaluatedSymbol(toTargetAddress(&DSOHandleOverride),
+ JITSymbolFlags::Exported);
+ RuntimeInterposes[Mangle("__cxa_atexit")] =
+ JITEvaluatedSymbol(toTargetAddress(&CXAAtExitOverride),
+ JITSymbolFlags::Exported);
+
+ return JD.define(absoluteSymbols(std::move(RuntimeInterposes)));
+}
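+
+// Usage sketch (assumes a JITDylib `JD` and a MangleAndInterner `Mangle`):
+//
+//   LocalCXXRuntimeOverrides CXXOverrides;
+//   if (auto Err = CXXOverrides.enable(JD, Mangle))
+//     return Err;
+//   // ... run JIT'd code that registers destructors via __cxa_atexit ...
+//   CXXOverrides.runDestructors();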
+
+void ItaniumCXAAtExitSupport::registerAtExit(void (*F)(void *), void *Ctx,
+ void *DSOHandle) {
+ std::lock_guard<std::mutex> Lock(AtExitsMutex);
+ AtExitRecords[DSOHandle].push_back({F, Ctx});
+}
+
+void ItaniumCXAAtExitSupport::runAtExits(void *DSOHandle) {
+ std::vector<AtExitRecord> AtExitsToRun;
+
+ {
+ std::lock_guard<std::mutex> Lock(AtExitsMutex);
+ auto I = AtExitRecords.find(DSOHandle);
+ if (I != AtExitRecords.end()) {
+ AtExitsToRun = std::move(I->second);
+ AtExitRecords.erase(I);
+ }
+ }
+
+ while (!AtExitsToRun.empty()) {
+ AtExitsToRun.back().F(AtExitsToRun.back().Ctx);
+ AtExitsToRun.pop_back();
+ }
+}
+
+DynamicLibrarySearchGenerator::DynamicLibrarySearchGenerator(
+ sys::DynamicLibrary Dylib, char GlobalPrefix, SymbolPredicate Allow)
+ : Dylib(std::move(Dylib)), Allow(std::move(Allow)),
+ GlobalPrefix(GlobalPrefix) {}
+
+Expected<std::unique_ptr<DynamicLibrarySearchGenerator>>
+DynamicLibrarySearchGenerator::Load(const char *FileName, char GlobalPrefix,
+ SymbolPredicate Allow) {
+ std::string ErrMsg;
+ auto Lib = sys::DynamicLibrary::getPermanentLibrary(FileName, &ErrMsg);
+ if (!Lib.isValid())
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+ return std::make_unique<DynamicLibrarySearchGenerator>(
+ std::move(Lib), GlobalPrefix, std::move(Allow));
+}
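+
+// Usage sketch (the library path and JITDylib `JD` are illustrative):
+// exposing a host shared library's symbols to JIT lookups.
+//
+//   auto Gen = DynamicLibrarySearchGenerator::Load("/usr/lib/libm.so",
+//                                                  /*GlobalPrefix=*/'\0');
+//   if (!Gen)
+//     return Gen.takeError();
+//   JD.addGenerator(std::move(*Gen));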
+
+Error DynamicLibrarySearchGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+ orc::SymbolMap NewSymbols;
+
+ bool HasGlobalPrefix = (GlobalPrefix != '\0');
+
+ for (auto &KV : Symbols) {
+ auto &Name = KV.first;
+
+ if ((*Name).empty())
+ continue;
+
+ if (Allow && !Allow(Name))
+ continue;
+
+ if (HasGlobalPrefix && (*Name).front() != GlobalPrefix)
+ continue;
+
+ std::string Tmp((*Name).data() + HasGlobalPrefix,
+ (*Name).size() - HasGlobalPrefix);
+ if (void *Addr = Dylib.getAddressOfSymbol(Tmp.c_str())) {
+ NewSymbols[Name] = JITEvaluatedSymbol(
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(Addr)),
+ JITSymbolFlags::Exported);
+ }
+ }
+
+ if (NewSymbols.empty())
+ return Error::success();
+
+ return JD.define(absoluteSymbols(std::move(NewSymbols)));
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Load(ObjectLayer &L, const char *FileName) {
+ auto ArchiveBuffer = errorOrToExpected(MemoryBuffer::getFile(FileName));
+
+ if (!ArchiveBuffer)
+ return ArchiveBuffer.takeError();
+
+ return Create(L, std::move(*ArchiveBuffer));
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Load(ObjectLayer &L, const char *FileName,
+ const Triple &TT) {
+ auto B = object::createBinary(FileName);
+ if (!B)
+ return B.takeError();
+
+ // If this is a regular archive then create an instance from it.
+ if (isa<object::Archive>(B->getBinary()))
+ return Create(L, std::move(B->takeBinary().second));
+
+ // If this is a universal binary then search for a slice matching the given
+ // Triple.
+  if (auto *UB = dyn_cast<object::MachOUniversalBinary>(B->getBinary())) {
+ for (const auto &Obj : UB->objects()) {
+ auto ObjTT = Obj.getTriple();
+ if (ObjTT.getArch() == TT.getArch() &&
+ ObjTT.getSubArch() == TT.getSubArch() &&
+ (TT.getVendor() == Triple::UnknownVendor ||
+ ObjTT.getVendor() == TT.getVendor())) {
+ // We found a match. Create an instance from a buffer covering this
+ // slice.
+ auto SliceBuffer = MemoryBuffer::getFileSlice(FileName, Obj.getSize(),
+ Obj.getOffset());
+ if (!SliceBuffer)
+ return make_error<StringError>(
+ Twine("Could not create buffer for ") + TT.str() + " slice of " +
+ FileName + ": [ " + formatv("{0:x}", Obj.getOffset()) +
+ " .. " + formatv("{0:x}", Obj.getOffset() + Obj.getSize()) +
+ ": " + SliceBuffer.getError().message(),
+ SliceBuffer.getError());
+ return Create(L, std::move(*SliceBuffer));
+ }
+ }
+
+ return make_error<StringError>(Twine("Universal binary ") + FileName +
+ " does not contain a slice for " +
+ TT.str(),
+ inconvertibleErrorCode());
+ }
+
+ return make_error<StringError>(Twine("Unrecognized file type for ") +
+ FileName,
+ inconvertibleErrorCode());
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Create(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer) {
+ Error Err = Error::success();
+
+ std::unique_ptr<StaticLibraryDefinitionGenerator> ADG(
+ new StaticLibraryDefinitionGenerator(L, std::move(ArchiveBuffer), Err));
+
+ if (Err)
+ return std::move(Err);
+
+ return std::move(ADG);
+}
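+
+// Usage sketch (assumes an ObjectLayer `ObjLayer` and JITDylib `JD`; the
+// archive path is illustrative): members are only pulled in when a static
+// lookup references one of their symbols.
+//
+//   auto Gen = StaticLibraryDefinitionGenerator::Load(ObjLayer, "libfoo.a");
+//   if (!Gen)
+//     return Gen.takeError();
+//   JD.addGenerator(std::move(*Gen));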
+
+Error StaticLibraryDefinitionGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+
+ // Don't materialize symbols from static archives unless this is a static
+ // lookup.
+ if (K != LookupKind::Static)
+ return Error::success();
+
+ // Bail out early if we've already freed the archive.
+ if (!Archive)
+ return Error::success();
+
+ DenseSet<std::pair<StringRef, StringRef>> ChildBufferInfos;
+
+ for (const auto &KV : Symbols) {
+ const auto &Name = KV.first;
+ auto Child = Archive->findSym(*Name);
+ if (!Child)
+ return Child.takeError();
+ if (*Child == None)
+ continue;
+ auto ChildBuffer = (*Child)->getMemoryBufferRef();
+ if (!ChildBuffer)
+ return ChildBuffer.takeError();
+ ChildBufferInfos.insert(
+ {ChildBuffer->getBuffer(), ChildBuffer->getBufferIdentifier()});
+ }
+
+ for (auto ChildBufferInfo : ChildBufferInfos) {
+ MemoryBufferRef ChildBufferRef(ChildBufferInfo.first,
+ ChildBufferInfo.second);
+
+ if (auto Err = L.add(JD, MemoryBuffer::getMemBuffer(ChildBufferRef, false)))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+StaticLibraryDefinitionGenerator::StaticLibraryDefinitionGenerator(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer, Error &Err)
+ : L(L), ArchiveBuffer(std::move(ArchiveBuffer)),
+ Archive(std::make_unique<object::Archive>(*this->ArchiveBuffer, Err)) {}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IRCompileLayer.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IRCompileLayer.cpp
new file mode 100644
index 00000000000..aadc437c80c
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IRCompileLayer.cpp
@@ -0,0 +1,48 @@
+//===--------------- IRCompileLayer.cpp - IR Compiling Layer --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+
+namespace llvm {
+namespace orc {
+
+IRCompileLayer::IRCompiler::~IRCompiler() {}
+
+IRCompileLayer::IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
+ std::unique_ptr<IRCompiler> Compile)
+ : IRLayer(ES, ManglingOpts), BaseLayer(BaseLayer),
+ Compile(std::move(Compile)) {
+ ManglingOpts = &this->Compile->getManglingOptions();
+}
+
+void IRCompileLayer::setNotifyCompiled(NotifyCompiledFunction NotifyCompiled) {
+ std::lock_guard<std::mutex> Lock(IRLayerMutex);
+ this->NotifyCompiled = std::move(NotifyCompiled);
+}
+
+void IRCompileLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Module must not be null");
+
+ if (auto Obj = TSM.withModuleDo(*Compile)) {
+ {
+ std::lock_guard<std::mutex> Lock(IRLayerMutex);
+ if (NotifyCompiled)
+ NotifyCompiled(*R, std::move(TSM));
+ else
+ TSM = ThreadSafeModule();
+ }
+ BaseLayer.emit(std::move(R), std::move(*Obj));
+ } else {
+ R->failMaterialization();
+ getExecutionSession().reportError(Obj.takeError());
+ }
+}
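+
+// Usage sketch (assumes an ExecutionSession `ES`, an object layer `ObjLayer`,
+// a JITTargetMachineBuilder `JTMB`, and CompileUtils.h for
+// ConcurrentIRCompiler):
+//
+//   IRCompileLayer CompileLayer(
+//       ES, ObjLayer,
+//       std::make_unique<ConcurrentIRCompiler>(std::move(JTMB)));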
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IRTransformLayer.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IRTransformLayer.cpp
new file mode 100644
index 00000000000..d5b11349277
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IRTransformLayer.cpp
@@ -0,0 +1,33 @@
+//===-------------- IRTransformLayer.cpp - IR Transform Layer -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
+IRTransformLayer::IRTransformLayer(ExecutionSession &ES, IRLayer &BaseLayer,
+ TransformFunction Transform)
+ : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+ Transform(std::move(Transform)) {}
+
+void IRTransformLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Module must not be null");
+
+ if (auto TransformedTSM = Transform(std::move(TSM), *R))
+ BaseLayer.emit(std::move(R), std::move(*TransformedTSM));
+ else {
+ R->failMaterialization();
+ getExecutionSession().reportError(TransformedTSM.takeError());
+ }
+}
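+
+// Usage sketch (assumes an ExecutionSession `ES` and an IRLayer
+// `CompileLayer`): a pass-through transform that logs each module.
+//
+//   IRTransformLayer PrintLayer(
+//       ES, CompileLayer,
+//       [](ThreadSafeModule TSM, MaterializationResponsibility &R)
+//           -> Expected<ThreadSafeModule> {
+//         TSM.withModuleDo([](Module &M) { dbgs() << M.getName() << "\n"; });
+//         return std::move(TSM);
+//       });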
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IndirectionUtils.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
new file mode 100644
index 00000000000..1cfcf8ae943
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
@@ -0,0 +1,375 @@
+//===---- IndirectionUtils.cpp - Utilities for call indirection in Orc ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <sstream>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+class CompileCallbackMaterializationUnit : public orc::MaterializationUnit {
+public:
+ using CompileFunction = JITCompileCallbackManager::CompileFunction;
+
+ CompileCallbackMaterializationUnit(SymbolStringPtr Name,
+ CompileFunction Compile)
+ : MaterializationUnit(SymbolFlagsMap({{Name, JITSymbolFlags::Exported}}),
+ nullptr),
+ Name(std::move(Name)), Compile(std::move(Compile)) {}
+
+ StringRef getName() const override { return "<Compile Callbacks>"; }
+
+private:
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ SymbolMap Result;
+ Result[Name] = JITEvaluatedSymbol(Compile(), JITSymbolFlags::Exported);
+ // No dependencies, so these calls cannot fail.
+ cantFail(R->notifyResolved(Result));
+ cantFail(R->notifyEmitted());
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override {
+ llvm_unreachable("Discard should never occur on a LMU?");
+ }
+
+ SymbolStringPtr Name;
+ CompileFunction Compile;
+};
+
+} // namespace
+
+namespace llvm {
+namespace orc {
+
+TrampolinePool::~TrampolinePool() {}
+void IndirectStubsManager::anchor() {}
+
+Expected<JITTargetAddress>
+JITCompileCallbackManager::getCompileCallback(CompileFunction Compile) {
+ if (auto TrampolineAddr = TP->getTrampoline()) {
+ auto CallbackName =
+ ES.intern(std::string("cc") + std::to_string(++NextCallbackId));
+
+ std::lock_guard<std::mutex> Lock(CCMgrMutex);
+ AddrToSymbol[*TrampolineAddr] = CallbackName;
+ cantFail(
+ CallbacksJD.define(std::make_unique<CompileCallbackMaterializationUnit>(
+ std::move(CallbackName), std::move(Compile))));
+ return *TrampolineAddr;
+ } else
+ return TrampolineAddr.takeError();
+}
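+
+// Usage sketch (`CCMgr` is an existing JITCompileCallbackManager;
+// `compileBody` is a hypothetical helper returning the compiled function's
+// address): the returned trampoline address can stand in for a function
+// that has not been compiled yet.
+//
+//   auto CallbackAddr = CCMgr.getCompileCallback(
+//       []() -> JITTargetAddress { return compileBody(); });
+//   if (!CallbackAddr)
+//     return CallbackAddr.takeError();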
+
+JITTargetAddress JITCompileCallbackManager::executeCompileCallback(
+ JITTargetAddress TrampolineAddr) {
+ SymbolStringPtr Name;
+
+ {
+ std::unique_lock<std::mutex> Lock(CCMgrMutex);
+ auto I = AddrToSymbol.find(TrampolineAddr);
+
+ // If this address is not associated with a compile callback then report an
+ // error to the execution session and return ErrorHandlerAddress to the
+ // callee.
+ if (I == AddrToSymbol.end()) {
+ Lock.unlock();
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "No compile callback for trampoline at "
+ << format("0x%016" PRIx64, TrampolineAddr);
+ }
+ ES.reportError(
+ make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode()));
+ return ErrorHandlerAddress;
+ } else
+ Name = I->second;
+ }
+
+ if (auto Sym =
+ ES.lookup(makeJITDylibSearchOrder(
+ &CallbacksJD, JITDylibLookupFlags::MatchAllSymbols),
+ Name))
+ return Sym->getAddress();
+ else {
+ llvm::dbgs() << "Didn't find callback.\n";
+ // If anything goes wrong materializing Sym then report it to the session
+ // and return the ErrorHandlerAddress;
+ ES.reportError(Sym.takeError());
+ return ErrorHandlerAddress;
+ }
+}
+
+Expected<std::unique_ptr<JITCompileCallbackManager>>
+createLocalCompileCallbackManager(const Triple &T, ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddress) {
+ switch (T.getArch()) {
+ default:
+ return make_error<StringError>(
+ std::string("No callback manager available for ") + T.str(),
+ inconvertibleErrorCode());
+ case Triple::aarch64:
+ case Triple::aarch64_32: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcAArch64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::x86: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcI386> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::mips: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips32Be> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+ case Triple::mipsel: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips32Le> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::mips64:
+ case Triple::mips64el: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::x86_64: {
+ if (T.getOS() == Triple::OSType::Win32) {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_Win32> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ } else {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_SysV> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+ }
+
+ }
+}
+
+std::function<std::unique_ptr<IndirectStubsManager>()>
+createLocalIndirectStubsManagerBuilder(const Triple &T) {
+ switch (T.getArch()) {
+ default:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcGenericABI>>();
+ };
+
+ case Triple::aarch64:
+ case Triple::aarch64_32:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcAArch64>>();
+ };
+
+ case Triple::x86:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcI386>>();
+ };
+
+ case Triple::mips:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips32Be>>();
+ };
+
+ case Triple::mipsel:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips32Le>>();
+ };
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips64>>();
+ };
+
+ case Triple::x86_64:
+ if (T.getOS() == Triple::OSType::Win32) {
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_Win32>>();
+ };
+ } else {
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_SysV>>();
+ };
+ }
+
+ }
+}
+
+Constant* createIRTypedAddress(FunctionType &FT, JITTargetAddress Addr) {
+ Constant *AddrIntVal =
+ ConstantInt::get(Type::getInt64Ty(FT.getContext()), Addr);
+ Constant *AddrPtrVal =
+ ConstantExpr::getCast(Instruction::IntToPtr, AddrIntVal,
+ PointerType::get(&FT, 0));
+ return AddrPtrVal;
+}
+
+GlobalVariable* createImplPointer(PointerType &PT, Module &M,
+ const Twine &Name, Constant *Initializer) {
+ auto IP = new GlobalVariable(M, &PT, false, GlobalValue::ExternalLinkage,
+ Initializer, Name, nullptr,
+ GlobalValue::NotThreadLocal, 0, true);
+ IP->setVisibility(GlobalValue::HiddenVisibility);
+ return IP;
+}
+
+void makeStub(Function &F, Value &ImplPointer) {
+ assert(F.isDeclaration() && "Can't turn a definition into a stub.");
+ assert(F.getParent() && "Function isn't in a module.");
+ Module &M = *F.getParent();
+ BasicBlock *EntryBlock = BasicBlock::Create(M.getContext(), "entry", &F);
+ IRBuilder<> Builder(EntryBlock);
+ LoadInst *ImplAddr = Builder.CreateLoad(F.getType(), &ImplPointer);
+ std::vector<Value*> CallArgs;
+ for (auto &A : F.args())
+ CallArgs.push_back(&A);
+ CallInst *Call = Builder.CreateCall(F.getFunctionType(), ImplAddr, CallArgs);
+ Call->setTailCall();
+ Call->setAttributes(F.getAttributes());
+ if (F.getReturnType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Call);
+}
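+
+// Usage sketch (assumes a declared Function `F` and a placeholder address
+// `InitAddr`): redirecting calls to F through an updatable pointer.
+//
+//   Constant *Init = createIRTypedAddress(*F.getFunctionType(), InitAddr);
+//   GlobalVariable *ImplPtr = createImplPointer(
+//       *F.getType(), *F.getParent(), Twine(F.getName()) + "$impl", Init);
+//   makeStub(F, *ImplPtr);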
+
+std::vector<GlobalValue *> SymbolLinkagePromoter::operator()(Module &M) {
+ std::vector<GlobalValue *> PromotedGlobals;
+
+ for (auto &GV : M.global_values()) {
+ bool Promoted = true;
+
+ // Rename if necessary.
+ if (!GV.hasName())
+ GV.setName("__orc_anon." + Twine(NextId++));
+ else if (GV.getName().startswith("\01L"))
+ GV.setName("__" + GV.getName().substr(1) + "." + Twine(NextId++));
+ else if (GV.hasLocalLinkage())
+ GV.setName("__orc_lcl." + GV.getName() + "." + Twine(NextId++));
+ else
+ Promoted = false;
+
+ if (GV.hasLocalLinkage()) {
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+ GV.setVisibility(GlobalValue::HiddenVisibility);
+ Promoted = true;
+ }
+ GV.setUnnamedAddr(GlobalValue::UnnamedAddr::None);
+
+ if (Promoted)
+ PromotedGlobals.push_back(&GV);
+ }
+
+ return PromotedGlobals;
+}
+
+Function* cloneFunctionDecl(Module &Dst, const Function &F,
+ ValueToValueMapTy *VMap) {
+ Function *NewF =
+ Function::Create(cast<FunctionType>(F.getValueType()),
+ F.getLinkage(), F.getName(), &Dst);
+ NewF->copyAttributesFrom(&F);
+
+ if (VMap) {
+ (*VMap)[&F] = NewF;
+ auto NewArgI = NewF->arg_begin();
+ for (auto ArgI = F.arg_begin(), ArgE = F.arg_end(); ArgI != ArgE;
+ ++ArgI, ++NewArgI)
+ (*VMap)[&*ArgI] = &*NewArgI;
+ }
+
+ return NewF;
+}
+
+void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer,
+ Function *NewF) {
+ assert(!OrigF.isDeclaration() && "Nothing to move");
+ if (!NewF)
+ NewF = cast<Function>(VMap[&OrigF]);
+ else
+ assert(VMap[&OrigF] == NewF && "Incorrect function mapping in VMap.");
+ assert(NewF && "Function mapping missing from VMap.");
+ assert(NewF->getParent() != OrigF.getParent() &&
+ "moveFunctionBody should only be used to move bodies between "
+ "modules.");
+
+ SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
+ CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns,
+ "", nullptr, nullptr, Materializer);
+ OrigF.deleteBody();
+}
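+
+// Usage sketch (assumes a defined Function `F` in a source module and a
+// destination Module `DstM`): splitting a definition across modules.
+//
+//   ValueToValueMapTy VMap;
+//   Function *NewF = cloneFunctionDecl(DstM, *F, &VMap);
+//   moveFunctionBody(*F, VMap, /*Materializer=*/nullptr, NewF);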
+
+GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+ ValueToValueMapTy *VMap) {
+ GlobalVariable *NewGV = new GlobalVariable(
+ Dst, GV.getValueType(), GV.isConstant(),
+ GV.getLinkage(), nullptr, GV.getName(), nullptr,
+ GV.getThreadLocalMode(), GV.getType()->getAddressSpace());
+ NewGV->copyAttributesFrom(&GV);
+ if (VMap)
+ (*VMap)[&GV] = NewGV;
+ return NewGV;
+}
+
+void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
+ ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer,
+ GlobalVariable *NewGV) {
+ assert(OrigGV.hasInitializer() && "Nothing to move");
+ if (!NewGV)
+ NewGV = cast<GlobalVariable>(VMap[&OrigGV]);
+ else
+ assert(VMap[&OrigGV] == NewGV &&
+ "Incorrect global variable mapping in VMap.");
+ assert(NewGV->getParent() != OrigGV.getParent() &&
+ "moveGlobalVariableInitializer should only be used to move "
+ "initializers between modules");
+
+ NewGV->setInitializer(MapValue(OrigGV.getInitializer(), VMap, RF_None,
+ nullptr, Materializer));
+}
+
+GlobalAlias* cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
+ ValueToValueMapTy &VMap) {
+ assert(OrigA.getAliasee() && "Original alias doesn't have an aliasee?");
+ auto *NewA = GlobalAlias::create(OrigA.getValueType(),
+ OrigA.getType()->getPointerAddressSpace(),
+ OrigA.getLinkage(), OrigA.getName(), &Dst);
+ NewA->copyAttributesFrom(&OrigA);
+ VMap[&OrigA] = NewA;
+ return NewA;
+}
+
+void cloneModuleFlagsMetadata(Module &Dst, const Module &Src,
+ ValueToValueMapTy &VMap) {
+ auto *MFs = Src.getModuleFlagsMetadata();
+ if (!MFs)
+ return;
+ for (auto *MF : MFs->operands())
+ Dst.addModuleFlag(MapMetadata(MF, VMap));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp
new file mode 100644
index 00000000000..8cf66c9e759
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp
@@ -0,0 +1,141 @@
+//===----- JITTargetMachineBuilder.cpp - Build TargetMachines for JIT -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+
+#include "llvm/Support/Host.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace orc {
+
+JITTargetMachineBuilder::JITTargetMachineBuilder(Triple TT)
+ : TT(std::move(TT)) {
+ Options.EmulatedTLS = true;
+ Options.ExplicitEmulatedTLS = true;
+}
+
+Expected<JITTargetMachineBuilder> JITTargetMachineBuilder::detectHost() {
+ // FIXME: getProcessTriple is bogus. It returns the host LLVM was compiled on,
+ // rather than a valid triple for the current process.
+ JITTargetMachineBuilder TMBuilder((Triple(sys::getProcessTriple())));
+
+ // Retrieve host CPU name and sub-target features and add them to builder.
+ // Relocation model, code model and codegen opt level are kept to default
+ // values.
+ llvm::StringMap<bool> FeatureMap;
+ llvm::sys::getHostCPUFeatures(FeatureMap);
+ for (auto &Feature : FeatureMap)
+ TMBuilder.getFeatures().AddFeature(Feature.first(), Feature.second);
+
+ TMBuilder.setCPU(std::string(llvm::sys::getHostCPUName()));
+
+ return TMBuilder;
+}
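+
+// Usage sketch: configuring a TargetMachine for the current process.
+//
+//   auto JTMB = JITTargetMachineBuilder::detectHost();
+//   if (!JTMB)
+//     return JTMB.takeError();
+//   auto TM = JTMB->createTargetMachine();
+//   if (!TM)
+//     return TM.takeError();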
+
+Expected<std::unique_ptr<TargetMachine>>
+JITTargetMachineBuilder::createTargetMachine() {
+
+ std::string ErrMsg;
+ auto *TheTarget = TargetRegistry::lookupTarget(TT.getTriple(), ErrMsg);
+ if (!TheTarget)
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+
+ auto *TM =
+ TheTarget->createTargetMachine(TT.getTriple(), CPU, Features.getString(),
+ Options, RM, CM, OptLevel, /*JIT*/ true);
+ if (!TM)
+ return make_error<StringError>("Could not allocate target machine",
+ inconvertibleErrorCode());
+
+ return std::unique_ptr<TargetMachine>(TM);
+}
+
+JITTargetMachineBuilder &JITTargetMachineBuilder::addFeatures(
+ const std::vector<std::string> &FeatureVec) {
+ for (const auto &F : FeatureVec)
+ Features.AddFeature(F);
+ return *this;
+}
+
+#ifndef NDEBUG
+raw_ostream &operator<<(raw_ostream &OS, const JITTargetMachineBuilder &JTMB) {
+ OS << "{ Triple = \"" << JTMB.TT.str() << "\", CPU = \"" << JTMB.CPU
+ << "\", Options = <not-printable>, Relocation Model = ";
+
+ if (JTMB.RM) {
+ switch (*JTMB.RM) {
+ case Reloc::Static:
+ OS << "Static";
+ break;
+ case Reloc::PIC_:
+ OS << "PIC_";
+ break;
+ case Reloc::DynamicNoPIC:
+ OS << "DynamicNoPIC";
+ break;
+ case Reloc::ROPI:
+ OS << "ROPI";
+ break;
+ case Reloc::RWPI:
+ OS << "RWPI";
+ break;
+ case Reloc::ROPI_RWPI:
+ OS << "ROPI_RWPI";
+ break;
+ }
+ } else
+ OS << "unspecified";
+
+ OS << ", Code Model = ";
+
+ if (JTMB.CM) {
+ switch (*JTMB.CM) {
+ case CodeModel::Tiny:
+ OS << "Tiny";
+ break;
+ case CodeModel::Small:
+ OS << "Small";
+ break;
+ case CodeModel::Kernel:
+ OS << "Kernel";
+ break;
+ case CodeModel::Medium:
+ OS << "Medium";
+ break;
+ case CodeModel::Large:
+ OS << "Large";
+ break;
+ }
+ } else
+ OS << "unspecified";
+
+ OS << ", Optimization Level = ";
+ switch (JTMB.OptLevel) {
+ case CodeGenOpt::None:
+ OS << "None";
+ break;
+ case CodeGenOpt::Less:
+ OS << "Less";
+ break;
+ case CodeGenOpt::Default:
+ OS << "Default";
+ break;
+ case CodeGenOpt::Aggressive:
+ OS << "Aggressive";
+ break;
+ }
+
+ OS << " }";
+ return OS;
+}
+#endif // NDEBUG
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/LLJIT.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/LLJIT.cpp
new file mode 100644
index 00000000000..c368c1e3713
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/LLJIT.cpp
@@ -0,0 +1,1230 @@
+//===--------- LLJIT.cpp - An ORC-based JIT for compiling LLVM IR ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/Orc/MachOPlatform.h"
+#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcessControl.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DynamicLibrary.h"
+
+#include <map>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+/// Adds helper function decls and wrapper functions that call the helper with
+/// some additional prefix arguments.
+///
+/// E.g. For wrapper "foo" with type i8(i8, i64), helper "bar", and prefix
+/// args i32 4 and i16 12345, this function will add:
+///
+/// declare i8 @bar(i32, i16, i8, i64)
+///
+/// define i8 @foo(i8, i64) {
+/// entry:
+/// %2 = call i8 @bar(i32 4, i16 12345, i8 %0, i64 %1)
+/// ret i8 %2
+/// }
+///
+Function *addHelperAndWrapper(Module &M, StringRef WrapperName,
+ FunctionType *WrapperFnType,
+ GlobalValue::VisibilityTypes WrapperVisibility,
+ StringRef HelperName,
+ ArrayRef<Value *> HelperPrefixArgs) {
+ std::vector<Type *> HelperArgTypes;
+ for (auto *Arg : HelperPrefixArgs)
+ HelperArgTypes.push_back(Arg->getType());
+ for (auto *T : WrapperFnType->params())
+ HelperArgTypes.push_back(T);
+ auto *HelperFnType =
+ FunctionType::get(WrapperFnType->getReturnType(), HelperArgTypes, false);
+ auto *HelperFn = Function::Create(HelperFnType, GlobalValue::ExternalLinkage,
+ HelperName, M);
+
+ auto *WrapperFn = Function::Create(
+ WrapperFnType, GlobalValue::ExternalLinkage, WrapperName, M);
+ WrapperFn->setVisibility(WrapperVisibility);
+
+ auto *EntryBlock = BasicBlock::Create(M.getContext(), "entry", WrapperFn);
+ IRBuilder<> IB(EntryBlock);
+
+ std::vector<Value *> HelperArgs;
+ for (auto *Arg : HelperPrefixArgs)
+ HelperArgs.push_back(Arg);
+ for (auto &Arg : WrapperFn->args())
+ HelperArgs.push_back(&Arg);
+ auto *HelperResult = IB.CreateCall(HelperFn, HelperArgs);
+ if (HelperFn->getReturnType()->isVoidTy())
+ IB.CreateRetVoid();
+ else
+ IB.CreateRet(HelperResult);
+
+ return WrapperFn;
+}
+
+class GenericLLVMIRPlatformSupport;
+
+/// orc::Platform component of Generic LLVM IR Platform support.
+/// Just forwards calls to the GenericLLVMIRPlatformSupport class below.
+class GenericLLVMIRPlatform : public Platform {
+public:
+ GenericLLVMIRPlatform(GenericLLVMIRPlatformSupport &S) : S(S) {}
+ Error setupJITDylib(JITDylib &JD) override;
+ Error notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) override;
+ Error notifyRemoving(ResourceTracker &RT) override {
+ // Noop -- Nothing to do (yet).
+ return Error::success();
+ }
+
+private:
+ GenericLLVMIRPlatformSupport &S;
+};
+
+/// This transform parses llvm.global_ctors to produce a single initialization
+/// function for the module, records the function, then deletes
+/// llvm.global_ctors.
+class GlobalCtorDtorScraper {
+public:
+
+ GlobalCtorDtorScraper(GenericLLVMIRPlatformSupport &PS,
+ StringRef InitFunctionPrefix)
+ : PS(PS), InitFunctionPrefix(InitFunctionPrefix) {}
+ Expected<ThreadSafeModule> operator()(ThreadSafeModule TSM,
+ MaterializationResponsibility &R);
+
+private:
+ GenericLLVMIRPlatformSupport &PS;
+ StringRef InitFunctionPrefix;
+};
+
+/// Generic IR Platform Support
+///
+/// Scrapes llvm.global_ctors and llvm.global_dtors and replaces them with
+/// specially named 'init' and 'deinit'. Injects definitions / interposes for
+/// some runtime API, including __cxa_atexit, dlopen, and dlclose.
+class GenericLLVMIRPlatformSupport : public LLJIT::PlatformSupport {
+public:
+ GenericLLVMIRPlatformSupport(LLJIT &J)
+ : J(J), InitFunctionPrefix(J.mangle("__orc_init_func.")) {
+
+ getExecutionSession().setPlatform(
+ std::make_unique<GenericLLVMIRPlatform>(*this));
+
+ setInitTransform(J, GlobalCtorDtorScraper(*this, InitFunctionPrefix));
+
+ SymbolMap StdInterposes;
+
+ StdInterposes[J.mangleAndIntern("__lljit.platform_support_instance")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(this),
+ JITSymbolFlags::Exported);
+ StdInterposes[J.mangleAndIntern("__lljit.cxa_atexit_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(registerAtExitHelper),
+ JITSymbolFlags());
+
+ cantFail(
+ J.getMainJITDylib().define(absoluteSymbols(std::move(StdInterposes))));
+ cantFail(setupJITDylib(J.getMainJITDylib()));
+ cantFail(J.addIRModule(J.getMainJITDylib(), createPlatformRuntimeModule()));
+ }
+
+ ExecutionSession &getExecutionSession() { return J.getExecutionSession(); }
+
+ /// Adds a module that defines the __dso_handle global.
+ Error setupJITDylib(JITDylib &JD) {
+
+ // Add per-jitdylib standard interposes.
+ SymbolMap PerJDInterposes;
+ PerJDInterposes[J.mangleAndIntern("__lljit.run_atexits_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(runAtExitsHelper),
+ JITSymbolFlags());
+ cantFail(JD.define(absoluteSymbols(std::move(PerJDInterposes))));
+
+ auto Ctx = std::make_unique<LLVMContext>();
+ auto M = std::make_unique<Module>("__standard_lib", *Ctx);
+ M->setDataLayout(J.getDataLayout());
+
+ auto *Int64Ty = Type::getInt64Ty(*Ctx);
+    auto *DSOHandle = new GlobalVariable(
+        *M, Int64Ty, true, GlobalValue::ExternalLinkage,
+        ConstantInt::get(Int64Ty, pointerToJITTargetAddress(&JD)),
+        "__dso_handle");
+    DSOHandle->setVisibility(GlobalValue::DefaultVisibility);
+
+ auto *GenericIRPlatformSupportTy =
+ StructType::create(*Ctx, "lljit.GenericLLJITIRPlatformSupport");
+
+ auto *PlatformInstanceDecl = new GlobalVariable(
+ *M, GenericIRPlatformSupportTy, true, GlobalValue::ExternalLinkage,
+ nullptr, "__lljit.platform_support_instance");
+
+ auto *VoidTy = Type::getVoidTy(*Ctx);
+ addHelperAndWrapper(
+ *M, "__lljit_run_atexits", FunctionType::get(VoidTy, {}, false),
+ GlobalValue::HiddenVisibility, "__lljit.run_atexits_helper",
+ {PlatformInstanceDecl, DSOHandle});
+
+ return J.addIRModule(JD, ThreadSafeModule(std::move(M), std::move(Ctx)));
+ }
+
+ Error notifyAdding(ResourceTracker &RT, const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ if (auto &InitSym = MU.getInitializerSymbol())
+ InitSymbols[&JD].add(InitSym, SymbolLookupFlags::WeaklyReferencedSymbol);
+ else {
+ // If there's no identified init symbol attached, but there is a symbol
+ // with the GenericIRPlatform::InitFunctionPrefix, then treat that as
+ // an init function. Add the symbol to both the InitSymbols map (which
+ // will trigger a lookup to materialize the module) and the InitFunctions
+ // map (which holds the names of the symbols to execute).
+ for (auto &KV : MU.getSymbols())
+ if ((*KV.first).startswith(InitFunctionPrefix)) {
+ InitSymbols[&JD].add(KV.first,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ InitFunctions[&JD].add(KV.first);
+ }
+ }
+ return Error::success();
+ }
+
+ Error initialize(JITDylib &JD) override {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport getting initializers to run\n";
+ });
+ if (auto Initializers = getInitializers(JD)) {
+ LLVM_DEBUG(
+ { dbgs() << "GenericLLVMIRPlatformSupport running initializers\n"; });
+ for (auto InitFnAddr : *Initializers) {
+ LLVM_DEBUG({
+ dbgs() << " Running init " << formatv("{0:x16}", InitFnAddr)
+ << "...\n";
+ });
+ auto *InitFn = jitTargetAddressToFunction<void (*)()>(InitFnAddr);
+ InitFn();
+ }
+ } else
+ return Initializers.takeError();
+ return Error::success();
+ }
+
+ Error deinitialize(JITDylib &JD) override {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport getting deinitializers to run\n";
+ });
+ if (auto Deinitializers = getDeinitializers(JD)) {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport running deinitializers\n";
+ });
+ for (auto DeinitFnAddr : *Deinitializers) {
+ LLVM_DEBUG({
+ dbgs() << " Running deinit " << formatv("{0:x16}", DeinitFnAddr)
+ << "...\n";
+ });
+ auto *DeinitFn = jitTargetAddressToFunction<void (*)()>(DeinitFnAddr);
+ DeinitFn();
+ }
+ } else
+ return Deinitializers.takeError();
+
+ return Error::success();
+ }
+
+ void registerInitFunc(JITDylib &JD, SymbolStringPtr InitName) {
+ getExecutionSession().runSessionLocked([&]() {
+ InitFunctions[&JD].add(InitName);
+ });
+ }
+
+private:
+
+ Expected<std::vector<JITTargetAddress>> getInitializers(JITDylib &JD) {
+ if (auto Err = issueInitLookups(JD))
+ return std::move(Err);
+
+ DenseMap<JITDylib *, SymbolLookupSet> LookupSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ getExecutionSession().runSessionLocked([&]() {
+ DFSLinkOrder = JD.getDFSLinkOrder();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto IFItr = InitFunctions.find(NextJD.get());
+ if (IFItr != InitFunctions.end()) {
+ LookupSymbols[NextJD.get()] = std::move(IFItr->second);
+ InitFunctions.erase(IFItr);
+ }
+ }
+ });
+
+ LLVM_DEBUG({
+ dbgs() << "JITDylib init order is [ ";
+ for (auto &JD : llvm::reverse(DFSLinkOrder))
+ dbgs() << "\"" << JD->getName() << "\" ";
+ dbgs() << "]\n";
+ dbgs() << "Looking up init functions:\n";
+ for (auto &KV : LookupSymbols)
+ dbgs() << " \"" << KV.first->getName() << "\": " << KV.second << "\n";
+ });
+
+ auto &ES = getExecutionSession();
+ auto LookupResult = Platform::lookupInitSymbols(ES, LookupSymbols);
+
+ if (!LookupResult)
+ return LookupResult.takeError();
+
+ std::vector<JITTargetAddress> Initializers;
+ while (!DFSLinkOrder.empty()) {
+ auto &NextJD = *DFSLinkOrder.back();
+ DFSLinkOrder.pop_back();
+ auto InitsItr = LookupResult->find(&NextJD);
+ if (InitsItr == LookupResult->end())
+ continue;
+ for (auto &KV : InitsItr->second)
+ Initializers.push_back(KV.second.getAddress());
+ }
+
+ return Initializers;
+ }
+
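+ // Collects deinitializers in DFS link order. For each JITDylib the
+ // __lljit_run_atexits wrapper (if present) is queued ahead of any other
+ // deinit functions, so atexit callbacks run first.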
+ Expected<std::vector<JITTargetAddress>> getDeinitializers(JITDylib &JD) {
+ auto &ES = getExecutionSession();
+
+ auto LLJITRunAtExits = J.mangleAndIntern("__lljit_run_atexits");
+
+ DenseMap<JITDylib *, SymbolLookupSet> LookupSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ ES.runSessionLocked([&]() {
+ DFSLinkOrder = JD.getDFSLinkOrder();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto &JDLookupSymbols = LookupSymbols[NextJD.get()];
+ auto DIFItr = DeInitFunctions.find(NextJD.get());
+ if (DIFItr != DeInitFunctions.end()) {
+ JDLookupSymbols = std::move(DIFItr->second);
+ DeInitFunctions.erase(DIFItr);
+ }
+ JDLookupSymbols.add(LLJITRunAtExits,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ }
+ });
+
+ LLVM_DEBUG({
+ dbgs() << "JITDylib deinit order is [ ";
+ for (auto &JD : DFSLinkOrder)
+ dbgs() << "\"" << JD->getName() << "\" ";
+ dbgs() << "]\n";
+ dbgs() << "Looking up deinit functions:\n";
+ for (auto &KV : LookupSymbols)
+ dbgs() << " \"" << KV.first->getName() << "\": " << KV.second << "\n";
+ });
+
+ auto LookupResult = Platform::lookupInitSymbols(ES, LookupSymbols);
+
+ if (!LookupResult)
+ return LookupResult.takeError();
+
+ std::vector<JITTargetAddress> DeInitializers;
+ for (auto &NextJD : DFSLinkOrder) {
+ auto DeInitsItr = LookupResult->find(NextJD.get());
+ assert(DeInitsItr != LookupResult->end() &&
+ "Every JD should have at least __lljit_run_atexits");
+
+ auto RunAtExitsItr = DeInitsItr->second.find(LLJITRunAtExits);
+ if (RunAtExitsItr != DeInitsItr->second.end())
+ DeInitializers.push_back(RunAtExitsItr->second.getAddress());
+
+ for (auto &KV : DeInitsItr->second)
+ if (KV.first != LLJITRunAtExits)
+ DeInitializers.push_back(KV.second.getAddress());
+ }
+
+ return DeInitializers;
+ }
+
+ /// Issue lookups for all init symbols required to initialize JD (and any
+ /// JITDylibs that it depends on).
+ Error issueInitLookups(JITDylib &JD) {
+ DenseMap<JITDylib *, SymbolLookupSet> RequiredInitSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ getExecutionSession().runSessionLocked([&]() {
+ DFSLinkOrder = JD.getDFSLinkOrder();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto ISItr = InitSymbols.find(NextJD.get());
+ if (ISItr != InitSymbols.end()) {
+ RequiredInitSymbols[NextJD.get()] = std::move(ISItr->second);
+ InitSymbols.erase(ISItr);
+ }
+ }
+ });
+
+ return Platform::lookupInitSymbols(getExecutionSession(),
+ RequiredInitSymbols)
+ .takeError();
+ }
+
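+ // Native helpers reachable from JIT'd code. They are published as absolute
+ // symbols (see the constructor) and reached via IR wrappers that pass
+ // __lljit.platform_support_instance as the Self argument.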
+ static void registerAtExitHelper(void *Self, void (*F)(void *), void *Ctx,
+ void *DSOHandle) {
+ LLVM_DEBUG({
+ dbgs() << "Registering atexit function " << (void *)F << " for JD "
+ << (*static_cast<JITDylib **>(DSOHandle))->getName() << "\n";
+ });
+ static_cast<GenericLLVMIRPlatformSupport *>(Self)->AtExitMgr.registerAtExit(
+ F, Ctx, DSOHandle);
+ }
+
+ static void runAtExitsHelper(void *Self, void *DSOHandle) {
+ LLVM_DEBUG({
+ dbgs() << "Running atexit functions for JD "
+ << (*static_cast<JITDylib **>(DSOHandle))->getName() << "\n";
+ });
+ static_cast<GenericLLVMIRPlatformSupport *>(Self)->AtExitMgr.runAtExits(
+ DSOHandle);
+ }
+
+ // Constructs an LLVM IR module containing platform runtime globals,
+ // functions, and interposes.
+ ThreadSafeModule createPlatformRuntimeModule() {
+ auto Ctx = std::make_unique<LLVMContext>();
+ auto M = std::make_unique<Module>("__standard_lib", *Ctx);
+ M->setDataLayout(J.getDataLayout());
+
+ auto *GenericIRPlatformSupportTy =
+ StructType::create(*Ctx, "lljit.GenericLLJITIRPlatformSupport");
+
+ auto *PlatformInstanceDecl = new GlobalVariable(
+ *M, GenericIRPlatformSupportTy, true, GlobalValue::ExternalLinkage,
+ nullptr, "__lljit.platform_support_instance");
+
+ auto *Int8Ty = Type::getInt8Ty(*Ctx);
+ auto *IntTy = Type::getIntNTy(*Ctx, sizeof(int) * CHAR_BIT);
+ auto *VoidTy = Type::getVoidTy(*Ctx);
+ auto *BytePtrTy = PointerType::getUnqual(Int8Ty);
+ auto *AtExitCallbackTy = FunctionType::get(VoidTy, {BytePtrTy}, false);
+ auto *AtExitCallbackPtrTy = PointerType::getUnqual(AtExitCallbackTy);
+
+ addHelperAndWrapper(
+ *M, "__cxa_atexit",
+ FunctionType::get(IntTy, {AtExitCallbackPtrTy, BytePtrTy, BytePtrTy},
+ false),
+ GlobalValue::DefaultVisibility, "__lljit.cxa_atexit_helper",
+ {PlatformInstanceDecl});
+
+ return ThreadSafeModule(std::move(M), std::move(Ctx));
+ }
+
+ LLJIT &J;
+ std::string InitFunctionPrefix;
+ DenseMap<JITDylib *, SymbolLookupSet> InitSymbols;
+ DenseMap<JITDylib *, SymbolLookupSet> InitFunctions;
+ DenseMap<JITDylib *, SymbolLookupSet> DeInitFunctions;
+ ItaniumCXAAtExitSupport AtExitMgr;
+};
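+
+ // Hypothetical usage sketch: this support class is installed implicitly by
+ // setUpGenericLLVMIRPlatform() (defined later in this file), so a typical
+ // client would just write something like:
+ //
+ //   auto J = cantFail(LLJITBuilder().create());
+ //   cantFail(J->addIRModule(std::move(TSM)));
+ //   cantFail(J->initialize(J->getMainJITDylib()));   // run static ctors
+ //   // ... use the JIT ...
+ //   cantFail(J->deinitialize(J->getMainJITDylib())); // run atexits/dtors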
+
+Error GenericLLVMIRPlatform::setupJITDylib(JITDylib &JD) {
+ return S.setupJITDylib(JD);
+}
+
+Error GenericLLVMIRPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ return S.notifyAdding(RT, MU);
+}
+
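+ // Scrapes llvm.global_ctors from each module, replacing it with a hidden
+ // init function (named InitFunctionPrefix + module id) that calls the
+ // constructors in priority order, then registers that function with the
+ // platform support instance.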
+Expected<ThreadSafeModule>
+GlobalCtorDtorScraper::operator()(ThreadSafeModule TSM,
+ MaterializationResponsibility &R) {
+ auto Err = TSM.withModuleDo([&](Module &M) -> Error {
+ auto &Ctx = M.getContext();
+ auto *GlobalCtors = M.getNamedGlobal("llvm.global_ctors");
+
+ // If there's no llvm.global_ctors or it's just a decl then skip.
+ if (!GlobalCtors || GlobalCtors->isDeclaration())
+ return Error::success();
+
+ std::string InitFunctionName;
+ raw_string_ostream(InitFunctionName)
+ << InitFunctionPrefix << M.getModuleIdentifier();
+
+ MangleAndInterner Mangle(PS.getExecutionSession(), M.getDataLayout());
+ auto InternedName = Mangle(InitFunctionName);
+ if (auto Err =
+ R.defineMaterializing({{InternedName, JITSymbolFlags::Callable}}))
+ return Err;
+
+ auto *InitFunc =
+ Function::Create(FunctionType::get(Type::getVoidTy(Ctx), {}, false),
+ GlobalValue::ExternalLinkage, InitFunctionName, &M);
+ InitFunc->setVisibility(GlobalValue::HiddenVisibility);
+ std::vector<std::pair<Function *, unsigned>> Inits;
+ for (auto E : getConstructors(M))
+ Inits.push_back(std::make_pair(E.Func, E.Priority));
+ // Run constructors in ascending priority order, as the semantics of
+ // llvm.global_ctors require.
+ llvm::sort(Inits, [](const std::pair<Function *, unsigned> &LHS,
+ const std::pair<Function *, unsigned> &RHS) {
+ return LHS.second < RHS.second;
+ });
+ auto *EntryBlock = BasicBlock::Create(Ctx, "entry", InitFunc);
+ IRBuilder<> IB(EntryBlock);
+ for (auto &KV : Inits)
+ IB.CreateCall(KV.first);
+ IB.CreateRetVoid();
+
+ PS.registerInitFunc(R.getTargetJITDylib(), InternedName);
+ GlobalCtors->eraseFromParent();
+ return Error::success();
+ });
+
+ if (Err)
+ return std::move(Err);
+
+ return std::move(TSM);
+}
+
+class MachOPlatformSupport : public LLJIT::PlatformSupport {
+public:
+ using DLOpenType = void *(*)(const char *Name, int Mode);
+ using DLCloseType = int (*)(void *Handle);
+ using DLSymType = void *(*)(void *Handle, const char *Name);
+ using DLErrorType = const char *(*)();
+
+ struct DlFcnValues {
+ Optional<void *> RTLDDefault;
+ DLOpenType dlopen = nullptr;
+ DLCloseType dlclose = nullptr;
+ DLSymType dlsym = nullptr;
+ DLErrorType dlerror = nullptr;
+ };
+
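+ // Builds the platform support instance: makes the process's own symbols
+ // visible, then captures the host dlopen/dlclose/dlsym/dlerror entry
+ // points, failing if any of them is missing.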
+ static Expected<std::unique_ptr<MachOPlatformSupport>>
+ Create(LLJIT &J, JITDylib &PlatformJITDylib) {
+
+ // Make process symbols visible.
+ {
+ std::string ErrMsg;
+ auto Lib = sys::DynamicLibrary::getPermanentLibrary(nullptr, &ErrMsg);
+ if (!Lib.isValid())
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+
+ DlFcnValues DlFcn;
+
+ // Add support for RTLDDefault on known platforms.
+#ifdef __APPLE__
+ DlFcn.RTLDDefault = reinterpret_cast<void *>(-2);
+#endif // __APPLE__
+
+ if (auto Err = hookUpFunction(DlFcn.dlopen, "dlopen"))
+ return std::move(Err);
+ if (auto Err = hookUpFunction(DlFcn.dlclose, "dlclose"))
+ return std::move(Err);
+ if (auto Err = hookUpFunction(DlFcn.dlsym, "dlsym"))
+ return std::move(Err);
+ if (auto Err = hookUpFunction(DlFcn.dlerror, "dlerror"))
+ return std::move(Err);
+
+ std::unique_ptr<MachOPlatformSupport> MP(
+ new MachOPlatformSupport(J, PlatformJITDylib, DlFcn));
+ return std::move(MP);
+ }
+
+ Error initialize(JITDylib &JD) override {
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatformSupport initializing \"" << JD.getName()
+ << "\"\n";
+ });
+
+ auto InitSeq = MP.getInitializerSequence(JD);
+ if (!InitSeq)
+ return InitSeq.takeError();
+
+ // If ObjC is not enabled but there are JIT'd ObjC inits then return
+ // an error.
+ if (!objCRegistrationEnabled())
+ for (auto &KV : *InitSeq) {
+ if (!KV.second.getObjCSelRefsSections().empty() ||
+ !KV.second.getObjCClassListSections().empty())
+ return make_error<StringError>("JITDylib " + KV.first->getName() +
+ " contains objc metadata but objc"
+ " is not enabled",
+ inconvertibleErrorCode());
+ }
+
+ // Run the initializers.
+ for (auto &KV : *InitSeq) {
+ if (objCRegistrationEnabled()) {
+ KV.second.registerObjCSelectors();
+ if (auto Err = KV.second.registerObjCClasses()) {
+ // FIXME: Roll back registrations on error?
+ return Err;
+ }
+ }
+ KV.second.runModInits();
+ }
+
+ return Error::success();
+ }
+
+ Error deinitialize(JITDylib &JD) override {
+ auto &ES = J.getExecutionSession();
+ if (auto DeinitSeq = MP.getDeinitializerSequence(JD)) {
+ for (auto &KV : *DeinitSeq) {
+ auto DSOHandleName = ES.intern("___dso_handle");
+
+ // FIXME: Run DeInits here.
+ auto Result = ES.lookup(
+ {{KV.first, JITDylibLookupFlags::MatchAllSymbols}},
+ SymbolLookupSet(DSOHandleName,
+ SymbolLookupFlags::WeaklyReferencedSymbol));
+ if (!Result)
+ return Result.takeError();
+ if (Result->empty())
+ continue;
+ assert(Result->count(DSOHandleName) &&
+ "Result does not contain __dso_handle");
+ auto *DSOHandle = jitTargetAddressToPointer<void *>(
+ Result->begin()->second.getAddress());
+ AtExitMgr.runAtExits(DSOHandle);
+ }
+ } else
+ return DeinitSeq.takeError();
+ return Error::success();
+ }
+
+private:
+ template <typename FunctionPtrTy>
+ static Error hookUpFunction(FunctionPtrTy &Fn, const char *Name) {
+ if (auto *FnAddr = sys::DynamicLibrary::SearchForAddressOfSymbol(Name)) {
+ Fn = reinterpret_cast<FunctionPtrTy>(FnAddr);
+ return Error::success();
+ }
+
+ return make_error<StringError>((Twine("Can not enable MachO JIT Platform: "
+ "missing function: ") +
+ Name)
+ .str(),
+ inconvertibleErrorCode());
+ }
+
+ MachOPlatformSupport(LLJIT &J, JITDylib &PlatformJITDylib, DlFcnValues DlFcn)
+ : J(J), MP(setupPlatform(J)), DlFcn(std::move(DlFcn)) {
+
+ SymbolMap HelperSymbols;
+
+ // platform and atexit helpers.
+ HelperSymbols[J.mangleAndIntern("__lljit.platform_support_instance")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(this), JITSymbolFlags());
+ HelperSymbols[J.mangleAndIntern("__lljit.cxa_atexit_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(registerAtExitHelper),
+ JITSymbolFlags());
+ HelperSymbols[J.mangleAndIntern("__lljit.run_atexits_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(runAtExitsHelper),
+ JITSymbolFlags());
+
+ // dlfcn helpers.
+ HelperSymbols[J.mangleAndIntern("__lljit.dlopen_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(dlopenHelper),
+ JITSymbolFlags());
+ HelperSymbols[J.mangleAndIntern("__lljit.dlclose_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(dlcloseHelper),
+ JITSymbolFlags());
+ HelperSymbols[J.mangleAndIntern("__lljit.dlsym_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(dlsymHelper),
+ JITSymbolFlags());
+ HelperSymbols[J.mangleAndIntern("__lljit.dlerror_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(dlerrorHelper),
+ JITSymbolFlags());
+
+ cantFail(
+ PlatformJITDylib.define(absoluteSymbols(std::move(HelperSymbols))));
+ cantFail(MP.setupJITDylib(J.getMainJITDylib()));
+ cantFail(J.addIRModule(PlatformJITDylib, createPlatformRuntimeModule()));
+ }
+
+ static MachOPlatform &setupPlatform(LLJIT &J) {
+ auto Tmp = std::make_unique<MachOPlatform>(
+ J.getExecutionSession(),
+ static_cast<ObjectLinkingLayer &>(J.getObjLinkingLayer()),
+ createStandardSymbolsObject(J));
+ auto &MP = *Tmp;
+ J.getExecutionSession().setPlatform(std::move(Tmp));
+ return MP;
+ }
+
+ static std::unique_ptr<MemoryBuffer> createStandardSymbolsObject(LLJIT &J) {
+ LLVMContext Ctx;
+ Module M("__standard_symbols", Ctx);
+ M.setDataLayout(J.getDataLayout());
+
+ auto *Int64Ty = Type::getInt64Ty(Ctx);
+
+ auto *DSOHandle =
+ new GlobalVariable(M, Int64Ty, true, GlobalValue::ExternalLinkage,
+ ConstantInt::get(Int64Ty, 0), "__dso_handle");
+ DSOHandle->setVisibility(GlobalValue::DefaultVisibility);
+
+ return cantFail(J.getIRCompileLayer().getCompiler()(M));
+ }
+
+ ThreadSafeModule createPlatformRuntimeModule() {
+ auto Ctx = std::make_unique<LLVMContext>();
+ auto M = std::make_unique<Module>("__standard_lib", *Ctx);
+ M->setDataLayout(J.getDataLayout());
+
+ auto *MachOPlatformSupportTy =
+ StructType::create(*Ctx, "lljit.MachOPlatformSupport");
+
+ auto *PlatformInstanceDecl = new GlobalVariable(
+ *M, MachOPlatformSupportTy, true, GlobalValue::ExternalLinkage, nullptr,
+ "__lljit.platform_support_instance");
+
+ auto *Int8Ty = Type::getInt8Ty(*Ctx);
+ auto *IntTy = Type::getIntNTy(*Ctx, sizeof(int) * CHAR_BIT);
+ auto *VoidTy = Type::getVoidTy(*Ctx);
+ auto *BytePtrTy = PointerType::getUnqual(Int8Ty);
+ auto *AtExitCallbackTy = FunctionType::get(VoidTy, {BytePtrTy}, false);
+ auto *AtExitCallbackPtrTy = PointerType::getUnqual(AtExitCallbackTy);
+
+ addHelperAndWrapper(
+ *M, "__cxa_atexit",
+ FunctionType::get(IntTy, {AtExitCallbackPtrTy, BytePtrTy, BytePtrTy},
+ false),
+ GlobalValue::DefaultVisibility, "__lljit.cxa_atexit_helper",
+ {PlatformInstanceDecl});
+
+ addHelperAndWrapper(*M, "dlopen",
+ FunctionType::get(BytePtrTy, {BytePtrTy, IntTy}, false),
+ GlobalValue::DefaultVisibility, "__lljit.dlopen_helper",
+ {PlatformInstanceDecl});
+
+ addHelperAndWrapper(*M, "dlclose",
+ FunctionType::get(IntTy, {BytePtrTy}, false),
+ GlobalValue::DefaultVisibility,
+ "__lljit.dlclose_helper", {PlatformInstanceDecl});
+
+ addHelperAndWrapper(
+ *M, "dlsym",
+ FunctionType::get(BytePtrTy, {BytePtrTy, BytePtrTy}, false),
+ GlobalValue::DefaultVisibility, "__lljit.dlsym_helper",
+ {PlatformInstanceDecl});
+
+ addHelperAndWrapper(*M, "dlerror", FunctionType::get(BytePtrTy, {}, false),
+ GlobalValue::DefaultVisibility,
+ "__lljit.dlerror_helper", {PlatformInstanceDecl});
+
+ return ThreadSafeModule(std::move(M), std::move(Ctx));
+ }
+
+ static void registerAtExitHelper(void *Self, void (*F)(void *), void *Ctx,
+ void *DSOHandle) {
+ static_cast<MachOPlatformSupport *>(Self)->AtExitMgr.registerAtExit(
+ F, Ctx, DSOHandle);
+ }
+
+ static void runAtExitsHelper(void *Self, void *DSOHandle) {
+ static_cast<MachOPlatformSupport *>(Self)->AtExitMgr.runAtExits(DSOHandle);
+ }
+
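+ // The jit_dl* functions emulate the dlfcn API for JITDylibs: a JITDylib
+ // name acts as a library path and handles are JITDylib pointers with a
+ // simple ref-count. Anything that doesn't name a JITDylib falls through
+ // to the real implementations captured in DlFcn.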
+ void *jit_dlopen(const char *Path, int Mode) {
+ JITDylib *JDToOpen = nullptr;
+ // FIXME: Do the right thing with Mode flags.
+ {
+ std::lock_guard<std::mutex> Lock(PlatformSupportMutex);
+
+ // Clear any existing error messages.
+ dlErrorMsgs.erase(std::this_thread::get_id());
+
+ if (auto *JD = J.getExecutionSession().getJITDylibByName(Path)) {
+ auto I = JDRefCounts.find(JD);
+ if (I != JDRefCounts.end()) {
+ ++I->second;
+ return JD;
+ }
+
+ JDRefCounts[JD] = 1;
+ JDToOpen = JD;
+ }
+ }
+
+ if (JDToOpen) {
+ if (auto Err = initialize(*JDToOpen)) {
+ recordError(std::move(Err));
+ return nullptr;
+ }
+ }
+
+ // Fall through to dlopen if no JITDylib found for Path.
+ return DlFcn.dlopen(Path, Mode);
+ }
+
+ static void *dlopenHelper(void *Self, const char *Path, int Mode) {
+ return static_cast<MachOPlatformSupport *>(Self)->jit_dlopen(Path, Mode);
+ }
+
+ int jit_dlclose(void *Handle) {
+ JITDylib *JDToClose = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformSupportMutex);
+
+ // Clear any existing error messages.
+ dlErrorMsgs.erase(std::this_thread::get_id());
+
+ auto I = JDRefCounts.find(Handle);
+ if (I != JDRefCounts.end()) {
+ --I->second;
+ if (I->second == 0) {
+ JDRefCounts.erase(I);
+ JDToClose = static_cast<JITDylib *>(Handle);
+ } else
+ return 0;
+ }
+ }
+
+ if (JDToClose) {
+ if (auto Err = deinitialize(*JDToClose)) {
+ recordError(std::move(Err));
+ return -1;
+ }
+ return 0;
+ }
+
+ // Fall through to dlclose if no JITDylib found for Handle.
+ return DlFcn.dlclose(Handle);
+ }
+
+ static int dlcloseHelper(void *Self, void *Handle) {
+ return static_cast<MachOPlatformSupport *>(Self)->jit_dlclose(Handle);
+ }
+
+ void *jit_dlsym(void *Handle, const char *Name) {
+ JITDylibSearchOrder JITSymSearchOrder;
+
+ // FIXME: RTLD_NEXT, RTLD_SELF not supported.
+ {
+ std::lock_guard<std::mutex> Lock(PlatformSupportMutex);
+
+ // Clear any existing error messages.
+ dlErrorMsgs.erase(std::this_thread::get_id());
+
+ if (JDRefCounts.count(Handle)) {
+ JITSymSearchOrder.push_back(
+ {static_cast<JITDylib *>(Handle),
+ JITDylibLookupFlags::MatchExportedSymbolsOnly});
+ } else if (Handle == DlFcn.RTLDDefault) {
+ for (auto &KV : JDRefCounts)
+ JITSymSearchOrder.push_back(
+ {static_cast<JITDylib *>(KV.first),
+ JITDylibLookupFlags::MatchExportedSymbolsOnly});
+ }
+ }
+
+ if (!JITSymSearchOrder.empty()) {
+ auto MangledName = J.mangleAndIntern(Name);
+ SymbolLookupSet Syms(MangledName,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ if (auto Result = J.getExecutionSession().lookup(JITSymSearchOrder, Syms,
+ LookupKind::DLSym)) {
+ auto I = Result->find(MangledName);
+ if (I != Result->end())
+ return jitTargetAddressToPointer<void *>(I->second.getAddress());
+ } else {
+ recordError(Result.takeError());
+ return nullptr;
+ }
+ }
+
+ // Fall through to dlsym.
+ return DlFcn.dlsym(Handle, Name);
+ }
+
+ static void *dlsymHelper(void *Self, void *Handle, const char *Name) {
+ return static_cast<MachOPlatformSupport *>(Self)->jit_dlsym(Handle, Name);
+ }
+
+ const char *jit_dlerror() {
+ {
+ std::lock_guard<std::mutex> Lock(PlatformSupportMutex);
+ auto I = dlErrorMsgs.find(std::this_thread::get_id());
+ if (I != dlErrorMsgs.end())
+ return I->second->c_str();
+ }
+ return DlFcn.dlerror();
+ }
+
+ static const char *dlerrorHelper(void *Self) {
+ return static_cast<MachOPlatformSupport *>(Self)->jit_dlerror();
+ }
+
+ void recordError(Error Err) {
+ std::lock_guard<std::mutex> Lock(PlatformSupportMutex);
+ dlErrorMsgs[std::this_thread::get_id()] =
+ std::make_unique<std::string>(toString(std::move(Err)));
+ }
+
+ std::mutex PlatformSupportMutex;
+ LLJIT &J;
+ MachOPlatform &MP;
+ DlFcnValues DlFcn;
+ ItaniumCXAAtExitSupport AtExitMgr;
+ DenseMap<void *, unsigned> JDRefCounts;
+ std::map<std::thread::id, std::unique_ptr<std::string>> dlErrorMsgs;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+void LLJIT::PlatformSupport::setInitTransform(
+ LLJIT &J, IRTransformLayer::TransformFunction T) {
+ J.InitHelperTransformLayer->setTransform(std::move(T));
+}
+
+LLJIT::PlatformSupport::~PlatformSupport() {}
+
+Error LLJITBuilderState::prepareForConstruction() {
+
+ LLVM_DEBUG(dbgs() << "Preparing to create LLJIT instance...\n");
+
+ if (!JTMB) {
+ LLVM_DEBUG({
+ dbgs() << " No explicitly set JITTargetMachineBuilder. "
+ "Detecting host...\n";
+ });
+ if (auto JTMBOrErr = JITTargetMachineBuilder::detectHost())
+ JTMB = std::move(*JTMBOrErr);
+ else
+ return JTMBOrErr.takeError();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " JITTargetMachineBuilder is " << JTMB << "\n"
+ << " Pre-constructed ExecutionSession: " << (ES ? "Yes" : "No")
+ << "\n"
+ << " DataLayout: ";
+ if (DL)
+ dbgs() << DL->getStringRepresentation() << "\n";
+ else
+ dbgs() << "None (will be created by JITTargetMachineBuilder)\n";
+
+ dbgs() << " Custom object-linking-layer creator: "
+ << (CreateObjectLinkingLayer ? "Yes" : "No") << "\n"
+ << " Custom compile-function creator: "
+ << (CreateCompileFunction ? "Yes" : "No") << "\n"
+ << " Custom platform-setup function: "
+ << (SetUpPlatform ? "Yes" : "No") << "\n"
+ << " Number of compile threads: " << NumCompileThreads;
+ if (!NumCompileThreads)
+ dbgs() << " (code will be compiled on the execution thread)\n";
+ else
+ dbgs() << "\n";
+ });
+
+ // If the client didn't configure any linker options then auto-configure the
+ // JIT linker.
+ if (!CreateObjectLinkingLayer) {
+ auto &TT = JTMB->getTargetTriple();
+ if (TT.isOSBinFormatMachO() &&
+ (TT.getArch() == Triple::aarch64 || TT.getArch() == Triple::x86_64)) {
+
+ JTMB->setRelocationModel(Reloc::PIC_);
+ JTMB->setCodeModel(CodeModel::Small);
+ CreateObjectLinkingLayer =
+ [TPC = this->TPC](
+ ExecutionSession &ES,
+ const Triple &) -> Expected<std::unique_ptr<ObjectLayer>> {
+ std::unique_ptr<ObjectLinkingLayer> ObjLinkingLayer;
+ if (TPC)
+ ObjLinkingLayer =
+ std::make_unique<ObjectLinkingLayer>(ES, TPC->getMemMgr());
+ else
+ ObjLinkingLayer = std::make_unique<ObjectLinkingLayer>(
+ ES, std::make_unique<jitlink::InProcessMemoryManager>());
+ ObjLinkingLayer->addPlugin(std::make_unique<EHFrameRegistrationPlugin>(
+ ES, std::make_unique<jitlink::InProcessEHFrameRegistrar>()));
+ return std::move(ObjLinkingLayer);
+ };
+ }
+ }
+
+ return Error::success();
+}
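+
+ // A minimal builder sketch (hypothetical client code):
+ //
+ //   auto J = cantFail(LLJITBuilder()
+ //                         .setNumCompileThreads(4)
+ //                         .create());
+ //
+ // With no explicit configuration, prepareForConstruction() detects the host
+ // target and, for MachO on arm64/x86-64, swaps in the JITLink-based
+ // ObjectLinkingLayer above.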
+
+LLJIT::~LLJIT() {
+ if (CompileThreads)
+ CompileThreads->wait();
+ if (auto Err = ES->endSession())
+ ES->reportError(std::move(Err));
+}
+
+Error LLJIT::addIRModule(ResourceTrackerSP RT, ThreadSafeModule TSM) {
+ assert(TSM && "Cannot add null module");
+
+ if (auto Err =
+ TSM.withModuleDo([&](Module &M) { return applyDataLayout(M); }))
+ return Err;
+
+ return InitHelperTransformLayer->add(std::move(RT), std::move(TSM));
+}
+
+Error LLJIT::addIRModule(JITDylib &JD, ThreadSafeModule TSM) {
+ return addIRModule(JD.getDefaultResourceTracker(), std::move(TSM));
+}
+
+Error LLJIT::addObjectFile(ResourceTrackerSP RT,
+ std::unique_ptr<MemoryBuffer> Obj) {
+ assert(Obj && "Cannot add null object");
+
+ return ObjTransformLayer->add(std::move(RT), std::move(Obj));
+}
+
+Error LLJIT::addObjectFile(JITDylib &JD, std::unique_ptr<MemoryBuffer> Obj) {
+ return addObjectFile(JD.getDefaultResourceTracker(), std::move(Obj));
+}
+
+Expected<JITEvaluatedSymbol> LLJIT::lookupLinkerMangled(JITDylib &JD,
+ SymbolStringPtr Name) {
+ return ES->lookup(
+ makeJITDylibSearchOrder(&JD, JITDylibLookupFlags::MatchAllSymbols), Name);
+}
+
+Expected<std::unique_ptr<ObjectLayer>>
+LLJIT::createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES) {
+
+ // If the config state provided an ObjectLinkingLayer factory then use it.
+ if (S.CreateObjectLinkingLayer)
+ return S.CreateObjectLinkingLayer(ES, S.JTMB->getTargetTriple());
+
+ // Otherwise default to creating an RTDyldObjectLinkingLayer that constructs
+ // a new SectionMemoryManager for each object.
+ auto GetMemMgr = []() { return std::make_unique<SectionMemoryManager>(); };
+ auto ObjLinkingLayer =
+ std::make_unique<RTDyldObjectLinkingLayer>(ES, std::move(GetMemMgr));
+
+ if (S.JTMB->getTargetTriple().isOSBinFormatCOFF()) {
+ ObjLinkingLayer->setOverrideObjectFlagsWithResponsibilityFlags(true);
+ ObjLinkingLayer->setAutoClaimResponsibilityForObjectSymbols(true);
+ }
+
+ // FIXME: Explicit conversion to std::unique_ptr<ObjectLayer> added to silence
+ // errors from some GCC / libstdc++ bots. Remove this conversion (i.e.
+ // just return ObjLinkingLayer) once those bots are upgraded.
+ return std::unique_ptr<ObjectLayer>(std::move(ObjLinkingLayer));
+}
+
+Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>
+LLJIT::createCompileFunction(LLJITBuilderState &S,
+ JITTargetMachineBuilder JTMB) {
+
+ // If there is a custom compile function creator set then use it.
+ if (S.CreateCompileFunction)
+ return S.CreateCompileFunction(std::move(JTMB));
+
+ // Otherwise default to creating a SimpleCompiler, or ConcurrentIRCompiler,
+ // depending on the number of threads requested.
+ if (S.NumCompileThreads > 0)
+ return std::make_unique<ConcurrentIRCompiler>(std::move(JTMB));
+
+ auto TM = JTMB.createTargetMachine();
+ if (!TM)
+ return TM.takeError();
+
+ return std::make_unique<TMOwningSimpleCompiler>(std::move(*TM));
+}
+
+LLJIT::LLJIT(LLJITBuilderState &S, Error &Err)
+ : ES(S.ES ? std::move(S.ES) : std::make_unique<ExecutionSession>()), Main(),
+ DL(""), TT(S.JTMB->getTargetTriple()) {
+
+ ErrorAsOutParameter _(&Err);
+
+ if (auto MainOrErr = this->ES->createJITDylib("main"))
+ Main = &*MainOrErr;
+ else {
+ Err = MainOrErr.takeError();
+ return;
+ }
+
+ if (S.DL)
+ DL = std::move(*S.DL);
+ else if (auto DLOrErr = S.JTMB->getDefaultDataLayoutForTarget())
+ DL = std::move(*DLOrErr);
+ else {
+ Err = DLOrErr.takeError();
+ return;
+ }
+
+ auto ObjLayer = createObjectLinkingLayer(S, *ES);
+ if (!ObjLayer) {
+ Err = ObjLayer.takeError();
+ return;
+ }
+ ObjLinkingLayer = std::move(*ObjLayer);
+ ObjTransformLayer =
+ std::make_unique<ObjectTransformLayer>(*ES, *ObjLinkingLayer);
+
+ {
+ auto CompileFunction = createCompileFunction(S, std::move(*S.JTMB));
+ if (!CompileFunction) {
+ Err = CompileFunction.takeError();
+ return;
+ }
+ CompileLayer = std::make_unique<IRCompileLayer>(
+ *ES, *ObjTransformLayer, std::move(*CompileFunction));
+ TransformLayer = std::make_unique<IRTransformLayer>(*ES, *CompileLayer);
+ InitHelperTransformLayer =
+ std::make_unique<IRTransformLayer>(*ES, *TransformLayer);
+ }
+
+ if (S.NumCompileThreads > 0) {
+ InitHelperTransformLayer->setCloneToNewContextOnEmit(true);
+ CompileThreads =
+ std::make_unique<ThreadPool>(hardware_concurrency(S.NumCompileThreads));
+ ES->setDispatchMaterialization(
+ [this](std::unique_ptr<MaterializationUnit> MU,
+ std::unique_ptr<MaterializationResponsibility> MR) {
+ // FIXME: We should be able to use move-capture here, but ThreadPool's
+ // AsyncTaskTys are std::functions rather than unique_functions
+ // (because MSVC's std::packaged_task doesn't support move-only types).
+ // Fix this when all the above gets sorted out.
+ CompileThreads->async(
+ [UnownedMU = MU.release(), UnownedMR = MR.release()]() mutable {
+ std::unique_ptr<MaterializationUnit> MU(UnownedMU);
+ std::unique_ptr<MaterializationResponsibility> MR(UnownedMR);
+ MU->materialize(std::move(MR));
+ });
+ });
+ }
+
+ if (S.SetUpPlatform)
+ Err = S.SetUpPlatform(*this);
+ else
+ setUpGenericLLVMIRPlatform(*this);
+}
+
+std::string LLJIT::mangle(StringRef UnmangledName) const {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, UnmangledName, DL);
+ }
+ return MangledName;
+}
+
+Error LLJIT::applyDataLayout(Module &M) {
+ if (M.getDataLayout().isDefault())
+ M.setDataLayout(DL);
+
+ if (M.getDataLayout() != DL)
+ return make_error<StringError>(
+ "Added modules have incompatible data layouts: " +
+ M.getDataLayout().getStringRepresentation() + " (module) vs " +
+ DL.getStringRepresentation() + " (jit)",
+ inconvertibleErrorCode());
+
+ return Error::success();
+}
+
+void setUpGenericLLVMIRPlatform(LLJIT &J) {
+ LLVM_DEBUG(
+ { dbgs() << "Setting up GenericLLVMIRPlatform support for LLJIT\n"; });
+ J.setPlatformSupport(std::make_unique<GenericLLVMIRPlatformSupport>(J));
+}
+
+Error setUpMachOPlatform(LLJIT &J) {
+ LLVM_DEBUG({ dbgs() << "Setting up MachOPlatform support for LLJIT\n"; });
+ auto MP = MachOPlatformSupport::Create(J, J.getMainJITDylib());
+ if (!MP)
+ return MP.takeError();
+ J.setPlatformSupport(std::move(*MP));
+ return Error::success();
+}
+
+Error LLLazyJITBuilderState::prepareForConstruction() {
+ if (auto Err = LLJITBuilderState::prepareForConstruction())
+ return Err;
+ TT = JTMB->getTargetTriple();
+ return Error::success();
+}
+
+Error LLLazyJIT::addLazyIRModule(JITDylib &JD, ThreadSafeModule TSM) {
+ assert(TSM && "Cannot add null module");
+
+ if (auto Err = TSM.withModuleDo(
+ [&](Module &M) -> Error { return applyDataLayout(M); }))
+ return Err;
+
+ return CODLayer->add(JD, std::move(TSM));
+}
+
+LLLazyJIT::LLLazyJIT(LLLazyJITBuilderState &S, Error &Err) : LLJIT(S, Err) {
+
+ // If LLJIT construction failed then bail out.
+ if (Err)
+ return;
+
+ ErrorAsOutParameter _(&Err);
+
+ // Take/Create the lazy-compile callthrough manager.
+ if (S.LCTMgr)
+ LCTMgr = std::move(S.LCTMgr);
+ else {
+ if (auto LCTMgrOrErr = createLocalLazyCallThroughManager(
+ S.TT, *ES, S.LazyCompileFailureAddr))
+ LCTMgr = std::move(*LCTMgrOrErr);
+ else {
+ Err = LCTMgrOrErr.takeError();
+ return;
+ }
+ }
+
+ // Take/Create the indirect stubs manager builder.
+ auto ISMBuilder = std::move(S.ISMBuilder);
+
+ // If none was provided, try to build one.
+ if (!ISMBuilder)
+ ISMBuilder = createLocalIndirectStubsManagerBuilder(S.TT);
+
+ // No luck. Bail out.
+ if (!ISMBuilder) {
+ Err = make_error<StringError>("Could not construct "
+ "IndirectStubsManagerBuilder for target " +
+ S.TT.str(),
+ inconvertibleErrorCode());
+ return;
+ }
+
+ // Create the COD layer.
+ CODLayer = std::make_unique<CompileOnDemandLayer>(
+ *ES, *InitHelperTransformLayer, *LCTMgr, std::move(ISMBuilder));
+
+ if (S.NumCompileThreads > 0)
+ CODLayer->setCloneToNewContextOnEmit(true);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Layer.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Layer.cpp
new file mode 100644
index 00000000000..5e27e343d23
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Layer.cpp
@@ -0,0 +1,212 @@
+//===-------------------- Layer.cpp - Layer interfaces --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Layer.h"
+
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+IRLayer::~IRLayer() {}
+
+Error IRLayer::add(ResourceTrackerSP RT, ThreadSafeModule TSM) {
+ assert(RT && "RT cannot be null");
+ auto &JD = RT->getJITDylib();
+ return JD.define(std::make_unique<BasicIRLayerMaterializationUnit>(
+ *this, *getManglingOptions(), std::move(TSM)),
+ std::move(RT));
+}
+
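+ // Builds the symbol table for the unit by scanning the module: globals
+ // that don't survive linking are skipped, emulated-TLS globals get
+ // __emutls_v. / __emutls_t. entries instead of their own names, and a
+ // unique init symbol is synthesized if the module has static initializers.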
+IRMaterializationUnit::IRMaterializationUnit(
+ ExecutionSession &ES, const IRSymbolMapper::ManglingOptions &MO,
+ ThreadSafeModule TSM)
+ : MaterializationUnit(SymbolFlagsMap(), nullptr), TSM(std::move(TSM)) {
+
+ assert(this->TSM && "Module must not be null");
+
+ MangleAndInterner Mangle(ES, this->TSM.getModuleUnlocked()->getDataLayout());
+ this->TSM.withModuleDo([&](Module &M) {
+ for (auto &G : M.global_values()) {
+ // Skip globals that don't generate symbols.
+
+ if (!G.hasName() || G.isDeclaration() || G.hasLocalLinkage() ||
+ G.hasAvailableExternallyLinkage() || G.hasAppendingLinkage())
+ continue;
+
+ // Thread locals generate different symbols depending on whether or not
+ // emulated TLS is enabled.
+ if (G.isThreadLocal() && MO.EmulatedTLS) {
+ auto &GV = cast<GlobalVariable>(G);
+
+ auto Flags = JITSymbolFlags::fromGlobalValue(GV);
+
+ auto EmuTLSV = Mangle(("__emutls_v." + GV.getName()).str());
+ SymbolFlags[EmuTLSV] = Flags;
+ SymbolToDefinition[EmuTLSV] = &GV;
+
+ // If this GV has a non-zero initializer we'll need to emit an
+ // __emutls.t symbol too.
+ if (GV.hasInitializer()) {
+ const auto *InitVal = GV.getInitializer();
+
+ // Skip zero-initializers.
+ if (isa<ConstantAggregateZero>(InitVal))
+ continue;
+ const auto *InitIntValue = dyn_cast<ConstantInt>(InitVal);
+ if (InitIntValue && InitIntValue->isZero())
+ continue;
+
+ auto EmuTLST = Mangle(("__emutls_t." + GV.getName()).str());
+ SymbolFlags[EmuTLST] = Flags;
+ }
+ continue;
+ }
+
+ // Otherwise we just need a normal linker mangling.
+ auto MangledName = Mangle(G.getName());
+ SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(G);
+ SymbolToDefinition[MangledName] = &G;
+ }
+
+ // If we need an init symbol for this module then create one.
+ if (!llvm::empty(getStaticInitGVs(M))) {
+ size_t Counter = 0;
+
+ do {
+ std::string InitSymbolName;
+ raw_string_ostream(InitSymbolName)
+ << "$." << M.getModuleIdentifier() << ".__inits." << Counter++;
+ InitSymbol = ES.intern(InitSymbolName);
+ } while (SymbolFlags.count(InitSymbol));
+
+ SymbolFlags[InitSymbol] = JITSymbolFlags::MaterializationSideEffectsOnly;
+ }
+ });
+}
+
+IRMaterializationUnit::IRMaterializationUnit(
+ ThreadSafeModule TSM, SymbolFlagsMap SymbolFlags,
+ SymbolStringPtr InitSymbol, SymbolNameToDefinitionMap SymbolToDefinition)
+ : MaterializationUnit(std::move(SymbolFlags), std::move(InitSymbol)),
+ TSM(std::move(TSM)), SymbolToDefinition(std::move(SymbolToDefinition)) {}
+
+StringRef IRMaterializationUnit::getName() const {
+ if (TSM)
+ return TSM.withModuleDo(
+ [](const Module &M) -> StringRef { return M.getModuleIdentifier(); });
+ return "<null module>";
+}
+
+void IRMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ LLVM_DEBUG(JD.getExecutionSession().runSessionLocked([&]() {
+ dbgs() << "In " << JD.getName() << " discarding " << *Name << " from MU@"
+ << this << " (" << getName() << ")\n";
+ }););
+
+ auto I = SymbolToDefinition.find(Name);
+ assert(I != SymbolToDefinition.end() &&
+ "Symbol not provided by this MU, or previously discarded");
+ assert(!I->second->isDeclaration() &&
+ "Discard should only apply to definitions");
+ I->second->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ SymbolToDefinition.erase(I);
+}
+
+BasicIRLayerMaterializationUnit::BasicIRLayerMaterializationUnit(
+ IRLayer &L, const IRSymbolMapper::ManglingOptions &MO, ThreadSafeModule TSM)
+ : IRMaterializationUnit(L.getExecutionSession(), MO, std::move(TSM)), L(L) {
+}
+
+void BasicIRLayerMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+
+ // Throw away the SymbolToDefinition map: it's not usable after we hand
+ // off the module.
+ SymbolToDefinition.clear();
+
+ // If cloneToNewContextOnEmit is set, clone the module now.
+ if (L.getCloneToNewContextOnEmit())
+ TSM = cloneToNewContext(TSM);
+
+#ifndef NDEBUG
+ auto &ES = R->getTargetJITDylib().getExecutionSession();
+ auto &N = R->getTargetJITDylib().getName();
+#endif // NDEBUG
+
+ LLVM_DEBUG(ES.runSessionLocked(
+ [&]() { dbgs() << "Emitting, for " << N << ", " << *this << "\n"; }););
+ L.emit(std::move(R), std::move(TSM));
+ LLVM_DEBUG(ES.runSessionLocked([&]() {
+ dbgs() << "Finished emitting, for " << N << ", " << *this << "\n";
+ }););
+}
+
+ObjectLayer::ObjectLayer(ExecutionSession &ES) : ES(ES) {}
+
+ObjectLayer::~ObjectLayer() {}
+
+Error ObjectLayer::add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O) {
+ assert(RT && "RT cannot be null");
+ auto ObjMU = BasicObjectLayerMaterializationUnit::Create(*this, std::move(O));
+ if (!ObjMU)
+ return ObjMU.takeError();
+ auto &JD = RT->getJITDylib();
+ return JD.define(std::move(*ObjMU), std::move(RT));
+}
+
+Expected<std::unique_ptr<BasicObjectLayerMaterializationUnit>>
+BasicObjectLayerMaterializationUnit::Create(ObjectLayer &L,
+ std::unique_ptr<MemoryBuffer> O) {
+ auto ObjSymInfo =
+ getObjectSymbolInfo(L.getExecutionSession(), O->getMemBufferRef());
+
+ if (!ObjSymInfo)
+ return ObjSymInfo.takeError();
+
+ auto &SymbolFlags = ObjSymInfo->first;
+ auto &InitSymbol = ObjSymInfo->second;
+
+ return std::unique_ptr<BasicObjectLayerMaterializationUnit>(
+ new BasicObjectLayerMaterializationUnit(
+ L, std::move(O), std::move(SymbolFlags), std::move(InitSymbol)));
+}
+
+BasicObjectLayerMaterializationUnit::BasicObjectLayerMaterializationUnit(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> O, SymbolFlagsMap SymbolFlags,
+ SymbolStringPtr InitSymbol)
+ : MaterializationUnit(std::move(SymbolFlags), std::move(InitSymbol)), L(L),
+ O(std::move(O)) {}
+
+StringRef BasicObjectLayerMaterializationUnit::getName() const {
+ if (O)
+ return O->getBufferIdentifier();
+ return "<null object>";
+}
+
+void BasicObjectLayerMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+ L.emit(std::move(R), std::move(O));
+}
+
+void BasicObjectLayerMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ // This is a no-op for object files: Having removed 'Name' from SymbolFlags
+ // the symbol will be dead-stripped by the JIT linker.
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/LazyReexports.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/LazyReexports.cpp
new file mode 100644
index 00000000000..e1f494415e8
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/LazyReexports.cpp
@@ -0,0 +1,234 @@
+//===---------- LazyReexports.cpp - Utilities for lazy reexports ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LazyReexports.h"
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+LazyCallThroughManager::LazyCallThroughManager(
+ ExecutionSession &ES, JITTargetAddress ErrorHandlerAddr, TrampolinePool *TP)
+ : ES(ES), ErrorHandlerAddr(ErrorHandlerAddr), TP(TP) {}
+
+Expected<JITTargetAddress> LazyCallThroughManager::getCallThroughTrampoline(
+ JITDylib &SourceJD, SymbolStringPtr SymbolName,
+ NotifyResolvedFunction NotifyResolved) {
+ assert(TP && "TrampolinePool not set");
+
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto Trampoline = TP->getTrampoline();
+
+ if (!Trampoline)
+ return Trampoline.takeError();
+
+ Reexports[*Trampoline] = ReexportsEntry{&SourceJD, std::move(SymbolName)};
+ Notifiers[*Trampoline] = std::move(NotifyResolved);
+ return *Trampoline;
+}
+
+JITTargetAddress LazyCallThroughManager::reportCallThroughError(Error Err) {
+ ES.reportError(std::move(Err));
+ return ErrorHandlerAddr;
+}
+
+Expected<LazyCallThroughManager::ReexportsEntry>
+LazyCallThroughManager::findReexport(JITTargetAddress TrampolineAddr) {
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto I = Reexports.find(TrampolineAddr);
+ if (I == Reexports.end())
+ return createStringError(inconvertibleErrorCode(),
+ "Missing reexport for trampoline address %llx",
+ static_cast<unsigned long long>(TrampolineAddr));
+ return I->second;
+}
+
+Error LazyCallThroughManager::notifyResolved(JITTargetAddress TrampolineAddr,
+ JITTargetAddress ResolvedAddr) {
+ NotifyResolvedFunction NotifyResolved;
+ {
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto I = Notifiers.find(TrampolineAddr);
+ if (I != Notifiers.end()) {
+ NotifyResolved = std::move(I->second);
+ Notifiers.erase(I);
+ }
+ }
+
+ return NotifyResolved ? NotifyResolved(ResolvedAddr) : Error::success();
+}
+
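+ // Called (via a trampoline) on the first execution of a lazy stub: looks
+ // up the real implementation in the source JITDylib, lets the registered
+ // notifier update the stub pointer, then hands the landing address back so
+ // the original call can proceed.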
+void LazyCallThroughManager::resolveTrampolineLandingAddress(
+ JITTargetAddress TrampolineAddr,
+ NotifyLandingResolvedFunction NotifyLandingResolved) {
+
+ auto Entry = findReexport(TrampolineAddr);
+ if (!Entry)
+ return NotifyLandingResolved(reportCallThroughError(Entry.takeError()));
+
+ // Declaring SLS and the callback outside of the call to ES.lookup is a
+ // workaround to fix build failures on AIX and on z/OS platforms.
+ SymbolLookupSet SLS({Entry->SymbolName});
+ auto Callback = [this, TrampolineAddr, SymbolName = Entry->SymbolName,
+ NotifyLandingResolved = std::move(NotifyLandingResolved)](
+ Expected<SymbolMap> Result) mutable {
+ if (Result) {
+ assert(Result->size() == 1 && "Unexpected result size");
+ assert(Result->count(SymbolName) && "Unexpected result value");
+ JITTargetAddress LandingAddr = (*Result)[SymbolName].getAddress();
+
+ if (auto Err = notifyResolved(TrampolineAddr, LandingAddr))
+ NotifyLandingResolved(reportCallThroughError(std::move(Err)));
+ else
+ NotifyLandingResolved(LandingAddr);
+ } else {
+ NotifyLandingResolved(reportCallThroughError(Result.takeError()));
+ }
+ };
+
+ ES.lookup(LookupKind::Static,
+ makeJITDylibSearchOrder(Entry->SourceJD,
+ JITDylibLookupFlags::MatchAllSymbols),
+ std::move(SLS), SymbolState::Ready, std::move(Callback),
+ NoDependenciesToRegister);
+}
+
+Expected<std::unique_ptr<LazyCallThroughManager>>
+createLocalLazyCallThroughManager(const Triple &T, ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddr) {
+ switch (T.getArch()) {
+ default:
+ return make_error<StringError>(
+ std::string("No callback manager available for ") + T.str(),
+ inconvertibleErrorCode());
+
+ case Triple::aarch64:
+ case Triple::aarch64_32:
+ return LocalLazyCallThroughManager::Create<OrcAArch64>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::x86:
+ return LocalLazyCallThroughManager::Create<OrcI386>(ES, ErrorHandlerAddr);
+
+ case Triple::mips:
+ return LocalLazyCallThroughManager::Create<OrcMips32Be>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::mipsel:
+ return LocalLazyCallThroughManager::Create<OrcMips32Le>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ return LocalLazyCallThroughManager::Create<OrcMips64>(ES, ErrorHandlerAddr);
+
+ case Triple::x86_64:
+ if (T.getOS() == Triple::OSType::Win32)
+ return LocalLazyCallThroughManager::Create<OrcX86_64_Win32>(
+ ES, ErrorHandlerAddr);
+ else
+ return LocalLazyCallThroughManager::Create<OrcX86_64_SysV>(
+ ES, ErrorHandlerAddr);
+ }
+}
+
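+ // Hypothetical usage sketch -- lazy reexports are normally created through
+ // the lazyReexports() helper (used in materialize() below), along the lines
+ // of:
+ //
+ //   cantFail(JD.define(lazyReexports(LCTM, ISM, ImplJD,
+ //       {{Mangle("foo"), {Mangle("foo_body"), Flags}}})));
+ //
+ // where LCTM, ISM, ImplJD, Mangle and Flags stand in for client-side
+ // values. Each alias materializes as a stub that triggers compilation of
+ // its aliasee on first call.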
+LazyReexportsMaterializationUnit::LazyReexportsMaterializationUnit(
+ LazyCallThroughManager &LCTManager, IndirectStubsManager &ISManager,
+ JITDylib &SourceJD, SymbolAliasMap CallableAliases, ImplSymbolMap *SrcJDLoc)
+ : MaterializationUnit(extractFlags(CallableAliases), nullptr),
+ LCTManager(LCTManager), ISManager(ISManager), SourceJD(SourceJD),
+ CallableAliases(std::move(CallableAliases)), AliaseeTable(SrcJDLoc) {}
+
+StringRef LazyReexportsMaterializationUnit::getName() const {
+ return "<Lazy Reexports>";
+}
+
+void LazyReexportsMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+ auto RequestedSymbols = R->getRequestedSymbols();
+
+ SymbolAliasMap RequestedAliases;
+ for (auto &RequestedSymbol : RequestedSymbols) {
+ auto I = CallableAliases.find(RequestedSymbol);
+ assert(I != CallableAliases.end() && "Symbol not found in alias map?");
+ RequestedAliases[I->first] = std::move(I->second);
+ CallableAliases.erase(I);
+ }
+
+ if (!CallableAliases.empty())
+ if (auto Err = R->replace(lazyReexports(LCTManager, ISManager, SourceJD,
+ std::move(CallableAliases),
+ AliaseeTable))) {
+ R->getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+
+ IndirectStubsManager::StubInitsMap StubInits;
+ for (auto &Alias : RequestedAliases) {
+
+ auto CallThroughTrampoline = LCTManager.getCallThroughTrampoline(
+ SourceJD, Alias.second.Aliasee,
+ [&ISManager = this->ISManager,
+ StubSym = Alias.first](JITTargetAddress ResolvedAddr) -> Error {
+ return ISManager.updatePointer(*StubSym, ResolvedAddr);
+ });
+
+ if (!CallThroughTrampoline) {
+ SourceJD.getExecutionSession().reportError(
+ CallThroughTrampoline.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ StubInits[*Alias.first] =
+ std::make_pair(*CallThroughTrampoline, Alias.second.AliasFlags);
+ }
+
+ if (AliaseeTable != nullptr && !RequestedAliases.empty())
+ AliaseeTable->trackImpls(RequestedAliases, &SourceJD);
+
+ if (auto Err = ISManager.createStubs(StubInits)) {
+ SourceJD.getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+
+ SymbolMap Stubs;
+ for (auto &Alias : RequestedAliases)
+ Stubs[Alias.first] = ISManager.findStub(*Alias.first, false);
+
+ // No registered dependencies, so these calls cannot fail.
+ cantFail(R->notifyResolved(Stubs));
+ cantFail(R->notifyEmitted());
+}
+
+void LazyReexportsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(CallableAliases.count(Name) &&
+ "Symbol not covered by this MaterializationUnit");
+ CallableAliases.erase(Name);
+}
+
+SymbolFlagsMap
+LazyReexportsMaterializationUnit::extractFlags(const SymbolAliasMap &Aliases) {
+ SymbolFlagsMap SymbolFlags;
+ for (auto &KV : Aliases) {
+ assert(KV.second.AliasFlags.isCallable() &&
+ "Lazy re-exports must be callable symbols");
+ SymbolFlags[KV.first] = KV.second.AliasFlags;
+ }
+ return SymbolFlags;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/MachOPlatform.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/MachOPlatform.cpp
new file mode 100644
index 00000000000..17b9465a054
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/MachOPlatform.cpp
@@ -0,0 +1,489 @@
+//===------ MachOPlatform.cpp - Utilities for executing MachO in Orc ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/MachOPlatform.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace {
+
+struct objc_class;
+struct objc_image_info;
+struct objc_object;
+struct objc_selector;
+
+using Class = objc_class *;
+using id = objc_object *;
+using SEL = objc_selector *;
+
+using ObjCMsgSendTy = id (*)(id, SEL, ...);
+using ObjCReadClassPairTy = Class (*)(Class, const objc_image_info *);
+using SelRegisterNameTy = SEL (*)(const char *);
+
+enum class ObjCRegistrationAPI { Uninitialized, Unavailable, Initialized };
+
+ObjCRegistrationAPI ObjCRegistrationAPIState =
+ ObjCRegistrationAPI::Uninitialized;
+ObjCMsgSendTy objc_msgSend = nullptr;
+ObjCReadClassPairTy objc_readClassPair = nullptr;
+SelRegisterNameTy sel_registerName = nullptr;
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+template <typename FnTy>
+static Error setUpObjCRegAPIFunc(FnTy &Target, sys::DynamicLibrary &LibObjC,
+ const char *Name) {
+ if (void *Addr = LibObjC.getAddressOfSymbol(Name))
+ Target = reinterpret_cast<FnTy>(Addr);
+ else
+ return make_error<StringError>(
+ (Twine("Could not find address for ") + Name).str(),
+ inconvertibleErrorCode());
+ return Error::success();
+}
+
+Error enableObjCRegistration(const char *PathToLibObjC) {
+ // If we've already tried to initialize then just bail out.
+ if (ObjCRegistrationAPIState != ObjCRegistrationAPI::Uninitialized)
+ return Error::success();
+
+ ObjCRegistrationAPIState = ObjCRegistrationAPI::Unavailable;
+
+ std::string ErrMsg;
+ auto LibObjC =
+ sys::DynamicLibrary::getPermanentLibrary(PathToLibObjC, &ErrMsg);
+
+ if (!LibObjC.isValid())
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+
+ if (auto Err = setUpObjCRegAPIFunc(objc_msgSend, LibObjC, "objc_msgSend"))
+ return Err;
+ if (auto Err = setUpObjCRegAPIFunc(objc_readClassPair, LibObjC,
+ "objc_readClassPair"))
+ return Err;
+ if (auto Err =
+ setUpObjCRegAPIFunc(sel_registerName, LibObjC, "sel_registerName"))
+ return Err;
+
+ ObjCRegistrationAPIState = ObjCRegistrationAPI::Initialized;
+ return Error::success();
+}
+
+bool objCRegistrationEnabled() {
+ return ObjCRegistrationAPIState == ObjCRegistrationAPI::Initialized;
+}
+
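+ // Walks every recorded __mod_init_func extent and calls each initializer
+ // pointer in it. An extent is an (address, pointer count) pair captured at
+ // link time by the InitScraperPlugin.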
+void MachOJITDylibInitializers::runModInits() const {
+ for (const auto &ModInit : ModInitSections) {
+ for (uint64_t I = 0; I != ModInit.NumPtrs; ++I) {
+ auto *InitializerAddr = jitTargetAddressToPointer<uintptr_t *>(
+ ModInit.Address + (I * sizeof(uintptr_t)));
+ auto *Initializer =
+ jitTargetAddressToFunction<void (*)()>(*InitializerAddr);
+ Initializer();
+ }
+ }
+}
+
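+ // Each __objc_selrefs entry initially holds a pointer to a selector name
+ // string; registration replaces it in place with the SEL returned by
+ // sel_registerName.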
+void MachOJITDylibInitializers::registerObjCSelectors() const {
+ assert(objCRegistrationEnabled() && "ObjC registration not enabled.");
+
+ for (const auto &ObjCSelRefs : ObjCSelRefsSections) {
+ for (uint64_t I = 0; I != ObjCSelRefs.NumPtrs; ++I) {
+ auto SelEntryAddr = ObjCSelRefs.Address + (I * sizeof(uintptr_t));
+ const auto *SelName =
+ *jitTargetAddressToPointer<const char **>(SelEntryAddr);
+ auto Sel = sel_registerName(SelName);
+ *jitTargetAddressToPointer<SEL *>(SelEntryAddr) = Sel;
+ }
+ }
+}
+
+Error MachOJITDylibInitializers::registerObjCClasses() const {
+ assert(objCRegistrationEnabled() && "ObjC registration not enabled.");
+
+ struct ObjCClassCompiled {
+ void *Metaclass;
+ void *Parent;
+ void *Cache1;
+ void *Cache2;
+ void *Data;
+ };
+
+ auto *ImageInfo =
+ jitTargetAddressToPointer<const objc_image_info *>(ObjCImageInfoAddr);
+ auto ClassSelector = sel_registerName("class");
+
+ for (const auto &ObjCClassList : ObjCClassListSections) {
+ for (uint64_t I = 0; I != ObjCClassList.NumPtrs; ++I) {
+ auto ClassPtrAddr = ObjCClassList.Address + (I * sizeof(uintptr_t));
+ auto Cls = *jitTargetAddressToPointer<Class *>(ClassPtrAddr);
+ auto *ClassCompiled =
+ *jitTargetAddressToPointer<ObjCClassCompiled **>(ClassPtrAddr);
+ objc_msgSend(reinterpret_cast<id>(ClassCompiled->Parent), ClassSelector);
+ auto Registered = objc_readClassPair(Cls, ImageInfo);
+
+ // FIXME: Improve diagnostic by reporting the failed class's name.
+ if (Registered != Cls)
+ return make_error<StringError>("Unable to register Objective-C class",
+ inconvertibleErrorCode());
+ }
+ }
+ return Error::success();
+}
+
+MachOPlatform::MachOPlatform(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ std::unique_ptr<MemoryBuffer> StandardSymbolsObject)
+ : ES(ES), ObjLinkingLayer(ObjLinkingLayer),
+ StandardSymbolsObject(std::move(StandardSymbolsObject)) {
+ ObjLinkingLayer.addPlugin(std::make_unique<InitScraperPlugin>(*this));
+}
+
+Error MachOPlatform::setupJITDylib(JITDylib &JD) {
+ auto ObjBuffer = MemoryBuffer::getMemBuffer(
+ StandardSymbolsObject->getMemBufferRef(), false);
+ return ObjLinkingLayer.add(JD, std::move(ObjBuffer));
+}
+
+Error MachOPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ const auto &InitSym = MU.getInitializerSymbol();
+ if (!InitSym)
+ return Error::success();
+
+ RegisteredInitSymbols[&JD].add(InitSym,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Registered init symbol " << *InitSym << " for MU "
+ << MU.getName() << "\n";
+ });
+ return Error::success();
+}
+
+Error MachOPlatform::notifyRemoving(ResourceTracker &RT) {
+ llvm_unreachable("Not supported yet");
+}
+
+Expected<MachOPlatform::InitializerSequence>
+MachOPlatform::getInitializerSequence(JITDylib &JD) {
+
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Building initializer sequence for "
+ << JD.getName() << "\n";
+ });
+
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ while (true) {
+
+ DenseMap<JITDylib *, SymbolLookupSet> NewInitSymbols;
+
+ ES.runSessionLocked([&]() {
+ DFSLinkOrder = JD.getDFSLinkOrder();
+
+ for (auto &InitJD : DFSLinkOrder) {
+ auto RISItr = RegisteredInitSymbols.find(InitJD.get());
+ if (RISItr != RegisteredInitSymbols.end()) {
+ NewInitSymbols[InitJD.get()] = std::move(RISItr->second);
+ RegisteredInitSymbols.erase(RISItr);
+ }
+ }
+ });
+
+ if (NewInitSymbols.empty())
+ break;
+
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Issuing lookups for new init symbols: "
+ "(lookup may require multiple rounds)\n";
+ for (auto &KV : NewInitSymbols)
+ dbgs() << " \"" << KV.first->getName() << "\": " << KV.second << "\n";
+ });
+
+ // Outside the lock, issue the lookup. The result values are discarded:
+ // the lookup is only needed to force materialization of the init symbols.
+ auto R = lookupInitSymbols(JD.getExecutionSession(), NewInitSymbols);
+ if (!R)
+ return R.takeError();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Init symbol lookup complete, building init "
+ "sequence\n";
+ });
+
+ // Lock again to collect the initializers.
+ InitializerSequence FullInitSeq;
+ {
+ std::lock_guard<std::mutex> Lock(InitSeqsMutex);
+ for (auto &InitJD : reverse(DFSLinkOrder)) {
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Appending inits for \"" << InitJD->getName()
+ << "\" to sequence\n";
+ });
+ auto ISItr = InitSeqs.find(InitJD.get());
+ if (ISItr != InitSeqs.end()) {
+ FullInitSeq.emplace_back(InitJD.get(), std::move(ISItr->second));
+ InitSeqs.erase(ISItr);
+ }
+ }
+ }
+
+ return FullInitSeq;
+}
+
+Expected<MachOPlatform::DeinitializerSequence>
+MachOPlatform::getDeinitializerSequence(JITDylib &JD) {
+ std::vector<JITDylibSP> DFSLinkOrder = JD.getDFSLinkOrder();
+
+ DeinitializerSequence FullDeinitSeq;
+ {
+ std::lock_guard<std::mutex> Lock(InitSeqsMutex);
+ for (auto &DeinitJD : DFSLinkOrder) {
+ FullDeinitSeq.emplace_back(DeinitJD.get(), MachOJITDylibDeinitializers());
+ }
+ }
+
+ return FullDeinitSeq;
+}
+
+void MachOPlatform::registerInitInfo(
+ JITDylib &JD, JITTargetAddress ObjCImageInfoAddr,
+ MachOJITDylibInitializers::SectionExtent ModInits,
+ MachOJITDylibInitializers::SectionExtent ObjCSelRefs,
+ MachOJITDylibInitializers::SectionExtent ObjCClassList) {
+ std::lock_guard<std::mutex> Lock(InitSeqsMutex);
+
+ auto &InitSeq = InitSeqs[&JD];
+
+ InitSeq.setObjCImageInfoAddr(ObjCImageInfoAddr);
+
+ if (ModInits.Address)
+ InitSeq.addModInitsSection(std::move(ModInits));
+
+ if (ObjCSelRefs.Address)
+ InitSeq.addObjCSelRefsSection(std::move(ObjCSelRefs));
+
+ if (ObjCClassList.Address)
+ InitSeq.addObjCClassListSection(std::move(ObjCClassList));
+}
+
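+ // Returns the (start address, pointer count) extent of the named section,
+ // or an empty extent if the section is absent from the graph.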
+static Expected<MachOJITDylibInitializers::SectionExtent>
+getSectionExtent(jitlink::LinkGraph &G, StringRef SectionName) {
+ auto *Sec = G.findSectionByName(SectionName);
+ if (!Sec)
+ return MachOJITDylibInitializers::SectionExtent();
+ jitlink::SectionRange R(*Sec);
+ if (R.getSize() % G.getPointerSize() != 0)
+ return make_error<StringError>(SectionName + " section size is not a "
+ "multiple of the pointer size",
+ inconvertibleErrorCode());
+ return MachOJITDylibInitializers::SectionExtent(
+ R.getStart(), R.getSize() / G.getPointerSize());
+}
+
+void MachOPlatform::InitScraperPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, const Triple &TT,
+ jitlink::PassConfiguration &Config) {
+
+ if (!MR.getInitializerSymbol())
+ return;
+
+ Config.PrePrunePasses.push_back([this, &MR](jitlink::LinkGraph &G) -> Error {
+ JITLinkSymbolVector InitSectionSymbols;
+ preserveInitSectionIfPresent(InitSectionSymbols, G, "__mod_init_func");
+ preserveInitSectionIfPresent(InitSectionSymbols, G, "__objc_selrefs");
+ preserveInitSectionIfPresent(InitSectionSymbols, G, "__objc_classlist");
+
+ if (!InitSectionSymbols.empty()) {
+ std::lock_guard<std::mutex> Lock(InitScraperMutex);
+ InitSymbolDeps[&MR] = std::move(InitSectionSymbols);
+ }
+
+ if (auto Err = processObjCImageInfo(G, MR))
+ return Err;
+
+ return Error::success();
+ });
+
+ Config.PostFixupPasses.push_back([this, &JD = MR.getTargetJITDylib()](
+ jitlink::LinkGraph &G) -> Error {
+ MachOJITDylibInitializers::SectionExtent ModInits, ObjCSelRefs,
+ ObjCClassList;
+
+ JITTargetAddress ObjCImageInfoAddr = 0;
+ if (auto *ObjCImageInfoSec = G.findSectionByName("__objc_image_info")) {
+ if (auto Addr = jitlink::SectionRange(*ObjCImageInfoSec).getStart())
+ ObjCImageInfoAddr = Addr;
+ }
+
+ // Record __mod_init_func.
+ if (auto ModInitsOrErr = getSectionExtent(G, "__mod_init_func"))
+ ModInits = std::move(*ModInitsOrErr);
+ else
+ return ModInitsOrErr.takeError();
+
+ // Record __objc_selrefs.
+ if (auto ObjCSelRefsOrErr = getSectionExtent(G, "__objc_selrefs"))
+ ObjCSelRefs = std::move(*ObjCSelRefsOrErr);
+ else
+ return ObjCSelRefsOrErr.takeError();
+
+ // Record __objc_classlist.
+ if (auto ObjCClassListOrErr = getSectionExtent(G, "__objc_classlist"))
+ ObjCClassList = std::move(*ObjCClassListOrErr);
+ else
+ return ObjCClassListOrErr.takeError();
+
+ // Dump the scraped inits.
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Scraped " << G.getName() << " init sections:\n";
+ dbgs() << " __objc_selrefs: ";
+ if (ObjCSelRefs.NumPtrs)
+ dbgs() << ObjCSelRefs.NumPtrs << " pointer(s) at "
+ << formatv("{0:x16}", ObjCSelRefs.Address) << "\n";
+ else
+ dbgs() << "none\n";
+
+ dbgs() << " __objc_classlist: ";
+ if (ObjCClassList.NumPtrs)
+ dbgs() << ObjCClassList.NumPtrs << " pointer(s) at "
+ << formatv("{0:x16}", ObjCClassList.Address) << "\n";
+ else
+ dbgs() << "none\n";
+
+ dbgs() << " __mod_init_func: ";
+ if (ModInits.NumPtrs)
+ dbgs() << ModInits.NumPtrs << " pointer(s) at "
+ << formatv("{0:x16}", ModInits.Address) << "\n";
+ else
+ dbgs() << "none\n";
+ });
+
+ MP.registerInitInfo(JD, ObjCImageInfoAddr, std::move(ModInits),
+ std::move(ObjCSelRefs), std::move(ObjCClassList));
+
+ return Error::success();
+ });
+}
+
+ObjectLinkingLayer::Plugin::LocalDependenciesMap
+MachOPlatform::InitScraperPlugin::getSyntheticSymbolLocalDependencies(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(InitScraperMutex);
+ auto I = InitSymbolDeps.find(&MR);
+ if (I != InitSymbolDeps.end()) {
+ LocalDependenciesMap Result;
+ Result[MR.getInitializerSymbol()] = std::move(I->second);
+ InitSymbolDeps.erase(&MR);
+ return Result;
+ }
+ return LocalDependenciesMap();
+}
+
+void MachOPlatform::InitScraperPlugin::preserveInitSectionIfPresent(
+ JITLinkSymbolVector &Symbols, jitlink::LinkGraph &G,
+ StringRef SectionName) {
+ if (auto *Sec = G.findSectionByName(SectionName)) {
+ auto SecBlocks = Sec->blocks();
+ if (!llvm::empty(SecBlocks))
+ Symbols.push_back(
+ &G.addAnonymousSymbol(**SecBlocks.begin(), 0, 0, false, true));
+ }
+}
+
+Error MachOPlatform::InitScraperPlugin::processObjCImageInfo(
+ jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+
+ // If there's an ObjC image info section then either
+ // (1) It's the first __objc_imageinfo we've seen in this JITDylib. In
+ // this case we record it.
+ // OR
+ // (2) We already have a recorded __objc_imageinfo for this JITDylib,
+ // in which case we just verify it.
+ auto *ObjCImageInfo = G.findSectionByName("__objc_imageinfo");
+ if (!ObjCImageInfo)
+ return Error::success();
+
+ auto ObjCImageInfoBlocks = ObjCImageInfo->blocks();
+
+ // Check that the section is not empty if present.
+ if (llvm::empty(ObjCImageInfoBlocks))
+ return make_error<StringError>("Empty __objc_imageinfo section in " +
+ G.getName(),
+ inconvertibleErrorCode());
+
+ // Check that there's only one block in the section.
+ if (std::next(ObjCImageInfoBlocks.begin()) != ObjCImageInfoBlocks.end())
+ return make_error<StringError>("Multiple blocks in __objc_imageinfo "
+ "section in " +
+ G.getName(),
+ inconvertibleErrorCode());
+
+ // Check that the __objc_imageinfo section is unreferenced.
+ // FIXME: We could optimize this check if Symbols had a ref-count.
+ for (auto &Sec : G.sections()) {
+ if (&Sec != ObjCImageInfo)
+ for (auto *B : Sec.blocks())
+ for (auto &E : B->edges())
+ if (E.getTarget().isDefined() &&
+ &E.getTarget().getBlock().getSection() == ObjCImageInfo)
+ return make_error<StringError>("__objc_imageinfo is referenced "
+ "within file " +
+ G.getName(),
+ inconvertibleErrorCode());
+ }
+
+ auto &ObjCImageInfoBlock = **ObjCImageInfoBlocks.begin();
+ auto *ObjCImageInfoData = ObjCImageInfoBlock.getContent().data();
+ auto Version = support::endian::read32(ObjCImageInfoData, G.getEndianness());
+ auto Flags =
+ support::endian::read32(ObjCImageInfoData + 4, G.getEndianness());
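+
+ // Note: an __objc_imageinfo payload is two 32-bit words -- version, then
+ // flags -- which is exactly what the two reads above decode.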
+
+ // Lock the mutex while we verify / update the ObjCImageInfos map.
+ std::lock_guard<std::mutex> Lock(InitScraperMutex);
+
+ auto ObjCImageInfoItr = ObjCImageInfos.find(&MR.getTargetJITDylib());
+ if (ObjCImageInfoItr != ObjCImageInfos.end()) {
+ // We've already registered an __objc_imageinfo section. Verify the
+ // content of this new section matches, then delete it.
+ if (ObjCImageInfoItr->second.first != Version)
+ return make_error<StringError>(
+ "ObjC version in " + G.getName() +
+ " does not match first registered version",
+ inconvertibleErrorCode());
+ if (ObjCImageInfoItr->second.second != Flags)
+ return make_error<StringError>("ObjC flags in " + G.getName() +
+ " do not match first registered flags",
+ inconvertibleErrorCode());
+
+ // __objc_imageinfo is valid. Delete the block.
+ for (auto *S : ObjCImageInfo->symbols())
+ G.removeDefinedSymbol(*S);
+ G.removeBlock(ObjCImageInfoBlock);
+ } else {
+ // We haven't registered an __objc_imageinfo section yet. Register and
+ // move on. The section should already be marked no-dead-strip.
+ ObjCImageInfos[&MR.getTargetJITDylib()] = std::make_pair(Version, Flags);
+ }
+
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Mangling.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Mangling.cpp
new file mode 100644
index 00000000000..606304741cf
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Mangling.cpp
@@ -0,0 +1,160 @@
+//===----------- Mangling.cpp -- Name Mangling Utilities for ORC ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Mangling.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+MangleAndInterner::MangleAndInterner(ExecutionSession &ES, const DataLayout &DL)
+ : ES(ES), DL(DL) {}
+
+SymbolStringPtr MangleAndInterner::operator()(StringRef Name) {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, Name, DL);
+ }
+ return ES.intern(MangledName);
+}
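+
+// Illustrative usage (not part of the upstream sources): with a MachO-style
+// DataLayout whose global prefix is '_', the call below interns "_main".
+//
+//   MangleAndInterner Mangle(ES, DL);
+//   SymbolStringPtr Main = Mangle("main");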
+
+void IRSymbolMapper::add(ExecutionSession &ES, const ManglingOptions &MO,
+ ArrayRef<GlobalValue *> GVs,
+ SymbolFlagsMap &SymbolFlags,
+ SymbolNameToDefinitionMap *SymbolToDefinition) {
+ if (GVs.empty())
+ return;
+
+ MangleAndInterner Mangle(ES, GVs[0]->getParent()->getDataLayout());
+ for (auto *G : GVs) {
+ assert(G && "GVs cannot contain null elements");
+ if (!G->hasName() || G->isDeclaration() || G->hasLocalLinkage() ||
+ G->hasAvailableExternallyLinkage() || G->hasAppendingLinkage())
+ continue;
+
+ if (G->isThreadLocal() && MO.EmulatedTLS) {
+ auto *GV = cast<GlobalVariable>(G);
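+
+ // Under emulated TLS the compiler lowers each TLS variable to a
+ // "__emutls_v.<name>" control variable, plus a "__emutls_t.<name>"
+ // initial-value template when there is a non-zero initializer. The
+ // mangling below mirrors that naming so lookups resolve correctly.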
+
+ auto Flags = JITSymbolFlags::fromGlobalValue(*GV);
+
+ auto EmuTLSV = Mangle(("__emutls_v." + GV->getName()).str());
+ SymbolFlags[EmuTLSV] = Flags;
+ if (SymbolToDefinition)
+ (*SymbolToDefinition)[EmuTLSV] = GV;
+
+ // If this GV has a non-zero initializer we'll need to emit an
+ // __emutls_t symbol too.
+ if (GV->hasInitializer()) {
+ const auto *InitVal = GV->getInitializer();
+
+ // Skip zero-initializers.
+ if (isa<ConstantAggregateZero>(InitVal))
+ continue;
+ const auto *InitIntValue = dyn_cast<ConstantInt>(InitVal);
+ if (InitIntValue && InitIntValue->isZero())
+ continue;
+
+ auto EmuTLST = Mangle(("__emutls_t." + GV->getName()).str());
+ SymbolFlags[EmuTLST] = Flags;
+ if (SymbolToDefinition)
+ (*SymbolToDefinition)[EmuTLST] = GV;
+ }
+ continue;
+ }
+
+ // Otherwise we just need a normal linker mangling.
+ auto MangledName = Mangle(G->getName());
+ SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(*G);
+ if (SymbolToDefinition)
+ (*SymbolToDefinition)[MangledName] = G;
+ }
+}
+
+Expected<std::pair<SymbolFlagsMap, SymbolStringPtr>>
+getObjectSymbolInfo(ExecutionSession &ES, MemoryBufferRef ObjBuffer) {
+ auto Obj = object::ObjectFile::createObjectFile(ObjBuffer);
+
+ if (!Obj)
+ return Obj.takeError();
+
+ bool IsMachO = isa<object::MachOObjectFile>(Obj->get());
+
+ SymbolFlagsMap SymbolFlags;
+ for (auto &Sym : (*Obj)->symbols()) {
+ Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+ if (!SymFlagsOrErr)
+ // TODO: Test this error.
+ return SymFlagsOrErr.takeError();
+
+ // Skip symbols not defined in this object file.
+ if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+ continue;
+
+ // Skip symbols that are not global.
+ if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
+ continue;
+
+ // Skip symbols that have type SF_File.
+ if (auto SymType = Sym.getType()) {
+ if (*SymType == object::SymbolRef::ST_File)
+ continue;
+ } else
+ return SymType.takeError();
+
+ auto Name = Sym.getName();
+ if (!Name)
+ return Name.takeError();
+ auto InternedName = ES.intern(*Name);
+ auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+ if (!SymFlags)
+ return SymFlags.takeError();
+
+ // Strip the 'exported' flag from MachO linker-private symbols.
+ if (IsMachO && Name->startswith("l"))
+ *SymFlags &= ~JITSymbolFlags::Exported;
+
+ SymbolFlags[InternedName] = std::move(*SymFlags);
+ }
+
+ SymbolStringPtr InitSymbol;
+
+ if (IsMachO) {
+ auto &MachOObj = cast<object::MachOObjectFile>(*Obj->get());
+ for (auto &Sec : MachOObj.sections()) {
+ auto SecType = MachOObj.getSectionType(Sec);
+ if ((SecType & MachO::SECTION_TYPE) == MachO::S_MOD_INIT_FUNC_POINTERS) {
+ size_t Counter = 0;
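+ // Synthesize a unique init-symbol name of the form
+ // "$.<buffer-id>.__inits.<N>", bumping N until it does not collide
+ // with an existing symbol.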
+ while (true) {
+ std::string InitSymString;
+ raw_string_ostream(InitSymString)
+ << "$." << ObjBuffer.getBufferIdentifier() << ".__inits."
+ << Counter++;
+ InitSymbol = ES.intern(InitSymString);
+ if (SymbolFlags.count(InitSymbol))
+ continue;
+ SymbolFlags[InitSymbol] =
+ JITSymbolFlags::MaterializationSideEffectsOnly;
+ break;
+ }
+ break;
+ }
+ }
+ }
+
+ return std::make_pair(std::move(SymbolFlags), std::move(InitSymbol));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
new file mode 100644
index 00000000000..26f77acd91f
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
@@ -0,0 +1,652 @@
+//===------- ObjectLinkingLayer.cpp - JITLink backed ORC ObjectLayer ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+
+#include <vector>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::orc;
+
+namespace llvm {
+namespace orc {
+
+class ObjectLinkingLayerJITLinkContext final : public JITLinkContext {
+public:
+ ObjectLinkingLayerJITLinkContext(
+ ObjectLinkingLayer &Layer,
+ std::unique_ptr<MaterializationResponsibility> MR,
+ std::unique_ptr<MemoryBuffer> ObjBuffer)
+ : JITLinkContext(&MR->getTargetJITDylib()), Layer(Layer),
+ MR(std::move(MR)), ObjBuffer(std::move(ObjBuffer)) {}
+
+ ~ObjectLinkingLayerJITLinkContext() {
+ // If there is an object buffer return function then use it to
+ // return ownership of the buffer.
+ if (Layer.ReturnObjectBuffer && ObjBuffer)
+ Layer.ReturnObjectBuffer(std::move(ObjBuffer));
+ }
+
+ JITLinkMemoryManager &getMemoryManager() override { return Layer.MemMgr; }
+
+ void notifyFailed(Error Err) override {
+ for (auto &P : Layer.Plugins)
+ Err = joinErrors(std::move(Err), P->notifyFailed(*MR));
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ }
+
+ void lookup(const LookupMap &Symbols,
+ std::unique_ptr<JITLinkAsyncLookupContinuation> LC) override {
+
+ JITDylibSearchOrder LinkOrder;
+ MR->getTargetJITDylib().withLinkOrderDo(
+ [&](const JITDylibSearchOrder &LO) { LinkOrder = LO; });
+
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolLookupSet LookupSet;
+ for (auto &KV : Symbols) {
+ orc::SymbolLookupFlags LookupFlags;
+ switch (KV.second) {
+ case jitlink::SymbolLookupFlags::RequiredSymbol:
+ LookupFlags = orc::SymbolLookupFlags::RequiredSymbol;
+ break;
+ case jitlink::SymbolLookupFlags::WeaklyReferencedSymbol:
+ LookupFlags = orc::SymbolLookupFlags::WeaklyReferencedSymbol;
+ break;
+ }
+ LookupSet.add(ES.intern(KV.first), LookupFlags);
+ }
+
+ // OnResolve -- De-intern the symbols and pass the result to the linker.
+ auto OnResolve = [LookupContinuation =
+ std::move(LC)](Expected<SymbolMap> Result) mutable {
+ if (!Result)
+ LookupContinuation->run(Result.takeError());
+ else {
+ AsyncLookupResult LR;
+ for (auto &KV : *Result)
+ LR[*KV.first] = KV.second;
+ LookupContinuation->run(std::move(LR));
+ }
+ };
+
+ for (auto &KV : InternalNamedSymbolDeps) {
+ SymbolDependenceMap InternalDeps;
+ InternalDeps[&MR->getTargetJITDylib()] = std::move(KV.second);
+ MR->addDependencies(KV.first, InternalDeps);
+ }
+
+ ES.lookup(LookupKind::Static, LinkOrder, std::move(LookupSet),
+ SymbolState::Resolved, std::move(OnResolve),
+ [this](const SymbolDependenceMap &Deps) {
+ registerDependencies(Deps);
+ });
+ }
+
+ Error notifyResolved(LinkGraph &G) override {
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolFlagsMap ExtraSymbolsToClaim;
+ bool AutoClaim = Layer.AutoClaimObjectSymbols;
+
+ SymbolMap InternedResult;
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && Sym->getScope() != Scope::Local) {
+ auto InternedName = ES.intern(Sym->getName());
+ JITSymbolFlags Flags;
+
+ if (Sym->isCallable())
+ Flags |= JITSymbolFlags::Callable;
+ if (Sym->getScope() == Scope::Default)
+ Flags |= JITSymbolFlags::Exported;
+
+ InternedResult[InternedName] =
+ JITEvaluatedSymbol(Sym->getAddress(), Flags);
+ if (AutoClaim && !MR->getSymbols().count(InternedName)) {
+ assert(!ExtraSymbolsToClaim.count(InternedName) &&
+ "Duplicate symbol to claim?");
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+ }
+
+ for (auto *Sym : G.absolute_symbols())
+ if (Sym->hasName()) {
+ auto InternedName = ES.intern(Sym->getName());
+ JITSymbolFlags Flags;
+ Flags |= JITSymbolFlags::Absolute;
+ if (Sym->isCallable())
+ Flags |= JITSymbolFlags::Callable;
+ if (Sym->getLinkage() == Linkage::Weak)
+ Flags |= JITSymbolFlags::Weak;
+ InternedResult[InternedName] =
+ JITEvaluatedSymbol(Sym->getAddress(), Flags);
+ if (AutoClaim && !MR->getSymbols().count(InternedName)) {
+ assert(!ExtraSymbolsToClaim.count(InternedName) &&
+ "Duplicate symbol to claim?");
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+ }
+
+ if (!ExtraSymbolsToClaim.empty())
+ if (auto Err = MR->defineMaterializing(ExtraSymbolsToClaim))
+ return Err;
+
+ {
+
+ // Check that InternedResult matches up with MR->getSymbols().
+ // This guards against faulty transformations / compilers / object caches.
+
+ // First check that there aren't any missing symbols.
+ size_t NumMaterializationSideEffectsOnlySymbols = 0;
+ SymbolNameVector ExtraSymbols;
+ SymbolNameVector MissingSymbols;
+ for (auto &KV : MR->getSymbols()) {
+
+ // If this is a materialization-side-effects only symbol then bump
+ // the counter and make sure it's *not* defined, otherwise make
+ // sure that it is defined.
+ if (KV.second.hasMaterializationSideEffectsOnly()) {
+ ++NumMaterializationSideEffectsOnlySymbols;
+ if (InternedResult.count(KV.first))
+ ExtraSymbols.push_back(KV.first);
+ continue;
+ } else if (!InternedResult.count(KV.first))
+ MissingSymbols.push_back(KV.first);
+ }
+
+ // If there were missing symbols then report the error.
+ if (!MissingSymbols.empty())
+ return make_error<MissingSymbolDefinitions>(G.getName(),
+ std::move(MissingSymbols));
+
+ // If there are more definitions than expected, add them to the
+ // ExtraSymbols vector.
+ if (InternedResult.size() >
+ MR->getSymbols().size() - NumMaterializationSideEffectsOnlySymbols) {
+ for (auto &KV : InternedResult)
+ if (!MR->getSymbols().count(KV.first))
+ ExtraSymbols.push_back(KV.first);
+ }
+
+ // If there were extra definitions then report the error.
+ if (!ExtraSymbols.empty())
+ return make_error<UnexpectedSymbolDefinitions>(G.getName(),
+ std::move(ExtraSymbols));
+ }
+
+ if (auto Err = MR->notifyResolved(InternedResult))
+ return Err;
+
+ Layer.notifyLoaded(*MR);
+ return Error::success();
+ }
+
+ void notifyFinalized(
+ std::unique_ptr<JITLinkMemoryManager::Allocation> A) override {
+ if (auto Err = Layer.notifyEmitted(*MR, std::move(A))) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ return;
+ }
+ if (auto Err = MR->notifyEmitted()) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ }
+ }
+
+ LinkGraphPassFunction getMarkLivePass(const Triple &TT) const override {
+ return [this](LinkGraph &G) { return markResponsibilitySymbolsLive(G); };
+ }
+
+ Error modifyPassConfig(const Triple &TT, PassConfiguration &Config) override {
+ // Add passes to mark duplicate defs as should-discard, and to walk the
+ // link graph to build the symbol dependence graph.
+ Config.PrePrunePasses.push_back([this](LinkGraph &G) {
+ return claimOrExternalizeWeakAndCommonSymbols(G);
+ });
+
+ Layer.modifyPassConfig(*MR, TT, Config);
+
+ Config.PostPrunePasses.push_back(
+ [this](LinkGraph &G) { return computeNamedSymbolDependencies(G); });
+
+ return Error::success();
+ }
+
+private:
+ struct LocalSymbolNamedDependencies {
+ SymbolNameSet Internal, External;
+ };
+
+ using LocalSymbolNamedDependenciesMap =
+ DenseMap<const Symbol *, LocalSymbolNamedDependencies>;
+
+ Error claimOrExternalizeWeakAndCommonSymbols(LinkGraph &G) {
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolFlagsMap NewSymbolsToClaim;
+ std::vector<std::pair<SymbolStringPtr, Symbol *>> NameToSym;
+
+ auto ProcessSymbol = [&](Symbol *Sym) {
+ if (Sym->hasName() && Sym->getLinkage() == Linkage::Weak) {
+ auto Name = ES.intern(Sym->getName());
+ if (!MR->getSymbols().count(Name)) { // Reuse the name interned above.
+ JITSymbolFlags SF = JITSymbolFlags::Weak;
+ if (Sym->getScope() == Scope::Default)
+ SF |= JITSymbolFlags::Exported;
+ NewSymbolsToClaim[Name] = SF;
+ NameToSym.push_back(std::make_pair(std::move(Name), Sym));
+ }
+ }
+ };
+
+ for (auto *Sym : G.defined_symbols())
+ ProcessSymbol(Sym);
+ for (auto *Sym : G.absolute_symbols())
+ ProcessSymbol(Sym);
+
+ // Attempt to claim all weak defs that we're not already responsible for.
+ // This cannot fail -- any clashes will just result in rejection of our
+ // claim, at which point we'll externalize that symbol.
+ cantFail(MR->defineMaterializing(std::move(NewSymbolsToClaim)));
+
+ for (auto &KV : NameToSym)
+ if (!MR->getSymbols().count(KV.first))
+ G.makeExternal(*KV.second);
+
+ return Error::success();
+ }
+
+ Error markResponsibilitySymbolsLive(LinkGraph &G) const {
+ auto &ES = Layer.getExecutionSession();
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && MR->getSymbols().count(ES.intern(Sym->getName())))
+ Sym->setLive(true);
+ return Error::success();
+ }
+
+ Error computeNamedSymbolDependencies(LinkGraph &G) {
+ auto &ES = MR->getTargetJITDylib().getExecutionSession();
+ auto LocalDeps = computeLocalDeps(G);
+
+ // Compute dependencies for symbols defined in the JITLink graph.
+ for (auto *Sym : G.defined_symbols()) {
+
+ // Skip local symbols: we do not track dependencies for these.
+ if (Sym->getScope() == Scope::Local)
+ continue;
+ assert(Sym->hasName() &&
+ "Defined non-local jitlink::Symbol should have a name");
+
+ SymbolNameSet ExternalSymDeps, InternalSymDeps;
+
+ // Find internal and external named symbol dependencies.
+ for (auto &E : Sym->getBlock().edges()) {
+ auto &TargetSym = E.getTarget();
+
+ if (TargetSym.getScope() != Scope::Local) {
+ if (TargetSym.isExternal())
+ ExternalSymDeps.insert(ES.intern(TargetSym.getName()));
+ else if (&TargetSym != Sym)
+ InternalSymDeps.insert(ES.intern(TargetSym.getName()));
+ } else {
+ assert(TargetSym.isDefined() &&
+ "local symbols must be defined");
+ auto I = LocalDeps.find(&TargetSym);
+ if (I != LocalDeps.end()) {
+ for (auto &S : I->second.External)
+ ExternalSymDeps.insert(S);
+ for (auto &S : I->second.Internal)
+ InternalSymDeps.insert(S);
+ }
+ }
+ }
+
+ if (ExternalSymDeps.empty() && InternalSymDeps.empty())
+ continue;
+
+ auto SymName = ES.intern(Sym->getName());
+ if (!ExternalSymDeps.empty())
+ ExternalNamedSymbolDeps[SymName] = std::move(ExternalSymDeps);
+ if (!InternalSymDeps.empty())
+ InternalNamedSymbolDeps[SymName] = std::move(InternalSymDeps);
+ }
+
+ for (auto &P : Layer.Plugins) {
+ auto SyntheticLocalDeps = P->getSyntheticSymbolLocalDependencies(*MR);
+ if (SyntheticLocalDeps.empty())
+ continue;
+
+ for (auto &KV : SyntheticLocalDeps) {
+ auto &Name = KV.first;
+ auto &LocalDepsForName = KV.second;
+ for (auto *Local : LocalDepsForName) {
+ assert(Local->getScope() == Scope::Local &&
+ "Dependence on non-local symbol");
+ auto LocalNamedDepsItr = LocalDeps.find(Local);
+ if (LocalNamedDepsItr == LocalDeps.end())
+ continue;
+ for (auto &S : LocalNamedDepsItr->second.Internal)
+ InternalNamedSymbolDeps[Name].insert(S);
+ for (auto &S : LocalNamedDepsItr->second.External)
+ ExternalNamedSymbolDeps[Name].insert(S);
+ }
+ }
+ }
+
+ return Error::success();
+ }
+
+ LocalSymbolNamedDependenciesMap computeLocalDeps(LinkGraph &G) {
+ DenseMap<jitlink::Symbol *, DenseSet<jitlink::Symbol *>> DepMap;
+
+ // For all local symbols:
+ // (1) Add their named dependencies.
+ // (2) Add them to the worklist for further iteration if they depend
+ // on any other local symbols.
+ struct WorklistEntry {
+ WorklistEntry(Symbol *Sym, DenseSet<Symbol *> LocalDeps)
+ : Sym(Sym), LocalDeps(std::move(LocalDeps)) {}
+
+ Symbol *Sym = nullptr;
+ DenseSet<Symbol *> LocalDeps;
+ };
+ std::vector<WorklistEntry> Worklist;
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->getScope() == Scope::Local) {
+ auto &SymNamedDeps = DepMap[Sym];
+ DenseSet<Symbol *> LocalDeps;
+
+ for (auto &E : Sym->getBlock().edges()) {
+ auto &TargetSym = E.getTarget();
+ if (TargetSym.getScope() != Scope::Local)
+ SymNamedDeps.insert(&TargetSym);
+ else {
+ assert(TargetSym.isDefined() &&
+ "local symbols must be defined");
+ LocalDeps.insert(&TargetSym);
+ }
+ }
+
+ if (!LocalDeps.empty())
+ Worklist.push_back(WorklistEntry(Sym, std::move(LocalDeps)));
+ }
+
+ // Loop over all local symbols with local dependencies, propagating
+ // their respective non-local dependencies. Iterate until we hit a stable
+ // state.
+ bool Changed;
+ do {
+ Changed = false;
+ for (auto &WLEntry : Worklist) {
+ auto *Sym = WLEntry.Sym;
+ auto &NamedDeps = DepMap[Sym];
+ auto &LocalDeps = WLEntry.LocalDeps;
+
+ for (auto *TargetSym : LocalDeps) {
+ auto I = DepMap.find(TargetSym);
+ if (I != DepMap.end())
+ for (const auto &S : I->second)
+ Changed |= NamedDeps.insert(S).second;
+ }
+ }
+ } while (Changed);
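+
+ // Termination note: each pass can only add entries to DepMap values,
+ // which are bounded by the set of non-local symbols, so the loop
+ // reaches a fixed point after finitely many iterations.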
+
+ // Intern the results to produce a mapping of jitlink::Symbol* to internal
+ // and external symbol names.
+ auto &ES = Layer.getExecutionSession();
+ LocalSymbolNamedDependenciesMap Result;
+ for (auto &KV : DepMap) {
+ auto *Local = KV.first;
+ assert(Local->getScope() == Scope::Local &&
+ "DepMap keys should all be local symbols");
+ auto &LocalNamedDeps = Result[Local];
+ for (auto *Named : KV.second) {
+ assert(Named->getScope() != Scope::Local &&
+ "DepMap values should all be non-local symbol sets");
+ if (Named->isExternal())
+ LocalNamedDeps.External.insert(ES.intern(Named->getName()));
+ else
+ LocalNamedDeps.Internal.insert(ES.intern(Named->getName()));
+ }
+ }
+
+ return Result;
+ }
+
+ void registerDependencies(const SymbolDependenceMap &QueryDeps) {
+ for (auto &NamedDepsEntry : ExternalNamedSymbolDeps) {
+ auto &Name = NamedDepsEntry.first;
+ auto &NameDeps = NamedDepsEntry.second;
+ SymbolDependenceMap SymbolDeps;
+
+ for (const auto &QueryDepsEntry : QueryDeps) {
+ JITDylib &SourceJD = *QueryDepsEntry.first;
+ const SymbolNameSet &Symbols = QueryDepsEntry.second;
+ auto &DepsForJD = SymbolDeps[&SourceJD];
+
+ for (const auto &S : Symbols)
+ if (NameDeps.count(S))
+ DepsForJD.insert(S);
+
+ if (DepsForJD.empty())
+ SymbolDeps.erase(&SourceJD);
+ }
+
+ MR->addDependencies(Name, SymbolDeps);
+ }
+ }
+
+ ObjectLinkingLayer &Layer;
+ std::unique_ptr<MaterializationResponsibility> MR;
+ std::unique_ptr<MemoryBuffer> ObjBuffer;
+ DenseMap<SymbolStringPtr, SymbolNameSet> ExternalNamedSymbolDeps;
+ DenseMap<SymbolStringPtr, SymbolNameSet> InternalNamedSymbolDeps;
+};
+
+ObjectLinkingLayer::Plugin::~Plugin() {}
+
+ObjectLinkingLayer::ObjectLinkingLayer(ExecutionSession &ES,
+ JITLinkMemoryManager &MemMgr)
+ : ObjectLayer(ES), MemMgr(MemMgr) {
+ ES.registerResourceManager(*this);
+}
+
+ObjectLinkingLayer::ObjectLinkingLayer(
+ ExecutionSession &ES, std::unique_ptr<JITLinkMemoryManager> MemMgr)
+ : ObjectLayer(ES), MemMgr(*MemMgr), MemMgrOwnership(std::move(MemMgr)) {
+ ES.registerResourceManager(*this);
+}
+
+ObjectLinkingLayer::~ObjectLinkingLayer() {
+ assert(Allocs.empty() && "Layer destroyed with resources still attached");
+ getExecutionSession().deregisterResourceManager(*this);
+}
+
+void ObjectLinkingLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Object must not be null");
+ auto ObjBuffer = O->getMemBufferRef();
+ auto Ctx = std::make_unique<ObjectLinkingLayerJITLinkContext>(
+ *this, std::move(R), std::move(O));
+ if (auto G = createLinkGraphFromObject(std::move(ObjBuffer)))
+ link(std::move(*G), std::move(Ctx));
+ else
+ Ctx->notifyFailed(G.takeError());
+}
+
+void ObjectLinkingLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ std::unique_ptr<LinkGraph> G) {
+ link(std::move(G), std::make_unique<ObjectLinkingLayerJITLinkContext>(
+ *this, std::move(R), nullptr));
+}
+
+void ObjectLinkingLayer::modifyPassConfig(MaterializationResponsibility &MR,
+ const Triple &TT,
+ PassConfiguration &PassConfig) {
+ for (auto &P : Plugins)
+ P->modifyPassConfig(MR, TT, PassConfig);
+}
+
+void ObjectLinkingLayer::notifyLoaded(MaterializationResponsibility &MR) {
+ for (auto &P : Plugins)
+ P->notifyLoaded(MR);
+}
+
+Error ObjectLinkingLayer::notifyEmitted(MaterializationResponsibility &MR,
+ AllocPtr Alloc) {
+ Error Err = Error::success();
+ for (auto &P : Plugins)
+ Err = joinErrors(std::move(Err), P->notifyEmitted(MR));
+
+ if (Err)
+ return Err;
+
+ return MR.withResourceKeyDo(
+ [&](ResourceKey K) { Allocs[K].push_back(std::move(Alloc)); });
+}
+
+Error ObjectLinkingLayer::handleRemoveResources(ResourceKey K) {
+
+ Error Err = Error::success();
+
+ for (auto &P : Plugins)
+ Err = joinErrors(std::move(Err), P->notifyRemovingResources(K));
+
+ std::vector<AllocPtr> AllocsToRemove;
+ getExecutionSession().runSessionLocked([&] {
+ auto I = Allocs.find(K);
+ if (I != Allocs.end()) {
+ std::swap(AllocsToRemove, I->second);
+ Allocs.erase(I);
+ }
+ });
+
+ while (!AllocsToRemove.empty()) {
+ Err = joinErrors(std::move(Err), AllocsToRemove.back()->deallocate());
+ AllocsToRemove.pop_back();
+ }
+
+ return Err;
+}
+
+void ObjectLinkingLayer::handleTransferResources(ResourceKey DstKey,
+ ResourceKey SrcKey) {
+ auto I = Allocs.find(SrcKey);
+ if (I != Allocs.end()) {
+ auto &SrcAllocs = I->second;
+ auto &DstAllocs = Allocs[DstKey];
+ DstAllocs.reserve(DstAllocs.size() + SrcAllocs.size());
+ for (auto &Alloc : SrcAllocs)
+ DstAllocs.push_back(std::move(Alloc));
+
+ // Erase SrcKey entry using value rather than iterator I: I may have been
+ // invalidated when we looked up DstKey.
+ Allocs.erase(SrcKey);
+ }
+
+ for (auto &P : Plugins)
+ P->notifyTransferringResources(DstKey, SrcKey);
+}
+
+EHFrameRegistrationPlugin::EHFrameRegistrationPlugin(
+ ExecutionSession &ES, std::unique_ptr<EHFrameRegistrar> Registrar)
+ : ES(ES), Registrar(std::move(Registrar)) {}
+
+void EHFrameRegistrationPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, const Triple &TT,
+ PassConfiguration &PassConfig) {
+
+ PassConfig.PostFixupPasses.push_back(createEHFrameRecorderPass(
+ TT, [this, &MR](JITTargetAddress Addr, size_t Size) {
+ if (Addr) {
+ std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+ assert(!InProcessLinks.count(&MR) &&
+ "Link for MR already being tracked?");
+ InProcessLinks[&MR] = {Addr, Size};
+ }
+ }));
+}
+
+Error EHFrameRegistrationPlugin::notifyEmitted(
+ MaterializationResponsibility &MR) {
+
+ EHFrameRange EmittedRange;
+ {
+ std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+
+ auto EHFrameRangeItr = InProcessLinks.find(&MR);
+ if (EHFrameRangeItr == InProcessLinks.end())
+ return Error::success();
+
+ EmittedRange = EHFrameRangeItr->second;
+ assert(EmittedRange.Addr && "eh-frame addr to register can not be null");
+ InProcessLinks.erase(EHFrameRangeItr);
+ }
+
+ if (auto Err = MR.withResourceKeyDo(
+ [&](ResourceKey K) { EHFrameRanges[K].push_back(EmittedRange); }))
+ return Err;
+
+ return Registrar->registerEHFrames(EmittedRange.Addr, EmittedRange.Size);
+}
+
+Error EHFrameRegistrationPlugin::notifyFailed(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+ InProcessLinks.erase(&MR);
+ return Error::success();
+}
+
+Error EHFrameRegistrationPlugin::notifyRemovingResources(ResourceKey K) {
+ std::vector<EHFrameRange> RangesToRemove;
+
+ ES.runSessionLocked([&] {
+ auto I = EHFrameRanges.find(K);
+ if (I != EHFrameRanges.end()) {
+ RangesToRemove = std::move(I->second);
+ EHFrameRanges.erase(I);
+ }
+ });
+
+ Error Err = Error::success();
+ while (!RangesToRemove.empty()) {
+ auto RangeToRemove = RangesToRemove.back();
+ RangesToRemove.pop_back();
+ assert(RangeToRemove.Addr && "Untracked eh-frame range must not be null");
+ Err = joinErrors(
+ std::move(Err),
+ Registrar->deregisterEHFrames(RangeToRemove.Addr, RangeToRemove.Size));
+ }
+
+ return Err;
+}
+
+void EHFrameRegistrationPlugin::notifyTransferringResources(
+ ResourceKey DstKey, ResourceKey SrcKey) {
+ auto SI = EHFrameRanges.find(SrcKey);
+ if (SI != EHFrameRanges.end()) {
+ auto &SrcRanges = SI->second;
+ auto &DstRanges = EHFrameRanges[DstKey];
+ DstRanges.reserve(DstRanges.size() + SrcRanges.size());
+ for (auto &SrcRange : SrcRanges)
+ DstRanges.push_back(std::move(SrcRange));
+ EHFrameRanges.erase(SI);
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp
new file mode 100644
index 00000000000..a57662e10a7
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp
@@ -0,0 +1,40 @@
+//===---------- ObjectTransformLayer.cpp - Object Transform Layer ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
+ObjectTransformLayer::ObjectTransformLayer(ExecutionSession &ES,
+ ObjectLayer &BaseLayer,
+ TransformFunction Transform)
+ : ObjectLayer(ES), BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+void ObjectTransformLayer::emit(
+ std::unique_ptr<MaterializationResponsibility> R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Object must not be null");
+
+ // If there is a transform set then apply it.
+ if (Transform) {
+ if (auto TransformedObj = Transform(std::move(O)))
+ O = std::move(*TransformedObj);
+ else {
+ R->failMaterialization();
+ getExecutionSession().reportError(TransformedObj.takeError());
+ return;
+ }
+ }
+
+ BaseLayer.emit(std::move(R), std::move(O));
+}
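+
+// Illustrative usage (not part of the upstream sources): a pass-through
+// transform that could inspect or rewrite the object before linking.
+//
+//   ObjectTransformLayer TL(ES, BaseLayer,
+//       [](std::unique_ptr<MemoryBuffer> Obj)
+//           -> Expected<std::unique_ptr<MemoryBuffer>> {
+//         return std::move(Obj); // Inspect / rewrite Obj here.
+//       });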
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/OrcABISupport.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/OrcABISupport.cpp
new file mode 100644
index 00000000000..18b3c5e12b1
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/OrcABISupport.cpp
@@ -0,0 +1,910 @@
+//===------------- OrcABISupport.cpp - ABI specific support code ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+
+template <typename ORCABI>
+bool stubAndPointerRangesOk(JITTargetAddress StubBlockAddr,
+ JITTargetAddress PointerBlockAddr,
+ unsigned NumStubs) {
+ constexpr unsigned MaxDisp = ORCABI::StubToPointerMaxDisplacement;
+ JITTargetAddress FirstStub = StubBlockAddr;
+ JITTargetAddress LastStub = FirstStub + ((NumStubs - 1) * ORCABI::StubSize);
+ JITTargetAddress FirstPointer = PointerBlockAddr;
+ JITTargetAddress LastPointer =
+ FirstPointer + ((NumStubs - 1) * ORCABI::StubSize);
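+ // Using StubSize for the pointer span is conservative: if PointerSize is
+ // smaller than StubSize this over-estimates LastPointer, making the
+ // range check stricter, never looser.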
+
+ if (FirstStub < FirstPointer) {
+ if (LastStub >= FirstPointer)
+ return false; // Ranges overlap.
+ return (FirstPointer - FirstStub <= MaxDisp) &&
+ (LastPointer - LastStub <= MaxDisp); // Displacements in range.
+ }
+
+ if (LastPointer >= FirstStub)
+ return false; // Ranges overlap.
+
+ return (FirstStub - FirstPointer <= MaxDisp) &&
+ (LastStub - LastPointer <= MaxDisp);
+}
+
+namespace llvm {
+namespace orc {
+
+void OrcAArch64::writeResolverCode(char *ResolverWorkingMem,
+ JITTargetAddress ResolverTargetAddress,
+ JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr) {
+
+ const uint32_t ResolverCode[] = {
+ // resolver_entry:
+ 0xa9bf47fd, // 0x000: stp x29, x17, [sp, #-16]!
+ 0x910003fd, // 0x004: mov x29, sp
+ 0xa9bf73fb, // 0x008: stp x27, x28, [sp, #-16]!
+ 0xa9bf6bf9, // 0x00c: stp x25, x26, [sp, #-16]!
+ 0xa9bf63f7, // 0x010: stp x23, x24, [sp, #-16]!
+ 0xa9bf5bf5, // 0x014: stp x21, x22, [sp, #-16]!
+ 0xa9bf53f3, // 0x018: stp x19, x20, [sp, #-16]!
+ 0xa9bf3fee, // 0x01c: stp x14, x15, [sp, #-16]!
+ 0xa9bf37ec, // 0x020: stp x12, x13, [sp, #-16]!
+ 0xa9bf2fea, // 0x024: stp x10, x11, [sp, #-16]!
+ 0xa9bf27e8, // 0x028: stp x8, x9, [sp, #-16]!
+ 0xa9bf1fe6, // 0x02c: stp x6, x7, [sp, #-16]!
+ 0xa9bf17e4, // 0x030: stp x4, x5, [sp, #-16]!
+ 0xa9bf0fe2, // 0x034: stp x2, x3, [sp, #-16]!
+ 0xa9bf07e0, // 0x038: stp x0, x1, [sp, #-16]!
+ 0xadbf7ffe, // 0x03c: stp q30, q31, [sp, #-32]!
+ 0xadbf77fc, // 0x040: stp q28, q29, [sp, #-32]!
+ 0xadbf6ffa, // 0x044: stp q26, q27, [sp, #-32]!
+ 0xadbf67f8, // 0x048: stp q24, q25, [sp, #-32]!
+ 0xadbf5ff6, // 0x04c: stp q22, q23, [sp, #-32]!
+ 0xadbf57f4, // 0x050: stp q20, q21, [sp, #-32]!
+ 0xadbf4ff2, // 0x054: stp q18, q19, [sp, #-32]!
+ 0xadbf47f0, // 0x058: stp q16, q17, [sp, #-32]!
+ 0xadbf3fee, // 0x05c: stp q14, q15, [sp, #-32]!
+ 0xadbf37ec, // 0x060: stp q12, q13, [sp, #-32]!
+ 0xadbf2fea, // 0x064: stp q10, q11, [sp, #-32]!
+ 0xadbf27e8, // 0x068: stp q8, q9, [sp, #-32]!
+ 0xadbf1fe6, // 0x06c: stp q6, q7, [sp, #-32]!
+ 0xadbf17e4, // 0x070: stp q4, q5, [sp, #-32]!
+ 0xadbf0fe2, // 0x074: stp q2, q3, [sp, #-32]!
+ 0xadbf07e0, // 0x078: stp q0, q1, [sp, #-32]!
+ 0x580004e0, // 0x07c: ldr x0, Lreentry_ctx_ptr
+ 0xaa1e03e1, // 0x080: mov x1, x30
+ 0xd1003021, // 0x084: sub x1, x1, #12
+ 0x58000442, // 0x088: ldr x2, Lreentry_fn_ptr
+ 0xd63f0040, // 0x08c: blr x2
+ 0xaa0003f1, // 0x090: mov x17, x0
+ 0xacc107e0, // 0x094: ldp q0, q1, [sp], #32
+ 0xacc10fe2, // 0x098: ldp q2, q3, [sp], #32
+ 0xacc117e4, // 0x09c: ldp q4, q5, [sp], #32
+ 0xacc11fe6, // 0x0a0: ldp q6, q7, [sp], #32
+ 0xacc127e8, // 0x0a4: ldp q8, q9, [sp], #32
+ 0xacc12fea, // 0x0a8: ldp q10, q11, [sp], #32
+ 0xacc137ec, // 0x0ac: ldp q12, q13, [sp], #32
+ 0xacc13fee, // 0x0b0: ldp q14, q15, [sp], #32
+ 0xacc147f0, // 0x0b4: ldp q16, q17, [sp], #32
+ 0xacc14ff2, // 0x0b8: ldp q18, q19, [sp], #32
+ 0xacc157f4, // 0x0bc: ldp q20, q21, [sp], #32
+ 0xacc15ff6, // 0x0c0: ldp q22, q23, [sp], #32
+ 0xacc167f8, // 0x0c4: ldp q24, q25, [sp], #32
+ 0xacc16ffa, // 0x0c8: ldp q26, q27, [sp], #32
+ 0xacc177fc, // 0x0cc: ldp q28, q29, [sp], #32
+ 0xacc17ffe, // 0x0d0: ldp q30, q31, [sp], #32
+ 0xa8c107e0, // 0x0d4: ldp x0, x1, [sp], #16
+ 0xa8c10fe2, // 0x0d8: ldp x2, x3, [sp], #16
+ 0xa8c117e4, // 0x0dc: ldp x4, x5, [sp], #16
+ 0xa8c11fe6, // 0x0e0: ldp x6, x7, [sp], #16
+ 0xa8c127e8, // 0x0e4: ldp x8, x9, [sp], #16
+ 0xa8c12fea, // 0x0e8: ldp x10, x11, [sp], #16
+ 0xa8c137ec, // 0x0ec: ldp x12, x13, [sp], #16
+ 0xa8c13fee, // 0x0f0: ldp x14, x15, [sp], #16
+ 0xa8c153f3, // 0x0f4: ldp x19, x20, [sp], #16
+ 0xa8c15bf5, // 0x0f8: ldp x21, x22, [sp], #16
+ 0xa8c163f7, // 0x0fc: ldp x23, x24, [sp], #16
+ 0xa8c16bf9, // 0x100: ldp x25, x26, [sp], #16
+ 0xa8c173fb, // 0x104: ldp x27, x28, [sp], #16
+ 0xa8c17bfd, // 0x108: ldp x29, x30, [sp], #16
+ 0xd65f0220, // 0x10c: ret x17
+ 0x01234567, // 0x110: Lreentry_fn_ptr:
+ 0xdeadbeef, // 0x114: .quad 0
+ 0x98765432, // 0x118: Lreentry_ctx_ptr:
+ 0xcafef00d // 0x11c: .quad 0
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x110;
+ const unsigned ReentryCtxAddrOffset = 0x118;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+ sizeof(uint64_t));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+ sizeof(uint64_t));
+}
+
+void OrcAArch64::writeTrampolines(char *TrampolineBlockWorkingMem,
+ JITTargetAddress TrampolineBlockTargetAddress,
+ JITTargetAddress ResolverAddr,
+ unsigned NumTrampolines) {
+
+ unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
+
+ memcpy(TrampolineBlockWorkingMem + OffsetToPtr, &ResolverAddr,
+ sizeof(uint64_t));
+
+ // OffsetToPtr was computed from the block start, but the PC-relative
+ // load is the 2nd instruction of each trampoline, so subtract one
+ // instruction width (4 bytes).
+
+ uint32_t *Trampolines =
+ reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
+ Trampolines[3 * I + 0] = 0xaa1e03f1; // mov x17, x30
+ Trampolines[3 * I + 1] = 0x58000010 | (OffsetToPtr << 3); // adr x16, Lptr
+ Trampolines[3 * I + 2] = 0xd63f0200; // blr x16
+ }
+}
+
+void OrcAArch64::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+ JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // ldr x16, ptr1 ; PC-rel load of ptr1
+ // br x16 ; Jump to resolver
+ // stub2:
+ // ldr x16, ptr2 ; PC-rel load of ptr2
+ // br x16 ; Jump to resolver
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ static_assert(StubSize == PointerSize,
+ "Pointer and stub size must match for algorithm below");
+ assert(stubAndPointerRangesOk<OrcAArch64>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+ uint64_t PtrDisplacement =
+ PointersBlockTargetAddress - StubsBlockTargetAddress;
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+ uint64_t PtrOffsetField = PtrDisplacement << 3;
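+
+ // Each stub is stored as a single 64-bit value: the low word 0x58000010
+ // is "ldr x16, Lptr" and the high word 0xd61f0200 is "br x16". The
+ // ldr-literal imm19 field starts at bit 5 and encodes a word offset, so
+ // a byte displacement D contributes (D / 4) << 5 == D << 3.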
+
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Stub[I] = 0xd61f020058000010 | PtrOffsetField;
+}
+
+void OrcX86_64_Base::writeTrampolines(
+ char *TrampolineBlockWorkingMem,
+ JITTargetAddress TrampolineBlockTargetAddress,
+ JITTargetAddress ResolverAddr, unsigned NumTrampolines) {
+
+ unsigned OffsetToPtr = NumTrampolines * TrampolineSize;
+
+ memcpy(TrampolineBlockWorkingMem + OffsetToPtr, &ResolverAddr,
+ sizeof(uint64_t));
+
+ uint64_t *Trampolines =
+ reinterpret_cast<uint64_t *>(TrampolineBlockWorkingMem);
+ uint64_t CallIndirPCRel = 0xf1c40000000015ff;
+
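+ // Little-endian byte layout of each 8-byte trampoline is
+ // "ff 15 <disp32> c4 f1": callq *disp32(%rip) followed by two bytes of
+ // invalid-opcode padding. disp32 is relative to the end of the 6-byte
+ // call, hence the "- 6" below; "<< 16" places it at byte offset 2.
+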
+ for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize)
+ Trampolines[I] = CallIndirPCRel | ((OffsetToPtr - 6) << 16);
+}
+
+void OrcX86_64_Base::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+ JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // jmpq *ptr1(%rip)
+ // .byte 0xC4 ; <- Invalid opcode padding.
+ // .byte 0xF1
+ // stub2:
+ // jmpq *ptr2(%rip)
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ // Populate the stub block in working memory.
+ static_assert(StubSize == PointerSize,
+ "Pointer and stub size must match for algorithm below");
+ assert(stubAndPointerRangesOk<OrcX86_64_Base>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+ uint64_t PtrOffsetField =
+ (PointersBlockTargetAddress - StubsBlockTargetAddress - 6) << 16;
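+ // 0xF1C40000000025ff decodes (little-endian) as "ff 25 <disp32> c4 f1":
+ // jmpq *disp32(%rip) plus invalid-opcode padding. The "- 6" above is the
+ // RIP bias for the 6-byte jmp; stubs and pointers advance in lock-step,
+ // so one displacement works for every stub.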
+ for (unsigned I = 0; I < NumStubs; ++I)
+ Stub[I] = 0xF1C40000000025ff | PtrOffsetField;
+}
+
+void OrcX86_64_SysV::writeResolverCode(char *ResolverWorkingMem,
+ JITTargetAddress ResolverTargetAddress,
+ JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr) {
+
+ LLVM_DEBUG({
+ dbgs() << "Writing resolver code to "
+ << formatv("{0:x16}", ResolverTargetAddress) << "\n";
+ });
+
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushq %rbp
+ 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
+ 0x50, // 0x04: pushq %rax
+ 0x53, // 0x05: pushq %rbx
+ 0x51, // 0x06: pushq %rcx
+ 0x52, // 0x07: pushq %rdx
+ 0x56, // 0x08: pushq %rsi
+ 0x57, // 0x09: pushq %rdi
+ 0x41, 0x50, // 0x0a: pushq %r8
+ 0x41, 0x51, // 0x0c: pushq %r9
+ 0x41, 0x52, // 0x0e: pushq %r10
+ 0x41, 0x53, // 0x10: pushq %r11
+ 0x41, 0x54, // 0x12: pushq %r12
+ 0x41, 0x55, // 0x14: pushq %r13
+ 0x41, 0x56, // 0x16: pushq %r14
+ 0x41, 0x57, // 0x18: pushq %r15
+ 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
+ 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+ 0x48, 0xbf, // 0x26: movabsq <CBMgr>, %rdi
+
+ // 0x28: JIT re-entry ctx addr.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x8b, 0x75, 0x08, // 0x30: movq 8(%rbp), %rsi
+ 0x48, 0x83, 0xee, 0x06, // 0x34: subq $6, %rsi
+ 0x48, 0xb8, // 0x38: movabsq <REntry>, %rax
+
+ // 0x3a: JIT re-entry fn addr:
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0xff, 0xd0, // 0x42: callq *%rax
+ 0x48, 0x89, 0x45, 0x08, // 0x44: movq %rax, 8(%rbp)
+ 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x48: fxrstor64 (%rsp)
+ 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x4d: addq 0x208, %rsp
+ 0x41, 0x5f, // 0x54: popq %r15
+ 0x41, 0x5e, // 0x56: popq %r14
+ 0x41, 0x5d, // 0x58: popq %r13
+ 0x41, 0x5c, // 0x5a: popq %r12
+ 0x41, 0x5b, // 0x5c: popq %r11
+ 0x41, 0x5a, // 0x5e: popq %r10
+ 0x41, 0x59, // 0x60: popq %r9
+ 0x41, 0x58, // 0x62: popq %r8
+ 0x5f, // 0x64: popq %rdi
+ 0x5e, // 0x65: popq %rsi
+ 0x5a, // 0x66: popq %rdx
+ 0x59, // 0x67: popq %rcx
+ 0x5b, // 0x68: popq %rbx
+ 0x58, // 0x69: popq %rax
+ 0x5d, // 0x6a: popq %rbp
+ 0xc3, // 0x6b: retq
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x3a;
+ const unsigned ReentryCtxAddrOffset = 0x28;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+ sizeof(uint64_t));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+ sizeof(uint64_t));
+}
+
+void OrcX86_64_Win32::writeResolverCode(char *ResolverWorkingMem,
+ JITTargetAddress ResolverTargetAddress,
+ JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr) {
+
+ // The resolver code is similar to the SysV variant above, adjusted for
+ // the Windows x64 calling convention: the two arguments are passed in
+ // %rcx and %rdx rather than %rdi and %rsi, and 32 bytes of shadow space
+ // are allocated on the stack around the call.
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushq %rbp
+ 0x48, 0x89, 0xe5, // 0x01: movq %rsp, %rbp
+ 0x50, // 0x04: pushq %rax
+ 0x53, // 0x05: pushq %rbx
+ 0x51, // 0x06: pushq %rcx
+ 0x52, // 0x07: pushq %rdx
+ 0x56, // 0x08: pushq %rsi
+ 0x57, // 0x09: pushq %rdi
+ 0x41, 0x50, // 0x0a: pushq %r8
+ 0x41, 0x51, // 0x0c: pushq %r9
+ 0x41, 0x52, // 0x0e: pushq %r10
+ 0x41, 0x53, // 0x10: pushq %r11
+ 0x41, 0x54, // 0x12: pushq %r12
+ 0x41, 0x55, // 0x14: pushq %r13
+ 0x41, 0x56, // 0x16: pushq %r14
+ 0x41, 0x57, // 0x18: pushq %r15
+ 0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
+ 0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+
+ 0x48, 0xb9, // 0x26: movabsq <CBMgr>, %rcx
+ // 0x28: JIT re-entry ctx addr.
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ 0x48, 0x8B, 0x55, 0x08, // 0x30: mov rdx, [rbp+0x8]
+ 0x48, 0x83, 0xea, 0x06, // 0x34: sub rdx, 0x6
+
+ 0x48, 0xb8, // 0x38: movabsq <REntry>, %rax
+ // 0x3a: JIT re-entry fn addr:
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ // 0x42: sub rsp, 0x20 (Allocate shadow space)
+ 0x48, 0x83, 0xEC, 0x20,
+ 0xff, 0xd0, // 0x46: callq *%rax
+
+ // 0x48: add rsp, 0x20 (Free shadow space)
+ 0x48, 0x83, 0xC4, 0x20,
+
+ 0x48, 0x89, 0x45, 0x08, // 0x4C: movq %rax, 8(%rbp)
+ 0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x50: fxrstor64 (%rsp)
+ 0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x55: addq 0x208, %rsp
+ 0x41, 0x5f, // 0x5C: popq %r15
+ 0x41, 0x5e, // 0x5E: popq %r14
+ 0x41, 0x5d, // 0x60: popq %r13
+ 0x41, 0x5c, // 0x62: popq %r12
+ 0x41, 0x5b, // 0x64: popq %r11
+ 0x41, 0x5a, // 0x66: popq %r10
+ 0x41, 0x59, // 0x68: popq %r9
+ 0x41, 0x58, // 0x6a: popq %r8
+ 0x5f, // 0x6c: popq %rdi
+ 0x5e, // 0x6d: popq %rsi
+ 0x5a, // 0x6e: popq %rdx
+ 0x59, // 0x6f: popq %rcx
+ 0x5b, // 0x70: popq %rbx
+ 0x58, // 0x71: popq %rax
+ 0x5d, // 0x72: popq %rbp
+ 0xc3, // 0x73: retq
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x3a;
+ const unsigned ReentryCtxAddrOffset = 0x28;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+ sizeof(uint64_t));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+ sizeof(uint64_t));
+}
+
+void OrcI386::writeResolverCode(char *ResolverWorkingMem,
+ JITTargetAddress ResolverTargetAddress,
+ JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr) {
+
+ assert((ReentryFnAddr >> 32) == 0 && "ReentryFnAddr out of range");
+ assert((ReentryCtxAddr >> 32) == 0 && "ReentryCtxAddr out of range");
+
+ const uint8_t ResolverCode[] = {
+ // resolver_entry:
+ 0x55, // 0x00: pushl %ebp
+ 0x89, 0xe5, // 0x01: movl %esp, %ebp
+ 0x54, // 0x03: pushl %esp
+ 0x83, 0xe4, 0xf0, // 0x04: andl $-0x10, %esp
+ 0x50, // 0x07: pushl %eax
+ 0x53, // 0x08: pushl %ebx
+ 0x51, // 0x09: pushl %ecx
+ 0x52, // 0x0a: pushl %edx
+ 0x56, // 0x0b: pushl %esi
+ 0x57, // 0x0c: pushl %edi
+ 0x81, 0xec, 0x18, 0x02, 0x00, 0x00, // 0x0d: subl $0x218, %esp
+ 0x0f, 0xae, 0x44, 0x24, 0x10, // 0x13: fxsave 0x10(%esp)
+ 0x8b, 0x75, 0x04, // 0x18: movl 0x4(%ebp), %esi
+ 0x83, 0xee, 0x05, // 0x1b: subl $0x5, %esi
+ 0x89, 0x74, 0x24, 0x04, // 0x1e: movl %esi, 0x4(%esp)
+ 0xc7, 0x04, 0x24, 0x00, 0x00, 0x00,
+ 0x00, // 0x22: movl <cbmgr>, (%esp)
+ 0xb8, 0x00, 0x00, 0x00, 0x00, // 0x29: movl <reentry>, %eax
+ 0xff, 0xd0, // 0x2e: calll *%eax
+ 0x89, 0x45, 0x04, // 0x30: movl %eax, 0x4(%ebp)
+ 0x0f, 0xae, 0x4c, 0x24, 0x10, // 0x33: fxrstor 0x10(%esp)
+ 0x81, 0xc4, 0x18, 0x02, 0x00, 0x00, // 0x38: addl $0x218, %esp
+ 0x5f, // 0x3e: popl %edi
+ 0x5e, // 0x3f: popl %esi
+ 0x5a, // 0x40: popl %edx
+ 0x59, // 0x41: popl %ecx
+ 0x5b, // 0x42: popl %ebx
+ 0x58, // 0x43: popl %eax
+ 0x8b, 0x65, 0xfc, // 0x44: movl -0x4(%ebp), %esp
+ 0x5d, // 0x48: popl %ebp
+ 0xc3 // 0x49: retl
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x2a;
+ const unsigned ReentryCtxAddrOffset = 0x25;
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+ sizeof(uint32_t));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+ sizeof(uint32_t));
+}
+
+void OrcI386::writeTrampolines(char *TrampolineWorkingMem,
+ JITTargetAddress TrampolineBlockTargetAddress,
+ JITTargetAddress ResolverAddr,
+ unsigned NumTrampolines) {
+ assert((ResolverAddr >> 32) == 0 && "ResolverAddr out of range");
+
+ uint64_t CallRelImm = 0xF1C4C400000000e8;
+ uint64_t ResolverRel = ResolverAddr - TrampolineBlockTargetAddress - 5;
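+
+ // Each 8-byte trampoline is "e8 <rel32> c4 c4 f1": a 5-byte relative
+ // call plus invalid-opcode padding. rel32 is relative to the next
+ // instruction (hence the "- 5"), and each successive trampoline sits
+ // TrampolineSize bytes closer to the resolver, so ResolverRel shrinks
+ // by that amount per iteration.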
+
+ uint64_t *Trampolines = reinterpret_cast<uint64_t *>(TrampolineWorkingMem);
+ for (unsigned I = 0; I < NumTrampolines; ++I, ResolverRel -= TrampolineSize)
+ Trampolines[I] = CallRelImm | (ResolverRel << 8);
+}
+
+void OrcI386::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+ JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+ assert((StubsBlockTargetAddress >> 32) == 0 &&
+ "StubsBlockTargetAddress is out of range");
+ assert((PointersBlockTargetAddress >> 32) == 0 &&
+ "PointersBlockTargetAddress is out of range");
+
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // jmpq *ptr1
+ // .byte 0xC4 ; <- Invalid opcode padding.
+ // .byte 0xF1
+ // stub2:
+ // jmpq *ptr2
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .quad 0x0
+ // ptr2:
+ // .quad 0x0
+ //
+ // ...
+
+ assert(stubAndPointerRangesOk<OrcI386>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+
+ uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+ uint64_t PtrAddr = PointersBlockTargetAddress;
+ for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 4)
+ Stub[I] = 0xF1C40000000025ff | (PtrAddr << 16);
+}
+
+void OrcMips32_Base::writeResolverCode(char *ResolverWorkingMem,
+ JITTargetAddress ResolverTargetAddress,
+ JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr,
+ bool isBigEndian) {
+
+ const uint32_t ResolverCode[] = {
+ // resolver_entry:
+ 0x27bdff98, // 0x00: addiu $sp,$sp,-104
+ 0xafa20000, // 0x04: sw $v0,0($sp)
+ 0xafa30004, // 0x08: sw $v1,4($sp)
+ 0xafa40008, // 0x0c: sw $a0,8($sp)
+ 0xafa5000c, // 0x10: sw $a1,12($sp)
+ 0xafa60010, // 0x14: sw $a2,16($sp)
+ 0xafa70014, // 0x18: sw $a3,20($sp)
+ 0xafb00018, // 0x1c: sw $s0,24($sp)
+ 0xafb1001c, // 0x20: sw $s1,28($sp)
+ 0xafb20020, // 0x24: sw $s2,32($sp)
+ 0xafb30024, // 0x28: sw $s3,36($sp)
+ 0xafb40028, // 0x2c: sw $s4,40($sp)
+ 0xafb5002c, // 0x30: sw $s5,44($sp)
+ 0xafb60030, // 0x34: sw $s6,48($sp)
+ 0xafb70034, // 0x38: sw $s7,52($sp)
+ 0xafa80038, // 0x3c: sw $t0,56($sp)
+ 0xafa9003c, // 0x40: sw $t1,60($sp)
+ 0xafaa0040, // 0x44: sw $t2,64($sp)
+ 0xafab0044, // 0x48: sw $t3,68($sp)
+ 0xafac0048, // 0x4c: sw $t4,72($sp)
+ 0xafad004c, // 0x50: sw $t5,76($sp)
+ 0xafae0050, // 0x54: sw $t6,80($sp)
+ 0xafaf0054, // 0x58: sw $t7,84($sp)
+ 0xafb80058, // 0x5c: sw $t8,88($sp)
+ 0xafb9005c, // 0x60: sw $t9,92($sp)
+ 0xafbe0060, // 0x64: sw $fp,96($sp)
+ 0xafbf0064, // 0x68: sw $ra,100($sp)
+
+ // JIT re-entry ctx addr.
+ 0x00000000, // 0x6c: lui $a0,ctx
+ 0x00000000, // 0x70: addiu $a0,$a0,ctx
+
+ 0x03e02825, // 0x74: move $a1, $ra
+ 0x24a5ffec, // 0x78: addiu $a1,$a1,-20
+
+ // JIT re-entry fn addr:
+ 0x00000000, // 0x7c: lui $t9,reentry
+ 0x00000000, // 0x80: addiu $t9,$t9,reentry
+
+ 0x0320f809, // 0x84: jalr $t9
+ 0x00000000, // 0x88: nop
+ 0x8fbf0064, // 0x8c: lw $ra,100($sp)
+ 0x8fbe0060, // 0x90: lw $fp,96($sp)
+ 0x8fb9005c, // 0x94: lw $t9,92($sp)
+ 0x8fb80058, // 0x98: lw $t8,88($sp)
+ 0x8faf0054, // 0x9c: lw $t7,84($sp)
+ 0x8fae0050, // 0xa0: lw $t6,80($sp)
+ 0x8fad004c, // 0xa4: lw $t5,76($sp)
+ 0x8fac0048, // 0xa8: lw $t4,72($sp)
+ 0x8fab0044, // 0xac: lw $t3,68($sp)
+ 0x8faa0040, // 0xb0: lw $t2,64($sp)
+ 0x8fa9003c, // 0xb4: lw $t1,60($sp)
+ 0x8fa80038, // 0xb8: lw $t0,56($sp)
+ 0x8fb70034, // 0xbc: lw $s7,52($sp)
+ 0x8fb60030, // 0xc0: lw $s6,48($sp)
+ 0x8fb5002c, // 0xc4: lw $s5,44($sp)
+ 0x8fb40028, // 0xc8: lw $s4,40($sp)
+ 0x8fb30024, // 0xcc: lw $s3,36($sp)
+ 0x8fb20020, // 0xd0: lw $s2,32($sp)
+ 0x8fb1001c, // 0xd4: lw $s1,28($sp)
+ 0x8fb00018, // 0xd8: lw $s0,24($sp)
+ 0x8fa70014, // 0xdc: lw $a3,20($sp)
+ 0x8fa60010, // 0xe0: lw $a2,16($sp)
+ 0x8fa5000c, // 0xe4: lw $a1,12($sp)
+ 0x8fa40008, // 0xe8: lw $a0,8($sp)
+ 0x27bd0068, // 0xec: addiu $sp,$sp,104
+ 0x0300f825, // 0xf0: move $ra, $t8
+ 0x03200008, // 0xf4: jr $t9
+ 0x00000000, // 0xf8: move $t9, $v0/v1
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x7c; // JIT re-entry fn addr lui
+ const unsigned ReentryCtxAddrOffset = 0x6c; // JIT re-entry context addr lui
+ const unsigned Offset = 0xf8; // Offset of the "move $t9, $v0/$v1" slot.
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+
+ // Depending on endianness, the return value will be in v0 or v1.
+ uint32_t MoveVxT9 = isBigEndian ? 0x0060c825 : 0x0040c825;
+ memcpy(ResolverWorkingMem + Offset, &MoveVxT9, sizeof(MoveVxT9));
+
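+ // Materialize each 32-bit address as lui (high half) + addiu (low half).
+ // addiu sign-extends its 16-bit immediate, so the high half is taken
+ // from Addr + 0x8000 to compensate when bit 15 of the low half is set.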
+ uint32_t ReentryCtxLUi =
+ 0x3c040000 | (((ReentryCtxAddr + 0x8000) >> 16) & 0xFFFF);
+ uint32_t ReentryCtxADDiu = 0x24840000 | ((ReentryCtxAddr)&0xFFFF);
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxLUi,
+ sizeof(ReentryCtxLUi));
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset + 4, &ReentryCtxADDiu,
+ sizeof(ReentryCtxADDiu));
+
+ uint32_t ReentryFnLUi =
+ 0x3c190000 | (((ReentryFnAddr + 0x8000) >> 16) & 0xFFFF);
+ uint32_t ReentryFnADDiu = 0x27390000 | ((ReentryFnAddr)&0xFFFF);
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnLUi,
+ sizeof(ReentryFnLUi));
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset + 4, &ReentryFnADDiu,
+ sizeof(ReentryFnADDiu));
+}
+
+void OrcMips32_Base::writeTrampolines(
+ char *TrampolineBlockWorkingMem,
+ JITTargetAddress TrampolineBlockTargetAddress,
+ JITTargetAddress ResolverAddr, unsigned NumTrampolines) {
+
+ assert((ResolverAddr >> 32) == 0 && "ResolverAddr out of range");
+
+ uint32_t *Trampolines =
+ reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+ uint32_t RHiAddr = ((ResolverAddr + 0x8000) >> 16);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I) {
+ // move $t8,$ra
+ // lui $t9,ResolverAddr
+ // addiu $t9,$t9,ResolverAddr
+ // jalr $t9
+ // nop
+ Trampolines[5 * I + 0] = 0x03e0c025;
+ Trampolines[5 * I + 1] = 0x3c190000 | (RHiAddr & 0xFFFF);
+ Trampolines[5 * I + 2] = 0x27390000 | (ResolverAddr & 0xFFFF);
+ Trampolines[5 * I + 3] = 0x0320f809;
+ Trampolines[5 * I + 4] = 0x00000000;
+ }
+}
+
+void OrcMips32_Base::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+ JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+ assert((StubsBlockTargetAddress >> 32) == 0 &&
+ "InitialPtrVal is out of range");
+
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // lui $t9, ptr1
+ // lw $t9, %lo(ptr1)($t9)
+ // jr $t9
+ // stub2:
+ // lui $t9, ptr2
+ // lw $t9, %lo(ptr2)($t9)
+ // jr $t9
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .word 0x0
+ // ptr2:
+ // .word 0x0
+ //
+ // ...
+
+ assert(stubAndPointerRangesOk<OrcMips32_Base>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+
+ // Populate the stub block in working memory.
+ uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+ uint64_t PtrAddr = PointersBlockTargetAddress;
+
+ for (unsigned I = 0; I < NumStubs; ++I) {
+ uint32_t HiAddr = ((PtrAddr + 0x8000) >> 16);
+ Stub[4 * I + 0] = 0x3c190000 | (HiAddr & 0xFFFF); // lui $t9,ptr1
+ Stub[4 * I + 1] = 0x8f390000 | (PtrAddr & 0xFFFF); // lw $t9,%lo(ptr1)($t9)
+ Stub[4 * I + 2] = 0x03200008; // jr $t9
+ Stub[4 * I + 3] = 0x00000000; // nop
+ PtrAddr += 4;
+ }
+}
+
+void OrcMips64::writeResolverCode(char *ResolverWorkingMem,
+ JITTargetAddress ResolverTargetAddress,
+ JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr) {
+
+ const uint32_t ResolverCode[] = {
+ //resolver_entry:
+ 0x67bdff30, // 0x00: daddiu $sp,$sp,-208
+ 0xffa20000, // 0x04: sd v0,0(sp)
+ 0xffa30008, // 0x08: sd v1,8(sp)
+ 0xffa40010, // 0x0c: sd a0,16(sp)
+ 0xffa50018, // 0x10: sd a1,24(sp)
+ 0xffa60020, // 0x14: sd a2,32(sp)
+ 0xffa70028, // 0x18: sd a3,40(sp)
+ 0xffa80030, // 0x1c: sd a4,48(sp)
+ 0xffa90038, // 0x20: sd a5,56(sp)
+ 0xffaa0040, // 0x24: sd a6,64(sp)
+ 0xffab0048, // 0x28: sd a7,72(sp)
+ 0xffac0050, // 0x2c: sd t0,80(sp)
+ 0xffad0058, // 0x30: sd t1,88(sp)
+ 0xffae0060, // 0x34: sd t2,96(sp)
+ 0xffaf0068, // 0x38: sd t3,104(sp)
+ 0xffb00070, // 0x3c: sd s0,112(sp)
+ 0xffb10078, // 0x40: sd s1,120(sp)
+ 0xffb20080, // 0x44: sd s2,128(sp)
+ 0xffb30088, // 0x48: sd s3,136(sp)
+ 0xffb40090, // 0x4c: sd s4,144(sp)
+ 0xffb50098, // 0x50: sd s5,152(sp)
+ 0xffb600a0, // 0x54: sd s6,160(sp)
+ 0xffb700a8, // 0x58: sd s7,168(sp)
+ 0xffb800b0, // 0x5c: sd t8,176(sp)
+ 0xffb900b8, // 0x60: sd t9,184(sp)
+ 0xffbe00c0, // 0x64: sd fp,192(sp)
+ 0xffbf00c8, // 0x68: sd ra,200(sp)
+
+ // JIT re-entry ctx addr.
+ 0x00000000, // 0x6c: lui $a0,highest(ctx)
+ 0x00000000, // 0x70: daddiu $a0,$a0,higher(ctx)
+ 0x00000000, // 0x74: dsll $a0,$a0,16
+ 0x00000000, // 0x78: daddiu $a0,$a0,hi(ctx)
+ 0x00000000, // 0x7c: dsll $a0,$a0,16
+ 0x00000000, // 0x80: daddiu $a0,$a0,lo(ctx)
+
+ 0x03e02825, // 0x84: move $a1, $ra
+ 0x64a5ffdc, // 0x88: daddiu $a1,$a1,-36
+
+ // JIT re-entry fn addr:
+ 0x00000000, // 0x8c: lui $t9,highest(reentry)
+ 0x00000000, // 0x90: daddiu $t9,$t9,higher(reentry)
+ 0x00000000, // 0x94: dsll $t9,$t9,16
+ 0x00000000, // 0x98: daddiu $t9,$t9,hi(reentry)
+ 0x00000000, // 0x9c: dsll $t9,$t9,16
+ 0x00000000, // 0xa0: daddiu $t9,$t9,lo(reentry)
+ 0x0320f809, // 0xa4: jalr $t9
+ 0x00000000, // 0xa8: nop
+ 0xdfbf00c8, // 0xac: ld ra, 200(sp)
+ 0xdfbe00c0, // 0xb0: ld fp, 192(sp)
+ 0xdfb900b8, // 0xb4: ld t9, 184(sp)
+ 0xdfb800b0, // 0xb8: ld t8, 176(sp)
+ 0xdfb700a8, // 0xbc: ld s7, 168(sp)
+ 0xdfb600a0, // 0xc0: ld s6, 160(sp)
+ 0xdfb50098, // 0xc4: ld s5, 152(sp)
+ 0xdfb40090, // 0xc8: ld s4, 144(sp)
+ 0xdfb30088, // 0xcc: ld s3, 136(sp)
+ 0xdfb20080, // 0xd0: ld s2, 128(sp)
+ 0xdfb10078, // 0xd4: ld s1, 120(sp)
+ 0xdfb00070, // 0xd8: ld s0, 112(sp)
+ 0xdfaf0068, // 0xdc: ld t3, 104(sp)
+ 0xdfae0060, // 0xe0: ld t2, 96(sp)
+ 0xdfad0058, // 0xe4: ld t1, 88(sp)
+ 0xdfac0050, // 0xe8: ld t0, 80(sp)
+ 0xdfab0048, // 0xec: ld a7, 72(sp)
+ 0xdfaa0040, // 0xf0: ld a6, 64(sp)
+ 0xdfa90038, // 0xf4: ld a5, 56(sp)
+ 0xdfa80030, // 0xf8: ld a4, 48(sp)
+ 0xdfa70028, // 0xfc: ld a3, 40(sp)
+ 0xdfa60020, // 0x100: ld a2, 32(sp)
+ 0xdfa50018, // 0x104: ld a1, 24(sp)
+ 0xdfa40010, // 0x108: ld a0, 16(sp)
+ 0xdfa30008, // 0x10c: ld v1, 8(sp)
+ 0x67bd00d0, // 0x110: daddiu $sp,$sp,208
+ 0x0300f825, // 0x114: move $ra, $t8
+ 0x03200008, // 0x118: jr $t9
+ 0x0040c825, // 0x11c: move $t9, $v0
+ };
+
+ const unsigned ReentryFnAddrOffset = 0x8c; // JIT re-entry fn addr lui
+ const unsigned ReentryCtxAddrOffset = 0x6c; // JIT re-entry ctx addr lui
+
+ memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+
+ uint32_t ReentryCtxLUi =
+ 0x3c040000 | (((ReentryCtxAddr + 0x800080008000) >> 48) & 0xFFFF);
+ uint32_t ReentryCtxDADDiu =
+ 0x64840000 | (((ReentryCtxAddr + 0x80008000) >> 32) & 0xFFFF);
+ uint32_t ReentryCtxDSLL = 0x00042438;
+ uint32_t ReentryCtxDADDiu2 =
+ 0x64840000 | ((((ReentryCtxAddr + 0x8000) >> 16) & 0xFFFF));
+ uint32_t ReentryCtxDSLL2 = 0x00042438;
+ uint32_t ReentryCtxDADDiu3 = 0x64840000 | ((ReentryCtxAddr)&0xFFFF);
+
+ memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxLUi,
+ sizeof(ReentryCtxLUi));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 4), &ReentryCtxDADDiu,
+ sizeof(ReentryCtxDADDiu));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 8), &ReentryCtxDSLL,
+ sizeof(ReentryCtxDSLL));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 12), &ReentryCtxDADDiu2,
+ sizeof(ReentryCtxDADDiu2));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 16), &ReentryCtxDSLL2,
+ sizeof(ReentryCtxDSLL2));
+ memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 20), &ReentryCtxDADDiu3,
+ sizeof(ReentryCtxDADDiu3));
+
+ uint32_t ReentryFnLUi =
+ 0x3c190000 | (((ReentryFnAddr + 0x800080008000) >> 48) & 0xFFFF);
+
+ uint32_t ReentryFnDADDiu =
+ 0x67390000 | (((ReentryFnAddr + 0x80008000) >> 32) & 0xFFFF);
+
+ uint32_t ReentryFnDSLL = 0x0019cc38;
+
+ uint32_t ReentryFnDADDiu2 =
+ 0x67390000 | (((ReentryFnAddr + 0x8000) >> 16) & 0xFFFF);
+
+ uint32_t ReentryFnDSLL2 = 0x0019cc38;
+
+ uint32_t ReentryFnDADDiu3 = 0x67390000 | ((ReentryFnAddr)&0xFFFF);
+
+ memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnLUi,
+ sizeof(ReentryFnLUi));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 4), &ReentryFnDADDiu,
+ sizeof(ReentryFnDADDiu));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 8), &ReentryFnDSLL,
+ sizeof(ReentryFnDSLL));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 12), &ReentryFnDADDiu2,
+ sizeof(ReentryFnDADDiu2));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 16), &ReentryFnDSLL2,
+ sizeof(ReentryFnDSLL2));
+ memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 20), &ReentryFnDADDiu3,
+ sizeof(ReentryFnDADDiu3));
+}
+
+void OrcMips64::writeTrampolines(char *TrampolineBlockWorkingMem,
+ JITTargetAddress TrampolineBlockTargetAddress,
+ JITTargetAddress ResolverAddr,
+ unsigned NumTrampolines) {
+
+ uint32_t *Trampolines =
+ reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+
+ uint64_t HighestAddr = ((ResolverAddr + 0x800080008000) >> 48);
+ uint64_t HigherAddr = ((ResolverAddr + 0x80008000) >> 32);
+ uint64_t HiAddr = ((ResolverAddr + 0x8000) >> 16);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I) {
+ Trampolines[10 * I + 0] = 0x03e0c025; // move $t8,$ra
+ Trampolines[10 * I + 1] = 0x3c190000 | (HighestAddr & 0xFFFF); // lui $t9,%highest(resolveAddr)
+ Trampolines[10 * I + 2] = 0x67390000 | (HigherAddr & 0xFFFF); // daddiu $t9,$t9,%higher(resolveAddr)
+ Trampolines[10 * I + 3] = 0x0019cc38; // dsll $t9,$t9,16
+ Trampolines[10 * I + 4] = 0x67390000 | (HiAddr & 0xFFFF); // daddiu $t9,$t9,%hi(resolveAddr)
+ Trampolines[10 * I + 5] = 0x0019cc38; // dsll $t9,$t9,16
+ Trampolines[10 * I + 6] =
+ 0x67390000 | (ResolverAddr & 0xFFFF); // daddiu $t9,$t9,%lo(resolveAddr)
+ Trampolines[10 * I + 7] = 0x0320f809; // jalr $t9
+ Trampolines[10 * I + 8] = 0x00000000; // nop
+ Trampolines[10 * I + 9] = 0x00000000; // nop
+ }
+}
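
The repeated +0x8000, +0x80008000, and +0x800080008000 biases above exist because daddiu sign-extends its 16-bit immediate: each lower chunk may effectively subtract up to 0x8000 from the partial sum, and pre-biasing the next-higher chunk compensates for that borrow. A small standalone sketch (not part of the patch) that replays the emitted lui/daddiu/dsll sequence in C++ and checks that it reproduces the original address:

    #include <cassert>
    #include <cstdint>

    // Rebuilds Addr the way the emitted lui/daddiu/dsll sequence does.
    static uint64_t rebuild(uint64_t Addr) {
      uint64_t Highest = ((Addr + 0x800080008000ULL) >> 48) & 0xFFFF;
      uint64_t Higher = ((Addr + 0x80008000ULL) >> 32) & 0xFFFF;
      uint64_t Hi = ((Addr + 0x8000ULL) >> 16) & 0xFFFF;
      uint64_t Lo = Addr & 0xFFFF;
      uint64_t V = (uint64_t)((int64_t)(int16_t)Highest << 16); // lui (sign-extends)
      V += (uint64_t)(int64_t)(int16_t)Higher;                  // daddiu
      V <<= 16;                                                 // dsll 16
      V += (uint64_t)(int64_t)(int16_t)Hi;                      // daddiu
      V <<= 16;                                                 // dsll 16
      V += (uint64_t)(int64_t)(int16_t)Lo;                      // daddiu
      return V;
    }

    int main() {
      for (uint64_t A : {0x0ULL, 0x7fffffffffffULL, 0x123456789abcdef0ULL,
                         0xffffffffffff8000ULL})
        assert(rebuild(A) == A);
      return 0;
    }
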
+
+void OrcMips64::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+ JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // lui $t9,%highest(ptr1)
+ // daddiu $t9,$t9,%higher(ptr1)
+ // dsll $t9,$t9,16
+ // daddiu $t9,$t9,%hi(ptr1)
+ // dsll $t9,$t9,16
+ // ld $t9,%lo(ptr1)($t9)
+ // jr $t9
+ // nop
+ // stub2:
+ // lui $t9,%highest(ptr2)
+ // daddiu $t9,$t9,%higher(ptr2)
+ // dsll $t9,$t9,16
+ // daddiu $t9,$t9,%hi(ptr2)
+ // dsll $t9,$t9,16
+ // ld $t9,%lo(ptr2)($t9)
+ // jr $t9
+ // nop
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .dword 0x0
+ // ptr2:
+ // .dword 0x0
+ //
+ // ...
+
+ assert(stubAndPointerRangesOk<OrcMips64>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+
+ // Populate the stubs page. (Mapping it executable is the caller's job.)
+ uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+ uint64_t PtrAddr = PointersBlockTargetAddress;
+
+ for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 8) {
+ uint64_t HeighestAddr = ((PtrAddr + 0x800080008000) >> 48);
+ uint64_t HeigherAddr = ((PtrAddr + 0x80008000) >> 32);
+ uint64_t HiAddr = ((PtrAddr + 0x8000) >> 16);
+ Stub[8 * I + 0] = 0x3c190000 | (HeighestAddr & 0xFFFF); // lui $t9,ptr1
+ Stub[8 * I + 1] = 0x67390000 | (HeigherAddr & 0xFFFF); // daddiu $t9,$t9,%higher(ptr)
+ Stub[8 * I + 2] = 0x0019cc38; // dsll $t9,$t9,16
+ Stub[8 * I + 3] = 0x67390000 | (HiAddr & 0xFFFF); // daddiu $t9,$t9,%hi(ptr)
+ Stub[8 * I + 4] = 0x0019cc38; // dsll $t9,$t9,16
+ Stub[8 * I + 5] = 0xdf390000 | (PtrAddr & 0xFFFF); // ld $t9,%lo(ptr)
+ Stub[8 * I + 6] = 0x03200008; // jr $t9
+ Stub[8 * I + 7] = 0x00000000; // nop
+ }
+}
+} // End namespace orc.
+} // End namespace llvm.
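
The writers above only fill caller-provided working memory. A hypothetical driver (not part of the patch; the function name is illustrative, and the buffer sizes are taken from the listings: 0x120 bytes for the Mips64 resolver, ten 4-byte words per trampoline) might look like this, with the step of mapping the buffers executable at their target addresses left to a JIT memory manager:

    #include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
    #include <cstdint>
    #include <vector>

    using namespace llvm;
    using namespace llvm::orc;

    void emitResolverAndTrampolines(JITTargetAddress ResolverTarget,
                                    JITTargetAddress TrampolineTarget,
                                    JITTargetAddress ReentryFn,
                                    JITTargetAddress ReentryCtx,
                                    unsigned NumTrampolines) {
      // 0x120 bytes: the Mips64 resolver listing ends at offset 0x11c.
      std::vector<char> Resolver(0x120);
      OrcMips64::writeResolverCode(Resolver.data(), ResolverTarget, ReentryFn,
                                   ReentryCtx);

      // Ten 4-byte instructions per trampoline.
      std::vector<char> Trampolines(NumTrampolines * 10 * sizeof(uint32_t));
      OrcMips64::writeTrampolines(Trampolines.data(), TrampolineTarget,
                                  ResolverTarget, NumTrampolines);

      // A real client would now copy both buffers to their target addresses
      // and map them executable (e.g. via a JIT memory manager).
    }
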
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
new file mode 100644
index 00000000000..834d4cc8f51
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
@@ -0,0 +1,529 @@
+//===--------------- OrcV2CBindings.cpp - C bindings OrcV2 APIs -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/LLJIT.h"
+#include "llvm-c/Orc.h"
+#include "llvm-c/OrcEE.h"
+#include "llvm-c/TargetMachine.h"
+
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace llvm {
+namespace orc {
+
+class InProgressLookupState;
+
+class OrcV2CAPIHelper {
+public:
+ using PoolEntry = SymbolStringPtr::PoolEntry;
+ using PoolEntryPtr = SymbolStringPtr::PoolEntryPtr;
+
+ static PoolEntryPtr releaseSymbolStringPtr(SymbolStringPtr S) {
+ PoolEntryPtr Result = nullptr;
+ std::swap(Result, S.S);
+ return Result;
+ }
+
+ static SymbolStringPtr retainSymbolStringPtr(PoolEntryPtr P) {
+ return SymbolStringPtr(P);
+ }
+
+ static PoolEntryPtr getRawPoolEntryPtr(const SymbolStringPtr &S) {
+ return S.S;
+ }
+
+ static void retainPoolEntry(PoolEntryPtr P) {
+ SymbolStringPtr S(P);
+ S.S = nullptr;
+ }
+
+ static void releasePoolEntry(PoolEntryPtr P) {
+ SymbolStringPtr S;
+ S.S = P;
+ }
+
+ static InProgressLookupState *extractLookupState(LookupState &LS) {
+ return LS.IPLS.release();
+ }
+
+ static void resetLookupState(LookupState &LS, InProgressLookupState *IPLS) {
+ return LS.reset(IPLS);
+ }
+};
+
+} // namespace orc
+} // namespace llvm
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionSession, LLVMOrcExecutionSessionRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(SymbolStringPool, LLVMOrcSymbolStringPoolRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(OrcV2CAPIHelper::PoolEntry,
+ LLVMOrcSymbolStringPoolEntryRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MaterializationUnit,
+ LLVMOrcMaterializationUnitRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITDylib, LLVMOrcJITDylibRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ResourceTracker, LLVMOrcResourceTrackerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DefinitionGenerator,
+ LLVMOrcDefinitionGeneratorRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(InProgressLookupState, LLVMOrcLookupStateRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ThreadSafeContext,
+ LLVMOrcThreadSafeContextRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ThreadSafeModule, LLVMOrcThreadSafeModuleRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITTargetMachineBuilder,
+ LLVMOrcJITTargetMachineBuilderRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ObjectLayer, LLVMOrcObjectLayerRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLJITBuilder, LLVMOrcLLJITBuilderRef)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLJIT, LLVMOrcLLJITRef)
+
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
+
+namespace llvm {
+namespace orc {
+
+class CAPIDefinitionGenerator final : public DefinitionGenerator {
+public:
+ CAPIDefinitionGenerator(
+ void *Ctx,
+ LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction TryToGenerate)
+ : Ctx(Ctx), TryToGenerate(TryToGenerate) {}
+
+ Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags,
+ const SymbolLookupSet &LookupSet) override {
+
+ // Take the lookup state.
+ LLVMOrcLookupStateRef LSR = ::wrap(OrcV2CAPIHelper::extractLookupState(LS));
+
+ // Translate the lookup kind.
+ LLVMOrcLookupKind CLookupKind;
+ switch (K) {
+ case LookupKind::Static:
+ CLookupKind = LLVMOrcLookupKindStatic;
+ break;
+ case LookupKind::DLSym:
+ CLookupKind = LLVMOrcLookupKindDLSym;
+ break;
+ }
+
+ // Translate the JITDylibSearchFlags.
+ LLVMOrcJITDylibLookupFlags CJDLookupFlags;
+ switch (JDLookupFlags) {
+ case JITDylibLookupFlags::MatchExportedSymbolsOnly:
+ CJDLookupFlags = LLVMOrcJITDylibLookupFlagsMatchExportedSymbolsOnly;
+ break;
+ case JITDylibLookupFlags::MatchAllSymbols:
+ CJDLookupFlags = LLVMOrcJITDylibLookupFlagsMatchAllSymbols;
+ break;
+ }
+
+ // Translate the lookup set.
+ std::vector<LLVMOrcCLookupSetElement> CLookupSet;
+ CLookupSet.reserve(LookupSet.size());
+ for (auto &KV : LookupSet) {
+ LLVMOrcSymbolLookupFlags SLF;
+ LLVMOrcSymbolStringPoolEntryRef Name =
+ ::wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(KV.first));
+ switch (KV.second) {
+ case SymbolLookupFlags::RequiredSymbol:
+ SLF = LLVMOrcSymbolLookupFlagsRequiredSymbol;
+ break;
+ case SymbolLookupFlags::WeaklyReferencedSymbol:
+ SLF = LLVMOrcSymbolLookupFlagsWeaklyReferencedSymbol;
+ break;
+ }
+ CLookupSet.push_back({Name, SLF});
+ }
+
+ // Run the C TryToGenerate function.
+ auto Err = unwrap(TryToGenerate(::wrap(this), Ctx, &LSR, CLookupKind,
+ ::wrap(&JD), CJDLookupFlags,
+ CLookupSet.data(), CLookupSet.size()));
+
+ // Restore the lookup state.
+ OrcV2CAPIHelper::resetLookupState(LS, ::unwrap(LSR));
+
+ return Err;
+ }
+
+private:
+ void *Ctx;
+ LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction TryToGenerate;
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+void LLVMOrcExecutionSessionSetErrorReporter(
+ LLVMOrcExecutionSessionRef ES, LLVMOrcErrorReporterFunction ReportError,
+ void *Ctx) {
+ unwrap(ES)->setErrorReporter(
+ [=](Error Err) { ReportError(Ctx, wrap(std::move(Err))); });
+}
+
+LLVMOrcSymbolStringPoolRef
+LLVMOrcExecutionSessionGetSymbolStringPool(LLVMOrcExecutionSessionRef ES) {
+ return wrap(unwrap(ES)->getSymbolStringPool().get());
+}
+
+void LLVMOrcSymbolStringPoolClearDeadEntries(LLVMOrcSymbolStringPoolRef SSP) {
+ unwrap(SSP)->clearDeadEntries();
+}
+
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcExecutionSessionIntern(LLVMOrcExecutionSessionRef ES, const char *Name) {
+ return wrap(
+ OrcV2CAPIHelper::releaseSymbolStringPtr(unwrap(ES)->intern(Name)));
+}
+
+void LLVMOrcRetainSymbolStringPoolEntry(LLVMOrcSymbolStringPoolEntryRef S) {
+ OrcV2CAPIHelper::retainPoolEntry(unwrap(S));
+}
+
+void LLVMOrcReleaseSymbolStringPoolEntry(LLVMOrcSymbolStringPoolEntryRef S) {
+ OrcV2CAPIHelper::releasePoolEntry(unwrap(S));
+}
+
+const char *LLVMOrcSymbolStringPoolEntryStr(LLVMOrcSymbolStringPoolEntryRef S) {
+ return unwrap(S)->getKey().data();
+}
+
+LLVMOrcResourceTrackerRef
+LLVMOrcJITDylibCreateResourceTracker(LLVMOrcJITDylibRef JD) {
+ auto RT = unwrap(JD)->createResourceTracker();
+ // Retain the pointer for the C API client.
+ RT->Retain();
+ return wrap(RT.get());
+}
+
+LLVMOrcResourceTrackerRef
+LLVMOrcJITDylibGetDefaultResourceTracker(LLVMOrcJITDylibRef JD) {
+ auto RT = unwrap(JD)->getDefaultResourceTracker();
+ // The JITDylib owns the default tracker, so no extra retain is needed here.
+ return wrap(RT.get());
+}
+
+void LLVMOrcReleaseResourceTracker(LLVMOrcResourceTrackerRef RT) {
+ ResourceTrackerSP TmpRT(unwrap(RT));
+ TmpRT->Release();
+}
+
+void LLVMOrcResourceTrackerTransferTo(LLVMOrcResourceTrackerRef SrcRT,
+ LLVMOrcResourceTrackerRef DstRT) {
+ ResourceTrackerSP TmpRT(unwrap(SrcRT));
+ TmpRT->transferTo(*unwrap(DstRT));
+}
+
+LLVMErrorRef LLVMOrcResourceTrackerRemove(LLVMOrcResourceTrackerRef RT) {
+ ResourceTrackerSP TmpRT(unwrap(RT));
+ return wrap(TmpRT->remove());
+}
+
+void LLVMOrcDisposeDefinitionGenerator(LLVMOrcDefinitionGeneratorRef DG) {
+ std::unique_ptr<DefinitionGenerator> TmpDG(unwrap(DG));
+}
+
+void LLVMOrcDisposeMaterializationUnit(LLVMOrcMaterializationUnitRef MU) {
+ std::unique_ptr<MaterializationUnit> TmpMU(unwrap(MU));
+}
+
+LLVMOrcMaterializationUnitRef
+LLVMOrcAbsoluteSymbols(LLVMOrcCSymbolMapPairs Syms, size_t NumPairs) {
+ SymbolMap SM;
+ for (size_t I = 0; I != NumPairs; ++I) {
+ JITSymbolFlags Flags;
+
+ if (Syms[I].Sym.Flags.GenericFlags & LLVMJITSymbolGenericFlagsExported)
+ Flags |= JITSymbolFlags::Exported;
+ if (Syms[I].Sym.Flags.GenericFlags & LLVMJITSymbolGenericFlagsWeak)
+ Flags |= JITSymbolFlags::Weak;
+
+ Flags.getTargetFlags() = Syms[I].Sym.Flags.TargetFlags;
+
+ SM[OrcV2CAPIHelper::retainSymbolStringPtr(unwrap(Syms[I].Name))] =
+ JITEvaluatedSymbol(Syms[I].Sym.Address, Flags);
+ }
+
+ return wrap(absoluteSymbols(std::move(SM)).release());
+}
+
+LLVMOrcJITDylibRef
+LLVMOrcExecutionSessionCreateBareJITDylib(LLVMOrcExecutionSessionRef ES,
+ const char *Name) {
+ return wrap(&unwrap(ES)->createBareJITDylib(Name));
+}
+
+LLVMErrorRef
+LLVMOrcExecutionSessionCreateJITDylib(LLVMOrcExecutionSessionRef ES,
+ LLVMOrcJITDylibRef *Result,
+ const char *Name) {
+ auto JD = unwrap(ES)->createJITDylib(Name);
+ if (!JD)
+ return wrap(JD.takeError());
+ *Result = wrap(&*JD);
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcJITDylibRef
+LLVMOrcExecutionSessionGetJITDylibByName(LLVMOrcExecutionSessionRef ES,
+ const char *Name) {
+ return wrap(unwrap(ES)->getJITDylibByName(Name));
+}
+
+LLVMErrorRef LLVMOrcJITDylibDefine(LLVMOrcJITDylibRef JD,
+ LLVMOrcMaterializationUnitRef MU) {
+ std::unique_ptr<MaterializationUnit> TmpMU(unwrap(MU));
+
+ if (auto Err = unwrap(JD)->define(TmpMU)) {
+ TmpMU.release();
+ return wrap(std::move(Err));
+ }
+ return LLVMErrorSuccess;
+}
+
+LLVMErrorRef LLVMOrcJITDylibClear(LLVMOrcJITDylibRef JD) {
+ return wrap(unwrap(JD)->clear());
+}
+
+void LLVMOrcJITDylibAddGenerator(LLVMOrcJITDylibRef JD,
+ LLVMOrcDefinitionGeneratorRef DG) {
+ unwrap(JD)->addGenerator(std::unique_ptr<DefinitionGenerator>(unwrap(DG)));
+}
+
+LLVMOrcDefinitionGeneratorRef LLVMOrcCreateCustomCAPIDefinitionGenerator(
+ LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction F, void *Ctx) {
+ auto DG = std::make_unique<CAPIDefinitionGenerator>(Ctx, F);
+ return wrap(DG.release());
+}
+
+LLVMErrorRef LLVMOrcCreateDynamicLibrarySearchGeneratorForProcess(
+ LLVMOrcDefinitionGeneratorRef *Result, char GlobalPrefix,
+ LLVMOrcSymbolPredicate Filter, void *FilterCtx) {
+ assert(Result && "Result can not be null");
+ assert((Filter || !FilterCtx) &&
+ "if Filter is null then FilterCtx must also be null");
+
+ DynamicLibrarySearchGenerator::SymbolPredicate Pred;
+ if (Filter)
+ Pred = [=](const SymbolStringPtr &Name) -> bool {
+ return Filter(FilterCtx, wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(Name)));
+ };
+
+ auto ProcessSymsGenerator =
+ DynamicLibrarySearchGenerator::GetForCurrentProcess(GlobalPrefix, Pred);
+
+ if (!ProcessSymsGenerator) {
+ *Result = 0;
+ return wrap(ProcessSymsGenerator.takeError());
+ }
+
+ *Result = wrap(ProcessSymsGenerator->release());
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcThreadSafeContextRef LLVMOrcCreateNewThreadSafeContext(void) {
+ return wrap(new ThreadSafeContext(std::make_unique<LLVMContext>()));
+}
+
+LLVMContextRef
+LLVMOrcThreadSafeContextGetContext(LLVMOrcThreadSafeContextRef TSCtx) {
+ return wrap(unwrap(TSCtx)->getContext());
+}
+
+void LLVMOrcDisposeThreadSafeContext(LLVMOrcThreadSafeContextRef TSCtx) {
+ delete unwrap(TSCtx);
+}
+
+LLVMOrcThreadSafeModuleRef
+LLVMOrcCreateNewThreadSafeModule(LLVMModuleRef M,
+ LLVMOrcThreadSafeContextRef TSCtx) {
+ return wrap(
+ new ThreadSafeModule(std::unique_ptr<Module>(unwrap(M)), *unwrap(TSCtx)));
+}
+
+void LLVMOrcDisposeThreadSafeModule(LLVMOrcThreadSafeModuleRef TSM) {
+ delete unwrap(TSM);
+}
+
+LLVMErrorRef LLVMOrcJITTargetMachineBuilderDetectHost(
+ LLVMOrcJITTargetMachineBuilderRef *Result) {
+ assert(Result && "Result can not be null");
+
+ auto JTMB = JITTargetMachineBuilder::detectHost();
+ if (!JTMB) {
+ *Result = 0;
+ return wrap(JTMB.takeError());
+ }
+
+ *Result = wrap(new JITTargetMachineBuilder(std::move(*JTMB)));
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcJITTargetMachineBuilderRef
+LLVMOrcJITTargetMachineBuilderCreateFromTargetMachine(LLVMTargetMachineRef TM) {
+ auto *TemplateTM = unwrap(TM);
+
+ auto JTMB =
+ std::make_unique<JITTargetMachineBuilder>(TemplateTM->getTargetTriple());
+
+ (*JTMB)
+ .setCPU(TemplateTM->getTargetCPU().str())
+ .setRelocationModel(TemplateTM->getRelocationModel())
+ .setCodeModel(TemplateTM->getCodeModel())
+ .setCodeGenOptLevel(TemplateTM->getOptLevel())
+ .setFeatures(TemplateTM->getTargetFeatureString())
+ .setOptions(TemplateTM->Options);
+
+ LLVMDisposeTargetMachine(TM);
+
+ return wrap(JTMB.release());
+}
+
+void LLVMOrcDisposeJITTargetMachineBuilder(
+ LLVMOrcJITTargetMachineBuilderRef JTMB) {
+ delete unwrap(JTMB);
+}
+
+void LLVMOrcDisposeObjectLayer(LLVMOrcObjectLayerRef ObjLayer) {
+ delete unwrap(ObjLayer);
+}
+
+LLVMOrcLLJITBuilderRef LLVMOrcCreateLLJITBuilder(void) {
+ return wrap(new LLJITBuilder());
+}
+
+void LLVMOrcDisposeLLJITBuilder(LLVMOrcLLJITBuilderRef Builder) {
+ delete unwrap(Builder);
+}
+
+void LLVMOrcLLJITBuilderSetJITTargetMachineBuilder(
+ LLVMOrcLLJITBuilderRef Builder, LLVMOrcJITTargetMachineBuilderRef JTMB) {
+ unwrap(Builder)->setJITTargetMachineBuilder(*unwrap(JTMB));
+}
+
+void LLVMOrcLLJITBuilderSetObjectLinkingLayerCreator(
+ LLVMOrcLLJITBuilderRef Builder,
+ LLVMOrcLLJITBuilderObjectLinkingLayerCreatorFunction F, void *Ctx) {
+ unwrap(Builder)->setObjectLinkingLayerCreator(
+ [=](ExecutionSession &ES, const Triple &TT) {
+ auto TTStr = TT.str();
+ return std::unique_ptr<ObjectLayer>(
+ unwrap(F(Ctx, wrap(&ES), TTStr.c_str())));
+ });
+}
+
+LLVMErrorRef LLVMOrcCreateLLJIT(LLVMOrcLLJITRef *Result,
+ LLVMOrcLLJITBuilderRef Builder) {
+ assert(Result && "Result can not be null");
+
+ if (!Builder)
+ Builder = LLVMOrcCreateLLJITBuilder();
+
+ auto J = unwrap(Builder)->create();
+ LLVMOrcDisposeLLJITBuilder(Builder);
+
+ if (!J) {
+ *Result = 0;
+ return wrap(J.takeError());
+ }
+
+ *Result = wrap(J->release());
+ return LLVMErrorSuccess;
+}
+
+LLVMErrorRef LLVMOrcDisposeLLJIT(LLVMOrcLLJITRef J) {
+ delete unwrap(J);
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcExecutionSessionRef LLVMOrcLLJITGetExecutionSession(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getExecutionSession());
+}
+
+LLVMOrcJITDylibRef LLVMOrcLLJITGetMainJITDylib(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getMainJITDylib());
+}
+
+const char *LLVMOrcLLJITGetTripleString(LLVMOrcLLJITRef J) {
+ return unwrap(J)->getTargetTriple().getTriple().c_str();
+}
+
+char LLVMOrcLLJITGetGlobalPrefix(LLVMOrcLLJITRef J) {
+ return unwrap(J)->getDataLayout().getGlobalPrefix();
+}
+
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcLLJITMangleAndIntern(LLVMOrcLLJITRef J, const char *UnmangledName) {
+ return wrap(OrcV2CAPIHelper::releaseSymbolStringPtr(
+ unwrap(J)->mangleAndIntern(UnmangledName)));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddObjectFile(LLVMOrcLLJITRef J, LLVMOrcJITDylibRef JD,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(unwrap(J)->addObjectFile(
+ *unwrap(JD), std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddObjectFileWithRT(LLVMOrcLLJITRef J,
+ LLVMOrcResourceTrackerRef RT,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(unwrap(J)->addObjectFile(
+ ResourceTrackerSP(unwrap(RT)),
+ std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddLLVMIRModule(LLVMOrcLLJITRef J,
+ LLVMOrcJITDylibRef JD,
+ LLVMOrcThreadSafeModuleRef TSM) {
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ return wrap(unwrap(J)->addIRModule(*unwrap(JD), std::move(*TmpTSM)));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddLLVMIRModuleWithRT(LLVMOrcLLJITRef J,
+ LLVMOrcResourceTrackerRef RT,
+ LLVMOrcThreadSafeModuleRef TSM) {
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ return wrap(unwrap(J)->addIRModule(ResourceTrackerSP(unwrap(RT)),
+ std::move(*TmpTSM)));
+}
+
+LLVMErrorRef LLVMOrcLLJITLookup(LLVMOrcLLJITRef J,
+ LLVMOrcJITTargetAddress *Result,
+ const char *Name) {
+ assert(Result && "Result can not be null");
+
+ auto Sym = unwrap(J)->lookup(Name);
+ if (!Sym) {
+ *Result = 0;
+ return wrap(Sym.takeError());
+ }
+
+ *Result = Sym->getAddress();
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcObjectLayerRef
+LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(
+ LLVMOrcExecutionSessionRef ES) {
+ assert(ES && "ES must not be null");
+ return wrap(new RTDyldObjectLinkingLayer(
+ *unwrap(ES), [] { return std::make_unique<SectionMemoryManager>(); }));
+}
+
+void LLVMOrcRTDyldObjectLinkingLayerRegisterJITEventListener(
+ LLVMOrcObjectLayerRef RTDyldObjLinkingLayer,
+ LLVMJITEventListenerRef Listener) {
+ assert(RTDyldObjLinkingLayer && "RTDyldObjLinkingLayer must not be null");
+ assert(Listener && "Listener must not be null");
+ reinterpret_cast<RTDyldObjectLinkingLayer *>(unwrap(RTDyldObjLinkingLayer))
+ ->registerJITEventListener(*unwrap(Listener));
+}
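
A minimal consumer sketch of the C bindings defined in this file (not part of the patch): target initialization via LLVMInitializeNativeTarget and friends is elided, and the lookup of "main" assumes a module defining it was added first, so treat this purely as an illustration of the call sequence.

    #include "llvm-c/Error.h"
    #include "llvm-c/LLJIT.h"
    #include <stdio.h>

    int main(void) {
      // LLVMInitializeNativeTarget()/LLVMInitializeNativeAsmPrinter()
      // must have been called before creating the JIT; elided here.
      LLVMOrcLLJITRef J;
      if (LLVMErrorRef Err = LLVMOrcCreateLLJIT(&J, NULL)) { // NULL: defaults
        char *Msg = LLVMGetErrorMessage(Err);
        fprintf(stderr, "could not create LLJIT: %s\n", Msg);
        LLVMDisposeErrorMessage(Msg);
        return 1;
      }

      // ... add ThreadSafeModules or object files here ...

      LLVMOrcJITTargetAddress MainAddr = 0;
      if (LLVMErrorRef Err = LLVMOrcLLJITLookup(J, &MainAddr, "main")) {
        char *Msg = LLVMGetErrorMessage(Err);
        fprintf(stderr, "lookup failed: %s\n", Msg);
        LLVMDisposeErrorMessage(Msg);
      }

      LLVMOrcDisposeLLJIT(J);
      return 0;
    }
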
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp
new file mode 100644
index 00000000000..0ad666ebbeb
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp
@@ -0,0 +1,351 @@
+//===-- RTDyldObjectLinkingLayer.cpp - RuntimeDyld backed ORC ObjectLayer -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/Object/COFF.h"
+
+namespace {
+
+using namespace llvm;
+using namespace llvm::orc;
+
+class JITDylibSearchOrderResolver : public JITSymbolResolver {
+public:
+ JITDylibSearchOrderResolver(MaterializationResponsibility &MR) : MR(MR) {}
+
+ void lookup(const LookupSet &Symbols, OnResolvedFunction OnResolved) override {
+ auto &ES = MR.getTargetJITDylib().getExecutionSession();
+ SymbolLookupSet InternedSymbols;
+
+ // Intern the requested symbols: lookup takes interned strings.
+ for (auto &S : Symbols)
+ InternedSymbols.add(ES.intern(S));
+
+ // Build an OnResolve callback to unwrap the interned strings and pass them
+ // to the OnResolved callback.
+ auto OnResolvedWithUnwrap =
+ [OnResolved = std::move(OnResolved)](
+ Expected<SymbolMap> InternedResult) mutable {
+ if (!InternedResult) {
+ OnResolved(InternedResult.takeError());
+ return;
+ }
+
+ LookupResult Result;
+ for (auto &KV : *InternedResult)
+ Result[*KV.first] = std::move(KV.second);
+ OnResolved(Result);
+ };
+
+ // Register dependencies for all symbols contained in this set.
+ auto RegisterDependencies = [&](const SymbolDependenceMap &Deps) {
+ MR.addDependenciesForAll(Deps);
+ };
+
+ JITDylibSearchOrder LinkOrder;
+ MR.getTargetJITDylib().withLinkOrderDo(
+ [&](const JITDylibSearchOrder &LO) { LinkOrder = LO; });
+ ES.lookup(LookupKind::Static, LinkOrder, InternedSymbols,
+ SymbolState::Resolved, std::move(OnResolvedWithUnwrap),
+ RegisterDependencies);
+ }
+
+ Expected<LookupSet> getResponsibilitySet(const LookupSet &Symbols) override {
+ LookupSet Result;
+
+ for (auto &KV : MR.getSymbols()) {
+ if (Symbols.count(*KV.first))
+ Result.insert(*KV.first);
+ }
+
+ return Result;
+ }
+
+private:
+ MaterializationResponsibility &MR;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+RTDyldObjectLinkingLayer::RTDyldObjectLinkingLayer(
+ ExecutionSession &ES, GetMemoryManagerFunction GetMemoryManager)
+ : ObjectLayer(ES), GetMemoryManager(GetMemoryManager) {
+ ES.registerResourceManager(*this);
+}
+
+RTDyldObjectLinkingLayer::~RTDyldObjectLinkingLayer() {
+ assert(MemMgrs.empty() && "Layer destroyed with resources still attached");
+}
+
+void RTDyldObjectLinkingLayer::emit(
+ std::unique_ptr<MaterializationResponsibility> R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Object must not be null");
+
+ auto &ES = getExecutionSession();
+
+ auto Obj = object::ObjectFile::createObjectFile(*O);
+
+ if (!Obj) {
+ getExecutionSession().reportError(Obj.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ // Collect the internal symbols from the object file: We will need to
+ // filter these later.
+ auto InternalSymbols = std::make_shared<std::set<StringRef>>();
+ {
+ for (auto &Sym : (*Obj)->symbols()) {
+
+ // Skip file symbols.
+ if (auto SymType = Sym.getType()) {
+ if (*SymType == object::SymbolRef::ST_File)
+ continue;
+ } else {
+ ES.reportError(SymType.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+ if (!SymFlagsOrErr) {
+ // TODO: Test this error.
+ ES.reportError(SymFlagsOrErr.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ // Don't include symbols that aren't global.
+ if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global)) {
+ if (auto SymName = Sym.getName())
+ InternalSymbols->insert(*SymName);
+ else {
+ ES.reportError(SymName.takeError());
+ R->failMaterialization();
+ return;
+ }
+ }
+ }
+ }
+
+ auto MemMgr = GetMemoryManager();
+ auto &MemMgrRef = *MemMgr;
+
+ // Switch to shared ownership of MR so that it can be captured by both
+ // lambdas below.
+ std::shared_ptr<MaterializationResponsibility> SharedR(std::move(R));
+
+ JITDylibSearchOrderResolver Resolver(*SharedR);
+
+ jitLinkForORC(
+ object::OwningBinary<object::ObjectFile>(std::move(*Obj), std::move(O)),
+ MemMgrRef, Resolver, ProcessAllSections,
+ [this, SharedR, &MemMgrRef, InternalSymbols](
+ const object::ObjectFile &Obj,
+ RuntimeDyld::LoadedObjectInfo &LoadedObjInfo,
+ std::map<StringRef, JITEvaluatedSymbol> ResolvedSymbols) {
+ return onObjLoad(*SharedR, Obj, MemMgrRef, LoadedObjInfo,
+ ResolvedSymbols, *InternalSymbols);
+ },
+ [this, SharedR, MemMgr = std::move(MemMgr)](
+ object::OwningBinary<object::ObjectFile> Obj,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo,
+ Error Err) mutable {
+ onObjEmit(*SharedR, std::move(Obj), std::move(MemMgr),
+ std::move(LoadedObjInfo), std::move(Err));
+ });
+}
+
+void RTDyldObjectLinkingLayer::registerJITEventListener(JITEventListener &L) {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ assert(!llvm::is_contained(EventListeners, &L) &&
+ "Listener has already been registered");
+ EventListeners.push_back(&L);
+}
+
+void RTDyldObjectLinkingLayer::unregisterJITEventListener(JITEventListener &L) {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ auto I = llvm::find(EventListeners, &L);
+ assert(I != EventListeners.end() && "Listener not registered");
+ EventListeners.erase(I);
+}
+
+Error RTDyldObjectLinkingLayer::onObjLoad(
+ MaterializationResponsibility &R, const object::ObjectFile &Obj,
+ RuntimeDyld::MemoryManager &MemMgr,
+ RuntimeDyld::LoadedObjectInfo &LoadedObjInfo,
+ std::map<StringRef, JITEvaluatedSymbol> Resolved,
+ std::set<StringRef> &InternalSymbols) {
+ SymbolFlagsMap ExtraSymbolsToClaim;
+ SymbolMap Symbols;
+
+ // Hack to support COFF constant pool comdats introduced during compilation:
+ // (See http://llvm.org/PR40074)
+ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(&Obj)) {
+ auto &ES = getExecutionSession();
+
+ // For all resolved symbols that are not already in the responsibility set:
+ // check whether the symbol is in a comdat section and if so mark it as
+ // weak.
+ for (auto &Sym : COFFObj->symbols()) {
+ // getFlags() on COFF symbols can't fail.
+ uint32_t SymFlags = cantFail(Sym.getFlags());
+ if (SymFlags & object::BasicSymbolRef::SF_Undefined)
+ continue;
+ auto Name = Sym.getName();
+ if (!Name)
+ return Name.takeError();
+ auto I = Resolved.find(*Name);
+
+ // Skip unresolved symbols, internal symbols, and symbols that are
+ // already in the responsibility set.
+ if (I == Resolved.end() || InternalSymbols.count(*Name) ||
+ R.getSymbols().count(ES.intern(*Name)))
+ continue;
+ auto Sec = Sym.getSection();
+ if (!Sec)
+ return Sec.takeError();
+ if (*Sec == COFFObj->section_end())
+ continue;
+ auto &COFFSec = *COFFObj->getCOFFSection(**Sec);
+ if (COFFSec.Characteristics & COFF::IMAGE_SCN_LNK_COMDAT)
+ I->second.setFlags(I->second.getFlags() | JITSymbolFlags::Weak);
+ }
+ }
+
+ for (auto &KV : Resolved) {
+ // Scan the symbols and add them to the Symbols map for resolution.
+
+ // We never claim internal symbols.
+ if (InternalSymbols.count(KV.first))
+ continue;
+
+ auto InternedName = getExecutionSession().intern(KV.first);
+ auto Flags = KV.second.getFlags();
+
+ // Override object flags and claim responsibility for symbols if
+ // requested.
+ if (OverrideObjectFlags || AutoClaimObjectSymbols) {
+ auto I = R.getSymbols().find(InternedName);
+
+ if (OverrideObjectFlags && I != R.getSymbols().end())
+ Flags = I->second;
+ else if (AutoClaimObjectSymbols && I == R.getSymbols().end())
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+
+ Symbols[InternedName] = JITEvaluatedSymbol(KV.second.getAddress(), Flags);
+ }
+
+ if (!ExtraSymbolsToClaim.empty()) {
+ if (auto Err = R.defineMaterializing(ExtraSymbolsToClaim))
+ return Err;
+
+ // If we claimed responsibility for any weak symbols but were rejected then
+ // we need to remove them from the resolved set.
+ for (auto &KV : ExtraSymbolsToClaim)
+ if (KV.second.isWeak() && !R.getSymbols().count(KV.first))
+ Symbols.erase(KV.first);
+ }
+
+ if (auto Err = R.notifyResolved(Symbols)) {
+ R.failMaterialization();
+ return Err;
+ }
+
+ if (NotifyLoaded)
+ NotifyLoaded(R, Obj, LoadedObjInfo);
+
+ return Error::success();
+}
+
+void RTDyldObjectLinkingLayer::onObjEmit(
+ MaterializationResponsibility &R,
+ object::OwningBinary<object::ObjectFile> O,
+ std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo, Error Err) {
+ if (Err) {
+ getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ return;
+ }
+
+ if (auto Err = R.notifyEmitted()) {
+ getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ return;
+ }
+
+ std::unique_ptr<object::ObjectFile> Obj;
+ std::unique_ptr<MemoryBuffer> ObjBuffer;
+ std::tie(Obj, ObjBuffer) = O.takeBinary();
+
+ // Run EventListener notifyLoaded callbacks.
+ {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ for (auto *L : EventListeners)
+ L->notifyObjectLoaded(pointerToJITTargetAddress(MemMgr.get()), *Obj,
+ *LoadedObjInfo);
+ }
+
+ if (NotifyEmitted)
+ NotifyEmitted(R, std::move(ObjBuffer));
+
+ if (auto Err = R.withResourceKeyDo(
+ [&](ResourceKey K) { MemMgrs[K].push_back(std::move(MemMgr)); })) {
+ getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ }
+}
+
+Error RTDyldObjectLinkingLayer::handleRemoveResources(ResourceKey K) {
+
+ std::vector<MemoryManagerUP> MemMgrsToRemove;
+
+ getExecutionSession().runSessionLocked([&] {
+ auto I = MemMgrs.find(K);
+ if (I != MemMgrs.end()) {
+ std::swap(MemMgrsToRemove, I->second);
+ MemMgrs.erase(I);
+ }
+ });
+
+ {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ for (auto &MemMgr : MemMgrsToRemove) {
+ for (auto *L : EventListeners)
+ L->notifyFreeingObject(pointerToJITTargetAddress(MemMgr.get()));
+ MemMgr->deregisterEHFrames();
+ }
+ }
+
+ return Error::success();
+}
+
+void RTDyldObjectLinkingLayer::handleTransferResources(ResourceKey DstKey,
+ ResourceKey SrcKey) {
+ auto I = MemMgrs.find(SrcKey);
+ if (I != MemMgrs.end()) {
+ auto &SrcMemMgrs = I->second;
+ auto &DstMemMgrs = MemMgrs[DstKey];
+ DstMemMgrs.reserve(DstMemMgrs.size() + SrcMemMgrs.size());
+ for (auto &MemMgr : SrcMemMgrs)
+ DstMemMgrs.push_back(std::move(MemMgr));
+
+ // Erase SrcKey entry using value rather than iterator I: I may have been
+ // invalidated when we looked up DstKey.
+ MemMgrs.erase(SrcKey);
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
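
A brief sketch (not from the patch) of how this layer is typically instantiated through LLJITBuilder, mirroring the creator used by the C bindings above: each emitted object gets its own SectionMemoryManager.

    #include "llvm/ExecutionEngine/Orc/LLJIT.h"
    #include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
    #include "llvm/ExecutionEngine/SectionMemoryManager.h"

    using namespace llvm;
    using namespace llvm::orc;

    Expected<std::unique_ptr<LLJIT>> makeJIT() {
      return LLJITBuilder()
          .setObjectLinkingLayerCreator(
              [](ExecutionSession &ES, const Triple &) {
                // One SectionMemoryManager per emitted object.
                return std::unique_ptr<ObjectLayer>(
                    new RTDyldObjectLinkingLayer(ES, [] {
                      return std::make_unique<SectionMemoryManager>();
                    }));
              })
          .create();
    }
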
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/OrcError.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/OrcError.cpp
new file mode 100644
index 00000000000..fdad90cbcfb
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/OrcError.cpp
@@ -0,0 +1,120 @@
+//===---------------- OrcError.cpp - Error codes for ORC ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Error codes for ORC.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+
+#include <type_traits>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class OrcErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "orc"; }
+
+ std::string message(int condition) const override {
+ switch (static_cast<OrcErrorCode>(condition)) {
+ case OrcErrorCode::UnknownORCError:
+ return "Unknown ORC error";
+ case OrcErrorCode::DuplicateDefinition:
+ return "Duplicate symbol definition";
+ case OrcErrorCode::JITSymbolNotFound:
+ return "JIT symbol not found";
+ case OrcErrorCode::RemoteAllocatorDoesNotExist:
+ return "Remote allocator does not exist";
+ case OrcErrorCode::RemoteAllocatorIdAlreadyInUse:
+ return "Remote allocator Id already in use";
+ case OrcErrorCode::RemoteMProtectAddrUnrecognized:
+ return "Remote mprotect call references unallocated memory";
+ case OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist:
+ return "Remote indirect stubs owner does not exist";
+ case OrcErrorCode::RemoteIndirectStubsOwnerIdAlreadyInUse:
+ return "Remote indirect stubs owner Id already in use";
+ case OrcErrorCode::RPCConnectionClosed:
+ return "RPC connection closed";
+ case OrcErrorCode::RPCCouldNotNegotiateFunction:
+ return "Could not negotiate RPC function";
+ case OrcErrorCode::RPCResponseAbandoned:
+ return "RPC response abandoned";
+ case OrcErrorCode::UnexpectedRPCCall:
+ return "Unexpected RPC call";
+ case OrcErrorCode::UnexpectedRPCResponse:
+ return "Unexpected RPC response";
+ case OrcErrorCode::UnknownErrorCodeFromRemote:
+ return "Unknown error returned from remote RPC function "
+ "(Use StringError to get error message)";
+ case OrcErrorCode::UnknownResourceHandle:
+ return "Unknown resource handle";
+ case OrcErrorCode::MissingSymbolDefinitions:
+ return "Missing symbol definitions";
+ case OrcErrorCode::UnexpectedSymbolDefinitions:
+ return "Unexpected symbol definitions";
+ }
+ llvm_unreachable("Unhandled error code");
+ }
+};
+
+static ManagedStatic<OrcErrorCategory> OrcErrCat;
+} // namespace
+
+namespace llvm {
+namespace orc {
+
+char DuplicateDefinition::ID = 0;
+char JITSymbolNotFound::ID = 0;
+
+std::error_code orcError(OrcErrorCode ErrCode) {
+ typedef std::underlying_type<OrcErrorCode>::type UT;
+ return std::error_code(static_cast<UT>(ErrCode), *OrcErrCat);
+}
+
+DuplicateDefinition::DuplicateDefinition(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+std::error_code DuplicateDefinition::convertToErrorCode() const {
+ return orcError(OrcErrorCode::DuplicateDefinition);
+}
+
+void DuplicateDefinition::log(raw_ostream &OS) const {
+ OS << "Duplicate definition of symbol '" << SymbolName << "'";
+}
+
+const std::string &DuplicateDefinition::getSymbolName() const {
+ return SymbolName;
+}
+
+JITSymbolNotFound::JITSymbolNotFound(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+std::error_code JITSymbolNotFound::convertToErrorCode() const {
+ typedef std::underlying_type<OrcErrorCode>::type UT;
+ return std::error_code(static_cast<UT>(OrcErrorCode::JITSymbolNotFound),
+ *OrcErrCat);
+}
+
+void JITSymbolNotFound::log(raw_ostream &OS) const {
+ OS << "Could not find symbol '" << SymbolName << "'";
+}
+
+const std::string &JITSymbolNotFound::getSymbolName() const {
+ return SymbolName;
+}
+
+} // namespace orc
+} // namespace llvm
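
A short sketch (not part of the patch; the function name is hypothetical) of consuming these typed errors directly with handleErrors, as the FIXME above recommends, instead of round-tripping through std::error_code:

    #include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;
    using namespace llvm::orc;

    void report(Error Err) {
      // Dispatch on the concrete error type; unhandled errors fall through.
      Err = handleErrors(
          std::move(Err),
          [](const DuplicateDefinition &D) {
            errs() << "duplicate symbol: " << D.getSymbolName() << "\n";
          },
          [](const JITSymbolNotFound &J) {
            errs() << "missing symbol: " << J.getSymbolName() << "\n";
          });
      logAllUnhandledErrors(std::move(Err), errs(), "orc: ");
    }
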
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/RPCError.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/RPCError.cpp
new file mode 100644
index 00000000000..a55cb220f21
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/RPCError.cpp
@@ -0,0 +1,58 @@
+//===--------------- RPCError.cpp - RPCError implementation ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// RPC Error type implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/RPCUtils.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <string>
+#include <system_error>
+
+char llvm::orc::shared::RPCFatalError::ID = 0;
+char llvm::orc::shared::ConnectionClosed::ID = 0;
+char llvm::orc::shared::ResponseAbandoned::ID = 0;
+char llvm::orc::shared::CouldNotNegotiate::ID = 0;
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+std::error_code ConnectionClosed::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCConnectionClosed);
+}
+
+void ConnectionClosed::log(raw_ostream &OS) const {
+ OS << "RPC connection already closed";
+}
+
+std::error_code ResponseAbandoned::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCResponseAbandoned);
+}
+
+void ResponseAbandoned::log(raw_ostream &OS) const {
+ OS << "RPC response abandoned";
+}
+
+CouldNotNegotiate::CouldNotNegotiate(std::string Signature)
+ : Signature(std::move(Signature)) {}
+
+std::error_code CouldNotNegotiate::convertToErrorCode() const {
+ return orcError(OrcErrorCode::RPCCouldNotNegotiateFunction);
+}
+
+void CouldNotNegotiate::log(raw_ostream &OS) const {
+ OS << "Could not negotiate RPC function " << Signature;
+}
+
+} // end namespace shared
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.cpp
new file mode 100644
index 00000000000..52d11f0741d
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.cpp
@@ -0,0 +1,44 @@
+//===---------- TargetProcessControlTypes.cpp - Shared TPC types ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// TargetProcessControl types.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
+
+namespace llvm {
+namespace orc {
+namespace tpctypes {
+
+WrapperFunctionResult WrapperFunctionResult::from(StringRef S) {
+ CWrapperFunctionResult R;
+ zeroInit(R);
+ R.Size = S.size();
+ if (R.Size > sizeof(uint64_t)) {
+ R.Data.ValuePtr = new uint8_t[R.Size];
+ memcpy(R.Data.ValuePtr, S.data(), R.Size);
+ R.Destroy = destroyWithDeleteArray;
+ } else
+ memcpy(R.Data.Value, S.data(), R.Size);
+ return R;
+}
+
+void WrapperFunctionResult::destroyWithFree(CWrapperFunctionResultData Data,
+ uint64_t Size) {
+ free(Data.ValuePtr);
+}
+
+void WrapperFunctionResult::destroyWithDeleteArray(
+ CWrapperFunctionResultData Data, uint64_t Size) {
+ delete[] Data.ValuePtr;
+}
+
+} // end namespace tpctypes
+} // end namespace orc
+} // end namespace llvm
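
An illustrative note (not from the patch; the function name is hypothetical) on the small-buffer behaviour of from(): payloads of at most eight bytes live inline in the result's union, while longer payloads are heap-allocated and released through the Destroy hook.

    #include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"

    using namespace llvm::orc::tpctypes;

    void demo() {
      // Fits in the 8-byte inline buffer: no allocation, no Destroy hook.
      auto Small = WrapperFunctionResult::from("ok");
      // Exceeds 8 bytes: copied to a new[]'d buffer, with Destroy set to
      // destroyWithDeleteArray (see from() above).
      auto Large = WrapperFunctionResult::from("more than eight bytes");
      (void)Small;
      (void)Large; // Both release their storage when they go out of scope.
    }
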
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
new file mode 100644
index 00000000000..c2fa4466eab
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
@@ -0,0 +1,306 @@
+//===-- SpeculateAnalyses.cpp - Speculation analyses ------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/SpeculateAnalyses.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <algorithm>
+
+namespace {
+using namespace llvm;
+SmallVector<const BasicBlock *, 8> findBBwithCalls(const Function &F,
+ bool IndirectCall = false) {
+ SmallVector<const BasicBlock *, 8> BBs;
+
+ auto findCallInst = [&IndirectCall](const Instruction &I) {
+ if (auto Call = dyn_cast<CallBase>(&I))
+ return Call->isIndirectCall() ? IndirectCall : true;
+ else
+ return false;
+ };
+ for (auto &BB : F)
+ if (findCallInst(*BB.getTerminator()) ||
+ llvm::any_of(BB.instructionsWithoutDebug(), findCallInst))
+ BBs.emplace_back(&BB);
+
+ return BBs;
+}
+} // namespace
+
+// Implementations of Queries shouldn't need to lock resources such as
+// LLVMContext: each argument (Function) has a non-shared LLVMContext.
+// If Queries were stateful, a suitable locking scheme would be needed.
+namespace llvm {
+namespace orc {
+
+// Collect direct calls only
+void SpeculateQuery::findCalles(const BasicBlock *BB,
+ DenseSet<StringRef> &CallesNames) {
+ assert(BB != nullptr && "Traversing Null BB to find calls?");
+
+ auto getCalledFunction = [&CallesNames](const CallBase *Call) {
+ auto CalledValue = Call->getCalledOperand()->stripPointerCasts();
+ if (auto DirectCall = dyn_cast<Function>(CalledValue))
+ CallesNames.insert(DirectCall->getName());
+ };
+ for (auto &I : BB->instructionsWithoutDebug())
+ if (auto CI = dyn_cast<CallInst>(&I))
+ getCalledFunction(CI);
+
+ if (auto II = dyn_cast<InvokeInst>(BB->getTerminator()))
+ getCalledFunction(II);
+}
+
+bool SpeculateQuery::isStraightLine(const Function &F) {
+ return llvm::all_of(F.getBasicBlockList(), [](const BasicBlock &BB) {
+ return BB.getSingleSuccessor() != nullptr;
+ });
+}
+
+// BlockFreqQuery Implementations
+
+size_t BlockFreqQuery::numBBToGet(size_t numBB) {
+ // small CFG
+ if (numBB < 4)
+ return numBB;
+ // mid-size CFG
+ else if (numBB < 20)
+ return (numBB / 2);
+ else
+ return (numBB / 2) + (numBB / 4);
+}
+
+BlockFreqQuery::ResultTy BlockFreqQuery::operator()(Function &F) {
+ DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
+ DenseSet<StringRef> Calles;
+ SmallVector<std::pair<const BasicBlock *, uint64_t>, 8> BBFreqs;
+
+ PassBuilder PB;
+ FunctionAnalysisManager FAM;
+ PB.registerFunctionAnalyses(FAM);
+
+ auto IBBs = findBBwithCalls(F);
+
+ if (IBBs.empty())
+ return None;
+
+ auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);
+
+ for (const auto I : IBBs)
+ BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});
+
+ assert(IBBs.size() == BBFreqs.size() && "BB Count Mismatch");
+
+ llvm::sort(BBFreqs, [](decltype(BBFreqs)::const_reference BBF,
+ decltype(BBFreqs)::const_reference BBS) {
+ return BBF.second > BBS.second ? true : false;
+ });
+
+ // ignoring number of direct calls in a BB
+ auto Topk = numBBToGet(BBFreqs.size());
+
+ for (size_t i = 0; i < Topk; i++)
+ findCalles(BBFreqs[i].first, Calles);
+
+ assert(!Calles.empty() && "Running Analysis on Function with no calls?");
+
+ CallerAndCalles.insert({F.getName(), std::move(Calles)});
+
+ return CallerAndCalles;
+}
+
+// SequenceBBQuery Implementation
+std::size_t SequenceBBQuery::getHottestBlocks(std::size_t TotalBlocks) {
+ if (TotalBlocks == 1)
+ return TotalBlocks;
+ return TotalBlocks / 2;
+}
+
+// FIXME: Find a better implementation.
+SequenceBBQuery::BlockListTy
+SequenceBBQuery::rearrangeBB(const Function &F, const BlockListTy &BBList) {
+ BlockListTy RearrangedBBSet;
+
+ for (auto &Block : F.getBasicBlockList())
+ if (llvm::is_contained(BBList, &Block))
+ RearrangedBBSet.push_back(&Block);
+
+ assert(RearrangedBBSet.size() == BBList.size() &&
+ "BasicBlock missing while rearranging?");
+ return RearrangedBBSet;
+}
+
+void SequenceBBQuery::traverseToEntryBlock(const BasicBlock *AtBB,
+ const BlockListTy &CallerBlocks,
+ const BackEdgesInfoTy &BackEdgesInfo,
+ const BranchProbabilityInfo *BPI,
+ VisitedBlocksInfoTy &VisitedBlocks) {
+ auto Itr = VisitedBlocks.find(AtBB);
+ if (Itr != VisitedBlocks.end()) { // already visited.
+ if (!Itr->second.Upward)
+ return;
+ Itr->second.Upward = false;
+ } else {
+ // Create a hint for newly discovered blocks.
+ WalkDirection BlockHint;
+ BlockHint.Upward = false;
+ // FIXME: Expensive Check
+ if (llvm::is_contained(CallerBlocks, AtBB))
+ BlockHint.CallerBlock = true;
+ VisitedBlocks.insert(std::make_pair(AtBB, BlockHint));
+ }
+
+ const_pred_iterator PIt = pred_begin(AtBB), EIt = pred_end(AtBB);
+ // Move this check to the top once we can launch speculative compiles
+ // for functions in the entry BB; that would trigger the speculative
+ // compiles before the program starts running.
+ if (PIt == EIt) // No Preds.
+ return;
+
+ DenseSet<const BasicBlock *> PredSkipNodes;
+
+ // Since we are checking for predecessor's backedges, this Block
+ // occurs in second position.
+ for (auto &I : BackEdgesInfo)
+ if (I.second == AtBB)
+ PredSkipNodes.insert(I.first);
+
+ // Skip predecessors that are sources of back-edges.
+ for (; PIt != EIt; ++PIt)
+ // checking EdgeHotness is cheaper
+ if (BPI->isEdgeHot(*PIt, AtBB) && !PredSkipNodes.count(*PIt))
+ traverseToEntryBlock(*PIt, CallerBlocks, BackEdgesInfo, BPI,
+ VisitedBlocks);
+}
+
+void SequenceBBQuery::traverseToExitBlock(const BasicBlock *AtBB,
+ const BlockListTy &CallerBlocks,
+ const BackEdgesInfoTy &BackEdgesInfo,
+ const BranchProbabilityInfo *BPI,
+ VisitedBlocksInfoTy &VisitedBlocks) {
+ auto Itr = VisitedBlocks.find(AtBB);
+ if (Itr != VisitedBlocks.end()) { // already visited.
+ if (!Itr->second.Downward)
+ return;
+ Itr->second.Downward = false;
+ } else {
+ // Create a hint for newly discovered blocks.
+ WalkDirection BlockHint;
+ BlockHint.Downward = false;
+ // FIXME: Expensive Check
+ if (llvm::is_contained(CallerBlocks, AtBB))
+ BlockHint.CallerBlock = true;
+ VisitedBlocks.insert(std::make_pair(AtBB, BlockHint));
+ }
+
+ const_succ_iterator PIt = succ_begin(AtBB), EIt = succ_end(AtBB);
+ if (PIt == EIt) // No succs.
+ return;
+
+ // If there are hot edges, then compute SuccSkipNodes.
+ DenseSet<const BasicBlock *> SuccSkipNodes;
+
+ // Since we are checking for successor's backedges, this Block
+ // occurs in first position.
+ for (auto &I : BackEdgesInfo)
+ if (I.first == AtBB)
+ SuccSkipNodes.insert(I.second);
+
+ for (; PIt != EIt; ++PIt)
+ if (BPI->isEdgeHot(AtBB, *PIt) && !SuccSkipNodes.count(*PIt))
+ traverseToExitBlock(*PIt, CallerBlocks, BackEdgesInfo, BPI,
+ VisitedBlocks);
+}
+
+// Get block frequencies for the blocks, take the most frequently executed
+// ones, then walk from those blocks toward the entry block and discover
+// the basic blocks containing calls.
+SequenceBBQuery::BlockListTy
+SequenceBBQuery::queryCFG(Function &F, const BlockListTy &CallerBlocks) {
+
+ BlockFreqInfoTy BBFreqs;
+ VisitedBlocksInfoTy VisitedBlocks;
+ BackEdgesInfoTy BackEdgesInfo;
+
+ PassBuilder PB;
+ FunctionAnalysisManager FAM;
+ PB.registerFunctionAnalyses(FAM);
+
+ auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);
+
+ llvm::FindFunctionBackedges(F, BackEdgesInfo);
+
+ for (const auto I : CallerBlocks)
+ BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});
+
+ llvm::sort(BBFreqs, [](decltype(BBFreqs)::const_reference Bbf,
+ decltype(BBFreqs)::const_reference Bbs) {
+ return Bbf.second > Bbs.second;
+ });
+
+ ArrayRef<std::pair<const BasicBlock *, uint64_t>> HotBlocksRef(BBFreqs);
+ HotBlocksRef =
+ HotBlocksRef.drop_back(BBFreqs.size() - getHottestBlocks(BBFreqs.size()));
+
+ BranchProbabilityInfo *BPI =
+ FAM.getCachedResult<BranchProbabilityAnalysis>(F);
+
+ // Visit the N hottest blocks:
+ // traverse upwards to the entry block,
+ // traverse downwards to the exit blocks.
+
+ for (auto I : HotBlocksRef) {
+ traverseToEntryBlock(I.first, CallerBlocks, BackEdgesInfo, BPI,
+ VisitedBlocks);
+ traverseToExitBlock(I.first, CallerBlocks, BackEdgesInfo, BPI,
+ VisitedBlocks);
+ }
+
+ BlockListTy MinCallerBlocks;
+ for (auto &I : VisitedBlocks)
+ if (I.second.CallerBlock)
+ MinCallerBlocks.push_back(std::move(I.first));
+
+ return rearrangeBB(F, MinCallerBlocks);
+}
+
+SpeculateQuery::ResultTy SequenceBBQuery::operator()(Function &F) {
+ // reduce the number of lists!
+ DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
+ DenseSet<StringRef> Calles;
+ BlockListTy SequencedBlocks;
+ BlockListTy CallerBlocks;
+
+ CallerBlocks = findBBwithCalls(F);
+ if (CallerBlocks.empty())
+ return None;
+
+ if (isStraightLine(F))
+ SequencedBlocks = rearrangeBB(F, CallerBlocks);
+ else
+ SequencedBlocks = queryCFG(F, CallerBlocks);
+
+ for (auto BB : SequencedBlocks)
+ findCalles(BB, Calles);
+
+ CallerAndCalles.insert({F.getName(), std::move(Calles)});
+ return CallerAndCalles;
+}
+
+} // namespace orc
+} // namespace llvm
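
A standalone sketch (not part of the patch; the function name is hypothetical) showing how a client could run BlockFreqQuery over a function to inspect the callees it would speculate on:

    #include "llvm/ExecutionEngine/Orc/SpeculateAnalyses.h"
    #include "llvm/IR/Function.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace llvm;
    using namespace llvm::orc;

    void dumpLikelyCallees(Function &F) {
      BlockFreqQuery Query;
      // operator() returns None for functions that contain no calls.
      if (auto Result = Query(F))
        for (auto &KV : *Result) {
          errs() << KV.first << " -> ";
          for (auto &Callee : KV.second)
            errs() << Callee << " ";
          errs() << "\n";
        }
    }
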
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Speculation.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Speculation.cpp
new file mode 100644
index 00000000000..0b4755fe23c
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/Speculation.cpp
@@ -0,0 +1,143 @@
+//===---------- Speculation.cpp - Utilities for Speculation ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Speculation.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Verifier.h"
+
+namespace llvm {
+
+namespace orc {
+
+// ImplSymbolMap methods
+void ImplSymbolMap::trackImpls(SymbolAliasMap ImplMaps, JITDylib *SrcJD) {
+ assert(SrcJD && "Tracking on Null Source .impl dylib");
+ std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
+ for (auto &I : ImplMaps) {
+ auto It = Maps.insert({I.first, {I.second.Aliasee, SrcJD}});
+ // check rationale when independent dylibs have same symbol name?
+ assert(It.second && "ImplSymbols are already tracked for this Symbol?");
+ (void)(It);
+ }
+}
+
+// Trigger Speculative Compiles.
+void Speculator::speculateForEntryPoint(Speculator *Ptr, uint64_t StubId) {
+ assert(Ptr && " Null Address Received in orc_speculate_for ");
+ Ptr->speculateFor(StubId);
+}
+
+Error Speculator::addSpeculationRuntime(JITDylib &JD,
+ MangleAndInterner &Mangle) {
+ JITEvaluatedSymbol ThisPtr(pointerToJITTargetAddress(this),
+ JITSymbolFlags::Exported);
+ JITEvaluatedSymbol SpeculateForEntryPtr(
+ pointerToJITTargetAddress(&speculateForEntryPoint),
+ JITSymbolFlags::Exported);
+ return JD.define(absoluteSymbols({
+ {Mangle("__orc_speculator"), ThisPtr}, // Data Symbol
+ {Mangle("__orc_speculate_for"), SpeculateForEntryPtr} // Callable Symbol
+ }));
+}
+
+// If two modules share the same LLVMContext, different threads must not
+// access them concurrently without locking the associated LLVMContext;
+// this implementation follows that contract.
+void IRSpeculationLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ ThreadSafeModule TSM) {
+
+ assert(TSM && "Speculation Layer received Null Module ?");
+ assert(TSM.getContext().getContext() != nullptr &&
+ "Module with null LLVMContext?");
+
+ // Instrument with runtime calls; lock the Module while doing so.
+ TSM.withModuleDo([this, &R](Module &M) {
+ auto &MContext = M.getContext();
+ auto SpeculatorVTy = StructType::create(MContext, "Class.Speculator");
+ auto RuntimeCallTy = FunctionType::get(
+ Type::getVoidTy(MContext),
+ {SpeculatorVTy->getPointerTo(), Type::getInt64Ty(MContext)}, false);
+ auto RuntimeCall =
+ Function::Create(RuntimeCallTy, Function::LinkageTypes::ExternalLinkage,
+ "__orc_speculate_for", &M);
+ auto SpeclAddr = new GlobalVariable(
+ M, SpeculatorVTy, false, GlobalValue::LinkageTypes::ExternalLinkage,
+ nullptr, "__orc_speculator");
+
+ IRBuilder<> Mutator(MContext);
+
+ // QueryAnalysis is allowed to transform the IR source; for example,
+ // SimplifyCFG helps the static branch prediction heuristics.
+ for (auto &Fn : M.getFunctionList()) {
+ if (!Fn.isDeclaration()) {
+
+ auto IRNames = QueryAnalysis(Fn);
+ // Instrument and register if Query has result
+ if (IRNames.hasValue()) {
+
+ // Emit globals for each function.
+ auto LoadValueTy = Type::getInt8Ty(MContext);
+ auto SpeculatorGuard = new GlobalVariable(
+ M, LoadValueTy, false, GlobalValue::LinkageTypes::InternalLinkage,
+ ConstantInt::get(LoadValueTy, 0),
+ "__orc_speculate.guard.for." + Fn.getName());
+ SpeculatorGuard->setAlignment(Align(1));
+ SpeculatorGuard->setUnnamedAddr(GlobalValue::UnnamedAddr::Local);
+
+ BasicBlock &ProgramEntry = Fn.getEntryBlock();
+ // Create BasicBlocks before the program's entry basic block.
+ BasicBlock *SpeculateBlock = BasicBlock::Create(
+ MContext, "__orc_speculate.block", &Fn, &ProgramEntry);
+ BasicBlock *SpeculateDecisionBlock = BasicBlock::Create(
+ MContext, "__orc_speculate.decision.block", &Fn, SpeculateBlock);
+
+ assert(SpeculateDecisionBlock == &Fn.getEntryBlock() &&
+ "SpeculateDecisionBlock not updated?");
+ Mutator.SetInsertPoint(SpeculateDecisionBlock);
+
+ auto LoadGuard =
+ Mutator.CreateLoad(LoadValueTy, SpeculatorGuard, "guard.value");
+ // If the loaded guard value equals 0, speculation may proceed.
+ auto CanSpeculate =
+ Mutator.CreateICmpEQ(LoadGuard, ConstantInt::get(LoadValueTy, 0),
+ "compare.to.speculate");
+ Mutator.CreateCondBr(CanSpeculate, SpeculateBlock, &ProgramEntry);
+
+ Mutator.SetInsertPoint(SpeculateBlock);
+ auto ImplAddrToUint =
+ Mutator.CreatePtrToInt(&Fn, Type::getInt64Ty(MContext));
+ Mutator.CreateCall(RuntimeCallTy, RuntimeCall,
+ {SpeclAddr, ImplAddrToUint});
+ Mutator.CreateStore(ConstantInt::get(LoadValueTy, 1),
+ SpeculatorGuard);
+ Mutator.CreateBr(&ProgramEntry);
+
+ assert(Mutator.GetInsertBlock()->getParent() == &Fn &&
+ "IR builder association mismatch?");
+ S.registerSymbols(internToJITSymbols(IRNames.getValue()),
+ &R->getTargetJITDylib());
+ }
+ }
+ }
+ });
+
+ assert(!TSM.withModuleDo([](const Module &M) { return verifyModule(M); }) &&
+ "Speculation Instrumentation breaks IR?");
+
+ NextLayer.emit(std::move(R), std::move(TSM));
+}
+
+} // namespace orc
+} // namespace llvm
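
A source-level sketch of what the instrumentation above amounts to, for a
hypothetical function `foo`; the runtime symbol names match the globals
emitted by IRSpeculationLayer::emit, everything else is illustrative:

    extern "C" void __orc_speculate_for(void *Speculator, uint64_t FnAddr);
    extern char __orc_speculator; // absolute symbol: the Speculator instance

    static char Guard = 0; // "__orc_speculate.guard.for.foo"

    void foo() {
      if (Guard == 0) {                    // "__orc_speculate.decision.block"
        __orc_speculate_for(&__orc_speculator, (uint64_t)&foo); // speculate
        Guard = 1;                         // fire at most once per function
      }
      // ... original entry block and body follow ...
    }
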
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCDynamicLibrarySearchGenerator.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCDynamicLibrarySearchGenerator.cpp
new file mode 100644
index 00000000000..bbf3ada1d4b
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCDynamicLibrarySearchGenerator.cpp
@@ -0,0 +1,70 @@
+//===---------------- TPCDynamicLibrarySearchGenerator.cpp ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TPCDynamicLibrarySearchGenerator.h"
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<TPCDynamicLibrarySearchGenerator>>
+TPCDynamicLibrarySearchGenerator::Load(TargetProcessControl &TPC,
+ const char *LibraryPath,
+ SymbolPredicate Allow) {
+ auto Handle = TPC.loadDylib(LibraryPath);
+ if (!Handle)
+ return Handle.takeError();
+
+ return std::make_unique<TPCDynamicLibrarySearchGenerator>(TPC, *Handle,
+ std::move(Allow));
+}
+
+Error TPCDynamicLibrarySearchGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+
+ if (Symbols.empty())
+ return Error::success();
+
+ SymbolLookupSet LookupSymbols;
+
+ for (auto &KV : Symbols) {
+ // Skip symbols that don't match the filter.
+ if (Allow && !Allow(KV.first))
+ continue;
+ LookupSymbols.add(KV.first, SymbolLookupFlags::WeaklyReferencedSymbol);
+ }
+
+ SymbolMap NewSymbols;
+
+ TargetProcessControl::LookupRequest Request(H, LookupSymbols);
+ auto Result = TPC.lookupSymbols(Request);
+ if (!Result)
+ return Result.takeError();
+
+ assert(Result->size() == 1 && "Results for more than one library returned");
+ assert(Result->front().size() == LookupSymbols.size() &&
+ "Result has incorrect number of elements");
+
+ auto ResultI = Result->front().begin();
+ for (auto &KV : LookupSymbols) {
+ if (*ResultI)
+ NewSymbols[KV.first] =
+ JITEvaluatedSymbol(*ResultI, JITSymbolFlags::Exported);
+ ++ResultI;
+ }
+
+ // If there were no resolved symbols, bail out.
+ if (NewSymbols.empty())
+ return Error::success();
+
+ // Define resolved symbols.
+ return JD.define(absoluteSymbols(std::move(NewSymbols)));
+}
+
+} // end namespace orc
+} // end namespace llvm
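
A usage sketch, assuming an existing TargetProcessControl `TPC` and JITDylib
`JD`; the library path is illustrative and error handling is collapsed with
cantFail:

    auto Gen = cantFail(TPCDynamicLibrarySearchGenerator::Load(
        TPC, "/usr/lib/libm.dylib", /*Allow=*/{}));
    JD.addGenerator(std::move(Gen));

An empty predicate admits every symbol; supply a SymbolPredicate to restrict
which names may be pulled in from the dylib.
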
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCEHFrameRegistrar.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCEHFrameRegistrar.cpp
new file mode 100644
index 00000000000..4f901ce6d44
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCEHFrameRegistrar.cpp
@@ -0,0 +1,80 @@
+//===------ TPCEHFrameRegistrar.cpp - TPC-based eh-frame registration -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TPCEHFrameRegistrar.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<TPCEHFrameRegistrar>>
+TPCEHFrameRegistrar::Create(TargetProcessControl &TPC) {
+ // FIXME: Proper mangling here -- we really need to decouple linker mangling
+ // from DataLayout.
+
+ // Find the addresses of the registration/deregistration functions in the
+ // target process.
+ auto ProcessHandle = TPC.loadDylib(nullptr);
+ if (!ProcessHandle)
+ return ProcessHandle.takeError();
+
+ std::string RegisterWrapperName, DeregisterWrapperName;
+ if (TPC.getTargetTriple().isOSBinFormatMachO()) {
+ RegisterWrapperName += '_';
+ DeregisterWrapperName += '_';
+ }
+ RegisterWrapperName += "llvm_orc_registerEHFrameSectionWrapper";
+ DeregisterWrapperName += "llvm_orc_deregisterEHFrameSectionWrapper";
+
+ SymbolLookupSet RegistrationSymbols;
+ RegistrationSymbols.add(TPC.intern(RegisterWrapperName));
+ RegistrationSymbols.add(TPC.intern(DeregisterWrapperName));
+
+ auto Result = TPC.lookupSymbols({{*ProcessHandle, RegistrationSymbols}});
+ if (!Result)
+ return Result.takeError();
+
+ assert(Result->size() == 1 && "Unexpected number of dylibs in result");
+ assert((*Result)[0].size() == 2 &&
+ "Unexpected number of addresses in result");
+
+ auto RegisterEHFrameWrapperFnAddr = (*Result)[0][0];
+ auto DeregisterEHFrameWrapperFnAddr = (*Result)[0][1];
+
+ return std::make_unique<TPCEHFrameRegistrar>(
+ TPC, RegisterEHFrameWrapperFnAddr, DeregisterEHFrameWrapperFnAddr);
+}
+
+Error TPCEHFrameRegistrar::registerEHFrames(JITTargetAddress EHFrameSectionAddr,
+ size_t EHFrameSectionSize) {
+ constexpr size_t ArgBufferSize = sizeof(uint64_t) + sizeof(uint64_t);
+ uint8_t ArgBuffer[ArgBufferSize];
+ BinaryStreamWriter ArgWriter(
+ MutableArrayRef<uint8_t>(ArgBuffer, ArgBufferSize),
+ support::endianness::big);
+ cantFail(ArgWriter.writeInteger(static_cast<uint64_t>(EHFrameSectionAddr)));
+ cantFail(ArgWriter.writeInteger(static_cast<uint64_t>(EHFrameSectionSize)));
+
+ return TPC.runWrapper(RegisterEHFrameWrapperFnAddr, ArgBuffer).takeError();
+}
+
+Error TPCEHFrameRegistrar::deregisterEHFrames(
+ JITTargetAddress EHFrameSectionAddr, size_t EHFrameSectionSize) {
+ constexpr size_t ArgBufferSize = sizeof(uint64_t) + sizeof(uint64_t);
+ uint8_t ArgBuffer[ArgBufferSize];
+ BinaryStreamWriter ArgWriter(
+ MutableArrayRef<uint8_t>(ArgBuffer, ArgBufferSize),
+ support::endianness::big);
+ cantFail(ArgWriter.writeInteger(static_cast<uint64_t>(EHFrameSectionAddr)));
+ cantFail(ArgWriter.writeInteger(static_cast<uint64_t>(EHFrameSectionSize)));
+
+ return TPC.runWrapper(DeregisterEHFrameWrapperFnAddr, ArgBuffer).takeError();
+}
+
+} // end namespace orc
+} // end namespace llvm
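
A wiring sketch, assuming an existing ExecutionSession `ES`,
ObjectLinkingLayer `ObjLinkingLayer`, and TargetProcessControl `TPC`; the
registrar plugs into jitlink's EHFrameRegistrationPlugin so that eh-frames of
JIT'd code get registered in the target process:

    auto Registrar = cantFail(TPCEHFrameRegistrar::Create(TPC));
    ObjLinkingLayer.addPlugin(std::make_unique<EHFrameRegistrationPlugin>(
        ES, std::move(Registrar)));
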
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCIndirectionUtils.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCIndirectionUtils.cpp
new file mode 100644
index 00000000000..7989ec41952
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TPCIndirectionUtils.cpp
@@ -0,0 +1,423 @@
+//===------ TPCIndirectionUtils.cpp - TPC-based indirection utils ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TPCIndirectionUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/TargetProcessControl.h"
+#include "llvm/Support/MathExtras.h"
+
+#include <future>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace llvm {
+namespace orc {
+
+class TPCIndirectionUtilsAccess {
+public:
+ using IndirectStubInfo = TPCIndirectionUtils::IndirectStubInfo;
+ using IndirectStubInfoVector = TPCIndirectionUtils::IndirectStubInfoVector;
+
+ static Expected<IndirectStubInfoVector>
+ getIndirectStubs(TPCIndirectionUtils &TPCIU, unsigned NumStubs) {
+ return TPCIU.getIndirectStubs(NumStubs);
+ }
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+namespace {
+
+class TPCTrampolinePool : public TrampolinePool {
+public:
+ TPCTrampolinePool(TPCIndirectionUtils &TPCIU);
+ Error deallocatePool();
+
+protected:
+ Error grow() override;
+
+ using Allocation = jitlink::JITLinkMemoryManager::Allocation;
+
+ TPCIndirectionUtils &TPCIU;
+ unsigned TrampolineSize = 0;
+ unsigned TrampolinesPerPage = 0;
+ std::vector<std::unique_ptr<Allocation>> TrampolineBlocks;
+};
+
+class TPCIndirectStubsManager : public IndirectStubsManager,
+ private TPCIndirectionUtilsAccess {
+public:
+ TPCIndirectStubsManager(TPCIndirectionUtils &TPCIU) : TPCIU(TPCIU) {}
+
+ Error deallocateStubs();
+
+ Error createStub(StringRef StubName, JITTargetAddress StubAddr,
+ JITSymbolFlags StubFlags) override;
+
+ Error createStubs(const StubInitsMap &StubInits) override;
+
+ JITEvaluatedSymbol findStub(StringRef Name, bool ExportedStubsOnly) override;
+
+ JITEvaluatedSymbol findPointer(StringRef Name) override;
+
+ Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override;
+
+private:
+ using StubInfo = std::pair<IndirectStubInfo, JITSymbolFlags>;
+
+ std::mutex ISMMutex;
+ TPCIndirectionUtils &TPCIU;
+ StringMap<StubInfo> StubInfos;
+};
+
+TPCTrampolinePool::TPCTrampolinePool(TPCIndirectionUtils &TPCIU)
+ : TPCIU(TPCIU) {
+ auto &TPC = TPCIU.getTargetProcessControl();
+ auto &ABI = TPCIU.getABISupport();
+
+ TrampolineSize = ABI.getTrampolineSize();
+ TrampolinesPerPage =
+ (TPC.getPageSize() - ABI.getPointerSize()) / TrampolineSize;
+}
+
+Error TPCTrampolinePool::deallocatePool() {
+ Error Err = Error::success();
+ for (auto &Alloc : TrampolineBlocks)
+ Err = joinErrors(std::move(Err), Alloc->deallocate());
+ return Err;
+}
+
+Error TPCTrampolinePool::grow() {
+ assert(AvailableTrampolines.empty() &&
+ "Grow called with trampolines still available");
+
+ auto ResolverAddress = TPCIU.getResolverBlockAddress();
+ assert(ResolverAddress && "Resolver address cannot be null");
+
+ auto &TPC = TPCIU.getTargetProcessControl();
+ constexpr auto TrampolinePagePermissions =
+ static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ auto PageSize = TPC.getPageSize();
+ jitlink::JITLinkMemoryManager::SegmentsRequestMap Request;
+ Request[TrampolinePagePermissions] = {PageSize, static_cast<size_t>(PageSize),
+ 0};
+ auto Alloc = TPC.getMemMgr().allocate(nullptr, Request);
+
+ if (!Alloc)
+ return Alloc.takeError();
+
+ unsigned NumTrampolines = TrampolinesPerPage;
+
+ auto WorkingMemory = (*Alloc)->getWorkingMemory(TrampolinePagePermissions);
+ auto TargetAddress = (*Alloc)->getTargetMemory(TrampolinePagePermissions);
+
+ TPCIU.getABISupport().writeTrampolines(WorkingMemory.data(), TargetAddress,
+ ResolverAddress, NumTrampolines);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I)
+ AvailableTrampolines.push_back(TargetAddress + (I * TrampolineSize));
+
+ if (auto Err = (*Alloc)->finalize())
+ return Err;
+
+ TrampolineBlocks.push_back(std::move(*Alloc));
+
+ return Error::success();
+}
+
+Error TPCIndirectStubsManager::createStub(StringRef StubName,
+ JITTargetAddress StubAddr,
+ JITSymbolFlags StubFlags) {
+ StubInitsMap SIM;
+ SIM[StubName] = std::make_pair(StubAddr, StubFlags);
+ return createStubs(SIM);
+}
+
+Error TPCIndirectStubsManager::createStubs(const StubInitsMap &StubInits) {
+ auto AvailableStubInfos = getIndirectStubs(TPCIU, StubInits.size());
+ if (!AvailableStubInfos)
+ return AvailableStubInfos.takeError();
+
+ {
+ std::lock_guard<std::mutex> Lock(ISMMutex);
+ unsigned ASIdx = 0;
+ for (auto &SI : StubInits) {
+ auto &A = (*AvailableStubInfos)[ASIdx++];
+ StubInfos[SI.first()] = std::make_pair(A, SI.second.second);
+ }
+ }
+
+ auto &MemAccess = TPCIU.getTargetProcessControl().getMemoryAccess();
+ switch (TPCIU.getABISupport().getPointerSize()) {
+ case 4: {
+ unsigned ASIdx = 0;
+ std::vector<tpctypes::UInt32Write> PtrUpdates;
+ for (auto &SI : StubInits)
+ PtrUpdates.push_back({(*AvailableStubInfos)[ASIdx++].PointerAddress,
+ static_cast<uint32_t>(SI.second.first)});
+ return MemAccess.writeUInt32s(PtrUpdates);
+ }
+ case 8: {
+ unsigned ASIdx = 0;
+ std::vector<tpctypes::UInt64Write> PtrUpdates;
+ for (auto &SI : StubInits)
+ PtrUpdates.push_back({(*AvailableStubInfos)[ASIdx++].PointerAddress,
+ static_cast<uint64_t>(SI.second.first)});
+ return MemAccess.writeUInt64s(PtrUpdates);
+ }
+ default:
+ return make_error<StringError>("Unsupported pointer size",
+ inconvertibleErrorCode());
+ }
+}
+
+JITEvaluatedSymbol TPCIndirectStubsManager::findStub(StringRef Name,
+ bool ExportedStubsOnly) {
+ std::lock_guard<std::mutex> Lock(ISMMutex);
+ auto I = StubInfos.find(Name);
+ if (I == StubInfos.end())
+ return nullptr;
+ return {I->second.first.StubAddress, I->second.second};
+}
+
+JITEvaluatedSymbol TPCIndirectStubsManager::findPointer(StringRef Name) {
+ std::lock_guard<std::mutex> Lock(ISMMutex);
+ auto I = StubInfos.find(Name);
+ if (I == StubInfos.end())
+ return nullptr;
+ return {I->second.first.PointerAddress, I->second.second};
+}
+
+Error TPCIndirectStubsManager::updatePointer(StringRef Name,
+ JITTargetAddress NewAddr) {
+
+ JITTargetAddress PtrAddr = 0;
+ {
+ std::lock_guard<std::mutex> Lock(ISMMutex);
+ auto I = StubInfos.find(Name);
+ if (I == StubInfos.end())
+ return make_error<StringError>("Unknown stub name",
+ inconvertibleErrorCode());
+ PtrAddr = I->second.first.PointerAddress;
+ }
+
+ auto &MemAccess = TPCIU.getTargetProcessControl().getMemoryAccess();
+ switch (TPCIU.getABISupport().getPointerSize()) {
+ case 4: {
+ tpctypes::UInt32Write PUpdate(PtrAddr, NewAddr);
+ return MemAccess.writeUInt32s(PUpdate);
+ }
+ case 8: {
+ tpctypes::UInt64Write PUpdate(PtrAddr, NewAddr);
+ return MemAccess.writeUInt64s(PUpdate);
+ }
+ default:
+ return make_error<StringError>("Unsupported pointer size",
+ inconvertibleErrorCode());
+ }
+}
+
+} // end anonymous namespace.
+
+namespace llvm {
+namespace orc {
+
+TPCIndirectionUtils::ABISupport::~ABISupport() {}
+
+Expected<std::unique_ptr<TPCIndirectionUtils>>
+TPCIndirectionUtils::Create(TargetProcessControl &TPC) {
+ const auto &TT = TPC.getTargetTriple();
+ switch (TT.getArch()) {
+ default:
+ return make_error<StringError>(
+ std::string("No TPCIndirectionUtils available for ") + TT.str(),
+ inconvertibleErrorCode());
+ case Triple::aarch64:
+ case Triple::aarch64_32:
+ return CreateWithABI<OrcAArch64>(TPC);
+
+ case Triple::x86:
+ return CreateWithABI<OrcI386>(TPC);
+
+ case Triple::mips:
+ return CreateWithABI<OrcMips32Be>(TPC);
+
+ case Triple::mipsel:
+ return CreateWithABI<OrcMips32Le>(TPC);
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ return CreateWithABI<OrcMips64>(TPC);
+
+ case Triple::x86_64:
+ if (TT.getOS() == Triple::OSType::Win32)
+ return CreateWithABI<OrcX86_64_Win32>(TPC);
+ else
+ return CreateWithABI<OrcX86_64_SysV>(TPC);
+ }
+}
+
+Error TPCIndirectionUtils::cleanup() {
+ Error Err = Error::success();
+
+ for (auto &A : IndirectStubAllocs)
+ Err = joinErrors(std::move(Err), A->deallocate());
+
+ if (TP)
+ Err = joinErrors(std::move(Err),
+ static_cast<TPCTrampolinePool &>(*TP).deallocatePool());
+
+ if (ResolverBlock)
+ Err = joinErrors(std::move(Err), ResolverBlock->deallocate());
+
+ return Err;
+}
+
+Expected<JITTargetAddress>
+TPCIndirectionUtils::writeResolverBlock(JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr) {
+ assert(ABI && "ABI cannot be null");
+ constexpr auto ResolverBlockPermissions =
+ static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ auto ResolverSize = ABI->getResolverCodeSize();
+
+ jitlink::JITLinkMemoryManager::SegmentsRequestMap Request;
+ Request[ResolverBlockPermissions] = {TPC.getPageSize(),
+ static_cast<size_t>(ResolverSize), 0};
+ auto Alloc = TPC.getMemMgr().allocate(nullptr, Request);
+ if (!Alloc)
+ return Alloc.takeError();
+
+ auto WorkingMemory = (*Alloc)->getWorkingMemory(ResolverBlockPermissions);
+ ResolverBlockAddr = (*Alloc)->getTargetMemory(ResolverBlockPermissions);
+ ABI->writeResolverCode(WorkingMemory.data(), ResolverBlockAddr, ReentryFnAddr,
+ ReentryCtxAddr);
+
+ if (auto Err = (*Alloc)->finalize())
+ return std::move(Err);
+
+ ResolverBlock = std::move(*Alloc);
+ return ResolverBlockAddr;
+}
+
+std::unique_ptr<IndirectStubsManager>
+TPCIndirectionUtils::createIndirectStubsManager() {
+ return std::make_unique<TPCIndirectStubsManager>(*this);
+}
+
+TrampolinePool &TPCIndirectionUtils::getTrampolinePool() {
+ if (!TP)
+ TP = std::make_unique<TPCTrampolinePool>(*this);
+ return *TP;
+}
+
+LazyCallThroughManager &TPCIndirectionUtils::createLazyCallThroughManager(
+ ExecutionSession &ES, JITTargetAddress ErrorHandlerAddr) {
+ assert(!LCTM &&
+ "createLazyCallThroughManager must not be called more than once");
+ LCTM = std::make_unique<LazyCallThroughManager>(ES, ErrorHandlerAddr,
+ &getTrampolinePool());
+ return *LCTM;
+}
+
+TPCIndirectionUtils::TPCIndirectionUtils(TargetProcessControl &TPC,
+ std::unique_ptr<ABISupport> ABI)
+ : TPC(TPC), ABI(std::move(ABI)) {
+ assert(this->ABI && "ABI cannot be null");
+
+ assert(TPC.getPageSize() > getABISupport().getStubSize() &&
+ "Stubs larger than one page are not supported");
+}
+
+Expected<TPCIndirectionUtils::IndirectStubInfoVector>
+TPCIndirectionUtils::getIndirectStubs(unsigned NumStubs) {
+
+ std::lock_guard<std::mutex> Lock(TPCUIMutex);
+
+ // If there aren't enough stubs available then allocate some more.
+ if (NumStubs > AvailableIndirectStubs.size()) {
+ auto NumStubsToAllocate = NumStubs;
+ auto PageSize = TPC.getPageSize();
+ auto StubBytes = alignTo(NumStubsToAllocate * ABI->getStubSize(), PageSize);
+ NumStubsToAllocate = StubBytes / ABI->getStubSize();
+ auto PointerBytes =
+ alignTo(NumStubsToAllocate * ABI->getPointerSize(), PageSize);
+
+ constexpr auto StubPagePermissions =
+ static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_EXEC);
+ constexpr auto PointerPagePermissions =
+ static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+ sys::Memory::MF_WRITE);
+
+ jitlink::JITLinkMemoryManager::SegmentsRequestMap Request;
+ Request[StubPagePermissions] = {PageSize, static_cast<size_t>(StubBytes),
+ 0};
+ Request[PointerPagePermissions] = {PageSize, 0, PointerBytes};
+ auto Alloc = TPC.getMemMgr().allocate(nullptr, Request);
+ if (!Alloc)
+ return Alloc.takeError();
+
+ auto StubTargetAddr = (*Alloc)->getTargetMemory(StubPagePermissions);
+ auto PointerTargetAddr = (*Alloc)->getTargetMemory(PointerPagePermissions);
+
+ ABI->writeIndirectStubsBlock(
+ (*Alloc)->getWorkingMemory(StubPagePermissions).data(), StubTargetAddr,
+ PointerTargetAddr, NumStubsToAllocate);
+
+ if (auto Err = (*Alloc)->finalize())
+ return std::move(Err);
+
+ for (unsigned I = 0; I != NumStubsToAllocate; ++I) {
+ AvailableIndirectStubs.push_back(
+ IndirectStubInfo(StubTargetAddr, PointerTargetAddr));
+ StubTargetAddr += ABI->getStubSize();
+ PointerTargetAddr += ABI->getPointerSize();
+ }
+
+ IndirectStubAllocs.push_back(std::move(*Alloc));
+ }
+
+ assert(NumStubs <= AvailableIndirectStubs.size() &&
+ "Sufficient stubs should have been allocated above");
+
+ IndirectStubInfoVector Result;
+ while (NumStubs--) {
+ Result.push_back(AvailableIndirectStubs.back());
+ AvailableIndirectStubs.pop_back();
+ }
+
+ return std::move(Result);
+}
+
+static JITTargetAddress reentry(JITTargetAddress LCTMAddr,
+ JITTargetAddress TrampolineAddr) {
+ auto &LCTM = *jitTargetAddressToPointer<LazyCallThroughManager *>(LCTMAddr);
+ std::promise<JITTargetAddress> LandingAddrP;
+ auto LandingAddrF = LandingAddrP.get_future();
+ LCTM.resolveTrampolineLandingAddress(
+ TrampolineAddr,
+ [&](JITTargetAddress Addr) { LandingAddrP.set_value(Addr); });
+ return LandingAddrF.get();
+}
+
+Error setUpInProcessLCTMReentryViaTPCIU(TPCIndirectionUtils &TPCIU) {
+ auto &LCTM = TPCIU.getLazyCallThroughManager();
+ return TPCIU
+ .writeResolverBlock(pointerToJITTargetAddress(&reentry),
+ pointerToJITTargetAddress(&LCTM))
+ .takeError();
+}
+
+} // end namespace orc
+} // end namespace llvm
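
A setup sketch for lazy compilation with the utilities above, assuming an
existing ExecutionSession `ES`, TargetProcessControl `TPC`, and a hypothetical
`ErrorHandlerAddr` (typically the address of a function that reports a failed
materialization):

    auto TPCIU = cantFail(TPCIndirectionUtils::Create(TPC));
    TPCIU->createLazyCallThroughManager(ES, ErrorHandlerAddr);
    cantFail(setUpInProcessLCTMReentryViaTPCIU(*TPCIU)); // in-process only
    auto ISM = TPCIU->createIndirectStubsManager();
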
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
new file mode 100644
index 00000000000..aff7296cb6e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
@@ -0,0 +1,208 @@
+//===--------- RegisterEHFrames.cpp - Register EH frame sections ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::tpctypes;
+
+namespace llvm {
+namespace orc {
+
+#if defined(HAVE_REGISTER_FRAME) && defined(HAVE_DEREGISTER_FRAME) && \
+ !defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+
+extern "C" void __register_frame(const void *);
+extern "C" void __deregister_frame(const void *);
+
+Error registerFrameWrapper(const void *P) {
+ __register_frame(P);
+ return Error::success();
+}
+
+Error deregisterFrameWrapper(const void *P) {
+ __deregister_frame(P);
+ return Error::success();
+}
+
+#else
+
+// The building compiler does not have __(de)register_frame but
+// it may be found at runtime in a dynamically-loaded library.
+// For example, this happens when building LLVM with Visual C++
+// but using the MinGW runtime.
+static Error registerFrameWrapper(const void *P) {
+ static void (*RegisterFrame)(const void *) = nullptr;
+
+ if (!RegisterFrame)
+ *(void **)&RegisterFrame =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
+
+ if (RegisterFrame) {
+ RegisterFrame(P);
+ return Error::success();
+ }
+
+ return make_error<StringError>("could not register eh-frame: "
+ "__register_frame function not found",
+ inconvertibleErrorCode());
+}
+
+static Error deregisterFrameWrapper(const void *P) {
+ static void (*DeregisterFrame)(const void *) = nullptr;
+
+ if (!DeregisterFrame)
+ *(void **)&DeregisterFrame =
+ llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
+ "__deregister_frame");
+
+ if (DeregisterFrame) {
+ DeregisterFrame(P);
+ return Error::success();
+ }
+
+ return make_error<StringError>("could not deregister eh-frame: "
+ "__deregister_frame function not found",
+ inconvertibleErrorCode());
+}
+#endif
+
+#ifdef __APPLE__
+
+template <typename HandleFDEFn>
+Error walkAppleEHFrameSection(const char *const SectionStart,
+ size_t SectionSize, HandleFDEFn HandleFDE) {
+ const char *CurCFIRecord = SectionStart;
+ const char *End = SectionStart + SectionSize;
+ uint64_t Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+
+ while (CurCFIRecord != End && Size != 0) {
+ const char *OffsetField = CurCFIRecord + (Size == 0xffffffff ? 12 : 4);
+ if (Size == 0xffffffff)
+ Size = *reinterpret_cast<const uint64_t *>(CurCFIRecord + 4) + 12;
+ else
+ Size += 4;
+ uint32_t Offset = *reinterpret_cast<const uint32_t *>(OffsetField);
+
+ LLVM_DEBUG({
+ dbgs() << "Registering eh-frame section:\n";
+ dbgs() << "Processing " << (Offset ? "FDE" : "CIE") << " @"
+ << (void *)CurCFIRecord << ": [";
+ for (unsigned I = 0; I < Size; ++I)
+ dbgs() << format(" 0x%02" PRIx8, *(CurCFIRecord + I));
+ dbgs() << " ]\n";
+ });
+
+ if (Offset != 0)
+ if (auto Err = HandleFDE(CurCFIRecord))
+ return Err;
+
+ CurCFIRecord += Size;
+
+ Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
+ }
+
+ return Error::success();
+}
+
+#endif // __APPLE__
+
+Error registerEHFrameSection(const void *EHFrameSectionAddr,
+ size_t EHFrameSectionSize) {
+#ifdef __APPLE__
+ // On Darwin __register_frame has to be called for each FDE entry.
+ return walkAppleEHFrameSection(static_cast<const char *>(EHFrameSectionAddr),
+ EHFrameSectionSize, registerFrameWrapper);
+#else
+ // On Linux __register_frame takes a single argument:
+ // a pointer to the start of the .eh_frame section.
+
+ // How does it find the end? crtendS.o is linked in, and its .eh_frame
+ // section ends with a terminator of four zero bytes.
+ return registerFrameWrapper(EHFrameSectionAddr);
+#endif
+}
+
+Error deregisterEHFrameSection(const void *EHFrameSectionAddr,
+ size_t EHFrameSectionSize) {
+#ifdef __APPLE__
+ return walkAppleEHFrameSection(static_cast<const char *>(EHFrameSectionAddr),
+ EHFrameSectionSize, deregisterFrameWrapper);
+#else
+ return deregisterFrameWrapper(EHFrameSectionAddr);
+#endif
+}
+
+} // end namespace orc
+} // end namespace llvm
+
+extern "C" CWrapperFunctionResult
+llvm_orc_registerEHFrameSectionWrapper(uint8_t *Data, uint64_t Size) {
+ if (Size != sizeof(uint64_t) + sizeof(uint64_t))
+ return WrapperFunctionResult::from(
+ "Invalid arguments to llvm_orc_registerEHFrameSectionWrapper")
+ .release();
+
+ uint64_t EHFrameSectionAddr;
+ uint64_t EHFrameSectionSize;
+
+ {
+ BinaryStreamReader ArgReader(ArrayRef<uint8_t>(Data, Size),
+ support::endianness::big);
+ cantFail(ArgReader.readInteger(EHFrameSectionAddr));
+ cantFail(ArgReader.readInteger(EHFrameSectionSize));
+ }
+
+ if (auto Err = registerEHFrameSection(
+ jitTargetAddressToPointer<void *>(EHFrameSectionAddr),
+ EHFrameSectionSize)) {
+ auto ErrMsg = toString(std::move(Err));
+ return WrapperFunctionResult::from(ErrMsg).release();
+ }
+ return WrapperFunctionResult().release();
+}
+
+extern "C" CWrapperFunctionResult
+llvm_orc_deregisterEHFrameSectionWrapper(uint8_t *Data, uint64_t Size) {
+ if (Size != sizeof(uint64_t) + sizeof(uint64_t))
+ return WrapperFunctionResult::from(
+ "Invalid arguments to llvm_orc_registerEHFrameSectionWrapper")
+ .release();
+
+ uint64_t EHFrameSectionAddr;
+ uint64_t EHFrameSectionSize;
+
+ {
+ BinaryStreamReader ArgReader(ArrayRef<uint8_t>(Data, Size),
+ support::endianness::big);
+ cantFail(ArgReader.readInteger(EHFrameSectionAddr));
+ cantFail(ArgReader.readInteger(EHFrameSectionSize));
+ }
+
+ if (auto Err = deregisterEHFrameSection(
+ jitTargetAddressToPointer<void *>(EHFrameSectionAddr),
+ EHFrameSectionSize)) {
+ auto ErrMsg = toString(std::move(Err));
+ return WrapperFunctionResult::from(ErrMsg).release();
+ }
+ return WrapperFunctionResult().release();
+}
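
Both wrappers expect exactly sixteen bytes of input: the section address
followed by its size, each encoded as a big-endian uint64_t. A client-side
encoding sketch, mirroring TPCEHFrameRegistrar above, with hypothetical
`Addr` and `Size` values:

    uint8_t ArgBuffer[16];
    BinaryStreamWriter ArgWriter(MutableArrayRef<uint8_t>(ArgBuffer, 16),
                                 support::endianness::big);
    cantFail(ArgWriter.writeInteger(static_cast<uint64_t>(Addr)));
    cantFail(ArgWriter.writeInteger(static_cast<uint64_t>(Size)));
    auto Result = llvm_orc_registerEHFrameSectionWrapper(ArgBuffer, 16);
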
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp
new file mode 100644
index 00000000000..a8e6c049cf4
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp
@@ -0,0 +1,43 @@
+//===--- TargetExecutionUtils.cpp - Execution utils for target processes --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h"
+
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+int runAsMain(int (*Main)(int, char *[]), ArrayRef<std::string> Args,
+ Optional<StringRef> ProgramName) {
+ std::vector<std::unique_ptr<char[]>> ArgVStorage;
+ std::vector<char *> ArgV;
+
+ ArgVStorage.reserve(Args.size() + (ProgramName ? 1 : 0));
+ ArgV.reserve(Args.size() + 1 + (ProgramName ? 1 : 0));
+
+ if (ProgramName) {
+ ArgVStorage.push_back(std::make_unique<char[]>(ProgramName->size() + 1));
+ llvm::copy(*ProgramName, &ArgVStorage.back()[0]);
+ ArgVStorage.back()[ProgramName->size()] = '\0';
+ ArgV.push_back(ArgVStorage.back().get());
+ }
+
+ for (const auto &Arg : Args) {
+ ArgVStorage.push_back(std::make_unique<char[]>(Arg.size() + 1));
+ llvm::copy(Arg, &ArgVStorage.back()[0]);
+ ArgVStorage.back()[Arg.size()] = '\0';
+ ArgV.push_back(ArgVStorage.back().get());
+ }
+ ArgV.push_back(nullptr);
+
+ return Main(Args.size() + !!ProgramName, ArgV.data());
+}
+
+} // End namespace orc.
+} // End namespace llvm.
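
A call sketch, assuming `JITMain` is an int(int, char *[]) pointer obtained
from a JIT symbol lookup (all values hypothetical):

    std::vector<std::string> Args = {"--verbose", "input.txt"};
    int RC = runAsMain(JITMain, Args, StringRef("jit-prog"));

Because ProgramName is supplied, the callee sees argc == 3 with argv[0] ==
"jit-prog" and a null terminator after the last argument.
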
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcessControl.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcessControl.cpp
new file mode 100644
index 00000000000..7bf874e88c2
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/TargetProcessControl.cpp
@@ -0,0 +1,153 @@
+//===------ TargetProcessControl.cpp -- Target process control APIs -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcessControl.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Process.h"
+
+#include <mutex>
+
+namespace llvm {
+namespace orc {
+
+TargetProcessControl::MemoryAccess::~MemoryAccess() {}
+
+TargetProcessControl::~TargetProcessControl() {}
+
+SelfTargetProcessControl::SelfTargetProcessControl(
+ std::shared_ptr<SymbolStringPool> SSP, Triple TargetTriple,
+ unsigned PageSize, std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr)
+ : TargetProcessControl(std::move(SSP)) {
+
+ OwnedMemMgr = std::move(MemMgr);
+ if (!OwnedMemMgr)
+ OwnedMemMgr = std::make_unique<jitlink::InProcessMemoryManager>();
+
+ this->TargetTriple = std::move(TargetTriple);
+ this->PageSize = PageSize;
+ this->MemMgr = OwnedMemMgr.get();
+ this->MemAccess = this;
+ if (this->TargetTriple.isOSBinFormatMachO())
+ GlobalManglingPrefix = '_';
+}
+
+Expected<std::unique_ptr<SelfTargetProcessControl>>
+SelfTargetProcessControl::Create(
+ std::shared_ptr<SymbolStringPool> SSP,
+ std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr) {
+ auto PageSize = sys::Process::getPageSize();
+ if (!PageSize)
+ return PageSize.takeError();
+
+ Triple TT(sys::getProcessTriple());
+
+ return std::make_unique<SelfTargetProcessControl>(
+ std::move(SSP), std::move(TT), *PageSize, std::move(MemMgr));
+}
+
+Expected<tpctypes::DylibHandle>
+SelfTargetProcessControl::loadDylib(const char *DylibPath) {
+ std::string ErrMsg;
+ auto Dylib = std::make_unique<sys::DynamicLibrary>(
+ sys::DynamicLibrary::getPermanentLibrary(DylibPath, &ErrMsg));
+ if (!Dylib->isValid())
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+ DynamicLibraries.push_back(std::move(Dylib));
+ return pointerToJITTargetAddress(DynamicLibraries.back().get());
+}
+
+Expected<std::vector<tpctypes::LookupResult>>
+SelfTargetProcessControl::lookupSymbols(ArrayRef<LookupRequest> Request) {
+ std::vector<tpctypes::LookupResult> R;
+
+ for (auto &Elem : Request) {
+ auto *Dylib = jitTargetAddressToPointer<sys::DynamicLibrary *>(Elem.Handle);
+ assert(llvm::any_of(DynamicLibraries,
+ [=](const std::unique_ptr<sys::DynamicLibrary> &DL) {
+ return DL.get() == Dylib;
+ }) &&
+ "Invalid handle");
+
+ R.push_back(std::vector<JITTargetAddress>());
+ for (auto &KV : Elem.Symbols) {
+ auto &Sym = KV.first;
+ std::string Tmp((*Sym).data() + !!GlobalManglingPrefix,
+ (*Sym).size() - !!GlobalManglingPrefix);
+ void *Addr = Dylib->getAddressOfSymbol(Tmp.c_str());
+ if (!Addr && KV.second == SymbolLookupFlags::RequiredSymbol) {
+ // FIXME: Collect all failing symbols before erroring out.
+ SymbolNameVector MissingSymbols;
+ MissingSymbols.push_back(Sym);
+ return make_error<SymbolsNotFound>(std::move(MissingSymbols));
+ }
+ R.back().push_back(pointerToJITTargetAddress(Addr));
+ }
+ }
+
+ return R;
+}
+
+Expected<int32_t>
+SelfTargetProcessControl::runAsMain(JITTargetAddress MainFnAddr,
+ ArrayRef<std::string> Args) {
+ using MainTy = int (*)(int, char *[]);
+ return orc::runAsMain(jitTargetAddressToFunction<MainTy>(MainFnAddr), Args);
+}
+
+Expected<tpctypes::WrapperFunctionResult>
+SelfTargetProcessControl::runWrapper(JITTargetAddress WrapperFnAddr,
+ ArrayRef<uint8_t> ArgBuffer) {
+ using WrapperFnTy =
+ tpctypes::CWrapperFunctionResult (*)(const uint8_t *Data, uint64_t Size);
+ auto *WrapperFn = jitTargetAddressToFunction<WrapperFnTy>(WrapperFnAddr);
+ return WrapperFn(ArgBuffer.data(), ArgBuffer.size());
+}
+
+Error SelfTargetProcessControl::disconnect() { return Error::success(); }
+
+void SelfTargetProcessControl::writeUInt8s(ArrayRef<tpctypes::UInt8Write> Ws,
+ WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *jitTargetAddressToPointer<uint8_t *>(W.Address) = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void SelfTargetProcessControl::writeUInt16s(ArrayRef<tpctypes::UInt16Write> Ws,
+ WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *jitTargetAddressToPointer<uint16_t *>(W.Address) = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void SelfTargetProcessControl::writeUInt32s(ArrayRef<tpctypes::UInt32Write> Ws,
+ WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *jitTargetAddressToPointer<uint32_t *>(W.Address) = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void SelfTargetProcessControl::writeUInt64s(ArrayRef<tpctypes::UInt64Write> Ws,
+ WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *jitTargetAddressToPointer<uint64_t *>(W.Address) = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void SelfTargetProcessControl::writeBuffers(ArrayRef<tpctypes::BufferWrite> Ws,
+ WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ memcpy(jitTargetAddressToPointer<char *>(W.Address), W.Buffer.data(),
+ W.Buffer.size());
+ OnWriteComplete(Error::success());
+}
+
+} // end namespace orc
+} // end namespace llvm
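
A construction sketch for the in-process case; the string pool is typically
shared with the ExecutionSession, and a null memory manager selects the
InProcessMemoryManager fallback shown above:

    auto SSP = std::make_shared<SymbolStringPool>();
    auto TPC = cantFail(SelfTargetProcessControl::Create(SSP, nullptr));
    auto PageSize = TPC->getPageSize(); // host page size, per Create above
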
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp
new file mode 100644
index 00000000000..2e128dd2374
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp
@@ -0,0 +1,64 @@
+//===-- ThreadSafeModule.cpp - Thread safe Module, Context, and Utilities -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+
+namespace llvm {
+namespace orc {
+
+ThreadSafeModule cloneToNewContext(const ThreadSafeModule &TSM,
+ GVPredicate ShouldCloneDef,
+ GVModifier UpdateClonedDefSource) {
+ assert(TSM && "Can not clone null module");
+
+ if (!ShouldCloneDef)
+ ShouldCloneDef = [](const GlobalValue &) { return true; };
+
+ return TSM.withModuleDo([&](Module &M) {
+ SmallVector<char, 1> ClonedModuleBuffer;
+
+ {
+ std::set<GlobalValue *> ClonedDefsInSrc;
+ ValueToValueMapTy VMap;
+ auto Tmp = CloneModule(M, VMap, [&](const GlobalValue *GV) {
+ if (ShouldCloneDef(*GV)) {
+ ClonedDefsInSrc.insert(const_cast<GlobalValue *>(GV));
+ return true;
+ }
+ return false;
+ });
+
+ if (UpdateClonedDefSource)
+ for (auto *GV : ClonedDefsInSrc)
+ UpdateClonedDefSource(*GV);
+
+ BitcodeWriter BCWriter(ClonedModuleBuffer);
+
+ BCWriter.writeModule(*Tmp);
+ BCWriter.writeSymtab();
+ BCWriter.writeStrtab();
+ }
+
+ MemoryBufferRef ClonedModuleBufferRef(
+ StringRef(ClonedModuleBuffer.data(), ClonedModuleBuffer.size()),
+ "cloned module buffer");
+ ThreadSafeContext NewTSCtx(std::make_unique<LLVMContext>());
+
+ auto ClonedModule = cantFail(
+ parseBitcodeFile(ClonedModuleBufferRef, *NewTSCtx.getContext()));
+ ClonedModule->setModuleIdentifier(M.getName());
+ return ThreadSafeModule(std::move(ClonedModule), std::move(NewTSCtx));
+ });
+}
+
+} // end namespace orc
+} // end namespace llvm
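
A usage sketch, assuming an existing ThreadSafeModule `TSM`: clone only
function definitions into a fresh context and log each definition that was
cloned (both callbacks are optional and may be left empty):

    ThreadSafeModule Clone = cloneToNewContext(
        TSM,
        /*ShouldCloneDef=*/
        [](const GlobalValue &GV) { return isa<Function>(GV); },
        /*UpdateClonedDefSource=*/
        [](GlobalValue &GV) { errs() << "cloned: " << GV.getName() << "\n"; });
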
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/CMakeLists.txt b/contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/CMakeLists.txt
index e8c98bfd99f..c031ddb850e 100644
--- a/contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/CMakeLists.txt
+++ b/contrib/libs/llvm12/lib/ExecutionEngine/PerfJITEvents/CMakeLists.txt
@@ -6,6 +6,6 @@
# original buildsystem will not be accepted.
-if (UNIX AND NOT APPLE)
+if (UNIX)
include(CMakeLists.linux.txt)
endif()
diff --git a/contrib/libs/llvm12/lib/ExecutionEngine/RuntimeDyld/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/ExecutionEngine/RuntimeDyld/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/ExecutionEngine/RuntimeDyld/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Extensions/Extensions.cpp b/contrib/libs/llvm12/lib/Extensions/Extensions.cpp
new file mode 100644
index 00000000000..0d25cbda38e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Extensions/Extensions.cpp
@@ -0,0 +1,15 @@
+#include "llvm/Passes/PassPlugin.h"
+#define HANDLE_EXTENSION(Ext) \
+ llvm::PassPluginLibraryInfo get##Ext##PluginInfo();
+#include "llvm/Support/Extension.def"
+
+
+namespace llvm {
+ namespace details {
+ void extensions_anchor() {
+#define HANDLE_EXTENSION(Ext) \
+ get##Ext##PluginInfo();
+#include "llvm/Support/Extension.def"
+ }
+ }
+}
diff --git a/contrib/libs/llvm12/lib/FileCheck/FileCheck.cpp b/contrib/libs/llvm12/lib/FileCheck/FileCheck.cpp
new file mode 100644
index 00000000000..3169afaed58
--- /dev/null
+++ b/contrib/libs/llvm12/lib/FileCheck/FileCheck.cpp
@@ -0,0 +1,2754 @@
+//===- FileCheck.cpp - Check that File's Contents match what is expected --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// FileCheck does a line-by-line check of a file that validates whether it
+// contains the expected content. This is useful for regression tests etc.
+//
+// This file implements most of the API that will be used by the FileCheck
+// utility as well as various unit tests.
+//===----------------------------------------------------------------------===//
+
+#include "llvm/FileCheck/FileCheck.h"
+#include "FileCheckImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/CheckedArithmetic.h"
+#include "llvm/Support/FormatVariadic.h"
+#include <cstdint>
+#include <list>
+#include <set>
+#include <tuple>
+#include <utility>
+
+using namespace llvm;
+
+StringRef ExpressionFormat::toString() const {
+ switch (Value) {
+ case Kind::NoFormat:
+ return StringRef("<none>");
+ case Kind::Unsigned:
+ return StringRef("%u");
+ case Kind::Signed:
+ return StringRef("%d");
+ case Kind::HexUpper:
+ return StringRef("%X");
+ case Kind::HexLower:
+ return StringRef("%x");
+ }
+ llvm_unreachable("unknown expression format");
+}
+
+Expected<std::string> ExpressionFormat::getWildcardRegex() const {
+ auto CreatePrecisionRegex = [this](StringRef S) {
+ return (S + Twine('{') + Twine(Precision) + "}").str();
+ };
+
+ switch (Value) {
+ case Kind::Unsigned:
+ if (Precision)
+ return CreatePrecisionRegex("([1-9][0-9]*)?[0-9]");
+ return std::string("[0-9]+");
+ case Kind::Signed:
+ if (Precision)
+ return CreatePrecisionRegex("-?([1-9][0-9]*)?[0-9]");
+ return std::string("-?[0-9]+");
+ case Kind::HexUpper:
+ if (Precision)
+ return CreatePrecisionRegex("([1-9A-F][0-9A-F]*)?[0-9A-F]");
+ return std::string("[0-9A-F]+");
+ case Kind::HexLower:
+ if (Precision)
+ return CreatePrecisionRegex("([1-9a-f][0-9a-f]*)?[0-9a-f]");
+ return std::string("[0-9a-f]+");
+ default:
+ return createStringError(std::errc::invalid_argument,
+ "trying to match value with invalid format");
+ }
+}
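+
+// A worked instance (illustrative, not part of the upstream source): with
+// Precision == 3, Kind::HexUpper produces "([1-9A-F][0-9A-F]*)?[0-9A-F]{3}",
+// which matches "00A" and "1FFF" but not a value printed with fewer than
+// three digits, mirroring printf-style "%.3X" zero padding.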
+
+Expected<std::string>
+ExpressionFormat::getMatchingString(ExpressionValue IntegerValue) const {
+ uint64_t AbsoluteValue;
+ StringRef SignPrefix = IntegerValue.isNegative() ? "-" : "";
+
+ if (Value == Kind::Signed) {
+ Expected<int64_t> SignedValue = IntegerValue.getSignedValue();
+ if (!SignedValue)
+ return SignedValue.takeError();
+ if (*SignedValue < 0)
+ AbsoluteValue = cantFail(IntegerValue.getAbsolute().getUnsignedValue());
+ else
+ AbsoluteValue = *SignedValue;
+ } else {
+ Expected<uint64_t> UnsignedValue = IntegerValue.getUnsignedValue();
+ if (!UnsignedValue)
+ return UnsignedValue.takeError();
+ AbsoluteValue = *UnsignedValue;
+ }
+
+ std::string AbsoluteValueStr;
+ switch (Value) {
+ case Kind::Unsigned:
+ case Kind::Signed:
+ AbsoluteValueStr = utostr(AbsoluteValue);
+ break;
+ case Kind::HexUpper:
+ case Kind::HexLower:
+ AbsoluteValueStr = utohexstr(AbsoluteValue, Value == Kind::HexLower);
+ break;
+ default:
+ return createStringError(std::errc::invalid_argument,
+ "trying to match value with invalid format");
+ }
+
+ if (Precision > AbsoluteValueStr.size()) {
+ unsigned LeadingZeros = Precision - AbsoluteValueStr.size();
+ return (Twine(SignPrefix) + std::string(LeadingZeros, '0') +
+ AbsoluteValueStr)
+ .str();
+ }
+
+ return (Twine(SignPrefix) + AbsoluteValueStr).str();
+}
+
+Expected<ExpressionValue>
+ExpressionFormat::valueFromStringRepr(StringRef StrVal,
+ const SourceMgr &SM) const {
+ bool ValueIsSigned = Value == Kind::Signed;
+ StringRef OverflowErrorStr = "unable to represent numeric value";
+ if (ValueIsSigned) {
+ int64_t SignedValue;
+
+ if (StrVal.getAsInteger(10, SignedValue))
+ return ErrorDiagnostic::get(SM, StrVal, OverflowErrorStr);
+
+ return ExpressionValue(SignedValue);
+ }
+
+ bool Hex = Value == Kind::HexUpper || Value == Kind::HexLower;
+ uint64_t UnsignedValue;
+ if (StrVal.getAsInteger(Hex ? 16 : 10, UnsignedValue))
+ return ErrorDiagnostic::get(SM, StrVal, OverflowErrorStr);
+
+ return ExpressionValue(UnsignedValue);
+}
+
+static int64_t getAsSigned(uint64_t UnsignedValue) {
+ // Use memcpy to reinterpret the bitpattern in Value since casting to
+ // signed is implementation-defined if the unsigned value is too big to be
+ // represented in the signed type and using a union violates type aliasing
+ // rules.
+ int64_t SignedValue;
+ memcpy(&SignedValue, &UnsignedValue, sizeof(SignedValue));
+ return SignedValue;
+}
+
+Expected<int64_t> ExpressionValue::getSignedValue() const {
+ if (Negative)
+ return getAsSigned(Value);
+
+ if (Value > (uint64_t)std::numeric_limits<int64_t>::max())
+ return make_error<OverflowError>();
+
+ // Value is in the representable range of int64_t so we can use cast.
+ return static_cast<int64_t>(Value);
+}
+
+Expected<uint64_t> ExpressionValue::getUnsignedValue() const {
+ if (Negative)
+ return make_error<OverflowError>();
+
+ return Value;
+}
+
+ExpressionValue ExpressionValue::getAbsolute() const {
+ if (!Negative)
+ return *this;
+
+ int64_t SignedValue = getAsSigned(Value);
+ int64_t MaxInt64 = std::numeric_limits<int64_t>::max();
+ // Absolute value can be represented as int64_t.
+ if (SignedValue >= -MaxInt64)
+ return ExpressionValue(-getAsSigned(Value));
+
+ // -X == -(max int64_t + Rem), negate each component independently.
+ SignedValue += MaxInt64;
+ uint64_t RemainingValueAbsolute = -SignedValue;
+ return ExpressionValue(MaxInt64 + RemainingValueAbsolute);
+}
+
+Expected<ExpressionValue> llvm::operator+(const ExpressionValue &LeftOperand,
+ const ExpressionValue &RightOperand) {
+ if (LeftOperand.isNegative() && RightOperand.isNegative()) {
+ int64_t LeftValue = cantFail(LeftOperand.getSignedValue());
+ int64_t RightValue = cantFail(RightOperand.getSignedValue());
+ Optional<int64_t> Result = checkedAdd<int64_t>(LeftValue, RightValue);
+ if (!Result)
+ return make_error<OverflowError>();
+
+ return ExpressionValue(*Result);
+ }
+
+ // (-A) + B == B - A.
+ if (LeftOperand.isNegative())
+ return RightOperand - LeftOperand.getAbsolute();
+
+ // A + (-B) == A - B.
+ if (RightOperand.isNegative())
+ return LeftOperand - RightOperand.getAbsolute();
+
+ // Both values are positive at this point.
+ uint64_t LeftValue = cantFail(LeftOperand.getUnsignedValue());
+ uint64_t RightValue = cantFail(RightOperand.getUnsignedValue());
+ Optional<uint64_t> Result =
+ checkedAddUnsigned<uint64_t>(LeftValue, RightValue);
+ if (!Result)
+ return make_error<OverflowError>();
+
+ return ExpressionValue(*Result);
+}
+
+Expected<ExpressionValue> llvm::operator-(const ExpressionValue &LeftOperand,
+ const ExpressionValue &RightOperand) {
+ // Result will be negative and thus might underflow.
+ if (LeftOperand.isNegative() && !RightOperand.isNegative()) {
+ int64_t LeftValue = cantFail(LeftOperand.getSignedValue());
+ uint64_t RightValue = cantFail(RightOperand.getUnsignedValue());
+ // Result <= -1 - (max int64_t), which overflows on one's- and two's-complement.
+ if (RightValue > (uint64_t)std::numeric_limits<int64_t>::max())
+ return make_error<OverflowError>();
+ Optional<int64_t> Result =
+ checkedSub(LeftValue, static_cast<int64_t>(RightValue));
+ if (!Result)
+ return make_error<OverflowError>();
+
+ return ExpressionValue(*Result);
+ }
+
+ // (-A) - (-B) == B - A.
+ if (LeftOperand.isNegative())
+ return RightOperand.getAbsolute() - LeftOperand.getAbsolute();
+
+ // A - (-B) == A + B.
+ if (RightOperand.isNegative())
+ return LeftOperand + RightOperand.getAbsolute();
+
+ // Both values are positive at this point.
+ uint64_t LeftValue = cantFail(LeftOperand.getUnsignedValue());
+ uint64_t RightValue = cantFail(RightOperand.getUnsignedValue());
+ if (LeftValue >= RightValue)
+ return ExpressionValue(LeftValue - RightValue);
+ else {
+ uint64_t AbsoluteDifference = RightValue - LeftValue;
+ uint64_t MaxInt64 = std::numeric_limits<int64_t>::max();
+ // Value might underflow.
+ if (AbsoluteDifference > MaxInt64) {
+ AbsoluteDifference -= MaxInt64;
+ int64_t Result = -MaxInt64;
+ int64_t MinInt64 = std::numeric_limits<int64_t>::min();
+ // Underflow, tested by:
+ // abs(Result + (max int64_t)) > abs((min int64_t) + (max int64_t))
+ if (AbsoluteDifference > static_cast<uint64_t>(-(MinInt64 - Result)))
+ return make_error<OverflowError>();
+ Result -= static_cast<int64_t>(AbsoluteDifference);
+ return ExpressionValue(Result);
+ }
+
+ return ExpressionValue(-static_cast<int64_t>(AbsoluteDifference));
+ }
+}
+
+Expected<ExpressionValue> llvm::operator*(const ExpressionValue &LeftOperand,
+ const ExpressionValue &RightOperand) {
+ // -A * -B == A * B
+ if (LeftOperand.isNegative() && RightOperand.isNegative())
+ return LeftOperand.getAbsolute() * RightOperand.getAbsolute();
+
+ // A * -B == -B * A
+ if (RightOperand.isNegative())
+ return RightOperand * LeftOperand;
+
+ assert(!RightOperand.isNegative() && "Unexpected negative operand!");
+
+ // Result will be negative and can underflow.
+ if (LeftOperand.isNegative()) {
+ auto Result = LeftOperand.getAbsolute() * RightOperand.getAbsolute();
+ if (!Result)
+ return Result;
+
+ return ExpressionValue(0) - *Result;
+ }
+
+ // Result will be positive and can overflow.
+ uint64_t LeftValue = cantFail(LeftOperand.getUnsignedValue());
+ uint64_t RightValue = cantFail(RightOperand.getUnsignedValue());
+ Optional<uint64_t> Result =
+ checkedMulUnsigned<uint64_t>(LeftValue, RightValue);
+ if (!Result)
+ return make_error<OverflowError>();
+
+ return ExpressionValue(*Result);
+}
+
+Expected<ExpressionValue> llvm::operator/(const ExpressionValue &LeftOperand,
+ const ExpressionValue &RightOperand) {
+ // -A / -B == A / B
+ if (LeftOperand.isNegative() && RightOperand.isNegative())
+ return LeftOperand.getAbsolute() / RightOperand.getAbsolute();
+
+ // Check for divide by zero.
+ if (RightOperand == ExpressionValue(0))
+ return make_error<OverflowError>();
+
+ // Result will be negative and can underflow.
+ if (LeftOperand.isNegative() || RightOperand.isNegative())
+ return ExpressionValue(0) -
+ cantFail(LeftOperand.getAbsolute() / RightOperand.getAbsolute());
+
+ uint64_t LeftValue = cantFail(LeftOperand.getUnsignedValue());
+ uint64_t RightValue = cantFail(RightOperand.getUnsignedValue());
+ return ExpressionValue(LeftValue / RightValue);
+}
+
+Expected<ExpressionValue> llvm::max(const ExpressionValue &LeftOperand,
+ const ExpressionValue &RightOperand) {
+ if (LeftOperand.isNegative() && RightOperand.isNegative()) {
+ int64_t LeftValue = cantFail(LeftOperand.getSignedValue());
+ int64_t RightValue = cantFail(RightOperand.getSignedValue());
+ return ExpressionValue(std::max(LeftValue, RightValue));
+ }
+
+ if (!LeftOperand.isNegative() && !RightOperand.isNegative()) {
+ uint64_t LeftValue = cantFail(LeftOperand.getUnsignedValue());
+ uint64_t RightValue = cantFail(RightOperand.getUnsignedValue());
+ return ExpressionValue(std::max(LeftValue, RightValue));
+ }
+
+ if (LeftOperand.isNegative())
+ return RightOperand;
+
+ return LeftOperand;
+}
+
+Expected<ExpressionValue> llvm::min(const ExpressionValue &LeftOperand,
+ const ExpressionValue &RightOperand) {
+ if (cantFail(max(LeftOperand, RightOperand)) == LeftOperand)
+ return RightOperand;
+
+ return LeftOperand;
+}
+
+Expected<ExpressionValue> NumericVariableUse::eval() const {
+ Optional<ExpressionValue> Value = Variable->getValue();
+ if (Value)
+ return *Value;
+
+ return make_error<UndefVarError>(getExpressionStr());
+}
+
+Expected<ExpressionValue> BinaryOperation::eval() const {
+ Expected<ExpressionValue> LeftOp = LeftOperand->eval();
+ Expected<ExpressionValue> RightOp = RightOperand->eval();
+
+ // Bubble up any error (e.g. undefined variables) in the recursive
+ // evaluation.
+ if (!LeftOp || !RightOp) {
+ Error Err = Error::success();
+ if (!LeftOp)
+ Err = joinErrors(std::move(Err), LeftOp.takeError());
+ if (!RightOp)
+ Err = joinErrors(std::move(Err), RightOp.takeError());
+ return std::move(Err);
+ }
+
+ return EvalBinop(*LeftOp, *RightOp);
+}
+
+Expected<ExpressionFormat>
+BinaryOperation::getImplicitFormat(const SourceMgr &SM) const {
+ Expected<ExpressionFormat> LeftFormat = LeftOperand->getImplicitFormat(SM);
+ Expected<ExpressionFormat> RightFormat = RightOperand->getImplicitFormat(SM);
+ if (!LeftFormat || !RightFormat) {
+ Error Err = Error::success();
+ if (!LeftFormat)
+ Err = joinErrors(std::move(Err), LeftFormat.takeError());
+ if (!RightFormat)
+ Err = joinErrors(std::move(Err), RightFormat.takeError());
+ return std::move(Err);
+ }
+
+ if (*LeftFormat != ExpressionFormat::Kind::NoFormat &&
+ *RightFormat != ExpressionFormat::Kind::NoFormat &&
+ *LeftFormat != *RightFormat)
+ return ErrorDiagnostic::get(
+ SM, getExpressionStr(),
+ "implicit format conflict between '" + LeftOperand->getExpressionStr() +
+ "' (" + LeftFormat->toString() + ") and '" +
+ RightOperand->getExpressionStr() + "' (" + RightFormat->toString() +
+ "), need an explicit format specifier");
+
+ return *LeftFormat != ExpressionFormat::Kind::NoFormat ? *LeftFormat
+ : *RightFormat;
+}
+
+Expected<std::string> NumericSubstitution::getResult() const {
+ assert(ExpressionPointer->getAST() != nullptr &&
+ "Substituting empty expression");
+ Expected<ExpressionValue> EvaluatedValue =
+ ExpressionPointer->getAST()->eval();
+ if (!EvaluatedValue)
+ return EvaluatedValue.takeError();
+ ExpressionFormat Format = ExpressionPointer->getFormat();
+ return Format.getMatchingString(*EvaluatedValue);
+}
+
+Expected<std::string> StringSubstitution::getResult() const {
+ // Look up the value and escape it so that we can put it into the regex.
+ Expected<StringRef> VarVal = Context->getPatternVarValue(FromStr);
+ if (!VarVal)
+ return VarVal.takeError();
+ return Regex::escape(*VarVal);
+}
+
+bool Pattern::isValidVarNameStart(char C) { return C == '_' || isAlpha(C); }
+
+Expected<Pattern::VariableProperties>
+Pattern::parseVariable(StringRef &Str, const SourceMgr &SM) {
+ if (Str.empty())
+ return ErrorDiagnostic::get(SM, Str, "empty variable name");
+
+ size_t I = 0;
+ bool IsPseudo = Str[0] == '@';
+
+ // Global vars start with '$'.
+ if (Str[0] == '$' || IsPseudo)
+ ++I;
+
+ if (!isValidVarNameStart(Str[I++]))
+ return ErrorDiagnostic::get(SM, Str, "invalid variable name");
+
+ for (size_t E = Str.size(); I != E; ++I)
+ // Variable names are composed of alphanumeric characters and underscores.
+ if (Str[I] != '_' && !isAlnum(Str[I]))
+ break;
+
+ StringRef Name = Str.take_front(I);
+ Str = Str.substr(I);
+ return VariableProperties {Name, IsPseudo};
+}
+
+// StringRef holding all characters considered as horizontal whitespaces by
+// FileCheck input canonicalization.
+constexpr StringLiteral SpaceChars = " \t";
+
+// Parsing helper function that strips the first character in S and returns it.
+static char popFront(StringRef &S) {
+ char C = S.front();
+ S = S.drop_front();
+ return C;
+}
+
+char OverflowError::ID = 0;
+char UndefVarError::ID = 0;
+char ErrorDiagnostic::ID = 0;
+char NotFoundError::ID = 0;
+
+Expected<NumericVariable *> Pattern::parseNumericVariableDefinition(
+ StringRef &Expr, FileCheckPatternContext *Context,
+ Optional<size_t> LineNumber, ExpressionFormat ImplicitFormat,
+ const SourceMgr &SM) {
+ Expected<VariableProperties> ParseVarResult = parseVariable(Expr, SM);
+ if (!ParseVarResult)
+ return ParseVarResult.takeError();
+ StringRef Name = ParseVarResult->Name;
+
+ if (ParseVarResult->IsPseudo)
+ return ErrorDiagnostic::get(
+ SM, Name, "definition of pseudo numeric variable unsupported");
+
+ // Detect collisions between string and numeric variables when the latter
+ // is created later than the former.
+ if (Context->DefinedVariableTable.find(Name) !=
+ Context->DefinedVariableTable.end())
+ return ErrorDiagnostic::get(
+ SM, Name, "string variable with name '" + Name + "' already exists");
+
+ Expr = Expr.ltrim(SpaceChars);
+ if (!Expr.empty())
+ return ErrorDiagnostic::get(
+ SM, Expr, "unexpected characters after numeric variable name");
+
+ NumericVariable *DefinedNumericVariable;
+ auto VarTableIter = Context->GlobalNumericVariableTable.find(Name);
+ if (VarTableIter != Context->GlobalNumericVariableTable.end()) {
+ DefinedNumericVariable = VarTableIter->second;
+ if (DefinedNumericVariable->getImplicitFormat() != ImplicitFormat)
+ return ErrorDiagnostic::get(
+ SM, Expr, "format different from previous variable definition");
+ } else
+ DefinedNumericVariable =
+ Context->makeNumericVariable(Name, ImplicitFormat, LineNumber);
+
+ return DefinedNumericVariable;
+}
+
+Expected<std::unique_ptr<NumericVariableUse>> Pattern::parseNumericVariableUse(
+ StringRef Name, bool IsPseudo, Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM) {
+ if (IsPseudo && !Name.equals("@LINE"))
+ return ErrorDiagnostic::get(
+ SM, Name, "invalid pseudo numeric variable '" + Name + "'");
+
+ // Numeric variable definitions and uses are parsed in the order in which
+ // they appear in the CHECK patterns. For each definition, the pointer to the
+ // class instance of the corresponding numeric variable definition is stored
+ // in GlobalNumericVariableTable in parsePattern. Therefore, if the pointer
+ // we get below is null, it means no such variable was defined before. When
+ // that happens, we create a dummy variable so that parsing can continue. All
+ // uses of undefined variables, whether string or numeric, are then diagnosed
+ // in printSubstitutions() after failing to match.
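+ // For example, if "[[#FOO]]" is seen before any "[[#FOO:]]" definition,
+ // FOO gets a dummy entry in the table below so parsing can continue, and
+ // the undefined use is only reported once the directive fails to match.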
+ auto VarTableIter = Context->GlobalNumericVariableTable.find(Name);
+ NumericVariable *NumericVariable;
+ if (VarTableIter != Context->GlobalNumericVariableTable.end())
+ NumericVariable = VarTableIter->second;
+ else {
+ NumericVariable = Context->makeNumericVariable(
+ Name, ExpressionFormat(ExpressionFormat::Kind::Unsigned));
+ Context->GlobalNumericVariableTable[Name] = NumericVariable;
+ }
+
+ Optional<size_t> DefLineNumber = NumericVariable->getDefLineNumber();
+ if (DefLineNumber && LineNumber && *DefLineNumber == *LineNumber)
+ return ErrorDiagnostic::get(
+ SM, Name,
+ "numeric variable '" + Name +
+ "' defined earlier in the same CHECK directive");
+
+ return std::make_unique<NumericVariableUse>(Name, NumericVariable);
+}
+
+Expected<std::unique_ptr<ExpressionAST>> Pattern::parseNumericOperand(
+ StringRef &Expr, AllowedOperand AO, bool MaybeInvalidConstraint,
+ Optional<size_t> LineNumber, FileCheckPatternContext *Context,
+ const SourceMgr &SM) {
+ if (Expr.startswith("(")) {
+ if (AO != AllowedOperand::Any)
+ return ErrorDiagnostic::get(
+ SM, Expr, "parenthesized expression not permitted here");
+ return parseParenExpr(Expr, LineNumber, Context, SM);
+ }
+
+ if (AO == AllowedOperand::LineVar || AO == AllowedOperand::Any) {
+ // Try to parse as a numeric variable use.
+ Expected<Pattern::VariableProperties> ParseVarResult =
+ parseVariable(Expr, SM);
+ if (ParseVarResult) {
+ // Try to parse a function call.
+ if (Expr.ltrim(SpaceChars).startswith("(")) {
+ if (AO != AllowedOperand::Any)
+ return ErrorDiagnostic::get(SM, ParseVarResult->Name,
+ "unexpected function call");
+
+ return parseCallExpr(Expr, ParseVarResult->Name, LineNumber, Context,
+ SM);
+ }
+
+ return parseNumericVariableUse(ParseVarResult->Name,
+ ParseVarResult->IsPseudo, LineNumber,
+ Context, SM);
+ }
+
+ if (AO == AllowedOperand::LineVar)
+ return ParseVarResult.takeError();
+ // Ignore the error and retry parsing as a literal.
+ consumeError(ParseVarResult.takeError());
+ }
+
+ // Otherwise, parse it as a literal.
+ int64_t SignedLiteralValue;
+ uint64_t UnsignedLiteralValue;
+ StringRef SaveExpr = Expr;
+ // Accept both signed and unsigned literals: try unsigned first, then fall
+ // back to a signed literal (needed for negative values) when any operand
+ // kind is allowed.
+ if (!Expr.consumeInteger((AO == AllowedOperand::LegacyLiteral) ? 10 : 0,
+ UnsignedLiteralValue))
+ return std::make_unique<ExpressionLiteral>(SaveExpr.drop_back(Expr.size()),
+ UnsignedLiteralValue);
+ Expr = SaveExpr;
+ if (AO == AllowedOperand::Any && !Expr.consumeInteger(0, SignedLiteralValue))
+ return std::make_unique<ExpressionLiteral>(SaveExpr.drop_back(Expr.size()),
+ SignedLiteralValue);
+
+ return ErrorDiagnostic::get(
+ SM, Expr,
+ Twine("invalid ") +
+ (MaybeInvalidConstraint ? "matching constraint or " : "") +
+ "operand format");
+}
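+
+// For illustration, operands accepted here include "10" and "0x1f"
+// (unsigned literals; the radix is auto-detected except for legacy @LINE
+// expressions, which are decimal only), "-3" (a signed literal, permitted
+// only when any operand is allowed), a variable use such as "VAR", or a
+// parenthesized subexpression such as "(VAR+1)".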
+
+Expected<std::unique_ptr<ExpressionAST>>
+Pattern::parseParenExpr(StringRef &Expr, Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM) {
+ Expr = Expr.ltrim(SpaceChars);
+ assert(Expr.startswith("("));
+
+ // Consume the opening parenthesis and parse the nested expression.
+ Expr.consume_front("(");
+ Expr = Expr.ltrim(SpaceChars);
+ if (Expr.empty())
+ return ErrorDiagnostic::get(SM, Expr, "missing operand in expression");
+
+ // Note: parseNumericOperand handles nested opening parentheses.
+ Expected<std::unique_ptr<ExpressionAST>> SubExprResult = parseNumericOperand(
+ Expr, AllowedOperand::Any, /*MaybeInvalidConstraint=*/false, LineNumber,
+ Context, SM);
+ Expr = Expr.ltrim(SpaceChars);
+ while (SubExprResult && !Expr.empty() && !Expr.startswith(")")) {
+ StringRef OrigExpr = Expr;
+ SubExprResult = parseBinop(OrigExpr, Expr, std::move(*SubExprResult), false,
+ LineNumber, Context, SM);
+ Expr = Expr.ltrim(SpaceChars);
+ }
+ if (!SubExprResult)
+ return SubExprResult;
+
+ if (!Expr.consume_front(")")) {
+ return ErrorDiagnostic::get(SM, Expr,
+ "missing ')' at end of nested expression");
+ }
+ return SubExprResult;
+}
+
+Expected<std::unique_ptr<ExpressionAST>>
+Pattern::parseBinop(StringRef Expr, StringRef &RemainingExpr,
+ std::unique_ptr<ExpressionAST> LeftOp,
+ bool IsLegacyLineExpr, Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM) {
+ RemainingExpr = RemainingExpr.ltrim(SpaceChars);
+ if (RemainingExpr.empty())
+ return std::move(LeftOp);
+
+ // Check if this is a supported operation and select a function to perform
+ // it.
+ SMLoc OpLoc = SMLoc::getFromPointer(RemainingExpr.data());
+ char Operator = popFront(RemainingExpr);
+ binop_eval_t EvalBinop;
+ switch (Operator) {
+ case '+':
+ EvalBinop = operator+;
+ break;
+ case '-':
+ EvalBinop = operator-;
+ break;
+ default:
+ return ErrorDiagnostic::get(
+ SM, OpLoc, Twine("unsupported operation '") + Twine(Operator) + "'");
+ }
+
+ // Parse right operand.
+ RemainingExpr = RemainingExpr.ltrim(SpaceChars);
+ if (RemainingExpr.empty())
+ return ErrorDiagnostic::get(SM, RemainingExpr,
+ "missing operand in expression");
+ // The second operand in a legacy @LINE expression is always a literal.
+ AllowedOperand AO =
+ IsLegacyLineExpr ? AllowedOperand::LegacyLiteral : AllowedOperand::Any;
+ Expected<std::unique_ptr<ExpressionAST>> RightOpResult =
+ parseNumericOperand(RemainingExpr, AO, /*MaybeInvalidConstraint=*/false,
+ LineNumber, Context, SM);
+ if (!RightOpResult)
+ return RightOpResult;
+
+ Expr = Expr.drop_back(RemainingExpr.size());
+ return std::make_unique<BinaryOperation>(Expr, EvalBinop, std::move(LeftOp),
+ std::move(*RightOpResult));
+}
+
+Expected<std::unique_ptr<ExpressionAST>>
+Pattern::parseCallExpr(StringRef &Expr, StringRef FuncName,
+ Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM) {
+ Expr = Expr.ltrim(SpaceChars);
+ assert(Expr.startswith("("));
+
+ auto OptFunc = StringSwitch<Optional<binop_eval_t>>(FuncName)
+ .Case("add", operator+)
+ .Case("div", operator/)
+ .Case("max", max)
+ .Case("min", min)
+ .Case("mul", operator*)
+ .Case("sub", operator-)
+ .Default(None);
+
+ if (!OptFunc)
+ return ErrorDiagnostic::get(
+ SM, FuncName, Twine("call to undefined function '") + FuncName + "'");
+
+ Expr.consume_front("(");
+ Expr = Expr.ltrim(SpaceChars);
+
+ // Parse call arguments, which are comma separated.
+ SmallVector<std::unique_ptr<ExpressionAST>, 4> Args;
+ while (!Expr.empty() && !Expr.startswith(")")) {
+ if (Expr.startswith(","))
+ return ErrorDiagnostic::get(SM, Expr, "missing argument");
+
+ // Parse the argument, which is an arbitrary expression.
+ StringRef OuterBinOpExpr = Expr;
+ Expected<std::unique_ptr<ExpressionAST>> Arg = parseNumericOperand(
+ Expr, AllowedOperand::Any, /*MaybeInvalidConstraint=*/false, LineNumber,
+ Context, SM);
+ while (Arg && !Expr.empty()) {
+ Expr = Expr.ltrim(SpaceChars);
+ // Have we reached an argument terminator?
+ if (Expr.startswith(",") || Expr.startswith(")"))
+ break;
+
+ // Arg = Arg <op> <expr>
+ Arg = parseBinop(OuterBinOpExpr, Expr, std::move(*Arg), false, LineNumber,
+ Context, SM);
+ }
+
+ // Prefer an expression error over a generic invalid argument message.
+ if (!Arg)
+ return Arg.takeError();
+ Args.push_back(std::move(*Arg));
+
+ // Have we parsed all available arguments?
+ Expr = Expr.ltrim(SpaceChars);
+ if (!Expr.consume_front(","))
+ break;
+
+ Expr = Expr.ltrim(SpaceChars);
+ if (Expr.startswith(")"))
+ return ErrorDiagnostic::get(SM, Expr, "missing argument");
+ }
+
+ if (!Expr.consume_front(")"))
+ return ErrorDiagnostic::get(SM, Expr,
+ "missing ')' at end of call expression");
+
+ const unsigned NumArgs = Args.size();
+ if (NumArgs == 2)
+ return std::make_unique<BinaryOperation>(Expr, *OptFunc, std::move(Args[0]),
+ std::move(Args[1]));
+
+ // TODO: Support more than binop_eval_t.
+ return ErrorDiagnostic::get(SM, FuncName,
+ Twine("function '") + FuncName +
+ Twine("' takes 2 arguments but ") +
+ Twine(NumArgs) + " given");
+}
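+
+// Example call expressions handled above (illustrative): "[[#add(VAR,3)]]"
+// substitutes VAR+3, and calls may nest, as in "[[#max(VAR1,add(VAR2,4))]]".
+// Every supported function currently maps to a two-argument binop_eval_t.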
+
+Expected<std::unique_ptr<Expression>> Pattern::parseNumericSubstitutionBlock(
+ StringRef Expr, Optional<NumericVariable *> &DefinedNumericVariable,
+ bool IsLegacyLineExpr, Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM) {
+ std::unique_ptr<ExpressionAST> ExpressionASTPointer = nullptr;
+ StringRef DefExpr = StringRef();
+ DefinedNumericVariable = None;
+ ExpressionFormat ExplicitFormat = ExpressionFormat();
+ unsigned Precision = 0;
+
+ // Parse the format specifier (NOTE: ',' is also an argument separator).
+ size_t FormatSpecEnd = Expr.find(',');
+ size_t FunctionStart = Expr.find('(');
+ if (FormatSpecEnd != StringRef::npos && FormatSpecEnd < FunctionStart) {
+ StringRef FormatExpr = Expr.take_front(FormatSpecEnd);
+ Expr = Expr.drop_front(FormatSpecEnd + 1);
+ FormatExpr = FormatExpr.trim(SpaceChars);
+ if (!FormatExpr.consume_front("%"))
+ return ErrorDiagnostic::get(
+ SM, FormatExpr,
+ "invalid matching format specification in expression");
+
+ // Parse precision.
+ if (FormatExpr.consume_front(".")) {
+ if (FormatExpr.consumeInteger(10, Precision))
+ return ErrorDiagnostic::get(SM, FormatExpr,
+ "invalid precision in format specifier");
+ }
+
+ if (!FormatExpr.empty()) {
+ // Check for unknown matching format specifier and set matching format in
+ // class instance representing this expression.
+ SMLoc FmtLoc = SMLoc::getFromPointer(FormatExpr.data());
+ switch (popFront(FormatExpr)) {
+ case 'u':
+ ExplicitFormat =
+ ExpressionFormat(ExpressionFormat::Kind::Unsigned, Precision);
+ break;
+ case 'd':
+ ExplicitFormat =
+ ExpressionFormat(ExpressionFormat::Kind::Signed, Precision);
+ break;
+ case 'x':
+ ExplicitFormat =
+ ExpressionFormat(ExpressionFormat::Kind::HexLower, Precision);
+ break;
+ case 'X':
+ ExplicitFormat =
+ ExpressionFormat(ExpressionFormat::Kind::HexUpper, Precision);
+ break;
+ default:
+ return ErrorDiagnostic::get(SM, FmtLoc,
+ "invalid format specifier in expression");
+ }
+ }
+
+ FormatExpr = FormatExpr.ltrim(SpaceChars);
+ if (!FormatExpr.empty())
+ return ErrorDiagnostic::get(
+ SM, FormatExpr,
+ "invalid matching format specification in expression");
+ }
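+ // Illustrative format specifiers parsed above: "%u", "%d", "%x" and "%X",
+ // optionally with a precision, e.g. "%.8X" as in "[[#%.8X,ADDR:]]".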
+
+ // Save variable definition expression if any.
+ size_t DefEnd = Expr.find(':');
+ if (DefEnd != StringRef::npos) {
+ DefExpr = Expr.substr(0, DefEnd);
+ Expr = Expr.substr(DefEnd + 1);
+ }
+
+ // Parse matching constraint.
+ Expr = Expr.ltrim(SpaceChars);
+ bool HasParsedValidConstraint = false;
+ if (Expr.consume_front("=="))
+ HasParsedValidConstraint = true;
+
+ // Parse the expression itself.
+ Expr = Expr.ltrim(SpaceChars);
+ if (Expr.empty()) {
+ if (HasParsedValidConstraint)
+ return ErrorDiagnostic::get(
+ SM, Expr, "empty numeric expression should not have a constraint");
+ } else {
+ Expr = Expr.rtrim(SpaceChars);
+ StringRef OuterBinOpExpr = Expr;
+ // The first operand in a legacy @LINE expression is always the @LINE
+ // pseudo variable.
+ AllowedOperand AO =
+ IsLegacyLineExpr ? AllowedOperand::LineVar : AllowedOperand::Any;
+ Expected<std::unique_ptr<ExpressionAST>> ParseResult = parseNumericOperand(
+ Expr, AO, !HasParsedValidConstraint, LineNumber, Context, SM);
+ while (ParseResult && !Expr.empty()) {
+ ParseResult = parseBinop(OuterBinOpExpr, Expr, std::move(*ParseResult),
+ IsLegacyLineExpr, LineNumber, Context, SM);
+ // Legacy @LINE expressions only allow 2 operands.
+ if (ParseResult && IsLegacyLineExpr && !Expr.empty())
+ return ErrorDiagnostic::get(
+ SM, Expr,
+ "unexpected characters at end of expression '" + Expr + "'");
+ }
+ if (!ParseResult)
+ return ParseResult.takeError();
+ ExpressionASTPointer = std::move(*ParseResult);
+ }
+
+ // Select format of the expression, i.e. (i) its explicit format, if any,
+ // otherwise (ii) its implicit format, if any, otherwise (iii) the default
+ // format (unsigned). Error out in case of conflicting implicit format
+ // without explicit format.
+ ExpressionFormat Format;
+ if (ExplicitFormat)
+ Format = ExplicitFormat;
+ else if (ExpressionASTPointer) {
+ Expected<ExpressionFormat> ImplicitFormat =
+ ExpressionASTPointer->getImplicitFormat(SM);
+ if (!ImplicitFormat)
+ return ImplicitFormat.takeError();
+ Format = *ImplicitFormat;
+ }
+ if (!Format)
+ Format = ExpressionFormat(ExpressionFormat::Kind::Unsigned, Precision);
+
+ std::unique_ptr<Expression> ExpressionPointer =
+ std::make_unique<Expression>(std::move(ExpressionASTPointer), Format);
+
+ // Parse the numeric variable definition.
+ if (DefEnd != StringRef::npos) {
+ DefExpr = DefExpr.ltrim(SpaceChars);
+ Expected<NumericVariable *> ParseResult = parseNumericVariableDefinition(
+ DefExpr, Context, LineNumber, ExpressionPointer->getFormat(), SM);
+
+ if (!ParseResult)
+ return ParseResult.takeError();
+ DefinedNumericVariable = *ParseResult;
+ }
+
+ return std::move(ExpressionPointer);
+}
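+
+// Putting the pieces together (illustrative): for the block
+// "[[#%X,OFFSET: VAR + 8]]" this returns an Expression with HexUpper format
+// whose AST evaluates VAR + 8, and sets DefinedNumericVariable to OFFSET.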
+
+bool Pattern::parsePattern(StringRef PatternStr, StringRef Prefix,
+ SourceMgr &SM, const FileCheckRequest &Req) {
+ bool MatchFullLinesHere = Req.MatchFullLines && CheckTy != Check::CheckNot;
+ IgnoreCase = Req.IgnoreCase;
+
+ PatternLoc = SMLoc::getFromPointer(PatternStr.data());
+
+ if (!(Req.NoCanonicalizeWhiteSpace && Req.MatchFullLines))
+ // Ignore trailing whitespace.
+ while (!PatternStr.empty() &&
+ (PatternStr.back() == ' ' || PatternStr.back() == '\t'))
+ PatternStr = PatternStr.substr(0, PatternStr.size() - 1);
+
+ // Check that there is something on the line.
+ if (PatternStr.empty() && CheckTy != Check::CheckEmpty) {
+ SM.PrintMessage(PatternLoc, SourceMgr::DK_Error,
+ "found empty check string with prefix '" + Prefix + ":'");
+ return true;
+ }
+
+ if (!PatternStr.empty() && CheckTy == Check::CheckEmpty) {
+ SM.PrintMessage(
+ PatternLoc, SourceMgr::DK_Error,
+ "found non-empty check string for empty check with prefix '" + Prefix +
+ ":'");
+ return true;
+ }
+
+ if (CheckTy == Check::CheckEmpty) {
+ RegExStr = "(\n$)";
+ return false;
+ }
+
+ // If literal check, set fixed string.
+ if (CheckTy.isLiteralMatch()) {
+ FixedStr = PatternStr;
+ return false;
+ }
+
+ // Check to see if this is a fixed string, or if it has regex pieces.
+ if (!MatchFullLinesHere &&
+ (PatternStr.size() < 2 || (PatternStr.find("{{") == StringRef::npos &&
+ PatternStr.find("[[") == StringRef::npos))) {
+ FixedStr = PatternStr;
+ return false;
+ }
+
+ if (MatchFullLinesHere) {
+ RegExStr += '^';
+ if (!Req.NoCanonicalizeWhiteSpace)
+ RegExStr += " *";
+ }
+
+ // Paren value #0 is for the fully matched string. Any new parenthesized
+ // values add from there.
+ unsigned CurParen = 1;
+
+ // Otherwise, there is at least one regex piece. Build up the regex pattern
+ // by escaping scary characters in fixed strings, building up one big regex.
+ while (!PatternStr.empty()) {
+ // RegEx matches.
+ if (PatternStr.startswith("{{")) {
+ // This is the start of a regex match. Scan for the }}.
+ size_t End = PatternStr.find("}}");
+ if (End == StringRef::npos) {
+ SM.PrintMessage(SMLoc::getFromPointer(PatternStr.data()),
+ SourceMgr::DK_Error,
+ "found start of regex string with no end '}}'");
+ return true;
+ }
+
+ // Enclose {{}} patterns in parens just like [[]] even though we're not
+ // capturing the result for any purpose. This is required in case the
+ // expression contains an alternation like: CHECK: abc{{x|z}}def. We
+ // want this to turn into: "abc(x|z)def" not "abcx|zdef".
+ RegExStr += '(';
+ ++CurParen;
+
+ if (AddRegExToRegEx(PatternStr.substr(2, End - 2), CurParen, SM))
+ return true;
+ RegExStr += ')';
+
+ PatternStr = PatternStr.substr(End + 2);
+ continue;
+ }
+
+ // String and numeric substitution blocks. Pattern substitution blocks come
+ // in two forms: [[foo:.*]] and [[foo]]. The former matches .* (or some
+ // other regex) and assigns it to the string variable 'foo'. The latter
+ // substitutes foo's value. Numeric substitution blocks recognize the same
+ // form as string ones, but start with a '#' sign after the double
+ // brackets. They also accept a combined form which sets a numeric variable
+ // to the evaluation of an expression. Both string and numeric variable
+ // names must satisfy the regular expression "[a-zA-Z_][0-9a-zA-Z_]*" to be
+ // valid, as this helps catch some common errors.
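+ // For example (illustrative): "[[REG:r[0-9]+]]" defines string variable
+ // REG from whatever "r[0-9]+" matches, "[[REG]]" substitutes its value,
+ // "[[#NUM:]]" captures a number into numeric variable NUM, and
+ // "[[#NUM+1]]" substitutes the value of the expression NUM+1.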
+ if (PatternStr.startswith("[[")) {
+ StringRef UnparsedPatternStr = PatternStr.substr(2);
+ // Find the closing bracket pair ending the match. End is going to be an
+ // offset relative to the beginning of the match string.
+ size_t End = FindRegexVarEnd(UnparsedPatternStr, SM);
+ StringRef MatchStr = UnparsedPatternStr.substr(0, End);
+ bool IsNumBlock = MatchStr.consume_front("#");
+
+ if (End == StringRef::npos) {
+ SM.PrintMessage(SMLoc::getFromPointer(PatternStr.data()),
+ SourceMgr::DK_Error,
+ "Invalid substitution block, no ]] found");
+ return true;
+ }
+ // Strip the substitution block we are parsing. End points to the start
+ // of the "]]" closing the expression so account for it in computing the
+ // index of the first unparsed character.
+ PatternStr = UnparsedPatternStr.substr(End + 2);
+
+ bool IsDefinition = false;
+ bool SubstNeeded = false;
+ // Whether the substitution block is a legacy use of @LINE with string
+ // substitution block syntax.
+ bool IsLegacyLineExpr = false;
+ StringRef DefName;
+ StringRef SubstStr;
+ std::string MatchRegexp;
+ size_t SubstInsertIdx = RegExStr.size();
+
+ // Parse string variable or legacy @LINE expression.
+ if (!IsNumBlock) {
+ size_t VarEndIdx = MatchStr.find(':');
+ size_t SpacePos = MatchStr.substr(0, VarEndIdx).find_first_of(" \t");
+ if (SpacePos != StringRef::npos) {
+ SM.PrintMessage(SMLoc::getFromPointer(MatchStr.data() + SpacePos),
+ SourceMgr::DK_Error, "unexpected whitespace");
+ return true;
+ }
+
+ // Get the name (e.g. "foo") and verify it is well formed.
+ StringRef OrigMatchStr = MatchStr;
+ Expected<Pattern::VariableProperties> ParseVarResult =
+ parseVariable(MatchStr, SM);
+ if (!ParseVarResult) {
+ logAllUnhandledErrors(ParseVarResult.takeError(), errs());
+ return true;
+ }
+ StringRef Name = ParseVarResult->Name;
+ bool IsPseudo = ParseVarResult->IsPseudo;
+
+ IsDefinition = (VarEndIdx != StringRef::npos);
+ SubstNeeded = !IsDefinition;
+ if (IsDefinition) {
+ if ((IsPseudo || !MatchStr.consume_front(":"))) {
+ SM.PrintMessage(SMLoc::getFromPointer(Name.data()),
+ SourceMgr::DK_Error,
+ "invalid name in string variable definition");
+ return true;
+ }
+
+ // Detect collisions between string and numeric variables when the
+ // former is created later than the latter.
+ if (Context->GlobalNumericVariableTable.find(Name) !=
+ Context->GlobalNumericVariableTable.end()) {
+ SM.PrintMessage(
+ SMLoc::getFromPointer(Name.data()), SourceMgr::DK_Error,
+ "numeric variable with name '" + Name + "' already exists");
+ return true;
+ }
+ DefName = Name;
+ MatchRegexp = MatchStr.str();
+ } else {
+ if (IsPseudo) {
+ MatchStr = OrigMatchStr;
+ IsLegacyLineExpr = IsNumBlock = true;
+ } else
+ SubstStr = Name;
+ }
+ }
+
+ // Parse numeric substitution block.
+ std::unique_ptr<Expression> ExpressionPointer;
+ Optional<NumericVariable *> DefinedNumericVariable;
+ if (IsNumBlock) {
+ Expected<std::unique_ptr<Expression>> ParseResult =
+ parseNumericSubstitutionBlock(MatchStr, DefinedNumericVariable,
+ IsLegacyLineExpr, LineNumber, Context,
+ SM);
+ if (!ParseResult) {
+ logAllUnhandledErrors(ParseResult.takeError(), errs());
+ return true;
+ }
+ ExpressionPointer = std::move(*ParseResult);
+ SubstNeeded = ExpressionPointer->getAST() != nullptr;
+ if (DefinedNumericVariable) {
+ IsDefinition = true;
+ DefName = (*DefinedNumericVariable)->getName();
+ }
+ if (SubstNeeded)
+ SubstStr = MatchStr;
+ else {
+ ExpressionFormat Format = ExpressionPointer->getFormat();
+ MatchRegexp = cantFail(Format.getWildcardRegex());
+ }
+ }
+
+ // Handle variable definition: [[<def>:(...)]] and [[#(...)<def>:(...)]].
+ if (IsDefinition) {
+ RegExStr += '(';
+ ++SubstInsertIdx;
+
+ if (IsNumBlock) {
+ NumericVariableMatch NumericVariableDefinition = {
+ *DefinedNumericVariable, CurParen};
+ NumericVariableDefs[DefName] = NumericVariableDefinition;
+ // This store is done here rather than in match() to allow
+ // parseNumericVariableUse() to get the pointer to the class instance
+ // of the right variable definition corresponding to a given numeric
+ // variable use.
+ Context->GlobalNumericVariableTable[DefName] =
+ *DefinedNumericVariable;
+ } else {
+ VariableDefs[DefName] = CurParen;
+ // Mark string variable as defined to detect collisions between
+ // string and numeric variables in parseNumericVariableUse() and
+ // defineCmdlineVariables() when the latter is created later than the
+ // former. We cannot reuse GlobalVariableTable for this by populating
+ // it with an empty string since we would then lose the ability to
+ // detect the use of an undefined variable in match().
+ Context->DefinedVariableTable[DefName] = true;
+ }
+
+ ++CurParen;
+ }
+
+ if (!MatchRegexp.empty() && AddRegExToRegEx(MatchRegexp, CurParen, SM))
+ return true;
+
+ if (IsDefinition)
+ RegExStr += ')';
+
+ // Handle substitutions: [[foo]] and [[#<foo expr>]].
+ if (SubstNeeded) {
+ // Handle substitution of string variables that were defined earlier on
+ // the same line by emitting a backreference. Expressions do not
+ // support substituting a numeric variable defined on the same line.
+ if (!IsNumBlock && VariableDefs.find(SubstStr) != VariableDefs.end()) {
+ unsigned CaptureParenGroup = VariableDefs[SubstStr];
+ if (CaptureParenGroup < 1 || CaptureParenGroup > 9) {
+ SM.PrintMessage(SMLoc::getFromPointer(SubstStr.data()),
+ SourceMgr::DK_Error,
+ "Can't back-reference more than 9 variables");
+ return true;
+ }
+ AddBackrefToRegEx(CaptureParenGroup);
+ } else {
+ // Handle substitution of string variables ([[<var>]]) defined in
+ // previous CHECK patterns, and substitution of expressions.
+ Substitution *Substitution =
+ IsNumBlock
+ ? Context->makeNumericSubstitution(
+ SubstStr, std::move(ExpressionPointer), SubstInsertIdx)
+ : Context->makeStringSubstitution(SubstStr, SubstInsertIdx);
+ Substitutions.push_back(Substitution);
+ }
+ }
+ }
+
+ // Handle fixed string matches.
+ // Find the end, which is the start of the next regex.
+ size_t FixedMatchEnd = PatternStr.find("{{");
+ FixedMatchEnd = std::min(FixedMatchEnd, PatternStr.find("[["));
+ RegExStr += Regex::escape(PatternStr.substr(0, FixedMatchEnd));
+ PatternStr = PatternStr.substr(FixedMatchEnd);
+ }
+
+ if (MatchFullLinesHere) {
+ if (!Req.NoCanonicalizeWhiteSpace)
+ RegExStr += " *";
+ RegExStr += '$';
+ }
+
+ return false;
+}
+
+bool Pattern::AddRegExToRegEx(StringRef RS, unsigned &CurParen, SourceMgr &SM) {
+ Regex R(RS);
+ std::string Error;
+ if (!R.isValid(Error)) {
+ SM.PrintMessage(SMLoc::getFromPointer(RS.data()), SourceMgr::DK_Error,
+ "invalid regex: " + Error);
+ return true;
+ }
+
+ RegExStr += RS.str();
+ CurParen += R.getNumMatches();
+ return false;
+}
+
+void Pattern::AddBackrefToRegEx(unsigned BackrefNum) {
+ assert(BackrefNum >= 1 && BackrefNum <= 9 && "Invalid backref number");
+ std::string Backref = std::string("\\") + std::string(1, '0' + BackrefNum);
+ RegExStr += Backref;
+}
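+
+// For illustration: "CHECK: [[V:[a-z]+]] [[V]]" compiles to the regex
+// "([a-z]+) \1"; the definition opens capture group 1, and the same-line
+// use is emitted by AddBackrefToRegEx as the backreference "\1".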
+
+Expected<size_t> Pattern::match(StringRef Buffer, size_t &MatchLen,
+ const SourceMgr &SM) const {
+ // If this is the EOF pattern, match it immediately.
+ if (CheckTy == Check::CheckEOF) {
+ MatchLen = 0;
+ return Buffer.size();
+ }
+
+ // If this is a fixed string pattern, just match it now.
+ if (!FixedStr.empty()) {
+ MatchLen = FixedStr.size();
+ size_t Pos =
+ IgnoreCase ? Buffer.find_lower(FixedStr) : Buffer.find(FixedStr);
+ if (Pos == StringRef::npos)
+ return make_error<NotFoundError>();
+ return Pos;
+ }
+
+ // Regex match.
+
+ // If there are substitutions, we need to create a temporary string with the
+ // actual value.
+ StringRef RegExToMatch = RegExStr;
+ std::string TmpStr;
+ if (!Substitutions.empty()) {
+ TmpStr = RegExStr;
+ if (LineNumber)
+ Context->LineVariable->setValue(ExpressionValue(*LineNumber));
+
+ size_t InsertOffset = 0;
+ // Substitute all string variables and expressions whose values are only
+ // now known. Use of string variables defined on the same line are handled
+ // by back-references.
+ for (const auto &Substitution : Substitutions) {
+ // Substitute and check for failure (e.g. use of undefined variable).
+ Expected<std::string> Value = Substitution->getResult();
+ if (!Value) {
+ // Convert to an ErrorDiagnostic to get location information. This is
+ // done here rather than in PrintNoMatch since here we know which
+ // substitution block caused the overflow.
+ Error Err =
+ handleErrors(Value.takeError(), [&](const OverflowError &E) {
+ return ErrorDiagnostic::get(SM, Substitution->getFromString(),
+ "unable to substitute variable or "
+ "numeric expression: overflow error");
+ });
+ return std::move(Err);
+ }
+
+ // Plop it into the regex at the adjusted offset.
+ TmpStr.insert(TmpStr.begin() + Substitution->getIndex() + InsertOffset,
+ Value->begin(), Value->end());
+ InsertOffset += Value->size();
+ }
+
+ // Match the newly constructed regex.
+ RegExToMatch = TmpStr;
+ }
+
+ SmallVector<StringRef, 4> MatchInfo;
+ unsigned int Flags = Regex::Newline;
+ if (IgnoreCase)
+ Flags |= Regex::IgnoreCase;
+ if (!Regex(RegExToMatch, Flags).match(Buffer, &MatchInfo))
+ return make_error<NotFoundError>();
+
+ // Successful regex match.
+ assert(!MatchInfo.empty() && "Didn't get any match");
+ StringRef FullMatch = MatchInfo[0];
+
+ // If this defines any string variables, remember their values.
+ for (const auto &VariableDef : VariableDefs) {
+ assert(VariableDef.second < MatchInfo.size() && "Internal paren error");
+ Context->GlobalVariableTable[VariableDef.first] =
+ MatchInfo[VariableDef.second];
+ }
+
+ // If this defines any numeric variables, remember their values.
+ for (const auto &NumericVariableDef : NumericVariableDefs) {
+ const NumericVariableMatch &NumericVariableMatch =
+ NumericVariableDef.getValue();
+ unsigned CaptureParenGroup = NumericVariableMatch.CaptureParenGroup;
+ assert(CaptureParenGroup < MatchInfo.size() && "Internal paren error");
+ NumericVariable *DefinedNumericVariable =
+ NumericVariableMatch.DefinedNumericVariable;
+
+ StringRef MatchedValue = MatchInfo[CaptureParenGroup];
+ ExpressionFormat Format = DefinedNumericVariable->getImplicitFormat();
+ Expected<ExpressionValue> Value =
+ Format.valueFromStringRepr(MatchedValue, SM);
+ if (!Value)
+ return Value.takeError();
+ DefinedNumericVariable->setValue(*Value, MatchedValue);
+ }
+
+ // Like CHECK-NEXT, CHECK-EMPTY's match range is considered to start after
+ // the required preceding newline, which is consumed by the pattern in the
+ // case of CHECK-EMPTY but not CHECK-NEXT.
+ size_t MatchStartSkip = CheckTy == Check::CheckEmpty;
+ MatchLen = FullMatch.size() - MatchStartSkip;
+ return FullMatch.data() - Buffer.data() + MatchStartSkip;
+}
+
+unsigned Pattern::computeMatchDistance(StringRef Buffer) const {
+ // Just compute the number of matching characters. For regular expressions, we
+ // just compare against the regex itself and hope for the best.
+ //
+ // FIXME: One easy improvement here is to have the regex lib generate a single
+ // example regular expression which matches, and use that as the example
+ // string.
+ StringRef ExampleString(FixedStr);
+ if (ExampleString.empty())
+ ExampleString = RegExStr;
+
+ // Only compare up to the first line in the buffer, or the string size.
+ StringRef BufferPrefix = Buffer.substr(0, ExampleString.size());
+ BufferPrefix = BufferPrefix.split('\n').first;
+ return BufferPrefix.edit_distance(ExampleString);
+}
+
+void Pattern::printSubstitutions(const SourceMgr &SM, StringRef Buffer,
+ SMRange Range,
+ FileCheckDiag::MatchType MatchTy,
+ std::vector<FileCheckDiag> *Diags) const {
+ // Print what we know about substitutions.
+ if (!Substitutions.empty()) {
+ for (const auto &Substitution : Substitutions) {
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ Expected<std::string> MatchedValue = Substitution->getResult();
+
+ // If the substitution failed or its value is not known at match time,
+ // print the undefined variable(s) it uses.
+ if (!MatchedValue) {
+ bool UndefSeen = false;
+ handleAllErrors(
+ MatchedValue.takeError(), [](const NotFoundError &E) {},
+ // Handled in PrintNoMatch().
+ [](const ErrorDiagnostic &E) {},
+ // Handled in match().
+ [](const OverflowError &E) {},
+ [&](const UndefVarError &E) {
+ if (!UndefSeen) {
+ OS << "uses undefined variable(s):";
+ UndefSeen = true;
+ }
+ OS << " ";
+ E.log(OS);
+ });
+ } else {
+ // Substitution succeeded. Print substituted value.
+ OS << "with \"";
+ OS.write_escaped(Substitution->getFromString()) << "\" equal to \"";
+ OS.write_escaped(*MatchedValue) << "\"";
+ }
+
+ // We report only the start of the match/search range to suggest we are
+ // reporting the substitutions as set at the start of the match/search.
+ // Indicating a non-zero-length range might instead seem to imply that the
+ // substitution matches or was captured from exactly that range.
+ if (Diags)
+ Diags->emplace_back(SM, CheckTy, getLoc(), MatchTy,
+ SMRange(Range.Start, Range.Start), OS.str());
+ else
+ SM.PrintMessage(Range.Start, SourceMgr::DK_Note, OS.str());
+ }
+ }
+}
+
+void Pattern::printVariableDefs(const SourceMgr &SM,
+ FileCheckDiag::MatchType MatchTy,
+ std::vector<FileCheckDiag> *Diags) const {
+ if (VariableDefs.empty() && NumericVariableDefs.empty())
+ return;
+ // Build list of variable captures.
+ struct VarCapture {
+ StringRef Name;
+ SMRange Range;
+ };
+ SmallVector<VarCapture, 2> VarCaptures;
+ for (const auto &VariableDef : VariableDefs) {
+ VarCapture VC;
+ VC.Name = VariableDef.first;
+ StringRef Value = Context->GlobalVariableTable[VC.Name];
+ SMLoc Start = SMLoc::getFromPointer(Value.data());
+ SMLoc End = SMLoc::getFromPointer(Value.data() + Value.size());
+ VC.Range = SMRange(Start, End);
+ VarCaptures.push_back(VC);
+ }
+ for (const auto &VariableDef : NumericVariableDefs) {
+ VarCapture VC;
+ VC.Name = VariableDef.getKey();
+ StringRef StrValue = VariableDef.getValue()
+ .DefinedNumericVariable->getStringValue()
+ .getValue();
+ SMLoc Start = SMLoc::getFromPointer(StrValue.data());
+ SMLoc End = SMLoc::getFromPointer(StrValue.data() + StrValue.size());
+ VC.Range = SMRange(Start, End);
+ VarCaptures.push_back(VC);
+ }
+ // Sort variable captures by the order in which they matched the input.
+ // Ranges shouldn't be overlapping, so we can just compare the start.
+ llvm::sort(VarCaptures, [](const VarCapture &A, const VarCapture &B) {
+ assert(A.Range.Start != B.Range.Start &&
+ "unexpected overlapping variable captures");
+ return A.Range.Start.getPointer() < B.Range.Start.getPointer();
+ });
+ // Create notes for the sorted captures.
+ for (const VarCapture &VC : VarCaptures) {
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "captured var \"" << VC.Name << "\"";
+ if (Diags)
+ Diags->emplace_back(SM, CheckTy, getLoc(), MatchTy, VC.Range, OS.str());
+ else
+ SM.PrintMessage(VC.Range.Start, SourceMgr::DK_Note, OS.str(), VC.Range);
+ }
+}
+
+static SMRange ProcessMatchResult(FileCheckDiag::MatchType MatchTy,
+ const SourceMgr &SM, SMLoc Loc,
+ Check::FileCheckType CheckTy,
+ StringRef Buffer, size_t Pos, size_t Len,
+ std::vector<FileCheckDiag> *Diags,
+ bool AdjustPrevDiags = false) {
+ SMLoc Start = SMLoc::getFromPointer(Buffer.data() + Pos);
+ SMLoc End = SMLoc::getFromPointer(Buffer.data() + Pos + Len);
+ SMRange Range(Start, End);
+ if (Diags) {
+ if (AdjustPrevDiags) {
+ SMLoc CheckLoc = Diags->rbegin()->CheckLoc;
+ for (auto I = Diags->rbegin(), E = Diags->rend();
+ I != E && I->CheckLoc == CheckLoc; ++I)
+ I->MatchTy = MatchTy;
+ } else
+ Diags->emplace_back(SM, CheckTy, Loc, MatchTy, Range);
+ }
+ return Range;
+}
+
+void Pattern::printFuzzyMatch(const SourceMgr &SM, StringRef Buffer,
+ std::vector<FileCheckDiag> *Diags) const {
+ // Attempt to find the closest/best fuzzy match. Usually an error happens
+ // because some string in the output didn't exactly match. In these cases, we
+ // would like to show the user a best guess at what "should have" matched, to
+ // save them having to actually check the input manually.
+ size_t NumLinesForward = 0;
+ size_t Best = StringRef::npos;
+ double BestQuality = 0;
+
+ // Use an arbitrary 4k limit on how far we will search.
+ for (size_t i = 0, e = std::min(size_t(4096), Buffer.size()); i != e; ++i) {
+ if (Buffer[i] == '\n')
+ ++NumLinesForward;
+
+ // Patterns have leading whitespace stripped, so skip whitespace when
+ // looking for something which looks like a pattern.
+ if (Buffer[i] == ' ' || Buffer[i] == '\t')
+ continue;
+
+ // Compute the "quality" of this match as an arbitrary combination of the
+ // match distance and the number of lines skipped to get to this match.
+ unsigned Distance = computeMatchDistance(Buffer.substr(i));
+ double Quality = Distance + (NumLinesForward / 100.);
+
+ if (Quality < BestQuality || Best == StringRef::npos) {
+ Best = i;
+ BestQuality = Quality;
+ }
+ }
+
+ // Print the "possible intended match here" line if we found something
+ // reasonable and not equal to what we showed in the "scanning from here"
+ // line.
+ if (Best && Best != StringRef::npos && BestQuality < 50) {
+ SMRange MatchRange =
+ ProcessMatchResult(FileCheckDiag::MatchFuzzy, SM, getLoc(),
+ getCheckTy(), Buffer, Best, 0, Diags);
+ SM.PrintMessage(MatchRange.Start, SourceMgr::DK_Note,
+ "possible intended match here");
+
+ // FIXME: If we wanted to be really friendly we would show why the match
+ // failed, as it can be hard to spot simple one character differences.
+ }
+}
+
+Expected<StringRef>
+FileCheckPatternContext::getPatternVarValue(StringRef VarName) {
+ auto VarIter = GlobalVariableTable.find(VarName);
+ if (VarIter == GlobalVariableTable.end())
+ return make_error<UndefVarError>(VarName);
+
+ return VarIter->second;
+}
+
+template <class... Types>
+NumericVariable *FileCheckPatternContext::makeNumericVariable(Types... args) {
+ NumericVariables.push_back(std::make_unique<NumericVariable>(args...));
+ return NumericVariables.back().get();
+}
+
+Substitution *
+FileCheckPatternContext::makeStringSubstitution(StringRef VarName,
+ size_t InsertIdx) {
+ Substitutions.push_back(
+ std::make_unique<StringSubstitution>(this, VarName, InsertIdx));
+ return Substitutions.back().get();
+}
+
+Substitution *FileCheckPatternContext::makeNumericSubstitution(
+ StringRef ExpressionStr, std::unique_ptr<Expression> Expression,
+ size_t InsertIdx) {
+ Substitutions.push_back(std::make_unique<NumericSubstitution>(
+ this, ExpressionStr, std::move(Expression), InsertIdx));
+ return Substitutions.back().get();
+}
+
+size_t Pattern::FindRegexVarEnd(StringRef Str, SourceMgr &SM) {
+ // Offset keeps track of the current offset within the input Str.
+ size_t Offset = 0;
+ // Current nesting depth of [...] brackets.
+ size_t BracketDepth = 0;
+
+ while (!Str.empty()) {
+ if (Str.startswith("]]") && BracketDepth == 0)
+ return Offset;
+ if (Str[0] == '\\') {
+ // Backslash escapes the next char within regexes, so skip them both.
+ Str = Str.substr(2);
+ Offset += 2;
+ } else {
+ switch (Str[0]) {
+ default:
+ break;
+ case '[':
+ BracketDepth++;
+ break;
+ case ']':
+ if (BracketDepth == 0) {
+ SM.PrintMessage(SMLoc::getFromPointer(Str.data()),
+ SourceMgr::DK_Error,
+ "missing closing \"]\" for regex variable");
+ exit(1);
+ }
+ BracketDepth--;
+ break;
+ }
+ Str = Str.substr(1);
+ Offset++;
+ }
+ }
+
+ return StringRef::npos;
+}
+
+StringRef FileCheck::CanonicalizeFile(MemoryBuffer &MB,
+ SmallVectorImpl<char> &OutputBuffer) {
+ OutputBuffer.reserve(MB.getBufferSize());
+
+ for (const char *Ptr = MB.getBufferStart(), *End = MB.getBufferEnd();
+ Ptr != End; ++Ptr) {
+ // Eliminate the DOS-style '\r' that precedes a '\n'.
+ if (Ptr <= End - 2 && Ptr[0] == '\r' && Ptr[1] == '\n') {
+ continue;
+ }
+
+ // If current char is not a horizontal whitespace or if horizontal
+ // whitespace canonicalization is disabled, dump it to output as is.
+ if (Req.NoCanonicalizeWhiteSpace || (*Ptr != ' ' && *Ptr != '\t')) {
+ OutputBuffer.push_back(*Ptr);
+ continue;
+ }
+
+ // Otherwise, add one space and advance over neighboring space.
+ OutputBuffer.push_back(' ');
+ while (Ptr + 1 != End && (Ptr[1] == ' ' || Ptr[1] == '\t'))
+ ++Ptr;
+ }
+
+ // Add a null byte and then return all but that byte.
+ OutputBuffer.push_back('\0');
+ return StringRef(OutputBuffer.data(), OutputBuffer.size() - 1);
+}
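+
+// For example (illustrative): the input "a \t\tb\r\n" canonicalizes to
+// "a b\n": runs of spaces and tabs collapse to a single space (skipped when
+// NoCanonicalizeWhiteSpace is set), and the '\r' of a CRLF pair is always
+// dropped.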
+
+FileCheckDiag::FileCheckDiag(const SourceMgr &SM,
+ const Check::FileCheckType &CheckTy,
+ SMLoc CheckLoc, MatchType MatchTy,
+ SMRange InputRange, StringRef Note)
+ : CheckTy(CheckTy), CheckLoc(CheckLoc), MatchTy(MatchTy), Note(Note) {
+ auto Start = SM.getLineAndColumn(InputRange.Start);
+ auto End = SM.getLineAndColumn(InputRange.End);
+ InputStartLine = Start.first;
+ InputStartCol = Start.second;
+ InputEndLine = End.first;
+ InputEndCol = End.second;
+}
+
+static bool IsPartOfWord(char c) {
+ return (isAlnum(c) || c == '-' || c == '_');
+}
+
+Check::FileCheckType &Check::FileCheckType::setCount(int C) {
+ assert(C > 0 && "zero and negative counts are not supported");
+ assert((C == 1 || Kind == CheckPlain) &&
+ "count supported only for plain CHECK directives");
+ Count = C;
+ return *this;
+}
+
+std::string Check::FileCheckType::getModifiersDescription() const {
+ if (Modifiers.none())
+ return "";
+ std::string Ret;
+ raw_string_ostream OS(Ret);
+ OS << '{';
+ if (isLiteralMatch())
+ OS << "LITERAL";
+ OS << '}';
+ return OS.str();
+}
+
+std::string Check::FileCheckType::getDescription(StringRef Prefix) const {
+ // Append directive modifiers.
+ auto WithModifiers = [this, Prefix](StringRef Str) -> std::string {
+ return (Prefix + Str + getModifiersDescription()).str();
+ };
+
+ switch (Kind) {
+ case Check::CheckNone:
+ return "invalid";
+ case Check::CheckPlain:
+ if (Count > 1)
+ return WithModifiers("-COUNT");
+ return WithModifiers("");
+ case Check::CheckNext:
+ return WithModifiers("-NEXT");
+ case Check::CheckSame:
+ return WithModifiers("-SAME");
+ case Check::CheckNot:
+ return WithModifiers("-NOT");
+ case Check::CheckDAG:
+ return WithModifiers("-DAG");
+ case Check::CheckLabel:
+ return WithModifiers("-LABEL");
+ case Check::CheckEmpty:
+ return WithModifiers("-EMPTY");
+ case Check::CheckComment:
+ return std::string(Prefix);
+ case Check::CheckEOF:
+ return "implicit EOF";
+ case Check::CheckBadNot:
+ return "bad NOT";
+ case Check::CheckBadCount:
+ return "bad COUNT";
+ }
+ llvm_unreachable("unknown FileCheckType");
+}
+
+static std::pair<Check::FileCheckType, StringRef>
+FindCheckType(const FileCheckRequest &Req, StringRef Buffer, StringRef Prefix) {
+ if (Buffer.size() <= Prefix.size())
+ return {Check::CheckNone, StringRef()};
+
+ StringRef Rest = Buffer.drop_front(Prefix.size());
+ // Check for comment.
+ if (llvm::is_contained(Req.CommentPrefixes, Prefix)) {
+ if (Rest.consume_front(":"))
+ return {Check::CheckComment, Rest};
+ // Ignore a comment prefix if it has a suffix like "-NOT".
+ return {Check::CheckNone, StringRef()};
+ }
+
+ auto ConsumeModifiers = [&](Check::FileCheckType Ret)
+ -> std::pair<Check::FileCheckType, StringRef> {
+ if (Rest.consume_front(":"))
+ return {Ret, Rest};
+ if (!Rest.consume_front("{"))
+ return {Check::CheckNone, StringRef()};
+
+ // Parse the modifiers, separated by commas.
+ do {
+ // Allow whitespace in modifiers list.
+ Rest = Rest.ltrim();
+ if (Rest.consume_front("LITERAL"))
+ Ret.setLiteralMatch();
+ else
+ return {Check::CheckNone, Rest};
+ // Allow whitespace in modifiers list.
+ Rest = Rest.ltrim();
+ } while (Rest.consume_front(","));
+ if (!Rest.consume_front("}:"))
+ return {Check::CheckNone, Rest};
+ return {Ret, Rest};
+ };
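+
+ // For example (illustrative): "CHECK{LITERAL}:" and "CHECK-NOT{LITERAL}:"
+ // are accepted here; the LITERAL modifier makes the pattern match
+ // verbatim, disabling {{...}} regex blocks and [[...]] substitutions.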
+
+ // Verify that the prefix is followed by directive modifiers or a colon.
+ if (Rest.consume_front(":"))
+ return {Check::CheckPlain, Rest};
+ if (Rest.front() == '{')
+ return ConsumeModifiers(Check::CheckPlain);
+
+ if (!Rest.consume_front("-"))
+ return {Check::CheckNone, StringRef()};
+
+ if (Rest.consume_front("COUNT-")) {
+ int64_t Count;
+ if (Rest.consumeInteger(10, Count))
+ // Error happened in parsing integer.
+ return {Check::CheckBadCount, Rest};
+ if (Count <= 0 || Count > INT32_MAX)
+ return {Check::CheckBadCount, Rest};
+ if (Rest.front() != ':' && Rest.front() != '{')
+ return {Check::CheckBadCount, Rest};
+ return ConsumeModifiers(
+ Check::FileCheckType(Check::CheckPlain).setCount(Count));
+ }
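+
+ // For example (illustrative): "CHECK-COUNT-3: foo" expects three
+ // successive matches of "foo", and a count combines with modifiers, as in
+ // "CHECK-COUNT-2{LITERAL}: a{{b}}".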
+
+ // You can't combine -NOT with another suffix.
+ if (Rest.startswith("DAG-NOT:") || Rest.startswith("NOT-DAG:") ||
+ Rest.startswith("NEXT-NOT:") || Rest.startswith("NOT-NEXT:") ||
+ Rest.startswith("SAME-NOT:") || Rest.startswith("NOT-SAME:") ||
+ Rest.startswith("EMPTY-NOT:") || Rest.startswith("NOT-EMPTY:"))
+ return {Check::CheckBadNot, Rest};
+
+ if (Rest.consume_front("NEXT"))
+ return ConsumeModifiers(Check::CheckNext);
+
+ if (Rest.consume_front("SAME"))
+ return ConsumeModifiers(Check::CheckSame);
+
+ if (Rest.consume_front("NOT"))
+ return ConsumeModifiers(Check::CheckNot);
+
+ if (Rest.consume_front("DAG"))
+ return ConsumeModifiers(Check::CheckDAG);
+
+ if (Rest.consume_front("LABEL"))
+ return ConsumeModifiers(Check::CheckLabel);
+
+ if (Rest.consume_front("EMPTY"))
+ return ConsumeModifiers(Check::CheckEmpty);
+
+ return {Check::CheckNone, Rest};
+}
+
+// From the given position, find the next character after the word.
+static size_t SkipWord(StringRef Str, size_t Loc) {
+ while (Loc < Str.size() && IsPartOfWord(Str[Loc]))
+ ++Loc;
+ return Loc;
+}
+
+/// Searches the buffer for the first prefix in the prefix regular expression.
+///
+/// This searches the buffer using the provided regular expression, however it
+/// enforces constraints beyond that:
+/// 1) The found prefix must not be a suffix of something that looks like
+/// a valid prefix.
+/// 2) The found prefix must be followed by a valid check type suffix using \c
+/// FindCheckType above.
+///
+/// \returns a pair of StringRefs into the Buffer:
+/// - the first match of the regular expression that satisfies the two
+/// constraints above, or an empty StringRef to indicate failure;
+/// - the buffer rewound to the location right after the parsed suffix, so
+/// that parsing can continue from there.
+///
+/// If this routine returns a valid prefix, it will also shrink \p Buffer to
+/// start at the beginning of the returned prefix, increment \p LineNumber for
+/// each new line consumed from \p Buffer, and set \p CheckTy to the type of
+/// check found by examining the suffix.
+///
+/// If no valid prefix is found, the state of Buffer, LineNumber, and CheckTy
+/// is unspecified.
+static std::pair<StringRef, StringRef>
+FindFirstMatchingPrefix(const FileCheckRequest &Req, Regex &PrefixRE,
+ StringRef &Buffer, unsigned &LineNumber,
+ Check::FileCheckType &CheckTy) {
+ SmallVector<StringRef, 2> Matches;
+
+ while (!Buffer.empty()) {
+ // Find the first (longest) match using the RE.
+ if (!PrefixRE.match(Buffer, &Matches))
+ // No match at all, bail.
+ return {StringRef(), StringRef()};
+
+ StringRef Prefix = Matches[0];
+ Matches.clear();
+
+ assert(Prefix.data() >= Buffer.data() &&
+ Prefix.data() < Buffer.data() + Buffer.size() &&
+ "Prefix doesn't start inside of buffer!");
+ size_t Loc = Prefix.data() - Buffer.data();
+ StringRef Skipped = Buffer.substr(0, Loc);
+ Buffer = Buffer.drop_front(Loc);
+ LineNumber += Skipped.count('\n');
+
+ // Check that the matched prefix isn't a suffix of some other check-like
+ // word, e.g. the "CHECK" at the end of "FOOCHECK:" must not be treated as
+ // a prefix.
+ // FIXME: This is a very ad-hoc check. It would be better handled in some
+ // other way. Among other things it seems hard to distinguish between
+ // intentional and unintentional uses of this feature.
+ if (Skipped.empty() || !IsPartOfWord(Skipped.back())) {
+ // Now extract the type.
+ StringRef AfterSuffix;
+ std::tie(CheckTy, AfterSuffix) = FindCheckType(Req, Buffer, Prefix);
+
+ // If we've found a valid check type for this prefix, we're done.
+ if (CheckTy != Check::CheckNone)
+ return {Prefix, AfterSuffix};
+ }
+
+ // If we didn't successfully find a prefix, we need to skip this invalid
+ // prefix and continue scanning. We directly skip the prefix that was
+ // matched and any additional parts of that check-like word.
+ Buffer = Buffer.drop_front(SkipWord(Buffer, Prefix.size()));
+ }
+
+ // We ran out of buffer while skipping partial matches so give up.
+ return {StringRef(), StringRef()};
+}
+
+void FileCheckPatternContext::createLineVariable() {
+ assert(!LineVariable && "@LINE pseudo numeric variable already created");
+ StringRef LineName = "@LINE";
+ LineVariable = makeNumericVariable(
+ LineName, ExpressionFormat(ExpressionFormat::Kind::Unsigned));
+ GlobalNumericVariableTable[LineName] = LineVariable;
+}
+
+FileCheck::FileCheck(FileCheckRequest Req)
+ : Req(Req), PatternContext(std::make_unique<FileCheckPatternContext>()),
+ CheckStrings(std::make_unique<std::vector<FileCheckString>>()) {}
+
+FileCheck::~FileCheck() = default;
+
+bool FileCheck::readCheckFile(
+ SourceMgr &SM, StringRef Buffer, Regex &PrefixRE,
+ std::pair<unsigned, unsigned> *ImpPatBufferIDRange) {
+ if (ImpPatBufferIDRange)
+ ImpPatBufferIDRange->first = ImpPatBufferIDRange->second = 0;
+
+ Error DefineError =
+ PatternContext->defineCmdlineVariables(Req.GlobalDefines, SM);
+ if (DefineError) {
+ logAllUnhandledErrors(std::move(DefineError), errs());
+ return true;
+ }
+
+ PatternContext->createLineVariable();
+
+ std::vector<Pattern> ImplicitNegativeChecks;
+ for (StringRef PatternString : Req.ImplicitCheckNot) {
+ // Create a buffer with fake command line content in order to display the
+ // command line option responsible for the specific implicit CHECK-NOT.
+ std::string Prefix = "-implicit-check-not='";
+ std::string Suffix = "'";
+ std::unique_ptr<MemoryBuffer> CmdLine = MemoryBuffer::getMemBufferCopy(
+ (Prefix + PatternString + Suffix).str(), "command line");
+
+ StringRef PatternInBuffer =
+ CmdLine->getBuffer().substr(Prefix.size(), PatternString.size());
+ unsigned BufferID = SM.AddNewSourceBuffer(std::move(CmdLine), SMLoc());
+ if (ImpPatBufferIDRange) {
+ if (ImpPatBufferIDRange->first == ImpPatBufferIDRange->second) {
+ ImpPatBufferIDRange->first = BufferID;
+ ImpPatBufferIDRange->second = BufferID + 1;
+ } else {
+ assert(BufferID == ImpPatBufferIDRange->second &&
+ "expected consecutive source buffer IDs");
+ ++ImpPatBufferIDRange->second;
+ }
+ }
+
+ ImplicitNegativeChecks.push_back(
+ Pattern(Check::CheckNot, PatternContext.get()));
+ ImplicitNegativeChecks.back().parsePattern(PatternInBuffer,
+ "IMPLICIT-CHECK", SM, Req);
+ }
+
+ std::vector<Pattern> DagNotMatches = ImplicitNegativeChecks;
+
+ // LineNumber keeps track of the line on which CheckPrefix instances are
+ // found.
+ unsigned LineNumber = 1;
+
+ std::set<StringRef> PrefixesNotFound(Req.CheckPrefixes.begin(),
+ Req.CheckPrefixes.end());
+ const size_t DistinctPrefixes = PrefixesNotFound.size();
+ while (true) {
+ Check::FileCheckType CheckTy;
+
+ // See if a prefix occurs in the memory buffer.
+ StringRef UsedPrefix;
+ StringRef AfterSuffix;
+ std::tie(UsedPrefix, AfterSuffix) =
+ FindFirstMatchingPrefix(Req, PrefixRE, Buffer, LineNumber, CheckTy);
+ if (UsedPrefix.empty())
+ break;
+ if (CheckTy != Check::CheckComment)
+ PrefixesNotFound.erase(UsedPrefix);
+
+ assert(UsedPrefix.data() == Buffer.data() &&
+ "Failed to move Buffer's start forward, or pointed prefix outside "
+ "of the buffer!");
+ assert(AfterSuffix.data() >= Buffer.data() &&
+ AfterSuffix.data() < Buffer.data() + Buffer.size() &&
+ "Parsing after suffix doesn't start inside of buffer!");
+
+ // Location to use for error messages.
+ const char *UsedPrefixStart = UsedPrefix.data();
+
+ // Skip the buffer to the end of parsed suffix (or just prefix, if no good
+ // suffix was processed).
+ Buffer = AfterSuffix.empty() ? Buffer.drop_front(UsedPrefix.size())
+ : AfterSuffix;
+
+ // Complain about useful-looking but unsupported suffixes.
+ if (CheckTy == Check::CheckBadNot) {
+ SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()), SourceMgr::DK_Error,
+ "unsupported -NOT combo on prefix '" + UsedPrefix + "'");
+ return true;
+ }
+
+ // Complain about invalid count specification.
+ if (CheckTy == Check::CheckBadCount) {
+ SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()), SourceMgr::DK_Error,
+ "invalid count in -COUNT specification on prefix '" +
+ UsedPrefix + "'");
+ return true;
+ }
+
+ // Okay, we found the prefix, yay. Remember the rest of the line, but ignore
+ // leading whitespace.
+ if (!(Req.NoCanonicalizeWhiteSpace && Req.MatchFullLines))
+ Buffer = Buffer.substr(Buffer.find_first_not_of(" \t"));
+
+ // Scan ahead to the end of line.
+ size_t EOL = Buffer.find_first_of("\n\r");
+
+ // Remember the location of the start of the pattern, for diagnostics.
+ SMLoc PatternLoc = SMLoc::getFromPointer(Buffer.data());
+
+ // Extract the pattern from the buffer.
+ StringRef PatternBuffer = Buffer.substr(0, EOL);
+ Buffer = Buffer.substr(EOL);
+
+ // If this is a comment, we're done.
+ if (CheckTy == Check::CheckComment)
+ continue;
+
+ // Parse the pattern.
+ Pattern P(CheckTy, PatternContext.get(), LineNumber);
+ if (P.parsePattern(PatternBuffer, UsedPrefix, SM, Req))
+ return true;
+
+ // Verify that CHECK-LABEL lines do not define or use variables
+ if ((CheckTy == Check::CheckLabel) && P.hasVariable()) {
+ SM.PrintMessage(
+ SMLoc::getFromPointer(UsedPrefixStart), SourceMgr::DK_Error,
+ "found '" + UsedPrefix + "-LABEL:'"
+ " with variable definition or use");
+ return true;
+ }
+
+ // Verify that CHECK-NEXT/SAME/EMPTY lines have at least one CHECK line before them.
+ if ((CheckTy == Check::CheckNext || CheckTy == Check::CheckSame ||
+ CheckTy == Check::CheckEmpty) &&
+ CheckStrings->empty()) {
+ StringRef Type = CheckTy == Check::CheckNext
+ ? "NEXT"
+ : CheckTy == Check::CheckEmpty ? "EMPTY" : "SAME";
+ SM.PrintMessage(SMLoc::getFromPointer(UsedPrefixStart),
+ SourceMgr::DK_Error,
+ "found '" + UsedPrefix + "-" + Type +
+ "' without previous '" + UsedPrefix + ": line");
+ return true;
+ }
+
+ // Handle CHECK-DAG/-NOT.
+ if (CheckTy == Check::CheckDAG || CheckTy == Check::CheckNot) {
+ DagNotMatches.push_back(P);
+ continue;
+ }
+
+ // Okay, add the string we captured to the output vector and move on.
+ CheckStrings->emplace_back(P, UsedPrefix, PatternLoc);
+ std::swap(DagNotMatches, CheckStrings->back().DagNotStrings);
+ DagNotMatches = ImplicitNegativeChecks;
+ }
+
+ // When there are no used prefixes we report an error except in the case that
+ // no prefix is specified explicitly but -implicit-check-not is specified.
+ const bool NoPrefixesFound = PrefixesNotFound.size() == DistinctPrefixes;
+ const bool SomePrefixesUnexpectedlyNotUsed =
+ !Req.AllowUnusedPrefixes && !PrefixesNotFound.empty();
+ if ((NoPrefixesFound || SomePrefixesUnexpectedlyNotUsed) &&
+ (ImplicitNegativeChecks.empty() || !Req.IsDefaultCheckPrefix)) {
+ errs() << "error: no check strings found with prefix"
+ << (PrefixesNotFound.size() > 1 ? "es " : " ");
+ bool First = true;
+ for (StringRef MissingPrefix : PrefixesNotFound) {
+ if (!First)
+ errs() << ", ";
+ errs() << "\'" << MissingPrefix << ":'";
+ First = false;
+ }
+ errs() << '\n';
+ return true;
+ }
+
+ // Add an EOF pattern for any trailing --implicit-check-not/CHECK-DAG/-NOTs,
+ // and use the first prefix as a filler for the error message.
+ if (!DagNotMatches.empty()) {
+ CheckStrings->emplace_back(
+ Pattern(Check::CheckEOF, PatternContext.get(), LineNumber + 1),
+ *Req.CheckPrefixes.begin(), SMLoc::getFromPointer(Buffer.data()));
+ std::swap(DagNotMatches, CheckStrings->back().DagNotStrings);
+ }
+
+ return false;
+}
+
+static void PrintMatch(bool ExpectedMatch, const SourceMgr &SM,
+ StringRef Prefix, SMLoc Loc, const Pattern &Pat,
+ int MatchedCount, StringRef Buffer, size_t MatchPos,
+ size_t MatchLen, const FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) {
+ bool PrintDiag = true;
+ if (ExpectedMatch) {
+ if (!Req.Verbose)
+ return;
+ if (!Req.VerboseVerbose && Pat.getCheckTy() == Check::CheckEOF)
+ return;
+ // Due to their verbosity, we don't print verbose diagnostics here if we're
+ // gathering them for a different rendering, but we always print other
+ // diagnostics.
+ PrintDiag = !Diags;
+ }
+ FileCheckDiag::MatchType MatchTy = ExpectedMatch
+ ? FileCheckDiag::MatchFoundAndExpected
+ : FileCheckDiag::MatchFoundButExcluded;
+ SMRange MatchRange = ProcessMatchResult(MatchTy, SM, Loc, Pat.getCheckTy(),
+ Buffer, MatchPos, MatchLen, Diags);
+ if (Diags) {
+ Pat.printSubstitutions(SM, Buffer, MatchRange, MatchTy, Diags);
+ Pat.printVariableDefs(SM, MatchTy, Diags);
+ }
+ if (!PrintDiag)
+ return;
+
+ std::string Message = formatv("{0}: {1} string found in input",
+ Pat.getCheckTy().getDescription(Prefix),
+ (ExpectedMatch ? "expected" : "excluded"))
+ .str();
+ if (Pat.getCount() > 1)
+ Message += formatv(" ({0} out of {1})", MatchedCount, Pat.getCount()).str();
+
+ SM.PrintMessage(
+ Loc, ExpectedMatch ? SourceMgr::DK_Remark : SourceMgr::DK_Error, Message);
+ SM.PrintMessage(MatchRange.Start, SourceMgr::DK_Note, "found here",
+ {MatchRange});
+ Pat.printSubstitutions(SM, Buffer, MatchRange, MatchTy, nullptr);
+ Pat.printVariableDefs(SM, MatchTy, nullptr);
+}
+
+static void PrintMatch(bool ExpectedMatch, const SourceMgr &SM,
+ const FileCheckString &CheckStr, int MatchedCount,
+ StringRef Buffer, size_t MatchPos, size_t MatchLen,
+ FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) {
+ PrintMatch(ExpectedMatch, SM, CheckStr.Prefix, CheckStr.Loc, CheckStr.Pat,
+ MatchedCount, Buffer, MatchPos, MatchLen, Req, Diags);
+}
+
+static void PrintNoMatch(bool ExpectedMatch, const SourceMgr &SM,
+ StringRef Prefix, SMLoc Loc, const Pattern &Pat,
+ int MatchedCount, StringRef Buffer,
+ bool VerboseVerbose, std::vector<FileCheckDiag> *Diags,
+ Error MatchErrors) {
+ assert(MatchErrors && "Called on successful match");
+ bool PrintDiag = true;
+ if (!ExpectedMatch) {
+ if (!VerboseVerbose) {
+ consumeError(std::move(MatchErrors));
+ return;
+ }
+ // Due to their verbosity, we don't print verbose diagnostics here if we're
+ // gathering them for a different rendering, but we always print other
+ // diagnostics.
+ PrintDiag = !Diags;
+ }
+
+ // If the current position is at the end of a line, advance to the start of
+ // the next line.
+ Buffer = Buffer.substr(Buffer.find_first_not_of(" \t\n\r"));
+ FileCheckDiag::MatchType MatchTy = ExpectedMatch
+ ? FileCheckDiag::MatchNoneButExpected
+ : FileCheckDiag::MatchNoneAndExcluded;
+ SMRange SearchRange = ProcessMatchResult(MatchTy, SM, Loc, Pat.getCheckTy(),
+ Buffer, 0, Buffer.size(), Diags);
+ if (Diags)
+ Pat.printSubstitutions(SM, Buffer, SearchRange, MatchTy, Diags);
+ if (!PrintDiag) {
+ consumeError(std::move(MatchErrors));
+ return;
+ }
+
+ MatchErrors = handleErrors(std::move(MatchErrors),
+ [](const ErrorDiagnostic &E) { E.log(errs()); });
+
+ // No problem matching the string per se.
+ if (!MatchErrors)
+ return;
+ consumeError(std::move(MatchErrors));
+
+ // Print "not found" diagnostic.
+ std::string Message = formatv("{0}: {1} string not found in input",
+ Pat.getCheckTy().getDescription(Prefix),
+ (ExpectedMatch ? "expected" : "excluded"))
+ .str();
+ if (Pat.getCount() > 1)
+ Message += formatv(" ({0} out of {1})", MatchedCount, Pat.getCount()).str();
+ SM.PrintMessage(
+ Loc, ExpectedMatch ? SourceMgr::DK_Error : SourceMgr::DK_Remark, Message);
+
+ // Print the "scanning from here" line.
+ SM.PrintMessage(SearchRange.Start, SourceMgr::DK_Note, "scanning from here");
+
+ // Allow the pattern to print additional information if desired.
+ Pat.printSubstitutions(SM, Buffer, SearchRange, MatchTy, nullptr);
+
+ if (ExpectedMatch)
+ Pat.printFuzzyMatch(SM, Buffer, Diags);
+}
+
+static void PrintNoMatch(bool ExpectedMatch, const SourceMgr &SM,
+ const FileCheckString &CheckStr, int MatchedCount,
+ StringRef Buffer, bool VerboseVerbose,
+ std::vector<FileCheckDiag> *Diags, Error MatchErrors) {
+ PrintNoMatch(ExpectedMatch, SM, CheckStr.Prefix, CheckStr.Loc, CheckStr.Pat,
+ MatchedCount, Buffer, VerboseVerbose, Diags,
+ std::move(MatchErrors));
+}
+
+/// Counts the number of newlines in the specified range.
+static unsigned CountNumNewlinesBetween(StringRef Range,
+ const char *&FirstNewLine) {
+ unsigned NumNewLines = 0;
+ while (true) {
+ // Scan for newline.
+ Range = Range.substr(Range.find_first_of("\n\r"));
+ if (Range.empty())
+ return NumNewLines;
+
+ ++NumNewLines;
+
+ // Handle \n\r and \r\n as a single newline.
+ if (Range.size() > 1 && (Range[1] == '\n' || Range[1] == '\r') &&
+ (Range[0] != Range[1]))
+ Range = Range.substr(1);
+ Range = Range.substr(1);
+
+ if (NumNewLines == 1)
+ FirstNewLine = Range.begin();
+ }
+}
+
+size_t FileCheckString::Check(const SourceMgr &SM, StringRef Buffer,
+ bool IsLabelScanMode, size_t &MatchLen,
+ FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) const {
+ size_t LastPos = 0;
+ std::vector<const Pattern *> NotStrings;
+
+ // IsLabelScanMode is true when we are scanning forward to find CHECK-LABEL
+ // bounds; we have not processed variable definitions within the bounded block
+ // yet so cannot handle any final CHECK-DAG yet; this is handled when going
+ // over the block again (including the last CHECK-LABEL) in normal mode.
+ if (!IsLabelScanMode) {
+ // Match "dag strings" (with mixed "not strings" if any).
+ LastPos = CheckDag(SM, Buffer, NotStrings, Req, Diags);
+ if (LastPos == StringRef::npos)
+ return StringRef::npos;
+ }
+
+ // Match itself from the last position after matching CHECK-DAG.
+ size_t LastMatchEnd = LastPos;
+ size_t FirstMatchPos = 0;
+  // Match the pattern Count times; most patterns only have a count of 1.
+  assert(Pat.getCount() != 0 && "pattern count cannot be zero");
+ for (int i = 1; i <= Pat.getCount(); i++) {
+ StringRef MatchBuffer = Buffer.substr(LastMatchEnd);
+ size_t CurrentMatchLen;
+    // Get a match at the current start point.
+    Expected<size_t> MatchResult = Pat.match(MatchBuffer, CurrentMatchLen, SM);
+
+    // Report a failed match.
+ if (!MatchResult) {
+ PrintNoMatch(true, SM, *this, i, MatchBuffer, Req.VerboseVerbose, Diags,
+ MatchResult.takeError());
+ return StringRef::npos;
+ }
+ size_t MatchPos = *MatchResult;
+ PrintMatch(true, SM, *this, i, MatchBuffer, MatchPos, CurrentMatchLen, Req,
+ Diags);
+ if (i == 1)
+ FirstMatchPos = LastPos + MatchPos;
+
+    // Move the start point past this match.
+ LastMatchEnd += MatchPos + CurrentMatchLen;
+ }
+ // Full match len counts from first match pos.
+ MatchLen = LastMatchEnd - FirstMatchPos;
+
+ // Similar to the above, in "label-scan mode" we can't yet handle CHECK-NEXT
+ // or CHECK-NOT
+ if (!IsLabelScanMode) {
+ size_t MatchPos = FirstMatchPos - LastPos;
+ StringRef MatchBuffer = Buffer.substr(LastPos);
+ StringRef SkippedRegion = Buffer.substr(LastPos, MatchPos);
+
+ // If this check is a "CHECK-NEXT", verify that the previous match was on
+ // the previous line (i.e. that there is one newline between them).
+ if (CheckNext(SM, SkippedRegion)) {
+ ProcessMatchResult(FileCheckDiag::MatchFoundButWrongLine, SM, Loc,
+ Pat.getCheckTy(), MatchBuffer, MatchPos, MatchLen,
+ Diags, Req.Verbose);
+ return StringRef::npos;
+ }
+
+ // If this check is a "CHECK-SAME", verify that the previous match was on
+ // the same line (i.e. that there is no newline between them).
+ if (CheckSame(SM, SkippedRegion)) {
+ ProcessMatchResult(FileCheckDiag::MatchFoundButWrongLine, SM, Loc,
+ Pat.getCheckTy(), MatchBuffer, MatchPos, MatchLen,
+ Diags, Req.Verbose);
+ return StringRef::npos;
+ }
+
+ // If this match had "not strings", verify that they don't exist in the
+ // skipped region.
+ if (CheckNot(SM, SkippedRegion, NotStrings, Req, Diags))
+ return StringRef::npos;
+ }
+
+ return FirstMatchPos;
+}
+
+bool FileCheckString::CheckNext(const SourceMgr &SM, StringRef Buffer) const {
+ if (Pat.getCheckTy() != Check::CheckNext &&
+ Pat.getCheckTy() != Check::CheckEmpty)
+ return false;
+
+ Twine CheckName =
+ Prefix +
+ Twine(Pat.getCheckTy() == Check::CheckEmpty ? "-EMPTY" : "-NEXT");
+
+ // Count the number of newlines between the previous match and this one.
+ const char *FirstNewLine = nullptr;
+ unsigned NumNewLines = CountNumNewlinesBetween(Buffer, FirstNewLine);
+
+ if (NumNewLines == 0) {
+ SM.PrintMessage(Loc, SourceMgr::DK_Error,
+ CheckName + ": is on the same line as previous match");
+ SM.PrintMessage(SMLoc::getFromPointer(Buffer.end()), SourceMgr::DK_Note,
+ "'next' match was here");
+ SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()), SourceMgr::DK_Note,
+ "previous match ended here");
+ return true;
+ }
+
+ if (NumNewLines != 1) {
+ SM.PrintMessage(Loc, SourceMgr::DK_Error,
+ CheckName +
+ ": is not on the line after the previous match");
+ SM.PrintMessage(SMLoc::getFromPointer(Buffer.end()), SourceMgr::DK_Note,
+ "'next' match was here");
+ SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()), SourceMgr::DK_Note,
+ "previous match ended here");
+ SM.PrintMessage(SMLoc::getFromPointer(FirstNewLine), SourceMgr::DK_Note,
+ "non-matching line after previous match is here");
+ return true;
+ }
+
+ return false;
+}
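+
+// For illustration: with input "foo\nbar\n\nqux\n", the directives
+//   CHECK: foo
+//   CHECK-NEXT: bar
+// are accepted (exactly one newline separates the matches), while a
+// subsequent
+//   CHECK-NEXT: qux
+// is rejected, since two newlines separate "bar" and "qux".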
+
+bool FileCheckString::CheckSame(const SourceMgr &SM, StringRef Buffer) const {
+ if (Pat.getCheckTy() != Check::CheckSame)
+ return false;
+
+ // Count the number of newlines between the previous match and this one.
+ const char *FirstNewLine = nullptr;
+ unsigned NumNewLines = CountNumNewlinesBetween(Buffer, FirstNewLine);
+
+ if (NumNewLines != 0) {
+ SM.PrintMessage(Loc, SourceMgr::DK_Error,
+ Prefix +
+ "-SAME: is not on the same line as the previous match");
+ SM.PrintMessage(SMLoc::getFromPointer(Buffer.end()), SourceMgr::DK_Note,
+ "'next' match was here");
+ SM.PrintMessage(SMLoc::getFromPointer(Buffer.data()), SourceMgr::DK_Note,
+ "previous match ended here");
+ return true;
+ }
+
+ return false;
+}
+
+bool FileCheckString::CheckNot(const SourceMgr &SM, StringRef Buffer,
+ const std::vector<const Pattern *> &NotStrings,
+ const FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) const {
+ bool DirectiveFail = false;
+ for (const Pattern *Pat : NotStrings) {
+ assert((Pat->getCheckTy() == Check::CheckNot) && "Expect CHECK-NOT!");
+
+ size_t MatchLen = 0;
+ Expected<size_t> MatchResult = Pat->match(Buffer, MatchLen, SM);
+
+ if (!MatchResult) {
+ PrintNoMatch(false, SM, Prefix, Pat->getLoc(), *Pat, 1, Buffer,
+ Req.VerboseVerbose, Diags, MatchResult.takeError());
+ continue;
+ }
+ size_t Pos = *MatchResult;
+
+ PrintMatch(false, SM, Prefix, Pat->getLoc(), *Pat, 1, Buffer, Pos, MatchLen,
+ Req, Diags);
+ DirectiveFail = true;
+ }
+
+ return DirectiveFail;
+}
+
+size_t FileCheckString::CheckDag(const SourceMgr &SM, StringRef Buffer,
+ std::vector<const Pattern *> &NotStrings,
+ const FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) const {
+ if (DagNotStrings.empty())
+ return 0;
+
+ // The start of the search range.
+ size_t StartPos = 0;
+
+ struct MatchRange {
+ size_t Pos;
+ size_t End;
+ };
+ // A sorted list of ranges for non-overlapping CHECK-DAG matches. Match
+ // ranges are erased from this list once they are no longer in the search
+ // range.
+ std::list<MatchRange> MatchRanges;
+
+ // We need PatItr and PatEnd later for detecting the end of a CHECK-DAG
+ // group, so we don't use a range-based for loop here.
+ for (auto PatItr = DagNotStrings.begin(), PatEnd = DagNotStrings.end();
+ PatItr != PatEnd; ++PatItr) {
+ const Pattern &Pat = *PatItr;
+ assert((Pat.getCheckTy() == Check::CheckDAG ||
+ Pat.getCheckTy() == Check::CheckNot) &&
+ "Invalid CHECK-DAG or CHECK-NOT!");
+
+ if (Pat.getCheckTy() == Check::CheckNot) {
+ NotStrings.push_back(&Pat);
+ continue;
+ }
+
+ assert((Pat.getCheckTy() == Check::CheckDAG) && "Expect CHECK-DAG!");
+
+ // CHECK-DAG always matches from the start.
+ size_t MatchLen = 0, MatchPos = StartPos;
+
+ // Search for a match that doesn't overlap a previous match in this
+ // CHECK-DAG group.
+ for (auto MI = MatchRanges.begin(), ME = MatchRanges.end(); true; ++MI) {
+ StringRef MatchBuffer = Buffer.substr(MatchPos);
+ Expected<size_t> MatchResult = Pat.match(MatchBuffer, MatchLen, SM);
+      // With a group of CHECK-DAGs, a single mismatch means the match on
+      // that group of CHECK-DAGs fails immediately.
+ if (!MatchResult) {
+ PrintNoMatch(true, SM, Prefix, Pat.getLoc(), Pat, 1, MatchBuffer,
+ Req.VerboseVerbose, Diags, MatchResult.takeError());
+ return StringRef::npos;
+ }
+ size_t MatchPosBuf = *MatchResult;
+      // Recompute it as the offset relative to the start of the original string.
+ MatchPos += MatchPosBuf;
+ if (Req.VerboseVerbose)
+ PrintMatch(true, SM, Prefix, Pat.getLoc(), Pat, 1, Buffer, MatchPos,
+ MatchLen, Req, Diags);
+ MatchRange M{MatchPos, MatchPos + MatchLen};
+ if (Req.AllowDeprecatedDagOverlap) {
+ // We don't need to track all matches in this mode, so we just maintain
+ // one match range that encompasses the current CHECK-DAG group's
+ // matches.
+ if (MatchRanges.empty())
+ MatchRanges.insert(MatchRanges.end(), M);
+ else {
+ auto Block = MatchRanges.begin();
+ Block->Pos = std::min(Block->Pos, M.Pos);
+ Block->End = std::max(Block->End, M.End);
+ }
+ break;
+ }
+ // Iterate previous matches until overlapping match or insertion point.
+ bool Overlap = false;
+ for (; MI != ME; ++MI) {
+ if (M.Pos < MI->End) {
+ // !Overlap => New match has no overlap and is before this old match.
+ // Overlap => New match overlaps this old match.
+ Overlap = MI->Pos < M.End;
+ break;
+ }
+ }
+ if (!Overlap) {
+ // Insert non-overlapping match into list.
+ MatchRanges.insert(MI, M);
+ break;
+ }
+ if (Req.VerboseVerbose) {
+ // Due to their verbosity, we don't print verbose diagnostics here if
+ // we're gathering them for a different rendering, but we always print
+ // other diagnostics.
+ if (!Diags) {
+ SMLoc OldStart = SMLoc::getFromPointer(Buffer.data() + MI->Pos);
+ SMLoc OldEnd = SMLoc::getFromPointer(Buffer.data() + MI->End);
+ SMRange OldRange(OldStart, OldEnd);
+ SM.PrintMessage(OldStart, SourceMgr::DK_Note,
+ "match discarded, overlaps earlier DAG match here",
+ {OldRange});
+ } else {
+ SMLoc CheckLoc = Diags->rbegin()->CheckLoc;
+ for (auto I = Diags->rbegin(), E = Diags->rend();
+ I != E && I->CheckLoc == CheckLoc; ++I)
+ I->MatchTy = FileCheckDiag::MatchFoundButDiscarded;
+ }
+ }
+ MatchPos = MI->End;
+ }
+ if (!Req.VerboseVerbose)
+ PrintMatch(true, SM, Prefix, Pat.getLoc(), Pat, 1, Buffer, MatchPos,
+ MatchLen, Req, Diags);
+
+ // Handle the end of a CHECK-DAG group.
+ if (std::next(PatItr) == PatEnd ||
+ std::next(PatItr)->getCheckTy() == Check::CheckNot) {
+ if (!NotStrings.empty()) {
+        // If there are CHECK-NOTs between two CHECK-DAGs or from CHECK to
+        // CHECK-DAG, verify that no 'not' strings occur in that
+        // region.
+ StringRef SkippedRegion =
+ Buffer.slice(StartPos, MatchRanges.begin()->Pos);
+ if (CheckNot(SM, SkippedRegion, NotStrings, Req, Diags))
+ return StringRef::npos;
+ // Clear "not strings".
+ NotStrings.clear();
+ }
+ // All subsequent CHECK-DAGs and CHECK-NOTs should be matched from the
+ // end of this CHECK-DAG group's match range.
+ StartPos = MatchRanges.rbegin()->End;
+ // Don't waste time checking for (impossible) overlaps before that.
+ MatchRanges.clear();
+ }
+ }
+
+ return StartPos;
+}
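+
+// For illustration: CHECK-DAG directives in one group may match in any order
+// provided their match ranges do not overlap, so with input "mul\nadd\n" the
+// group
+//   CHECK-DAG: add
+//   CHECK-DAG: mul
+// succeeds even though the directives appear in the opposite order.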
+
+static bool ValidatePrefixes(StringRef Kind, StringSet<> &UniquePrefixes,
+ ArrayRef<StringRef> SuppliedPrefixes) {
+ for (StringRef Prefix : SuppliedPrefixes) {
+ if (Prefix.empty()) {
+ errs() << "error: supplied " << Kind << " prefix must not be the empty "
+ << "string\n";
+ return false;
+ }
+ static const Regex Validator("^[a-zA-Z0-9_-]*$");
+ if (!Validator.match(Prefix)) {
+ errs() << "error: supplied " << Kind << " prefix must start with a "
+ << "letter and contain only alphanumeric characters, hyphens, and "
+ << "underscores: '" << Prefix << "'\n";
+ return false;
+ }
+ if (!UniquePrefixes.insert(Prefix).second) {
+ errs() << "error: supplied " << Kind << " prefix must be unique among "
+ << "check and comment prefixes: '" << Prefix << "'\n";
+ return false;
+ }
+ }
+ return true;
+}
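+
+// For illustration: the validator above accepts prefixes such as "CHECK-FOO"
+// or "MY_PREFIX2" and rejects "CHECK*" (invalid character) as well as any
+// prefix supplied twice.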
+
+static const char *DefaultCheckPrefixes[] = {"CHECK"};
+static const char *DefaultCommentPrefixes[] = {"COM", "RUN"};
+
+bool FileCheck::ValidateCheckPrefixes() {
+ StringSet<> UniquePrefixes;
+ // Add default prefixes to catch user-supplied duplicates of them below.
+ if (Req.CheckPrefixes.empty()) {
+ for (const char *Prefix : DefaultCheckPrefixes)
+ UniquePrefixes.insert(Prefix);
+ }
+ if (Req.CommentPrefixes.empty()) {
+ for (const char *Prefix : DefaultCommentPrefixes)
+ UniquePrefixes.insert(Prefix);
+ }
+ // Do not validate the default prefixes, or diagnostics about duplicates might
+ // incorrectly indicate that they were supplied by the user.
+ if (!ValidatePrefixes("check", UniquePrefixes, Req.CheckPrefixes))
+ return false;
+ if (!ValidatePrefixes("comment", UniquePrefixes, Req.CommentPrefixes))
+ return false;
+ return true;
+}
+
+Regex FileCheck::buildCheckPrefixRegex() {
+ if (Req.CheckPrefixes.empty()) {
+ for (const char *Prefix : DefaultCheckPrefixes)
+ Req.CheckPrefixes.push_back(Prefix);
+ Req.IsDefaultCheckPrefix = true;
+ }
+ if (Req.CommentPrefixes.empty()) {
+ for (const char *Prefix : DefaultCommentPrefixes)
+ Req.CommentPrefixes.push_back(Prefix);
+ }
+
+ // We already validated the contents of CheckPrefixes and CommentPrefixes so
+ // just concatenate them as alternatives.
+ SmallString<32> PrefixRegexStr;
+ for (size_t I = 0, E = Req.CheckPrefixes.size(); I != E; ++I) {
+ if (I != 0)
+ PrefixRegexStr.push_back('|');
+ PrefixRegexStr.append(Req.CheckPrefixes[I]);
+ }
+ for (StringRef Prefix : Req.CommentPrefixes) {
+ PrefixRegexStr.push_back('|');
+ PrefixRegexStr.append(Prefix);
+ }
+
+ return Regex(PrefixRegexStr);
+}
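+
+// For illustration: with the defaults above this yields the alternation
+// "CHECK|COM|RUN"; supplying the check prefixes FOO and BAR instead yields
+// "FOO|BAR|COM|RUN".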
+
+Error FileCheckPatternContext::defineCmdlineVariables(
+ ArrayRef<StringRef> CmdlineDefines, SourceMgr &SM) {
+ assert(GlobalVariableTable.empty() && GlobalNumericVariableTable.empty() &&
+ "Overriding defined variable with command-line variable definitions");
+
+ if (CmdlineDefines.empty())
+ return Error::success();
+
+ // Create a string representing the vector of command-line definitions. Each
+ // definition is on its own line and prefixed with a definition number to
+ // clarify which definition a given diagnostic corresponds to.
+ unsigned I = 0;
+ Error Errs = Error::success();
+ std::string CmdlineDefsDiag;
+ SmallVector<std::pair<size_t, size_t>, 4> CmdlineDefsIndices;
+ for (StringRef CmdlineDef : CmdlineDefines) {
+ std::string DefPrefix = ("Global define #" + Twine(++I) + ": ").str();
+ size_t EqIdx = CmdlineDef.find('=');
+ if (EqIdx == StringRef::npos) {
+ CmdlineDefsIndices.push_back(std::make_pair(CmdlineDefsDiag.size(), 0));
+ continue;
+ }
+ // Numeric variable definition.
+ if (CmdlineDef[0] == '#') {
+ // Append a copy of the command-line definition adapted to use the same
+ // format as in the input file to be able to reuse
+ // parseNumericSubstitutionBlock.
+ CmdlineDefsDiag += (DefPrefix + CmdlineDef + " (parsed as: [[").str();
+ std::string SubstitutionStr = std::string(CmdlineDef);
+ SubstitutionStr[EqIdx] = ':';
+ CmdlineDefsIndices.push_back(
+ std::make_pair(CmdlineDefsDiag.size(), SubstitutionStr.size()));
+ CmdlineDefsDiag += (SubstitutionStr + Twine("]])\n")).str();
+ } else {
+ CmdlineDefsDiag += DefPrefix;
+ CmdlineDefsIndices.push_back(
+ std::make_pair(CmdlineDefsDiag.size(), CmdlineDef.size()));
+ CmdlineDefsDiag += (CmdlineDef + "\n").str();
+ }
+ }
+
+  // Create a buffer with fake command-line content so that parsing
+  // diagnostics carry location information and point at the global
+  // definition whose syntax is invalid.
+ std::unique_ptr<MemoryBuffer> CmdLineDefsDiagBuffer =
+ MemoryBuffer::getMemBufferCopy(CmdlineDefsDiag, "Global defines");
+ StringRef CmdlineDefsDiagRef = CmdLineDefsDiagBuffer->getBuffer();
+ SM.AddNewSourceBuffer(std::move(CmdLineDefsDiagBuffer), SMLoc());
+
+ for (std::pair<size_t, size_t> CmdlineDefIndices : CmdlineDefsIndices) {
+ StringRef CmdlineDef = CmdlineDefsDiagRef.substr(CmdlineDefIndices.first,
+ CmdlineDefIndices.second);
+ if (CmdlineDef.empty()) {
+ Errs = joinErrors(
+ std::move(Errs),
+ ErrorDiagnostic::get(SM, CmdlineDef,
+ "missing equal sign in global definition"));
+ continue;
+ }
+
+ // Numeric variable definition.
+ if (CmdlineDef[0] == '#') {
+ // Now parse the definition both to check that the syntax is correct and
+ // to create the necessary class instance.
+ StringRef CmdlineDefExpr = CmdlineDef.substr(1);
+ Optional<NumericVariable *> DefinedNumericVariable;
+ Expected<std::unique_ptr<Expression>> ExpressionResult =
+ Pattern::parseNumericSubstitutionBlock(
+ CmdlineDefExpr, DefinedNumericVariable, false, None, this, SM);
+ if (!ExpressionResult) {
+ Errs = joinErrors(std::move(Errs), ExpressionResult.takeError());
+ continue;
+ }
+ std::unique_ptr<Expression> Expression = std::move(*ExpressionResult);
+ // Now evaluate the expression whose value this variable should be set
+ // to, since the expression of a command-line variable definition should
+ // only use variables defined earlier on the command-line. If not, this
+ // is an error and we report it.
+ Expected<ExpressionValue> Value = Expression->getAST()->eval();
+ if (!Value) {
+ Errs = joinErrors(std::move(Errs), Value.takeError());
+ continue;
+ }
+
+ assert(DefinedNumericVariable && "No variable defined");
+ (*DefinedNumericVariable)->setValue(*Value);
+
+ // Record this variable definition.
+ GlobalNumericVariableTable[(*DefinedNumericVariable)->getName()] =
+ *DefinedNumericVariable;
+ } else {
+ // String variable definition.
+ std::pair<StringRef, StringRef> CmdlineNameVal = CmdlineDef.split('=');
+ StringRef CmdlineName = CmdlineNameVal.first;
+ StringRef OrigCmdlineName = CmdlineName;
+ Expected<Pattern::VariableProperties> ParseVarResult =
+ Pattern::parseVariable(CmdlineName, SM);
+ if (!ParseVarResult) {
+ Errs = joinErrors(std::move(Errs), ParseVarResult.takeError());
+ continue;
+ }
+      // Check that CmdlineName does not denote a pseudo variable and that
+      // parsing consumed the whole name, i.e. nothing follows the parsed
+      // variable name. This catches cases like "FOO+2" in a "FOO+2=10"
+      // definition.
+ if (ParseVarResult->IsPseudo || !CmdlineName.empty()) {
+ Errs = joinErrors(std::move(Errs),
+ ErrorDiagnostic::get(
+ SM, OrigCmdlineName,
+ "invalid name in string variable definition '" +
+ OrigCmdlineName + "'"));
+ continue;
+ }
+ StringRef Name = ParseVarResult->Name;
+
+ // Detect collisions between string and numeric variables when the former
+ // is created later than the latter.
+ if (GlobalNumericVariableTable.find(Name) !=
+ GlobalNumericVariableTable.end()) {
+ Errs = joinErrors(std::move(Errs),
+ ErrorDiagnostic::get(SM, Name,
+ "numeric variable with name '" +
+ Name + "' already exists"));
+ continue;
+ }
+ GlobalVariableTable.insert(CmdlineNameVal);
+ // Mark the string variable as defined to detect collisions between
+ // string and numeric variables in defineCmdlineVariables when the latter
+ // is created later than the former. We cannot reuse GlobalVariableTable
+ // for this by populating it with an empty string since we would then
+ // lose the ability to detect the use of an undefined variable in
+ // match().
+ DefinedVariableTable[Name] = true;
+ }
+ }
+
+ return Errs;
+}
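+
+// A minimal usage sketch (variable names are illustrative only): string
+// variables arrive as "VAR=VAL" and numeric variables as "#NUMVAR=<expr>",
+// mirroring FileCheck's -D and -D# options:
+//
+//   SourceMgr SM;
+//   FileCheckPatternContext Ctx;
+//   StringRef Defines[] = {"VAR=foo", "#NUM=42"};
+//   if (Error E = Ctx.defineCmdlineVariables(Defines, SM))
+//     logAllUnhandledErrors(std::move(E), errs());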
+
+void FileCheckPatternContext::clearLocalVars() {
+ SmallVector<StringRef, 16> LocalPatternVars, LocalNumericVars;
+ for (const StringMapEntry<StringRef> &Var : GlobalVariableTable)
+ if (Var.first()[0] != '$')
+ LocalPatternVars.push_back(Var.first());
+
+ // Numeric substitution reads the value of a variable directly, not via
+ // GlobalNumericVariableTable. Therefore, we clear local variables by
+ // clearing their value which will lead to a numeric substitution failure. We
+ // also mark the variable for removal from GlobalNumericVariableTable since
+ // this is what defineCmdlineVariables checks to decide that no global
+ // variable has been defined.
+ for (const auto &Var : GlobalNumericVariableTable)
+ if (Var.first()[0] != '$') {
+ Var.getValue()->clearValue();
+ LocalNumericVars.push_back(Var.first());
+ }
+
+ for (const auto &Var : LocalPatternVars)
+ GlobalVariableTable.erase(Var);
+ for (const auto &Var : LocalNumericVars)
+ GlobalNumericVariableTable.erase(Var);
+}
+
+bool FileCheck::checkInput(SourceMgr &SM, StringRef Buffer,
+ std::vector<FileCheckDiag> *Diags) {
+ bool ChecksFailed = false;
+
+ unsigned i = 0, j = 0, e = CheckStrings->size();
+ while (true) {
+ StringRef CheckRegion;
+ if (j == e) {
+ CheckRegion = Buffer;
+ } else {
+ const FileCheckString &CheckLabelStr = (*CheckStrings)[j];
+ if (CheckLabelStr.Pat.getCheckTy() != Check::CheckLabel) {
+ ++j;
+ continue;
+ }
+
+ // Scan to next CHECK-LABEL match, ignoring CHECK-NOT and CHECK-DAG
+ size_t MatchLabelLen = 0;
+ size_t MatchLabelPos =
+ CheckLabelStr.Check(SM, Buffer, true, MatchLabelLen, Req, Diags);
+ if (MatchLabelPos == StringRef::npos)
+ // Immediately bail if CHECK-LABEL fails, nothing else we can do.
+ return false;
+
+ CheckRegion = Buffer.substr(0, MatchLabelPos + MatchLabelLen);
+ Buffer = Buffer.substr(MatchLabelPos + MatchLabelLen);
+ ++j;
+ }
+
+ // Do not clear the first region as it's the one before the first
+ // CHECK-LABEL and it would clear variables defined on the command-line
+ // before they get used.
+ if (i != 0 && Req.EnableVarScope)
+ PatternContext->clearLocalVars();
+
+ for (; i != j; ++i) {
+ const FileCheckString &CheckStr = (*CheckStrings)[i];
+
+ // Check each string within the scanned region, including a second check
+ // of any final CHECK-LABEL (to verify CHECK-NOT and CHECK-DAG)
+ size_t MatchLen = 0;
+ size_t MatchPos =
+ CheckStr.Check(SM, CheckRegion, false, MatchLen, Req, Diags);
+
+ if (MatchPos == StringRef::npos) {
+ ChecksFailed = true;
+ i = j;
+ break;
+ }
+
+ CheckRegion = CheckRegion.substr(MatchPos + MatchLen);
+ }
+
+ if (j == e)
+ break;
+ }
+
+ // Success if no checks failed.
+ return !ChecksFailed;
+}
diff --git a/contrib/libs/llvm12/lib/FileCheck/FileCheckImpl.h b/contrib/libs/llvm12/lib/FileCheck/FileCheckImpl.h
new file mode 100644
index 00000000000..05b2a529002
--- /dev/null
+++ b/contrib/libs/llvm12/lib/FileCheck/FileCheckImpl.h
@@ -0,0 +1,859 @@
+//===-- FileCheckImpl.h - Private FileCheck Interface ------------*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the private interfaces of FileCheck. Its purpose is to
+// allow unit testing of FileCheck and to separate the interface from the
+// implementation. It is only meant to be used by FileCheck.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_FILECHECK_FILECHECKIMPL_H
+#define LLVM_LIB_FILECHECK_FILECHECKIMPL_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/FileCheck/FileCheck.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/SourceMgr.h"
+#include <map>
+#include <string>
+#include <vector>
+
+namespace llvm {
+
+//===----------------------------------------------------------------------===//
+// Numeric substitution handling code.
+//===----------------------------------------------------------------------===//
+
+class ExpressionValue;
+
+/// Type representing the format an expression value should be textualized into
+/// for matching. Used to represent both explicit format specifiers as well as
+/// implicit format from using numeric variables.
+struct ExpressionFormat {
+ enum class Kind {
+ /// Denote absence of format. Used for implicit format of literals and
+ /// empty expressions.
+ NoFormat,
+ /// Value is an unsigned integer and should be printed as a decimal number.
+ Unsigned,
+ /// Value is a signed integer and should be printed as a decimal number.
+ Signed,
+ /// Value should be printed as an uppercase hex number.
+ HexUpper,
+ /// Value should be printed as a lowercase hex number.
+ HexLower
+ };
+
+private:
+ Kind Value;
+ unsigned Precision = 0;
+
+public:
+ /// Evaluates a format to true if it can be used in a match.
+ explicit operator bool() const { return Value != Kind::NoFormat; }
+
+ /// Define format equality: formats are equal if neither is NoFormat and
+ /// their kinds and precision are the same.
+ bool operator==(const ExpressionFormat &Other) const {
+ return Value != Kind::NoFormat && Value == Other.Value &&
+ Precision == Other.Precision;
+ }
+
+ bool operator!=(const ExpressionFormat &Other) const {
+ return !(*this == Other);
+ }
+
+ bool operator==(Kind OtherValue) const { return Value == OtherValue; }
+
+ bool operator!=(Kind OtherValue) const { return !(*this == OtherValue); }
+
+ /// \returns the format specifier corresponding to this format as a string.
+ StringRef toString() const;
+
+  ExpressionFormat() : Value(Kind::NoFormat) {}
+  explicit ExpressionFormat(Kind Value) : Value(Value), Precision(0) {}
+  explicit ExpressionFormat(Kind Value, unsigned Precision)
+      : Value(Value), Precision(Precision) {}
+
+ /// \returns a wildcard regular expression string that matches any value in
+ /// the format represented by this instance and no other value, or an error
+ /// if the format is NoFormat.
+ Expected<std::string> getWildcardRegex() const;
+
+ /// \returns the string representation of \p Value in the format represented
+ /// by this instance, or an error if conversion to this format failed or the
+ /// format is NoFormat.
+ Expected<std::string> getMatchingString(ExpressionValue Value) const;
+
+ /// \returns the value corresponding to string representation \p StrVal
+ /// according to the matching format represented by this instance or an error
+ /// with diagnostic against \p SM if \p StrVal does not correspond to a valid
+ /// and representable value.
+ Expected<ExpressionValue> valueFromStringRepr(StringRef StrVal,
+ const SourceMgr &SM) const;
+};
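+
+// A small sketch of intended use (values chosen for illustration): a HexUpper
+// format renders 61 as "3D" and produces a wildcard regex matching uppercase
+// hex numbers:
+//
+//   ExpressionFormat Fmt(ExpressionFormat::Kind::HexUpper);
+//   Expected<std::string> Str = Fmt.getMatchingString(ExpressionValue(61));
+//   // On success, *Str == "3D".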
+
+/// Class to represent an overflow error that might result when manipulating a
+/// value.
+class OverflowError : public ErrorInfo<OverflowError> {
+public:
+ static char ID;
+
+ std::error_code convertToErrorCode() const override {
+ return std::make_error_code(std::errc::value_too_large);
+ }
+
+ void log(raw_ostream &OS) const override { OS << "overflow error"; }
+};
+
+/// Class representing a numeric value.
+class ExpressionValue {
+private:
+ uint64_t Value;
+ bool Negative;
+
+public:
+ template <class T>
+ explicit ExpressionValue(T Val) : Value(Val), Negative(Val < 0) {}
+
+ bool operator==(const ExpressionValue &Other) const {
+ return Value == Other.Value && isNegative() == Other.isNegative();
+ }
+
+ bool operator!=(const ExpressionValue &Other) const {
+ return !(*this == Other);
+ }
+
+ /// Returns true if value is signed and negative, false otherwise.
+ bool isNegative() const {
+ assert((Value != 0 || !Negative) && "Unexpected negative zero!");
+ return Negative;
+ }
+
+ /// \returns the value as a signed integer or an error if the value is out of
+ /// range.
+ Expected<int64_t> getSignedValue() const;
+
+ /// \returns the value as an unsigned integer or an error if the value is out
+ /// of range.
+ Expected<uint64_t> getUnsignedValue() const;
+
+  /// \returns an unsigned ExpressionValue instance whose value is the absolute
+  /// value of this object's value.
+ ExpressionValue getAbsolute() const;
+};
+
+/// Performs the operation and \returns its result, or an error in case of
+/// failure such as overflow.
+Expected<ExpressionValue> operator+(const ExpressionValue &Lhs,
+ const ExpressionValue &Rhs);
+Expected<ExpressionValue> operator-(const ExpressionValue &Lhs,
+ const ExpressionValue &Rhs);
+Expected<ExpressionValue> operator*(const ExpressionValue &Lhs,
+ const ExpressionValue &Rhs);
+Expected<ExpressionValue> operator/(const ExpressionValue &Lhs,
+ const ExpressionValue &Rhs);
+Expected<ExpressionValue> max(const ExpressionValue &Lhs,
+ const ExpressionValue &Rhs);
+Expected<ExpressionValue> min(const ExpressionValue &Lhs,
+ const ExpressionValue &Rhs);
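+
+// These helpers are expected to surface overflow as an error rather than
+// wrapping; a sketch:
+//
+//   Expected<ExpressionValue> Sum =
+//       ExpressionValue(std::numeric_limits<uint64_t>::max()) +
+//       ExpressionValue(1);
+//   if (!Sum)
+//     consumeError(Sum.takeError()); // OverflowError expected here.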
+
+/// Base class representing the AST of a given expression.
+class ExpressionAST {
+private:
+ StringRef ExpressionStr;
+
+public:
+ ExpressionAST(StringRef ExpressionStr) : ExpressionStr(ExpressionStr) {}
+
+ virtual ~ExpressionAST() = default;
+
+ StringRef getExpressionStr() const { return ExpressionStr; }
+
+ /// Evaluates and \returns the value of the expression represented by this
+ /// AST or an error if evaluation fails.
+ virtual Expected<ExpressionValue> eval() const = 0;
+
+ /// \returns either the implicit format of this AST, a diagnostic against
+ /// \p SM if implicit formats of the AST's components conflict, or NoFormat
+ /// if the AST has no implicit format (e.g. AST is made up of a single
+ /// literal).
+ virtual Expected<ExpressionFormat>
+ getImplicitFormat(const SourceMgr &SM) const {
+ return ExpressionFormat();
+ }
+};
+
+/// Class representing a literal value in the AST of an expression.
+class ExpressionLiteral : public ExpressionAST {
+private:
+ /// Actual value of the literal.
+ ExpressionValue Value;
+
+public:
+ template <class T>
+ explicit ExpressionLiteral(StringRef ExpressionStr, T Val)
+ : ExpressionAST(ExpressionStr), Value(Val) {}
+
+ /// \returns the literal's value.
+ Expected<ExpressionValue> eval() const override { return Value; }
+};
+
+/// Class to represent an undefined variable error, which quotes that
+/// variable's name when printed.
+class UndefVarError : public ErrorInfo<UndefVarError> {
+private:
+ StringRef VarName;
+
+public:
+ static char ID;
+
+ UndefVarError(StringRef VarName) : VarName(VarName) {}
+
+ StringRef getVarName() const { return VarName; }
+
+ std::error_code convertToErrorCode() const override {
+ return inconvertibleErrorCode();
+ }
+
+ /// Print name of variable associated with this error.
+ void log(raw_ostream &OS) const override {
+ OS << "\"";
+ OS.write_escaped(VarName) << "\"";
+ }
+};
+
+/// Class representing an expression and its matching format.
+class Expression {
+private:
+ /// Pointer to AST of the expression.
+ std::unique_ptr<ExpressionAST> AST;
+
+ /// Format to use (e.g. hex upper case letters) when matching the value.
+ ExpressionFormat Format;
+
+public:
+ /// Generic constructor for an expression represented by the given \p AST and
+ /// whose matching format is \p Format.
+ Expression(std::unique_ptr<ExpressionAST> AST, ExpressionFormat Format)
+ : AST(std::move(AST)), Format(Format) {}
+
+ /// \returns pointer to AST of the expression. Pointer is guaranteed to be
+ /// valid as long as this object is.
+ ExpressionAST *getAST() const { return AST.get(); }
+
+ ExpressionFormat getFormat() const { return Format; }
+};
+
+/// Class representing a numeric variable and its associated current value.
+class NumericVariable {
+private:
+ /// Name of the numeric variable.
+ StringRef Name;
+
+ /// Format to use for expressions using this variable without an explicit
+ /// format.
+ ExpressionFormat ImplicitFormat;
+
+ /// Value of numeric variable, if defined, or None otherwise.
+ Optional<ExpressionValue> Value;
+
+ /// The input buffer's string from which Value was parsed, or None. See
+ /// comments on getStringValue for a discussion of the None case.
+ Optional<StringRef> StrValue;
+
+ /// Line number where this variable is defined, or None if defined before
+ /// input is parsed. Used to determine whether a variable is defined on the
+ /// same line as a given use.
+ Optional<size_t> DefLineNumber;
+
+public:
+ /// Constructor for a variable \p Name with implicit format \p ImplicitFormat
+ /// defined at line \p DefLineNumber or defined before input is parsed if
+ /// \p DefLineNumber is None.
+ explicit NumericVariable(StringRef Name, ExpressionFormat ImplicitFormat,
+ Optional<size_t> DefLineNumber = None)
+ : Name(Name), ImplicitFormat(ImplicitFormat),
+ DefLineNumber(DefLineNumber) {}
+
+ /// \returns name of this numeric variable.
+ StringRef getName() const { return Name; }
+
+ /// \returns implicit format of this numeric variable.
+ ExpressionFormat getImplicitFormat() const { return ImplicitFormat; }
+
+ /// \returns this variable's value.
+ Optional<ExpressionValue> getValue() const { return Value; }
+
+ /// \returns the input buffer's string from which this variable's value was
+ /// parsed, or None if the value is not yet defined or was not parsed from the
+ /// input buffer. For example, the value of @LINE is not parsed from the
+ /// input buffer, and some numeric variables are parsed from the command
+ /// line instead.
+ Optional<StringRef> getStringValue() const { return StrValue; }
+
+ /// Sets value of this numeric variable to \p NewValue, and sets the input
+ /// buffer string from which it was parsed to \p NewStrValue. See comments on
+ /// getStringValue for a discussion of when the latter can be None.
+ void setValue(ExpressionValue NewValue,
+ Optional<StringRef> NewStrValue = None) {
+ Value = NewValue;
+ StrValue = NewStrValue;
+ }
+
+ /// Clears value of this numeric variable, regardless of whether it is
+ /// currently defined or not.
+ void clearValue() {
+ Value = None;
+ StrValue = None;
+ }
+
+ /// \returns the line number where this variable is defined, if any, or None
+ /// if defined before input is parsed.
+ Optional<size_t> getDefLineNumber() const { return DefLineNumber; }
+};
+
+/// Class representing the use of a numeric variable in the AST of an
+/// expression.
+class NumericVariableUse : public ExpressionAST {
+private:
+ /// Pointer to the class instance for the variable this use is about.
+ NumericVariable *Variable;
+
+public:
+ NumericVariableUse(StringRef Name, NumericVariable *Variable)
+ : ExpressionAST(Name), Variable(Variable) {}
+ /// \returns the value of the variable referenced by this instance.
+ Expected<ExpressionValue> eval() const override;
+
+ /// \returns implicit format of this numeric variable.
+ Expected<ExpressionFormat>
+ getImplicitFormat(const SourceMgr &SM) const override {
+ return Variable->getImplicitFormat();
+ }
+};
+
+/// Type of functions evaluating a given binary operation.
+using binop_eval_t = Expected<ExpressionValue> (*)(const ExpressionValue &,
+ const ExpressionValue &);
+
+/// Class representing a single binary operation in the AST of an expression.
+class BinaryOperation : public ExpressionAST {
+private:
+ /// Left operand.
+ std::unique_ptr<ExpressionAST> LeftOperand;
+
+ /// Right operand.
+ std::unique_ptr<ExpressionAST> RightOperand;
+
+ /// Pointer to function that can evaluate this binary operation.
+ binop_eval_t EvalBinop;
+
+public:
+ BinaryOperation(StringRef ExpressionStr, binop_eval_t EvalBinop,
+ std::unique_ptr<ExpressionAST> LeftOp,
+ std::unique_ptr<ExpressionAST> RightOp)
+ : ExpressionAST(ExpressionStr), EvalBinop(EvalBinop) {
+ LeftOperand = std::move(LeftOp);
+ RightOperand = std::move(RightOp);
+ }
+
+ /// Evaluates the value of the binary operation represented by this AST,
+ /// using EvalBinop on the result of recursively evaluating the operands.
+ /// \returns the expression value or an error if an undefined numeric
+ /// variable is used in one of the operands.
+ Expected<ExpressionValue> eval() const override;
+
+ /// \returns the implicit format of this AST, if any, a diagnostic against
+ /// \p SM if the implicit formats of the AST's components conflict, or no
+ /// format if the AST has no implicit format (e.g. AST is made of a single
+ /// literal).
+ Expected<ExpressionFormat>
+ getImplicitFormat(const SourceMgr &SM) const override;
+};
+
+class FileCheckPatternContext;
+
+/// Class representing a substitution to perform in the RegExStr string.
+class Substitution {
+protected:
+ /// Pointer to a class instance holding, among other things, the table with
+ /// the values of live string variables at the start of any given CHECK line.
+ /// Used for substituting string variables with the text they were defined
+ /// as. Expressions are linked to the numeric variables they use at
+ /// parse time and directly access the value of the numeric variable to
+ /// evaluate their value.
+ FileCheckPatternContext *Context;
+
+ /// The string that needs to be substituted for something else. For a
+ /// string variable this is its name, otherwise this is the whole expression.
+ StringRef FromStr;
+
+ // Index in RegExStr of where to do the substitution.
+ size_t InsertIdx;
+
+public:
+ Substitution(FileCheckPatternContext *Context, StringRef VarName,
+ size_t InsertIdx)
+ : Context(Context), FromStr(VarName), InsertIdx(InsertIdx) {}
+
+ virtual ~Substitution() = default;
+
+ /// \returns the string to be substituted for something else.
+ StringRef getFromString() const { return FromStr; }
+
+ /// \returns the index where the substitution is to be performed in RegExStr.
+ size_t getIndex() const { return InsertIdx; }
+
+ /// \returns a string containing the result of the substitution represented
+ /// by this class instance or an error if substitution failed.
+ virtual Expected<std::string> getResult() const = 0;
+};
+
+class StringSubstitution : public Substitution {
+public:
+ StringSubstitution(FileCheckPatternContext *Context, StringRef VarName,
+ size_t InsertIdx)
+ : Substitution(Context, VarName, InsertIdx) {}
+
+ /// \returns the text that the string variable in this substitution matched
+ /// when defined, or an error if the variable is undefined.
+ Expected<std::string> getResult() const override;
+};
+
+class NumericSubstitution : public Substitution {
+private:
+ /// Pointer to the class representing the expression whose value is to be
+ /// substituted.
+ std::unique_ptr<Expression> ExpressionPointer;
+
+public:
+ NumericSubstitution(FileCheckPatternContext *Context, StringRef ExpressionStr,
+ std::unique_ptr<Expression> ExpressionPointer,
+ size_t InsertIdx)
+ : Substitution(Context, ExpressionStr, InsertIdx),
+ ExpressionPointer(std::move(ExpressionPointer)) {}
+
+ /// \returns a string containing the result of evaluating the expression in
+ /// this substitution, or an error if evaluation failed.
+ Expected<std::string> getResult() const override;
+};
+
+//===----------------------------------------------------------------------===//
+// Pattern handling code.
+//===----------------------------------------------------------------------===//
+
+/// Class holding the Pattern global state, shared by all patterns: tables
+/// holding values of variables and whether they are defined or not at any
+/// given time in the matching process.
+class FileCheckPatternContext {
+ friend class Pattern;
+
+private:
+ /// When matching a given pattern, this holds the value of all the string
+ /// variables defined in previous patterns. In a pattern, only the last
+ /// definition for a given variable is recorded in this table.
+  /// Back-references are used for uses that follow a definition of the same
+  /// variable within the pattern.
+ StringMap<StringRef> GlobalVariableTable;
+
+ /// Map of all string variables defined so far. Used at parse time to detect
+ /// a name conflict between a numeric variable and a string variable when
+ /// the former is defined on a later line than the latter.
+ StringMap<bool> DefinedVariableTable;
+
+ /// When matching a given pattern, this holds the pointers to the classes
+ /// representing the numeric variables defined in previous patterns. When
+ /// matching a pattern all definitions for that pattern are recorded in the
+ /// NumericVariableDefs table in the Pattern instance of that pattern.
+ StringMap<NumericVariable *> GlobalNumericVariableTable;
+
+ /// Pointer to the class instance representing the @LINE pseudo variable for
+ /// easily updating its value.
+ NumericVariable *LineVariable = nullptr;
+
+ /// Vector holding pointers to all parsed numeric variables. Used to
+ /// automatically free them once they are guaranteed to no longer be used.
+ std::vector<std::unique_ptr<NumericVariable>> NumericVariables;
+
+ /// Vector holding pointers to all parsed expressions. Used to automatically
+ /// free the expressions once they are guaranteed to no longer be used.
+ std::vector<std::unique_ptr<Expression>> Expressions;
+
+ /// Vector holding pointers to all substitutions. Used to automatically free
+ /// them once they are guaranteed to no longer be used.
+ std::vector<std::unique_ptr<Substitution>> Substitutions;
+
+public:
+ /// \returns the value of string variable \p VarName or an error if no such
+ /// variable has been defined.
+ Expected<StringRef> getPatternVarValue(StringRef VarName);
+
+ /// Defines string and numeric variables from definitions given on the
+ /// command line, passed as a vector of [#]VAR=VAL strings in
+ /// \p CmdlineDefines. \returns an error list containing diagnostics against
+ /// \p SM for all definition parsing failures, if any, or Success otherwise.
+ Error defineCmdlineVariables(ArrayRef<StringRef> CmdlineDefines,
+ SourceMgr &SM);
+
+  /// Create @LINE pseudo variable. Value is set when patterns are being
+  /// matched.
+ void createLineVariable();
+
+ /// Undefines local variables (variables whose name does not start with a '$'
+ /// sign), i.e. removes them from GlobalVariableTable and from
+ /// GlobalNumericVariableTable and also clears the value of numeric
+ /// variables.
+ void clearLocalVars();
+
+private:
+ /// Makes a new numeric variable and registers it for destruction when the
+ /// context is destroyed.
+ template <class... Types> NumericVariable *makeNumericVariable(Types... args);
+
+ /// Makes a new string substitution and registers it for destruction when the
+ /// context is destroyed.
+ Substitution *makeStringSubstitution(StringRef VarName, size_t InsertIdx);
+
+ /// Makes a new numeric substitution and registers it for destruction when
+ /// the context is destroyed.
+ Substitution *makeNumericSubstitution(StringRef ExpressionStr,
+ std::unique_ptr<Expression> Expression,
+ size_t InsertIdx);
+};
+
+/// Class to represent an error holding a diagnostic with location information
+/// used when printing it.
+class ErrorDiagnostic : public ErrorInfo<ErrorDiagnostic> {
+private:
+ SMDiagnostic Diagnostic;
+
+public:
+ static char ID;
+
+  ErrorDiagnostic(SMDiagnostic &&Diag) : Diagnostic(std::move(Diag)) {}
+
+ std::error_code convertToErrorCode() const override {
+ return inconvertibleErrorCode();
+ }
+
+ /// Print diagnostic associated with this error when printing the error.
+ void log(raw_ostream &OS) const override { Diagnostic.print(nullptr, OS); }
+
+ static Error get(const SourceMgr &SM, SMLoc Loc, const Twine &ErrMsg) {
+ return make_error<ErrorDiagnostic>(
+ SM.GetMessage(Loc, SourceMgr::DK_Error, ErrMsg));
+ }
+
+ static Error get(const SourceMgr &SM, StringRef Buffer, const Twine &ErrMsg) {
+ return get(SM, SMLoc::getFromPointer(Buffer.data()), ErrMsg);
+ }
+};
+
+class NotFoundError : public ErrorInfo<NotFoundError> {
+public:
+ static char ID;
+
+ std::error_code convertToErrorCode() const override {
+ return inconvertibleErrorCode();
+ }
+
+ /// Print diagnostic associated with this error when printing the error.
+ void log(raw_ostream &OS) const override {
+ OS << "String not found in input";
+ }
+};
+
+class Pattern {
+ SMLoc PatternLoc;
+
+ /// A fixed string to match as the pattern or empty if this pattern requires
+ /// a regex match.
+ StringRef FixedStr;
+
+ /// A regex string to match as the pattern or empty if this pattern requires
+ /// a fixed string to match.
+ std::string RegExStr;
+
+ /// Entries in this vector represent a substitution of a string variable or
+ /// an expression in the RegExStr regex at match time. For example, in the
+ /// case of a CHECK directive with the pattern "foo[[bar]]baz[[#N+1]]",
+ /// RegExStr will contain "foobaz" and we'll get two entries in this vector
+  /// that tell us to insert the value of string variable "bar" at offset 3
+ /// and the value of expression "N+1" at offset 6.
+ std::vector<Substitution *> Substitutions;
+
+ /// Maps names of string variables defined in a pattern to the number of
+ /// their parenthesis group in RegExStr capturing their last definition.
+ ///
+ /// E.g. for the pattern "foo[[bar:.*]]baz([[bar]][[QUUX]][[bar:.*]])",
+ /// RegExStr will be "foo(.*)baz(\1<quux value>(.*))" where <quux value> is
+ /// the value captured for QUUX on the earlier line where it was defined, and
+ /// VariableDefs will map "bar" to the third parenthesis group which captures
+ /// the second definition of "bar".
+ ///
+ /// Note: uses std::map rather than StringMap to be able to get the key when
+ /// iterating over values.
+ std::map<StringRef, unsigned> VariableDefs;
+
+ /// Structure representing the definition of a numeric variable in a pattern.
+ /// It holds the pointer to the class instance holding the value and matching
+ /// format of the numeric variable whose value is being defined and the
+ /// number of the parenthesis group in RegExStr to capture that value.
+ struct NumericVariableMatch {
+ /// Pointer to class instance holding the value and matching format of the
+ /// numeric variable being defined.
+ NumericVariable *DefinedNumericVariable;
+
+ /// Number of the parenthesis group in RegExStr that captures the value of
+ /// this numeric variable definition.
+ unsigned CaptureParenGroup;
+ };
+
+ /// Holds the number of the parenthesis group in RegExStr and pointer to the
+ /// corresponding NumericVariable class instance of all numeric variable
+ /// definitions. Used to set the matched value of all those variables.
+ StringMap<NumericVariableMatch> NumericVariableDefs;
+
+ /// Pointer to a class instance holding the global state shared by all
+ /// patterns:
+ /// - separate tables with the values of live string and numeric variables
+ /// respectively at the start of any given CHECK line;
+ /// - table holding whether a string variable has been defined at any given
+ /// point during the parsing phase.
+ FileCheckPatternContext *Context;
+
+ Check::FileCheckType CheckTy;
+
+ /// Line number for this CHECK pattern or None if it is an implicit pattern.
+ /// Used to determine whether a variable definition is made on an earlier
+  /// line than the one with this CHECK.
+ Optional<size_t> LineNumber;
+
+ /// Ignore case while matching if set to true.
+ bool IgnoreCase = false;
+
+public:
+ Pattern(Check::FileCheckType Ty, FileCheckPatternContext *Context,
+ Optional<size_t> Line = None)
+ : Context(Context), CheckTy(Ty), LineNumber(Line) {}
+
+ /// \returns the location in source code.
+ SMLoc getLoc() const { return PatternLoc; }
+
+ /// \returns the pointer to the global state for all patterns in this
+ /// FileCheck instance.
+ FileCheckPatternContext *getContext() const { return Context; }
+
+ /// \returns whether \p C is a valid first character for a variable name.
+ static bool isValidVarNameStart(char C);
+
+ /// Parsing information about a variable.
+ struct VariableProperties {
+ StringRef Name;
+ bool IsPseudo;
+ };
+
+ /// Parses the string at the start of \p Str for a variable name. \returns
+ /// a VariableProperties structure holding the variable name and whether it
+ /// is the name of a pseudo variable, or an error holding a diagnostic
+  /// against \p SM if parsing fails. If parsing was successful, also strips
+ /// \p Str from the variable name.
+ static Expected<VariableProperties> parseVariable(StringRef &Str,
+ const SourceMgr &SM);
+ /// Parses \p Expr for a numeric substitution block at line \p LineNumber,
+ /// or before input is parsed if \p LineNumber is None. Parameter
+ /// \p IsLegacyLineExpr indicates whether \p Expr should be a legacy @LINE
+ /// expression and \p Context points to the class instance holding the live
+ /// string and numeric variables. \returns a pointer to the class instance
+  /// representing the expression whose value must be substituted, or an error
+ /// holding a diagnostic against \p SM if parsing fails. If substitution was
+ /// successful, sets \p DefinedNumericVariable to point to the class
+ /// representing the numeric variable defined in this numeric substitution
+ /// block, or None if this block does not define any variable.
+ static Expected<std::unique_ptr<Expression>> parseNumericSubstitutionBlock(
+ StringRef Expr, Optional<NumericVariable *> &DefinedNumericVariable,
+ bool IsLegacyLineExpr, Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM);
+ /// Parses the pattern in \p PatternStr and initializes this Pattern instance
+ /// accordingly.
+ ///
+ /// \p Prefix provides which prefix is being matched, \p Req describes the
+ /// global options that influence the parsing such as whitespace
+ /// canonicalization, \p SM provides the SourceMgr used for error reports.
+ /// \returns true in case of an error, false otherwise.
+ bool parsePattern(StringRef PatternStr, StringRef Prefix, SourceMgr &SM,
+ const FileCheckRequest &Req);
+ /// Matches the pattern string against the input buffer \p Buffer
+ ///
+ /// \returns the position that is matched or an error indicating why matching
+ /// failed. If there is a match, updates \p MatchLen with the size of the
+ /// matched string.
+ ///
+ /// The GlobalVariableTable StringMap in the FileCheckPatternContext class
+ /// instance provides the current values of FileCheck string variables and is
+ /// updated if this match defines new values. Likewise, the
+ /// GlobalNumericVariableTable StringMap in the same class provides the
+ /// current values of FileCheck numeric variables and is updated if this
+ /// match defines new numeric values.
+ Expected<size_t> match(StringRef Buffer, size_t &MatchLen,
+ const SourceMgr &SM) const;
+ /// Prints the value of successful substitutions or the name of the undefined
+ /// string or numeric variables preventing a successful substitution.
+ void printSubstitutions(const SourceMgr &SM, StringRef Buffer,
+ SMRange MatchRange, FileCheckDiag::MatchType MatchTy,
+ std::vector<FileCheckDiag> *Diags) const;
+ void printFuzzyMatch(const SourceMgr &SM, StringRef Buffer,
+ std::vector<FileCheckDiag> *Diags) const;
+
+ bool hasVariable() const {
+ return !(Substitutions.empty() && VariableDefs.empty());
+ }
+ void printVariableDefs(const SourceMgr &SM, FileCheckDiag::MatchType MatchTy,
+ std::vector<FileCheckDiag> *Diags) const;
+
+ Check::FileCheckType getCheckTy() const { return CheckTy; }
+
+ int getCount() const { return CheckTy.getCount(); }
+
+private:
+ bool AddRegExToRegEx(StringRef RS, unsigned &CurParen, SourceMgr &SM);
+ void AddBackrefToRegEx(unsigned BackrefNum);
+ /// Computes an arbitrary estimate for the quality of matching this pattern
+ /// at the start of \p Buffer; a distance of zero should correspond to a
+ /// perfect match.
+ unsigned computeMatchDistance(StringRef Buffer) const;
+ /// Finds the closing sequence of a regex variable usage or definition.
+ ///
+ /// \p Str has to point in the beginning of the definition (right after the
+ /// opening sequence). \p SM holds the SourceMgr used for error reporting.
+ /// \returns the offset of the closing sequence within Str, or npos if it
+ /// was not found.
+ static size_t FindRegexVarEnd(StringRef Str, SourceMgr &SM);
+
+ /// Parses \p Expr for the name of a numeric variable to be defined at line
+ /// \p LineNumber, or before input is parsed if \p LineNumber is None.
+ /// \returns a pointer to the class instance representing that variable,
+ /// creating it if needed, or an error holding a diagnostic against \p SM
+ /// should defining such a variable be invalid.
+ static Expected<NumericVariable *> parseNumericVariableDefinition(
+ StringRef &Expr, FileCheckPatternContext *Context,
+ Optional<size_t> LineNumber, ExpressionFormat ImplicitFormat,
+ const SourceMgr &SM);
+ /// Parses \p Name as a (pseudo if \p IsPseudo is true) numeric variable use
+ /// at line \p LineNumber, or before input is parsed if \p LineNumber is
+ /// None. Parameter \p Context points to the class instance holding the live
+ /// string and numeric variables. \returns the pointer to the class instance
+ /// representing that variable if successful, or an error holding a
+ /// diagnostic against \p SM otherwise.
+ static Expected<std::unique_ptr<NumericVariableUse>> parseNumericVariableUse(
+ StringRef Name, bool IsPseudo, Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM);
+ enum class AllowedOperand { LineVar, LegacyLiteral, Any };
+ /// Parses \p Expr for use of a numeric operand at line \p LineNumber, or
+ /// before input is parsed if \p LineNumber is None. Accepts literal values,
+ /// numeric variables and function calls, depending on the value of \p AO.
+ /// \p MaybeInvalidConstraint indicates whether the text being parsed could
+ /// be an invalid constraint. \p Context points to the class instance holding
+ /// the live string and numeric variables. \returns the class representing
+ /// that operand in the AST of the expression or an error holding a
+ /// diagnostic against \p SM otherwise. If \p Expr starts with a "(" this
+ /// function will attempt to parse a parenthesized expression.
+ static Expected<std::unique_ptr<ExpressionAST>>
+ parseNumericOperand(StringRef &Expr, AllowedOperand AO, bool ConstraintParsed,
+ Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM);
+ /// Parses and updates \p RemainingExpr for a binary operation at line
+ /// \p LineNumber, or before input is parsed if \p LineNumber is None. The
+ /// left operand of this binary operation is given in \p LeftOp and \p Expr
+ /// holds the string for the full expression, including the left operand.
+ /// Parameter \p IsLegacyLineExpr indicates whether we are parsing a legacy
+ /// @LINE expression. Parameter \p Context points to the class instance
+ /// holding the live string and numeric variables. \returns the class
+ /// representing the binary operation in the AST of the expression, or an
+ /// error holding a diagnostic against \p SM otherwise.
+ static Expected<std::unique_ptr<ExpressionAST>>
+ parseBinop(StringRef Expr, StringRef &RemainingExpr,
+ std::unique_ptr<ExpressionAST> LeftOp, bool IsLegacyLineExpr,
+ Optional<size_t> LineNumber, FileCheckPatternContext *Context,
+ const SourceMgr &SM);
+
+ /// Parses a parenthesized expression inside \p Expr at line \p LineNumber, or
+ /// before input is parsed if \p LineNumber is None. \p Expr must start with
+  /// a '('. Accepts both literal values and numeric variables. Parameter
+  /// \p Context points to the class instance holding the live string and numeric
+ /// variables. \returns the class representing that operand in the AST of the
+ /// expression or an error holding a diagnostic against \p SM otherwise.
+ static Expected<std::unique_ptr<ExpressionAST>>
+ parseParenExpr(StringRef &Expr, Optional<size_t> LineNumber,
+ FileCheckPatternContext *Context, const SourceMgr &SM);
+
+  /// Parses \p Expr for an argument list belonging to a call to function
+  /// \p FuncName at line \p LineNumber, or before input is parsed if \p LineNumber
+ /// is None. Parameter \p FuncLoc is the source location used for diagnostics.
+ /// Parameter \p Context points to the class instance holding the live string
+ /// and numeric variables. \returns the class representing that call in the
+ /// AST of the expression or an error holding a diagnostic against \p SM
+ /// otherwise.
+ static Expected<std::unique_ptr<ExpressionAST>>
+ parseCallExpr(StringRef &Expr, StringRef FuncName,
+ Optional<size_t> LineNumber, FileCheckPatternContext *Context,
+ const SourceMgr &SM);
+};
+
+//===----------------------------------------------------------------------===//
+// Check Strings.
+//===----------------------------------------------------------------------===//
+
+/// A check that we found in the input file.
+struct FileCheckString {
+ /// The pattern to match.
+ Pattern Pat;
+
+ /// Which prefix name this check matched.
+ StringRef Prefix;
+
+ /// The location in the match file that the check string was specified.
+ SMLoc Loc;
+
+ /// All of the strings that are disallowed from occurring between this match
+ /// string and the previous one (or start of file).
+ std::vector<Pattern> DagNotStrings;
+
+ FileCheckString(const Pattern &P, StringRef S, SMLoc L)
+ : Pat(P), Prefix(S), Loc(L) {}
+
+ /// Matches check string and its "not strings" and/or "dag strings".
+ size_t Check(const SourceMgr &SM, StringRef Buffer, bool IsLabelScanMode,
+ size_t &MatchLen, FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) const;
+
+ /// Verifies that there is a single line in the given \p Buffer. Errors are
+ /// reported against \p SM.
+ bool CheckNext(const SourceMgr &SM, StringRef Buffer) const;
+ /// Verifies that there is no newline in the given \p Buffer. Errors are
+ /// reported against \p SM.
+ bool CheckSame(const SourceMgr &SM, StringRef Buffer) const;
+ /// Verifies that none of the strings in \p NotStrings are found in the given
+ /// \p Buffer. Errors are reported against \p SM and diagnostics recorded in
+ /// \p Diags according to the verbosity level set in \p Req.
+ bool CheckNot(const SourceMgr &SM, StringRef Buffer,
+ const std::vector<const Pattern *> &NotStrings,
+ const FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) const;
+ /// Matches "dag strings" and their mixed "not strings".
+ size_t CheckDag(const SourceMgr &SM, StringRef Buffer,
+ std::vector<const Pattern *> &NotStrings,
+ const FileCheckRequest &Req,
+ std::vector<FileCheckDiag> *Diags) const;
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm12/lib/Frontend/OpenMP/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Frontend/OpenMP/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Frontend/OpenMP/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/FuzzMutate/FuzzerCLI.cpp b/contrib/libs/llvm12/lib/FuzzMutate/FuzzerCLI.cpp
new file mode 100644
index 00000000000..be0d5bfcab4
--- /dev/null
+++ b/contrib/libs/llvm12/lib/FuzzMutate/FuzzerCLI.cpp
@@ -0,0 +1,209 @@
+//===-- FuzzerCLI.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/FuzzMutate/FuzzerCLI.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/IR/Verifier.h"
+
+using namespace llvm;
+
+void llvm::parseFuzzerCLOpts(int ArgC, char *ArgV[]) {
+ std::vector<const char *> CLArgs;
+ CLArgs.push_back(ArgV[0]);
+
+ int I = 1;
+ while (I < ArgC)
+ if (StringRef(ArgV[I++]).equals("-ignore_remaining_args=1"))
+ break;
+ while (I < ArgC)
+ CLArgs.push_back(ArgV[I++]);
+
+ cl::ParseCommandLineOptions(CLArgs.size(), CLArgs.data());
+}
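+
+// Illustrative sketch (not part of the upstream sources): everything up to
+// and including "-ignore_remaining_args=1" is dropped, so a hypothetical
+// libFuzzer command line such as
+//
+//   ./fuzzer -max_len=4096 -ignore_remaining_args=1 -mtriple=x86_64 -O2
+//
+// would hand only "-mtriple=x86_64 -O2" (plus argv[0]) to
+// cl::ParseCommandLineOptions.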
+
+void llvm::handleExecNameEncodedBEOpts(StringRef ExecName) {
+ std::vector<std::string> Args{std::string(ExecName)};
+
+ auto NameAndArgs = ExecName.split("--");
+ if (NameAndArgs.second.empty())
+ return;
+
+ SmallVector<StringRef, 4> Opts;
+ NameAndArgs.second.split(Opts, '-');
+ for (StringRef Opt : Opts) {
+ if (Opt.equals("gisel")) {
+ Args.push_back("-global-isel");
+ // For now we default GlobalISel to -O0
+ Args.push_back("-O0");
+ } else if (Opt.startswith("O")) {
+ Args.push_back("-" + Opt.str());
+ } else if (Triple(Opt).getArch()) {
+ Args.push_back("-mtriple=" + Opt.str());
+ } else {
+ errs() << ExecName << ": Unknown option: " << Opt << ".\n";
+ exit(1);
+ }
+ }
+ errs() << NameAndArgs.first << ": Injected args:";
+ for (int I = 1, E = Args.size(); I < E; ++I)
+ errs() << " " << Args[I];
+ errs() << "\n";
+
+ std::vector<const char *> CLArgs;
+ CLArgs.reserve(Args.size());
+ for (std::string &S : Args)
+ CLArgs.push_back(S.c_str());
+
+ cl::ParseCommandLineOptions(CLArgs.size(), CLArgs.data());
+}
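+
+// Hedged example of the exec-name encoding handled above: a binary named,
+// say, "my-fuzzer--aarch64-O2" (a hypothetical name) splits at "--" into
+// "aarch64-O2", which becomes "-mtriple=aarch64 -O2"; a "gisel" component
+// would instead inject "-global-isel -O0".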
+
+void llvm::handleExecNameEncodedOptimizerOpts(StringRef ExecName) {
+ // TODO: Refactor the parts shared with 'handleExecNameEncodedBEOpts'.
+ std::vector<std::string> Args{std::string(ExecName)};
+
+ auto NameAndArgs = ExecName.split("--");
+ if (NameAndArgs.second.empty())
+ return;
+
+ SmallVector<StringRef, 4> Opts;
+ NameAndArgs.second.split(Opts, '-');
+ for (StringRef Opt : Opts) {
+ if (Opt == "instcombine") {
+ Args.push_back("-passes=instcombine");
+ } else if (Opt == "earlycse") {
+ Args.push_back("-passes=early-cse");
+ } else if (Opt == "simplifycfg") {
+ Args.push_back("-passes=simplify-cfg");
+ } else if (Opt == "gvn") {
+ Args.push_back("-passes=gvn");
+ } else if (Opt == "sccp") {
+ Args.push_back("-passes=sccp");
+
+ } else if (Opt == "loop_predication") {
+ Args.push_back("-passes=loop-predication");
+ } else if (Opt == "guard_widening") {
+ Args.push_back("-passes=guard-widening");
+ } else if (Opt == "loop_rotate") {
+ Args.push_back("-passes=loop(rotate)");
+ } else if (Opt == "loop_unswitch") {
+ Args.push_back("-passes=loop(unswitch)");
+ } else if (Opt == "loop_unroll") {
+ Args.push_back("-passes=unroll");
+ } else if (Opt == "loop_vectorize") {
+ Args.push_back("-passes=loop-vectorize");
+ } else if (Opt == "licm") {
+ Args.push_back("-passes=licm");
+ } else if (Opt == "indvars") {
+ Args.push_back("-passes=indvars");
+ } else if (Opt == "strength_reduce") {
+ Args.push_back("-passes=loop-reduce");
+ } else if (Opt == "irce") {
+ Args.push_back("-passes=irce");
+
+ } else if (Triple(Opt).getArch()) {
+ Args.push_back("-mtriple=" + Opt.str());
+ } else {
+ errs() << ExecName << ": Unknown option: " << Opt << ".\n";
+ exit(1);
+ }
+ }
+
+ errs() << NameAndArgs.first << ": Injected args:";
+ for (int I = 1, E = Args.size(); I < E; ++I)
+ errs() << " " << Args[I];
+ errs() << "\n";
+
+ std::vector<const char *> CLArgs;
+ CLArgs.reserve(Args.size());
+ for (std::string &S : Args)
+ CLArgs.push_back(S.c_str());
+
+ cl::ParseCommandLineOptions(CLArgs.size(), CLArgs.data());
+}
+
+int llvm::runFuzzerOnInputs(int ArgC, char *ArgV[], FuzzerTestFun TestOne,
+ FuzzerInitFun Init) {
+ errs() << "*** This tool was not linked to libFuzzer.\n"
+ << "*** No fuzzing will be performed.\n";
+ if (int RC = Init(&ArgC, &ArgV)) {
+ errs() << "Initialization failed\n";
+ return RC;
+ }
+
+ for (int I = 1; I < ArgC; ++I) {
+ StringRef Arg(ArgV[I]);
+ if (Arg.startswith("-")) {
+ if (Arg.equals("-ignore_remaining_args=1"))
+ break;
+ continue;
+ }
+
+ auto BufOrErr = MemoryBuffer::getFile(Arg, /*FileSize=*/-1,
+ /*RequiresNullTerminator=*/false);
+ if (std::error_code EC = BufOrErr.getError()) {
+ errs() << "Error reading file: " << Arg << ": " << EC.message() << "\n";
+ return 1;
+ }
+ std::unique_ptr<MemoryBuffer> Buf = std::move(BufOrErr.get());
+ errs() << "Running: " << Arg << " (" << Buf->getBufferSize() << " bytes)\n";
+ TestOne(reinterpret_cast<const uint8_t *>(Buf->getBufferStart()),
+ Buf->getBufferSize());
+ }
+ return 0;
+}
+
+std::unique_ptr<Module> llvm::parseModule(
+ const uint8_t *Data, size_t Size, LLVMContext &Context) {
+
+ if (Size <= 1)
+ // We get bogus data given an empty corpus - just create a new module.
+ return std::make_unique<Module>("M", Context);
+
+ auto Buffer = MemoryBuffer::getMemBuffer(
+ StringRef(reinterpret_cast<const char *>(Data), Size), "Fuzzer input",
+ /*RequiresNullTerminator=*/false);
+
+ auto M = parseBitcodeFile(Buffer->getMemBufferRef(), Context);
+ if (Error E = M.takeError()) {
+ errs() << toString(std::move(E)) << "\n";
+ return nullptr;
+ }
+ return std::move(M.get());
+}
+
+size_t llvm::writeModule(const Module &M, uint8_t *Dest, size_t MaxSize) {
+ std::string Buf;
+ {
+ raw_string_ostream OS(Buf);
+ WriteBitcodeToFile(M, OS);
+ }
+ if (Buf.size() > MaxSize)
+ return 0;
+ memcpy(Dest, Buf.data(), Buf.size());
+ return Buf.size();
+}
+
+std::unique_ptr<Module> llvm::parseAndVerify(const uint8_t *Data, size_t Size,
+ LLVMContext &Context) {
+ auto M = parseModule(Data, Size, Context);
+ if (!M || verifyModule(*M, &errs()))
+ return nullptr;
+
+ return M;
+}
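+
+// Usage sketch (illustrative, in the spirit of a libFuzzer custom mutator)
+// showing how the helpers above round-trip IR through the raw buffer:
+//
+//   LLVMContext Ctx;
+//   if (auto M = parseAndVerify(Data, Size, Ctx)) {
+//     // ... mutate *M ...
+//     return writeModule(*M, Data, MaxSize); // 0 if the module didn't fit
+//   }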
diff --git a/contrib/libs/llvm12/lib/FuzzMutate/IRMutator.cpp b/contrib/libs/llvm12/lib/FuzzMutate/IRMutator.cpp
new file mode 100644
index 00000000000..33b90097ab2
--- /dev/null
+++ b/contrib/libs/llvm12/lib/FuzzMutate/IRMutator.cpp
@@ -0,0 +1,242 @@
+//===-- IRMutator.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/FuzzMutate/IRMutator.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/FuzzMutate/Operations.h"
+#include "llvm/FuzzMutate/Random.h"
+#include "llvm/FuzzMutate/RandomIRBuilder.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Transforms/Scalar/DCE.h"
+
+using namespace llvm;
+
+static void createEmptyFunction(Module &M) {
+ // TODO: Some arguments and a return value would probably be more interesting.
+ LLVMContext &Context = M.getContext();
+ Function *F = Function::Create(FunctionType::get(Type::getVoidTy(Context), {},
+ /*isVarArg=*/false),
+ GlobalValue::ExternalLinkage, "f", &M);
+ BasicBlock *BB = BasicBlock::Create(Context, "BB", F);
+ ReturnInst::Create(Context, BB);
+}
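+
+// For reference, the module produced above is equivalent to this IR (a
+// sketch, assuming the default "f"/"BB" names used here):
+//
+//   define void @f() {
+//   BB:
+//     ret void
+//   }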
+
+void IRMutationStrategy::mutate(Module &M, RandomIRBuilder &IB) {
+ if (M.empty())
+ createEmptyFunction(M);
+
+ auto RS = makeSampler<Function *>(IB.Rand);
+ for (Function &F : M)
+ if (!F.isDeclaration())
+ RS.sample(&F, /*Weight=*/1);
+ mutate(*RS.getSelection(), IB);
+}
+
+void IRMutationStrategy::mutate(Function &F, RandomIRBuilder &IB) {
+ mutate(*makeSampler(IB.Rand, make_pointer_range(F)).getSelection(), IB);
+}
+
+void IRMutationStrategy::mutate(BasicBlock &BB, RandomIRBuilder &IB) {
+ mutate(*makeSampler(IB.Rand, make_pointer_range(BB)).getSelection(), IB);
+}
+
+void IRMutator::mutateModule(Module &M, int Seed, size_t CurSize,
+ size_t MaxSize) {
+ std::vector<Type *> Types;
+ for (const auto &Getter : AllowedTypes)
+ Types.push_back(Getter(M.getContext()));
+ RandomIRBuilder IB(Seed, Types);
+
+ auto RS = makeSampler<IRMutationStrategy *>(IB.Rand);
+ for (const auto &Strategy : Strategies)
+ RS.sample(Strategy.get(),
+ Strategy->getWeight(CurSize, MaxSize, RS.totalWeight()));
+ auto Strategy = RS.getSelection();
+
+ Strategy->mutate(M, IB);
+}
+
+static void eliminateDeadCode(Function &F) {
+ FunctionPassManager FPM;
+ FPM.addPass(DCEPass());
+ FunctionAnalysisManager FAM;
+ FAM.registerPass([&] { return TargetLibraryAnalysis(); });
+ FAM.registerPass([&] { return PassInstrumentationAnalysis(); });
+ FPM.run(F, FAM);
+}
+
+void InjectorIRStrategy::mutate(Function &F, RandomIRBuilder &IB) {
+ IRMutationStrategy::mutate(F, IB);
+ eliminateDeadCode(F);
+}
+
+std::vector<fuzzerop::OpDescriptor> InjectorIRStrategy::getDefaultOps() {
+ std::vector<fuzzerop::OpDescriptor> Ops;
+ describeFuzzerIntOps(Ops);
+ describeFuzzerFloatOps(Ops);
+ describeFuzzerControlFlowOps(Ops);
+ describeFuzzerPointerOps(Ops);
+ describeFuzzerAggregateOps(Ops);
+ describeFuzzerVectorOps(Ops);
+ return Ops;
+}
+
+Optional<fuzzerop::OpDescriptor>
+InjectorIRStrategy::chooseOperation(Value *Src, RandomIRBuilder &IB) {
+ auto OpMatchesPred = [&Src](fuzzerop::OpDescriptor &Op) {
+ return Op.SourcePreds[0].matches({}, Src);
+ };
+ auto RS = makeSampler(IB.Rand, make_filter_range(Operations, OpMatchesPred));
+ if (RS.isEmpty())
+ return None;
+ return *RS;
+}
+
+void InjectorIRStrategy::mutate(BasicBlock &BB, RandomIRBuilder &IB) {
+ SmallVector<Instruction *, 32> Insts;
+ for (auto I = BB.getFirstInsertionPt(), E = BB.end(); I != E; ++I)
+ Insts.push_back(&*I);
+ if (Insts.empty())
+ return;
+
+ // Choose an insertion point for our new instruction.
+ size_t IP = uniform<size_t>(IB.Rand, 0, Insts.size() - 1);
+
+ auto InstsBefore = makeArrayRef(Insts).slice(0, IP);
+ auto InstsAfter = makeArrayRef(Insts).slice(IP);
+
+ // Choose a source, which will be used to constrain the operation selection.
+ SmallVector<Value *, 2> Srcs;
+ Srcs.push_back(IB.findOrCreateSource(BB, InstsBefore));
+
+ // Choose an operation that's constrained to be valid for the type of the
+ // source, collect any other sources it needs, and then build it.
+ auto OpDesc = chooseOperation(Srcs[0], IB);
+ // Bail if no operation was found
+ if (!OpDesc)
+ return;
+
+ for (const auto &Pred : makeArrayRef(OpDesc->SourcePreds).slice(1))
+ Srcs.push_back(IB.findOrCreateSource(BB, InstsBefore, Srcs, Pred));
+
+ if (Value *Op = OpDesc->BuilderFunc(Srcs, Insts[IP])) {
+ // Find a sink and wire up the results of the operation.
+ IB.connectToSink(BB, InstsAfter, Op);
+ }
+}
+
+uint64_t InstDeleterIRStrategy::getWeight(size_t CurrentSize, size_t MaxSize,
+ uint64_t CurrentWeight) {
+ // If we have less than 200 bytes of headroom left, panic and try to always
+ // delete.
+ if (CurrentSize > MaxSize - 200)
+ return CurrentWeight ? CurrentWeight * 100 : 1;
+ // Draw a line starting from when we only have 1k left and increasing
+ // linearly to double the current weight.
+ int64_t Line = (-2 * static_cast<int64_t>(CurrentWeight)) *
+ (static_cast<int64_t>(MaxSize) -
+ static_cast<int64_t>(CurrentSize) - 1000) /
+ 1000;
+ // Clamp negative weights to zero.
+ if (Line < 0)
+ return 0;
+ return Line;
+}
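+
+// Worked example (illustrative numbers): with MaxSize = 8192, CurrentSize =
+// 7692, and CurrentWeight = 10, the headroom is 500 bytes, so
+// Line = (-20) * (8192 - 7692 - 1000) / 1000 = (-20) * (-500) / 1000 = 10;
+// the deleter's weight starts at 0 with 1k of headroom left and grows
+// linearly to 2x the current weight as the headroom approaches zero.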
+
+void InstDeleterIRStrategy::mutate(Function &F, RandomIRBuilder &IB) {
+ auto RS = makeSampler<Instruction *>(IB.Rand);
+ for (Instruction &Inst : instructions(F)) {
+ // TODO: We can't handle these instructions.
+ if (Inst.isTerminator() || Inst.isEHPad() ||
+ Inst.isSwiftError() || isa<PHINode>(Inst))
+ continue;
+
+ RS.sample(&Inst, /*Weight=*/1);
+ }
+ if (RS.isEmpty())
+ return;
+
+ // Delete the instruction.
+ mutate(*RS.getSelection(), IB);
+ // Clean up any dead code that's left over after removing the instruction.
+ eliminateDeadCode(F);
+}
+
+void InstDeleterIRStrategy::mutate(Instruction &Inst, RandomIRBuilder &IB) {
+ assert(!Inst.isTerminator() && "Deleting terminators invalidates CFG");
+
+ if (Inst.getType()->isVoidTy()) {
+ // Instructions with void type (i.e., store) have no uses to worry about.
+ // Just erase it and move on.
+ Inst.eraseFromParent();
+ return;
+ }
+
+ // Otherwise we need to find some other value with the right type to keep the
+ // users happy.
+ auto Pred = fuzzerop::onlyType(Inst.getType());
+ auto RS = makeSampler<Value *>(IB.Rand);
+ SmallVector<Instruction *, 32> InstsBefore;
+ BasicBlock *BB = Inst.getParent();
+ for (auto I = BB->getFirstInsertionPt(), E = Inst.getIterator(); I != E;
+ ++I) {
+ if (Pred.matches({}, &*I))
+ RS.sample(&*I, /*Weight=*/1);
+ InstsBefore.push_back(&*I);
+ }
+ if (!RS)
+ RS.sample(IB.newSource(*BB, InstsBefore, {}, Pred), /*Weight=*/1);
+
+ Inst.replaceAllUsesWith(RS.getSelection());
+ Inst.eraseFromParent();
+}
+
+void InstModificationIRStrategy::mutate(Instruction &Inst,
+ RandomIRBuilder &IB) {
+ SmallVector<std::function<void()>, 8> Modifications;
+ CmpInst *CI = nullptr;
+ GetElementPtrInst *GEP = nullptr;
+ switch (Inst.getOpcode()) {
+ default:
+ break;
+ case Instruction::Add:
+ case Instruction::Mul:
+ case Instruction::Sub:
+ case Instruction::Shl:
+ Modifications.push_back([&Inst]() { Inst.setHasNoSignedWrap(true); });
+ Modifications.push_back([&Inst]() { Inst.setHasNoSignedWrap(false); });
+ Modifications.push_back([&Inst]() { Inst.setHasNoUnsignedWrap(true); });
+ Modifications.push_back([&Inst]() { Inst.setHasNoUnsignedWrap(false); });
+
+ break;
+ case Instruction::ICmp:
+ CI = cast<ICmpInst>(&Inst);
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_EQ); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_NE); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_UGT); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_UGE); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_ULT); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_ULE); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_SGT); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_SGE); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_SLT); });
+ Modifications.push_back([CI]() { CI->setPredicate(CmpInst::ICMP_SLE); });
+ break;
+ case Instruction::GetElementPtr:
+ GEP = cast<GetElementPtrInst>(&Inst);
+ Modifications.push_back([GEP]() { GEP->setIsInBounds(true); });
+ Modifications.push_back([GEP]() { GEP->setIsInBounds(false); });
+ break;
+ }
+
+ auto RS = makeSampler(IB.Rand, Modifications);
+ if (RS)
+ RS.getSelection()();
+}
diff --git a/contrib/libs/llvm12/lib/FuzzMutate/OpDescriptor.cpp b/contrib/libs/llvm12/lib/FuzzMutate/OpDescriptor.cpp
new file mode 100644
index 00000000000..67d44be8b69
--- /dev/null
+++ b/contrib/libs/llvm12/lib/FuzzMutate/OpDescriptor.cpp
@@ -0,0 +1,37 @@
+//===-- OpDescriptor.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/FuzzMutate/OpDescriptor.h"
+#include "llvm/IR/Constants.h"
+
+using namespace llvm;
+using namespace fuzzerop;
+
+void fuzzerop::makeConstantsWithType(Type *T, std::vector<Constant *> &Cs) {
+ if (auto *IntTy = dyn_cast<IntegerType>(T)) {
+ uint64_t W = IntTy->getBitWidth();
+ Cs.push_back(ConstantInt::get(IntTy, APInt::getMaxValue(W)));
+ Cs.push_back(ConstantInt::get(IntTy, APInt::getMinValue(W)));
+ Cs.push_back(ConstantInt::get(IntTy, APInt::getSignedMaxValue(W)));
+ Cs.push_back(ConstantInt::get(IntTy, APInt::getSignedMinValue(W)));
+ Cs.push_back(ConstantInt::get(IntTy, APInt::getOneBitSet(W, W / 2)));
+ } else if (T->isFloatingPointTy()) {
+ auto &Ctx = T->getContext();
+ auto &Sem = T->getFltSemantics();
+ Cs.push_back(ConstantFP::get(Ctx, APFloat::getZero(Sem)));
+ Cs.push_back(ConstantFP::get(Ctx, APFloat::getLargest(Sem)));
+ Cs.push_back(ConstantFP::get(Ctx, APFloat::getSmallest(Sem)));
+ } else
+ Cs.push_back(UndefValue::get(T));
+}
+
+std::vector<Constant *> fuzzerop::makeConstantsWithType(Type *T) {
+ std::vector<Constant *> Result;
+ makeConstantsWithType(T, Result);
+ return Result;
+}
diff --git a/contrib/libs/llvm12/lib/FuzzMutate/Operations.cpp b/contrib/libs/llvm12/lib/FuzzMutate/Operations.cpp
new file mode 100644
index 00000000000..a37fd5454dd
--- /dev/null
+++ b/contrib/libs/llvm12/lib/FuzzMutate/Operations.cpp
@@ -0,0 +1,322 @@
+//===-- Operations.cpp ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/FuzzMutate/Operations.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+
+using namespace llvm;
+using namespace fuzzerop;
+
+void llvm::describeFuzzerIntOps(std::vector<fuzzerop::OpDescriptor> &Ops) {
+ Ops.push_back(binOpDescriptor(1, Instruction::Add));
+ Ops.push_back(binOpDescriptor(1, Instruction::Sub));
+ Ops.push_back(binOpDescriptor(1, Instruction::Mul));
+ Ops.push_back(binOpDescriptor(1, Instruction::SDiv));
+ Ops.push_back(binOpDescriptor(1, Instruction::UDiv));
+ Ops.push_back(binOpDescriptor(1, Instruction::SRem));
+ Ops.push_back(binOpDescriptor(1, Instruction::URem));
+ Ops.push_back(binOpDescriptor(1, Instruction::Shl));
+ Ops.push_back(binOpDescriptor(1, Instruction::LShr));
+ Ops.push_back(binOpDescriptor(1, Instruction::AShr));
+ Ops.push_back(binOpDescriptor(1, Instruction::And));
+ Ops.push_back(binOpDescriptor(1, Instruction::Or));
+ Ops.push_back(binOpDescriptor(1, Instruction::Xor));
+
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_EQ));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_NE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_UGT));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_UGE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_ULT));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_ULE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_SGT));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_SGE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_SLT));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::ICmp, CmpInst::ICMP_SLE));
+}
+
+void llvm::describeFuzzerFloatOps(std::vector<fuzzerop::OpDescriptor> &Ops) {
+ Ops.push_back(binOpDescriptor(1, Instruction::FAdd));
+ Ops.push_back(binOpDescriptor(1, Instruction::FSub));
+ Ops.push_back(binOpDescriptor(1, Instruction::FMul));
+ Ops.push_back(binOpDescriptor(1, Instruction::FDiv));
+ Ops.push_back(binOpDescriptor(1, Instruction::FRem));
+
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_FALSE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_OEQ));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_OGT));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_OGE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_OLT));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_OLE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_ONE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_ORD));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_UNO));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_UEQ));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_UGT));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_UGE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_ULT));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_ULE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_UNE));
+ Ops.push_back(cmpOpDescriptor(1, Instruction::FCmp, CmpInst::FCMP_TRUE));
+}
+
+void llvm::describeFuzzerControlFlowOps(
+ std::vector<fuzzerop::OpDescriptor> &Ops) {
+ Ops.push_back(splitBlockDescriptor(1));
+}
+
+void llvm::describeFuzzerPointerOps(std::vector<fuzzerop::OpDescriptor> &Ops) {
+ Ops.push_back(gepDescriptor(1));
+}
+
+void llvm::describeFuzzerAggregateOps(
+ std::vector<fuzzerop::OpDescriptor> &Ops) {
+ Ops.push_back(extractValueDescriptor(1));
+ Ops.push_back(insertValueDescriptor(1));
+}
+
+void llvm::describeFuzzerVectorOps(std::vector<fuzzerop::OpDescriptor> &Ops) {
+ Ops.push_back(extractElementDescriptor(1));
+ Ops.push_back(insertElementDescriptor(1));
+ Ops.push_back(shuffleVectorDescriptor(1));
+}
+
+OpDescriptor llvm::fuzzerop::binOpDescriptor(unsigned Weight,
+ Instruction::BinaryOps Op) {
+ auto buildOp = [Op](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ return BinaryOperator::Create(Op, Srcs[0], Srcs[1], "B", Inst);
+ };
+ switch (Op) {
+ case Instruction::Add:
+ case Instruction::Sub:
+ case Instruction::Mul:
+ case Instruction::SDiv:
+ case Instruction::UDiv:
+ case Instruction::SRem:
+ case Instruction::URem:
+ case Instruction::Shl:
+ case Instruction::LShr:
+ case Instruction::AShr:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor:
+ return {Weight, {anyIntType(), matchFirstType()}, buildOp};
+ case Instruction::FAdd:
+ case Instruction::FSub:
+ case Instruction::FMul:
+ case Instruction::FDiv:
+ case Instruction::FRem:
+ return {Weight, {anyFloatType(), matchFirstType()}, buildOp};
+ case Instruction::BinaryOpsEnd:
+ llvm_unreachable("Value out of range of enum");
+ }
+ llvm_unreachable("Covered switch");
+}
+
+OpDescriptor llvm::fuzzerop::cmpOpDescriptor(unsigned Weight,
+ Instruction::OtherOps CmpOp,
+ CmpInst::Predicate Pred) {
+ auto buildOp = [CmpOp, Pred](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ return CmpInst::Create(CmpOp, Pred, Srcs[0], Srcs[1], "C", Inst);
+ };
+
+ switch (CmpOp) {
+ case Instruction::ICmp:
+ return {Weight, {anyIntType(), matchFirstType()}, buildOp};
+ case Instruction::FCmp:
+ return {Weight, {anyFloatType(), matchFirstType()}, buildOp};
+ default:
+ llvm_unreachable("CmpOp must be ICmp or FCmp");
+ }
+}
+
+OpDescriptor llvm::fuzzerop::splitBlockDescriptor(unsigned Weight) {
+ auto buildSplitBlock = [](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ BasicBlock *Block = Inst->getParent();
+ BasicBlock *Next = Block->splitBasicBlock(Inst, "BB");
+
+ // If it was an exception handling block, we are done.
+ if (Block->isEHPad())
+ return nullptr;
+
+ // Loop back on this block by replacing the unconditional forward branch
+ // with a conditional branch that has a backedge.
+ if (Block != &Block->getParent()->getEntryBlock()) {
+ BranchInst::Create(Block, Next, Srcs[0], Block->getTerminator());
+ Block->getTerminator()->eraseFromParent();
+
+ // We need values for each phi in the block. Since there isn't a good way
+ // to do a variable number of input values currently, we just fill them
+ // with undef.
+ for (PHINode &PHI : Block->phis())
+ PHI.addIncoming(UndefValue::get(PHI.getType()), Block);
+ }
+ return nullptr;
+ };
+ SourcePred isInt1Ty{[](ArrayRef<Value *>, const Value *V) {
+ return V->getType()->isIntegerTy(1);
+ },
+ None};
+ return {Weight, {isInt1Ty}, buildSplitBlock};
+}
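+
+// Illustrative effect (a sketch): for a non-entry block %B split at the
+// chosen instruction into %B -> %BB, the new unconditional branch at the end
+// of %B is replaced with "br i1 %src, label %B, label %BB", so the i1 source
+// now guards a backedge onto %B.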
+
+OpDescriptor llvm::fuzzerop::gepDescriptor(unsigned Weight) {
+ auto buildGEP = [](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ Type *Ty = cast<PointerType>(Srcs[0]->getType())->getElementType();
+ auto Indices = makeArrayRef(Srcs).drop_front(1);
+ return GetElementPtrInst::Create(Ty, Srcs[0], Indices, "G", Inst);
+ };
+ // TODO: Handle aggregates and vectors
+ // TODO: Support multiple indices.
+ // TODO: Try to avoid meaningless accesses.
+ return {Weight, {sizedPtrType(), anyIntType()}, buildGEP};
+}
+
+static uint64_t getAggregateNumElements(Type *T) {
+ assert(T->isAggregateType() && "Not a struct or array");
+ if (isa<StructType>(T))
+ return T->getStructNumElements();
+ return T->getArrayNumElements();
+}
+
+static SourcePred validExtractValueIndex() {
+ auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
+ if (auto *CI = dyn_cast<ConstantInt>(V))
+ if (!CI->uge(getAggregateNumElements(Cur[0]->getType())))
+ return true;
+ return false;
+ };
+ auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *> Ts) {
+ std::vector<Constant *> Result;
+ auto *Int32Ty = Type::getInt32Ty(Cur[0]->getContext());
+ uint64_t N = getAggregateNumElements(Cur[0]->getType());
+ // Create indices at the start, end, and middle, but avoid dups.
+ Result.push_back(ConstantInt::get(Int32Ty, 0));
+ if (N > 1)
+ Result.push_back(ConstantInt::get(Int32Ty, N - 1));
+ if (N > 2)
+ Result.push_back(ConstantInt::get(Int32Ty, N / 2));
+ return Result;
+ };
+ return {Pred, Make};
+}
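+
+// Example (illustrative): for an aggregate with N = 5 elements, Make yields
+// the constants {0, 4, 2} -- first, last, and middle -- while N = 1 yields
+// just {0}, so duplicate indices are avoided for small aggregates.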
+
+OpDescriptor llvm::fuzzerop::extractValueDescriptor(unsigned Weight) {
+ auto buildExtract = [](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ // TODO: It's pretty inefficient to shuffle this all through constants.
+ unsigned Idx = cast<ConstantInt>(Srcs[1])->getZExtValue();
+ return ExtractValueInst::Create(Srcs[0], {Idx}, "E", Inst);
+ };
+ // TODO: Should we handle multiple indices?
+ return {Weight, {anyAggregateType(), validExtractValueIndex()}, buildExtract};
+}
+
+static SourcePred matchScalarInAggregate() {
+ auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
+ if (auto *ArrayT = dyn_cast<ArrayType>(Cur[0]->getType()))
+ return V->getType() == ArrayT->getElementType();
+
+ auto *STy = cast<StructType>(Cur[0]->getType());
+ for (int I = 0, E = STy->getNumElements(); I < E; ++I)
+ if (STy->getTypeAtIndex(I) == V->getType())
+ return true;
+ return false;
+ };
+ auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *>) {
+ if (auto *ArrayT = dyn_cast<ArrayType>(Cur[0]->getType()))
+ return makeConstantsWithType(ArrayT->getElementType());
+
+ std::vector<Constant *> Result;
+ auto *STy = cast<StructType>(Cur[0]->getType());
+ for (int I = 0, E = STy->getNumElements(); I < E; ++I)
+ makeConstantsWithType(STy->getTypeAtIndex(I), Result);
+ return Result;
+ };
+ return {Pred, Make};
+}
+
+static SourcePred validInsertValueIndex() {
+ auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
+ if (auto *CI = dyn_cast<ConstantInt>(V))
+ if (CI->getBitWidth() == 32) {
+ Type *Indexed = ExtractValueInst::getIndexedType(Cur[0]->getType(),
+ CI->getZExtValue());
+ return Indexed == Cur[1]->getType();
+ }
+ return false;
+ };
+ auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *> Ts) {
+ std::vector<Constant *> Result;
+ auto *Int32Ty = Type::getInt32Ty(Cur[0]->getContext());
+ auto *BaseTy = Cur[0]->getType();
+ int I = 0;
+ while (Type *Indexed = ExtractValueInst::getIndexedType(BaseTy, I)) {
+ if (Indexed == Cur[1]->getType())
+ Result.push_back(ConstantInt::get(Int32Ty, I));
+ ++I;
+ }
+ return Result;
+ };
+ return {Pred, Make};
+}
+
+OpDescriptor llvm::fuzzerop::insertValueDescriptor(unsigned Weight) {
+ auto buildInsert = [](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ // TODO: It's pretty inefficient to shuffle this all through constants.
+ unsigned Idx = cast<ConstantInt>(Srcs[2])->getZExtValue();
+ return InsertValueInst::Create(Srcs[0], Srcs[1], {Idx}, "I", Inst);
+ };
+ return {
+ Weight,
+ {anyAggregateType(), matchScalarInAggregate(), validInsertValueIndex()},
+ buildInsert};
+}
+
+OpDescriptor llvm::fuzzerop::extractElementDescriptor(unsigned Weight) {
+ auto buildExtract = [](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ return ExtractElementInst::Create(Srcs[0], Srcs[1], "E", Inst);
+ };
+ // TODO: Try to avoid undefined accesses.
+ return {Weight, {anyVectorType(), anyIntType()}, buildExtract};
+}
+
+OpDescriptor llvm::fuzzerop::insertElementDescriptor(unsigned Weight) {
+ auto buildInsert = [](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ return InsertElementInst::Create(Srcs[0], Srcs[1], Srcs[2], "I", Inst);
+ };
+ // TODO: Try to avoid undefined accesses.
+ return {Weight,
+ {anyVectorType(), matchScalarOfFirstType(), anyIntType()},
+ buildInsert};
+}
+
+static SourcePred validShuffleVectorIndex() {
+ auto Pred = [](ArrayRef<Value *> Cur, const Value *V) {
+ return ShuffleVectorInst::isValidOperands(Cur[0], Cur[1], V);
+ };
+ auto Make = [](ArrayRef<Value *> Cur, ArrayRef<Type *> Ts) {
+ auto *FirstTy = cast<FixedVectorType>(Cur[0]->getType());
+ auto *Int32Ty = Type::getInt32Ty(Cur[0]->getContext());
+ // TODO: It's straightforward to make up reasonable values, but listing them
+ // exhaustively would be insane. Come up with a couple of sensible ones.
+ return std::vector<Constant *>{UndefValue::get(
+ FixedVectorType::get(Int32Ty, FirstTy->getNumElements()))};
+ };
+ return {Pred, Make};
+}
+
+OpDescriptor llvm::fuzzerop::shuffleVectorDescriptor(unsigned Weight) {
+ auto buildShuffle = [](ArrayRef<Value *> Srcs, Instruction *Inst) {
+ return new ShuffleVectorInst(Srcs[0], Srcs[1], Srcs[2], "S", Inst);
+ };
+ return {Weight,
+ {anyVectorType(), matchFirstType(), validShuffleVectorIndex()},
+ buildShuffle};
+}
diff --git a/contrib/libs/llvm12/lib/FuzzMutate/RandomIRBuilder.cpp b/contrib/libs/llvm12/lib/FuzzMutate/RandomIRBuilder.cpp
new file mode 100644
index 00000000000..1295714839e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/FuzzMutate/RandomIRBuilder.cpp
@@ -0,0 +1,156 @@
+//===-- RandomIRBuilder.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/FuzzMutate/RandomIRBuilder.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/FuzzMutate/Random.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+
+using namespace llvm;
+using namespace fuzzerop;
+
+Value *RandomIRBuilder::findOrCreateSource(BasicBlock &BB,
+ ArrayRef<Instruction *> Insts) {
+ return findOrCreateSource(BB, Insts, {}, anyType());
+}
+
+Value *RandomIRBuilder::findOrCreateSource(BasicBlock &BB,
+ ArrayRef<Instruction *> Insts,
+ ArrayRef<Value *> Srcs,
+ SourcePred Pred) {
+ auto MatchesPred = [&Srcs, &Pred](Instruction *Inst) {
+ return Pred.matches(Srcs, Inst);
+ };
+ auto RS = makeSampler(Rand, make_filter_range(Insts, MatchesPred));
+ // Also consider choosing no source, meaning we want a new one.
+ RS.sample(nullptr, /*Weight=*/1);
+ if (Instruction *Src = RS.getSelection())
+ return Src;
+ return newSource(BB, Insts, Srcs, Pred);
+}
+
+Value *RandomIRBuilder::newSource(BasicBlock &BB, ArrayRef<Instruction *> Insts,
+ ArrayRef<Value *> Srcs, SourcePred Pred) {
+ // Generate some constants to choose from.
+ auto RS = makeSampler<Value *>(Rand);
+ RS.sample(Pred.generate(Srcs, KnownTypes));
+
+ // If we can find a pointer to load from, use it half the time.
+ Value *Ptr = findPointer(BB, Insts, Srcs, Pred);
+ if (Ptr) {
+ // Create a load from the chosen pointer.
+ auto IP = BB.getFirstInsertionPt();
+ if (auto *I = dyn_cast<Instruction>(Ptr)) {
+ IP = ++I->getIterator();
+ assert(IP != BB.end() && "guaranteed by findPointer");
+ }
+ auto *NewLoad = new LoadInst(
+ cast<PointerType>(Ptr->getType())->getElementType(), Ptr, "L", &*IP);
+
+ // Only sample this load if it really matches the descriptor
+ if (Pred.matches(Srcs, NewLoad))
+ RS.sample(NewLoad, RS.totalWeight());
+ else
+ NewLoad->eraseFromParent();
+ }
+
+ assert(!RS.isEmpty() && "Failed to generate sources");
+ return RS.getSelection();
+}
+
+static bool isCompatibleReplacement(const Instruction *I, const Use &Operand,
+ const Value *Replacement) {
+ if (Operand->getType() != Replacement->getType())
+ return false;
+ switch (I->getOpcode()) {
+ case Instruction::GetElementPtr:
+ case Instruction::ExtractElement:
+ case Instruction::ExtractValue:
+ // TODO: We could potentially validate these, but for now just leave indices
+ // alone.
+ if (Operand.getOperandNo() >= 1)
+ return false;
+ break;
+ case Instruction::InsertValue:
+ case Instruction::InsertElement:
+ case Instruction::ShuffleVector:
+ if (Operand.getOperandNo() >= 2)
+ return false;
+ break;
+ default:
+ break;
+ }
+ return true;
+}
+
+void RandomIRBuilder::connectToSink(BasicBlock &BB,
+ ArrayRef<Instruction *> Insts, Value *V) {
+ auto RS = makeSampler<Use *>(Rand);
+ for (auto &I : Insts) {
+ if (isa<IntrinsicInst>(I))
+ // TODO: Replacing operands of intrinsics would be interesting, but
+ // there's no easy way to verify that a given replacement is valid given
+ // that intrinsics can impose arbitrary constraints.
+ continue;
+ for (Use &U : I->operands())
+ if (isCompatibleReplacement(I, U, V))
+ RS.sample(&U, 1);
+ }
+ // Also consider choosing no sink, meaning we want a new one.
+ RS.sample(nullptr, /*Weight=*/1);
+
+ if (Use *Sink = RS.getSelection()) {
+ User *U = Sink->getUser();
+ unsigned OpNo = Sink->getOperandNo();
+ U->setOperand(OpNo, V);
+ return;
+ }
+ newSink(BB, Insts, V);
+}
+
+void RandomIRBuilder::newSink(BasicBlock &BB, ArrayRef<Instruction *> Insts,
+ Value *V) {
+ Value *Ptr = findPointer(BB, Insts, {V}, matchFirstType());
+ if (!Ptr) {
+ if (uniform(Rand, 0, 1))
+ Ptr = new AllocaInst(V->getType(), 0, "A", &*BB.getFirstInsertionPt());
+ else
+ Ptr = UndefValue::get(PointerType::get(V->getType(), 0));
+ }
+
+ new StoreInst(V, Ptr, Insts.back());
+}
+
+Value *RandomIRBuilder::findPointer(BasicBlock &BB,
+ ArrayRef<Instruction *> Insts,
+ ArrayRef<Value *> Srcs, SourcePred Pred) {
+ auto IsMatchingPtr = [&Srcs, &Pred](Instruction *Inst) {
+ // Invoke instructions sometimes produce valid pointers, but currently we
+ // can't insert loads or stores from them.
+ if (Inst->isTerminator())
+ return false;
+
+ if (auto PtrTy = dyn_cast<PointerType>(Inst->getType())) {
+ // We can never generate loads from non-first-class or unsized types.
+ if (!PtrTy->getElementType()->isSized() ||
+ !PtrTy->getElementType()->isFirstClassType())
+ return false;
+
+ // TODO: Check if this is horribly expensive.
+ return Pred.matches(Srcs, UndefValue::get(PtrTy->getElementType()));
+ }
+ return false;
+ };
+ if (auto RS = makeSampler(Rand, make_filter_range(Insts, IsMatchingPtr)))
+ return RS.getSelection();
+ return nullptr;
+}
diff --git a/contrib/libs/llvm12/lib/IR/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/IR/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/IR/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/IRReader/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/IRReader/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/IRReader/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/InterfaceStub/ELFObjHandler.cpp b/contrib/libs/llvm12/lib/InterfaceStub/ELFObjHandler.cpp
new file mode 100644
index 00000000000..255d301362e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/InterfaceStub/ELFObjHandler.cpp
@@ -0,0 +1,680 @@
+//===- ELFObjHandler.cpp --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InterfaceStub/ELFObjHandler.h"
+#include "llvm/InterfaceStub/ELFStub.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ELFTypes.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileOutputBuffer.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Process.h"
+
+using llvm::MemoryBufferRef;
+using llvm::object::ELFObjectFile;
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::ELF;
+
+namespace llvm {
+namespace elfabi {
+
+// Simple struct to hold relevant .dynamic entries.
+struct DynamicEntries {
+ uint64_t StrTabAddr = 0;
+ uint64_t StrSize = 0;
+ Optional<uint64_t> SONameOffset;
+ std::vector<uint64_t> NeededLibNames;
+ // Symbol table:
+ uint64_t DynSymAddr = 0;
+ // Hash tables:
+ Optional<uint64_t> ElfHash;
+ Optional<uint64_t> GnuHash;
+};
+
+/// This initializes an ELF file header with information specific to a binary
+/// dynamic shared object.
+/// Offsets, indexes, links, etc. for section and program headers are just
+/// zero-initialized as they will be updated elsewhere.
+///
+/// @param ElfHeader Target ELFT::Ehdr to populate.
+/// @param Machine Target architecture (e_machine from ELF specifications).
+template <class ELFT>
+static void initELFHeader(typename ELFT::Ehdr &ElfHeader, uint16_t Machine) {
+ memset(&ElfHeader, 0, sizeof(ElfHeader));
+ // ELF identification.
+ ElfHeader.e_ident[EI_MAG0] = ElfMagic[EI_MAG0];
+ ElfHeader.e_ident[EI_MAG1] = ElfMagic[EI_MAG1];
+ ElfHeader.e_ident[EI_MAG2] = ElfMagic[EI_MAG2];
+ ElfHeader.e_ident[EI_MAG3] = ElfMagic[EI_MAG3];
+ ElfHeader.e_ident[EI_CLASS] = ELFT::Is64Bits ? ELFCLASS64 : ELFCLASS32;
+ bool IsLittleEndian = ELFT::TargetEndianness == support::little;
+ ElfHeader.e_ident[EI_DATA] = IsLittleEndian ? ELFDATA2LSB : ELFDATA2MSB;
+ ElfHeader.e_ident[EI_VERSION] = EV_CURRENT;
+ ElfHeader.e_ident[EI_OSABI] = ELFOSABI_NONE;
+
+ // Remainder of ELF header.
+ ElfHeader.e_type = ET_DYN;
+ ElfHeader.e_machine = Machine;
+ ElfHeader.e_version = EV_CURRENT;
+ ElfHeader.e_ehsize = sizeof(typename ELFT::Ehdr);
+ ElfHeader.e_phentsize = sizeof(typename ELFT::Phdr);
+ ElfHeader.e_shentsize = sizeof(typename ELFT::Shdr);
+}
+
+namespace {
+template <class ELFT> struct OutputSection {
+ using Elf_Shdr = typename ELFT::Shdr;
+ std::string Name;
+ Elf_Shdr Shdr;
+ uint64_t Addr;
+ uint64_t Offset;
+ uint64_t Size;
+ uint64_t Align;
+ uint32_t Index;
+ bool NoBits = true;
+};
+
+template <class T, class ELFT>
+struct ContentSection : public OutputSection<ELFT> {
+ T Content;
+ ContentSection() { this->NoBits = false; }
+};
+
+// This class just wraps StringTableBuilder for the purpose of adding a
+// default constructor.
+class ELFStringTableBuilder : public StringTableBuilder {
+public:
+ ELFStringTableBuilder() : StringTableBuilder(StringTableBuilder::ELF) {}
+};
+
+template <class ELFT> class ELFSymbolTableBuilder {
+public:
+ using Elf_Sym = typename ELFT::Sym;
+
+ ELFSymbolTableBuilder() { Symbols.push_back({}); }
+
+ void add(size_t StNameOffset, uint64_t StSize, uint8_t StBind, uint8_t StType,
+ uint8_t StOther, uint16_t StShndx) {
+ Elf_Sym S{};
+ S.st_name = StNameOffset;
+ S.st_size = StSize;
+ S.st_info = (StBind << 4) | (StType & 0xf);
+ S.st_other = StOther;
+ S.st_shndx = StShndx;
+ Symbols.push_back(S);
+ }
+
+ size_t getSize() const { return Symbols.size() * sizeof(Elf_Sym); }
+
+ void write(uint8_t *Buf) const {
+ memcpy(Buf, Symbols.data(), sizeof(Elf_Sym) * Symbols.size());
+ }
+
+private:
+ llvm::SmallVector<Elf_Sym, 8> Symbols;
+};
+
+template <class ELFT> class ELFDynamicTableBuilder {
+public:
+ using Elf_Dyn = typename ELFT::Dyn;
+
+ size_t addAddr(uint64_t Tag, uint64_t Addr) {
+ Elf_Dyn Entry;
+ Entry.d_tag = Tag;
+ Entry.d_un.d_ptr = Addr;
+ Entries.push_back(Entry);
+ return Entries.size() - 1;
+ }
+
+ void modifyAddr(size_t Index, uint64_t Addr) {
+ Entries[Index].d_un.d_ptr = Addr;
+ }
+
+ size_t addValue(uint64_t Tag, uint64_t Value) {
+ Elf_Dyn Entry;
+ Entry.d_tag = Tag;
+ Entry.d_un.d_val = Value;
+ Entries.push_back(Entry);
+ return Entries.size() - 1;
+ }
+
+ void modifyValue(size_t Index, uint64_t Value) {
+ Entries[Index].d_un.d_val = Value;
+ }
+
+ size_t getSize() const {
+ // Add DT_NULL entry at the end.
+ return (Entries.size() + 1) * sizeof(Elf_Dyn);
+ }
+
+ void write(uint8_t *Buf) const {
+ memcpy(Buf, Entries.data(), sizeof(Elf_Dyn) * Entries.size());
+ // Add DT_NULL entry at the end.
+ memset(Buf + sizeof(Elf_Dyn) * Entries.size(), 0, sizeof(Elf_Dyn));
+ }
+
+private:
+ llvm::SmallVector<Elf_Dyn, 8> Entries;
+};
+
+template <class ELFT> class ELFStubBuilder {
+public:
+ using Elf_Ehdr = typename ELFT::Ehdr;
+ using Elf_Shdr = typename ELFT::Shdr;
+ using Elf_Phdr = typename ELFT::Phdr;
+ using Elf_Sym = typename ELFT::Sym;
+ using Elf_Addr = typename ELFT::Addr;
+ using Elf_Dyn = typename ELFT::Dyn;
+
+ ELFStubBuilder(const ELFStubBuilder &) = delete;
+ ELFStubBuilder(ELFStubBuilder &&) = default;
+
+ explicit ELFStubBuilder(const ELFStub &Stub) {
+ DynSym.Name = ".dynsym";
+ DynSym.Align = sizeof(Elf_Addr);
+ DynStr.Name = ".dynstr";
+ DynStr.Align = 1;
+ DynTab.Name = ".dynamic";
+ DynTab.Align = sizeof(Elf_Addr);
+ ShStrTab.Name = ".shstrtab";
+ ShStrTab.Align = 1;
+
+ // Populate string tables.
+ for (const ELFSymbol &Sym : Stub.Symbols)
+ DynStr.Content.add(Sym.Name);
+ for (const std::string &Lib : Stub.NeededLibs)
+ DynStr.Content.add(Lib);
+ if (Stub.SoName)
+ DynStr.Content.add(Stub.SoName.getValue());
+
+ std::vector<OutputSection<ELFT> *> Sections = {&DynSym, &DynStr, &DynTab,
+ &ShStrTab};
+ const OutputSection<ELFT> *LastSection = Sections.back();
+ // Now set the Index and put section names into ".shstrtab".
+ uint64_t Index = 1;
+ for (OutputSection<ELFT> *Sec : Sections) {
+ Sec->Index = Index++;
+ ShStrTab.Content.add(Sec->Name);
+ }
+ ShStrTab.Content.finalize();
+ ShStrTab.Size = ShStrTab.Content.getSize();
+ DynStr.Content.finalize();
+ DynStr.Size = DynStr.Content.getSize();
+
+ // Populate dynamic symbol table.
+ for (const ELFSymbol &Sym : Stub.Symbols) {
+ uint8_t Bind = Sym.Weak ? STB_WEAK : STB_GLOBAL;
+ // For non-undefined symbols, the value of shndx is not relevant at link
+ // time as long as it is not SHN_UNDEF. Set shndx to 1, which
+ // points to ".dynsym".
+ uint16_t Shndx = Sym.Undefined ? SHN_UNDEF : 1;
+ DynSym.Content.add(DynStr.Content.getOffset(Sym.Name), Sym.Size, Bind,
+ (uint8_t)Sym.Type, 0, Shndx);
+ }
+ DynSym.Size = DynSym.Content.getSize();
+
+ // Populate the dynamic table.
+ size_t DynSymIndex = DynTab.Content.addAddr(DT_SYMTAB, 0);
+ size_t DynStrIndex = DynTab.Content.addAddr(DT_STRTAB, 0);
+ for (const std::string &Lib : Stub.NeededLibs)
+ DynTab.Content.addValue(DT_NEEDED, DynStr.Content.getOffset(Lib));
+ if (Stub.SoName)
+ DynTab.Content.addValue(DT_SONAME,
+ DynStr.Content.getOffset(Stub.SoName.getValue()));
+ DynTab.Size = DynTab.Content.getSize();
+ // Calculate sections' addresses and offsets.
+ uint64_t CurrentOffset = sizeof(Elf_Ehdr);
+ for (OutputSection<ELFT> *Sec : Sections) {
+ Sec->Offset = alignTo(CurrentOffset, Sec->Align);
+ Sec->Addr = Sec->Offset;
+ CurrentOffset = Sec->Offset + Sec->Size;
+ }
+ // Fill the addresses back into the dynamic table.
+ DynTab.Content.modifyAddr(DynSymIndex, DynSym.Addr);
+ DynTab.Content.modifyAddr(DynStrIndex, DynStr.Addr);
+ // Fill in the section headers.
+ fillSymTabShdr(DynSym, SHT_DYNSYM);
+ fillStrTabShdr(DynStr, SHF_ALLOC);
+ fillDynTabShdr(DynTab);
+ fillStrTabShdr(ShStrTab);
+
+ // Finish initializing the ELF header.
+ initELFHeader<ELFT>(ElfHeader, Stub.Arch);
+ ElfHeader.e_shstrndx = ShStrTab.Index;
+ ElfHeader.e_shnum = LastSection->Index + 1;
+ ElfHeader.e_shoff =
+ alignTo(LastSection->Offset + LastSection->Size, sizeof(Elf_Addr));
+ }
+
+ size_t getSize() const {
+ return ElfHeader.e_shoff + ElfHeader.e_shnum * sizeof(Elf_Shdr);
+ }
+
+ void write(uint8_t *Data) const {
+ write(Data, ElfHeader);
+ DynSym.Content.write(Data + DynSym.Shdr.sh_offset);
+ DynStr.Content.write(Data + DynStr.Shdr.sh_offset);
+ DynTab.Content.write(Data + DynTab.Shdr.sh_offset);
+ ShStrTab.Content.write(Data + ShStrTab.Shdr.sh_offset);
+ writeShdr(Data, DynSym);
+ writeShdr(Data, DynStr);
+ writeShdr(Data, DynTab);
+ writeShdr(Data, ShStrTab);
+ }
+
+private:
+ Elf_Ehdr ElfHeader;
+ ContentSection<ELFStringTableBuilder, ELFT> DynStr;
+ ContentSection<ELFStringTableBuilder, ELFT> ShStrTab;
+ ContentSection<ELFSymbolTableBuilder<ELFT>, ELFT> DynSym;
+ ContentSection<ELFDynamicTableBuilder<ELFT>, ELFT> DynTab;
+
+ template <class T> static void write(uint8_t *Data, const T &Value) {
+ *reinterpret_cast<T *>(Data) = Value;
+ }
+
+ void fillStrTabShdr(ContentSection<ELFStringTableBuilder, ELFT> &StrTab,
+ uint32_t ShFlags = 0) const {
+ StrTab.Shdr.sh_type = SHT_STRTAB;
+ StrTab.Shdr.sh_flags = ShFlags;
+ StrTab.Shdr.sh_addr = StrTab.Addr;
+ StrTab.Shdr.sh_offset = StrTab.Offset;
+ StrTab.Shdr.sh_info = 0;
+ StrTab.Shdr.sh_size = StrTab.Size;
+ StrTab.Shdr.sh_name = ShStrTab.Content.getOffset(StrTab.Name);
+ StrTab.Shdr.sh_addralign = StrTab.Align;
+ StrTab.Shdr.sh_entsize = 0;
+ StrTab.Shdr.sh_link = 0;
+ }
+ void fillSymTabShdr(ContentSection<ELFSymbolTableBuilder<ELFT>, ELFT> &SymTab,
+ uint32_t ShType) const {
+ SymTab.Shdr.sh_type = ShType;
+ SymTab.Shdr.sh_flags = SHF_ALLOC;
+ SymTab.Shdr.sh_addr = SymTab.Addr;
+ SymTab.Shdr.sh_offset = SymTab.Offset;
+ SymTab.Shdr.sh_info = SymTab.Size / sizeof(Elf_Sym) > 1 ? 1 : 0;
+ SymTab.Shdr.sh_size = SymTab.Size;
+ SymTab.Shdr.sh_name = this->ShStrTab.Content.getOffset(SymTab.Name);
+ SymTab.Shdr.sh_addralign = SymTab.Align;
+ SymTab.Shdr.sh_entsize = sizeof(Elf_Sym);
+ SymTab.Shdr.sh_link = this->DynStr.Index;
+ }
+ void fillDynTabShdr(
+ ContentSection<ELFDynamicTableBuilder<ELFT>, ELFT> &DynTab) const {
+ DynTab.Shdr.sh_type = SHT_DYNAMIC;
+ DynTab.Shdr.sh_flags = SHF_ALLOC;
+ DynTab.Shdr.sh_addr = DynTab.Addr;
+ DynTab.Shdr.sh_offset = DynTab.Offset;
+ DynTab.Shdr.sh_info = 0;
+ DynTab.Shdr.sh_size = DynTab.Size;
+ DynTab.Shdr.sh_name = this->ShStrTab.Content.getOffset(DynTab.Name);
+ DynTab.Shdr.sh_addralign = DynTab.Align;
+ DynTab.Shdr.sh_entsize = sizeof(Elf_Dyn);
+ DynTab.Shdr.sh_link = this->DynStr.Index;
+ }
+ uint64_t shdrOffset(const OutputSection<ELFT> &Sec) const {
+ return ElfHeader.e_shoff + Sec.Index * sizeof(Elf_Shdr);
+ }
+
+ void writeShdr(uint8_t *Data, const OutputSection<ELFT> &Sec) const {
+ write(Data + shdrOffset(Sec), Sec.Shdr);
+ }
+};
+} // end anonymous namespace
+
+/// This function behaves similarly to StringRef::substr(), but attempts to
+/// terminate the returned StringRef at the first null terminator. If no null
+/// terminator is found, an error is returned.
+///
+/// @param Str Source string to create a substring from.
+/// @param Offset The start index of the desired substring.
+static Expected<StringRef> terminatedSubstr(StringRef Str, size_t Offset) {
+ size_t StrEnd = Str.find('\0', Offset);
+ if (StrEnd == StringRef::npos) {
+ return createError(
+ "String overran bounds of string table (no null terminator)");
+ }
+
+ size_t StrLen = StrEnd - Offset;
+ return Str.substr(Offset, StrLen);
+}
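+
+// Example (illustrative): with Str = "libc.so\0libm.so\0" and Offset = 8,
+// the result is "libm.so"; an Offset past the final '\0' yields the
+// overrun error instead.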
+
+/// This function takes an error and appends a string of text to its message.
+/// Since appending to an Error is not supported directly, this function
+/// creates a new error with the combined message and consumes the old error.
+///
+/// @param Err Source error.
+/// @param After Text to append at the end of Err's error message.
+Error appendToError(Error Err, StringRef After) {
+ std::string Message;
+ raw_string_ostream Stream(Message);
+ Stream << Err;
+ Stream << " " << After;
+ consumeError(std::move(Err));
+ return createError(Stream.str().c_str());
+}
+
+/// This function populates a DynamicEntries struct using an ELFT::DynRange.
+/// After populating the struct, the members are validated with
+/// some basic sanity checks.
+///
+/// @param Dyn Target DynamicEntries struct to populate.
+/// @param DynTable Source dynamic table.
+template <class ELFT>
+static Error populateDynamic(DynamicEntries &Dyn,
+ typename ELFT::DynRange DynTable) {
+ if (DynTable.empty())
+ return createError("No .dynamic section found");
+
+ // Search .dynamic for relevant entries.
+ bool FoundDynStr = false;
+ bool FoundDynStrSz = false;
+ bool FoundDynSym = false;
+ for (auto &Entry : DynTable) {
+ switch (Entry.d_tag) {
+ case DT_SONAME:
+ Dyn.SONameOffset = Entry.d_un.d_val;
+ break;
+ case DT_STRTAB:
+ Dyn.StrTabAddr = Entry.d_un.d_ptr;
+ FoundDynStr = true;
+ break;
+ case DT_STRSZ:
+ Dyn.StrSize = Entry.d_un.d_val;
+ FoundDynStrSz = true;
+ break;
+ case DT_NEEDED:
+ Dyn.NeededLibNames.push_back(Entry.d_un.d_val);
+ break;
+ case DT_SYMTAB:
+ Dyn.DynSymAddr = Entry.d_un.d_ptr;
+ FoundDynSym = true;
+ break;
+ case DT_HASH:
+ Dyn.ElfHash = Entry.d_un.d_ptr;
+ break;
+ case DT_GNU_HASH:
+ Dyn.GnuHash = Entry.d_un.d_ptr;
+ }
+ }
+
+ if (!FoundDynStr) {
+ return createError(
+ "Couldn't locate dynamic string table (no DT_STRTAB entry)");
+ }
+ if (!FoundDynStrSz) {
+ return createError(
+ "Couldn't determine dynamic string table size (no DT_STRSZ entry)");
+ }
+ if (!FoundDynSym) {
+ return createError(
+ "Couldn't locate dynamic symbol table (no DT_SYMTAB entry)");
+ }
+ if (Dyn.SONameOffset.hasValue() && *Dyn.SONameOffset >= Dyn.StrSize) {
+ return createStringError(object_error::parse_failed,
+ "DT_SONAME string offset (0x%016" PRIx64
+ ") outside of dynamic string table",
+ *Dyn.SONameOffset);
+ }
+ for (uint64_t Offset : Dyn.NeededLibNames) {
+ if (Offset >= Dyn.StrSize) {
+ return createStringError(object_error::parse_failed,
+ "DT_NEEDED string offset (0x%016" PRIx64
+ ") outside of dynamic string table",
+ Offset);
+ }
+ }
+
+ return Error::success();
+}
+
+/// This function extracts symbol type from a symbol's st_info member and
+/// maps it to an ELFSymbolType enum.
+/// Currently, STT_NOTYPE, STT_OBJECT, STT_FUNC, and STT_TLS are supported.
+/// Other symbol types are mapped to ELFSymbolType::Unknown.
+///
+/// @param Info Binary symbol st_info to extract symbol type from.
+static ELFSymbolType convertInfoToType(uint8_t Info) {
+ Info = Info & 0xf;
+ switch (Info) {
+ case ELF::STT_NOTYPE:
+ return ELFSymbolType::NoType;
+ case ELF::STT_OBJECT:
+ return ELFSymbolType::Object;
+ case ELF::STT_FUNC:
+ return ELFSymbolType::Func;
+ case ELF::STT_TLS:
+ return ELFSymbolType::TLS;
+ default:
+ return ELFSymbolType::Unknown;
+ }
+}
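+
+// Example (illustrative): st_info = 0x12 encodes STB_GLOBAL in the high
+// nibble and STT_FUNC in the low nibble, so convertInfoToType(0x12) masks
+// off the binding and returns ELFSymbolType::Func.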
+
+/// This function creates an ELFSymbol and populates all members using
+/// information from a binary ELFT::Sym.
+///
+/// @param SymName The desired name of the ELFSymbol.
+/// @param RawSym ELFT::Sym to extract symbol information from.
+template <class ELFT>
+static ELFSymbol createELFSym(StringRef SymName,
+ const typename ELFT::Sym &RawSym) {
+ ELFSymbol TargetSym{std::string(SymName)};
+ TargetSym.Weak = RawSym.getBinding() == STB_WEAK;
+
+ TargetSym.Undefined = RawSym.isUndefined();
+ TargetSym.Type = convertInfoToType(RawSym.st_info);
+
+ if (TargetSym.Type == ELFSymbolType::Func) {
+ TargetSym.Size = 0;
+ } else {
+ TargetSym.Size = RawSym.st_size;
+ }
+ return TargetSym;
+}
+
+/// This function populates an ELFStub with symbols using information read
+/// from an ELF binary.
+///
+/// @param TargetStub ELFStub to add symbols to.
+/// @param DynSym Range of dynamic symbols to add to TargetStub.
+/// @param DynStr StringRef to the dynamic string table.
+template <class ELFT>
+static Error populateSymbols(ELFStub &TargetStub,
+ const typename ELFT::SymRange DynSym,
+ StringRef DynStr) {
+ // Skip the first symbol, since it's the null symbol.
+ for (auto RawSym : DynSym.drop_front(1)) {
+ // If a symbol does not have global or weak binding, ignore it.
+ uint8_t Binding = RawSym.getBinding();
+ if (!(Binding == STB_GLOBAL || Binding == STB_WEAK))
+ continue;
+ // If a symbol doesn't have default or protected visibility, ignore it.
+ uint8_t Visibility = RawSym.getVisibility();
+ if (!(Visibility == STV_DEFAULT || Visibility == STV_PROTECTED))
+ continue;
+ // Create an ELFSymbol and populate it with information from the symbol
+ // table entry.
+ Expected<StringRef> SymName = terminatedSubstr(DynStr, RawSym.st_name);
+ if (!SymName)
+ return SymName.takeError();
+ ELFSymbol Sym = createELFSym<ELFT>(*SymName, RawSym);
+ TargetStub.Symbols.insert(std::move(Sym));
+ // TODO: Populate symbol warning.
+ }
+ return Error::success();
+}
+
+/// Returns a new ELFStub with all members populated from an ELFObjectFile.
+/// @param ElfObj Source ELFObjectFile.
+template <class ELFT>
+static Expected<std::unique_ptr<ELFStub>>
+buildStub(const ELFObjectFile<ELFT> &ElfObj) {
+ using Elf_Dyn_Range = typename ELFT::DynRange;
+ using Elf_Phdr_Range = typename ELFT::PhdrRange;
+ using Elf_Sym_Range = typename ELFT::SymRange;
+ using Elf_Sym = typename ELFT::Sym;
+ std::unique_ptr<ELFStub> DestStub = std::make_unique<ELFStub>();
+ const ELFFile<ELFT> &ElfFile = ElfObj.getELFFile();
+ // Fetch .dynamic table.
+ Expected<Elf_Dyn_Range> DynTable = ElfFile.dynamicEntries();
+ if (!DynTable) {
+ return DynTable.takeError();
+ }
+
+ // Fetch program headers.
+ Expected<Elf_Phdr_Range> PHdrs = ElfFile.program_headers();
+ if (!PHdrs) {
+ return PHdrs.takeError();
+ }
+
+ // Collect relevant .dynamic entries.
+ DynamicEntries DynEnt;
+ if (Error Err = populateDynamic<ELFT>(DynEnt, *DynTable))
+ return std::move(Err);
+
+ // Get pointer to in-memory location of .dynstr section.
+ Expected<const uint8_t *> DynStrPtr = ElfFile.toMappedAddr(DynEnt.StrTabAddr);
+ if (!DynStrPtr)
+ return appendToError(DynStrPtr.takeError(),
+ "when locating .dynstr section contents");
+
+ StringRef DynStr(reinterpret_cast<const char *>(DynStrPtr.get()),
+ DynEnt.StrSize);
+
+ // Populate Arch from ELF header.
+ DestStub->Arch = ElfFile.getHeader().e_machine;
+
+ // Populate SoName from .dynamic entries and dynamic string table.
+ if (DynEnt.SONameOffset.hasValue()) {
+ Expected<StringRef> NameOrErr =
+ terminatedSubstr(DynStr, *DynEnt.SONameOffset);
+ if (!NameOrErr) {
+ return appendToError(NameOrErr.takeError(), "when reading DT_SONAME");
+ }
+ DestStub->SoName = std::string(*NameOrErr);
+ }
+
+ // Populate NeededLibs from .dynamic entries and dynamic string table.
+ for (uint64_t NeededStrOffset : DynEnt.NeededLibNames) {
+ Expected<StringRef> LibNameOrErr =
+ terminatedSubstr(DynStr, NeededStrOffset);
+ if (!LibNameOrErr) {
+ return appendToError(LibNameOrErr.takeError(), "when reading DT_NEEDED");
+ }
+ DestStub->NeededLibs.push_back(std::string(*LibNameOrErr));
+ }
+
+ // Populate Symbols from .dynsym table and dynamic string table.
+ Expected<uint64_t> SymCount = ElfFile.getDynSymtabSize();
+ if (!SymCount)
+ return SymCount.takeError();
+ if (*SymCount > 0) {
+ // Get pointer to in-memory location of .dynsym section.
+ Expected<const uint8_t *> DynSymPtr =
+ ElfFile.toMappedAddr(DynEnt.DynSymAddr);
+ if (!DynSymPtr)
+ return appendToError(DynSymPtr.takeError(),
+ "when locating .dynsym section contents");
+ Elf_Sym_Range DynSyms = ArrayRef<Elf_Sym>(
+ reinterpret_cast<const Elf_Sym *>(*DynSymPtr), *SymCount);
+ Error SymReadError = populateSymbols<ELFT>(*DestStub, DynSyms, DynStr);
+ if (SymReadError)
+ return appendToError(std::move(SymReadError),
+ "when reading dynamic symbols");
+ }
+
+ return std::move(DestStub);
+}
+
+/// This function opens a file for writing and then writes a binary ELF stub to
+/// the file.
+///
+/// @param FilePath File path for writing the ELF binary.
+/// @param Stub Source ELFStub to generate a binary ELF stub from.
+template <class ELFT>
+static Error writeELFBinaryToFile(StringRef FilePath, const ELFStub &Stub,
+ bool WriteIfChanged) {
+ ELFStubBuilder<ELFT> Builder{Stub};
+ // Write Stub to memory first.
+ std::vector<uint8_t> Buf(Builder.getSize());
+ Builder.write(Buf.data());
+
+ if (WriteIfChanged) {
+ if (ErrorOr<std::unique_ptr<MemoryBuffer>> BufOrError =
+ MemoryBuffer::getFile(FilePath)) {
+ // Compare the newly built stub with the existing stub file;
+ // if the file is unchanged, skip rewriting it.
+ if ((*BufOrError)->getBufferSize() == Builder.getSize() &&
+ !memcmp((*BufOrError)->getBufferStart(), Buf.data(),
+ Builder.getSize()))
+ return Error::success();
+ }
+ }
+
+ Expected<std::unique_ptr<FileOutputBuffer>> BufOrError =
+ FileOutputBuffer::create(FilePath, Builder.getSize());
+ if (!BufOrError)
+ return createStringError(errc::invalid_argument,
+ toString(BufOrError.takeError()) +
+ " when trying to open `" + FilePath +
+ "` for writing");
+
+ // Write binary to file.
+ std::unique_ptr<FileOutputBuffer> FileBuf = std::move(*BufOrError);
+ memcpy(FileBuf->getBufferStart(), Buf.data(), Buf.size());
+
+ return FileBuf->commit();
+}
+
+Expected<std::unique_ptr<ELFStub>> readELFFile(MemoryBufferRef Buf) {
+ Expected<std::unique_ptr<Binary>> BinOrErr = createBinary(Buf);
+ if (!BinOrErr) {
+ return BinOrErr.takeError();
+ }
+
+ Binary *Bin = BinOrErr->get();
+ if (auto Obj = dyn_cast<ELFObjectFile<ELF32LE>>(Bin)) {
+ return buildStub(*Obj);
+ } else if (auto Obj = dyn_cast<ELFObjectFile<ELF64LE>>(Bin)) {
+ return buildStub(*Obj);
+ } else if (auto Obj = dyn_cast<ELFObjectFile<ELF32BE>>(Bin)) {
+ return buildStub(*Obj);
+ } else if (auto Obj = dyn_cast<ELFObjectFile<ELF64BE>>(Bin)) {
+ return buildStub(*Obj);
+ }
+ return createStringError(errc::not_supported, "unsupported binary format");
+}
+
+// This function wraps the templated writeELFBinaryToFile() so that
+// writeBinaryStub() can be called without using ELFType templates directly.
+Error writeBinaryStub(StringRef FilePath, const ELFStub &Stub,
+ ELFTarget OutputFormat, bool WriteIfChanged) {
+ if (OutputFormat == ELFTarget::ELF32LE)
+ return writeELFBinaryToFile<ELF32LE>(FilePath, Stub, WriteIfChanged);
+ if (OutputFormat == ELFTarget::ELF32BE)
+ return writeELFBinaryToFile<ELF32BE>(FilePath, Stub, WriteIfChanged);
+ if (OutputFormat == ELFTarget::ELF64LE)
+ return writeELFBinaryToFile<ELF64LE>(FilePath, Stub, WriteIfChanged);
+ if (OutputFormat == ELFTarget::ELF64BE)
+ return writeELFBinaryToFile<ELF64BE>(FilePath, Stub, WriteIfChanged);
+ llvm_unreachable("invalid binary output target");
+}
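+
+// For illustration only: a minimal, hypothetical call sequence for the API
+// above (the stub contents and output path are placeholders, not part of
+// this file):
+//
+//   ELFStub Stub;
+//   Stub.Arch = ELF::EM_X86_64;
+//   Stub.SoName = "libexample.so.1";
+//   if (Error E = writeBinaryStub("/tmp/libexample.so", Stub,
+//                                 ELFTarget::ELF64LE,
+//                                 /*WriteIfChanged=*/true))
+//     logAllUnhandledErrors(std::move(E), errs(), "writeBinaryStub: ");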
+
+} // end namespace elfabi
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/InterfaceStub/ELFStub.cpp b/contrib/libs/llvm12/lib/InterfaceStub/ELFStub.cpp
new file mode 100644
index 00000000000..3c637695d8e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/InterfaceStub/ELFStub.cpp
@@ -0,0 +1,28 @@
+//===- ELFStub.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InterfaceStub/ELFStub.h"
+
+using namespace llvm;
+using namespace llvm::elfabi;
+
+ELFStub::ELFStub(ELFStub const &Stub) {
+ TbeVersion = Stub.TbeVersion;
+ Arch = Stub.Arch;
+ SoName = Stub.SoName;
+ NeededLibs = Stub.NeededLibs;
+ Symbols = Stub.Symbols;
+}
+
+ELFStub::ELFStub(ELFStub &&Stub) {
+ TbeVersion = std::move(Stub.TbeVersion);
+ Arch = std::move(Stub.Arch);
+ SoName = std::move(Stub.SoName);
+ NeededLibs = std::move(Stub.NeededLibs);
+ Symbols = std::move(Stub.Symbols);
+}
diff --git a/contrib/libs/llvm12/lib/InterfaceStub/TBEHandler.cpp b/contrib/libs/llvm12/lib/InterfaceStub/TBEHandler.cpp
new file mode 100644
index 00000000000..ee95d21ee66
--- /dev/null
+++ b/contrib/libs/llvm12/lib/InterfaceStub/TBEHandler.cpp
@@ -0,0 +1,143 @@
+//===- TBEHandler.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InterfaceStub/TBEHandler.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/InterfaceStub/ELFStub.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/YAMLTraits.h"
+
+using namespace llvm;
+using namespace llvm::elfabi;
+
+LLVM_YAML_STRONG_TYPEDEF(ELFArch, ELFArchMapper)
+
+namespace llvm {
+namespace yaml {
+
+/// YAML traits for ELFSymbolType.
+template <> struct ScalarEnumerationTraits<ELFSymbolType> {
+ static void enumeration(IO &IO, ELFSymbolType &SymbolType) {
+ IO.enumCase(SymbolType, "NoType", ELFSymbolType::NoType);
+ IO.enumCase(SymbolType, "Func", ELFSymbolType::Func);
+ IO.enumCase(SymbolType, "Object", ELFSymbolType::Object);
+ IO.enumCase(SymbolType, "TLS", ELFSymbolType::TLS);
+ IO.enumCase(SymbolType, "Unknown", ELFSymbolType::Unknown);
+ // Treat other symbol types as noise, and map to Unknown.
+ if (!IO.outputting() && IO.matchEnumFallback())
+ SymbolType = ELFSymbolType::Unknown;
+ }
+};
+
+/// YAML traits for ELFArch.
+template <> struct ScalarTraits<ELFArchMapper> {
+ static void output(const ELFArchMapper &Value, void *,
+ llvm::raw_ostream &Out) {
+ // Map from integer to architecture string.
+ switch (Value) {
+ case (ELFArch)ELF::EM_X86_64:
+ Out << "x86_64";
+ break;
+ case (ELFArch)ELF::EM_AARCH64:
+ Out << "AArch64";
+ break;
+ case (ELFArch)ELF::EM_NONE:
+ default:
+ Out << "Unknown";
+ }
+ }
+
+ static StringRef input(StringRef Scalar, void *, ELFArchMapper &Value) {
+ // Map from architecture string to integer.
+ Value = StringSwitch<ELFArch>(Scalar)
+ .Case("x86_64", ELF::EM_X86_64)
+ .Case("AArch64", ELF::EM_AARCH64)
+ .Case("Unknown", ELF::EM_NONE)
+ .Default(ELF::EM_NONE);
+
+ // Returning empty StringRef indicates successful parse.
+ return StringRef();
+ }
+
+ // Don't place quotation marks around architecture value.
+ static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+
+/// YAML traits for ELFSymbol.
+template <> struct MappingTraits<ELFSymbol> {
+ static void mapping(IO &IO, ELFSymbol &Symbol) {
+ IO.mapRequired("Type", Symbol.Type);
+ // The need for symbol size depends on the symbol type.
+ if (Symbol.Type == ELFSymbolType::NoType) {
+ IO.mapOptional("Size", Symbol.Size, (uint64_t)0);
+ } else if (Symbol.Type == ELFSymbolType::Func) {
+ Symbol.Size = 0;
+ } else {
+ IO.mapRequired("Size", Symbol.Size);
+ }
+ IO.mapOptional("Undefined", Symbol.Undefined, false);
+ IO.mapOptional("Weak", Symbol.Weak, false);
+ IO.mapOptional("Warning", Symbol.Warning);
+ }
+
+ // Compacts symbol information into a single line.
+ static const bool flow = true;
+};
+
+/// YAML traits for set of ELFSymbols.
+template <> struct CustomMappingTraits<std::set<ELFSymbol>> {
+ static void inputOne(IO &IO, StringRef Key, std::set<ELFSymbol> &Set) {
+ ELFSymbol Sym(Key.str());
+ IO.mapRequired(Key.str().c_str(), Sym);
+ Set.insert(Sym);
+ }
+
+ static void output(IO &IO, std::set<ELFSymbol> &Set) {
+ for (auto &Sym : Set)
+ IO.mapRequired(Sym.Name.c_str(), const_cast<ELFSymbol &>(Sym));
+ }
+};
+
+/// YAML traits for ELFStub objects.
+template <> struct MappingTraits<ELFStub> {
+ static void mapping(IO &IO, ELFStub &Stub) {
+ if (!IO.mapTag("!tapi-tbe", true))
+ IO.setError("Not a .tbe YAML file.");
+ IO.mapRequired("TbeVersion", Stub.TbeVersion);
+ IO.mapOptional("SoName", Stub.SoName);
+ IO.mapRequired("Arch", (ELFArchMapper &)Stub.Arch);
+ IO.mapOptional("NeededLibs", Stub.NeededLibs);
+ IO.mapRequired("Symbols", Stub.Symbols);
+ }
+};
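+
+// For illustration, a .tbe document matching the mapping above might look
+// like this (the library and symbol names are hypothetical):
+//
+//   --- !tapi-tbe
+//   TbeVersion: 1.0
+//   SoName: libexample.so.1
+//   Arch: x86_64
+//   NeededLibs: [ libc.so.6 ]
+//   Symbols:
+//     foo: { Type: Func }
+//     bar: { Type: Object, Size: 4 }
+//   ...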
+
+} // end namespace yaml
+} // end namespace llvm
+
+Expected<std::unique_ptr<ELFStub>> elfabi::readTBEFromBuffer(StringRef Buf) {
+ yaml::Input YamlIn(Buf);
+ std::unique_ptr<ELFStub> Stub(new ELFStub());
+ YamlIn >> *Stub;
+ if (std::error_code Err = YamlIn.error())
+ return createStringError(Err, "YAML failed reading as TBE");
+
+ if (Stub->TbeVersion > elfabi::TBEVersionCurrent)
+ return make_error<StringError>(
+ "TBE version " + Stub->TbeVersion.getAsString() + " is unsupported.",
+ std::make_error_code(std::errc::invalid_argument));
+
+ return std::move(Stub);
+}
+
+Error elfabi::writeTBEToOutputStream(raw_ostream &OS, const ELFStub &Stub) {
+ yaml::Output YamlOut(OS, /*Ctxt=*/nullptr, /*WrapColumn=*/0);
+
+ YamlOut << const_cast<ELFStub &>(Stub);
+ return Error::success();
+}
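+
+// A minimal round-trip sketch, for illustration (assumes `Buf` holds a valid
+// .tbe document such as the one shown above):
+//
+//   Expected<std::unique_ptr<ELFStub>> StubOrErr = readTBEFromBuffer(Buf);
+//   if (!StubOrErr)
+//     return StubOrErr.takeError();
+//   if (Error E = writeTBEToOutputStream(outs(), **StubOrErr))
+//     return E;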
diff --git a/contrib/libs/llvm12/lib/LTO/Caching.cpp b/contrib/libs/llvm12/lib/LTO/Caching.cpp
new file mode 100644
index 00000000000..75a89e729f4
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LTO/Caching.cpp
@@ -0,0 +1,151 @@
+//===-Caching.cpp - LLVM Link Time Optimizer Cache Handling ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements local file-system caching for ThinLTO.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LTO/Caching.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#else
+#include <io.h>
+#endif
+
+using namespace llvm;
+using namespace llvm::lto;
+
+Expected<NativeObjectCache> lto::localCache(StringRef CacheDirectoryPath,
+ AddBufferFn AddBuffer) {
+ if (std::error_code EC = sys::fs::create_directories(CacheDirectoryPath))
+ return errorCodeToError(EC);
+
+ return [=](unsigned Task, StringRef Key) -> AddStreamFn {
+ // This choice of file name allows the cache to be pruned (see pruneCache()
+ // in include/llvm/Support/CachePruning.h).
+ SmallString<64> EntryPath;
+ sys::path::append(EntryPath, CacheDirectoryPath, "llvmcache-" + Key);
+ // First, see if we have a cache hit.
+ SmallString<64> ResultPath;
+ Expected<sys::fs::file_t> FDOrErr = sys::fs::openNativeFileForRead(
+ Twine(EntryPath), sys::fs::OF_UpdateAtime, &ResultPath);
+ std::error_code EC;
+ if (FDOrErr) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr =
+ MemoryBuffer::getOpenFile(*FDOrErr, EntryPath,
+ /*FileSize=*/-1,
+ /*RequiresNullTerminator=*/false);
+ sys::fs::closeFile(*FDOrErr);
+ if (MBOrErr) {
+ AddBuffer(Task, std::move(*MBOrErr));
+ return AddStreamFn();
+ }
+ EC = MBOrErr.getError();
+ } else {
+ EC = errorToErrorCode(FDOrErr.takeError());
+ }
+
+ // On Windows we can fail to open a cache file with a permission denied
+ // error. This generally means that another process has requested to delete
+ // the file while it is still open, but it could also mean that another
+ // process has opened the file without the sharing permissions we need.
+ // Since the file is probably being deleted we handle it in the same way as
+ // if the file did not exist at all.
+ if (EC != errc::no_such_file_or_directory && EC != errc::permission_denied)
+ report_fatal_error(Twine("Failed to open cache file ") + EntryPath +
+ ": " + EC.message() + "\n");
+
+ // This native object stream is responsible for committing the resulting
+ // file to the cache and calling AddBuffer to add it to the link.
+ struct CacheStream : NativeObjectStream {
+ AddBufferFn AddBuffer;
+ sys::fs::TempFile TempFile;
+ std::string EntryPath;
+ unsigned Task;
+
+ CacheStream(std::unique_ptr<raw_pwrite_stream> OS, AddBufferFn AddBuffer,
+ sys::fs::TempFile TempFile, std::string EntryPath,
+ unsigned Task)
+ : NativeObjectStream(std::move(OS)), AddBuffer(std::move(AddBuffer)),
+ TempFile(std::move(TempFile)), EntryPath(std::move(EntryPath)),
+ Task(Task) {}
+
+ ~CacheStream() {
+ // Make sure the stream is closed before committing it.
+ OS.reset();
+
+ // Open the file first to avoid racing with a cache pruner.
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr =
+ MemoryBuffer::getOpenFile(
+ sys::fs::convertFDToNativeFile(TempFile.FD), TempFile.TmpName,
+ /*FileSize=*/-1, /*RequiresNullTerminator=*/false);
+ if (!MBOrErr)
+ report_fatal_error(Twine("Failed to open new cache file ") +
+ TempFile.TmpName + ": " +
+ MBOrErr.getError().message() + "\n");
+
+ // On POSIX systems, this will atomically replace the destination if
+ // it already exists. We try to emulate this on Windows, but this may
+ // fail with a permission denied error (for example, if the destination
+ // is currently opened by another process that does not give us the
+ // sharing permissions we need). Since the existing file should be
+ // semantically equivalent to the one we are trying to write, we give
+ // AddBuffer a copy of the bytes we wrote in that case. We do this
+ // instead of just using the existing file, because the pruner might
+ // delete the file before we get a chance to use it.
+ Error E = TempFile.keep(EntryPath);
+ E = handleErrors(std::move(E), [&](const ECError &E) -> Error {
+ std::error_code EC = E.convertToErrorCode();
+ if (EC != errc::permission_denied)
+ return errorCodeToError(EC);
+
+ auto MBCopy = MemoryBuffer::getMemBufferCopy((*MBOrErr)->getBuffer(),
+ EntryPath);
+ MBOrErr = std::move(MBCopy);
+
+ // FIXME: should we consume the discard error?
+ consumeError(TempFile.discard());
+
+ return Error::success();
+ });
+
+ if (E)
+ report_fatal_error(Twine("Failed to rename temporary file ") +
+ TempFile.TmpName + " to " + EntryPath + ": " +
+ toString(std::move(E)) + "\n");
+
+ AddBuffer(Task, std::move(*MBOrErr));
+ }
+ };
+
+ return [=](size_t Task) -> std::unique_ptr<NativeObjectStream> {
+ // Write to a temporary file first to avoid a race with the cache pruner.
+ SmallString<64> TempFilenameModel;
+ sys::path::append(TempFilenameModel, CacheDirectoryPath, "Thin-%%%%%%.tmp.o");
+ Expected<sys::fs::TempFile> Temp = sys::fs::TempFile::create(
+ TempFilenameModel, sys::fs::owner_read | sys::fs::owner_write);
+ if (!Temp) {
+ errs() << "Error: " << toString(Temp.takeError()) << "\n";
+ report_fatal_error("ThinLTO: Can't get a temporary file");
+ }
+
+ // This CacheStream will move the temporary file into the cache when done.
+ return std::make_unique<CacheStream>(
+ std::make_unique<raw_fd_ostream>(Temp->FD, /* ShouldClose */ false),
+ AddBuffer, std::move(*Temp), std::string(EntryPath.str()), Task);
+ };
+ };
+}
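+
+// Illustrative wiring of the cache, under stated assumptions (`CacheDir` and
+// the body of `AddBuffer` are placeholders, not part of this file):
+//
+//   auto AddBuffer = [&](unsigned Task, std::unique_ptr<MemoryBuffer> MB) {
+//     // Hand the cached or freshly generated object file to the link.
+//   };
+//   Expected<NativeObjectCache> CacheOrErr = localCache(CacheDir, AddBuffer);
+//   if (!CacheOrErr)
+//     return CacheOrErr.takeError();
+//   // Pass *CacheOrErr to LTO::run() alongside an AddStream callback.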
diff --git a/contrib/libs/llvm12/lib/LTO/LTO.cpp b/contrib/libs/llvm12/lib/LTO/LTO.cpp
new file mode 100644
index 00000000000..9103d11059e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LTO/LTO.cpp
@@ -0,0 +1,1544 @@
+//===-LTO.cpp - LLVM Link Time Optimizer ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements functions and classes used to support LTO.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LTO/LTO.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/Analysis/StackSafetyAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/CodeGen/Analysis.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMRemarkStreamer.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/LTO/LTOBackend.h"
+#include "llvm/LTO/SummaryBasedOptimizations.h"
+#include "llvm/Linker/IRMover.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SHA1.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/ThreadPool.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/TimeProfiler.h"
+#include "llvm/Support/VCSRevision.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
+#include "llvm/Transforms/Utils/FunctionImportUtils.h"
+#include "llvm/Transforms/Utils/SplitModule.h"
+
+#include <set>
+
+using namespace llvm;
+using namespace lto;
+using namespace object;
+
+#define DEBUG_TYPE "lto"
+
+static cl::opt<bool>
+ DumpThinCGSCCs("dump-thin-cg-sccs", cl::init(false), cl::Hidden,
+ cl::desc("Dump the SCCs in the ThinLTO index's callgraph"));
+
+/// Enable global value internalization in LTO.
+cl::opt<bool> EnableLTOInternalization(
+ "enable-lto-internalization", cl::init(true), cl::Hidden,
+ cl::desc("Enable global value internalization in LTO"));
+
+// Computes a unique hash for the Module considering the current list of
+// export/import and other global analysis results.
+// The hash is produced in \p Key.
+void llvm::computeLTOCacheKey(
+ SmallString<40> &Key, const Config &Conf, const ModuleSummaryIndex &Index,
+ StringRef ModuleID, const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
+ const GVSummaryMapTy &DefinedGlobals,
+ const std::set<GlobalValue::GUID> &CfiFunctionDefs,
+ const std::set<GlobalValue::GUID> &CfiFunctionDecls) {
+ // Compute the unique hash for this entry.
+ // This is based on the current compiler version, the module itself, the
+ // export list, the hash for every single module in the import list, the
+ // list of ResolvedODR for the module, and the list of preserved symbols.
+ SHA1 Hasher;
+
+ // Start with the compiler revision
+ Hasher.update(LLVM_VERSION_STRING);
+#ifdef LLVM_REVISION
+ Hasher.update(LLVM_REVISION);
+#endif
+
+ // Include the parts of the LTO configuration that affect code generation.
+ auto AddString = [&](StringRef Str) {
+ Hasher.update(Str);
+ Hasher.update(ArrayRef<uint8_t>{0});
+ };
+ auto AddUnsigned = [&](unsigned I) {
+ uint8_t Data[4];
+ support::endian::write32le(Data, I);
+ Hasher.update(ArrayRef<uint8_t>{Data, 4});
+ };
+ auto AddUint64 = [&](uint64_t I) {
+ uint8_t Data[8];
+ support::endian::write64le(Data, I);
+ Hasher.update(ArrayRef<uint8_t>{Data, 8});
+ };
+ AddString(Conf.CPU);
+ // FIXME: Hash more of Options. For now all clients initialize Options from
+ // command-line flags (which is unsupported in production), but may set
+ // RelaxELFRelocations. The clang driver can also pass FunctionSections,
+ // DataSections and DebuggerTuning via command line flags.
+ AddUnsigned(Conf.Options.RelaxELFRelocations);
+ AddUnsigned(Conf.Options.FunctionSections);
+ AddUnsigned(Conf.Options.DataSections);
+ AddUnsigned((unsigned)Conf.Options.DebuggerTuning);
+ for (auto &A : Conf.MAttrs)
+ AddString(A);
+ if (Conf.RelocModel)
+ AddUnsigned(*Conf.RelocModel);
+ else
+ AddUnsigned(-1);
+ if (Conf.CodeModel)
+ AddUnsigned(*Conf.CodeModel);
+ else
+ AddUnsigned(-1);
+ AddUnsigned(Conf.CGOptLevel);
+ AddUnsigned(Conf.CGFileType);
+ AddUnsigned(Conf.OptLevel);
+ AddUnsigned(Conf.UseNewPM);
+ AddUnsigned(Conf.Freestanding);
+ AddString(Conf.OptPipeline);
+ AddString(Conf.AAPipeline);
+ AddString(Conf.OverrideTriple);
+ AddString(Conf.DefaultTriple);
+ AddString(Conf.DwoDir);
+
+ // Include the hash for the current module
+ auto ModHash = Index.getModuleHash(ModuleID);
+ Hasher.update(ArrayRef<uint8_t>((uint8_t *)&ModHash[0], sizeof(ModHash)));
+
+ std::vector<uint64_t> ExportsGUID;
+ ExportsGUID.reserve(ExportList.size());
+ for (const auto &VI : ExportList) {
+ auto GUID = VI.getGUID();
+ ExportsGUID.push_back(GUID);
+ }
+
+ // Sort the export list elements GUIDs.
+ llvm::sort(ExportsGUID);
+ for (uint64_t GUID : ExportsGUID) {
+ // The export list can impact internalization; be conservative here.
+ Hasher.update(ArrayRef<uint8_t>((uint8_t *)&GUID, sizeof(GUID)));
+ }
+
+ // Include the hash for every module we import functions from. The set of
+ // imported symbols for each module may affect code generation and is
+ // sensitive to link order, so include that as well.
+ using ImportMapIteratorTy = FunctionImporter::ImportMapTy::const_iterator;
+ std::vector<ImportMapIteratorTy> ImportModulesVector;
+ ImportModulesVector.reserve(ImportList.size());
+
+ for (ImportMapIteratorTy It = ImportList.begin(); It != ImportList.end();
+ ++It) {
+ ImportModulesVector.push_back(It);
+ }
+ llvm::sort(ImportModulesVector,
+ [](const ImportMapIteratorTy &Lhs, const ImportMapIteratorTy &Rhs)
+ -> bool { return Lhs->getKey() < Rhs->getKey(); });
+ for (const ImportMapIteratorTy &EntryIt : ImportModulesVector) {
+ auto ModHash = Index.getModuleHash(EntryIt->first());
+ Hasher.update(ArrayRef<uint8_t>((uint8_t *)&ModHash[0], sizeof(ModHash)));
+
+ AddUint64(EntryIt->second.size());
+ for (auto &Fn : EntryIt->second)
+ AddUint64(Fn);
+ }
+
+ // Include the hash for the resolved ODR.
+ for (auto &Entry : ResolvedODR) {
+ Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&Entry.first,
+ sizeof(GlobalValue::GUID)));
+ Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&Entry.second,
+ sizeof(GlobalValue::LinkageTypes)));
+ }
+
+ // Members of CfiFunctionDefs and CfiFunctionDecls that are referenced or
+ // defined in this module.
+ std::set<GlobalValue::GUID> UsedCfiDefs;
+ std::set<GlobalValue::GUID> UsedCfiDecls;
+
+ // Typeids used in this module.
+ std::set<GlobalValue::GUID> UsedTypeIds;
+
+ auto AddUsedCfiGlobal = [&](GlobalValue::GUID ValueGUID) {
+ if (CfiFunctionDefs.count(ValueGUID))
+ UsedCfiDefs.insert(ValueGUID);
+ if (CfiFunctionDecls.count(ValueGUID))
+ UsedCfiDecls.insert(ValueGUID);
+ };
+
+ auto AddUsedThings = [&](GlobalValueSummary *GS) {
+ if (!GS) return;
+ AddUnsigned(GS->isLive());
+ AddUnsigned(GS->canAutoHide());
+ for (const ValueInfo &VI : GS->refs()) {
+ AddUnsigned(VI.isDSOLocal());
+ AddUsedCfiGlobal(VI.getGUID());
+ }
+ if (auto *GVS = dyn_cast<GlobalVarSummary>(GS)) {
+ AddUnsigned(GVS->maybeReadOnly());
+ AddUnsigned(GVS->maybeWriteOnly());
+ }
+ if (auto *FS = dyn_cast<FunctionSummary>(GS)) {
+ for (auto &TT : FS->type_tests())
+ UsedTypeIds.insert(TT);
+ for (auto &TT : FS->type_test_assume_vcalls())
+ UsedTypeIds.insert(TT.GUID);
+ for (auto &TT : FS->type_checked_load_vcalls())
+ UsedTypeIds.insert(TT.GUID);
+ for (auto &TT : FS->type_test_assume_const_vcalls())
+ UsedTypeIds.insert(TT.VFunc.GUID);
+ for (auto &TT : FS->type_checked_load_const_vcalls())
+ UsedTypeIds.insert(TT.VFunc.GUID);
+ for (auto &ET : FS->calls()) {
+ AddUnsigned(ET.first.isDSOLocal());
+ AddUsedCfiGlobal(ET.first.getGUID());
+ }
+ }
+ };
+
+ // Include the hash for the linkage type to reflect internalization and weak
+ // resolution, and collect any used type identifier resolutions.
+ for (auto &GS : DefinedGlobals) {
+ GlobalValue::LinkageTypes Linkage = GS.second->linkage();
+ Hasher.update(
+ ArrayRef<uint8_t>((const uint8_t *)&Linkage, sizeof(Linkage)));
+ AddUsedCfiGlobal(GS.first);
+ AddUsedThings(GS.second);
+ }
+
+ // Imported functions may introduce new uses of type identifier resolutions,
+ // so we need to collect their used resolutions as well.
+ for (auto &ImpM : ImportList)
+ for (auto &ImpF : ImpM.second) {
+ GlobalValueSummary *S = Index.findSummaryInModule(ImpF, ImpM.first());
+ AddUsedThings(S);
+ // If this is an alias, we also care about any types/etc. that the aliasee
+ // may reference.
+ if (auto *AS = dyn_cast_or_null<AliasSummary>(S))
+ AddUsedThings(AS->getBaseObject());
+ }
+
+ auto AddTypeIdSummary = [&](StringRef TId, const TypeIdSummary &S) {
+ AddString(TId);
+
+ AddUnsigned(S.TTRes.TheKind);
+ AddUnsigned(S.TTRes.SizeM1BitWidth);
+
+ AddUint64(S.TTRes.AlignLog2);
+ AddUint64(S.TTRes.SizeM1);
+ AddUint64(S.TTRes.BitMask);
+ AddUint64(S.TTRes.InlineBits);
+
+ AddUint64(S.WPDRes.size());
+ for (auto &WPD : S.WPDRes) {
+ AddUnsigned(WPD.first);
+ AddUnsigned(WPD.second.TheKind);
+ AddString(WPD.second.SingleImplName);
+
+ AddUint64(WPD.second.ResByArg.size());
+ for (auto &ByArg : WPD.second.ResByArg) {
+ AddUint64(ByArg.first.size());
+ for (uint64_t Arg : ByArg.first)
+ AddUint64(Arg);
+ AddUnsigned(ByArg.second.TheKind);
+ AddUint64(ByArg.second.Info);
+ AddUnsigned(ByArg.second.Byte);
+ AddUnsigned(ByArg.second.Bit);
+ }
+ }
+ };
+
+ // Include the hash for all type identifiers used by this module.
+ for (GlobalValue::GUID TId : UsedTypeIds) {
+ auto TidIter = Index.typeIds().equal_range(TId);
+ for (auto It = TidIter.first; It != TidIter.second; ++It)
+ AddTypeIdSummary(It->second.first, It->second.second);
+ }
+
+ AddUnsigned(UsedCfiDefs.size());
+ for (auto &V : UsedCfiDefs)
+ AddUint64(V);
+
+ AddUnsigned(UsedCfiDecls.size());
+ for (auto &V : UsedCfiDecls)
+ AddUint64(V);
+
+ if (!Conf.SampleProfile.empty()) {
+ auto FileOrErr = MemoryBuffer::getFile(Conf.SampleProfile);
+ if (FileOrErr) {
+ Hasher.update(FileOrErr.get()->getBuffer());
+
+ if (!Conf.ProfileRemapping.empty()) {
+ FileOrErr = MemoryBuffer::getFile(Conf.ProfileRemapping);
+ if (FileOrErr)
+ Hasher.update(FileOrErr.get()->getBuffer());
+ }
+ }
+ }
+
+ Key = toHex(Hasher.result());
+}
+
+static void thinLTOResolvePrevailingGUID(
+ ValueInfo VI, DenseSet<GlobalValueSummary *> &GlobalInvolvedWithAlias,
+ function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
+ isPrevailing,
+ function_ref<void(StringRef, GlobalValue::GUID, GlobalValue::LinkageTypes)>
+ recordNewLinkage,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
+ for (auto &S : VI.getSummaryList()) {
+ GlobalValue::LinkageTypes OriginalLinkage = S->linkage();
+ // Ignore local and appending linkage values since the linker
+ // doesn't resolve them.
+ if (GlobalValue::isLocalLinkage(OriginalLinkage) ||
+ GlobalValue::isAppendingLinkage(S->linkage()))
+ continue;
+ // We need to emit only one of these. The prevailing module keeps it,
+ // but turns it into weak linkage, while the others drop it when possible.
+ // This is both a compile-time optimization and a correctness
+ // transformation. This is necessary for correctness when we have exported
+ // a reference - we need to convert the linkonce to weak to
+ // ensure a copy is kept to satisfy the exported reference.
+ // FIXME: We may want to split the compile time and correctness
+ // aspects into separate routines.
+ if (isPrevailing(VI.getGUID(), S.get())) {
+ if (GlobalValue::isLinkOnceLinkage(OriginalLinkage)) {
+ S->setLinkage(GlobalValue::getWeakLinkage(
+ GlobalValue::isLinkOnceODRLinkage(OriginalLinkage)));
+ // The kept copy is eligible for auto-hiding (hidden visibility) if all
+ // copies were (i.e. they were all linkonce_odr global unnamed addr).
+ // If any copy is not (e.g. it was originally weak_odr), then the symbol
+ // must remain externally available (e.g. a weak_odr from an explicitly
+ // instantiated template). Additionally, if it is in the
+ // GUIDPreservedSymbols set, that means that it is visible outside
+ // the summary (e.g. in a native object or a bitcode file without
+ // summary), and in that case we cannot hide it as it isn't possible to
+ // check all copies.
+ S->setCanAutoHide(VI.canAutoHide() &&
+ !GUIDPreservedSymbols.count(VI.getGUID()));
+ }
+ }
+ // Alias and aliasee can't be turned into available_externally.
+ else if (!isa<AliasSummary>(S.get()) &&
+ !GlobalInvolvedWithAlias.count(S.get()))
+ S->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ if (S->linkage() != OriginalLinkage)
+ recordNewLinkage(S->modulePath(), VI.getGUID(), S->linkage());
+ }
+}
+
+/// Resolve linkage for prevailing symbols in the \p Index.
+//
+// We'd like to drop these functions if they are no longer referenced in the
+// current module. However there is a chance that another module is still
+// referencing them because of the import. We make sure we always emit at least
+// one copy.
+void llvm::thinLTOResolvePrevailingInIndex(
+ ModuleSummaryIndex &Index,
+ function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
+ isPrevailing,
+ function_ref<void(StringRef, GlobalValue::GUID, GlobalValue::LinkageTypes)>
+ recordNewLinkage,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
+ // We won't optimize the globals that are referenced by an alias for now.
+ // Ideally we should turn the alias into a global and duplicate the
+ // definition when needed.
+ DenseSet<GlobalValueSummary *> GlobalInvolvedWithAlias;
+ for (auto &I : Index)
+ for (auto &S : I.second.SummaryList)
+ if (auto AS = dyn_cast<AliasSummary>(S.get()))
+ GlobalInvolvedWithAlias.insert(&AS->getAliasee());
+
+ for (auto &I : Index)
+ thinLTOResolvePrevailingGUID(Index.getValueInfo(I), GlobalInvolvedWithAlias,
+ isPrevailing, recordNewLinkage,
+ GUIDPreservedSymbols);
+}
+
+static bool isWeakObjectWithRWAccess(GlobalValueSummary *GVS) {
+ if (auto *VarSummary = dyn_cast<GlobalVarSummary>(GVS->getBaseObject()))
+ return !VarSummary->maybeReadOnly() && !VarSummary->maybeWriteOnly() &&
+ (VarSummary->linkage() == GlobalValue::WeakODRLinkage ||
+ VarSummary->linkage() == GlobalValue::LinkOnceODRLinkage);
+ return false;
+}
+
+static void thinLTOInternalizeAndPromoteGUID(
+ ValueInfo VI, function_ref<bool(StringRef, ValueInfo)> isExported,
+ function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
+ isPrevailing) {
+ for (auto &S : VI.getSummaryList()) {
+ if (isExported(S->modulePath(), VI)) {
+ if (GlobalValue::isLocalLinkage(S->linkage()))
+ S->setLinkage(GlobalValue::ExternalLinkage);
+ } else if (EnableLTOInternalization &&
+ // Ignore local and appending linkage values since the linker
+ // doesn't resolve them.
+ !GlobalValue::isLocalLinkage(S->linkage()) &&
+ (!GlobalValue::isInterposableLinkage(S->linkage()) ||
+ isPrevailing(VI.getGUID(), S.get())) &&
+ S->linkage() != GlobalValue::AppendingLinkage &&
+ // We can't internalize available_externally globals because this
+ // can break function pointer equality.
+ S->linkage() != GlobalValue::AvailableExternallyLinkage &&
+ // Functions and read-only variables with linkonce_odr and
+ // weak_odr linkage can be internalized. We can't internalize
+ // linkonce_odr and weak_odr variables which are both modified
+ // and read somewhere in the program because reads and writes
+ // will become inconsistent.
+ !isWeakObjectWithRWAccess(S.get()))
+ S->setLinkage(GlobalValue::InternalLinkage);
+ }
+}
+
+// Update the linkages in the given \p Index to mark exported values
+// as external and non-exported values as internal.
+void llvm::thinLTOInternalizeAndPromoteInIndex(
+ ModuleSummaryIndex &Index,
+ function_ref<bool(StringRef, ValueInfo)> isExported,
+ function_ref<bool(GlobalValue::GUID, const GlobalValueSummary *)>
+ isPrevailing) {
+ for (auto &I : Index)
+ thinLTOInternalizeAndPromoteGUID(Index.getValueInfo(I), isExported,
+ isPrevailing);
+}
+
+// Requires a destructor for std::vector<InputModule>.
+InputFile::~InputFile() = default;
+
+Expected<std::unique_ptr<InputFile>> InputFile::create(MemoryBufferRef Object) {
+ std::unique_ptr<InputFile> File(new InputFile);
+
+ Expected<IRSymtabFile> FOrErr = readIRSymtab(Object);
+ if (!FOrErr)
+ return FOrErr.takeError();
+
+ File->TargetTriple = FOrErr->TheReader.getTargetTriple();
+ File->SourceFileName = FOrErr->TheReader.getSourceFileName();
+ File->COFFLinkerOpts = FOrErr->TheReader.getCOFFLinkerOpts();
+ File->DependentLibraries = FOrErr->TheReader.getDependentLibraries();
+ File->ComdatTable = FOrErr->TheReader.getComdatTable();
+
+ for (unsigned I = 0; I != FOrErr->Mods.size(); ++I) {
+ size_t Begin = File->Symbols.size();
+ for (const irsymtab::Reader::SymbolRef &Sym :
+ FOrErr->TheReader.module_symbols(I))
+ // Skip symbols that are irrelevant to LTO. Note that this condition needs
+ // to match the one in Skip() in LTO::addRegularLTO().
+ if (Sym.isGlobal() && !Sym.isFormatSpecific())
+ File->Symbols.push_back(Sym);
+ File->ModuleSymIndices.push_back({Begin, File->Symbols.size()});
+ }
+
+ File->Mods = FOrErr->Mods;
+ File->Strtab = std::move(FOrErr->Strtab);
+ return std::move(File);
+}
+
+StringRef InputFile::getName() const {
+ return Mods[0].getModuleIdentifier();
+}
+
+BitcodeModule &InputFile::getSingleBitcodeModule() {
+ assert(Mods.size() == 1 && "Expect only one bitcode module");
+ return Mods[0];
+}
+
+LTO::RegularLTOState::RegularLTOState(unsigned ParallelCodeGenParallelismLevel,
+ const Config &Conf)
+ : ParallelCodeGenParallelismLevel(ParallelCodeGenParallelismLevel),
+ Ctx(Conf), CombinedModule(std::make_unique<Module>("ld-temp.o", Ctx)),
+ Mover(std::make_unique<IRMover>(*CombinedModule)) {}
+
+LTO::ThinLTOState::ThinLTOState(ThinBackend Backend)
+ : Backend(Backend), CombinedIndex(/*HaveGVs*/ false) {
+ if (!Backend)
+ this->Backend =
+ createInProcessThinBackend(llvm::heavyweight_hardware_concurrency());
+}
+
+LTO::LTO(Config Conf, ThinBackend Backend,
+ unsigned ParallelCodeGenParallelismLevel)
+ : Conf(std::move(Conf)),
+ RegularLTO(ParallelCodeGenParallelismLevel, this->Conf),
+ ThinLTO(std::move(Backend)) {}
+
+// Requires a destructor for MapVector<BitcodeModule>.
+LTO::~LTO() = default;
+
+// Add the symbols in the given module to the GlobalResolutions map, and resolve
+// their partitions.
+void LTO::addModuleToGlobalRes(ArrayRef<InputFile::Symbol> Syms,
+ ArrayRef<SymbolResolution> Res,
+ unsigned Partition, bool InSummary) {
+ auto *ResI = Res.begin();
+ auto *ResE = Res.end();
+ (void)ResE;
+ for (const InputFile::Symbol &Sym : Syms) {
+ assert(ResI != ResE);
+ SymbolResolution Res = *ResI++;
+
+ StringRef Name = Sym.getName();
+ Triple TT(RegularLTO.CombinedModule->getTargetTriple());
+ // Strip the __imp_ prefix from COFF dllimport symbols (similar to the
+ // way they are handled by lld); otherwise we can end up with two
+ // global resolutions (one with the prefix and one without).
+ if (TT.isOSBinFormatCOFF() && Name.startswith("__imp_"))
+ Name = Name.substr(strlen("__imp_"));
+ auto &GlobalRes = GlobalResolutions[Name];
+ GlobalRes.UnnamedAddr &= Sym.isUnnamedAddr();
+ if (Res.Prevailing) {
+ assert(!GlobalRes.Prevailing &&
+ "Multiple prevailing defs are not allowed");
+ GlobalRes.Prevailing = true;
+ GlobalRes.IRName = std::string(Sym.getIRName());
+ } else if (!GlobalRes.Prevailing && GlobalRes.IRName.empty()) {
+ // Sometimes there can be two copies of a symbol in a module, and the
+ // prevailing symbol can have no IR name. That might happen if the symbol
+ // is defined in a module-level inline asm block. If we have multiple
+ // modules with the same symbol, we want to use the IR name of the
+ // prevailing symbol. Otherwise, if we haven't seen a prevailing symbol,
+ // set the name so that we can later use it to check if there is any
+ // prevailing copy in IR.
+ GlobalRes.IRName = std::string(Sym.getIRName());
+ }
+
+ // Set the partition to external if we know it is re-defined by the linker
+ // with -defsym or -wrap options, used elsewhere, e.g. it is visible to a
+ // regular object, is referenced from llvm.compiler_used, or was already
+ // recorded as being referenced from a different partition.
+ if (Res.LinkerRedefined || Res.VisibleToRegularObj || Sym.isUsed() ||
+ (GlobalRes.Partition != GlobalResolution::Unknown &&
+ GlobalRes.Partition != Partition)) {
+ GlobalRes.Partition = GlobalResolution::External;
+ } else
+ // First recorded reference, save the current partition.
+ GlobalRes.Partition = Partition;
+
+ // Flag as visible outside of summary if visible from a regular object or
+ // from a module that does not have a summary.
+ GlobalRes.VisibleOutsideSummary |=
+ (Res.VisibleToRegularObj || Sym.isUsed() || !InSummary);
+ }
+}
+
+static void writeToResolutionFile(raw_ostream &OS, InputFile *Input,
+ ArrayRef<SymbolResolution> Res) {
+ StringRef Path = Input->getName();
+ OS << Path << '\n';
+ auto ResI = Res.begin();
+ for (const InputFile::Symbol &Sym : Input->symbols()) {
+ assert(ResI != Res.end());
+ SymbolResolution Res = *ResI++;
+
+ OS << "-r=" << Path << ',' << Sym.getName() << ',';
+ if (Res.Prevailing)
+ OS << 'p';
+ if (Res.FinalDefinitionInLinkageUnit)
+ OS << 'l';
+ if (Res.VisibleToRegularObj)
+ OS << 'x';
+ if (Res.LinkerRedefined)
+ OS << 'r';
+ OS << '\n';
+ }
+ OS.flush();
+ assert(ResI == Res.end());
+}
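+
+// For illustration, with a hypothetical input "foo.o" whose symbol "bar" is
+// prevailing and visible to regular objects, the function above would emit:
+//
+//   foo.o
+//   -r=foo.o,bar,px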
+
+Error LTO::add(std::unique_ptr<InputFile> Input,
+ ArrayRef<SymbolResolution> Res) {
+ assert(!CalledGetMaxTasks);
+
+ if (Conf.ResolutionFile)
+ writeToResolutionFile(*Conf.ResolutionFile, Input.get(), Res);
+
+ if (RegularLTO.CombinedModule->getTargetTriple().empty())
+ RegularLTO.CombinedModule->setTargetTriple(Input->getTargetTriple());
+
+ const SymbolResolution *ResI = Res.begin();
+ for (unsigned I = 0; I != Input->Mods.size(); ++I)
+ if (Error Err = addModule(*Input, I, ResI, Res.end()))
+ return Err;
+
+ assert(ResI == Res.end());
+ return Error::success();
+}
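+
+// A simplified driver-side sketch, for illustration (the symbol resolutions
+// are normally computed by the linker; `Resolutions` is a placeholder here):
+//
+//   LTO Lto(std::move(Conf));
+//   Expected<std::unique_ptr<InputFile>> F = InputFile::create(ObjBuffer);
+//   if (!F)
+//     return F.takeError();
+//   if (Error E = Lto.add(std::move(*F), Resolutions))
+//     return E;
+//   // ...then call Lto.run(AddStream, Cache) once all inputs are added.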
+
+Error LTO::addModule(InputFile &Input, unsigned ModI,
+ const SymbolResolution *&ResI,
+ const SymbolResolution *ResE) {
+ Expected<BitcodeLTOInfo> LTOInfo = Input.Mods[ModI].getLTOInfo();
+ if (!LTOInfo)
+ return LTOInfo.takeError();
+
+ if (EnableSplitLTOUnit.hasValue()) {
+ // If only some modules were split, flag this in the index so that
+ // we can skip or error on optimizations that need consistently split
+ // modules (whole program devirt and lower type tests).
+ if (EnableSplitLTOUnit.getValue() != LTOInfo->EnableSplitLTOUnit)
+ ThinLTO.CombinedIndex.setPartiallySplitLTOUnits();
+ } else
+ EnableSplitLTOUnit = LTOInfo->EnableSplitLTOUnit;
+
+ BitcodeModule BM = Input.Mods[ModI];
+ auto ModSyms = Input.module_symbols(ModI);
+ addModuleToGlobalRes(ModSyms, {ResI, ResE},
+ LTOInfo->IsThinLTO ? ThinLTO.ModuleMap.size() + 1 : 0,
+ LTOInfo->HasSummary);
+
+ if (LTOInfo->IsThinLTO)
+ return addThinLTO(BM, ModSyms, ResI, ResE);
+
+ RegularLTO.EmptyCombinedModule = false;
+ Expected<RegularLTOState::AddedModule> ModOrErr =
+ addRegularLTO(BM, ModSyms, ResI, ResE);
+ if (!ModOrErr)
+ return ModOrErr.takeError();
+
+ if (!LTOInfo->HasSummary)
+ return linkRegularLTO(std::move(*ModOrErr), /*LivenessFromIndex=*/false);
+
+ // Regular LTO module summaries are added to a dummy module that represents
+ // the combined regular LTO module.
+ if (Error Err = BM.readSummary(ThinLTO.CombinedIndex, "", -1ull))
+ return Err;
+ RegularLTO.ModsWithSummaries.push_back(std::move(*ModOrErr));
+ return Error::success();
+}
+
+// Checks whether the given global value is in a non-prevailing comdat
+// (comdat containing values the linker indicated were not prevailing,
+// which we then dropped to available_externally), and if so, removes
+// it from the comdat. This is called for all global values to ensure the
+// comdat is empty rather than leaving an incomplete comdat. It is needed for
+// regular LTO modules, in case we are compiling in mixed-LTO mode (both
+// regular and thin LTO modules). Since the regular LTO module will be
+// linked first in the final native link, we want to make sure the linker
+// doesn't select any of these incomplete comdats that would be left
+// in the regular LTO module without this cleanup.
+static void
+handleNonPrevailingComdat(GlobalValue &GV,
+ std::set<const Comdat *> &NonPrevailingComdats) {
+ Comdat *C = GV.getComdat();
+ if (!C)
+ return;
+
+ if (!NonPrevailingComdats.count(C))
+ return;
+
+ // Additionally need to drop externally visible global values from the comdat
+ // to available_externally, so that there aren't multiply defined linker
+ // errors.
+ if (!GV.hasLocalLinkage())
+ GV.setLinkage(GlobalValue::AvailableExternallyLinkage);
+
+ if (auto GO = dyn_cast<GlobalObject>(&GV))
+ GO->setComdat(nullptr);
+}
+
+// Add a regular LTO object to the link.
+// The resulting module needs to be linked into the combined LTO module with
+// linkRegularLTO.
+Expected<LTO::RegularLTOState::AddedModule>
+LTO::addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+ const SymbolResolution *&ResI,
+ const SymbolResolution *ResE) {
+ RegularLTOState::AddedModule Mod;
+ Expected<std::unique_ptr<Module>> MOrErr =
+ BM.getLazyModule(RegularLTO.Ctx, /*ShouldLazyLoadMetadata*/ true,
+ /*IsImporting*/ false);
+ if (!MOrErr)
+ return MOrErr.takeError();
+ Module &M = **MOrErr;
+ Mod.M = std::move(*MOrErr);
+
+ if (Error Err = M.materializeMetadata())
+ return std::move(Err);
+ UpgradeDebugInfo(M);
+
+ ModuleSymbolTable SymTab;
+ SymTab.addModule(&M);
+
+ for (GlobalVariable &GV : M.globals())
+ if (GV.hasAppendingLinkage())
+ Mod.Keep.push_back(&GV);
+
+ DenseSet<GlobalObject *> AliasedGlobals;
+ for (auto &GA : M.aliases())
+ if (GlobalObject *GO = GA.getBaseObject())
+ AliasedGlobals.insert(GO);
+
+ // In this function we need IR GlobalValues matching the symbols in Syms
+ // (which is not backed by a module), so we need to enumerate them in the same
+ // order. The symbol enumeration order of a ModuleSymbolTable intentionally
+ // matches the order of an irsymtab, but when we read the irsymtab in
+ // InputFile::create we omit some symbols that are irrelevant to LTO. The
+ // Skip() function skips the same symbols from the module as InputFile does
+ // from the symbol table.
+ auto MsymI = SymTab.symbols().begin(), MsymE = SymTab.symbols().end();
+ auto Skip = [&]() {
+ while (MsymI != MsymE) {
+ auto Flags = SymTab.getSymbolFlags(*MsymI);
+ if ((Flags & object::BasicSymbolRef::SF_Global) &&
+ !(Flags & object::BasicSymbolRef::SF_FormatSpecific))
+ return;
+ ++MsymI;
+ }
+ };
+ Skip();
+
+ std::set<const Comdat *> NonPrevailingComdats;
+ for (const InputFile::Symbol &Sym : Syms) {
+ assert(ResI != ResE);
+ SymbolResolution Res = *ResI++;
+
+ assert(MsymI != MsymE);
+ ModuleSymbolTable::Symbol Msym = *MsymI++;
+ Skip();
+
+ if (GlobalValue *GV = Msym.dyn_cast<GlobalValue *>()) {
+ if (Res.Prevailing) {
+ if (Sym.isUndefined())
+ continue;
+ Mod.Keep.push_back(GV);
+ // For symbols re-defined with linker -wrap and -defsym options,
+ // set the linkage to weak to inhibit IPO. The linkage will be
+ // restored by the linker.
+ if (Res.LinkerRedefined)
+ GV->setLinkage(GlobalValue::WeakAnyLinkage);
+
+ GlobalValue::LinkageTypes OriginalLinkage = GV->getLinkage();
+ if (GlobalValue::isLinkOnceLinkage(OriginalLinkage))
+ GV->setLinkage(GlobalValue::getWeakLinkage(
+ GlobalValue::isLinkOnceODRLinkage(OriginalLinkage)));
+ } else if (isa<GlobalObject>(GV) &&
+ (GV->hasLinkOnceODRLinkage() || GV->hasWeakODRLinkage() ||
+ GV->hasAvailableExternallyLinkage()) &&
+ !AliasedGlobals.count(cast<GlobalObject>(GV))) {
+ // Any of the above three types of linkage indicates that the
+ // chosen prevailing symbol will have the same semantics as this copy of
+ // the symbol, so we may be able to link it with available_externally
+ // linkage. We will decide later whether to do that when we link this
+ // module (in linkRegularLTO), based on whether it is undefined.
+ Mod.Keep.push_back(GV);
+ GV->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ if (GV->hasComdat())
+ NonPrevailingComdats.insert(GV->getComdat());
+ cast<GlobalObject>(GV)->setComdat(nullptr);
+ }
+
+ // Set the 'local' flag based on the linker resolution for this symbol.
+ if (Res.FinalDefinitionInLinkageUnit) {
+ GV->setDSOLocal(true);
+ if (GV->hasDLLImportStorageClass())
+ GV->setDLLStorageClass(
+     GlobalValue::DLLStorageClassTypes::DefaultStorageClass);
+ }
+ }
+ // Common resolution: collect the maximum size/alignment over all commons.
+ // We also record if we see an instance of a common as prevailing, so that
+ // if none is prevailing we can ignore it later.
+ if (Sym.isCommon()) {
+ // FIXME: We should figure out what to do about commons defined by asm.
+ // For now they aren't reported correctly by ModuleSymbolTable.
+ auto &CommonRes = RegularLTO.Commons[std::string(Sym.getIRName())];
+ CommonRes.Size = std::max(CommonRes.Size, Sym.getCommonSize());
+ MaybeAlign SymAlign(Sym.getCommonAlignment());
+ if (SymAlign)
+ CommonRes.Align = max(*SymAlign, CommonRes.Align);
+ CommonRes.Prevailing |= Res.Prevailing;
+ }
+ }
+ if (!M.getComdatSymbolTable().empty())
+ for (GlobalValue &GV : M.global_values())
+ handleNonPrevailingComdat(GV, NonPrevailingComdats);
+ assert(MsymI == MsymE);
+ return std::move(Mod);
+}
+
+Error LTO::linkRegularLTO(RegularLTOState::AddedModule Mod,
+ bool LivenessFromIndex) {
+ std::vector<GlobalValue *> Keep;
+ for (GlobalValue *GV : Mod.Keep) {
+ if (LivenessFromIndex && !ThinLTO.CombinedIndex.isGUIDLive(GV->getGUID())) {
+ if (Function *F = dyn_cast<Function>(GV)) {
+ OptimizationRemarkEmitter ORE(F, nullptr);
+ ORE.emit(OptimizationRemark(DEBUG_TYPE, "deadfunction", F)
+ << ore::NV("Function", F)
+ << " not added to the combined module ");
+ }
+ continue;
+ }
+
+ if (!GV->hasAvailableExternallyLinkage()) {
+ Keep.push_back(GV);
+ continue;
+ }
+
+ // Only link available_externally definitions if we don't already have a
+ // definition.
+ GlobalValue *CombinedGV =
+ RegularLTO.CombinedModule->getNamedValue(GV->getName());
+ if (CombinedGV && !CombinedGV->isDeclaration())
+ continue;
+
+ Keep.push_back(GV);
+ }
+
+ return RegularLTO.Mover->move(std::move(Mod.M), Keep,
+ [](GlobalValue &, IRMover::ValueAdder) {},
+ /* IsPerformingImport */ false);
+}
+
+// Add a ThinLTO module to the link.
+Error LTO::addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+ const SymbolResolution *&ResI,
+ const SymbolResolution *ResE) {
+ if (Error Err =
+ BM.readSummary(ThinLTO.CombinedIndex, BM.getModuleIdentifier(),
+ ThinLTO.ModuleMap.size()))
+ return Err;
+
+ for (const InputFile::Symbol &Sym : Syms) {
+ assert(ResI != ResE);
+ SymbolResolution Res = *ResI++;
+
+ if (!Sym.getIRName().empty()) {
+ auto GUID = GlobalValue::getGUID(GlobalValue::getGlobalIdentifier(
+ Sym.getIRName(), GlobalValue::ExternalLinkage, ""));
+ if (Res.Prevailing) {
+ ThinLTO.PrevailingModuleForGUID[GUID] = BM.getModuleIdentifier();
+
+ // For linker redefined symbols (via --wrap or --defsym) we want to
+ // switch the linkage to `weak` to prevent IPOs from happening.
+ // Find the summary in the module for this very GV and record the new
+ // linkage so that we can switch it when we import the GV.
+ if (Res.LinkerRedefined)
+ if (auto S = ThinLTO.CombinedIndex.findSummaryInModule(
+ GUID, BM.getModuleIdentifier()))
+ S->setLinkage(GlobalValue::WeakAnyLinkage);
+ }
+
+ // If the linker resolved the symbol to a local definition then mark it
+ // as local in the summary for the module we are adding.
+ if (Res.FinalDefinitionInLinkageUnit) {
+ if (auto S = ThinLTO.CombinedIndex.findSummaryInModule(
+ GUID, BM.getModuleIdentifier())) {
+ S->setDSOLocal(true);
+ }
+ }
+ }
+ }
+
+ if (!ThinLTO.ModuleMap.insert({BM.getModuleIdentifier(), BM}).second)
+ return make_error<StringError>(
+ "Expected at most one ThinLTO module per bitcode file",
+ inconvertibleErrorCode());
+
+ if (!Conf.ThinLTOModulesToCompile.empty()) {
+ if (!ThinLTO.ModulesToCompile)
+ ThinLTO.ModulesToCompile = ModuleMapType();
+ // This is fuzzy name matching: only modules whose names contain the
+ // specified switch values are going to be compiled.
+ for (const std::string &Name : Conf.ThinLTOModulesToCompile) {
+ if (BM.getModuleIdentifier().contains(Name)) {
+ ThinLTO.ModulesToCompile->insert({BM.getModuleIdentifier(), BM});
+ llvm::errs() << "[ThinLTO] Selecting " << BM.getModuleIdentifier()
+ << " to compile\n";
+ }
+ }
+ }
+
+ return Error::success();
+}
+
+unsigned LTO::getMaxTasks() const {
+ CalledGetMaxTasks = true;
+ auto ModuleCount = ThinLTO.ModulesToCompile ? ThinLTO.ModulesToCompile->size()
+ : ThinLTO.ModuleMap.size();
+ return RegularLTO.ParallelCodeGenParallelismLevel + ModuleCount;
+}
+
+// If only some of the modules were split, we cannot correctly handle
+// code that contains type tests or type checked loads.
+Error LTO::checkPartiallySplit() {
+ if (!ThinLTO.CombinedIndex.partiallySplitLTOUnits())
+ return Error::success();
+
+ Function *TypeTestFunc = RegularLTO.CombinedModule->getFunction(
+ Intrinsic::getName(Intrinsic::type_test));
+ Function *TypeCheckedLoadFunc = RegularLTO.CombinedModule->getFunction(
+ Intrinsic::getName(Intrinsic::type_checked_load));
+
+ // First check if there are type tests / type checked loads in the
+ // merged regular LTO module IR.
+ if ((TypeTestFunc && !TypeTestFunc->use_empty()) ||
+ (TypeCheckedLoadFunc && !TypeCheckedLoadFunc->use_empty()))
+ return make_error<StringError>(
+ "inconsistent LTO Unit splitting (recompile with -fsplit-lto-unit)",
+ inconvertibleErrorCode());
+
+ // Otherwise check if there are any recorded in the combined summary from the
+ // ThinLTO modules.
+ for (auto &P : ThinLTO.CombinedIndex) {
+ for (auto &S : P.second.SummaryList) {
+ auto *FS = dyn_cast<FunctionSummary>(S.get());
+ if (!FS)
+ continue;
+ if (!FS->type_test_assume_vcalls().empty() ||
+ !FS->type_checked_load_vcalls().empty() ||
+ !FS->type_test_assume_const_vcalls().empty() ||
+ !FS->type_checked_load_const_vcalls().empty() ||
+ !FS->type_tests().empty())
+ return make_error<StringError>(
+ "inconsistent LTO Unit splitting (recompile with -fsplit-lto-unit)",
+ inconvertibleErrorCode());
+ }
+ }
+ return Error::success();
+}
+
+Error LTO::run(AddStreamFn AddStream, NativeObjectCache Cache) {
+ // Compute "dead" symbols, we don't want to import/export these!
+ DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;
+ DenseMap<GlobalValue::GUID, PrevailingType> GUIDPrevailingResolutions;
+ for (auto &Res : GlobalResolutions) {
+ // Normally a resolution has the IR name of the symbol; otherwise we can do
+ // nothing here. See comments in the GlobalResolution struct for more
+ // details.
+ if (Res.second.IRName.empty())
+ continue;
+
+ GlobalValue::GUID GUID = GlobalValue::getGUID(
+ GlobalValue::dropLLVMManglingEscape(Res.second.IRName));
+
+ if (Res.second.VisibleOutsideSummary && Res.second.Prevailing)
+ GUIDPreservedSymbols.insert(GUID);
+
+ GUIDPrevailingResolutions[GUID] =
+ Res.second.Prevailing ? PrevailingType::Yes : PrevailingType::No;
+ }
+
+ auto isPrevailing = [&](GlobalValue::GUID G) {
+ auto It = GUIDPrevailingResolutions.find(G);
+ if (It == GUIDPrevailingResolutions.end())
+ return PrevailingType::Unknown;
+ return It->second;
+ };
+ computeDeadSymbolsWithConstProp(ThinLTO.CombinedIndex, GUIDPreservedSymbols,
+ isPrevailing, Conf.OptLevel > 0);
+
+ // Setup output file to emit statistics.
+ auto StatsFileOrErr = setupStatsFile(Conf.StatsFile);
+ if (!StatsFileOrErr)
+ return StatsFileOrErr.takeError();
+ std::unique_ptr<ToolOutputFile> StatsFile = std::move(StatsFileOrErr.get());
+
+ Error Result = runRegularLTO(AddStream);
+ if (!Result)
+ Result = runThinLTO(AddStream, Cache, GUIDPreservedSymbols);
+
+ if (StatsFile)
+ PrintStatisticsJSON(StatsFile->os());
+
+ return Result;
+}
+
+Error LTO::runRegularLTO(AddStreamFn AddStream) {
+ // Setup optimization remarks.
+ auto DiagFileOrErr = lto::setupLLVMOptimizationRemarks(
+ RegularLTO.CombinedModule->getContext(), Conf.RemarksFilename,
+ Conf.RemarksPasses, Conf.RemarksFormat, Conf.RemarksWithHotness,
+ Conf.RemarksHotnessThreshold);
+ if (!DiagFileOrErr)
+ return DiagFileOrErr.takeError();
+
+ // Finalize linking of regular LTO modules containing summaries now that
+ // we have computed liveness information.
+ for (auto &M : RegularLTO.ModsWithSummaries)
+ if (Error Err = linkRegularLTO(std::move(M),
+ /*LivenessFromIndex=*/true))
+ return Err;
+
+ // Ensure we don't have inconsistently split LTO units with type tests.
+ // FIXME: this checks both LTO and ThinLTO. It happens to work as we take
+ // this path in both cases, but eventually this should be split into two,
+ // with the ThinLTO checks done in `runThinLTO`.
+ if (Error Err = checkPartiallySplit())
+ return Err;
+
+ // Make sure commons have the right size/alignment: we kept the largest from
+ // all the prevailing when adding the inputs, and we apply it here.
+ const DataLayout &DL = RegularLTO.CombinedModule->getDataLayout();
+ for (auto &I : RegularLTO.Commons) {
+ if (!I.second.Prevailing)
+ // Don't do anything if no instance of this common was prevailing.
+ continue;
+ GlobalVariable *OldGV = RegularLTO.CombinedModule->getNamedGlobal(I.first);
+ if (OldGV && DL.getTypeAllocSize(OldGV->getValueType()) == I.second.Size) {
+ // Don't create a new global if the type is already correct, just make
+ // sure the alignment is correct.
+ OldGV->setAlignment(I.second.Align);
+ continue;
+ }
+ ArrayType *Ty =
+ ArrayType::get(Type::getInt8Ty(RegularLTO.Ctx), I.second.Size);
+ auto *GV = new GlobalVariable(*RegularLTO.CombinedModule, Ty, false,
+ GlobalValue::CommonLinkage,
+ ConstantAggregateZero::get(Ty), "");
+ GV->setAlignment(I.second.Align);
+ if (OldGV) {
+ OldGV->replaceAllUsesWith(ConstantExpr::getBitCast(GV, OldGV->getType()));
+ GV->takeName(OldGV);
+ OldGV->eraseFromParent();
+ } else {
+ GV->setName(I.first);
+ }
+ }
+
+ // If allowed, upgrade public vcall visibility metadata to linkage unit
+ // visibility before whole program devirtualization in the optimizer.
+ updateVCallVisibilityInModule(*RegularLTO.CombinedModule,
+ Conf.HasWholeProgramVisibility);
+
+ if (Conf.PreOptModuleHook &&
+ !Conf.PreOptModuleHook(0, *RegularLTO.CombinedModule))
+ return Error::success();
+
+ if (!Conf.CodeGenOnly) {
+ for (const auto &R : GlobalResolutions) {
+ if (!R.second.isPrevailingIRSymbol())
+ continue;
+ if (R.second.Partition != 0 &&
+ R.second.Partition != GlobalResolution::External)
+ continue;
+
+ GlobalValue *GV =
+ RegularLTO.CombinedModule->getNamedValue(R.second.IRName);
+ // Ignore symbols defined in other partitions.
+ // Also skip declarations, which are not allowed to have internal linkage.
+ if (!GV || GV->hasLocalLinkage() || GV->isDeclaration())
+ continue;
+ GV->setUnnamedAddr(R.second.UnnamedAddr ? GlobalValue::UnnamedAddr::Global
+ : GlobalValue::UnnamedAddr::None);
+ if (EnableLTOInternalization && R.second.Partition == 0)
+ GV->setLinkage(GlobalValue::InternalLinkage);
+ }
+
+ RegularLTO.CombinedModule->addModuleFlag(Module::Error, "LTOPostLink", 1);
+
+ if (Conf.PostInternalizeModuleHook &&
+ !Conf.PostInternalizeModuleHook(0, *RegularLTO.CombinedModule))
+ return Error::success();
+ }
+
+ if (!RegularLTO.EmptyCombinedModule || Conf.AlwaysEmitRegularLTOObj) {
+ if (Error Err = backend(
+ Conf, AddStream, RegularLTO.ParallelCodeGenParallelismLevel,
+ std::move(RegularLTO.CombinedModule), ThinLTO.CombinedIndex))
+ return Err;
+ }
+
+ return finalizeOptimizationRemarks(std::move(*DiagFileOrErr));
+}
+
+static const char *libcallRoutineNames[] = {
+#define HANDLE_LIBCALL(code, name) name,
+#include "llvm/IR/RuntimeLibcalls.def"
+#undef HANDLE_LIBCALL
+};
+
+ArrayRef<const char*> LTO::getRuntimeLibcallSymbols() {
+ return makeArrayRef(libcallRoutineNames);
+}
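+
+// Illustrative use (hypothetical linker-side code): treat these libcall
+// names as symbols that must be preserved, so LTO does not internalize or
+// drop definitions the code generator may later call:
+//
+//   for (const char *Name : LTO::getRuntimeLibcallSymbols())
+//     MustPreserve.insert(Name);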
+
+/// This class defines the interface to the ThinLTO backend.
+class lto::ThinBackendProc {
+protected:
+ const Config &Conf;
+ ModuleSummaryIndex &CombinedIndex;
+ const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries;
+
+public:
+ ThinBackendProc(const Config &Conf, ModuleSummaryIndex &CombinedIndex,
+ const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries)
+ : Conf(Conf), CombinedIndex(CombinedIndex),
+ ModuleToDefinedGVSummaries(ModuleToDefinedGVSummaries) {}
+
+ virtual ~ThinBackendProc() {}
+ virtual Error start(
+ unsigned Task, BitcodeModule BM,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
+ MapVector<StringRef, BitcodeModule> &ModuleMap) = 0;
+ virtual Error wait() = 0;
+ virtual unsigned getThreadCount() = 0;
+};
+
+namespace {
+class InProcessThinBackend : public ThinBackendProc {
+ ThreadPool BackendThreadPool;
+ AddStreamFn AddStream;
+ NativeObjectCache Cache;
+ std::set<GlobalValue::GUID> CfiFunctionDefs;
+ std::set<GlobalValue::GUID> CfiFunctionDecls;
+
+ Optional<Error> Err;
+ std::mutex ErrMu;
+
+public:
+ InProcessThinBackend(
+ const Config &Conf, ModuleSummaryIndex &CombinedIndex,
+ ThreadPoolStrategy ThinLTOParallelism,
+ const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+ AddStreamFn AddStream, NativeObjectCache Cache)
+ : ThinBackendProc(Conf, CombinedIndex, ModuleToDefinedGVSummaries),
+ BackendThreadPool(ThinLTOParallelism), AddStream(std::move(AddStream)),
+ Cache(std::move(Cache)) {
+ for (auto &Name : CombinedIndex.cfiFunctionDefs())
+ CfiFunctionDefs.insert(
+ GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
+ for (auto &Name : CombinedIndex.cfiFunctionDecls())
+ CfiFunctionDecls.insert(
+ GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Name)));
+ }
+
+ Error runThinLTOBackendThread(
+ AddStreamFn AddStream, NativeObjectCache Cache, unsigned Task,
+ BitcodeModule BM, ModuleSummaryIndex &CombinedIndex,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
+ const GVSummaryMapTy &DefinedGlobals,
+ MapVector<StringRef, BitcodeModule> &ModuleMap) {
+ auto RunThinBackend = [&](AddStreamFn AddStream) {
+ LTOLLVMContext BackendContext(Conf);
+ Expected<std::unique_ptr<Module>> MOrErr = BM.parseModule(BackendContext);
+ if (!MOrErr)
+ return MOrErr.takeError();
+
+ return thinBackend(Conf, Task, AddStream, **MOrErr, CombinedIndex,
+ ImportList, DefinedGlobals, ModuleMap);
+ };
+
+ auto ModuleID = BM.getModuleIdentifier();
+
+ if (!Cache || !CombinedIndex.modulePaths().count(ModuleID) ||
+ all_of(CombinedIndex.getModuleHash(ModuleID),
+ [](uint32_t V) { return V == 0; }))
+ // Cache disabled or no entry for this module in the combined index or
+ // no module hash.
+ return RunThinBackend(AddStream);
+
+ SmallString<40> Key;
+ // The module may be cached; compute a cache key covering everything that
+ // could affect the generated object so the cache can be queried.
+ computeLTOCacheKey(Key, Conf, CombinedIndex, ModuleID, ImportList,
+ ExportList, ResolvedODR, DefinedGlobals, CfiFunctionDefs,
+ CfiFunctionDecls);
+ if (AddStreamFn CacheAddStream = Cache(Task, Key))
+ return RunThinBackend(CacheAddStream);
+
+ return Error::success();
+ }
+
+ Error start(
+ unsigned Task, BitcodeModule BM,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
+ MapVector<StringRef, BitcodeModule> &ModuleMap) override {
+ StringRef ModulePath = BM.getModuleIdentifier();
+ assert(ModuleToDefinedGVSummaries.count(ModulePath));
+ const GVSummaryMapTy &DefinedGlobals =
+ ModuleToDefinedGVSummaries.find(ModulePath)->second;
+ BackendThreadPool.async(
+ [=](BitcodeModule BM, ModuleSummaryIndex &CombinedIndex,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>
+ &ResolvedODR,
+ const GVSummaryMapTy &DefinedGlobals,
+ MapVector<StringRef, BitcodeModule> &ModuleMap) {
+ if (LLVM_ENABLE_THREADS && Conf.TimeTraceEnabled)
+ timeTraceProfilerInitialize(Conf.TimeTraceGranularity,
+ "thin backend");
+ Error E = runThinLTOBackendThread(
+ AddStream, Cache, Task, BM, CombinedIndex, ImportList, ExportList,
+ ResolvedODR, DefinedGlobals, ModuleMap);
+ if (E) {
+ std::unique_lock<std::mutex> L(ErrMu);
+ if (Err)
+ Err = joinErrors(std::move(*Err), std::move(E));
+ else
+ Err = std::move(E);
+ }
+ if (LLVM_ENABLE_THREADS && Conf.TimeTraceEnabled)
+ timeTraceProfilerFinishThread();
+ },
+ BM, std::ref(CombinedIndex), std::ref(ImportList), std::ref(ExportList),
+ std::ref(ResolvedODR), std::ref(DefinedGlobals), std::ref(ModuleMap));
+ return Error::success();
+ }
+
+ Error wait() override {
+ BackendThreadPool.wait();
+ if (Err)
+ return std::move(*Err);
+ else
+ return Error::success();
+ }
+
+ unsigned getThreadCount() override {
+ return BackendThreadPool.getThreadCount();
+ }
+};
+} // end anonymous namespace
+
+ThinBackend lto::createInProcessThinBackend(ThreadPoolStrategy Parallelism) {
+ return [=](const Config &Conf, ModuleSummaryIndex &CombinedIndex,
+ const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+ AddStreamFn AddStream, NativeObjectCache Cache) {
+ return std::make_unique<InProcessThinBackend>(
+ Conf, CombinedIndex, Parallelism, ModuleToDefinedGVSummaries, AddStream,
+ Cache);
+ };
+}
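+
+// A minimal sketch of how a client might wire this up (Conf, AddStream and
+// Cache are assumed to already exist on the caller's side): the returned
+// ThinBackend is simply handed to the LTO constructor, e.g.
+//
+//   ThinBackend Backend = lto::createInProcessThinBackend(
+//       llvm::heavyweight_hardware_concurrency());
+//   lto::LTO Lto(std::move(Conf), std::move(Backend));
+//   // ...add inputs via Lto.add(...), then call Lto.run(AddStream, Cache).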
+
+// Given the original \p Path to an output file, replace any path
+// prefix matching \p OldPrefix with \p NewPrefix. Also, create the
+// resulting directory if it does not yet exist.
+std::string lto::getThinLTOOutputFile(const std::string &Path,
+ const std::string &OldPrefix,
+ const std::string &NewPrefix) {
+ if (OldPrefix.empty() && NewPrefix.empty())
+ return Path;
+ SmallString<128> NewPath(Path);
+ llvm::sys::path::replace_path_prefix(NewPath, OldPrefix, NewPrefix);
+ StringRef ParentPath = llvm::sys::path::parent_path(NewPath.str());
+ if (!ParentPath.empty()) {
+ // Make sure the new directory exists, creating it if necessary.
+ if (std::error_code EC = llvm::sys::fs::create_directories(ParentPath))
+ llvm::errs() << "warning: could not create directory '" << ParentPath
+ << "': " << EC.message() << '\n';
+ }
+ return std::string(NewPath.str());
+}
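+
+// Worked example (paths invented): with OldPrefix = "/home/a/obj" and
+// NewPrefix = "/tmp/lto",
+//
+//   getThinLTOOutputFile("/home/a/obj/x/foo.o", "/home/a/obj", "/tmp/lto")
+//
+// returns "/tmp/lto/x/foo.o" and creates "/tmp/lto/x" if it does not exist;
+// when both prefixes are empty the path is returned unchanged.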
+
+namespace {
+class WriteIndexesThinBackend : public ThinBackendProc {
+ std::string OldPrefix, NewPrefix;
+ bool ShouldEmitImportsFiles;
+ raw_fd_ostream *LinkedObjectsFile;
+ lto::IndexWriteCallback OnWrite;
+
+public:
+ WriteIndexesThinBackend(
+ const Config &Conf, ModuleSummaryIndex &CombinedIndex,
+ const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+ std::string OldPrefix, std::string NewPrefix, bool ShouldEmitImportsFiles,
+ raw_fd_ostream *LinkedObjectsFile, lto::IndexWriteCallback OnWrite)
+ : ThinBackendProc(Conf, CombinedIndex, ModuleToDefinedGVSummaries),
+ OldPrefix(OldPrefix), NewPrefix(NewPrefix),
+ ShouldEmitImportsFiles(ShouldEmitImportsFiles),
+ LinkedObjectsFile(LinkedObjectsFile), OnWrite(OnWrite) {}
+
+ Error start(
+ unsigned Task, BitcodeModule BM,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
+ MapVector<StringRef, BitcodeModule> &ModuleMap) override {
+ StringRef ModulePath = BM.getModuleIdentifier();
+ std::string NewModulePath =
+ getThinLTOOutputFile(std::string(ModulePath), OldPrefix, NewPrefix);
+
+ if (LinkedObjectsFile)
+ *LinkedObjectsFile << NewModulePath << '\n';
+
+ std::map<std::string, GVSummaryMapTy> ModuleToSummariesForIndex;
+ gatherImportedSummariesForModule(ModulePath, ModuleToDefinedGVSummaries,
+ ImportList, ModuleToSummariesForIndex);
+
+ std::error_code EC;
+ raw_fd_ostream OS(NewModulePath + ".thinlto.bc", EC,
+ sys::fs::OpenFlags::OF_None);
+ if (EC)
+ return errorCodeToError(EC);
+ WriteIndexToFile(CombinedIndex, OS, &ModuleToSummariesForIndex);
+
+ if (ShouldEmitImportsFiles) {
+ EC = EmitImportsFiles(ModulePath, NewModulePath + ".imports",
+ ModuleToSummariesForIndex);
+ if (EC)
+ return errorCodeToError(EC);
+ }
+
+ if (OnWrite)
+ OnWrite(std::string(ModulePath));
+ return Error::success();
+ }
+
+ Error wait() override { return Error::success(); }
+
+ // WriteIndexesThinBackend should always return 1 to prevent module
+ // re-ordering and avoid non-determinism in the final link.
+ unsigned getThreadCount() override { return 1; }
+};
+} // end anonymous namespace
+
+ThinBackend lto::createWriteIndexesThinBackend(
+ std::string OldPrefix, std::string NewPrefix, bool ShouldEmitImportsFiles,
+ raw_fd_ostream *LinkedObjectsFile, IndexWriteCallback OnWrite) {
+ return [=](const Config &Conf, ModuleSummaryIndex &CombinedIndex,
+ const StringMap<GVSummaryMapTy> &ModuleToDefinedGVSummaries,
+ AddStreamFn AddStream, NativeObjectCache Cache) {
+ return std::make_unique<WriteIndexesThinBackend>(
+ Conf, CombinedIndex, ModuleToDefinedGVSummaries, OldPrefix, NewPrefix,
+ ShouldEmitImportsFiles, LinkedObjectsFile, OnWrite);
+ };
+}
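+
+// Sketch of the distributed-ThinLTO flow this backend enables (the arguments
+// shown are just one plausible configuration): the link step writes
+// per-module "<module>.thinlto.bc" index files (and ".imports" files when
+// requested) instead of running codegen, e.g.
+//
+//   ThinBackend Backend = lto::createWriteIndexesThinBackend(
+//       /*OldPrefix=*/"", /*NewPrefix=*/"", /*ShouldEmitImportsFiles=*/true,
+//       /*LinkedObjectsFile=*/nullptr, /*OnWrite=*/nullptr);
+//
+// Separate compile jobs can then run the real backend on each module using
+// those individual index files.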
+
+Error LTO::runThinLTO(AddStreamFn AddStream, NativeObjectCache Cache,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
+ if (ThinLTO.ModuleMap.empty())
+ return Error::success();
+
+ if (ThinLTO.ModulesToCompile && ThinLTO.ModulesToCompile->empty()) {
+ llvm::errs() << "warning: [ThinLTO] No module compiled\n";
+ return Error::success();
+ }
+
+ if (Conf.CombinedIndexHook &&
+ !Conf.CombinedIndexHook(ThinLTO.CombinedIndex, GUIDPreservedSymbols))
+ return Error::success();
+
+ // Collect for each module the list of functions it defines (GUID ->
+ // Summary).
+ StringMap<GVSummaryMapTy>
+ ModuleToDefinedGVSummaries(ThinLTO.ModuleMap.size());
+ ThinLTO.CombinedIndex.collectDefinedGVSummariesPerModule(
+ ModuleToDefinedGVSummaries);
+ // Create entries for any modules that didn't have any GV summaries
+ // (either they didn't have any GVs to start with, or we suppressed
+ // generation of the summaries because they e.g. had inline assembly
+ // uses that couldn't be promoted/renamed on export). This is so
+ // InProcessThinBackend::start can still launch a backend thread, which
+ // is passed the map of summaries for the module, without any special
+ // handling for this case.
+ for (auto &Mod : ThinLTO.ModuleMap)
+ if (!ModuleToDefinedGVSummaries.count(Mod.first))
+ ModuleToDefinedGVSummaries.try_emplace(Mod.first);
+
+ // Synthesize entry counts for functions in the CombinedIndex.
+ computeSyntheticCounts(ThinLTO.CombinedIndex);
+
+ StringMap<FunctionImporter::ImportMapTy> ImportLists(
+ ThinLTO.ModuleMap.size());
+ StringMap<FunctionImporter::ExportSetTy> ExportLists(
+ ThinLTO.ModuleMap.size());
+ StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
+
+ if (DumpThinCGSCCs)
+ ThinLTO.CombinedIndex.dumpSCCs(outs());
+
+ std::set<GlobalValue::GUID> ExportedGUIDs;
+
+ // If allowed, upgrade public vcall visibility to linkage unit visibility in
+ // the summaries before whole program devirtualization below.
+ updateVCallVisibilityInIndex(ThinLTO.CombinedIndex,
+ Conf.HasWholeProgramVisibility);
+
+ // Perform index-based WPD. This will return immediately if there are
+ // no index entries in the typeIdMetadata map (e.g. if we are instead
+ // performing IR-based WPD in hybrid regular/thin LTO mode).
+ std::map<ValueInfo, std::vector<VTableSlotSummary>> LocalWPDTargetsMap;
+ runWholeProgramDevirtOnIndex(ThinLTO.CombinedIndex, ExportedGUIDs,
+ LocalWPDTargetsMap);
+
+ if (Conf.OptLevel > 0)
+ ComputeCrossModuleImport(ThinLTO.CombinedIndex, ModuleToDefinedGVSummaries,
+ ImportLists, ExportLists);
+
+ // Figure out which symbols need to be internalized. This also needs to happen
+ // at -O0 because summary-based DCE is implemented using internalization, and
+ // we must apply DCE consistently with the full LTO module in order to avoid
+ // undefined references during the final link.
+ for (auto &Res : GlobalResolutions) {
+ // If the symbol does not have external references or it is not prevailing,
+ // there is no need to mark it as exported from a ThinLTO partition.
+ if (Res.second.Partition != GlobalResolution::External ||
+ !Res.second.isPrevailingIRSymbol())
+ continue;
+ auto GUID = GlobalValue::getGUID(
+ GlobalValue::dropLLVMManglingEscape(Res.second.IRName));
+ // Mark exported unless index-based analysis determined it to be dead.
+ if (ThinLTO.CombinedIndex.isGUIDLive(GUID))
+ ExportedGUIDs.insert(GUID);
+ }
+
+ // Any functions referenced by the jump table in the regular LTO object must
+ // be exported.
+ for (auto &Def : ThinLTO.CombinedIndex.cfiFunctionDefs())
+ ExportedGUIDs.insert(
+ GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(Def)));
+
+ auto isExported = [&](StringRef ModuleIdentifier, ValueInfo VI) {
+ const auto &ExportList = ExportLists.find(ModuleIdentifier);
+ return (ExportList != ExportLists.end() && ExportList->second.count(VI)) ||
+ ExportedGUIDs.count(VI.getGUID());
+ };
+
+ // Update local devirtualized targets that were exported by cross-module
+ // importing or by other devirtualizations marked in the ExportedGUIDs set.
+ updateIndexWPDForExports(ThinLTO.CombinedIndex, isExported,
+ LocalWPDTargetsMap);
+
+ auto isPrevailing = [&](GlobalValue::GUID GUID,
+ const GlobalValueSummary *S) {
+ return ThinLTO.PrevailingModuleForGUID[GUID] == S->modulePath();
+ };
+ thinLTOInternalizeAndPromoteInIndex(ThinLTO.CombinedIndex, isExported,
+ isPrevailing);
+
+ auto recordNewLinkage = [&](StringRef ModuleIdentifier,
+ GlobalValue::GUID GUID,
+ GlobalValue::LinkageTypes NewLinkage) {
+ ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
+ };
+ thinLTOResolvePrevailingInIndex(ThinLTO.CombinedIndex, isPrevailing,
+ recordNewLinkage, GUIDPreservedSymbols);
+
+ generateParamAccessSummary(ThinLTO.CombinedIndex);
+
+ std::unique_ptr<ThinBackendProc> BackendProc =
+ ThinLTO.Backend(Conf, ThinLTO.CombinedIndex, ModuleToDefinedGVSummaries,
+ AddStream, Cache);
+
+ auto &ModuleMap =
+ ThinLTO.ModulesToCompile ? *ThinLTO.ModulesToCompile : ThinLTO.ModuleMap;
+
+ auto ProcessOneModule = [&](int I) -> Error {
+ auto &Mod = *(ModuleMap.begin() + I);
+ // Tasks 0 through ParallelCodeGenParallelismLevel-1 are reserved for the
+ // combined module and parallel code generation partitions.
+ return BackendProc->start(RegularLTO.ParallelCodeGenParallelismLevel + I,
+ Mod.second, ImportLists[Mod.first],
+ ExportLists[Mod.first], ResolvedODR[Mod.first],
+ ThinLTO.ModuleMap);
+ };
+
+ if (BackendProc->getThreadCount() == 1) {
+ // Process the modules in the order they were provided on the command-line.
+ // It is important for this codepath to be used for WriteIndexesThinBackend,
+ // to ensure the emitted LinkedObjectsFile lists ThinLTO objects in the same
+ // order as the inputs, which otherwise would affect the final link order.
+ for (int I = 0, E = ModuleMap.size(); I != E; ++I)
+ if (Error E = ProcessOneModule(I))
+ return E;
+ } else {
+ // When executing in parallel, process the largest-bitsize modules first to
+ // improve parallelism and avoid starving the thread pool near the end.
+ // This saves about 15 sec on a 36-core machine while linking `clang.exe`
+ // (out of 100 sec).
+ std::vector<BitcodeModule *> ModulesVec;
+ ModulesVec.reserve(ModuleMap.size());
+ for (auto &Mod : ModuleMap)
+ ModulesVec.push_back(&Mod.second);
+ for (int I : generateModulesOrdering(ModulesVec))
+ if (Error E = ProcessOneModule(I))
+ return E;
+ }
+ return BackendProc->wait();
+}
+
+Expected<std::unique_ptr<ToolOutputFile>> lto::setupLLVMOptimizationRemarks(
+ LLVMContext &Context, StringRef RemarksFilename, StringRef RemarksPasses,
+ StringRef RemarksFormat, bool RemarksWithHotness,
+ Optional<uint64_t> RemarksHotnessThreshold, int Count) {
+ std::string Filename = std::string(RemarksFilename);
+ // For ThinLTO, file.opt.<format> becomes
+ // file.opt.<format>.thin.<num>.<format>.
+ if (!Filename.empty() && Count != -1)
+ Filename =
+ (Twine(Filename) + ".thin." + llvm::utostr(Count) + "." + RemarksFormat)
+ .str();
+
+ auto ResultOrErr = llvm::setupLLVMOptimizationRemarks(
+ Context, Filename, RemarksPasses, RemarksFormat, RemarksWithHotness,
+ RemarksHotnessThreshold);
+ if (Error E = ResultOrErr.takeError())
+ return std::move(E);
+
+ if (*ResultOrErr)
+ (*ResultOrErr)->keep();
+
+ return ResultOrErr;
+}
+
+Expected<std::unique_ptr<ToolOutputFile>>
+lto::setupStatsFile(StringRef StatsFilename) {
+ // Set up an output file to emit statistics.
+ if (StatsFilename.empty())
+ return nullptr;
+
+ llvm::EnableStatistics(false);
+ std::error_code EC;
+ auto StatsFile =
+ std::make_unique<ToolOutputFile>(StatsFilename, EC, sys::fs::OF_None);
+ if (EC)
+ return errorCodeToError(EC);
+
+ StatsFile->keep();
+ return std::move(StatsFile);
+}
+
+// Compute the ordering in which we will process the inputs: the rough
+// heuristic is to sort them by size so that the largest modules get scheduled
+// as soon as possible. This is purely a compile-time optimization.
+std::vector<int> lto::generateModulesOrdering(ArrayRef<BitcodeModule *> R) {
+ std::vector<int> ModulesOrdering;
+ ModulesOrdering.resize(R.size());
+ std::iota(ModulesOrdering.begin(), ModulesOrdering.end(), 0);
+ llvm::sort(ModulesOrdering, [&](int LeftIndex, int RightIndex) {
+ auto LSize = R[LeftIndex]->getBuffer().size();
+ auto RSize = R[RightIndex]->getBuffer().size();
+ return LSize > RSize;
+ });
+ return ModulesOrdering;
+}
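+
+// Worked example (sizes invented): for module buffers of 10 KB, 300 KB and
+// 50 KB at indices 0, 1 and 2, the returned ordering is {1, 2, 0}, so the
+// largest module is scheduled first.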
diff --git a/contrib/libs/llvm12/lib/LTO/LTOBackend.cpp b/contrib/libs/llvm12/lib/LTO/LTOBackend.cpp
new file mode 100644
index 00000000000..1796d6ba60c
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LTO/LTOBackend.cpp
@@ -0,0 +1,746 @@
+//===-LTOBackend.cpp - LLVM Link Time Optimizer Backend -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the "backend" phase of LTO, i.e. it performs
+// optimization and code generation on a loaded module. It is generally used
+// internally by the LTO class but can also be used independently, for example
+// to implement a standalone ThinLTO backend.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LTO/LTOBackend.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/ModuleSummaryAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/IR/LLVMRemarkStreamer.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/LTO/LTO.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/ModuleSymbolTable.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Passes/PassPlugin.h"
+#include "llvm/Passes/StandardInstrumentations.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/ThreadPool.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+#include "llvm/Transforms/Utils/FunctionImportUtils.h"
+#include "llvm/Transforms/Utils/SplitModule.h"
+
+using namespace llvm;
+using namespace lto;
+
+#define DEBUG_TYPE "lto-backend"
+
+enum class LTOBitcodeEmbedding {
+ DoNotEmbed = 0,
+ EmbedOptimized = 1,
+ EmbedPostMergePreOptimized = 2
+};
+
+static cl::opt<LTOBitcodeEmbedding> EmbedBitcode(
+ "lto-embed-bitcode", cl::init(LTOBitcodeEmbedding::DoNotEmbed),
+ cl::values(clEnumValN(LTOBitcodeEmbedding::DoNotEmbed, "none",
+ "Do not embed"),
+ clEnumValN(LTOBitcodeEmbedding::EmbedOptimized, "optimized",
+ "Embed after all optimization passes"),
+ clEnumValN(LTOBitcodeEmbedding::EmbedPostMergePreOptimized,
+ "post-merge-pre-opt",
+ "Embed post merge, but before optimizations")),
+ cl::desc("Embed LLVM bitcode in object files produced by LTO"));
+
+static cl::opt<bool> ThinLTOAssumeMerged(
+ "thinlto-assume-merged", cl::init(false),
+ cl::desc("Assume the input has already undergone ThinLTO function "
+ "importing and the other pre-optimization pipeline changes."));
+
+LLVM_ATTRIBUTE_NORETURN static void reportOpenError(StringRef Path, Twine Msg) {
+ errs() << "failed to open " << Path << ": " << Msg << '\n';
+ errs().flush();
+ exit(1);
+}
+
+Error Config::addSaveTemps(std::string OutputFileName,
+ bool UseInputModulePath) {
+ ShouldDiscardValueNames = false;
+
+ std::error_code EC;
+ ResolutionFile = std::make_unique<raw_fd_ostream>(
+ OutputFileName + "resolution.txt", EC, sys::fs::OpenFlags::OF_Text);
+ if (EC) {
+ ResolutionFile.reset();
+ return errorCodeToError(EC);
+ }
+
+ auto setHook = [&](std::string PathSuffix, ModuleHookFn &Hook) {
+ // Keep track of the hook provided by the linker, which also needs to run.
+ ModuleHookFn LinkerHook = Hook;
+ Hook = [=](unsigned Task, const Module &M) {
+ // If the linker's hook returned false, we need to pass that result
+ // through.
+ if (LinkerHook && !LinkerHook(Task, M))
+ return false;
+
+ std::string PathPrefix;
+ // If this is the combined module (not a ThinLTO backend compile) or the
+ // user hasn't requested using the input module's path, emit to a file
+ // named from the provided OutputFileName with the Task ID appended.
+ if (M.getModuleIdentifier() == "ld-temp.o" || !UseInputModulePath) {
+ PathPrefix = OutputFileName;
+ if (Task != (unsigned)-1)
+ PathPrefix += utostr(Task) + ".";
+ } else
+ PathPrefix = M.getModuleIdentifier() + ".";
+ std::string Path = PathPrefix + PathSuffix + ".bc";
+ std::error_code EC;
+ raw_fd_ostream OS(Path, EC, sys::fs::OpenFlags::OF_None);
+ // Because -save-temps is a debugging feature, we report the error
+ // directly and exit.
+ if (EC)
+ reportOpenError(Path, EC.message());
+ WriteBitcodeToFile(M, OS, /*ShouldPreserveUseListOrder=*/false);
+ return true;
+ };
+ };
+
+ setHook("0.preopt", PreOptModuleHook);
+ setHook("1.promote", PostPromoteModuleHook);
+ setHook("2.internalize", PostInternalizeModuleHook);
+ setHook("3.import", PostImportModuleHook);
+ setHook("4.opt", PostOptModuleHook);
+ setHook("5.precodegen", PreCodeGenModuleHook);
+
+ CombinedIndexHook =
+ [=](const ModuleSummaryIndex &Index,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
+ std::string Path = OutputFileName + "index.bc";
+ std::error_code EC;
+ raw_fd_ostream OS(Path, EC, sys::fs::OpenFlags::OF_None);
+ // Because -save-temps is a debugging feature, we report the error
+ // directly and exit.
+ if (EC)
+ reportOpenError(Path, EC.message());
+ WriteIndexToFile(Index, OS);
+
+ Path = OutputFileName + "index.dot";
+ raw_fd_ostream OSDot(Path, EC, sys::fs::OpenFlags::OF_None);
+ if (EC)
+ reportOpenError(Path, EC.message());
+ Index.exportToDot(OSDot, GUIDPreservedSymbols);
+ return true;
+ };
+
+ return Error::success();
+}
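+
+// Example of the files produced, assuming OutputFileName == "out/lto." and a
+// single Task 0 (names are illustrative): "out/lto.resolution.txt",
+// "out/lto.index.bc", "out/lto.index.dot", plus per-stage bitcode from
+// "out/lto.0.0.preopt.bc" through "out/lto.0.5.precodegen.bc".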
+
+#define HANDLE_EXTENSION(Ext) \
+ llvm::PassPluginLibraryInfo get##Ext##PluginInfo();
+#include "llvm/Support/Extension.def"
+
+static void RegisterPassPlugins(ArrayRef<std::string> PassPlugins,
+ PassBuilder &PB) {
+#define HANDLE_EXTENSION(Ext) \
+ get##Ext##PluginInfo().RegisterPassBuilderCallbacks(PB);
+#include "llvm/Support/Extension.def"
+
+ // Load requested pass plugins and let them register pass builder callbacks
+ for (auto &PluginFN : PassPlugins) {
+ auto PassPlugin = PassPlugin::Load(PluginFN);
+ if (!PassPlugin) {
+ errs() << "Failed to load passes from '" << PluginFN
+ << "'. Request ignored.\n";
+ continue;
+ }
+
+ PassPlugin->registerPassBuilderCallbacks(PB);
+ }
+}
+
+static std::unique_ptr<TargetMachine>
+createTargetMachine(const Config &Conf, const Target *TheTarget, Module &M) {
+ StringRef TheTriple = M.getTargetTriple();
+ SubtargetFeatures Features;
+ Features.getDefaultSubtargetFeatures(Triple(TheTriple));
+ for (const std::string &A : Conf.MAttrs)
+ Features.AddFeature(A);
+
+ Reloc::Model RelocModel;
+ if (Conf.RelocModel)
+ RelocModel = *Conf.RelocModel;
+ else
+ RelocModel =
+ M.getPICLevel() == PICLevel::NotPIC ? Reloc::Static : Reloc::PIC_;
+
+ Optional<CodeModel::Model> CodeModel;
+ if (Conf.CodeModel)
+ CodeModel = *Conf.CodeModel;
+ else
+ CodeModel = M.getCodeModel();
+
+ std::unique_ptr<TargetMachine> TM(TheTarget->createTargetMachine(
+ TheTriple, Conf.CPU, Features.getString(), Conf.Options, RelocModel,
+ CodeModel, Conf.CGOptLevel));
+ assert(TM && "Failed to create target machine");
+ return TM;
+}
+
+static void runNewPMPasses(const Config &Conf, Module &Mod, TargetMachine *TM,
+ unsigned OptLevel, bool IsThinLTO,
+ ModuleSummaryIndex *ExportSummary,
+ const ModuleSummaryIndex *ImportSummary) {
+ Optional<PGOOptions> PGOOpt;
+ if (!Conf.SampleProfile.empty())
+ PGOOpt = PGOOptions(Conf.SampleProfile, "", Conf.ProfileRemapping,
+ PGOOptions::SampleUse, PGOOptions::NoCSAction, true);
+ else if (Conf.RunCSIRInstr) {
+ PGOOpt = PGOOptions("", Conf.CSIRProfile, Conf.ProfileRemapping,
+ PGOOptions::IRUse, PGOOptions::CSIRInstr);
+ } else if (!Conf.CSIRProfile.empty()) {
+ PGOOpt = PGOOptions(Conf.CSIRProfile, "", Conf.ProfileRemapping,
+ PGOOptions::IRUse, PGOOptions::CSIRUse);
+ }
+
+ PassInstrumentationCallbacks PIC;
+ StandardInstrumentations SI(Conf.DebugPassManager);
+ SI.registerCallbacks(PIC);
+ PassBuilder PB(Conf.DebugPassManager, TM, Conf.PTO, PGOOpt, &PIC);
+ AAManager AA;
+
+ // Build the default AA pipeline.
+ if (auto Err = PB.parseAAPipeline(AA, "default"))
+ report_fatal_error("Error parsing default AA pipeline");
+
+ RegisterPassPlugins(Conf.PassPlugins, PB);
+
+ LoopAnalysisManager LAM(Conf.DebugPassManager);
+ FunctionAnalysisManager FAM(Conf.DebugPassManager);
+ CGSCCAnalysisManager CGAM(Conf.DebugPassManager);
+ ModuleAnalysisManager MAM(Conf.DebugPassManager);
+
+ std::unique_ptr<TargetLibraryInfoImpl> TLII(
+ new TargetLibraryInfoImpl(Triple(TM->getTargetTriple())));
+ if (Conf.Freestanding)
+ TLII->disableAllFunctions();
+ FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
+
+ // Register the AA manager first so that our version is the one used.
+ FAM.registerPass([&] { return std::move(AA); });
+
+ // Register all the basic analyses with the managers.
+ PB.registerModuleAnalyses(MAM);
+ PB.registerCGSCCAnalyses(CGAM);
+ PB.registerFunctionAnalyses(FAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+
+ ModulePassManager MPM(Conf.DebugPassManager);
+
+ if (!Conf.DisableVerify)
+ MPM.addPass(VerifierPass());
+
+ PassBuilder::OptimizationLevel OL;
+
+ switch (OptLevel) {
+ default:
+ llvm_unreachable("Invalid optimization level");
+ case 0:
+ OL = PassBuilder::OptimizationLevel::O0;
+ break;
+ case 1:
+ OL = PassBuilder::OptimizationLevel::O1;
+ break;
+ case 2:
+ OL = PassBuilder::OptimizationLevel::O2;
+ break;
+ case 3:
+ OL = PassBuilder::OptimizationLevel::O3;
+ break;
+ }
+
+ if (IsThinLTO)
+ MPM.addPass(PB.buildThinLTODefaultPipeline(OL, ImportSummary));
+ else
+ MPM.addPass(PB.buildLTODefaultPipeline(OL, ExportSummary));
+
+ if (!Conf.DisableVerify)
+ MPM.addPass(VerifierPass());
+
+ MPM.run(Mod, MAM);
+}
+
+static void runNewPMCustomPasses(const Config &Conf, Module &Mod,
+ TargetMachine *TM, std::string PipelineDesc,
+ std::string AAPipelineDesc,
+ bool DisableVerify) {
+ PassBuilder PB(Conf.DebugPassManager, TM);
+ AAManager AA;
+
+ // Parse a custom AA pipeline if asked to.
+ if (!AAPipelineDesc.empty())
+ if (auto Err = PB.parseAAPipeline(AA, AAPipelineDesc))
+ report_fatal_error("unable to parse AA pipeline description '" +
+ AAPipelineDesc + "': " + toString(std::move(Err)));
+
+ RegisterPassPlugins(Conf.PassPlugins, PB);
+
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager MAM;
+
+ std::unique_ptr<TargetLibraryInfoImpl> TLII(
+ new TargetLibraryInfoImpl(Triple(TM->getTargetTriple())));
+ if (Conf.Freestanding)
+ TLII->disableAllFunctions();
+ FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
+
+ // Register the AA manager first so that our version is the one used.
+ FAM.registerPass([&] { return std::move(AA); });
+
+ // Register all the basic analyses with the managers.
+ PB.registerModuleAnalyses(MAM);
+ PB.registerCGSCCAnalyses(CGAM);
+ PB.registerFunctionAnalyses(FAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+
+ ModulePassManager MPM;
+
+ // Always verify the input.
+ MPM.addPass(VerifierPass());
+
+ // Now add all the passes we have been asked to run.
+ if (auto Err = PB.parsePassPipeline(MPM, PipelineDesc))
+ report_fatal_error("unable to parse pass pipeline description '" +
+ PipelineDesc + "': " + toString(std::move(Err)));
+
+ if (!DisableVerify)
+ MPM.addPass(VerifierPass());
+ MPM.run(Mod, MAM);
+}
+
+static void runOldPMPasses(const Config &Conf, Module &Mod, TargetMachine *TM,
+ bool IsThinLTO, ModuleSummaryIndex *ExportSummary,
+ const ModuleSummaryIndex *ImportSummary) {
+ legacy::PassManager passes;
+ passes.add(createTargetTransformInfoWrapperPass(TM->getTargetIRAnalysis()));
+
+ PassManagerBuilder PMB;
+ PMB.LibraryInfo = new TargetLibraryInfoImpl(Triple(TM->getTargetTriple()));
+ if (Conf.Freestanding)
+ PMB.LibraryInfo->disableAllFunctions();
+ PMB.Inliner = createFunctionInliningPass();
+ PMB.ExportSummary = ExportSummary;
+ PMB.ImportSummary = ImportSummary;
+ // Unconditionally verify input since it is not verified before this
+ // point and has unknown origin.
+ PMB.VerifyInput = true;
+ PMB.VerifyOutput = !Conf.DisableVerify;
+ PMB.LoopVectorize = true;
+ PMB.SLPVectorize = true;
+ PMB.OptLevel = Conf.OptLevel;
+ PMB.PGOSampleUse = Conf.SampleProfile;
+ PMB.EnablePGOCSInstrGen = Conf.RunCSIRInstr;
+ if (!Conf.RunCSIRInstr && !Conf.CSIRProfile.empty()) {
+ PMB.EnablePGOCSInstrUse = true;
+ PMB.PGOInstrUse = Conf.CSIRProfile;
+ }
+ if (IsThinLTO)
+ PMB.populateThinLTOPassManager(passes);
+ else
+ PMB.populateLTOPassManager(passes);
+ passes.run(Mod);
+}
+
+bool lto::opt(const Config &Conf, TargetMachine *TM, unsigned Task, Module &Mod,
+ bool IsThinLTO, ModuleSummaryIndex *ExportSummary,
+ const ModuleSummaryIndex *ImportSummary,
+ const std::vector<uint8_t> &CmdArgs) {
+ if (EmbedBitcode == LTOBitcodeEmbedding::EmbedPostMergePreOptimized) {
+ // FIXME: the motivation for capturing post-merge bitcode and command line
+ // is replicating the compilation environment from bitcode, without needing
+ // to understand the dependencies (the functions to be imported). This
+ // assumes a clang-based invocation, in which case we have the command
+ // line.
+ // It's not very clear how the above motivation would map in the
+ // linker-based case, so we currently don't plumb the command line args in
+ // that case.
+ if (CmdArgs.empty())
+ LLVM_DEBUG(
+ dbgs() << "Post-(Thin)LTO merge bitcode embedding was requested, but "
+ "command line arguments are not available");
+ llvm::EmbedBitcodeInModule(Mod, llvm::MemoryBufferRef(),
+ /*EmbedBitcode*/ true, /*EmbedCmdline*/ true,
+ /*Cmdline*/ CmdArgs);
+ }
+ // FIXME: Plumb the combined index into the new pass manager.
+ if (!Conf.OptPipeline.empty())
+ runNewPMCustomPasses(Conf, Mod, TM, Conf.OptPipeline, Conf.AAPipeline,
+ Conf.DisableVerify);
+ else if (Conf.UseNewPM)
+ runNewPMPasses(Conf, Mod, TM, Conf.OptLevel, IsThinLTO, ExportSummary,
+ ImportSummary);
+ else
+ runOldPMPasses(Conf, Mod, TM, IsThinLTO, ExportSummary, ImportSummary);
+ return !Conf.PostOptModuleHook || Conf.PostOptModuleHook(Task, Mod);
+}
+
+static void codegen(const Config &Conf, TargetMachine *TM,
+ AddStreamFn AddStream, unsigned Task, Module &Mod,
+ const ModuleSummaryIndex &CombinedIndex) {
+ if (Conf.PreCodeGenModuleHook && !Conf.PreCodeGenModuleHook(Task, Mod))
+ return;
+
+ if (EmbedBitcode == LTOBitcodeEmbedding::EmbedOptimized)
+ llvm::EmbedBitcodeInModule(Mod, llvm::MemoryBufferRef(),
+ /*EmbedBitcode*/ true,
+ /*EmbedCmdline*/ false,
+ /*CmdArgs*/ std::vector<uint8_t>());
+
+ std::unique_ptr<ToolOutputFile> DwoOut;
+ SmallString<1024> DwoFile(Conf.SplitDwarfOutput);
+ if (!Conf.DwoDir.empty()) {
+ std::error_code EC;
+ if (auto EC = llvm::sys::fs::create_directories(Conf.DwoDir))
+ report_fatal_error("Failed to create directory " + Conf.DwoDir + ": " +
+ EC.message());
+
+ DwoFile = Conf.DwoDir;
+ sys::path::append(DwoFile, std::to_string(Task) + ".dwo");
+ TM->Options.MCOptions.SplitDwarfFile = std::string(DwoFile);
+ } else
+ TM->Options.MCOptions.SplitDwarfFile = Conf.SplitDwarfFile;
+
+ if (!DwoFile.empty()) {
+ std::error_code EC;
+ DwoOut = std::make_unique<ToolOutputFile>(DwoFile, EC, sys::fs::OF_None);
+ if (EC)
+ report_fatal_error("Failed to open " + DwoFile + ": " + EC.message());
+ }
+
+ auto Stream = AddStream(Task);
+ legacy::PassManager CodeGenPasses;
+ CodeGenPasses.add(
+ createImmutableModuleSummaryIndexWrapperPass(&CombinedIndex));
+ if (Conf.PreCodeGenPassesHook)
+ Conf.PreCodeGenPassesHook(CodeGenPasses);
+ if (TM->addPassesToEmitFile(CodeGenPasses, *Stream->OS,
+ DwoOut ? &DwoOut->os() : nullptr,
+ Conf.CGFileType))
+ report_fatal_error("Failed to setup codegen");
+ CodeGenPasses.run(Mod);
+
+ if (DwoOut)
+ DwoOut->keep();
+}
+
+static void splitCodeGen(const Config &C, TargetMachine *TM,
+ AddStreamFn AddStream,
+ unsigned ParallelCodeGenParallelismLevel,
+ std::unique_ptr<Module> Mod,
+ const ModuleSummaryIndex &CombinedIndex) {
+ ThreadPool CodegenThreadPool(
+ heavyweight_hardware_concurrency(ParallelCodeGenParallelismLevel));
+ unsigned ThreadCount = 0;
+ const Target *T = &TM->getTarget();
+
+ SplitModule(
+ std::move(Mod), ParallelCodeGenParallelismLevel,
+ [&](std::unique_ptr<Module> MPart) {
+ // We want to clone the module in a new context to multi-thread the
+ // codegen. We do it by serializing partition modules to bitcode
+ // (while still on the main thread, in order to avoid data races) and
+ // spinning up new threads which deserialize the partitions into
+ // separate contexts.
+ // FIXME: Provide a more direct way to do this in LLVM.
+ SmallString<0> BC;
+ raw_svector_ostream BCOS(BC);
+ WriteBitcodeToFile(*MPart, BCOS);
+
+ // Enqueue the task
+ CodegenThreadPool.async(
+ [&](const SmallString<0> &BC, unsigned ThreadId) {
+ LTOLLVMContext Ctx(C);
+ Expected<std::unique_ptr<Module>> MOrErr = parseBitcodeFile(
+ MemoryBufferRef(StringRef(BC.data(), BC.size()), "ld-temp.o"),
+ Ctx);
+ if (!MOrErr)
+ report_fatal_error("Failed to read bitcode");
+ std::unique_ptr<Module> MPartInCtx = std::move(MOrErr.get());
+
+ std::unique_ptr<TargetMachine> TM =
+ createTargetMachine(C, T, *MPartInCtx);
+
+ codegen(C, TM.get(), AddStream, ThreadId, *MPartInCtx,
+ CombinedIndex);
+ },
+ // Pass BC using std::move to ensure that it gets moved rather than
+ // copied into the thread's context.
+ std::move(BC), ThreadCount++);
+ },
+ false);
+
+ // Because the inner lambda (which runs in a worker thread) captures our local
+ // variables, we need to wait for the worker threads to terminate before we
+ // can leave the function scope.
+ CodegenThreadPool.wait();
+}
+
+static Expected<const Target *> initAndLookupTarget(const Config &C,
+ Module &Mod) {
+ if (!C.OverrideTriple.empty())
+ Mod.setTargetTriple(C.OverrideTriple);
+ else if (Mod.getTargetTriple().empty())
+ Mod.setTargetTriple(C.DefaultTriple);
+
+ std::string Msg;
+ const Target *T = TargetRegistry::lookupTarget(Mod.getTargetTriple(), Msg);
+ if (!T)
+ return make_error<StringError>(Msg, inconvertibleErrorCode());
+ return T;
+}
+
+Error lto::finalizeOptimizationRemarks(
+ std::unique_ptr<ToolOutputFile> DiagOutputFile) {
+ // Make sure we flush the diagnostic remarks file in case the linker doesn't
+ // call the global destructors before exiting.
+ if (!DiagOutputFile)
+ return Error::success();
+ DiagOutputFile->keep();
+ DiagOutputFile->os().flush();
+ return Error::success();
+}
+
+Error lto::backend(const Config &C, AddStreamFn AddStream,
+ unsigned ParallelCodeGenParallelismLevel,
+ std::unique_ptr<Module> Mod,
+ ModuleSummaryIndex &CombinedIndex) {
+ Expected<const Target *> TOrErr = initAndLookupTarget(C, *Mod);
+ if (!TOrErr)
+ return TOrErr.takeError();
+
+ std::unique_ptr<TargetMachine> TM = createTargetMachine(C, *TOrErr, *Mod);
+
+ if (!C.CodeGenOnly) {
+ if (!opt(C, TM.get(), 0, *Mod, /*IsThinLTO=*/false,
+ /*ExportSummary=*/&CombinedIndex, /*ImportSummary=*/nullptr,
+ /*CmdArgs*/ std::vector<uint8_t>()))
+ return Error::success();
+ }
+
+ if (ParallelCodeGenParallelismLevel == 1) {
+ codegen(C, TM.get(), AddStream, 0, *Mod, CombinedIndex);
+ } else {
+ splitCodeGen(C, TM.get(), AddStream, ParallelCodeGenParallelismLevel,
+ std::move(Mod), CombinedIndex);
+ }
+ return Error::success();
+}
+
+static void dropDeadSymbols(Module &Mod, const GVSummaryMapTy &DefinedGlobals,
+ const ModuleSummaryIndex &Index) {
+ std::vector<GlobalValue*> DeadGVs;
+ for (auto &GV : Mod.global_values())
+ if (GlobalValueSummary *GVS = DefinedGlobals.lookup(GV.getGUID()))
+ if (!Index.isGlobalValueLive(GVS)) {
+ DeadGVs.push_back(&GV);
+ convertToDeclaration(GV);
+ }
+
+ // Now that all dead bodies have been dropped, delete the actual objects
+ // themselves when possible.
+ for (GlobalValue *GV : DeadGVs) {
+ GV->removeDeadConstantUsers();
+ // Might reference something defined in a native object (i.e. we dropped a
+ // non-prevailing IR def, but need to keep the declaration).
+ if (GV->use_empty())
+ GV->eraseFromParent();
+ }
+}
+
+Error lto::thinBackend(const Config &Conf, unsigned Task, AddStreamFn AddStream,
+ Module &Mod, const ModuleSummaryIndex &CombinedIndex,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const GVSummaryMapTy &DefinedGlobals,
+ MapVector<StringRef, BitcodeModule> &ModuleMap,
+ const std::vector<uint8_t> &CmdArgs) {
+ Expected<const Target *> TOrErr = initAndLookupTarget(Conf, Mod);
+ if (!TOrErr)
+ return TOrErr.takeError();
+
+ std::unique_ptr<TargetMachine> TM = createTargetMachine(Conf, *TOrErr, Mod);
+
+ // Set up optimization remarks.
+ auto DiagFileOrErr = lto::setupLLVMOptimizationRemarks(
+ Mod.getContext(), Conf.RemarksFilename, Conf.RemarksPasses,
+ Conf.RemarksFormat, Conf.RemarksWithHotness, Conf.RemarksHotnessThreshold,
+ Task);
+ if (!DiagFileOrErr)
+ return DiagFileOrErr.takeError();
+ auto DiagnosticOutputFile = std::move(*DiagFileOrErr);
+
+ // Set the partial sample profile ratio in the profile summary module flag of
+ // the module, if applicable.
+ Mod.setPartialSampleProfileRatio(CombinedIndex);
+
+ if (Conf.CodeGenOnly) {
+ codegen(Conf, TM.get(), AddStream, Task, Mod, CombinedIndex);
+ return finalizeOptimizationRemarks(std::move(DiagnosticOutputFile));
+ }
+
+ if (Conf.PreOptModuleHook && !Conf.PreOptModuleHook(Task, Mod))
+ return finalizeOptimizationRemarks(std::move(DiagnosticOutputFile));
+
+ auto OptimizeAndCodegen =
+ [&](Module &Mod, TargetMachine *TM,
+ std::unique_ptr<ToolOutputFile> DiagnosticOutputFile) {
+ if (!opt(Conf, TM, Task, Mod, /*IsThinLTO=*/true,
+ /*ExportSummary=*/nullptr, /*ImportSummary=*/&CombinedIndex,
+ CmdArgs))
+ return finalizeOptimizationRemarks(std::move(DiagnosticOutputFile));
+
+ codegen(Conf, TM, AddStream, Task, Mod, CombinedIndex);
+ return finalizeOptimizationRemarks(std::move(DiagnosticOutputFile));
+ };
+
+ if (ThinLTOAssumeMerged)
+ return OptimizeAndCodegen(Mod, TM.get(), std::move(DiagnosticOutputFile));
+
+ // When linking an ELF shared object, dso_local should be dropped. We
+ // conservatively do this for -fpic.
+ bool ClearDSOLocalOnDeclarations =
+ TM->getTargetTriple().isOSBinFormatELF() &&
+ TM->getRelocationModel() != Reloc::Static &&
+ Mod.getPIELevel() == PIELevel::Default;
+ renameModuleForThinLTO(Mod, CombinedIndex, ClearDSOLocalOnDeclarations);
+
+ dropDeadSymbols(Mod, DefinedGlobals, CombinedIndex);
+
+ thinLTOResolvePrevailingInModule(Mod, DefinedGlobals);
+
+ if (Conf.PostPromoteModuleHook && !Conf.PostPromoteModuleHook(Task, Mod))
+ return finalizeOptimizationRemarks(std::move(DiagnosticOutputFile));
+
+ if (!DefinedGlobals.empty())
+ thinLTOInternalizeModule(Mod, DefinedGlobals);
+
+ if (Conf.PostInternalizeModuleHook &&
+ !Conf.PostInternalizeModuleHook(Task, Mod))
+ return finalizeOptimizationRemarks(std::move(DiagnosticOutputFile));
+
+ auto ModuleLoader = [&](StringRef Identifier) {
+ assert(Mod.getContext().isODRUniquingDebugTypes() &&
+ "ODR Type uniquing should be enabled on the context");
+ auto I = ModuleMap.find(Identifier);
+ assert(I != ModuleMap.end());
+ return I->second.getLazyModule(Mod.getContext(),
+ /*ShouldLazyLoadMetadata=*/true,
+ /*IsImporting*/ true);
+ };
+
+ FunctionImporter Importer(CombinedIndex, ModuleLoader,
+ ClearDSOLocalOnDeclarations);
+ if (Error Err = Importer.importFunctions(Mod, ImportList).takeError())
+ return Err;
+
+ if (Conf.PostImportModuleHook && !Conf.PostImportModuleHook(Task, Mod))
+ return finalizeOptimizationRemarks(std::move(DiagnosticOutputFile));
+
+ return OptimizeAndCodegen(Mod, TM.get(), std::move(DiagnosticOutputFile));
+}
+
+BitcodeModule *lto::findThinLTOModule(MutableArrayRef<BitcodeModule> BMs) {
+ if (ThinLTOAssumeMerged && BMs.size() == 1)
+ return BMs.begin();
+
+ for (BitcodeModule &BM : BMs) {
+ Expected<BitcodeLTOInfo> LTOInfo = BM.getLTOInfo();
+ if (LTOInfo && LTOInfo->IsThinLTO)
+ return &BM;
+ }
+ return nullptr;
+}
+
+Expected<BitcodeModule> lto::findThinLTOModule(MemoryBufferRef MBRef) {
+ Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
+ if (!BMsOrErr)
+ return BMsOrErr.takeError();
+
+ // The bitcode file may contain multiple modules; we want the one that is
+ // marked as being the ThinLTO module.
+ if (const BitcodeModule *Bm = lto::findThinLTOModule(*BMsOrErr))
+ return *Bm;
+
+ return make_error<StringError>("Could not find module summary",
+ inconvertibleErrorCode());
+}
+
+bool lto::loadReferencedModules(
+ const Module &M, const ModuleSummaryIndex &CombinedIndex,
+ FunctionImporter::ImportMapTy &ImportList,
+ MapVector<llvm::StringRef, llvm::BitcodeModule> &ModuleMap,
+ std::vector<std::unique_ptr<llvm::MemoryBuffer>>
+ &OwnedImportsLifetimeManager) {
+ if (ThinLTOAssumeMerged)
+ return true;
+ // We can simply import the values mentioned in the combined index, since
+ // we should only invoke this using the individual indexes written out
+ // via a WriteIndexesThinBackend.
+ for (const auto &GlobalList : CombinedIndex) {
+ // Ignore entries for undefined references.
+ if (GlobalList.second.SummaryList.empty())
+ continue;
+
+ auto GUID = GlobalList.first;
+ for (const auto &Summary : GlobalList.second.SummaryList) {
+ // Skip the summaries for the importing module. These are included to
+ // e.g. record required linkage changes.
+ if (Summary->modulePath() == M.getModuleIdentifier())
+ continue;
+ // Add an entry to provoke importing by thinBackend.
+ ImportList[Summary->modulePath()].insert(GUID);
+ }
+ }
+
+ for (auto &I : ImportList) {
+ ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MBOrErr =
+ llvm::MemoryBuffer::getFile(I.first());
+ if (!MBOrErr) {
+ errs() << "Error loading imported file '" << I.first()
+ << "': " << MBOrErr.getError().message() << "\n";
+ return false;
+ }
+
+ Expected<BitcodeModule> BMOrErr = findThinLTOModule(**MBOrErr);
+ if (!BMOrErr) {
+ handleAllErrors(BMOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ errs() << "Error loading imported file '" << I.first()
+ << "': " << EIB.message() << '\n';
+ });
+ return false;
+ }
+ ModuleMap.insert({I.first(), *BMOrErr});
+ OwnedImportsLifetimeManager.push_back(std::move(*MBOrErr));
+ }
+ return true;
+}
diff --git a/contrib/libs/llvm12/lib/LTO/LTOCodeGenerator.cpp b/contrib/libs/llvm12/lib/LTO/LTOCodeGenerator.cpp
new file mode 100644
index 00000000000..027e197e1e0
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LTO/LTOCodeGenerator.cpp
@@ -0,0 +1,733 @@
+//===-LTOCodeGenerator.cpp - LLVM Link Time Optimizer ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Link Time Optimization library. This library is
+// intended to be used by linkers to optimize code at link time.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LTO/legacy/LTOCodeGenerator.h"
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/CodeGen/ParallelCG.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/Config/config.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LLVMRemarkStreamer.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassTimingInfo.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/LTO/LTO.h"
+#include "llvm/LTO/legacy/LTOModule.h"
+#include "llvm/LTO/legacy/UpdateCompilerUsed.h"
+#include "llvm/Linker/Linker.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Remarks/HotnessThresholdParser.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/Internalize.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
+#include "llvm/Transforms/ObjCARC.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include <system_error>
+using namespace llvm;
+
+const char* LTOCodeGenerator::getVersionString() {
+#ifdef LLVM_VERSION_INFO
+ return PACKAGE_NAME " version " PACKAGE_VERSION ", " LLVM_VERSION_INFO;
+#else
+ return PACKAGE_NAME " version " PACKAGE_VERSION;
+#endif
+}
+
+namespace llvm {
+cl::opt<bool> LTODiscardValueNames(
+ "lto-discard-value-names",
+ cl::desc("Strip names from Value during LTO (other than GlobalValue)."),
+#ifdef NDEBUG
+ cl::init(true),
+#else
+ cl::init(false),
+#endif
+ cl::Hidden);
+
+cl::opt<bool> RemarksWithHotness(
+ "lto-pass-remarks-with-hotness",
+ cl::desc("With PGO, include profile count in optimization remarks"),
+ cl::Hidden);
+
+cl::opt<Optional<uint64_t>, false, remarks::HotnessThresholdParser>
+ RemarksHotnessThreshold(
+ "lto-pass-remarks-hotness-threshold",
+ cl::desc("Minimum profile count required for an "
+ "optimization remark to be output."
+ " Use 'auto' to apply the threshold from profile summary."),
+ cl::value_desc("uint or 'auto'"), cl::init(0), cl::Hidden);
+
+cl::opt<std::string>
+ RemarksFilename("lto-pass-remarks-output",
+ cl::desc("Output filename for pass remarks"),
+ cl::value_desc("filename"));
+
+cl::opt<std::string>
+ RemarksPasses("lto-pass-remarks-filter",
+ cl::desc("Only record optimization remarks from passes whose "
+ "names match the given regular expression"),
+ cl::value_desc("regex"));
+
+cl::opt<std::string> RemarksFormat(
+ "lto-pass-remarks-format",
+ cl::desc("The format used for serializing remarks (default: YAML)"),
+ cl::value_desc("format"), cl::init("yaml"));
+
+cl::opt<std::string> LTOStatsFile(
+ "lto-stats-file",
+ cl::desc("Save statistics to the specified file"),
+ cl::Hidden);
+}
+
+LTOCodeGenerator::LTOCodeGenerator(LLVMContext &Context)
+ : Context(Context), MergedModule(new Module("ld-temp.o", Context)),
+ TheLinker(new Linker(*MergedModule)) {
+ Context.setDiscardValueNames(LTODiscardValueNames);
+ Context.enableDebugTypeODRUniquing();
+ initializeLTOPasses();
+}
+
+LTOCodeGenerator::~LTOCodeGenerator() {}
+
+// Initialize LTO passes. Please keep this function in sync with
+// PassManagerBuilder::populateLTOPassManager(), and make sure all LTO
+// passes are initialized.
+void LTOCodeGenerator::initializeLTOPasses() {
+ PassRegistry &R = *PassRegistry::getPassRegistry();
+
+ initializeInternalizeLegacyPassPass(R);
+ initializeIPSCCPLegacyPassPass(R);
+ initializeGlobalOptLegacyPassPass(R);
+ initializeConstantMergeLegacyPassPass(R);
+ initializeDAHPass(R);
+ initializeInstructionCombiningPassPass(R);
+ initializeSimpleInlinerPass(R);
+ initializePruneEHPass(R);
+ initializeGlobalDCELegacyPassPass(R);
+ initializeOpenMPOptLegacyPassPass(R);
+ initializeArgPromotionPass(R);
+ initializeJumpThreadingPass(R);
+ initializeSROALegacyPassPass(R);
+ initializeAttributorLegacyPassPass(R);
+ initializeAttributorCGSCCLegacyPassPass(R);
+ initializePostOrderFunctionAttrsLegacyPassPass(R);
+ initializeReversePostOrderFunctionAttrsLegacyPassPass(R);
+ initializeGlobalsAAWrapperPassPass(R);
+ initializeLegacyLICMPassPass(R);
+ initializeMergedLoadStoreMotionLegacyPassPass(R);
+ initializeGVNLegacyPassPass(R);
+ initializeMemCpyOptLegacyPassPass(R);
+ initializeDCELegacyPassPass(R);
+ initializeCFGSimplifyPassPass(R);
+}
+
+void LTOCodeGenerator::setAsmUndefinedRefs(LTOModule *Mod) {
+ const std::vector<StringRef> &undefs = Mod->getAsmUndefinedRefs();
+ for (int i = 0, e = undefs.size(); i != e; ++i)
+ AsmUndefinedRefs.insert(undefs[i]);
+}
+
+bool LTOCodeGenerator::addModule(LTOModule *Mod) {
+ assert(&Mod->getModule().getContext() == &Context &&
+ "Expected module in same context");
+
+ bool ret = TheLinker->linkInModule(Mod->takeModule());
+ setAsmUndefinedRefs(Mod);
+
+ // We've just changed the input, so let's make sure we verify it.
+ HasVerifiedInput = false;
+
+ return !ret;
+}
+
+void LTOCodeGenerator::setModule(std::unique_ptr<LTOModule> Mod) {
+ assert(&Mod->getModule().getContext() == &Context &&
+ "Expected module in same context");
+
+ AsmUndefinedRefs.clear();
+
+ MergedModule = Mod->takeModule();
+ TheLinker = std::make_unique<Linker>(*MergedModule);
+ setAsmUndefinedRefs(&*Mod);
+
+ // We've just changed the input, so let's make sure we verify it.
+ HasVerifiedInput = false;
+}
+
+void LTOCodeGenerator::setTargetOptions(const TargetOptions &Options) {
+ this->Options = Options;
+}
+
+void LTOCodeGenerator::setDebugInfo(lto_debug_model Debug) {
+ switch (Debug) {
+ case LTO_DEBUG_MODEL_NONE:
+ EmitDwarfDebugInfo = false;
+ return;
+
+ case LTO_DEBUG_MODEL_DWARF:
+ EmitDwarfDebugInfo = true;
+ return;
+ }
+ llvm_unreachable("Unknown debug format!");
+}
+
+void LTOCodeGenerator::setOptLevel(unsigned Level) {
+ OptLevel = Level;
+ switch (OptLevel) {
+ case 0:
+ CGOptLevel = CodeGenOpt::None;
+ return;
+ case 1:
+ CGOptLevel = CodeGenOpt::Less;
+ return;
+ case 2:
+ CGOptLevel = CodeGenOpt::Default;
+ return;
+ case 3:
+ CGOptLevel = CodeGenOpt::Aggressive;
+ return;
+ }
+ llvm_unreachable("Unknown optimization level!");
+}
+
+bool LTOCodeGenerator::writeMergedModules(StringRef Path) {
+ if (!determineTarget())
+ return false;
+
+ // We always run the verifier once on the merged module.
+ verifyMergedModuleOnce();
+
+ // mark which symbols cannot be internalized
+ applyScopeRestrictions();
+
+ // create output file
+ std::error_code EC;
+ ToolOutputFile Out(Path, EC, sys::fs::OF_None);
+ if (EC) {
+ std::string ErrMsg = "could not open bitcode file for writing: ";
+ ErrMsg += Path.str() + ": " + EC.message();
+ emitError(ErrMsg);
+ return false;
+ }
+
+ // write bitcode to it
+ WriteBitcodeToFile(*MergedModule, Out.os(), ShouldEmbedUselists);
+ Out.os().close();
+
+ if (Out.os().has_error()) {
+ std::string ErrMsg = "could not write bitcode file: ";
+ ErrMsg += Path.str() + ": " + Out.os().error().message();
+ emitError(ErrMsg);
+ Out.os().clear_error();
+ return false;
+ }
+
+ Out.keep();
+ return true;
+}
+
+bool LTOCodeGenerator::compileOptimizedToFile(const char **Name) {
+ // make a unique temp output file to hold the generated code
+ SmallString<128> Filename;
+ int FD;
+
+ StringRef Extension(FileType == CGFT_AssemblyFile ? "s" : "o");
+
+ std::error_code EC =
+ sys::fs::createTemporaryFile("lto-llvm", Extension, FD, Filename);
+ if (EC) {
+ emitError(EC.message());
+ return false;
+ }
+
+ // generate object file
+ ToolOutputFile objFile(Filename, FD);
+
+ bool genResult = compileOptimized(&objFile.os());
+ objFile.os().close();
+ if (objFile.os().has_error()) {
+ emitError((Twine("could not write object file: ") + Filename + ": " +
+ objFile.os().error().message())
+ .str());
+ objFile.os().clear_error();
+ sys::fs::remove(Twine(Filename));
+ return false;
+ }
+
+ objFile.keep();
+ if (!genResult) {
+ sys::fs::remove(Twine(Filename));
+ return false;
+ }
+
+ NativeObjectPath = Filename.c_str();
+ *Name = NativeObjectPath.c_str();
+ return true;
+}
+
+std::unique_ptr<MemoryBuffer>
+LTOCodeGenerator::compileOptimized() {
+ const char *name;
+ if (!compileOptimizedToFile(&name))
+ return nullptr;
+
+ // read .o file into memory buffer
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFile(name, -1, false);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ emitError(EC.message());
+ sys::fs::remove(NativeObjectPath);
+ return nullptr;
+ }
+
+ // remove temp files
+ sys::fs::remove(NativeObjectPath);
+
+ return std::move(*BufferOrErr);
+}
+
+bool LTOCodeGenerator::compile_to_file(const char **Name) {
+ if (!optimize())
+ return false;
+
+ return compileOptimizedToFile(Name);
+}
+
+std::unique_ptr<MemoryBuffer> LTOCodeGenerator::compile() {
+ if (!optimize())
+ return nullptr;
+
+ return compileOptimized();
+}
+
+bool LTOCodeGenerator::determineTarget() {
+ if (TargetMach)
+ return true;
+
+ TripleStr = MergedModule->getTargetTriple();
+ if (TripleStr.empty()) {
+ TripleStr = sys::getDefaultTargetTriple();
+ MergedModule->setTargetTriple(TripleStr);
+ }
+ llvm::Triple Triple(TripleStr);
+
+ // create target machine from info for the merged module
+ std::string ErrMsg;
+ MArch = TargetRegistry::lookupTarget(TripleStr, ErrMsg);
+ if (!MArch) {
+ emitError(ErrMsg);
+ return false;
+ }
+
+ // Use MAttrs as the base feature set, then fill in the default subtarget
+ // features for the target triple.
+ SubtargetFeatures Features(join(MAttrs, ""));
+ Features.getDefaultSubtargetFeatures(Triple);
+ FeatureStr = Features.getString();
+ // Set a default CPU for Darwin triples.
+ if (MCpu.empty() && Triple.isOSDarwin()) {
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ MCpu = "core2";
+ else if (Triple.getArch() == llvm::Triple::x86)
+ MCpu = "yonah";
+ else if (Triple.isArm64e())
+ MCpu = "apple-a12";
+ else if (Triple.getArch() == llvm::Triple::aarch64 ||
+ Triple.getArch() == llvm::Triple::aarch64_32)
+ MCpu = "cyclone";
+ }
+
+ TargetMach = createTargetMachine();
+ assert(TargetMach && "Unable to create target machine");
+
+ return true;
+}
+
+std::unique_ptr<TargetMachine> LTOCodeGenerator::createTargetMachine() {
+ assert(MArch && "MArch is not set!");
+ return std::unique_ptr<TargetMachine>(MArch->createTargetMachine(
+ TripleStr, MCpu, FeatureStr, Options, RelocModel, None, CGOptLevel));
+}
+
+// If a linkonce global is present in the MustPreserveSymbols, we need to make
+// sure we honor this. To force the compiler to not drop it, we add it to the
+// "llvm.compiler.used" global.
+void LTOCodeGenerator::preserveDiscardableGVs(
+ Module &TheModule,
+ llvm::function_ref<bool(const GlobalValue &)> mustPreserveGV) {
+ std::vector<GlobalValue *> Used;
+ auto mayPreserveGlobal = [&](GlobalValue &GV) {
+ if (!GV.isDiscardableIfUnused() || GV.isDeclaration() ||
+ !mustPreserveGV(GV))
+ return;
+ if (GV.hasAvailableExternallyLinkage())
+ return emitWarning(
+ (Twine("Linker asked to preserve available_externally global: '") +
+ GV.getName() + "'").str());
+ if (GV.hasInternalLinkage())
+ return emitWarning((Twine("Linker asked to preserve internal global: '") +
+ GV.getName() + "'").str());
+ Used.push_back(&GV);
+ };
+ for (auto &GV : TheModule)
+ mayPreserveGlobal(GV);
+ for (auto &GV : TheModule.globals())
+ mayPreserveGlobal(GV);
+ for (auto &GV : TheModule.aliases())
+ mayPreserveGlobal(GV);
+
+ if (Used.empty())
+ return;
+
+ appendToCompilerUsed(TheModule, Used);
+}
+
+void LTOCodeGenerator::applyScopeRestrictions() {
+ if (ScopeRestrictionsDone)
+ return;
+
+ // Declare a callback for the internalize pass that will ask, for every
+ // candidate GlobalValue, whether it can be internalized or not.
+ Mangler Mang;
+ SmallString<64> MangledName;
+ auto mustPreserveGV = [&](const GlobalValue &GV) -> bool {
+ // Unnamed globals can't be mangled, but they can't be preserved either.
+ if (!GV.hasName())
+ return false;
+
+ // Need to mangle the GV as the "MustPreserveSymbols" StringSet is filled
+ // with the linker supplied name, which on Darwin includes a leading
+ // underscore.
+ MangledName.clear();
+ MangledName.reserve(GV.getName().size() + 1);
+ Mang.getNameWithPrefix(MangledName, &GV, /*CannotUsePrivateLabel=*/false);
+ return MustPreserveSymbols.count(MangledName);
+ };
+
+ // Preserve linkonce values at the linker's request.
+ preserveDiscardableGVs(*MergedModule, mustPreserveGV);
+
+ if (!ShouldInternalize)
+ return;
+
+ if (ShouldRestoreGlobalsLinkage) {
+ // Record the linkage type of non-local symbols so they can be restored
+ // prior to module splitting.
+ auto RecordLinkage = [&](const GlobalValue &GV) {
+ if (!GV.hasAvailableExternallyLinkage() && !GV.hasLocalLinkage() &&
+ GV.hasName())
+ ExternalSymbols.insert(std::make_pair(GV.getName(), GV.getLinkage()));
+ };
+ for (auto &GV : *MergedModule)
+ RecordLinkage(GV);
+ for (auto &GV : MergedModule->globals())
+ RecordLinkage(GV);
+ for (auto &GV : MergedModule->aliases())
+ RecordLinkage(GV);
+ }
+
+ // Update the llvm.compiler.used global to force preserving libcalls and
+ // symbols referenced from asm.
+ updateCompilerUsed(*MergedModule, *TargetMach, AsmUndefinedRefs);
+
+ internalizeModule(*MergedModule, mustPreserveGV);
+
+ ScopeRestrictionsDone = true;
+}
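+
+// For context (sketch, helper name hypothetical): the mangling step above is
+// what maps an IR name to the linker-visible name, e.g. a C symbol `foo`
+// becomes `_foo` on Darwin.
+#if 0
+static std::string linkerNameOf(const GlobalValue &GV) {
+  Mangler Mang;
+  SmallString<64> Name;
+  Mang.getNameWithPrefix(Name, &GV, /*CannotUsePrivateLabel=*/false);
+  return Name.str().str();
+}
+#endif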
+
+/// Restore original linkage for symbols that may have been internalized
+void LTOCodeGenerator::restoreLinkageForExternals() {
+ if (!ShouldInternalize || !ShouldRestoreGlobalsLinkage)
+ return;
+
+ assert(ScopeRestrictionsDone &&
+ "Cannot externalize without internalization!");
+
+ if (ExternalSymbols.empty())
+ return;
+
+ auto externalize = [this](GlobalValue &GV) {
+ if (!GV.hasLocalLinkage() || !GV.hasName())
+ return;
+
+ auto I = ExternalSymbols.find(GV.getName());
+ if (I == ExternalSymbols.end())
+ return;
+
+ GV.setLinkage(I->second);
+ };
+
+ llvm::for_each(MergedModule->functions(), externalize);
+ llvm::for_each(MergedModule->globals(), externalize);
+ llvm::for_each(MergedModule->aliases(), externalize);
+}
+
+void LTOCodeGenerator::verifyMergedModuleOnce() {
+ // Only run on the first call.
+ if (HasVerifiedInput)
+ return;
+ HasVerifiedInput = true;
+
+ bool BrokenDebugInfo = false;
+ if (verifyModule(*MergedModule, &dbgs(), &BrokenDebugInfo))
+ report_fatal_error("Broken module found, compilation aborted!");
+ if (BrokenDebugInfo) {
+ emitWarning("Invalid debug info found, debug info will be stripped");
+ StripDebugInfo(*MergedModule);
+ }
+}
+
+void LTOCodeGenerator::finishOptimizationRemarks() {
+ if (DiagnosticOutputFile) {
+ DiagnosticOutputFile->keep();
+ // FIXME: LTOCodeGenerator dtor is not invoked on Darwin
+ DiagnosticOutputFile->os().flush();
+ }
+}
+
+/// Optimize merged modules using various IPO passes
+bool LTOCodeGenerator::optimize() {
+ if (!this->determineTarget())
+ return false;
+
+ auto DiagFileOrErr = lto::setupLLVMOptimizationRemarks(
+ Context, RemarksFilename, RemarksPasses, RemarksFormat,
+ RemarksWithHotness, RemarksHotnessThreshold);
+ if (!DiagFileOrErr) {
+ errs() << "Error: " << toString(DiagFileOrErr.takeError()) << "\n";
+ report_fatal_error("Can't get an output file for the remarks");
+ }
+ DiagnosticOutputFile = std::move(*DiagFileOrErr);
+
+ // Setup output file to emit statistics.
+ auto StatsFileOrErr = lto::setupStatsFile(LTOStatsFile);
+ if (!StatsFileOrErr) {
+ errs() << "Error: " << toString(StatsFileOrErr.takeError()) << "\n";
+ report_fatal_error("Can't get an output file for the statistics");
+ }
+ StatsFile = std::move(StatsFileOrErr.get());
+
+ // Currently there is no support for enabling whole program visibility via a
+ // linker option in the old LTO API, but this call allows it to be specified
+ // via the internal option. Must be done before WPD invoked via the optimizer
+ // pipeline run below.
+ updateVCallVisibilityInModule(*MergedModule,
+ /* WholeProgramVisibilityEnabledInLTO */ false);
+
+ // We always run the verifier once on the merged module; the `DisableVerify`
+ // parameter only applies to subsequent verification runs.
+ verifyMergedModuleOnce();
+
+ // Mark which symbols cannot be internalized.
+ this->applyScopeRestrictions();
+
+ // Write LTOPostLink flag for passes that require all the modules.
+ MergedModule->addModuleFlag(Module::Error, "LTOPostLink", 1);
+
+ // Instantiate the pass manager to organize the passes.
+ legacy::PassManager passes;
+
+ // Add an appropriate DataLayout instance for this module...
+ MergedModule->setDataLayout(TargetMach->createDataLayout());
+
+ passes.add(
+ createTargetTransformInfoWrapperPass(TargetMach->getTargetIRAnalysis()));
+
+ Triple TargetTriple(TargetMach->getTargetTriple());
+ PassManagerBuilder PMB;
+ PMB.LoopVectorize = true;
+ PMB.SLPVectorize = true;
+ PMB.Inliner = createFunctionInliningPass();
+ PMB.LibraryInfo = new TargetLibraryInfoImpl(TargetTriple);
+ if (Freestanding)
+ PMB.LibraryInfo->disableAllFunctions();
+ PMB.OptLevel = OptLevel;
+ PMB.VerifyInput = !DisableVerify;
+ PMB.VerifyOutput = !DisableVerify;
+
+ PMB.populateLTOPassManager(passes);
+
+ // Run our queue of passes all at once now, efficiently.
+ passes.run(*MergedModule);
+
+ return true;
+}
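+
+// For reference, a minimal standalone equivalent of the pipeline assembled
+// above (sketch, assuming a valid `M` and `TM`):
+#if 0
+static void runLTOPipeline(Module &M, TargetMachine &TM, unsigned Level) {
+  legacy::PassManager PM;
+  PM.add(createTargetTransformInfoWrapperPass(TM.getTargetIRAnalysis()));
+  PassManagerBuilder PMB;
+  PMB.Inliner = createFunctionInliningPass();
+  PMB.OptLevel = Level;
+  PMB.populateLTOPassManager(PM); // adds the IPO passes used at link time
+  PM.run(M);
+}
+#endif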
+
+bool LTOCodeGenerator::compileOptimized(ArrayRef<raw_pwrite_stream *> Out) {
+ if (!this->determineTarget())
+ return false;
+
+ // We always run the verifier once on the merged module. If it has already
+ // been called in optimize(), this call will return early.
+ verifyMergedModuleOnce();
+
+ legacy::PassManager preCodeGenPasses;
+
+ // If the bitcode files contain ARC code and were compiled with optimization,
+ // the ObjCARCContractPass must be run, so do it unconditionally here.
+ preCodeGenPasses.add(createObjCARCContractPass());
+ preCodeGenPasses.run(*MergedModule);
+
+ // Re-externalize globals that may have been internalized to increase scope
+ // for splitting
+ restoreLinkageForExternals();
+
+ // Do code generation. We need to preserve the module in case the client calls
+ // writeMergedModules() after compilation, but we only need to allow this at
+ // parallelism level 1. This is achieved by having splitCodeGen return the
+ // original module at parallelism level 1 which we then assign back to
+ // MergedModule.
+ MergedModule = splitCodeGen(std::move(MergedModule), Out, {},
+ [&]() { return createTargetMachine(); }, FileType,
+ ShouldRestoreGlobalsLinkage);
+
+ // If statistics were requested, save them to the specified file or
+ // print them out after codegen.
+ if (StatsFile)
+ PrintStatisticsJSON(StatsFile->os());
+ else if (AreStatisticsEnabled())
+ PrintStatistics();
+
+ reportAndResetTimings();
+
+ finishOptimizationRemarks();
+
+ return true;
+}
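+
+// Usage sketch (illustrative only): parallel codegen writes one object file
+// per stream; raw_svector_ostream is a raw_pwrite_stream, so in-memory
+// buffers work. Names are hypothetical.
+#if 0
+static bool compileIntoTwoPartitions(LTOCodeGenerator &CG) {
+  SmallString<0> Buf0, Buf1;
+  raw_svector_ostream OS0(Buf0), OS1(Buf1);
+  raw_pwrite_stream *Streams[] = {&OS0, &OS1};
+  return CG.compileOptimized(Streams); // Buf0/Buf1 receive the objects
+}
+#endif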
+
+void LTOCodeGenerator::setCodeGenDebugOptions(ArrayRef<StringRef> Options) {
+ for (StringRef Option : Options)
+ CodegenOptions.push_back(Option.str());
+}
+
+void LTOCodeGenerator::parseCodeGenDebugOptions() {
+ // If options were requested, set them.
+ if (!CodegenOptions.empty()) {
+ // ParseCommandLineOptions() expects argv[0] to be program name.
+ std::vector<const char *> CodegenArgv(1, "libLLVMLTO");
+ for (std::string &Arg : CodegenOptions)
+ CodegenArgv.push_back(Arg.c_str());
+ cl::ParseCommandLineOptions(CodegenArgv.size(), CodegenArgv.data());
+ }
+}
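+
+// Usage sketch (illustrative only): a linker forwarding backend flags; the
+// flag shown (-time-passes) is a standard LLVM option.
+#if 0
+static void forwardBackendFlags(LTOCodeGenerator &CG) {
+  StringRef Flags[] = {"-time-passes"};
+  CG.setCodeGenDebugOptions(Flags);
+  CG.parseCodeGenDebugOptions();
+}
+#endif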
+
+void LTOCodeGenerator::DiagnosticHandler(const DiagnosticInfo &DI) {
+ // Map the LLVM internal diagnostic severity to the LTO diagnostic severity.
+ lto_codegen_diagnostic_severity_t Severity;
+ switch (DI.getSeverity()) {
+ case DS_Error:
+ Severity = LTO_DS_ERROR;
+ break;
+ case DS_Warning:
+ Severity = LTO_DS_WARNING;
+ break;
+ case DS_Remark:
+ Severity = LTO_DS_REMARK;
+ break;
+ case DS_Note:
+ Severity = LTO_DS_NOTE;
+ break;
+ }
+ // Create the string that will be reported to the external diagnostic handler.
+ std::string MsgStorage;
+ raw_string_ostream Stream(MsgStorage);
+ DiagnosticPrinterRawOStream DP(Stream);
+ DI.print(DP);
+ Stream.flush();
+
+ // If this method has been called it means someone has set up an external
+ // diagnostic handler. Assert on that.
+ assert(DiagHandler && "Invalid diagnostic handler");
+ (*DiagHandler)(Severity, MsgStorage.c_str(), DiagContext);
+}
+
+namespace {
+struct LTODiagnosticHandler : public DiagnosticHandler {
+ LTOCodeGenerator *CodeGenerator;
+ LTODiagnosticHandler(LTOCodeGenerator *CodeGenPtr)
+ : CodeGenerator(CodeGenPtr) {}
+ bool handleDiagnostics(const DiagnosticInfo &DI) override {
+ CodeGenerator->DiagnosticHandler(DI);
+ return true;
+ }
+};
+}
+
+void
+LTOCodeGenerator::setDiagnosticHandler(lto_diagnostic_handler_t DiagHandler,
+ void *Ctxt) {
+ this->DiagHandler = DiagHandler;
+ this->DiagContext = Ctxt;
+ if (!DiagHandler)
+ return Context.setDiagnosticHandler(nullptr);
+ // Register the LTOCodeGenerator stub in the LLVMContext to forward the
+ // diagnostic to the external DiagHandler.
+ Context.setDiagnosticHandler(std::make_unique<LTODiagnosticHandler>(this),
+ true);
+}
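+
+// Usage sketch (illustrative only): a client handler matching the
+// lto_diagnostic_handler_t signature used above.
+#if 0
+static void printDiag(lto_codegen_diagnostic_severity_t Severity,
+                      const char *Msg, void *) {
+  errs() << "lto: " << (Severity == LTO_DS_ERROR ? "error: " : "") << Msg
+         << "\n";
+}
+// Registered with: CG.setDiagnosticHandler(printDiag, /*Ctxt=*/nullptr);
+#endif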
+
+namespace {
+class LTODiagnosticInfo : public DiagnosticInfo {
+ const Twine &Msg;
+public:
+ LTODiagnosticInfo(const Twine &DiagMsg, DiagnosticSeverity Severity=DS_Error)
+ : DiagnosticInfo(DK_Linker, Severity), Msg(DiagMsg) {}
+ void print(DiagnosticPrinter &DP) const override { DP << Msg; }
+};
+}
+
+void LTOCodeGenerator::emitError(const std::string &ErrMsg) {
+ if (DiagHandler)
+ (*DiagHandler)(LTO_DS_ERROR, ErrMsg.c_str(), DiagContext);
+ else
+ Context.diagnose(LTODiagnosticInfo(ErrMsg));
+}
+
+void LTOCodeGenerator::emitWarning(const std::string &ErrMsg) {
+ if (DiagHandler)
+ (*DiagHandler)(LTO_DS_WARNING, ErrMsg.c_str(), DiagContext);
+ else
+ Context.diagnose(LTODiagnosticInfo(ErrMsg, DS_Warning));
+}
diff --git a/contrib/libs/llvm12/lib/LTO/LTOModule.cpp b/contrib/libs/llvm12/lib/LTO/LTOModule.cpp
new file mode 100644
index 00000000000..1119622578d
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LTO/LTOModule.cpp
@@ -0,0 +1,689 @@
+//===-- LTOModule.cpp - LLVM Link Time Optimizer --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Link Time Optimization library. This library is
+// intended to be used by a linker to optimize code at link time.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LTO/legacy/LTOModule.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCSection.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetLoweringObjectFile.h"
+#include "llvm/Transforms/Utils/GlobalStatus.h"
+#include <system_error>
+using namespace llvm;
+using namespace llvm::object;
+
+LTOModule::LTOModule(std::unique_ptr<Module> M, MemoryBufferRef MBRef,
+ llvm::TargetMachine *TM)
+ : Mod(std::move(M)), MBRef(MBRef), _target(TM) {
+ assert(_target && "target machine is null");
+ SymTab.addModule(Mod.get());
+}
+
+LTOModule::~LTOModule() {}
+
+/// isBitcodeFile - Returns 'true' if the file (or memory contents) is LLVM
+/// bitcode.
+bool LTOModule::isBitcodeFile(const void *Mem, size_t Length) {
+ Expected<MemoryBufferRef> BCData = IRObjectFile::findBitcodeInMemBuffer(
+ MemoryBufferRef(StringRef((const char *)Mem, Length), "<mem>"));
+ return !errorToBool(BCData.takeError());
+}
+
+bool LTOModule::isBitcodeFile(StringRef Path) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFile(Path);
+ if (!BufferOrErr)
+ return false;
+
+ Expected<MemoryBufferRef> BCData = IRObjectFile::findBitcodeInMemBuffer(
+ BufferOrErr.get()->getMemBufferRef());
+ return !errorToBool(BCData.takeError());
+}
+
+bool LTOModule::isThinLTO() {
+ Expected<BitcodeLTOInfo> Result = getBitcodeLTOInfo(MBRef);
+ if (!Result) {
+ logAllUnhandledErrors(Result.takeError(), errs());
+ return false;
+ }
+ return Result->IsThinLTO;
+}
+
+bool LTOModule::isBitcodeForTarget(MemoryBuffer *Buffer,
+ StringRef TriplePrefix) {
+ Expected<MemoryBufferRef> BCOrErr =
+ IRObjectFile::findBitcodeInMemBuffer(Buffer->getMemBufferRef());
+ if (errorToBool(BCOrErr.takeError()))
+ return false;
+ LLVMContext Context;
+ ErrorOr<std::string> TripleOrErr =
+ expectedToErrorOrAndEmitErrors(Context, getBitcodeTargetTriple(*BCOrErr));
+ if (!TripleOrErr)
+ return false;
+ return StringRef(*TripleOrErr).startswith(TriplePrefix);
+}
+
+std::string LTOModule::getProducerString(MemoryBuffer *Buffer) {
+ Expected<MemoryBufferRef> BCOrErr =
+ IRObjectFile::findBitcodeInMemBuffer(Buffer->getMemBufferRef());
+ if (errorToBool(BCOrErr.takeError()))
+ return "";
+ LLVMContext Context;
+ ErrorOr<std::string> ProducerOrErr = expectedToErrorOrAndEmitErrors(
+ Context, getBitcodeProducerString(*BCOrErr));
+ if (!ProducerOrErr)
+ return "";
+ return *ProducerOrErr;
+}
+
+ErrorOr<std::unique_ptr<LTOModule>>
+LTOModule::createFromFile(LLVMContext &Context, StringRef path,
+ const TargetOptions &options) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getFile(path);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ Context.emitError(EC.message());
+ return EC;
+ }
+ std::unique_ptr<MemoryBuffer> Buffer = std::move(BufferOrErr.get());
+ return makeLTOModule(Buffer->getMemBufferRef(), options, Context,
+ /* ShouldBeLazy*/ false);
+}
+
+ErrorOr<std::unique_ptr<LTOModule>>
+LTOModule::createFromOpenFile(LLVMContext &Context, int fd, StringRef path,
+ size_t size, const TargetOptions &options) {
+ return createFromOpenFileSlice(Context, fd, path, size, 0, options);
+}
+
+ErrorOr<std::unique_ptr<LTOModule>>
+LTOModule::createFromOpenFileSlice(LLVMContext &Context, int fd, StringRef path,
+ size_t map_size, off_t offset,
+ const TargetOptions &options) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> BufferOrErr =
+ MemoryBuffer::getOpenFileSlice(sys::fs::convertFDToNativeFile(fd), path,
+ map_size, offset);
+ if (std::error_code EC = BufferOrErr.getError()) {
+ Context.emitError(EC.message());
+ return EC;
+ }
+ std::unique_ptr<MemoryBuffer> Buffer = std::move(BufferOrErr.get());
+ return makeLTOModule(Buffer->getMemBufferRef(), options, Context,
+ /* ShouldBeLazy */ false);
+}
+
+ErrorOr<std::unique_ptr<LTOModule>>
+LTOModule::createFromBuffer(LLVMContext &Context, const void *mem,
+ size_t length, const TargetOptions &options,
+ StringRef path) {
+ StringRef Data((const char *)mem, length);
+ MemoryBufferRef Buffer(Data, path);
+ return makeLTOModule(Buffer, options, Context, /* ShouldBeLazy */ false);
+}
+
+ErrorOr<std::unique_ptr<LTOModule>>
+LTOModule::createInLocalContext(std::unique_ptr<LLVMContext> Context,
+ const void *mem, size_t length,
+ const TargetOptions &options, StringRef path) {
+ StringRef Data((const char *)mem, length);
+ MemoryBufferRef Buffer(Data, path);
+ // If we own a context, we know this is being used only for symbol extraction,
+ // not linking. Be lazy in that case.
+ ErrorOr<std::unique_ptr<LTOModule>> Ret =
+ makeLTOModule(Buffer, options, *Context, /* ShouldBeLazy */ true);
+ if (Ret)
+ (*Ret)->OwnedContext = std::move(Context);
+ return Ret;
+}
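+
+// Usage sketch (illustrative only): the typical factory call for a linker
+// loading an input; the file name is hypothetical.
+#if 0
+static std::unique_ptr<LTOModule> loadForLink(LLVMContext &Ctx) {
+  TargetOptions Options;
+  ErrorOr<std::unique_ptr<LTOModule>> M =
+      LTOModule::createFromFile(Ctx, "input.o", Options);
+  return M ? std::move(*M) : nullptr;
+}
+#endif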
+
+static ErrorOr<std::unique_ptr<Module>>
+parseBitcodeFileImpl(MemoryBufferRef Buffer, LLVMContext &Context,
+ bool ShouldBeLazy) {
+ // Find the buffer.
+ Expected<MemoryBufferRef> MBOrErr =
+ IRObjectFile::findBitcodeInMemBuffer(Buffer);
+ if (Error E = MBOrErr.takeError()) {
+ std::error_code EC = errorToErrorCode(std::move(E));
+ Context.emitError(EC.message());
+ return EC;
+ }
+
+ if (!ShouldBeLazy) {
+ // Parse the full file.
+ return expectedToErrorOrAndEmitErrors(Context,
+ parseBitcodeFile(*MBOrErr, Context));
+ }
+
+ // Parse lazily.
+ return expectedToErrorOrAndEmitErrors(
+ Context,
+ getLazyBitcodeModule(*MBOrErr, Context, true /*ShouldLazyLoadMetadata*/));
+}
+
+ErrorOr<std::unique_ptr<LTOModule>>
+LTOModule::makeLTOModule(MemoryBufferRef Buffer, const TargetOptions &options,
+ LLVMContext &Context, bool ShouldBeLazy) {
+ ErrorOr<std::unique_ptr<Module>> MOrErr =
+ parseBitcodeFileImpl(Buffer, Context, ShouldBeLazy);
+ if (std::error_code EC = MOrErr.getError())
+ return EC;
+ std::unique_ptr<Module> &M = *MOrErr;
+
+ std::string TripleStr = M->getTargetTriple();
+ if (TripleStr.empty())
+ TripleStr = sys::getDefaultTargetTriple();
+ llvm::Triple Triple(TripleStr);
+
+ // Find the machine architecture for this module.
+ std::string errMsg;
+ const Target *march = TargetRegistry::lookupTarget(TripleStr, errMsg);
+ if (!march)
+ return make_error_code(object::object_error::arch_not_found);
+
+ // Construct the LTOModule, handing over ownership of the module and target.
+ SubtargetFeatures Features;
+ Features.getDefaultSubtargetFeatures(Triple);
+ std::string FeatureStr = Features.getString();
+ // Set a default CPU for Darwin triples.
+ std::string CPU;
+ if (Triple.isOSDarwin()) {
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ CPU = "core2";
+ else if (Triple.getArch() == llvm::Triple::x86)
+ CPU = "yonah";
+ else if (Triple.isArm64e())
+ CPU = "apple-a12";
+ else if (Triple.getArch() == llvm::Triple::aarch64 ||
+ Triple.getArch() == llvm::Triple::aarch64_32)
+ CPU = "cyclone";
+ }
+
+ TargetMachine *target =
+ march->createTargetMachine(TripleStr, CPU, FeatureStr, options, None);
+
+ std::unique_ptr<LTOModule> Ret(new LTOModule(std::move(M), Buffer, target));
+ Ret->parseSymbols();
+ Ret->parseMetadata();
+
+ return std::move(Ret);
+}
+
+/// Create a MemoryBuffer from a memory range with an optional name.
+std::unique_ptr<MemoryBuffer>
+LTOModule::makeBuffer(const void *mem, size_t length, StringRef name) {
+ const char *startPtr = (const char*)mem;
+ return MemoryBuffer::getMemBuffer(StringRef(startPtr, length), name, false);
+}
+
+/// objcClassNameFromExpression - Get the string that the data pointer points to.
+bool
+LTOModule::objcClassNameFromExpression(const Constant *c, std::string &name) {
+ if (const ConstantExpr *ce = dyn_cast<ConstantExpr>(c)) {
+ Constant *op = ce->getOperand(0);
+ if (GlobalVariable *gvn = dyn_cast<GlobalVariable>(op)) {
+ Constant *cn = gvn->getInitializer();
+ if (ConstantDataArray *ca = dyn_cast<ConstantDataArray>(cn)) {
+ if (ca->isCString()) {
+ name = (".objc_class_name_" + ca->getAsCString()).str();
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+/// addObjCClass - Parse i386/ppc ObjC class data structure.
+void LTOModule::addObjCClass(const GlobalVariable *clgv) {
+ const ConstantStruct *c = dyn_cast<ConstantStruct>(clgv->getInitializer());
+ if (!c) return;
+
+ // second slot in __OBJC,__class is pointer to superclass name
+ std::string superclassName;
+ if (objcClassNameFromExpression(c->getOperand(1), superclassName)) {
+ auto IterBool =
+ _undefines.insert(std::make_pair(superclassName, NameAndAttributes()));
+ if (IterBool.second) {
+ NameAndAttributes &info = IterBool.first->second;
+ info.name = IterBool.first->first();
+ info.attributes = LTO_SYMBOL_DEFINITION_UNDEFINED;
+ info.isFunction = false;
+ info.symbol = clgv;
+ }
+ }
+
+ // third slot in __OBJC,__class is pointer to class name
+ std::string className;
+ if (objcClassNameFromExpression(c->getOperand(2), className)) {
+ auto Iter = _defines.insert(className).first;
+
+ NameAndAttributes info;
+ info.name = Iter->first();
+ info.attributes = LTO_SYMBOL_PERMISSIONS_DATA |
+ LTO_SYMBOL_DEFINITION_REGULAR | LTO_SYMBOL_SCOPE_DEFAULT;
+ info.isFunction = false;
+ info.symbol = clgv;
+ _symbols.push_back(info);
+ }
+}
+
+/// addObjCCategory - Parse i386/ppc ObjC category data structure.
+void LTOModule::addObjCCategory(const GlobalVariable *clgv) {
+ const ConstantStruct *c = dyn_cast<ConstantStruct>(clgv->getInitializer());
+ if (!c) return;
+
+ // second slot in __OBJC,__category is pointer to target class name
+ std::string targetclassName;
+ if (!objcClassNameFromExpression(c->getOperand(1), targetclassName))
+ return;
+
+ auto IterBool =
+ _undefines.insert(std::make_pair(targetclassName, NameAndAttributes()));
+
+ if (!IterBool.second)
+ return;
+
+ NameAndAttributes &info = IterBool.first->second;
+ info.name = IterBool.first->first();
+ info.attributes = LTO_SYMBOL_DEFINITION_UNDEFINED;
+ info.isFunction = false;
+ info.symbol = clgv;
+}
+
+/// addObjCClassRef - Parse i386/ppc ObjC class list data structure.
+void LTOModule::addObjCClassRef(const GlobalVariable *clgv) {
+ std::string targetclassName;
+ if (!objcClassNameFromExpression(clgv->getInitializer(), targetclassName))
+ return;
+
+ auto IterBool =
+ _undefines.insert(std::make_pair(targetclassName, NameAndAttributes()));
+
+ if (!IterBool.second)
+ return;
+
+ NameAndAttributes &info = IterBool.first->second;
+ info.name = IterBool.first->first();
+ info.attributes = LTO_SYMBOL_DEFINITION_UNDEFINED;
+ info.isFunction = false;
+ info.symbol = clgv;
+}
+
+void LTOModule::addDefinedDataSymbol(ModuleSymbolTable::Symbol Sym) {
+ SmallString<64> Buffer;
+ {
+ raw_svector_ostream OS(Buffer);
+ SymTab.printSymbolName(OS, Sym);
+ Buffer.c_str();
+ }
+
+ const GlobalValue *V = Sym.get<GlobalValue *>();
+ addDefinedDataSymbol(Buffer, V);
+}
+
+void LTOModule::addDefinedDataSymbol(StringRef Name, const GlobalValue *v) {
+ // Add to list of defined symbols.
+ addDefinedSymbol(Name, v, false);
+
+ if (!v->hasSection() /* || !isTargetDarwin */)
+ return;
+
+ // Special case i386/ppc ObjC data structures in magic sections:
+ // The issue is that the old ObjC object format did some strange
+ // contortions to avoid real linker symbols. For instance, the
+ // ObjC class data structure is allocated statically in the executable
+ // that defines that class. That data structure contains a pointer to
+ // its superclass. But instead of just initializing that part of the
+ // struct to the address of its superclass, and letting the static and
+ // dynamic linkers do the rest, the runtime works by having that field
+ // instead point to a C-string that is the name of the superclass.
+ // At runtime the ObjC initialization updates that pointer and sets
+ // it to point to the actual superclass. As far as the linker
+ // knows it is just a pointer to a string. But then someone wanted the
+ // linker to issue errors at build time if the superclass was not found.
+ // So they figured out a way in the Mach-O object format to use an
+ // absolute symbol (.objc_class_name_Foo = 0) and a floating reference
+ // (.reference .objc_class_name_Bar) to make the linker error out when
+ // a class was missing.
+ // The following synthesizes the implicit .objc_* symbols for the linker
+ // from the ObjC data structures generated by the front end.
+
+ // special case if this data blob is an ObjC class definition
+ if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(v)) {
+ StringRef Section = GV->getSection();
+ if (Section.startswith("__OBJC,__class,")) {
+ addObjCClass(GV);
+ }
+
+ // special case if this data blob is an ObjC category definition
+ else if (Section.startswith("__OBJC,__category,")) {
+ addObjCCategory(GV);
+ }
+
+ // special case if this data blob is the list of referenced classes
+ else if (Section.startswith("__OBJC,__cls_refs,")) {
+ addObjCClassRef(GV);
+ }
+ }
+}
+
+void LTOModule::addDefinedFunctionSymbol(ModuleSymbolTable::Symbol Sym) {
+ SmallString<64> Buffer;
+ {
+ raw_svector_ostream OS(Buffer);
+ SymTab.printSymbolName(OS, Sym);
+ Buffer.c_str();
+ }
+
+ const Function *F = cast<Function>(Sym.get<GlobalValue *>());
+ addDefinedFunctionSymbol(Buffer, F);
+}
+
+void LTOModule::addDefinedFunctionSymbol(StringRef Name, const Function *F) {
+ // add to list of defined symbols
+ addDefinedSymbol(Name, F, true);
+}
+
+void LTOModule::addDefinedSymbol(StringRef Name, const GlobalValue *def,
+ bool isFunction) {
+ const GlobalObject *go = dyn_cast<GlobalObject>(def);
+ uint32_t attr = go ? Log2(go->getAlign().valueOrOne()) : 0;
+
+ // set permissions part
+ if (isFunction) {
+ attr |= LTO_SYMBOL_PERMISSIONS_CODE;
+ } else {
+ const GlobalVariable *gv = dyn_cast<GlobalVariable>(def);
+ if (gv && gv->isConstant())
+ attr |= LTO_SYMBOL_PERMISSIONS_RODATA;
+ else
+ attr |= LTO_SYMBOL_PERMISSIONS_DATA;
+ }
+
+ // set definition part
+ if (def->hasWeakLinkage() || def->hasLinkOnceLinkage())
+ attr |= LTO_SYMBOL_DEFINITION_WEAK;
+ else if (def->hasCommonLinkage())
+ attr |= LTO_SYMBOL_DEFINITION_TENTATIVE;
+ else
+ attr |= LTO_SYMBOL_DEFINITION_REGULAR;
+
+ // set scope part
+ if (def->hasLocalLinkage())
+ // Ignore visibility if linkage is local.
+ attr |= LTO_SYMBOL_SCOPE_INTERNAL;
+ else if (def->hasHiddenVisibility())
+ attr |= LTO_SYMBOL_SCOPE_HIDDEN;
+ else if (def->hasProtectedVisibility())
+ attr |= LTO_SYMBOL_SCOPE_PROTECTED;
+ else if (def->canBeOmittedFromSymbolTable())
+ attr |= LTO_SYMBOL_SCOPE_DEFAULT_CAN_BE_HIDDEN;
+ else
+ attr |= LTO_SYMBOL_SCOPE_DEFAULT;
+
+ if (def->hasComdat())
+ attr |= LTO_SYMBOL_COMDAT;
+
+ if (isa<GlobalAlias>(def))
+ attr |= LTO_SYMBOL_ALIAS;
+
+ auto Iter = _defines.insert(Name).first;
+
+ // fill information structure
+ NameAndAttributes info;
+ StringRef NameRef = Iter->first();
+ info.name = NameRef;
+ assert(NameRef.data()[NameRef.size()] == '\0');
+ info.attributes = attr;
+ info.isFunction = isFunction;
+ info.symbol = def;
+
+ // add to table of symbols
+ _symbols.push_back(info);
+}
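+
+// For reference (sketch): the attribute word packed above decodes with the
+// masks from llvm-c/lto.h, e.g.:
+#if 0
+static bool isWeakDefinition(uint32_t Attrs) {
+  return (Attrs & LTO_SYMBOL_DEFINITION_MASK) == LTO_SYMBOL_DEFINITION_WEAK;
+}
+#endif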
+
+/// addAsmGlobalSymbol - Add a global symbol from module-level ASM to the
+/// defined list.
+void LTOModule::addAsmGlobalSymbol(StringRef name,
+ lto_symbol_attributes scope) {
+ auto IterBool = _defines.insert(name);
+
+ // only add new define if not already defined
+ if (!IterBool.second)
+ return;
+
+ NameAndAttributes &info = _undefines[IterBool.first->first()];
+
+ if (info.symbol == nullptr) {
+ // FIXME: This is trying to take care of module ASM like this:
+ //
+ // module asm ".zerofill __FOO, __foo, _bar_baz_qux, 0"
+ //
+ // but is gross and its mother dresses it funny. Have the ASM parser give us
+ // more details for this type of situation so that we're not guessing so
+ // much.
+
+ // fill information structure
+ info.name = IterBool.first->first();
+ info.attributes =
+ LTO_SYMBOL_PERMISSIONS_DATA | LTO_SYMBOL_DEFINITION_REGULAR | scope;
+ info.isFunction = false;
+ info.symbol = nullptr;
+
+ // add to table of symbols
+ _symbols.push_back(info);
+ return;
+ }
+
+ if (info.isFunction)
+ addDefinedFunctionSymbol(info.name, cast<Function>(info.symbol));
+ else
+ addDefinedDataSymbol(info.name, info.symbol);
+
+ _symbols.back().attributes &= ~LTO_SYMBOL_SCOPE_MASK;
+ _symbols.back().attributes |= scope;
+}
+
+/// addAsmGlobalSymbolUndef - Add a global symbol from module-level ASM to the
+/// undefined list.
+void LTOModule::addAsmGlobalSymbolUndef(StringRef name) {
+ auto IterBool = _undefines.insert(std::make_pair(name, NameAndAttributes()));
+
+ _asm_undefines.push_back(IterBool.first->first());
+
+ // we already have the symbol
+ if (!IterBool.second)
+ return;
+
+ uint32_t attr = LTO_SYMBOL_DEFINITION_UNDEFINED;
+ attr |= LTO_SYMBOL_SCOPE_DEFAULT;
+ NameAndAttributes &info = IterBool.first->second;
+ info.name = IterBool.first->first();
+ info.attributes = attr;
+ info.isFunction = false;
+ info.symbol = nullptr;
+}
+
+/// Add a symbol which isn't defined just yet to a list to be resolved later.
+void LTOModule::addPotentialUndefinedSymbol(ModuleSymbolTable::Symbol Sym,
+ bool isFunc) {
+ SmallString<64> name;
+ {
+ raw_svector_ostream OS(name);
+ SymTab.printSymbolName(OS, Sym);
+ name.c_str();
+ }
+
+ auto IterBool = _undefines.insert(std::make_pair(name, NameAndAttributes()));
+
+ // we already have the symbol
+ if (!IterBool.second)
+ return;
+
+ NameAndAttributes &info = IterBool.first->second;
+
+ info.name = IterBool.first->first();
+
+ const GlobalValue *decl = Sym.dyn_cast<GlobalValue *>();
+
+ if (decl->hasExternalWeakLinkage())
+ info.attributes = LTO_SYMBOL_DEFINITION_WEAKUNDEF;
+ else
+ info.attributes = LTO_SYMBOL_DEFINITION_UNDEFINED;
+
+ info.isFunction = isFunc;
+ info.symbol = decl;
+}
+
+void LTOModule::parseSymbols() {
+ for (auto Sym : SymTab.symbols()) {
+ auto *GV = Sym.dyn_cast<GlobalValue *>();
+ uint32_t Flags = SymTab.getSymbolFlags(Sym);
+ if (Flags & object::BasicSymbolRef::SF_FormatSpecific)
+ continue;
+
+ bool IsUndefined = Flags & object::BasicSymbolRef::SF_Undefined;
+
+ if (!GV) {
+ SmallString<64> Buffer;
+ {
+ raw_svector_ostream OS(Buffer);
+ SymTab.printSymbolName(OS, Sym);
+ Buffer.c_str();
+ }
+ StringRef Name(Buffer);
+
+ if (IsUndefined)
+ addAsmGlobalSymbolUndef(Name);
+ else if (Flags & object::BasicSymbolRef::SF_Global)
+ addAsmGlobalSymbol(Name, LTO_SYMBOL_SCOPE_DEFAULT);
+ else
+ addAsmGlobalSymbol(Name, LTO_SYMBOL_SCOPE_INTERNAL);
+ continue;
+ }
+
+ auto *F = dyn_cast<Function>(GV);
+ if (IsUndefined) {
+ addPotentialUndefinedSymbol(Sym, F != nullptr);
+ continue;
+ }
+
+ if (F) {
+ addDefinedFunctionSymbol(Sym);
+ continue;
+ }
+
+ if (isa<GlobalVariable>(GV)) {
+ addDefinedDataSymbol(Sym);
+ continue;
+ }
+
+ assert(isa<GlobalAlias>(GV));
+ addDefinedDataSymbol(Sym);
+ }
+
+ // make symbols for all undefines
+ for (StringMap<NameAndAttributes>::iterator u = _undefines.begin(),
+        e = _undefines.end(); u != e; ++u) {
+ // If this symbol also has a definition, then don't make an undefine because
+ // it is a tentative definition.
+ if (_defines.count(u->getKey())) continue;
+ NameAndAttributes info = u->getValue();
+ _symbols.push_back(info);
+ }
+}
+
+/// parseMetadata - Parse metadata from the module
+void LTOModule::parseMetadata() {
+ raw_string_ostream OS(LinkerOpts);
+
+ // Linker Options
+ if (NamedMDNode *LinkerOptions =
+ getModule().getNamedMetadata("llvm.linker.options")) {
+ for (unsigned i = 0, e = LinkerOptions->getNumOperands(); i != e; ++i) {
+ MDNode *MDOptions = LinkerOptions->getOperand(i);
+ for (unsigned ii = 0, ie = MDOptions->getNumOperands(); ii != ie; ++ii) {
+ MDString *MDOption = cast<MDString>(MDOptions->getOperand(ii));
+ OS << " " << MDOption->getString();
+ }
+ }
+ }
+
+ // Globals - we only need to do this for COFF.
+ const Triple TT(_target->getTargetTriple());
+ if (!TT.isOSBinFormatCOFF())
+ return;
+ Mangler M;
+ for (const NameAndAttributes &Sym : _symbols) {
+ if (!Sym.symbol)
+ continue;
+ emitLinkerFlagsForGlobalCOFF(OS, Sym.symbol, TT, M);
+ }
+}
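+
+// For reference, the named metadata consumed above has this IR shape
+// (option strings are examples only):
+//   !llvm.linker.options = !{!0, !1}
+//   !0 = !{!"-lz"}
+//   !1 = !{!"-framework", !"Foundation"}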
+
+lto::InputFile *LTOModule::createInputFile(const void *buffer,
+ size_t buffer_size, const char *path,
+ std::string &outErr) {
+ StringRef Data((const char *)buffer, buffer_size);
+ MemoryBufferRef BufferRef(Data, path);
+
+ Expected<std::unique_ptr<lto::InputFile>> ObjOrErr =
+ lto::InputFile::create(BufferRef);
+
+ if (ObjOrErr)
+ return ObjOrErr->release();
+
+ outErr = std::string(path) +
+ ": Could not read LTO input file: " + toString(ObjOrErr.takeError());
+ return nullptr;
+}
+
+size_t LTOModule::getDependentLibraryCount(lto::InputFile *input) {
+ return input->getDependentLibraries().size();
+}
+
+const char *LTOModule::getDependentLibrary(lto::InputFile *input, size_t index,
+ size_t *size) {
+ StringRef S = input->getDependentLibraries()[index];
+ *size = S.size();
+ return S.data();
+}
+
+Expected<uint32_t> LTOModule::getMachOCPUType() const {
+ return MachO::getCPUType(Triple(Mod->getTargetTriple()));
+}
+
+Expected<uint32_t> LTOModule::getMachOCPUSubType() const {
+ return MachO::getCPUSubType(Triple(Mod->getTargetTriple()));
+}
diff --git a/contrib/libs/llvm12/lib/LTO/SummaryBasedOptimizations.cpp b/contrib/libs/llvm12/lib/LTO/SummaryBasedOptimizations.cpp
new file mode 100644
index 00000000000..d4dbefb231e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LTO/SummaryBasedOptimizations.cpp
@@ -0,0 +1,86 @@
+//==-SummaryBasedOptimizations.cpp - Optimizations based on ThinLTO summary-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements optimizations that are based on the module summaries.
+// These optimizations are performed during the thin-link phase of the
+// compilation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LTO/SummaryBasedOptimizations.h"
+#include "llvm/Analysis/SyntheticCountsUtils.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/Support/CommandLine.h"
+
+using namespace llvm;
+
+static cl::opt<bool> ThinLTOSynthesizeEntryCounts(
+ "thinlto-synthesize-entry-counts", cl::init(false), cl::Hidden,
+ cl::desc("Synthesize entry counts based on the summary"));
+
+extern cl::opt<int> InitialSyntheticCount;
+
+static void initializeCounts(ModuleSummaryIndex &Index) {
+ auto Root = Index.calculateCallGraphRoot();
+ // Root is a fake node. All its successors are the actual roots of the
+ // callgraph.
+ // FIXME: This initializes the entry counts of only the root nodes. This makes
+ // sense when compiling a binary with ThinLTO, but for libraries any of the
+ // non-root nodes could be called from outside.
+ for (auto &C : Root.calls()) {
+ auto &V = C.first;
+ for (auto &GVS : V.getSummaryList()) {
+ auto S = GVS.get()->getBaseObject();
+ auto *F = cast<FunctionSummary>(S);
+ F->setEntryCount(InitialSyntheticCount);
+ }
+ }
+}
+
+void llvm::computeSyntheticCounts(ModuleSummaryIndex &Index) {
+ if (!ThinLTOSynthesizeEntryCounts)
+ return;
+
+ using Scaled64 = ScaledNumber<uint64_t>;
+ initializeCounts(Index);
+ auto GetCallSiteRelFreq = [](FunctionSummary::EdgeTy &Edge) {
+ return Scaled64(Edge.second.RelBlockFreq, -CalleeInfo::ScaleShift);
+ };
+ auto GetEntryCount = [](ValueInfo V) {
+ if (V.getSummaryList().size()) {
+ auto S = V.getSummaryList().front().get()->getBaseObject();
+ auto *F = cast<FunctionSummary>(S);
+ return F->entryCount();
+ } else {
+ return UINT64_C(0);
+ }
+ };
+ auto AddToEntryCount = [](ValueInfo V, Scaled64 New) {
+ if (!V.getSummaryList().size())
+ return;
+ for (auto &GVS : V.getSummaryList()) {
+ auto S = GVS.get()->getBaseObject();
+ auto *F = cast<FunctionSummary>(S);
+ F->setEntryCount(
+ SaturatingAdd(F->entryCount(), New.template toInt<uint64_t>()));
+ }
+ };
+
+ auto GetProfileCount = [&](ValueInfo V, FunctionSummary::EdgeTy &Edge) {
+ auto RelFreq = GetCallSiteRelFreq(Edge);
+ Scaled64 EC(GetEntryCount(V), 0);
+ return RelFreq * EC;
+ };
+ // After initializing the counts in initializeCounts above, the counts have to
+ // be propagated across the combined callgraph.
+ // SyntheticCountsUtils::propagate takes care of this propagation on any
+ // callgraph that specializes GraphTraits.
+ SyntheticCountsUtils<ModuleSummaryIndex *>::propagate(&Index, GetProfileCount,
+ AddToEntryCount);
+ Index.setHasSyntheticEntryCounts();
+}
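+
+// Worked example (informal): GetCallSiteRelFreq scales RelBlockFreq by
+// 2^-ScaleShift, so an edge with RelBlockFreq == 1 << CalleeInfo::ScaleShift
+// has relative frequency 1.0 and GetProfileCount contributes the caller's
+// full entry count to the callee.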
diff --git a/contrib/libs/llvm12/lib/LTO/ThinLTOCodeGenerator.cpp b/contrib/libs/llvm12/lib/LTO/ThinLTOCodeGenerator.cpp
new file mode 100644
index 00000000000..38f49693b62
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -0,0 +1,1168 @@
+//===-ThinLTOCodeGenerator.cpp - LLVM Link Time Optimizer -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Thin Link Time Optimization library. This library is
+// intended to be used by a linker to optimize code at link time.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LTO/legacy/ThinLTOCodeGenerator.h"
+#include "llvm/Support/CommandLine.h"
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/ModuleSummaryAnalysis.h"
+#include "llvm/Analysis/ProfileSummaryInfo.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LLVMRemarkStreamer.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/PassTimingInfo.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/IRReader/IRReader.h"
+#include "llvm/LTO/LTO.h"
+#include "llvm/LTO/SummaryBasedOptimizations.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Object/IRObjectFile.h"
+#include "llvm/Remarks/HotnessThresholdParser.h"
+#include "llvm/Support/CachePruning.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/SHA1.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/ThreadPool.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
+#include "llvm/Transforms/IPO/Internalize.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
+#include "llvm/Transforms/ObjCARC.h"
+#include "llvm/Transforms/Utils/FunctionImportUtils.h"
+
+#include <numeric>
+
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#else
+#include <io.h>
+#endif
+
+using namespace llvm;
+
+#define DEBUG_TYPE "thinlto"
+
+namespace llvm {
+// Flags defined in LTOCodeGenerator.cpp: -discard-value-names and the
+// optimization-remark options below.
+extern cl::opt<bool> LTODiscardValueNames;
+extern cl::opt<std::string> RemarksFilename;
+extern cl::opt<std::string> RemarksPasses;
+extern cl::opt<bool> RemarksWithHotness;
+extern cl::opt<Optional<uint64_t>, false, remarks::HotnessThresholdParser>
+ RemarksHotnessThreshold;
+extern cl::opt<std::string> RemarksFormat;
+}
+
+namespace {
+
+// Default to using all available threads in the system, but using only one
+// thread per core, as indicated by the usage of
+// heavyweight_hardware_concurrency() below.
+static cl::opt<int> ThreadCount("threads", cl::init(0));
+
+// Simple helper to save temporary files for debug.
+static void saveTempBitcode(const Module &TheModule, StringRef TempDir,
+ unsigned count, StringRef Suffix) {
+ if (TempDir.empty())
+ return;
+ // User asked to save temps; dump the bitcode file after import.
+ std::string SaveTempPath = (TempDir + llvm::Twine(count) + Suffix).str();
+ std::error_code EC;
+ raw_fd_ostream OS(SaveTempPath, EC, sys::fs::OF_None);
+ if (EC)
+ report_fatal_error(Twine("Failed to open ") + SaveTempPath +
+ " to save optimized bitcode\n");
+ WriteBitcodeToFile(TheModule, OS, /* ShouldPreserveUseListOrder */ true);
+}
+
+static const GlobalValueSummary *
+getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) {
+ // If there is any strong definition anywhere, get it.
+ auto StrongDefForLinker = llvm::find_if(
+ GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
+ auto Linkage = Summary->linkage();
+ return !GlobalValue::isAvailableExternallyLinkage(Linkage) &&
+ !GlobalValue::isWeakForLinker(Linkage);
+ });
+ if (StrongDefForLinker != GVSummaryList.end())
+ return StrongDefForLinker->get();
+ // Get the first *linker visible* definition for this global in the summary
+ // list.
+ auto FirstDefForLinker = llvm::find_if(
+ GVSummaryList, [](const std::unique_ptr<GlobalValueSummary> &Summary) {
+ auto Linkage = Summary->linkage();
+ return !GlobalValue::isAvailableExternallyLinkage(Linkage);
+ });
+ // Extern templates can be emitted as available_externally.
+ if (FirstDefForLinker == GVSummaryList.end())
+ return nullptr;
+ return FirstDefForLinker->get();
+}
+
+// Populate map of GUID to the prevailing copy for any multiply defined
+// symbols. Currently assume first copy is prevailing, or any strong
+// definition. Can be refined with Linker information in the future.
+static void computePrevailingCopies(
+ const ModuleSummaryIndex &Index,
+ DenseMap<GlobalValue::GUID, const GlobalValueSummary *> &PrevailingCopy) {
+ auto HasMultipleCopies = [&](const GlobalValueSummaryList &GVSummaryList) {
+ return GVSummaryList.size() > 1;
+ };
+
+ for (auto &I : Index) {
+ if (HasMultipleCopies(I.second.SummaryList))
+ PrevailingCopy[I.first] =
+ getFirstDefinitionForLinker(I.second.SummaryList);
+ }
+}
+
+static StringMap<lto::InputFile *>
+generateModuleMap(std::vector<std::unique_ptr<lto::InputFile>> &Modules) {
+ StringMap<lto::InputFile *> ModuleMap;
+ for (auto &M : Modules) {
+ assert(ModuleMap.find(M->getName()) == ModuleMap.end() &&
+ "Expect unique Buffer Identifier");
+ ModuleMap[M->getName()] = M.get();
+ }
+ return ModuleMap;
+}
+
+static void promoteModule(Module &TheModule, const ModuleSummaryIndex &Index,
+ bool ClearDSOLocalOnDeclarations) {
+ if (renameModuleForThinLTO(TheModule, Index, ClearDSOLocalOnDeclarations))
+ report_fatal_error("renameModuleForThinLTO failed");
+}
+
+namespace {
+class ThinLTODiagnosticInfo : public DiagnosticInfo {
+ const Twine &Msg;
+public:
+ ThinLTODiagnosticInfo(const Twine &DiagMsg,
+ DiagnosticSeverity Severity = DS_Error)
+ : DiagnosticInfo(DK_Linker, Severity), Msg(DiagMsg) {}
+ void print(DiagnosticPrinter &DP) const override { DP << Msg; }
+};
+}
+
+/// Verify the module and strip broken debug info.
+static void verifyLoadedModule(Module &TheModule) {
+ bool BrokenDebugInfo = false;
+ if (verifyModule(TheModule, &dbgs(), &BrokenDebugInfo))
+ report_fatal_error("Broken module found, compilation aborted!");
+ if (BrokenDebugInfo) {
+ TheModule.getContext().diagnose(ThinLTODiagnosticInfo(
+ "Invalid debug info found, debug info will be stripped", DS_Warning));
+ StripDebugInfo(TheModule);
+ }
+}
+
+static std::unique_ptr<Module> loadModuleFromInput(lto::InputFile *Input,
+ LLVMContext &Context,
+ bool Lazy,
+ bool IsImporting) {
+ auto &Mod = Input->getSingleBitcodeModule();
+ SMDiagnostic Err;
+ Expected<std::unique_ptr<Module>> ModuleOrErr =
+ Lazy ? Mod.getLazyModule(Context,
+ /* ShouldLazyLoadMetadata */ true, IsImporting)
+ : Mod.parseModule(Context);
+ if (!ModuleOrErr) {
+ handleAllErrors(ModuleOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ SMDiagnostic Err = SMDiagnostic(Mod.getModuleIdentifier(),
+ SourceMgr::DK_Error, EIB.message());
+ Err.print("ThinLTO", errs());
+ });
+ report_fatal_error("Can't load module, abort.");
+ }
+ if (!Lazy)
+ verifyLoadedModule(*ModuleOrErr.get());
+ return std::move(*ModuleOrErr);
+}
+
+static void
+crossImportIntoModule(Module &TheModule, const ModuleSummaryIndex &Index,
+ StringMap<lto::InputFile *> &ModuleMap,
+ const FunctionImporter::ImportMapTy &ImportList,
+ bool ClearDSOLocalOnDeclarations) {
+ auto Loader = [&](StringRef Identifier) {
+ auto &Input = ModuleMap[Identifier];
+ return loadModuleFromInput(Input, TheModule.getContext(),
+ /*Lazy=*/true, /*IsImporting*/ true);
+ };
+
+ FunctionImporter Importer(Index, Loader, ClearDSOLocalOnDeclarations);
+ Expected<bool> Result = Importer.importFunctions(TheModule, ImportList);
+ if (!Result) {
+ handleAllErrors(Result.takeError(), [&](ErrorInfoBase &EIB) {
+ SMDiagnostic Err = SMDiagnostic(TheModule.getModuleIdentifier(),
+ SourceMgr::DK_Error, EIB.message());
+ Err.print("ThinLTO", errs());
+ });
+ report_fatal_error("importFunctions failed");
+ }
+ // Verify again after cross-importing.
+ verifyLoadedModule(TheModule);
+}
+
+static void optimizeModule(Module &TheModule, TargetMachine &TM,
+ unsigned OptLevel, bool Freestanding,
+ ModuleSummaryIndex *Index) {
+ // Populate the PassManager
+ PassManagerBuilder PMB;
+ PMB.LibraryInfo = new TargetLibraryInfoImpl(TM.getTargetTriple());
+ if (Freestanding)
+ PMB.LibraryInfo->disableAllFunctions();
+ PMB.Inliner = createFunctionInliningPass();
+ // FIXME: should get it from the bitcode?
+ PMB.OptLevel = OptLevel;
+ PMB.LoopVectorize = true;
+ PMB.SLPVectorize = true;
+ // Already did this in verifyLoadedModule().
+ PMB.VerifyInput = false;
+ PMB.VerifyOutput = false;
+ PMB.ImportSummary = Index;
+
+ legacy::PassManager PM;
+
+ // Add the TTI (required to inform the vectorizer about register size for
+ // instance)
+ PM.add(createTargetTransformInfoWrapperPass(TM.getTargetIRAnalysis()));
+
+ // Add optimizations
+ PMB.populateThinLTOPassManager(PM);
+
+ PM.run(TheModule);
+}
+
+static void
+addUsedSymbolToPreservedGUID(const lto::InputFile &File,
+ DenseSet<GlobalValue::GUID> &PreservedGUID) {
+ for (const auto &Sym : File.symbols()) {
+ if (Sym.isUsed())
+ PreservedGUID.insert(GlobalValue::getGUID(Sym.getIRName()));
+ }
+}
+
+// Convert the PreservedSymbols map from "Name" based to "GUID" based.
+static void computeGUIDPreservedSymbols(const lto::InputFile &File,
+ const StringSet<> &PreservedSymbols,
+ const Triple &TheTriple,
+ DenseSet<GlobalValue::GUID> &GUIDs) {
+ // Iterate the symbols in the input file and, if a symbol is preserved,
+ // compute its GUID.
+ for (const auto &Sym : File.symbols()) {
+ if (PreservedSymbols.count(Sym.getName()) && !Sym.getIRName().empty())
+ GUIDs.insert(GlobalValue::getGUID(GlobalValue::getGlobalIdentifier(
+ Sym.getIRName(), GlobalValue::ExternalLinkage, "")));
+ }
+}
+
+static DenseSet<GlobalValue::GUID>
+computeGUIDPreservedSymbols(const lto::InputFile &File,
+ const StringSet<> &PreservedSymbols,
+ const Triple &TheTriple) {
+ DenseSet<GlobalValue::GUID> GUIDPreservedSymbols(PreservedSymbols.size());
+ computeGUIDPreservedSymbols(File, PreservedSymbols, TheTriple,
+ GUIDPreservedSymbols);
+ return GUIDPreservedSymbols;
+}
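+
+// For reference (sketch): with external linkage and an empty path, the
+// global identifier is just the IR name, so the GUID above reduces to an
+// MD5-based 64-bit hash of the symbol name:
+#if 0
+GlobalValue::GUID G = GlobalValue::getGUID("foo"); // == MD5Hash("foo")
+#endif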
+
+std::unique_ptr<MemoryBuffer> codegenModule(Module &TheModule,
+ TargetMachine &TM) {
+ SmallVector<char, 128> OutputBuffer;
+
+ // CodeGen
+ {
+ raw_svector_ostream OS(OutputBuffer);
+ legacy::PassManager PM;
+
+ // If the bitcode files contain ARC code and were compiled with optimization,
+ // the ObjCARCContractPass must be run, so do it unconditionally here.
+ PM.add(createObjCARCContractPass());
+
+ // Setup the codegen now.
+ if (TM.addPassesToEmitFile(PM, OS, nullptr, CGFT_ObjectFile,
+ /* DisableVerify */ true))
+ report_fatal_error("Failed to setup codegen");
+
+ // Run codegen now. The resulting binary is in OutputBuffer.
+ PM.run(TheModule);
+ }
+ return std::make_unique<SmallVectorMemoryBuffer>(std::move(OutputBuffer));
+}
+
+/// Manage caching for a single Module.
+class ModuleCacheEntry {
+ SmallString<128> EntryPath;
+
+public:
+ // Create a cache entry. This computes a unique hash for the Module from
+ // the current lists of exports/imports, and offers an interface for
+ // querying the content in the cache.
+ ModuleCacheEntry(
+ StringRef CachePath, const ModuleSummaryIndex &Index, StringRef ModuleID,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const std::map<GlobalValue::GUID, GlobalValue::LinkageTypes> &ResolvedODR,
+ const GVSummaryMapTy &DefinedGVSummaries, unsigned OptLevel,
+ bool Freestanding, const TargetMachineBuilder &TMBuilder) {
+ if (CachePath.empty())
+ return;
+
+ if (!Index.modulePaths().count(ModuleID))
+ // The module does not have an entry, it can't have a hash at all
+ return;
+
+ if (all_of(Index.getModuleHash(ModuleID),
+ [](uint32_t V) { return V == 0; }))
+ // No hash entry, no caching!
+ return;
+
+ llvm::lto::Config Conf;
+ Conf.OptLevel = OptLevel;
+ Conf.Options = TMBuilder.Options;
+ Conf.CPU = TMBuilder.MCpu;
+ Conf.MAttrs.push_back(TMBuilder.MAttr);
+ Conf.RelocModel = TMBuilder.RelocModel;
+ Conf.CGOptLevel = TMBuilder.CGOptLevel;
+ Conf.Freestanding = Freestanding;
+ SmallString<40> Key;
+ computeLTOCacheKey(Key, Conf, Index, ModuleID, ImportList, ExportList,
+ ResolvedODR, DefinedGVSummaries);
+
+ // This choice of file name allows the cache to be pruned (see pruneCache()
+ // in include/llvm/Support/CachePruning.h).
+ sys::path::append(EntryPath, CachePath, "llvmcache-" + Key);
+ }
+
+ // Access the path to this entry in the cache.
+ StringRef getEntryPath() { return EntryPath; }
+
+ // Try loading the buffer for this cache entry.
+ ErrorOr<std::unique_ptr<MemoryBuffer>> tryLoadingBuffer() {
+ if (EntryPath.empty())
+ return std::error_code();
+ SmallString<64> ResultPath;
+ Expected<sys::fs::file_t> FDOrErr = sys::fs::openNativeFileForRead(
+ Twine(EntryPath), sys::fs::OF_UpdateAtime, &ResultPath);
+ if (!FDOrErr)
+ return errorToErrorCode(FDOrErr.takeError());
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MBOrErr = MemoryBuffer::getOpenFile(
+ *FDOrErr, EntryPath, /*FileSize=*/-1, /*RequiresNullTerminator=*/false);
+ sys::fs::closeFile(*FDOrErr);
+ return MBOrErr;
+ }
+
+ // Cache the produced object file.
+ void write(const MemoryBuffer &OutputBuffer) {
+ if (EntryPath.empty())
+ return;
+
+ // Write to a temporary file to avoid a race condition.
+ SmallString<128> TempFilename;
+ SmallString<128> CachePath(EntryPath);
+ llvm::sys::path::remove_filename(CachePath);
+ sys::path::append(TempFilename, CachePath, "Thin-%%%%%%.tmp.o");
+
+ if (auto Err = handleErrors(
+ llvm::writeFileAtomically(TempFilename, EntryPath,
+ OutputBuffer.getBuffer()),
+ [](const llvm::AtomicFileWriteError &E) {
+ std::string ErrorMsgBuffer;
+ llvm::raw_string_ostream S(ErrorMsgBuffer);
+ E.log(S);
+
+ if (E.Error ==
+ llvm::atomic_write_error::failed_to_create_uniq_file) {
+ errs() << "Error: " << ErrorMsgBuffer << "\n";
+ report_fatal_error("ThinLTO: Can't get a temporary file");
+ }
+ })) {
+ // FIXME
+ consumeError(std::move(Err));
+ }
+ }
+};
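+
+// Usage sketch (illustrative only) of the cache protocol implemented above;
+// the ProcessThinLTOModule arguments are elided:
+#if 0
+ModuleCacheEntry Entry(CachePath, Index, ModuleID, ImportList, ExportList,
+                       ResolvedODR, DefinedGVSummaries, OptLevel,
+                       Freestanding, TMBuilder);
+if (auto MB = Entry.tryLoadingBuffer())
+  return std::move(*MB);                // cache hit: reuse the object file
+auto Obj = ProcessThinLTOModule(/* ... */);
+Entry.write(*Obj);                      // miss: populate for the next build
+#endif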
+
+static std::unique_ptr<MemoryBuffer>
+ProcessThinLTOModule(Module &TheModule, ModuleSummaryIndex &Index,
+ StringMap<lto::InputFile *> &ModuleMap, TargetMachine &TM,
+ const FunctionImporter::ImportMapTy &ImportList,
+ const FunctionImporter::ExportSetTy &ExportList,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
+ const GVSummaryMapTy &DefinedGlobals,
+ const ThinLTOCodeGenerator::CachingOptions &CacheOptions,
+ bool DisableCodeGen, StringRef SaveTempsDir,
+ bool Freestanding, unsigned OptLevel, unsigned count) {
+
+ // "Benchmark"-like optimization: single-source case
+ bool SingleModule = (ModuleMap.size() == 1);
+
+ // When linking an ELF shared object, dso_local should be dropped. We
+ // conservatively do this for -fpic.
+ bool ClearDSOLocalOnDeclarations =
+ TM.getTargetTriple().isOSBinFormatELF() &&
+ TM.getRelocationModel() != Reloc::Static &&
+ TheModule.getPIELevel() == PIELevel::Default;
+
+ if (!SingleModule) {
+ promoteModule(TheModule, Index, ClearDSOLocalOnDeclarations);
+
+ // Apply summary-based prevailing-symbol resolution decisions.
+ thinLTOResolvePrevailingInModule(TheModule, DefinedGlobals);
+
+ // Save temps: after promotion.
+ saveTempBitcode(TheModule, SaveTempsDir, count, ".1.promoted.bc");
+ }
+
+ // Be friendly and don't totally nuke the module when the client didn't
+ // supply anything to preserve.
+ if (!ExportList.empty() || !GUIDPreservedSymbols.empty()) {
+ // Apply summary-based internalization decisions.
+ thinLTOInternalizeModule(TheModule, DefinedGlobals);
+ }
+
+ // Save internalized bitcode
+ saveTempBitcode(TheModule, SaveTempsDir, count, ".2.internalized.bc");
+
+ if (!SingleModule) {
+ crossImportIntoModule(TheModule, Index, ModuleMap, ImportList,
+ ClearDSOLocalOnDeclarations);
+
+ // Save temps: after cross-module import.
+ saveTempBitcode(TheModule, SaveTempsDir, count, ".3.imported.bc");
+ }
+
+ optimizeModule(TheModule, TM, OptLevel, Freestanding, &Index);
+
+ saveTempBitcode(TheModule, SaveTempsDir, count, ".4.opt.bc");
+
+ if (DisableCodeGen) {
+ // Configured to stop before CodeGen, serialize the bitcode and return.
+ SmallVector<char, 128> OutputBuffer;
+ {
+ raw_svector_ostream OS(OutputBuffer);
+ ProfileSummaryInfo PSI(TheModule);
+ auto Index = buildModuleSummaryIndex(TheModule, nullptr, &PSI);
+ WriteBitcodeToFile(TheModule, OS, true, &Index);
+ }
+ return std::make_unique<SmallVectorMemoryBuffer>(std::move(OutputBuffer));
+ }
+
+ return codegenModule(TheModule, TM);
+}
+
+/// Resolve prevailing symbols. Record resolutions in the \p ResolvedODR map
+/// for caching, and in the \p Index for application during the ThinLTO
+/// backends. This is needed for correctness for exported symbols (ensure
+/// at least one copy kept) and a compile-time optimization (to drop duplicate
+/// copies when possible).
+static void resolvePrevailingInIndex(
+ ModuleSummaryIndex &Index,
+ StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>>
+ &ResolvedODR,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols,
+ const DenseMap<GlobalValue::GUID, const GlobalValueSummary *>
+ &PrevailingCopy) {
+
+ auto isPrevailing = [&](GlobalValue::GUID GUID, const GlobalValueSummary *S) {
+ const auto &Prevailing = PrevailingCopy.find(GUID);
+ // Not in map means that there was only one copy, which must be prevailing.
+ if (Prevailing == PrevailingCopy.end())
+ return true;
+ return Prevailing->second == S;
+ };
+
+ auto recordNewLinkage = [&](StringRef ModuleIdentifier,
+ GlobalValue::GUID GUID,
+ GlobalValue::LinkageTypes NewLinkage) {
+ ResolvedODR[ModuleIdentifier][GUID] = NewLinkage;
+ };
+
+ thinLTOResolvePrevailingInIndex(Index, isPrevailing, recordNewLinkage,
+ GUIDPreservedSymbols);
+}
+
+// Initialize the TargetMachine builder for a given Triple
+static void initTMBuilder(TargetMachineBuilder &TMBuilder,
+ const Triple &TheTriple) {
+ // Set a default CPU for Darwin triples (copied from LTOCodeGenerator).
+ // FIXME this looks pretty terrible...
+ if (TMBuilder.MCpu.empty() && TheTriple.isOSDarwin()) {
+ if (TheTriple.getArch() == llvm::Triple::x86_64)
+ TMBuilder.MCpu = "core2";
+ else if (TheTriple.getArch() == llvm::Triple::x86)
+ TMBuilder.MCpu = "yonah";
+ else if (TheTriple.getArch() == llvm::Triple::aarch64 ||
+ TheTriple.getArch() == llvm::Triple::aarch64_32)
+ TMBuilder.MCpu = "cyclone";
+ }
+ TMBuilder.TheTriple = std::move(TheTriple);
+}
+
+} // end anonymous namespace
+
+void ThinLTOCodeGenerator::addModule(StringRef Identifier, StringRef Data) {
+ MemoryBufferRef Buffer(Data, Identifier);
+
+ auto InputOrError = lto::InputFile::create(Buffer);
+ if (!InputOrError)
+ report_fatal_error("ThinLTO cannot create input file: " +
+ toString(InputOrError.takeError()));
+
+ auto TripleStr = (*InputOrError)->getTargetTriple();
+ Triple TheTriple(TripleStr);
+
+ if (Modules.empty())
+ initTMBuilder(TMBuilder, Triple(TheTriple));
+ else if (TMBuilder.TheTriple != TheTriple) {
+ if (!TMBuilder.TheTriple.isCompatibleWith(TheTriple))
+ report_fatal_error("ThinLTO modules with incompatible triples not "
+ "supported");
+ initTMBuilder(TMBuilder, Triple(TMBuilder.TheTriple.merge(TheTriple)));
+ }
+
+ Modules.emplace_back(std::move(*InputOrError));
+}
+
+void ThinLTOCodeGenerator::preserveSymbol(StringRef Name) {
+ PreservedSymbols.insert(Name);
+}
+
+void ThinLTOCodeGenerator::crossReferenceSymbol(StringRef Name) {
+ // FIXME: At the moment, we don't take advantage of this extra information,
+ // we're conservatively considering cross-references as preserved.
+ // CrossReferencedSymbols.insert(Name);
+ PreservedSymbols.insert(Name);
+}
+
+// TargetMachine factory
+std::unique_ptr<TargetMachine> TargetMachineBuilder::create() const {
+ std::string ErrMsg;
+ const Target *TheTarget =
+ TargetRegistry::lookupTarget(TheTriple.str(), ErrMsg);
+ if (!TheTarget) {
+ report_fatal_error("Can't load target for this Triple: " + ErrMsg);
+ }
+
+ // Use MAttr as the default set of features.
+ SubtargetFeatures Features(MAttr);
+ Features.getDefaultSubtargetFeatures(TheTriple);
+ std::string FeatureStr = Features.getString();
+
+ std::unique_ptr<TargetMachine> TM(
+ TheTarget->createTargetMachine(TheTriple.str(), MCpu, FeatureStr, Options,
+ RelocModel, None, CGOptLevel));
+ assert(TM && "Cannot create target machine");
+
+ return TM;
+}
+
+/**
+ * Produce the combined summary index from all the bitcode files:
+ * "thin-link".
+ */
+std::unique_ptr<ModuleSummaryIndex> ThinLTOCodeGenerator::linkCombinedIndex() {
+ std::unique_ptr<ModuleSummaryIndex> CombinedIndex =
+ std::make_unique<ModuleSummaryIndex>(/*HaveGVs=*/false);
+ uint64_t NextModuleId = 0;
+ for (auto &Mod : Modules) {
+ auto &M = Mod->getSingleBitcodeModule();
+ if (Error Err =
+ M.readSummary(*CombinedIndex, Mod->getName(), NextModuleId++)) {
+ // FIXME diagnose
+ logAllUnhandledErrors(
+ std::move(Err), errs(),
+ "error: can't create module summary index for buffer: ");
+ return nullptr;
+ }
+ }
+ return CombinedIndex;
+}
+
+namespace {
+struct IsExported {
+ const StringMap<FunctionImporter::ExportSetTy> &ExportLists;
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols;
+
+ IsExported(const StringMap<FunctionImporter::ExportSetTy> &ExportLists,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols)
+ : ExportLists(ExportLists), GUIDPreservedSymbols(GUIDPreservedSymbols) {}
+
+ bool operator()(StringRef ModuleIdentifier, ValueInfo VI) const {
+ const auto &ExportList = ExportLists.find(ModuleIdentifier);
+ return (ExportList != ExportLists.end() && ExportList->second.count(VI)) ||
+ GUIDPreservedSymbols.count(VI.getGUID());
+ }
+};
+
+struct IsPrevailing {
+ const DenseMap<GlobalValue::GUID, const GlobalValueSummary *> &PrevailingCopy;
+ IsPrevailing(const DenseMap<GlobalValue::GUID, const GlobalValueSummary *>
+ &PrevailingCopy)
+ : PrevailingCopy(PrevailingCopy) {}
+
+ bool operator()(GlobalValue::GUID GUID, const GlobalValueSummary *S) const {
+ const auto &Prevailing = PrevailingCopy.find(GUID);
+ // Not in map means that there was only one copy, which must be prevailing.
+ if (Prevailing == PrevailingCopy.end())
+ return true;
+ return Prevailing->second == S;
+ };
+};
+} // namespace
+
+static void computeDeadSymbolsInIndex(
+ ModuleSummaryIndex &Index,
+ const DenseSet<GlobalValue::GUID> &GUIDPreservedSymbols) {
+  // We have no symbol resolution available, and can't currently do any better
+  // when the prevailing symbol is in a native object. This could be refined
+  // with linker information in the future.
+ auto isPrevailing = [&](GlobalValue::GUID G) {
+ return PrevailingType::Unknown;
+ };
+ computeDeadSymbolsWithConstProp(Index, GUIDPreservedSymbols, isPrevailing,
+ /* ImportEnabled = */ true);
+}
+
+/**
+ * Perform promotion and renaming of exported internal functions.
+ * Index is updated to reflect linkage changes from weak resolution.
+ */
+void ThinLTOCodeGenerator::promote(Module &TheModule, ModuleSummaryIndex &Index,
+ const lto::InputFile &File) {
+ auto ModuleCount = Index.modulePaths().size();
+ auto ModuleIdentifier = TheModule.getModuleIdentifier();
+
+  // Collect for each module the list of functions it defines (GUID -> Summary).
+ StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries;
+ Index.collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+
+ // Convert the preserved symbols set from string to GUID
+ auto GUIDPreservedSymbols = computeGUIDPreservedSymbols(
+ File, PreservedSymbols, Triple(TheModule.getTargetTriple()));
+
+  // Add used symbols to the preserved symbols.
+ addUsedSymbolToPreservedGUID(File, GUIDPreservedSymbols);
+
+ // Compute "dead" symbols, we don't want to import/export these!
+ computeDeadSymbolsInIndex(Index, GUIDPreservedSymbols);
+
+ // Generate import/export list
+ StringMap<FunctionImporter::ImportMapTy> ImportLists(ModuleCount);
+ StringMap<FunctionImporter::ExportSetTy> ExportLists(ModuleCount);
+ ComputeCrossModuleImport(Index, ModuleToDefinedGVSummaries, ImportLists,
+ ExportLists);
+
+ DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
+ computePrevailingCopies(Index, PrevailingCopy);
+
+ // Resolve prevailing symbols
+ StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
+ resolvePrevailingInIndex(Index, ResolvedODR, GUIDPreservedSymbols,
+ PrevailingCopy);
+
+ thinLTOResolvePrevailingInModule(
+ TheModule, ModuleToDefinedGVSummaries[ModuleIdentifier]);
+
+ // Promote the exported values in the index, so that they are promoted
+ // in the module.
+ thinLTOInternalizeAndPromoteInIndex(
+ Index, IsExported(ExportLists, GUIDPreservedSymbols),
+ IsPrevailing(PrevailingCopy));
+
+ // FIXME Set ClearDSOLocalOnDeclarations.
+ promoteModule(TheModule, Index, /*ClearDSOLocalOnDeclarations=*/false);
+}
+
+/**
+ * Perform cross-module importing for the module identified by ModuleIdentifier.
+ */
+void ThinLTOCodeGenerator::crossModuleImport(Module &TheModule,
+ ModuleSummaryIndex &Index,
+ const lto::InputFile &File) {
+ auto ModuleMap = generateModuleMap(Modules);
+ auto ModuleCount = Index.modulePaths().size();
+
+  // Collect for each module the list of functions it defines (GUID -> Summary).
+ StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries(ModuleCount);
+ Index.collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+
+ // Convert the preserved symbols set from string to GUID
+ auto GUIDPreservedSymbols = computeGUIDPreservedSymbols(
+ File, PreservedSymbols, Triple(TheModule.getTargetTriple()));
+
+ addUsedSymbolToPreservedGUID(File, GUIDPreservedSymbols);
+
+ // Compute "dead" symbols, we don't want to import/export these!
+ computeDeadSymbolsInIndex(Index, GUIDPreservedSymbols);
+
+ // Generate import/export list
+ StringMap<FunctionImporter::ImportMapTy> ImportLists(ModuleCount);
+ StringMap<FunctionImporter::ExportSetTy> ExportLists(ModuleCount);
+ ComputeCrossModuleImport(Index, ModuleToDefinedGVSummaries, ImportLists,
+ ExportLists);
+ auto &ImportList = ImportLists[TheModule.getModuleIdentifier()];
+
+ // FIXME Set ClearDSOLocalOnDeclarations.
+ crossImportIntoModule(TheModule, Index, ModuleMap, ImportList,
+ /*ClearDSOLocalOnDeclarations=*/false);
+}
+
+/**
+ * Compute the list of summaries needed for importing into module.
+ */
+void ThinLTOCodeGenerator::gatherImportedSummariesForModule(
+ Module &TheModule, ModuleSummaryIndex &Index,
+ std::map<std::string, GVSummaryMapTy> &ModuleToSummariesForIndex,
+ const lto::InputFile &File) {
+ auto ModuleCount = Index.modulePaths().size();
+ auto ModuleIdentifier = TheModule.getModuleIdentifier();
+
+  // Collect for each module the list of functions it defines (GUID -> Summary).
+ StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries(ModuleCount);
+ Index.collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+
+ // Convert the preserved symbols set from string to GUID
+ auto GUIDPreservedSymbols = computeGUIDPreservedSymbols(
+ File, PreservedSymbols, Triple(TheModule.getTargetTriple()));
+
+ addUsedSymbolToPreservedGUID(File, GUIDPreservedSymbols);
+
+ // Compute "dead" symbols, we don't want to import/export these!
+ computeDeadSymbolsInIndex(Index, GUIDPreservedSymbols);
+
+ // Generate import/export list
+ StringMap<FunctionImporter::ImportMapTy> ImportLists(ModuleCount);
+ StringMap<FunctionImporter::ExportSetTy> ExportLists(ModuleCount);
+ ComputeCrossModuleImport(Index, ModuleToDefinedGVSummaries, ImportLists,
+ ExportLists);
+
+ llvm::gatherImportedSummariesForModule(
+ ModuleIdentifier, ModuleToDefinedGVSummaries,
+ ImportLists[ModuleIdentifier], ModuleToSummariesForIndex);
+}
+
+/**
+ * Emit the list of files needed for importing into module.
+ */
+void ThinLTOCodeGenerator::emitImports(Module &TheModule, StringRef OutputName,
+ ModuleSummaryIndex &Index,
+ const lto::InputFile &File) {
+ auto ModuleCount = Index.modulePaths().size();
+ auto ModuleIdentifier = TheModule.getModuleIdentifier();
+
+  // Collect for each module the list of functions it defines (GUID -> Summary).
+ StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries(ModuleCount);
+ Index.collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+
+ // Convert the preserved symbols set from string to GUID
+ auto GUIDPreservedSymbols = computeGUIDPreservedSymbols(
+ File, PreservedSymbols, Triple(TheModule.getTargetTriple()));
+
+ addUsedSymbolToPreservedGUID(File, GUIDPreservedSymbols);
+
+ // Compute "dead" symbols, we don't want to import/export these!
+ computeDeadSymbolsInIndex(Index, GUIDPreservedSymbols);
+
+ // Generate import/export list
+ StringMap<FunctionImporter::ImportMapTy> ImportLists(ModuleCount);
+ StringMap<FunctionImporter::ExportSetTy> ExportLists(ModuleCount);
+ ComputeCrossModuleImport(Index, ModuleToDefinedGVSummaries, ImportLists,
+ ExportLists);
+
+ std::map<std::string, GVSummaryMapTy> ModuleToSummariesForIndex;
+ llvm::gatherImportedSummariesForModule(
+ ModuleIdentifier, ModuleToDefinedGVSummaries,
+ ImportLists[ModuleIdentifier], ModuleToSummariesForIndex);
+
+ std::error_code EC;
+ if ((EC = EmitImportsFiles(ModuleIdentifier, OutputName,
+ ModuleToSummariesForIndex)))
+ report_fatal_error(Twine("Failed to open ") + OutputName +
+ " to save imports lists\n");
+}
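+
+// Note: the emitted imports file is consumed by distributed ThinLTO
+// backends; it lists, one path per line, the bitcode modules that must be
+// made available when importing into this module.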
+
+/**
+ * Perform internalization. Runs promote and internalization together.
+ * Index is updated to reflect linkage changes.
+ */
+void ThinLTOCodeGenerator::internalize(Module &TheModule,
+ ModuleSummaryIndex &Index,
+ const lto::InputFile &File) {
+ initTMBuilder(TMBuilder, Triple(TheModule.getTargetTriple()));
+ auto ModuleCount = Index.modulePaths().size();
+ auto ModuleIdentifier = TheModule.getModuleIdentifier();
+
+ // Convert the preserved symbols set from string to GUID
+ auto GUIDPreservedSymbols =
+ computeGUIDPreservedSymbols(File, PreservedSymbols, TMBuilder.TheTriple);
+
+ addUsedSymbolToPreservedGUID(File, GUIDPreservedSymbols);
+
+  // Collect for each module the list of functions it defines (GUID -> Summary).
+ StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries(ModuleCount);
+ Index.collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+
+ // Compute "dead" symbols, we don't want to import/export these!
+ computeDeadSymbolsInIndex(Index, GUIDPreservedSymbols);
+
+ // Generate import/export list
+ StringMap<FunctionImporter::ImportMapTy> ImportLists(ModuleCount);
+ StringMap<FunctionImporter::ExportSetTy> ExportLists(ModuleCount);
+ ComputeCrossModuleImport(Index, ModuleToDefinedGVSummaries, ImportLists,
+ ExportLists);
+ auto &ExportList = ExportLists[ModuleIdentifier];
+
+  // Be friendly and don't totally nuke the module when the client didn't
+  // supply anything to preserve.
+ if (ExportList.empty() && GUIDPreservedSymbols.empty())
+ return;
+
+ DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
+ computePrevailingCopies(Index, PrevailingCopy);
+
+ // Resolve prevailing symbols
+ StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
+ resolvePrevailingInIndex(Index, ResolvedODR, GUIDPreservedSymbols,
+ PrevailingCopy);
+
+ // Promote the exported values in the index, so that they are promoted
+ // in the module.
+ thinLTOInternalizeAndPromoteInIndex(
+ Index, IsExported(ExportLists, GUIDPreservedSymbols),
+ IsPrevailing(PrevailingCopy));
+
+ // FIXME Set ClearDSOLocalOnDeclarations.
+ promoteModule(TheModule, Index, /*ClearDSOLocalOnDeclarations=*/false);
+
+ // Internalization
+ thinLTOResolvePrevailingInModule(
+ TheModule, ModuleToDefinedGVSummaries[ModuleIdentifier]);
+
+ thinLTOInternalizeModule(TheModule,
+ ModuleToDefinedGVSummaries[ModuleIdentifier]);
+}
+
+/**
+ * Perform post-importing ThinLTO optimizations.
+ */
+void ThinLTOCodeGenerator::optimize(Module &TheModule) {
+ initTMBuilder(TMBuilder, Triple(TheModule.getTargetTriple()));
+
+ // Optimize now
+ optimizeModule(TheModule, *TMBuilder.create(), OptLevel, Freestanding,
+ nullptr);
+}
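+
+// In a distributed ThinLTO build, the per-module hooks above are typically
+// driven in this order (sketch; TheModule, Index and File are assumed to be
+// loaded by the caller):
+//
+//   CodeGen.promote(TheModule, *Index, File);
+//   CodeGen.crossModuleImport(TheModule, *Index, File);
+//   CodeGen.internalize(TheModule, *Index, File);
+//   CodeGen.optimize(TheModule);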
+
+/// Write out the generated object file, either from CacheEntryPath or from
+/// OutputBuffer, preferring hard-link when possible.
+/// Returns the path to the generated file in SavedObjectsDirectoryPath.
+std::string
+ThinLTOCodeGenerator::writeGeneratedObject(int count, StringRef CacheEntryPath,
+ const MemoryBuffer &OutputBuffer) {
+ auto ArchName = TMBuilder.TheTriple.getArchName();
+ SmallString<128> OutputPath(SavedObjectsDirectoryPath);
+ llvm::sys::path::append(OutputPath,
+ Twine(count) + "." + ArchName + ".thinlto.o");
+ OutputPath.c_str(); // Ensure the string is null terminated.
+ if (sys::fs::exists(OutputPath))
+ sys::fs::remove(OutputPath);
+
+ // We don't return a memory buffer to the linker, just a list of files.
+ if (!CacheEntryPath.empty()) {
+ // Cache is enabled, hard-link the entry (or copy if hard-link fails).
+ auto Err = sys::fs::create_hard_link(CacheEntryPath, OutputPath);
+ if (!Err)
+ return std::string(OutputPath.str());
+ // Hard linking failed, try to copy.
+ Err = sys::fs::copy_file(CacheEntryPath, OutputPath);
+ if (!Err)
+ return std::string(OutputPath.str());
+    // Copy failed (could be because the CacheEntry was removed from the cache
+    // in the meantime by another process); fall back to writing the buffer
+    // to the output file.
+ errs() << "remark: can't link or copy from cached entry '" << CacheEntryPath
+ << "' to '" << OutputPath << "'\n";
+ }
+ // No cache entry, just write out the buffer.
+ std::error_code Err;
+ raw_fd_ostream OS(OutputPath, Err, sys::fs::OF_None);
+ if (Err)
+ report_fatal_error("Can't open output '" + OutputPath + "'\n");
+ OS << OutputBuffer.getBuffer();
+ return std::string(OutputPath.str());
+}
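+
+// Note: with a cache entry present, the function above prefers a hard link,
+// then falls back to a file copy, and only re-writes the in-memory buffer
+// when both fail, so the common case needs no extra I/O.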
+
+// Main entry point for the ThinLTO processing
+void ThinLTOCodeGenerator::run() {
+ // Prepare the resulting object vector
+ assert(ProducedBinaries.empty() && "The generator should not be reused");
+ if (SavedObjectsDirectoryPath.empty())
+ ProducedBinaries.resize(Modules.size());
+ else {
+ sys::fs::create_directories(SavedObjectsDirectoryPath);
+ bool IsDir;
+ sys::fs::is_directory(SavedObjectsDirectoryPath, IsDir);
+ if (!IsDir)
+ report_fatal_error("Unexistent dir: '" + SavedObjectsDirectoryPath + "'");
+ ProducedBinaryFiles.resize(Modules.size());
+ }
+
+ if (CodeGenOnly) {
+ // Perform only parallel codegen and return.
+ ThreadPool Pool;
+ int count = 0;
+ for (auto &Mod : Modules) {
+ Pool.async([&](int count) {
+ LLVMContext Context;
+ Context.setDiscardValueNames(LTODiscardValueNames);
+
+ // Parse module now
+ auto TheModule = loadModuleFromInput(Mod.get(), Context, false,
+ /*IsImporting*/ false);
+
+ // CodeGen
+ auto OutputBuffer = codegenModule(*TheModule, *TMBuilder.create());
+ if (SavedObjectsDirectoryPath.empty())
+ ProducedBinaries[count] = std::move(OutputBuffer);
+ else
+ ProducedBinaryFiles[count] =
+ writeGeneratedObject(count, "", *OutputBuffer);
+ }, count++);
+ }
+
+ return;
+ }
+
+ // Sequential linking phase
+ auto Index = linkCombinedIndex();
+
+ // Save temps: index.
+ if (!SaveTempsDir.empty()) {
+ auto SaveTempPath = SaveTempsDir + "index.bc";
+ std::error_code EC;
+ raw_fd_ostream OS(SaveTempPath, EC, sys::fs::OF_None);
+ if (EC)
+ report_fatal_error(Twine("Failed to open ") + SaveTempPath +
+ " to save optimized bitcode\n");
+ WriteIndexToFile(*Index, OS);
+ }
+
+ // Prepare the module map.
+ auto ModuleMap = generateModuleMap(Modules);
+ auto ModuleCount = Modules.size();
+
+  // Collect for each module the list of functions it defines (GUID -> Summary).
+ StringMap<GVSummaryMapTy> ModuleToDefinedGVSummaries(ModuleCount);
+ Index->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+
+  // Convert the preserved symbols set from string to GUID; this is needed for
+  // computing the caching hash and for internalization.
+ DenseSet<GlobalValue::GUID> GUIDPreservedSymbols;
+ for (const auto &M : Modules)
+ computeGUIDPreservedSymbols(*M, PreservedSymbols, TMBuilder.TheTriple,
+ GUIDPreservedSymbols);
+
+  // Add used symbols from the inputs to the preserved symbols.
+ for (const auto &M : Modules)
+ addUsedSymbolToPreservedGUID(*M, GUIDPreservedSymbols);
+
+ // Compute "dead" symbols, we don't want to import/export these!
+ computeDeadSymbolsInIndex(*Index, GUIDPreservedSymbols);
+
+ // Synthesize entry counts for functions in the combined index.
+ computeSyntheticCounts(*Index);
+
+ // Currently there is no support for enabling whole program visibility via a
+ // linker option in the old LTO API, but this call allows it to be specified
+ // via the internal option. Must be done before WPD below.
+ updateVCallVisibilityInIndex(*Index,
+ /* WholeProgramVisibilityEnabledInLTO */ false);
+
+ // Perform index-based WPD. This will return immediately if there are
+ // no index entries in the typeIdMetadata map (e.g. if we are instead
+ // performing IR-based WPD in hybrid regular/thin LTO mode).
+ std::map<ValueInfo, std::vector<VTableSlotSummary>> LocalWPDTargetsMap;
+ std::set<GlobalValue::GUID> ExportedGUIDs;
+ runWholeProgramDevirtOnIndex(*Index, ExportedGUIDs, LocalWPDTargetsMap);
+ for (auto GUID : ExportedGUIDs)
+ GUIDPreservedSymbols.insert(GUID);
+
+ // Collect the import/export lists for all modules from the call-graph in the
+ // combined index.
+ StringMap<FunctionImporter::ImportMapTy> ImportLists(ModuleCount);
+ StringMap<FunctionImporter::ExportSetTy> ExportLists(ModuleCount);
+ ComputeCrossModuleImport(*Index, ModuleToDefinedGVSummaries, ImportLists,
+ ExportLists);
+
+ // We use a std::map here to be able to have a defined ordering when
+ // producing a hash for the cache entry.
+ // FIXME: we should be able to compute the caching hash for the entry based
+ // on the index, and nuke this map.
+ StringMap<std::map<GlobalValue::GUID, GlobalValue::LinkageTypes>> ResolvedODR;
+
+ DenseMap<GlobalValue::GUID, const GlobalValueSummary *> PrevailingCopy;
+ computePrevailingCopies(*Index, PrevailingCopy);
+
+  // Resolve prevailing symbols; this has to be computed early because it
+  // impacts the caching.
+ resolvePrevailingInIndex(*Index, ResolvedODR, GUIDPreservedSymbols,
+ PrevailingCopy);
+
+ // Use global summary-based analysis to identify symbols that can be
+ // internalized (because they aren't exported or preserved as per callback).
+ // Changes are made in the index, consumed in the ThinLTO backends.
+ updateIndexWPDForExports(*Index,
+ IsExported(ExportLists, GUIDPreservedSymbols),
+ LocalWPDTargetsMap);
+ thinLTOInternalizeAndPromoteInIndex(
+ *Index, IsExported(ExportLists, GUIDPreservedSymbols),
+ IsPrevailing(PrevailingCopy));
+
+ // Make sure that every module has an entry in the ExportLists, ImportList,
+ // GVSummary and ResolvedODR maps to enable threaded access to these maps
+ // below.
+ for (auto &Module : Modules) {
+ auto ModuleIdentifier = Module->getName();
+ ExportLists[ModuleIdentifier];
+ ImportLists[ModuleIdentifier];
+ ResolvedODR[ModuleIdentifier];
+ ModuleToDefinedGVSummaries[ModuleIdentifier];
+ }
+
+ std::vector<BitcodeModule *> ModulesVec;
+ ModulesVec.reserve(Modules.size());
+ for (auto &Mod : Modules)
+ ModulesVec.push_back(&Mod->getSingleBitcodeModule());
+ std::vector<int> ModulesOrdering = lto::generateModulesOrdering(ModulesVec);
+
+ // Parallel optimizer + codegen
+ {
+ ThreadPool Pool(heavyweight_hardware_concurrency(ThreadCount));
+ for (auto IndexCount : ModulesOrdering) {
+ auto &Mod = Modules[IndexCount];
+ Pool.async([&](int count) {
+ auto ModuleIdentifier = Mod->getName();
+ auto &ExportList = ExportLists[ModuleIdentifier];
+
+ auto &DefinedGVSummaries = ModuleToDefinedGVSummaries[ModuleIdentifier];
+
+        // The module may be cached; this helps handle it.
+ ModuleCacheEntry CacheEntry(CacheOptions.Path, *Index, ModuleIdentifier,
+ ImportLists[ModuleIdentifier], ExportList,
+ ResolvedODR[ModuleIdentifier],
+ DefinedGVSummaries, OptLevel, Freestanding,
+ TMBuilder);
+ auto CacheEntryPath = CacheEntry.getEntryPath();
+
+ {
+ auto ErrOrBuffer = CacheEntry.tryLoadingBuffer();
+ LLVM_DEBUG(dbgs() << "Cache " << (ErrOrBuffer ? "hit" : "miss")
+ << " '" << CacheEntryPath << "' for buffer "
+ << count << " " << ModuleIdentifier << "\n");
+
+ if (ErrOrBuffer) {
+ // Cache Hit!
+ if (SavedObjectsDirectoryPath.empty())
+ ProducedBinaries[count] = std::move(ErrOrBuffer.get());
+ else
+ ProducedBinaryFiles[count] = writeGeneratedObject(
+ count, CacheEntryPath, *ErrOrBuffer.get());
+ return;
+ }
+ }
+
+ LLVMContext Context;
+ Context.setDiscardValueNames(LTODiscardValueNames);
+ Context.enableDebugTypeODRUniquing();
+ auto DiagFileOrErr = lto::setupLLVMOptimizationRemarks(
+ Context, RemarksFilename, RemarksPasses, RemarksFormat,
+ RemarksWithHotness, RemarksHotnessThreshold, count);
+ if (!DiagFileOrErr) {
+ errs() << "Error: " << toString(DiagFileOrErr.takeError()) << "\n";
+ report_fatal_error("ThinLTO: Can't get an output file for the "
+ "remarks");
+ }
+
+ // Parse module now
+ auto TheModule = loadModuleFromInput(Mod.get(), Context, false,
+ /*IsImporting*/ false);
+
+ // Save temps: original file.
+ saveTempBitcode(*TheModule, SaveTempsDir, count, ".0.original.bc");
+
+ auto &ImportList = ImportLists[ModuleIdentifier];
+        // Run the main process now, and generate a binary.
+ auto OutputBuffer = ProcessThinLTOModule(
+ *TheModule, *Index, ModuleMap, *TMBuilder.create(), ImportList,
+ ExportList, GUIDPreservedSymbols,
+ ModuleToDefinedGVSummaries[ModuleIdentifier], CacheOptions,
+ DisableCodeGen, SaveTempsDir, Freestanding, OptLevel, count);
+
+ // Commit to the cache (if enabled)
+ CacheEntry.write(*OutputBuffer);
+
+ if (SavedObjectsDirectoryPath.empty()) {
+          // We need to generate a memory buffer for the linker.
+ if (!CacheEntryPath.empty()) {
+ // When cache is enabled, reload from the cache if possible.
+ // Releasing the buffer from the heap and reloading it from the
+ // cache file with mmap helps us to lower memory pressure.
+ // The freed memory can be used for the next input file.
+ // The final binary link will read from the VFS cache (hopefully!)
+ // or from disk (if the memory pressure was too high).
+ auto ReloadedBufferOrErr = CacheEntry.tryLoadingBuffer();
+ if (auto EC = ReloadedBufferOrErr.getError()) {
+ // On error, keep the preexisting buffer and print a diagnostic.
+ errs() << "remark: can't reload cached file '" << CacheEntryPath
+ << "': " << EC.message() << "\n";
+ } else {
+ OutputBuffer = std::move(*ReloadedBufferOrErr);
+ }
+ }
+ ProducedBinaries[count] = std::move(OutputBuffer);
+ return;
+ }
+ ProducedBinaryFiles[count] = writeGeneratedObject(
+ count, CacheEntryPath, *OutputBuffer);
+ }, IndexCount);
+ }
+ }
+
+ pruneCache(CacheOptions.Path, CacheOptions.Policy);
+
+ // If statistics were requested, print them out now.
+ if (llvm::AreStatisticsEnabled())
+ llvm::PrintStatistics();
+ reportAndResetTimings();
+}
diff --git a/contrib/libs/llvm12/lib/LTO/UpdateCompilerUsed.cpp b/contrib/libs/llvm12/lib/LTO/UpdateCompilerUsed.cpp
new file mode 100644
index 00000000000..040e1106523
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LTO/UpdateCompilerUsed.cpp
@@ -0,0 +1,133 @@
+//==-LTOInternalize.cpp - LLVM Link Time Optimizer Internalization Utility -==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a helper to run the internalization part of LTO.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LTO/legacy/UpdateCompilerUsed.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/Transforms/Utils/ModuleUtils.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+namespace {
+
+// Helper class that collects AsmUsed and user supplied libcalls.
+class PreserveLibCallsAndAsmUsed {
+public:
+ PreserveLibCallsAndAsmUsed(const StringSet<> &AsmUndefinedRefs,
+ const TargetMachine &TM,
+ std::vector<GlobalValue *> &LLVMUsed)
+ : AsmUndefinedRefs(AsmUndefinedRefs), TM(TM), LLVMUsed(LLVMUsed) {}
+
+ void findInModule(Module &TheModule) {
+ initializeLibCalls(TheModule);
+ for (Function &F : TheModule)
+ findLibCallsAndAsm(F);
+ for (GlobalVariable &GV : TheModule.globals())
+ findLibCallsAndAsm(GV);
+ for (GlobalAlias &GA : TheModule.aliases())
+ findLibCallsAndAsm(GA);
+ }
+
+private:
+ // Inputs
+ const StringSet<> &AsmUndefinedRefs;
+ const TargetMachine &TM;
+
+ // Temps
+ llvm::Mangler Mangler;
+ StringSet<> Libcalls;
+
+ // Output
+ std::vector<GlobalValue *> &LLVMUsed;
+
+ // Collect names of runtime library functions. User-defined functions with the
+ // same names are added to llvm.compiler.used to prevent them from being
+ // deleted by optimizations.
+ void initializeLibCalls(const Module &TheModule) {
+ TargetLibraryInfoImpl TLII(Triple(TM.getTargetTriple()));
+ TargetLibraryInfo TLI(TLII);
+
+ // TargetLibraryInfo has info on C runtime library calls on the current
+ // target.
+ for (unsigned I = 0, E = static_cast<unsigned>(LibFunc::NumLibFuncs);
+ I != E; ++I) {
+ LibFunc F = static_cast<LibFunc>(I);
+ if (TLI.has(F))
+ Libcalls.insert(TLI.getName(F));
+ }
+
+ SmallPtrSet<const TargetLowering *, 1> TLSet;
+
+ for (const Function &F : TheModule) {
+ const TargetLowering *Lowering =
+ TM.getSubtargetImpl(F)->getTargetLowering();
+
+ if (Lowering && TLSet.insert(Lowering).second)
+ // TargetLowering has info on library calls that CodeGen expects to be
+ // available, both from the C runtime and compiler-rt.
+ for (unsigned I = 0, E = static_cast<unsigned>(RTLIB::UNKNOWN_LIBCALL);
+ I != E; ++I)
+ if (const char *Name =
+ Lowering->getLibcallName(static_cast<RTLIB::Libcall>(I)))
+ Libcalls.insert(Name);
+ }
+ }
+
+ void findLibCallsAndAsm(GlobalValue &GV) {
+ // There are no restrictions to apply to declarations.
+ if (GV.isDeclaration())
+ return;
+
+ // There is nothing more restrictive than private linkage.
+ if (GV.hasPrivateLinkage())
+ return;
+
+ // Conservatively append user-supplied runtime library functions (supplied
+ // either directly, or via a function alias) to llvm.compiler.used. These
+ // could be internalized and deleted by optimizations like -globalopt,
+ // causing problems when later optimizations add new library calls (e.g.,
+ // llvm.memset => memset and printf => puts).
+ // Leave it to the linker to remove any dead code (e.g. with -dead_strip).
+ GlobalValue *FuncAliasee = nullptr;
+ if (isa<GlobalAlias>(GV)) {
+ auto *A = cast<GlobalAlias>(&GV);
+ FuncAliasee = dyn_cast<Function>(A->getAliasee());
+ }
+ if ((isa<Function>(GV) || FuncAliasee) && Libcalls.count(GV.getName())) {
+ LLVMUsed.push_back(&GV);
+ return;
+ }
+
+ SmallString<64> Buffer;
+ TM.getNameWithPrefix(Buffer, &GV, Mangler);
+ if (AsmUndefinedRefs.count(Buffer))
+ LLVMUsed.push_back(&GV);
+ }
+};
+
+} // end anonymous namespace
+
+void llvm::updateCompilerUsed(Module &TheModule, const TargetMachine &TM,
+ const StringSet<> &AsmUndefinedRefs) {
+ std::vector<GlobalValue *> UsedValues;
+ PreserveLibCallsAndAsmUsed(AsmUndefinedRefs, TM, UsedValues)
+ .findInModule(TheModule);
+
+ if (UsedValues.empty())
+ return;
+
+ appendToCompilerUsed(TheModule, UsedValues);
+}
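+
+// Usage sketch (illustrative; the TargetMachine and the set of symbols
+// referenced from inline asm are assumed to come from the LTO driver):
+//
+//   StringSet<> AsmUndefs;
+//   AsmUndefs.insert("memcpy");
+//   updateCompilerUsed(M, *TM, AsmUndefs);
+//   // A definition of memcpy in M is now kept alive via llvm.compiler.used.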
diff --git a/contrib/libs/llvm12/lib/LineEditor/LineEditor.cpp b/contrib/libs/llvm12/lib/LineEditor/LineEditor.cpp
new file mode 100644
index 00000000000..1aa3476eb35
--- /dev/null
+++ b/contrib/libs/llvm12/lib/LineEditor/LineEditor.cpp
@@ -0,0 +1,324 @@
+//===-- LineEditor.cpp - line editor --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LineEditor/LineEditor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Config/config.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdio>
+#ifdef HAVE_LIBEDIT
+#include <histedit.h>
+#endif
+
+using namespace llvm;
+
+std::string LineEditor::getDefaultHistoryPath(StringRef ProgName) {
+ SmallString<32> Path;
+ if (sys::path::home_directory(Path)) {
+ sys::path::append(Path, "." + ProgName + "-history");
+ return std::string(Path.str());
+ }
+ return std::string();
+}
+
+LineEditor::CompleterConcept::~CompleterConcept() {}
+LineEditor::ListCompleterConcept::~ListCompleterConcept() {}
+
+std::string LineEditor::ListCompleterConcept::getCommonPrefix(
+ const std::vector<Completion> &Comps) {
+ assert(!Comps.empty());
+
+ std::string CommonPrefix = Comps[0].TypedText;
+ for (std::vector<Completion>::const_iterator I = Comps.begin() + 1,
+ E = Comps.end();
+ I != E; ++I) {
+ size_t Len = std::min(CommonPrefix.size(), I->TypedText.size());
+ size_t CommonLen = 0;
+ for (; CommonLen != Len; ++CommonLen) {
+ if (CommonPrefix[CommonLen] != I->TypedText[CommonLen])
+ break;
+ }
+ CommonPrefix.resize(CommonLen);
+ }
+ return CommonPrefix;
+}
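+
+// For example, given the completions {"foobar", "foobaz", "foobug"}, the
+// loop above first narrows the prefix to "fooba" and then to "foob".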
+
+LineEditor::CompletionAction
+LineEditor::ListCompleterConcept::complete(StringRef Buffer, size_t Pos) const {
+ CompletionAction Action;
+ std::vector<Completion> Comps = getCompletions(Buffer, Pos);
+ if (Comps.empty()) {
+ Action.Kind = CompletionAction::AK_ShowCompletions;
+ return Action;
+ }
+
+ std::string CommonPrefix = getCommonPrefix(Comps);
+
+ // If the common prefix is non-empty we can simply insert it. If there is a
+ // single completion, this will insert the full completion. If there is more
+  // than one, this might be enough information to jog the user's memory, but
+  // if not, the user can also hit tab again to see the completions, because
+  // the common prefix will then be empty.
+ if (CommonPrefix.empty()) {
+ Action.Kind = CompletionAction::AK_ShowCompletions;
+ for (std::vector<Completion>::iterator I = Comps.begin(), E = Comps.end();
+ I != E; ++I)
+ Action.Completions.push_back(I->DisplayText);
+ } else {
+ Action.Kind = CompletionAction::AK_Insert;
+ Action.Text = CommonPrefix;
+ }
+
+ return Action;
+}
+
+LineEditor::CompletionAction LineEditor::getCompletionAction(StringRef Buffer,
+ size_t Pos) const {
+ if (!Completer) {
+ CompletionAction Action;
+ Action.Kind = CompletionAction::AK_ShowCompletions;
+ return Action;
+ }
+
+ return Completer->complete(Buffer, Pos);
+}
+
+#ifdef HAVE_LIBEDIT
+
+// libedit-based implementation.
+
+struct LineEditor::InternalData {
+ LineEditor *LE;
+
+ History *Hist;
+ EditLine *EL;
+
+ unsigned PrevCount;
+ std::string ContinuationOutput;
+
+ FILE *Out;
+};
+
+namespace {
+
+const char *ElGetPromptFn(EditLine *EL) {
+ LineEditor::InternalData *Data;
+ if (el_get(EL, EL_CLIENTDATA, &Data) == 0)
+ return Data->LE->getPrompt().c_str();
+ return "> ";
+}
+
+// Handles tab completion.
+//
+// This function is really horrible. But since the alternative is to get into
+// the line editor business, here we are.
+unsigned char ElCompletionFn(EditLine *EL, int ch) {
+ LineEditor::InternalData *Data;
+ if (el_get(EL, EL_CLIENTDATA, &Data) == 0) {
+ if (!Data->ContinuationOutput.empty()) {
+ // This is the continuation of the AK_ShowCompletions branch below.
+ FILE *Out = Data->Out;
+
+ // Print the required output (see below).
+ ::fwrite(Data->ContinuationOutput.c_str(),
+ Data->ContinuationOutput.size(), 1, Out);
+
+ // Push a sequence of Ctrl-B characters to move the cursor back to its
+ // original position.
+ std::string Prevs(Data->PrevCount, '\02');
+ ::el_push(EL, const_cast<char *>(Prevs.c_str()));
+
+ Data->ContinuationOutput.clear();
+
+ return CC_REFRESH;
+ }
+
+ const LineInfo *LI = ::el_line(EL);
+ LineEditor::CompletionAction Action = Data->LE->getCompletionAction(
+ StringRef(LI->buffer, LI->lastchar - LI->buffer),
+ LI->cursor - LI->buffer);
+ switch (Action.Kind) {
+ case LineEditor::CompletionAction::AK_Insert:
+ ::el_insertstr(EL, Action.Text.c_str());
+ return CC_REFRESH;
+
+ case LineEditor::CompletionAction::AK_ShowCompletions:
+ if (Action.Completions.empty()) {
+ return CC_REFRESH_BEEP;
+ } else {
+ // Push a Ctrl-E and a tab. The Ctrl-E causes libedit to move the cursor
+ // to the end of the line, so that when we emit a newline we will be on
+ // a new blank line. The tab causes libedit to call this function again
+ // after moving the cursor. There doesn't seem to be anything we can do
+ // from here to cause libedit to move the cursor immediately. This will
+ // break horribly if the user has rebound their keys, so for now we do
+ // not permit user rebinding.
+ ::el_push(EL, const_cast<char *>("\05\t"));
+
+ // This assembles the output for the continuation block above.
+ raw_string_ostream OS(Data->ContinuationOutput);
+
+ // Move cursor to a blank line.
+ OS << "\n";
+
+ // Emit the completions.
+ for (std::vector<std::string>::iterator I = Action.Completions.begin(),
+ E = Action.Completions.end();
+ I != E; ++I) {
+ OS << *I << "\n";
+ }
+
+ // Fool libedit into thinking nothing has changed. Reprint its prompt
+ // and the user input. Note that the cursor will remain at the end of
+ // the line after this.
+ OS << Data->LE->getPrompt()
+ << StringRef(LI->buffer, LI->lastchar - LI->buffer);
+
+ // This is the number of characters we need to tell libedit to go back:
+ // the distance between end of line and the original cursor position.
+ Data->PrevCount = LI->lastchar - LI->cursor;
+
+ return CC_REFRESH;
+ }
+ }
+ }
+ return CC_ERROR;
+}
+
+} // end anonymous namespace
+
+LineEditor::LineEditor(StringRef ProgName, StringRef HistoryPath, FILE *In,
+ FILE *Out, FILE *Err)
+ : Prompt((ProgName + "> ").str()), HistoryPath(std::string(HistoryPath)),
+ Data(new InternalData) {
+ if (HistoryPath.empty())
+ this->HistoryPath = getDefaultHistoryPath(ProgName);
+
+ Data->LE = this;
+ Data->Out = Out;
+
+ Data->Hist = ::history_init();
+ assert(Data->Hist);
+
+ Data->EL = ::el_init(ProgName.str().c_str(), In, Out, Err);
+ assert(Data->EL);
+
+ ::el_set(Data->EL, EL_PROMPT, ElGetPromptFn);
+ ::el_set(Data->EL, EL_EDITOR, "emacs");
+ ::el_set(Data->EL, EL_HIST, history, Data->Hist);
+ ::el_set(Data->EL, EL_ADDFN, "tab_complete", "Tab completion function",
+ ElCompletionFn);
+ ::el_set(Data->EL, EL_BIND, "\t", "tab_complete", NULL);
+ ::el_set(Data->EL, EL_BIND, "^r", "em-inc-search-prev",
+ NULL); // Cycle through backwards search, entering string
+ ::el_set(Data->EL, EL_BIND, "^w", "ed-delete-prev-word",
+ NULL); // Delete previous word, behave like bash does.
+ ::el_set(Data->EL, EL_BIND, "\033[3~", "ed-delete-next-char",
+ NULL); // Fix the delete key.
+ ::el_set(Data->EL, EL_CLIENTDATA, Data.get());
+
+ HistEvent HE;
+ ::history(Data->Hist, &HE, H_SETSIZE, 800);
+ ::history(Data->Hist, &HE, H_SETUNIQUE, 1);
+ loadHistory();
+}
+
+LineEditor::~LineEditor() {
+ saveHistory();
+
+ ::history_end(Data->Hist);
+ ::el_end(Data->EL);
+ ::fwrite("\n", 1, 1, Data->Out);
+}
+
+void LineEditor::saveHistory() {
+ if (!HistoryPath.empty()) {
+ HistEvent HE;
+ ::history(Data->Hist, &HE, H_SAVE, HistoryPath.c_str());
+ }
+}
+
+void LineEditor::loadHistory() {
+ if (!HistoryPath.empty()) {
+ HistEvent HE;
+ ::history(Data->Hist, &HE, H_LOAD, HistoryPath.c_str());
+ }
+}
+
+Optional<std::string> LineEditor::readLine() const {
+ // Call el_gets to prompt the user and read the user's input.
+ int LineLen = 0;
+ const char *Line = ::el_gets(Data->EL, &LineLen);
+
+ // Either of these may mean end-of-file.
+ if (!Line || LineLen == 0)
+ return Optional<std::string>();
+
+ // Strip any newlines off the end of the string.
+ while (LineLen > 0 &&
+ (Line[LineLen - 1] == '\n' || Line[LineLen - 1] == '\r'))
+ --LineLen;
+
+ HistEvent HE;
+ if (LineLen > 0)
+ ::history(Data->Hist, &HE, H_ENTER, Line);
+
+ return std::string(Line, LineLen);
+}
+
+#else // HAVE_LIBEDIT
+
+// Simple fgets-based implementation.
+
+struct LineEditor::InternalData {
+ FILE *In;
+ FILE *Out;
+};
+
+LineEditor::LineEditor(StringRef ProgName, StringRef HistoryPath, FILE *In,
+ FILE *Out, FILE *Err)
+ : Prompt((ProgName + "> ").str()), Data(new InternalData) {
+ Data->In = In;
+ Data->Out = Out;
+}
+
+LineEditor::~LineEditor() {
+ ::fwrite("\n", 1, 1, Data->Out);
+}
+
+void LineEditor::saveHistory() {}
+void LineEditor::loadHistory() {}
+
+Optional<std::string> LineEditor::readLine() const {
+ ::fprintf(Data->Out, "%s", Prompt.c_str());
+
+ std::string Line;
+ do {
+ char Buf[64];
+ char *Res = ::fgets(Buf, sizeof(Buf), Data->In);
+ if (!Res) {
+ if (Line.empty())
+ return Optional<std::string>();
+ else
+ return Line;
+ }
+ Line.append(Buf);
+ } while (Line.empty() ||
+ (Line[Line.size() - 1] != '\n' && Line[Line.size() - 1] != '\r'));
+
+ while (!Line.empty() &&
+ (Line[Line.size() - 1] == '\n' || Line[Line.size() - 1] == '\r'))
+ Line.resize(Line.size() - 1);
+
+ return Line;
+}
+
+#endif // HAVE_LIBEDIT
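+
+// Example REPL loop, common to both implementations (sketch; handle() is a
+// hypothetical callback):
+//
+//   LineEditor LE("mytool"); // history defaults to ~/.mytool-history
+//   while (llvm::Optional<std::string> Line = LE.readLine()) {
+//     if (*Line == "quit")
+//       break;
+//     handle(*Line);
+//   }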
diff --git a/contrib/libs/llvm12/lib/Linker/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Linker/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Linker/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/MC/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/MC/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/MC/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/MC/MCDisassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/MC/MCDisassembler/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/MC/MCDisassembler/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/MC/MCParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/MC/MCParser/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/MC/MCParser/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/MCA/CodeEmitter.cpp b/contrib/libs/llvm12/lib/MCA/CodeEmitter.cpp
new file mode 100644
index 00000000000..dcb92d253ba
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/CodeEmitter.cpp
@@ -0,0 +1,37 @@
+//===--------------------- CodeEmitter.cpp ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CodeEmitter API.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/CodeEmitter.h"
+
+namespace llvm {
+namespace mca {
+
+CodeEmitter::EncodingInfo
+CodeEmitter::getOrCreateEncodingInfo(unsigned MCID) {
+ EncodingInfo &EI = Encodings[MCID];
+ if (EI.second)
+ return EI;
+
+ SmallVector<llvm::MCFixup, 2> Fixups;
+ const MCInst &Inst = Sequence[MCID];
+ MCInst Relaxed(Sequence[MCID]);
+ if (MAB.mayNeedRelaxation(Inst, STI))
+ MAB.relaxInstruction(Relaxed, STI);
+
+ EI.first = Code.size();
+ MCE.encodeInstruction(Relaxed, VecOS, Fixups, STI);
+ EI.second = Code.size() - EI.first;
+ return EI;
+}
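+
+// The cached EncodingInfo is an (offset, length) pair into the Code buffer:
+// encoding a 4-byte instruction into an empty buffer yields {0, 4}, and the
+// next encoded instruction starts at offset 4.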
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Context.cpp b/contrib/libs/llvm12/lib/MCA/Context.cpp
new file mode 100644
index 00000000000..0160e1f9f78
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Context.cpp
@@ -0,0 +1,68 @@
+//===---------------------------- Context.cpp -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a class for holding ownership of various simulated
+/// hardware units. A Context also provides a utility routine for constructing
+/// a default out-of-order pipeline with fetch, dispatch, execute, and retire
+/// stages.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Context.h"
+#include "llvm/MCA/HardwareUnits/RegisterFile.h"
+#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/MCA/Stages/DispatchStage.h"
+#include "llvm/MCA/Stages/EntryStage.h"
+#include "llvm/MCA/Stages/ExecuteStage.h"
+#include "llvm/MCA/Stages/MicroOpQueueStage.h"
+#include "llvm/MCA/Stages/RetireStage.h"
+
+namespace llvm {
+namespace mca {
+
+std::unique_ptr<Pipeline>
+Context::createDefaultPipeline(const PipelineOptions &Opts, SourceMgr &SrcMgr) {
+ const MCSchedModel &SM = STI.getSchedModel();
+
+ // Create the hardware units defining the backend.
+ auto RCU = std::make_unique<RetireControlUnit>(SM);
+ auto PRF = std::make_unique<RegisterFile>(SM, MRI, Opts.RegisterFileSize);
+ auto LSU = std::make_unique<LSUnit>(SM, Opts.LoadQueueSize,
+ Opts.StoreQueueSize, Opts.AssumeNoAlias);
+ auto HWS = std::make_unique<Scheduler>(SM, *LSU);
+
+ // Create the pipeline stages.
+ auto Fetch = std::make_unique<EntryStage>(SrcMgr);
+ auto Dispatch = std::make_unique<DispatchStage>(STI, MRI, Opts.DispatchWidth,
+ *RCU, *PRF);
+ auto Execute =
+ std::make_unique<ExecuteStage>(*HWS, Opts.EnableBottleneckAnalysis);
+ auto Retire = std::make_unique<RetireStage>(*RCU, *PRF, *LSU);
+
+ // Pass the ownership of all the hardware units to this Context.
+ addHardwareUnit(std::move(RCU));
+ addHardwareUnit(std::move(PRF));
+ addHardwareUnit(std::move(LSU));
+ addHardwareUnit(std::move(HWS));
+
+ // Build the pipeline.
+ auto StagePipeline = std::make_unique<Pipeline>();
+ StagePipeline->appendStage(std::move(Fetch));
+ if (Opts.MicroOpQueueSize)
+ StagePipeline->appendStage(std::make_unique<MicroOpQueueStage>(
+ Opts.MicroOpQueueSize, Opts.DecodersThroughput));
+ StagePipeline->appendStage(std::move(Dispatch));
+ StagePipeline->appendStage(std::move(Execute));
+ StagePipeline->appendStage(std::move(Retire));
+ return StagePipeline;
+}
+
+} // namespace mca
+} // namespace llvm
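+
+// Usage sketch (illustrative; STI, MRI and SrcMgr are assumed to be set up
+// by the tool, as llvm-mca does):
+//
+//   mca::Context Ctx(MRI, STI);
+//   mca::PipelineOptions Opts(/*MicroOpQueueSize=*/0,
+//                             /*DecodersThroughput=*/0,
+//                             /*DispatchWidth=*/4, /*RegisterFileSize=*/0,
+//                             /*LoadQueueSize=*/0, /*StoreQueueSize=*/0,
+//                             /*AssumeNoAlias=*/true);
+//   auto P = Ctx.createDefaultPipeline(Opts, SrcMgr);
+//   // P->run() then drives fetch/dispatch/execute/retire to completion.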
diff --git a/contrib/libs/llvm12/lib/MCA/HWEventListener.cpp b/contrib/libs/llvm12/lib/MCA/HWEventListener.cpp
new file mode 100644
index 00000000000..58b2e032922
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/HWEventListener.cpp
@@ -0,0 +1,22 @@
+//===----------------------- HWEventListener.cpp ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a vtable anchor for class HWEventListener.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HWEventListener.h"
+
+namespace llvm {
+namespace mca {
+
+// Anchor the vtable here.
+void HWEventListener::anchor() {}
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/HardwareUnits/HardwareUnit.cpp b/contrib/libs/llvm12/lib/MCA/HardwareUnits/HardwareUnit.cpp
new file mode 100644
index 00000000000..69f793796ec
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/HardwareUnits/HardwareUnit.cpp
@@ -0,0 +1,24 @@
+//===------------------------- HardwareUnit.cpp -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the anchor for the base class that describes
+/// simulated hardware units.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/HardwareUnit.h"
+
+namespace llvm {
+namespace mca {
+
+// Pin the vtable with this method.
+HardwareUnit::~HardwareUnit() = default;
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/HardwareUnits/LSUnit.cpp b/contrib/libs/llvm12/lib/MCA/HardwareUnits/LSUnit.cpp
new file mode 100644
index 00000000000..4594368fc0e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/HardwareUnits/LSUnit.cpp
@@ -0,0 +1,252 @@
+//===----------------------- LSUnit.cpp --------------------------*- C++-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// A Load-Store Unit for the llvm-mca tool.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/LSUnit.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+LSUnitBase::LSUnitBase(const MCSchedModel &SM, unsigned LQ, unsigned SQ,
+ bool AssumeNoAlias)
+ : LQSize(LQ), SQSize(SQ), UsedLQEntries(0), UsedSQEntries(0),
+ NoAlias(AssumeNoAlias), NextGroupID(1) {
+ if (SM.hasExtraProcessorInfo()) {
+ const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
+ if (!LQSize && EPI.LoadQueueID) {
+ const MCProcResourceDesc &LdQDesc = *SM.getProcResource(EPI.LoadQueueID);
+ LQSize = std::max(0, LdQDesc.BufferSize);
+ }
+
+ if (!SQSize && EPI.StoreQueueID) {
+ const MCProcResourceDesc &StQDesc = *SM.getProcResource(EPI.StoreQueueID);
+ SQSize = std::max(0, StQDesc.BufferSize);
+ }
+ }
+}
+
+LSUnitBase::~LSUnitBase() {}
+
+void LSUnitBase::cycleEvent() {
+ for (const std::pair<unsigned, std::unique_ptr<MemoryGroup>> &G : Groups)
+ G.second->cycleEvent();
+}
+
+#ifndef NDEBUG
+void LSUnitBase::dump() const {
+ dbgs() << "[LSUnit] LQ_Size = " << getLoadQueueSize() << '\n';
+ dbgs() << "[LSUnit] SQ_Size = " << getStoreQueueSize() << '\n';
+ dbgs() << "[LSUnit] NextLQSlotIdx = " << getUsedLQEntries() << '\n';
+ dbgs() << "[LSUnit] NextSQSlotIdx = " << getUsedSQEntries() << '\n';
+ dbgs() << "\n";
+ for (const auto &GroupIt : Groups) {
+ const MemoryGroup &Group = *GroupIt.second;
+ dbgs() << "[LSUnit] Group (" << GroupIt.first << "): "
+ << "[ #Preds = " << Group.getNumPredecessors()
+ << ", #GIssued = " << Group.getNumExecutingPredecessors()
+ << ", #GExecuted = " << Group.getNumExecutedPredecessors()
+ << ", #Inst = " << Group.getNumInstructions()
+ << ", #IIssued = " << Group.getNumExecuting()
+ << ", #IExecuted = " << Group.getNumExecuted() << '\n';
+ }
+}
+#endif
+
+unsigned LSUnit::dispatch(const InstRef &IR) {
+ const InstrDesc &Desc = IR.getInstruction()->getDesc();
+ unsigned IsMemBarrier = Desc.HasSideEffects;
+ assert((Desc.MayLoad || Desc.MayStore) && "Not a memory operation!");
+
+ if (Desc.MayLoad)
+ acquireLQSlot();
+ if (Desc.MayStore)
+ acquireSQSlot();
+
+ if (Desc.MayStore) {
+ unsigned NewGID = createMemoryGroup();
+ MemoryGroup &NewGroup = getGroup(NewGID);
+ NewGroup.addInstruction();
+
+ // A store may not pass a previous load or load barrier.
+ unsigned ImmediateLoadDominator =
+ std::max(CurrentLoadGroupID, CurrentLoadBarrierGroupID);
+ if (ImmediateLoadDominator) {
+ MemoryGroup &IDom = getGroup(ImmediateLoadDominator);
+ LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: (" << ImmediateLoadDominator
+ << ") --> (" << NewGID << ")\n");
+ IDom.addSuccessor(&NewGroup, !assumeNoAlias());
+ }
+
+ // A store may not pass a previous store barrier.
+ if (CurrentStoreBarrierGroupID) {
+ MemoryGroup &StoreGroup = getGroup(CurrentStoreBarrierGroupID);
+ LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: ("
+ << CurrentStoreBarrierGroupID
+ << ") --> (" << NewGID << ")\n");
+ StoreGroup.addSuccessor(&NewGroup, true);
+ }
+
+ // A store may not pass a previous store.
+ if (CurrentStoreGroupID &&
+ (CurrentStoreGroupID != CurrentStoreBarrierGroupID)) {
+ MemoryGroup &StoreGroup = getGroup(CurrentStoreGroupID);
+ LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: (" << CurrentStoreGroupID
+ << ") --> (" << NewGID << ")\n");
+ StoreGroup.addSuccessor(&NewGroup, !assumeNoAlias());
+ }
+
+ CurrentStoreGroupID = NewGID;
+ if (IsMemBarrier)
+ CurrentStoreBarrierGroupID = NewGID;
+
+ if (Desc.MayLoad) {
+ CurrentLoadGroupID = NewGID;
+ if (IsMemBarrier)
+ CurrentLoadBarrierGroupID = NewGID;
+ }
+
+ return NewGID;
+ }
+
+ assert(Desc.MayLoad && "Expected a load!");
+
+ unsigned ImmediateLoadDominator =
+ std::max(CurrentLoadGroupID, CurrentLoadBarrierGroupID);
+
+ // A new load group is created if we are in one of the following situations:
+ // 1) This is a load barrier (by construction, a load barrier is always
+ // assigned to a different memory group).
+ // 2) There is no load in flight (by construction we always keep loads and
+ // stores into separate memory groups).
+ // 3) There is a load barrier in flight. This load depends on it.
+ // 4) There is an intervening store between the last load dispatched to the
+ // LSU and this load. We always create a new group even if this load
+ // does not alias the last dispatched store.
+ // 5) There is no intervening store and there is an active load group.
+ // However that group has already started execution, so we cannot add
+ // this load to it.
+ bool ShouldCreateANewGroup =
+ IsMemBarrier || !ImmediateLoadDominator ||
+ CurrentLoadBarrierGroupID == ImmediateLoadDominator ||
+ ImmediateLoadDominator <= CurrentStoreGroupID ||
+ getGroup(ImmediateLoadDominator).isExecuting();
+
+ if (ShouldCreateANewGroup) {
+ unsigned NewGID = createMemoryGroup();
+ MemoryGroup &NewGroup = getGroup(NewGID);
+ NewGroup.addInstruction();
+
+ // A load may not pass a previous store or store barrier
+ // unless flag 'NoAlias' is set.
+ if (!assumeNoAlias() && CurrentStoreGroupID) {
+ MemoryGroup &StoreGroup = getGroup(CurrentStoreGroupID);
+ LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: (" << CurrentStoreGroupID
+ << ") --> (" << NewGID << ")\n");
+ StoreGroup.addSuccessor(&NewGroup, true);
+ }
+
+ // A load barrier may not pass a previous load or load barrier.
+ if (IsMemBarrier) {
+ if (ImmediateLoadDominator) {
+ MemoryGroup &LoadGroup = getGroup(ImmediateLoadDominator);
+ LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: ("
+ << ImmediateLoadDominator
+ << ") --> (" << NewGID << ")\n");
+ LoadGroup.addSuccessor(&NewGroup, true);
+ }
+ } else {
+      // A younger load cannot pass an older load barrier.
+ if (CurrentLoadBarrierGroupID) {
+ MemoryGroup &LoadGroup = getGroup(CurrentLoadBarrierGroupID);
+ LLVM_DEBUG(dbgs() << "[LSUnit]: GROUP DEP: ("
+ << CurrentLoadBarrierGroupID
+ << ") --> (" << NewGID << ")\n");
+ LoadGroup.addSuccessor(&NewGroup, true);
+ }
+ }
+
+ CurrentLoadGroupID = NewGID;
+ if (IsMemBarrier)
+ CurrentLoadBarrierGroupID = NewGID;
+ return NewGID;
+ }
+
+ // A load may pass a previous load.
+ MemoryGroup &Group = getGroup(CurrentLoadGroupID);
+ Group.addInstruction();
+ return CurrentLoadGroupID;
+}
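+
+// Example: for the dispatched sequence (load A, load B, store C, load D),
+// A and B share one memory group (a load may pass a load), C opens a new
+// group ordered after the loads, and D opens yet another group because of
+// the intervening store (case 4 above).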
+
+LSUnit::Status LSUnit::isAvailable(const InstRef &IR) const {
+ const InstrDesc &Desc = IR.getInstruction()->getDesc();
+ if (Desc.MayLoad && isLQFull())
+ return LSUnit::LSU_LQUEUE_FULL;
+ if (Desc.MayStore && isSQFull())
+ return LSUnit::LSU_SQUEUE_FULL;
+ return LSUnit::LSU_AVAILABLE;
+}
+
+void LSUnitBase::onInstructionExecuted(const InstRef &IR) {
+ unsigned GroupID = IR.getInstruction()->getLSUTokenID();
+ auto It = Groups.find(GroupID);
+ assert(It != Groups.end() && "Instruction not dispatched to the LS unit");
+ It->second->onInstructionExecuted();
+ if (It->second->isExecuted())
+ Groups.erase(It);
+}
+
+void LSUnitBase::onInstructionRetired(const InstRef &IR) {
+ const InstrDesc &Desc = IR.getInstruction()->getDesc();
+ bool IsALoad = Desc.MayLoad;
+ bool IsAStore = Desc.MayStore;
+ assert((IsALoad || IsAStore) && "Expected a memory operation!");
+
+ if (IsALoad) {
+ releaseLQSlot();
+ LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << IR.getSourceIndex()
+ << " has been removed from the load queue.\n");
+ }
+
+ if (IsAStore) {
+ releaseSQSlot();
+ LLVM_DEBUG(dbgs() << "[LSUnit]: Instruction idx=" << IR.getSourceIndex()
+ << " has been removed from the store queue.\n");
+ }
+}
+
+void LSUnit::onInstructionExecuted(const InstRef &IR) {
+ const Instruction &IS = *IR.getInstruction();
+ if (!IS.isMemOp())
+ return;
+
+ LSUnitBase::onInstructionExecuted(IR);
+ unsigned GroupID = IS.getLSUTokenID();
+ if (!isValidGroupID(GroupID)) {
+ if (GroupID == CurrentLoadGroupID)
+ CurrentLoadGroupID = 0;
+ if (GroupID == CurrentStoreGroupID)
+ CurrentStoreGroupID = 0;
+ if (GroupID == CurrentLoadBarrierGroupID)
+ CurrentLoadBarrierGroupID = 0;
+ if (GroupID == CurrentStoreBarrierGroupID)
+ CurrentStoreBarrierGroupID = 0;
+ }
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/HardwareUnits/RegisterFile.cpp b/contrib/libs/llvm12/lib/MCA/HardwareUnits/RegisterFile.cpp
new file mode 100644
index 00000000000..11a24a6889f
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/HardwareUnits/RegisterFile.cpp
@@ -0,0 +1,491 @@
+//===--------------------- RegisterFile.cpp ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a register mapping file class. This class is responsible
+/// for managing hardware register files and the tracking of data dependencies
+/// between registers.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/RegisterFile.h"
+#include "llvm/MCA/Instruction.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+RegisterFile::RegisterFile(const MCSchedModel &SM, const MCRegisterInfo &mri,
+ unsigned NumRegs)
+ : MRI(mri),
+ RegisterMappings(mri.getNumRegs(), {WriteRef(), RegisterRenamingInfo()}),
+ ZeroRegisters(mri.getNumRegs(), false) {
+ initialize(SM, NumRegs);
+}
+
+void RegisterFile::initialize(const MCSchedModel &SM, unsigned NumRegs) {
+ // Create a default register file that "sees" all the machine registers
+ // declared by the target. The number of physical registers in the default
+ // register file is set equal to `NumRegs`. A value of zero for `NumRegs`
+ // means: this register file has an unbounded number of physical registers.
+ RegisterFiles.emplace_back(NumRegs);
+ if (!SM.hasExtraProcessorInfo())
+ return;
+
+ // For each user defined register file, allocate a RegisterMappingTracker
+ // object. The size of every register file, as well as the mapping between
+ // register files and register classes is specified via tablegen.
+ const MCExtraProcessorInfo &Info = SM.getExtraProcessorInfo();
+
+ // Skip invalid register file at index 0.
+ for (unsigned I = 1, E = Info.NumRegisterFiles; I < E; ++I) {
+ const MCRegisterFileDesc &RF = Info.RegisterFiles[I];
+ assert(RF.NumPhysRegs && "Invalid PRF with zero physical registers!");
+
+ // The cost of a register definition is equivalent to the number of
+ // physical registers that are allocated at register renaming stage.
+ unsigned Length = RF.NumRegisterCostEntries;
+ const MCRegisterCostEntry *FirstElt =
+ &Info.RegisterCostTable[RF.RegisterCostEntryIdx];
+ addRegisterFile(RF, ArrayRef<MCRegisterCostEntry>(FirstElt, Length));
+ }
+}
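+
+// For example, `-register-file-size=60` bounds the default file to 60
+// physical registers shared by all mappings, while the default of zero keeps
+// it unbounded so renaming never stalls for lack of physical registers.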
+
+void RegisterFile::cycleStart() {
+ for (RegisterMappingTracker &RMT : RegisterFiles)
+ RMT.NumMoveEliminated = 0;
+}
+
+void RegisterFile::addRegisterFile(const MCRegisterFileDesc &RF,
+ ArrayRef<MCRegisterCostEntry> Entries) {
+ // A default register file is always allocated at index #0. That register file
+ // is mainly used to count the total number of mappings created by all
+ // register files at runtime. Users can limit the number of available physical
+ // registers in register file #0 through the command line flag
+ // `-register-file-size`.
+ unsigned RegisterFileIndex = RegisterFiles.size();
+ RegisterFiles.emplace_back(RF.NumPhysRegs, RF.MaxMovesEliminatedPerCycle,
+ RF.AllowZeroMoveEliminationOnly);
+
+ // Special case where there is no register class identifier in the set.
+ // An empty set of register classes means: this register file contains all
+ // the physical registers specified by the target.
+ // We optimistically assume that a register can be renamed at the cost of a
+ // single physical register. The constructor of RegisterFile ensures that
+ // a RegisterMapping exists for each logical register defined by the Target.
+ if (Entries.empty())
+ return;
+
+ // Now update the cost of individual registers.
+ for (const MCRegisterCostEntry &RCE : Entries) {
+ const MCRegisterClass &RC = MRI.getRegClass(RCE.RegisterClassID);
+ for (const MCPhysReg Reg : RC) {
+ RegisterRenamingInfo &Entry = RegisterMappings[Reg].second;
+ IndexPlusCostPairTy &IPC = Entry.IndexPlusCost;
+ if (IPC.first && IPC.first != RegisterFileIndex) {
+ // The only register file that is allowed to overlap is the default
+ // register file at index #0. The analysis is inaccurate if register
+ // files overlap.
+ errs() << "warning: register " << MRI.getName(Reg)
+ << " defined in multiple register files.";
+ }
+ IPC = std::make_pair(RegisterFileIndex, RCE.Cost);
+ Entry.RenameAs = Reg;
+ Entry.AllowMoveElimination = RCE.AllowMoveElimination;
+
+ // Assume the same cost for each sub-register.
+ for (MCSubRegIterator I(Reg, &MRI); I.isValid(); ++I) {
+ RegisterRenamingInfo &OtherEntry = RegisterMappings[*I].second;
+ if (!OtherEntry.IndexPlusCost.first &&
+ (!OtherEntry.RenameAs ||
+ MRI.isSuperRegister(*I, OtherEntry.RenameAs))) {
+ OtherEntry.IndexPlusCost = IPC;
+ OtherEntry.RenameAs = Reg;
+ }
+ }
+ }
+ }
+}
+
+void RegisterFile::allocatePhysRegs(const RegisterRenamingInfo &Entry,
+ MutableArrayRef<unsigned> UsedPhysRegs) {
+ unsigned RegisterFileIndex = Entry.IndexPlusCost.first;
+ unsigned Cost = Entry.IndexPlusCost.second;
+ if (RegisterFileIndex) {
+ RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
+ RMT.NumUsedPhysRegs += Cost;
+ UsedPhysRegs[RegisterFileIndex] += Cost;
+ }
+
+ // Now update the default register mapping tracker.
+ RegisterFiles[0].NumUsedPhysRegs += Cost;
+ UsedPhysRegs[0] += Cost;
+}
+
+void RegisterFile::freePhysRegs(const RegisterRenamingInfo &Entry,
+ MutableArrayRef<unsigned> FreedPhysRegs) {
+ unsigned RegisterFileIndex = Entry.IndexPlusCost.first;
+ unsigned Cost = Entry.IndexPlusCost.second;
+ if (RegisterFileIndex) {
+ RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
+ RMT.NumUsedPhysRegs -= Cost;
+ FreedPhysRegs[RegisterFileIndex] += Cost;
+ }
+
+ // Now update the default register mapping tracker.
+ RegisterFiles[0].NumUsedPhysRegs -= Cost;
+ FreedPhysRegs[0] += Cost;
+}
+
+void RegisterFile::addRegisterWrite(WriteRef Write,
+ MutableArrayRef<unsigned> UsedPhysRegs) {
+ WriteState &WS = *Write.getWriteState();
+ MCPhysReg RegID = WS.getRegisterID();
+ assert(RegID && "Adding an invalid register definition?");
+
+ LLVM_DEBUG({
+ dbgs() << "RegisterFile: addRegisterWrite [ " << Write.getSourceIndex()
+ << ", " << MRI.getName(RegID) << "]\n";
+ });
+
+ // If RenameAs is equal to RegID, then RegID is subject to register renaming
+ // and false dependencies on RegID are all eliminated.
+
+ // If RenameAs references the invalid register, then we optimistically assume
+ // that it can be renamed. In the absence of tablegen descriptors for register
+ // files, RenameAs is always set to the invalid register ID. In all other
+ // cases, RenameAs must be either equal to RegID, or it must reference a
+ // super-register of RegID.
+
+ // If RenameAs is a super-register of RegID, then a write to RegID always
+ // has a false dependency on RenameAs. The only exception is when the write
+ // implicitly clears the upper portion of the underlying register.
+ // If a write clears its super-registers, then it is renamed as `RenameAs`.
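+ //
+ // Illustrative example (assuming an x86-like target): a write to AX is a
+ // partial update of RAX. If AX is renamed as RAX, the write carries a
+ // false dependency on the previous writer of RAX, unless the write is
+ // known to clear the upper bits the way 32-bit GPR writes do.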
+ bool IsWriteZero = WS.isWriteZero();
+ bool IsEliminated = WS.isEliminated();
+ bool ShouldAllocatePhysRegs = !IsWriteZero && !IsEliminated;
+ const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
+ WS.setPRF(RRI.IndexPlusCost.first);
+
+ if (RRI.RenameAs && RRI.RenameAs != RegID) {
+ RegID = RRI.RenameAs;
+ WriteRef &OtherWrite = RegisterMappings[RegID].first;
+
+ if (!WS.clearsSuperRegisters()) {
+ // The processor keeps the definition of `RegID` together with register
+ // `RenameAs`. Since this partial write is not renamed, no physical
+ // register is allocated.
+ ShouldAllocatePhysRegs = false;
+
+ WriteState *OtherWS = OtherWrite.getWriteState();
+ if (OtherWS && (OtherWrite.getSourceIndex() != Write.getSourceIndex())) {
+ // This partial write has a false dependency on RenameAs.
+ assert(!IsEliminated && "Unexpected partial update!");
+ OtherWS->addUser(OtherWrite.getSourceIndex(), &WS);
+ }
+ }
+ }
+
+ // Update zero registers.
+ MCPhysReg ZeroRegisterID =
+ WS.clearsSuperRegisters() ? RegID : WS.getRegisterID();
+ ZeroRegisters.setBitVal(ZeroRegisterID, IsWriteZero);
+ for (MCSubRegIterator I(ZeroRegisterID, &MRI); I.isValid(); ++I)
+ ZeroRegisters.setBitVal(*I, IsWriteZero);
+
+ // If this move has been eliminated, then the call to tryEliminateMove
+ // should have already updated all the register mappings.
+ if (!IsEliminated) {
+ // Update the mapping for register RegID including its sub-registers.
+ RegisterMappings[RegID].first = Write;
+ RegisterMappings[RegID].second.AliasRegID = 0U;
+ for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+ RegisterMappings[*I].first = Write;
+ RegisterMappings[*I].second.AliasRegID = 0U;
+ }
+
+ // No physical registers are allocated for instructions that are optimized
+ // in hardware. For example, zero-latency data-dependency breaking
+ // instructions don't consume physical registers.
+ if (ShouldAllocatePhysRegs)
+ allocatePhysRegs(RegisterMappings[RegID].second, UsedPhysRegs);
+ }
+
+ if (!WS.clearsSuperRegisters())
+ return;
+
+ for (MCSuperRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+ if (!IsEliminated) {
+ RegisterMappings[*I].first = Write;
+ RegisterMappings[*I].second.AliasRegID = 0U;
+ }
+
+ ZeroRegisters.setBitVal(*I, IsWriteZero);
+ }
+}
+
+void RegisterFile::removeRegisterWrite(
+ const WriteState &WS, MutableArrayRef<unsigned> FreedPhysRegs) {
+ // Early exit if this write was eliminated. A write eliminated at register
+ // renaming stage generates an alias, and it is not added to the PRF.
+ if (WS.isEliminated())
+ return;
+
+ MCPhysReg RegID = WS.getRegisterID();
+
+ assert(RegID != 0 && "Invalidating an already invalid register?");
+ assert(WS.getCyclesLeft() != UNKNOWN_CYCLES &&
+ "Invalidating a write of unknown cycles!");
+ assert(WS.getCyclesLeft() <= 0 && "Invalid cycles left for this write!");
+
+ bool ShouldFreePhysRegs = !WS.isWriteZero();
+ MCPhysReg RenameAs = RegisterMappings[RegID].second.RenameAs;
+ if (RenameAs && RenameAs != RegID) {
+ RegID = RenameAs;
+
+ if (!WS.clearsSuperRegisters()) {
+ // Keep the definition of `RegID` together with register `RenameAs`.
+ ShouldFreePhysRegs = false;
+ }
+ }
+
+ if (ShouldFreePhysRegs)
+ freePhysRegs(RegisterMappings[RegID].second, FreedPhysRegs);
+
+ WriteRef &WR = RegisterMappings[RegID].first;
+ if (WR.getWriteState() == &WS)
+ WR.invalidate();
+
+ for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+ WriteRef &OtherWR = RegisterMappings[*I].first;
+ if (OtherWR.getWriteState() == &WS)
+ OtherWR.invalidate();
+ }
+
+ if (!WS.clearsSuperRegisters())
+ return;
+
+ for (MCSuperRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+ WriteRef &OtherWR = RegisterMappings[*I].first;
+ if (OtherWR.getWriteState() == &WS)
+ OtherWR.invalidate();
+ }
+}
+
+bool RegisterFile::tryEliminateMove(WriteState &WS, ReadState &RS) {
+ const RegisterMapping &RMFrom = RegisterMappings[RS.getRegisterID()];
+ const RegisterMapping &RMTo = RegisterMappings[WS.getRegisterID()];
+
+ // From and To must be owned by the same PRF.
+ const RegisterRenamingInfo &RRIFrom = RMFrom.second;
+ const RegisterRenamingInfo &RRITo = RMTo.second;
+ unsigned RegisterFileIndex = RRIFrom.IndexPlusCost.first;
+ if (RegisterFileIndex != RRITo.IndexPlusCost.first)
+ return false;
+
+ // We only allow move elimination for writes that update a full physical
+ // register. On X86, move elimination is possible with 32-bit general purpose
+ // registers because writes to those registers are not partial writes. If a
+ // register move is a partial write, then we conservatively assume that move
+ // elimination fails, since it would either trigger a partial register
+ // update or require issuing a merge opcode.
+ //
+ // Note that this constraint may be lifted in the future. For example, we could
+ // make this model more flexible, and let users customize the set of registers
+ // (i.e. register classes) that allow move elimination.
+ //
+ // For now, we assume that there is a strong correlation between registers
+ // that allow move elimination, and how those same registers are renamed in
+ // hardware.
+ if (RRITo.RenameAs && RRITo.RenameAs != WS.getRegisterID()) {
+ // Early exit if the PRF doesn't support move elimination for this register.
+ if (!RegisterMappings[RRITo.RenameAs].second.AllowMoveElimination)
+ return false;
+ if (!WS.clearsSuperRegisters())
+ return false;
+ }
+
+ RegisterMappingTracker &RMT = RegisterFiles[RegisterFileIndex];
+ if (RMT.MaxMoveEliminatedPerCycle &&
+ RMT.NumMoveEliminated == RMT.MaxMoveEliminatedPerCycle)
+ return false;
+
+ bool IsZeroMove = ZeroRegisters[RS.getRegisterID()];
+ if (RMT.AllowZeroMoveEliminationOnly && !IsZeroMove)
+ return false;
+
+ // Construct an alias.
+ MCPhysReg AliasedReg =
+ RRIFrom.RenameAs ? RRIFrom.RenameAs : RS.getRegisterID();
+ MCPhysReg AliasReg = RRITo.RenameAs ? RRITo.RenameAs : WS.getRegisterID();
+
+ const RegisterRenamingInfo &RMAlias = RegisterMappings[AliasedReg].second;
+ if (RMAlias.AliasRegID)
+ AliasedReg = RMAlias.AliasRegID;
+
+ RegisterMappings[AliasReg].second.AliasRegID = AliasedReg;
+ for (MCSubRegIterator I(AliasReg, &MRI); I.isValid(); ++I)
+ RegisterMappings[*I].second.AliasRegID = AliasedReg;
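+ // For instance (illustrative), after eliminating a move from $src to $dst,
+ // $dst and its sub-registers alias $src, so later reads of $dst resolve
+ // against the writes of $src without consuming a new physical register.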
+
+ if (IsZeroMove) {
+ WS.setWriteZero();
+ RS.setReadZero();
+ }
+ WS.setEliminated();
+ RMT.NumMoveEliminated++;
+
+ return true;
+}
+
+void RegisterFile::collectWrites(const ReadState &RS,
+ SmallVectorImpl<WriteRef> &Writes) const {
+ MCPhysReg RegID = RS.getRegisterID();
+ assert(RegID && RegID < RegisterMappings.size());
+ LLVM_DEBUG(dbgs() << "RegisterFile: collecting writes for register "
+ << MRI.getName(RegID) << '\n');
+
+ // Check if this is an alias.
+ const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
+ if (RRI.AliasRegID)
+ RegID = RRI.AliasRegID;
+
+ const WriteRef &WR = RegisterMappings[RegID].first;
+ if (WR.isValid())
+ Writes.push_back(WR);
+
+ // Handle potential partial register updates.
+ for (MCSubRegIterator I(RegID, &MRI); I.isValid(); ++I) {
+ const WriteRef &WR = RegisterMappings[*I].first;
+ if (WR.isValid())
+ Writes.push_back(WR);
+ }
+
+ // Remove duplicate entries and resize the input vector.
+ if (Writes.size() > 1) {
+ sort(Writes, [](const WriteRef &Lhs, const WriteRef &Rhs) {
+ return Lhs.getWriteState() < Rhs.getWriteState();
+ });
+ auto It = std::unique(Writes.begin(), Writes.end());
+ Writes.resize(std::distance(Writes.begin(), It));
+ }
+
+ LLVM_DEBUG({
+ for (const WriteRef &WR : Writes) {
+ const WriteState &WS = *WR.getWriteState();
+ dbgs() << "[PRF] Found a dependent use of Register "
+ << MRI.getName(WS.getRegisterID()) << " (defined by instruction #"
+ << WR.getSourceIndex() << ")\n";
+ }
+ });
+}
+
+void RegisterFile::addRegisterRead(ReadState &RS,
+ const MCSubtargetInfo &STI) const {
+ MCPhysReg RegID = RS.getRegisterID();
+ const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
+ RS.setPRF(RRI.IndexPlusCost.first);
+ if (RS.isIndependentFromDef())
+ return;
+
+ if (ZeroRegisters[RS.getRegisterID()])
+ RS.setReadZero();
+
+ SmallVector<WriteRef, 4> DependentWrites;
+ collectWrites(RS, DependentWrites);
+ RS.setDependentWrites(DependentWrites.size());
+
+ // We know that this read depends on all the writes in DependentWrites.
+ // For each write, check if we have ReadAdvance information, and use it
+ // to figure out in how many cycles this read becomes available.
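+ // For example (illustrative): a ReadAdvance of 2 cycles means this use
+ // samples the value two cycles early, reducing the effective latency of
+ // the dependent write by two cycles for this particular read.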
+ const ReadDescriptor &RD = RS.getDescriptor();
+ const MCSchedModel &SM = STI.getSchedModel();
+ const MCSchedClassDesc *SC = SM.getSchedClassDesc(RD.SchedClassID);
+ for (WriteRef &WR : DependentWrites) {
+ WriteState &WS = *WR.getWriteState();
+ unsigned WriteResID = WS.getWriteResourceID();
+ int ReadAdvance = STI.getReadAdvanceCycles(SC, RD.UseIndex, WriteResID);
+ WS.addUser(WR.getSourceIndex(), &RS, ReadAdvance);
+ }
+}
+
+unsigned RegisterFile::isAvailable(ArrayRef<MCPhysReg> Regs) const {
+ SmallVector<unsigned, 4> NumPhysRegs(getNumRegisterFiles());
+
+ // Find how many new mappings must be created for each register file.
+ for (const MCPhysReg RegID : Regs) {
+ const RegisterRenamingInfo &RRI = RegisterMappings[RegID].second;
+ const IndexPlusCostPairTy &Entry = RRI.IndexPlusCost;
+ if (Entry.first)
+ NumPhysRegs[Entry.first] += Entry.second;
+ NumPhysRegs[0] += Entry.second;
+ }
+
+ unsigned Response = 0;
+ for (unsigned I = 0, E = getNumRegisterFiles(); I < E; ++I) {
+ unsigned NumRegs = NumPhysRegs[I];
+ if (!NumRegs)
+ continue;
+
+ const RegisterMappingTracker &RMT = RegisterFiles[I];
+ if (!RMT.NumPhysRegs) {
+ // The register file has an unbounded number of microarchitectural
+ // registers.
+ continue;
+ }
+
+ if (RMT.NumPhysRegs < NumRegs) {
+ // The current register file is too small. This may occur if the number of
+ // microarchitectural registers in register file #0 was changed by the
+ // user via the flag -reg-file-size. Alternatively, the scheduling model
+ // specified too few registers for this register file.
+ LLVM_DEBUG(dbgs() << "Not enough registers in the register file.\n");
+
+ // FIXME: Normalize the instruction register count to match the
+ // NumPhysRegs value. This is a highly unusual case, and is not expected
+ // to occur. This normalization is hiding an inconsistency in either the
+ // scheduling model or in the value that the user might have specified
+ // for NumPhysRegs.
+ NumRegs = RMT.NumPhysRegs;
+ }
+
+ if (RMT.NumPhysRegs < (RMT.NumUsedPhysRegs + NumRegs))
+ Response |= (1U << I);
+ }
+
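+ // Illustrative example: a return value of 0b101 means that register files
+ // #0 and #2 do not have enough free physical registers for this set of
+ // register writes.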
+ return Response;
+}
+
+#ifndef NDEBUG
+void RegisterFile::dump() const {
+ for (unsigned I = 0, E = MRI.getNumRegs(); I < E; ++I) {
+ const RegisterMapping &RM = RegisterMappings[I];
+ const RegisterRenamingInfo &RRI = RM.second;
+ if (ZeroRegisters[I]) {
+ dbgs() << MRI.getName(I) << ", " << I
+ << ", PRF=" << RRI.IndexPlusCost.first
+ << ", Cost=" << RRI.IndexPlusCost.second
+ << ", RenameAs=" << RRI.RenameAs << ", IsZero=" << ZeroRegisters[I]
+ << ",";
+ RM.first.dump();
+ dbgs() << '\n';
+ }
+ }
+
+ for (unsigned I = 0, E = getNumRegisterFiles(); I < E; ++I) {
+ dbgs() << "Register File #" << I;
+ const RegisterMappingTracker &RMT = RegisterFiles[I];
+ dbgs() << "\n TotalMappings: " << RMT.NumPhysRegs
+ << "\n NumUsedMappings: " << RMT.NumUsedPhysRegs << '\n';
+ }
+}
+#endif
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/HardwareUnits/ResourceManager.cpp b/contrib/libs/llvm12/lib/MCA/HardwareUnits/ResourceManager.cpp
new file mode 100644
index 00000000000..30c4f14d13a
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/HardwareUnits/ResourceManager.cpp
@@ -0,0 +1,364 @@
+//===--------------------- ResourceManager.cpp ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// The classes here represent processor resource units and their management
+/// strategy. These classes are managed by the Scheduler.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/ResourceManager.h"
+#include "llvm/MCA/Support.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+ResourceStrategy::~ResourceStrategy() = default;
+
+static uint64_t selectImpl(uint64_t CandidateMask,
+ uint64_t &NextInSequenceMask) {
+ // The topmost set bit in CandidateMask identifies our next candidate resource.
+ CandidateMask = 1ULL << getResourceStateIndex(CandidateMask);
+ NextInSequenceMask &= (CandidateMask | (CandidateMask - 1));
+ return CandidateMask;
+}
+
+uint64_t DefaultResourceStrategy::select(uint64_t ReadyMask) {
+ // This method assumes that ReadyMask cannot be zero.
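+ //
+ // Worked example (illustrative): with ResourceUnitMask = 0b1111,
+ // NextInSequenceMask = 0b1111 and ReadyMask = 0b0110, the candidate mask
+ // is 0b0110; selectImpl() returns the topmost candidate (0b0100) and
+ // shrinks NextInSequenceMask to 0b0111, so later selections cycle through
+ // the lower units before the sequence mask is replenished.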
+ uint64_t CandidateMask = ReadyMask & NextInSequenceMask;
+ if (CandidateMask)
+ return selectImpl(CandidateMask, NextInSequenceMask);
+
+ NextInSequenceMask = ResourceUnitMask ^ RemovedFromNextInSequence;
+ RemovedFromNextInSequence = 0;
+ CandidateMask = ReadyMask & NextInSequenceMask;
+ if (CandidateMask)
+ return selectImpl(CandidateMask, NextInSequenceMask);
+
+ NextInSequenceMask = ResourceUnitMask;
+ CandidateMask = ReadyMask & NextInSequenceMask;
+ return selectImpl(CandidateMask, NextInSequenceMask);
+}
+
+void DefaultResourceStrategy::used(uint64_t Mask) {
+ if (Mask > NextInSequenceMask) {
+ RemovedFromNextInSequence |= Mask;
+ return;
+ }
+
+ NextInSequenceMask &= (~Mask);
+ if (NextInSequenceMask)
+ return;
+
+ NextInSequenceMask = ResourceUnitMask ^ RemovedFromNextInSequence;
+ RemovedFromNextInSequence = 0;
+}
+
+ResourceState::ResourceState(const MCProcResourceDesc &Desc, unsigned Index,
+ uint64_t Mask)
+ : ProcResourceDescIndex(Index), ResourceMask(Mask),
+ BufferSize(Desc.BufferSize), IsAGroup(countPopulation(ResourceMask) > 1) {
+ if (IsAGroup) {
+ ResourceSizeMask =
+ ResourceMask ^ 1ULL << getResourceStateIndex(ResourceMask);
+ } else {
+ ResourceSizeMask = (1ULL << Desc.NumUnits) - 1;
+ }
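+ // Illustrative example: a group with mask 0b1011 (group bit 0b1000, units
+ // 0b0001 and 0b0010) gets ResourceSizeMask = 0b0011, i.e. the mask of its
+ // underlying units; a standalone resource with two units also gets 0b0011.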
+ ReadyMask = ResourceSizeMask;
+ AvailableSlots = BufferSize == -1 ? 0U : static_cast<unsigned>(BufferSize);
+ Unavailable = false;
+}
+
+bool ResourceState::isReady(unsigned NumUnits) const {
+ return (!isReserved() || isADispatchHazard()) &&
+ countPopulation(ReadyMask) >= NumUnits;
+}
+
+ResourceStateEvent ResourceState::isBufferAvailable() const {
+ if (isADispatchHazard() && isReserved())
+ return RS_RESERVED;
+ if (!isBuffered() || AvailableSlots)
+ return RS_BUFFER_AVAILABLE;
+ return RS_BUFFER_UNAVAILABLE;
+}
+
+#ifndef NDEBUG
+void ResourceState::dump() const {
+ dbgs() << "MASK=" << format_hex(ResourceMask, 16)
+ << ", SZMASK=" << format_hex(ResourceSizeMask, 16)
+ << ", RDYMASK=" << format_hex(ReadyMask, 16)
+ << ", BufferSize=" << BufferSize
+ << ", AvailableSlots=" << AvailableSlots
+ << ", Reserved=" << Unavailable << '\n';
+}
+#endif
+
+static std::unique_ptr<ResourceStrategy>
+getStrategyFor(const ResourceState &RS) {
+ if (RS.isAResourceGroup() || RS.getNumUnits() > 1)
+ return std::make_unique<DefaultResourceStrategy>(RS.getReadyMask());
+ return std::unique_ptr<ResourceStrategy>(nullptr);
+}
+
+ResourceManager::ResourceManager(const MCSchedModel &SM)
+ : Resources(SM.getNumProcResourceKinds() - 1),
+ Strategies(SM.getNumProcResourceKinds() - 1),
+ Resource2Groups(SM.getNumProcResourceKinds() - 1, 0),
+ ProcResID2Mask(SM.getNumProcResourceKinds(), 0),
+ ResIndex2ProcResID(SM.getNumProcResourceKinds() - 1, 0),
+ ProcResUnitMask(0), ReservedResourceGroups(0),
+ AvailableBuffers(~0ULL), ReservedBuffers(0) {
+ computeProcResourceMasks(SM, ProcResID2Mask);
+
+ // Initialize vector ResIndex2ProcResID.
+ for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+ unsigned Index = getResourceStateIndex(ProcResID2Mask[I]);
+ ResIndex2ProcResID[Index] = I;
+ }
+
+ for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+ uint64_t Mask = ProcResID2Mask[I];
+ unsigned Index = getResourceStateIndex(Mask);
+ Resources[Index] =
+ std::make_unique<ResourceState>(*SM.getProcResource(I), I, Mask);
+ Strategies[Index] = getStrategyFor(*Resources[Index]);
+ }
+
+ for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+ uint64_t Mask = ProcResID2Mask[I];
+ unsigned Index = getResourceStateIndex(Mask);
+ const ResourceState &RS = *Resources[Index];
+ if (!RS.isAResourceGroup()) {
+ ProcResUnitMask |= Mask;
+ continue;
+ }
+
+ uint64_t GroupMaskIdx = 1ULL << Index;
+ Mask -= GroupMaskIdx;
+ while (Mask) {
+ // Extract lowest set isolated bit.
+ uint64_t Unit = Mask & (-Mask);
+ unsigned IndexUnit = getResourceStateIndex(Unit);
+ Resource2Groups[IndexUnit] |= GroupMaskIdx;
+ Mask ^= Unit;
+ }
+ }
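+ // Illustrative example: if group G contains units A and B, then
+ // Resource2Groups maps both A and B to G's group bit; method use() relies
+ // on this mapping to notify G when one of its units runs out of ready
+ // sub-resources.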
+
+ AvailableProcResUnits = ProcResUnitMask;
+}
+
+void ResourceManager::setCustomStrategyImpl(std::unique_ptr<ResourceStrategy> S,
+ uint64_t ResourceMask) {
+ unsigned Index = getResourceStateIndex(ResourceMask);
+ assert(Index < Resources.size() && "Invalid processor resource index!");
+ assert(S && "Unexpected null strategy in input!");
+ Strategies[Index] = std::move(S);
+}
+
+unsigned ResourceManager::resolveResourceMask(uint64_t Mask) const {
+ return ResIndex2ProcResID[getResourceStateIndex(Mask)];
+}
+
+unsigned ResourceManager::getNumUnits(uint64_t ResourceID) const {
+ return Resources[getResourceStateIndex(ResourceID)]->getNumUnits();
+}
+
+// Returns the actual resource consumed by this Use.
+// The first element is the primary resource ID; the second is the specific
+// sub-resource ID.
+ResourceRef ResourceManager::selectPipe(uint64_t ResourceID) {
+ unsigned Index = getResourceStateIndex(ResourceID);
+ assert(Index < Resources.size() && "Invalid resource use!");
+ ResourceState &RS = *Resources[Index];
+ assert(RS.isReady() && "No available units to select!");
+
+ // Special case where RS is not a group, and it only declares a single
+ // resource unit.
+ if (!RS.isAResourceGroup() && RS.getNumUnits() == 1)
+ return std::make_pair(ResourceID, RS.getReadyMask());
+
+ uint64_t SubResourceID = Strategies[Index]->select(RS.getReadyMask());
+ if (RS.isAResourceGroup())
+ return selectPipe(SubResourceID);
+ return std::make_pair(ResourceID, SubResourceID);
+}
+
+void ResourceManager::use(const ResourceRef &RR) {
+ // Mark the sub-resource referenced by RR as used.
+ unsigned RSID = getResourceStateIndex(RR.first);
+ ResourceState &RS = *Resources[RSID];
+ RS.markSubResourceAsUsed(RR.second);
+ // Remember to update the resource strategy for non-group resources with
+ // multiple units.
+ if (RS.getNumUnits() > 1)
+ Strategies[RSID]->used(RR.second);
+
+ // If there are still available units in RR.first,
+ // then we are done.
+ if (RS.isReady())
+ return;
+
+ AvailableProcResUnits ^= RR.first;
+
+ // Notify groups that RR.first is no longer available.
+ uint64_t Users = Resource2Groups[RSID];
+ while (Users) {
+ // Extract lowest set isolated bit.
+ unsigned GroupIndex = getResourceStateIndex(Users & (-Users));
+ ResourceState &CurrentUser = *Resources[GroupIndex];
+ CurrentUser.markSubResourceAsUsed(RR.first);
+ Strategies[GroupIndex]->used(RR.first);
+ // Reset lowest set bit.
+ Users &= Users - 1;
+ }
+}
+
+void ResourceManager::release(const ResourceRef &RR) {
+ unsigned RSID = getResourceStateIndex(RR.first);
+ ResourceState &RS = *Resources[RSID];
+ bool WasFullyUsed = !RS.isReady();
+ RS.releaseSubResource(RR.second);
+ if (!WasFullyUsed)
+ return;
+
+ AvailableProcResUnits ^= RR.first;
+
+ // Notify groups that RR.first is now available again.
+ uint64_t Users = Resource2Groups[RSID];
+ while (Users) {
+ unsigned GroupIndex = getResourceStateIndex(Users & (-Users));
+ ResourceState &CurrentUser = *Resources[GroupIndex];
+ CurrentUser.releaseSubResource(RR.first);
+ Users &= Users - 1;
+ }
+}
+
+ResourceStateEvent
+ResourceManager::canBeDispatched(uint64_t ConsumedBuffers) const {
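+ // ConsumedBuffers is a bit mask of the buffered resources consumed by the
+ // instruction. Dispatch is denied if any of those buffers is currently
+ // reserved by an in-order resource (RS_RESERVED), or has no free slots
+ // left (RS_BUFFER_UNAVAILABLE).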
+ if (ConsumedBuffers & ReservedBuffers)
+ return ResourceStateEvent::RS_RESERVED;
+ if (ConsumedBuffers & (~AvailableBuffers))
+ return ResourceStateEvent::RS_BUFFER_UNAVAILABLE;
+ return ResourceStateEvent::RS_BUFFER_AVAILABLE;
+}
+
+void ResourceManager::reserveBuffers(uint64_t ConsumedBuffers) {
+ while (ConsumedBuffers) {
+ uint64_t CurrentBuffer = ConsumedBuffers & (-ConsumedBuffers);
+ ResourceState &RS = *Resources[getResourceStateIndex(CurrentBuffer)];
+ ConsumedBuffers ^= CurrentBuffer;
+ assert(RS.isBufferAvailable() == ResourceStateEvent::RS_BUFFER_AVAILABLE);
+ if (!RS.reserveBuffer())
+ AvailableBuffers ^= CurrentBuffer;
+ if (RS.isADispatchHazard()) {
+ // Reserve this buffer now, and release it once pipeline resources
+ // consumed by the instruction become available again.
+ // We do this to simulate an in-order dispatch/issue of instructions.
+ ReservedBuffers ^= CurrentBuffer;
+ }
+ }
+}
+
+void ResourceManager::releaseBuffers(uint64_t ConsumedBuffers) {
+ AvailableBuffers |= ConsumedBuffers;
+ while (ConsumedBuffers) {
+ uint64_t CurrentBuffer = ConsumedBuffers & (-ConsumedBuffers);
+ ResourceState &RS = *Resources[getResourceStateIndex(CurrentBuffer)];
+ ConsumedBuffers ^= CurrentBuffer;
+ RS.releaseBuffer();
+ // Do not unreserve dispatch hazard resource buffers. Wait until all
+ // pipeline resources have been freed too.
+ }
+}
+
+uint64_t ResourceManager::checkAvailability(const InstrDesc &Desc) const {
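+ // Returns a mask of the resource units and reserved groups that prevent
+ // Desc from being issued this cycle; a zero return value means that all
+ // the consumed resources are currently available.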
+ uint64_t BusyResourceMask = 0;
+ for (const std::pair<uint64_t, ResourceUsage> &E : Desc.Resources) {
+ unsigned NumUnits = E.second.isReserved() ? 0U : E.second.NumUnits;
+ unsigned Index = getResourceStateIndex(E.first);
+ if (!Resources[Index]->isReady(NumUnits))
+ BusyResourceMask |= E.first;
+ }
+
+ BusyResourceMask &= ProcResUnitMask;
+ if (BusyResourceMask)
+ return BusyResourceMask;
+ return Desc.UsedProcResGroups & ReservedResourceGroups;
+}
+
+void ResourceManager::issueInstruction(
+ const InstrDesc &Desc,
+ SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &Pipes) {
+ for (const std::pair<uint64_t, ResourceUsage> &R : Desc.Resources) {
+ const CycleSegment &CS = R.second.CS;
+ if (!CS.size()) {
+ releaseResource(R.first);
+ continue;
+ }
+
+ assert(CS.begin() == 0 && "Invalid {Start, End} cycles!");
+ if (!R.second.isReserved()) {
+ ResourceRef Pipe = selectPipe(R.first);
+ use(Pipe);
+ BusyResources[Pipe] += CS.size();
+ Pipes.emplace_back(std::pair<ResourceRef, ResourceCycles>(
+ Pipe, ResourceCycles(CS.size())));
+ } else {
+ assert((countPopulation(R.first) > 1) && "Expected a group!");
+ // Mark this group as reserved.
+ assert(R.second.isReserved());
+ reserveResource(R.first);
+ BusyResources[ResourceRef(R.first, R.first)] += CS.size();
+ }
+ }
+}
+
+void ResourceManager::cycleEvent(SmallVectorImpl<ResourceRef> &ResourcesFreed) {
+ for (std::pair<ResourceRef, unsigned> &BR : BusyResources) {
+ if (BR.second)
+ BR.second--;
+ if (!BR.second) {
+ // Release this resource.
+ const ResourceRef &RR = BR.first;
+
+ if (countPopulation(RR.first) == 1)
+ release(RR);
+ releaseResource(RR.first);
+ ResourcesFreed.push_back(RR);
+ }
+ }
+
+ for (const ResourceRef &RF : ResourcesFreed)
+ BusyResources.erase(RF);
+}
+
+void ResourceManager::reserveResource(uint64_t ResourceID) {
+ const unsigned Index = getResourceStateIndex(ResourceID);
+ ResourceState &Resource = *Resources[Index];
+ assert(Resource.isAResourceGroup() && !Resource.isReserved() &&
+ "Unexpected resource state found!");
+ Resource.setReserved();
+ ReservedResourceGroups ^= 1ULL << Index;
+}
+
+void ResourceManager::releaseResource(uint64_t ResourceID) {
+ const unsigned Index = getResourceStateIndex(ResourceID);
+ ResourceState &Resource = *Resources[Index];
+ Resource.clearReserved();
+ if (Resource.isAResourceGroup())
+ ReservedResourceGroups ^= 1ULL << Index;
+ // Now it is safe to release dispatch/issue resources.
+ if (Resource.isADispatchHazard())
+ ReservedBuffers ^= 1ULL << Index;
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/HardwareUnits/RetireControlUnit.cpp b/contrib/libs/llvm12/lib/MCA/HardwareUnits/RetireControlUnit.cpp
new file mode 100644
index 00000000000..de519d7fd94
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/HardwareUnits/RetireControlUnit.cpp
@@ -0,0 +1,100 @@
+//===---------------------- RetireControlUnit.cpp ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file simulates the hardware responsible for retiring instructions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/RetireControlUnit.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+RetireControlUnit::RetireControlUnit(const MCSchedModel &SM)
+ : NextAvailableSlotIdx(0), CurrentInstructionSlotIdx(0),
+ NumROBEntries(SM.MicroOpBufferSize),
+ AvailableEntries(SM.MicroOpBufferSize), MaxRetirePerCycle(0) {
+ // Check if the scheduling model provides extra information about the machine
+ // processor. If so, then use that information to set the reorder buffer size
+ // and the maximum number of instructions retired per cycle.
+ if (SM.hasExtraProcessorInfo()) {
+ const MCExtraProcessorInfo &EPI = SM.getExtraProcessorInfo();
+ if (EPI.ReorderBufferSize)
+ AvailableEntries = EPI.ReorderBufferSize;
+ MaxRetirePerCycle = EPI.MaxRetirePerCycle;
+ }
+ NumROBEntries = AvailableEntries;
+ assert(NumROBEntries && "Invalid reorder buffer size!");
+ Queue.resize(2 * NumROBEntries);
+}
+
+// Reserves a number of slots, and returns a new token.
+unsigned RetireControlUnit::dispatch(const InstRef &IR) {
+ const Instruction &Inst = *IR.getInstruction();
+ unsigned Entries = normalizeQuantity(Inst.getNumMicroOps());
+ assert((AvailableEntries >= Entries) && "Reorder Buffer unavailable!");
+
+ unsigned TokenID = NextAvailableSlotIdx;
+ Queue[NextAvailableSlotIdx] = {IR, Entries, false};
+ NextAvailableSlotIdx += std::max(1U, Entries);
+ NextAvailableSlotIdx %= Queue.size();
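+ // Illustrative example: with NumROBEntries = 4 the queue holds 8 slots; if
+ // NextAvailableSlotIdx is 7 and this instruction consumes 2 entries, the
+ // returned token is 7 and the index wraps around to (7 + 2) % 8 = 1.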
+
+ AvailableEntries -= Entries;
+ return TokenID;
+}
+
+const RetireControlUnit::RUToken &RetireControlUnit::getCurrentToken() const {
+ const RetireControlUnit::RUToken &Current = Queue[CurrentInstructionSlotIdx];
+#ifndef NDEBUG
+ const Instruction *Inst = Current.IR.getInstruction();
+ assert(Inst && "Invalid RUToken in the RCU queue.");
+#endif
+ return Current;
+}
+
+unsigned RetireControlUnit::computeNextSlotIdx() const {
+ const RetireControlUnit::RUToken &Current = getCurrentToken();
+ unsigned NextSlotIdx = CurrentInstructionSlotIdx + std::max(1U, Current.NumSlots);
+ return NextSlotIdx % Queue.size();
+}
+
+const RetireControlUnit::RUToken &RetireControlUnit::peekNextToken() const {
+ return Queue[computeNextSlotIdx()];
+}
+
+void RetireControlUnit::consumeCurrentToken() {
+ RetireControlUnit::RUToken &Current = Queue[CurrentInstructionSlotIdx];
+ Current.IR.getInstruction()->retire();
+
+ // Update the slot index to be the next item in the circular queue.
+ CurrentInstructionSlotIdx += std::max(1U, Current.NumSlots);
+ CurrentInstructionSlotIdx %= Queue.size();
+ AvailableEntries += Current.NumSlots;
+ Current = { InstRef(), 0U, false };
+}
+
+void RetireControlUnit::onInstructionExecuted(unsigned TokenID) {
+ assert(Queue.size() > TokenID);
+ assert(Queue[TokenID].IR.getInstruction() && "Instruction was not dispatched!");
+ assert(Queue[TokenID].Executed == false && "Instruction already executed!");
+ Queue[TokenID].Executed = true;
+}
+
+#ifndef NDEBUG
+void RetireControlUnit::dump() const {
+ dbgs() << "Retire Unit: { Total ROB Entries =" << NumROBEntries
+ << ", Available ROB entries=" << AvailableEntries << " }\n";
+}
+#endif
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/HardwareUnits/Scheduler.cpp b/contrib/libs/llvm12/lib/MCA/HardwareUnits/Scheduler.cpp
new file mode 100644
index 00000000000..31ea751f1c4
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/HardwareUnits/Scheduler.cpp
@@ -0,0 +1,341 @@
+//===--------------------- Scheduler.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A scheduler for processor resource units and processor resource groups.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+
+void Scheduler::initializeStrategy(std::unique_ptr<SchedulerStrategy> S) {
+ // Ensure we have a valid (non-null) strategy object.
+ Strategy = S ? std::move(S) : std::make_unique<DefaultSchedulerStrategy>();
+}
+
+// Anchor the vtable of SchedulerStrategy and DefaultSchedulerStrategy.
+SchedulerStrategy::~SchedulerStrategy() = default;
+DefaultSchedulerStrategy::~DefaultSchedulerStrategy() = default;
+
+#ifndef NDEBUG
+void Scheduler::dump() const {
+ dbgs() << "[SCHEDULER]: WaitSet size is: " << WaitSet.size() << '\n';
+ dbgs() << "[SCHEDULER]: ReadySet size is: " << ReadySet.size() << '\n';
+ dbgs() << "[SCHEDULER]: IssuedSet size is: " << IssuedSet.size() << '\n';
+ Resources->dump();
+}
+#endif
+
+Scheduler::Status Scheduler::isAvailable(const InstRef &IR) {
+ ResourceStateEvent RSE =
+ Resources->canBeDispatched(IR.getInstruction()->getUsedBuffers());
+ HadTokenStall = RSE != RS_BUFFER_AVAILABLE;
+
+ switch (RSE) {
+ case ResourceStateEvent::RS_BUFFER_UNAVAILABLE:
+ return Scheduler::SC_BUFFERS_FULL;
+ case ResourceStateEvent::RS_RESERVED:
+ return Scheduler::SC_DISPATCH_GROUP_STALL;
+ case ResourceStateEvent::RS_BUFFER_AVAILABLE:
+ break;
+ }
+
+ // Give lower priority to LSUnit stall events.
+ LSUnit::Status LSS = LSU.isAvailable(IR);
+ HadTokenStall = LSS != LSUnit::LSU_AVAILABLE;
+
+ switch (LSS) {
+ case LSUnit::LSU_LQUEUE_FULL:
+ return Scheduler::SC_LOAD_QUEUE_FULL;
+ case LSUnit::LSU_SQUEUE_FULL:
+ return Scheduler::SC_STORE_QUEUE_FULL;
+ case LSUnit::LSU_AVAILABLE:
+ return Scheduler::SC_AVAILABLE;
+ }
+
+ llvm_unreachable("Don't know how to process this LSU state result!");
+}
+
+void Scheduler::issueInstructionImpl(
+ InstRef &IR,
+ SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &UsedResources) {
+ Instruction *IS = IR.getInstruction();
+ const InstrDesc &D = IS->getDesc();
+
+ // Issue the instruction and collect all the consumed resources
+ // into a vector. That vector is then used to notify the listener.
+ Resources->issueInstruction(D, UsedResources);
+
+ // Notify the instruction that it started executing.
+ // This updates the internal state of each write.
+ IS->execute(IR.getSourceIndex());
+
+ IS->computeCriticalRegDep();
+
+ if (IS->isMemOp()) {
+ LSU.onInstructionIssued(IR);
+ const MemoryGroup &Group = LSU.getGroup(IS->getLSUTokenID());
+ IS->setCriticalMemDep(Group.getCriticalPredecessor());
+ }
+
+ if (IS->isExecuting())
+ IssuedSet.emplace_back(IR);
+ else if (IS->isExecuted())
+ LSU.onInstructionExecuted(IR);
+}
+
+// Release the buffered resources and issue the instruction.
+void Scheduler::issueInstruction(
+ InstRef &IR,
+ SmallVectorImpl<std::pair<ResourceRef, ResourceCycles>> &UsedResources,
+ SmallVectorImpl<InstRef> &PendingInstructions,
+ SmallVectorImpl<InstRef> &ReadyInstructions) {
+ const Instruction &Inst = *IR.getInstruction();
+ bool HasDependentUsers = Inst.hasDependentUsers();
+ HasDependentUsers |= Inst.isMemOp() && LSU.hasDependentUsers(IR);
+
+ Resources->releaseBuffers(Inst.getUsedBuffers());
+ issueInstructionImpl(IR, UsedResources);
+ // Instructions that have been issued during this cycle might have unblocked
+ // other dependent instructions. Dependent instructions may be issued during
+ // this same cycle if operands have ReadAdvance entries. Promote those
+ // instructions to the ReadySet and notify the caller that those are ready.
+ if (HasDependentUsers)
+ if (promoteToPendingSet(PendingInstructions))
+ promoteToReadySet(ReadyInstructions);
+}
+
+bool Scheduler::promoteToReadySet(SmallVectorImpl<InstRef> &Ready) {
+ // Scan the set of pending instructions and promote them to the
+ // ready set if operands are all ready.
+ unsigned PromotedElements = 0;
+ for (auto I = PendingSet.begin(), E = PendingSet.end(); I != E;) {
+ InstRef &IR = *I;
+ if (!IR)
+ break;
+
+ // Check if there are unsolved register dependencies.
+ Instruction &IS = *IR.getInstruction();
+ if (!IS.isReady() && !IS.updatePending()) {
+ ++I;
+ continue;
+ }
+ // Check if there are unsolved memory dependencies.
+ if (IS.isMemOp() && !LSU.isReady(IR)) {
+ ++I;
+ continue;
+ }
+
+ LLVM_DEBUG(dbgs() << "[SCHEDULER]: Instruction #" << IR
+ << " promoted to the READY set.\n");
+
+ Ready.emplace_back(IR);
+ ReadySet.emplace_back(IR);
+
+ IR.invalidate();
+ ++PromotedElements;
+ std::iter_swap(I, E - PromotedElements);
+ }
+
+ PendingSet.resize(PendingSet.size() - PromotedElements);
+ return PromotedElements;
+}
+
+bool Scheduler::promoteToPendingSet(SmallVectorImpl<InstRef> &Pending) {
+ // Scan the set of waiting instructions and promote them to the
+ // pending set if operands are all ready.
+ unsigned RemovedElements = 0;
+ for (auto I = WaitSet.begin(), E = WaitSet.end(); I != E;) {
+ InstRef &IR = *I;
+ if (!IR)
+ break;
+
+ // Check if this instruction is now ready. If necessary, force
+ // a state transition by calling method 'updateDispatched()'.
+ Instruction &IS = *IR.getInstruction();
+ if (IS.isDispatched() && !IS.updateDispatched()) {
+ ++I;
+ continue;
+ }
+
+ if (IS.isMemOp() && LSU.isWaiting(IR)) {
+ ++I;
+ continue;
+ }
+
+ LLVM_DEBUG(dbgs() << "[SCHEDULER]: Instruction #" << IR
+ << " promoted to the PENDING set.\n");
+
+ Pending.emplace_back(IR);
+ PendingSet.emplace_back(IR);
+
+ IR.invalidate();
+ ++RemovedElements;
+ std::iter_swap(I, E - RemovedElements);
+ }
+
+ WaitSet.resize(WaitSet.size() - RemovedElements);
+ return RemovedElements;
+}
+
+InstRef Scheduler::select() {
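+ // Scan the ReadySet and pick, according to the scheduler strategy, the
+ // best candidate whose required resources are all available this cycle.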
+ unsigned QueueIndex = ReadySet.size();
+ for (unsigned I = 0, E = ReadySet.size(); I != E; ++I) {
+ InstRef &IR = ReadySet[I];
+ if (QueueIndex == ReadySet.size() ||
+ Strategy->compare(IR, ReadySet[QueueIndex])) {
+ Instruction &IS = *IR.getInstruction();
+ uint64_t BusyResourceMask = Resources->checkAvailability(IS.getDesc());
+ if (BusyResourceMask)
+ IS.setCriticalResourceMask(BusyResourceMask);
+ BusyResourceUnits |= BusyResourceMask;
+ if (!BusyResourceMask)
+ QueueIndex = I;
+ }
+ }
+
+ if (QueueIndex == ReadySet.size())
+ return InstRef();
+
+ // We found an instruction to issue.
+ InstRef IR = ReadySet[QueueIndex];
+ std::swap(ReadySet[QueueIndex], ReadySet[ReadySet.size() - 1]);
+ ReadySet.pop_back();
+ return IR;
+}
+
+void Scheduler::updateIssuedSet(SmallVectorImpl<InstRef> &Executed) {
+ unsigned RemovedElements = 0;
+ for (auto I = IssuedSet.begin(), E = IssuedSet.end(); I != E;) {
+ InstRef &IR = *I;
+ if (!IR)
+ break;
+ Instruction &IS = *IR.getInstruction();
+ if (!IS.isExecuted()) {
+ LLVM_DEBUG(dbgs() << "[SCHEDULER]: Instruction #" << IR
+ << " is still executing.\n");
+ ++I;
+ continue;
+ }
+
+ // Instruction IR has completed execution.
+ LSU.onInstructionExecuted(IR);
+ Executed.emplace_back(IR);
+ ++RemovedElements;
+ IR.invalidate();
+ std::iter_swap(I, E - RemovedElements);
+ }
+
+ IssuedSet.resize(IssuedSet.size() - RemovedElements);
+}
+
+uint64_t Scheduler::analyzeResourcePressure(SmallVectorImpl<InstRef> &Insts) {
+ llvm::append_range(Insts, ReadySet);
+ return BusyResourceUnits;
+}
+
+void Scheduler::analyzeDataDependencies(SmallVectorImpl<InstRef> &RegDeps,
+ SmallVectorImpl<InstRef> &MemDeps) {
+ const auto EndIt = PendingSet.end() - NumDispatchedToThePendingSet;
+ for (const InstRef &IR : make_range(PendingSet.begin(), EndIt)) {
+ const Instruction &IS = *IR.getInstruction();
+ if (Resources->checkAvailability(IS.getDesc()))
+ continue;
+
+ if (IS.isMemOp() && LSU.isPending(IR))
+ MemDeps.emplace_back(IR);
+
+ if (IS.isPending())
+ RegDeps.emplace_back(IR);
+ }
+}
+
+void Scheduler::cycleEvent(SmallVectorImpl<ResourceRef> &Freed,
+ SmallVectorImpl<InstRef> &Executed,
+ SmallVectorImpl<InstRef> &Pending,
+ SmallVectorImpl<InstRef> &Ready) {
+ LSU.cycleEvent();
+
+ // Release consumed resources.
+ Resources->cycleEvent(Freed);
+
+ for (InstRef &IR : IssuedSet)
+ IR.getInstruction()->cycleEvent();
+ updateIssuedSet(Executed);
+
+ for (InstRef &IR : PendingSet)
+ IR.getInstruction()->cycleEvent();
+
+ for (InstRef &IR : WaitSet)
+ IR.getInstruction()->cycleEvent();
+
+ promoteToPendingSet(Pending);
+ promoteToReadySet(Ready);
+
+ NumDispatchedToThePendingSet = 0;
+ BusyResourceUnits = 0;
+}
+
+bool Scheduler::mustIssueImmediately(const InstRef &IR) const {
+ const InstrDesc &Desc = IR.getInstruction()->getDesc();
+ if (Desc.isZeroLatency())
+ return true;
+ // Instructions that use an in-order dispatch/issue processor resource must be
+ // issued immediately to the pipeline(s). Any other in-order buffered
+ // resource (i.e. BufferSize=1) is consumed.
+ return Desc.MustIssueImmediately;
+}
+
+bool Scheduler::dispatch(InstRef &IR) {
+ Instruction &IS = *IR.getInstruction();
+ Resources->reserveBuffers(IS.getUsedBuffers());
+
+ // If necessary, reserve queue entries in the load-store unit (LSU).
+ if (IS.isMemOp())
+ IS.setLSUTokenID(LSU.dispatch(IR));
+
+ if (IS.isDispatched() || (IS.isMemOp() && LSU.isWaiting(IR))) {
+ LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding #" << IR << " to the WaitSet\n");
+ WaitSet.push_back(IR);
+ return false;
+ }
+
+ if (IS.isPending() || (IS.isMemOp() && LSU.isPending(IR))) {
+ LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding #" << IR
+ << " to the PendingSet\n");
+ PendingSet.push_back(IR);
+ ++NumDispatchedToThePendingSet;
+ return false;
+ }
+
+ assert(IS.isReady() && (!IS.isMemOp() || LSU.isReady(IR)) &&
+ "Unexpected internal state found!");
+ // Don't add a zero-latency instruction to the Ready queue.
+ // A zero-latency instruction doesn't consume any scheduler resources. That is
+ // because it doesn't need to be executed, and it is often removed at register
+ // renaming stage. For example, register-register moves are often optimized at
+ // register renaming stage by simply updating register aliases. On some
+ // targets, zero-idiom instructions (for example: a xor that clears the value
+ // of a register) are treated specially, and are often eliminated at register
+ // renaming stage.
+ if (!mustIssueImmediately(IR)) {
+ LLVM_DEBUG(dbgs() << "[SCHEDULER] Adding #" << IR << " to the ReadySet\n");
+ ReadySet.push_back(IR);
+ }
+
+ return true;
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/InstrBuilder.cpp b/contrib/libs/llvm12/lib/MCA/InstrBuilder.cpp
new file mode 100644
index 00000000000..2bad1360171
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/InstrBuilder.cpp
@@ -0,0 +1,712 @@
+//===--------------------- InstrBuilder.cpp ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements the InstrBuilder interface.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/InstrBuilder.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/WithColor.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+InstrBuilder::InstrBuilder(const llvm::MCSubtargetInfo &sti,
+ const llvm::MCInstrInfo &mcii,
+ const llvm::MCRegisterInfo &mri,
+ const llvm::MCInstrAnalysis *mcia)
+ : STI(sti), MCII(mcii), MRI(mri), MCIA(mcia), FirstCallInst(true),
+ FirstReturnInst(true) {
+ const MCSchedModel &SM = STI.getSchedModel();
+ ProcResourceMasks.resize(SM.getNumProcResourceKinds());
+ computeProcResourceMasks(STI.getSchedModel(), ProcResourceMasks);
+}
+
+static void initializeUsedResources(InstrDesc &ID,
+ const MCSchedClassDesc &SCDesc,
+ const MCSubtargetInfo &STI,
+ ArrayRef<uint64_t> ProcResourceMasks) {
+ const MCSchedModel &SM = STI.getSchedModel();
+
+ // Populate resources consumed.
+ using ResourcePlusCycles = std::pair<uint64_t, ResourceUsage>;
+ std::vector<ResourcePlusCycles> Worklist;
+
+ // Track cycles contributed by resources that are in a "Super" relationship.
+ // This is required if we want to correctly match the behavior of method
+ // SubtargetEmitter::ExpandProcResource() in Tablegen. When computing the set
+ // of "consumed" processor resources and resource cycles, the logic in
+ // ExpandProcResource() doesn't update the number of resource cycles
+ // contributed by a "Super" resource to a group.
+ // We need to take this into account when we find that a processor resource is
+ // part of a group, and it is also used as the "Super" of other resources.
+ // This map stores the number of cycles contributed by sub-resources that are
+ // part of a "Super" resource. The key value is the "Super" resource mask ID.
+ DenseMap<uint64_t, unsigned> SuperResources;
+
+ unsigned NumProcResources = SM.getNumProcResourceKinds();
+ APInt Buffers(NumProcResources, 0);
+
+ bool AllInOrderResources = true;
+ bool AnyDispatchHazards = false;
+ for (unsigned I = 0, E = SCDesc.NumWriteProcResEntries; I < E; ++I) {
+ const MCWriteProcResEntry *PRE = STI.getWriteProcResBegin(&SCDesc) + I;
+ const MCProcResourceDesc &PR = *SM.getProcResource(PRE->ProcResourceIdx);
+ if (!PRE->Cycles) {
+#ifndef NDEBUG
+ WithColor::warning()
+ << "Ignoring invalid write of zero cycles on processor resource "
+ << PR.Name << "\n";
+ WithColor::note() << "found in scheduling class " << SCDesc.Name
+ << " (write index #" << I << ")\n";
+#endif
+ continue;
+ }
+
+ uint64_t Mask = ProcResourceMasks[PRE->ProcResourceIdx];
+ if (PR.BufferSize < 0) {
+ AllInOrderResources = false;
+ } else {
+ Buffers.setBit(getResourceStateIndex(Mask));
+ AnyDispatchHazards |= (PR.BufferSize == 0);
+ AllInOrderResources &= (PR.BufferSize <= 1);
+ }
+
+ CycleSegment RCy(0, PRE->Cycles, false);
+ Worklist.emplace_back(ResourcePlusCycles(Mask, ResourceUsage(RCy)));
+ if (PR.SuperIdx) {
+ uint64_t Super = ProcResourceMasks[PR.SuperIdx];
+ SuperResources[Super] += PRE->Cycles;
+ }
+ }
+
+ ID.MustIssueImmediately = AllInOrderResources && AnyDispatchHazards;
+
+ // Sort elements by mask popcount, so that we prioritize resource units over
+ // resource groups, and smaller groups over larger groups.
+ sort(Worklist, [](const ResourcePlusCycles &A, const ResourcePlusCycles &B) {
+ unsigned popcntA = countPopulation(A.first);
+ unsigned popcntB = countPopulation(B.first);
+ if (popcntA < popcntB)
+ return true;
+ if (popcntA > popcntB)
+ return false;
+ return A.first < B.first;
+ });
+
+ uint64_t UsedResourceUnits = 0;
+ uint64_t UsedResourceGroups = 0;
+
+ // Remove cycles contributed by smaller resources.
+ for (unsigned I = 0, E = Worklist.size(); I < E; ++I) {
+ ResourcePlusCycles &A = Worklist[I];
+ if (!A.second.size()) {
+ assert(countPopulation(A.first) > 1 && "Expected a group!");
+ UsedResourceGroups |= PowerOf2Floor(A.first);
+ continue;
+ }
+
+ ID.Resources.emplace_back(A);
+ uint64_t NormalizedMask = A.first;
+ if (countPopulation(A.first) == 1) {
+ UsedResourceUnits |= A.first;
+ } else {
+ // Remove the leading 1 from the resource group mask.
+ NormalizedMask ^= PowerOf2Floor(NormalizedMask);
+ UsedResourceGroups |= (A.first ^ NormalizedMask);
+ }
+
+ for (unsigned J = I + 1; J < E; ++J) {
+ ResourcePlusCycles &B = Worklist[J];
+ if ((NormalizedMask & B.first) == NormalizedMask) {
+ B.second.CS.subtract(A.second.size() - SuperResources[A.first]);
+ if (countPopulation(B.first) > 1)
+ B.second.NumUnits++;
+ }
+ }
+ }
+
+ // A SchedWrite may specify a number of cycles in which a resource group
+ // is reserved. For example (on target x86; cpu Haswell):
+ //
+ // SchedWriteRes<[HWPort0, HWPort1, HWPort01]> {
+ // let ResourceCycles = [2, 2, 3];
+ // }
+ //
+ // This means:
+ // Resource units HWPort0 and HWPort1 are both used for 2cy.
+ // Resource group HWPort01 is the union of HWPort0 and HWPort1.
+ // Since this write touches both HWPort0 and HWPort1 for 2cy, HWPort01
+ // will not be usable for 2 entire cycles from instruction issue.
+ //
+ // On top of those 2cy, SchedWriteRes explicitly specifies an extra latency
+ // of 3 cycles for HWPort01. This tool assumes that the 3cy latency is an
+ // extra delay on top of the 2 cycles latency.
+ // During those extra cycles, HWPort01 is not usable by other instructions.
+ for (ResourcePlusCycles &RPC : ID.Resources) {
+ if (countPopulation(RPC.first) > 1 && !RPC.second.isReserved()) {
+ // Remove the leading 1 from the resource group mask.
+ uint64_t Mask = RPC.first ^ PowerOf2Floor(RPC.first);
+ uint64_t MaxResourceUnits = countPopulation(Mask);
+ if (RPC.second.NumUnits > countPopulation(Mask)) {
+ RPC.second.setReserved();
+ RPC.second.NumUnits = MaxResourceUnits;
+ }
+ }
+ }
+
+ // Identify extra buffers that are consumed through super resources.
+ for (const std::pair<uint64_t, unsigned> &SR : SuperResources) {
+ for (unsigned I = 1, E = NumProcResources; I < E; ++I) {
+ const MCProcResourceDesc &PR = *SM.getProcResource(I);
+ if (PR.BufferSize == -1)
+ continue;
+
+ uint64_t Mask = ProcResourceMasks[I];
+ if (Mask != SR.first && ((Mask & SR.first) == SR.first))
+ Buffers.setBit(getResourceStateIndex(Mask));
+ }
+ }
+
+ ID.UsedBuffers = Buffers.getZExtValue();
+ ID.UsedProcResUnits = UsedResourceUnits;
+ ID.UsedProcResGroups = UsedResourceGroups;
+
+ LLVM_DEBUG({
+ for (const std::pair<uint64_t, ResourceUsage> &R : ID.Resources)
+ dbgs() << "\t\tResource Mask=" << format_hex(R.first, 16) << ", "
+ << "Reserved=" << R.second.isReserved() << ", "
+ << "#Units=" << R.second.NumUnits << ", "
+ << "cy=" << R.second.size() << '\n';
+ uint64_t BufferIDs = ID.UsedBuffers;
+ while (BufferIDs) {
+ uint64_t Current = BufferIDs & (-BufferIDs);
+ dbgs() << "\t\tBuffer Mask=" << format_hex(Current, 16) << '\n';
+ BufferIDs ^= Current;
+ }
+ dbgs() << "\t\t Used Units=" << format_hex(ID.UsedProcResUnits, 16) << '\n';
+ dbgs() << "\t\tUsed Groups=" << format_hex(ID.UsedProcResGroups, 16)
+ << '\n';
+ });
+}
+
+static void computeMaxLatency(InstrDesc &ID, const MCInstrDesc &MCDesc,
+ const MCSchedClassDesc &SCDesc,
+ const MCSubtargetInfo &STI) {
+ if (MCDesc.isCall()) {
+ // We cannot estimate how long this call will take.
+ // Artificially set an arbitrarily high latency (100cy).
+ ID.MaxLatency = 100U;
+ return;
+ }
+
+ int Latency = MCSchedModel::computeInstrLatency(STI, SCDesc);
+ // If latency is unknown, then conservatively assume a MaxLatency of 100cy.
+ ID.MaxLatency = Latency < 0 ? 100U : static_cast<unsigned>(Latency);
+}
+
+static Error verifyOperands(const MCInstrDesc &MCDesc, const MCInst &MCI) {
+ // Count register definitions, and skip non register operands in the process.
+ unsigned I, E;
+ unsigned NumExplicitDefs = MCDesc.getNumDefs();
+ for (I = 0, E = MCI.getNumOperands(); NumExplicitDefs && I < E; ++I) {
+ const MCOperand &Op = MCI.getOperand(I);
+ if (Op.isReg())
+ --NumExplicitDefs;
+ }
+
+ if (NumExplicitDefs) {
+ return make_error<InstructionError<MCInst>>(
+ "Expected more register operand definitions.", MCI);
+ }
+
+ if (MCDesc.hasOptionalDef()) {
+ // Always assume that the optional definition is the last operand.
+ const MCOperand &Op = MCI.getOperand(MCDesc.getNumOperands() - 1);
+ if (I == MCI.getNumOperands() || !Op.isReg()) {
+ std::string Message =
+ "expected a register operand for an optional definition. Instruction "
+ "has not been correctly analyzed.";
+ return make_error<InstructionError<MCInst>>(Message, MCI);
+ }
+ }
+
+ return ErrorSuccess();
+}
+
+void InstrBuilder::populateWrites(InstrDesc &ID, const MCInst &MCI,
+ unsigned SchedClassID) {
+ const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
+ const MCSchedModel &SM = STI.getSchedModel();
+ const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
+
+ // Assumptions made by this algorithm:
+ // 1. The number of explicit and implicit register definitions in a MCInst
+ // matches the number of explicit and implicit definitions according to
+ // the opcode descriptor (MCInstrDesc).
+ // 2. Uses start at index #(MCDesc.getNumDefs()).
+ // 3. There can only be a single optional register definition, and it is
+ // either the last operand of the sequence (excluding extra operands
+ // contributed by variadic opcodes) or one of the explicit register
+ // definitions. The latter occurs for some Thumb1 instructions.
+ //
+ // These assumptions work quite well for most out-of-order in-tree targets
+ // like x86. This is mainly because the vast majority of instructions are
+ // expanded to MCInst using a straightforward lowering logic that preserves
+ // the ordering of the operands.
+ //
+ // About assumption 1.
+ // The algorithm allows non-register operands between register operand
+ // definitions. This helps to handle some special ARM instructions with
+ // implicit operand increment (-mtriple=armv7):
+ //
+ // vld1.32 {d18, d19}, [r1]! @ <MCInst #1463 VLD1q32wb_fixed
+ // @ <MCOperand Reg:59>
+ // @ <MCOperand Imm:0> (!!)
+ // @ <MCOperand Reg:67>
+ // @ <MCOperand Imm:0>
+ // @ <MCOperand Imm:14>
+ // @ <MCOperand Reg:0>>
+ //
+ // MCDesc reports:
+ // 6 explicit operands.
+ // 1 optional definition
+ // 2 explicit definitions (!!)
+ //
+ // The presence of an 'Imm' operand between the two register definitions
+ // breaks the assumption that "register definitions are always at the
+ // beginning of the operand sequence".
+ //
+ // To workaround this issue, this algorithm ignores (i.e. skips) any
+ // non-register operands between register definitions. The optional
+ // definition is still at index #(NumOperands-1).
+ //
+ // According to assumption 2, register reads start at #(NumExplicitDefs-1).
+ // That means register R1 from the example is both read and written.
+ unsigned NumExplicitDefs = MCDesc.getNumDefs();
+ unsigned NumImplicitDefs = MCDesc.getNumImplicitDefs();
+ unsigned NumWriteLatencyEntries = SCDesc.NumWriteLatencyEntries;
+ unsigned TotalDefs = NumExplicitDefs + NumImplicitDefs;
+ if (MCDesc.hasOptionalDef())
+ TotalDefs++;
+
+ unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
+ ID.Writes.resize(TotalDefs + NumVariadicOps);
+ // Iterate over the operands list, and skip non-register operands.
+ // The first NumExplicitDefs register operands are expected to be register
+ // definitions.
+ unsigned CurrentDef = 0;
+ unsigned OptionalDefIdx = MCDesc.getNumOperands() - 1;
+ unsigned i = 0;
+ for (; i < MCI.getNumOperands() && CurrentDef < NumExplicitDefs; ++i) {
+ const MCOperand &Op = MCI.getOperand(i);
+ if (!Op.isReg())
+ continue;
+
+ if (MCDesc.OpInfo[CurrentDef].isOptionalDef()) {
+ OptionalDefIdx = CurrentDef++;
+ continue;
+ }
+
+ WriteDescriptor &Write = ID.Writes[CurrentDef];
+ Write.OpIndex = i;
+ if (CurrentDef < NumWriteLatencyEntries) {
+ const MCWriteLatencyEntry &WLE =
+ *STI.getWriteLatencyEntry(&SCDesc, CurrentDef);
+ // Conservatively default to MaxLatency.
+ Write.Latency =
+ WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
+ Write.SClassOrWriteResourceID = WLE.WriteResourceID;
+ } else {
+ // Assign a default latency for this write.
+ Write.Latency = ID.MaxLatency;
+ Write.SClassOrWriteResourceID = 0;
+ }
+ Write.IsOptionalDef = false;
+ LLVM_DEBUG({
+ dbgs() << "\t\t[Def] OpIdx=" << Write.OpIndex
+ << ", Latency=" << Write.Latency
+ << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
+ });
+ CurrentDef++;
+ }
+
+ assert(CurrentDef == NumExplicitDefs &&
+ "Expected more register operand definitions.");
+ for (CurrentDef = 0; CurrentDef < NumImplicitDefs; ++CurrentDef) {
+ unsigned Index = NumExplicitDefs + CurrentDef;
+ WriteDescriptor &Write = ID.Writes[Index];
+ Write.OpIndex = ~CurrentDef;
+ Write.RegisterID = MCDesc.getImplicitDefs()[CurrentDef];
+ if (Index < NumWriteLatencyEntries) {
+ const MCWriteLatencyEntry &WLE =
+ *STI.getWriteLatencyEntry(&SCDesc, Index);
+ // Conservatively default to MaxLatency.
+ Write.Latency =
+ WLE.Cycles < 0 ? ID.MaxLatency : static_cast<unsigned>(WLE.Cycles);
+ Write.SClassOrWriteResourceID = WLE.WriteResourceID;
+ } else {
+ // Assign a default latency for this write.
+ Write.Latency = ID.MaxLatency;
+ Write.SClassOrWriteResourceID = 0;
+ }
+
+ Write.IsOptionalDef = false;
+ assert(Write.RegisterID != 0 && "Expected a valid phys register!");
+ LLVM_DEBUG({
+ dbgs() << "\t\t[Def][I] OpIdx=" << ~Write.OpIndex
+ << ", PhysReg=" << MRI.getName(Write.RegisterID)
+ << ", Latency=" << Write.Latency
+ << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
+ });
+ }
+
+ if (MCDesc.hasOptionalDef()) {
+ WriteDescriptor &Write = ID.Writes[NumExplicitDefs + NumImplicitDefs];
+ Write.OpIndex = OptionalDefIdx;
+ // Assign a default latency for this write.
+ Write.Latency = ID.MaxLatency;
+ Write.SClassOrWriteResourceID = 0;
+ Write.IsOptionalDef = true;
+ LLVM_DEBUG({
+ dbgs() << "\t\t[Def][O] OpIdx=" << Write.OpIndex
+ << ", Latency=" << Write.Latency
+ << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
+ });
+ }
+
+ if (!NumVariadicOps)
+ return;
+
+  // FIXME: If an instruction opcode is flagged 'mayStore', and it has no
+  // 'unmodeledSideEffects', then this logic optimistically assumes that any
+  // extra register operand in the variadic sequence is not a register
+  // definition.
+ //
+ // Otherwise, we conservatively assume that any register operand from the
+ // variadic sequence is both a register read and a register write.
+ bool AssumeUsesOnly = MCDesc.mayStore() && !MCDesc.mayLoad() &&
+ !MCDesc.hasUnmodeledSideEffects();
+ CurrentDef = NumExplicitDefs + NumImplicitDefs + MCDesc.hasOptionalDef();
+ for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
+ I < NumVariadicOps && !AssumeUsesOnly; ++I, ++OpIndex) {
+ const MCOperand &Op = MCI.getOperand(OpIndex);
+ if (!Op.isReg())
+ continue;
+
+ WriteDescriptor &Write = ID.Writes[CurrentDef];
+ Write.OpIndex = OpIndex;
+ // Assign a default latency for this write.
+ Write.Latency = ID.MaxLatency;
+ Write.SClassOrWriteResourceID = 0;
+ Write.IsOptionalDef = false;
+ ++CurrentDef;
+ LLVM_DEBUG({
+ dbgs() << "\t\t[Def][V] OpIdx=" << Write.OpIndex
+ << ", Latency=" << Write.Latency
+ << ", WriteResourceID=" << Write.SClassOrWriteResourceID << '\n';
+ });
+ }
+
+ ID.Writes.resize(CurrentDef);
+}
+
+void InstrBuilder::populateReads(InstrDesc &ID, const MCInst &MCI,
+ unsigned SchedClassID) {
+ const MCInstrDesc &MCDesc = MCII.get(MCI.getOpcode());
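+  // Explicit uses are the explicit operands that are not definitions. The
+  // optional definition, if present, is the last explicit operand and is
+  // excluded from the count below.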
+ unsigned NumExplicitUses = MCDesc.getNumOperands() - MCDesc.getNumDefs();
+ unsigned NumImplicitUses = MCDesc.getNumImplicitUses();
+ // Remove the optional definition.
+ if (MCDesc.hasOptionalDef())
+ --NumExplicitUses;
+ unsigned NumVariadicOps = MCI.getNumOperands() - MCDesc.getNumOperands();
+ unsigned TotalUses = NumExplicitUses + NumImplicitUses + NumVariadicOps;
+ ID.Reads.resize(TotalUses);
+ unsigned CurrentUse = 0;
+ for (unsigned I = 0, OpIndex = MCDesc.getNumDefs(); I < NumExplicitUses;
+ ++I, ++OpIndex) {
+ const MCOperand &Op = MCI.getOperand(OpIndex);
+ if (!Op.isReg())
+ continue;
+
+ ReadDescriptor &Read = ID.Reads[CurrentUse];
+ Read.OpIndex = OpIndex;
+ Read.UseIndex = I;
+ Read.SchedClassID = SchedClassID;
+ ++CurrentUse;
+ LLVM_DEBUG(dbgs() << "\t\t[Use] OpIdx=" << Read.OpIndex
+ << ", UseIndex=" << Read.UseIndex << '\n');
+ }
+
+ // For the purpose of ReadAdvance, implicit uses come directly after explicit
+ // uses. The "UseIndex" must be updated according to that implicit layout.
+ for (unsigned I = 0; I < NumImplicitUses; ++I) {
+ ReadDescriptor &Read = ID.Reads[CurrentUse + I];
+ Read.OpIndex = ~I;
+ Read.UseIndex = NumExplicitUses + I;
+ Read.RegisterID = MCDesc.getImplicitUses()[I];
+ Read.SchedClassID = SchedClassID;
+ LLVM_DEBUG(dbgs() << "\t\t[Use][I] OpIdx=" << ~Read.OpIndex
+ << ", UseIndex=" << Read.UseIndex << ", RegisterID="
+ << MRI.getName(Read.RegisterID) << '\n');
+ }
+
+ CurrentUse += NumImplicitUses;
+
+  // FIXME: If an instruction opcode is marked as 'mayLoad', and it has no
+  // 'unmodeledSideEffects', then this logic optimistically assumes that any
+  // extra register operand in the variadic sequence is not a register use.
+ bool AssumeDefsOnly = !MCDesc.mayStore() && MCDesc.mayLoad() &&
+ !MCDesc.hasUnmodeledSideEffects();
+ for (unsigned I = 0, OpIndex = MCDesc.getNumOperands();
+ I < NumVariadicOps && !AssumeDefsOnly; ++I, ++OpIndex) {
+ const MCOperand &Op = MCI.getOperand(OpIndex);
+ if (!Op.isReg())
+ continue;
+
+ ReadDescriptor &Read = ID.Reads[CurrentUse];
+ Read.OpIndex = OpIndex;
+ Read.UseIndex = NumExplicitUses + NumImplicitUses + I;
+ Read.SchedClassID = SchedClassID;
+ ++CurrentUse;
+ LLVM_DEBUG(dbgs() << "\t\t[Use][V] OpIdx=" << Read.OpIndex
+ << ", UseIndex=" << Read.UseIndex << '\n');
+ }
+
+ ID.Reads.resize(CurrentUse);
+}
+
+Error InstrBuilder::verifyInstrDesc(const InstrDesc &ID,
+ const MCInst &MCI) const {
+ if (ID.NumMicroOps != 0)
+ return ErrorSuccess();
+
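+  // An instruction that decodes to zero micro opcodes is expected to consume
+  // neither buffer entries nor scheduler resources; anything else indicates
+  // an inconsistent scheduling model.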
+ bool UsesBuffers = ID.UsedBuffers;
+ bool UsesResources = !ID.Resources.empty();
+ if (!UsesBuffers && !UsesResources)
+ return ErrorSuccess();
+
+  // FIXME: see PR44797. We should revisit these checks and possibly move them
+  // into CodeGenSchedule.cpp.
+ StringRef Message = "found an inconsistent instruction that decodes to zero "
+ "opcodes and that consumes scheduler resources.";
+ return make_error<InstructionError<MCInst>>(std::string(Message), MCI);
+}
+
+Expected<const InstrDesc &>
+InstrBuilder::createInstrDescImpl(const MCInst &MCI) {
+ assert(STI.getSchedModel().hasInstrSchedModel() &&
+ "Itineraries are not yet supported!");
+
+ // Obtain the instruction descriptor from the opcode.
+ unsigned short Opcode = MCI.getOpcode();
+ const MCInstrDesc &MCDesc = MCII.get(Opcode);
+ const MCSchedModel &SM = STI.getSchedModel();
+
+ // Then obtain the scheduling class information from the instruction.
+ unsigned SchedClassID = MCDesc.getSchedClass();
+ bool IsVariant = SM.getSchedClassDesc(SchedClassID)->isVariant();
+
+ // Try to solve variant scheduling classes.
+ if (IsVariant) {
+ unsigned CPUID = SM.getProcessorID();
+ while (SchedClassID && SM.getSchedClassDesc(SchedClassID)->isVariant())
+ SchedClassID =
+ STI.resolveVariantSchedClass(SchedClassID, &MCI, &MCII, CPUID);
+
+ if (!SchedClassID) {
+ return make_error<InstructionError<MCInst>>(
+ "unable to resolve scheduling class for write variant.", MCI);
+ }
+ }
+
+ // Check if this instruction is supported. Otherwise, report an error.
+ const MCSchedClassDesc &SCDesc = *SM.getSchedClassDesc(SchedClassID);
+ if (SCDesc.NumMicroOps == MCSchedClassDesc::InvalidNumMicroOps) {
+ return make_error<InstructionError<MCInst>>(
+ "found an unsupported instruction in the input assembly sequence.",
+ MCI);
+ }
+
+ LLVM_DEBUG(dbgs() << "\n\t\tOpcode Name= " << MCII.getName(Opcode) << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tSchedClassID=" << SchedClassID << '\n');
+
+ // Create a new empty descriptor.
+ std::unique_ptr<InstrDesc> ID = std::make_unique<InstrDesc>();
+ ID->NumMicroOps = SCDesc.NumMicroOps;
+ ID->SchedClassID = SchedClassID;
+
+ if (MCDesc.isCall() && FirstCallInst) {
+ // We don't correctly model calls.
+ WithColor::warning() << "found a call in the input assembly sequence.\n";
+ WithColor::note() << "call instructions are not correctly modeled. "
+ << "Assume a latency of 100cy.\n";
+ FirstCallInst = false;
+ }
+
+ if (MCDesc.isReturn() && FirstReturnInst) {
+ WithColor::warning() << "found a return instruction in the input"
+ << " assembly sequence.\n";
+ WithColor::note() << "program counter updates are ignored.\n";
+ FirstReturnInst = false;
+ }
+
+ ID->MayLoad = MCDesc.mayLoad();
+ ID->MayStore = MCDesc.mayStore();
+ ID->HasSideEffects = MCDesc.hasUnmodeledSideEffects();
+ ID->BeginGroup = SCDesc.BeginGroup;
+ ID->EndGroup = SCDesc.EndGroup;
+
+ initializeUsedResources(*ID, SCDesc, STI, ProcResourceMasks);
+ computeMaxLatency(*ID, MCDesc, SCDesc, STI);
+
+ if (Error Err = verifyOperands(MCDesc, MCI))
+ return std::move(Err);
+
+ populateWrites(*ID, MCI, SchedClassID);
+ populateReads(*ID, MCI, SchedClassID);
+
+ LLVM_DEBUG(dbgs() << "\t\tMaxLatency=" << ID->MaxLatency << '\n');
+ LLVM_DEBUG(dbgs() << "\t\tNumMicroOps=" << ID->NumMicroOps << '\n');
+
+ // Sanity check on the instruction descriptor.
+ if (Error Err = verifyInstrDesc(*ID, MCI))
+ return std::move(Err);
+
+ // Now add the new descriptor.
+ bool IsVariadic = MCDesc.isVariadic();
+ if (!IsVariadic && !IsVariant) {
+ Descriptors[MCI.getOpcode()] = std::move(ID);
+ return *Descriptors[MCI.getOpcode()];
+ }
+
+ VariantDescriptors[&MCI] = std::move(ID);
+ return *VariantDescriptors[&MCI];
+}
+
+Expected<const InstrDesc &>
+InstrBuilder::getOrCreateInstrDesc(const MCInst &MCI) {
+ if (Descriptors.find_as(MCI.getOpcode()) != Descriptors.end())
+ return *Descriptors[MCI.getOpcode()];
+
+ if (VariantDescriptors.find(&MCI) != VariantDescriptors.end())
+ return *VariantDescriptors[&MCI];
+
+ return createInstrDescImpl(MCI);
+}
+
+Expected<std::unique_ptr<Instruction>>
+InstrBuilder::createInstruction(const MCInst &MCI) {
+ Expected<const InstrDesc &> DescOrErr = getOrCreateInstrDesc(MCI);
+ if (!DescOrErr)
+ return DescOrErr.takeError();
+ const InstrDesc &D = *DescOrErr;
+ std::unique_ptr<Instruction> NewIS = std::make_unique<Instruction>(D);
+
+ // Check if this is a dependency breaking instruction.
+ APInt Mask;
+
+ bool IsZeroIdiom = false;
+ bool IsDepBreaking = false;
+ if (MCIA) {
+ unsigned ProcID = STI.getSchedModel().getProcessorID();
+ IsZeroIdiom = MCIA->isZeroIdiom(MCI, Mask, ProcID);
+ IsDepBreaking =
+ IsZeroIdiom || MCIA->isDependencyBreaking(MCI, Mask, ProcID);
+ if (MCIA->isOptimizableRegisterMove(MCI, ProcID))
+ NewIS->setOptimizableMove();
+ }
+
+ // Initialize Reads first.
+ MCPhysReg RegID = 0;
+ for (const ReadDescriptor &RD : D.Reads) {
+ if (!RD.isImplicitRead()) {
+      // Explicit read.
+ const MCOperand &Op = MCI.getOperand(RD.OpIndex);
+ // Skip non-register operands.
+ if (!Op.isReg())
+ continue;
+ RegID = Op.getReg();
+ } else {
+ // Implicit read.
+ RegID = RD.RegisterID;
+ }
+
+ // Skip invalid register operands.
+ if (!RegID)
+ continue;
+
+ // Okay, this is a register operand. Create a ReadState for it.
+ NewIS->getUses().emplace_back(RD, RegID);
+ ReadState &RS = NewIS->getUses().back();
+
+ if (IsDepBreaking) {
+      // A mask of all zeroes means: all the explicit input operands are
+      // independent from their matching definitions.
+ if (Mask.isNullValue()) {
+ if (!RD.isImplicitRead())
+ RS.setIndependentFromDef();
+ } else {
+ // Check if this register operand is independent according to `Mask`.
+ // Note that Mask may not have enough bits to describe all explicit and
+ // implicit input operands. If this register operand doesn't have a
+ // corresponding bit in Mask, then conservatively assume that it is
+ // dependent.
+ if (Mask.getBitWidth() > RD.UseIndex) {
+          // Okay. This mask describes register use `RD.UseIndex`.
+ if (Mask[RD.UseIndex])
+ RS.setIndependentFromDef();
+ }
+ }
+ }
+ }
+
+ // Early exit if there are no writes.
+ if (D.Writes.empty())
+ return std::move(NewIS);
+
+ // Track register writes that implicitly clear the upper portion of the
+ // underlying super-registers using an APInt.
+ APInt WriteMask(D.Writes.size(), 0);
+
+ // Now query the MCInstrAnalysis object to obtain information about which
+ // register writes implicitly clear the upper portion of a super-register.
+ if (MCIA)
+ MCIA->clearsSuperRegisters(MRI, MCI, WriteMask);
+
+ // Initialize writes.
+ unsigned WriteIndex = 0;
+ for (const WriteDescriptor &WD : D.Writes) {
+ RegID = WD.isImplicitWrite() ? WD.RegisterID
+ : MCI.getOperand(WD.OpIndex).getReg();
+    // Check if this is an optional definition that references NoReg.
+ if (WD.IsOptionalDef && !RegID) {
+ ++WriteIndex;
+ continue;
+ }
+
+ assert(RegID && "Expected a valid register ID!");
+ NewIS->getDefs().emplace_back(WD, RegID,
+ /* ClearsSuperRegs */ WriteMask[WriteIndex],
+ /* WritesZero */ IsZeroIdiom);
+ ++WriteIndex;
+ }
+
+ return std::move(NewIS);
+}
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Instruction.cpp b/contrib/libs/llvm12/lib/MCA/Instruction.cpp
new file mode 100644
index 00000000000..e5f2c4fd1ee
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Instruction.cpp
@@ -0,0 +1,254 @@
+//===--------------------- Instruction.cpp ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines abstractions used by the Pipeline to model register reads,
+// register writes and instructions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Instruction.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace mca {
+
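+// Invoked when the write this one depends on is issued: record the critical
+// register dependency and the number of cycles left before this write can
+// start executing.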
+void WriteState::writeStartEvent(unsigned IID, MCPhysReg RegID,
+ unsigned Cycles) {
+ CRD.IID = IID;
+ CRD.RegID = RegID;
+ CRD.Cycles = Cycles;
+ DependentWriteCyclesLeft = Cycles;
+ DependentWrite = nullptr;
+}
+
+void ReadState::writeStartEvent(unsigned IID, MCPhysReg RegID,
+                                unsigned Cycles) {
+ assert(DependentWrites);
+ assert(CyclesLeft == UNKNOWN_CYCLES);
+
+ // This read may be dependent on more than one write. This typically occurs
+ // when a definition is the result of multiple writes where at least one
+ // write does a partial register update.
+  // The HW is forced to do some extra bookkeeping to keep track of all the
+  // dependent writes, and to implement a merging scheme for the partial
+  // writes.
+ --DependentWrites;
+ if (TotalCycles < Cycles) {
+ CRD.IID = IID;
+ CRD.RegID = RegID;
+ CRD.Cycles = Cycles;
+ TotalCycles = Cycles;
+ }
+
+ if (!DependentWrites) {
+ CyclesLeft = TotalCycles;
+ IsReady = !CyclesLeft;
+ }
+}
+
+void WriteState::onInstructionIssued(unsigned IID) {
+ assert(CyclesLeft == UNKNOWN_CYCLES);
+ // Update the number of cycles left based on the WriteDescriptor info.
+ CyclesLeft = getLatency();
+
+ // Now that the time left before write-back is known, notify
+ // all the users.
+ for (const std::pair<ReadState *, int> &User : Users) {
+ ReadState *RS = User.first;
+ unsigned ReadCycles = std::max(0, CyclesLeft - User.second);
+ RS->writeStartEvent(IID, RegisterID, ReadCycles);
+ }
+
+ // Notify any writes that are in a false dependency with this write.
+ if (PartialWrite)
+ PartialWrite->writeStartEvent(IID, RegisterID, CyclesLeft);
+}
+
+void WriteState::addUser(unsigned IID, ReadState *User, int ReadAdvance) {
+  // If CyclesLeft is different from UNKNOWN_CYCLES, then we don't need to
+ // update the list of users. We can just notify the user with
+ // the actual number of cycles left (which may be zero).
+ if (CyclesLeft != UNKNOWN_CYCLES) {
+ unsigned ReadCycles = std::max(0, CyclesLeft - ReadAdvance);
+ User->writeStartEvent(IID, RegisterID, ReadCycles);
+ return;
+ }
+
+ Users.emplace_back(User, ReadAdvance);
+}
+
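+// Registers a false dependency on this write: User is a partial register
+// update that cannot complete before this write does.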
+void WriteState::addUser(unsigned IID, WriteState *User) {
+ if (CyclesLeft != UNKNOWN_CYCLES) {
+ User->writeStartEvent(IID, RegisterID, std::max(0, CyclesLeft));
+ return;
+ }
+
+ assert(!PartialWrite && "PartialWrite already set!");
+ PartialWrite = User;
+ User->setDependentWrite(this);
+}
+
+void WriteState::cycleEvent() {
+ // Note: CyclesLeft can be a negative number. It is an error to
+ // make it an unsigned quantity because users of this write may
+ // specify a negative ReadAdvance.
+ if (CyclesLeft != UNKNOWN_CYCLES)
+ CyclesLeft--;
+
+ if (DependentWriteCyclesLeft)
+ DependentWriteCyclesLeft--;
+}
+
+void ReadState::cycleEvent() {
+ // Update the total number of cycles.
+ if (DependentWrites && TotalCycles) {
+ --TotalCycles;
+ return;
+ }
+
+ // Bail out immediately if we don't know how many cycles are left.
+ if (CyclesLeft == UNKNOWN_CYCLES)
+ return;
+
+ if (CyclesLeft) {
+ --CyclesLeft;
+ IsReady = !CyclesLeft;
+ }
+}
+
+#ifndef NDEBUG
+void WriteState::dump() const {
+ dbgs() << "{ OpIdx=" << WD->OpIndex << ", Lat=" << getLatency() << ", RegID "
+ << getRegisterID() << ", Cycles Left=" << getCyclesLeft() << " }";
+}
+
+void WriteRef::dump() const {
+ dbgs() << "IID=" << getSourceIndex() << ' ';
+ if (isValid())
+ getWriteState()->dump();
+ else
+ dbgs() << "(null)";
+}
+#endif
+
+const CriticalDependency &Instruction::computeCriticalRegDep() {
+ if (CriticalRegDep.Cycles)
+ return CriticalRegDep;
+
+  unsigned MaxLatency = 0;
+  for (const WriteState &WS : getDefs()) {
+    const CriticalDependency &WriteCRD = WS.getCriticalRegDep();
+    if (WriteCRD.Cycles > MaxLatency) {
+      MaxLatency = WriteCRD.Cycles;
+      CriticalRegDep = WriteCRD;
+    }
+  }
+
+  for (const ReadState &RS : getUses()) {
+    const CriticalDependency &ReadCRD = RS.getCriticalRegDep();
+    if (ReadCRD.Cycles > MaxLatency) {
+      MaxLatency = ReadCRD.Cycles;
+      CriticalRegDep = ReadCRD;
+    }
+  }
+
+ return CriticalRegDep;
+}
+
+void Instruction::dispatch(unsigned RCUToken) {
+ assert(Stage == IS_INVALID);
+ Stage = IS_DISPATCHED;
+ RCUTokenID = RCUToken;
+
+ // Check if input operands are already available.
+ if (updateDispatched())
+ updatePending();
+}
+
+void Instruction::execute(unsigned IID) {
+ assert(Stage == IS_READY);
+ Stage = IS_EXECUTING;
+
+ // Set the cycles left before the write-back stage.
+ CyclesLeft = getLatency();
+
+ for (WriteState &WS : getDefs())
+ WS.onInstructionIssued(IID);
+
+ // Transition to the "executed" stage if this is a zero-latency instruction.
+ if (!CyclesLeft)
+ Stage = IS_EXECUTED;
+}
+
+void Instruction::forceExecuted() {
+ assert(Stage == IS_READY && "Invalid internal state!");
+ CyclesLeft = 0;
+ Stage = IS_EXECUTED;
+}
+
+bool Instruction::updatePending() {
+ assert(isPending() && "Unexpected instruction stage found!");
+
+ if (!all_of(getUses(), [](const ReadState &Use) { return Use.isReady(); }))
+ return false;
+
+ // A partial register write cannot complete before a dependent write.
+ if (!all_of(getDefs(), [](const WriteState &Def) { return Def.isReady(); }))
+ return false;
+
+ Stage = IS_READY;
+ return true;
+}
+
+bool Instruction::updateDispatched() {
+ assert(isDispatched() && "Unexpected instruction stage found!");
+
+ if (!all_of(getUses(), [](const ReadState &Use) {
+ return Use.isPending() || Use.isReady();
+ }))
+ return false;
+
+ // A partial register write cannot complete before a dependent write.
+ if (!all_of(getDefs(),
+ [](const WriteState &Def) { return !Def.getDependentWrite(); }))
+ return false;
+
+ Stage = IS_PENDING;
+ return true;
+}
+
+void Instruction::update() {
+ if (isDispatched())
+ updateDispatched();
+ if (isPending())
+ updatePending();
+}
+
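+// Advance this instruction by one cycle: tick its register reads and writes,
+// and move it to the IS_EXECUTED stage once its latency has fully elapsed.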
+void Instruction::cycleEvent() {
+ if (isReady())
+ return;
+
+ if (isDispatched() || isPending()) {
+ for (ReadState &Use : getUses())
+ Use.cycleEvent();
+
+ for (WriteState &Def : getDefs())
+ Def.cycleEvent();
+
+ update();
+ return;
+ }
+
+ assert(isExecuting() && "Instruction not in-flight?");
+ assert(CyclesLeft && "Instruction already executed?");
+ for (WriteState &Def : getDefs())
+ Def.cycleEvent();
+ CyclesLeft--;
+ if (!CyclesLeft)
+ Stage = IS_EXECUTED;
+}
+
+const unsigned WriteRef::INVALID_IID = std::numeric_limits<unsigned>::max();
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Pipeline.cpp b/contrib/libs/llvm12/lib/MCA/Pipeline.cpp
new file mode 100644
index 00000000000..22b9d0799f7
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Pipeline.cpp
@@ -0,0 +1,97 @@
+//===--------------------- Pipeline.cpp -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements an ordered container of stages that simulate the
+/// pipeline of a hardware backend.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Pipeline.h"
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/Support/Debug.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+
+void Pipeline::addEventListener(HWEventListener *Listener) {
+ if (Listener)
+ Listeners.insert(Listener);
+ for (auto &S : Stages)
+ S->addListener(Listener);
+}
+
+bool Pipeline::hasWorkToProcess() {
+ return any_of(Stages, [](const std::unique_ptr<Stage> &S) {
+ return S->hasWorkToComplete();
+ });
+}
+
+Expected<unsigned> Pipeline::run() {
+ assert(!Stages.empty() && "Unexpected empty pipeline found!");
+
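+  // Simulate one cycle at a time, until no stage has work left to complete.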
+ do {
+ notifyCycleBegin();
+ if (Error Err = runCycle())
+ return std::move(Err);
+ notifyCycleEnd();
+ ++Cycles;
+ } while (hasWorkToProcess());
+
+ return Cycles;
+}
+
+Error Pipeline::runCycle() {
+ Error Err = ErrorSuccess();
+ // Update stages before we start processing new instructions.
+ for (auto I = Stages.rbegin(), E = Stages.rend(); I != E && !Err; ++I) {
+ const std::unique_ptr<Stage> &S = *I;
+ Err = S->cycleStart();
+ }
+
+ // Now fetch and execute new instructions.
+ InstRef IR;
+ Stage &FirstStage = *Stages[0];
+ while (!Err && FirstStage.isAvailable(IR))
+ Err = FirstStage.execute(IR);
+
+ // Update stages in preparation for a new cycle.
+ for (const std::unique_ptr<Stage> &S : Stages) {
+ Err = S->cycleEnd();
+ if (Err)
+ break;
+ }
+
+ return Err;
+}
+
+void Pipeline::appendStage(std::unique_ptr<Stage> S) {
+ assert(S && "Invalid null stage in input!");
+ if (!Stages.empty()) {
+ Stage *Last = Stages.back().get();
+ Last->setNextInSequence(S.get());
+ }
+
+ Stages.push_back(std::move(S));
+}
+
+void Pipeline::notifyCycleBegin() {
+ LLVM_DEBUG(dbgs() << "\n[E] Cycle begin: " << Cycles << '\n');
+ for (HWEventListener *Listener : Listeners)
+ Listener->onCycleBegin();
+}
+
+void Pipeline::notifyCycleEnd() {
+ LLVM_DEBUG(dbgs() << "[E] Cycle end: " << Cycles << "\n");
+ for (HWEventListener *Listener : Listeners)
+ Listener->onCycleEnd();
+}
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Stages/DispatchStage.cpp b/contrib/libs/llvm12/lib/MCA/Stages/DispatchStage.cpp
new file mode 100644
index 00000000000..3a3d8225916
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Stages/DispatchStage.cpp
@@ -0,0 +1,187 @@
+//===--------------------- DispatchStage.cpp --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file models the dispatch component of an instruction pipeline.
+///
+/// The DispatchStage is responsible for updating instruction dependencies
+/// and communicating to the simulated instruction scheduler that an instruction
+/// is ready to be scheduled for execution.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/DispatchStage.h"
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/MCA/HardwareUnits/Scheduler.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+DispatchStage::DispatchStage(const MCSubtargetInfo &Subtarget,
+ const MCRegisterInfo &MRI,
+ unsigned MaxDispatchWidth, RetireControlUnit &R,
+ RegisterFile &F)
+ : DispatchWidth(MaxDispatchWidth), AvailableEntries(MaxDispatchWidth),
+ CarryOver(0U), CarriedOver(), STI(Subtarget), RCU(R), PRF(F) {
+ if (!DispatchWidth)
+ DispatchWidth = Subtarget.getSchedModel().IssueWidth;
+}
+
+void DispatchStage::notifyInstructionDispatched(const InstRef &IR,
+ ArrayRef<unsigned> UsedRegs,
+ unsigned UOps) const {
+ LLVM_DEBUG(dbgs() << "[E] Instruction Dispatched: #" << IR << '\n');
+ notifyEvent<HWInstructionEvent>(
+ HWInstructionDispatchedEvent(IR, UsedRegs, UOps));
+}
+
+bool DispatchStage::checkPRF(const InstRef &IR) const {
+ SmallVector<MCPhysReg, 4> RegDefs;
+ for (const WriteState &RegDef : IR.getInstruction()->getDefs())
+ RegDefs.emplace_back(RegDef.getRegisterID());
+
+ const unsigned RegisterMask = PRF.isAvailable(RegDefs);
+ // A mask with all zeroes means: register files are available.
+ if (RegisterMask) {
+ notifyEvent<HWStallEvent>(
+ HWStallEvent(HWStallEvent::RegisterFileStall, IR));
+ return false;
+ }
+
+ return true;
+}
+
+bool DispatchStage::checkRCU(const InstRef &IR) const {
+ const unsigned NumMicroOps = IR.getInstruction()->getNumMicroOps();
+ if (RCU.isAvailable(NumMicroOps))
+ return true;
+ notifyEvent<HWStallEvent>(
+ HWStallEvent(HWStallEvent::RetireControlUnitStall, IR));
+ return false;
+}
+
+bool DispatchStage::canDispatch(const InstRef &IR) const {
+ bool CanDispatch = checkRCU(IR);
+ CanDispatch &= checkPRF(IR);
+ CanDispatch &= checkNextStage(IR);
+ return CanDispatch;
+}
+
+Error DispatchStage::dispatch(InstRef IR) {
+ assert(!CarryOver && "Cannot dispatch another instruction!");
+ Instruction &IS = *IR.getInstruction();
+ const InstrDesc &Desc = IS.getDesc();
+ const unsigned NumMicroOps = IS.getNumMicroOps();
+ if (NumMicroOps > DispatchWidth) {
+ assert(AvailableEntries == DispatchWidth);
+ AvailableEntries = 0;
+ CarryOver = NumMicroOps - DispatchWidth;
+ CarriedOver = IR;
+ } else {
+ assert(AvailableEntries >= NumMicroOps);
+ AvailableEntries -= NumMicroOps;
+ }
+
+  // Check if this instruction ends the dispatch group.
+ if (Desc.EndGroup)
+ AvailableEntries = 0;
+
+ // Check if this is an optimizable reg-reg move.
+ if (IS.isOptimizableMove()) {
+    assert(IS.getDefs().size() == 1 && "Expected a single output!");
+    assert(IS.getUses().size() == 1 && "Expected a single input!");
+ if (PRF.tryEliminateMove(IS.getDefs()[0], IS.getUses()[0]))
+ IS.setEliminated();
+ }
+
+  // A dependency-breaking instruction doesn't have to wait on the register
+  // input operands, and it is often optimized out at the register renaming
+  // stage. Update RAW dependencies only if this instruction is not a
+  // dependency-breaking instruction. A dependency-breaking instruction is a
+  // zero-latency instruction that doesn't consume hardware resources.
+  // An example of a dependency-breaking instruction on X86 is a zero-idiom
+  // XOR.
+ //
+ // We also don't update data dependencies for instructions that have been
+ // eliminated at register renaming stage.
+ if (!IS.isEliminated()) {
+ for (ReadState &RS : IS.getUses())
+ PRF.addRegisterRead(RS, STI);
+ }
+
+ // By default, a dependency-breaking zero-idiom is expected to be optimized
+ // at register renaming stage. That means, no physical register is allocated
+ // to the instruction.
+ SmallVector<unsigned, 4> RegisterFiles(PRF.getNumRegisterFiles());
+ for (WriteState &WS : IS.getDefs())
+ PRF.addRegisterWrite(WriteRef(IR.getSourceIndex(), &WS), RegisterFiles);
+
+ // Reserve entries in the reorder buffer.
+ unsigned RCUTokenID = RCU.dispatch(IR);
+ // Notify the instruction that it has been dispatched.
+ IS.dispatch(RCUTokenID);
+
+ // Notify listeners of the "instruction dispatched" event,
+ // and move IR to the next stage.
+ notifyInstructionDispatched(IR, RegisterFiles,
+ std::min(DispatchWidth, NumMicroOps));
+ return moveToTheNextStage(IR);
+}
+
+Error DispatchStage::cycleStart() {
+ PRF.cycleStart();
+
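+  // A previous instruction with more micro opcodes than the DispatchWidth is
+  // still being dispatched: keep consuming entries for its leftover opcodes
+  // before accepting new instructions.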
+ if (!CarryOver) {
+ AvailableEntries = DispatchWidth;
+ return ErrorSuccess();
+ }
+
+ AvailableEntries = CarryOver >= DispatchWidth ? 0 : DispatchWidth - CarryOver;
+ unsigned DispatchedOpcodes = DispatchWidth - AvailableEntries;
+ CarryOver -= DispatchedOpcodes;
+ assert(CarriedOver && "Invalid dispatched instruction");
+
+ SmallVector<unsigned, 8> RegisterFiles(PRF.getNumRegisterFiles(), 0U);
+ notifyInstructionDispatched(CarriedOver, RegisterFiles, DispatchedOpcodes);
+ if (!CarryOver)
+ CarriedOver = InstRef();
+ return ErrorSuccess();
+}
+
+bool DispatchStage::isAvailable(const InstRef &IR) const {
+ const Instruction &Inst = *IR.getInstruction();
+ unsigned NumMicroOps = Inst.getNumMicroOps();
+ const InstrDesc &Desc = Inst.getDesc();
+ unsigned Required = std::min(NumMicroOps, DispatchWidth);
+ if (Required > AvailableEntries)
+ return false;
+
+ if (Desc.BeginGroup && AvailableEntries != DispatchWidth)
+ return false;
+
+ // The dispatch logic doesn't internally buffer instructions. It only accepts
+ // instructions that can be successfully moved to the next stage during this
+ // same cycle.
+ return canDispatch(IR);
+}
+
+Error DispatchStage::execute(InstRef &IR) {
+ assert(canDispatch(IR) && "Cannot dispatch another instruction!");
+ return dispatch(IR);
+}
+
+#ifndef NDEBUG
+void DispatchStage::dump() const {
+ PRF.dump();
+ RCU.dump();
+}
+#endif
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Stages/EntryStage.cpp b/contrib/libs/llvm12/lib/MCA/Stages/EntryStage.cpp
new file mode 100644
index 00000000000..66135790a4c
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Stages/EntryStage.cpp
@@ -0,0 +1,77 @@
+//===---------------------- EntryStage.cpp ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the Fetch stage of an instruction pipeline. Its sole
+/// purpose in life is to produce instructions for the rest of the pipeline.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/EntryStage.h"
+#include "llvm/MCA/Instruction.h"
+
+namespace llvm {
+namespace mca {
+
+bool EntryStage::hasWorkToComplete() const {
+ return static_cast<bool>(CurrentInstruction);
+}
+
+bool EntryStage::isAvailable(const InstRef & /* unused */) const {
+ if (CurrentInstruction)
+ return checkNextStage(CurrentInstruction);
+ return false;
+}
+
+void EntryStage::getNextInstruction() {
+ assert(!CurrentInstruction && "There is already an instruction to process!");
+ if (!SM.hasNext())
+ return;
+ SourceRef SR = SM.peekNext();
+ std::unique_ptr<Instruction> Inst = std::make_unique<Instruction>(SR.second);
+ CurrentInstruction = InstRef(SR.first, Inst.get());
+ Instructions.emplace_back(std::move(Inst));
+ SM.updateNext();
+}
+
+llvm::Error EntryStage::execute(InstRef & /*unused */) {
+ assert(CurrentInstruction && "There is no instruction to process!");
+ if (llvm::Error Val = moveToTheNextStage(CurrentInstruction))
+ return Val;
+
+ // Move the program counter.
+ CurrentInstruction.invalidate();
+ getNextInstruction();
+ return llvm::ErrorSuccess();
+}
+
+llvm::Error EntryStage::cycleStart() {
+ if (!CurrentInstruction)
+ getNextInstruction();
+ return llvm::ErrorSuccess();
+}
+
+llvm::Error EntryStage::cycleEnd() {
+ // Find the first instruction which hasn't been retired.
+ auto Range = make_range(&Instructions[NumRetired], Instructions.end());
+ auto It = find_if(Range, [](const std::unique_ptr<Instruction> &I) {
+ return !I->isRetired();
+ });
+
+ NumRetired = std::distance(Instructions.begin(), It);
+ // Erase instructions up to the first that hasn't been retired.
+ if ((NumRetired * 2) >= Instructions.size()) {
+ Instructions.erase(Instructions.begin(), It);
+ NumRetired = 0;
+ }
+
+ return llvm::ErrorSuccess();
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Stages/ExecuteStage.cpp b/contrib/libs/llvm12/lib/MCA/Stages/ExecuteStage.cpp
new file mode 100644
index 00000000000..2284ed7f281
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Stages/ExecuteStage.cpp
@@ -0,0 +1,296 @@
+//===---------------------- ExecuteStage.cpp --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the execution stage of an instruction pipeline.
+///
+/// The ExecuteStage is responsible for managing the hardware scheduler
+/// and issuing notifications that an instruction has been executed.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/ExecuteStage.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
+HWStallEvent::GenericEventType toHWStallEventType(Scheduler::Status Status) {
+ switch (Status) {
+ case Scheduler::SC_LOAD_QUEUE_FULL:
+ return HWStallEvent::LoadQueueFull;
+ case Scheduler::SC_STORE_QUEUE_FULL:
+ return HWStallEvent::StoreQueueFull;
+ case Scheduler::SC_BUFFERS_FULL:
+ return HWStallEvent::SchedulerQueueFull;
+ case Scheduler::SC_DISPATCH_GROUP_STALL:
+ return HWStallEvent::DispatchGroupStall;
+ case Scheduler::SC_AVAILABLE:
+ return HWStallEvent::Invalid;
+ }
+
+ llvm_unreachable("Don't know how to process this StallKind!");
+}
+
+bool ExecuteStage::isAvailable(const InstRef &IR) const {
+ if (Scheduler::Status S = HWS.isAvailable(IR)) {
+ HWStallEvent::GenericEventType ET = toHWStallEventType(S);
+ notifyEvent<HWStallEvent>(HWStallEvent(ET, IR));
+ return false;
+ }
+
+ return true;
+}
+
+Error ExecuteStage::issueInstruction(InstRef &IR) {
+ SmallVector<std::pair<ResourceRef, ResourceCycles>, 4> Used;
+ SmallVector<InstRef, 4> Pending;
+ SmallVector<InstRef, 4> Ready;
+
+ HWS.issueInstruction(IR, Used, Pending, Ready);
+ Instruction &IS = *IR.getInstruction();
+ NumIssuedOpcodes += IS.getNumMicroOps();
+
+ notifyReservedOrReleasedBuffers(IR, /* Reserved */ false);
+
+ notifyInstructionIssued(IR, Used);
+ if (IS.isExecuted()) {
+ notifyInstructionExecuted(IR);
+ // FIXME: add a buffer of executed instructions.
+ if (Error S = moveToTheNextStage(IR))
+ return S;
+ }
+
+ for (const InstRef &I : Pending)
+ notifyInstructionPending(I);
+
+ for (const InstRef &I : Ready)
+ notifyInstructionReady(I);
+ return ErrorSuccess();
+}
+
+Error ExecuteStage::issueReadyInstructions() {
+ InstRef IR = HWS.select();
+ while (IR) {
+ if (Error Err = issueInstruction(IR))
+ return Err;
+
+ // Select the next instruction to issue.
+ IR = HWS.select();
+ }
+
+ return ErrorSuccess();
+}
+
+Error ExecuteStage::cycleStart() {
+ SmallVector<ResourceRef, 8> Freed;
+ SmallVector<InstRef, 4> Executed;
+ SmallVector<InstRef, 4> Pending;
+ SmallVector<InstRef, 4> Ready;
+
+ HWS.cycleEvent(Freed, Executed, Pending, Ready);
+ NumDispatchedOpcodes = 0;
+ NumIssuedOpcodes = 0;
+
+ for (const ResourceRef &RR : Freed)
+ notifyResourceAvailable(RR);
+
+ for (InstRef &IR : Executed) {
+ notifyInstructionExecuted(IR);
+ // FIXME: add a buffer of executed instructions.
+ if (Error S = moveToTheNextStage(IR))
+ return S;
+ }
+
+ for (const InstRef &IR : Pending)
+ notifyInstructionPending(IR);
+
+ for (const InstRef &IR : Ready)
+ notifyInstructionReady(IR);
+
+ return issueReadyInstructions();
+}
+
+Error ExecuteStage::cycleEnd() {
+ if (!EnablePressureEvents)
+ return ErrorSuccess();
+
+ // Always conservatively report any backpressure events if the dispatch logic
+ // was stalled due to unavailable scheduler resources.
+ if (!HWS.hadTokenStall() && NumDispatchedOpcodes <= NumIssuedOpcodes)
+ return ErrorSuccess();
+
+ SmallVector<InstRef, 8> Insts;
+ uint64_t Mask = HWS.analyzeResourcePressure(Insts);
+ if (Mask) {
+ LLVM_DEBUG(dbgs() << "[E] Backpressure increased because of unavailable "
+ "pipeline resources: "
+ << format_hex(Mask, 16) << '\n');
+ HWPressureEvent Ev(HWPressureEvent::RESOURCES, Insts, Mask);
+ notifyEvent(Ev);
+ }
+
+ SmallVector<InstRef, 8> RegDeps;
+ SmallVector<InstRef, 8> MemDeps;
+ HWS.analyzeDataDependencies(RegDeps, MemDeps);
+ if (RegDeps.size()) {
+ LLVM_DEBUG(
+ dbgs() << "[E] Backpressure increased by register dependencies\n");
+ HWPressureEvent Ev(HWPressureEvent::REGISTER_DEPS, RegDeps);
+ notifyEvent(Ev);
+ }
+
+ if (MemDeps.size()) {
+ LLVM_DEBUG(dbgs() << "[E] Backpressure increased by memory dependencies\n");
+ HWPressureEvent Ev(HWPressureEvent::MEMORY_DEPS, MemDeps);
+ notifyEvent(Ev);
+ }
+
+ return ErrorSuccess();
+}
+
+#ifndef NDEBUG
+static void verifyInstructionEliminated(const InstRef &IR) {
+ const Instruction &Inst = *IR.getInstruction();
+ assert(Inst.isEliminated() && "Instruction was not eliminated!");
+ assert(Inst.isReady() && "Instruction in an inconsistent state!");
+
+ // Ensure that instructions eliminated at register renaming stage are in a
+ // consistent state.
+ const InstrDesc &Desc = Inst.getDesc();
+ assert(!Desc.MayLoad && !Desc.MayStore && "Cannot eliminate a memory op!");
+}
+#endif
+
+Error ExecuteStage::handleInstructionEliminated(InstRef &IR) {
+#ifndef NDEBUG
+ verifyInstructionEliminated(IR);
+#endif
+ notifyInstructionPending(IR);
+ notifyInstructionReady(IR);
+ notifyInstructionIssued(IR, {});
+ IR.getInstruction()->forceExecuted();
+ notifyInstructionExecuted(IR);
+ return moveToTheNextStage(IR);
+}
+
+// Schedule the instruction for execution on the hardware.
+Error ExecuteStage::execute(InstRef &IR) {
+ assert(isAvailable(IR) && "Scheduler is not available!");
+
+#ifndef NDEBUG
+ // Ensure that the HWS has not stored this instruction in its queues.
+ HWS.sanityCheck(IR);
+#endif
+
+ if (IR.getInstruction()->isEliminated())
+ return handleInstructionEliminated(IR);
+
+ // Reserve a slot in each buffered resource. Also, mark units with
+ // BufferSize=0 as reserved. Resources with a buffer size of zero will only
+  // be released after the instruction is issued, and all the ResourceCycles
+  // for those units have been consumed.
+ bool IsReadyInstruction = HWS.dispatch(IR);
+ const Instruction &Inst = *IR.getInstruction();
+ unsigned NumMicroOps = Inst.getNumMicroOps();
+ NumDispatchedOpcodes += NumMicroOps;
+ notifyReservedOrReleasedBuffers(IR, /* Reserved */ true);
+
+ if (!IsReadyInstruction) {
+ if (Inst.isPending())
+ notifyInstructionPending(IR);
+ return ErrorSuccess();
+ }
+
+ notifyInstructionPending(IR);
+
+ // If we did not return early, then the scheduler is ready for execution.
+ notifyInstructionReady(IR);
+
+ // If we cannot issue immediately, the HWS will add IR to its ready queue for
+ // execution later, so we must return early here.
+ if (!HWS.mustIssueImmediately(IR))
+ return ErrorSuccess();
+
+ // Issue IR to the underlying pipelines.
+ return issueInstruction(IR);
+}
+
+void ExecuteStage::notifyInstructionExecuted(const InstRef &IR) const {
+ LLVM_DEBUG(dbgs() << "[E] Instruction Executed: #" << IR << '\n');
+ notifyEvent<HWInstructionEvent>(
+ HWInstructionEvent(HWInstructionEvent::Executed, IR));
+}
+
+void ExecuteStage::notifyInstructionPending(const InstRef &IR) const {
+ LLVM_DEBUG(dbgs() << "[E] Instruction Pending: #" << IR << '\n');
+ notifyEvent<HWInstructionEvent>(
+ HWInstructionEvent(HWInstructionEvent::Pending, IR));
+}
+
+void ExecuteStage::notifyInstructionReady(const InstRef &IR) const {
+ LLVM_DEBUG(dbgs() << "[E] Instruction Ready: #" << IR << '\n');
+ notifyEvent<HWInstructionEvent>(
+ HWInstructionEvent(HWInstructionEvent::Ready, IR));
+}
+
+void ExecuteStage::notifyResourceAvailable(const ResourceRef &RR) const {
+ LLVM_DEBUG(dbgs() << "[E] Resource Available: [" << RR.first << '.'
+ << RR.second << "]\n");
+ for (HWEventListener *Listener : getListeners())
+ Listener->onResourceAvailable(RR);
+}
+
+void ExecuteStage::notifyInstructionIssued(
+ const InstRef &IR,
+ MutableArrayRef<std::pair<ResourceRef, ResourceCycles>> Used) const {
+ LLVM_DEBUG({
+ dbgs() << "[E] Instruction Issued: #" << IR << '\n';
+ for (const std::pair<ResourceRef, ResourceCycles> &Resource : Used) {
+ assert(Resource.second.getDenominator() == 1 && "Invalid cycles!");
+ dbgs() << "[E] Resource Used: [" << Resource.first.first << '.'
+ << Resource.first.second << "], ";
+ dbgs() << "cycles: " << Resource.second.getNumerator() << '\n';
+ }
+ });
+
+ // Replace resource masks with valid resource processor IDs.
+ for (std::pair<ResourceRef, ResourceCycles> &Use : Used)
+ Use.first.first = HWS.getResourceID(Use.first.first);
+
+ notifyEvent<HWInstructionEvent>(HWInstructionIssuedEvent(IR, Used));
+}
+
+void ExecuteStage::notifyReservedOrReleasedBuffers(const InstRef &IR,
+ bool Reserved) const {
+ uint64_t UsedBuffers = IR.getInstruction()->getDesc().UsedBuffers;
+ if (!UsedBuffers)
+ return;
+
+ SmallVector<unsigned, 4> BufferIDs(countPopulation(UsedBuffers), 0);
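+  // Each iteration below extracts the lowest set bit of UsedBuffers and
+  // converts that resource mask into a processor resource ID.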
+ for (unsigned I = 0, E = BufferIDs.size(); I < E; ++I) {
+ uint64_t CurrentBufferMask = UsedBuffers & (-UsedBuffers);
+ BufferIDs[I] = HWS.getResourceID(CurrentBufferMask);
+ UsedBuffers ^= CurrentBufferMask;
+ }
+
+ if (Reserved) {
+ for (HWEventListener *Listener : getListeners())
+ Listener->onReservedBuffers(IR, BufferIDs);
+ return;
+ }
+
+ for (HWEventListener *Listener : getListeners())
+ Listener->onReleasedBuffers(IR, BufferIDs);
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Stages/InstructionTables.cpp b/contrib/libs/llvm12/lib/MCA/Stages/InstructionTables.cpp
new file mode 100644
index 00000000000..93e36812306
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Stages/InstructionTables.cpp
@@ -0,0 +1,68 @@
+//===--------------------- InstructionTables.cpp ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements the method InstructionTables::execute().
+/// Method execute() computes a theoretical resource pressure distribution
+/// based on the information available in the scheduling model, without
+/// running the pipeline.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/InstructionTables.h"
+
+namespace llvm {
+namespace mca {
+
+Error InstructionTables::execute(InstRef &IR) {
+ const InstrDesc &Desc = IR.getInstruction()->getDesc();
+ UsedResources.clear();
+
+ // Identify the resources consumed by this instruction.
+  for (const std::pair<const uint64_t, ResourceUsage> &Resource :
+ Desc.Resources) {
+ // Skip zero-cycle resources (i.e., unused resources).
+ if (!Resource.second.size())
+ continue;
+ unsigned Cycles = Resource.second.size();
+ unsigned Index = std::distance(Masks.begin(), find(Masks, Resource.first));
+ const MCProcResourceDesc &ProcResource = *SM.getProcResource(Index);
+ unsigned NumUnits = ProcResource.NumUnits;
+ if (!ProcResource.SubUnitsIdxBegin) {
+ // The number of cycles consumed by each unit.
+ for (unsigned I = 0, E = NumUnits; I < E; ++I) {
+ ResourceRef ResourceUnit = std::make_pair(Index, 1U << I);
+ UsedResources.emplace_back(
+ std::make_pair(ResourceUnit, ResourceCycles(Cycles, NumUnits)));
+ }
+ continue;
+ }
+
+ // This is a group. Obtain the set of resources contained in this
+ // group. Some of these resources may implement multiple units.
+ // Uniformly distribute Cycles across all of the units.
+ for (unsigned I1 = 0; I1 < NumUnits; ++I1) {
+ unsigned SubUnitIdx = ProcResource.SubUnitsIdxBegin[I1];
+ const MCProcResourceDesc &SubUnit = *SM.getProcResource(SubUnitIdx);
+ // Compute the number of cycles consumed by each resource unit.
+ for (unsigned I2 = 0, E2 = SubUnit.NumUnits; I2 < E2; ++I2) {
+ ResourceRef ResourceUnit = std::make_pair(SubUnitIdx, 1U << I2);
+ UsedResources.emplace_back(std::make_pair(
+ ResourceUnit, ResourceCycles(Cycles, NumUnits * SubUnit.NumUnits)));
+ }
+ }
+ }
+
+ // Send a fake instruction issued event to all the views.
+ HWInstructionIssuedEvent Event(IR, UsedResources);
+ notifyEvent<HWInstructionIssuedEvent>(Event);
+ return ErrorSuccess();
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Stages/MicroOpQueueStage.cpp b/contrib/libs/llvm12/lib/MCA/Stages/MicroOpQueueStage.cpp
new file mode 100644
index 00000000000..cb3e4c6979a
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Stages/MicroOpQueueStage.cpp
@@ -0,0 +1,70 @@
+//===---------------------- MicroOpQueueStage.cpp ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the MicroOpQueueStage.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/MicroOpQueueStage.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+
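+// Forward instructions from the head of the circular buffer to the next
+// stage, for as long as that stage can accept them.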
+Error MicroOpQueueStage::moveInstructions() {
+ InstRef IR = Buffer[CurrentInstructionSlotIdx];
+ while (IR && checkNextStage(IR)) {
+ if (llvm::Error Val = moveToTheNextStage(IR))
+ return Val;
+
+ Buffer[CurrentInstructionSlotIdx].invalidate();
+ unsigned NormalizedOpcodes = getNormalizedOpcodes(IR);
+ CurrentInstructionSlotIdx += NormalizedOpcodes;
+ CurrentInstructionSlotIdx %= Buffer.size();
+ AvailableEntries += NormalizedOpcodes;
+ IR = Buffer[CurrentInstructionSlotIdx];
+ }
+
+ return llvm::ErrorSuccess();
+}
+
+MicroOpQueueStage::MicroOpQueueStage(unsigned Size, unsigned IPC,
+ bool ZeroLatencyStage)
+ : NextAvailableSlotIdx(0), CurrentInstructionSlotIdx(0), MaxIPC(IPC),
+ CurrentIPC(0), IsZeroLatencyStage(ZeroLatencyStage) {
+ Buffer.resize(Size ? Size : 1);
+ AvailableEntries = Buffer.size();
+}
+
+Error MicroOpQueueStage::execute(InstRef &IR) {
+ Buffer[NextAvailableSlotIdx] = IR;
+ unsigned NormalizedOpcodes = getNormalizedOpcodes(IR);
+ NextAvailableSlotIdx += NormalizedOpcodes;
+ NextAvailableSlotIdx %= Buffer.size();
+ AvailableEntries -= NormalizedOpcodes;
+ ++CurrentIPC;
+ return llvm::ErrorSuccess();
+}
+
+Error MicroOpQueueStage::cycleStart() {
+ CurrentIPC = 0;
+ if (!IsZeroLatencyStage)
+ return moveInstructions();
+ return llvm::ErrorSuccess();
+}
+
+Error MicroOpQueueStage::cycleEnd() {
+ if (IsZeroLatencyStage)
+ return moveInstructions();
+ return llvm::ErrorSuccess();
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Stages/RetireStage.cpp b/contrib/libs/llvm12/lib/MCA/Stages/RetireStage.cpp
new file mode 100644
index 00000000000..f792af748bc
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Stages/RetireStage.cpp
@@ -0,0 +1,65 @@
+//===---------------------- RetireStage.cpp ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines the retire stage of an instruction pipeline.
+/// The RetireStage represents the process logic that interacts with the
+/// simulated RetireControlUnit hardware.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/RetireStage.h"
+#include "llvm/MCA/HWEventListener.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "llvm-mca"
+
+namespace llvm {
+namespace mca {
+
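+// Retire executed instructions in program order from the head of the reorder
+// buffer. A MaxRetirePerCycle of zero means there is no retire limit.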
+llvm::Error RetireStage::cycleStart() {
+ if (RCU.isEmpty())
+ return llvm::ErrorSuccess();
+
+ const unsigned MaxRetirePerCycle = RCU.getMaxRetirePerCycle();
+ unsigned NumRetired = 0;
+ while (!RCU.isEmpty()) {
+ if (MaxRetirePerCycle != 0 && NumRetired == MaxRetirePerCycle)
+ break;
+ const RetireControlUnit::RUToken &Current = RCU.getCurrentToken();
+ if (!Current.Executed)
+ break;
+ notifyInstructionRetired(Current.IR);
+ RCU.consumeCurrentToken();
+ NumRetired++;
+ }
+
+ return llvm::ErrorSuccess();
+}
+
+llvm::Error RetireStage::execute(InstRef &IR) {
+ RCU.onInstructionExecuted(IR.getInstruction()->getRCUTokenID());
+ return llvm::ErrorSuccess();
+}
+
+void RetireStage::notifyInstructionRetired(const InstRef &IR) const {
+ LLVM_DEBUG(llvm::dbgs() << "[E] Instruction Retired: #" << IR << '\n');
+ llvm::SmallVector<unsigned, 4> FreedRegs(PRF.getNumRegisterFiles());
+ const Instruction &Inst = *IR.getInstruction();
+
+ // Release the load/store queue entries.
+ if (Inst.isMemOp())
+ LSU.onInstructionRetired(IR);
+
+ for (const WriteState &WS : Inst.getDefs())
+ PRF.removeRegisterWrite(WS, FreedRegs);
+ notifyEvent<HWInstructionEvent>(HWInstructionRetiredEvent(IR, FreedRegs));
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Stages/Stage.cpp b/contrib/libs/llvm12/lib/MCA/Stages/Stage.cpp
new file mode 100644
index 00000000000..ed512ac9711
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Stages/Stage.cpp
@@ -0,0 +1,28 @@
+//===---------------------- Stage.cpp ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines a stage.
+/// A chain of stages compose an instruction pipeline.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Stages/Stage.h"
+
+namespace llvm {
+namespace mca {
+
+// Pin the vtable here in the implementation file.
+Stage::~Stage() = default;
+
+void Stage::addListener(HWEventListener *Listener) {
+ Listeners.insert(Listener);
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/MCA/Support.cpp b/contrib/libs/llvm12/lib/MCA/Support.cpp
new file mode 100644
index 00000000000..ce1f0f6f211
--- /dev/null
+++ b/contrib/libs/llvm12/lib/MCA/Support.cpp
@@ -0,0 +1,110 @@
+//===--------------------- Support.cpp --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file implements a few helper functions used by various pipeline
+/// components.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MCA/Support.h"
+#include "llvm/MC/MCSchedule.h"
+
+namespace llvm {
+namespace mca {
+
+#define DEBUG_TYPE "llvm-mca"
+
+ResourceCycles &ResourceCycles::operator+=(const ResourceCycles &RHS) {
+ if (Denominator == RHS.Denominator)
+ Numerator += RHS.Numerator;
+ else {
+ // Create a common denominator for LHS and RHS by calculating the least
+ // common multiple from the GCD.
+ unsigned GCD = GreatestCommonDivisor64(Denominator, RHS.Denominator);
+ unsigned LCM = (Denominator * RHS.Denominator) / GCD;
+ unsigned LHSNumerator = Numerator * (LCM / Denominator);
+ unsigned RHSNumerator = RHS.Numerator * (LCM / RHS.Denominator);
+ Numerator = LHSNumerator + RHSNumerator;
+ Denominator = LCM;
+ }
+ return *this;
+}
+
+void computeProcResourceMasks(const MCSchedModel &SM,
+ MutableArrayRef<uint64_t> Masks) {
+ unsigned ProcResourceID = 0;
+
+ assert(Masks.size() == SM.getNumProcResourceKinds() &&
+ "Invalid number of elements");
+ // Resource at index 0 is the 'InvalidUnit'. Set an invalid mask for it.
+ Masks[0] = 0;
+
+ // Create a unique bitmask for every processor resource unit.
+ for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+ const MCProcResourceDesc &Desc = *SM.getProcResource(I);
+ if (Desc.SubUnitsIdxBegin)
+ continue;
+ Masks[I] = 1ULL << ProcResourceID;
+ ProcResourceID++;
+ }
+
+ // Create a unique bitmask for every processor resource group.
+ for (unsigned I = 1, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+ const MCProcResourceDesc &Desc = *SM.getProcResource(I);
+ if (!Desc.SubUnitsIdxBegin)
+ continue;
+ Masks[I] = 1ULL << ProcResourceID;
+ for (unsigned U = 0; U < Desc.NumUnits; ++U) {
+ uint64_t OtherMask = Masks[Desc.SubUnitsIdxBegin[U]];
+ Masks[I] |= OtherMask;
+ }
+ ProcResourceID++;
+ }
+
+#ifndef NDEBUG
+ LLVM_DEBUG(dbgs() << "\nProcessor resource masks:"
+ << "\n");
+ for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+ const MCProcResourceDesc &Desc = *SM.getProcResource(I);
+ LLVM_DEBUG(dbgs() << '[' << format_decimal(I,2) << "] " << " - "
+ << format_hex(Masks[I],16) << " - "
+ << Desc.Name << '\n');
+ }
+#endif
+}
+
+double computeBlockRThroughput(const MCSchedModel &SM, unsigned DispatchWidth,
+ unsigned NumMicroOps,
+ ArrayRef<unsigned> ProcResourceUsage) {
+ // The block throughput is bounded from above by the hardware dispatch
+ // throughput. That is because the DispatchWidth is an upper bound on the
+ // number of opcodes that can be part of a single dispatch group.
+ double Max = static_cast<double>(NumMicroOps) / DispatchWidth;
+
+ // The block throughput is also limited by the amount of hardware parallelism.
+ // The number of available resource units affects the resource pressure
+ // distribution, as well as how many blocks can be executed every cycle.
+ for (unsigned I = 0, E = SM.getNumProcResourceKinds(); I < E; ++I) {
+ unsigned ResourceCycles = ProcResourceUsage[I];
+ if (!ResourceCycles)
+ continue;
+
+ const MCProcResourceDesc &MCDesc = *SM.getProcResource(I);
+ double Throughput = static_cast<double>(ResourceCycles) / MCDesc.NumUnits;
+ Max = std::max(Max, Throughput);
+ }
+
+ // The block reciprocal throughput is computed as the MAX of:
+ // - (NumMicroOps / DispatchWidth)
+ // - (NumUnits / ResourceCycles) for every consumed processor resource.
+ return Max;
+}
+
+} // namespace mca
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/Object/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Object/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Object/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/ArchiveEmitter.cpp b/contrib/libs/llvm12/lib/ObjectYAML/ArchiveEmitter.cpp
new file mode 100644
index 00000000000..a0cf8fe360d
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/ArchiveEmitter.cpp
@@ -0,0 +1,51 @@
+//===- ArchiveEmitter.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/ArchiveYAML.h"
+#include "llvm/ObjectYAML/yaml2obj.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace ArchYAML;
+
+namespace llvm {
+namespace yaml {
+
+bool yaml2archive(ArchYAML::Archive &Doc, raw_ostream &Out, ErrorHandler EH) {
+ Out.write(Doc.Magic.data(), Doc.Magic.size());
+
+ if (Doc.Content) {
+ Doc.Content->writeAsBinary(Out);
+ return true;
+ }
+
+ if (!Doc.Members)
+ return true;
+
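+  // Archive member header fields have a fixed width; pad short values with
+  // trailing spaces.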
+ auto WriteField = [&](StringRef Field, uint8_t Size) {
+ Out.write(Field.data(), Field.size());
+ for (size_t I = Field.size(); I != Size; ++I)
+ Out.write(' ');
+ };
+
+ for (const Archive::Child &C : *Doc.Members) {
+ for (auto &P : C.Fields)
+ WriteField(P.second.Value, P.second.MaxLength);
+
+ if (C.Content)
+ C.Content->writeAsBinary(Out);
+ if (C.PaddingByte)
+ Out.write(*C.PaddingByte);
+ }
+
+ return true;
+}
+
+} // namespace yaml
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/ArchiveYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/ArchiveYAML.cpp
new file mode 100644
index 00000000000..d2ea1eaf521
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/ArchiveYAML.cpp
@@ -0,0 +1,58 @@
+//===- ArchiveYAML.cpp - Archive YAMLIO implementation --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of archives.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/ArchiveYAML.h"
+
+namespace llvm {
+
+namespace yaml {
+
+void MappingTraits<ArchYAML::Archive>::mapping(IO &IO, ArchYAML::Archive &A) {
+ assert(!IO.getContext() && "The IO context is initialized already");
+ IO.setContext(&A);
+ IO.mapTag("!Arch", true);
+ IO.mapOptional("Magic", A.Magic, "!<arch>\n");
+ IO.mapOptional("Members", A.Members);
+ IO.mapOptional("Content", A.Content);
+ IO.setContext(nullptr);
+}
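+
+// A minimal sketch of a document this mapping accepts (hypothetical input):
+//
+//   --- !Arch
+//   Members: []
+//
+// With no members and no content, only the default "!<arch>\n" magic is
+// written out.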
+
+std::string MappingTraits<ArchYAML::Archive>::validate(IO &,
+ ArchYAML::Archive &A) {
+ if (A.Members && A.Content)
+ return "\"Content\" and \"Members\" cannot be used together";
+ return "";
+}
+
+void MappingTraits<ArchYAML::Archive::Child>::mapping(
+ IO &IO, ArchYAML::Archive::Child &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+ for (auto &P : E.Fields)
+ IO.mapOptional(P.first.data(), P.second.Value, P.second.DefaultValue);
+ IO.mapOptional("Content", E.Content);
+ IO.mapOptional("PaddingByte", E.PaddingByte);
+}
+
+std::string
+MappingTraits<ArchYAML::Archive::Child>::validate(IO &,
+ ArchYAML::Archive::Child &C) {
+ for (auto &P : C.Fields)
+ if (P.second.Value.size() > P.second.MaxLength)
+ return ("the maximum length of \"" + P.first + "\" field is " +
+ Twine(P.second.MaxLength))
+ .str();
+ return "";
+}
+
+} // end namespace yaml
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/COFFEmitter.cpp b/contrib/libs/llvm12/lib/ObjectYAML/COFFEmitter.cpp
new file mode 100644
index 00000000000..06ce93affd3
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/COFFEmitter.cpp
@@ -0,0 +1,628 @@
+//===- yaml2coff - Convert YAML to a COFF object file ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// The COFF component of yaml2obj.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
+#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/ObjectYAML/ObjectYAML.h"
+#include "llvm/ObjectYAML/yaml2obj.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/WithColor.h"
+#include "llvm/Support/raw_ostream.h"
+#include <vector>
+
+using namespace llvm;
+
+namespace {
+
+/// This parses a YAML stream that represents a COFF object file.
+/// See docs/yaml2obj for the YAML schema.
+struct COFFParser {
+ COFFParser(COFFYAML::Object &Obj, yaml::ErrorHandler EH)
+ : Obj(Obj), SectionTableStart(0), SectionTableSize(0), ErrHandler(EH) {
+ // A COFF string table always starts with a 4 byte size field. Offsets into
+ // it include this size, so allocate it now.
+ StringTable.append(4, char(0));
+ }
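+
+  // Illustrative consequence: the first long name added via getStringIndex()
+  // lands at offset 4, immediately after the size prefix, and that offset is
+  // what gets referenced from section and symbol headers.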
+
+ bool useBigObj() const {
+ return static_cast<int32_t>(Obj.Sections.size()) >
+ COFF::MaxNumberOfSections16;
+ }
+
+ bool isPE() const { return Obj.OptionalHeader.hasValue(); }
+ bool is64Bit() const {
+ return Obj.Header.Machine == COFF::IMAGE_FILE_MACHINE_AMD64 ||
+ Obj.Header.Machine == COFF::IMAGE_FILE_MACHINE_ARM64;
+ }
+
+ uint32_t getFileAlignment() const {
+ return Obj.OptionalHeader->Header.FileAlignment;
+ }
+
+ unsigned getHeaderSize() const {
+ return useBigObj() ? COFF::Header32Size : COFF::Header16Size;
+ }
+
+ unsigned getSymbolSize() const {
+ return useBigObj() ? COFF::Symbol32Size : COFF::Symbol16Size;
+ }
+
+ bool parseSections() {
+ for (std::vector<COFFYAML::Section>::iterator i = Obj.Sections.begin(),
+ e = Obj.Sections.end();
+ i != e; ++i) {
+ COFFYAML::Section &Sec = *i;
+
+ // If the name is less than 8 bytes, store it in place, otherwise
+ // store it in the string table.
+ StringRef Name = Sec.Name;
+
+ if (Name.size() <= COFF::NameSize) {
+ std::copy(Name.begin(), Name.end(), Sec.Header.Name);
+ } else {
+ // Add string to the string table and format the index for output.
+ unsigned Index = getStringIndex(Name);
+ std::string str = utostr(Index);
+ if (str.size() > 7) {
+ ErrHandler("string table got too large");
+ return false;
+ }
+ Sec.Header.Name[0] = '/';
+ std::copy(str.begin(), str.end(), Sec.Header.Name + 1);
+ }
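+
+      // Illustrative: ".text" (5 bytes) fits the 8-byte Name field directly,
+      // while a longer name such as ".debug_pubnames" goes to the string
+      // table and the field becomes "/4" when it is the first string added.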
+
+ if (Sec.Alignment) {
+ if (Sec.Alignment > 8192) {
+ ErrHandler("section alignment is too large");
+ return false;
+ }
+ if (!isPowerOf2_32(Sec.Alignment)) {
+ ErrHandler("section alignment is not a power of 2");
+ return false;
+ }
+ Sec.Header.Characteristics |= (Log2_32(Sec.Alignment) + 1) << 20;
+ }
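+
+      // Example: Alignment = 16 yields Log2_32(16) + 1 = 5, so the section
+      // characteristics gain 5 << 20 == 0x00500000, i.e.
+      // IMAGE_SCN_ALIGN_16BYTES.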
+ }
+ return true;
+ }
+
+ bool parseSymbols() {
+ for (std::vector<COFFYAML::Symbol>::iterator i = Obj.Symbols.begin(),
+ e = Obj.Symbols.end();
+ i != e; ++i) {
+ COFFYAML::Symbol &Sym = *i;
+
+ // If the name is less than 8 bytes, store it in place, otherwise
+ // store it in the string table.
+ StringRef Name = Sym.Name;
+ if (Name.size() <= COFF::NameSize) {
+ std::copy(Name.begin(), Name.end(), Sym.Header.Name);
+ } else {
+ // Add string to the string table and format the index for output.
+ unsigned Index = getStringIndex(Name);
+ *reinterpret_cast<support::aligned_ulittle32_t *>(Sym.Header.Name + 4) =
+ Index;
+ }
+
+ Sym.Header.Type = Sym.SimpleType;
+ Sym.Header.Type |= Sym.ComplexType << COFF::SCT_COMPLEX_TYPE_SHIFT;
+ }
+ return true;
+ }
+
+ bool parse() {
+ if (!parseSections())
+ return false;
+ if (!parseSymbols())
+ return false;
+ return true;
+ }
+
+ unsigned getStringIndex(StringRef Str) {
+ StringMap<unsigned>::iterator i = StringTableMap.find(Str);
+ if (i == StringTableMap.end()) {
+ unsigned Index = StringTable.size();
+ StringTable.append(Str.begin(), Str.end());
+ StringTable.push_back(0);
+ StringTableMap[Str] = Index;
+ return Index;
+ }
+ return i->second;
+ }
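+
+  // Illustrative behavior of getStringIndex() above: on a fresh parser,
+  // getStringIndex("somelongname") returns 4 (right after the size prefix)
+  // and grows StringTable by the string length plus a NUL; repeated lookups
+  // are served from StringTableMap without growing the table.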
+
+ COFFYAML::Object &Obj;
+
+ codeview::StringsAndChecksums StringsAndChecksums;
+ BumpPtrAllocator Allocator;
+ StringMap<unsigned> StringTableMap;
+ std::string StringTable;
+ uint32_t SectionTableStart;
+ uint32_t SectionTableSize;
+
+ yaml::ErrorHandler ErrHandler;
+};
+
+enum { DOSStubSize = 128 };
+
+} // end anonymous namespace
+
+// Compute the size of the optional header and record it in the COFF header.
+// Returns false if the layout is not valid.
+static bool layoutOptionalHeader(COFFParser &CP) {
+ if (!CP.isPE())
+ return true;
+ unsigned PEHeaderSize = CP.is64Bit() ? sizeof(object::pe32plus_header)
+ : sizeof(object::pe32_header);
+ CP.Obj.Header.SizeOfOptionalHeader =
+ PEHeaderSize +
+ sizeof(object::data_directory) * (COFF::NUM_DATA_DIRECTORIES + 1);
+ return true;
+}
+
+static yaml::BinaryRef
+toDebugS(ArrayRef<CodeViewYAML::YAMLDebugSubsection> Subsections,
+ const codeview::StringsAndChecksums &SC, BumpPtrAllocator &Allocator) {
+ using namespace codeview;
+ ExitOnError Err("Error occurred writing .debug$S section");
+ auto CVSS =
+ Err(CodeViewYAML::toCodeViewSubsectionList(Allocator, Subsections, SC));
+
+ std::vector<DebugSubsectionRecordBuilder> Builders;
+ uint32_t Size = sizeof(uint32_t);
+ for (auto &SS : CVSS) {
+ DebugSubsectionRecordBuilder B(SS);
+ Size += B.calculateSerializedLength();
+ Builders.push_back(std::move(B));
+ }
+ uint8_t *Buffer = Allocator.Allocate<uint8_t>(Size);
+ MutableArrayRef<uint8_t> Output(Buffer, Size);
+ BinaryStreamWriter Writer(Output, support::little);
+
+ Err(Writer.writeInteger<uint32_t>(COFF::DEBUG_SECTION_MAGIC));
+ for (const auto &B : Builders) {
+ Err(B.commit(Writer, CodeViewContainer::ObjectFile));
+ }
+ return {Output};
+}
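+
+// Resulting .debug$S payload (sketch): a 4-byte little-endian
+// COFF::DEBUG_SECTION_MAGIC followed by each serialized subsection record in
+// the order the subsections were given.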
+
+// Take a COFFParser and assign addresses and sizes to everything. Returns
+// false if the layout is not valid.
+static bool layoutCOFF(COFFParser &CP) {
+ // The section table starts immediately after the header, including the
+ // optional header.
+ CP.SectionTableStart =
+ CP.getHeaderSize() + CP.Obj.Header.SizeOfOptionalHeader;
+ if (CP.isPE())
+ CP.SectionTableStart += DOSStubSize + sizeof(COFF::PEMagic);
+ CP.SectionTableSize = COFF::SectionSize * CP.Obj.Sections.size();
+
+ uint32_t CurrentSectionDataOffset =
+ CP.SectionTableStart + CP.SectionTableSize;
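+
+  // Worked example (illustrative, regular non-PE object): with two sections
+  // and no optional header, SectionTableStart = Header16Size (20) and
+  // SectionTableSize = 2 * COFF::SectionSize (40), so section data starts
+  // being laid out at file offset 100.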
+
+ for (COFFYAML::Section &S : CP.Obj.Sections) {
+ // We support specifying exactly one of SectionData or Subsections. So if
+ // there is already some SectionData, then we don't need to do any of this.
+ if (S.Name == ".debug$S" && S.SectionData.binary_size() == 0) {
+ CodeViewYAML::initializeStringsAndChecksums(S.DebugS,
+ CP.StringsAndChecksums);
+ if (CP.StringsAndChecksums.hasChecksums() &&
+ CP.StringsAndChecksums.hasStrings())
+ break;
+ }
+ }
+
+ // Assign each section data address consecutively.
+ for (COFFYAML::Section &S : CP.Obj.Sections) {
+ if (S.Name == ".debug$S") {
+ if (S.SectionData.binary_size() == 0) {
+ assert(CP.StringsAndChecksums.hasStrings() &&
+ "Object file does not have debug string table!");
+
+ S.SectionData =
+ toDebugS(S.DebugS, CP.StringsAndChecksums, CP.Allocator);
+ }
+ } else if (S.Name == ".debug$T") {
+ if (S.SectionData.binary_size() == 0)
+ S.SectionData = CodeViewYAML::toDebugT(S.DebugT, CP.Allocator, S.Name);
+ } else if (S.Name == ".debug$P") {
+ if (S.SectionData.binary_size() == 0)
+ S.SectionData = CodeViewYAML::toDebugT(S.DebugP, CP.Allocator, S.Name);
+ } else if (S.Name == ".debug$H") {
+ if (S.DebugH.hasValue() && S.SectionData.binary_size() == 0)
+ S.SectionData = CodeViewYAML::toDebugH(*S.DebugH, CP.Allocator);
+ }
+
+ if (S.SectionData.binary_size() > 0) {
+ CurrentSectionDataOffset = alignTo(CurrentSectionDataOffset,
+ CP.isPE() ? CP.getFileAlignment() : 4);
+ S.Header.SizeOfRawData = S.SectionData.binary_size();
+ if (CP.isPE())
+ S.Header.SizeOfRawData =
+ alignTo(S.Header.SizeOfRawData, CP.getFileAlignment());
+ S.Header.PointerToRawData = CurrentSectionDataOffset;
+ CurrentSectionDataOffset += S.Header.SizeOfRawData;
+ if (!S.Relocations.empty()) {
+ S.Header.PointerToRelocations = CurrentSectionDataOffset;
+ if (S.Header.Characteristics & COFF::IMAGE_SCN_LNK_NRELOC_OVFL) {
+ S.Header.NumberOfRelocations = 0xffff;
+ CurrentSectionDataOffset += COFF::RelocationSize;
+ } else
+ S.Header.NumberOfRelocations = S.Relocations.size();
+ CurrentSectionDataOffset += S.Relocations.size() * COFF::RelocationSize;
+ }
+ } else {
+ // Leave SizeOfRawData unaltered. For .bss sections in object files, it
+ // carries the section size.
+ S.Header.PointerToRawData = 0;
+ }
+ }
+
+ uint32_t SymbolTableStart = CurrentSectionDataOffset;
+
+ // Calculate number of symbols.
+ uint32_t NumberOfSymbols = 0;
+ for (std::vector<COFFYAML::Symbol>::iterator i = CP.Obj.Symbols.begin(),
+ e = CP.Obj.Symbols.end();
+ i != e; ++i) {
+ uint32_t NumberOfAuxSymbols = 0;
+ if (i->FunctionDefinition)
+ NumberOfAuxSymbols += 1;
+ if (i->bfAndefSymbol)
+ NumberOfAuxSymbols += 1;
+ if (i->WeakExternal)
+ NumberOfAuxSymbols += 1;
+ if (!i->File.empty())
+ NumberOfAuxSymbols +=
+ (i->File.size() + CP.getSymbolSize() - 1) / CP.getSymbolSize();
+ if (i->SectionDefinition)
+ NumberOfAuxSymbols += 1;
+ if (i->CLRToken)
+ NumberOfAuxSymbols += 1;
+ i->Header.NumberOfAuxSymbols = NumberOfAuxSymbols;
+ NumberOfSymbols += 1 + NumberOfAuxSymbols;
+ }
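+
+  // Illustrative: in a non-bigobj file (Symbol16Size == 18 bytes), a symbol
+  // with File == "a.c" needs (3 + 18 - 1) / 18 == 1 auxiliary record and so
+  // contributes 2 to NumberOfSymbols.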
+
+ // Store all the allocated start addresses in the header.
+ CP.Obj.Header.NumberOfSections = CP.Obj.Sections.size();
+ CP.Obj.Header.NumberOfSymbols = NumberOfSymbols;
+ if (NumberOfSymbols > 0 || CP.StringTable.size() > 4)
+ CP.Obj.Header.PointerToSymbolTable = SymbolTableStart;
+ else
+ CP.Obj.Header.PointerToSymbolTable = 0;
+
+ *reinterpret_cast<support::ulittle32_t *>(&CP.StringTable[0]) =
+ CP.StringTable.size();
+
+ return true;
+}
+
+template <typename value_type> struct binary_le_impl {
+ value_type Value;
+ binary_le_impl(value_type V) : Value(V) {}
+};
+
+template <typename value_type>
+raw_ostream &operator<<(raw_ostream &OS,
+ const binary_le_impl<value_type> &BLE) {
+ char Buffer[sizeof(BLE.Value)];
+ support::endian::write<value_type, support::little, support::unaligned>(
+ Buffer, BLE.Value);
+ OS.write(Buffer, sizeof(BLE.Value));
+ return OS;
+}
+
+template <typename value_type>
+binary_le_impl<value_type> binary_le(value_type V) {
+ return binary_le_impl<value_type>(V);
+}
+
+template <size_t NumBytes> struct zeros_impl {};
+
+template <size_t NumBytes>
+raw_ostream &operator<<(raw_ostream &OS, const zeros_impl<NumBytes> &) {
+ char Buffer[NumBytes];
+ memset(Buffer, 0, sizeof(Buffer));
+ OS.write(Buffer, sizeof(Buffer));
+ return OS;
+}
+
+template <typename T> zeros_impl<sizeof(T)> zeros(const T &) {
+ return zeros_impl<sizeof(T)>();
+}
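+
+// Usage sketch for the two helpers above:
+//   OS << binary_le(uint32_t(0x11223344)) << zeros(uint16_t(0));
+// writes the bytes 44 33 22 11 followed by two zero bytes, regardless of the
+// host byte order.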
+
+template <typename T>
+static uint32_t initializeOptionalHeader(COFFParser &CP, uint16_t Magic,
+ T Header) {
+ memset(Header, 0, sizeof(*Header));
+ Header->Magic = Magic;
+ Header->SectionAlignment = CP.Obj.OptionalHeader->Header.SectionAlignment;
+ Header->FileAlignment = CP.Obj.OptionalHeader->Header.FileAlignment;
+ uint32_t SizeOfCode = 0, SizeOfInitializedData = 0,
+ SizeOfUninitializedData = 0;
+ uint32_t SizeOfHeaders = alignTo(CP.SectionTableStart + CP.SectionTableSize,
+ Header->FileAlignment);
+ uint32_t SizeOfImage = alignTo(SizeOfHeaders, Header->SectionAlignment);
+ uint32_t BaseOfData = 0;
+ for (const COFFYAML::Section &S : CP.Obj.Sections) {
+ if (S.Header.Characteristics & COFF::IMAGE_SCN_CNT_CODE)
+ SizeOfCode += S.Header.SizeOfRawData;
+ if (S.Header.Characteristics & COFF::IMAGE_SCN_CNT_INITIALIZED_DATA)
+ SizeOfInitializedData += S.Header.SizeOfRawData;
+ if (S.Header.Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA)
+ SizeOfUninitializedData += S.Header.SizeOfRawData;
+ if (S.Name.equals(".text"))
+ Header->BaseOfCode = S.Header.VirtualAddress; // RVA
+ else if (S.Name.equals(".data"))
+ BaseOfData = S.Header.VirtualAddress; // RVA
+ if (S.Header.VirtualAddress)
+ SizeOfImage += alignTo(S.Header.VirtualSize, Header->SectionAlignment);
+ }
+ Header->SizeOfCode = SizeOfCode;
+ Header->SizeOfInitializedData = SizeOfInitializedData;
+ Header->SizeOfUninitializedData = SizeOfUninitializedData;
+ Header->AddressOfEntryPoint =
+ CP.Obj.OptionalHeader->Header.AddressOfEntryPoint; // RVA
+ Header->ImageBase = CP.Obj.OptionalHeader->Header.ImageBase;
+ Header->MajorOperatingSystemVersion =
+ CP.Obj.OptionalHeader->Header.MajorOperatingSystemVersion;
+ Header->MinorOperatingSystemVersion =
+ CP.Obj.OptionalHeader->Header.MinorOperatingSystemVersion;
+ Header->MajorImageVersion = CP.Obj.OptionalHeader->Header.MajorImageVersion;
+ Header->MinorImageVersion = CP.Obj.OptionalHeader->Header.MinorImageVersion;
+ Header->MajorSubsystemVersion =
+ CP.Obj.OptionalHeader->Header.MajorSubsystemVersion;
+ Header->MinorSubsystemVersion =
+ CP.Obj.OptionalHeader->Header.MinorSubsystemVersion;
+ Header->SizeOfImage = SizeOfImage;
+ Header->SizeOfHeaders = SizeOfHeaders;
+ Header->Subsystem = CP.Obj.OptionalHeader->Header.Subsystem;
+ Header->DLLCharacteristics = CP.Obj.OptionalHeader->Header.DLLCharacteristics;
+ Header->SizeOfStackReserve = CP.Obj.OptionalHeader->Header.SizeOfStackReserve;
+ Header->SizeOfStackCommit = CP.Obj.OptionalHeader->Header.SizeOfStackCommit;
+ Header->SizeOfHeapReserve = CP.Obj.OptionalHeader->Header.SizeOfHeapReserve;
+ Header->SizeOfHeapCommit = CP.Obj.OptionalHeader->Header.SizeOfHeapCommit;
+ Header->NumberOfRvaAndSize = COFF::NUM_DATA_DIRECTORIES + 1;
+ return BaseOfData;
+}
+
+static bool writeCOFF(COFFParser &CP, raw_ostream &OS) {
+ if (CP.isPE()) {
+ // PE files start with a DOS stub.
+ object::dos_header DH;
+ memset(&DH, 0, sizeof(DH));
+
+ // DOS EXEs start with "MZ" magic.
+ DH.Magic[0] = 'M';
+ DH.Magic[1] = 'Z';
+ // Initializing the AddressOfRelocationTable is strictly optional but
+ // mollifies certain tools which expect it to have a value greater than
+ // 0x40.
+ DH.AddressOfRelocationTable = sizeof(DH);
+ // This is the address of the PE signature.
+ DH.AddressOfNewExeHeader = DOSStubSize;
+
+ // Write out our DOS stub.
+ OS.write(reinterpret_cast<char *>(&DH), sizeof(DH));
+ // Write padding until we reach the position of where our PE signature
+ // should live.
+ OS.write_zeros(DOSStubSize - sizeof(DH));
+ // Write out the PE signature.
+ OS.write(COFF::PEMagic, sizeof(COFF::PEMagic));
+ }
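+
+  // At this point a PE image contains: a 128-byte DOS stub (DOSStubSize)
+  // whose AddressOfNewExeHeader points at offset 128, followed by the 4-byte
+  // "PE\0\0" signature; the COFF file header is emitted next.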
+ if (CP.useBigObj()) {
+ OS << binary_le(static_cast<uint16_t>(COFF::IMAGE_FILE_MACHINE_UNKNOWN))
+ << binary_le(static_cast<uint16_t>(0xffff))
+ << binary_le(
+ static_cast<uint16_t>(COFF::BigObjHeader::MinBigObjectVersion))
+ << binary_le(CP.Obj.Header.Machine)
+ << binary_le(CP.Obj.Header.TimeDateStamp);
+ OS.write(COFF::BigObjMagic, sizeof(COFF::BigObjMagic));
+ OS << zeros(uint32_t(0)) << zeros(uint32_t(0)) << zeros(uint32_t(0))
+ << zeros(uint32_t(0)) << binary_le(CP.Obj.Header.NumberOfSections)
+ << binary_le(CP.Obj.Header.PointerToSymbolTable)
+ << binary_le(CP.Obj.Header.NumberOfSymbols);
+ } else {
+ OS << binary_le(CP.Obj.Header.Machine)
+ << binary_le(static_cast<int16_t>(CP.Obj.Header.NumberOfSections))
+ << binary_le(CP.Obj.Header.TimeDateStamp)
+ << binary_le(CP.Obj.Header.PointerToSymbolTable)
+ << binary_le(CP.Obj.Header.NumberOfSymbols)
+ << binary_le(CP.Obj.Header.SizeOfOptionalHeader)
+ << binary_le(CP.Obj.Header.Characteristics);
+ }
+ if (CP.isPE()) {
+ if (CP.is64Bit()) {
+ object::pe32plus_header PEH;
+ initializeOptionalHeader(CP, COFF::PE32Header::PE32_PLUS, &PEH);
+ OS.write(reinterpret_cast<char *>(&PEH), sizeof(PEH));
+ } else {
+ object::pe32_header PEH;
+ uint32_t BaseOfData =
+ initializeOptionalHeader(CP, COFF::PE32Header::PE32, &PEH);
+ PEH.BaseOfData = BaseOfData;
+ OS.write(reinterpret_cast<char *>(&PEH), sizeof(PEH));
+ }
+ for (const Optional<COFF::DataDirectory> &DD :
+ CP.Obj.OptionalHeader->DataDirectories) {
+ if (!DD.hasValue()) {
+ OS << zeros(uint32_t(0));
+ OS << zeros(uint32_t(0));
+ } else {
+ OS << binary_le(DD->RelativeVirtualAddress);
+ OS << binary_le(DD->Size);
+ }
+ }
+ OS << zeros(uint32_t(0));
+ OS << zeros(uint32_t(0));
+ }
+
+ assert(OS.tell() == CP.SectionTableStart);
+ // Output section table.
+ for (std::vector<COFFYAML::Section>::iterator i = CP.Obj.Sections.begin(),
+ e = CP.Obj.Sections.end();
+ i != e; ++i) {
+ OS.write(i->Header.Name, COFF::NameSize);
+ OS << binary_le(i->Header.VirtualSize)
+ << binary_le(i->Header.VirtualAddress)
+ << binary_le(i->Header.SizeOfRawData)
+ << binary_le(i->Header.PointerToRawData)
+ << binary_le(i->Header.PointerToRelocations)
+ << binary_le(i->Header.PointerToLineNumbers)
+ << binary_le(i->Header.NumberOfRelocations)
+ << binary_le(i->Header.NumberOfLineNumbers)
+ << binary_le(i->Header.Characteristics);
+ }
+ assert(OS.tell() == CP.SectionTableStart + CP.SectionTableSize);
+
+ unsigned CurSymbol = 0;
+ StringMap<unsigned> SymbolTableIndexMap;
+ for (std::vector<COFFYAML::Symbol>::iterator I = CP.Obj.Symbols.begin(),
+ E = CP.Obj.Symbols.end();
+ I != E; ++I) {
+ SymbolTableIndexMap[I->Name] = CurSymbol;
+ CurSymbol += 1 + I->Header.NumberOfAuxSymbols;
+ }
+
+ // Output section data.
+ for (const COFFYAML::Section &S : CP.Obj.Sections) {
+ if (S.Header.SizeOfRawData == 0 || S.Header.PointerToRawData == 0)
+ continue;
+ assert(S.Header.PointerToRawData >= OS.tell());
+ OS.write_zeros(S.Header.PointerToRawData - OS.tell());
+ S.SectionData.writeAsBinary(OS);
+ assert(S.Header.SizeOfRawData >= S.SectionData.binary_size());
+ OS.write_zeros(S.Header.SizeOfRawData - S.SectionData.binary_size());
+ if (S.Header.Characteristics & COFF::IMAGE_SCN_LNK_NRELOC_OVFL)
+ OS << binary_le<uint32_t>(/*VirtualAddress=*/ S.Relocations.size() + 1)
+ << binary_le<uint32_t>(/*SymbolTableIndex=*/ 0)
+ << binary_le<uint16_t>(/*Type=*/ 0);
+ for (const COFFYAML::Relocation &R : S.Relocations) {
+ uint32_t SymbolTableIndex;
+ if (R.SymbolTableIndex) {
+ if (!R.SymbolName.empty())
+ WithColor::error()
+ << "Both SymbolName and SymbolTableIndex specified\n";
+ SymbolTableIndex = *R.SymbolTableIndex;
+ } else {
+ SymbolTableIndex = SymbolTableIndexMap[R.SymbolName];
+ }
+ OS << binary_le(R.VirtualAddress) << binary_le(SymbolTableIndex)
+ << binary_le(R.Type);
+ }
+ }
+
+ // Output symbol table.
+
+ for (std::vector<COFFYAML::Symbol>::const_iterator i = CP.Obj.Symbols.begin(),
+ e = CP.Obj.Symbols.end();
+ i != e; ++i) {
+ OS.write(i->Header.Name, COFF::NameSize);
+ OS << binary_le(i->Header.Value);
+ if (CP.useBigObj())
+ OS << binary_le(i->Header.SectionNumber);
+ else
+ OS << binary_le(static_cast<int16_t>(i->Header.SectionNumber));
+ OS << binary_le(i->Header.Type) << binary_le(i->Header.StorageClass)
+ << binary_le(i->Header.NumberOfAuxSymbols);
+
+ if (i->FunctionDefinition) {
+ OS << binary_le(i->FunctionDefinition->TagIndex)
+ << binary_le(i->FunctionDefinition->TotalSize)
+ << binary_le(i->FunctionDefinition->PointerToLinenumber)
+ << binary_le(i->FunctionDefinition->PointerToNextFunction)
+ << zeros(i->FunctionDefinition->unused);
+ OS.write_zeros(CP.getSymbolSize() - COFF::Symbol16Size);
+ }
+ if (i->bfAndefSymbol) {
+ OS << zeros(i->bfAndefSymbol->unused1)
+ << binary_le(i->bfAndefSymbol->Linenumber)
+ << zeros(i->bfAndefSymbol->unused2)
+ << binary_le(i->bfAndefSymbol->PointerToNextFunction)
+ << zeros(i->bfAndefSymbol->unused3);
+ OS.write_zeros(CP.getSymbolSize() - COFF::Symbol16Size);
+ }
+ if (i->WeakExternal) {
+ OS << binary_le(i->WeakExternal->TagIndex)
+ << binary_le(i->WeakExternal->Characteristics)
+ << zeros(i->WeakExternal->unused);
+ OS.write_zeros(CP.getSymbolSize() - COFF::Symbol16Size);
+ }
+ if (!i->File.empty()) {
+ unsigned SymbolSize = CP.getSymbolSize();
+ uint32_t NumberOfAuxRecords =
+ (i->File.size() + SymbolSize - 1) / SymbolSize;
+ uint32_t NumberOfAuxBytes = NumberOfAuxRecords * SymbolSize;
+ uint32_t NumZeros = NumberOfAuxBytes - i->File.size();
+ OS.write(i->File.data(), i->File.size());
+ OS.write_zeros(NumZeros);
+ }
+ if (i->SectionDefinition) {
+ OS << binary_le(i->SectionDefinition->Length)
+ << binary_le(i->SectionDefinition->NumberOfRelocations)
+ << binary_le(i->SectionDefinition->NumberOfLinenumbers)
+ << binary_le(i->SectionDefinition->CheckSum)
+ << binary_le(static_cast<int16_t>(i->SectionDefinition->Number))
+ << binary_le(i->SectionDefinition->Selection)
+ << zeros(i->SectionDefinition->unused)
+ << binary_le(static_cast<int16_t>(i->SectionDefinition->Number >> 16));
+ OS.write_zeros(CP.getSymbolSize() - COFF::Symbol16Size);
+ }
+ if (i->CLRToken) {
+ OS << binary_le(i->CLRToken->AuxType) << zeros(i->CLRToken->unused1)
+ << binary_le(i->CLRToken->SymbolTableIndex)
+ << zeros(i->CLRToken->unused2);
+ OS.write_zeros(CP.getSymbolSize() - COFF::Symbol16Size);
+ }
+ }
+
+ // Output string table.
+ if (CP.Obj.Header.PointerToSymbolTable)
+ OS.write(&CP.StringTable[0], CP.StringTable.size());
+ return true;
+}
+
+namespace llvm {
+namespace yaml {
+
+bool yaml2coff(llvm::COFFYAML::Object &Doc, raw_ostream &Out,
+ ErrorHandler ErrHandler) {
+ COFFParser CP(Doc, ErrHandler);
+ if (!CP.parse()) {
+ ErrHandler("failed to parse YAML file");
+ return false;
+ }
+
+ if (!layoutOptionalHeader(CP)) {
+ ErrHandler("failed to layout optional header for COFF file");
+ return false;
+ }
+
+ if (!layoutCOFF(CP)) {
+ ErrHandler("failed to layout COFF file");
+ return false;
+ }
+ if (!writeCOFF(CP, Out)) {
+ ErrHandler("failed to write COFF file");
+ return false;
+ }
+ return true;
+}
+
+} // namespace yaml
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/COFFYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/COFFYAML.cpp
new file mode 100644
index 00000000000..b5154467f11
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/COFFYAML.cpp
@@ -0,0 +1,600 @@
+//===- COFFYAML.cpp - COFF YAMLIO implementation --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of COFF.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/COFFYAML.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <cstdint>
+#include <cstring>
+
+#define ECase(X) IO.enumCase(Value, #X, COFF::X);
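+// For example, ECase(IMAGE_COMDAT_SELECT_ANY) expands to
+//   IO.enumCase(Value, "IMAGE_COMDAT_SELECT_ANY", COFF::IMAGE_COMDAT_SELECT_ANY);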
+
+namespace llvm {
+
+namespace COFFYAML {
+
+Section::Section() { memset(&Header, 0, sizeof(COFF::section)); }
+Symbol::Symbol() { memset(&Header, 0, sizeof(COFF::symbol)); }
+Object::Object() { memset(&Header, 0, sizeof(COFF::header)); }
+
+} // end namespace COFFYAML
+
+namespace yaml {
+
+void ScalarEnumerationTraits<COFFYAML::COMDATType>::enumeration(
+ IO &IO, COFFYAML::COMDATType &Value) {
+ IO.enumCase(Value, "0", 0);
+ ECase(IMAGE_COMDAT_SELECT_NODUPLICATES);
+ ECase(IMAGE_COMDAT_SELECT_ANY);
+ ECase(IMAGE_COMDAT_SELECT_SAME_SIZE);
+ ECase(IMAGE_COMDAT_SELECT_EXACT_MATCH);
+ ECase(IMAGE_COMDAT_SELECT_ASSOCIATIVE);
+ ECase(IMAGE_COMDAT_SELECT_LARGEST);
+ ECase(IMAGE_COMDAT_SELECT_NEWEST);
+}
+
+void
+ScalarEnumerationTraits<COFFYAML::WeakExternalCharacteristics>::enumeration(
+ IO &IO, COFFYAML::WeakExternalCharacteristics &Value) {
+ IO.enumCase(Value, "0", 0);
+ ECase(IMAGE_WEAK_EXTERN_SEARCH_NOLIBRARY);
+ ECase(IMAGE_WEAK_EXTERN_SEARCH_LIBRARY);
+ ECase(IMAGE_WEAK_EXTERN_SEARCH_ALIAS);
+}
+
+void ScalarEnumerationTraits<COFFYAML::AuxSymbolType>::enumeration(
+ IO &IO, COFFYAML::AuxSymbolType &Value) {
+ ECase(IMAGE_AUX_SYMBOL_TYPE_TOKEN_DEF);
+}
+
+void ScalarEnumerationTraits<COFF::MachineTypes>::enumeration(
+ IO &IO, COFF::MachineTypes &Value) {
+ ECase(IMAGE_FILE_MACHINE_UNKNOWN);
+ ECase(IMAGE_FILE_MACHINE_AM33);
+ ECase(IMAGE_FILE_MACHINE_AMD64);
+ ECase(IMAGE_FILE_MACHINE_ARM);
+ ECase(IMAGE_FILE_MACHINE_ARMNT);
+ ECase(IMAGE_FILE_MACHINE_ARM64);
+ ECase(IMAGE_FILE_MACHINE_EBC);
+ ECase(IMAGE_FILE_MACHINE_I386);
+ ECase(IMAGE_FILE_MACHINE_IA64);
+ ECase(IMAGE_FILE_MACHINE_M32R);
+ ECase(IMAGE_FILE_MACHINE_MIPS16);
+ ECase(IMAGE_FILE_MACHINE_MIPSFPU);
+ ECase(IMAGE_FILE_MACHINE_MIPSFPU16);
+ ECase(IMAGE_FILE_MACHINE_POWERPC);
+ ECase(IMAGE_FILE_MACHINE_POWERPCFP);
+ ECase(IMAGE_FILE_MACHINE_R4000);
+ ECase(IMAGE_FILE_MACHINE_SH3);
+ ECase(IMAGE_FILE_MACHINE_SH3DSP);
+ ECase(IMAGE_FILE_MACHINE_SH4);
+ ECase(IMAGE_FILE_MACHINE_SH5);
+ ECase(IMAGE_FILE_MACHINE_THUMB);
+ ECase(IMAGE_FILE_MACHINE_WCEMIPSV2);
+}
+
+void ScalarEnumerationTraits<COFF::SymbolBaseType>::enumeration(
+ IO &IO, COFF::SymbolBaseType &Value) {
+ ECase(IMAGE_SYM_TYPE_NULL);
+ ECase(IMAGE_SYM_TYPE_VOID);
+ ECase(IMAGE_SYM_TYPE_CHAR);
+ ECase(IMAGE_SYM_TYPE_SHORT);
+ ECase(IMAGE_SYM_TYPE_INT);
+ ECase(IMAGE_SYM_TYPE_LONG);
+ ECase(IMAGE_SYM_TYPE_FLOAT);
+ ECase(IMAGE_SYM_TYPE_DOUBLE);
+ ECase(IMAGE_SYM_TYPE_STRUCT);
+ ECase(IMAGE_SYM_TYPE_UNION);
+ ECase(IMAGE_SYM_TYPE_ENUM);
+ ECase(IMAGE_SYM_TYPE_MOE);
+ ECase(IMAGE_SYM_TYPE_BYTE);
+ ECase(IMAGE_SYM_TYPE_WORD);
+ ECase(IMAGE_SYM_TYPE_UINT);
+ ECase(IMAGE_SYM_TYPE_DWORD);
+}
+
+void ScalarEnumerationTraits<COFF::SymbolStorageClass>::enumeration(
+ IO &IO, COFF::SymbolStorageClass &Value) {
+ ECase(IMAGE_SYM_CLASS_END_OF_FUNCTION);
+ ECase(IMAGE_SYM_CLASS_NULL);
+ ECase(IMAGE_SYM_CLASS_AUTOMATIC);
+ ECase(IMAGE_SYM_CLASS_EXTERNAL);
+ ECase(IMAGE_SYM_CLASS_STATIC);
+ ECase(IMAGE_SYM_CLASS_REGISTER);
+ ECase(IMAGE_SYM_CLASS_EXTERNAL_DEF);
+ ECase(IMAGE_SYM_CLASS_LABEL);
+ ECase(IMAGE_SYM_CLASS_UNDEFINED_LABEL);
+ ECase(IMAGE_SYM_CLASS_MEMBER_OF_STRUCT);
+ ECase(IMAGE_SYM_CLASS_ARGUMENT);
+ ECase(IMAGE_SYM_CLASS_STRUCT_TAG);
+ ECase(IMAGE_SYM_CLASS_MEMBER_OF_UNION);
+ ECase(IMAGE_SYM_CLASS_UNION_TAG);
+ ECase(IMAGE_SYM_CLASS_TYPE_DEFINITION);
+ ECase(IMAGE_SYM_CLASS_UNDEFINED_STATIC);
+ ECase(IMAGE_SYM_CLASS_ENUM_TAG);
+ ECase(IMAGE_SYM_CLASS_MEMBER_OF_ENUM);
+ ECase(IMAGE_SYM_CLASS_REGISTER_PARAM);
+ ECase(IMAGE_SYM_CLASS_BIT_FIELD);
+ ECase(IMAGE_SYM_CLASS_BLOCK);
+ ECase(IMAGE_SYM_CLASS_FUNCTION);
+ ECase(IMAGE_SYM_CLASS_END_OF_STRUCT);
+ ECase(IMAGE_SYM_CLASS_FILE);
+ ECase(IMAGE_SYM_CLASS_SECTION);
+ ECase(IMAGE_SYM_CLASS_WEAK_EXTERNAL);
+ ECase(IMAGE_SYM_CLASS_CLR_TOKEN);
+}
+
+void ScalarEnumerationTraits<COFF::SymbolComplexType>::enumeration(
+ IO &IO, COFF::SymbolComplexType &Value) {
+ ECase(IMAGE_SYM_DTYPE_NULL);
+ ECase(IMAGE_SYM_DTYPE_POINTER);
+ ECase(IMAGE_SYM_DTYPE_FUNCTION);
+ ECase(IMAGE_SYM_DTYPE_ARRAY);
+}
+
+void ScalarEnumerationTraits<COFF::RelocationTypeI386>::enumeration(
+ IO &IO, COFF::RelocationTypeI386 &Value) {
+ ECase(IMAGE_REL_I386_ABSOLUTE);
+ ECase(IMAGE_REL_I386_DIR16);
+ ECase(IMAGE_REL_I386_REL16);
+ ECase(IMAGE_REL_I386_DIR32);
+ ECase(IMAGE_REL_I386_DIR32NB);
+ ECase(IMAGE_REL_I386_SEG12);
+ ECase(IMAGE_REL_I386_SECTION);
+ ECase(IMAGE_REL_I386_SECREL);
+ ECase(IMAGE_REL_I386_TOKEN);
+ ECase(IMAGE_REL_I386_SECREL7);
+ ECase(IMAGE_REL_I386_REL32);
+}
+
+void ScalarEnumerationTraits<COFF::RelocationTypeAMD64>::enumeration(
+ IO &IO, COFF::RelocationTypeAMD64 &Value) {
+ ECase(IMAGE_REL_AMD64_ABSOLUTE);
+ ECase(IMAGE_REL_AMD64_ADDR64);
+ ECase(IMAGE_REL_AMD64_ADDR32);
+ ECase(IMAGE_REL_AMD64_ADDR32NB);
+ ECase(IMAGE_REL_AMD64_REL32);
+ ECase(IMAGE_REL_AMD64_REL32_1);
+ ECase(IMAGE_REL_AMD64_REL32_2);
+ ECase(IMAGE_REL_AMD64_REL32_3);
+ ECase(IMAGE_REL_AMD64_REL32_4);
+ ECase(IMAGE_REL_AMD64_REL32_5);
+ ECase(IMAGE_REL_AMD64_SECTION);
+ ECase(IMAGE_REL_AMD64_SECREL);
+ ECase(IMAGE_REL_AMD64_SECREL7);
+ ECase(IMAGE_REL_AMD64_TOKEN);
+ ECase(IMAGE_REL_AMD64_SREL32);
+ ECase(IMAGE_REL_AMD64_PAIR);
+ ECase(IMAGE_REL_AMD64_SSPAN32);
+}
+
+void ScalarEnumerationTraits<COFF::RelocationTypesARM>::enumeration(
+ IO &IO, COFF::RelocationTypesARM &Value) {
+ ECase(IMAGE_REL_ARM_ABSOLUTE);
+ ECase(IMAGE_REL_ARM_ADDR32);
+ ECase(IMAGE_REL_ARM_ADDR32NB);
+ ECase(IMAGE_REL_ARM_BRANCH24);
+ ECase(IMAGE_REL_ARM_BRANCH11);
+ ECase(IMAGE_REL_ARM_TOKEN);
+ ECase(IMAGE_REL_ARM_BLX24);
+ ECase(IMAGE_REL_ARM_BLX11);
+ ECase(IMAGE_REL_ARM_REL32);
+ ECase(IMAGE_REL_ARM_SECTION);
+ ECase(IMAGE_REL_ARM_SECREL);
+ ECase(IMAGE_REL_ARM_MOV32A);
+ ECase(IMAGE_REL_ARM_MOV32T);
+ ECase(IMAGE_REL_ARM_BRANCH20T);
+ ECase(IMAGE_REL_ARM_BRANCH24T);
+ ECase(IMAGE_REL_ARM_BLX23T);
+ ECase(IMAGE_REL_ARM_PAIR);
+}
+
+void ScalarEnumerationTraits<COFF::RelocationTypesARM64>::enumeration(
+ IO &IO, COFF::RelocationTypesARM64 &Value) {
+ ECase(IMAGE_REL_ARM64_ABSOLUTE);
+ ECase(IMAGE_REL_ARM64_ADDR32);
+ ECase(IMAGE_REL_ARM64_ADDR32NB);
+ ECase(IMAGE_REL_ARM64_BRANCH26);
+ ECase(IMAGE_REL_ARM64_PAGEBASE_REL21);
+ ECase(IMAGE_REL_ARM64_REL21);
+ ECase(IMAGE_REL_ARM64_PAGEOFFSET_12A);
+ ECase(IMAGE_REL_ARM64_PAGEOFFSET_12L);
+ ECase(IMAGE_REL_ARM64_SECREL);
+ ECase(IMAGE_REL_ARM64_SECREL_LOW12A);
+ ECase(IMAGE_REL_ARM64_SECREL_HIGH12A);
+ ECase(IMAGE_REL_ARM64_SECREL_LOW12L);
+ ECase(IMAGE_REL_ARM64_TOKEN);
+ ECase(IMAGE_REL_ARM64_SECTION);
+ ECase(IMAGE_REL_ARM64_ADDR64);
+ ECase(IMAGE_REL_ARM64_BRANCH19);
+ ECase(IMAGE_REL_ARM64_BRANCH14);
+ ECase(IMAGE_REL_ARM64_REL32);
+}
+
+void ScalarEnumerationTraits<COFF::WindowsSubsystem>::enumeration(
+ IO &IO, COFF::WindowsSubsystem &Value) {
+ ECase(IMAGE_SUBSYSTEM_UNKNOWN);
+ ECase(IMAGE_SUBSYSTEM_NATIVE);
+ ECase(IMAGE_SUBSYSTEM_WINDOWS_GUI);
+ ECase(IMAGE_SUBSYSTEM_WINDOWS_CUI);
+ ECase(IMAGE_SUBSYSTEM_OS2_CUI);
+ ECase(IMAGE_SUBSYSTEM_POSIX_CUI);
+ ECase(IMAGE_SUBSYSTEM_NATIVE_WINDOWS);
+ ECase(IMAGE_SUBSYSTEM_WINDOWS_CE_GUI);
+ ECase(IMAGE_SUBSYSTEM_EFI_APPLICATION);
+ ECase(IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER);
+ ECase(IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER);
+ ECase(IMAGE_SUBSYSTEM_EFI_ROM);
+ ECase(IMAGE_SUBSYSTEM_XBOX);
+ ECase(IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION);
+}
+#undef ECase
+
+#define BCase(X) IO.bitSetCase(Value, #X, COFF::X);
+void ScalarBitSetTraits<COFF::Characteristics>::bitset(
+ IO &IO, COFF::Characteristics &Value) {
+ BCase(IMAGE_FILE_RELOCS_STRIPPED);
+ BCase(IMAGE_FILE_EXECUTABLE_IMAGE);
+ BCase(IMAGE_FILE_LINE_NUMS_STRIPPED);
+ BCase(IMAGE_FILE_LOCAL_SYMS_STRIPPED);
+ BCase(IMAGE_FILE_AGGRESSIVE_WS_TRIM);
+ BCase(IMAGE_FILE_LARGE_ADDRESS_AWARE);
+ BCase(IMAGE_FILE_BYTES_REVERSED_LO);
+ BCase(IMAGE_FILE_32BIT_MACHINE);
+ BCase(IMAGE_FILE_DEBUG_STRIPPED);
+ BCase(IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP);
+ BCase(IMAGE_FILE_NET_RUN_FROM_SWAP);
+ BCase(IMAGE_FILE_SYSTEM);
+ BCase(IMAGE_FILE_DLL);
+ BCase(IMAGE_FILE_UP_SYSTEM_ONLY);
+ BCase(IMAGE_FILE_BYTES_REVERSED_HI);
+}
+
+void ScalarBitSetTraits<COFF::SectionCharacteristics>::bitset(
+ IO &IO, COFF::SectionCharacteristics &Value) {
+ BCase(IMAGE_SCN_TYPE_NOLOAD);
+ BCase(IMAGE_SCN_TYPE_NO_PAD);
+ BCase(IMAGE_SCN_CNT_CODE);
+ BCase(IMAGE_SCN_CNT_INITIALIZED_DATA);
+ BCase(IMAGE_SCN_CNT_UNINITIALIZED_DATA);
+ BCase(IMAGE_SCN_LNK_OTHER);
+ BCase(IMAGE_SCN_LNK_INFO);
+ BCase(IMAGE_SCN_LNK_REMOVE);
+ BCase(IMAGE_SCN_LNK_COMDAT);
+ BCase(IMAGE_SCN_GPREL);
+ BCase(IMAGE_SCN_MEM_PURGEABLE);
+ BCase(IMAGE_SCN_MEM_16BIT);
+ BCase(IMAGE_SCN_MEM_LOCKED);
+ BCase(IMAGE_SCN_MEM_PRELOAD);
+ BCase(IMAGE_SCN_LNK_NRELOC_OVFL);
+ BCase(IMAGE_SCN_MEM_DISCARDABLE);
+ BCase(IMAGE_SCN_MEM_NOT_CACHED);
+ BCase(IMAGE_SCN_MEM_NOT_PAGED);
+ BCase(IMAGE_SCN_MEM_SHARED);
+ BCase(IMAGE_SCN_MEM_EXECUTE);
+ BCase(IMAGE_SCN_MEM_READ);
+ BCase(IMAGE_SCN_MEM_WRITE);
+}
+
+void ScalarBitSetTraits<COFF::DLLCharacteristics>::bitset(
+ IO &IO, COFF::DLLCharacteristics &Value) {
+ BCase(IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA);
+ BCase(IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE);
+ BCase(IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY);
+ BCase(IMAGE_DLL_CHARACTERISTICS_NX_COMPAT);
+ BCase(IMAGE_DLL_CHARACTERISTICS_NO_ISOLATION);
+ BCase(IMAGE_DLL_CHARACTERISTICS_NO_SEH);
+ BCase(IMAGE_DLL_CHARACTERISTICS_NO_BIND);
+ BCase(IMAGE_DLL_CHARACTERISTICS_APPCONTAINER);
+ BCase(IMAGE_DLL_CHARACTERISTICS_WDM_DRIVER);
+ BCase(IMAGE_DLL_CHARACTERISTICS_GUARD_CF);
+ BCase(IMAGE_DLL_CHARACTERISTICS_TERMINAL_SERVER_AWARE);
+}
+#undef BCase
+
+namespace {
+
+struct NSectionSelectionType {
+ NSectionSelectionType(IO &)
+ : SelectionType(COFFYAML::COMDATType(0)) {}
+ NSectionSelectionType(IO &, uint8_t C)
+ : SelectionType(COFFYAML::COMDATType(C)) {}
+
+ uint8_t denormalize(IO &) { return SelectionType; }
+
+ COFFYAML::COMDATType SelectionType;
+};
+
+struct NWeakExternalCharacteristics {
+ NWeakExternalCharacteristics(IO &)
+ : Characteristics(COFFYAML::WeakExternalCharacteristics(0)) {}
+ NWeakExternalCharacteristics(IO &, uint32_t C)
+ : Characteristics(COFFYAML::WeakExternalCharacteristics(C)) {}
+
+ uint32_t denormalize(IO &) { return Characteristics; }
+
+ COFFYAML::WeakExternalCharacteristics Characteristics;
+};
+
+struct NSectionCharacteristics {
+ NSectionCharacteristics(IO &)
+ : Characteristics(COFF::SectionCharacteristics(0)) {}
+ NSectionCharacteristics(IO &, uint32_t C)
+ : Characteristics(COFF::SectionCharacteristics(C)) {}
+
+ uint32_t denormalize(IO &) { return Characteristics; }
+
+ COFF::SectionCharacteristics Characteristics;
+};
+
+struct NAuxTokenType {
+ NAuxTokenType(IO &)
+ : AuxType(COFFYAML::AuxSymbolType(0)) {}
+ NAuxTokenType(IO &, uint8_t C)
+ : AuxType(COFFYAML::AuxSymbolType(C)) {}
+
+ uint32_t denormalize(IO &) { return AuxType; }
+
+ COFFYAML::AuxSymbolType AuxType;
+};
+
+struct NStorageClass {
+ NStorageClass(IO &) : StorageClass(COFF::SymbolStorageClass(0)) {}
+ NStorageClass(IO &, uint8_t S) : StorageClass(COFF::SymbolStorageClass(S)) {}
+
+ uint8_t denormalize(IO &) { return StorageClass; }
+
+ COFF::SymbolStorageClass StorageClass;
+};
+
+struct NMachine {
+ NMachine(IO &) : Machine(COFF::MachineTypes(0)) {}
+ NMachine(IO &, uint16_t M) : Machine(COFF::MachineTypes(M)) {}
+
+ uint16_t denormalize(IO &) { return Machine; }
+
+ COFF::MachineTypes Machine;
+};
+
+struct NHeaderCharacteristics {
+ NHeaderCharacteristics(IO &) : Characteristics(COFF::Characteristics(0)) {}
+ NHeaderCharacteristics(IO &, uint16_t C)
+ : Characteristics(COFF::Characteristics(C)) {}
+
+ uint16_t denormalize(IO &) { return Characteristics; }
+
+ COFF::Characteristics Characteristics;
+};
+
+template <typename RelocType>
+struct NType {
+ NType(IO &) : Type(RelocType(0)) {}
+ NType(IO &, uint16_t T) : Type(RelocType(T)) {}
+
+ uint16_t denormalize(IO &) { return Type; }
+
+ RelocType Type;
+};
+
+struct NWindowsSubsystem {
+ NWindowsSubsystem(IO &) : Subsystem(COFF::WindowsSubsystem(0)) {}
+ NWindowsSubsystem(IO &, uint16_t C) : Subsystem(COFF::WindowsSubsystem(C)) {}
+
+ uint16_t denormalize(IO &) { return Subsystem; }
+
+ COFF::WindowsSubsystem Subsystem;
+};
+
+struct NDLLCharacteristics {
+ NDLLCharacteristics(IO &) : Characteristics(COFF::DLLCharacteristics(0)) {}
+ NDLLCharacteristics(IO &, uint16_t C)
+ : Characteristics(COFF::DLLCharacteristics(C)) {}
+
+ uint16_t denormalize(IO &) { return Characteristics; }
+
+ COFF::DLLCharacteristics Characteristics;
+};
+
+} // end anonymous namespace
+
+void MappingTraits<COFFYAML::Relocation>::mapping(IO &IO,
+ COFFYAML::Relocation &Rel) {
+ IO.mapRequired("VirtualAddress", Rel.VirtualAddress);
+ IO.mapOptional("SymbolName", Rel.SymbolName, StringRef());
+ IO.mapOptional("SymbolTableIndex", Rel.SymbolTableIndex);
+
+ COFF::header &H = *static_cast<COFF::header *>(IO.getContext());
+ if (H.Machine == COFF::IMAGE_FILE_MACHINE_I386) {
+ MappingNormalization<NType<COFF::RelocationTypeI386>, uint16_t> NT(
+ IO, Rel.Type);
+ IO.mapRequired("Type", NT->Type);
+ } else if (H.Machine == COFF::IMAGE_FILE_MACHINE_AMD64) {
+ MappingNormalization<NType<COFF::RelocationTypeAMD64>, uint16_t> NT(
+ IO, Rel.Type);
+ IO.mapRequired("Type", NT->Type);
+ } else if (H.Machine == COFF::IMAGE_FILE_MACHINE_ARMNT) {
+ MappingNormalization<NType<COFF::RelocationTypesARM>, uint16_t> NT(
+ IO, Rel.Type);
+ IO.mapRequired("Type", NT->Type);
+ } else if (H.Machine == COFF::IMAGE_FILE_MACHINE_ARM64) {
+ MappingNormalization<NType<COFF::RelocationTypesARM64>, uint16_t> NT(
+ IO, Rel.Type);
+ IO.mapRequired("Type", NT->Type);
+ } else {
+ IO.mapRequired("Type", Rel.Type);
+ }
+}
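+
+// A hypothetical entry this mapping accepts for an AMD64 object:
+//
+//   - VirtualAddress: 4
+//     SymbolName:     .text
+//     Type:           IMAGE_REL_AMD64_REL32
+//
+// The Type name is interpreted against the machine-specific enumeration
+// selected via the COFF header stored in the IO context.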
+
+void MappingTraits<COFF::DataDirectory>::mapping(IO &IO,
+ COFF::DataDirectory &DD) {
+ IO.mapRequired("RelativeVirtualAddress", DD.RelativeVirtualAddress);
+ IO.mapRequired("Size", DD.Size);
+}
+
+void MappingTraits<COFFYAML::PEHeader>::mapping(IO &IO,
+ COFFYAML::PEHeader &PH) {
+ MappingNormalization<NWindowsSubsystem, uint16_t> NWS(IO,
+ PH.Header.Subsystem);
+ MappingNormalization<NDLLCharacteristics, uint16_t> NDC(
+ IO, PH.Header.DLLCharacteristics);
+
+ IO.mapRequired("AddressOfEntryPoint", PH.Header.AddressOfEntryPoint);
+ IO.mapRequired("ImageBase", PH.Header.ImageBase);
+ IO.mapRequired("SectionAlignment", PH.Header.SectionAlignment);
+ IO.mapRequired("FileAlignment", PH.Header.FileAlignment);
+ IO.mapRequired("MajorOperatingSystemVersion",
+ PH.Header.MajorOperatingSystemVersion);
+ IO.mapRequired("MinorOperatingSystemVersion",
+ PH.Header.MinorOperatingSystemVersion);
+ IO.mapRequired("MajorImageVersion", PH.Header.MajorImageVersion);
+ IO.mapRequired("MinorImageVersion", PH.Header.MinorImageVersion);
+ IO.mapRequired("MajorSubsystemVersion", PH.Header.MajorSubsystemVersion);
+ IO.mapRequired("MinorSubsystemVersion", PH.Header.MinorSubsystemVersion);
+ IO.mapRequired("Subsystem", NWS->Subsystem);
+ IO.mapRequired("DLLCharacteristics", NDC->Characteristics);
+ IO.mapRequired("SizeOfStackReserve", PH.Header.SizeOfStackReserve);
+ IO.mapRequired("SizeOfStackCommit", PH.Header.SizeOfStackCommit);
+ IO.mapRequired("SizeOfHeapReserve", PH.Header.SizeOfHeapReserve);
+ IO.mapRequired("SizeOfHeapCommit", PH.Header.SizeOfHeapCommit);
+
+ IO.mapOptional("ExportTable", PH.DataDirectories[COFF::EXPORT_TABLE]);
+ IO.mapOptional("ImportTable", PH.DataDirectories[COFF::IMPORT_TABLE]);
+ IO.mapOptional("ResourceTable", PH.DataDirectories[COFF::RESOURCE_TABLE]);
+ IO.mapOptional("ExceptionTable", PH.DataDirectories[COFF::EXCEPTION_TABLE]);
+ IO.mapOptional("CertificateTable", PH.DataDirectories[COFF::CERTIFICATE_TABLE]);
+ IO.mapOptional("BaseRelocationTable",
+ PH.DataDirectories[COFF::BASE_RELOCATION_TABLE]);
+ IO.mapOptional("Debug", PH.DataDirectories[COFF::DEBUG_DIRECTORY]);
+ IO.mapOptional("Architecture", PH.DataDirectories[COFF::ARCHITECTURE]);
+ IO.mapOptional("GlobalPtr", PH.DataDirectories[COFF::GLOBAL_PTR]);
+ IO.mapOptional("TlsTable", PH.DataDirectories[COFF::TLS_TABLE]);
+ IO.mapOptional("LoadConfigTable",
+ PH.DataDirectories[COFF::LOAD_CONFIG_TABLE]);
+ IO.mapOptional("BoundImport", PH.DataDirectories[COFF::BOUND_IMPORT]);
+ IO.mapOptional("IAT", PH.DataDirectories[COFF::IAT]);
+ IO.mapOptional("DelayImportDescriptor",
+ PH.DataDirectories[COFF::DELAY_IMPORT_DESCRIPTOR]);
+ IO.mapOptional("ClrRuntimeHeader",
+ PH.DataDirectories[COFF::CLR_RUNTIME_HEADER]);
+}
+
+void MappingTraits<COFF::header>::mapping(IO &IO, COFF::header &H) {
+ MappingNormalization<NMachine, uint16_t> NM(IO, H.Machine);
+ MappingNormalization<NHeaderCharacteristics, uint16_t> NC(IO,
+ H.Characteristics);
+
+ IO.mapRequired("Machine", NM->Machine);
+ IO.mapOptional("Characteristics", NC->Characteristics);
+ IO.setContext(static_cast<void *>(&H));
+}
+
+void MappingTraits<COFF::AuxiliaryFunctionDefinition>::mapping(
+ IO &IO, COFF::AuxiliaryFunctionDefinition &AFD) {
+ IO.mapRequired("TagIndex", AFD.TagIndex);
+ IO.mapRequired("TotalSize", AFD.TotalSize);
+ IO.mapRequired("PointerToLinenumber", AFD.PointerToLinenumber);
+ IO.mapRequired("PointerToNextFunction", AFD.PointerToNextFunction);
+}
+
+void MappingTraits<COFF::AuxiliarybfAndefSymbol>::mapping(
+ IO &IO, COFF::AuxiliarybfAndefSymbol &AAS) {
+ IO.mapRequired("Linenumber", AAS.Linenumber);
+ IO.mapRequired("PointerToNextFunction", AAS.PointerToNextFunction);
+}
+
+void MappingTraits<COFF::AuxiliaryWeakExternal>::mapping(
+ IO &IO, COFF::AuxiliaryWeakExternal &AWE) {
+ MappingNormalization<NWeakExternalCharacteristics, uint32_t> NWEC(
+ IO, AWE.Characteristics);
+ IO.mapRequired("TagIndex", AWE.TagIndex);
+ IO.mapRequired("Characteristics", NWEC->Characteristics);
+}
+
+void MappingTraits<COFF::AuxiliarySectionDefinition>::mapping(
+ IO &IO, COFF::AuxiliarySectionDefinition &ASD) {
+ MappingNormalization<NSectionSelectionType, uint8_t> NSST(
+ IO, ASD.Selection);
+
+ IO.mapRequired("Length", ASD.Length);
+ IO.mapRequired("NumberOfRelocations", ASD.NumberOfRelocations);
+ IO.mapRequired("NumberOfLinenumbers", ASD.NumberOfLinenumbers);
+ IO.mapRequired("CheckSum", ASD.CheckSum);
+ IO.mapRequired("Number", ASD.Number);
+ IO.mapOptional("Selection", NSST->SelectionType, COFFYAML::COMDATType(0));
+}
+
+void MappingTraits<COFF::AuxiliaryCLRToken>::mapping(
+ IO &IO, COFF::AuxiliaryCLRToken &ACT) {
+ MappingNormalization<NAuxTokenType, uint8_t> NATT(IO, ACT.AuxType);
+ IO.mapRequired("AuxType", NATT->AuxType);
+ IO.mapRequired("SymbolTableIndex", ACT.SymbolTableIndex);
+}
+
+void MappingTraits<COFFYAML::Symbol>::mapping(IO &IO, COFFYAML::Symbol &S) {
+ MappingNormalization<NStorageClass, uint8_t> NS(IO, S.Header.StorageClass);
+
+ IO.mapRequired("Name", S.Name);
+ IO.mapRequired("Value", S.Header.Value);
+ IO.mapRequired("SectionNumber", S.Header.SectionNumber);
+ IO.mapRequired("SimpleType", S.SimpleType);
+ IO.mapRequired("ComplexType", S.ComplexType);
+ IO.mapRequired("StorageClass", NS->StorageClass);
+ IO.mapOptional("FunctionDefinition", S.FunctionDefinition);
+ IO.mapOptional("bfAndefSymbol", S.bfAndefSymbol);
+ IO.mapOptional("WeakExternal", S.WeakExternal);
+ IO.mapOptional("File", S.File, StringRef());
+ IO.mapOptional("SectionDefinition", S.SectionDefinition);
+ IO.mapOptional("CLRToken", S.CLRToken);
+}
+
+void MappingTraits<COFFYAML::Section>::mapping(IO &IO, COFFYAML::Section &Sec) {
+ MappingNormalization<NSectionCharacteristics, uint32_t> NC(
+ IO, Sec.Header.Characteristics);
+ IO.mapRequired("Name", Sec.Name);
+ IO.mapRequired("Characteristics", NC->Characteristics);
+ IO.mapOptional("VirtualAddress", Sec.Header.VirtualAddress, 0U);
+ IO.mapOptional("VirtualSize", Sec.Header.VirtualSize, 0U);
+ IO.mapOptional("Alignment", Sec.Alignment, 0U);
+
+  // If this is a .debug$S, .debug$T, .debug$P, or .debug$H section, parse the
+ // semantic representation of the symbols/types. If it is any other kind
+ // of section, just deal in raw bytes.
+ IO.mapOptional("SectionData", Sec.SectionData);
+ if (Sec.Name == ".debug$S")
+ IO.mapOptional("Subsections", Sec.DebugS);
+ else if (Sec.Name == ".debug$T")
+ IO.mapOptional("Types", Sec.DebugT);
+ else if (Sec.Name == ".debug$P")
+ IO.mapOptional("PrecompTypes", Sec.DebugP);
+ else if (Sec.Name == ".debug$H")
+ IO.mapOptional("GlobalHashes", Sec.DebugH);
+
+ // Uninitialized sections, such as .bss, typically have no data, but the size
+ // is carried in SizeOfRawData, even though PointerToRawData is zero.
+ if (Sec.SectionData.binary_size() == 0 &&
+ NC->Characteristics & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA)
+ IO.mapOptional("SizeOfRawData", Sec.Header.SizeOfRawData);
+
+ IO.mapOptional("Relocations", Sec.Relocations);
+}
+
+void MappingTraits<COFFYAML::Object>::mapping(IO &IO, COFFYAML::Object &Obj) {
+ IO.mapTag("!COFF", true);
+ IO.mapOptional("OptionalHeader", Obj.OptionalHeader);
+ IO.mapRequired("header", Obj.Header);
+ IO.mapRequired("sections", Obj.Sections);
+ IO.mapRequired("symbols", Obj.Symbols);
+}
+
+} // end namespace yaml
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLDebugSections.cpp b/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLDebugSections.cpp
new file mode 100644
index 00000000000..02f053bb0e0
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLDebugSections.cpp
@@ -0,0 +1,957 @@
+//===- CodeViewYAMLDebugSections.cpp - CodeView YAMLIO debug sections -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of CodeView
+// Debug Info.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/CodeViewYAMLDebugSections.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
+#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugCrossExSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugCrossImpSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugLinesSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h"
+#include "llvm/DebugInfo/CodeView/DebugSymbolRVASubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSymbolsSubsection.h"
+#include "llvm/DebugInfo/CodeView/Line.h"
+#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/ObjectYAML/CodeViewYAMLSymbols.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <tuple>
+#include <vector>
+
+using namespace llvm;
+using namespace llvm::codeview;
+using namespace llvm::CodeViewYAML;
+using namespace llvm::CodeViewYAML::detail;
+using namespace llvm::yaml;
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(SourceFileChecksumEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(SourceLineEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(SourceColumnEntry)
+LLVM_YAML_IS_SEQUENCE_VECTOR(SourceLineBlock)
+LLVM_YAML_IS_SEQUENCE_VECTOR(SourceLineInfo)
+LLVM_YAML_IS_SEQUENCE_VECTOR(InlineeSite)
+LLVM_YAML_IS_SEQUENCE_VECTOR(InlineeInfo)
+LLVM_YAML_IS_SEQUENCE_VECTOR(CrossModuleExport)
+LLVM_YAML_IS_SEQUENCE_VECTOR(YAMLCrossModuleImport)
+LLVM_YAML_IS_SEQUENCE_VECTOR(YAMLFrameData)
+
+LLVM_YAML_DECLARE_SCALAR_TRAITS(HexFormattedString, QuotingType::None)
+LLVM_YAML_DECLARE_ENUM_TRAITS(DebugSubsectionKind)
+LLVM_YAML_DECLARE_ENUM_TRAITS(FileChecksumKind)
+LLVM_YAML_DECLARE_BITSET_TRAITS(LineFlags)
+
+LLVM_YAML_DECLARE_MAPPING_TRAITS(CrossModuleExport)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(YAMLFrameData)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(YAMLCrossModuleImport)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(CrossModuleImportItem)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(SourceLineEntry)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(SourceColumnEntry)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(SourceFileChecksumEntry)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(SourceLineBlock)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(InlineeSite)
+
+namespace llvm {
+namespace CodeViewYAML {
+namespace detail {
+
+struct YAMLSubsectionBase {
+ explicit YAMLSubsectionBase(DebugSubsectionKind Kind) : Kind(Kind) {}
+ virtual ~YAMLSubsectionBase() = default;
+
+ virtual void map(IO &IO) = 0;
+ virtual std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const = 0;
+
+ DebugSubsectionKind Kind;
+};
+
+} // end namespace detail
+} // end namespace CodeViewYAML
+} // end namespace llvm
+
+namespace {
+
+struct YAMLChecksumsSubsection : public YAMLSubsectionBase {
+ YAMLChecksumsSubsection()
+ : YAMLSubsectionBase(DebugSubsectionKind::FileChecksums) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLChecksumsSubsection>>
+ fromCodeViewSubsection(const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &FC);
+
+ std::vector<SourceFileChecksumEntry> Checksums;
+};
+
+struct YAMLLinesSubsection : public YAMLSubsectionBase {
+ YAMLLinesSubsection() : YAMLSubsectionBase(DebugSubsectionKind::Lines) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLLinesSubsection>>
+ fromCodeViewSubsection(const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &Checksums,
+ const DebugLinesSubsectionRef &Lines);
+
+ SourceLineInfo Lines;
+};
+
+struct YAMLInlineeLinesSubsection : public YAMLSubsectionBase {
+ YAMLInlineeLinesSubsection()
+ : YAMLSubsectionBase(DebugSubsectionKind::InlineeLines) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLInlineeLinesSubsection>>
+ fromCodeViewSubsection(const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &Checksums,
+ const DebugInlineeLinesSubsectionRef &Lines);
+
+ InlineeInfo InlineeLines;
+};
+
+struct YAMLCrossModuleExportsSubsection : public YAMLSubsectionBase {
+ YAMLCrossModuleExportsSubsection()
+ : YAMLSubsectionBase(DebugSubsectionKind::CrossScopeExports) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLCrossModuleExportsSubsection>>
+ fromCodeViewSubsection(const DebugCrossModuleExportsSubsectionRef &Exports);
+
+ std::vector<CrossModuleExport> Exports;
+};
+
+struct YAMLCrossModuleImportsSubsection : public YAMLSubsectionBase {
+ YAMLCrossModuleImportsSubsection()
+ : YAMLSubsectionBase(DebugSubsectionKind::CrossScopeImports) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLCrossModuleImportsSubsection>>
+ fromCodeViewSubsection(const DebugStringTableSubsectionRef &Strings,
+ const DebugCrossModuleImportsSubsectionRef &Imports);
+
+ std::vector<YAMLCrossModuleImport> Imports;
+};
+
+struct YAMLSymbolsSubsection : public YAMLSubsectionBase {
+ YAMLSymbolsSubsection() : YAMLSubsectionBase(DebugSubsectionKind::Symbols) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLSymbolsSubsection>>
+ fromCodeViewSubsection(const DebugSymbolsSubsectionRef &Symbols);
+
+ std::vector<CodeViewYAML::SymbolRecord> Symbols;
+};
+
+struct YAMLStringTableSubsection : public YAMLSubsectionBase {
+ YAMLStringTableSubsection()
+ : YAMLSubsectionBase(DebugSubsectionKind::StringTable) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLStringTableSubsection>>
+ fromCodeViewSubsection(const DebugStringTableSubsectionRef &Strings);
+
+ std::vector<StringRef> Strings;
+};
+
+struct YAMLFrameDataSubsection : public YAMLSubsectionBase {
+ YAMLFrameDataSubsection()
+ : YAMLSubsectionBase(DebugSubsectionKind::FrameData) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLFrameDataSubsection>>
+ fromCodeViewSubsection(const DebugStringTableSubsectionRef &Strings,
+ const DebugFrameDataSubsectionRef &Frames);
+
+ std::vector<YAMLFrameData> Frames;
+};
+
+struct YAMLCoffSymbolRVASubsection : public YAMLSubsectionBase {
+ YAMLCoffSymbolRVASubsection()
+ : YAMLSubsectionBase(DebugSubsectionKind::CoffSymbolRVA) {}
+
+ void map(IO &IO) override;
+ std::shared_ptr<DebugSubsection>
+ toCodeViewSubsection(BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const override;
+ static Expected<std::shared_ptr<YAMLCoffSymbolRVASubsection>>
+ fromCodeViewSubsection(const DebugSymbolRVASubsectionRef &RVAs);
+
+ std::vector<uint32_t> RVAs;
+};
+
+} // end anonymous namespace
+
+void ScalarBitSetTraits<LineFlags>::bitset(IO &io, LineFlags &Flags) {
+ io.bitSetCase(Flags, "HasColumnInfo", LF_HaveColumns);
+ io.enumFallback<Hex16>(Flags);
+}
+
+void ScalarEnumerationTraits<FileChecksumKind>::enumeration(
+ IO &io, FileChecksumKind &Kind) {
+ io.enumCase(Kind, "None", FileChecksumKind::None);
+ io.enumCase(Kind, "MD5", FileChecksumKind::MD5);
+ io.enumCase(Kind, "SHA1", FileChecksumKind::SHA1);
+ io.enumCase(Kind, "SHA256", FileChecksumKind::SHA256);
+}
+
+void ScalarTraits<HexFormattedString>::output(const HexFormattedString &Value,
+ void *ctx, raw_ostream &Out) {
+ StringRef Bytes(reinterpret_cast<const char *>(Value.Bytes.data()),
+ Value.Bytes.size());
+ Out << toHex(Bytes);
+}
+
+StringRef ScalarTraits<HexFormattedString>::input(StringRef Scalar, void *ctxt,
+ HexFormattedString &Value) {
+ std::string H = fromHex(Scalar);
+ Value.Bytes.assign(H.begin(), H.end());
+ return StringRef();
+}
+
+void MappingTraits<SourceLineEntry>::mapping(IO &IO, SourceLineEntry &Obj) {
+ IO.mapRequired("Offset", Obj.Offset);
+ IO.mapRequired("LineStart", Obj.LineStart);
+ IO.mapRequired("IsStatement", Obj.IsStatement);
+ IO.mapRequired("EndDelta", Obj.EndDelta);
+}
+
+void MappingTraits<SourceColumnEntry>::mapping(IO &IO, SourceColumnEntry &Obj) {
+ IO.mapRequired("StartColumn", Obj.StartColumn);
+ IO.mapRequired("EndColumn", Obj.EndColumn);
+}
+
+void MappingTraits<SourceLineBlock>::mapping(IO &IO, SourceLineBlock &Obj) {
+ IO.mapRequired("FileName", Obj.FileName);
+ IO.mapRequired("Lines", Obj.Lines);
+ IO.mapRequired("Columns", Obj.Columns);
+}
+
+void MappingTraits<CrossModuleExport>::mapping(IO &IO, CrossModuleExport &Obj) {
+ IO.mapRequired("LocalId", Obj.Local);
+ IO.mapRequired("GlobalId", Obj.Global);
+}
+
+void MappingTraits<YAMLCrossModuleImport>::mapping(IO &IO,
+ YAMLCrossModuleImport &Obj) {
+ IO.mapRequired("Module", Obj.ModuleName);
+ IO.mapRequired("Imports", Obj.ImportIds);
+}
+
+void MappingTraits<SourceFileChecksumEntry>::mapping(
+ IO &IO, SourceFileChecksumEntry &Obj) {
+ IO.mapRequired("FileName", Obj.FileName);
+ IO.mapRequired("Kind", Obj.Kind);
+ IO.mapRequired("Checksum", Obj.ChecksumBytes);
+}
+
+void MappingTraits<InlineeSite>::mapping(IO &IO, InlineeSite &Obj) {
+ IO.mapRequired("FileName", Obj.FileName);
+ IO.mapRequired("LineNum", Obj.SourceLineNum);
+ IO.mapRequired("Inlinee", Obj.Inlinee);
+ IO.mapOptional("ExtraFiles", Obj.ExtraFiles);
+}
+
+void MappingTraits<YAMLFrameData>::mapping(IO &IO, YAMLFrameData &Obj) {
+ IO.mapRequired("CodeSize", Obj.CodeSize);
+ IO.mapRequired("FrameFunc", Obj.FrameFunc);
+ IO.mapRequired("LocalSize", Obj.LocalSize);
+ IO.mapOptional("MaxStackSize", Obj.MaxStackSize);
+ IO.mapOptional("ParamsSize", Obj.ParamsSize);
+ IO.mapOptional("PrologSize", Obj.PrologSize);
+ IO.mapOptional("RvaStart", Obj.RvaStart);
+ IO.mapOptional("SavedRegsSize", Obj.SavedRegsSize);
+}
+
+void YAMLChecksumsSubsection::map(IO &IO) {
+ IO.mapTag("!FileChecksums", true);
+ IO.mapRequired("Checksums", Checksums);
+}
+
+void YAMLLinesSubsection::map(IO &IO) {
+ IO.mapTag("!Lines", true);
+ IO.mapRequired("CodeSize", Lines.CodeSize);
+
+ IO.mapRequired("Flags", Lines.Flags);
+ IO.mapRequired("RelocOffset", Lines.RelocOffset);
+ IO.mapRequired("RelocSegment", Lines.RelocSegment);
+ IO.mapRequired("Blocks", Lines.Blocks);
+}
+
+void YAMLInlineeLinesSubsection::map(IO &IO) {
+ IO.mapTag("!InlineeLines", true);
+ IO.mapRequired("HasExtraFiles", InlineeLines.HasExtraFiles);
+ IO.mapRequired("Sites", InlineeLines.Sites);
+}
+
+void YAMLCrossModuleExportsSubsection::map(IO &IO) {
+ IO.mapTag("!CrossModuleExports", true);
+ IO.mapOptional("Exports", Exports);
+}
+
+void YAMLCrossModuleImportsSubsection::map(IO &IO) {
+ IO.mapTag("!CrossModuleImports", true);
+ IO.mapOptional("Imports", Imports);
+}
+
+void YAMLSymbolsSubsection::map(IO &IO) {
+ IO.mapTag("!Symbols", true);
+ IO.mapRequired("Records", Symbols);
+}
+
+void YAMLStringTableSubsection::map(IO &IO) {
+ IO.mapTag("!StringTable", true);
+ IO.mapRequired("Strings", Strings);
+}
+
+void YAMLFrameDataSubsection::map(IO &IO) {
+ IO.mapTag("!FrameData", true);
+ IO.mapRequired("Frames", Frames);
+}
+
+void YAMLCoffSymbolRVASubsection::map(IO &IO) {
+ IO.mapTag("!COFFSymbolRVAs", true);
+ IO.mapRequired("RVAs", RVAs);
+}
+
+void MappingTraits<YAMLDebugSubsection>::mapping(
+ IO &IO, YAMLDebugSubsection &Subsection) {
+ if (!IO.outputting()) {
+ if (IO.mapTag("!FileChecksums")) {
+ auto SS = std::make_shared<YAMLChecksumsSubsection>();
+ Subsection.Subsection = SS;
+ } else if (IO.mapTag("!Lines")) {
+ Subsection.Subsection = std::make_shared<YAMLLinesSubsection>();
+ } else if (IO.mapTag("!InlineeLines")) {
+ Subsection.Subsection = std::make_shared<YAMLInlineeLinesSubsection>();
+ } else if (IO.mapTag("!CrossModuleExports")) {
+ Subsection.Subsection =
+ std::make_shared<YAMLCrossModuleExportsSubsection>();
+ } else if (IO.mapTag("!CrossModuleImports")) {
+ Subsection.Subsection =
+ std::make_shared<YAMLCrossModuleImportsSubsection>();
+ } else if (IO.mapTag("!Symbols")) {
+ Subsection.Subsection = std::make_shared<YAMLSymbolsSubsection>();
+ } else if (IO.mapTag("!StringTable")) {
+ Subsection.Subsection = std::make_shared<YAMLStringTableSubsection>();
+ } else if (IO.mapTag("!FrameData")) {
+ Subsection.Subsection = std::make_shared<YAMLFrameDataSubsection>();
+ } else if (IO.mapTag("!COFFSymbolRVAs")) {
+ Subsection.Subsection = std::make_shared<YAMLCoffSymbolRVASubsection>();
+ } else {
+ llvm_unreachable("Unexpected subsection tag!");
+ }
+ }
+ Subsection.Subsection->map(IO);
+}
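+
+// For reference, an illustrative YAML fragment that the dispatch above routes
+// to YAMLChecksumsSubsection (the field values here are made up):
+//
+//   - !FileChecksums
+//     Checksums:
+//       - FileName: 'a.cpp'
+//         Kind:     MD5
+//         Checksum: 0123456789ABCDEF0123456789ABCDEF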
+
+std::shared_ptr<DebugSubsection> YAMLChecksumsSubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ assert(SC.hasStrings());
+ auto Result = std::make_shared<DebugChecksumsSubsection>(*SC.strings());
+ for (const auto &CS : Checksums) {
+ Result->addChecksum(CS.FileName, CS.Kind, CS.ChecksumBytes.Bytes);
+ }
+ return Result;
+}
+
+std::shared_ptr<DebugSubsection> YAMLLinesSubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ assert(SC.hasStrings() && SC.hasChecksums());
+ auto Result =
+ std::make_shared<DebugLinesSubsection>(*SC.checksums(), *SC.strings());
+ Result->setCodeSize(Lines.CodeSize);
+ Result->setRelocationAddress(Lines.RelocSegment, Lines.RelocOffset);
+ Result->setFlags(Lines.Flags);
+ for (const auto &LC : Lines.Blocks) {
+ Result->createBlock(LC.FileName);
+ if (Result->hasColumnInfo()) {
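+ // Lines and Columns act as parallel arrays here; zip() pairs them up and
+ // stops at the shorter of the two sequences.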
+ for (auto Item : zip(LC.Lines, LC.Columns)) {
+ auto &L = std::get<0>(Item);
+ auto &C = std::get<1>(Item);
+ uint32_t LE = L.LineStart + L.EndDelta;
+ Result->addLineAndColumnInfo(L.Offset,
+ LineInfo(L.LineStart, LE, L.IsStatement),
+ C.StartColumn, C.EndColumn);
+ }
+ } else {
+ for (const auto &L : LC.Lines) {
+ uint32_t LE = L.LineStart + L.EndDelta;
+ Result->addLineInfo(L.Offset, LineInfo(L.LineStart, LE, L.IsStatement));
+ }
+ }
+ }
+ return Result;
+}
+
+std::shared_ptr<DebugSubsection>
+YAMLInlineeLinesSubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ assert(SC.hasChecksums());
+ auto Result = std::make_shared<DebugInlineeLinesSubsection>(
+ *SC.checksums(), InlineeLines.HasExtraFiles);
+
+ for (const auto &Site : InlineeLines.Sites) {
+ Result->addInlineSite(TypeIndex(Site.Inlinee), Site.FileName,
+ Site.SourceLineNum);
+ if (!InlineeLines.HasExtraFiles)
+ continue;
+
+ for (auto EF : Site.ExtraFiles) {
+ Result->addExtraFile(EF);
+ }
+ }
+ return Result;
+}
+
+std::shared_ptr<DebugSubsection>
+YAMLCrossModuleExportsSubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ auto Result = std::make_shared<DebugCrossModuleExportsSubsection>();
+ for (const auto &M : Exports)
+ Result->addMapping(M.Local, M.Global);
+ return Result;
+}
+
+std::shared_ptr<DebugSubsection>
+YAMLCrossModuleImportsSubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ assert(SC.hasStrings());
+
+ auto Result =
+ std::make_shared<DebugCrossModuleImportsSubsection>(*SC.strings());
+ for (const auto &M : Imports) {
+ for (const auto Id : M.ImportIds)
+ Result->addImport(M.ModuleName, Id);
+ }
+ return Result;
+}
+
+std::shared_ptr<DebugSubsection> YAMLSymbolsSubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ auto Result = std::make_shared<DebugSymbolsSubsection>();
+ for (const auto &Sym : Symbols)
+ Result->addSymbol(
+ Sym.toCodeViewSymbol(Allocator, CodeViewContainer::ObjectFile));
+ return Result;
+}
+
+std::shared_ptr<DebugSubsection>
+YAMLStringTableSubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ auto Result = std::make_shared<DebugStringTableSubsection>();
+ for (const auto &Str : this->Strings)
+ Result->insert(Str);
+ return Result;
+}
+
+std::shared_ptr<DebugSubsection> YAMLFrameDataSubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ assert(SC.hasStrings());
+
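+ // The 'true' is DebugFrameDataSubsection's IncludeRelocPtr flag, so the
+ // serialized subsection is prefixed with the relocated pointer word that
+ // object files carry ahead of the FrameData records.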
+ auto Result = std::make_shared<DebugFrameDataSubsection>(true);
+ for (const auto &YF : Frames) {
+ codeview::FrameData F;
+ F.CodeSize = YF.CodeSize;
+ F.Flags = YF.Flags;
+ F.LocalSize = YF.LocalSize;
+ F.MaxStackSize = YF.MaxStackSize;
+ F.ParamsSize = YF.ParamsSize;
+ F.PrologSize = YF.PrologSize;
+ F.RvaStart = YF.RvaStart;
+ F.SavedRegsSize = YF.SavedRegsSize;
+ F.FrameFunc = SC.strings()->insert(YF.FrameFunc);
+ Result->addFrameData(F);
+ }
+ return Result;
+}
+
+std::shared_ptr<DebugSubsection>
+YAMLCoffSymbolRVASubsection::toCodeViewSubsection(
+ BumpPtrAllocator &Allocator,
+ const codeview::StringsAndChecksums &SC) const {
+ auto Result = std::make_shared<DebugSymbolRVASubsection>();
+ for (const auto &RVA : RVAs)
+ Result->addRVA(RVA);
+ return Result;
+}
+
+static Expected<SourceFileChecksumEntry>
+convertOneChecksum(const DebugStringTableSubsectionRef &Strings,
+ const FileChecksumEntry &CS) {
+ auto ExpectedString = Strings.getString(CS.FileNameOffset);
+ if (!ExpectedString)
+ return ExpectedString.takeError();
+
+ SourceFileChecksumEntry Result;
+ Result.ChecksumBytes.Bytes = CS.Checksum;
+ Result.Kind = CS.Kind;
+ Result.FileName = *ExpectedString;
+ return Result;
+}
+
+static Expected<StringRef>
+getFileName(const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &Checksums, uint32_t FileID) {
+ auto Iter = Checksums.getArray().at(FileID);
+ if (Iter == Checksums.getArray().end())
+ return make_error<CodeViewError>(cv_error_code::no_records);
+ uint32_t Offset = Iter->FileNameOffset;
+ return Strings.getString(Offset);
+}
+
+Expected<std::shared_ptr<YAMLChecksumsSubsection>>
+YAMLChecksumsSubsection::fromCodeViewSubsection(
+ const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &FC) {
+ auto Result = std::make_shared<YAMLChecksumsSubsection>();
+
+ for (const auto &CS : FC) {
+ auto ConvertedCS = convertOneChecksum(Strings, CS);
+ if (!ConvertedCS)
+ return ConvertedCS.takeError();
+ Result->Checksums.push_back(*ConvertedCS);
+ }
+ return Result;
+}
+
+Expected<std::shared_ptr<YAMLLinesSubsection>>
+YAMLLinesSubsection::fromCodeViewSubsection(
+ const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &Checksums,
+ const DebugLinesSubsectionRef &Lines) {
+ auto Result = std::make_shared<YAMLLinesSubsection>();
+ Result->Lines.CodeSize = Lines.header()->CodeSize;
+ Result->Lines.RelocOffset = Lines.header()->RelocOffset;
+ Result->Lines.RelocSegment = Lines.header()->RelocSegment;
+ Result->Lines.Flags = static_cast<LineFlags>(uint16_t(Lines.header()->Flags));
+ for (const auto &L : Lines) {
+ SourceLineBlock Block;
+ auto EF = getFileName(Strings, Checksums, L.NameIndex);
+ if (!EF)
+ return EF.takeError();
+ Block.FileName = *EF;
+ if (Lines.hasColumnInfo()) {
+ for (const auto &C : L.Columns) {
+ SourceColumnEntry SCE;
+ SCE.EndColumn = C.EndColumn;
+ SCE.StartColumn = C.StartColumn;
+ Block.Columns.push_back(SCE);
+ }
+ }
+ for (const auto &LN : L.LineNumbers) {
+ SourceLineEntry SLE;
+ LineInfo LI(LN.Flags);
+ SLE.Offset = LN.Offset;
+ SLE.LineStart = LI.getStartLine();
+ SLE.EndDelta = LI.getLineDelta();
+ SLE.IsStatement = LI.isStatement();
+ Block.Lines.push_back(SLE);
+ }
+ Result->Lines.Blocks.push_back(Block);
+ }
+ return Result;
+}
+
+Expected<std::shared_ptr<YAMLInlineeLinesSubsection>>
+YAMLInlineeLinesSubsection::fromCodeViewSubsection(
+ const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &Checksums,
+ const DebugInlineeLinesSubsectionRef &Lines) {
+ auto Result = std::make_shared<YAMLInlineeLinesSubsection>();
+
+ Result->InlineeLines.HasExtraFiles = Lines.hasExtraFiles();
+ for (const auto &IL : Lines) {
+ InlineeSite Site;
+ auto ExpF = getFileName(Strings, Checksums, IL.Header->FileID);
+ if (!ExpF)
+ return ExpF.takeError();
+ Site.FileName = *ExpF;
+ Site.Inlinee = IL.Header->Inlinee.getIndex();
+ Site.SourceLineNum = IL.Header->SourceLineNum;
+ if (Lines.hasExtraFiles()) {
+ for (const auto EF : IL.ExtraFiles) {
+ auto ExpF2 = getFileName(Strings, Checksums, EF);
+ if (!ExpF2)
+ return ExpF2.takeError();
+ Site.ExtraFiles.push_back(*ExpF2);
+ }
+ }
+ Result->InlineeLines.Sites.push_back(Site);
+ }
+ return Result;
+}
+
+Expected<std::shared_ptr<YAMLCrossModuleExportsSubsection>>
+YAMLCrossModuleExportsSubsection::fromCodeViewSubsection(
+ const DebugCrossModuleExportsSubsectionRef &Exports) {
+ auto Result = std::make_shared<YAMLCrossModuleExportsSubsection>();
+ Result->Exports.assign(Exports.begin(), Exports.end());
+ return Result;
+}
+
+Expected<std::shared_ptr<YAMLCrossModuleImportsSubsection>>
+YAMLCrossModuleImportsSubsection::fromCodeViewSubsection(
+ const DebugStringTableSubsectionRef &Strings,
+ const DebugCrossModuleImportsSubsectionRef &Imports) {
+ auto Result = std::make_shared<YAMLCrossModuleImportsSubsection>();
+ for (const auto &CMI : Imports) {
+ YAMLCrossModuleImport YCMI;
+ auto ExpectedStr = Strings.getString(CMI.Header->ModuleNameOffset);
+ if (!ExpectedStr)
+ return ExpectedStr.takeError();
+ YCMI.ModuleName = *ExpectedStr;
+ YCMI.ImportIds.assign(CMI.Imports.begin(), CMI.Imports.end());
+ Result->Imports.push_back(YCMI);
+ }
+ return Result;
+}
+
+Expected<std::shared_ptr<YAMLSymbolsSubsection>>
+YAMLSymbolsSubsection::fromCodeViewSubsection(
+ const DebugSymbolsSubsectionRef &Symbols) {
+ auto Result = std::make_shared<YAMLSymbolsSubsection>();
+ for (const auto &Sym : Symbols) {
+ auto S = CodeViewYAML::SymbolRecord::fromCodeViewSymbol(Sym);
+ if (!S)
+ return joinErrors(make_error<CodeViewError>(
+ cv_error_code::corrupt_record,
+ "Invalid CodeView Symbol Record in SymbolRecord "
+ "subsection of .debug$S while converting to YAML!"),
+ S.takeError());
+
+ Result->Symbols.push_back(*S);
+ }
+ return Result;
+}
+
+Expected<std::shared_ptr<YAMLStringTableSubsection>>
+YAMLStringTableSubsection::fromCodeViewSubsection(
+ const DebugStringTableSubsectionRef &Strings) {
+ auto Result = std::make_shared<YAMLStringTableSubsection>();
+ BinaryStreamReader Reader(Strings.getBuffer());
+ StringRef S;
+ // First item is a single null string, skip it.
+ if (auto EC = Reader.readCString(S))
+ return std::move(EC);
+ assert(S.empty());
+ while (Reader.bytesRemaining() > 0) {
+ if (auto EC = Reader.readCString(S))
+ return std::move(EC);
+ Result->Strings.push_back(S);
+ }
+ return Result;
+}
+
+Expected<std::shared_ptr<YAMLFrameDataSubsection>>
+YAMLFrameDataSubsection::fromCodeViewSubsection(
+ const DebugStringTableSubsectionRef &Strings,
+ const DebugFrameDataSubsectionRef &Frames) {
+ auto Result = std::make_shared<YAMLFrameDataSubsection>();
+ for (const auto &F : Frames) {
+ YAMLFrameData YF;
+ YF.CodeSize = F.CodeSize;
+ YF.Flags = F.Flags;
+ YF.LocalSize = F.LocalSize;
+ YF.MaxStackSize = F.MaxStackSize;
+ YF.ParamsSize = F.ParamsSize;
+ YF.PrologSize = F.PrologSize;
+ YF.RvaStart = F.RvaStart;
+ YF.SavedRegsSize = F.SavedRegsSize;
+
+ auto ES = Strings.getString(F.FrameFunc);
+ if (!ES)
+ return joinErrors(
+ make_error<CodeViewError>(
+ cv_error_code::no_records,
+ "Could not find string for string id while mapping FrameData!"),
+ ES.takeError());
+ YF.FrameFunc = *ES;
+ Result->Frames.push_back(YF);
+ }
+ return Result;
+}
+
+Expected<std::shared_ptr<YAMLCoffSymbolRVASubsection>>
+YAMLCoffSymbolRVASubsection::fromCodeViewSubsection(
+ const DebugSymbolRVASubsectionRef &Section) {
+ auto Result = std::make_shared<YAMLCoffSymbolRVASubsection>();
+ for (const auto &RVA : Section) {
+ Result->RVAs.push_back(RVA);
+ }
+ return Result;
+}
+
+Expected<std::vector<std::shared_ptr<DebugSubsection>>>
+llvm::CodeViewYAML::toCodeViewSubsectionList(
+ BumpPtrAllocator &Allocator, ArrayRef<YAMLDebugSubsection> Subsections,
+ const codeview::StringsAndChecksums &SC) {
+ std::vector<std::shared_ptr<DebugSubsection>> Result;
+ if (Subsections.empty())
+ return std::move(Result);
+
+ for (const auto &SS : Subsections) {
+ std::shared_ptr<DebugSubsection> CVS;
+ CVS = SS.Subsection->toCodeViewSubsection(Allocator, SC);
+ assert(CVS != nullptr);
+ Result.push_back(std::move(CVS));
+ }
+ return std::move(Result);
+}
+
+namespace {
+
+struct SubsectionConversionVisitor : public DebugSubsectionVisitor {
+ SubsectionConversionVisitor() = default;
+
+ Error visitUnknown(DebugUnknownSubsectionRef &Unknown) override;
+ Error visitLines(DebugLinesSubsectionRef &Lines,
+ const StringsAndChecksumsRef &State) override;
+ Error visitFileChecksums(DebugChecksumsSubsectionRef &Checksums,
+ const StringsAndChecksumsRef &State) override;
+ Error visitInlineeLines(DebugInlineeLinesSubsectionRef &Inlinees,
+ const StringsAndChecksumsRef &State) override;
+ Error visitCrossModuleExports(DebugCrossModuleExportsSubsectionRef &Checksums,
+ const StringsAndChecksumsRef &State) override;
+ Error visitCrossModuleImports(DebugCrossModuleImportsSubsectionRef &Inlinees,
+ const StringsAndChecksumsRef &State) override;
+ Error visitStringTable(DebugStringTableSubsectionRef &ST,
+ const StringsAndChecksumsRef &State) override;
+ Error visitSymbols(DebugSymbolsSubsectionRef &Symbols,
+ const StringsAndChecksumsRef &State) override;
+ Error visitFrameData(DebugFrameDataSubsectionRef &Symbols,
+ const StringsAndChecksumsRef &State) override;
+ Error visitCOFFSymbolRVAs(DebugSymbolRVASubsectionRef &Symbols,
+ const StringsAndChecksumsRef &State) override;
+
+ YAMLDebugSubsection Subsection;
+};
+
+} // end anonymous namespace
+
+Error SubsectionConversionVisitor::visitUnknown(
+ DebugUnknownSubsectionRef &Unknown) {
+ return make_error<CodeViewError>(cv_error_code::operation_unsupported);
+}
+
+Error SubsectionConversionVisitor::visitLines(
+ DebugLinesSubsectionRef &Lines, const StringsAndChecksumsRef &State) {
+ auto Result = YAMLLinesSubsection::fromCodeViewSubsection(
+ State.strings(), State.checksums(), Lines);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Error SubsectionConversionVisitor::visitFileChecksums(
+ DebugChecksumsSubsectionRef &Checksums,
+ const StringsAndChecksumsRef &State) {
+ auto Result = YAMLChecksumsSubsection::fromCodeViewSubsection(State.strings(),
+ Checksums);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Error SubsectionConversionVisitor::visitInlineeLines(
+ DebugInlineeLinesSubsectionRef &Inlinees,
+ const StringsAndChecksumsRef &State) {
+ auto Result = YAMLInlineeLinesSubsection::fromCodeViewSubsection(
+ State.strings(), State.checksums(), Inlinees);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Error SubsectionConversionVisitor::visitCrossModuleExports(
+ DebugCrossModuleExportsSubsectionRef &Exports,
+ const StringsAndChecksumsRef &State) {
+ auto Result =
+ YAMLCrossModuleExportsSubsection::fromCodeViewSubsection(Exports);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Error SubsectionConversionVisitor::visitCrossModuleImports(
+ DebugCrossModuleImportsSubsectionRef &Imports,
+ const StringsAndChecksumsRef &State) {
+ auto Result = YAMLCrossModuleImportsSubsection::fromCodeViewSubsection(
+ State.strings(), Imports);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Error SubsectionConversionVisitor::visitStringTable(
+ DebugStringTableSubsectionRef &Strings,
+ const StringsAndChecksumsRef &State) {
+ auto Result = YAMLStringTableSubsection::fromCodeViewSubsection(Strings);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Error SubsectionConversionVisitor::visitSymbols(
+ DebugSymbolsSubsectionRef &Symbols, const StringsAndChecksumsRef &State) {
+ auto Result = YAMLSymbolsSubsection::fromCodeViewSubsection(Symbols);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Error SubsectionConversionVisitor::visitFrameData(
+ DebugFrameDataSubsectionRef &Frames, const StringsAndChecksumsRef &State) {
+ auto Result =
+ YAMLFrameDataSubsection::fromCodeViewSubsection(State.strings(), Frames);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Error SubsectionConversionVisitor::visitCOFFSymbolRVAs(
+ DebugSymbolRVASubsectionRef &RVAs, const StringsAndChecksumsRef &State) {
+ auto Result = YAMLCoffSymbolRVASubsection::fromCodeViewSubsection(RVAs);
+ if (!Result)
+ return Result.takeError();
+ Subsection.Subsection = *Result;
+ return Error::success();
+}
+
+Expected<YAMLDebugSubsection>
+YAMLDebugSubsection::fromCodeViewSubection(const StringsAndChecksumsRef &SC,
+ const DebugSubsectionRecord &SS) {
+ SubsectionConversionVisitor V;
+ if (auto EC = visitDebugSubsection(SS, V, SC))
+ return std::move(EC);
+
+ return V.Subsection;
+}
+
+std::vector<YAMLDebugSubsection>
+llvm::CodeViewYAML::fromDebugS(ArrayRef<uint8_t> Data,
+ const StringsAndChecksumsRef &SC) {
+ BinaryStreamReader Reader(Data, support::little);
+ uint32_t Magic;
+
+ ExitOnError Err("Invalid .debug$S section!");
+ Err(Reader.readInteger(Magic));
+ assert(Magic == COFF::DEBUG_SECTION_MAGIC && "Invalid .debug$S section!");
+
+ DebugSubsectionArray Subsections;
+ Err(Reader.readArray(Subsections, Reader.bytesRemaining()));
+
+ std::vector<YAMLDebugSubsection> Result;
+
+ for (const auto &SS : Subsections) {
+ auto YamlSS = Err(YAMLDebugSubsection::fromCodeViewSubection(SC, SS));
+ Result.push_back(YamlSS);
+ }
+ return Result;
+}
+
+void llvm::CodeViewYAML::initializeStringsAndChecksums(
+ ArrayRef<YAMLDebugSubsection> Sections, codeview::StringsAndChecksums &SC) {
+ // String Table and Checksums subsections don't use the allocator.
+ BumpPtrAllocator Allocator;
+
+ // It's possible for checksums and strings to appear in different .debug$S
+ // sections, so we have to make this a stateful function that can build up
+ // the strings and checksums fields over multiple iterations.
+
+ // File Checksums require the string table, but may come before it, so we
+ // have to scan for strings first, then scan for checksums again from the
+ // beginning.
+ if (!SC.hasStrings()) {
+ for (const auto &SS : Sections) {
+ if (SS.Subsection->Kind != DebugSubsectionKind::StringTable)
+ continue;
+
+ auto Result = SS.Subsection->toCodeViewSubsection(Allocator, SC);
+ SC.setStrings(
+ std::static_pointer_cast<DebugStringTableSubsection>(Result));
+ break;
+ }
+ }
+
+ if (SC.hasStrings() && !SC.hasChecksums()) {
+ for (const auto &SS : Sections) {
+ if (SS.Subsection->Kind != DebugSubsectionKind::FileChecksums)
+ continue;
+
+ auto Result = SS.Subsection->toCodeViewSubsection(Allocator, SC);
+ SC.setChecksums(
+ std::static_pointer_cast<DebugChecksumsSubsection>(Result));
+ break;
+ }
+ }
+}
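+
+// Illustrative end-to-end use of the helpers above, assuming `Subsections`
+// and `Allocator` already exist (e.g. the subsections were parsed from a
+// YAML document elsewhere):
+//
+//   codeview::StringsAndChecksums SC;
+//   CodeViewYAML::initializeStringsAndChecksums(Subsections, SC);
+//   auto ExpectedCVSS =
+//       CodeViewYAML::toCodeViewSubsectionList(Allocator, Subsections, SC);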
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLSymbols.cpp b/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLSymbols.cpp
new file mode 100644
index 00000000000..6b6a1176628
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLSymbols.cpp
@@ -0,0 +1,659 @@
+//===- CodeViewYAMLSymbols.cpp - CodeView YAMLIO Symbol implementation ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of CodeView
+// Debug Info.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/CodeViewYAMLSymbols.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
+#include "llvm/DebugInfo/CodeView/EnumTables.h"
+#include "llvm/DebugInfo/CodeView/RecordSerialization.h"
+#include "llvm/DebugInfo/CodeView/SymbolDeserializer.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/CodeView/SymbolSerializer.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/ObjectYAML/YAML.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+using namespace llvm::codeview;
+using namespace llvm::CodeViewYAML;
+using namespace llvm::CodeViewYAML::detail;
+using namespace llvm::yaml;
+
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(TypeIndex)
+LLVM_YAML_IS_SEQUENCE_VECTOR(LocalVariableAddrGap)
+
+// We only need to declare these; the definitions are in CodeViewYAMLTypes.cpp.
+LLVM_YAML_DECLARE_SCALAR_TRAITS(APSInt, QuotingType::None)
+LLVM_YAML_DECLARE_SCALAR_TRAITS(TypeIndex, QuotingType::None)
+
+LLVM_YAML_DECLARE_ENUM_TRAITS(SymbolKind)
+LLVM_YAML_DECLARE_ENUM_TRAITS(FrameCookieKind)
+
+LLVM_YAML_DECLARE_BITSET_TRAITS(CompileSym2Flags)
+LLVM_YAML_DECLARE_BITSET_TRAITS(CompileSym3Flags)
+LLVM_YAML_DECLARE_BITSET_TRAITS(ExportFlags)
+LLVM_YAML_DECLARE_BITSET_TRAITS(PublicSymFlags)
+LLVM_YAML_DECLARE_BITSET_TRAITS(LocalSymFlags)
+LLVM_YAML_DECLARE_BITSET_TRAITS(ProcSymFlags)
+LLVM_YAML_DECLARE_BITSET_TRAITS(FrameProcedureOptions)
+LLVM_YAML_DECLARE_ENUM_TRAITS(CPUType)
+LLVM_YAML_DECLARE_ENUM_TRAITS(RegisterId)
+LLVM_YAML_DECLARE_ENUM_TRAITS(TrampolineType)
+LLVM_YAML_DECLARE_ENUM_TRAITS(ThunkOrdinal)
+
+LLVM_YAML_STRONG_TYPEDEF(StringRef, TypeName)
+
+LLVM_YAML_DECLARE_SCALAR_TRAITS(TypeName, QuotingType::Single)
+
+StringRef ScalarTraits<TypeName>::input(StringRef S, void *V, TypeName &T) {
+ return ScalarTraits<StringRef>::input(S, V, T.value);
+}
+
+void ScalarTraits<TypeName>::output(const TypeName &T, void *V,
+ raw_ostream &R) {
+ ScalarTraits<StringRef>::output(T.value, V, R);
+}
+
+void ScalarEnumerationTraits<SymbolKind>::enumeration(IO &io,
+ SymbolKind &Value) {
+ auto SymbolNames = getSymbolTypeNames();
+ for (const auto &E : SymbolNames)
+ io.enumCase(Value, E.Name.str().c_str(), E.Value);
+}
+
+void ScalarBitSetTraits<CompileSym2Flags>::bitset(IO &io,
+ CompileSym2Flags &Flags) {
+ auto FlagNames = getCompileSym2FlagNames();
+ for (const auto &E : FlagNames) {
+ io.bitSetCase(Flags, E.Name.str().c_str(),
+ static_cast<CompileSym2Flags>(E.Value));
+ }
+}
+
+void ScalarBitSetTraits<CompileSym3Flags>::bitset(IO &io,
+ CompileSym3Flags &Flags) {
+ auto FlagNames = getCompileSym3FlagNames();
+ for (const auto &E : FlagNames) {
+ io.bitSetCase(Flags, E.Name.str().c_str(),
+ static_cast<CompileSym3Flags>(E.Value));
+ }
+}
+
+void ScalarBitSetTraits<ExportFlags>::bitset(IO &io, ExportFlags &Flags) {
+ auto FlagNames = getExportSymFlagNames();
+ for (const auto &E : FlagNames) {
+ io.bitSetCase(Flags, E.Name.str().c_str(),
+ static_cast<ExportFlags>(E.Value));
+ }
+}
+
+void ScalarBitSetTraits<PublicSymFlags>::bitset(IO &io, PublicSymFlags &Flags) {
+ auto FlagNames = getPublicSymFlagNames();
+ for (const auto &E : FlagNames) {
+ io.bitSetCase(Flags, E.Name.str().c_str(),
+ static_cast<PublicSymFlags>(E.Value));
+ }
+}
+
+void ScalarBitSetTraits<LocalSymFlags>::bitset(IO &io, LocalSymFlags &Flags) {
+ auto FlagNames = getLocalFlagNames();
+ for (const auto &E : FlagNames) {
+ io.bitSetCase(Flags, E.Name.str().c_str(),
+ static_cast<LocalSymFlags>(E.Value));
+ }
+}
+
+void ScalarBitSetTraits<ProcSymFlags>::bitset(IO &io, ProcSymFlags &Flags) {
+ auto FlagNames = getProcSymFlagNames();
+ for (const auto &E : FlagNames) {
+ io.bitSetCase(Flags, E.Name.str().c_str(),
+ static_cast<ProcSymFlags>(E.Value));
+ }
+}
+
+void ScalarBitSetTraits<FrameProcedureOptions>::bitset(
+ IO &io, FrameProcedureOptions &Flags) {
+ auto FlagNames = getFrameProcSymFlagNames();
+ for (const auto &E : FlagNames) {
+ io.bitSetCase(Flags, E.Name.str().c_str(),
+ static_cast<FrameProcedureOptions>(E.Value));
+ }
+}
+
+void ScalarEnumerationTraits<CPUType>::enumeration(IO &io, CPUType &Cpu) {
+ auto CpuNames = getCPUTypeNames();
+ for (const auto &E : CpuNames) {
+ io.enumCase(Cpu, E.Name.str().c_str(), static_cast<CPUType>(E.Value));
+ }
+}
+
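+// Register names are CPU-specific, so this mapping requires the COFF file
+// header to be installed as the YAML IO context (see the assert below);
+// unrecognized machines and unnamed register values fall back to raw Hex16.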
+void ScalarEnumerationTraits<RegisterId>::enumeration(IO &io, RegisterId &Reg) {
+ const auto *Header = static_cast<COFF::header *>(io.getContext());
+ assert(Header && "The IO context is not initialized");
+
+ Optional<CPUType> CpuType;
+ ArrayRef<EnumEntry<uint16_t>> RegNames;
+
+ switch (Header->Machine) {
+ case COFF::IMAGE_FILE_MACHINE_I386:
+ CpuType = CPUType::Pentium3;
+ break;
+ case COFF::IMAGE_FILE_MACHINE_AMD64:
+ CpuType = CPUType::X64;
+ break;
+ case COFF::IMAGE_FILE_MACHINE_ARMNT:
+ CpuType = CPUType::ARMNT;
+ break;
+ case COFF::IMAGE_FILE_MACHINE_ARM64:
+ CpuType = CPUType::ARM64;
+ break;
+ }
+
+ if (CpuType)
+ RegNames = getRegisterNames(*CpuType);
+
+ for (const auto &E : RegNames) {
+ io.enumCase(Reg, E.Name.str().c_str(), static_cast<RegisterId>(E.Value));
+ }
+ io.enumFallback<Hex16>(Reg);
+}
+
+void ScalarEnumerationTraits<TrampolineType>::enumeration(
+ IO &io, TrampolineType &Tramp) {
+ auto TrampNames = getTrampolineNames();
+ for (const auto &E : TrampNames) {
+ io.enumCase(Tramp, E.Name.str().c_str(),
+ static_cast<TrampolineType>(E.Value));
+ }
+}
+
+void ScalarEnumerationTraits<ThunkOrdinal>::enumeration(IO &io,
+ ThunkOrdinal &Ord) {
+ auto ThunkNames = getThunkOrdinalNames();
+ for (const auto &E : ThunkNames) {
+ io.enumCase(Ord, E.Name.str().c_str(), static_cast<ThunkOrdinal>(E.Value));
+ }
+}
+
+void ScalarEnumerationTraits<FrameCookieKind>::enumeration(
+ IO &io, FrameCookieKind &FC) {
+ auto ThunkNames = getFrameCookieKindNames();
+ for (const auto &E : ThunkNames) {
+ io.enumCase(FC, E.Name.str().c_str(),
+ static_cast<FrameCookieKind>(E.Value));
+ }
+}
+
+namespace llvm {
+namespace yaml {
+template <> struct MappingTraits<LocalVariableAddrRange> {
+ static void mapping(IO &io, LocalVariableAddrRange &Range) {
+ io.mapRequired("OffsetStart", Range.OffsetStart);
+ io.mapRequired("ISectStart", Range.ISectStart);
+ io.mapRequired("Range", Range.Range);
+ }
+};
+template <> struct MappingTraits<LocalVariableAddrGap> {
+ static void mapping(IO &io, LocalVariableAddrGap &Gap) {
+ io.mapRequired("GapStartOffset", Gap.GapStartOffset);
+ io.mapRequired("Range", Gap.Range);
+ }
+};
+} // namespace yaml
+} // namespace llvm
+
+namespace llvm {
+namespace CodeViewYAML {
+namespace detail {
+
+struct SymbolRecordBase {
+ codeview::SymbolKind Kind;
+
+ explicit SymbolRecordBase(codeview::SymbolKind K) : Kind(K) {}
+ virtual ~SymbolRecordBase() = default;
+
+ virtual void map(yaml::IO &io) = 0;
+ virtual codeview::CVSymbol
+ toCodeViewSymbol(BumpPtrAllocator &Allocator,
+ CodeViewContainer Container) const = 0;
+ virtual Error fromCodeViewSymbol(codeview::CVSymbol Type) = 0;
+};
+
+template <typename T> struct SymbolRecordImpl : public SymbolRecordBase {
+ explicit SymbolRecordImpl(codeview::SymbolKind K)
+ : SymbolRecordBase(K), Symbol(static_cast<SymbolRecordKind>(K)) {}
+
+ void map(yaml::IO &io) override;
+
+ codeview::CVSymbol
+ toCodeViewSymbol(BumpPtrAllocator &Allocator,
+ CodeViewContainer Container) const override {
+ return SymbolSerializer::writeOneSymbol(Symbol, Allocator, Container);
+ }
+
+ Error fromCodeViewSymbol(codeview::CVSymbol CVS) override {
+ return SymbolDeserializer::deserializeAs<T>(CVS, Symbol);
+ }
+
+ mutable T Symbol;
+};
+
+struct UnknownSymbolRecord : public SymbolRecordBase {
+ explicit UnknownSymbolRecord(codeview::SymbolKind K) : SymbolRecordBase(K) {}
+
+ void map(yaml::IO &io) override;
+
+ CVSymbol toCodeViewSymbol(BumpPtrAllocator &Allocator,
+ CodeViewContainer Container) const override {
+ RecordPrefix Prefix;
+ uint32_t TotalLen = sizeof(RecordPrefix) + Data.size();
+ Prefix.RecordKind = Kind;
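+ // RecordLen excludes the 2-byte length field itself, hence the -2 below.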
+ Prefix.RecordLen = TotalLen - 2;
+ uint8_t *Buffer = Allocator.Allocate<uint8_t>(TotalLen);
+ ::memcpy(Buffer, &Prefix, sizeof(RecordPrefix));
+ ::memcpy(Buffer + sizeof(RecordPrefix), Data.data(), Data.size());
+ return CVSymbol(ArrayRef<uint8_t>(Buffer, TotalLen));
+ }
+
+ Error fromCodeViewSymbol(CVSymbol CVS) override {
+ this->Kind = CVS.kind();
+ Data = CVS.RecordData.drop_front(sizeof(RecordPrefix));
+ return Error::success();
+ }
+
+ std::vector<uint8_t> Data;
+};
+
+template <> void SymbolRecordImpl<ScopeEndSym>::map(IO &IO) {}
+
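+// Unknown symbol records round-trip as raw bytes: the payload is emitted as
+// a binary "Data" field on output and re-materialized from it on input.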
+void UnknownSymbolRecord::map(yaml::IO &io) {
+ yaml::BinaryRef Binary;
+ if (io.outputting())
+ Binary = yaml::BinaryRef(Data);
+ io.mapRequired("Data", Binary);
+ if (!io.outputting()) {
+ std::string Str;
+ raw_string_ostream OS(Str);
+ Binary.writeAsBinary(OS);
+ OS.flush();
+ Data.assign(Str.begin(), Str.end());
+ }
+}
+
+template <> void SymbolRecordImpl<Thunk32Sym>::map(IO &IO) {
+ IO.mapRequired("Parent", Symbol.Parent);
+ IO.mapRequired("End", Symbol.End);
+ IO.mapRequired("Next", Symbol.Next);
+ IO.mapRequired("Off", Symbol.Offset);
+ IO.mapRequired("Seg", Symbol.Segment);
+ IO.mapRequired("Len", Symbol.Length);
+ IO.mapRequired("Ordinal", Symbol.Thunk);
+}
+
+template <> void SymbolRecordImpl<TrampolineSym>::map(IO &IO) {
+ IO.mapRequired("Type", Symbol.Type);
+ IO.mapRequired("Size", Symbol.Size);
+ IO.mapRequired("ThunkOff", Symbol.ThunkOffset);
+ IO.mapRequired("TargetOff", Symbol.TargetOffset);
+ IO.mapRequired("ThunkSection", Symbol.ThunkSection);
+ IO.mapRequired("TargetSection", Symbol.TargetSection);
+}
+
+template <> void SymbolRecordImpl<SectionSym>::map(IO &IO) {
+ IO.mapRequired("SectionNumber", Symbol.SectionNumber);
+ IO.mapRequired("Alignment", Symbol.Alignment);
+ IO.mapRequired("Rva", Symbol.Rva);
+ IO.mapRequired("Length", Symbol.Length);
+ IO.mapRequired("Characteristics", Symbol.Characteristics);
+ IO.mapRequired("Name", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<CoffGroupSym>::map(IO &IO) {
+ IO.mapRequired("Size", Symbol.Size);
+ IO.mapRequired("Characteristics", Symbol.Characteristics);
+ IO.mapRequired("Offset", Symbol.Offset);
+ IO.mapRequired("Segment", Symbol.Segment);
+ IO.mapRequired("Name", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<ExportSym>::map(IO &IO) {
+ IO.mapRequired("Ordinal", Symbol.Ordinal);
+ IO.mapRequired("Flags", Symbol.Flags);
+ IO.mapRequired("Name", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<ProcSym>::map(IO &IO) {
+ IO.mapOptional("PtrParent", Symbol.Parent, 0U);
+ IO.mapOptional("PtrEnd", Symbol.End, 0U);
+ IO.mapOptional("PtrNext", Symbol.Next, 0U);
+ IO.mapRequired("CodeSize", Symbol.CodeSize);
+ IO.mapRequired("DbgStart", Symbol.DbgStart);
+ IO.mapRequired("DbgEnd", Symbol.DbgEnd);
+ IO.mapRequired("FunctionType", Symbol.FunctionType);
+ IO.mapOptional("Offset", Symbol.CodeOffset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("Flags", Symbol.Flags);
+ IO.mapRequired("DisplayName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<RegisterSym>::map(IO &IO) {
+ IO.mapRequired("Type", Symbol.Index);
+ IO.mapRequired("Seg", Symbol.Register);
+ IO.mapRequired("Name", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<PublicSym32>::map(IO &IO) {
+ IO.mapRequired("Flags", Symbol.Flags);
+ IO.mapOptional("Offset", Symbol.Offset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("Name", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<ProcRefSym>::map(IO &IO) {
+ IO.mapRequired("SumName", Symbol.SumName);
+ IO.mapRequired("SymOffset", Symbol.SymOffset);
+ IO.mapRequired("Mod", Symbol.Module);
+ IO.mapRequired("Name", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<EnvBlockSym>::map(IO &IO) {
+ IO.mapRequired("Entries", Symbol.Fields);
+}
+
+template <> void SymbolRecordImpl<InlineSiteSym>::map(IO &IO) {
+ IO.mapOptional("PtrParent", Symbol.Parent, 0U);
+ IO.mapOptional("PtrEnd", Symbol.End, 0U);
+ IO.mapRequired("Inlinee", Symbol.Inlinee);
+ // TODO: The binary annotations
+}
+
+template <> void SymbolRecordImpl<LocalSym>::map(IO &IO) {
+ IO.mapRequired("Type", Symbol.Type);
+ IO.mapRequired("Flags", Symbol.Flags);
+
+ IO.mapRequired("VarName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<DefRangeSym>::map(IO &IO) {
+ IO.mapRequired("Program", Symbol.Program);
+ IO.mapRequired("Range", Symbol.Range);
+ IO.mapRequired("Gaps", Symbol.Gaps);
+}
+
+template <> void SymbolRecordImpl<DefRangeSubfieldSym>::map(IO &IO) {
+ IO.mapRequired("Program", Symbol.Program);
+ IO.mapRequired("OffsetInParent", Symbol.OffsetInParent);
+ IO.mapRequired("Range", Symbol.Range);
+ IO.mapRequired("Gaps", Symbol.Gaps);
+}
+
+template <> void SymbolRecordImpl<DefRangeRegisterSym>::map(IO &IO) {
+ IO.mapRequired("Register", Symbol.Hdr.Register);
+ IO.mapRequired("MayHaveNoName", Symbol.Hdr.MayHaveNoName);
+ IO.mapRequired("Range", Symbol.Range);
+ IO.mapRequired("Gaps", Symbol.Gaps);
+}
+
+template <> void SymbolRecordImpl<DefRangeFramePointerRelSym>::map(IO &IO) {
+ IO.mapRequired("Offset", Symbol.Hdr.Offset);
+ IO.mapRequired("Range", Symbol.Range);
+ IO.mapRequired("Gaps", Symbol.Gaps);
+}
+
+template <> void SymbolRecordImpl<DefRangeSubfieldRegisterSym>::map(IO &IO) {
+ IO.mapRequired("Register", Symbol.Hdr.Register);
+ IO.mapRequired("MayHaveNoName", Symbol.Hdr.MayHaveNoName);
+ IO.mapRequired("OffsetInParent", Symbol.Hdr.OffsetInParent);
+ IO.mapRequired("Range", Symbol.Range);
+ IO.mapRequired("Gaps", Symbol.Gaps);
+}
+
+template <>
+void SymbolRecordImpl<DefRangeFramePointerRelFullScopeSym>::map(IO &IO) {
+ IO.mapRequired("Register", Symbol.Offset);
+}
+
+template <> void SymbolRecordImpl<DefRangeRegisterRelSym>::map(IO &IO) {
+ IO.mapRequired("Register", Symbol.Hdr.Register);
+ IO.mapRequired("Flags", Symbol.Hdr.Flags);
+ IO.mapRequired("BasePointerOffset", Symbol.Hdr.BasePointerOffset);
+ IO.mapRequired("Range", Symbol.Range);
+ IO.mapRequired("Gaps", Symbol.Gaps);
+}
+
+template <> void SymbolRecordImpl<BlockSym>::map(IO &IO) {
+ IO.mapOptional("PtrParent", Symbol.Parent, 0U);
+ IO.mapOptional("PtrEnd", Symbol.End, 0U);
+ IO.mapRequired("CodeSize", Symbol.CodeSize);
+ IO.mapOptional("Offset", Symbol.CodeOffset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("BlockName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<LabelSym>::map(IO &IO) {
+ IO.mapOptional("Offset", Symbol.CodeOffset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("Flags", Symbol.Flags);
+ IO.mapRequired("Flags", Symbol.Flags);
+ IO.mapRequired("DisplayName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<ObjNameSym>::map(IO &IO) {
+ IO.mapRequired("Signature", Symbol.Signature);
+ IO.mapRequired("ObjectName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<Compile2Sym>::map(IO &IO) {
+ IO.mapRequired("Flags", Symbol.Flags);
+ IO.mapRequired("Machine", Symbol.Machine);
+ IO.mapRequired("FrontendMajor", Symbol.VersionFrontendMajor);
+ IO.mapRequired("FrontendMinor", Symbol.VersionFrontendMinor);
+ IO.mapRequired("FrontendBuild", Symbol.VersionFrontendBuild);
+ IO.mapRequired("BackendMajor", Symbol.VersionBackendMajor);
+ IO.mapRequired("BackendMinor", Symbol.VersionBackendMinor);
+ IO.mapRequired("BackendBuild", Symbol.VersionBackendBuild);
+ IO.mapRequired("Version", Symbol.Version);
+}
+
+template <> void SymbolRecordImpl<Compile3Sym>::map(IO &IO) {
+ IO.mapRequired("Flags", Symbol.Flags);
+ IO.mapRequired("Machine", Symbol.Machine);
+ IO.mapRequired("FrontendMajor", Symbol.VersionFrontendMajor);
+ IO.mapRequired("FrontendMinor", Symbol.VersionFrontendMinor);
+ IO.mapRequired("FrontendBuild", Symbol.VersionFrontendBuild);
+ IO.mapRequired("FrontendQFE", Symbol.VersionFrontendQFE);
+ IO.mapRequired("BackendMajor", Symbol.VersionBackendMajor);
+ IO.mapRequired("BackendMinor", Symbol.VersionBackendMinor);
+ IO.mapRequired("BackendBuild", Symbol.VersionBackendBuild);
+ IO.mapRequired("BackendQFE", Symbol.VersionBackendQFE);
+ IO.mapRequired("Version", Symbol.Version);
+}
+
+template <> void SymbolRecordImpl<FrameProcSym>::map(IO &IO) {
+ IO.mapRequired("TotalFrameBytes", Symbol.TotalFrameBytes);
+ IO.mapRequired("PaddingFrameBytes", Symbol.PaddingFrameBytes);
+ IO.mapRequired("OffsetToPadding", Symbol.OffsetToPadding);
+ IO.mapRequired("BytesOfCalleeSavedRegisters",
+ Symbol.BytesOfCalleeSavedRegisters);
+ IO.mapRequired("OffsetOfExceptionHandler", Symbol.OffsetOfExceptionHandler);
+ IO.mapRequired("SectionIdOfExceptionHandler",
+ Symbol.SectionIdOfExceptionHandler);
+ IO.mapRequired("Flags", Symbol.Flags);
+}
+
+template <> void SymbolRecordImpl<CallSiteInfoSym>::map(IO &IO) {
+ IO.mapOptional("Offset", Symbol.CodeOffset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("Type", Symbol.Type);
+}
+
+template <> void SymbolRecordImpl<FileStaticSym>::map(IO &IO) {
+ IO.mapRequired("Index", Symbol.Index);
+ IO.mapRequired("ModFilenameOffset", Symbol.ModFilenameOffset);
+ IO.mapRequired("Flags", Symbol.Flags);
+ IO.mapRequired("Name", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<HeapAllocationSiteSym>::map(IO &IO) {
+ IO.mapOptional("Offset", Symbol.CodeOffset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("CallInstructionSize", Symbol.CallInstructionSize);
+ IO.mapRequired("Type", Symbol.Type);
+}
+
+template <> void SymbolRecordImpl<FrameCookieSym>::map(IO &IO) {
+ IO.mapRequired("Register", Symbol.Register);
+ IO.mapRequired("CookieKind", Symbol.CookieKind);
+ IO.mapRequired("Flags", Symbol.Flags);
+}
+
+template <> void SymbolRecordImpl<CallerSym>::map(IO &IO) {
+ IO.mapRequired("FuncID", Symbol.Indices);
+}
+
+template <> void SymbolRecordImpl<UDTSym>::map(IO &IO) {
+ IO.mapRequired("Type", Symbol.Type);
+ IO.mapRequired("UDTName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<BuildInfoSym>::map(IO &IO) {
+ IO.mapRequired("BuildId", Symbol.BuildId);
+}
+
+template <> void SymbolRecordImpl<BPRelativeSym>::map(IO &IO) {
+ IO.mapRequired("Offset", Symbol.Offset);
+ IO.mapRequired("Type", Symbol.Type);
+ IO.mapRequired("VarName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<RegRelativeSym>::map(IO &IO) {
+ IO.mapRequired("Offset", Symbol.Offset);
+ IO.mapRequired("Type", Symbol.Type);
+ IO.mapRequired("Register", Symbol.Register);
+ IO.mapRequired("VarName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<ConstantSym>::map(IO &IO) {
+ IO.mapRequired("Type", Symbol.Type);
+ IO.mapRequired("Value", Symbol.Value);
+ IO.mapRequired("Name", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<DataSym>::map(IO &IO) {
+ IO.mapRequired("Type", Symbol.Type);
+ IO.mapOptional("Offset", Symbol.DataOffset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("DisplayName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<ThreadLocalDataSym>::map(IO &IO) {
+ IO.mapRequired("Type", Symbol.Type);
+ IO.mapOptional("Offset", Symbol.DataOffset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("DisplayName", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<UsingNamespaceSym>::map(IO &IO) {
+ IO.mapRequired("Namespace", Symbol.Name);
+}
+
+template <> void SymbolRecordImpl<AnnotationSym>::map(IO &IO) {
+ IO.mapOptional("Offset", Symbol.CodeOffset, 0U);
+ IO.mapOptional("Segment", Symbol.Segment, uint16_t(0));
+ IO.mapRequired("Strings", Symbol.Strings);
+}
+
+} // end namespace detail
+} // end namespace CodeViewYAML
+} // end namespace llvm
+
+CVSymbol CodeViewYAML::SymbolRecord::toCodeViewSymbol(
+ BumpPtrAllocator &Allocator, CodeViewContainer Container) const {
+ return Symbol->toCodeViewSymbol(Allocator, Container);
+}
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<SymbolRecordBase> {
+ static void mapping(IO &io, SymbolRecordBase &Record) { Record.map(io); }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+template <typename SymbolType>
+static inline Expected<CodeViewYAML::SymbolRecord>
+fromCodeViewSymbolImpl(CVSymbol Symbol) {
+ CodeViewYAML::SymbolRecord Result;
+
+ auto Impl = std::make_shared<SymbolType>(Symbol.kind());
+ if (auto EC = Impl->fromCodeViewSymbol(Symbol))
+ return std::move(EC);
+ Result.Symbol = Impl;
+ return Result;
+}
+
+Expected<CodeViewYAML::SymbolRecord>
+CodeViewYAML::SymbolRecord::fromCodeViewSymbol(CVSymbol Symbol) {
+#define SYMBOL_RECORD(EnumName, EnumVal, ClassName) \
+ case EnumName: \
+ return fromCodeViewSymbolImpl<SymbolRecordImpl<ClassName>>(Symbol);
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, AliasName, ClassName) \
+ SYMBOL_RECORD(EnumName, EnumVal, ClassName)
+ switch (Symbol.kind()) {
+#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+ default:
+ return fromCodeViewSymbolImpl<UnknownSymbolRecord>(Symbol);
+ }
+ return make_error<CodeViewError>(cv_error_code::corrupt_record);
+}
+
+template <typename ConcreteType>
+static void mapSymbolRecordImpl(IO &IO, const char *Class, SymbolKind Kind,
+ CodeViewYAML::SymbolRecord &Obj) {
+ if (!IO.outputting())
+ Obj.Symbol = std::make_shared<ConcreteType>(Kind);
+
+ IO.mapRequired(Class, *Obj.Symbol);
+}
+
+void MappingTraits<CodeViewYAML::SymbolRecord>::mapping(
+ IO &IO, CodeViewYAML::SymbolRecord &Obj) {
+ SymbolKind Kind;
+ if (IO.outputting())
+ Kind = Obj.Symbol->Kind;
+ IO.mapRequired("Kind", Kind);
+
+#define SYMBOL_RECORD(EnumName, EnumVal, ClassName) \
+ case EnumName: \
+ mapSymbolRecordImpl<SymbolRecordImpl<ClassName>>(IO, #ClassName, Kind, \
+ Obj); \
+ break;
+#define SYMBOL_RECORD_ALIAS(EnumName, EnumVal, AliasName, ClassName) \
+ SYMBOL_RECORD(EnumName, EnumVal, ClassName)
+ switch (Kind) {
+#include "llvm/DebugInfo/CodeView/CodeViewSymbols.def"
+ default:
+ mapSymbolRecordImpl<UnknownSymbolRecord>(IO, "UnknownSym", Kind, Obj);
+ }
+}
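+
+// Illustrative YAML shape for one known record kind (the values are made
+// up); "Kind" drives the dispatch above, and the record class name keys the
+// nested map:
+//
+//   - Kind:            S_UDT
+//     UDTSym:
+//       Type:          4097
+//       UDTName:       'Foo'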
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLTypeHashing.cpp b/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLTypeHashing.cpp
new file mode 100644
index 00000000000..e921ae1e7d8
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLTypeHashing.cpp
@@ -0,0 +1,87 @@
+//===- CodeViewYAMLTypeHashing.cpp - CodeView YAMLIO type hashing ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of CodeView
+// Debug Info.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/CodeViewYAMLTypeHashing.h"
+
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+
+using namespace llvm;
+using namespace llvm::codeview;
+using namespace llvm::CodeViewYAML;
+using namespace llvm::yaml;
+
+namespace llvm {
+namespace yaml {
+
+void MappingTraits<DebugHSection>::mapping(IO &io, DebugHSection &DebugH) {
+ io.mapRequired("Version", DebugH.Version);
+ io.mapRequired("HashAlgorithm", DebugH.HashAlgorithm);
+ io.mapOptional("HashValues", DebugH.Hashes);
+}
+
+void ScalarTraits<GlobalHash>::output(const GlobalHash &GH, void *Ctx,
+ raw_ostream &OS) {
+ ScalarTraits<BinaryRef>::output(GH.Hash, Ctx, OS);
+}
+
+StringRef ScalarTraits<GlobalHash>::input(StringRef Scalar, void *Ctx,
+ GlobalHash &GH) {
+ return ScalarTraits<BinaryRef>::input(Scalar, Ctx, GH.Hash);
+}
+
+} // end namespace yaml
+} // end namespace llvm
+
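+// .debug$H layout: an 8-byte header (4-byte magic, 2-byte version, 2-byte
+// hash algorithm) followed by a flat sequence of 8-byte hash values.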
+DebugHSection llvm::CodeViewYAML::fromDebugH(ArrayRef<uint8_t> DebugH) {
+ assert(DebugH.size() >= 8);
+ assert((DebugH.size() - 8) % 8 == 0);
+
+ BinaryStreamReader Reader(DebugH, llvm::support::little);
+ DebugHSection DHS;
+ cantFail(Reader.readInteger(DHS.Magic));
+ cantFail(Reader.readInteger(DHS.Version));
+ cantFail(Reader.readInteger(DHS.HashAlgorithm));
+
+ while (Reader.bytesRemaining() != 0) {
+ ArrayRef<uint8_t> S;
+ cantFail(Reader.readBytes(S, 8));
+ DHS.Hashes.emplace_back(S);
+ }
+ assert(Reader.bytesRemaining() == 0);
+ return DHS;
+}
+
+ArrayRef<uint8_t> llvm::CodeViewYAML::toDebugH(const DebugHSection &DebugH,
+ BumpPtrAllocator &Alloc) {
+ uint32_t Size = 8 + 8 * DebugH.Hashes.size();
+ uint8_t *Data = Alloc.Allocate<uint8_t>(Size);
+ MutableArrayRef<uint8_t> Buffer(Data, Size);
+ BinaryStreamWriter Writer(Buffer, llvm::support::little);
+
+ cantFail(Writer.writeInteger(DebugH.Magic));
+ cantFail(Writer.writeInteger(DebugH.Version));
+ cantFail(Writer.writeInteger(DebugH.HashAlgorithm));
+ SmallString<8> Hash;
+ for (const auto &H : DebugH.Hashes) {
+ Hash.clear();
+ raw_svector_ostream OS(Hash);
+ H.Hash.writeAsBinary(OS);
+ assert((Hash.size() == 8) && "Invalid hash size!");
+ cantFail(Writer.writeFixedString(Hash));
+ }
+ assert(Writer.bytesRemaining() == 0);
+ return Buffer;
+}
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLTypes.cpp b/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLTypes.cpp
new file mode 100644
index 00000000000..a5e3ce1e71e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/CodeViewYAMLTypes.cpp
@@ -0,0 +1,817 @@
+//===- CodeViewYAMLTypes.cpp - CodeView YAMLIO types implementation -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of CodeView
+// Debug Info.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/CodeViewYAMLTypes.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/DebugInfo/CodeView/AppendingTypeTableBuilder.h"
+#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/CodeViewError.h"
+#include "llvm/DebugInfo/CodeView/ContinuationRecordBuilder.h"
+#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
+#include "llvm/DebugInfo/CodeView/TypeVisitorCallbacks.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <vector>
+
+using namespace llvm;
+using namespace llvm::codeview;
+using namespace llvm::CodeViewYAML;
+using namespace llvm::CodeViewYAML::detail;
+using namespace llvm::yaml;
+
+LLVM_YAML_IS_SEQUENCE_VECTOR(OneMethodRecord)
+LLVM_YAML_IS_SEQUENCE_VECTOR(VFTableSlotKind)
+LLVM_YAML_IS_FLOW_SEQUENCE_VECTOR(TypeIndex)
+
+LLVM_YAML_DECLARE_SCALAR_TRAITS(TypeIndex, QuotingType::None)
+LLVM_YAML_DECLARE_SCALAR_TRAITS(APSInt, QuotingType::None)
+
+LLVM_YAML_DECLARE_ENUM_TRAITS(TypeLeafKind)
+LLVM_YAML_DECLARE_ENUM_TRAITS(PointerToMemberRepresentation)
+LLVM_YAML_DECLARE_ENUM_TRAITS(VFTableSlotKind)
+LLVM_YAML_DECLARE_ENUM_TRAITS(CallingConvention)
+LLVM_YAML_DECLARE_ENUM_TRAITS(PointerKind)
+LLVM_YAML_DECLARE_ENUM_TRAITS(PointerMode)
+LLVM_YAML_DECLARE_ENUM_TRAITS(HfaKind)
+LLVM_YAML_DECLARE_ENUM_TRAITS(MemberAccess)
+LLVM_YAML_DECLARE_ENUM_TRAITS(MethodKind)
+LLVM_YAML_DECLARE_ENUM_TRAITS(WindowsRTClassKind)
+LLVM_YAML_DECLARE_ENUM_TRAITS(LabelType)
+
+LLVM_YAML_DECLARE_BITSET_TRAITS(PointerOptions)
+LLVM_YAML_DECLARE_BITSET_TRAITS(ModifierOptions)
+LLVM_YAML_DECLARE_BITSET_TRAITS(FunctionOptions)
+LLVM_YAML_DECLARE_BITSET_TRAITS(ClassOptions)
+LLVM_YAML_DECLARE_BITSET_TRAITS(MethodOptions)
+
+LLVM_YAML_DECLARE_MAPPING_TRAITS(OneMethodRecord)
+LLVM_YAML_DECLARE_MAPPING_TRAITS(MemberPointerInfo)
+
+namespace llvm {
+namespace CodeViewYAML {
+namespace detail {
+
+struct LeafRecordBase {
+ TypeLeafKind Kind;
+
+ explicit LeafRecordBase(TypeLeafKind K) : Kind(K) {}
+ virtual ~LeafRecordBase() = default;
+
+ virtual void map(yaml::IO &io) = 0;
+ virtual CVType toCodeViewRecord(AppendingTypeTableBuilder &TS) const = 0;
+ virtual Error fromCodeViewRecord(CVType Type) = 0;
+};
+
+template <typename T> struct LeafRecordImpl : public LeafRecordBase {
+ explicit LeafRecordImpl(TypeLeafKind K)
+ : LeafRecordBase(K), Record(static_cast<TypeRecordKind>(K)) {}
+
+ void map(yaml::IO &io) override;
+
+ Error fromCodeViewRecord(CVType Type) override {
+ return TypeDeserializer::deserializeAs<T>(Type, Record);
+ }
+
+ CVType toCodeViewRecord(AppendingTypeTableBuilder &TS) const override {
+ TS.writeLeafType(Record);
+ return CVType(TS.records().back());
+ }
+
+ mutable T Record;
+};
+
+template <> struct LeafRecordImpl<FieldListRecord> : public LeafRecordBase {
+ explicit LeafRecordImpl(TypeLeafKind K) : LeafRecordBase(K) {}
+
+ void map(yaml::IO &io) override;
+ CVType toCodeViewRecord(AppendingTypeTableBuilder &TS) const override;
+ Error fromCodeViewRecord(CVType Type) override;
+
+ std::vector<MemberRecord> Members;
+};
+
+struct MemberRecordBase {
+ TypeLeafKind Kind;
+
+ explicit MemberRecordBase(TypeLeafKind K) : Kind(K) {}
+ virtual ~MemberRecordBase() = default;
+
+ virtual void map(yaml::IO &io) = 0;
+ virtual void writeTo(ContinuationRecordBuilder &CRB) = 0;
+};
+
+template <typename T> struct MemberRecordImpl : public MemberRecordBase {
+ explicit MemberRecordImpl(TypeLeafKind K)
+ : MemberRecordBase(K), Record(static_cast<TypeRecordKind>(K)) {}
+
+ void map(yaml::IO &io) override;
+
+ void writeTo(ContinuationRecordBuilder &CRB) override {
+ CRB.writeMemberType(Record);
+ }
+
+ mutable T Record;
+};
+
+} // end namespace detail
+} // end namespace CodeViewYAML
+} // end namespace llvm
+
+void ScalarTraits<GUID>::output(const GUID &G, void *, llvm::raw_ostream &OS) {
+ OS << G;
+}
+
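+// Accepts the canonical 38-character registry form, e.g.
+// {01234567-89AB-CDEF-0123-456789ABCDEF}.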
+StringRef ScalarTraits<GUID>::input(StringRef Scalar, void *Ctx, GUID &S) {
+ if (Scalar.size() != 38)
+ return "GUID strings are 38 characters long";
+ if (Scalar[0] != '{' || Scalar[37] != '}')
+ return "GUID is not enclosed in {}";
+ if (Scalar[9] != '-' || Scalar[14] != '-' || Scalar[19] != '-' ||
+ Scalar[24] != '-')
+ return "GUID sections are not properly delineated with dashes";
+
+ uint8_t *OutBuffer = S.Guid;
+ for (auto Iter = Scalar.begin(); Iter != Scalar.end();) {
+ if (*Iter == '-' || *Iter == '{' || *Iter == '}') {
+ ++Iter;
+ continue;
+ }
+ uint8_t Value = (llvm::hexDigitValue(*Iter++) << 4);
+ Value |= llvm::hexDigitValue(*Iter++);
+ *OutBuffer++ = Value;
+ }
+
+ return "";
+}
+
+void ScalarTraits<TypeIndex>::output(const TypeIndex &S, void *,
+ raw_ostream &OS) {
+ OS << S.getIndex();
+}
+
+StringRef ScalarTraits<TypeIndex>::input(StringRef Scalar, void *Ctx,
+ TypeIndex &S) {
+ uint32_t I;
+ StringRef Result = ScalarTraits<uint32_t>::input(Scalar, Ctx, I);
+ S.setIndex(I);
+ return Result;
+}
+
+void ScalarTraits<APSInt>::output(const APSInt &S, void *, raw_ostream &OS) {
+ S.print(OS, S.isSigned());
+}
+
+StringRef ScalarTraits<APSInt>::input(StringRef Scalar, void *Ctx, APSInt &S) {
+ S = APSInt(Scalar);
+ return "";
+}
+
+void ScalarEnumerationTraits<TypeLeafKind>::enumeration(IO &io,
+ TypeLeafKind &Value) {
+#define CV_TYPE(name, val) io.enumCase(Value, #name, name);
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+#undef CV_TYPE
+}
+
+void ScalarEnumerationTraits<PointerToMemberRepresentation>::enumeration(
+ IO &IO, PointerToMemberRepresentation &Value) {
+ IO.enumCase(Value, "Unknown", PointerToMemberRepresentation::Unknown);
+ IO.enumCase(Value, "SingleInheritanceData",
+ PointerToMemberRepresentation::SingleInheritanceData);
+ IO.enumCase(Value, "MultipleInheritanceData",
+ PointerToMemberRepresentation::MultipleInheritanceData);
+ IO.enumCase(Value, "VirtualInheritanceData",
+ PointerToMemberRepresentation::VirtualInheritanceData);
+ IO.enumCase(Value, "GeneralData", PointerToMemberRepresentation::GeneralData);
+ IO.enumCase(Value, "SingleInheritanceFunction",
+ PointerToMemberRepresentation::SingleInheritanceFunction);
+ IO.enumCase(Value, "MultipleInheritanceFunction",
+ PointerToMemberRepresentation::MultipleInheritanceFunction);
+ IO.enumCase(Value, "VirtualInheritanceFunction",
+ PointerToMemberRepresentation::VirtualInheritanceFunction);
+ IO.enumCase(Value, "GeneralFunction",
+ PointerToMemberRepresentation::GeneralFunction);
+}
+
+void ScalarEnumerationTraits<VFTableSlotKind>::enumeration(
+ IO &IO, VFTableSlotKind &Kind) {
+ IO.enumCase(Kind, "Near16", VFTableSlotKind::Near16);
+ IO.enumCase(Kind, "Far16", VFTableSlotKind::Far16);
+ IO.enumCase(Kind, "This", VFTableSlotKind::This);
+ IO.enumCase(Kind, "Outer", VFTableSlotKind::Outer);
+ IO.enumCase(Kind, "Meta", VFTableSlotKind::Meta);
+ IO.enumCase(Kind, "Near", VFTableSlotKind::Near);
+ IO.enumCase(Kind, "Far", VFTableSlotKind::Far);
+}
+
+void ScalarEnumerationTraits<CallingConvention>::enumeration(
+ IO &IO, CallingConvention &Value) {
+ IO.enumCase(Value, "NearC", CallingConvention::NearC);
+ IO.enumCase(Value, "FarC", CallingConvention::FarC);
+ IO.enumCase(Value, "NearPascal", CallingConvention::NearPascal);
+ IO.enumCase(Value, "FarPascal", CallingConvention::FarPascal);
+ IO.enumCase(Value, "NearFast", CallingConvention::NearFast);
+ IO.enumCase(Value, "FarFast", CallingConvention::FarFast);
+ IO.enumCase(Value, "NearStdCall", CallingConvention::NearStdCall);
+ IO.enumCase(Value, "FarStdCall", CallingConvention::FarStdCall);
+ IO.enumCase(Value, "NearSysCall", CallingConvention::NearSysCall);
+ IO.enumCase(Value, "FarSysCall", CallingConvention::FarSysCall);
+ IO.enumCase(Value, "ThisCall", CallingConvention::ThisCall);
+ IO.enumCase(Value, "MipsCall", CallingConvention::MipsCall);
+ IO.enumCase(Value, "Generic", CallingConvention::Generic);
+ IO.enumCase(Value, "AlphaCall", CallingConvention::AlphaCall);
+ IO.enumCase(Value, "PpcCall", CallingConvention::PpcCall);
+ IO.enumCase(Value, "SHCall", CallingConvention::SHCall);
+ IO.enumCase(Value, "ArmCall", CallingConvention::ArmCall);
+ IO.enumCase(Value, "AM33Call", CallingConvention::AM33Call);
+ IO.enumCase(Value, "TriCall", CallingConvention::TriCall);
+ IO.enumCase(Value, "SH5Call", CallingConvention::SH5Call);
+ IO.enumCase(Value, "M32RCall", CallingConvention::M32RCall);
+ IO.enumCase(Value, "ClrCall", CallingConvention::ClrCall);
+ IO.enumCase(Value, "Inline", CallingConvention::Inline);
+ IO.enumCase(Value, "NearVector", CallingConvention::NearVector);
+}
+
+void ScalarEnumerationTraits<PointerKind>::enumeration(IO &IO,
+ PointerKind &Kind) {
+ IO.enumCase(Kind, "Near16", PointerKind::Near16);
+ IO.enumCase(Kind, "Far16", PointerKind::Far16);
+ IO.enumCase(Kind, "Huge16", PointerKind::Huge16);
+ IO.enumCase(Kind, "BasedOnSegment", PointerKind::BasedOnSegment);
+ IO.enumCase(Kind, "BasedOnValue", PointerKind::BasedOnValue);
+ IO.enumCase(Kind, "BasedOnSegmentValue", PointerKind::BasedOnSegmentValue);
+ IO.enumCase(Kind, "BasedOnAddress", PointerKind::BasedOnAddress);
+ IO.enumCase(Kind, "BasedOnSegmentAddress",
+ PointerKind::BasedOnSegmentAddress);
+ IO.enumCase(Kind, "BasedOnType", PointerKind::BasedOnType);
+ IO.enumCase(Kind, "BasedOnSelf", PointerKind::BasedOnSelf);
+ IO.enumCase(Kind, "Near32", PointerKind::Near32);
+ IO.enumCase(Kind, "Far32", PointerKind::Far32);
+ IO.enumCase(Kind, "Near64", PointerKind::Near64);
+}
+
+void ScalarEnumerationTraits<PointerMode>::enumeration(IO &IO,
+ PointerMode &Mode) {
+ IO.enumCase(Mode, "Pointer", PointerMode::Pointer);
+ IO.enumCase(Mode, "LValueReference", PointerMode::LValueReference);
+ IO.enumCase(Mode, "PointerToDataMember", PointerMode::PointerToDataMember);
+ IO.enumCase(Mode, "PointerToMemberFunction",
+ PointerMode::PointerToMemberFunction);
+ IO.enumCase(Mode, "RValueReference", PointerMode::RValueReference);
+}
+
+void ScalarEnumerationTraits<HfaKind>::enumeration(IO &IO, HfaKind &Value) {
+ IO.enumCase(Value, "None", HfaKind::None);
+ IO.enumCase(Value, "Float", HfaKind::Float);
+ IO.enumCase(Value, "Double", HfaKind::Double);
+ IO.enumCase(Value, "Other", HfaKind::Other);
+}
+
+void ScalarEnumerationTraits<MemberAccess>::enumeration(IO &IO,
+ MemberAccess &Access) {
+ IO.enumCase(Access, "None", MemberAccess::None);
+ IO.enumCase(Access, "Private", MemberAccess::Private);
+ IO.enumCase(Access, "Protected", MemberAccess::Protected);
+ IO.enumCase(Access, "Public", MemberAccess::Public);
+}
+
+void ScalarEnumerationTraits<MethodKind>::enumeration(IO &IO,
+ MethodKind &Kind) {
+ IO.enumCase(Kind, "Vanilla", MethodKind::Vanilla);
+ IO.enumCase(Kind, "Virtual", MethodKind::Virtual);
+ IO.enumCase(Kind, "Static", MethodKind::Static);
+ IO.enumCase(Kind, "Friend", MethodKind::Friend);
+ IO.enumCase(Kind, "IntroducingVirtual", MethodKind::IntroducingVirtual);
+ IO.enumCase(Kind, "PureVirtual", MethodKind::PureVirtual);
+ IO.enumCase(Kind, "PureIntroducingVirtual",
+ MethodKind::PureIntroducingVirtual);
+}
+
+void ScalarEnumerationTraits<WindowsRTClassKind>::enumeration(
+ IO &IO, WindowsRTClassKind &Value) {
+ IO.enumCase(Value, "None", WindowsRTClassKind::None);
+ IO.enumCase(Value, "Ref", WindowsRTClassKind::RefClass);
+ IO.enumCase(Value, "Value", WindowsRTClassKind::ValueClass);
+ IO.enumCase(Value, "Interface", WindowsRTClassKind::Interface);
+}
+
+void ScalarEnumerationTraits<LabelType>::enumeration(IO &IO, LabelType &Value) {
+ IO.enumCase(Value, "Near", LabelType::Near);
+ IO.enumCase(Value, "Far", LabelType::Far);
+}
+
+void ScalarBitSetTraits<PointerOptions>::bitset(IO &IO,
+ PointerOptions &Options) {
+ IO.bitSetCase(Options, "None", PointerOptions::None);
+ IO.bitSetCase(Options, "Flat32", PointerOptions::Flat32);
+ IO.bitSetCase(Options, "Volatile", PointerOptions::Volatile);
+ IO.bitSetCase(Options, "Const", PointerOptions::Const);
+ IO.bitSetCase(Options, "Unaligned", PointerOptions::Unaligned);
+ IO.bitSetCase(Options, "Restrict", PointerOptions::Restrict);
+ IO.bitSetCase(Options, "WinRTSmartPointer",
+ PointerOptions::WinRTSmartPointer);
+}
+
+void ScalarBitSetTraits<ModifierOptions>::bitset(IO &IO,
+ ModifierOptions &Options) {
+ IO.bitSetCase(Options, "None", ModifierOptions::None);
+ IO.bitSetCase(Options, "Const", ModifierOptions::Const);
+ IO.bitSetCase(Options, "Volatile", ModifierOptions::Volatile);
+ IO.bitSetCase(Options, "Unaligned", ModifierOptions::Unaligned);
+}
+
+void ScalarBitSetTraits<FunctionOptions>::bitset(IO &IO,
+ FunctionOptions &Options) {
+ IO.bitSetCase(Options, "None", FunctionOptions::None);
+ IO.bitSetCase(Options, "CxxReturnUdt", FunctionOptions::CxxReturnUdt);
+ IO.bitSetCase(Options, "Constructor", FunctionOptions::Constructor);
+ IO.bitSetCase(Options, "ConstructorWithVirtualBases",
+ FunctionOptions::ConstructorWithVirtualBases);
+}
+
+void ScalarBitSetTraits<ClassOptions>::bitset(IO &IO, ClassOptions &Options) {
+ IO.bitSetCase(Options, "None", ClassOptions::None);
+ IO.bitSetCase(Options, "HasConstructorOrDestructor",
+ ClassOptions::HasConstructorOrDestructor);
+ IO.bitSetCase(Options, "HasOverloadedOperator",
+ ClassOptions::HasOverloadedOperator);
+ IO.bitSetCase(Options, "Nested", ClassOptions::Nested);
+ IO.bitSetCase(Options, "ContainsNestedClass",
+ ClassOptions::ContainsNestedClass);
+ IO.bitSetCase(Options, "HasOverloadedAssignmentOperator",
+ ClassOptions::HasOverloadedAssignmentOperator);
+ IO.bitSetCase(Options, "HasConversionOperator",
+ ClassOptions::HasConversionOperator);
+ IO.bitSetCase(Options, "ForwardReference", ClassOptions::ForwardReference);
+ IO.bitSetCase(Options, "Scoped", ClassOptions::Scoped);
+ IO.bitSetCase(Options, "HasUniqueName", ClassOptions::HasUniqueName);
+ IO.bitSetCase(Options, "Sealed", ClassOptions::Sealed);
+ IO.bitSetCase(Options, "Intrinsic", ClassOptions::Intrinsic);
+}
+
+void ScalarBitSetTraits<MethodOptions>::bitset(IO &IO, MethodOptions &Options) {
+ IO.bitSetCase(Options, "None", MethodOptions::None);
+ IO.bitSetCase(Options, "Pseudo", MethodOptions::Pseudo);
+ IO.bitSetCase(Options, "NoInherit", MethodOptions::NoInherit);
+ IO.bitSetCase(Options, "NoConstruct", MethodOptions::NoConstruct);
+ IO.bitSetCase(Options, "CompilerGenerated", MethodOptions::CompilerGenerated);
+ IO.bitSetCase(Options, "Sealed", MethodOptions::Sealed);
+}
+
+void MappingTraits<MemberPointerInfo>::mapping(IO &IO, MemberPointerInfo &MPI) {
+ IO.mapRequired("ContainingType", MPI.ContainingType);
+ IO.mapRequired("Representation", MPI.Representation);
+}
+
+namespace llvm {
+namespace CodeViewYAML {
+namespace detail {
+
+template <> void LeafRecordImpl<ModifierRecord>::map(IO &IO) {
+ IO.mapRequired("ModifiedType", Record.ModifiedType);
+ IO.mapRequired("Modifiers", Record.Modifiers);
+}
+
+template <> void LeafRecordImpl<ProcedureRecord>::map(IO &IO) {
+ IO.mapRequired("ReturnType", Record.ReturnType);
+ IO.mapRequired("CallConv", Record.CallConv);
+ IO.mapRequired("Options", Record.Options);
+ IO.mapRequired("ParameterCount", Record.ParameterCount);
+ IO.mapRequired("ArgumentList", Record.ArgumentList);
+}
+
+template <> void LeafRecordImpl<MemberFunctionRecord>::map(IO &IO) {
+ IO.mapRequired("ReturnType", Record.ReturnType);
+ IO.mapRequired("ClassType", Record.ClassType);
+ IO.mapRequired("ThisType", Record.ThisType);
+ IO.mapRequired("CallConv", Record.CallConv);
+ IO.mapRequired("Options", Record.Options);
+ IO.mapRequired("ParameterCount", Record.ParameterCount);
+ IO.mapRequired("ArgumentList", Record.ArgumentList);
+ IO.mapRequired("ThisPointerAdjustment", Record.ThisPointerAdjustment);
+}
+
+template <> void LeafRecordImpl<LabelRecord>::map(IO &IO) {
+ IO.mapRequired("Mode", Record.Mode);
+}
+
+template <> void LeafRecordImpl<MemberFuncIdRecord>::map(IO &IO) {
+ IO.mapRequired("ClassType", Record.ClassType);
+ IO.mapRequired("FunctionType", Record.FunctionType);
+ IO.mapRequired("Name", Record.Name);
+}
+
+template <> void LeafRecordImpl<ArgListRecord>::map(IO &IO) {
+ IO.mapRequired("ArgIndices", Record.ArgIndices);
+}
+
+template <> void LeafRecordImpl<StringListRecord>::map(IO &IO) {
+ IO.mapRequired("StringIndices", Record.StringIndices);
+}
+
+template <> void LeafRecordImpl<PointerRecord>::map(IO &IO) {
+ IO.mapRequired("ReferentType", Record.ReferentType);
+ IO.mapRequired("Attrs", Record.Attrs);
+ IO.mapOptional("MemberInfo", Record.MemberInfo);
+}
+
+template <> void LeafRecordImpl<ArrayRecord>::map(IO &IO) {
+ IO.mapRequired("ElementType", Record.ElementType);
+ IO.mapRequired("IndexType", Record.IndexType);
+ IO.mapRequired("Size", Record.Size);
+ IO.mapRequired("Name", Record.Name);
+}
+
+void LeafRecordImpl<FieldListRecord>::map(IO &IO) {
+ IO.mapRequired("FieldList", Members);
+}
+
+} // end namespace detail
+} // end namespace CodeViewYAML
+} // end namespace llvm
+
+namespace {
+
+class MemberRecordConversionVisitor : public TypeVisitorCallbacks {
+public:
+ explicit MemberRecordConversionVisitor(std::vector<MemberRecord> &Records)
+ : Records(Records) {}
+
+#define TYPE_RECORD(EnumName, EnumVal, Name)
+#define MEMBER_RECORD(EnumName, EnumVal, Name) \
+ Error visitKnownMember(CVMemberRecord &CVR, Name##Record &Record) override { \
+ return visitKnownMemberImpl(Record); \
+ }
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, Name, AliasName)
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+private:
+ template <typename T> Error visitKnownMemberImpl(T &Record) {
+ TypeLeafKind K = static_cast<TypeLeafKind>(Record.getKind());
+ auto Impl = std::make_shared<MemberRecordImpl<T>>(K);
+ Impl->Record = Record;
+ Records.push_back(MemberRecord{Impl});
+ return Error::success();
+ }
+
+ std::vector<MemberRecord> &Records;
+};
+
+} // end anonymous namespace
+
+Error LeafRecordImpl<FieldListRecord>::fromCodeViewRecord(CVType Type) {
+ MemberRecordConversionVisitor V(Members);
+ return visitMemberRecordStream(Type.content(), V);
+}
+
+CVType LeafRecordImpl<FieldListRecord>::toCodeViewRecord(
+ AppendingTypeTableBuilder &TS) const {
+ ContinuationRecordBuilder CRB;
+ CRB.begin(ContinuationRecordKind::FieldList);
+ for (const auto &Member : Members) {
+ Member.Member->writeTo(CRB);
+ }
+ TS.insertRecord(CRB);
+ return CVType(TS.records().back());
+}
+
+void MappingTraits<OneMethodRecord>::mapping(IO &io, OneMethodRecord &Record) {
+ io.mapRequired("Type", Record.Type);
+ io.mapRequired("Attrs", Record.Attrs.Attrs);
+ io.mapRequired("VFTableOffset", Record.VFTableOffset);
+ io.mapRequired("Name", Record.Name);
+}
+
+namespace llvm {
+namespace CodeViewYAML {
+namespace detail {
+
+template <> void LeafRecordImpl<ClassRecord>::map(IO &IO) {
+ IO.mapRequired("MemberCount", Record.MemberCount);
+ IO.mapRequired("Options", Record.Options);
+ IO.mapRequired("FieldList", Record.FieldList);
+ IO.mapRequired("Name", Record.Name);
+ IO.mapRequired("UniqueName", Record.UniqueName);
+ IO.mapRequired("DerivationList", Record.DerivationList);
+ IO.mapRequired("VTableShape", Record.VTableShape);
+ IO.mapRequired("Size", Record.Size);
+}
+
+template <> void LeafRecordImpl<UnionRecord>::map(IO &IO) {
+ IO.mapRequired("MemberCount", Record.MemberCount);
+ IO.mapRequired("Options", Record.Options);
+ IO.mapRequired("FieldList", Record.FieldList);
+ IO.mapRequired("Name", Record.Name);
+ IO.mapRequired("UniqueName", Record.UniqueName);
+ IO.mapRequired("Size", Record.Size);
+}
+
+template <> void LeafRecordImpl<EnumRecord>::map(IO &IO) {
+ IO.mapRequired("NumEnumerators", Record.MemberCount);
+ IO.mapRequired("Options", Record.Options);
+ IO.mapRequired("FieldList", Record.FieldList);
+ IO.mapRequired("Name", Record.Name);
+ IO.mapRequired("UniqueName", Record.UniqueName);
+ IO.mapRequired("UnderlyingType", Record.UnderlyingType);
+}
+
+template <> void LeafRecordImpl<BitFieldRecord>::map(IO &IO) {
+ IO.mapRequired("Type", Record.Type);
+ IO.mapRequired("BitSize", Record.BitSize);
+ IO.mapRequired("BitOffset", Record.BitOffset);
+}
+
+template <> void LeafRecordImpl<VFTableShapeRecord>::map(IO &IO) {
+ IO.mapRequired("Slots", Record.Slots);
+}
+
+template <> void LeafRecordImpl<TypeServer2Record>::map(IO &IO) {
+ IO.mapRequired("Guid", Record.Guid);
+ IO.mapRequired("Age", Record.Age);
+ IO.mapRequired("Name", Record.Name);
+}
+
+template <> void LeafRecordImpl<StringIdRecord>::map(IO &IO) {
+ IO.mapRequired("Id", Record.Id);
+ IO.mapRequired("String", Record.String);
+}
+
+template <> void LeafRecordImpl<FuncIdRecord>::map(IO &IO) {
+ IO.mapRequired("ParentScope", Record.ParentScope);
+ IO.mapRequired("FunctionType", Record.FunctionType);
+ IO.mapRequired("Name", Record.Name);
+}
+
+template <> void LeafRecordImpl<UdtSourceLineRecord>::map(IO &IO) {
+ IO.mapRequired("UDT", Record.UDT);
+ IO.mapRequired("SourceFile", Record.SourceFile);
+ IO.mapRequired("LineNumber", Record.LineNumber);
+}
+
+template <> void LeafRecordImpl<UdtModSourceLineRecord>::map(IO &IO) {
+ IO.mapRequired("UDT", Record.UDT);
+ IO.mapRequired("SourceFile", Record.SourceFile);
+ IO.mapRequired("LineNumber", Record.LineNumber);
+ IO.mapRequired("Module", Record.Module);
+}
+
+template <> void LeafRecordImpl<BuildInfoRecord>::map(IO &IO) {
+ IO.mapRequired("ArgIndices", Record.ArgIndices);
+}
+
+template <> void LeafRecordImpl<VFTableRecord>::map(IO &IO) {
+ IO.mapRequired("CompleteClass", Record.CompleteClass);
+ IO.mapRequired("OverriddenVFTable", Record.OverriddenVFTable);
+ IO.mapRequired("VFPtrOffset", Record.VFPtrOffset);
+ IO.mapRequired("MethodNames", Record.MethodNames);
+}
+
+template <> void LeafRecordImpl<MethodOverloadListRecord>::map(IO &IO) {
+ IO.mapRequired("Methods", Record.Methods);
+}
+
+template <> void LeafRecordImpl<PrecompRecord>::map(IO &IO) {
+ IO.mapRequired("StartTypeIndex", Record.StartTypeIndex);
+ IO.mapRequired("TypesCount", Record.TypesCount);
+ IO.mapRequired("Signature", Record.Signature);
+ IO.mapRequired("PrecompFilePath", Record.PrecompFilePath);
+}
+
+template <> void LeafRecordImpl<EndPrecompRecord>::map(IO &IO) {
+ IO.mapRequired("Signature", Record.Signature);
+}
+
+template <> void MemberRecordImpl<OneMethodRecord>::map(IO &IO) {
+ MappingTraits<OneMethodRecord>::mapping(IO, Record);
+}
+
+template <> void MemberRecordImpl<OverloadedMethodRecord>::map(IO &IO) {
+ IO.mapRequired("NumOverloads", Record.NumOverloads);
+ IO.mapRequired("MethodList", Record.MethodList);
+ IO.mapRequired("Name", Record.Name);
+}
+
+template <> void MemberRecordImpl<NestedTypeRecord>::map(IO &IO) {
+ IO.mapRequired("Type", Record.Type);
+ IO.mapRequired("Name", Record.Name);
+}
+
+template <> void MemberRecordImpl<DataMemberRecord>::map(IO &IO) {
+ IO.mapRequired("Attrs", Record.Attrs.Attrs);
+ IO.mapRequired("Type", Record.Type);
+ IO.mapRequired("FieldOffset", Record.FieldOffset);
+ IO.mapRequired("Name", Record.Name);
+}
+
+template <> void MemberRecordImpl<StaticDataMemberRecord>::map(IO &IO) {
+ IO.mapRequired("Attrs", Record.Attrs.Attrs);
+ IO.mapRequired("Type", Record.Type);
+ IO.mapRequired("Name", Record.Name);
+}
+
+template <> void MemberRecordImpl<EnumeratorRecord>::map(IO &IO) {
+ IO.mapRequired("Attrs", Record.Attrs.Attrs);
+ IO.mapRequired("Value", Record.Value);
+ IO.mapRequired("Name", Record.Name);
+}
+
+template <> void MemberRecordImpl<VFPtrRecord>::map(IO &IO) {
+ IO.mapRequired("Type", Record.Type);
+}
+
+template <> void MemberRecordImpl<BaseClassRecord>::map(IO &IO) {
+ IO.mapRequired("Attrs", Record.Attrs.Attrs);
+ IO.mapRequired("Type", Record.Type);
+ IO.mapRequired("Offset", Record.Offset);
+}
+
+template <> void MemberRecordImpl<VirtualBaseClassRecord>::map(IO &IO) {
+ IO.mapRequired("Attrs", Record.Attrs.Attrs);
+ IO.mapRequired("BaseType", Record.BaseType);
+ IO.mapRequired("VBPtrType", Record.VBPtrType);
+ IO.mapRequired("VBPtrOffset", Record.VBPtrOffset);
+ IO.mapRequired("VTableIndex", Record.VTableIndex);
+}
+
+template <> void MemberRecordImpl<ListContinuationRecord>::map(IO &IO) {
+ IO.mapRequired("ContinuationIndex", Record.ContinuationIndex);
+}
+
+} // end namespace detail
+} // end namespace CodeViewYAML
+} // end namespace llvm
+
+template <typename T>
+static inline Expected<LeafRecord> fromCodeViewRecordImpl(CVType Type) {
+ LeafRecord Result;
+
+ auto Impl = std::make_shared<LeafRecordImpl<T>>(Type.kind());
+ if (auto EC = Impl->fromCodeViewRecord(Type))
+ return std::move(EC);
+ Result.Leaf = Impl;
+ return Result;
+}
+
+Expected<LeafRecord> LeafRecord::fromCodeViewRecord(CVType Type) {
+#define TYPE_RECORD(EnumName, EnumVal, ClassName) \
+ case EnumName: \
+ return fromCodeViewRecordImpl<ClassName##Record>(Type);
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, AliasName, ClassName) \
+ TYPE_RECORD(EnumName, EnumVal, ClassName)
+#define MEMBER_RECORD(EnumName, EnumVal, ClassName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, AliasName, ClassName)
+ switch (Type.kind()) {
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+ default:
+ llvm_unreachable("Unknown leaf kind!");
+ }
+ return make_error<CodeViewError>(cv_error_code::corrupt_record);
+}
+
+CVType
+LeafRecord::toCodeViewRecord(AppendingTypeTableBuilder &Serializer) const {
+ return Leaf->toCodeViewRecord(Serializer);
+}
+
+namespace llvm {
+namespace yaml {
+
+template <> struct MappingTraits<LeafRecordBase> {
+ static void mapping(IO &io, LeafRecordBase &Record) { Record.map(io); }
+};
+
+template <> struct MappingTraits<MemberRecordBase> {
+ static void mapping(IO &io, MemberRecordBase &Record) { Record.map(io); }
+};
+
+} // end namespace yaml
+} // end namespace llvm
+
+template <typename ConcreteType>
+static void mapLeafRecordImpl(IO &IO, const char *Class, TypeLeafKind Kind,
+ LeafRecord &Obj) {
+ if (!IO.outputting())
+ Obj.Leaf = std::make_shared<LeafRecordImpl<ConcreteType>>(Kind);
+
+ if (Kind == LF_FIELDLIST)
+ Obj.Leaf->map(IO);
+ else
+ IO.mapRequired(Class, *Obj.Leaf);
+}
+
+void MappingTraits<LeafRecord>::mapping(IO &IO, LeafRecord &Obj) {
+ TypeLeafKind Kind;
+ if (IO.outputting())
+ Kind = Obj.Leaf->Kind;
+ IO.mapRequired("Kind", Kind);
+
+#define TYPE_RECORD(EnumName, EnumVal, ClassName) \
+ case EnumName: \
+ mapLeafRecordImpl<ClassName##Record>(IO, #ClassName, Kind, Obj); \
+ break;
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, AliasName, ClassName) \
+ TYPE_RECORD(EnumName, EnumVal, ClassName)
+#define MEMBER_RECORD(EnumName, EnumVal, ClassName)
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, AliasName, ClassName)
+ switch (Kind) {
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+  default:
+    llvm_unreachable("Unknown leaf kind!");
+ }
+}
+
+template <typename ConcreteType>
+static void mapMemberRecordImpl(IO &IO, const char *Class, TypeLeafKind Kind,
+ MemberRecord &Obj) {
+ if (!IO.outputting())
+ Obj.Member = std::make_shared<MemberRecordImpl<ConcreteType>>(Kind);
+
+ IO.mapRequired(Class, *Obj.Member);
+}
+
+void MappingTraits<MemberRecord>::mapping(IO &IO, MemberRecord &Obj) {
+ TypeLeafKind Kind;
+ if (IO.outputting())
+ Kind = Obj.Member->Kind;
+ IO.mapRequired("Kind", Kind);
+
+#define MEMBER_RECORD(EnumName, EnumVal, ClassName) \
+ case EnumName: \
+ mapMemberRecordImpl<ClassName##Record>(IO, #ClassName, Kind, Obj); \
+ break;
+#define MEMBER_RECORD_ALIAS(EnumName, EnumVal, AliasName, ClassName) \
+ MEMBER_RECORD(EnumName, EnumVal, ClassName)
+#define TYPE_RECORD(EnumName, EnumVal, ClassName)
+#define TYPE_RECORD_ALIAS(EnumName, EnumVal, AliasName, ClassName)
+ switch (Kind) {
+#include "llvm/DebugInfo/CodeView/CodeViewTypes.def"
+  default:
+    llvm_unreachable("Unknown member kind!");
+ }
+}
+
+std::vector<LeafRecord>
+llvm::CodeViewYAML::fromDebugT(ArrayRef<uint8_t> DebugTorP,
+ StringRef SectionName) {
+ ExitOnError Err("Invalid " + std::string(SectionName) + " section!");
+ BinaryStreamReader Reader(DebugTorP, support::little);
+ CVTypeArray Types;
+ uint32_t Magic;
+
+ Err(Reader.readInteger(Magic));
+ assert(Magic == COFF::DEBUG_SECTION_MAGIC &&
+ "Invalid .debug$T or .debug$P section!");
+
+ std::vector<LeafRecord> Result;
+ Err(Reader.readArray(Types, Reader.bytesRemaining()));
+ for (const auto &T : Types) {
+ auto CVT = Err(LeafRecord::fromCodeViewRecord(T));
+ Result.push_back(CVT);
+ }
+ return Result;
+}
+
+ArrayRef<uint8_t> llvm::CodeViewYAML::toDebugT(ArrayRef<LeafRecord> Leafs,
+ BumpPtrAllocator &Alloc,
+ StringRef SectionName) {
+ AppendingTypeTableBuilder TS(Alloc);
+ uint32_t Size = sizeof(uint32_t);
+ for (const auto &Leaf : Leafs) {
+ CVType T = Leaf.Leaf->toCodeViewRecord(TS);
+ Size += T.length();
+ assert(T.length() % 4 == 0 && "Improper type record alignment!");
+ }
+ uint8_t *ResultBuffer = Alloc.Allocate<uint8_t>(Size);
+ MutableArrayRef<uint8_t> Output(ResultBuffer, Size);
+ BinaryStreamWriter Writer(Output, support::little);
+ ExitOnError Err("Error writing type record to " + std::string(SectionName) +
+ " section");
+ Err(Writer.writeInteger<uint32_t>(COFF::DEBUG_SECTION_MAGIC));
+ for (const auto &R : TS.records()) {
+ Err(Writer.writeBytes(R));
+ }
+ assert(Writer.bytesRemaining() == 0 && "Didn't write all type record bytes!");
+ return Output;
+}
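+
+// Usage sketch (illustrative only, not part of the upstream file): round-trip
+// a .debug$T payload through the YAML leaf representation. `Payload` is
+// assumed to be a well-formed .debug$T section body that starts with the
+// 4-byte magic; the helper name is hypothetical.
+static ArrayRef<uint8_t> roundTripDebugT(ArrayRef<uint8_t> Payload,
+                                         BumpPtrAllocator &Alloc) {
+  // Parse the binary records into their YAML-friendly form...
+  std::vector<LeafRecord> Leafs =
+      llvm::CodeViewYAML::fromDebugT(Payload, ".debug$T");
+  // ...and serialize them back to an equivalent section body.
+  return llvm::CodeViewYAML::toDebugT(Leafs, Alloc, ".debug$T");
+}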
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/DWARFEmitter.cpp b/contrib/libs/llvm12/lib/ObjectYAML/DWARFEmitter.cpp
new file mode 100644
index 00000000000..eec733c7d7f
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/DWARFEmitter.cpp
@@ -0,0 +1,1080 @@
+//===- DWARFEmitter - Convert YAML to DWARF binary data -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// The DWARF component of yaml2obj. Provided as library code for tests.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/DWARFEmitter.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/ObjectYAML/DWARFYAML.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <string>
+#include <vector>
+
+using namespace llvm;
+
+template <typename T>
+static void writeInteger(T Integer, raw_ostream &OS, bool IsLittleEndian) {
+ if (IsLittleEndian != sys::IsLittleEndianHost)
+ sys::swapByteOrder(Integer);
+ OS.write(reinterpret_cast<char *>(&Integer), sizeof(T));
+}
+
+static Error writeVariableSizedInteger(uint64_t Integer, size_t Size,
+ raw_ostream &OS, bool IsLittleEndian) {
+ if (8 == Size)
+ writeInteger((uint64_t)Integer, OS, IsLittleEndian);
+ else if (4 == Size)
+ writeInteger((uint32_t)Integer, OS, IsLittleEndian);
+ else if (2 == Size)
+ writeInteger((uint16_t)Integer, OS, IsLittleEndian);
+ else if (1 == Size)
+ writeInteger((uint8_t)Integer, OS, IsLittleEndian);
+ else
+ return createStringError(errc::not_supported,
+ "invalid integer write size: %zu", Size);
+
+ return Error::success();
+}
+
+static void ZeroFillBytes(raw_ostream &OS, size_t Size) {
+ std::vector<uint8_t> FillData(Size, 0);
+ OS.write(reinterpret_cast<char *>(FillData.data()), Size);
+}
+
+static void writeInitialLength(const dwarf::DwarfFormat Format,
+ const uint64_t Length, raw_ostream &OS,
+ bool IsLittleEndian) {
+ bool IsDWARF64 = Format == dwarf::DWARF64;
+ if (IsDWARF64)
+ cantFail(writeVariableSizedInteger(dwarf::DW_LENGTH_DWARF64, 4, OS,
+ IsLittleEndian));
+ cantFail(
+ writeVariableSizedInteger(Length, IsDWARF64 ? 8 : 4, OS, IsLittleEndian));
+}
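+
+// Minimal sketch (illustrative only, not part of the upstream file): the
+// DWARF64 escape described above in action. The helper name is hypothetical.
+static std::string exampleDWARF64InitialLength() {
+  std::string Bytes;
+  raw_string_ostream OS(Bytes);
+  // Emits ff ff ff ff (DW_LENGTH_DWARF64) followed by the 64-bit length 0x20.
+  writeInitialLength(dwarf::DWARF64, /*Length=*/0x20, OS,
+                     /*IsLittleEndian=*/true);
+  return OS.str();
+}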
+
+static void writeDWARFOffset(uint64_t Offset, dwarf::DwarfFormat Format,
+ raw_ostream &OS, bool IsLittleEndian) {
+ cantFail(writeVariableSizedInteger(Offset, Format == dwarf::DWARF64 ? 8 : 4,
+ OS, IsLittleEndian));
+}
+
+Error DWARFYAML::emitDebugStr(raw_ostream &OS, const DWARFYAML::Data &DI) {
+ for (StringRef Str : *DI.DebugStrings) {
+ OS.write(Str.data(), Str.size());
+ OS.write('\0');
+ }
+
+ return Error::success();
+}
+
+StringRef DWARFYAML::Data::getAbbrevTableContentByIndex(uint64_t Index) const {
+ assert(Index < DebugAbbrev.size() &&
+ "Index should be less than the size of DebugAbbrev array");
+ auto It = AbbrevTableContents.find(Index);
+ if (It != AbbrevTableContents.cend())
+ return It->second;
+
+ std::string AbbrevTableBuffer;
+ raw_string_ostream OS(AbbrevTableBuffer);
+
+ uint64_t AbbrevCode = 0;
+ for (const DWARFYAML::Abbrev &AbbrevDecl : DebugAbbrev[Index].Table) {
+ AbbrevCode = AbbrevDecl.Code ? (uint64_t)*AbbrevDecl.Code : AbbrevCode + 1;
+ encodeULEB128(AbbrevCode, OS);
+ encodeULEB128(AbbrevDecl.Tag, OS);
+ OS.write(AbbrevDecl.Children);
+ for (const auto &Attr : AbbrevDecl.Attributes) {
+ encodeULEB128(Attr.Attribute, OS);
+ encodeULEB128(Attr.Form, OS);
+ if (Attr.Form == dwarf::DW_FORM_implicit_const)
+ encodeSLEB128(Attr.Value, OS);
+ }
+ encodeULEB128(0, OS);
+ encodeULEB128(0, OS);
+ }
+
+ // The abbreviations for a given compilation unit end with an entry
+ // consisting of a 0 byte for the abbreviation code.
+ OS.write_zeros(1);
+
+ AbbrevTableContents.insert({Index, AbbrevTableBuffer});
+
+ return AbbrevTableContents[Index];
+}
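+
+// Worked example (illustrative): a table holding a single abbrev (code 1,
+// DW_TAG_compile_unit, DW_CHILDREN_no, and one attribute pair of
+// DW_AT_producer with DW_FORM_string) serializes as the byte sequence
+//   01 11 00 25 08 00 00
+// followed by the table-terminating 00 byte.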
+
+Error DWARFYAML::emitDebugAbbrev(raw_ostream &OS, const DWARFYAML::Data &DI) {
+ for (uint64_t I = 0; I < DI.DebugAbbrev.size(); ++I) {
+ StringRef AbbrevTableContent = DI.getAbbrevTableContentByIndex(I);
+ OS.write(AbbrevTableContent.data(), AbbrevTableContent.size());
+ }
+
+ return Error::success();
+}
+
+Error DWARFYAML::emitDebugAranges(raw_ostream &OS, const DWARFYAML::Data &DI) {
+ assert(DI.DebugAranges && "unexpected emitDebugAranges() call");
+ for (const auto &Range : *DI.DebugAranges) {
+ uint8_t AddrSize;
+ if (Range.AddrSize)
+ AddrSize = *Range.AddrSize;
+ else
+ AddrSize = DI.Is64BitAddrSize ? 8 : 4;
+
+    uint64_t Length = 4; // 2 (version) + 1 (address_size) +
+                         // 1 (segment_selector_size)
+ Length +=
+ Range.Format == dwarf::DWARF64 ? 8 : 4; // sizeof(debug_info_offset)
+
+    const uint64_t HeaderLength =
+        Length + (Range.Format == dwarf::DWARF64
+                      ? 12
+                      : 4); // sizeof(unit_length) = 12 (DWARF64) or 4 (DWARF32)
+ const uint64_t PaddedHeaderLength = alignTo(HeaderLength, AddrSize * 2);
+
+ if (Range.Length) {
+ Length = *Range.Length;
+ } else {
+ Length += PaddedHeaderLength - HeaderLength;
+ Length += AddrSize * 2 * (Range.Descriptors.size() + 1);
+ }
+
+ writeInitialLength(Range.Format, Length, OS, DI.IsLittleEndian);
+ writeInteger((uint16_t)Range.Version, OS, DI.IsLittleEndian);
+ writeDWARFOffset(Range.CuOffset, Range.Format, OS, DI.IsLittleEndian);
+ writeInteger((uint8_t)AddrSize, OS, DI.IsLittleEndian);
+ writeInteger((uint8_t)Range.SegSize, OS, DI.IsLittleEndian);
+ ZeroFillBytes(OS, PaddedHeaderLength - HeaderLength);
+
+ for (const auto &Descriptor : Range.Descriptors) {
+ if (Error Err = writeVariableSizedInteger(Descriptor.Address, AddrSize,
+ OS, DI.IsLittleEndian))
+ return createStringError(errc::not_supported,
+ "unable to write debug_aranges address: %s",
+ toString(std::move(Err)).c_str());
+ cantFail(writeVariableSizedInteger(Descriptor.Length, AddrSize, OS,
+ DI.IsLittleEndian));
+ }
+ ZeroFillBytes(OS, AddrSize * 2);
+ }
+
+ return Error::success();
+}
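+
+// Worked example (illustrative): for DWARF32 with an 8-byte address size, the
+// header occupies 4 (unit_length) + 2 (version) + 4 (debug_info_offset) +
+// 1 (address_size) + 1 (segment_selector_size) = 12 bytes and is padded up to
+// the next multiple of 2 * AddrSize = 16, so four bytes of zero fill precede
+// the first descriptor pair.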
+
+Error DWARFYAML::emitDebugRanges(raw_ostream &OS, const DWARFYAML::Data &DI) {
+ const size_t RangesOffset = OS.tell();
+ uint64_t EntryIndex = 0;
+ for (const auto &DebugRanges : *DI.DebugRanges) {
+ const size_t CurrOffset = OS.tell() - RangesOffset;
+ if (DebugRanges.Offset && (uint64_t)*DebugRanges.Offset < CurrOffset)
+ return createStringError(errc::invalid_argument,
+ "'Offset' for 'debug_ranges' with index " +
+ Twine(EntryIndex) +
+ " must be greater than or equal to the "
+ "number of bytes written already (0x" +
+ Twine::utohexstr(CurrOffset) + ")");
+ if (DebugRanges.Offset)
+ ZeroFillBytes(OS, *DebugRanges.Offset - CurrOffset);
+
+ uint8_t AddrSize;
+ if (DebugRanges.AddrSize)
+ AddrSize = *DebugRanges.AddrSize;
+ else
+ AddrSize = DI.Is64BitAddrSize ? 8 : 4;
+ for (const auto &Entry : DebugRanges.Entries) {
+ if (Error Err = writeVariableSizedInteger(Entry.LowOffset, AddrSize, OS,
+ DI.IsLittleEndian))
+ return createStringError(
+ errc::not_supported,
+ "unable to write debug_ranges address offset: %s",
+ toString(std::move(Err)).c_str());
+ cantFail(writeVariableSizedInteger(Entry.HighOffset, AddrSize, OS,
+ DI.IsLittleEndian));
+ }
+ ZeroFillBytes(OS, AddrSize * 2);
+ ++EntryIndex;
+ }
+
+ return Error::success();
+}
+
+static Error emitPubSection(raw_ostream &OS, const DWARFYAML::PubSection &Sect,
+ bool IsLittleEndian, bool IsGNUPubSec = false) {
+ writeInitialLength(Sect.Format, Sect.Length, OS, IsLittleEndian);
+ writeInteger((uint16_t)Sect.Version, OS, IsLittleEndian);
+ writeInteger((uint32_t)Sect.UnitOffset, OS, IsLittleEndian);
+ writeInteger((uint32_t)Sect.UnitSize, OS, IsLittleEndian);
+ for (const auto &Entry : Sect.Entries) {
+ writeInteger((uint32_t)Entry.DieOffset, OS, IsLittleEndian);
+ if (IsGNUPubSec)
+ writeInteger((uint8_t)Entry.Descriptor, OS, IsLittleEndian);
+ OS.write(Entry.Name.data(), Entry.Name.size());
+ OS.write('\0');
+ }
+ return Error::success();
+}
+
+Error DWARFYAML::emitDebugPubnames(raw_ostream &OS, const Data &DI) {
+ assert(DI.PubNames && "unexpected emitDebugPubnames() call");
+ return emitPubSection(OS, *DI.PubNames, DI.IsLittleEndian);
+}
+
+Error DWARFYAML::emitDebugPubtypes(raw_ostream &OS, const Data &DI) {
+ assert(DI.PubTypes && "unexpected emitDebugPubtypes() call");
+ return emitPubSection(OS, *DI.PubTypes, DI.IsLittleEndian);
+}
+
+Error DWARFYAML::emitDebugGNUPubnames(raw_ostream &OS, const Data &DI) {
+ assert(DI.GNUPubNames && "unexpected emitDebugGNUPubnames() call");
+  return emitPubSection(OS, *DI.GNUPubNames, DI.IsLittleEndian,
+                        /*IsGNUPubSec=*/true);
+}
+
+Error DWARFYAML::emitDebugGNUPubtypes(raw_ostream &OS, const Data &DI) {
+ assert(DI.GNUPubTypes && "unexpected emitDebugGNUPubtypes() call");
+  return emitPubSection(OS, *DI.GNUPubTypes, DI.IsLittleEndian,
+                        /*IsGNUPubSec=*/true);
+}
+
+static Expected<uint64_t> writeDIE(const DWARFYAML::Data &DI, uint64_t CUIndex,
+ uint64_t AbbrevTableID,
+ const dwarf::FormParams &Params,
+ const DWARFYAML::Entry &Entry,
+ raw_ostream &OS, bool IsLittleEndian) {
+ uint64_t EntryBegin = OS.tell();
+ encodeULEB128(Entry.AbbrCode, OS);
+ uint32_t AbbrCode = Entry.AbbrCode;
+ if (AbbrCode == 0 || Entry.Values.empty())
+ return OS.tell() - EntryBegin;
+
+ Expected<DWARFYAML::Data::AbbrevTableInfo> AbbrevTableInfoOrErr =
+ DI.getAbbrevTableInfoByID(AbbrevTableID);
+ if (!AbbrevTableInfoOrErr)
+ return createStringError(errc::invalid_argument,
+ toString(AbbrevTableInfoOrErr.takeError()) +
+ " for compilation unit with index " +
+ utostr(CUIndex));
+
+ ArrayRef<DWARFYAML::Abbrev> AbbrevDecls(
+ DI.DebugAbbrev[AbbrevTableInfoOrErr->Index].Table);
+
+ if (AbbrCode > AbbrevDecls.size())
+ return createStringError(
+ errc::invalid_argument,
+ "abbrev code must be less than or equal to the number of "
+ "entries in abbreviation table");
+ const DWARFYAML::Abbrev &Abbrev = AbbrevDecls[AbbrCode - 1];
+ auto FormVal = Entry.Values.begin();
+ auto AbbrForm = Abbrev.Attributes.begin();
+ for (; FormVal != Entry.Values.end() && AbbrForm != Abbrev.Attributes.end();
+ ++FormVal, ++AbbrForm) {
+ dwarf::Form Form = AbbrForm->Form;
+ bool Indirect;
+ do {
+ Indirect = false;
+ switch (Form) {
+ case dwarf::DW_FORM_addr:
+ // TODO: Test this error.
+ if (Error Err = writeVariableSizedInteger(
+ FormVal->Value, Params.AddrSize, OS, IsLittleEndian))
+ return std::move(Err);
+ break;
+ case dwarf::DW_FORM_ref_addr:
+ // TODO: Test this error.
+ if (Error Err = writeVariableSizedInteger(FormVal->Value,
+ Params.getRefAddrByteSize(),
+ OS, IsLittleEndian))
+ return std::move(Err);
+ break;
+ case dwarf::DW_FORM_exprloc:
+ case dwarf::DW_FORM_block:
+ encodeULEB128(FormVal->BlockData.size(), OS);
+ OS.write((const char *)FormVal->BlockData.data(),
+ FormVal->BlockData.size());
+ break;
+ case dwarf::DW_FORM_block1: {
+ writeInteger((uint8_t)FormVal->BlockData.size(), OS, IsLittleEndian);
+ OS.write((const char *)FormVal->BlockData.data(),
+ FormVal->BlockData.size());
+ break;
+ }
+ case dwarf::DW_FORM_block2: {
+ writeInteger((uint16_t)FormVal->BlockData.size(), OS, IsLittleEndian);
+ OS.write((const char *)FormVal->BlockData.data(),
+ FormVal->BlockData.size());
+ break;
+ }
+ case dwarf::DW_FORM_block4: {
+ writeInteger((uint32_t)FormVal->BlockData.size(), OS, IsLittleEndian);
+ OS.write((const char *)FormVal->BlockData.data(),
+ FormVal->BlockData.size());
+ break;
+ }
+ case dwarf::DW_FORM_strx:
+ case dwarf::DW_FORM_addrx:
+ case dwarf::DW_FORM_rnglistx:
+ case dwarf::DW_FORM_loclistx:
+ case dwarf::DW_FORM_udata:
+ case dwarf::DW_FORM_ref_udata:
+ case dwarf::DW_FORM_GNU_addr_index:
+ case dwarf::DW_FORM_GNU_str_index:
+ encodeULEB128(FormVal->Value, OS);
+ break;
+ case dwarf::DW_FORM_data1:
+ case dwarf::DW_FORM_ref1:
+ case dwarf::DW_FORM_flag:
+ case dwarf::DW_FORM_strx1:
+ case dwarf::DW_FORM_addrx1:
+ writeInteger((uint8_t)FormVal->Value, OS, IsLittleEndian);
+ break;
+ case dwarf::DW_FORM_data2:
+ case dwarf::DW_FORM_ref2:
+ case dwarf::DW_FORM_strx2:
+ case dwarf::DW_FORM_addrx2:
+ writeInteger((uint16_t)FormVal->Value, OS, IsLittleEndian);
+ break;
+ case dwarf::DW_FORM_data4:
+ case dwarf::DW_FORM_ref4:
+ case dwarf::DW_FORM_ref_sup4:
+ case dwarf::DW_FORM_strx4:
+ case dwarf::DW_FORM_addrx4:
+ writeInteger((uint32_t)FormVal->Value, OS, IsLittleEndian);
+ break;
+ case dwarf::DW_FORM_data8:
+ case dwarf::DW_FORM_ref8:
+ case dwarf::DW_FORM_ref_sup8:
+ case dwarf::DW_FORM_ref_sig8:
+ writeInteger((uint64_t)FormVal->Value, OS, IsLittleEndian);
+ break;
+ case dwarf::DW_FORM_sdata:
+ encodeSLEB128(FormVal->Value, OS);
+ break;
+ case dwarf::DW_FORM_string:
+ OS.write(FormVal->CStr.data(), FormVal->CStr.size());
+ OS.write('\0');
+ break;
+ case dwarf::DW_FORM_indirect:
+ encodeULEB128(FormVal->Value, OS);
+ Indirect = true;
+ Form = static_cast<dwarf::Form>((uint64_t)FormVal->Value);
+ ++FormVal;
+ break;
+ case dwarf::DW_FORM_strp:
+ case dwarf::DW_FORM_sec_offset:
+ case dwarf::DW_FORM_GNU_ref_alt:
+ case dwarf::DW_FORM_GNU_strp_alt:
+ case dwarf::DW_FORM_line_strp:
+ case dwarf::DW_FORM_strp_sup:
+ cantFail(writeVariableSizedInteger(FormVal->Value,
+ Params.getDwarfOffsetByteSize(), OS,
+ IsLittleEndian));
+ break;
+ default:
+ break;
+ }
+ } while (Indirect);
+ }
+
+ return OS.tell() - EntryBegin;
+}
+
+Error DWARFYAML::emitDebugInfo(raw_ostream &OS, const DWARFYAML::Data &DI) {
+ for (uint64_t I = 0; I < DI.CompileUnits.size(); ++I) {
+ const DWARFYAML::Unit &Unit = DI.CompileUnits[I];
+ uint8_t AddrSize;
+ if (Unit.AddrSize)
+ AddrSize = *Unit.AddrSize;
+ else
+ AddrSize = DI.Is64BitAddrSize ? 8 : 4;
+ dwarf::FormParams Params = {Unit.Version, AddrSize, Unit.Format};
+ uint64_t Length = 3; // sizeof(version) + sizeof(address_size)
+ Length += Unit.Version >= 5 ? 1 : 0; // sizeof(unit_type)
+ Length += Params.getDwarfOffsetByteSize(); // sizeof(debug_abbrev_offset)
+
+    // Since the length of the compilation unit is not yet known, we first
+    // write its content to a buffer, compute the length, and then serialize
+    // the buffer to the actual output stream.
+ std::string EntryBuffer;
+ raw_string_ostream EntryBufferOS(EntryBuffer);
+
+ uint64_t AbbrevTableID = Unit.AbbrevTableID.getValueOr(I);
+ for (const DWARFYAML::Entry &Entry : Unit.Entries) {
+ if (Expected<uint64_t> EntryLength =
+ writeDIE(DI, I, AbbrevTableID, Params, Entry, EntryBufferOS,
+ DI.IsLittleEndian))
+ Length += *EntryLength;
+ else
+ return EntryLength.takeError();
+ }
+
+ // If the length is specified in the YAML description, we use it instead of
+ // the actual length.
+ if (Unit.Length)
+ Length = *Unit.Length;
+
+ writeInitialLength(Unit.Format, Length, OS, DI.IsLittleEndian);
+ writeInteger((uint16_t)Unit.Version, OS, DI.IsLittleEndian);
+
+ uint64_t AbbrevTableOffset = 0;
+ if (Unit.AbbrOffset) {
+ AbbrevTableOffset = *Unit.AbbrOffset;
+ } else {
+ if (Expected<DWARFYAML::Data::AbbrevTableInfo> AbbrevTableInfoOrErr =
+ DI.getAbbrevTableInfoByID(AbbrevTableID)) {
+ AbbrevTableOffset = AbbrevTableInfoOrErr->Offset;
+ } else {
+        // The current compilation unit may have no DIEs, in which case no
+        // associated abbrev table can be found. We consume the error and
+        // leave the debug_abbrev_offset as 0 in that case.
+ consumeError(AbbrevTableInfoOrErr.takeError());
+ }
+ }
+
+ if (Unit.Version >= 5) {
+ writeInteger((uint8_t)Unit.Type, OS, DI.IsLittleEndian);
+ writeInteger((uint8_t)AddrSize, OS, DI.IsLittleEndian);
+ writeDWARFOffset(AbbrevTableOffset, Unit.Format, OS, DI.IsLittleEndian);
+ } else {
+ writeDWARFOffset(AbbrevTableOffset, Unit.Format, OS, DI.IsLittleEndian);
+ writeInteger((uint8_t)AddrSize, OS, DI.IsLittleEndian);
+ }
+
+ OS.write(EntryBuffer.data(), EntryBuffer.size());
+ }
+
+ return Error::success();
+}
+
+static void emitFileEntry(raw_ostream &OS, const DWARFYAML::File &File) {
+ OS.write(File.Name.data(), File.Name.size());
+ OS.write('\0');
+ encodeULEB128(File.DirIdx, OS);
+ encodeULEB128(File.ModTime, OS);
+ encodeULEB128(File.Length, OS);
+}
+
+static void writeExtendedOpcode(const DWARFYAML::LineTableOpcode &Op,
+ uint8_t AddrSize, bool IsLittleEndian,
+ raw_ostream &OS) {
+  // The first byte of an extended opcode is a zero byte. The next bytes are a
+  // ULEB128 integer giving the number of bytes in the instruction itself
+  // (excluding the first zero byte and the size field). We serialize the
+  // instruction into OpBuffer and then write the buffer's size followed by
+  // the buffer to the real output stream.
+ std::string OpBuffer;
+ raw_string_ostream OpBufferOS(OpBuffer);
+ writeInteger((uint8_t)Op.SubOpcode, OpBufferOS, IsLittleEndian);
+ switch (Op.SubOpcode) {
+ case dwarf::DW_LNE_set_address:
+ cantFail(writeVariableSizedInteger(Op.Data, AddrSize, OpBufferOS,
+ IsLittleEndian));
+ break;
+ case dwarf::DW_LNE_define_file:
+ emitFileEntry(OpBufferOS, Op.FileEntry);
+ break;
+ case dwarf::DW_LNE_set_discriminator:
+ encodeULEB128(Op.Data, OpBufferOS);
+ break;
+ case dwarf::DW_LNE_end_sequence:
+ break;
+ default:
+ for (auto OpByte : Op.UnknownOpcodeData)
+ writeInteger((uint8_t)OpByte, OpBufferOS, IsLittleEndian);
+ }
+ uint64_t ExtLen = Op.ExtLen.getValueOr(OpBuffer.size());
+ encodeULEB128(ExtLen, OS);
+ OS.write(OpBuffer.data(), OpBuffer.size());
+}
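+
+// Worked example (illustrative): DW_LNE_end_sequence carries no operands, so
+// its full encoding is the three bytes 00 01 01: the extended-opcode marker
+// written by the caller, a ULEB128 length of 1, and the sub-opcode itself.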
+
+static void writeLineTableOpcode(const DWARFYAML::LineTableOpcode &Op,
+ uint8_t OpcodeBase, uint8_t AddrSize,
+ raw_ostream &OS, bool IsLittleEndian) {
+ writeInteger((uint8_t)Op.Opcode, OS, IsLittleEndian);
+ if (Op.Opcode == 0) {
+ writeExtendedOpcode(Op, AddrSize, IsLittleEndian, OS);
+ } else if (Op.Opcode < OpcodeBase) {
+ switch (Op.Opcode) {
+ case dwarf::DW_LNS_copy:
+ case dwarf::DW_LNS_negate_stmt:
+ case dwarf::DW_LNS_set_basic_block:
+ case dwarf::DW_LNS_const_add_pc:
+ case dwarf::DW_LNS_set_prologue_end:
+ case dwarf::DW_LNS_set_epilogue_begin:
+ break;
+
+ case dwarf::DW_LNS_advance_pc:
+ case dwarf::DW_LNS_set_file:
+ case dwarf::DW_LNS_set_column:
+ case dwarf::DW_LNS_set_isa:
+ encodeULEB128(Op.Data, OS);
+ break;
+
+ case dwarf::DW_LNS_advance_line:
+ encodeSLEB128(Op.SData, OS);
+ break;
+
+ case dwarf::DW_LNS_fixed_advance_pc:
+ writeInteger((uint16_t)Op.Data, OS, IsLittleEndian);
+ break;
+
+ default:
+ for (auto OpData : Op.StandardOpcodeData) {
+ encodeULEB128(OpData, OS);
+ }
+ }
+ }
+}
+
+static std::vector<uint8_t>
+getStandardOpcodeLengths(uint16_t Version, Optional<uint8_t> OpcodeBase) {
+  // If the opcode_base field isn't specified, we return the default
+  // standard_opcode_lengths array for the given version.
+ std::vector<uint8_t> StandardOpcodeLengths{0, 1, 1, 1, 1, 0,
+ 0, 0, 1, 0, 0, 1};
+ if (Version == 2) {
+ // DWARF v2 uses the same first 9 standard opcodes as v3-5.
+ StandardOpcodeLengths.resize(9);
+ } else if (OpcodeBase) {
+ StandardOpcodeLengths.resize(*OpcodeBase > 0 ? *OpcodeBase - 1 : 0, 0);
+ }
+ return StandardOpcodeLengths;
+}
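+
+// Worked example (illustrative): for Version 2 this returns the nine v2
+// standard opcode lengths {0, 1, 1, 1, 1, 0, 0, 0, 1}; for Version 4 with no
+// explicit opcode_base it returns all twelve entries, implying an opcode_base
+// of 13.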
+
+Error DWARFYAML::emitDebugLine(raw_ostream &OS, const DWARFYAML::Data &DI) {
+ for (const DWARFYAML::LineTable &LineTable : DI.DebugLines) {
+    // Buffer holds the bytes from just after the header_length (or
+    // prologue_length in DWARF v2) field to the end of the line number
+    // program itself.
+ std::string Buffer;
+ raw_string_ostream BufferOS(Buffer);
+
+ writeInteger(LineTable.MinInstLength, BufferOS, DI.IsLittleEndian);
+ // TODO: Add support for emitting DWARFv5 line table.
+ if (LineTable.Version >= 4)
+ writeInteger(LineTable.MaxOpsPerInst, BufferOS, DI.IsLittleEndian);
+ writeInteger(LineTable.DefaultIsStmt, BufferOS, DI.IsLittleEndian);
+ writeInteger(LineTable.LineBase, BufferOS, DI.IsLittleEndian);
+ writeInteger(LineTable.LineRange, BufferOS, DI.IsLittleEndian);
+
+ std::vector<uint8_t> StandardOpcodeLengths =
+ LineTable.StandardOpcodeLengths.getValueOr(
+ getStandardOpcodeLengths(LineTable.Version, LineTable.OpcodeBase));
+ uint8_t OpcodeBase = LineTable.OpcodeBase
+ ? *LineTable.OpcodeBase
+ : StandardOpcodeLengths.size() + 1;
+ writeInteger(OpcodeBase, BufferOS, DI.IsLittleEndian);
+ for (uint8_t OpcodeLength : StandardOpcodeLengths)
+ writeInteger(OpcodeLength, BufferOS, DI.IsLittleEndian);
+
+ for (StringRef IncludeDir : LineTable.IncludeDirs) {
+ BufferOS.write(IncludeDir.data(), IncludeDir.size());
+ BufferOS.write('\0');
+ }
+ BufferOS.write('\0');
+
+ for (const DWARFYAML::File &File : LineTable.Files)
+ emitFileEntry(BufferOS, File);
+ BufferOS.write('\0');
+
+ uint64_t HeaderLength =
+ LineTable.PrologueLength ? *LineTable.PrologueLength : Buffer.size();
+
+ for (const DWARFYAML::LineTableOpcode &Op : LineTable.Opcodes)
+ writeLineTableOpcode(Op, OpcodeBase, DI.Is64BitAddrSize ? 8 : 4, BufferOS,
+ DI.IsLittleEndian);
+
+ uint64_t Length;
+ if (LineTable.Length) {
+ Length = *LineTable.Length;
+ } else {
+ Length = 2; // sizeof(version)
+ Length +=
+ (LineTable.Format == dwarf::DWARF64 ? 8 : 4); // sizeof(header_length)
+ Length += Buffer.size();
+ }
+
+ writeInitialLength(LineTable.Format, Length, OS, DI.IsLittleEndian);
+ writeInteger(LineTable.Version, OS, DI.IsLittleEndian);
+ writeDWARFOffset(HeaderLength, LineTable.Format, OS, DI.IsLittleEndian);
+ OS.write(Buffer.data(), Buffer.size());
+ }
+
+ return Error::success();
+}
+
+Error DWARFYAML::emitDebugAddr(raw_ostream &OS, const Data &DI) {
+ for (const AddrTableEntry &TableEntry : *DI.DebugAddr) {
+ uint8_t AddrSize;
+ if (TableEntry.AddrSize)
+ AddrSize = *TableEntry.AddrSize;
+ else
+ AddrSize = DI.Is64BitAddrSize ? 8 : 4;
+
+ uint64_t Length;
+ if (TableEntry.Length)
+ Length = (uint64_t)*TableEntry.Length;
+ else
+ // 2 (version) + 1 (address_size) + 1 (segment_selector_size) = 4
+ Length = 4 + (AddrSize + TableEntry.SegSelectorSize) *
+ TableEntry.SegAddrPairs.size();
+
+ writeInitialLength(TableEntry.Format, Length, OS, DI.IsLittleEndian);
+ writeInteger((uint16_t)TableEntry.Version, OS, DI.IsLittleEndian);
+ writeInteger((uint8_t)AddrSize, OS, DI.IsLittleEndian);
+ writeInteger((uint8_t)TableEntry.SegSelectorSize, OS, DI.IsLittleEndian);
+
+ for (const SegAddrPair &Pair : TableEntry.SegAddrPairs) {
+ if (TableEntry.SegSelectorSize != yaml::Hex8{0})
+ if (Error Err = writeVariableSizedInteger(Pair.Segment,
+ TableEntry.SegSelectorSize,
+ OS, DI.IsLittleEndian))
+ return createStringError(errc::not_supported,
+ "unable to write debug_addr segment: %s",
+ toString(std::move(Err)).c_str());
+ if (AddrSize != 0)
+ if (Error Err = writeVariableSizedInteger(Pair.Address, AddrSize, OS,
+ DI.IsLittleEndian))
+ return createStringError(errc::not_supported,
+ "unable to write debug_addr address: %s",
+ toString(std::move(Err)).c_str());
+ }
+ }
+
+ return Error::success();
+}
+
+Error DWARFYAML::emitDebugStrOffsets(raw_ostream &OS, const Data &DI) {
+ assert(DI.DebugStrOffsets && "unexpected emitDebugStrOffsets() call");
+ for (const DWARFYAML::StringOffsetsTable &Table : *DI.DebugStrOffsets) {
+ uint64_t Length;
+ if (Table.Length)
+ Length = *Table.Length;
+ else
+ // sizeof(version) + sizeof(padding) = 4
+ Length =
+ 4 + Table.Offsets.size() * (Table.Format == dwarf::DWARF64 ? 8 : 4);
+
+ writeInitialLength(Table.Format, Length, OS, DI.IsLittleEndian);
+ writeInteger((uint16_t)Table.Version, OS, DI.IsLittleEndian);
+ writeInteger((uint16_t)Table.Padding, OS, DI.IsLittleEndian);
+
+ for (uint64_t Offset : Table.Offsets)
+ writeDWARFOffset(Offset, Table.Format, OS, DI.IsLittleEndian);
+ }
+
+ return Error::success();
+}
+
+static Error checkOperandCount(StringRef EncodingString,
+ ArrayRef<yaml::Hex64> Values,
+ uint64_t ExpectedOperands) {
+ if (Values.size() != ExpectedOperands)
+ return createStringError(
+ errc::invalid_argument,
+ "invalid number (%zu) of operands for the operator: %s, %" PRIu64
+ " expected",
+ Values.size(), EncodingString.str().c_str(), ExpectedOperands);
+
+ return Error::success();
+}
+
+static Error writeListEntryAddress(StringRef EncodingName, raw_ostream &OS,
+ uint64_t Addr, uint8_t AddrSize,
+ bool IsLittleEndian) {
+ if (Error Err = writeVariableSizedInteger(Addr, AddrSize, OS, IsLittleEndian))
+ return createStringError(errc::invalid_argument,
+ "unable to write address for the operator %s: %s",
+ EncodingName.str().c_str(),
+ toString(std::move(Err)).c_str());
+
+ return Error::success();
+}
+
+static Expected<uint64_t>
+writeDWARFExpression(raw_ostream &OS,
+ const DWARFYAML::DWARFOperation &Operation,
+ uint8_t AddrSize, bool IsLittleEndian) {
+ auto CheckOperands = [&](uint64_t ExpectedOperands) -> Error {
+ return checkOperandCount(dwarf::OperationEncodingString(Operation.Operator),
+ Operation.Values, ExpectedOperands);
+ };
+
+ uint64_t ExpressionBegin = OS.tell();
+ writeInteger((uint8_t)Operation.Operator, OS, IsLittleEndian);
+ switch (Operation.Operator) {
+ case dwarf::DW_OP_consts:
+ if (Error Err = CheckOperands(1))
+ return std::move(Err);
+ encodeSLEB128(Operation.Values[0], OS);
+ break;
+ case dwarf::DW_OP_stack_value:
+ if (Error Err = CheckOperands(0))
+ return std::move(Err);
+ break;
+ default:
+ StringRef EncodingStr = dwarf::OperationEncodingString(Operation.Operator);
+ return createStringError(errc::not_supported,
+ "DWARF expression: " +
+ (EncodingStr.empty()
+ ? "0x" + utohexstr(Operation.Operator)
+ : EncodingStr) +
+ " is not supported");
+ }
+ return OS.tell() - ExpressionBegin;
+}
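+
+// Worked example (illustrative): DW_OP_consts (0x11) with the single operand
+// -1 is emitted as the two bytes 11 7f, i.e. the opcode followed by
+// SLEB128(-1).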
+
+static Expected<uint64_t> writeListEntry(raw_ostream &OS,
+ const DWARFYAML::RnglistEntry &Entry,
+ uint8_t AddrSize,
+ bool IsLittleEndian) {
+ uint64_t BeginOffset = OS.tell();
+ writeInteger((uint8_t)Entry.Operator, OS, IsLittleEndian);
+
+ StringRef EncodingName = dwarf::RangeListEncodingString(Entry.Operator);
+
+ auto CheckOperands = [&](uint64_t ExpectedOperands) -> Error {
+ return checkOperandCount(EncodingName, Entry.Values, ExpectedOperands);
+ };
+
+ auto WriteAddress = [&](uint64_t Addr) -> Error {
+ return writeListEntryAddress(EncodingName, OS, Addr, AddrSize,
+ IsLittleEndian);
+ };
+
+ switch (Entry.Operator) {
+ case dwarf::DW_RLE_end_of_list:
+ if (Error Err = CheckOperands(0))
+ return std::move(Err);
+ break;
+ case dwarf::DW_RLE_base_addressx:
+ if (Error Err = CheckOperands(1))
+ return std::move(Err);
+ encodeULEB128(Entry.Values[0], OS);
+ break;
+ case dwarf::DW_RLE_startx_endx:
+ case dwarf::DW_RLE_startx_length:
+ case dwarf::DW_RLE_offset_pair:
+ if (Error Err = CheckOperands(2))
+ return std::move(Err);
+ encodeULEB128(Entry.Values[0], OS);
+ encodeULEB128(Entry.Values[1], OS);
+ break;
+ case dwarf::DW_RLE_base_address:
+ if (Error Err = CheckOperands(1))
+ return std::move(Err);
+ if (Error Err = WriteAddress(Entry.Values[0]))
+ return std::move(Err);
+ break;
+ case dwarf::DW_RLE_start_end:
+ if (Error Err = CheckOperands(2))
+ return std::move(Err);
+ if (Error Err = WriteAddress(Entry.Values[0]))
+ return std::move(Err);
+ cantFail(WriteAddress(Entry.Values[1]));
+ break;
+ case dwarf::DW_RLE_start_length:
+ if (Error Err = CheckOperands(2))
+ return std::move(Err);
+ if (Error Err = WriteAddress(Entry.Values[0]))
+ return std::move(Err);
+ encodeULEB128(Entry.Values[1], OS);
+ break;
+ }
+
+ return OS.tell() - BeginOffset;
+}
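+
+// Worked example (illustrative): DW_RLE_offset_pair (0x04) with the operands
+// 0x10 and 0x20 is emitted as the three bytes 04 10 20, i.e. the operator
+// followed by two ULEB128-encoded offsets.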
+
+static Expected<uint64_t> writeListEntry(raw_ostream &OS,
+ const DWARFYAML::LoclistEntry &Entry,
+ uint8_t AddrSize,
+ bool IsLittleEndian) {
+ uint64_t BeginOffset = OS.tell();
+ writeInteger((uint8_t)Entry.Operator, OS, IsLittleEndian);
+
+ StringRef EncodingName = dwarf::LocListEncodingString(Entry.Operator);
+
+ auto CheckOperands = [&](uint64_t ExpectedOperands) -> Error {
+ return checkOperandCount(EncodingName, Entry.Values, ExpectedOperands);
+ };
+
+ auto WriteAddress = [&](uint64_t Addr) -> Error {
+ return writeListEntryAddress(EncodingName, OS, Addr, AddrSize,
+ IsLittleEndian);
+ };
+
+ auto WriteDWARFOperations = [&]() -> Error {
+ std::string OpBuffer;
+ raw_string_ostream OpBufferOS(OpBuffer);
+ uint64_t DescriptionsLength = 0;
+
+ for (const DWARFYAML::DWARFOperation &Op : Entry.Descriptions) {
+ if (Expected<uint64_t> OpSize =
+ writeDWARFExpression(OpBufferOS, Op, AddrSize, IsLittleEndian))
+ DescriptionsLength += *OpSize;
+ else
+ return OpSize.takeError();
+ }
+
+ if (Entry.DescriptionsLength)
+ DescriptionsLength = *Entry.DescriptionsLength;
+ else
+ DescriptionsLength = OpBuffer.size();
+
+ encodeULEB128(DescriptionsLength, OS);
+ OS.write(OpBuffer.data(), OpBuffer.size());
+
+ return Error::success();
+ };
+
+ switch (Entry.Operator) {
+ case dwarf::DW_LLE_end_of_list:
+ if (Error Err = CheckOperands(0))
+ return std::move(Err);
+ break;
+ case dwarf::DW_LLE_base_addressx:
+ if (Error Err = CheckOperands(1))
+ return std::move(Err);
+ encodeULEB128(Entry.Values[0], OS);
+ break;
+ case dwarf::DW_LLE_startx_endx:
+ case dwarf::DW_LLE_startx_length:
+ case dwarf::DW_LLE_offset_pair:
+ if (Error Err = CheckOperands(2))
+ return std::move(Err);
+ encodeULEB128(Entry.Values[0], OS);
+ encodeULEB128(Entry.Values[1], OS);
+ if (Error Err = WriteDWARFOperations())
+ return std::move(Err);
+ break;
+ case dwarf::DW_LLE_default_location:
+ if (Error Err = CheckOperands(0))
+ return std::move(Err);
+ if (Error Err = WriteDWARFOperations())
+ return std::move(Err);
+ break;
+ case dwarf::DW_LLE_base_address:
+ if (Error Err = CheckOperands(1))
+ return std::move(Err);
+ if (Error Err = WriteAddress(Entry.Values[0]))
+ return std::move(Err);
+ break;
+ case dwarf::DW_LLE_start_end:
+ if (Error Err = CheckOperands(2))
+ return std::move(Err);
+ if (Error Err = WriteAddress(Entry.Values[0]))
+ return std::move(Err);
+ cantFail(WriteAddress(Entry.Values[1]));
+ if (Error Err = WriteDWARFOperations())
+ return std::move(Err);
+ break;
+ case dwarf::DW_LLE_start_length:
+ if (Error Err = CheckOperands(2))
+ return std::move(Err);
+ if (Error Err = WriteAddress(Entry.Values[0]))
+ return std::move(Err);
+ encodeULEB128(Entry.Values[1], OS);
+ if (Error Err = WriteDWARFOperations())
+ return std::move(Err);
+ break;
+ }
+
+ return OS.tell() - BeginOffset;
+}
+
+template <typename EntryType>
+static Error writeDWARFLists(raw_ostream &OS,
+ ArrayRef<DWARFYAML::ListTable<EntryType>> Tables,
+ bool IsLittleEndian, bool Is64BitAddrSize) {
+ for (const DWARFYAML::ListTable<EntryType> &Table : Tables) {
+ // sizeof(version) + sizeof(address_size) + sizeof(segment_selector_size) +
+ // sizeof(offset_entry_count) = 8
+ uint64_t Length = 8;
+
+ uint8_t AddrSize;
+ if (Table.AddrSize)
+ AddrSize = *Table.AddrSize;
+ else
+ AddrSize = Is64BitAddrSize ? 8 : 4;
+
+    // Since the length of the current range/location list table is not yet
+    // known, we first write the lists to a buffer, compute the length, and
+    // then serialize the buffer content to the actual output stream.
+ std::string ListBuffer;
+ raw_string_ostream ListBufferOS(ListBuffer);
+
+    // Offsets holds the offset of each range/location list: the i-th element
+    // is the offset from the beginning of the first list to the start of the
+    // i-th list.
+ std::vector<uint64_t> Offsets;
+
+ for (const DWARFYAML::ListEntries<EntryType> &List : Table.Lists) {
+ Offsets.push_back(ListBufferOS.tell());
+ if (List.Content) {
+ List.Content->writeAsBinary(ListBufferOS, UINT64_MAX);
+ Length += List.Content->binary_size();
+ } else if (List.Entries) {
+ for (const EntryType &Entry : *List.Entries) {
+ Expected<uint64_t> EntrySize =
+ writeListEntry(ListBufferOS, Entry, AddrSize, IsLittleEndian);
+ if (!EntrySize)
+ return EntrySize.takeError();
+ Length += *EntrySize;
+ }
+ }
+ }
+
+ // If the offset_entry_count field isn't specified, yaml2obj will infer it
+ // from the 'Offsets' field in the YAML description. If the 'Offsets' field
+ // isn't specified either, yaml2obj will infer it from the auto-generated
+ // offsets.
+ uint32_t OffsetEntryCount;
+ if (Table.OffsetEntryCount)
+ OffsetEntryCount = *Table.OffsetEntryCount;
+ else
+ OffsetEntryCount = Table.Offsets ? Table.Offsets->size() : Offsets.size();
+ uint64_t OffsetsSize =
+ OffsetEntryCount * (Table.Format == dwarf::DWARF64 ? 8 : 4);
+ Length += OffsetsSize;
+
+ // If the length is specified in the YAML description, we use it instead of
+ // the actual length.
+ if (Table.Length)
+ Length = *Table.Length;
+
+ writeInitialLength(Table.Format, Length, OS, IsLittleEndian);
+ writeInteger((uint16_t)Table.Version, OS, IsLittleEndian);
+ writeInteger((uint8_t)AddrSize, OS, IsLittleEndian);
+ writeInteger((uint8_t)Table.SegSelectorSize, OS, IsLittleEndian);
+ writeInteger((uint32_t)OffsetEntryCount, OS, IsLittleEndian);
+
+ auto EmitOffsets = [&](ArrayRef<uint64_t> Offsets, uint64_t OffsetsSize) {
+ for (uint64_t Offset : Offsets)
+ writeDWARFOffset(OffsetsSize + Offset, Table.Format, OS,
+ IsLittleEndian);
+ };
+
+ if (Table.Offsets)
+ EmitOffsets(ArrayRef<uint64_t>((const uint64_t *)Table.Offsets->data(),
+ Table.Offsets->size()),
+ 0);
+ else if (OffsetEntryCount != 0)
+ EmitOffsets(Offsets, OffsetsSize);
+
+ OS.write(ListBuffer.data(), ListBuffer.size());
+ }
+
+ return Error::success();
+}
+
+Error DWARFYAML::emitDebugRnglists(raw_ostream &OS, const Data &DI) {
+ assert(DI.DebugRnglists && "unexpected emitDebugRnglists() call");
+ return writeDWARFLists<DWARFYAML::RnglistEntry>(
+ OS, *DI.DebugRnglists, DI.IsLittleEndian, DI.Is64BitAddrSize);
+}
+
+Error DWARFYAML::emitDebugLoclists(raw_ostream &OS, const Data &DI) {
+  assert(DI.DebugLoclists && "unexpected emitDebugLoclists() call");
+ return writeDWARFLists<DWARFYAML::LoclistEntry>(
+ OS, *DI.DebugLoclists, DI.IsLittleEndian, DI.Is64BitAddrSize);
+}
+
+std::function<Error(raw_ostream &, const DWARFYAML::Data &)>
+DWARFYAML::getDWARFEmitterByName(StringRef SecName) {
+ auto EmitFunc =
+ StringSwitch<
+ std::function<Error(raw_ostream &, const DWARFYAML::Data &)>>(SecName)
+ .Case("debug_abbrev", DWARFYAML::emitDebugAbbrev)
+ .Case("debug_addr", DWARFYAML::emitDebugAddr)
+ .Case("debug_aranges", DWARFYAML::emitDebugAranges)
+ .Case("debug_gnu_pubnames", DWARFYAML::emitDebugGNUPubnames)
+ .Case("debug_gnu_pubtypes", DWARFYAML::emitDebugGNUPubtypes)
+ .Case("debug_info", DWARFYAML::emitDebugInfo)
+ .Case("debug_line", DWARFYAML::emitDebugLine)
+ .Case("debug_loclists", DWARFYAML::emitDebugLoclists)
+ .Case("debug_pubnames", DWARFYAML::emitDebugPubnames)
+ .Case("debug_pubtypes", DWARFYAML::emitDebugPubtypes)
+ .Case("debug_ranges", DWARFYAML::emitDebugRanges)
+ .Case("debug_rnglists", DWARFYAML::emitDebugRnglists)
+ .Case("debug_str", DWARFYAML::emitDebugStr)
+ .Case("debug_str_offsets", DWARFYAML::emitDebugStrOffsets)
+ .Default([&](raw_ostream &, const DWARFYAML::Data &) {
+ return createStringError(errc::not_supported,
+ SecName + " is not supported");
+ });
+
+ return EmitFunc;
+}
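+
+// Usage sketch (illustrative only, not part of the upstream file): a lookup
+// through the dispatcher above is equivalent to calling the named emitter
+// directly. The helper name is hypothetical.
+static Error emitStrSectionViaDispatch(raw_ostream &OS,
+                                       const DWARFYAML::Data &DI) {
+  // "debug_str" is one of the section names handled by the StringSwitch.
+  return DWARFYAML::getDWARFEmitterByName("debug_str")(OS, DI);
+}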
+
+static Error
+emitDebugSectionImpl(const DWARFYAML::Data &DI, StringRef Sec,
+ StringMap<std::unique_ptr<MemoryBuffer>> &OutputBuffers) {
+ std::string Data;
+ raw_string_ostream DebugInfoStream(Data);
+
+ auto EmitFunc = DWARFYAML::getDWARFEmitterByName(Sec);
+
+ if (Error Err = EmitFunc(DebugInfoStream, DI))
+ return Err;
+ DebugInfoStream.flush();
+ if (!Data.empty())
+ OutputBuffers[Sec] = MemoryBuffer::getMemBufferCopy(Data);
+
+ return Error::success();
+}
+
+Expected<StringMap<std::unique_ptr<MemoryBuffer>>>
+DWARFYAML::emitDebugSections(StringRef YAMLString, bool IsLittleEndian,
+ bool Is64BitAddrSize) {
+ auto CollectDiagnostic = [](const SMDiagnostic &Diag, void *DiagContext) {
+ *static_cast<SMDiagnostic *>(DiagContext) = Diag;
+ };
+
+ SMDiagnostic GeneratedDiag;
+ yaml::Input YIn(YAMLString, /*Ctxt=*/nullptr, CollectDiagnostic,
+ &GeneratedDiag);
+
+ DWARFYAML::Data DI;
+ DI.IsLittleEndian = IsLittleEndian;
+ DI.Is64BitAddrSize = Is64BitAddrSize;
+
+ YIn >> DI;
+ if (YIn.error())
+ return createStringError(YIn.error(), GeneratedDiag.getMessage());
+
+ StringMap<std::unique_ptr<MemoryBuffer>> DebugSections;
+ Error Err = Error::success();
+
+ for (StringRef SecName : DI.getNonEmptySectionNames())
+ Err = joinErrors(std::move(Err),
+ emitDebugSectionImpl(DI, SecName, DebugSections));
+
+ if (Err)
+ return std::move(Err);
+ return std::move(DebugSections);
+}
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/DWARFYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/DWARFYAML.cpp
new file mode 100644
index 00000000000..2591bf4d5af
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/DWARFYAML.cpp
@@ -0,0 +1,329 @@
+//===- DWARFYAML.cpp - DWARF YAMLIO implementation ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of DWARF Debug
+// Info.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/DWARFYAML.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+
+bool DWARFYAML::Data::isEmpty() const {
+ return getNonEmptySectionNames().empty();
+}
+
+SetVector<StringRef> DWARFYAML::Data::getNonEmptySectionNames() const {
+ SetVector<StringRef> SecNames;
+ if (DebugStrings)
+ SecNames.insert("debug_str");
+ if (DebugAranges)
+ SecNames.insert("debug_aranges");
+ if (DebugRanges)
+ SecNames.insert("debug_ranges");
+ if (!DebugLines.empty())
+ SecNames.insert("debug_line");
+ if (DebugAddr)
+ SecNames.insert("debug_addr");
+ if (!DebugAbbrev.empty())
+ SecNames.insert("debug_abbrev");
+ if (!CompileUnits.empty())
+ SecNames.insert("debug_info");
+ if (PubNames)
+ SecNames.insert("debug_pubnames");
+ if (PubTypes)
+ SecNames.insert("debug_pubtypes");
+ if (GNUPubNames)
+ SecNames.insert("debug_gnu_pubnames");
+ if (GNUPubTypes)
+ SecNames.insert("debug_gnu_pubtypes");
+ if (DebugStrOffsets)
+ SecNames.insert("debug_str_offsets");
+ if (DebugRnglists)
+ SecNames.insert("debug_rnglists");
+ if (DebugLoclists)
+ SecNames.insert("debug_loclists");
+ return SecNames;
+}
+
+Expected<DWARFYAML::Data::AbbrevTableInfo>
+DWARFYAML::Data::getAbbrevTableInfoByID(uint64_t ID) const {
+ if (AbbrevTableInfoMap.empty()) {
+ uint64_t AbbrevTableOffset = 0;
+ for (auto &AbbrevTable : enumerate(DebugAbbrev)) {
+ // If the abbrev table's ID isn't specified, we use the index as its ID.
+ uint64_t AbbrevTableID =
+ AbbrevTable.value().ID.getValueOr(AbbrevTable.index());
+ auto It = AbbrevTableInfoMap.insert(
+ {AbbrevTableID, AbbrevTableInfo{/*Index=*/AbbrevTable.index(),
+ /*Offset=*/AbbrevTableOffset}});
+ if (!It.second)
+ return createStringError(
+ errc::invalid_argument,
+ "the ID (%" PRIu64 ") of abbrev table with index %zu has been used "
+ "by abbrev table with index %" PRIu64,
+ AbbrevTableID, AbbrevTable.index(), It.first->second.Index);
+
+ AbbrevTableOffset +=
+ getAbbrevTableContentByIndex(AbbrevTable.index()).size();
+ }
+ }
+
+ auto It = AbbrevTableInfoMap.find(ID);
+ if (It == AbbrevTableInfoMap.end())
+ return createStringError(errc::invalid_argument,
+ "cannot find abbrev table whose ID is %" PRIu64,
+ ID);
+ return It->second;
+}
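+
+// Illustrative example: with two abbrev tables and no explicit IDs, the first
+// table is registered as {ID: 0, Index: 0, Offset: 0} and the second as
+// {ID: 1, Index: 1, Offset: <size of the first table's content>}.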
+
+namespace yaml {
+
+void MappingTraits<DWARFYAML::Data>::mapping(IO &IO, DWARFYAML::Data &DWARF) {
+ void *OldContext = IO.getContext();
+ DWARFYAML::DWARFContext DWARFCtx;
+ IO.setContext(&DWARFCtx);
+ IO.mapOptional("debug_str", DWARF.DebugStrings);
+ IO.mapOptional("debug_abbrev", DWARF.DebugAbbrev);
+ IO.mapOptional("debug_aranges", DWARF.DebugAranges);
+ IO.mapOptional("debug_ranges", DWARF.DebugRanges);
+ IO.mapOptional("debug_pubnames", DWARF.PubNames);
+ IO.mapOptional("debug_pubtypes", DWARF.PubTypes);
+ DWARFCtx.IsGNUPubSec = true;
+ IO.mapOptional("debug_gnu_pubnames", DWARF.GNUPubNames);
+ IO.mapOptional("debug_gnu_pubtypes", DWARF.GNUPubTypes);
+ IO.mapOptional("debug_info", DWARF.CompileUnits);
+ IO.mapOptional("debug_line", DWARF.DebugLines);
+ IO.mapOptional("debug_addr", DWARF.DebugAddr);
+ IO.mapOptional("debug_str_offsets", DWARF.DebugStrOffsets);
+ IO.mapOptional("debug_rnglists", DWARF.DebugRnglists);
+ IO.mapOptional("debug_loclists", DWARF.DebugLoclists);
+ IO.setContext(OldContext);
+}
+
+void MappingTraits<DWARFYAML::AbbrevTable>::mapping(
+ IO &IO, DWARFYAML::AbbrevTable &AbbrevTable) {
+ IO.mapOptional("ID", AbbrevTable.ID);
+ IO.mapOptional("Table", AbbrevTable.Table);
+}
+
+void MappingTraits<DWARFYAML::Abbrev>::mapping(IO &IO,
+ DWARFYAML::Abbrev &Abbrev) {
+ IO.mapOptional("Code", Abbrev.Code);
+ IO.mapRequired("Tag", Abbrev.Tag);
+ IO.mapRequired("Children", Abbrev.Children);
+ IO.mapOptional("Attributes", Abbrev.Attributes);
+}
+
+void MappingTraits<DWARFYAML::AttributeAbbrev>::mapping(
+ IO &IO, DWARFYAML::AttributeAbbrev &AttAbbrev) {
+ IO.mapRequired("Attribute", AttAbbrev.Attribute);
+ IO.mapRequired("Form", AttAbbrev.Form);
+  if (AttAbbrev.Form == dwarf::DW_FORM_implicit_const)
+ IO.mapRequired("Value", AttAbbrev.Value);
+}
+
+void MappingTraits<DWARFYAML::ARangeDescriptor>::mapping(
+ IO &IO, DWARFYAML::ARangeDescriptor &Descriptor) {
+ IO.mapRequired("Address", Descriptor.Address);
+ IO.mapRequired("Length", Descriptor.Length);
+}
+
+void MappingTraits<DWARFYAML::ARange>::mapping(IO &IO,
+ DWARFYAML::ARange &ARange) {
+ IO.mapOptional("Format", ARange.Format, dwarf::DWARF32);
+ IO.mapOptional("Length", ARange.Length);
+ IO.mapRequired("Version", ARange.Version);
+ IO.mapRequired("CuOffset", ARange.CuOffset);
+ IO.mapOptional("AddressSize", ARange.AddrSize);
+ IO.mapOptional("SegmentSelectorSize", ARange.SegSize, 0);
+ IO.mapOptional("Descriptors", ARange.Descriptors);
+}
+
+void MappingTraits<DWARFYAML::RangeEntry>::mapping(
+ IO &IO, DWARFYAML::RangeEntry &Descriptor) {
+ IO.mapRequired("LowOffset", Descriptor.LowOffset);
+ IO.mapRequired("HighOffset", Descriptor.HighOffset);
+}
+
+void MappingTraits<DWARFYAML::Ranges>::mapping(IO &IO,
+ DWARFYAML::Ranges &DebugRanges) {
+ IO.mapOptional("Offset", DebugRanges.Offset);
+ IO.mapOptional("AddrSize", DebugRanges.AddrSize);
+ IO.mapRequired("Entries", DebugRanges.Entries);
+}
+
+void MappingTraits<DWARFYAML::PubEntry>::mapping(IO &IO,
+ DWARFYAML::PubEntry &Entry) {
+ IO.mapRequired("DieOffset", Entry.DieOffset);
+ if (static_cast<DWARFYAML::DWARFContext *>(IO.getContext())->IsGNUPubSec)
+ IO.mapRequired("Descriptor", Entry.Descriptor);
+ IO.mapRequired("Name", Entry.Name);
+}
+
+void MappingTraits<DWARFYAML::PubSection>::mapping(
+ IO &IO, DWARFYAML::PubSection &Section) {
+ IO.mapOptional("Format", Section.Format, dwarf::DWARF32);
+ IO.mapRequired("Length", Section.Length);
+ IO.mapRequired("Version", Section.Version);
+ IO.mapRequired("UnitOffset", Section.UnitOffset);
+ IO.mapRequired("UnitSize", Section.UnitSize);
+ IO.mapRequired("Entries", Section.Entries);
+}
+
+void MappingTraits<DWARFYAML::Unit>::mapping(IO &IO, DWARFYAML::Unit &Unit) {
+ IO.mapOptional("Format", Unit.Format, dwarf::DWARF32);
+ IO.mapOptional("Length", Unit.Length);
+ IO.mapRequired("Version", Unit.Version);
+ if (Unit.Version >= 5)
+ IO.mapRequired("UnitType", Unit.Type);
+ IO.mapOptional("AbbrevTableID", Unit.AbbrevTableID);
+ IO.mapOptional("AbbrOffset", Unit.AbbrOffset);
+ IO.mapOptional("AddrSize", Unit.AddrSize);
+ IO.mapOptional("Entries", Unit.Entries);
+}
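+
+// A hypothetical compile unit description accepted by this mapping:
+//   debug_info:
+//     - Version: 4
+//       AbbrevTableID: 0
+//       Entries:
+//         - AbbrCode: 1
+//           Values:
+//             - Value: 0x12345678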
+
+void MappingTraits<DWARFYAML::Entry>::mapping(IO &IO, DWARFYAML::Entry &Entry) {
+ IO.mapRequired("AbbrCode", Entry.AbbrCode);
+ IO.mapOptional("Values", Entry.Values);
+}
+
+void MappingTraits<DWARFYAML::FormValue>::mapping(
+ IO &IO, DWARFYAML::FormValue &FormValue) {
+ IO.mapOptional("Value", FormValue.Value);
+ if (!FormValue.CStr.empty() || !IO.outputting())
+ IO.mapOptional("CStr", FormValue.CStr);
+ if (!FormValue.BlockData.empty() || !IO.outputting())
+ IO.mapOptional("BlockData", FormValue.BlockData);
+}
+
+void MappingTraits<DWARFYAML::File>::mapping(IO &IO, DWARFYAML::File &File) {
+ IO.mapRequired("Name", File.Name);
+ IO.mapRequired("DirIdx", File.DirIdx);
+ IO.mapRequired("ModTime", File.ModTime);
+ IO.mapRequired("Length", File.Length);
+}
+
+void MappingTraits<DWARFYAML::LineTableOpcode>::mapping(
+ IO &IO, DWARFYAML::LineTableOpcode &LineTableOpcode) {
+ IO.mapRequired("Opcode", LineTableOpcode.Opcode);
+ if (LineTableOpcode.Opcode == dwarf::DW_LNS_extended_op) {
+ IO.mapOptional("ExtLen", LineTableOpcode.ExtLen);
+ IO.mapRequired("SubOpcode", LineTableOpcode.SubOpcode);
+ }
+
+ if (!LineTableOpcode.UnknownOpcodeData.empty() || !IO.outputting())
+ IO.mapOptional("UnknownOpcodeData", LineTableOpcode.UnknownOpcodeData);
+  if (!LineTableOpcode.StandardOpcodeData.empty() || !IO.outputting())
+    IO.mapOptional("StandardOpcodeData", LineTableOpcode.StandardOpcodeData);
+ if (!LineTableOpcode.FileEntry.Name.empty() || !IO.outputting())
+ IO.mapOptional("FileEntry", LineTableOpcode.FileEntry);
+ if (LineTableOpcode.Opcode == dwarf::DW_LNS_advance_line || !IO.outputting())
+ IO.mapOptional("SData", LineTableOpcode.SData);
+ IO.mapOptional("Data", LineTableOpcode.Data);
+}
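+
+// For example, an extended opcode might be described as (hypothetical values;
+// ExtLen covers the sub-opcode byte plus an 8-byte address):
+//   Opcodes:
+//     - Opcode: DW_LNS_extended_op
+//       ExtLen: 9
+//       SubOpcode: DW_LNE_set_address
+//       Data: 0x1000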
+
+void MappingTraits<DWARFYAML::LineTable>::mapping(
+ IO &IO, DWARFYAML::LineTable &LineTable) {
+ IO.mapOptional("Format", LineTable.Format, dwarf::DWARF32);
+ IO.mapOptional("Length", LineTable.Length);
+ IO.mapRequired("Version", LineTable.Version);
+ IO.mapOptional("PrologueLength", LineTable.PrologueLength);
+ IO.mapRequired("MinInstLength", LineTable.MinInstLength);
+  if (LineTable.Version >= 4)
+ IO.mapRequired("MaxOpsPerInst", LineTable.MaxOpsPerInst);
+ IO.mapRequired("DefaultIsStmt", LineTable.DefaultIsStmt);
+ IO.mapRequired("LineBase", LineTable.LineBase);
+ IO.mapRequired("LineRange", LineTable.LineRange);
+ IO.mapOptional("OpcodeBase", LineTable.OpcodeBase);
+ IO.mapOptional("StandardOpcodeLengths", LineTable.StandardOpcodeLengths);
+ IO.mapOptional("IncludeDirs", LineTable.IncludeDirs);
+ IO.mapOptional("Files", LineTable.Files);
+ IO.mapOptional("Opcodes", LineTable.Opcodes);
+}
+
+void MappingTraits<DWARFYAML::SegAddrPair>::mapping(
+ IO &IO, DWARFYAML::SegAddrPair &SegAddrPair) {
+ IO.mapOptional("Segment", SegAddrPair.Segment, 0);
+ IO.mapOptional("Address", SegAddrPair.Address, 0);
+}
+
+void MappingTraits<DWARFYAML::AddrTableEntry>::mapping(
+ IO &IO, DWARFYAML::AddrTableEntry &AddrTable) {
+ IO.mapOptional("Format", AddrTable.Format, dwarf::DWARF32);
+ IO.mapOptional("Length", AddrTable.Length);
+ IO.mapRequired("Version", AddrTable.Version);
+ IO.mapOptional("AddressSize", AddrTable.AddrSize);
+ IO.mapOptional("SegmentSelectorSize", AddrTable.SegSelectorSize, 0);
+ IO.mapOptional("Entries", AddrTable.SegAddrPairs);
+}
+
+void MappingTraits<DWARFYAML::StringOffsetsTable>::mapping(
+ IO &IO, DWARFYAML::StringOffsetsTable &StrOffsetsTable) {
+ IO.mapOptional("Format", StrOffsetsTable.Format, dwarf::DWARF32);
+ IO.mapOptional("Length", StrOffsetsTable.Length);
+ IO.mapOptional("Version", StrOffsetsTable.Version, 5);
+ IO.mapOptional("Padding", StrOffsetsTable.Padding, 0);
+ IO.mapOptional("Offsets", StrOffsetsTable.Offsets);
+}
+
+void MappingTraits<DWARFYAML::DWARFOperation>::mapping(
+ IO &IO, DWARFYAML::DWARFOperation &DWARFOperation) {
+ IO.mapRequired("Operator", DWARFOperation.Operator);
+ IO.mapOptional("Values", DWARFOperation.Values);
+}
+
+void MappingTraits<DWARFYAML::RnglistEntry>::mapping(
+ IO &IO, DWARFYAML::RnglistEntry &RnglistEntry) {
+ IO.mapRequired("Operator", RnglistEntry.Operator);
+ IO.mapOptional("Values", RnglistEntry.Values);
+}
+
+void MappingTraits<DWARFYAML::LoclistEntry>::mapping(
+ IO &IO, DWARFYAML::LoclistEntry &LoclistEntry) {
+ IO.mapRequired("Operator", LoclistEntry.Operator);
+ IO.mapOptional("Values", LoclistEntry.Values);
+ IO.mapOptional("DescriptionsLength", LoclistEntry.DescriptionsLength);
+ IO.mapOptional("Descriptions", LoclistEntry.Descriptions);
+}
+
+template <typename EntryType>
+void MappingTraits<DWARFYAML::ListEntries<EntryType>>::mapping(
+ IO &IO, DWARFYAML::ListEntries<EntryType> &ListEntries) {
+ IO.mapOptional("Entries", ListEntries.Entries);
+ IO.mapOptional("Content", ListEntries.Content);
+}
+
+template <typename EntryType>
+std::string MappingTraits<DWARFYAML::ListEntries<EntryType>>::validate(
+ IO &IO, DWARFYAML::ListEntries<EntryType> &ListEntries) {
+ if (ListEntries.Entries && ListEntries.Content)
+ return "Entries and Content can't be used together";
+ return "";
+}
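+
+// That is, a list may be described either structurally or as raw bytes, but
+// not both (hypothetical YAML):
+//   Lists:
+//     - Entries:
+//         - Operator: DW_RLE_end_of_list     # OK
+//     - Content: '00'                        # OK, raw bytes
+//     - Entries: []
+//       Content: '00'                        # rejected by validate()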
+
+template <typename EntryType>
+void MappingTraits<DWARFYAML::ListTable<EntryType>>::mapping(
+ IO &IO, DWARFYAML::ListTable<EntryType> &ListTable) {
+ IO.mapOptional("Format", ListTable.Format, dwarf::DWARF32);
+ IO.mapOptional("Length", ListTable.Length);
+ IO.mapOptional("Version", ListTable.Version, 5);
+ IO.mapOptional("AddressSize", ListTable.AddrSize);
+ IO.mapOptional("SegmentSelectorSize", ListTable.SegSelectorSize, 0);
+ IO.mapOptional("OffsetEntryCount", ListTable.OffsetEntryCount);
+ IO.mapOptional("Offsets", ListTable.Offsets);
+ IO.mapOptional("Lists", ListTable.Lists);
+}
+
+} // end namespace yaml
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/ELFEmitter.cpp b/contrib/libs/llvm12/lib/ObjectYAML/ELFEmitter.cpp
new file mode 100644
index 00000000000..e477a1b2b8f
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/ELFEmitter.cpp
@@ -0,0 +1,1953 @@
+//===- yaml2elf - Convert YAML to an ELF object file ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// The ELF component of yaml2obj.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/ObjectYAML/DWARFEmitter.h"
+#include "llvm/ObjectYAML/DWARFYAML.h"
+#include "llvm/ObjectYAML/ELFYAML.h"
+#include "llvm/ObjectYAML/yaml2obj.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/WithColor.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+
+// This class is used to build up a contiguous binary blob while keeping
+// track of an offset in the output (which notionally begins at
+// `InitialOffset`).
+// The blob might be limited to an arbitrary size. Once the limit is reached,
+// all attempts to write data are ignored and the error condition is
+// remembered. This lets us delay error reporting to a convenient time.
+namespace {
+class ContiguousBlobAccumulator {
+ const uint64_t InitialOffset;
+ const uint64_t MaxSize;
+
+ SmallVector<char, 128> Buf;
+ raw_svector_ostream OS;
+ Error ReachedLimitErr = Error::success();
+
+ bool checkLimit(uint64_t Size) {
+ if (!ReachedLimitErr && getOffset() + Size <= MaxSize)
+ return true;
+ if (!ReachedLimitErr)
+ ReachedLimitErr = createStringError(errc::invalid_argument,
+ "reached the output size limit");
+ return false;
+ }
+
+public:
+ ContiguousBlobAccumulator(uint64_t BaseOffset, uint64_t SizeLimit)
+ : InitialOffset(BaseOffset), MaxSize(SizeLimit), OS(Buf) {}
+
+ uint64_t tell() const { return OS.tell(); }
+ uint64_t getOffset() const { return InitialOffset + OS.tell(); }
+ void writeBlobToStream(raw_ostream &Out) const { Out << OS.str(); }
+
+ Error takeLimitError() {
+ // Request to write 0 bytes to check we did not reach the limit.
+ checkLimit(0);
+ return std::move(ReachedLimitErr);
+ }
+
+ /// \returns The new offset.
+ uint64_t padToAlignment(unsigned Align) {
+ uint64_t CurrentOffset = getOffset();
+ if (ReachedLimitErr)
+ return CurrentOffset;
+
+ uint64_t AlignedOffset = alignTo(CurrentOffset, Align == 0 ? 1 : Align);
+ uint64_t PaddingSize = AlignedOffset - CurrentOffset;
+ if (!checkLimit(PaddingSize))
+ return CurrentOffset;
+
+ writeZeros(PaddingSize);
+ return AlignedOffset;
+ }
+
+ raw_ostream *getRawOS(uint64_t Size) {
+ if (checkLimit(Size))
+ return &OS;
+ return nullptr;
+ }
+
+ void writeAsBinary(const yaml::BinaryRef &Bin, uint64_t N = UINT64_MAX) {
+ if (!checkLimit(Bin.binary_size()))
+ return;
+ Bin.writeAsBinary(OS, N);
+ }
+
+ void writeZeros(uint64_t Num) {
+ if (checkLimit(Num))
+ OS.write_zeros(Num);
+ }
+
+ void write(const char *Ptr, size_t Size) {
+ if (checkLimit(Size))
+ OS.write(Ptr, Size);
+ }
+
+ void write(unsigned char C) {
+ if (checkLimit(1))
+ OS.write(C);
+ }
+
+ unsigned writeULEB128(uint64_t Val) {
+ if (!checkLimit(sizeof(uint64_t)))
+ return 0;
+ return encodeULEB128(Val, OS);
+ }
+
+ template <typename T> void write(T Val, support::endianness E) {
+ if (checkLimit(sizeof(T)))
+ support::endian::write<T>(OS, Val, E);
+ }
+
+ void updateDataAt(uint64_t Pos, void *Data, size_t Size) {
+ assert(Pos >= InitialOffset && Pos + Size <= getOffset());
+ memcpy(&Buf[Pos - InitialOffset], Data, Size);
+ }
+};
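+
+// A minimal usage sketch (the names below are illustrative):
+//   ContiguousBlobAccumulator CBA(/*BaseOffset=*/0, /*SizeLimit=*/UINT64_MAX);
+//   CBA.write<uint32_t>(0xdeadbeef, support::little);
+//   if (Error E = CBA.takeLimitError())
+//     report_fatal_error(std::move(E)); // an earlier write hit the limit
+//   CBA.writeBlobToStream(Out);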
+
+// Used to keep track of section and symbol names, so that in the YAML file
+// sections and symbols can be referenced by name instead of by index.
+class NameToIdxMap {
+ StringMap<unsigned> Map;
+
+public:
+  /// \returns false if name is already present in the map.
+ bool addName(StringRef Name, unsigned Ndx) {
+ return Map.insert({Name, Ndx}).second;
+ }
+  /// \returns false if name is not present in the map.
+ bool lookup(StringRef Name, unsigned &Idx) const {
+ auto I = Map.find(Name);
+ if (I == Map.end())
+ return false;
+ Idx = I->getValue();
+ return true;
+ }
+ /// Asserts if name is not present in the map.
+ unsigned get(StringRef Name) const {
+ unsigned Idx;
+ if (lookup(Name, Idx))
+ return Idx;
+ assert(false && "Expected section not found in index");
+ return 0;
+ }
+ unsigned size() const { return Map.size(); }
+};
+
+namespace {
+struct Fragment {
+ uint64_t Offset;
+ uint64_t Size;
+ uint32_t Type;
+ uint64_t AddrAlign;
+};
+} // namespace
+
+/// "Single point of truth" for the ELF file construction.
+/// TODO: This class still has a ways to go before it is truly a "single
+/// point of truth".
+template <class ELFT> class ELFState {
+ LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+
+ enum class SymtabType { Static, Dynamic };
+
+ /// The future ".strtab" section.
+ StringTableBuilder DotStrtab{StringTableBuilder::ELF};
+
+ /// The future ".shstrtab" section.
+ StringTableBuilder DotShStrtab{StringTableBuilder::ELF};
+
+ /// The future ".dynstr" section.
+ StringTableBuilder DotDynstr{StringTableBuilder::ELF};
+
+ NameToIdxMap SN2I;
+ NameToIdxMap SymN2I;
+ NameToIdxMap DynSymN2I;
+ ELFYAML::Object &Doc;
+
+ StringSet<> ExcludedSectionHeaders;
+
+ uint64_t LocationCounter = 0;
+ bool HasError = false;
+ yaml::ErrorHandler ErrHandler;
+ void reportError(const Twine &Msg);
+ void reportError(Error Err);
+
+ std::vector<Elf_Sym> toELFSymbols(ArrayRef<ELFYAML::Symbol> Symbols,
+ const StringTableBuilder &Strtab);
+ unsigned toSectionIndex(StringRef S, StringRef LocSec, StringRef LocSym = "");
+ unsigned toSymbolIndex(StringRef S, StringRef LocSec, bool IsDynamic);
+
+ void buildSectionIndex();
+ void buildSymbolIndexes();
+ void initProgramHeaders(std::vector<Elf_Phdr> &PHeaders);
+ bool initImplicitHeader(ContiguousBlobAccumulator &CBA, Elf_Shdr &Header,
+ StringRef SecName, ELFYAML::Section *YAMLSec);
+ void initSectionHeaders(std::vector<Elf_Shdr> &SHeaders,
+ ContiguousBlobAccumulator &CBA);
+ void initSymtabSectionHeader(Elf_Shdr &SHeader, SymtabType STType,
+ ContiguousBlobAccumulator &CBA,
+ ELFYAML::Section *YAMLSec);
+ void initStrtabSectionHeader(Elf_Shdr &SHeader, StringRef Name,
+ StringTableBuilder &STB,
+ ContiguousBlobAccumulator &CBA,
+ ELFYAML::Section *YAMLSec);
+ void initDWARFSectionHeader(Elf_Shdr &SHeader, StringRef Name,
+ ContiguousBlobAccumulator &CBA,
+ ELFYAML::Section *YAMLSec);
+ void setProgramHeaderLayout(std::vector<Elf_Phdr> &PHeaders,
+ std::vector<Elf_Shdr> &SHeaders);
+
+ std::vector<Fragment>
+ getPhdrFragments(const ELFYAML::ProgramHeader &Phdr,
+ ArrayRef<typename ELFT::Shdr> SHeaders);
+
+ void finalizeStrings();
+ void writeELFHeader(raw_ostream &OS);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::NoBitsSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::RawContentSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::RelocationSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::RelrSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::GroupSection &Group,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::SymtabShndxSection &Shndx,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::SymverSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::VerneedSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::VerdefSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::ARMIndexTableSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::MipsABIFlags &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::DynamicSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::StackSizesSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::BBAddrMapSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::HashSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::AddrsigSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::NoteSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::GnuHashSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::LinkerOptionsSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::DependentLibrariesSection &Section,
+ ContiguousBlobAccumulator &CBA);
+ void writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::CallGraphProfileSection &Section,
+ ContiguousBlobAccumulator &CBA);
+
+ void writeFill(ELFYAML::Fill &Fill, ContiguousBlobAccumulator &CBA);
+
+ ELFState(ELFYAML::Object &D, yaml::ErrorHandler EH);
+
+ void assignSectionAddress(Elf_Shdr &SHeader, ELFYAML::Section *YAMLSec);
+
+ DenseMap<StringRef, size_t> buildSectionHeaderReorderMap();
+
+ BumpPtrAllocator StringAlloc;
+ uint64_t alignToOffset(ContiguousBlobAccumulator &CBA, uint64_t Align,
+ llvm::Optional<llvm::yaml::Hex64> Offset);
+
+ uint64_t getSectionNameOffset(StringRef Name);
+
+public:
+ static bool writeELF(raw_ostream &OS, ELFYAML::Object &Doc,
+ yaml::ErrorHandler EH, uint64_t MaxSize);
+};
+} // end anonymous namespace
+
+template <class T> static size_t arrayDataSize(ArrayRef<T> A) {
+ return A.size() * sizeof(T);
+}
+
+template <class T> static void writeArrayData(raw_ostream &OS, ArrayRef<T> A) {
+ OS.write((const char *)A.data(), arrayDataSize(A));
+}
+
+template <class T> static void zero(T &Obj) { memset(&Obj, 0, sizeof(Obj)); }
+
+template <class ELFT>
+ELFState<ELFT>::ELFState(ELFYAML::Object &D, yaml::ErrorHandler EH)
+ : Doc(D), ErrHandler(EH) {
+ std::vector<ELFYAML::Section *> Sections = Doc.getSections();
+ // Insert SHT_NULL section implicitly when it is not defined in YAML.
+ if (Sections.empty() || Sections.front()->Type != ELF::SHT_NULL)
+ Doc.Chunks.insert(
+ Doc.Chunks.begin(),
+ std::make_unique<ELFYAML::Section>(
+ ELFYAML::Chunk::ChunkKind::RawContent, /*IsImplicit=*/true));
+
+ StringSet<> DocSections;
+ ELFYAML::SectionHeaderTable *SecHdrTable = nullptr;
+ for (size_t I = 0; I < Doc.Chunks.size(); ++I) {
+ const std::unique_ptr<ELFYAML::Chunk> &C = Doc.Chunks[I];
+
+ // We might have an explicit section header table declaration.
+ if (auto S = dyn_cast<ELFYAML::SectionHeaderTable>(C.get())) {
+ if (SecHdrTable)
+ reportError("multiple section header tables are not allowed");
+ SecHdrTable = S;
+ continue;
+ }
+
+ // We add a technical suffix for each unnamed section/fill. It does not
+ // affect the output, but allows us to map them by name in the code and
+ // report better error messages.
+ if (C->Name.empty()) {
+ std::string NewName = ELFYAML::appendUniqueSuffix(
+ /*Name=*/"", "index " + Twine(I));
+ C->Name = StringRef(NewName).copy(StringAlloc);
+ assert(ELFYAML::dropUniqueSuffix(C->Name).empty());
+ }
+
+ if (!DocSections.insert(C->Name).second)
+ reportError("repeated section/fill name: '" + C->Name +
+ "' at YAML section/fill number " + Twine(I));
+ }
+
+ std::vector<StringRef> ImplicitSections;
+ if (Doc.DynamicSymbols)
+ ImplicitSections.insert(ImplicitSections.end(), {".dynsym", ".dynstr"});
+ if (Doc.Symbols)
+ ImplicitSections.push_back(".symtab");
+ if (Doc.DWARF)
+ for (StringRef DebugSecName : Doc.DWARF->getNonEmptySectionNames()) {
+ std::string SecName = ("." + DebugSecName).str();
+ ImplicitSections.push_back(StringRef(SecName).copy(StringAlloc));
+ }
+ ImplicitSections.insert(ImplicitSections.end(), {".strtab"});
+ if (!SecHdrTable || !SecHdrTable->NoHeaders.getValueOr(false))
+ ImplicitSections.insert(ImplicitSections.end(), {".shstrtab"});
+
+ // Insert placeholders for implicit sections that are not
+ // defined explicitly in YAML.
+ for (StringRef SecName : ImplicitSections) {
+ if (DocSections.count(SecName))
+ continue;
+
+ std::unique_ptr<ELFYAML::Section> Sec = std::make_unique<ELFYAML::Section>(
+ ELFYAML::Chunk::ChunkKind::RawContent, true /*IsImplicit*/);
+ Sec->Name = SecName;
+
+ if (SecName == ".dynsym")
+ Sec->Type = ELF::SHT_DYNSYM;
+ else if (SecName == ".symtab")
+ Sec->Type = ELF::SHT_SYMTAB;
+ else
+ Sec->Type = ELF::SHT_STRTAB;
+
+ // When the section header table is explicitly defined at the end of the
+ // sections list, it is reasonable to assume that the user wants to reorder
+ // section headers, but still wants to place the section header table after
+ // all sections, like it normally happens. In this case we want to insert
+ // other implicit sections right before the section header table.
+ if (Doc.Chunks.back().get() == SecHdrTable)
+ Doc.Chunks.insert(Doc.Chunks.end() - 1, std::move(Sec));
+ else
+ Doc.Chunks.push_back(std::move(Sec));
+ }
+
+ // Insert the section header table implicitly at the end, when it is not
+ // explicitly defined.
+ if (!SecHdrTable)
+ Doc.Chunks.push_back(
+ std::make_unique<ELFYAML::SectionHeaderTable>(/*IsImplicit=*/true));
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeELFHeader(raw_ostream &OS) {
+ using namespace llvm::ELF;
+
+ Elf_Ehdr Header;
+ zero(Header);
+ Header.e_ident[EI_MAG0] = 0x7f;
+ Header.e_ident[EI_MAG1] = 'E';
+ Header.e_ident[EI_MAG2] = 'L';
+ Header.e_ident[EI_MAG3] = 'F';
+ Header.e_ident[EI_CLASS] = ELFT::Is64Bits ? ELFCLASS64 : ELFCLASS32;
+ Header.e_ident[EI_DATA] = Doc.Header.Data;
+ Header.e_ident[EI_VERSION] = EV_CURRENT;
+ Header.e_ident[EI_OSABI] = Doc.Header.OSABI;
+ Header.e_ident[EI_ABIVERSION] = Doc.Header.ABIVersion;
+ Header.e_type = Doc.Header.Type;
+
+ if (Doc.Header.Machine)
+ Header.e_machine = *Doc.Header.Machine;
+ else
+ Header.e_machine = EM_NONE;
+
+ Header.e_version = EV_CURRENT;
+ Header.e_entry = Doc.Header.Entry;
+ Header.e_flags = Doc.Header.Flags;
+ Header.e_ehsize = sizeof(Elf_Ehdr);
+
+ if (Doc.Header.EPhOff)
+ Header.e_phoff = *Doc.Header.EPhOff;
+ else if (!Doc.ProgramHeaders.empty())
+ Header.e_phoff = sizeof(Header);
+ else
+ Header.e_phoff = 0;
+
+ if (Doc.Header.EPhEntSize)
+ Header.e_phentsize = *Doc.Header.EPhEntSize;
+ else if (!Doc.ProgramHeaders.empty())
+ Header.e_phentsize = sizeof(Elf_Phdr);
+ else
+ Header.e_phentsize = 0;
+
+ if (Doc.Header.EPhNum)
+ Header.e_phnum = *Doc.Header.EPhNum;
+ else if (!Doc.ProgramHeaders.empty())
+ Header.e_phnum = Doc.ProgramHeaders.size();
+ else
+ Header.e_phnum = 0;
+
+ Header.e_shentsize = Doc.Header.EShEntSize ? (uint16_t)*Doc.Header.EShEntSize
+ : sizeof(Elf_Shdr);
+
+ const ELFYAML::SectionHeaderTable &SectionHeaders =
+ Doc.getSectionHeaderTable();
+
+ if (Doc.Header.EShOff)
+ Header.e_shoff = *Doc.Header.EShOff;
+ else if (SectionHeaders.Offset)
+ Header.e_shoff = *SectionHeaders.Offset;
+ else
+ Header.e_shoff = 0;
+
+ if (Doc.Header.EShNum)
+ Header.e_shnum = *Doc.Header.EShNum;
+ else
+ Header.e_shnum = SectionHeaders.getNumHeaders(Doc.getSections().size());
+
+ if (Doc.Header.EShStrNdx)
+ Header.e_shstrndx = *Doc.Header.EShStrNdx;
+ else if (SectionHeaders.Offset && !ExcludedSectionHeaders.count(".shstrtab"))
+ Header.e_shstrndx = SN2I.get(".shstrtab");
+ else
+ Header.e_shstrndx = 0;
+
+ OS.write((const char *)&Header, sizeof(Header));
+}
+
+template <class ELFT>
+void ELFState<ELFT>::initProgramHeaders(std::vector<Elf_Phdr> &PHeaders) {
+ DenseMap<StringRef, ELFYAML::Fill *> NameToFill;
+ DenseMap<StringRef, size_t> NameToIndex;
+ for (size_t I = 0, E = Doc.Chunks.size(); I != E; ++I) {
+ if (auto S = dyn_cast<ELFYAML::Fill>(Doc.Chunks[I].get()))
+ NameToFill[S->Name] = S;
+ NameToIndex[Doc.Chunks[I]->Name] = I + 1;
+ }
+
+ std::vector<ELFYAML::Section *> Sections = Doc.getSections();
+ for (size_t I = 0, E = Doc.ProgramHeaders.size(); I != E; ++I) {
+ ELFYAML::ProgramHeader &YamlPhdr = Doc.ProgramHeaders[I];
+ Elf_Phdr Phdr;
+ zero(Phdr);
+ Phdr.p_type = YamlPhdr.Type;
+ Phdr.p_flags = YamlPhdr.Flags;
+ Phdr.p_vaddr = YamlPhdr.VAddr;
+ Phdr.p_paddr = YamlPhdr.PAddr;
+ PHeaders.push_back(Phdr);
+
+ if (!YamlPhdr.FirstSec && !YamlPhdr.LastSec)
+ continue;
+
+    // Get the index of the section, or 0 when the section doesn't exist.
+ size_t First = NameToIndex[*YamlPhdr.FirstSec];
+ if (!First)
+ reportError("unknown section or fill referenced: '" + *YamlPhdr.FirstSec +
+ "' by the 'FirstSec' key of the program header with index " +
+ Twine(I));
+ size_t Last = NameToIndex[*YamlPhdr.LastSec];
+ if (!Last)
+ reportError("unknown section or fill referenced: '" + *YamlPhdr.LastSec +
+ "' by the 'LastSec' key of the program header with index " +
+ Twine(I));
+ if (!First || !Last)
+ continue;
+
+ if (First > Last)
+ reportError("program header with index " + Twine(I) +
+ ": the section index of " + *YamlPhdr.FirstSec +
+ " is greater than the index of " + *YamlPhdr.LastSec);
+
+ for (size_t I = First; I <= Last; ++I)
+ YamlPhdr.Chunks.push_back(Doc.Chunks[I - 1].get());
+ }
+}
+
+template <class ELFT>
+unsigned ELFState<ELFT>::toSectionIndex(StringRef S, StringRef LocSec,
+ StringRef LocSym) {
+ assert(LocSec.empty() || LocSym.empty());
+
+ unsigned Index;
+ if (!SN2I.lookup(S, Index) && !to_integer(S, Index)) {
+ if (!LocSym.empty())
+ reportError("unknown section referenced: '" + S + "' by YAML symbol '" +
+ LocSym + "'");
+ else
+ reportError("unknown section referenced: '" + S + "' by YAML section '" +
+ LocSec + "'");
+ return 0;
+ }
+
+ const ELFYAML::SectionHeaderTable &SectionHeaders =
+ Doc.getSectionHeaderTable();
+ if (SectionHeaders.IsImplicit ||
+ (SectionHeaders.NoHeaders && !SectionHeaders.NoHeaders.getValue()))
+ return Index;
+
+ assert(!SectionHeaders.NoHeaders.getValueOr(false) ||
+ !SectionHeaders.Sections);
+ size_t FirstExcluded =
+ SectionHeaders.Sections ? SectionHeaders.Sections->size() : 0;
+ if (Index >= FirstExcluded) {
+ if (LocSym.empty())
+ reportError("unable to link '" + LocSec + "' to excluded section '" + S +
+ "'");
+ else
+ reportError("excluded section referenced: '" + S + "' by symbol '" +
+ LocSym + "'");
+ }
+ return Index;
+}
+
+template <class ELFT>
+unsigned ELFState<ELFT>::toSymbolIndex(StringRef S, StringRef LocSec,
+ bool IsDynamic) {
+ const NameToIdxMap &SymMap = IsDynamic ? DynSymN2I : SymN2I;
+ unsigned Index;
+ // Here we try to look up S in the symbol table. If it is not there,
+ // treat its value as a symbol index.
+ if (!SymMap.lookup(S, Index) && !to_integer(S, Index)) {
+ reportError("unknown symbol referenced: '" + S + "' by YAML section '" +
+ LocSec + "'");
+ return 0;
+ }
+ return Index;
+}
+
+template <class ELFT>
+static void overrideFields(ELFYAML::Section *From, typename ELFT::Shdr &To) {
+ if (!From)
+ return;
+ if (From->ShAddrAlign)
+ To.sh_addralign = *From->ShAddrAlign;
+ if (From->ShFlags)
+ To.sh_flags = *From->ShFlags;
+ if (From->ShName)
+ To.sh_name = *From->ShName;
+ if (From->ShOffset)
+ To.sh_offset = *From->ShOffset;
+ if (From->ShSize)
+ To.sh_size = *From->ShSize;
+ if (From->ShType)
+ To.sh_type = *From->ShType;
+}
+
+template <class ELFT>
+bool ELFState<ELFT>::initImplicitHeader(ContiguousBlobAccumulator &CBA,
+ Elf_Shdr &Header, StringRef SecName,
+ ELFYAML::Section *YAMLSec) {
+ // Check if the header was already initialized.
+ if (Header.sh_offset)
+ return false;
+
+ if (SecName == ".symtab")
+ initSymtabSectionHeader(Header, SymtabType::Static, CBA, YAMLSec);
+ else if (SecName == ".strtab")
+ initStrtabSectionHeader(Header, SecName, DotStrtab, CBA, YAMLSec);
+ else if (SecName == ".shstrtab")
+ initStrtabSectionHeader(Header, SecName, DotShStrtab, CBA, YAMLSec);
+ else if (SecName == ".dynsym")
+ initSymtabSectionHeader(Header, SymtabType::Dynamic, CBA, YAMLSec);
+ else if (SecName == ".dynstr")
+ initStrtabSectionHeader(Header, SecName, DotDynstr, CBA, YAMLSec);
+ else if (SecName.startswith(".debug_")) {
+ // If a ".debug_*" section's type is a preserved one, e.g., SHT_DYNAMIC, we
+ // will not treat it as a debug section.
+ if (YAMLSec && !isa<ELFYAML::RawContentSection>(YAMLSec))
+ return false;
+ initDWARFSectionHeader(Header, SecName, CBA, YAMLSec);
+ } else
+ return false;
+
+ LocationCounter += Header.sh_size;
+
+ // Override section fields if requested.
+ overrideFields<ELFT>(YAMLSec, Header);
+ return true;
+}
+
+constexpr char SuffixStart = '(';
+constexpr char SuffixEnd = ')';
+
+std::string llvm::ELFYAML::appendUniqueSuffix(StringRef Name,
+ const Twine &Msg) {
+ // Do not add a space when a Name is empty.
+ std::string Ret = Name.empty() ? "" : Name.str() + ' ';
+ return Ret + (Twine(SuffixStart) + Msg + Twine(SuffixEnd)).str();
+}
+
+StringRef llvm::ELFYAML::dropUniqueSuffix(StringRef S) {
+ if (S.empty() || S.back() != SuffixEnd)
+ return S;
+
+ // A special case for empty names. See appendUniqueSuffix() above.
+ size_t SuffixPos = S.rfind(SuffixStart);
+ if (SuffixPos == 0)
+ return "";
+
+ if (SuffixPos == StringRef::npos || S[SuffixPos - 1] != ' ')
+ return S;
+ return S.substr(0, SuffixPos - 1);
+}
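+
+// For example, appendUniqueSuffix("", "index 3") produces "(index 3)" and
+// appendUniqueSuffix(".foo", "index 3") produces ".foo (index 3)";
+// dropUniqueSuffix() recovers "" and ".foo" respectively.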
+
+template <class ELFT>
+uint64_t ELFState<ELFT>::getSectionNameOffset(StringRef Name) {
+ // If a section is excluded from section headers, we do not save its name in
+ // the string table.
+ if (ExcludedSectionHeaders.count(Name))
+ return 0;
+ return DotShStrtab.getOffset(Name);
+}
+
+static uint64_t writeContent(ContiguousBlobAccumulator &CBA,
+ const Optional<yaml::BinaryRef> &Content,
+ const Optional<llvm::yaml::Hex64> &Size) {
+ size_t ContentSize = 0;
+ if (Content) {
+ CBA.writeAsBinary(*Content);
+ ContentSize = Content->binary_size();
+ }
+
+ if (!Size)
+ return ContentSize;
+
+ CBA.writeZeros(*Size - ContentSize);
+ return *Size;
+}
+
+static StringRef getDefaultLinkSec(unsigned SecType) {
+ switch (SecType) {
+ case ELF::SHT_REL:
+ case ELF::SHT_RELA:
+ case ELF::SHT_GROUP:
+ case ELF::SHT_LLVM_CALL_GRAPH_PROFILE:
+ case ELF::SHT_LLVM_ADDRSIG:
+ return ".symtab";
+ case ELF::SHT_GNU_versym:
+ case ELF::SHT_HASH:
+ case ELF::SHT_GNU_HASH:
+ return ".dynsym";
+ case ELF::SHT_DYNSYM:
+ case ELF::SHT_GNU_verdef:
+ case ELF::SHT_GNU_verneed:
+ return ".dynstr";
+ case ELF::SHT_SYMTAB:
+ return ".strtab";
+ default:
+ return "";
+ }
+}
+
+template <class ELFT>
+void ELFState<ELFT>::initSectionHeaders(std::vector<Elf_Shdr> &SHeaders,
+ ContiguousBlobAccumulator &CBA) {
+ // Ensure SHN_UNDEF entry is present. An all-zero section header is a
+ // valid SHN_UNDEF entry since SHT_NULL == 0.
+ SHeaders.resize(Doc.getSections().size());
+
+ for (const std::unique_ptr<ELFYAML::Chunk> &D : Doc.Chunks) {
+ if (ELFYAML::Fill *S = dyn_cast<ELFYAML::Fill>(D.get())) {
+ S->Offset = alignToOffset(CBA, /*Align=*/1, S->Offset);
+ writeFill(*S, CBA);
+ LocationCounter += S->Size;
+ continue;
+ }
+
+ if (ELFYAML::SectionHeaderTable *S =
+ dyn_cast<ELFYAML::SectionHeaderTable>(D.get())) {
+ if (S->NoHeaders.getValueOr(false))
+ continue;
+
+ if (!S->Offset)
+ S->Offset = alignToOffset(CBA, sizeof(typename ELFT::uint),
+ /*Offset=*/None);
+ else
+ S->Offset = alignToOffset(CBA, /*Align=*/1, S->Offset);
+
+ uint64_t Size = S->getNumHeaders(SHeaders.size()) * sizeof(Elf_Shdr);
+      // The full section header information might not be available here, so
+ // fill the space with zeroes as a placeholder.
+ CBA.writeZeros(Size);
+ LocationCounter += Size;
+ continue;
+ }
+
+ ELFYAML::Section *Sec = cast<ELFYAML::Section>(D.get());
+ bool IsFirstUndefSection = Sec == Doc.getSections().front();
+ if (IsFirstUndefSection && Sec->IsImplicit)
+ continue;
+
+ Elf_Shdr &SHeader = SHeaders[SN2I.get(Sec->Name)];
+ if (Sec->Link) {
+ SHeader.sh_link = toSectionIndex(*Sec->Link, Sec->Name);
+ } else {
+ StringRef LinkSec = getDefaultLinkSec(Sec->Type);
+ unsigned Link = 0;
+ if (!LinkSec.empty() && !ExcludedSectionHeaders.count(LinkSec) &&
+ SN2I.lookup(LinkSec, Link))
+ SHeader.sh_link = Link;
+ }
+
+ if (Sec->EntSize)
+ SHeader.sh_entsize = *Sec->EntSize;
+ else
+ SHeader.sh_entsize = ELFYAML::getDefaultShEntSize<ELFT>(
+ Doc.Header.Machine.getValueOr(ELF::EM_NONE), Sec->Type, Sec->Name);
+
+ // We have a few sections like string or symbol tables that are usually
+ // added implicitly to the end. However, if they are explicitly specified
+ // in the YAML, we need to write them here. This ensures the file offset
+ // remains correct.
+ if (initImplicitHeader(CBA, SHeader, Sec->Name,
+ Sec->IsImplicit ? nullptr : Sec))
+ continue;
+
+ assert(Sec && "It can't be null unless it is an implicit section. But all "
+ "implicit sections should already have been handled above.");
+
+ SHeader.sh_name =
+ getSectionNameOffset(ELFYAML::dropUniqueSuffix(Sec->Name));
+ SHeader.sh_type = Sec->Type;
+ if (Sec->Flags)
+ SHeader.sh_flags = *Sec->Flags;
+ SHeader.sh_addralign = Sec->AddressAlign;
+
+ // Set the offset for all sections, except the SHN_UNDEF section with index
+ // 0 when not explicitly requested.
+ if (!IsFirstUndefSection || Sec->Offset)
+ SHeader.sh_offset = alignToOffset(CBA, SHeader.sh_addralign, Sec->Offset);
+
+ assignSectionAddress(SHeader, Sec);
+
+ if (IsFirstUndefSection) {
+ if (auto RawSec = dyn_cast<ELFYAML::RawContentSection>(Sec)) {
+ // We do not write any content for special SHN_UNDEF section.
+ if (RawSec->Size)
+ SHeader.sh_size = *RawSec->Size;
+ if (RawSec->Info)
+ SHeader.sh_info = *RawSec->Info;
+ }
+
+ LocationCounter += SHeader.sh_size;
+ overrideFields<ELFT>(Sec, SHeader);
+ continue;
+ }
+
+ if (!isa<ELFYAML::NoBitsSection>(Sec) && (Sec->Content || Sec->Size))
+ SHeader.sh_size = writeContent(CBA, Sec->Content, Sec->Size);
+
+ if (auto S = dyn_cast<ELFYAML::RawContentSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::SymtabShndxSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::RelocationSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::RelrSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::GroupSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::ARMIndexTableSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::MipsABIFlags>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::NoBitsSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::DynamicSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::SymverSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::VerneedSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::VerdefSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::StackSizesSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::HashSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::AddrsigSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::LinkerOptionsSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::NoteSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::GnuHashSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::DependentLibrariesSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::CallGraphProfileSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else if (auto S = dyn_cast<ELFYAML::BBAddrMapSection>(Sec)) {
+ writeSectionContent(SHeader, *S, CBA);
+ } else {
+ llvm_unreachable("Unknown section type");
+ }
+
+ LocationCounter += SHeader.sh_size;
+
+ // Override section fields if requested.
+ overrideFields<ELFT>(Sec, SHeader);
+ }
+}
+
+template <class ELFT>
+void ELFState<ELFT>::assignSectionAddress(Elf_Shdr &SHeader,
+ ELFYAML::Section *YAMLSec) {
+ if (YAMLSec && YAMLSec->Address) {
+ SHeader.sh_addr = *YAMLSec->Address;
+ LocationCounter = *YAMLSec->Address;
+ return;
+ }
+
+ // sh_addr represents the address in the memory image of a process. Sections
+ // in a relocatable object file or non-allocatable sections do not need
+ // sh_addr assignment.
+ if (Doc.Header.Type.value == ELF::ET_REL ||
+ !(SHeader.sh_flags & ELF::SHF_ALLOC))
+ return;
+
+ LocationCounter =
+ alignTo(LocationCounter, SHeader.sh_addralign ? SHeader.sh_addralign : 1);
+ SHeader.sh_addr = LocationCounter;
+}
+
+static size_t findFirstNonGlobal(ArrayRef<ELFYAML::Symbol> Symbols) {
+ for (size_t I = 0; I < Symbols.size(); ++I)
+ if (Symbols[I].Binding.value != ELF::STB_LOCAL)
+ return I;
+ return Symbols.size();
+}
+
+template <class ELFT>
+std::vector<typename ELFT::Sym>
+ELFState<ELFT>::toELFSymbols(ArrayRef<ELFYAML::Symbol> Symbols,
+ const StringTableBuilder &Strtab) {
+ std::vector<Elf_Sym> Ret;
+ Ret.resize(Symbols.size() + 1);
+
+ size_t I = 0;
+ for (const ELFYAML::Symbol &Sym : Symbols) {
+ Elf_Sym &Symbol = Ret[++I];
+
+    // If StName, which contains the name offset, is explicitly specified, we
+    // use it. This is useful for preparing broken objects. Otherwise, we add
+    // the specified Name to the string table builder to get its offset.
+ if (Sym.StName)
+ Symbol.st_name = *Sym.StName;
+ else if (!Sym.Name.empty())
+ Symbol.st_name = Strtab.getOffset(ELFYAML::dropUniqueSuffix(Sym.Name));
+
+ Symbol.setBindingAndType(Sym.Binding, Sym.Type);
+ if (Sym.Section)
+ Symbol.st_shndx = toSectionIndex(*Sym.Section, "", Sym.Name);
+ else if (Sym.Index)
+ Symbol.st_shndx = *Sym.Index;
+
+ Symbol.st_value = Sym.Value.getValueOr(yaml::Hex64(0));
+ Symbol.st_other = Sym.Other ? *Sym.Other : 0;
+ Symbol.st_size = Sym.Size.getValueOr(yaml::Hex64(0));
+ }
+
+ return Ret;
+}
+
+template <class ELFT>
+void ELFState<ELFT>::initSymtabSectionHeader(Elf_Shdr &SHeader,
+ SymtabType STType,
+ ContiguousBlobAccumulator &CBA,
+ ELFYAML::Section *YAMLSec) {
+
+ bool IsStatic = STType == SymtabType::Static;
+ ArrayRef<ELFYAML::Symbol> Symbols;
+ if (IsStatic && Doc.Symbols)
+ Symbols = *Doc.Symbols;
+ else if (!IsStatic && Doc.DynamicSymbols)
+ Symbols = *Doc.DynamicSymbols;
+
+ ELFYAML::RawContentSection *RawSec =
+ dyn_cast_or_null<ELFYAML::RawContentSection>(YAMLSec);
+ if (RawSec && (RawSec->Content || RawSec->Size)) {
+ bool HasSymbolsDescription =
+ (IsStatic && Doc.Symbols) || (!IsStatic && Doc.DynamicSymbols);
+ if (HasSymbolsDescription) {
+ StringRef Property = (IsStatic ? "`Symbols`" : "`DynamicSymbols`");
+ if (RawSec->Content)
+ reportError("cannot specify both `Content` and " + Property +
+ " for symbol table section '" + RawSec->Name + "'");
+ if (RawSec->Size)
+ reportError("cannot specify both `Size` and " + Property +
+ " for symbol table section '" + RawSec->Name + "'");
+ return;
+ }
+ }
+
+ SHeader.sh_name = getSectionNameOffset(IsStatic ? ".symtab" : ".dynsym");
+
+ if (YAMLSec)
+ SHeader.sh_type = YAMLSec->Type;
+ else
+ SHeader.sh_type = IsStatic ? ELF::SHT_SYMTAB : ELF::SHT_DYNSYM;
+
+ if (YAMLSec && YAMLSec->Flags)
+ SHeader.sh_flags = *YAMLSec->Flags;
+ else if (!IsStatic)
+ SHeader.sh_flags = ELF::SHF_ALLOC;
+
+ // If the symbol table section is explicitly described in the YAML
+ // then we should set the fields requested.
+ SHeader.sh_info = (RawSec && RawSec->Info) ? (unsigned)(*RawSec->Info)
+ : findFirstNonGlobal(Symbols) + 1;
+ SHeader.sh_addralign = YAMLSec ? (uint64_t)YAMLSec->AddressAlign : 8;
+
+ assignSectionAddress(SHeader, YAMLSec);
+
+ SHeader.sh_offset =
+ alignToOffset(CBA, SHeader.sh_addralign, RawSec ? RawSec->Offset : None);
+
+ if (RawSec && (RawSec->Content || RawSec->Size)) {
+ assert(Symbols.empty());
+ SHeader.sh_size = writeContent(CBA, RawSec->Content, RawSec->Size);
+ return;
+ }
+
+ std::vector<Elf_Sym> Syms =
+ toELFSymbols(Symbols, IsStatic ? DotStrtab : DotDynstr);
+ SHeader.sh_size = Syms.size() * sizeof(Elf_Sym);
+ CBA.write((const char *)Syms.data(), SHeader.sh_size);
+}
+
+template <class ELFT>
+void ELFState<ELFT>::initStrtabSectionHeader(Elf_Shdr &SHeader, StringRef Name,
+ StringTableBuilder &STB,
+ ContiguousBlobAccumulator &CBA,
+ ELFYAML::Section *YAMLSec) {
+ SHeader.sh_name = getSectionNameOffset(Name);
+ SHeader.sh_type = YAMLSec ? YAMLSec->Type : ELF::SHT_STRTAB;
+ SHeader.sh_addralign = YAMLSec ? (uint64_t)YAMLSec->AddressAlign : 1;
+
+ ELFYAML::RawContentSection *RawSec =
+ dyn_cast_or_null<ELFYAML::RawContentSection>(YAMLSec);
+
+ SHeader.sh_offset = alignToOffset(CBA, SHeader.sh_addralign,
+ YAMLSec ? YAMLSec->Offset : None);
+
+ if (RawSec && (RawSec->Content || RawSec->Size)) {
+ SHeader.sh_size = writeContent(CBA, RawSec->Content, RawSec->Size);
+ } else {
+ if (raw_ostream *OS = CBA.getRawOS(STB.getSize()))
+ STB.write(*OS);
+ SHeader.sh_size = STB.getSize();
+ }
+
+ if (RawSec && RawSec->Info)
+ SHeader.sh_info = *RawSec->Info;
+
+ if (YAMLSec && YAMLSec->Flags)
+ SHeader.sh_flags = *YAMLSec->Flags;
+ else if (Name == ".dynstr")
+ SHeader.sh_flags = ELF::SHF_ALLOC;
+
+ assignSectionAddress(SHeader, YAMLSec);
+}
+
+static bool shouldEmitDWARF(DWARFYAML::Data &DWARF, StringRef Name) {
+ SetVector<StringRef> DebugSecNames = DWARF.getNonEmptySectionNames();
+ return Name.consume_front(".") && DebugSecNames.count(Name);
+}
+
+template <class ELFT>
+Expected<uint64_t> emitDWARF(typename ELFT::Shdr &SHeader, StringRef Name,
+ const DWARFYAML::Data &DWARF,
+ ContiguousBlobAccumulator &CBA) {
+ // We are unable to predict the size of debug data, so we request to write 0
+ // bytes. This should always return us an output stream unless CBA is already
+ // in an error state.
+ raw_ostream *OS = CBA.getRawOS(0);
+ if (!OS)
+ return 0;
+
+ uint64_t BeginOffset = CBA.tell();
+
+ auto EmitFunc = DWARFYAML::getDWARFEmitterByName(Name.substr(1));
+ if (Error Err = EmitFunc(*OS, DWARF))
+ return std::move(Err);
+
+ return CBA.tell() - BeginOffset;
+}
+
+template <class ELFT>
+void ELFState<ELFT>::initDWARFSectionHeader(Elf_Shdr &SHeader, StringRef Name,
+ ContiguousBlobAccumulator &CBA,
+ ELFYAML::Section *YAMLSec) {
+ SHeader.sh_name = getSectionNameOffset(ELFYAML::dropUniqueSuffix(Name));
+ SHeader.sh_type = YAMLSec ? YAMLSec->Type : ELF::SHT_PROGBITS;
+ SHeader.sh_addralign = YAMLSec ? (uint64_t)YAMLSec->AddressAlign : 1;
+ SHeader.sh_offset = alignToOffset(CBA, SHeader.sh_addralign,
+ YAMLSec ? YAMLSec->Offset : None);
+
+ ELFYAML::RawContentSection *RawSec =
+ dyn_cast_or_null<ELFYAML::RawContentSection>(YAMLSec);
+ if (Doc.DWARF && shouldEmitDWARF(*Doc.DWARF, Name)) {
+ if (RawSec && (RawSec->Content || RawSec->Size))
+ reportError("cannot specify section '" + Name +
+ "' contents in the 'DWARF' entry and the 'Content' "
+ "or 'Size' in the 'Sections' entry at the same time");
+ else {
+ if (Expected<uint64_t> ShSizeOrErr =
+ emitDWARF<ELFT>(SHeader, Name, *Doc.DWARF, CBA))
+ SHeader.sh_size = *ShSizeOrErr;
+ else
+ reportError(ShSizeOrErr.takeError());
+ }
+ } else if (RawSec)
+ SHeader.sh_size = writeContent(CBA, RawSec->Content, RawSec->Size);
+ else
+ llvm_unreachable("debug sections can only be initialized via the 'DWARF' "
+ "entry or a RawContentSection");
+
+ if (RawSec && RawSec->Info)
+ SHeader.sh_info = *RawSec->Info;
+
+ if (YAMLSec && YAMLSec->Flags)
+ SHeader.sh_flags = *YAMLSec->Flags;
+ else if (Name == ".debug_str")
+ SHeader.sh_flags = ELF::SHF_MERGE | ELF::SHF_STRINGS;
+
+ assignSectionAddress(SHeader, YAMLSec);
+}
+
+template <class ELFT> void ELFState<ELFT>::reportError(const Twine &Msg) {
+ ErrHandler(Msg);
+ HasError = true;
+}
+
+template <class ELFT> void ELFState<ELFT>::reportError(Error Err) {
+ handleAllErrors(std::move(Err), [&](const ErrorInfoBase &Err) {
+ reportError(Err.message());
+ });
+}
+
+template <class ELFT>
+std::vector<Fragment>
+ELFState<ELFT>::getPhdrFragments(const ELFYAML::ProgramHeader &Phdr,
+ ArrayRef<Elf_Shdr> SHeaders) {
+ std::vector<Fragment> Ret;
+ for (const ELFYAML::Chunk *C : Phdr.Chunks) {
+ if (const ELFYAML::Fill *F = dyn_cast<ELFYAML::Fill>(C)) {
+ Ret.push_back({*F->Offset, F->Size, llvm::ELF::SHT_PROGBITS,
+ /*ShAddrAlign=*/1});
+ continue;
+ }
+
+ const ELFYAML::Section *S = cast<ELFYAML::Section>(C);
+ const Elf_Shdr &H = SHeaders[SN2I.get(S->Name)];
+ Ret.push_back({H.sh_offset, H.sh_size, H.sh_type, H.sh_addralign});
+ }
+ return Ret;
+}
+
+template <class ELFT>
+void ELFState<ELFT>::setProgramHeaderLayout(std::vector<Elf_Phdr> &PHeaders,
+ std::vector<Elf_Shdr> &SHeaders) {
+ uint32_t PhdrIdx = 0;
+ for (auto &YamlPhdr : Doc.ProgramHeaders) {
+ Elf_Phdr &PHeader = PHeaders[PhdrIdx++];
+ std::vector<Fragment> Fragments = getPhdrFragments(YamlPhdr, SHeaders);
+ if (!llvm::is_sorted(Fragments, [](const Fragment &A, const Fragment &B) {
+ return A.Offset < B.Offset;
+ }))
+ reportError("sections in the program header with index " +
+ Twine(PhdrIdx) + " are not sorted by their file offset");
+
+ if (YamlPhdr.Offset) {
+ if (!Fragments.empty() && *YamlPhdr.Offset > Fragments.front().Offset)
+ reportError("'Offset' for segment with index " + Twine(PhdrIdx) +
+ " must be less than or equal to the minimum file offset of "
+ "all included sections (0x" +
+ Twine::utohexstr(Fragments.front().Offset) + ")");
+ PHeader.p_offset = *YamlPhdr.Offset;
+ } else if (!Fragments.empty()) {
+ PHeader.p_offset = Fragments.front().Offset;
+ }
+
+ // Set the file size if not set explicitly.
+ if (YamlPhdr.FileSize) {
+ PHeader.p_filesz = *YamlPhdr.FileSize;
+ } else if (!Fragments.empty()) {
+ uint64_t FileSize = Fragments.back().Offset - PHeader.p_offset;
+      // SHT_NOBITS sections occupy no physical space in a file, so we should
+      // not take their sizes into account when calculating the file size of
+      // a segment.
+ if (Fragments.back().Type != llvm::ELF::SHT_NOBITS)
+ FileSize += Fragments.back().Size;
+ PHeader.p_filesz = FileSize;
+ }
+
+ // Find the maximum offset of the end of a section in order to set p_memsz.
+ uint64_t MemOffset = PHeader.p_offset;
+ for (const Fragment &F : Fragments)
+ MemOffset = std::max(MemOffset, F.Offset + F.Size);
+ // Set the memory size if not set explicitly.
+ PHeader.p_memsz = YamlPhdr.MemSize ? uint64_t(*YamlPhdr.MemSize)
+ : MemOffset - PHeader.p_offset;
+
+ if (YamlPhdr.Align) {
+ PHeader.p_align = *YamlPhdr.Align;
+ } else {
+ // Set the alignment of the segment to be the maximum alignment of the
+ // sections so that by default the segment has a valid and sensible
+ // alignment.
+ PHeader.p_align = 1;
+ for (const Fragment &F : Fragments)
+ PHeader.p_align = std::max((uint64_t)PHeader.p_align, F.AddrAlign);
+ }
+ }
+}
+
+bool llvm::ELFYAML::shouldAllocateFileSpace(
+ ArrayRef<ELFYAML::ProgramHeader> Phdrs, const ELFYAML::NoBitsSection &S) {
+ for (const ELFYAML::ProgramHeader &PH : Phdrs) {
+ auto It = llvm::find_if(
+ PH.Chunks, [&](ELFYAML::Chunk *C) { return C->Name == S.Name; });
+ if (std::any_of(It, PH.Chunks.end(), [](ELFYAML::Chunk *C) {
+ return (isa<ELFYAML::Fill>(C) ||
+ cast<ELFYAML::Section>(C)->Type != ELF::SHT_NOBITS);
+ }))
+ return true;
+ }
+ return false;
+}
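+
+// For example (illustrative): in a segment laid out as
+// [.bss (SHT_NOBITS), .data (SHT_PROGBITS)], file space is allocated for
+// .bss so that .data ends up at the expected file offset, whereas a segment
+// ending in .bss allocates none for it.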
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::NoBitsSection &S,
+ ContiguousBlobAccumulator &CBA) {
+ if (!S.Size)
+ return;
+
+ SHeader.sh_size = *S.Size;
+
+ // When a nobits section is followed by a non-nobits section or fill
+ // in the same segment, we allocate the file space for it. This behavior
+ // matches linkers.
+ if (shouldAllocateFileSpace(Doc.ProgramHeaders, S))
+ CBA.writeZeros(*S.Size);
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::RawContentSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (Section.Info)
+ SHeader.sh_info = *Section.Info;
+}
+
+static bool isMips64EL(const ELFYAML::Object &Obj) {
+ return Obj.getMachine() == llvm::ELF::EM_MIPS &&
+ Obj.Header.Class == ELFYAML::ELF_ELFCLASS(ELF::ELFCLASS64) &&
+ Obj.Header.Data == ELFYAML::ELF_ELFDATA(ELF::ELFDATA2LSB);
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::RelocationSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ assert((Section.Type == llvm::ELF::SHT_REL ||
+ Section.Type == llvm::ELF::SHT_RELA) &&
+ "Section type is not SHT_REL nor SHT_RELA");
+
+ if (!Section.RelocatableSec.empty())
+ SHeader.sh_info = toSectionIndex(Section.RelocatableSec, Section.Name);
+
+ if (!Section.Relocations)
+ return;
+
+ const bool IsRela = Section.Type == llvm::ELF::SHT_RELA;
+ for (const ELFYAML::Relocation &Rel : *Section.Relocations) {
+ const bool IsDynamic = Section.Link && (*Section.Link == ".dynsym");
+ unsigned SymIdx =
+ Rel.Symbol ? toSymbolIndex(*Rel.Symbol, Section.Name, IsDynamic) : 0;
+ if (IsRela) {
+ Elf_Rela REntry;
+ zero(REntry);
+ REntry.r_offset = Rel.Offset;
+ REntry.r_addend = Rel.Addend;
+ REntry.setSymbolAndType(SymIdx, Rel.Type, isMips64EL(Doc));
+ CBA.write((const char *)&REntry, sizeof(REntry));
+ } else {
+ Elf_Rel REntry;
+ zero(REntry);
+ REntry.r_offset = Rel.Offset;
+ REntry.setSymbolAndType(SymIdx, Rel.Type, isMips64EL(Doc));
+ CBA.write((const char *)&REntry, sizeof(REntry));
+ }
+ }
+
+ SHeader.sh_size = (IsRela ? sizeof(Elf_Rela) : sizeof(Elf_Rel)) *
+ Section.Relocations->size();
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::RelrSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Entries)
+ return;
+
+ for (llvm::yaml::Hex64 E : *Section.Entries) {
+ if (!ELFT::Is64Bits && E > UINT32_MAX)
+ reportError(Section.Name + ": the value is too large for 32-bits: 0x" +
+ Twine::utohexstr(E));
+ CBA.write<uintX_t>(E, ELFT::TargetEndianness);
+ }
+
+ SHeader.sh_size = sizeof(uintX_t) * Section.Entries->size();
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::SymtabShndxSection &Shndx,
+ ContiguousBlobAccumulator &CBA) {
+ if (Shndx.Content || Shndx.Size) {
+ SHeader.sh_size = writeContent(CBA, Shndx.Content, Shndx.Size);
+ return;
+ }
+
+ if (!Shndx.Entries)
+ return;
+
+ for (uint32_t E : *Shndx.Entries)
+ CBA.write<uint32_t>(E, ELFT::TargetEndianness);
+ SHeader.sh_size = Shndx.Entries->size() * SHeader.sh_entsize;
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::GroupSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ assert(Section.Type == llvm::ELF::SHT_GROUP &&
+ "Section type is not SHT_GROUP");
+
+ if (Section.Signature)
+ SHeader.sh_info =
+ toSymbolIndex(*Section.Signature, Section.Name, /*IsDynamic=*/false);
+
+ if (!Section.Members)
+ return;
+
+ for (const ELFYAML::SectionOrType &Member : *Section.Members) {
+ unsigned int SectionIndex = 0;
+ if (Member.sectionNameOrType == "GRP_COMDAT")
+ SectionIndex = llvm::ELF::GRP_COMDAT;
+ else
+ SectionIndex = toSectionIndex(Member.sectionNameOrType, Section.Name);
+ CBA.write<uint32_t>(SectionIndex, ELFT::TargetEndianness);
+ }
+ SHeader.sh_size = SHeader.sh_entsize * Section.Members->size();
+}
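+
+// Illustrative sketch (not from the upstream source): a COMDAT group whose
+// first word is the GRP_COMDAT flag, followed by member section indices.
+// "Info" holds the signature symbol and "SectionOrType" is the member key
+// (see groupSectionMapping() and MappingTraits<SectionOrType> below):
+//
+//   - Name: .group
+//     Type: SHT_GROUP
+//     Info: foo
+//     Members:
+//       - SectionOrType: GRP_COMDAT
+//       - SectionOrType: .text.foo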
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::SymverSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Entries)
+ return;
+
+ for (uint16_t Version : *Section.Entries)
+ CBA.write<uint16_t>(Version, ELFT::TargetEndianness);
+ SHeader.sh_size = Section.Entries->size() * SHeader.sh_entsize;
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::StackSizesSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Entries)
+ return;
+
+ for (const ELFYAML::StackSizeEntry &E : *Section.Entries) {
+ CBA.write<uintX_t>(E.Address, ELFT::TargetEndianness);
+ SHeader.sh_size += sizeof(uintX_t) + CBA.writeULEB128(E.Size);
+ }
+}
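+
+// Illustrative sketch (not from the upstream source): each entry is emitted
+// as a fixed-width address followed by a ULEB128-encoded size, which is why
+// sh_size is accumulated per entry instead of computed up front. The entry
+// key spellings "Address" and "Size" are assumed from the StackSizeEntry
+// fields used above:
+//
+//   - Name: .stack_sizes
+//     Type: SHT_PROGBITS
+//     Entries:
+//       - Address: 0x10
+//         Size:    0x80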
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::BBAddrMapSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Entries)
+ return;
+
+ for (const ELFYAML::BBAddrMapEntry &E : *Section.Entries) {
+ // Write the address of the function.
+ CBA.write<uintX_t>(E.Address, ELFT::TargetEndianness);
+ // Write number of BBEntries (number of basic blocks in the function).
+ size_t NumBlocks = E.BBEntries ? E.BBEntries->size() : 0;
+ SHeader.sh_size += sizeof(uintX_t) + CBA.writeULEB128(NumBlocks);
+ if (!NumBlocks)
+ continue;
+ // Write all BBEntries.
+ for (const ELFYAML::BBAddrMapEntry::BBEntry &BBE : *E.BBEntries)
+ SHeader.sh_size += CBA.writeULEB128(BBE.AddressOffset) +
+ CBA.writeULEB128(BBE.Size) +
+ CBA.writeULEB128(BBE.Metadata);
+ }
+}
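+
+// Illustrative sketch (not from the upstream source): one function record is
+// a fixed-width address, a ULEB128 block count, then one ULEB128 triple per
+// basic block. The key spellings are assumed from the BBAddrMapEntry fields
+// used above:
+//
+//   - Name: .llvm_bb_addr_map
+//     Type: SHT_LLVM_BB_ADDR_MAP
+//     Entries:
+//       - Address: 0x11111
+//         BBEntries:
+//           - AddressOffset: 0x0
+//             Size:          0x1
+//             Metadata:      0x2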
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::LinkerOptionsSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Options)
+ return;
+
+ for (const ELFYAML::LinkerOption &LO : *Section.Options) {
+ CBA.write(LO.Key.data(), LO.Key.size());
+ CBA.write('\0');
+ CBA.write(LO.Value.data(), LO.Value.size());
+ CBA.write('\0');
+ SHeader.sh_size += (LO.Key.size() + LO.Value.size() + 2);
+ }
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::DependentLibrariesSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Libs)
+ return;
+
+ for (StringRef Lib : *Section.Libs) {
+ CBA.write(Lib.data(), Lib.size());
+ CBA.write('\0');
+ SHeader.sh_size += Lib.size() + 1;
+ }
+}
+
+template <class ELFT>
+uint64_t
+ELFState<ELFT>::alignToOffset(ContiguousBlobAccumulator &CBA, uint64_t Align,
+ llvm::Optional<llvm::yaml::Hex64> Offset) {
+ uint64_t CurrentOffset = CBA.getOffset();
+ uint64_t AlignedOffset;
+
+ if (Offset) {
+ if ((uint64_t)*Offset < CurrentOffset) {
+ reportError("the 'Offset' value (0x" +
+ Twine::utohexstr((uint64_t)*Offset) + ") goes backward");
+ return CurrentOffset;
+ }
+
+ // We ignore an alignment when an explicit offset has been requested.
+ AlignedOffset = *Offset;
+ } else {
+ AlignedOffset = alignTo(CurrentOffset, std::max(Align, (uint64_t)1));
+ }
+
+ CBA.writeZeros(AlignedOffset - CurrentOffset);
+ return AlignedOffset;
+}
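+
+// Worked example (illustrative): with CurrentOffset == 0x54 and Align == 8,
+// no explicit offset yields AlignedOffset == 0x58 and four bytes of zero
+// padding; an explicit Offset of 0x60 is honored as-is, while an explicit
+// Offset of 0x50 is rejected because it would move the write cursor backward.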
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::CallGraphProfileSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Entries)
+ return;
+
+ for (const ELFYAML::CallGraphEntry &E : *Section.Entries) {
+ unsigned From = toSymbolIndex(E.From, Section.Name, /*IsDynamic=*/false);
+ unsigned To = toSymbolIndex(E.To, Section.Name, /*IsDynamic=*/false);
+
+ CBA.write<uint32_t>(From, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(To, ELFT::TargetEndianness);
+ CBA.write<uint64_t>(E.Weight, ELFT::TargetEndianness);
+ SHeader.sh_size += 16;
+ }
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::HashSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Bucket)
+ return;
+
+ if (!Section.Chain)
+ return;
+
+ CBA.write<uint32_t>(
+ Section.NBucket.getValueOr(llvm::yaml::Hex64(Section.Bucket->size())),
+ ELFT::TargetEndianness);
+ CBA.write<uint32_t>(
+ Section.NChain.getValueOr(llvm::yaml::Hex64(Section.Chain->size())),
+ ELFT::TargetEndianness);
+
+ for (uint32_t Val : *Section.Bucket)
+ CBA.write<uint32_t>(Val, ELFT::TargetEndianness);
+ for (uint32_t Val : *Section.Chain)
+ CBA.write<uint32_t>(Val, ELFT::TargetEndianness);
+
+ SHeader.sh_size = (2 + Section.Bucket->size() + Section.Chain->size()) * 4;
+}
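+
+// Illustrative sketch (not from the upstream source): the section is laid out
+// as nbucket, nchain, the bucket array and the chain array, all 32-bit words.
+// "NBucket"/"NChain" only override the header counts, e.g. to craft broken
+// objects (see sectionMapping() for HashSection below):
+//
+//   - Name: .hash
+//     Type: SHT_HASH
+//     Bucket: [ 0, 1 ]
+//     Chain:  [ 0, 0 ]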
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::VerdefSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (Section.Info)
+ SHeader.sh_info = *Section.Info;
+ else if (Section.Entries)
+ SHeader.sh_info = Section.Entries->size();
+
+ if (!Section.Entries)
+ return;
+
+ uint64_t AuxCnt = 0;
+ for (size_t I = 0; I < Section.Entries->size(); ++I) {
+ const ELFYAML::VerdefEntry &E = (*Section.Entries)[I];
+
+ Elf_Verdef VerDef;
+ VerDef.vd_version = E.Version.getValueOr(1);
+ VerDef.vd_flags = E.Flags.getValueOr(0);
+ VerDef.vd_ndx = E.VersionNdx.getValueOr(0);
+ VerDef.vd_hash = E.Hash.getValueOr(0);
+ VerDef.vd_aux = sizeof(Elf_Verdef);
+ VerDef.vd_cnt = E.VerNames.size();
+ if (I == Section.Entries->size() - 1)
+ VerDef.vd_next = 0;
+ else
+ VerDef.vd_next =
+ sizeof(Elf_Verdef) + E.VerNames.size() * sizeof(Elf_Verdaux);
+ CBA.write((const char *)&VerDef, sizeof(Elf_Verdef));
+
+ for (size_t J = 0; J < E.VerNames.size(); ++J, ++AuxCnt) {
+ Elf_Verdaux VernAux;
+ VernAux.vda_name = DotDynstr.getOffset(E.VerNames[J]);
+ if (J == E.VerNames.size() - 1)
+ VernAux.vda_next = 0;
+ else
+ VernAux.vda_next = sizeof(Elf_Verdaux);
+ CBA.write((const char *)&VernAux, sizeof(Elf_Verdaux));
+ }
+ }
+
+ SHeader.sh_size = Section.Entries->size() * sizeof(Elf_Verdef) +
+ AuxCnt * sizeof(Elf_Verdaux);
+}
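+
+// Illustrative sketch (not from the upstream source): each entry becomes an
+// Elf_Verdef record immediately followed by one Elf_Verdaux per name. The
+// per-entry key spellings, including "Names" for the VerNames list, are
+// assumed from the VerdefEntry fields used above:
+//
+//   - Name: .gnu.version_d
+//     Type: SHT_GNU_verdef
+//     Entries:
+//       - Version:    1
+//         Flags:      0
+//         VersionNdx: 2
+//         Hash:       0x0a32fcda
+//         Names:
+//           - VERSION_1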
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::VerneedSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (Section.Info)
+ SHeader.sh_info = *Section.Info;
+ else if (Section.VerneedV)
+ SHeader.sh_info = Section.VerneedV->size();
+
+ if (!Section.VerneedV)
+ return;
+
+ uint64_t AuxCnt = 0;
+ for (size_t I = 0; I < Section.VerneedV->size(); ++I) {
+ const ELFYAML::VerneedEntry &VE = (*Section.VerneedV)[I];
+
+ Elf_Verneed VerNeed;
+ VerNeed.vn_version = VE.Version;
+ VerNeed.vn_file = DotDynstr.getOffset(VE.File);
+ if (I == Section.VerneedV->size() - 1)
+ VerNeed.vn_next = 0;
+ else
+ VerNeed.vn_next =
+ sizeof(Elf_Verneed) + VE.AuxV.size() * sizeof(Elf_Vernaux);
+ VerNeed.vn_cnt = VE.AuxV.size();
+ VerNeed.vn_aux = sizeof(Elf_Verneed);
+ CBA.write((const char *)&VerNeed, sizeof(Elf_Verneed));
+
+ for (size_t J = 0; J < VE.AuxV.size(); ++J, ++AuxCnt) {
+ const ELFYAML::VernauxEntry &VAuxE = VE.AuxV[J];
+
+ Elf_Vernaux VernAux;
+ VernAux.vna_hash = VAuxE.Hash;
+ VernAux.vna_flags = VAuxE.Flags;
+ VernAux.vna_other = VAuxE.Other;
+ VernAux.vna_name = DotDynstr.getOffset(VAuxE.Name);
+ if (J == VE.AuxV.size() - 1)
+ VernAux.vna_next = 0;
+ else
+ VernAux.vna_next = sizeof(Elf_Vernaux);
+ CBA.write((const char *)&VernAux, sizeof(Elf_Vernaux));
+ }
+ }
+
+ SHeader.sh_size = Section.VerneedV->size() * sizeof(Elf_Verneed) +
+ AuxCnt * sizeof(Elf_Vernaux);
+}
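+
+// Illustrative sketch (not from the upstream source): each dependency becomes
+// an Elf_Verneed record chained to one Elf_Vernaux per version. "Dependencies"
+// is the mapped key (see sectionMapping() for VerneedSection below); the
+// nested key spellings are assumed from the VerneedEntry/VernauxEntry fields
+// used above:
+//
+//   - Name: .gnu.version_r
+//     Type: SHT_GNU_verneed
+//     Dependencies:
+//       - Version: 1
+//         File:    libc.so.6
+//         Entries:
+//           - Name:  GLIBC_2.2.5
+//             Hash:  0x09691a75
+//             Flags: 0
+//             Other: 2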
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(
+ Elf_Shdr &SHeader, const ELFYAML::ARMIndexTableSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Entries)
+ return;
+
+ for (const ELFYAML::ARMIndexTableEntry &E : *Section.Entries) {
+ CBA.write<uint32_t>(E.Offset, ELFT::TargetEndianness);
+ CBA.write<uint32_t>(E.Value, ELFT::TargetEndianness);
+ }
+ SHeader.sh_size = Section.Entries->size() * 8;
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::MipsABIFlags &Section,
+ ContiguousBlobAccumulator &CBA) {
+ assert(Section.Type == llvm::ELF::SHT_MIPS_ABIFLAGS &&
+ "Section type is not SHT_MIPS_ABIFLAGS");
+
+ object::Elf_Mips_ABIFlags<ELFT> Flags;
+ zero(Flags);
+ SHeader.sh_size = SHeader.sh_entsize;
+
+ Flags.version = Section.Version;
+ Flags.isa_level = Section.ISALevel;
+ Flags.isa_rev = Section.ISARevision;
+ Flags.gpr_size = Section.GPRSize;
+ Flags.cpr1_size = Section.CPR1Size;
+ Flags.cpr2_size = Section.CPR2Size;
+ Flags.fp_abi = Section.FpABI;
+ Flags.isa_ext = Section.ISAExtension;
+ Flags.ases = Section.ASEs;
+ Flags.flags1 = Section.Flags1;
+ Flags.flags2 = Section.Flags2;
+ CBA.write((const char *)&Flags, sizeof(Flags));
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::DynamicSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ assert(Section.Type == llvm::ELF::SHT_DYNAMIC &&
+ "Section type is not SHT_DYNAMIC");
+
+ if (!Section.Entries)
+ return;
+
+ for (const ELFYAML::DynamicEntry &DE : *Section.Entries) {
+ CBA.write<uintX_t>(DE.Tag, ELFT::TargetEndianness);
+ CBA.write<uintX_t>(DE.Val, ELFT::TargetEndianness);
+ }
+ SHeader.sh_size = 2 * sizeof(uintX_t) * Section.Entries->size();
+}
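+
+// Illustrative sketch (not from the upstream source): every entry is a
+// tag/value pair of machine words. The "Tag"/"Value" spellings are assumed
+// (the writer above only exposes the DynamicEntry fields Tag and Val):
+//
+//   - Name: .dynamic
+//     Type: SHT_DYNAMIC
+//     Entries:
+//       - Tag:   DT_NEEDED
+//         Value: 0x1
+//       - Tag:   DT_NULL
+//         Value: 0x0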
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::AddrsigSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Symbols)
+ return;
+
+ for (StringRef Sym : *Section.Symbols)
+ SHeader.sh_size +=
+ CBA.writeULEB128(toSymbolIndex(Sym, Section.Name, /*IsDynamic=*/false));
+}
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::NoteSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.Notes)
+ return;
+
+ uint64_t Offset = CBA.tell();
+ for (const ELFYAML::NoteEntry &NE : *Section.Notes) {
+ // Write name size.
+ if (NE.Name.empty())
+ CBA.write<uint32_t>(0, ELFT::TargetEndianness);
+ else
+ CBA.write<uint32_t>(NE.Name.size() + 1, ELFT::TargetEndianness);
+
+ // Write description size.
+ if (NE.Desc.binary_size() == 0)
+ CBA.write<uint32_t>(0, ELFT::TargetEndianness);
+ else
+ CBA.write<uint32_t>(NE.Desc.binary_size(), ELFT::TargetEndianness);
+
+ // Write type.
+ CBA.write<uint32_t>(NE.Type, ELFT::TargetEndianness);
+
+ // Write name, null terminator and padding.
+ if (!NE.Name.empty()) {
+ CBA.write(NE.Name.data(), NE.Name.size());
+ CBA.write('\0');
+ CBA.padToAlignment(4);
+ }
+
+ // Write description and padding.
+ if (NE.Desc.binary_size() != 0) {
+ CBA.writeAsBinary(NE.Desc);
+ CBA.padToAlignment(4);
+ }
+ }
+
+ SHeader.sh_size = CBA.tell() - Offset;
+}
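+
+// Illustrative sketch (not from the upstream source): namesz/descsz/type are
+// written first, then the name and descriptor, each NUL-terminated and padded
+// to 4 bytes. The per-note key spellings are assumed from the NoteEntry
+// fields used above:
+//
+//   - Name: .note.gnu.build-id
+//     Type: SHT_NOTE
+//     Notes:
+//       - Name: GNU
+//         Desc: '0123456789abcdef'
+//         Type: 0x3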
+
+template <class ELFT>
+void ELFState<ELFT>::writeSectionContent(Elf_Shdr &SHeader,
+ const ELFYAML::GnuHashSection &Section,
+ ContiguousBlobAccumulator &CBA) {
+ if (!Section.HashBuckets)
+ return;
+
+ if (!Section.Header)
+ return;
+
+ // We write the header first, starting with the hash buckets count. Normally
+ // it is the number of entries in HashBuckets, but the "NBuckets" property can
+ // be used to override this field, which is useful for producing broken
+ // objects.
+ if (Section.Header->NBuckets)
+ CBA.write<uint32_t>(*Section.Header->NBuckets, ELFT::TargetEndianness);
+ else
+ CBA.write<uint32_t>(Section.HashBuckets->size(), ELFT::TargetEndianness);
+
+ // Write the index of the first symbol in the dynamic symbol table accessible
+ // via the hash table.
+ CBA.write<uint32_t>(Section.Header->SymNdx, ELFT::TargetEndianness);
+
+ // Write the number of words in the Bloom filter. As above, the "MaskWords"
+ // property can be used to set this field to any value.
+ if (Section.Header->MaskWords)
+ CBA.write<uint32_t>(*Section.Header->MaskWords, ELFT::TargetEndianness);
+ else
+ CBA.write<uint32_t>(Section.BloomFilter->size(), ELFT::TargetEndianness);
+
+ // Write the shift constant used by the Bloom filter.
+ CBA.write<uint32_t>(Section.Header->Shift2, ELFT::TargetEndianness);
+
+ // We've finished writing the header. Now write the Bloom filter.
+ for (llvm::yaml::Hex64 Val : *Section.BloomFilter)
+ CBA.write<uintX_t>(Val, ELFT::TargetEndianness);
+
+ // Write an array of hash buckets.
+ for (llvm::yaml::Hex32 Val : *Section.HashBuckets)
+ CBA.write<uint32_t>(Val, ELFT::TargetEndianness);
+
+ // Write an array of hash values.
+ for (llvm::yaml::Hex32 Val : *Section.HashValues)
+ CBA.write<uint32_t>(Val, ELFT::TargetEndianness);
+
+ SHeader.sh_size = 16 /*Header size*/ +
+ Section.BloomFilter->size() * sizeof(typename ELFT::uint) +
+ Section.HashBuckets->size() * 4 +
+ Section.HashValues->size() * 4;
+}
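+
+// Illustrative sketch (not from the upstream source): the four 32-bit header
+// words are followed by the Bloom filter (machine words), the buckets and the
+// hash values (see sectionMapping() for GnuHashSection below). The "Header"
+// subkey spellings are assumed from the fields used above:
+//
+//   - Name: .gnu.hash
+//     Type: SHT_GNU_HASH
+//     Header:
+//       SymNdx: 0x1
+//       Shift2: 0x2
+//     BloomFilter: [ 0x3 ]
+//     HashBuckets: [ 0x4 ]
+//     HashValues:  [ 0x5 ]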
+
+template <class ELFT>
+void ELFState<ELFT>::writeFill(ELFYAML::Fill &Fill,
+ ContiguousBlobAccumulator &CBA) {
+ size_t PatternSize = Fill.Pattern ? Fill.Pattern->binary_size() : 0;
+ if (!PatternSize) {
+ CBA.writeZeros(Fill.Size);
+ return;
+ }
+
+ // Fill the content with the specified pattern.
+ uint64_t Written = 0;
+ for (; Written + PatternSize <= Fill.Size; Written += PatternSize)
+ CBA.writeAsBinary(*Fill.Pattern);
+ CBA.writeAsBinary(*Fill.Pattern, Fill.Size - Written);
+}
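+
+// Illustrative sketch (not from the upstream source; the "Type: Fill"
+// discriminator is assumed, while the other keys appear in fillMapping()
+// below): a 5-byte fill from the 2-byte pattern AA BB emits the bytes
+// AA BB AA BB AA -- the pattern repeats and the final copy is truncated by
+// the trailing writeAsBinary() call above:
+//
+//   - Type: Fill
+//     Name: gap
+//     Pattern: "AABB"
+//     Size: 0x5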
+
+template <class ELFT>
+DenseMap<StringRef, size_t> ELFState<ELFT>::buildSectionHeaderReorderMap() {
+ const ELFYAML::SectionHeaderTable &SectionHeaders =
+ Doc.getSectionHeaderTable();
+ if (SectionHeaders.IsImplicit || SectionHeaders.NoHeaders)
+ return DenseMap<StringRef, size_t>();
+
+ DenseMap<StringRef, size_t> Ret;
+ size_t SecNdx = 0;
+ StringSet<> Seen;
+
+ auto AddSection = [&](const ELFYAML::SectionHeader &Hdr) {
+ if (!Ret.try_emplace(Hdr.Name, ++SecNdx).second)
+ reportError("repeated section name: '" + Hdr.Name +
+ "' in the section header description");
+ Seen.insert(Hdr.Name);
+ };
+
+ if (SectionHeaders.Sections)
+ for (const ELFYAML::SectionHeader &Hdr : *SectionHeaders.Sections)
+ AddSection(Hdr);
+
+ if (SectionHeaders.Excluded)
+ for (const ELFYAML::SectionHeader &Hdr : *SectionHeaders.Excluded)
+ AddSection(Hdr);
+
+ for (const ELFYAML::Section *S : Doc.getSections()) {
+ // Ignore special first SHT_NULL section.
+ if (S == Doc.getSections().front())
+ continue;
+ if (!Seen.count(S->Name))
+ reportError("section '" + S->Name +
+ "' should be present in the 'Sections' or 'Excluded' lists");
+ Seen.erase(S->Name);
+ }
+
+ for (const auto &It : Seen)
+ reportError("section header contains undefined section '" + It.getKey() +
+ "'");
+ return Ret;
+}
+
+template <class ELFT> void ELFState<ELFT>::buildSectionIndex() {
+ // A YAML description can have an explicit section header declaration that
+ // allows changing the order of section headers.
+ DenseMap<StringRef, size_t> ReorderMap = buildSectionHeaderReorderMap();
+
+ if (HasError)
+ return;
+
+ // Build excluded section headers map.
+ std::vector<ELFYAML::Section *> Sections = Doc.getSections();
+ const ELFYAML::SectionHeaderTable &SectionHeaders =
+ Doc.getSectionHeaderTable();
+ if (SectionHeaders.Excluded)
+ for (const ELFYAML::SectionHeader &Hdr : *SectionHeaders.Excluded)
+ if (!ExcludedSectionHeaders.insert(Hdr.Name).second)
+ llvm_unreachable("buildSectionIndex() failed");
+
+ if (SectionHeaders.NoHeaders.getValueOr(false))
+ for (const ELFYAML::Section *S : Sections)
+ if (!ExcludedSectionHeaders.insert(S->Name).second)
+ llvm_unreachable("buildSectionIndex() failed");
+
+ size_t SecNdx = -1;
+ for (const ELFYAML::Section *S : Sections) {
+ ++SecNdx;
+
+ size_t Index = ReorderMap.empty() ? SecNdx : ReorderMap.lookup(S->Name);
+ if (!SN2I.addName(S->Name, Index))
+ llvm_unreachable("buildSectionIndex() failed");
+
+ if (!ExcludedSectionHeaders.count(S->Name))
+ DotShStrtab.add(ELFYAML::dropUniqueSuffix(S->Name));
+ }
+
+ DotShStrtab.finalize();
+}
+
+template <class ELFT> void ELFState<ELFT>::buildSymbolIndexes() {
+ auto Build = [this](ArrayRef<ELFYAML::Symbol> V, NameToIdxMap &Map) {
+ for (size_t I = 0, S = V.size(); I < S; ++I) {
+ const ELFYAML::Symbol &Sym = V[I];
+ if (!Sym.Name.empty() && !Map.addName(Sym.Name, I + 1))
+ reportError("repeated symbol name: '" + Sym.Name + "'");
+ }
+ };
+
+ if (Doc.Symbols)
+ Build(*Doc.Symbols, SymN2I);
+ if (Doc.DynamicSymbols)
+ Build(*Doc.DynamicSymbols, DynSymN2I);
+}
+
+template <class ELFT> void ELFState<ELFT>::finalizeStrings() {
+ // Add the regular symbol names to .strtab section.
+ if (Doc.Symbols)
+ for (const ELFYAML::Symbol &Sym : *Doc.Symbols)
+ DotStrtab.add(ELFYAML::dropUniqueSuffix(Sym.Name));
+ DotStrtab.finalize();
+
+ // Add the dynamic symbol names to .dynstr section.
+ if (Doc.DynamicSymbols)
+ for (const ELFYAML::Symbol &Sym : *Doc.DynamicSymbols)
+ DotDynstr.add(ELFYAML::dropUniqueSuffix(Sym.Name));
+
+ // SHT_GNU_verdef and SHT_GNU_verneed sections might also
+ // add strings to .dynstr section.
+ for (const ELFYAML::Chunk *Sec : Doc.getSections()) {
+ if (auto VerNeed = dyn_cast<ELFYAML::VerneedSection>(Sec)) {
+ if (VerNeed->VerneedV) {
+ for (const ELFYAML::VerneedEntry &VE : *VerNeed->VerneedV) {
+ DotDynstr.add(VE.File);
+ for (const ELFYAML::VernauxEntry &Aux : VE.AuxV)
+ DotDynstr.add(Aux.Name);
+ }
+ }
+ } else if (auto VerDef = dyn_cast<ELFYAML::VerdefSection>(Sec)) {
+ if (VerDef->Entries)
+ for (const ELFYAML::VerdefEntry &E : *VerDef->Entries)
+ for (StringRef Name : E.VerNames)
+ DotDynstr.add(Name);
+ }
+ }
+
+ DotDynstr.finalize();
+}
+
+template <class ELFT>
+bool ELFState<ELFT>::writeELF(raw_ostream &OS, ELFYAML::Object &Doc,
+ yaml::ErrorHandler EH, uint64_t MaxSize) {
+ ELFState<ELFT> State(Doc, EH);
+ if (State.HasError)
+ return false;
+
+ // Finalize .strtab and .dynstr sections. We do that early because we want to
+ // finalize the string table builders before writing the content of the
+ // sections that might want to use them.
+ State.finalizeStrings();
+
+ State.buildSectionIndex();
+ State.buildSymbolIndexes();
+
+ if (State.HasError)
+ return false;
+
+ std::vector<Elf_Phdr> PHeaders;
+ State.initProgramHeaders(PHeaders);
+
+ // XXX: This offset is tightly coupled with the order that we write
+ // things to `OS`.
+ const size_t SectionContentBeginOffset =
+ sizeof(Elf_Ehdr) + sizeof(Elf_Phdr) * Doc.ProgramHeaders.size();
+ // It is quite easy to accidentally create output with yaml2obj that is larger
+ // than intended, for example, due to an issue in the YAML description.
+ // We limit the maximum allowed output size, but also provide a command line
+ // option to change this limitation.
+ ContiguousBlobAccumulator CBA(SectionContentBeginOffset, MaxSize);
+
+ std::vector<Elf_Shdr> SHeaders;
+ State.initSectionHeaders(SHeaders, CBA);
+
+ // Now we can decide segment offsets.
+ State.setProgramHeaderLayout(PHeaders, SHeaders);
+
+ bool ReachedLimit = CBA.getOffset() > MaxSize;
+ if (Error E = CBA.takeLimitError()) {
+ // We report a custom error message instead below.
+ consumeError(std::move(E));
+ ReachedLimit = true;
+ }
+
+ if (ReachedLimit)
+ State.reportError(
+ "the desired output size is greater than permitted. Use the "
+ "--max-size option to change the limit");
+
+ if (State.HasError)
+ return false;
+
+ State.writeELFHeader(OS);
+ writeArrayData(OS, makeArrayRef(PHeaders));
+
+ const ELFYAML::SectionHeaderTable &SHT = Doc.getSectionHeaderTable();
+ if (!SHT.NoHeaders.getValueOr(false))
+ CBA.updateDataAt(*SHT.Offset, SHeaders.data(),
+ SHT.getNumHeaders(SHeaders.size()) * sizeof(Elf_Shdr));
+
+ CBA.writeBlobToStream(OS);
+ return true;
+}
+
+namespace llvm {
+namespace yaml {
+
+bool yaml2elf(llvm::ELFYAML::Object &Doc, raw_ostream &Out, ErrorHandler EH,
+ uint64_t MaxSize) {
+ bool IsLE = Doc.Header.Data == ELFYAML::ELF_ELFDATA(ELF::ELFDATA2LSB);
+ bool Is64Bit = Doc.Header.Class == ELFYAML::ELF_ELFCLASS(ELF::ELFCLASS64);
+ if (Is64Bit) {
+ if (IsLE)
+ return ELFState<object::ELF64LE>::writeELF(Out, Doc, EH, MaxSize);
+ return ELFState<object::ELF64BE>::writeELF(Out, Doc, EH, MaxSize);
+ }
+ if (IsLE)
+ return ELFState<object::ELF32LE>::writeELF(Out, Doc, EH, MaxSize);
+ return ELFState<object::ELF32BE>::writeELF(Out, Doc, EH, MaxSize);
+}
+
+} // namespace yaml
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/ELFYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/ELFYAML.cpp
new file mode 100644
index 00000000000..05d30577812
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/ELFYAML.cpp
@@ -0,0 +1,1719 @@
+//===- ELFYAML.cpp - ELF YAMLIO implementation ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of ELF.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/ELFYAML.h"
+#include "llvm/ADT/MapVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Support/ARMEHABI.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MipsABIFlags.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/WithColor.h"
+#include <cassert>
+#include <cstdint>
+
+namespace llvm {
+
+ELFYAML::Chunk::~Chunk() = default;
+
+namespace ELFYAML {
+unsigned Object::getMachine() const {
+ if (Header.Machine)
+ return *Header.Machine;
+ return llvm::ELF::EM_NONE;
+}
+
+constexpr StringRef SectionHeaderTable::TypeStr;
+} // namespace ELFYAML
+
+namespace yaml {
+
+void ScalarEnumerationTraits<ELFYAML::ELF_ET>::enumeration(
+ IO &IO, ELFYAML::ELF_ET &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(ET_NONE);
+ ECase(ET_REL);
+ ECase(ET_EXEC);
+ ECase(ET_DYN);
+ ECase(ET_CORE);
+#undef ECase
+ IO.enumFallback<Hex16>(Value);
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_PT>::enumeration(
+ IO &IO, ELFYAML::ELF_PT &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(PT_NULL);
+ ECase(PT_LOAD);
+ ECase(PT_DYNAMIC);
+ ECase(PT_INTERP);
+ ECase(PT_NOTE);
+ ECase(PT_SHLIB);
+ ECase(PT_PHDR);
+ ECase(PT_TLS);
+ ECase(PT_GNU_EH_FRAME);
+ ECase(PT_GNU_STACK);
+ ECase(PT_GNU_RELRO);
+ ECase(PT_GNU_PROPERTY);
+#undef ECase
+ IO.enumFallback<Hex32>(Value);
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_EM>::enumeration(
+ IO &IO, ELFYAML::ELF_EM &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(EM_NONE);
+ ECase(EM_M32);
+ ECase(EM_SPARC);
+ ECase(EM_386);
+ ECase(EM_68K);
+ ECase(EM_88K);
+ ECase(EM_IAMCU);
+ ECase(EM_860);
+ ECase(EM_MIPS);
+ ECase(EM_S370);
+ ECase(EM_MIPS_RS3_LE);
+ ECase(EM_PARISC);
+ ECase(EM_VPP500);
+ ECase(EM_SPARC32PLUS);
+ ECase(EM_960);
+ ECase(EM_PPC);
+ ECase(EM_PPC64);
+ ECase(EM_S390);
+ ECase(EM_SPU);
+ ECase(EM_V800);
+ ECase(EM_FR20);
+ ECase(EM_RH32);
+ ECase(EM_RCE);
+ ECase(EM_ARM);
+ ECase(EM_ALPHA);
+ ECase(EM_SH);
+ ECase(EM_SPARCV9);
+ ECase(EM_TRICORE);
+ ECase(EM_ARC);
+ ECase(EM_H8_300);
+ ECase(EM_H8_300H);
+ ECase(EM_H8S);
+ ECase(EM_H8_500);
+ ECase(EM_IA_64);
+ ECase(EM_MIPS_X);
+ ECase(EM_COLDFIRE);
+ ECase(EM_68HC12);
+ ECase(EM_MMA);
+ ECase(EM_PCP);
+ ECase(EM_NCPU);
+ ECase(EM_NDR1);
+ ECase(EM_STARCORE);
+ ECase(EM_ME16);
+ ECase(EM_ST100);
+ ECase(EM_TINYJ);
+ ECase(EM_X86_64);
+ ECase(EM_PDSP);
+ ECase(EM_PDP10);
+ ECase(EM_PDP11);
+ ECase(EM_FX66);
+ ECase(EM_ST9PLUS);
+ ECase(EM_ST7);
+ ECase(EM_68HC16);
+ ECase(EM_68HC11);
+ ECase(EM_68HC08);
+ ECase(EM_68HC05);
+ ECase(EM_SVX);
+ ECase(EM_ST19);
+ ECase(EM_VAX);
+ ECase(EM_CRIS);
+ ECase(EM_JAVELIN);
+ ECase(EM_FIREPATH);
+ ECase(EM_ZSP);
+ ECase(EM_MMIX);
+ ECase(EM_HUANY);
+ ECase(EM_PRISM);
+ ECase(EM_AVR);
+ ECase(EM_FR30);
+ ECase(EM_D10V);
+ ECase(EM_D30V);
+ ECase(EM_V850);
+ ECase(EM_M32R);
+ ECase(EM_MN10300);
+ ECase(EM_MN10200);
+ ECase(EM_PJ);
+ ECase(EM_OPENRISC);
+ ECase(EM_ARC_COMPACT);
+ ECase(EM_XTENSA);
+ ECase(EM_VIDEOCORE);
+ ECase(EM_TMM_GPP);
+ ECase(EM_NS32K);
+ ECase(EM_TPC);
+ ECase(EM_SNP1K);
+ ECase(EM_ST200);
+ ECase(EM_IP2K);
+ ECase(EM_MAX);
+ ECase(EM_CR);
+ ECase(EM_F2MC16);
+ ECase(EM_MSP430);
+ ECase(EM_BLACKFIN);
+ ECase(EM_SE_C33);
+ ECase(EM_SEP);
+ ECase(EM_ARCA);
+ ECase(EM_UNICORE);
+ ECase(EM_EXCESS);
+ ECase(EM_DXP);
+ ECase(EM_ALTERA_NIOS2);
+ ECase(EM_CRX);
+ ECase(EM_XGATE);
+ ECase(EM_C166);
+ ECase(EM_M16C);
+ ECase(EM_DSPIC30F);
+ ECase(EM_CE);
+ ECase(EM_M32C);
+ ECase(EM_TSK3000);
+ ECase(EM_RS08);
+ ECase(EM_SHARC);
+ ECase(EM_ECOG2);
+ ECase(EM_SCORE7);
+ ECase(EM_DSP24);
+ ECase(EM_VIDEOCORE3);
+ ECase(EM_LATTICEMICO32);
+ ECase(EM_SE_C17);
+ ECase(EM_TI_C6000);
+ ECase(EM_TI_C2000);
+ ECase(EM_TI_C5500);
+ ECase(EM_MMDSP_PLUS);
+ ECase(EM_CYPRESS_M8C);
+ ECase(EM_R32C);
+ ECase(EM_TRIMEDIA);
+ ECase(EM_HEXAGON);
+ ECase(EM_8051);
+ ECase(EM_STXP7X);
+ ECase(EM_NDS32);
+ ECase(EM_ECOG1);
+ ECase(EM_ECOG1X);
+ ECase(EM_MAXQ30);
+ ECase(EM_XIMO16);
+ ECase(EM_MANIK);
+ ECase(EM_CRAYNV2);
+ ECase(EM_RX);
+ ECase(EM_METAG);
+ ECase(EM_MCST_ELBRUS);
+ ECase(EM_ECOG16);
+ ECase(EM_CR16);
+ ECase(EM_ETPU);
+ ECase(EM_SLE9X);
+ ECase(EM_L10M);
+ ECase(EM_K10M);
+ ECase(EM_AARCH64);
+ ECase(EM_AVR32);
+ ECase(EM_STM8);
+ ECase(EM_TILE64);
+ ECase(EM_TILEPRO);
+ ECase(EM_CUDA);
+ ECase(EM_TILEGX);
+ ECase(EM_CLOUDSHIELD);
+ ECase(EM_COREA_1ST);
+ ECase(EM_COREA_2ND);
+ ECase(EM_ARC_COMPACT2);
+ ECase(EM_OPEN8);
+ ECase(EM_RL78);
+ ECase(EM_VIDEOCORE5);
+ ECase(EM_78KOR);
+ ECase(EM_56800EX);
+ ECase(EM_AMDGPU);
+ ECase(EM_RISCV);
+ ECase(EM_LANAI);
+ ECase(EM_BPF);
+ ECase(EM_VE);
+ ECase(EM_CSKY);
+#undef ECase
+ IO.enumFallback<Hex16>(Value);
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_ELFCLASS>::enumeration(
+ IO &IO, ELFYAML::ELF_ELFCLASS &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ // Since the semantics of ELFCLASSNONE is "invalid", just don't accept it
+ // here.
+ ECase(ELFCLASS32);
+ ECase(ELFCLASS64);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_ELFDATA>::enumeration(
+ IO &IO, ELFYAML::ELF_ELFDATA &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ // ELFDATANONE is an invalid data encoding, but we accept it because
+ // we want to be able to produce invalid binaries for the tests.
+ ECase(ELFDATANONE);
+ ECase(ELFDATA2LSB);
+ ECase(ELFDATA2MSB);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_ELFOSABI>::enumeration(
+ IO &IO, ELFYAML::ELF_ELFOSABI &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(ELFOSABI_NONE);
+ ECase(ELFOSABI_HPUX);
+ ECase(ELFOSABI_NETBSD);
+ ECase(ELFOSABI_GNU);
+ ECase(ELFOSABI_LINUX);
+ ECase(ELFOSABI_HURD);
+ ECase(ELFOSABI_SOLARIS);
+ ECase(ELFOSABI_AIX);
+ ECase(ELFOSABI_IRIX);
+ ECase(ELFOSABI_FREEBSD);
+ ECase(ELFOSABI_TRU64);
+ ECase(ELFOSABI_MODESTO);
+ ECase(ELFOSABI_OPENBSD);
+ ECase(ELFOSABI_OPENVMS);
+ ECase(ELFOSABI_NSK);
+ ECase(ELFOSABI_AROS);
+ ECase(ELFOSABI_FENIXOS);
+ ECase(ELFOSABI_CLOUDABI);
+ ECase(ELFOSABI_AMDGPU_HSA);
+ ECase(ELFOSABI_AMDGPU_PAL);
+ ECase(ELFOSABI_AMDGPU_MESA3D);
+ ECase(ELFOSABI_ARM);
+ ECase(ELFOSABI_C6000_ELFABI);
+ ECase(ELFOSABI_C6000_LINUX);
+ ECase(ELFOSABI_STANDALONE);
+#undef ECase
+ IO.enumFallback<Hex8>(Value);
+}
+
+void ScalarBitSetTraits<ELFYAML::ELF_EF>::bitset(IO &IO,
+ ELFYAML::ELF_EF &Value) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+ assert(Object && "The IO context is not initialized");
+#define BCase(X) IO.bitSetCase(Value, #X, ELF::X)
+#define BCaseMask(X, M) IO.maskedBitSetCase(Value, #X, ELF::X, ELF::M)
+ switch (Object->getMachine()) {
+ case ELF::EM_ARM:
+ BCase(EF_ARM_SOFT_FLOAT);
+ BCase(EF_ARM_VFP_FLOAT);
+ BCaseMask(EF_ARM_EABI_UNKNOWN, EF_ARM_EABIMASK);
+ BCaseMask(EF_ARM_EABI_VER1, EF_ARM_EABIMASK);
+ BCaseMask(EF_ARM_EABI_VER2, EF_ARM_EABIMASK);
+ BCaseMask(EF_ARM_EABI_VER3, EF_ARM_EABIMASK);
+ BCaseMask(EF_ARM_EABI_VER4, EF_ARM_EABIMASK);
+ BCaseMask(EF_ARM_EABI_VER5, EF_ARM_EABIMASK);
+ break;
+ case ELF::EM_MIPS:
+ BCase(EF_MIPS_NOREORDER);
+ BCase(EF_MIPS_PIC);
+ BCase(EF_MIPS_CPIC);
+ BCase(EF_MIPS_ABI2);
+ BCase(EF_MIPS_32BITMODE);
+ BCase(EF_MIPS_FP64);
+ BCase(EF_MIPS_NAN2008);
+ BCase(EF_MIPS_MICROMIPS);
+ BCase(EF_MIPS_ARCH_ASE_M16);
+ BCase(EF_MIPS_ARCH_ASE_MDMX);
+ BCaseMask(EF_MIPS_ABI_O32, EF_MIPS_ABI);
+ BCaseMask(EF_MIPS_ABI_O64, EF_MIPS_ABI);
+ BCaseMask(EF_MIPS_ABI_EABI32, EF_MIPS_ABI);
+ BCaseMask(EF_MIPS_ABI_EABI64, EF_MIPS_ABI);
+ BCaseMask(EF_MIPS_MACH_3900, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_4010, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_4100, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_4650, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_4120, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_4111, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_SB1, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_OCTEON, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_XLR, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_OCTEON2, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_OCTEON3, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_5400, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_5900, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_5500, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_9000, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_LS2E, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_LS2F, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_MACH_LS3A, EF_MIPS_MACH);
+ BCaseMask(EF_MIPS_ARCH_1, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_2, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_3, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_4, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_5, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_32, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_64, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_32R2, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_64R2, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_32R6, EF_MIPS_ARCH);
+ BCaseMask(EF_MIPS_ARCH_64R6, EF_MIPS_ARCH);
+ break;
+ case ELF::EM_HEXAGON:
+ BCase(EF_HEXAGON_MACH_V2);
+ BCase(EF_HEXAGON_MACH_V3);
+ BCase(EF_HEXAGON_MACH_V4);
+ BCase(EF_HEXAGON_MACH_V5);
+ BCase(EF_HEXAGON_MACH_V55);
+ BCase(EF_HEXAGON_MACH_V60);
+ BCase(EF_HEXAGON_MACH_V62);
+ BCase(EF_HEXAGON_MACH_V65);
+ BCase(EF_HEXAGON_MACH_V66);
+ BCase(EF_HEXAGON_MACH_V67);
+ BCase(EF_HEXAGON_MACH_V67T);
+ BCase(EF_HEXAGON_ISA_V2);
+ BCase(EF_HEXAGON_ISA_V3);
+ BCase(EF_HEXAGON_ISA_V4);
+ BCase(EF_HEXAGON_ISA_V5);
+ BCase(EF_HEXAGON_ISA_V55);
+ BCase(EF_HEXAGON_ISA_V60);
+ BCase(EF_HEXAGON_ISA_V62);
+ BCase(EF_HEXAGON_ISA_V65);
+ BCase(EF_HEXAGON_ISA_V66);
+ BCase(EF_HEXAGON_ISA_V67);
+ break;
+ case ELF::EM_AVR:
+ BCase(EF_AVR_ARCH_AVR1);
+ BCase(EF_AVR_ARCH_AVR2);
+ BCase(EF_AVR_ARCH_AVR25);
+ BCase(EF_AVR_ARCH_AVR3);
+ BCase(EF_AVR_ARCH_AVR31);
+ BCase(EF_AVR_ARCH_AVR35);
+ BCase(EF_AVR_ARCH_AVR4);
+ BCase(EF_AVR_ARCH_AVR51);
+ BCase(EF_AVR_ARCH_AVR6);
+ BCase(EF_AVR_ARCH_AVRTINY);
+ BCase(EF_AVR_ARCH_XMEGA1);
+ BCase(EF_AVR_ARCH_XMEGA2);
+ BCase(EF_AVR_ARCH_XMEGA3);
+ BCase(EF_AVR_ARCH_XMEGA4);
+ BCase(EF_AVR_ARCH_XMEGA5);
+ BCase(EF_AVR_ARCH_XMEGA6);
+ BCase(EF_AVR_ARCH_XMEGA7);
+ break;
+ case ELF::EM_RISCV:
+ BCase(EF_RISCV_RVC);
+ BCaseMask(EF_RISCV_FLOAT_ABI_SOFT, EF_RISCV_FLOAT_ABI);
+ BCaseMask(EF_RISCV_FLOAT_ABI_SINGLE, EF_RISCV_FLOAT_ABI);
+ BCaseMask(EF_RISCV_FLOAT_ABI_DOUBLE, EF_RISCV_FLOAT_ABI);
+ BCaseMask(EF_RISCV_FLOAT_ABI_QUAD, EF_RISCV_FLOAT_ABI);
+ BCase(EF_RISCV_RVE);
+ break;
+ case ELF::EM_AMDGPU:
+ BCaseMask(EF_AMDGPU_MACH_NONE, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_R600, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_R630, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_RS880, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_RV670, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_RV710, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_RV730, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_RV770, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_CEDAR, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_CYPRESS, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_JUNIPER, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_REDWOOD, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_SUMO, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_BARTS, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_CAICOS, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_CAYMAN, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_R600_TURKS, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX600, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX601, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX602, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX700, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX701, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX702, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX703, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX704, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX705, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX801, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX802, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX803, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX805, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX810, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX900, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX902, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX904, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX906, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX908, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX909, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX90C, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX1010, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX1011, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX1012, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX1030, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX1031, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX1032, EF_AMDGPU_MACH);
+ BCaseMask(EF_AMDGPU_MACH_AMDGCN_GFX1033, EF_AMDGPU_MACH);
+ BCase(EF_AMDGPU_XNACK);
+ BCase(EF_AMDGPU_SRAM_ECC);
+ break;
+ default:
+ break;
+ }
+#undef BCase
+#undef BCaseMask
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_SHT>::enumeration(
+ IO &IO, ELFYAML::ELF_SHT &Value) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+ assert(Object && "The IO context is not initialized");
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(SHT_NULL);
+ ECase(SHT_PROGBITS);
+ ECase(SHT_SYMTAB);
+ // FIXME: Issue a diagnostic with this information.
+ ECase(SHT_STRTAB);
+ ECase(SHT_RELA);
+ ECase(SHT_HASH);
+ ECase(SHT_DYNAMIC);
+ ECase(SHT_NOTE);
+ ECase(SHT_NOBITS);
+ ECase(SHT_REL);
+ ECase(SHT_SHLIB);
+ ECase(SHT_DYNSYM);
+ ECase(SHT_INIT_ARRAY);
+ ECase(SHT_FINI_ARRAY);
+ ECase(SHT_PREINIT_ARRAY);
+ ECase(SHT_GROUP);
+ ECase(SHT_SYMTAB_SHNDX);
+ ECase(SHT_RELR);
+ ECase(SHT_ANDROID_REL);
+ ECase(SHT_ANDROID_RELA);
+ ECase(SHT_ANDROID_RELR);
+ ECase(SHT_LLVM_ODRTAB);
+ ECase(SHT_LLVM_LINKER_OPTIONS);
+ ECase(SHT_LLVM_CALL_GRAPH_PROFILE);
+ ECase(SHT_LLVM_ADDRSIG);
+ ECase(SHT_LLVM_DEPENDENT_LIBRARIES);
+ ECase(SHT_LLVM_SYMPART);
+ ECase(SHT_LLVM_PART_EHDR);
+ ECase(SHT_LLVM_PART_PHDR);
+ ECase(SHT_LLVM_BB_ADDR_MAP);
+ ECase(SHT_GNU_ATTRIBUTES);
+ ECase(SHT_GNU_HASH);
+ ECase(SHT_GNU_verdef);
+ ECase(SHT_GNU_verneed);
+ ECase(SHT_GNU_versym);
+ switch (Object->getMachine()) {
+ case ELF::EM_ARM:
+ ECase(SHT_ARM_EXIDX);
+ ECase(SHT_ARM_PREEMPTMAP);
+ ECase(SHT_ARM_ATTRIBUTES);
+ ECase(SHT_ARM_DEBUGOVERLAY);
+ ECase(SHT_ARM_OVERLAYSECTION);
+ break;
+ case ELF::EM_HEXAGON:
+ ECase(SHT_HEX_ORDERED);
+ break;
+ case ELF::EM_X86_64:
+ ECase(SHT_X86_64_UNWIND);
+ break;
+ case ELF::EM_MIPS:
+ ECase(SHT_MIPS_REGINFO);
+ ECase(SHT_MIPS_OPTIONS);
+ ECase(SHT_MIPS_DWARF);
+ ECase(SHT_MIPS_ABIFLAGS);
+ break;
+ case ELF::EM_RISCV:
+ ECase(SHT_RISCV_ATTRIBUTES);
+ break;
+ default:
+ // Nothing to do.
+ break;
+ }
+#undef ECase
+ IO.enumFallback<Hex32>(Value);
+}
+
+void ScalarBitSetTraits<ELFYAML::ELF_PF>::bitset(IO &IO,
+ ELFYAML::ELF_PF &Value) {
+#define BCase(X) IO.bitSetCase(Value, #X, ELF::X)
+ BCase(PF_X);
+ BCase(PF_W);
+ BCase(PF_R);
+}
+
+void ScalarBitSetTraits<ELFYAML::ELF_SHF>::bitset(IO &IO,
+ ELFYAML::ELF_SHF &Value) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+#define BCase(X) IO.bitSetCase(Value, #X, ELF::X)
+ BCase(SHF_WRITE);
+ BCase(SHF_ALLOC);
+ BCase(SHF_EXCLUDE);
+ BCase(SHF_EXECINSTR);
+ BCase(SHF_MERGE);
+ BCase(SHF_STRINGS);
+ BCase(SHF_INFO_LINK);
+ BCase(SHF_LINK_ORDER);
+ BCase(SHF_OS_NONCONFORMING);
+ BCase(SHF_GROUP);
+ BCase(SHF_TLS);
+ BCase(SHF_COMPRESSED);
+ switch (Object->getMachine()) {
+ case ELF::EM_ARM:
+ BCase(SHF_ARM_PURECODE);
+ break;
+ case ELF::EM_HEXAGON:
+ BCase(SHF_HEX_GPREL);
+ break;
+ case ELF::EM_MIPS:
+ BCase(SHF_MIPS_NODUPES);
+ BCase(SHF_MIPS_NAMES);
+ BCase(SHF_MIPS_LOCAL);
+ BCase(SHF_MIPS_NOSTRIP);
+ BCase(SHF_MIPS_GPREL);
+ BCase(SHF_MIPS_MERGE);
+ BCase(SHF_MIPS_ADDR);
+ BCase(SHF_MIPS_STRING);
+ break;
+ case ELF::EM_X86_64:
+ BCase(SHF_X86_64_LARGE);
+ break;
+ default:
+ // Nothing to do.
+ break;
+ }
+#undef BCase
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_SHN>::enumeration(
+ IO &IO, ELFYAML::ELF_SHN &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(SHN_UNDEF);
+ ECase(SHN_LORESERVE);
+ ECase(SHN_LOPROC);
+ ECase(SHN_HIPROC);
+ ECase(SHN_LOOS);
+ ECase(SHN_HIOS);
+ ECase(SHN_ABS);
+ ECase(SHN_COMMON);
+ ECase(SHN_XINDEX);
+ ECase(SHN_HIRESERVE);
+ ECase(SHN_AMDGPU_LDS);
+ ECase(SHN_HEXAGON_SCOMMON);
+ ECase(SHN_HEXAGON_SCOMMON_1);
+ ECase(SHN_HEXAGON_SCOMMON_2);
+ ECase(SHN_HEXAGON_SCOMMON_4);
+ ECase(SHN_HEXAGON_SCOMMON_8);
+#undef ECase
+ IO.enumFallback<Hex16>(Value);
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_STB>::enumeration(
+ IO &IO, ELFYAML::ELF_STB &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(STB_LOCAL);
+ ECase(STB_GLOBAL);
+ ECase(STB_WEAK);
+ ECase(STB_GNU_UNIQUE);
+#undef ECase
+ IO.enumFallback<Hex8>(Value);
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_STT>::enumeration(
+ IO &IO, ELFYAML::ELF_STT &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(STT_NOTYPE);
+ ECase(STT_OBJECT);
+ ECase(STT_FUNC);
+ ECase(STT_SECTION);
+ ECase(STT_FILE);
+ ECase(STT_COMMON);
+ ECase(STT_TLS);
+ ECase(STT_GNU_IFUNC);
+#undef ECase
+ IO.enumFallback<Hex8>(Value);
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_RSS>::enumeration(
+ IO &IO, ELFYAML::ELF_RSS &Value) {
+#define ECase(X) IO.enumCase(Value, #X, ELF::X)
+ ECase(RSS_UNDEF);
+ ECase(RSS_GP);
+ ECase(RSS_GP0);
+ ECase(RSS_LOC);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_REL>::enumeration(
+ IO &IO, ELFYAML::ELF_REL &Value) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+ assert(Object && "The IO context is not initialized");
+#define ELF_RELOC(X, Y) IO.enumCase(Value, #X, ELF::X);
+ switch (Object->getMachine()) {
+ case ELF::EM_X86_64:
+#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
+ break;
+ case ELF::EM_MIPS:
+#include "llvm/BinaryFormat/ELFRelocs/Mips.def"
+ break;
+ case ELF::EM_HEXAGON:
+#include "llvm/BinaryFormat/ELFRelocs/Hexagon.def"
+ break;
+ case ELF::EM_386:
+ case ELF::EM_IAMCU:
+#include "llvm/BinaryFormat/ELFRelocs/i386.def"
+ break;
+ case ELF::EM_AARCH64:
+#include "llvm/BinaryFormat/ELFRelocs/AArch64.def"
+ break;
+ case ELF::EM_ARM:
+#include "llvm/BinaryFormat/ELFRelocs/ARM.def"
+ break;
+ case ELF::EM_ARC:
+#include "llvm/BinaryFormat/ELFRelocs/ARC.def"
+ break;
+ case ELF::EM_RISCV:
+#include "llvm/BinaryFormat/ELFRelocs/RISCV.def"
+ break;
+ case ELF::EM_LANAI:
+#include "llvm/BinaryFormat/ELFRelocs/Lanai.def"
+ break;
+ case ELF::EM_AMDGPU:
+#include "llvm/BinaryFormat/ELFRelocs/AMDGPU.def"
+ break;
+ case ELF::EM_BPF:
+#include "llvm/BinaryFormat/ELFRelocs/BPF.def"
+ break;
+ case ELF::EM_VE:
+#include "llvm/BinaryFormat/ELFRelocs/VE.def"
+ break;
+ case ELF::EM_CSKY:
+#include "llvm/BinaryFormat/ELFRelocs/CSKY.def"
+ break;
+ case ELF::EM_PPC64:
+#include "llvm/BinaryFormat/ELFRelocs/PowerPC64.def"
+ break;
+ default:
+ // Nothing to do.
+ break;
+ }
+#undef ELF_RELOC
+ IO.enumFallback<Hex32>(Value);
+}
+
+void ScalarEnumerationTraits<ELFYAML::ELF_DYNTAG>::enumeration(
+ IO &IO, ELFYAML::ELF_DYNTAG &Value) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+ assert(Object && "The IO context is not initialized");
+
+// Disable architecture-specific tags by default. We might enable them below.
+#define AARCH64_DYNAMIC_TAG(name, value)
+#define MIPS_DYNAMIC_TAG(name, value)
+#define HEXAGON_DYNAMIC_TAG(name, value)
+#define PPC_DYNAMIC_TAG(name, value)
+#define PPC64_DYNAMIC_TAG(name, value)
+// Ignore marker tags such as DT_HIOS (maps to DT_VERNEEDNUM), etc.
+#define DYNAMIC_TAG_MARKER(name, value)
+
+#define STRINGIFY(X) (#X)
+#define DYNAMIC_TAG(X, Y) IO.enumCase(Value, STRINGIFY(DT_##X), ELF::DT_##X);
+ switch (Object->getMachine()) {
+ case ELF::EM_AARCH64:
+#undef AARCH64_DYNAMIC_TAG
+#define AARCH64_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
+#include "llvm/BinaryFormat/DynamicTags.def"
+#undef AARCH64_DYNAMIC_TAG
+#define AARCH64_DYNAMIC_TAG(name, value)
+ break;
+ case ELF::EM_MIPS:
+#undef MIPS_DYNAMIC_TAG
+#define MIPS_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
+#include "llvm/BinaryFormat/DynamicTags.def"
+#undef MIPS_DYNAMIC_TAG
+#define MIPS_DYNAMIC_TAG(name, value)
+ break;
+ case ELF::EM_HEXAGON:
+#undef HEXAGON_DYNAMIC_TAG
+#define HEXAGON_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
+#include "llvm/BinaryFormat/DynamicTags.def"
+#undef HEXAGON_DYNAMIC_TAG
+#define HEXAGON_DYNAMIC_TAG(name, value)
+ break;
+ case ELF::EM_PPC:
+#undef PPC_DYNAMIC_TAG
+#define PPC_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
+#include "llvm/BinaryFormat/DynamicTags.def"
+#undef PPC_DYNAMIC_TAG
+#define PPC_DYNAMIC_TAG(name, value)
+ break;
+ case ELF::EM_PPC64:
+#undef PPC64_DYNAMIC_TAG
+#define PPC64_DYNAMIC_TAG(name, value) DYNAMIC_TAG(name, value)
+#include "llvm/BinaryFormat/DynamicTags.def"
+#undef PPC64_DYNAMIC_TAG
+#define PPC64_DYNAMIC_TAG(name, value)
+ break;
+ default:
+#include "llvm/BinaryFormat/DynamicTags.def"
+ break;
+ }
+#undef AARCH64_DYNAMIC_TAG
+#undef MIPS_DYNAMIC_TAG
+#undef HEXAGON_DYNAMIC_TAG
+#undef PPC_DYNAMIC_TAG
+#undef PPC64_DYNAMIC_TAG
+#undef DYNAMIC_TAG_MARKER
+#undef STRINGIFY
+#undef DYNAMIC_TAG
+
+ IO.enumFallback<Hex64>(Value);
+}
+
+void ScalarEnumerationTraits<ELFYAML::MIPS_AFL_REG>::enumeration(
+ IO &IO, ELFYAML::MIPS_AFL_REG &Value) {
+#define ECase(X) IO.enumCase(Value, #X, Mips::AFL_##X)
+ ECase(REG_NONE);
+ ECase(REG_32);
+ ECase(REG_64);
+ ECase(REG_128);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<ELFYAML::MIPS_ABI_FP>::enumeration(
+ IO &IO, ELFYAML::MIPS_ABI_FP &Value) {
+#define ECase(X) IO.enumCase(Value, #X, Mips::Val_GNU_MIPS_ABI_##X)
+ ECase(FP_ANY);
+ ECase(FP_DOUBLE);
+ ECase(FP_SINGLE);
+ ECase(FP_SOFT);
+ ECase(FP_OLD_64);
+ ECase(FP_XX);
+ ECase(FP_64);
+ ECase(FP_64A);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<ELFYAML::MIPS_AFL_EXT>::enumeration(
+ IO &IO, ELFYAML::MIPS_AFL_EXT &Value) {
+#define ECase(X) IO.enumCase(Value, #X, Mips::AFL_##X)
+ ECase(EXT_NONE);
+ ECase(EXT_XLR);
+ ECase(EXT_OCTEON2);
+ ECase(EXT_OCTEONP);
+ ECase(EXT_LOONGSON_3A);
+ ECase(EXT_OCTEON);
+ ECase(EXT_5900);
+ ECase(EXT_4650);
+ ECase(EXT_4010);
+ ECase(EXT_4100);
+ ECase(EXT_3900);
+ ECase(EXT_10000);
+ ECase(EXT_SB1);
+ ECase(EXT_4111);
+ ECase(EXT_4120);
+ ECase(EXT_5400);
+ ECase(EXT_5500);
+ ECase(EXT_LOONGSON_2E);
+ ECase(EXT_LOONGSON_2F);
+ ECase(EXT_OCTEON3);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<ELFYAML::MIPS_ISA>::enumeration(
+ IO &IO, ELFYAML::MIPS_ISA &Value) {
+ IO.enumCase(Value, "MIPS1", 1);
+ IO.enumCase(Value, "MIPS2", 2);
+ IO.enumCase(Value, "MIPS3", 3);
+ IO.enumCase(Value, "MIPS4", 4);
+ IO.enumCase(Value, "MIPS5", 5);
+ IO.enumCase(Value, "MIPS32", 32);
+ IO.enumCase(Value, "MIPS64", 64);
+ IO.enumFallback<Hex32>(Value);
+}
+
+void ScalarBitSetTraits<ELFYAML::MIPS_AFL_ASE>::bitset(
+ IO &IO, ELFYAML::MIPS_AFL_ASE &Value) {
+#define BCase(X) IO.bitSetCase(Value, #X, Mips::AFL_ASE_##X)
+ BCase(DSP);
+ BCase(DSPR2);
+ BCase(EVA);
+ BCase(MCU);
+ BCase(MDMX);
+ BCase(MIPS3D);
+ BCase(MT);
+ BCase(SMARTMIPS);
+ BCase(VIRT);
+ BCase(MSA);
+ BCase(MIPS16);
+ BCase(MICROMIPS);
+ BCase(XPA);
+ BCase(CRC);
+ BCase(GINV);
+#undef BCase
+}
+
+void ScalarBitSetTraits<ELFYAML::MIPS_AFL_FLAGS1>::bitset(
+ IO &IO, ELFYAML::MIPS_AFL_FLAGS1 &Value) {
+#define BCase(X) IO.bitSetCase(Value, #X, Mips::AFL_FLAGS1_##X)
+ BCase(ODDSPREG);
+#undef BCase
+}
+
+void MappingTraits<ELFYAML::SectionHeader>::mapping(
+ IO &IO, ELFYAML::SectionHeader &SHdr) {
+ IO.mapRequired("Name", SHdr.Name);
+}
+
+void MappingTraits<ELFYAML::FileHeader>::mapping(IO &IO,
+ ELFYAML::FileHeader &FileHdr) {
+ IO.mapRequired("Class", FileHdr.Class);
+ IO.mapRequired("Data", FileHdr.Data);
+ IO.mapOptional("OSABI", FileHdr.OSABI, ELFYAML::ELF_ELFOSABI(0));
+ IO.mapOptional("ABIVersion", FileHdr.ABIVersion, Hex8(0));
+ IO.mapRequired("Type", FileHdr.Type);
+ IO.mapOptional("Machine", FileHdr.Machine);
+ IO.mapOptional("Flags", FileHdr.Flags, ELFYAML::ELF_EF(0));
+ IO.mapOptional("Entry", FileHdr.Entry, Hex64(0));
+
+ // obj2yaml does not dump these fields.
+ assert(!IO.outputting() ||
+ (!FileHdr.EPhOff && !FileHdr.EPhEntSize && !FileHdr.EPhNum));
+ IO.mapOptional("EPhOff", FileHdr.EPhOff);
+ IO.mapOptional("EPhEntSize", FileHdr.EPhEntSize);
+ IO.mapOptional("EPhNum", FileHdr.EPhNum);
+ IO.mapOptional("EShEntSize", FileHdr.EShEntSize);
+ IO.mapOptional("EShOff", FileHdr.EShOff);
+ IO.mapOptional("EShNum", FileHdr.EShNum);
+ IO.mapOptional("EShStrNdx", FileHdr.EShStrNdx);
+}
+
+void MappingTraits<ELFYAML::ProgramHeader>::mapping(
+ IO &IO, ELFYAML::ProgramHeader &Phdr) {
+ IO.mapRequired("Type", Phdr.Type);
+ IO.mapOptional("Flags", Phdr.Flags, ELFYAML::ELF_PF(0));
+ IO.mapOptional("FirstSec", Phdr.FirstSec);
+ IO.mapOptional("LastSec", Phdr.LastSec);
+ IO.mapOptional("VAddr", Phdr.VAddr, Hex64(0));
+ IO.mapOptional("PAddr", Phdr.PAddr, Phdr.VAddr);
+ IO.mapOptional("Align", Phdr.Align);
+ IO.mapOptional("FileSize", Phdr.FileSize);
+ IO.mapOptional("MemSize", Phdr.MemSize);
+ IO.mapOptional("Offset", Phdr.Offset);
+}
+
+std::string MappingTraits<ELFYAML::ProgramHeader>::validate(
+ IO &IO, ELFYAML::ProgramHeader &Phdr) {
+ if (!Phdr.FirstSec && Phdr.LastSec)
+ return "the \"LastSec\" key can't be used without the \"FirstSec\" key";
+ if (Phdr.FirstSec && !Phdr.LastSec)
+ return "the \"FirstSec\" key can't be used without the \"LastSec\" key";
+ return "";
+}
+
+LLVM_YAML_STRONG_TYPEDEF(StringRef, StOtherPiece)
+
+template <> struct ScalarTraits<StOtherPiece> {
+ static void output(const StOtherPiece &Val, void *, raw_ostream &Out) {
+ Out << Val;
+ }
+ static StringRef input(StringRef Scalar, void *, StOtherPiece &Val) {
+ Val = Scalar;
+ return {};
+ }
+ static QuotingType mustQuote(StringRef) { return QuotingType::None; }
+};
+template <> struct SequenceElementTraits<StOtherPiece> {
+ static const bool flow = true;
+};
+
+template <> struct ScalarTraits<ELFYAML::YAMLFlowString> {
+ static void output(const ELFYAML::YAMLFlowString &Val, void *,
+ raw_ostream &Out) {
+ Out << Val;
+ }
+ static StringRef input(StringRef Scalar, void *,
+ ELFYAML::YAMLFlowString &Val) {
+ Val = Scalar;
+ return {};
+ }
+ static QuotingType mustQuote(StringRef S) {
+ return ScalarTraits<StringRef>::mustQuote(S);
+ }
+};
+template <> struct SequenceElementTraits<ELFYAML::YAMLFlowString> {
+ static const bool flow = true;
+};
+
+namespace {
+
+struct NormalizedOther {
+ NormalizedOther(IO &IO) : YamlIO(IO) {}
+ NormalizedOther(IO &IO, Optional<uint8_t> Original) : YamlIO(IO) {
+ assert(Original && "This constructor is only used for outputting YAML and "
+ "assumes a non-empty Original");
+ std::vector<StOtherPiece> Ret;
+ const auto *Object = static_cast<ELFYAML::Object *>(YamlIO.getContext());
+ for (std::pair<StringRef, uint8_t> &P :
+ getFlags(Object->getMachine()).takeVector()) {
+ uint8_t FlagValue = P.second;
+ if ((*Original & FlagValue) != FlagValue)
+ continue;
+ *Original &= ~FlagValue;
+ Ret.push_back({P.first});
+ }
+
+ if (*Original != 0) {
+ UnknownFlagsHolder = std::to_string(*Original);
+ Ret.push_back({UnknownFlagsHolder});
+ }
+
+ if (!Ret.empty())
+ Other = std::move(Ret);
+ }
+
+ uint8_t toValue(StringRef Name) {
+ const auto *Object = static_cast<ELFYAML::Object *>(YamlIO.getContext());
+ MapVector<StringRef, uint8_t> Flags = getFlags(Object->getMachine());
+
+ auto It = Flags.find(Name);
+ if (It != Flags.end())
+ return It->second;
+
+ uint8_t Val;
+ if (to_integer(Name, Val))
+ return Val;
+
+ YamlIO.setError("an unknown value is used for symbol's 'Other' field: " +
+ Name);
+ return 0;
+ }
+
+ Optional<uint8_t> denormalize(IO &) {
+ if (!Other)
+ return None;
+ uint8_t Ret = 0;
+ for (StOtherPiece &Val : *Other)
+ Ret |= toValue(Val);
+ return Ret;
+ }
+
+ // st_other field is used to encode symbol visibility and platform-dependent
+ // flags and values. This method returns a name-to-value map that is used for
+ // parsing and encoding this field.
+ MapVector<StringRef, uint8_t> getFlags(unsigned EMachine) {
+ MapVector<StringRef, uint8_t> Map;
+ // STV_* values are just enumeration values. We add them in a reversed order
+ // because when we convert the st_other to named constants when printing
+ // YAML we want to use a maximum number of bits on each step:
+ // when we have st_other == 3, we want to print it as STV_PROTECTED (3), but
+ // not as STV_HIDDEN (2) + STV_INTERNAL (1).
+ Map["STV_PROTECTED"] = ELF::STV_PROTECTED;
+ Map["STV_HIDDEN"] = ELF::STV_HIDDEN;
+ Map["STV_INTERNAL"] = ELF::STV_INTERNAL;
+ // STV_DEFAULT is used to represent the default visibility and has a value
+ // 0. We want to be able to read it from YAML documents, but there is no
+ // reason to print it.
+ if (!YamlIO.outputting())
+ Map["STV_DEFAULT"] = ELF::STV_DEFAULT;
+
+ // MIPS is not consistent. All of the STO_MIPS_* values are bit flags,
+ // except STO_MIPS_MIPS16 which overlaps them. It should be checked and
+ // consumed first when we print the output, because we do not want to print
+ // any other flags that have the same bits instead.
+ if (EMachine == ELF::EM_MIPS) {
+ Map["STO_MIPS_MIPS16"] = ELF::STO_MIPS_MIPS16;
+ Map["STO_MIPS_MICROMIPS"] = ELF::STO_MIPS_MICROMIPS;
+ Map["STO_MIPS_PIC"] = ELF::STO_MIPS_PIC;
+ Map["STO_MIPS_PLT"] = ELF::STO_MIPS_PLT;
+ Map["STO_MIPS_OPTIONAL"] = ELF::STO_MIPS_OPTIONAL;
+ }
+
+ if (EMachine == ELF::EM_AARCH64)
+ Map["STO_AARCH64_VARIANT_PCS"] = ELF::STO_AARCH64_VARIANT_PCS;
+ return Map;
+ }
+
+ IO &YamlIO;
+ Optional<std::vector<StOtherPiece>> Other;
+ std::string UnknownFlagsHolder;
+};
+
+} // end anonymous namespace
+
+void ScalarTraits<ELFYAML::YAMLIntUInt>::output(const ELFYAML::YAMLIntUInt &Val,
+ void *Ctx, raw_ostream &Out) {
+ Out << Val;
+}
+
+StringRef ScalarTraits<ELFYAML::YAMLIntUInt>::input(StringRef Scalar, void *Ctx,
+ ELFYAML::YAMLIntUInt &Val) {
+ const bool Is64 = static_cast<ELFYAML::Object *>(Ctx)->Header.Class ==
+ ELFYAML::ELF_ELFCLASS(ELF::ELFCLASS64);
+ StringRef ErrMsg = "invalid number";
+ // We do not accept negative hex numbers because their meaning is ambiguous.
+ // For example, would -0xfffffffff mean 1 or INT32_MIN?
+ if (Scalar.empty() || Scalar.startswith("-0x"))
+ return ErrMsg;
+
+ if (Scalar.startswith("-")) {
+ const int64_t MinVal = Is64 ? INT64_MIN : INT32_MIN;
+ long long Int;
+ if (getAsSignedInteger(Scalar, /*Radix=*/0, Int) || (Int < MinVal))
+ return ErrMsg;
+ Val = Int;
+ return "";
+ }
+
+ const uint64_t MaxVal = Is64 ? UINT64_MAX : UINT32_MAX;
+ unsigned long long UInt;
+ if (getAsUnsignedInteger(Scalar, /*Radix=*/0, UInt) || (UInt > MaxVal))
+ return ErrMsg;
+ Val = UInt;
+ return "";
+}
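+
+// Worked examples (illustrative): for ELFCLASS32, "0xffffffff" and
+// "-2147483648" are accepted, "0x100000000" and "-2147483649" are out of
+// range, and "-0x1" is rejected up front because negative hex is ambiguous.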
+
+void MappingTraits<ELFYAML::Symbol>::mapping(IO &IO, ELFYAML::Symbol &Symbol) {
+ IO.mapOptional("Name", Symbol.Name, StringRef());
+ IO.mapOptional("StName", Symbol.StName);
+ IO.mapOptional("Type", Symbol.Type, ELFYAML::ELF_STT(0));
+ IO.mapOptional("Section", Symbol.Section);
+ IO.mapOptional("Index", Symbol.Index);
+ IO.mapOptional("Binding", Symbol.Binding, ELFYAML::ELF_STB(0));
+ IO.mapOptional("Value", Symbol.Value);
+ IO.mapOptional("Size", Symbol.Size);
+
+ // Symbol's Other field is a bit special. It is usually a field that
+ // represents st_other and holds the symbol visibility. However, on some
+ // platforms, it can contain bit fields and regular values, or even sometimes a
+ // crazy mix of them (see comments for NormalizedOther). Because of this, we
+ // need special handling.
+ MappingNormalization<NormalizedOther, Optional<uint8_t>> Keys(IO,
+ Symbol.Other);
+ IO.mapOptional("Other", Keys->Other);
+}
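+
+// Illustrative sketch (not from the upstream source): "Other" accepts a flow
+// sequence mixing named constants and raw numbers, which NormalizedOther
+// above folds into a single st_other byte (STV_PROTECTED | 4 == 7 here):
+//
+//   Symbols:
+//     - Name:  foo
+//       Type:  STT_FUNC
+//       Other: [ STV_PROTECTED, 4 ]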
+
+std::string MappingTraits<ELFYAML::Symbol>::validate(IO &IO,
+ ELFYAML::Symbol &Symbol) {
+ if (Symbol.Index && Symbol.Section)
+ return "Index and Section cannot both be specified for Symbol";
+ return "";
+}
+
+static void commonSectionMapping(IO &IO, ELFYAML::Section &Section) {
+ IO.mapOptional("Name", Section.Name, StringRef());
+ IO.mapRequired("Type", Section.Type);
+ IO.mapOptional("Flags", Section.Flags);
+ IO.mapOptional("Address", Section.Address);
+ IO.mapOptional("Link", Section.Link);
+ IO.mapOptional("AddressAlign", Section.AddressAlign, Hex64(0));
+ IO.mapOptional("EntSize", Section.EntSize);
+ IO.mapOptional("Offset", Section.Offset);
+
+ IO.mapOptional("Content", Section.Content);
+ IO.mapOptional("Size", Section.Size);
+
+ // obj2yaml does not dump these fields. They are expected to be empty when we
+ // are producing YAML, because yaml2obj sets appropriate values for them
+ // automatically when they are not explicitly defined.
+ assert(!IO.outputting() ||
+ (!Section.ShOffset && !Section.ShSize && !Section.ShName &&
+ !Section.ShFlags && !Section.ShType && !Section.ShAddrAlign));
+ IO.mapOptional("ShAddrAlign", Section.ShAddrAlign);
+ IO.mapOptional("ShName", Section.ShName);
+ IO.mapOptional("ShOffset", Section.ShOffset);
+ IO.mapOptional("ShSize", Section.ShSize);
+ IO.mapOptional("ShFlags", Section.ShFlags);
+ IO.mapOptional("ShType", Section.ShType);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::DynamicSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::RawContentSection &Section) {
+ commonSectionMapping(IO, Section);
+
+ // We also support reading the content as an array of bytes using the
+ // ContentArray key. obj2yaml never prints this field.
+ assert(!IO.outputting() || !Section.ContentBuf.hasValue());
+ IO.mapOptional("ContentArray", Section.ContentBuf);
+ if (Section.ContentBuf) {
+ if (Section.Content)
+ IO.setError("Content and ContentArray can't be used together");
+ Section.Content = yaml::BinaryRef(*Section.ContentBuf);
+ }
+
+ IO.mapOptional("Info", Section.Info);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::BBAddrMapSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Content", Section.Content);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::StackSizesSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::HashSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Bucket", Section.Bucket);
+ IO.mapOptional("Chain", Section.Chain);
+
+ // obj2yaml does not dump these fields. They can be used to override nchain
+ // and nbucket values for creating broken sections.
+ assert(!IO.outputting() ||
+ (!Section.NBucket.hasValue() && !Section.NChain.hasValue()));
+ IO.mapOptional("NChain", Section.NChain);
+ IO.mapOptional("NBucket", Section.NBucket);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::NoteSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Notes", Section.Notes);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::GnuHashSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Header", Section.Header);
+ IO.mapOptional("BloomFilter", Section.BloomFilter);
+ IO.mapOptional("HashBuckets", Section.HashBuckets);
+ IO.mapOptional("HashValues", Section.HashValues);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::NoBitsSection &Section) {
+ commonSectionMapping(IO, Section);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::VerdefSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Info", Section.Info);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::SymverSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::VerneedSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Info", Section.Info);
+ IO.mapOptional("Dependencies", Section.VerneedV);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::RelocationSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Info", Section.RelocatableSec, StringRef());
+ IO.mapOptional("Relocations", Section.Relocations);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::RelrSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+static void groupSectionMapping(IO &IO, ELFYAML::GroupSection &Group) {
+ commonSectionMapping(IO, Group);
+ IO.mapOptional("Info", Group.Signature);
+ IO.mapOptional("Members", Group.Members);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::SymtabShndxSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::AddrsigSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Symbols", Section.Symbols);
+}
+
+static void fillMapping(IO &IO, ELFYAML::Fill &Fill) {
+ IO.mapOptional("Name", Fill.Name, StringRef());
+ IO.mapOptional("Pattern", Fill.Pattern);
+ IO.mapOptional("Offset", Fill.Offset);
+ IO.mapRequired("Size", Fill.Size);
+}
+
+static void sectionHeaderTableMapping(IO &IO,
+ ELFYAML::SectionHeaderTable &SHT) {
+ IO.mapOptional("Offset", SHT.Offset);
+ IO.mapOptional("Sections", SHT.Sections);
+ IO.mapOptional("Excluded", SHT.Excluded);
+ IO.mapOptional("NoHeaders", SHT.NoHeaders);
+}
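+
+// A sketch (hypothetical names) of the corresponding YAML chunk; per the
+// chunk validation later in this file, NoHeaders is mutually exclusive with
+// the other keys:
+//
+//   - Type: SectionHeaderTable
+//     Sections:
+//       - Name: .text
+//     Excluded:
+//       - Name: .strtab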
+
+static void sectionMapping(IO &IO, ELFYAML::LinkerOptionsSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Options", Section.Options);
+}
+
+static void sectionMapping(IO &IO,
+ ELFYAML::DependentLibrariesSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Libraries", Section.Libs);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::CallGraphProfileSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+void MappingTraits<ELFYAML::SectionOrType>::mapping(
+ IO &IO, ELFYAML::SectionOrType &sectionOrType) {
+ IO.mapRequired("SectionOrType", sectionOrType.sectionNameOrType);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::ARMIndexTableSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Entries", Section.Entries);
+}
+
+static void sectionMapping(IO &IO, ELFYAML::MipsABIFlags &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Version", Section.Version, Hex16(0));
+ IO.mapRequired("ISA", Section.ISALevel);
+ IO.mapOptional("ISARevision", Section.ISARevision, Hex8(0));
+ IO.mapOptional("ISAExtension", Section.ISAExtension,
+ ELFYAML::MIPS_AFL_EXT(Mips::AFL_EXT_NONE));
+ IO.mapOptional("ASEs", Section.ASEs, ELFYAML::MIPS_AFL_ASE(0));
+ IO.mapOptional("FpABI", Section.FpABI,
+ ELFYAML::MIPS_ABI_FP(Mips::Val_GNU_MIPS_ABI_FP_ANY));
+ IO.mapOptional("GPRSize", Section.GPRSize,
+ ELFYAML::MIPS_AFL_REG(Mips::AFL_REG_NONE));
+ IO.mapOptional("CPR1Size", Section.CPR1Size,
+ ELFYAML::MIPS_AFL_REG(Mips::AFL_REG_NONE));
+ IO.mapOptional("CPR2Size", Section.CPR2Size,
+ ELFYAML::MIPS_AFL_REG(Mips::AFL_REG_NONE));
+ IO.mapOptional("Flags1", Section.Flags1, ELFYAML::MIPS_AFL_FLAGS1(0));
+ IO.mapOptional("Flags2", Section.Flags2, Hex32(0));
+}
+
+static StringRef getStringValue(IO &IO, const char *Key) {
+ StringRef Val;
+ IO.mapRequired(Key, Val);
+ return Val;
+}
+
+static void setStringValue(IO &IO, const char *Key, StringRef Val) {
+ IO.mapRequired(Key, Val);
+}
+
+static bool isInteger(StringRef Val) {
+ APInt Tmp;
+ return !Val.getAsInteger(0, Tmp);
+}
+
+void MappingTraits<std::unique_ptr<ELFYAML::Chunk>>::mapping(
+ IO &IO, std::unique_ptr<ELFYAML::Chunk> &Section) {
+ ELFYAML::ELF_SHT Type;
+ StringRef TypeStr;
+ if (IO.outputting()) {
+ if (auto *S = dyn_cast<ELFYAML::Section>(Section.get()))
+ Type = S->Type;
+ else if (auto *SHT = dyn_cast<ELFYAML::SectionHeaderTable>(Section.get()))
+ TypeStr = SHT->TypeStr;
+ } else {
+ // When the Type string does not have a "SHT_" prefix, we know it is not a
+ // description of a regular ELF output section.
+ TypeStr = getStringValue(IO, "Type");
+ if (TypeStr.startswith("SHT_") || isInteger(TypeStr))
+ IO.mapRequired("Type", Type);
+ }
+
+ if (TypeStr == "Fill") {
+ assert(!IO.outputting()); // We don't dump fills currently.
+ Section.reset(new ELFYAML::Fill());
+ fillMapping(IO, *cast<ELFYAML::Fill>(Section.get()));
+ return;
+ }
+
+ if (TypeStr == ELFYAML::SectionHeaderTable::TypeStr) {
+ if (IO.outputting())
+ setStringValue(IO, "Type", TypeStr);
+ else
+ Section.reset(new ELFYAML::SectionHeaderTable(/*IsImplicit=*/false));
+
+ sectionHeaderTableMapping(
+ IO, *cast<ELFYAML::SectionHeaderTable>(Section.get()));
+ return;
+ }
+
+ const auto &Obj = *static_cast<ELFYAML::Object *>(IO.getContext());
+ if (Obj.getMachine() == ELF::EM_MIPS && Type == ELF::SHT_MIPS_ABIFLAGS) {
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::MipsABIFlags());
+ sectionMapping(IO, *cast<ELFYAML::MipsABIFlags>(Section.get()));
+ return;
+ }
+
+ if (Obj.getMachine() == ELF::EM_ARM && Type == ELF::SHT_ARM_EXIDX) {
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::ARMIndexTableSection());
+ sectionMapping(IO, *cast<ELFYAML::ARMIndexTableSection>(Section.get()));
+ return;
+ }
+
+ switch (Type) {
+ case ELF::SHT_DYNAMIC:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::DynamicSection());
+ sectionMapping(IO, *cast<ELFYAML::DynamicSection>(Section.get()));
+ break;
+ case ELF::SHT_REL:
+ case ELF::SHT_RELA:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::RelocationSection());
+ sectionMapping(IO, *cast<ELFYAML::RelocationSection>(Section.get()));
+ break;
+ case ELF::SHT_RELR:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::RelrSection());
+ sectionMapping(IO, *cast<ELFYAML::RelrSection>(Section.get()));
+ break;
+ case ELF::SHT_GROUP:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::GroupSection());
+ groupSectionMapping(IO, *cast<ELFYAML::GroupSection>(Section.get()));
+ break;
+ case ELF::SHT_NOBITS:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::NoBitsSection());
+ sectionMapping(IO, *cast<ELFYAML::NoBitsSection>(Section.get()));
+ break;
+ case ELF::SHT_HASH:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::HashSection());
+ sectionMapping(IO, *cast<ELFYAML::HashSection>(Section.get()));
+ break;
+ case ELF::SHT_NOTE:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::NoteSection());
+ sectionMapping(IO, *cast<ELFYAML::NoteSection>(Section.get()));
+ break;
+ case ELF::SHT_GNU_HASH:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::GnuHashSection());
+ sectionMapping(IO, *cast<ELFYAML::GnuHashSection>(Section.get()));
+ break;
+ case ELF::SHT_GNU_verdef:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::VerdefSection());
+ sectionMapping(IO, *cast<ELFYAML::VerdefSection>(Section.get()));
+ break;
+ case ELF::SHT_GNU_versym:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::SymverSection());
+ sectionMapping(IO, *cast<ELFYAML::SymverSection>(Section.get()));
+ break;
+ case ELF::SHT_GNU_verneed:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::VerneedSection());
+ sectionMapping(IO, *cast<ELFYAML::VerneedSection>(Section.get()));
+ break;
+ case ELF::SHT_SYMTAB_SHNDX:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::SymtabShndxSection());
+ sectionMapping(IO, *cast<ELFYAML::SymtabShndxSection>(Section.get()));
+ break;
+ case ELF::SHT_LLVM_ADDRSIG:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::AddrsigSection());
+ sectionMapping(IO, *cast<ELFYAML::AddrsigSection>(Section.get()));
+ break;
+ case ELF::SHT_LLVM_LINKER_OPTIONS:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::LinkerOptionsSection());
+ sectionMapping(IO, *cast<ELFYAML::LinkerOptionsSection>(Section.get()));
+ break;
+ case ELF::SHT_LLVM_DEPENDENT_LIBRARIES:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::DependentLibrariesSection());
+ sectionMapping(IO,
+ *cast<ELFYAML::DependentLibrariesSection>(Section.get()));
+ break;
+ case ELF::SHT_LLVM_CALL_GRAPH_PROFILE:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::CallGraphProfileSection());
+ sectionMapping(IO, *cast<ELFYAML::CallGraphProfileSection>(Section.get()));
+ break;
+ case ELF::SHT_LLVM_BB_ADDR_MAP:
+ if (!IO.outputting())
+ Section.reset(new ELFYAML::BBAddrMapSection());
+ sectionMapping(IO, *cast<ELFYAML::BBAddrMapSection>(Section.get()));
+ break;
+ default:
+ if (!IO.outputting()) {
+ StringRef Name;
+ IO.mapOptional("Name", Name, StringRef());
+ Name = ELFYAML::dropUniqueSuffix(Name);
+
+ if (ELFYAML::StackSizesSection::nameMatches(Name))
+ Section = std::make_unique<ELFYAML::StackSizesSection>();
+ else
+ Section = std::make_unique<ELFYAML::RawContentSection>();
+ }
+
+ if (auto S = dyn_cast<ELFYAML::RawContentSection>(Section.get()))
+ sectionMapping(IO, *S);
+ else
+ sectionMapping(IO, *cast<ELFYAML::StackSizesSection>(Section.get()));
+ }
+}
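+
+// A sketch (hypothetical values) of the dispatch above: a Type with an
+// "SHT_" prefix (or a plain integer) selects a concrete section class, the
+// special strings select non-section chunks, and unknown types fall back on
+// the section name:
+//
+//   Sections:
+//     - Name: .dynamic
+//       Type: SHT_DYNAMIC          # -> ELFYAML::DynamicSection
+//     - Type: Fill                 # -> ELFYAML::Fill
+//       Size: 0x10
+//     - Name: .stack_sizes
+//       Type: SHT_PROGBITS         # -> ELFYAML::StackSizesSection (by name)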
+
+std::string MappingTraits<std::unique_ptr<ELFYAML::Chunk>>::validate(
+ IO &io, std::unique_ptr<ELFYAML::Chunk> &C) {
+ if (const auto *F = dyn_cast<ELFYAML::Fill>(C.get())) {
+ if (F->Pattern && F->Pattern->binary_size() != 0 && !F->Size)
+ return "\"Size\" can't be 0 when \"Pattern\" is not empty";
+ return "";
+ }
+
+ if (const auto *SHT = dyn_cast<ELFYAML::SectionHeaderTable>(C.get())) {
+ if (SHT->NoHeaders && (SHT->Sections || SHT->Excluded || SHT->Offset))
+ return "NoHeaders can't be used together with Offset/Sections/Excluded";
+ if (!SHT->NoHeaders && !SHT->Sections && !SHT->Excluded)
+ return "SectionHeaderTable can't be empty. Use 'NoHeaders' key to drop "
+ "the section header table";
+ return "";
+ }
+
+ const ELFYAML::Section &Sec = *cast<ELFYAML::Section>(C.get());
+ if (Sec.Size && Sec.Content &&
+ (uint64_t)(*Sec.Size) < Sec.Content->binary_size())
+ return "Section size must be greater than or equal to the content size";
+
+ auto BuildErrPrefix = [](ArrayRef<std::pair<StringRef, bool>> EntV) {
+ std::string Msg;
+ for (size_t I = 0, E = EntV.size(); I != E; ++I) {
+ StringRef Name = EntV[I].first;
+ if (I == 0) {
+ Msg = "\"" + Name.str() + "\"";
+ continue;
+ }
+ if (I != EntV.size() - 1)
+ Msg += ", \"" + Name.str() + "\"";
+ else
+ Msg += " and \"" + Name.str() + "\"";
+ }
+ return Msg;
+ };
+
+ std::vector<std::pair<StringRef, bool>> Entries = Sec.getEntries();
+ const size_t NumUsedEntries = llvm::count_if(
+ Entries, [](const std::pair<StringRef, bool> &P) { return P.second; });
+
+ if ((Sec.Size || Sec.Content) && NumUsedEntries > 0)
+ return BuildErrPrefix(Entries) +
+ " cannot be used with \"Content\" or \"Size\"";
+
+ if (NumUsedEntries > 0 && Entries.size() != NumUsedEntries)
+ return BuildErrPrefix(Entries) + " must be used together";
+
+ if (const auto *RawSection = dyn_cast<ELFYAML::RawContentSection>(C.get())) {
+ if (RawSection->Flags && RawSection->ShFlags)
+ return "ShFlags and Flags cannot be used together";
+ return "";
+ }
+
+ if (const auto *NB = dyn_cast<ELFYAML::NoBitsSection>(C.get())) {
+ if (NB->Content)
+ return "SHT_NOBITS section cannot have \"Content\"";
+ return "";
+ }
+
+ if (const auto *MF = dyn_cast<ELFYAML::MipsABIFlags>(C.get())) {
+ if (MF->Content)
+ return "\"Content\" key is not implemented for SHT_MIPS_ABIFLAGS "
+ "sections";
+ if (MF->Size)
+ return "\"Size\" key is not implemented for SHT_MIPS_ABIFLAGS sections";
+ return "";
+ }
+
+ return "";
+}
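+
+// For instance (hypothetical values), this section is rejected by the size
+// check above because the three bytes of "Content" exceed the declared
+// "Size":
+//
+//   - Name:    .foo
+//     Type:    SHT_PROGBITS
+//     Size:    0x2
+//     Content: "AABBCC"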
+
+namespace {
+
+struct NormalizedMips64RelType {
+ NormalizedMips64RelType(IO &)
+ : Type(ELFYAML::ELF_REL(ELF::R_MIPS_NONE)),
+ Type2(ELFYAML::ELF_REL(ELF::R_MIPS_NONE)),
+ Type3(ELFYAML::ELF_REL(ELF::R_MIPS_NONE)),
+ SpecSym(ELFYAML::ELF_REL(ELF::RSS_UNDEF)) {}
+ NormalizedMips64RelType(IO &, ELFYAML::ELF_REL Original)
+ : Type(Original & 0xFF), Type2(Original >> 8 & 0xFF),
+ Type3(Original >> 16 & 0xFF), SpecSym(Original >> 24 & 0xFF) {}
+
+ ELFYAML::ELF_REL denormalize(IO &) {
+ ELFYAML::ELF_REL Res = Type | Type2 << 8 | Type3 << 16 | SpecSym << 24;
+ return Res;
+ }
+
+ ELFYAML::ELF_REL Type;
+ ELFYAML::ELF_REL Type2;
+ ELFYAML::ELF_REL Type3;
+ ELFYAML::ELF_RSS SpecSym;
+};
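+
+// Worked example (hypothetical value): normalizing a packed ELF_REL of
+// 0x01020304 yields Type = 0x04, Type2 = 0x03, Type3 = 0x02 and
+// SpecSym = 0x01; denormalize() packs the four bytes back in the same order.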
+
+} // end anonymous namespace
+
+void MappingTraits<ELFYAML::StackSizeEntry>::mapping(
+ IO &IO, ELFYAML::StackSizeEntry &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+ IO.mapOptional("Address", E.Address, Hex64(0));
+ IO.mapRequired("Size", E.Size);
+}
+
+void MappingTraits<ELFYAML::BBAddrMapEntry>::mapping(
+ IO &IO, ELFYAML::BBAddrMapEntry &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+ IO.mapOptional("Address", E.Address, Hex64(0));
+ IO.mapOptional("BBEntries", E.BBEntries);
+}
+
+void MappingTraits<ELFYAML::BBAddrMapEntry::BBEntry>::mapping(
+ IO &IO, ELFYAML::BBAddrMapEntry::BBEntry &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+ IO.mapRequired("AddressOffset", E.AddressOffset);
+ IO.mapRequired("Size", E.Size);
+ IO.mapRequired("Metadata", E.Metadata);
+}
+
+void MappingTraits<ELFYAML::GnuHashHeader>::mapping(IO &IO,
+ ELFYAML::GnuHashHeader &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+ IO.mapOptional("NBuckets", E.NBuckets);
+ IO.mapRequired("SymNdx", E.SymNdx);
+ IO.mapOptional("MaskWords", E.MaskWords);
+ IO.mapRequired("Shift2", E.Shift2);
+}
+
+void MappingTraits<ELFYAML::DynamicEntry>::mapping(IO &IO,
+ ELFYAML::DynamicEntry &Rel) {
+ assert(IO.getContext() && "The IO context is not initialized");
+
+ IO.mapRequired("Tag", Rel.Tag);
+ IO.mapRequired("Value", Rel.Val);
+}
+
+void MappingTraits<ELFYAML::NoteEntry>::mapping(IO &IO, ELFYAML::NoteEntry &N) {
+ assert(IO.getContext() && "The IO context is not initialized");
+
+ IO.mapOptional("Name", N.Name);
+ IO.mapOptional("Desc", N.Desc);
+ IO.mapRequired("Type", N.Type);
+}
+
+void MappingTraits<ELFYAML::VerdefEntry>::mapping(IO &IO,
+ ELFYAML::VerdefEntry &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+
+ IO.mapOptional("Version", E.Version);
+ IO.mapOptional("Flags", E.Flags);
+ IO.mapOptional("VersionNdx", E.VersionNdx);
+ IO.mapOptional("Hash", E.Hash);
+ IO.mapRequired("Names", E.VerNames);
+}
+
+void MappingTraits<ELFYAML::VerneedEntry>::mapping(IO &IO,
+ ELFYAML::VerneedEntry &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+
+ IO.mapRequired("Version", E.Version);
+ IO.mapRequired("File", E.File);
+ IO.mapRequired("Entries", E.AuxV);
+}
+
+void MappingTraits<ELFYAML::VernauxEntry>::mapping(IO &IO,
+ ELFYAML::VernauxEntry &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+
+ IO.mapRequired("Name", E.Name);
+ IO.mapRequired("Hash", E.Hash);
+ IO.mapRequired("Flags", E.Flags);
+ IO.mapRequired("Other", E.Other);
+}
+
+void MappingTraits<ELFYAML::Relocation>::mapping(IO &IO,
+ ELFYAML::Relocation &Rel) {
+ const auto *Object = static_cast<ELFYAML::Object *>(IO.getContext());
+ assert(Object && "The IO context is not initialized");
+
+ IO.mapOptional("Offset", Rel.Offset, (Hex64)0);
+ IO.mapOptional("Symbol", Rel.Symbol);
+
+ if (Object->getMachine() == ELFYAML::ELF_EM(ELF::EM_MIPS) &&
+ Object->Header.Class == ELFYAML::ELF_ELFCLASS(ELF::ELFCLASS64)) {
+ MappingNormalization<NormalizedMips64RelType, ELFYAML::ELF_REL> Key(
+ IO, Rel.Type);
+ IO.mapRequired("Type", Key->Type);
+ IO.mapOptional("Type2", Key->Type2, ELFYAML::ELF_REL(ELF::R_MIPS_NONE));
+ IO.mapOptional("Type3", Key->Type3, ELFYAML::ELF_REL(ELF::R_MIPS_NONE));
+ IO.mapOptional("SpecSym", Key->SpecSym, ELFYAML::ELF_RSS(ELF::RSS_UNDEF));
+ } else
+ IO.mapRequired("Type", Rel.Type);
+
+ IO.mapOptional("Addend", Rel.Addend, (ELFYAML::YAMLIntUInt)0);
+}
+
+void MappingTraits<ELFYAML::ARMIndexTableEntry>::mapping(
+ IO &IO, ELFYAML::ARMIndexTableEntry &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+ IO.mapRequired("Offset", E.Offset);
+
+ StringRef CantUnwind = "EXIDX_CANTUNWIND";
+ if (IO.outputting() && (uint32_t)E.Value == ARM::EHABI::EXIDX_CANTUNWIND)
+ IO.mapRequired("Value", CantUnwind);
+ else if (!IO.outputting() && getStringValue(IO, "Value") == CantUnwind)
+ E.Value = ARM::EHABI::EXIDX_CANTUNWIND;
+ else
+ IO.mapRequired("Value", E.Value);
+}
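+
+// Example (hypothetical values): both spellings of "Value" round-trip
+// through the mapping above:
+//
+//   Entries:
+//     - Offset: 0x0
+//       Value:  EXIDX_CANTUNWIND
+//     - Offset: 0x8
+//       Value:  0x12345678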
+
+void MappingTraits<ELFYAML::Object>::mapping(IO &IO, ELFYAML::Object &Object) {
+ assert(!IO.getContext() && "The IO context is initialized already");
+ IO.setContext(&Object);
+ IO.mapTag("!ELF", true);
+ IO.mapRequired("FileHeader", Object.Header);
+ IO.mapOptional("ProgramHeaders", Object.ProgramHeaders);
+ IO.mapOptional("Sections", Object.Chunks);
+ IO.mapOptional("Symbols", Object.Symbols);
+ IO.mapOptional("DynamicSymbols", Object.DynamicSymbols);
+ IO.mapOptional("DWARF", Object.DWARF);
+ if (Object.DWARF) {
+ Object.DWARF->IsLittleEndian =
+ Object.Header.Data == ELFYAML::ELF_ELFDATA(ELF::ELFDATA2LSB);
+ Object.DWARF->Is64BitAddrSize =
+ Object.Header.Class == ELFYAML::ELF_ELFCLASS(ELF::ELFCLASS64);
+ }
+ IO.setContext(nullptr);
+}
+
+void MappingTraits<ELFYAML::LinkerOption>::mapping(IO &IO,
+ ELFYAML::LinkerOption &Opt) {
+ assert(IO.getContext() && "The IO context is not initialized");
+ IO.mapRequired("Name", Opt.Key);
+ IO.mapRequired("Value", Opt.Value);
+}
+
+void MappingTraits<ELFYAML::CallGraphEntry>::mapping(
+ IO &IO, ELFYAML::CallGraphEntry &E) {
+ assert(IO.getContext() && "The IO context is not initialized");
+ IO.mapRequired("From", E.From);
+ IO.mapRequired("To", E.To);
+ IO.mapRequired("Weight", E.Weight);
+}
+
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_AFL_REG)
+LLVM_YAML_STRONG_TYPEDEF(uint8_t, MIPS_ABI_FP)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_EXT)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_ASE)
+LLVM_YAML_STRONG_TYPEDEF(uint32_t, MIPS_AFL_FLAGS1)
+
+} // end namespace yaml
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/MachOEmitter.cpp b/contrib/libs/llvm12/lib/ObjectYAML/MachOEmitter.cpp
new file mode 100644
index 00000000000..dec9c9f6960
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/MachOEmitter.cpp
@@ -0,0 +1,646 @@
+//===- yaml2macho - Convert YAML to a Mach object file --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// The Mach component of yaml2obj.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ObjectYAML/DWARFEmitter.h"
+#include "llvm/ObjectYAML/ObjectYAML.h"
+#include "llvm/ObjectYAML/yaml2obj.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/LEB128.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "llvm/Support/Format.h"
+
+using namespace llvm;
+
+namespace {
+
+class MachOWriter {
+public:
+ MachOWriter(MachOYAML::Object &Obj) : Obj(Obj), fileStart(0) {
+ is64Bit = Obj.Header.magic == MachO::MH_MAGIC_64 ||
+ Obj.Header.magic == MachO::MH_CIGAM_64;
+ memset(reinterpret_cast<void *>(&Header), 0, sizeof(MachO::mach_header_64));
+ }
+
+ Error writeMachO(raw_ostream &OS);
+
+private:
+ void writeHeader(raw_ostream &OS);
+ void writeLoadCommands(raw_ostream &OS);
+ Error writeSectionData(raw_ostream &OS);
+ void writeRelocations(raw_ostream &OS);
+ void writeLinkEditData(raw_ostream &OS);
+
+ void writeBindOpcodes(raw_ostream &OS,
+ std::vector<MachOYAML::BindOpcode> &BindOpcodes);
+ // LinkEdit writers
+ void writeRebaseOpcodes(raw_ostream &OS);
+ void writeBasicBindOpcodes(raw_ostream &OS);
+ void writeWeakBindOpcodes(raw_ostream &OS);
+ void writeLazyBindOpcodes(raw_ostream &OS);
+ void writeNameList(raw_ostream &OS);
+ void writeStringTable(raw_ostream &OS);
+ void writeExportTrie(raw_ostream &OS);
+
+ void dumpExportEntry(raw_ostream &OS, MachOYAML::ExportEntry &Entry);
+ void ZeroToOffset(raw_ostream &OS, size_t offset);
+
+ MachOYAML::Object &Obj;
+ bool is64Bit;
+ uint64_t fileStart;
+ MachO::mach_header_64 Header;
+
+ // Old PPC object files didn't have __LINKEDIT segments; the data was just
+ // stuck at the end of the file.
+ bool FoundLinkEditSeg = false;
+};
+
+Error MachOWriter::writeMachO(raw_ostream &OS) {
+ fileStart = OS.tell();
+ writeHeader(OS);
+ writeLoadCommands(OS);
+ if (Error Err = writeSectionData(OS))
+ return Err;
+ writeRelocations(OS);
+ if (!FoundLinkEditSeg)
+ writeLinkEditData(OS);
+ return Error::success();
+}
+
+void MachOWriter::writeHeader(raw_ostream &OS) {
+ Header.magic = Obj.Header.magic;
+ Header.cputype = Obj.Header.cputype;
+ Header.cpusubtype = Obj.Header.cpusubtype;
+ Header.filetype = Obj.Header.filetype;
+ Header.ncmds = Obj.Header.ncmds;
+ Header.sizeofcmds = Obj.Header.sizeofcmds;
+ Header.flags = Obj.Header.flags;
+ Header.reserved = Obj.Header.reserved;
+
+ if (Obj.IsLittleEndian != sys::IsLittleEndianHost)
+ MachO::swapStruct(Header);
+
+ auto header_size =
+ is64Bit ? sizeof(MachO::mach_header_64) : sizeof(MachO::mach_header);
+ OS.write((const char *)&Header, header_size);
+}
+
+template <typename SectionType>
+SectionType constructSection(MachOYAML::Section Sec) {
+ SectionType TempSec;
+ memcpy(reinterpret_cast<void *>(&TempSec.sectname[0]), &Sec.sectname[0], 16);
+ memcpy(reinterpret_cast<void *>(&TempSec.segname[0]), &Sec.segname[0], 16);
+ TempSec.addr = Sec.addr;
+ TempSec.size = Sec.size;
+ TempSec.offset = Sec.offset;
+ TempSec.align = Sec.align;
+ TempSec.reloff = Sec.reloff;
+ TempSec.nreloc = Sec.nreloc;
+ TempSec.flags = Sec.flags;
+ TempSec.reserved1 = Sec.reserved1;
+ TempSec.reserved2 = Sec.reserved2;
+ return TempSec;
+}
+
+template <typename StructType>
+size_t writeLoadCommandData(MachOYAML::LoadCommand &LC, raw_ostream &OS,
+ bool IsLittleEndian) {
+ return 0;
+}
+
+template <>
+size_t writeLoadCommandData<MachO::segment_command>(MachOYAML::LoadCommand &LC,
+ raw_ostream &OS,
+ bool IsLittleEndian) {
+ size_t BytesWritten = 0;
+ for (const auto &Sec : LC.Sections) {
+ auto TempSec = constructSection<MachO::section>(Sec);
+ if (IsLittleEndian != sys::IsLittleEndianHost)
+ MachO::swapStruct(TempSec);
+ OS.write(reinterpret_cast<const char *>(&(TempSec)),
+ sizeof(MachO::section));
+ BytesWritten += sizeof(MachO::section);
+ }
+ return BytesWritten;
+}
+
+template <>
+size_t writeLoadCommandData<MachO::segment_command_64>(
+ MachOYAML::LoadCommand &LC, raw_ostream &OS, bool IsLittleEndian) {
+ size_t BytesWritten = 0;
+ for (const auto &Sec : LC.Sections) {
+ auto TempSec = constructSection<MachO::section_64>(Sec);
+ TempSec.reserved3 = Sec.reserved3;
+ if (IsLittleEndian != sys::IsLittleEndianHost)
+ MachO::swapStruct(TempSec);
+ OS.write(reinterpret_cast<const char *>(&(TempSec)),
+ sizeof(MachO::section_64));
+ BytesWritten += sizeof(MachO::section_64);
+ }
+ return BytesWritten;
+}
+
+size_t writePayloadString(MachOYAML::LoadCommand &LC, raw_ostream &OS) {
+ size_t BytesWritten = 0;
+ if (!LC.PayloadString.empty()) {
+ OS.write(LC.PayloadString.c_str(), LC.PayloadString.length());
+ BytesWritten = LC.PayloadString.length();
+ }
+ return BytesWritten;
+}
+
+template <>
+size_t writeLoadCommandData<MachO::dylib_command>(MachOYAML::LoadCommand &LC,
+ raw_ostream &OS,
+ bool IsLittleEndian) {
+ return writePayloadString(LC, OS);
+}
+
+template <>
+size_t writeLoadCommandData<MachO::dylinker_command>(MachOYAML::LoadCommand &LC,
+ raw_ostream &OS,
+ bool IsLittleEndian) {
+ return writePayloadString(LC, OS);
+}
+
+template <>
+size_t writeLoadCommandData<MachO::rpath_command>(MachOYAML::LoadCommand &LC,
+ raw_ostream &OS,
+ bool IsLittleEndian) {
+ return writePayloadString(LC, OS);
+}
+
+template <>
+size_t writeLoadCommandData<MachO::build_version_command>(
+ MachOYAML::LoadCommand &LC, raw_ostream &OS, bool IsLittleEndian) {
+ size_t BytesWritten = 0;
+ for (const auto &T : LC.Tools) {
+ struct MachO::build_tool_version tool = T;
+ if (IsLittleEndian != sys::IsLittleEndianHost)
+ MachO::swapStruct(tool);
+ OS.write(reinterpret_cast<const char *>(&tool),
+ sizeof(MachO::build_tool_version));
+ BytesWritten += sizeof(MachO::build_tool_version);
+ }
+ return BytesWritten;
+}
+
+void ZeroFillBytes(raw_ostream &OS, size_t Size) {
+ std::vector<uint8_t> FillData(Size, 0);
+ OS.write(reinterpret_cast<char *>(FillData.data()), Size);
+}
+
+void Fill(raw_ostream &OS, size_t Size, uint32_t Data) {
+ std::vector<uint32_t> FillData((Size / 4) + 1, Data);
+ OS.write(reinterpret_cast<char *>(FillData.data()), Size);
+}
+
+void MachOWriter::ZeroToOffset(raw_ostream &OS, size_t Offset) {
+ auto currOffset = OS.tell() - fileStart;
+ if (currOffset < Offset)
+ ZeroFillBytes(OS, Offset - currOffset);
+}
+
+void MachOWriter::writeLoadCommands(raw_ostream &OS) {
+ for (auto &LC : Obj.LoadCommands) {
+ size_t BytesWritten = 0;
+ llvm::MachO::macho_load_command Data = LC.Data;
+
+#define HANDLE_LOAD_COMMAND(LCName, LCValue, LCStruct) \
+ case MachO::LCName: \
+ if (Obj.IsLittleEndian != sys::IsLittleEndianHost) \
+ MachO::swapStruct(Data.LCStruct##_data); \
+ OS.write(reinterpret_cast<const char *>(&(Data.LCStruct##_data)), \
+ sizeof(MachO::LCStruct)); \
+ BytesWritten = sizeof(MachO::LCStruct); \
+ BytesWritten += \
+ writeLoadCommandData<MachO::LCStruct>(LC, OS, Obj.IsLittleEndian); \
+ break;
+
+ switch (LC.Data.load_command_data.cmd) {
+ default:
+ if (Obj.IsLittleEndian != sys::IsLittleEndianHost)
+ MachO::swapStruct(Data.load_command_data);
+ OS.write(reinterpret_cast<const char *>(&(Data.load_command_data)),
+ sizeof(MachO::load_command));
+ BytesWritten = sizeof(MachO::load_command);
+ BytesWritten +=
+ writeLoadCommandData<MachO::load_command>(LC, OS, Obj.IsLittleEndian);
+ break;
+#include "llvm/BinaryFormat/MachO.def"
+ }
+
+ if (LC.PayloadBytes.size() > 0) {
+ OS.write(reinterpret_cast<const char *>(LC.PayloadBytes.data()),
+ LC.PayloadBytes.size());
+ BytesWritten += LC.PayloadBytes.size();
+ }
+
+ if (LC.ZeroPadBytes > 0) {
+ ZeroFillBytes(OS, LC.ZeroPadBytes);
+ BytesWritten += LC.ZeroPadBytes;
+ }
+
+ // Fill remaining bytes with 0. This will only get hit in partially
+ // specified test cases.
+ auto BytesRemaining = LC.Data.load_command_data.cmdsize - BytesWritten;
+ if (BytesRemaining > 0) {
+ ZeroFillBytes(OS, BytesRemaining);
+ }
+ }
+}
+
+Error MachOWriter::writeSectionData(raw_ostream &OS) {
+ for (auto &LC : Obj.LoadCommands) {
+ switch (LC.Data.load_command_data.cmd) {
+ case MachO::LC_SEGMENT:
+ case MachO::LC_SEGMENT_64:
+ uint64_t segOff = is64Bit ? LC.Data.segment_command_64_data.fileoff
+ : LC.Data.segment_command_data.fileoff;
+ if (0 ==
+ strncmp(&LC.Data.segment_command_data.segname[0], "__LINKEDIT", 16)) {
+ FoundLinkEditSeg = true;
+ writeLinkEditData(OS);
+ }
+ for (auto &Sec : LC.Sections) {
+ // Zero-fill any gap between the end of the last thing we wrote and the
+ // start of this section.
+ ZeroToOffset(OS, Sec.offset);
+ if (OS.tell() - fileStart > Sec.offset && Sec.offset != (uint32_t)0)
+ return createStringError(
+ errc::invalid_argument,
+ "wrote too much data somewhere, section offsets don't line up");
+
+ StringRef SectName(Sec.sectname,
+ strnlen(Sec.sectname, sizeof(Sec.sectname)));
+ // If the section's content is specified in the 'DWARF' entry, we will
+ // emit it regardless of the section's segname.
+ if (Obj.DWARF.getNonEmptySectionNames().count(SectName.substr(2))) {
+ if (Sec.content)
+ return createStringError(errc::invalid_argument,
+ "cannot specify section '" + SectName +
+ "' contents in the 'DWARF' entry and "
+ "the 'content' at the same time");
+ auto EmitFunc = DWARFYAML::getDWARFEmitterByName(SectName.substr(2));
+ if (Error Err = EmitFunc(OS, Obj.DWARF))
+ return Err;
+ continue;
+ }
+
+ // Skip if it's a virtual section.
+ if (MachO::isVirtualSection(Sec.flags & MachO::SECTION_TYPE))
+ continue;
+
+ if (Sec.content) {
+ yaml::BinaryRef Content = *Sec.content;
+ Content.writeAsBinary(OS);
+ ZeroFillBytes(OS, Sec.size - Content.binary_size());
+ } else {
+ // Fill section data with 0xDEADBEEF.
+ Fill(OS, Sec.size, 0xDEADBEEFu);
+ }
+ }
+ uint64_t segSize = is64Bit ? LC.Data.segment_command_64_data.filesize
+ : LC.Data.segment_command_data.filesize;
+ ZeroToOffset(OS, segOff + segSize);
+ break;
+ }
+ }
+
+ return Error::success();
+}
+
+// The implementations of makeRelocationInfo and makeScatteredRelocationInfo
+// are consistent with how libObject parses Mach-O binary files. For
+// reference, see getStruct, getRelocation, getPlainRelocationPCRel,
+// getPlainRelocationLength and related methods in MachOObjectFile.cpp.
+static MachO::any_relocation_info
+makeRelocationInfo(const MachOYAML::Relocation &R, bool IsLE) {
+ assert(!R.is_scattered && "non-scattered relocation expected");
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = R.address;
+ if (IsLE)
+ MRE.r_word1 = ((unsigned)R.symbolnum << 0) | ((unsigned)R.is_pcrel << 24) |
+ ((unsigned)R.length << 25) | ((unsigned)R.is_extern << 27) |
+ ((unsigned)R.type << 28);
+ else
+ MRE.r_word1 = ((unsigned)R.symbolnum << 8) | ((unsigned)R.is_pcrel << 7) |
+ ((unsigned)R.length << 5) | ((unsigned)R.is_extern << 4) |
+ ((unsigned)R.type << 0);
+ return MRE;
+}
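+
+// Worked example (hypothetical values) for the little-endian layout above:
+// symbolnum occupies bits 0-23 of r_word1, is_pcrel bit 24, length bits
+// 25-26, is_extern bit 27 and type bits 28-31, so symbolnum=1, is_pcrel=1,
+// length=2, is_extern=1, type=0 packs to r_word1 == 0x0D000001.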
+
+static MachO::any_relocation_info
+makeScatteredRelocationInfo(const MachOYAML::Relocation &R) {
+ assert(R.is_scattered && "scattered relocation expected");
+ MachO::any_relocation_info MRE;
+ MRE.r_word0 = (((unsigned)R.address << 0) | ((unsigned)R.type << 24) |
+ ((unsigned)R.length << 28) | ((unsigned)R.is_pcrel << 30) |
+ MachO::R_SCATTERED);
+ MRE.r_word1 = R.value;
+ return MRE;
+}
+
+void MachOWriter::writeRelocations(raw_ostream &OS) {
+ for (const MachOYAML::LoadCommand &LC : Obj.LoadCommands) {
+ switch (LC.Data.load_command_data.cmd) {
+ case MachO::LC_SEGMENT:
+ case MachO::LC_SEGMENT_64:
+ for (const MachOYAML::Section &Sec : LC.Sections) {
+ if (Sec.relocations.empty())
+ continue;
+ ZeroToOffset(OS, Sec.reloff);
+ for (const MachOYAML::Relocation &R : Sec.relocations) {
+ MachO::any_relocation_info MRE =
+ R.is_scattered ? makeScatteredRelocationInfo(R)
+ : makeRelocationInfo(R, Obj.IsLittleEndian);
+ if (Obj.IsLittleEndian != sys::IsLittleEndianHost)
+ MachO::swapStruct(MRE);
+ OS.write(reinterpret_cast<const char *>(&MRE),
+ sizeof(MachO::any_relocation_info));
+ }
+ }
+ }
+ }
+}
+
+void MachOWriter::writeBindOpcodes(
+ raw_ostream &OS, std::vector<MachOYAML::BindOpcode> &BindOpcodes) {
+
+ for (auto Opcode : BindOpcodes) {
+ uint8_t OpByte = Opcode.Opcode | Opcode.Imm;
+ OS.write(reinterpret_cast<char *>(&OpByte), 1);
+ for (auto Data : Opcode.ULEBExtraData) {
+ encodeULEB128(Data, OS);
+ }
+ for (auto Data : Opcode.SLEBExtraData) {
+ encodeSLEB128(Data, OS);
+ }
+ if (!Opcode.Symbol.empty()) {
+ OS.write(Opcode.Symbol.data(), Opcode.Symbol.size());
+ OS.write('\0');
+ }
+ }
+}
+
+void MachOWriter::dumpExportEntry(raw_ostream &OS,
+ MachOYAML::ExportEntry &Entry) {
+ encodeSLEB128(Entry.TerminalSize, OS);
+ if (Entry.TerminalSize > 0) {
+ encodeSLEB128(Entry.Flags, OS);
+ if (Entry.Flags & MachO::EXPORT_SYMBOL_FLAGS_REEXPORT) {
+ encodeSLEB128(Entry.Other, OS);
+ OS << Entry.ImportName;
+ OS.write('\0');
+ } else {
+ encodeSLEB128(Entry.Address, OS);
+ if (Entry.Flags & MachO::EXPORT_SYMBOL_FLAGS_STUB_AND_RESOLVER)
+ encodeSLEB128(Entry.Other, OS);
+ }
+ }
+ OS.write(static_cast<uint8_t>(Entry.Children.size()));
+ for (auto EE : Entry.Children) {
+ OS << EE.Name;
+ OS.write('\0');
+ encodeSLEB128(EE.NodeOffset, OS);
+ }
+ for (auto EE : Entry.Children)
+ dumpExportEntry(OS, EE);
+}
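+
+// Note: dumpExportEntry therefore serializes the trie in preorder: each
+// node's terminal payload first, then one (name, node offset) edge per
+// child, then every child node recursively.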
+
+void MachOWriter::writeExportTrie(raw_ostream &OS) {
+ dumpExportEntry(OS, Obj.LinkEdit.ExportTrie);
+}
+
+template <typename NListType>
+void writeNListEntry(MachOYAML::NListEntry &NLE, raw_ostream &OS,
+ bool IsLittleEndian) {
+ NListType ListEntry;
+ ListEntry.n_strx = NLE.n_strx;
+ ListEntry.n_type = NLE.n_type;
+ ListEntry.n_sect = NLE.n_sect;
+ ListEntry.n_desc = NLE.n_desc;
+ ListEntry.n_value = NLE.n_value;
+
+ if (IsLittleEndian != sys::IsLittleEndianHost)
+ MachO::swapStruct(ListEntry);
+ OS.write(reinterpret_cast<const char *>(&ListEntry), sizeof(NListType));
+}
+
+void MachOWriter::writeLinkEditData(raw_ostream &OS) {
+ typedef void (MachOWriter::*writeHandler)(raw_ostream &);
+ typedef std::pair<uint64_t, writeHandler> writeOperation;
+ std::vector<writeOperation> WriteQueue;
+
+ MachO::dyld_info_command *DyldInfoOnlyCmd = nullptr;
+ MachO::symtab_command *SymtabCmd = nullptr;
+ for (auto &LC : Obj.LoadCommands) {
+ switch (LC.Data.load_command_data.cmd) {
+ case MachO::LC_SYMTAB:
+ SymtabCmd = &LC.Data.symtab_command_data;
+ WriteQueue.push_back(
+ std::make_pair(SymtabCmd->symoff, &MachOWriter::writeNameList));
+ WriteQueue.push_back(
+ std::make_pair(SymtabCmd->stroff, &MachOWriter::writeStringTable));
+ break;
+ case MachO::LC_DYLD_INFO_ONLY:
+ DyldInfoOnlyCmd = &LC.Data.dyld_info_command_data;
+ WriteQueue.push_back(std::make_pair(DyldInfoOnlyCmd->rebase_off,
+ &MachOWriter::writeRebaseOpcodes));
+ WriteQueue.push_back(std::make_pair(DyldInfoOnlyCmd->bind_off,
+ &MachOWriter::writeBasicBindOpcodes));
+ WriteQueue.push_back(std::make_pair(DyldInfoOnlyCmd->weak_bind_off,
+ &MachOWriter::writeWeakBindOpcodes));
+ WriteQueue.push_back(std::make_pair(DyldInfoOnlyCmd->lazy_bind_off,
+ &MachOWriter::writeLazyBindOpcodes));
+ WriteQueue.push_back(std::make_pair(DyldInfoOnlyCmd->export_off,
+ &MachOWriter::writeExportTrie));
+ break;
+ }
+ }
+
+ llvm::sort(WriteQueue, [](const writeOperation &a, const writeOperation &b) {
+ return a.first < b.first;
+ });
+
+ for (auto writeOp : WriteQueue) {
+ ZeroToOffset(OS, writeOp.first);
+ (this->*writeOp.second)(OS);
+ }
+}
+
+void MachOWriter::writeRebaseOpcodes(raw_ostream &OS) {
+ MachOYAML::LinkEditData &LinkEdit = Obj.LinkEdit;
+
+ for (auto Opcode : LinkEdit.RebaseOpcodes) {
+ uint8_t OpByte = Opcode.Opcode | Opcode.Imm;
+ OS.write(reinterpret_cast<char *>(&OpByte), 1);
+ for (auto Data : Opcode.ExtraData)
+ encodeULEB128(Data, OS);
+ }
+}
+
+void MachOWriter::writeBasicBindOpcodes(raw_ostream &OS) {
+ writeBindOpcodes(OS, Obj.LinkEdit.BindOpcodes);
+}
+
+void MachOWriter::writeWeakBindOpcodes(raw_ostream &OS) {
+ writeBindOpcodes(OS, Obj.LinkEdit.WeakBindOpcodes);
+}
+
+void MachOWriter::writeLazyBindOpcodes(raw_ostream &OS) {
+ writeBindOpcodes(OS, Obj.LinkEdit.LazyBindOpcodes);
+}
+
+void MachOWriter::writeNameList(raw_ostream &OS) {
+ for (auto NLE : Obj.LinkEdit.NameList) {
+ if (is64Bit)
+ writeNListEntry<MachO::nlist_64>(NLE, OS, Obj.IsLittleEndian);
+ else
+ writeNListEntry<MachO::nlist>(NLE, OS, Obj.IsLittleEndian);
+ }
+}
+
+void MachOWriter::writeStringTable(raw_ostream &OS) {
+ for (auto Str : Obj.LinkEdit.StringTable) {
+ OS.write(Str.data(), Str.size());
+ OS.write('\0');
+ }
+}
+
+class UniversalWriter {
+public:
+ UniversalWriter(yaml::YamlObjectFile &ObjectFile)
+ : ObjectFile(ObjectFile), fileStart(0) {}
+
+ Error writeMachO(raw_ostream &OS);
+
+private:
+ void writeFatHeader(raw_ostream &OS);
+ void writeFatArchs(raw_ostream &OS);
+
+ void ZeroToOffset(raw_ostream &OS, size_t offset);
+
+ yaml::YamlObjectFile &ObjectFile;
+ uint64_t fileStart;
+};
+
+Error UniversalWriter::writeMachO(raw_ostream &OS) {
+ fileStart = OS.tell();
+ if (ObjectFile.MachO) {
+ MachOWriter Writer(*ObjectFile.MachO);
+ return Writer.writeMachO(OS);
+ }
+
+ writeFatHeader(OS);
+ writeFatArchs(OS);
+
+ auto &FatFile = *ObjectFile.FatMachO;
+ if (FatFile.FatArchs.size() < FatFile.Slices.size())
+ return createStringError(
+ errc::invalid_argument,
+ "cannot write 'Slices' if not described in 'FatArches'");
+
+ for (size_t i = 0; i < FatFile.Slices.size(); i++) {
+ ZeroToOffset(OS, FatFile.FatArchs[i].offset);
+ MachOWriter Writer(FatFile.Slices[i]);
+ if (Error Err = Writer.writeMachO(OS))
+ return Err;
+
+ auto SliceEnd = FatFile.FatArchs[i].offset + FatFile.FatArchs[i].size;
+ ZeroToOffset(OS, SliceEnd);
+ }
+
+ return Error::success();
+}
+
+void UniversalWriter::writeFatHeader(raw_ostream &OS) {
+ auto &FatFile = *ObjectFile.FatMachO;
+ MachO::fat_header header;
+ header.magic = FatFile.Header.magic;
+ header.nfat_arch = FatFile.Header.nfat_arch;
+ if (sys::IsLittleEndianHost)
+ swapStruct(header);
+ OS.write(reinterpret_cast<const char *>(&header), sizeof(MachO::fat_header));
+}
+
+template <typename FatArchType>
+FatArchType constructFatArch(MachOYAML::FatArch &Arch) {
+ FatArchType FatArch;
+ FatArch.cputype = Arch.cputype;
+ FatArch.cpusubtype = Arch.cpusubtype;
+ FatArch.offset = Arch.offset;
+ FatArch.size = Arch.size;
+ FatArch.align = Arch.align;
+ return FatArch;
+}
+
+template <typename StructType>
+void writeFatArch(MachOYAML::FatArch &LC, raw_ostream &OS) {}
+
+template <>
+void writeFatArch<MachO::fat_arch>(MachOYAML::FatArch &Arch, raw_ostream &OS) {
+ auto FatArch = constructFatArch<MachO::fat_arch>(Arch);
+ if (sys::IsLittleEndianHost)
+ swapStruct(FatArch);
+ OS.write(reinterpret_cast<const char *>(&FatArch), sizeof(MachO::fat_arch));
+}
+
+template <>
+void writeFatArch<MachO::fat_arch_64>(MachOYAML::FatArch &Arch,
+ raw_ostream &OS) {
+ auto FatArch = constructFatArch<MachO::fat_arch_64>(Arch);
+ FatArch.reserved = Arch.reserved;
+ if (sys::IsLittleEndianHost)
+ swapStruct(FatArch);
+ OS.write(reinterpret_cast<const char *>(&FatArch),
+ sizeof(MachO::fat_arch_64));
+}
+
+void UniversalWriter::writeFatArchs(raw_ostream &OS) {
+ auto &FatFile = *ObjectFile.FatMachO;
+ bool is64Bit = FatFile.Header.magic == MachO::FAT_MAGIC_64;
+ for (auto Arch : FatFile.FatArchs) {
+ if (is64Bit)
+ writeFatArch<MachO::fat_arch_64>(Arch, OS);
+ else
+ writeFatArch<MachO::fat_arch>(Arch, OS);
+ }
+}
+
+void UniversalWriter::ZeroToOffset(raw_ostream &OS, size_t Offset) {
+ auto currOffset = OS.tell() - fileStart;
+ if (currOffset < Offset)
+ ZeroFillBytes(OS, Offset - currOffset);
+}
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace yaml {
+
+bool yaml2macho(YamlObjectFile &Doc, raw_ostream &Out, ErrorHandler EH) {
+ UniversalWriter Writer(Doc);
+ if (Error Err = Writer.writeMachO(Out)) {
+ handleAllErrors(std::move(Err),
+ [&](const ErrorInfoBase &Err) { EH(Err.message()); });
+ return false;
+ }
+ return true;
+}
+
+} // namespace yaml
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/MachOYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/MachOYAML.cpp
new file mode 100644
index 00000000000..5a27d37cb72
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/MachOYAML.cpp
@@ -0,0 +1,589 @@
+//===- MachOYAML.cpp - MachO YAMLIO implementation ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of MachO.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/MachOYAML.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/YAMLTraits.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cinttypes>
+#include <cstdint>
+#include <cstring>
+
+namespace llvm {
+
+MachOYAML::LoadCommand::~LoadCommand() = default;
+
+bool MachOYAML::LinkEditData::isEmpty() const {
+ return 0 ==
+ RebaseOpcodes.size() + BindOpcodes.size() + WeakBindOpcodes.size() +
+ LazyBindOpcodes.size() + ExportTrie.Children.size() +
+ NameList.size() + StringTable.size();
+}
+
+namespace yaml {
+
+void ScalarTraits<char_16>::output(const char_16 &Val, void *,
+ raw_ostream &Out) {
+ auto Len = strnlen(&Val[0], 16);
+ Out << StringRef(&Val[0], Len);
+}
+
+StringRef ScalarTraits<char_16>::input(StringRef Scalar, void *, char_16 &Val) {
+ // Copy at most 16 bytes: longer scalars are truncated, shorter ones are
+ // zero-padded below.
+ size_t CopySize = Scalar.size() < 16 ? Scalar.size() : 16;
+ memcpy((void *)Val, Scalar.data(), CopySize);
+
+ if (Scalar.size() < 16) {
+ memset((void *)&Val[Scalar.size()], 0, 16 - Scalar.size());
+ }
+
+ return StringRef();
+}
+
+QuotingType ScalarTraits<char_16>::mustQuote(StringRef S) {
+ return needsQuotes(S);
+}
+
+void ScalarTraits<uuid_t>::output(const uuid_t &Val, void *, raw_ostream &Out) {
+ Out.write_uuid(Val);
+}
+
+StringRef ScalarTraits<uuid_t>::input(StringRef Scalar, void *, uuid_t &Val) {
+ size_t OutIdx = 0;
+ for (size_t Idx = 0; Idx < Scalar.size(); ++Idx) {
+ if (Scalar[Idx] == '-' || OutIdx >= 16)
+ continue;
+ unsigned long long TempInt;
+ if (getAsUnsignedInteger(Scalar.slice(Idx, Idx + 2), 16, TempInt))
+ return "invalid number";
+ if (TempInt > 0xFF)
+ return "out of range number";
+ Val[OutIdx] = static_cast<uint8_t>(TempInt);
+ ++Idx; // Increment Idx an extra time because we consume two chars per byte.
+ ++OutIdx;
+ }
+ return StringRef();
+}
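+
+// Example (hypothetical value): "00112233-4455-6677-8899-AABBCCDDEEFF"
+// parses to the bytes 00 11 22 33 44 55 66 77 88 99 AA BB CC DD EE FF;
+// dashes are skipped and every two hex digits yield one byte.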
+
+QuotingType ScalarTraits<uuid_t>::mustQuote(StringRef S) {
+ return needsQuotes(S);
+}
+
+void MappingTraits<MachOYAML::FileHeader>::mapping(
+ IO &IO, MachOYAML::FileHeader &FileHdr) {
+ IO.mapRequired("magic", FileHdr.magic);
+ IO.mapRequired("cputype", FileHdr.cputype);
+ IO.mapRequired("cpusubtype", FileHdr.cpusubtype);
+ IO.mapRequired("filetype", FileHdr.filetype);
+ IO.mapRequired("ncmds", FileHdr.ncmds);
+ IO.mapRequired("sizeofcmds", FileHdr.sizeofcmds);
+ IO.mapRequired("flags", FileHdr.flags);
+ if (FileHdr.magic == MachO::MH_MAGIC_64 ||
+ FileHdr.magic == MachO::MH_CIGAM_64)
+ IO.mapRequired("reserved", FileHdr.reserved);
+}
+
+void MappingTraits<MachOYAML::Object>::mapping(IO &IO,
+ MachOYAML::Object &Object) {
+ // Set the context if it isn't set already, and tag the document as
+ // !mach-o. Fat files use a different tag so the two can be differentiated.
+ if (!IO.getContext()) {
+ IO.setContext(&Object);
+ }
+ IO.mapTag("!mach-o", true);
+ IO.mapOptional("IsLittleEndian", Object.IsLittleEndian,
+ sys::IsLittleEndianHost);
+ Object.DWARF.IsLittleEndian = Object.IsLittleEndian;
+
+ IO.mapRequired("FileHeader", Object.Header);
+ Object.DWARF.Is64BitAddrSize = Object.Header.magic == MachO::MH_MAGIC_64 ||
+ Object.Header.magic == MachO::MH_CIGAM_64;
+ IO.mapOptional("LoadCommands", Object.LoadCommands);
+ if (!Object.LinkEdit.isEmpty() || !IO.outputting())
+ IO.mapOptional("LinkEditData", Object.LinkEdit);
+
+ if (!Object.DWARF.isEmpty() || !IO.outputting())
+ IO.mapOptional("DWARF", Object.DWARF);
+
+ if (IO.getContext() == &Object)
+ IO.setContext(nullptr);
+}
+
+void MappingTraits<MachOYAML::FatHeader>::mapping(
+ IO &IO, MachOYAML::FatHeader &FatHeader) {
+ IO.mapRequired("magic", FatHeader.magic);
+ IO.mapRequired("nfat_arch", FatHeader.nfat_arch);
+}
+
+void MappingTraits<MachOYAML::FatArch>::mapping(IO &IO,
+ MachOYAML::FatArch &FatArch) {
+ IO.mapRequired("cputype", FatArch.cputype);
+ IO.mapRequired("cpusubtype", FatArch.cpusubtype);
+ IO.mapRequired("offset", FatArch.offset);
+ IO.mapRequired("size", FatArch.size);
+ IO.mapRequired("align", FatArch.align);
+ IO.mapOptional("reserved", FatArch.reserved,
+ static_cast<llvm::yaml::Hex32>(0));
+}
+
+void MappingTraits<MachOYAML::UniversalBinary>::mapping(
+ IO &IO, MachOYAML::UniversalBinary &UniversalBinary) {
+ if (!IO.getContext()) {
+ IO.setContext(&UniversalBinary);
+ IO.mapTag("!fat-mach-o", true);
+ }
+ IO.mapRequired("FatHeader", UniversalBinary.Header);
+ IO.mapRequired("FatArchs", UniversalBinary.FatArchs);
+ IO.mapRequired("Slices", UniversalBinary.Slices);
+
+ if (IO.getContext() == &UniversalBinary)
+ IO.setContext(nullptr);
+}
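+
+// A minimal sketch (hypothetical values) of a fat file description; each
+// entry in "Slices" is a full Mach-O object placed at the offset given by
+// the matching "FatArchs" entry:
+//
+//   FatHeader:
+//     magic:     0xCAFEBABE
+//     nfat_arch: 1
+//   FatArchs:
+//     - cputype:    0x01000007
+//       cpusubtype: 0x3
+//       offset:     0x1000
+//       size:       0x100
+//       align:      12
+//   Slices:
+//     - # one MachOYAML::Object per arch (FileHeader, LoadCommands, etc.)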
+
+void MappingTraits<MachOYAML::LinkEditData>::mapping(
+ IO &IO, MachOYAML::LinkEditData &LinkEditData) {
+ IO.mapOptional("RebaseOpcodes", LinkEditData.RebaseOpcodes);
+ IO.mapOptional("BindOpcodes", LinkEditData.BindOpcodes);
+ IO.mapOptional("WeakBindOpcodes", LinkEditData.WeakBindOpcodes);
+ IO.mapOptional("LazyBindOpcodes", LinkEditData.LazyBindOpcodes);
+ if (!LinkEditData.ExportTrie.Children.empty() || !IO.outputting())
+ IO.mapOptional("ExportTrie", LinkEditData.ExportTrie);
+ IO.mapOptional("NameList", LinkEditData.NameList);
+ IO.mapOptional("StringTable", LinkEditData.StringTable);
+}
+
+void MappingTraits<MachOYAML::RebaseOpcode>::mapping(
+ IO &IO, MachOYAML::RebaseOpcode &RebaseOpcode) {
+ IO.mapRequired("Opcode", RebaseOpcode.Opcode);
+ IO.mapRequired("Imm", RebaseOpcode.Imm);
+ IO.mapOptional("ExtraData", RebaseOpcode.ExtraData);
+}
+
+void MappingTraits<MachOYAML::BindOpcode>::mapping(
+ IO &IO, MachOYAML::BindOpcode &BindOpcode) {
+ IO.mapRequired("Opcode", BindOpcode.Opcode);
+ IO.mapRequired("Imm", BindOpcode.Imm);
+ IO.mapOptional("ULEBExtraData", BindOpcode.ULEBExtraData);
+ IO.mapOptional("SLEBExtraData", BindOpcode.SLEBExtraData);
+ IO.mapOptional("Symbol", BindOpcode.Symbol);
+}
+
+void MappingTraits<MachOYAML::ExportEntry>::mapping(
+ IO &IO, MachOYAML::ExportEntry &ExportEntry) {
+ IO.mapRequired("TerminalSize", ExportEntry.TerminalSize);
+ IO.mapOptional("NodeOffset", ExportEntry.NodeOffset);
+ IO.mapOptional("Name", ExportEntry.Name);
+ IO.mapOptional("Flags", ExportEntry.Flags);
+ IO.mapOptional("Address", ExportEntry.Address);
+ IO.mapOptional("Other", ExportEntry.Other);
+ IO.mapOptional("ImportName", ExportEntry.ImportName);
+ IO.mapOptional("Children", ExportEntry.Children);
+}
+
+void MappingTraits<MachOYAML::NListEntry>::mapping(
+ IO &IO, MachOYAML::NListEntry &NListEntry) {
+ IO.mapRequired("n_strx", NListEntry.n_strx);
+ IO.mapRequired("n_type", NListEntry.n_type);
+ IO.mapRequired("n_sect", NListEntry.n_sect);
+ IO.mapRequired("n_desc", NListEntry.n_desc);
+ IO.mapRequired("n_value", NListEntry.n_value);
+}
+
+template <typename StructType>
+void mapLoadCommandData(IO &IO, MachOYAML::LoadCommand &LoadCommand) {}
+
+template <>
+void mapLoadCommandData<MachO::segment_command>(
+ IO &IO, MachOYAML::LoadCommand &LoadCommand) {
+ IO.mapOptional("Sections", LoadCommand.Sections);
+}
+
+template <>
+void mapLoadCommandData<MachO::segment_command_64>(
+ IO &IO, MachOYAML::LoadCommand &LoadCommand) {
+ IO.mapOptional("Sections", LoadCommand.Sections);
+}
+
+template <>
+void mapLoadCommandData<MachO::dylib_command>(
+ IO &IO, MachOYAML::LoadCommand &LoadCommand) {
+ IO.mapOptional("PayloadString", LoadCommand.PayloadString);
+}
+
+template <>
+void mapLoadCommandData<MachO::rpath_command>(
+ IO &IO, MachOYAML::LoadCommand &LoadCommand) {
+ IO.mapOptional("PayloadString", LoadCommand.PayloadString);
+}
+
+template <>
+void mapLoadCommandData<MachO::dylinker_command>(
+ IO &IO, MachOYAML::LoadCommand &LoadCommand) {
+ IO.mapOptional("PayloadString", LoadCommand.PayloadString);
+}
+
+template <>
+void mapLoadCommandData<MachO::build_version_command>(
+ IO &IO, MachOYAML::LoadCommand &LoadCommand) {
+ IO.mapOptional("Tools", LoadCommand.Tools);
+}
+
+void MappingTraits<MachOYAML::LoadCommand>::mapping(
+ IO &IO, MachOYAML::LoadCommand &LoadCommand) {
+ MachO::LoadCommandType TempCmd = static_cast<MachO::LoadCommandType>(
+ LoadCommand.Data.load_command_data.cmd);
+ IO.mapRequired("cmd", TempCmd);
+ LoadCommand.Data.load_command_data.cmd = TempCmd;
+ IO.mapRequired("cmdsize", LoadCommand.Data.load_command_data.cmdsize);
+
+#define HANDLE_LOAD_COMMAND(LCName, LCValue, LCStruct) \
+ case MachO::LCName: \
+ MappingTraits<MachO::LCStruct>::mapping(IO, \
+ LoadCommand.Data.LCStruct##_data); \
+ mapLoadCommandData<MachO::LCStruct>(IO, LoadCommand); \
+ break;
+
+ switch (LoadCommand.Data.load_command_data.cmd) {
+#include "llvm/BinaryFormat/MachO.def"
+ }
+ IO.mapOptional("PayloadBytes", LoadCommand.PayloadBytes);
+ IO.mapOptional("ZeroPadBytes", LoadCommand.ZeroPadBytes, (uint64_t)0ull);
+}
+
+void MappingTraits<MachO::dyld_info_command>::mapping(
+ IO &IO, MachO::dyld_info_command &LoadCommand) {
+ IO.mapRequired("rebase_off", LoadCommand.rebase_off);
+ IO.mapRequired("rebase_size", LoadCommand.rebase_size);
+ IO.mapRequired("bind_off", LoadCommand.bind_off);
+ IO.mapRequired("bind_size", LoadCommand.bind_size);
+ IO.mapRequired("weak_bind_off", LoadCommand.weak_bind_off);
+ IO.mapRequired("weak_bind_size", LoadCommand.weak_bind_size);
+ IO.mapRequired("lazy_bind_off", LoadCommand.lazy_bind_off);
+ IO.mapRequired("lazy_bind_size", LoadCommand.lazy_bind_size);
+ IO.mapRequired("export_off", LoadCommand.export_off);
+ IO.mapRequired("export_size", LoadCommand.export_size);
+}
+
+void MappingTraits<MachOYAML::Relocation>::mapping(
+ IO &IO, MachOYAML::Relocation &Relocation) {
+ IO.mapRequired("address", Relocation.address);
+ IO.mapRequired("symbolnum", Relocation.symbolnum);
+ IO.mapRequired("pcrel", Relocation.is_pcrel);
+ IO.mapRequired("length", Relocation.length);
+ IO.mapRequired("extern", Relocation.is_extern);
+ IO.mapRequired("type", Relocation.type);
+ IO.mapRequired("scattered", Relocation.is_scattered);
+ IO.mapRequired("value", Relocation.value);
+}
+
+void MappingTraits<MachOYAML::Section>::mapping(IO &IO,
+ MachOYAML::Section &Section) {
+ IO.mapRequired("sectname", Section.sectname);
+ IO.mapRequired("segname", Section.segname);
+ IO.mapRequired("addr", Section.addr);
+ IO.mapRequired("size", Section.size);
+ IO.mapRequired("offset", Section.offset);
+ IO.mapRequired("align", Section.align);
+ IO.mapRequired("reloff", Section.reloff);
+ IO.mapRequired("nreloc", Section.nreloc);
+ IO.mapRequired("flags", Section.flags);
+ IO.mapRequired("reserved1", Section.reserved1);
+ IO.mapRequired("reserved2", Section.reserved2);
+ IO.mapOptional("reserved3", Section.reserved3);
+ IO.mapOptional("content", Section.content);
+ IO.mapOptional("relocations", Section.relocations);
+}
+
+std::string
+MappingTraits<MachOYAML::Section>::validate(IO &IO,
+ MachOYAML::Section &Section) {
+ if (Section.content && Section.size < Section.content->binary_size())
+ return "Section size must be greater than or equal to the content size";
+ return "";
+}
+
+void MappingTraits<MachO::build_tool_version>::mapping(
+ IO &IO, MachO::build_tool_version &tool) {
+ IO.mapRequired("tool", tool.tool);
+ IO.mapRequired("version", tool.version);
+}
+
+void MappingTraits<MachO::dylib>::mapping(IO &IO, MachO::dylib &DylibStruct) {
+ IO.mapRequired("name", DylibStruct.name);
+ IO.mapRequired("timestamp", DylibStruct.timestamp);
+ IO.mapRequired("current_version", DylibStruct.current_version);
+ IO.mapRequired("compatibility_version", DylibStruct.compatibility_version);
+}
+
+void MappingTraits<MachO::dylib_command>::mapping(
+ IO &IO, MachO::dylib_command &LoadCommand) {
+ IO.mapRequired("dylib", LoadCommand.dylib);
+}
+
+void MappingTraits<MachO::dylinker_command>::mapping(
+ IO &IO, MachO::dylinker_command &LoadCommand) {
+ IO.mapRequired("name", LoadCommand.name);
+}
+
+void MappingTraits<MachO::dysymtab_command>::mapping(
+ IO &IO, MachO::dysymtab_command &LoadCommand) {
+ IO.mapRequired("ilocalsym", LoadCommand.ilocalsym);
+ IO.mapRequired("nlocalsym", LoadCommand.nlocalsym);
+ IO.mapRequired("iextdefsym", LoadCommand.iextdefsym);
+ IO.mapRequired("nextdefsym", LoadCommand.nextdefsym);
+ IO.mapRequired("iundefsym", LoadCommand.iundefsym);
+ IO.mapRequired("nundefsym", LoadCommand.nundefsym);
+ IO.mapRequired("tocoff", LoadCommand.tocoff);
+ IO.mapRequired("ntoc", LoadCommand.ntoc);
+ IO.mapRequired("modtaboff", LoadCommand.modtaboff);
+ IO.mapRequired("nmodtab", LoadCommand.nmodtab);
+ IO.mapRequired("extrefsymoff", LoadCommand.extrefsymoff);
+ IO.mapRequired("nextrefsyms", LoadCommand.nextrefsyms);
+ IO.mapRequired("indirectsymoff", LoadCommand.indirectsymoff);
+ IO.mapRequired("nindirectsyms", LoadCommand.nindirectsyms);
+ IO.mapRequired("extreloff", LoadCommand.extreloff);
+ IO.mapRequired("nextrel", LoadCommand.nextrel);
+ IO.mapRequired("locreloff", LoadCommand.locreloff);
+ IO.mapRequired("nlocrel", LoadCommand.nlocrel);
+}
+
+void MappingTraits<MachO::encryption_info_command>::mapping(
+ IO &IO, MachO::encryption_info_command &LoadCommand) {
+ IO.mapRequired("cryptoff", LoadCommand.cryptoff);
+ IO.mapRequired("cryptsize", LoadCommand.cryptsize);
+ IO.mapRequired("cryptid", LoadCommand.cryptid);
+}
+
+void MappingTraits<MachO::encryption_info_command_64>::mapping(
+ IO &IO, MachO::encryption_info_command_64 &LoadCommand) {
+ IO.mapRequired("cryptoff", LoadCommand.cryptoff);
+ IO.mapRequired("cryptsize", LoadCommand.cryptsize);
+ IO.mapRequired("cryptid", LoadCommand.cryptid);
+ IO.mapRequired("pad", LoadCommand.pad);
+}
+
+void MappingTraits<MachO::entry_point_command>::mapping(
+ IO &IO, MachO::entry_point_command &LoadCommand) {
+ IO.mapRequired("entryoff", LoadCommand.entryoff);
+ IO.mapRequired("stacksize", LoadCommand.stacksize);
+}
+
+void MappingTraits<MachO::fvmfile_command>::mapping(
+ IO &IO, MachO::fvmfile_command &LoadCommand) {
+ IO.mapRequired("name", LoadCommand.name);
+ IO.mapRequired("header_addr", LoadCommand.header_addr);
+}
+
+void MappingTraits<MachO::fvmlib>::mapping(IO &IO, MachO::fvmlib &FVMLib) {
+ IO.mapRequired("name", FVMLib.name);
+ IO.mapRequired("minor_version", FVMLib.minor_version);
+ IO.mapRequired("header_addr", FVMLib.header_addr);
+}
+
+void MappingTraits<MachO::fvmlib_command>::mapping(
+ IO &IO, MachO::fvmlib_command &LoadCommand) {
+ IO.mapRequired("fvmlib", LoadCommand.fvmlib);
+}
+
+void MappingTraits<MachO::ident_command>::mapping(
+ IO &IO, MachO::ident_command &LoadCommand) {}
+
+void MappingTraits<MachO::linkedit_data_command>::mapping(
+ IO &IO, MachO::linkedit_data_command &LoadCommand) {
+ IO.mapRequired("dataoff", LoadCommand.dataoff);
+ IO.mapRequired("datasize", LoadCommand.datasize);
+}
+
+void MappingTraits<MachO::linker_option_command>::mapping(
+ IO &IO, MachO::linker_option_command &LoadCommand) {
+ IO.mapRequired("count", LoadCommand.count);
+}
+
+void MappingTraits<MachO::prebind_cksum_command>::mapping(
+ IO &IO, MachO::prebind_cksum_command &LoadCommand) {
+ IO.mapRequired("cksum", LoadCommand.cksum);
+}
+
+void MappingTraits<MachO::load_command>::mapping(
+ IO &IO, MachO::load_command &LoadCommand) {}
+
+void MappingTraits<MachO::prebound_dylib_command>::mapping(
+ IO &IO, MachO::prebound_dylib_command &LoadCommand) {
+ IO.mapRequired("name", LoadCommand.name);
+ IO.mapRequired("nmodules", LoadCommand.nmodules);
+ IO.mapRequired("linked_modules", LoadCommand.linked_modules);
+}
+
+void MappingTraits<MachO::routines_command>::mapping(
+ IO &IO, MachO::routines_command &LoadCommand) {
+ IO.mapRequired("init_address", LoadCommand.init_address);
+ IO.mapRequired("init_module", LoadCommand.init_module);
+ IO.mapRequired("reserved1", LoadCommand.reserved1);
+ IO.mapRequired("reserved2", LoadCommand.reserved2);
+ IO.mapRequired("reserved3", LoadCommand.reserved3);
+ IO.mapRequired("reserved4", LoadCommand.reserved4);
+ IO.mapRequired("reserved5", LoadCommand.reserved5);
+ IO.mapRequired("reserved6", LoadCommand.reserved6);
+}
+
+void MappingTraits<MachO::routines_command_64>::mapping(
+ IO &IO, MachO::routines_command_64 &LoadCommand) {
+ IO.mapRequired("init_address", LoadCommand.init_address);
+ IO.mapRequired("init_module", LoadCommand.init_module);
+ IO.mapRequired("reserved1", LoadCommand.reserved1);
+ IO.mapRequired("reserved2", LoadCommand.reserved2);
+ IO.mapRequired("reserved3", LoadCommand.reserved3);
+ IO.mapRequired("reserved4", LoadCommand.reserved4);
+ IO.mapRequired("reserved5", LoadCommand.reserved5);
+ IO.mapRequired("reserved6", LoadCommand.reserved6);
+}
+
+void MappingTraits<MachO::rpath_command>::mapping(
+ IO &IO, MachO::rpath_command &LoadCommand) {
+ IO.mapRequired("path", LoadCommand.path);
+}
+
+void MappingTraits<MachO::section>::mapping(IO &IO, MachO::section &Section) {
+ IO.mapRequired("sectname", Section.sectname);
+ IO.mapRequired("segname", Section.segname);
+ IO.mapRequired("addr", Section.addr);
+ IO.mapRequired("size", Section.size);
+ IO.mapRequired("offset", Section.offset);
+ IO.mapRequired("align", Section.align);
+ IO.mapRequired("reloff", Section.reloff);
+ IO.mapRequired("nreloc", Section.nreloc);
+ IO.mapRequired("flags", Section.flags);
+ IO.mapRequired("reserved1", Section.reserved1);
+ IO.mapRequired("reserved2", Section.reserved2);
+}
+
+void MappingTraits<MachO::section_64>::mapping(IO &IO,
+ MachO::section_64 &Section) {
+ IO.mapRequired("sectname", Section.sectname);
+ IO.mapRequired("segname", Section.segname);
+ IO.mapRequired("addr", Section.addr);
+ IO.mapRequired("size", Section.size);
+ IO.mapRequired("offset", Section.offset);
+ IO.mapRequired("align", Section.align);
+ IO.mapRequired("reloff", Section.reloff);
+ IO.mapRequired("nreloc", Section.nreloc);
+ IO.mapRequired("flags", Section.flags);
+ IO.mapRequired("reserved1", Section.reserved1);
+ IO.mapRequired("reserved2", Section.reserved2);
+ IO.mapRequired("reserved3", Section.reserved3);
+}
+
+void MappingTraits<MachO::segment_command>::mapping(
+ IO &IO, MachO::segment_command &LoadCommand) {
+ IO.mapRequired("segname", LoadCommand.segname);
+ IO.mapRequired("vmaddr", LoadCommand.vmaddr);
+ IO.mapRequired("vmsize", LoadCommand.vmsize);
+ IO.mapRequired("fileoff", LoadCommand.fileoff);
+ IO.mapRequired("filesize", LoadCommand.filesize);
+ IO.mapRequired("maxprot", LoadCommand.maxprot);
+ IO.mapRequired("initprot", LoadCommand.initprot);
+ IO.mapRequired("nsects", LoadCommand.nsects);
+ IO.mapRequired("flags", LoadCommand.flags);
+}
+
+void MappingTraits<MachO::segment_command_64>::mapping(
+ IO &IO, MachO::segment_command_64 &LoadCommand) {
+ IO.mapRequired("segname", LoadCommand.segname);
+ IO.mapRequired("vmaddr", LoadCommand.vmaddr);
+ IO.mapRequired("vmsize", LoadCommand.vmsize);
+ IO.mapRequired("fileoff", LoadCommand.fileoff);
+ IO.mapRequired("filesize", LoadCommand.filesize);
+ IO.mapRequired("maxprot", LoadCommand.maxprot);
+ IO.mapRequired("initprot", LoadCommand.initprot);
+ IO.mapRequired("nsects", LoadCommand.nsects);
+ IO.mapRequired("flags", LoadCommand.flags);
+}
+
+void MappingTraits<MachO::source_version_command>::mapping(
+ IO &IO, MachO::source_version_command &LoadCommand) {
+ IO.mapRequired("version", LoadCommand.version);
+}
+
+void MappingTraits<MachO::sub_client_command>::mapping(
+ IO &IO, MachO::sub_client_command &LoadCommand) {
+ IO.mapRequired("client", LoadCommand.client);
+}
+
+void MappingTraits<MachO::sub_framework_command>::mapping(
+ IO &IO, MachO::sub_framework_command &LoadCommand) {
+ IO.mapRequired("umbrella", LoadCommand.umbrella);
+}
+
+void MappingTraits<MachO::sub_library_command>::mapping(
+ IO &IO, MachO::sub_library_command &LoadCommand) {
+ IO.mapRequired("sub_library", LoadCommand.sub_library);
+}
+
+void MappingTraits<MachO::sub_umbrella_command>::mapping(
+ IO &IO, MachO::sub_umbrella_command &LoadCommand) {
+ IO.mapRequired("sub_umbrella", LoadCommand.sub_umbrella);
+}
+
+void MappingTraits<MachO::symseg_command>::mapping(
+ IO &IO, MachO::symseg_command &LoadCommand) {
+ IO.mapRequired("offset", LoadCommand.offset);
+ IO.mapRequired("size", LoadCommand.size);
+}
+
+void MappingTraits<MachO::symtab_command>::mapping(
+ IO &IO, MachO::symtab_command &LoadCommand) {
+ IO.mapRequired("symoff", LoadCommand.symoff);
+ IO.mapRequired("nsyms", LoadCommand.nsyms);
+ IO.mapRequired("stroff", LoadCommand.stroff);
+ IO.mapRequired("strsize", LoadCommand.strsize);
+}
+
+void MappingTraits<MachO::thread_command>::mapping(
+ IO &IO, MachO::thread_command &LoadCommand) {}
+
+void MappingTraits<MachO::twolevel_hints_command>::mapping(
+ IO &IO, MachO::twolevel_hints_command &LoadCommand) {
+ IO.mapRequired("offset", LoadCommand.offset);
+ IO.mapRequired("nhints", LoadCommand.nhints);
+}
+
+void MappingTraits<MachO::uuid_command>::mapping(
+ IO &IO, MachO::uuid_command &LoadCommand) {
+ IO.mapRequired("uuid", LoadCommand.uuid);
+}
+
+void MappingTraits<MachO::version_min_command>::mapping(
+ IO &IO, MachO::version_min_command &LoadCommand) {
+ IO.mapRequired("version", LoadCommand.version);
+ IO.mapRequired("sdk", LoadCommand.sdk);
+}
+
+void MappingTraits<MachO::note_command>::mapping(
+ IO &IO, MachO::note_command &LoadCommand) {
+ IO.mapRequired("data_owner", LoadCommand.data_owner);
+ IO.mapRequired("offset", LoadCommand.offset);
+ IO.mapRequired("size", LoadCommand.size);
+}
+
+void MappingTraits<MachO::build_version_command>::mapping(
+ IO &IO, MachO::build_version_command &LoadCommand) {
+ IO.mapRequired("platform", LoadCommand.platform);
+ IO.mapRequired("minos", LoadCommand.minos);
+ IO.mapRequired("sdk", LoadCommand.sdk);
+ IO.mapRequired("ntools", LoadCommand.ntools);
+}
+
+} // end namespace yaml
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/MinidumpEmitter.cpp b/contrib/libs/llvm12/lib/ObjectYAML/MinidumpEmitter.cpp
new file mode 100644
index 00000000000..bbfd2cd8cba
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/MinidumpEmitter.cpp
@@ -0,0 +1,247 @@
+//===- MinidumpEmitter.cpp - Convert a YAML file to a minidump file ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/MinidumpYAML.h"
+#include "llvm/ObjectYAML/yaml2obj.h"
+#include "llvm/Support/ConvertUTF.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::minidump;
+using namespace llvm::MinidumpYAML;
+
+namespace {
+/// A helper class to manage the placement of various structures into the final
+/// minidump binary. Space for objects can be allocated via the various
+/// allocate* methods, while the final minidump file is written by calling the
+/// writeTo method. The plain allocation functions take a reference to the data
+/// to be written (so the data must stay alive until writeTo is called), while
+/// the "New" versions allocate the data in an allocator-managed buffer that
+/// lives until the allocator object is destroyed. With both kinds of
+/// functions, the data for which space has been "allocated" can still be
+/// modified up until the final writeTo call, which is useful for "linking"
+/// the allocated structures via their offsets.
+class BlobAllocator {
+public:
+ size_t tell() const { return NextOffset; }
+
+ size_t allocateCallback(size_t Size,
+ std::function<void(raw_ostream &)> Callback) {
+ size_t Offset = NextOffset;
+ NextOffset += Size;
+ Callbacks.push_back(std::move(Callback));
+ return Offset;
+ }
+
+ size_t allocateBytes(ArrayRef<uint8_t> Data) {
+ return allocateCallback(
+ Data.size(), [Data](raw_ostream &OS) { OS << toStringRef(Data); });
+ }
+
+ size_t allocateBytes(yaml::BinaryRef Data) {
+ return allocateCallback(Data.binary_size(), [Data](raw_ostream &OS) {
+ Data.writeAsBinary(OS);
+ });
+ }
+
+ template <typename T> size_t allocateArray(ArrayRef<T> Data) {
+ return allocateBytes({reinterpret_cast<const uint8_t *>(Data.data()),
+ sizeof(T) * Data.size()});
+ }
+
+ template <typename T, typename RangeType>
+ std::pair<size_t, MutableArrayRef<T>>
+ allocateNewArray(const iterator_range<RangeType> &Range);
+
+ template <typename T> size_t allocateObject(const T &Data) {
+ return allocateArray(makeArrayRef(Data));
+ }
+
+ template <typename T, typename... Types>
+ std::pair<size_t, T *> allocateNewObject(Types &&... Args) {
+ T *Object = new (Temporaries.Allocate<T>()) T(std::forward<Types>(Args)...);
+ return {allocateObject(*Object), Object};
+ }
+
+ size_t allocateString(StringRef Str);
+
+ void writeTo(raw_ostream &OS) const;
+
+private:
+ size_t NextOffset = 0;
+
+ BumpPtrAllocator Temporaries;
+ std::vector<std::function<void(raw_ostream &)>> Callbacks;
+};
+} // namespace
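+// A minimal usage sketch of BlobAllocator (illustrative only, not part of the
+// upstream file). The offset returned by an allocate* call is fixed
+// immediately, but the referenced data stays mutable until writeTo():
+//
+//   BlobAllocator File;
+//   support::ulittle32_t Count(0);
+//   size_t Off = File.allocateObject(Count); // reserves 4 bytes at Off
+//   Count = 42;                              // still allowed; nothing written
+//   File.writeTo(OS);                        // emits 42 at offset Off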
+
+template <typename T, typename RangeType>
+std::pair<size_t, MutableArrayRef<T>>
+BlobAllocator::allocateNewArray(const iterator_range<RangeType> &Range) {
+ size_t Num = std::distance(Range.begin(), Range.end());
+ MutableArrayRef<T> Array(Temporaries.Allocate<T>(Num), Num);
+ std::uninitialized_copy(Range.begin(), Range.end(), Array.begin());
+ return {allocateArray(Array), Array};
+}
+
+size_t BlobAllocator::allocateString(StringRef Str) {
+ SmallVector<UTF16, 32> WStr;
+ bool OK = convertUTF8ToUTF16String(Str, WStr);
+ assert(OK && "Invalid UTF8 in Str?");
+ (void)OK;
+
+ // The UTF-16 string is null-terminated, but the terminator is not counted
+ // in the string size.
+ WStr.push_back(0);
+ size_t Result =
+ allocateNewObject<support::ulittle32_t>(2 * (WStr.size() - 1)).first;
+ allocateNewArray<support::ulittle16_t>(make_range(WStr.begin(), WStr.end()));
+ return Result;
+}
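+// Worked example of the layout produced above (illustrative): for Str = "ab",
+// WStr becomes {'a', 'b', 0}, so a ulittle32_t length of 4 is emitted (two
+// bytes per UTF-16 unit, terminator excluded), followed by the three UTF-16
+// units themselves (six bytes including the terminator).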
+
+void BlobAllocator::writeTo(raw_ostream &OS) const {
+ size_t BeginOffset = OS.tell();
+ for (const auto &Callback : Callbacks)
+ Callback(OS);
+ assert(OS.tell() == BeginOffset + NextOffset &&
+ "Callbacks wrote an unexpected number of bytes.");
+ (void)BeginOffset;
+}
+
+static LocationDescriptor layout(BlobAllocator &File, yaml::BinaryRef Data) {
+ return {support::ulittle32_t(Data.binary_size()),
+ support::ulittle32_t(File.allocateBytes(Data))};
+}
+
+static size_t layout(BlobAllocator &File, MinidumpYAML::ExceptionStream &S) {
+ File.allocateObject(S.MDExceptionStream);
+
+ size_t DataEnd = File.tell();
+
+ // Lay out the thread context data (which is not a part of the stream).
+ // TODO: This usually (always?) matches the thread context of the
+ // corresponding thread, and may overlap memory regions as well. We could
+ // add a level of indirection to the MinidumpYAML format (like an array of
+ // Blobs that the LocationDescriptors index into) to be able to distinguish
+ // the cases where location descriptions overlap vs happen to reference
+ // identical data.
+ S.MDExceptionStream.ThreadContext = layout(File, S.ThreadContext);
+
+ return DataEnd;
+}
+
+static void layout(BlobAllocator &File, MemoryListStream::entry_type &Range) {
+ Range.Entry.Memory = layout(File, Range.Content);
+}
+
+static void layout(BlobAllocator &File, ModuleListStream::entry_type &M) {
+ M.Entry.ModuleNameRVA = File.allocateString(M.Name);
+
+ M.Entry.CvRecord = layout(File, M.CvRecord);
+ M.Entry.MiscRecord = layout(File, M.MiscRecord);
+}
+
+static void layout(BlobAllocator &File, ThreadListStream::entry_type &T) {
+ T.Entry.Stack.Memory = layout(File, T.Stack);
+ T.Entry.Context = layout(File, T.Context);
+}
+
+template <typename EntryT>
+static size_t layout(BlobAllocator &File,
+ MinidumpYAML::detail::ListStream<EntryT> &S) {
+
+ File.allocateNewObject<support::ulittle32_t>(S.Entries.size());
+ for (auto &E : S.Entries)
+ File.allocateObject(E.Entry);
+
+ size_t DataEnd = File.tell();
+
+ // Lay out the auxiliary data (which is not a part of the stream).
+ for (auto &E : S.Entries)
+ layout(File, E);
+
+ return DataEnd;
+}
+
+static Directory layout(BlobAllocator &File, Stream &S) {
+ Directory Result;
+ Result.Type = S.Type;
+ Result.Location.RVA = File.tell();
+ Optional<size_t> DataEnd;
+ switch (S.Kind) {
+ case Stream::StreamKind::Exception:
+ DataEnd = layout(File, cast<MinidumpYAML::ExceptionStream>(S));
+ break;
+ case Stream::StreamKind::MemoryInfoList: {
+ MemoryInfoListStream &InfoList = cast<MemoryInfoListStream>(S);
+ File.allocateNewObject<minidump::MemoryInfoListHeader>(
+ sizeof(minidump::MemoryInfoListHeader), sizeof(minidump::MemoryInfo),
+ InfoList.Infos.size());
+ File.allocateArray(makeArrayRef(InfoList.Infos));
+ break;
+ }
+ case Stream::StreamKind::MemoryList:
+ DataEnd = layout(File, cast<MemoryListStream>(S));
+ break;
+ case Stream::StreamKind::ModuleList:
+ DataEnd = layout(File, cast<ModuleListStream>(S));
+ break;
+ case Stream::StreamKind::RawContent: {
+ RawContentStream &Raw = cast<RawContentStream>(S);
+ File.allocateCallback(Raw.Size, [&Raw](raw_ostream &OS) {
+ Raw.Content.writeAsBinary(OS);
+ assert(Raw.Content.binary_size() <= Raw.Size);
+ OS << std::string(Raw.Size - Raw.Content.binary_size(), '\0');
+ });
+ break;
+ }
+ case Stream::StreamKind::SystemInfo: {
+ SystemInfoStream &SystemInfo = cast<SystemInfoStream>(S);
+ File.allocateObject(SystemInfo.Info);
+ // The CSD string is not a part of the stream.
+ DataEnd = File.tell();
+ SystemInfo.Info.CSDVersionRVA = File.allocateString(SystemInfo.CSDVersion);
+ break;
+ }
+ case Stream::StreamKind::TextContent:
+ File.allocateArray(arrayRefFromStringRef(cast<TextContentStream>(S).Text));
+ break;
+ case Stream::StreamKind::ThreadList:
+ DataEnd = layout(File, cast<ThreadListStream>(S));
+ break;
+ }
+ // If DataEnd is not set, we assume everything we generated is a part of the
+ // stream.
+ Result.Location.DataSize =
+ DataEnd.getValueOr(File.tell()) - Result.Location.RVA;
+ return Result;
+}
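+// Worked illustration of the DataEnd logic above: for a SystemInfo stream,
+// DataEnd is captured right after the SystemInfo record is allocated, so the
+// directory entry's DataSize covers only the record itself; the CSD version
+// string laid out immediately afterwards is not counted.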
+
+namespace llvm {
+namespace yaml {
+
+bool yaml2minidump(MinidumpYAML::Object &Obj, raw_ostream &Out,
+ ErrorHandler /*EH*/) {
+ BlobAllocator File;
+ File.allocateObject(Obj.Header);
+
+ std::vector<Directory> StreamDirectory(Obj.Streams.size());
+ Obj.Header.StreamDirectoryRVA =
+ File.allocateArray(makeArrayRef(StreamDirectory));
+ Obj.Header.NumberOfStreams = StreamDirectory.size();
+
+ for (auto &Stream : enumerate(Obj.Streams))
+ StreamDirectory[Stream.index()] = layout(File, *Stream.value());
+
+ File.writeTo(Out);
+ return true;
+}
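+// Note the ordering above: the stream directory is allocated before its
+// entries are known and filled in by the layout() calls afterwards. This is
+// safe because BlobAllocator defers all writing to the final
+// File.writeTo(Out) call.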
+
+} // namespace yaml
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/MinidumpYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/MinidumpYAML.cpp
new file mode 100644
index 00000000000..e1a80b98e44
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/MinidumpYAML.cpp
@@ -0,0 +1,563 @@
+//===- MinidumpYAML.cpp - Minidump YAMLIO implementation ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/MinidumpYAML.h"
+#include "llvm/Support/Allocator.h"
+
+using namespace llvm;
+using namespace llvm::MinidumpYAML;
+using namespace llvm::minidump;
+
+/// Perform an optional yaml-mapping of an endian-aware type EndianType. The
+/// only purpose of this function is to avoid casting the Default value to the
+/// endian type.
+template <typename EndianType>
+static inline void mapOptional(yaml::IO &IO, const char *Key, EndianType &Val,
+ typename EndianType::value_type Default) {
+ IO.mapOptional(Key, Val, EndianType(Default));
+}
+
+/// Yaml-map an endian-aware type EndianType as some other type MapType.
+template <typename MapType, typename EndianType>
+static inline void mapRequiredAs(yaml::IO &IO, const char *Key,
+ EndianType &Val) {
+ MapType Mapped = static_cast<typename EndianType::value_type>(Val);
+ IO.mapRequired(Key, Mapped);
+ Val = static_cast<typename EndianType::value_type>(Mapped);
+}
+
+/// Perform an optional yaml-mapping of an endian-aware type EndianType as some
+/// other type MapType.
+template <typename MapType, typename EndianType>
+static inline void mapOptionalAs(yaml::IO &IO, const char *Key, EndianType &Val,
+ MapType Default) {
+ MapType Mapped = static_cast<typename EndianType::value_type>(Val);
+ IO.mapOptional(Key, Mapped, Default);
+ Val = static_cast<typename EndianType::value_type>(Mapped);
+}
+
+namespace {
+/// Return the appropriate yaml Hex type for a given endian-aware type.
+template <typename EndianType> struct HexType;
+template <> struct HexType<support::ulittle16_t> { using type = yaml::Hex16; };
+template <> struct HexType<support::ulittle32_t> { using type = yaml::Hex32; };
+template <> struct HexType<support::ulittle64_t> { using type = yaml::Hex64; };
+} // namespace
+
+/// Yaml-map an endian-aware type as an appropriately-sized hex value.
+template <typename EndianType>
+static inline void mapRequiredHex(yaml::IO &IO, const char *Key,
+ EndianType &Val) {
+ mapRequiredAs<typename HexType<EndianType>::type>(IO, Key, Val);
+}
+
+/// Perform an optional yaml-mapping of an endian-aware type as an
+/// appropriately-sized hex value.
+template <typename EndianType>
+static inline void mapOptionalHex(yaml::IO &IO, const char *Key,
+ EndianType &Val,
+ typename EndianType::value_type Default) {
+ mapOptionalAs<typename HexType<EndianType>::type>(IO, Key, Val, Default);
+}
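+// Illustrative use of the helpers above (the field name is hypothetical): for
+// a support::ulittle16_t member, mapRequiredHex selects yaml::Hex16 through
+// HexType, so the value round-trips through YAMLIO as plain hex without any
+// traits being defined for the endian-aware wrapper itself:
+//
+//   mapRequiredHex(IO, "Checksum", Entry.Checksum); // maps as yaml::Hex16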
+
+Stream::~Stream() = default;
+
+Stream::StreamKind Stream::getKind(StreamType Type) {
+ switch (Type) {
+ case StreamType::Exception:
+ return StreamKind::Exception;
+ case StreamType::MemoryInfoList:
+ return StreamKind::MemoryInfoList;
+ case StreamType::MemoryList:
+ return StreamKind::MemoryList;
+ case StreamType::ModuleList:
+ return StreamKind::ModuleList;
+ case StreamType::SystemInfo:
+ return StreamKind::SystemInfo;
+ case StreamType::LinuxCPUInfo:
+ case StreamType::LinuxProcStatus:
+ case StreamType::LinuxLSBRelease:
+ case StreamType::LinuxCMDLine:
+ case StreamType::LinuxMaps:
+ case StreamType::LinuxProcStat:
+ case StreamType::LinuxProcUptime:
+ return StreamKind::TextContent;
+ case StreamType::ThreadList:
+ return StreamKind::ThreadList;
+ default:
+ return StreamKind::RawContent;
+ }
+}
+
+std::unique_ptr<Stream> Stream::create(StreamType Type) {
+ StreamKind Kind = getKind(Type);
+ switch (Kind) {
+ case StreamKind::Exception:
+ return std::make_unique<ExceptionStream>();
+ case StreamKind::MemoryInfoList:
+ return std::make_unique<MemoryInfoListStream>();
+ case StreamKind::MemoryList:
+ return std::make_unique<MemoryListStream>();
+ case StreamKind::ModuleList:
+ return std::make_unique<ModuleListStream>();
+ case StreamKind::RawContent:
+ return std::make_unique<RawContentStream>(Type);
+ case StreamKind::SystemInfo:
+ return std::make_unique<SystemInfoStream>();
+ case StreamKind::TextContent:
+ return std::make_unique<TextContentStream>(Type);
+ case StreamKind::ThreadList:
+ return std::make_unique<ThreadListStream>();
+ }
+ llvm_unreachable("Unhandled stream kind!");
+}
+
+void yaml::ScalarBitSetTraits<MemoryProtection>::bitset(
+ IO &IO, MemoryProtection &Protect) {
+#define HANDLE_MDMP_PROTECT(CODE, NAME, NATIVENAME) \
+ IO.bitSetCase(Protect, #NATIVENAME, MemoryProtection::NAME);
+#include "llvm/BinaryFormat/MinidumpConstants.def"
+}
+
+void yaml::ScalarBitSetTraits<MemoryState>::bitset(IO &IO, MemoryState &State) {
+#define HANDLE_MDMP_MEMSTATE(CODE, NAME, NATIVENAME) \
+ IO.bitSetCase(State, #NATIVENAME, MemoryState::NAME);
+#include "llvm/BinaryFormat/MinidumpConstants.def"
+}
+
+void yaml::ScalarBitSetTraits<MemoryType>::bitset(IO &IO, MemoryType &Type) {
+#define HANDLE_MDMP_MEMTYPE(CODE, NAME, NATIVENAME) \
+ IO.bitSetCase(Type, #NATIVENAME, MemoryType::NAME);
+#include "llvm/BinaryFormat/MinidumpConstants.def"
+}
+
+void yaml::ScalarEnumerationTraits<ProcessorArchitecture>::enumeration(
+ IO &IO, ProcessorArchitecture &Arch) {
+#define HANDLE_MDMP_ARCH(CODE, NAME) \
+ IO.enumCase(Arch, #NAME, ProcessorArchitecture::NAME);
+#include "llvm/BinaryFormat/MinidumpConstants.def"
+ IO.enumFallback<Hex16>(Arch);
+}
+
+void yaml::ScalarEnumerationTraits<OSPlatform>::enumeration(IO &IO,
+ OSPlatform &Plat) {
+#define HANDLE_MDMP_PLATFORM(CODE, NAME) \
+ IO.enumCase(Plat, #NAME, OSPlatform::NAME);
+#include "llvm/BinaryFormat/MinidumpConstants.def"
+ IO.enumFallback<Hex32>(Plat);
+}
+
+void yaml::ScalarEnumerationTraits<StreamType>::enumeration(IO &IO,
+ StreamType &Type) {
+#define HANDLE_MDMP_STREAM_TYPE(CODE, NAME) \
+ IO.enumCase(Type, #NAME, StreamType::NAME);
+#include "llvm/BinaryFormat/MinidumpConstants.def"
+ IO.enumFallback<Hex32>(Type);
+}
+
+void yaml::MappingTraits<CPUInfo::ArmInfo>::mapping(IO &IO,
+ CPUInfo::ArmInfo &Info) {
+ mapRequiredHex(IO, "CPUID", Info.CPUID);
+ mapOptionalHex(IO, "ELF hwcaps", Info.ElfHWCaps, 0);
+}
+
+namespace {
+template <std::size_t N> struct FixedSizeHex {
+ FixedSizeHex(uint8_t (&Storage)[N]) : Storage(Storage) {}
+
+ uint8_t (&Storage)[N];
+};
+} // namespace
+
+namespace llvm {
+namespace yaml {
+template <std::size_t N> struct ScalarTraits<FixedSizeHex<N>> {
+ static void output(const FixedSizeHex<N> &Fixed, void *, raw_ostream &OS) {
+ OS << toHex(makeArrayRef(Fixed.Storage));
+ }
+
+ static StringRef input(StringRef Scalar, void *, FixedSizeHex<N> &Fixed) {
+ if (!all_of(Scalar, isHexDigit))
+ return "Invalid hex digit in input";
+ if (Scalar.size() < 2 * N)
+ return "String too short";
+ if (Scalar.size() > 2 * N)
+ return "String too long";
+ copy(fromHex(Scalar), Fixed.Storage);
+ return "";
+ }
+
+ static QuotingType mustQuote(StringRef S) { return QuotingType::None; }
+};
+} // namespace yaml
+} // namespace llvm
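+// Illustration of the FixedSizeHex traits above: for N == 2 the scalar
+// "0a1b" parses into Storage == {0x0a, 0x1b}, while "0a1" is rejected with
+// "String too short" and "0a1b2c" with "String too long".
+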
+void yaml::MappingTraits<CPUInfo::OtherInfo>::mapping(
+ IO &IO, CPUInfo::OtherInfo &Info) {
+ FixedSizeHex<sizeof(Info.ProcessorFeatures)> Features(Info.ProcessorFeatures);
+ IO.mapRequired("Features", Features);
+}
+
+namespace {
+/// A type which only accepts strings of a fixed size for yaml conversion.
+template <std::size_t N> struct FixedSizeString {
+ FixedSizeString(char (&Storage)[N]) : Storage(Storage) {}
+
+ char (&Storage)[N];
+};
+} // namespace
+
+namespace llvm {
+namespace yaml {
+template <std::size_t N> struct ScalarTraits<FixedSizeString<N>> {
+ static void output(const FixedSizeString<N> &Fixed, void *, raw_ostream &OS) {
+ OS << StringRef(Fixed.Storage, N);
+ }
+
+ static StringRef input(StringRef Scalar, void *, FixedSizeString<N> &Fixed) {
+ if (Scalar.size() < N)
+ return "String too short";
+ if (Scalar.size() > N)
+ return "String too long";
+ copy(Scalar, Fixed.Storage);
+ return "";
+ }
+
+ static QuotingType mustQuote(StringRef S) { return needsQuotes(S); }
+};
+} // namespace yaml
+} // namespace llvm
+
+void yaml::MappingTraits<CPUInfo::X86Info>::mapping(IO &IO,
+ CPUInfo::X86Info &Info) {
+ FixedSizeString<sizeof(Info.VendorID)> VendorID(Info.VendorID);
+ IO.mapRequired("Vendor ID", VendorID);
+
+ mapRequiredHex(IO, "Version Info", Info.VersionInfo);
+ mapRequiredHex(IO, "Feature Info", Info.FeatureInfo);
+ mapOptionalHex(IO, "AMD Extended Features", Info.AMDExtendedFeatures, 0);
+}
+
+void yaml::MappingTraits<MemoryInfo>::mapping(IO &IO, MemoryInfo &Info) {
+ mapRequiredHex(IO, "Base Address", Info.BaseAddress);
+ mapOptionalHex(IO, "Allocation Base", Info.AllocationBase, Info.BaseAddress);
+ mapRequiredAs<MemoryProtection>(IO, "Allocation Protect",
+ Info.AllocationProtect);
+ mapOptionalHex(IO, "Reserved0", Info.Reserved0, 0);
+ mapRequiredHex(IO, "Region Size", Info.RegionSize);
+ mapRequiredAs<MemoryState>(IO, "State", Info.State);
+ mapOptionalAs<MemoryProtection>(IO, "Protect", Info.Protect,
+ Info.AllocationProtect);
+ mapRequiredAs<MemoryType>(IO, "Type", Info.Type);
+ mapOptionalHex(IO, "Reserved1", Info.Reserved1, 0);
+}
+
+void yaml::MappingTraits<VSFixedFileInfo>::mapping(IO &IO,
+ VSFixedFileInfo &Info) {
+ mapOptionalHex(IO, "Signature", Info.Signature, 0);
+ mapOptionalHex(IO, "Struct Version", Info.StructVersion, 0);
+ mapOptionalHex(IO, "File Version High", Info.FileVersionHigh, 0);
+ mapOptionalHex(IO, "File Version Low", Info.FileVersionLow, 0);
+ mapOptionalHex(IO, "Product Version High", Info.ProductVersionHigh, 0);
+ mapOptionalHex(IO, "Product Version Low", Info.ProductVersionLow, 0);
+ mapOptionalHex(IO, "File Flags Mask", Info.FileFlagsMask, 0);
+ mapOptionalHex(IO, "File Flags", Info.FileFlags, 0);
+ mapOptionalHex(IO, "File OS", Info.FileOS, 0);
+ mapOptionalHex(IO, "File Type", Info.FileType, 0);
+ mapOptionalHex(IO, "File Subtype", Info.FileSubtype, 0);
+ mapOptionalHex(IO, "File Date High", Info.FileDateHigh, 0);
+ mapOptionalHex(IO, "File Date Low", Info.FileDateLow, 0);
+}
+
+void yaml::MappingTraits<ModuleListStream::entry_type>::mapping(
+ IO &IO, ModuleListStream::entry_type &M) {
+ mapRequiredHex(IO, "Base of Image", M.Entry.BaseOfImage);
+ mapRequiredHex(IO, "Size of Image", M.Entry.SizeOfImage);
+ mapOptionalHex(IO, "Checksum", M.Entry.Checksum, 0);
+ mapOptional(IO, "Time Date Stamp", M.Entry.TimeDateStamp, 0);
+ IO.mapRequired("Module Name", M.Name);
+ IO.mapOptional("Version Info", M.Entry.VersionInfo, VSFixedFileInfo());
+ IO.mapRequired("CodeView Record", M.CvRecord);
+ IO.mapOptional("Misc Record", M.MiscRecord, yaml::BinaryRef());
+ mapOptionalHex(IO, "Reserved0", M.Entry.Reserved0, 0);
+ mapOptionalHex(IO, "Reserved1", M.Entry.Reserved1, 0);
+}
+
+static void streamMapping(yaml::IO &IO, RawContentStream &Stream) {
+ IO.mapOptional("Content", Stream.Content);
+ IO.mapOptional("Size", Stream.Size, Stream.Content.binary_size());
+}
+
+static std::string streamValidate(RawContentStream &Stream) {
+ if (Stream.Size.value < Stream.Content.binary_size())
+ return "Stream size must be greater or equal to the content size";
+ return "";
+}
+
+void yaml::MappingTraits<MemoryListStream::entry_type>::mapping(
+ IO &IO, MemoryListStream::entry_type &Range) {
+ MappingContextTraits<MemoryDescriptor, yaml::BinaryRef>::mapping(
+ IO, Range.Entry, Range.Content);
+}
+
+static void streamMapping(yaml::IO &IO, MemoryInfoListStream &Stream) {
+ IO.mapRequired("Memory Ranges", Stream.Infos);
+}
+
+static void streamMapping(yaml::IO &IO, MemoryListStream &Stream) {
+ IO.mapRequired("Memory Ranges", Stream.Entries);
+}
+
+static void streamMapping(yaml::IO &IO, ModuleListStream &Stream) {
+ IO.mapRequired("Modules", Stream.Entries);
+}
+
+static void streamMapping(yaml::IO &IO, SystemInfoStream &Stream) {
+ SystemInfo &Info = Stream.Info;
+ IO.mapRequired("Processor Arch", Info.ProcessorArch);
+ mapOptional(IO, "Processor Level", Info.ProcessorLevel, 0);
+ mapOptional(IO, "Processor Revision", Info.ProcessorRevision, 0);
+ IO.mapOptional("Number of Processors", Info.NumberOfProcessors, 0);
+ IO.mapOptional("Product type", Info.ProductType, 0);
+ mapOptional(IO, "Major Version", Info.MajorVersion, 0);
+ mapOptional(IO, "Minor Version", Info.MinorVersion, 0);
+ mapOptional(IO, "Build Number", Info.BuildNumber, 0);
+ IO.mapRequired("Platform ID", Info.PlatformId);
+ IO.mapOptional("CSD Version", Stream.CSDVersion, "");
+ mapOptionalHex(IO, "Suite Mask", Info.SuiteMask, 0);
+ mapOptionalHex(IO, "Reserved", Info.Reserved, 0);
+ switch (static_cast<ProcessorArchitecture>(Info.ProcessorArch)) {
+ case ProcessorArchitecture::X86:
+ case ProcessorArchitecture::AMD64:
+ IO.mapOptional("CPU", Info.CPU.X86);
+ break;
+ case ProcessorArchitecture::ARM:
+ case ProcessorArchitecture::ARM64:
+ case ProcessorArchitecture::BP_ARM64:
+ IO.mapOptional("CPU", Info.CPU.Arm);
+ break;
+ default:
+ IO.mapOptional("CPU", Info.CPU.Other);
+ break;
+ }
+}
+
+static void streamMapping(yaml::IO &IO, TextContentStream &Stream) {
+ IO.mapOptional("Text", Stream.Text);
+}
+
+void yaml::MappingContextTraits<MemoryDescriptor, yaml::BinaryRef>::mapping(
+ IO &IO, MemoryDescriptor &Memory, BinaryRef &Content) {
+ mapRequiredHex(IO, "Start of Memory Range", Memory.StartOfMemoryRange);
+ IO.mapRequired("Content", Content);
+}
+
+void yaml::MappingTraits<ThreadListStream::entry_type>::mapping(
+ IO &IO, ThreadListStream::entry_type &T) {
+ mapRequiredHex(IO, "Thread Id", T.Entry.ThreadId);
+ mapOptionalHex(IO, "Suspend Count", T.Entry.SuspendCount, 0);
+ mapOptionalHex(IO, "Priority Class", T.Entry.PriorityClass, 0);
+ mapOptionalHex(IO, "Priority", T.Entry.Priority, 0);
+ mapOptionalHex(IO, "Environment Block", T.Entry.EnvironmentBlock, 0);
+ IO.mapRequired("Context", T.Context);
+ IO.mapRequired("Stack", T.Entry.Stack, T.Stack);
+}
+
+static void streamMapping(yaml::IO &IO, ThreadListStream &Stream) {
+ IO.mapRequired("Threads", Stream.Entries);
+}
+
+static void streamMapping(yaml::IO &IO, MinidumpYAML::ExceptionStream &Stream) {
+ mapRequiredHex(IO, "Thread ID", Stream.MDExceptionStream.ThreadId);
+ IO.mapRequired("Exception Record", Stream.MDExceptionStream.ExceptionRecord);
+ IO.mapRequired("Thread Context", Stream.ThreadContext);
+}
+
+void yaml::MappingTraits<minidump::Exception>::mapping(
+ yaml::IO &IO, minidump::Exception &Exception) {
+ mapRequiredHex(IO, "Exception Code", Exception.ExceptionCode);
+ mapOptionalHex(IO, "Exception Flags", Exception.ExceptionFlags, 0);
+ mapOptionalHex(IO, "Exception Record", Exception.ExceptionRecord, 0);
+ mapOptionalHex(IO, "Exception Address", Exception.ExceptionAddress, 0);
+ mapOptional(IO, "Number of Parameters", Exception.NumberParameters, 0);
+
+ for (size_t Index = 0; Index < Exception.MaxParameters; ++Index) {
+ SmallString<16> Name("Parameter ");
+ Twine(Index).toVector(Name);
+ support::ulittle64_t &Field = Exception.ExceptionInformation[Index];
+
+ if (Index < Exception.NumberParameters)
+ mapRequiredHex(IO, Name.c_str(), Field);
+ else
+ mapOptionalHex(IO, Name.c_str(), Field, 0);
+ }
+}
+
+void yaml::MappingTraits<std::unique_ptr<Stream>>::mapping(
+ yaml::IO &IO, std::unique_ptr<MinidumpYAML::Stream> &S) {
+ StreamType Type;
+ if (IO.outputting())
+ Type = S->Type;
+ IO.mapRequired("Type", Type);
+
+ if (!IO.outputting())
+ S = MinidumpYAML::Stream::create(Type);
+ switch (S->Kind) {
+ case MinidumpYAML::Stream::StreamKind::Exception:
+ streamMapping(IO, llvm::cast<MinidumpYAML::ExceptionStream>(*S));
+ break;
+ case MinidumpYAML::Stream::StreamKind::MemoryInfoList:
+ streamMapping(IO, llvm::cast<MemoryInfoListStream>(*S));
+ break;
+ case MinidumpYAML::Stream::StreamKind::MemoryList:
+ streamMapping(IO, llvm::cast<MemoryListStream>(*S));
+ break;
+ case MinidumpYAML::Stream::StreamKind::ModuleList:
+ streamMapping(IO, llvm::cast<ModuleListStream>(*S));
+ break;
+ case MinidumpYAML::Stream::StreamKind::RawContent:
+ streamMapping(IO, llvm::cast<RawContentStream>(*S));
+ break;
+ case MinidumpYAML::Stream::StreamKind::SystemInfo:
+ streamMapping(IO, llvm::cast<SystemInfoStream>(*S));
+ break;
+ case MinidumpYAML::Stream::StreamKind::TextContent:
+ streamMapping(IO, llvm::cast<TextContentStream>(*S));
+ break;
+ case MinidumpYAML::Stream::StreamKind::ThreadList:
+ streamMapping(IO, llvm::cast<ThreadListStream>(*S));
+ break;
+ }
+}
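+// Round-trip illustration (sample document, not from the upstream file): for
+// input such as
+//
+//   --- !minidump
+//   Streams:
+//     - Type:            SystemInfo
+//       Processor Arch:  AMD64
+//       Platform ID:     Linux
+//
+// the "Type" key is mapped first, Stream::create instantiates a
+// SystemInfoStream, and the switch above dispatches to the matching
+// streamMapping overload.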
+
+std::string yaml::MappingTraits<std::unique_ptr<Stream>>::validate(
+ yaml::IO &IO, std::unique_ptr<MinidumpYAML::Stream> &S) {
+ switch (S->Kind) {
+ case MinidumpYAML::Stream::StreamKind::RawContent:
+ return streamValidate(cast<RawContentStream>(*S));
+ case MinidumpYAML::Stream::StreamKind::Exception:
+ case MinidumpYAML::Stream::StreamKind::MemoryInfoList:
+ case MinidumpYAML::Stream::StreamKind::MemoryList:
+ case MinidumpYAML::Stream::StreamKind::ModuleList:
+ case MinidumpYAML::Stream::StreamKind::SystemInfo:
+ case MinidumpYAML::Stream::StreamKind::TextContent:
+ case MinidumpYAML::Stream::StreamKind::ThreadList:
+ return "";
+ }
+ llvm_unreachable("Fully covered switch above!");
+}
+
+void yaml::MappingTraits<Object>::mapping(IO &IO, Object &O) {
+ IO.mapTag("!minidump", true);
+ mapOptionalHex(IO, "Signature", O.Header.Signature, Header::MagicSignature);
+ mapOptionalHex(IO, "Version", O.Header.Version, Header::MagicVersion);
+ mapOptionalHex(IO, "Flags", O.Header.Flags, 0);
+ IO.mapRequired("Streams", O.Streams);
+}
+
+Expected<std::unique_ptr<Stream>>
+Stream::create(const Directory &StreamDesc, const object::MinidumpFile &File) {
+ StreamKind Kind = getKind(StreamDesc.Type);
+ switch (Kind) {
+ case StreamKind::Exception: {
+ Expected<const minidump::ExceptionStream &> ExpectedExceptionStream =
+ File.getExceptionStream();
+ if (!ExpectedExceptionStream)
+ return ExpectedExceptionStream.takeError();
+ Expected<ArrayRef<uint8_t>> ExpectedThreadContext =
+ File.getRawData(ExpectedExceptionStream->ThreadContext);
+ if (!ExpectedThreadContext)
+ return ExpectedThreadContext.takeError();
+ return std::make_unique<ExceptionStream>(*ExpectedExceptionStream,
+ *ExpectedThreadContext);
+ }
+ case StreamKind::MemoryInfoList: {
+ if (auto ExpectedList = File.getMemoryInfoList())
+ return std::make_unique<MemoryInfoListStream>(*ExpectedList);
+ else
+ return ExpectedList.takeError();
+ }
+ case StreamKind::MemoryList: {
+ auto ExpectedList = File.getMemoryList();
+ if (!ExpectedList)
+ return ExpectedList.takeError();
+ std::vector<MemoryListStream::entry_type> Ranges;
+ for (const MemoryDescriptor &MD : *ExpectedList) {
+ auto ExpectedContent = File.getRawData(MD.Memory);
+ if (!ExpectedContent)
+ return ExpectedContent.takeError();
+ Ranges.push_back({MD, *ExpectedContent});
+ }
+ return std::make_unique<MemoryListStream>(std::move(Ranges));
+ }
+ case StreamKind::ModuleList: {
+ auto ExpectedList = File.getModuleList();
+ if (!ExpectedList)
+ return ExpectedList.takeError();
+ std::vector<ModuleListStream::entry_type> Modules;
+ for (const Module &M : *ExpectedList) {
+ auto ExpectedName = File.getString(M.ModuleNameRVA);
+ if (!ExpectedName)
+ return ExpectedName.takeError();
+ auto ExpectedCv = File.getRawData(M.CvRecord);
+ if (!ExpectedCv)
+ return ExpectedCv.takeError();
+ auto ExpectedMisc = File.getRawData(M.MiscRecord);
+ if (!ExpectedMisc)
+ return ExpectedMisc.takeError();
+ Modules.push_back(
+ {M, std::move(*ExpectedName), *ExpectedCv, *ExpectedMisc});
+ }
+ return std::make_unique<ModuleListStream>(std::move(Modules));
+ }
+ case StreamKind::RawContent:
+ return std::make_unique<RawContentStream>(StreamDesc.Type,
+ File.getRawStream(StreamDesc));
+ case StreamKind::SystemInfo: {
+ auto ExpectedInfo = File.getSystemInfo();
+ if (!ExpectedInfo)
+ return ExpectedInfo.takeError();
+ auto ExpectedCSDVersion = File.getString(ExpectedInfo->CSDVersionRVA);
+ if (!ExpectedCSDVersion)
+ return ExpectedCSDVersion.takeError();
+ return std::make_unique<SystemInfoStream>(*ExpectedInfo,
+ std::move(*ExpectedCSDVersion));
+ }
+ case StreamKind::TextContent:
+ return std::make_unique<TextContentStream>(
+ StreamDesc.Type, toStringRef(File.getRawStream(StreamDesc)));
+ case StreamKind::ThreadList: {
+ auto ExpectedList = File.getThreadList();
+ if (!ExpectedList)
+ return ExpectedList.takeError();
+ std::vector<ThreadListStream::entry_type> Threads;
+ for (const Thread &T : *ExpectedList) {
+ auto ExpectedStack = File.getRawData(T.Stack.Memory);
+ if (!ExpectedStack)
+ return ExpectedStack.takeError();
+ auto ExpectedContext = File.getRawData(T.Context);
+ if (!ExpectedContext)
+ return ExpectedContext.takeError();
+ Threads.push_back({T, *ExpectedStack, *ExpectedContext});
+ }
+ return std::make_unique<ThreadListStream>(std::move(Threads));
+ }
+ }
+ llvm_unreachable("Unhandled stream kind!");
+}
+
+Expected<Object> Object::create(const object::MinidumpFile &File) {
+ std::vector<std::unique_ptr<Stream>> Streams;
+ Streams.reserve(File.streams().size());
+ for (const Directory &StreamDesc : File.streams()) {
+ auto ExpectedStream = Stream::create(StreamDesc, File);
+ if (!ExpectedStream)
+ return ExpectedStream.takeError();
+ Streams.push_back(std::move(*ExpectedStream));
+ }
+ return Object(File.header(), std::move(Streams));
+}
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/ObjectYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/ObjectYAML.cpp
new file mode 100644
index 00000000000..4564b537c9a
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/ObjectYAML.cpp
@@ -0,0 +1,70 @@
+//===- ObjectYAML.cpp - YAML utilities for object files -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a wrapper class for handling tagged YAML input.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/ObjectYAML.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/YAMLParser.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <string>
+
+using namespace llvm;
+using namespace yaml;
+
+void MappingTraits<YamlObjectFile>::mapping(IO &IO,
+ YamlObjectFile &ObjectFile) {
+ if (IO.outputting()) {
+ if (ObjectFile.Elf)
+ MappingTraits<ELFYAML::Object>::mapping(IO, *ObjectFile.Elf);
+ if (ObjectFile.Coff)
+ MappingTraits<COFFYAML::Object>::mapping(IO, *ObjectFile.Coff);
+ if (ObjectFile.MachO)
+ MappingTraits<MachOYAML::Object>::mapping(IO, *ObjectFile.MachO);
+ if (ObjectFile.FatMachO)
+ MappingTraits<MachOYAML::UniversalBinary>::mapping(IO,
+ *ObjectFile.FatMachO);
+ } else {
+ Input &In = (Input &)IO;
+ if (IO.mapTag("!Arch")) {
+ ObjectFile.Arch.reset(new ArchYAML::Archive());
+ MappingTraits<ArchYAML::Archive>::mapping(IO, *ObjectFile.Arch);
+ std::string Err =
+ MappingTraits<ArchYAML::Archive>::validate(IO, *ObjectFile.Arch);
+ if (!Err.empty())
+ IO.setError(Err);
+ } else if (IO.mapTag("!ELF")) {
+ ObjectFile.Elf.reset(new ELFYAML::Object());
+ MappingTraits<ELFYAML::Object>::mapping(IO, *ObjectFile.Elf);
+ } else if (IO.mapTag("!COFF")) {
+ ObjectFile.Coff.reset(new COFFYAML::Object());
+ MappingTraits<COFFYAML::Object>::mapping(IO, *ObjectFile.Coff);
+ } else if (IO.mapTag("!mach-o")) {
+ ObjectFile.MachO.reset(new MachOYAML::Object());
+ MappingTraits<MachOYAML::Object>::mapping(IO, *ObjectFile.MachO);
+ } else if (IO.mapTag("!fat-mach-o")) {
+ ObjectFile.FatMachO.reset(new MachOYAML::UniversalBinary());
+ MappingTraits<MachOYAML::UniversalBinary>::mapping(IO,
+ *ObjectFile.FatMachO);
+ } else if (IO.mapTag("!minidump")) {
+ ObjectFile.Minidump.reset(new MinidumpYAML::Object());
+ MappingTraits<MinidumpYAML::Object>::mapping(IO, *ObjectFile.Minidump);
+ } else if (IO.mapTag("!WASM")) {
+ ObjectFile.Wasm.reset(new WasmYAML::Object());
+ MappingTraits<WasmYAML::Object>::mapping(IO, *ObjectFile.Wasm);
+ } else if (const Node *N = In.getCurrentNode()) {
+ if (N->getRawTag().empty())
+ IO.setError("YAML Object File missing document type tag!");
+ else
+ IO.setError("YAML Object File unsupported document type tag '" +
+ N->getRawTag() + "'!");
+ }
+ }
+}
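+// For example (illustrative input, not from the upstream file), a document
+// beginning with
+//
+//   --- !ELF
+//   FileHeader:
+//     Class: ELFCLASS64
+//
+// is routed to the ELFYAML::Object mapping via the "!ELF" branch above, while
+// an untagged document triggers the "missing document type tag" error.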
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/WasmEmitter.cpp b/contrib/libs/llvm12/lib/ObjectYAML/WasmEmitter.cpp
new file mode 100644
index 00000000000..c09d7f1bc95
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/WasmEmitter.cpp
@@ -0,0 +1,672 @@
+//===- WasmEmitter.cpp - Convert YAML to a Wasm object file --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// The Wasm component of yaml2obj.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Object/Wasm.h"
+#include "llvm/ObjectYAML/ObjectYAML.h"
+#include "llvm/ObjectYAML/yaml2obj.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/LEB128.h"
+
+using namespace llvm;
+
+namespace {
+/// This parses a yaml stream that represents a Wasm object file.
+/// See docs/yaml2obj for the YAML schema.
+class WasmWriter {
+public:
+ WasmWriter(WasmYAML::Object &Obj, yaml::ErrorHandler EH)
+ : Obj(Obj), ErrHandler(EH) {}
+ bool writeWasm(raw_ostream &OS);
+
+private:
+ void writeRelocSection(raw_ostream &OS, WasmYAML::Section &Sec,
+ uint32_t SectionIndex);
+
+ void writeInitExpr(raw_ostream &OS, const wasm::WasmInitExpr &InitExpr);
+
+ void writeSectionContent(raw_ostream &OS, WasmYAML::CustomSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::TypeSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::ImportSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::FunctionSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::TableSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::MemorySection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::EventSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::GlobalSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::ExportSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::StartSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::ElemSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::CodeSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::DataSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::DataCountSection &Section);
+
+ // Custom section types
+ void writeSectionContent(raw_ostream &OS, WasmYAML::DylinkSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::NameSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::LinkingSection &Section);
+ void writeSectionContent(raw_ostream &OS, WasmYAML::ProducersSection &Section);
+ void writeSectionContent(raw_ostream &OS,
+ WasmYAML::TargetFeaturesSection &Section);
+ WasmYAML::Object &Obj;
+ uint32_t NumImportedFunctions = 0;
+ uint32_t NumImportedGlobals = 0;
+ uint32_t NumImportedTables = 0;
+ uint32_t NumImportedEvents = 0;
+
+ bool HasError = false;
+ yaml::ErrorHandler ErrHandler;
+ void reportError(const Twine &Msg);
+};
+
+class SubSectionWriter {
+ raw_ostream &OS;
+ std::string OutString;
+ raw_string_ostream StringStream;
+
+public:
+ SubSectionWriter(raw_ostream &OS) : OS(OS), StringStream(OutString) {}
+
+ void done() {
+ StringStream.flush();
+ encodeULEB128(OutString.size(), OS);
+ OS << OutString;
+ OutString.clear();
+ }
+
+ raw_ostream &getStream() { return StringStream; }
+};
+
+} // end anonymous namespace
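+// Usage sketch for SubSectionWriter (illustrative): subsection payloads must
+// be prefixed with their ULEB128-encoded size, which is only known once the
+// content has been produced, so the content is buffered and flushed by done():
+//
+//   SubSectionWriter SubSection(OS);
+//   encodeULEB128(NumEntries, SubSection.getStream()); // buffered
+//   ...                                                // more content
+//   SubSection.done(); // writes the size, then the buffered bytes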
+
+static int writeUint64(raw_ostream &OS, uint64_t Value) {
+ char Data[sizeof(Value)];
+ support::endian::write64le(Data, Value);
+ OS.write(Data, sizeof(Data));
+ return 0;
+}
+
+static int writeUint32(raw_ostream &OS, uint32_t Value) {
+ char Data[sizeof(Value)];
+ support::endian::write32le(Data, Value);
+ OS.write(Data, sizeof(Data));
+ return 0;
+}
+
+static int writeUint8(raw_ostream &OS, uint8_t Value) {
+ char Data[sizeof(Value)];
+ memcpy(Data, &Value, sizeof(Data));
+ OS.write(Data, sizeof(Data));
+ return 0;
+}
+
+static int writeStringRef(const StringRef &Str, raw_ostream &OS) {
+ encodeULEB128(Str.size(), OS);
+ OS << Str;
+ return 0;
+}
+
+static int writeLimits(const WasmYAML::Limits &Lim, raw_ostream &OS) {
+ writeUint8(OS, Lim.Flags);
+ encodeULEB128(Lim.Initial, OS);
+ if (Lim.Flags & wasm::WASM_LIMITS_FLAG_HAS_MAX)
+ encodeULEB128(Lim.Maximum, OS);
+ return 0;
+}
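+// For reference, the ULEB128 encoding used for all variable-length integers
+// here packs seven payload bits per byte, setting the high bit on every byte
+// except the last; e.g. encodeULEB128(624485, OS) emits 0xE5 0x8E 0x26.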
+
+void WasmWriter::reportError(const Twine &Msg) {
+ ErrHandler(Msg);
+ HasError = true;
+}
+
+void WasmWriter::writeInitExpr(raw_ostream &OS,
+ const wasm::WasmInitExpr &InitExpr) {
+ writeUint8(OS, InitExpr.Opcode);
+ switch (InitExpr.Opcode) {
+ case wasm::WASM_OPCODE_I32_CONST:
+ encodeSLEB128(InitExpr.Value.Int32, OS);
+ break;
+ case wasm::WASM_OPCODE_I64_CONST:
+ encodeSLEB128(InitExpr.Value.Int64, OS);
+ break;
+ case wasm::WASM_OPCODE_F32_CONST:
+ writeUint32(OS, InitExpr.Value.Float32);
+ break;
+ case wasm::WASM_OPCODE_F64_CONST:
+ writeUint64(OS, InitExpr.Value.Float64);
+ break;
+ case wasm::WASM_OPCODE_GLOBAL_GET:
+ encodeULEB128(InitExpr.Value.Global, OS);
+ break;
+ default:
+ reportError("unknown opcode in init_expr: " + Twine(InitExpr.Opcode));
+ return;
+ }
+ writeUint8(OS, wasm::WASM_OPCODE_END);
+}
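+// Byte-level example (standard wasm encoding, shown for illustration): an
+// init expr of "i32.const 7" is emitted as 0x41 (i32.const), 0x07 (SLEB128
+// operand), 0x0B (end).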
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::DylinkSection &Section) {
+ writeStringRef(Section.Name, OS);
+ encodeULEB128(Section.MemorySize, OS);
+ encodeULEB128(Section.MemoryAlignment, OS);
+ encodeULEB128(Section.TableSize, OS);
+ encodeULEB128(Section.TableAlignment, OS);
+ encodeULEB128(Section.Needed.size(), OS);
+ for (StringRef Needed : Section.Needed)
+ writeStringRef(Needed, OS);
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::LinkingSection &Section) {
+ writeStringRef(Section.Name, OS);
+ encodeULEB128(Section.Version, OS);
+
+ SubSectionWriter SubSection(OS);
+
+ // SYMBOL_TABLE subsection
+ if (Section.SymbolTable.size()) {
+ writeUint8(OS, wasm::WASM_SYMBOL_TABLE);
+
+ encodeULEB128(Section.SymbolTable.size(), SubSection.getStream());
+#ifndef NDEBUG
+ uint32_t SymbolIndex = 0;
+#endif
+ for (const WasmYAML::SymbolInfo &Info : Section.SymbolTable) {
+ assert(Info.Index == SymbolIndex++);
+ writeUint8(SubSection.getStream(), Info.Kind);
+ encodeULEB128(Info.Flags, SubSection.getStream());
+ switch (Info.Kind) {
+ case wasm::WASM_SYMBOL_TYPE_FUNCTION:
+ case wasm::WASM_SYMBOL_TYPE_GLOBAL:
+ case wasm::WASM_SYMBOL_TYPE_TABLE:
+ case wasm::WASM_SYMBOL_TYPE_EVENT:
+ encodeULEB128(Info.ElementIndex, SubSection.getStream());
+ if ((Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0 ||
+ (Info.Flags & wasm::WASM_SYMBOL_EXPLICIT_NAME) != 0)
+ writeStringRef(Info.Name, SubSection.getStream());
+ break;
+ case wasm::WASM_SYMBOL_TYPE_DATA:
+ writeStringRef(Info.Name, SubSection.getStream());
+ if ((Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0) {
+ encodeULEB128(Info.DataRef.Segment, SubSection.getStream());
+ encodeULEB128(Info.DataRef.Offset, SubSection.getStream());
+ encodeULEB128(Info.DataRef.Size, SubSection.getStream());
+ }
+ break;
+ case wasm::WASM_SYMBOL_TYPE_SECTION:
+ encodeULEB128(Info.ElementIndex, SubSection.getStream());
+ break;
+ default:
+ llvm_unreachable("unexpected kind");
+ }
+ }
+
+ SubSection.done();
+ }
+
+ // SEGMENT_NAMES subsection
+ if (Section.SegmentInfos.size()) {
+ writeUint8(OS, wasm::WASM_SEGMENT_INFO);
+ encodeULEB128(Section.SegmentInfos.size(), SubSection.getStream());
+ for (const WasmYAML::SegmentInfo &SegmentInfo : Section.SegmentInfos) {
+ writeStringRef(SegmentInfo.Name, SubSection.getStream());
+ encodeULEB128(SegmentInfo.Alignment, SubSection.getStream());
+ encodeULEB128(SegmentInfo.Flags, SubSection.getStream());
+ }
+ SubSection.done();
+ }
+
+ // INIT_FUNCS subsection
+ if (Section.InitFunctions.size()) {
+ writeUint8(OS, wasm::WASM_INIT_FUNCS);
+ encodeULEB128(Section.InitFunctions.size(), SubSection.getStream());
+ for (const WasmYAML::InitFunction &Func : Section.InitFunctions) {
+ encodeULEB128(Func.Priority, SubSection.getStream());
+ encodeULEB128(Func.Symbol, SubSection.getStream());
+ }
+ SubSection.done();
+ }
+
+ // COMDAT_INFO subsection
+ if (Section.Comdats.size()) {
+ writeUint8(OS, wasm::WASM_COMDAT_INFO);
+ encodeULEB128(Section.Comdats.size(), SubSection.getStream());
+ for (const auto &C : Section.Comdats) {
+ writeStringRef(C.Name, SubSection.getStream());
+ encodeULEB128(0, SubSection.getStream()); // flags for future use
+ encodeULEB128(C.Entries.size(), SubSection.getStream());
+ for (const WasmYAML::ComdatEntry &Entry : C.Entries) {
+ writeUint8(SubSection.getStream(), Entry.Kind);
+ encodeULEB128(Entry.Index, SubSection.getStream());
+ }
+ }
+ SubSection.done();
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::NameSection &Section) {
+ writeStringRef(Section.Name, OS);
+ if (Section.FunctionNames.size()) {
+ writeUint8(OS, wasm::WASM_NAMES_FUNCTION);
+
+ SubSectionWriter SubSection(OS);
+
+ encodeULEB128(Section.FunctionNames.size(), SubSection.getStream());
+ for (const WasmYAML::NameEntry &NameEntry : Section.FunctionNames) {
+ encodeULEB128(NameEntry.Index, SubSection.getStream());
+ writeStringRef(NameEntry.Name, SubSection.getStream());
+ }
+
+ SubSection.done();
+ }
+ if (Section.GlobalNames.size()) {
+ writeUint8(OS, wasm::WASM_NAMES_GLOBAL);
+
+ SubSectionWriter SubSection(OS);
+
+ encodeULEB128(Section.GlobalNames.size(), SubSection.getStream());
+ for (const WasmYAML::NameEntry &NameEntry : Section.GlobalNames) {
+ encodeULEB128(NameEntry.Index, SubSection.getStream());
+ writeStringRef(NameEntry.Name, SubSection.getStream());
+ }
+
+ SubSection.done();
+ }
+ if (Section.DataSegmentNames.size()) {
+ writeUint8(OS, wasm::WASM_NAMES_DATA_SEGMENT);
+
+ SubSectionWriter SubSection(OS);
+
+ encodeULEB128(Section.DataSegmentNames.size(), SubSection.getStream());
+ for (const WasmYAML::NameEntry &NameEntry : Section.DataSegmentNames) {
+ encodeULEB128(NameEntry.Index, SubSection.getStream());
+ writeStringRef(NameEntry.Name, SubSection.getStream());
+ }
+
+ SubSection.done();
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::ProducersSection &Section) {
+ writeStringRef(Section.Name, OS);
+ int Fields = int(!Section.Languages.empty()) + int(!Section.Tools.empty()) +
+ int(!Section.SDKs.empty());
+ if (Fields == 0)
+ return;
+ encodeULEB128(Fields, OS);
+ for (auto &Field : {std::make_pair(StringRef("language"), &Section.Languages),
+ std::make_pair(StringRef("processed-by"), &Section.Tools),
+ std::make_pair(StringRef("sdk"), &Section.SDKs)}) {
+ if (Field.second->empty())
+ continue;
+ writeStringRef(Field.first, OS);
+ encodeULEB128(Field.second->size(), OS);
+ for (auto &Entry : *Field.second) {
+ writeStringRef(Entry.Name, OS);
+ writeStringRef(Entry.Version, OS);
+ }
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::TargetFeaturesSection &Section) {
+ writeStringRef(Section.Name, OS);
+ encodeULEB128(Section.Features.size(), OS);
+ for (auto &E : Section.Features) {
+ writeUint8(OS, E.Prefix);
+ writeStringRef(E.Name, OS);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::CustomSection &Section) {
+ if (auto S = dyn_cast<WasmYAML::DylinkSection>(&Section)) {
+ writeSectionContent(OS, *S);
+ } else if (auto S = dyn_cast<WasmYAML::NameSection>(&Section)) {
+ writeSectionContent(OS, *S);
+ } else if (auto S = dyn_cast<WasmYAML::LinkingSection>(&Section)) {
+ writeSectionContent(OS, *S);
+ } else if (auto S = dyn_cast<WasmYAML::ProducersSection>(&Section)) {
+ writeSectionContent(OS, *S);
+ } else if (auto S = dyn_cast<WasmYAML::TargetFeaturesSection>(&Section)) {
+ writeSectionContent(OS, *S);
+ } else {
+ writeStringRef(Section.Name, OS);
+ Section.Payload.writeAsBinary(OS);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::TypeSection &Section) {
+ encodeULEB128(Section.Signatures.size(), OS);
+ uint32_t ExpectedIndex = 0;
+ for (const WasmYAML::Signature &Sig : Section.Signatures) {
+ if (Sig.Index != ExpectedIndex) {
+ reportError("unexpected type index: " + Twine(Sig.Index));
+ return;
+ }
+ ++ExpectedIndex;
+ writeUint8(OS, Sig.Form);
+ encodeULEB128(Sig.ParamTypes.size(), OS);
+ for (auto ParamType : Sig.ParamTypes)
+ writeUint8(OS, ParamType);
+ encodeULEB128(Sig.ReturnTypes.size(), OS);
+ for (auto ReturnType : Sig.ReturnTypes)
+ writeUint8(OS, ReturnType);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::ImportSection &Section) {
+ encodeULEB128(Section.Imports.size(), OS);
+ for (const WasmYAML::Import &Import : Section.Imports) {
+ writeStringRef(Import.Module, OS);
+ writeStringRef(Import.Field, OS);
+ writeUint8(OS, Import.Kind);
+ switch (Import.Kind) {
+ case wasm::WASM_EXTERNAL_FUNCTION:
+ encodeULEB128(Import.SigIndex, OS);
+ NumImportedFunctions++;
+ break;
+ case wasm::WASM_EXTERNAL_GLOBAL:
+ writeUint8(OS, Import.GlobalImport.Type);
+ writeUint8(OS, Import.GlobalImport.Mutable);
+ NumImportedGlobals++;
+ break;
+ case wasm::WASM_EXTERNAL_EVENT:
+ writeUint32(OS, Import.EventImport.Attribute);
+ writeUint32(OS, Import.EventImport.SigIndex);
+ NumImportedEvents++;
+ break;
+ case wasm::WASM_EXTERNAL_MEMORY:
+ writeLimits(Import.Memory, OS);
+ break;
+ case wasm::WASM_EXTERNAL_TABLE:
+ writeUint8(OS, Import.TableImport.ElemType);
+ writeLimits(Import.TableImport.TableLimits, OS);
+ NumImportedTables++;
+ break;
+ default:
+ reportError("unknown import type: " +Twine(Import.Kind));
+ return;
+ }
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::FunctionSection &Section) {
+ encodeULEB128(Section.FunctionTypes.size(), OS);
+ for (uint32_t FuncType : Section.FunctionTypes)
+ encodeULEB128(FuncType, OS);
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::ExportSection &Section) {
+ encodeULEB128(Section.Exports.size(), OS);
+ for (const WasmYAML::Export &Export : Section.Exports) {
+ writeStringRef(Export.Name, OS);
+ writeUint8(OS, Export.Kind);
+ encodeULEB128(Export.Index, OS);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::StartSection &Section) {
+ encodeULEB128(Section.StartFunction, OS);
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::TableSection &Section) {
+ encodeULEB128(Section.Tables.size(), OS);
+ uint32_t ExpectedIndex = NumImportedTables;
+ for (auto &Table : Section.Tables) {
+ if (Table.Index != ExpectedIndex) {
+ reportError("unexpected table index: " + Twine(Table.Index));
+ return;
+ }
+ ++ExpectedIndex;
+ writeUint8(OS, Table.ElemType);
+ writeLimits(Table.TableLimits, OS);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::MemorySection &Section) {
+ encodeULEB128(Section.Memories.size(), OS);
+ for (const WasmYAML::Limits &Mem : Section.Memories)
+ writeLimits(Mem, OS);
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::EventSection &Section) {
+ encodeULEB128(Section.Events.size(), OS);
+ uint32_t ExpectedIndex = NumImportedEvents;
+ for (auto &Event : Section.Events) {
+ if (Event.Index != ExpectedIndex) {
+ reportError("unexpected event index: " + Twine(Event.Index));
+ return;
+ }
+ ++ExpectedIndex;
+ encodeULEB128(Event.Attribute, OS);
+ encodeULEB128(Event.SigIndex, OS);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::GlobalSection &Section) {
+ encodeULEB128(Section.Globals.size(), OS);
+ uint32_t ExpectedIndex = NumImportedGlobals;
+ for (auto &Global : Section.Globals) {
+ if (Global.Index != ExpectedIndex) {
+ reportError("unexpected global index: " + Twine(Global.Index));
+ return;
+ }
+ ++ExpectedIndex;
+ writeUint8(OS, Global.Type);
+ writeUint8(OS, Global.Mutable);
+ writeInitExpr(OS, Global.InitExpr);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::ElemSection &Section) {
+ encodeULEB128(Section.Segments.size(), OS);
+ for (auto &Segment : Section.Segments) {
+ encodeULEB128(Segment.TableIndex, OS);
+ writeInitExpr(OS, Segment.Offset);
+
+ encodeULEB128(Segment.Functions.size(), OS);
+ for (auto &Function : Segment.Functions)
+ encodeULEB128(Function, OS);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::CodeSection &Section) {
+ encodeULEB128(Section.Functions.size(), OS);
+ uint32_t ExpectedIndex = NumImportedFunctions;
+ for (auto &Func : Section.Functions) {
+ std::string OutString;
+ raw_string_ostream StringStream(OutString);
+ if (Func.Index != ExpectedIndex) {
+ reportError("unexpected function index: " + Twine(Func.Index));
+ return;
+ }
+ ++ExpectedIndex;
+
+ encodeULEB128(Func.Locals.size(), StringStream);
+ for (auto &LocalDecl : Func.Locals) {
+ encodeULEB128(LocalDecl.Count, StringStream);
+ writeUint8(StringStream, LocalDecl.Type);
+ }
+
+ Func.Body.writeAsBinary(StringStream);
+
+ // Write the function body size followed by its content
+ StringStream.flush();
+ encodeULEB128(OutString.size(), OS);
+ OS << OutString;
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::DataSection &Section) {
+ encodeULEB128(Section.Segments.size(), OS);
+ for (auto &Segment : Section.Segments) {
+ encodeULEB128(Segment.InitFlags, OS);
+ if (Segment.InitFlags & wasm::WASM_DATA_SEGMENT_HAS_MEMINDEX)
+ encodeULEB128(Segment.MemoryIndex, OS);
+ if ((Segment.InitFlags & wasm::WASM_DATA_SEGMENT_IS_PASSIVE) == 0)
+ writeInitExpr(OS, Segment.Offset);
+ encodeULEB128(Segment.Content.binary_size(), OS);
+ Segment.Content.writeAsBinary(OS);
+ }
+}
+
+void WasmWriter::writeSectionContent(raw_ostream &OS,
+ WasmYAML::DataCountSection &Section) {
+ encodeULEB128(Section.Count, OS);
+}
+
+void WasmWriter::writeRelocSection(raw_ostream &OS, WasmYAML::Section &Sec,
+ uint32_t SectionIndex) {
+ switch (Sec.Type) {
+ case wasm::WASM_SEC_CODE:
+ writeStringRef("reloc.CODE", OS);
+ break;
+ case wasm::WASM_SEC_DATA:
+ writeStringRef("reloc.DATA", OS);
+ break;
+ case wasm::WASM_SEC_CUSTOM: {
+ auto *CustomSection = cast<WasmYAML::CustomSection>(&Sec);
+ writeStringRef(("reloc." + CustomSection->Name).str(), OS);
+ break;
+ }
+ default:
+ llvm_unreachable("not yet implemented");
+ }
+
+ encodeULEB128(SectionIndex, OS);
+ encodeULEB128(Sec.Relocations.size(), OS);
+
+ for (auto Reloc : Sec.Relocations) {
+ writeUint8(OS, Reloc.Type);
+ encodeULEB128(Reloc.Offset, OS);
+ encodeULEB128(Reloc.Index, OS);
+ switch (Reloc.Type) {
+ case wasm::R_WASM_MEMORY_ADDR_LEB:
+ case wasm::R_WASM_MEMORY_ADDR_LEB64:
+ case wasm::R_WASM_MEMORY_ADDR_SLEB:
+ case wasm::R_WASM_MEMORY_ADDR_SLEB64:
+ case wasm::R_WASM_MEMORY_ADDR_I32:
+ case wasm::R_WASM_MEMORY_ADDR_I64:
+ case wasm::R_WASM_FUNCTION_OFFSET_I32:
+ case wasm::R_WASM_FUNCTION_OFFSET_I64:
+ case wasm::R_WASM_SECTION_OFFSET_I32:
+ encodeULEB128(Reloc.Addend, OS);
+ }
+ }
+}
+
+bool WasmWriter::writeWasm(raw_ostream &OS) {
+ // Write headers
+ OS.write(wasm::WasmMagic, sizeof(wasm::WasmMagic));
+ writeUint32(OS, Obj.Header.Version);
+
+ // Write each section
+ llvm::object::WasmSectionOrderChecker Checker;
+ for (const std::unique_ptr<WasmYAML::Section> &Sec : Obj.Sections) {
+ StringRef SecName = "";
+ if (auto S = dyn_cast<WasmYAML::CustomSection>(Sec.get()))
+ SecName = S->Name;
+ if (!Checker.isValidSectionOrder(Sec->Type, SecName)) {
+ reportError("out of order section type: " + Twine(Sec->Type));
+ return false;
+ }
+ encodeULEB128(Sec->Type, OS);
+ std::string OutString;
+ raw_string_ostream StringStream(OutString);
+ if (auto S = dyn_cast<WasmYAML::CustomSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::TypeSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::ImportSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::FunctionSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::TableSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::MemorySection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::EventSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::GlobalSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::ExportSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::StartSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::ElemSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::CodeSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::DataSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else if (auto S = dyn_cast<WasmYAML::DataCountSection>(Sec.get()))
+ writeSectionContent(StringStream, *S);
+ else
+ reportError("unknown section type: " + Twine(Sec->Type));
+
+ if (HasError)
+ return false;
+
+ StringStream.flush();
+
+ // Write the section size followed by the content
+ encodeULEB128(OutString.size(), OS);
+ OS << OutString;
+ }
+
+ // Write reloc sections for any sections that have relocations
+ uint32_t SectionIndex = 0;
+ for (const std::unique_ptr<WasmYAML::Section> &Sec : Obj.Sections) {
+ if (Sec->Relocations.empty()) {
+ SectionIndex++;
+ continue;
+ }
+
+ writeUint8(OS, wasm::WASM_SEC_CUSTOM);
+ std::string OutString;
+ raw_string_ostream StringStream(OutString);
+ writeRelocSection(StringStream, *Sec, SectionIndex++);
+ StringStream.flush();
+
+ encodeULEB128(OutString.size(), OS);
+ OS << OutString;
+ }
+
+ return true;
+}
+
+namespace llvm {
+namespace yaml {
+
+bool yaml2wasm(WasmYAML::Object &Doc, raw_ostream &Out, ErrorHandler EH) {
+ WasmWriter Writer(Doc, EH);
+ return Writer.writeWasm(Out);
+}
+
+} // namespace yaml
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/WasmYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/WasmYAML.cpp
new file mode 100644
index 00000000000..b4d2d113fb5
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/WasmYAML.cpp
@@ -0,0 +1,621 @@
+//===- WasmYAML.cpp - Wasm YAMLIO implementation --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of wasm.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/WasmYAML.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/YAMLTraits.h"
+
+namespace llvm {
+
+namespace WasmYAML {
+
+// Declared here rather than in the header to comply with:
+// http://llvm.org/docs/CodingStandards.html#provide-a-virtual-method-anchor-for-classes-in-headers
+Section::~Section() = default;
+
+} // end namespace WasmYAML
+
+namespace yaml {
+
+void MappingTraits<WasmYAML::FileHeader>::mapping(
+ IO &IO, WasmYAML::FileHeader &FileHdr) {
+ IO.mapRequired("Version", FileHdr.Version);
+}
+
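+// Illustrative only (field values are made up): a minimal document these
+// mappings accept looks roughly like
+//
+//   --- !WASM
+//   FileHeader:
+//     Version: 0x1
+//   Sections:
+//     - Type: TYPE
+//       Signatures: []
+//   ...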
+void MappingTraits<WasmYAML::Object>::mapping(IO &IO,
+ WasmYAML::Object &Object) {
+ IO.setContext(&Object);
+ IO.mapTag("!WASM", true);
+ IO.mapRequired("FileHeader", Object.Header);
+ IO.mapOptional("Sections", Object.Sections);
+ IO.setContext(nullptr);
+}
+
+static void commonSectionMapping(IO &IO, WasmYAML::Section &Section) {
+ IO.mapRequired("Type", Section.Type);
+ IO.mapOptional("Relocations", Section.Relocations);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::DylinkSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Name", Section.Name);
+ IO.mapRequired("MemorySize", Section.MemorySize);
+ IO.mapRequired("MemoryAlignment", Section.MemoryAlignment);
+ IO.mapRequired("TableSize", Section.TableSize);
+ IO.mapRequired("TableAlignment", Section.TableAlignment);
+ IO.mapRequired("Needed", Section.Needed);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::NameSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Name", Section.Name);
+ IO.mapOptional("FunctionNames", Section.FunctionNames);
+ IO.mapOptional("GlobalNames", Section.GlobalNames);
+ IO.mapOptional("DataSegmentNames", Section.DataSegmentNames);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::LinkingSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Name", Section.Name);
+ IO.mapRequired("Version", Section.Version);
+ IO.mapOptional("SymbolTable", Section.SymbolTable);
+ IO.mapOptional("SegmentInfo", Section.SegmentInfos);
+ IO.mapOptional("InitFunctions", Section.InitFunctions);
+ IO.mapOptional("Comdats", Section.Comdats);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::ProducersSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Name", Section.Name);
+ IO.mapOptional("Languages", Section.Languages);
+ IO.mapOptional("Tools", Section.Tools);
+ IO.mapOptional("SDKs", Section.SDKs);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::TargetFeaturesSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Name", Section.Name);
+ IO.mapRequired("Features", Section.Features);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::CustomSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Name", Section.Name);
+ IO.mapRequired("Payload", Section.Payload);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::TypeSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Signatures", Section.Signatures);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::ImportSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Imports", Section.Imports);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::FunctionSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("FunctionTypes", Section.FunctionTypes);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::TableSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Tables", Section.Tables);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::MemorySection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Memories", Section.Memories);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::EventSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Events", Section.Events);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::GlobalSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Globals", Section.Globals);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::ExportSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Exports", Section.Exports);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::StartSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("StartFunction", Section.StartFunction);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::ElemSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapOptional("Segments", Section.Segments);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::CodeSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Functions", Section.Functions);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::DataSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Segments", Section.Segments);
+}
+
+static void sectionMapping(IO &IO, WasmYAML::DataCountSection &Section) {
+ commonSectionMapping(IO, Section);
+ IO.mapRequired("Count", Section.Count);
+}
+
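+// Sections are polymorphic: on input the concrete subclass is not known
+// until the Type key (and, for custom sections, the Name key) has been
+// read, so the mapping below allocates the matching subclass lazily and
+// then delegates to the sectionMapping overload for it. On output the
+// existing object already carries its Type.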
+void MappingTraits<std::unique_ptr<WasmYAML::Section>>::mapping(
+ IO &IO, std::unique_ptr<WasmYAML::Section> &Section) {
+ WasmYAML::SectionType SectionType;
+ if (IO.outputting())
+ SectionType = Section->Type;
+ else
+ IO.mapRequired("Type", SectionType);
+
+ switch (SectionType) {
+ case wasm::WASM_SEC_CUSTOM: {
+ StringRef SectionName;
+ if (IO.outputting()) {
+ auto CustomSection = cast<WasmYAML::CustomSection>(Section.get());
+ SectionName = CustomSection->Name;
+ } else {
+ IO.mapRequired("Name", SectionName);
+ }
+ if (SectionName == "dylink") {
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::DylinkSection());
+ sectionMapping(IO, *cast<WasmYAML::DylinkSection>(Section.get()));
+ } else if (SectionName == "linking") {
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::LinkingSection());
+ sectionMapping(IO, *cast<WasmYAML::LinkingSection>(Section.get()));
+ } else if (SectionName == "name") {
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::NameSection());
+ sectionMapping(IO, *cast<WasmYAML::NameSection>(Section.get()));
+ } else if (SectionName == "producers") {
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::ProducersSection());
+ sectionMapping(IO, *cast<WasmYAML::ProducersSection>(Section.get()));
+ } else if (SectionName == "target_features") {
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::TargetFeaturesSection());
+ sectionMapping(IO, *cast<WasmYAML::TargetFeaturesSection>(Section.get()));
+ } else {
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::CustomSection(SectionName));
+ sectionMapping(IO, *cast<WasmYAML::CustomSection>(Section.get()));
+ }
+ break;
+ }
+ case wasm::WASM_SEC_TYPE:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::TypeSection());
+ sectionMapping(IO, *cast<WasmYAML::TypeSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_IMPORT:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::ImportSection());
+ sectionMapping(IO, *cast<WasmYAML::ImportSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_FUNCTION:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::FunctionSection());
+ sectionMapping(IO, *cast<WasmYAML::FunctionSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_TABLE:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::TableSection());
+ sectionMapping(IO, *cast<WasmYAML::TableSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_MEMORY:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::MemorySection());
+ sectionMapping(IO, *cast<WasmYAML::MemorySection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_EVENT:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::EventSection());
+ sectionMapping(IO, *cast<WasmYAML::EventSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_GLOBAL:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::GlobalSection());
+ sectionMapping(IO, *cast<WasmYAML::GlobalSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_EXPORT:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::ExportSection());
+ sectionMapping(IO, *cast<WasmYAML::ExportSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_START:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::StartSection());
+ sectionMapping(IO, *cast<WasmYAML::StartSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_ELEM:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::ElemSection());
+ sectionMapping(IO, *cast<WasmYAML::ElemSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_CODE:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::CodeSection());
+ sectionMapping(IO, *cast<WasmYAML::CodeSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_DATA:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::DataSection());
+ sectionMapping(IO, *cast<WasmYAML::DataSection>(Section.get()));
+ break;
+ case wasm::WASM_SEC_DATACOUNT:
+ if (!IO.outputting())
+ Section.reset(new WasmYAML::DataCountSection());
+ sectionMapping(IO, *cast<WasmYAML::DataCountSection>(Section.get()));
+ break;
+ default:
+ llvm_unreachable("Unknown section type");
+ }
+}
+
+void ScalarEnumerationTraits<WasmYAML::SectionType>::enumeration(
+ IO &IO, WasmYAML::SectionType &Type) {
+#define ECase(X) IO.enumCase(Type, #X, wasm::WASM_SEC_##X);
+ ECase(CUSTOM);
+ ECase(TYPE);
+ ECase(IMPORT);
+ ECase(FUNCTION);
+ ECase(TABLE);
+ ECase(MEMORY);
+ ECase(GLOBAL);
+ ECase(EVENT);
+ ECase(EXPORT);
+ ECase(START);
+ ECase(ELEM);
+ ECase(CODE);
+ ECase(DATA);
+ ECase(DATACOUNT);
+#undef ECase
+}
+
+void MappingTraits<WasmYAML::Signature>::mapping(
+ IO &IO, WasmYAML::Signature &Signature) {
+ IO.mapRequired("Index", Signature.Index);
+ IO.mapRequired("ParamTypes", Signature.ParamTypes);
+ IO.mapRequired("ReturnTypes", Signature.ReturnTypes);
+}
+
+void MappingTraits<WasmYAML::Table>::mapping(IO &IO, WasmYAML::Table &Table) {
+ IO.mapRequired("Index", Table.Index);
+ IO.mapRequired("ElemType", Table.ElemType);
+ IO.mapRequired("Limits", Table.TableLimits);
+}
+
+void MappingTraits<WasmYAML::Function>::mapping(IO &IO,
+ WasmYAML::Function &Function) {
+ IO.mapRequired("Index", Function.Index);
+ IO.mapRequired("Locals", Function.Locals);
+ IO.mapRequired("Body", Function.Body);
+}
+
+void MappingTraits<WasmYAML::Relocation>::mapping(
+ IO &IO, WasmYAML::Relocation &Relocation) {
+ IO.mapRequired("Type", Relocation.Type);
+ IO.mapRequired("Index", Relocation.Index);
+ IO.mapRequired("Offset", Relocation.Offset);
+ IO.mapOptional("Addend", Relocation.Addend, 0);
+}
+
+void MappingTraits<WasmYAML::NameEntry>::mapping(
+ IO &IO, WasmYAML::NameEntry &NameEntry) {
+ IO.mapRequired("Index", NameEntry.Index);
+ IO.mapRequired("Name", NameEntry.Name);
+}
+
+void MappingTraits<WasmYAML::ProducerEntry>::mapping(
+ IO &IO, WasmYAML::ProducerEntry &ProducerEntry) {
+ IO.mapRequired("Name", ProducerEntry.Name);
+ IO.mapRequired("Version", ProducerEntry.Version);
+}
+
+void ScalarEnumerationTraits<WasmYAML::FeaturePolicyPrefix>::enumeration(
+ IO &IO, WasmYAML::FeaturePolicyPrefix &Kind) {
+#define ECase(X) IO.enumCase(Kind, #X, wasm::WASM_FEATURE_PREFIX_##X);
+ ECase(USED);
+ ECase(REQUIRED);
+ ECase(DISALLOWED);
+#undef ECase
+}
+
+void MappingTraits<WasmYAML::FeatureEntry>::mapping(
+ IO &IO, WasmYAML::FeatureEntry &FeatureEntry) {
+ IO.mapRequired("Prefix", FeatureEntry.Prefix);
+ IO.mapRequired("Name", FeatureEntry.Name);
+}
+
+void MappingTraits<WasmYAML::SegmentInfo>::mapping(
+ IO &IO, WasmYAML::SegmentInfo &SegmentInfo) {
+ IO.mapRequired("Index", SegmentInfo.Index);
+ IO.mapRequired("Name", SegmentInfo.Name);
+ IO.mapRequired("Alignment", SegmentInfo.Alignment);
+ IO.mapRequired("Flags", SegmentInfo.Flags);
+}
+
+void MappingTraits<WasmYAML::LocalDecl>::mapping(
+ IO &IO, WasmYAML::LocalDecl &LocalDecl) {
+ IO.mapRequired("Type", LocalDecl.Type);
+ IO.mapRequired("Count", LocalDecl.Count);
+}
+
+void MappingTraits<WasmYAML::Limits>::mapping(IO &IO,
+ WasmYAML::Limits &Limits) {
+ if (!IO.outputting() || Limits.Flags)
+ IO.mapOptional("Flags", Limits.Flags);
+ IO.mapRequired("Initial", Limits.Initial);
+ if (!IO.outputting() || Limits.Flags & wasm::WASM_LIMITS_FLAG_HAS_MAX)
+ IO.mapOptional("Maximum", Limits.Maximum);
+}
+
+void MappingTraits<WasmYAML::ElemSegment>::mapping(
+ IO &IO, WasmYAML::ElemSegment &Segment) {
+ IO.mapRequired("Offset", Segment.Offset);
+ IO.mapRequired("Functions", Segment.Functions);
+}
+
+void MappingTraits<WasmYAML::Import>::mapping(IO &IO,
+ WasmYAML::Import &Import) {
+ IO.mapRequired("Module", Import.Module);
+ IO.mapRequired("Field", Import.Field);
+ IO.mapRequired("Kind", Import.Kind);
+ if (Import.Kind == wasm::WASM_EXTERNAL_FUNCTION) {
+ IO.mapRequired("SigIndex", Import.SigIndex);
+ } else if (Import.Kind == wasm::WASM_EXTERNAL_GLOBAL) {
+ IO.mapRequired("GlobalType", Import.GlobalImport.Type);
+ IO.mapRequired("GlobalMutable", Import.GlobalImport.Mutable);
+ } else if (Import.Kind == wasm::WASM_EXTERNAL_EVENT) {
+ IO.mapRequired("EventAttribute", Import.EventImport.Attribute);
+ IO.mapRequired("EventSigIndex", Import.EventImport.SigIndex);
+ } else if (Import.Kind == wasm::WASM_EXTERNAL_TABLE) {
+ IO.mapRequired("Table", Import.TableImport);
+ } else if (Import.Kind == wasm::WASM_EXTERNAL_MEMORY) {
+ IO.mapRequired("Memory", Import.Memory);
+ } else {
+ llvm_unreachable("unhandled import type");
+ }
+}
+
+void MappingTraits<WasmYAML::Export>::mapping(IO &IO,
+ WasmYAML::Export &Export) {
+ IO.mapRequired("Name", Export.Name);
+ IO.mapRequired("Kind", Export.Kind);
+ IO.mapRequired("Index", Export.Index);
+}
+
+void MappingTraits<WasmYAML::Global>::mapping(IO &IO,
+ WasmYAML::Global &Global) {
+ IO.mapRequired("Index", Global.Index);
+ IO.mapRequired("Type", Global.Type);
+ IO.mapRequired("Mutable", Global.Mutable);
+ IO.mapRequired("InitExpr", Global.InitExpr);
+}
+
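+// An init expression maps to an opcode plus one opcode-specific key, e.g.
+// (illustrative) "Opcode: I32_CONST, Value: 42" or
+// "Opcode: GLOBAL_GET, Index: 0".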
+void MappingTraits<wasm::WasmInitExpr>::mapping(IO &IO,
+ wasm::WasmInitExpr &Expr) {
+ WasmYAML::Opcode Op = Expr.Opcode;
+ IO.mapRequired("Opcode", Op);
+ Expr.Opcode = Op;
+ switch (Expr.Opcode) {
+ case wasm::WASM_OPCODE_I32_CONST:
+ IO.mapRequired("Value", Expr.Value.Int32);
+ break;
+ case wasm::WASM_OPCODE_I64_CONST:
+ IO.mapRequired("Value", Expr.Value.Int64);
+ break;
+ case wasm::WASM_OPCODE_F32_CONST:
+ IO.mapRequired("Value", Expr.Value.Float32);
+ break;
+ case wasm::WASM_OPCODE_F64_CONST:
+ IO.mapRequired("Value", Expr.Value.Float64);
+ break;
+ case wasm::WASM_OPCODE_GLOBAL_GET:
+ IO.mapRequired("Index", Expr.Value.Global);
+ break;
+ case wasm::WASM_OPCODE_REF_NULL: {
+ WasmYAML::ValueType Ty = wasm::WASM_TYPE_EXTERNREF;
+ IO.mapRequired("Type", Ty);
+ break;
+ }
+ }
+}
+
+void MappingTraits<WasmYAML::DataSegment>::mapping(
+ IO &IO, WasmYAML::DataSegment &Segment) {
+ IO.mapOptional("SectionOffset", Segment.SectionOffset);
+ IO.mapRequired("InitFlags", Segment.InitFlags);
+ if (Segment.InitFlags & wasm::WASM_DATA_SEGMENT_HAS_MEMINDEX) {
+ IO.mapRequired("MemoryIndex", Segment.MemoryIndex);
+ } else {
+ Segment.MemoryIndex = 0;
+ }
+ if ((Segment.InitFlags & wasm::WASM_DATA_SEGMENT_IS_PASSIVE) == 0) {
+ IO.mapRequired("Offset", Segment.Offset);
+ } else {
+ Segment.Offset.Opcode = wasm::WASM_OPCODE_I32_CONST;
+ Segment.Offset.Value.Int32 = 0;
+ }
+ IO.mapRequired("Content", Segment.Content);
+}
+
+void MappingTraits<WasmYAML::InitFunction>::mapping(
+ IO &IO, WasmYAML::InitFunction &Init) {
+ IO.mapRequired("Priority", Init.Priority);
+ IO.mapRequired("Symbol", Init.Symbol);
+}
+
+void ScalarEnumerationTraits<WasmYAML::ComdatKind>::enumeration(
+ IO &IO, WasmYAML::ComdatKind &Kind) {
+#define ECase(X) IO.enumCase(Kind, #X, wasm::WASM_COMDAT_##X);
+ ECase(FUNCTION);
+ ECase(DATA);
+ ECase(SECTION);
+#undef ECase
+}
+
+void MappingTraits<WasmYAML::ComdatEntry>::mapping(
+ IO &IO, WasmYAML::ComdatEntry &ComdatEntry) {
+ IO.mapRequired("Kind", ComdatEntry.Kind);
+ IO.mapRequired("Index", ComdatEntry.Index);
+}
+
+void MappingTraits<WasmYAML::Comdat>::mapping(IO &IO,
+ WasmYAML::Comdat &Comdat) {
+ IO.mapRequired("Name", Comdat.Name);
+ IO.mapRequired("Entries", Comdat.Entries);
+}
+
+void MappingTraits<WasmYAML::SymbolInfo>::mapping(IO &IO,
+ WasmYAML::SymbolInfo &Info) {
+ IO.mapRequired("Index", Info.Index);
+ IO.mapRequired("Kind", Info.Kind);
+ if (Info.Kind != wasm::WASM_SYMBOL_TYPE_SECTION)
+ IO.mapRequired("Name", Info.Name);
+ IO.mapRequired("Flags", Info.Flags);
+ if (Info.Kind == wasm::WASM_SYMBOL_TYPE_FUNCTION) {
+ IO.mapRequired("Function", Info.ElementIndex);
+ } else if (Info.Kind == wasm::WASM_SYMBOL_TYPE_GLOBAL) {
+ IO.mapRequired("Global", Info.ElementIndex);
+ } else if (Info.Kind == wasm::WASM_SYMBOL_TYPE_TABLE) {
+ IO.mapRequired("Table", Info.ElementIndex);
+ } else if (Info.Kind == wasm::WASM_SYMBOL_TYPE_EVENT) {
+ IO.mapRequired("Event", Info.ElementIndex);
+ } else if (Info.Kind == wasm::WASM_SYMBOL_TYPE_DATA) {
+ if ((Info.Flags & wasm::WASM_SYMBOL_UNDEFINED) == 0) {
+ IO.mapRequired("Segment", Info.DataRef.Segment);
+ IO.mapOptional("Offset", Info.DataRef.Offset, 0u);
+ IO.mapRequired("Size", Info.DataRef.Size);
+ }
+ } else if (Info.Kind == wasm::WASM_SYMBOL_TYPE_SECTION) {
+ IO.mapRequired("Section", Info.ElementIndex);
+ } else {
+ llvm_unreachable("unsupported symbol kind");
+ }
+}
+
+void MappingTraits<WasmYAML::Event>::mapping(IO &IO, WasmYAML::Event &Event) {
+ IO.mapRequired("Index", Event.Index);
+ IO.mapRequired("Attribute", Event.Attribute);
+ IO.mapRequired("SigIndex", Event.SigIndex);
+}
+
+void ScalarBitSetTraits<WasmYAML::LimitFlags>::bitset(
+ IO &IO, WasmYAML::LimitFlags &Value) {
+#define BCase(X) IO.bitSetCase(Value, #X, wasm::WASM_LIMITS_FLAG_##X)
+ BCase(HAS_MAX);
+ BCase(IS_SHARED);
+ BCase(IS_64);
+#undef BCase
+}
+
+void ScalarBitSetTraits<WasmYAML::SegmentFlags>::bitset(
+ IO &IO, WasmYAML::SegmentFlags &Value) {}
+
+void ScalarBitSetTraits<WasmYAML::SymbolFlags>::bitset(
+ IO &IO, WasmYAML::SymbolFlags &Value) {
+#define BCaseMask(M, X) \
+ IO.maskedBitSetCase(Value, #X, wasm::WASM_SYMBOL_##X, wasm::WASM_SYMBOL_##M)
+ // BCaseMask(BINDING_MASK, BINDING_GLOBAL);
+ BCaseMask(BINDING_MASK, BINDING_WEAK);
+ BCaseMask(BINDING_MASK, BINDING_LOCAL);
+ // BCaseMask(VISIBILITY_MASK, VISIBILITY_DEFAULT);
+ BCaseMask(VISIBILITY_MASK, VISIBILITY_HIDDEN);
+ BCaseMask(UNDEFINED, UNDEFINED);
+ BCaseMask(EXPORTED, EXPORTED);
+ BCaseMask(EXPLICIT_NAME, EXPLICIT_NAME);
+ BCaseMask(NO_STRIP, NO_STRIP);
+#undef BCaseMask
+}
+
+void ScalarEnumerationTraits<WasmYAML::SymbolKind>::enumeration(
+ IO &IO, WasmYAML::SymbolKind &Kind) {
+#define ECase(X) IO.enumCase(Kind, #X, wasm::WASM_SYMBOL_TYPE_##X);
+ ECase(FUNCTION);
+ ECase(DATA);
+ ECase(GLOBAL);
+ ECase(TABLE);
+ ECase(SECTION);
+ ECase(EVENT);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<WasmYAML::ValueType>::enumeration(
+ IO &IO, WasmYAML::ValueType &Type) {
+#define ECase(X) IO.enumCase(Type, #X, wasm::WASM_TYPE_##X);
+ ECase(I32);
+ ECase(I64);
+ ECase(F32);
+ ECase(F64);
+ ECase(V128);
+ ECase(FUNCREF);
+ ECase(EXTERNREF);
+ ECase(FUNC);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<WasmYAML::ExportKind>::enumeration(
+ IO &IO, WasmYAML::ExportKind &Kind) {
+#define ECase(X) IO.enumCase(Kind, #X, wasm::WASM_EXTERNAL_##X);
+ ECase(FUNCTION);
+ ECase(TABLE);
+ ECase(MEMORY);
+ ECase(GLOBAL);
+ ECase(EVENT);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<WasmYAML::Opcode>::enumeration(
+ IO &IO, WasmYAML::Opcode &Code) {
+#define ECase(X) IO.enumCase(Code, #X, wasm::WASM_OPCODE_##X);
+ ECase(END);
+ ECase(I32_CONST);
+ ECase(I64_CONST);
+ ECase(F64_CONST);
+ ECase(F32_CONST);
+ ECase(GLOBAL_GET);
+ ECase(REF_NULL);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<WasmYAML::TableType>::enumeration(
+ IO &IO, WasmYAML::TableType &Type) {
+#define ECase(X) IO.enumCase(Type, #X, wasm::WASM_TYPE_##X);
+ ECase(FUNCREF);
+ ECase(EXTERNREF);
+#undef ECase
+}
+
+void ScalarEnumerationTraits<WasmYAML::RelocType>::enumeration(
+ IO &IO, WasmYAML::RelocType &Type) {
+#define WASM_RELOC(name, value) IO.enumCase(Type, #name, wasm::name);
+#include "llvm/BinaryFormat/WasmRelocs.def"
+#undef WASM_RELOC
+}
+
+} // end namespace yaml
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/XCOFFYAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/XCOFFYAML.cpp
new file mode 100644
index 00000000000..982e6aecbb9
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/XCOFFYAML.cpp
@@ -0,0 +1,109 @@
+//===-- XCOFFYAML.cpp - XCOFF YAMLIO implementation -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines classes for handling the YAML representation of XCOFF.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/XCOFFYAML.h"
+#include "llvm/BinaryFormat/XCOFF.h"
+#include <string.h>
+
+namespace llvm {
+namespace XCOFFYAML {
+
+Object::Object() { memset(&Header, 0, sizeof(Header)); }
+
+} // namespace XCOFFYAML
+
+namespace yaml {
+
+void ScalarEnumerationTraits<XCOFF::StorageClass>::enumeration(
+ IO &IO, XCOFF::StorageClass &Value) {
+#define ECase(X) IO.enumCase(Value, #X, XCOFF::X)
+ ECase(C_NULL);
+ ECase(C_AUTO);
+ ECase(C_EXT);
+ ECase(C_STAT);
+ ECase(C_REG);
+ ECase(C_EXTDEF);
+ ECase(C_LABEL);
+ ECase(C_ULABEL);
+ ECase(C_MOS);
+ ECase(C_ARG);
+ ECase(C_STRTAG);
+ ECase(C_MOU);
+ ECase(C_UNTAG);
+ ECase(C_TPDEF);
+ ECase(C_USTATIC);
+ ECase(C_ENTAG);
+ ECase(C_MOE);
+ ECase(C_REGPARM);
+ ECase(C_FIELD);
+ ECase(C_BLOCK);
+ ECase(C_FCN);
+ ECase(C_EOS);
+ ECase(C_FILE);
+ ECase(C_LINE);
+ ECase(C_ALIAS);
+ ECase(C_HIDDEN);
+ ECase(C_HIDEXT);
+ ECase(C_BINCL);
+ ECase(C_EINCL);
+ ECase(C_INFO);
+ ECase(C_WEAKEXT);
+ ECase(C_DWARF);
+ ECase(C_GSYM);
+ ECase(C_LSYM);
+ ECase(C_PSYM);
+ ECase(C_RSYM);
+ ECase(C_RPSYM);
+ ECase(C_STSYM);
+ ECase(C_TCSYM);
+ ECase(C_BCOMM);
+ ECase(C_ECOML);
+ ECase(C_ECOMM);
+ ECase(C_DECL);
+ ECase(C_ENTRY);
+ ECase(C_FUN);
+ ECase(C_BSTAT);
+ ECase(C_ESTAT);
+ ECase(C_GTLS);
+ ECase(C_STTLS);
+ ECase(C_EFCN);
+#undef ECase
+}
+
+void MappingTraits<XCOFFYAML::FileHeader>::mapping(
+ IO &IO, XCOFFYAML::FileHeader &FileHdr) {
+ IO.mapRequired("MagicNumber", FileHdr.Magic);
+ IO.mapRequired("NumberOfSections", FileHdr.NumberOfSections);
+ IO.mapRequired("CreationTime", FileHdr.TimeStamp);
+ IO.mapRequired("OffsetToSymbolTable", FileHdr.SymbolTableOffset);
+ IO.mapRequired("EntriesInSymbolTable", FileHdr.NumberOfSymTableEntries);
+ IO.mapRequired("AuxiliaryHeaderSize", FileHdr.AuxHeaderSize);
+ IO.mapRequired("Flags", FileHdr.Flags);
+}
+
+void MappingTraits<XCOFFYAML::Symbol>::mapping(IO &IO, XCOFFYAML::Symbol &S) {
+ IO.mapRequired("Name", S.SymbolName);
+ IO.mapRequired("Value", S.Value);
+ IO.mapRequired("Section", S.SectionName);
+ IO.mapRequired("Type", S.Type);
+ IO.mapRequired("StorageClass", S.StorageClass);
+ IO.mapRequired("NumberOfAuxEntries", S.NumberOfAuxEntries);
+}
+
+void MappingTraits<XCOFFYAML::Object>::mapping(IO &IO, XCOFFYAML::Object &Obj) {
+ IO.mapTag("!XCOFF", true);
+ IO.mapRequired("FileHeader", Obj.Header);
+ IO.mapRequired("Symbols", Obj.Symbols);
+}
+
+} // namespace yaml
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/YAML.cpp b/contrib/libs/llvm12/lib/ObjectYAML/YAML.cpp
new file mode 100644
index 00000000000..5dcb113d339
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/YAML.cpp
@@ -0,0 +1,64 @@
+//===- YAML.cpp - YAMLIO utilities for object files -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines utility classes for handling the YAML representation of
+// object files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/YAML.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cctype>
+#include <cstdint>
+
+using namespace llvm;
+
+void yaml::ScalarTraits<yaml::BinaryRef>::output(
+ const yaml::BinaryRef &Val, void *, raw_ostream &Out) {
+ Val.writeAsHex(Out);
+}
+
+StringRef yaml::ScalarTraits<yaml::BinaryRef>::input(StringRef Scalar, void *,
+ yaml::BinaryRef &Val) {
+ if (Scalar.size() % 2 != 0)
+ return "BinaryRef hex string must contain an even number of nybbles.";
+ // TODO: Can we improve YAMLIO to permit a more accurate diagnostic here?
+ // (e.g. a caret pointing to the offending character).
+ for (unsigned I = 0, N = Scalar.size(); I != N; ++I)
+ if (!llvm::isHexDigit(Scalar[I]))
+ return "BinaryRef hex string must contain only hex digits.";
+ Val = yaml::BinaryRef(Scalar);
+ return {};
+}
+
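+// When the data is stored as a hex string, every two nybbles decode to one
+// byte: e.g. (illustrative) "DEADBEEF" becomes the four bytes 0xDE 0xAD
+// 0xBE 0xEF. N caps the number of bytes written in either representation.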
+void yaml::BinaryRef::writeAsBinary(raw_ostream &OS, uint64_t N) const {
+ if (!DataIsHexString) {
+ OS.write((const char *)Data.data(), std::min<uint64_t>(N, Data.size()));
+ return;
+ }
+
+ for (uint64_t I = 0, E = std::min<uint64_t>(N, Data.size() / 2); I != E;
+ ++I) {
+ uint8_t Byte = llvm::hexDigitValue(Data[I * 2]);
+ Byte <<= 4;
+ Byte |= llvm::hexDigitValue(Data[I * 2 + 1]);
+ OS.write(Byte);
+ }
+}
+
+void yaml::BinaryRef::writeAsHex(raw_ostream &OS) const {
+ if (binary_size() == 0)
+ return;
+ if (DataIsHexString) {
+ OS.write((const char *)Data.data(), Data.size());
+ return;
+ }
+ for (uint8_t Byte : Data)
+ OS << hexdigit(Byte >> 4) << hexdigit(Byte & 0xf);
+}
diff --git a/contrib/libs/llvm12/lib/ObjectYAML/yaml2obj.cpp b/contrib/libs/llvm12/lib/ObjectYAML/yaml2obj.cpp
new file mode 100644
index 00000000000..ef2ab83dcd2
--- /dev/null
+++ b/contrib/libs/llvm12/lib/ObjectYAML/yaml2obj.cpp
@@ -0,0 +1,79 @@
+//===-- yaml2obj.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ObjectYAML/yaml2obj.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ObjectYAML/ObjectYAML.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/WithColor.h"
+#include "llvm/Support/YAMLTraits.h"
+
+namespace llvm {
+namespace yaml {
+
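+// DocNum is 1-based: a YAML stream may hold several "---"-separated
+// documents, and only the DocNum'th one is converted; the others are
+// skipped without being parsed into an object.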
+bool convertYAML(yaml::Input &YIn, raw_ostream &Out, ErrorHandler ErrHandler,
+ unsigned DocNum, uint64_t MaxSize) {
+ unsigned CurDocNum = 0;
+ do {
+ if (++CurDocNum != DocNum)
+ continue;
+
+ yaml::YamlObjectFile Doc;
+ YIn >> Doc;
+ if (std::error_code EC = YIn.error()) {
+ ErrHandler("failed to parse YAML input: " + EC.message());
+ return false;
+ }
+
+ if (Doc.Arch)
+ return yaml2archive(*Doc.Arch, Out, ErrHandler);
+ if (Doc.Elf)
+ return yaml2elf(*Doc.Elf, Out, ErrHandler, MaxSize);
+ if (Doc.Coff)
+ return yaml2coff(*Doc.Coff, Out, ErrHandler);
+ if (Doc.MachO || Doc.FatMachO)
+ return yaml2macho(Doc, Out, ErrHandler);
+ if (Doc.Minidump)
+ return yaml2minidump(*Doc.Minidump, Out, ErrHandler);
+ if (Doc.Wasm)
+ return yaml2wasm(*Doc.Wasm, Out, ErrHandler);
+
+ ErrHandler("unknown document type");
+ return false;
+
+ } while (YIn.nextDocument());
+
+ ErrHandler("cannot find the " + Twine(DocNum) +
+ getOrdinalSuffix(DocNum).data() + " document");
+ return false;
+}
+
+std::unique_ptr<object::ObjectFile>
+yaml2ObjectFile(SmallVectorImpl<char> &Storage, StringRef Yaml,
+ ErrorHandler ErrHandler) {
+ Storage.clear();
+ raw_svector_ostream OS(Storage);
+
+ yaml::Input YIn(Yaml);
+ if (!convertYAML(YIn, OS, ErrHandler))
+ return {};
+
+ Expected<std::unique_ptr<object::ObjectFile>> ObjOrErr =
+ object::ObjectFile::createObjectFile(
+ MemoryBufferRef(OS.str(), "YamlObject"));
+ if (ObjOrErr)
+ return std::move(*ObjOrErr);
+
+ ErrHandler(toString(ObjOrErr.takeError()));
+ return {};
+}
+
+} // namespace yaml
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/Option/Arg.cpp b/contrib/libs/llvm12/lib/Option/Arg.cpp
new file mode 100644
index 00000000000..2da32bfacf3
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Option/Arg.cpp
@@ -0,0 +1,125 @@
+//===- Arg.cpp - Argument Implementations ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::opt;
+
+Arg::Arg(const Option Opt, StringRef S, unsigned Index, const Arg *BaseArg)
+ : Opt(Opt), BaseArg(BaseArg), Spelling(S), Index(Index), Claimed(false),
+ OwnsValues(false) {}
+
+Arg::Arg(const Option Opt, StringRef S, unsigned Index, const char *Value0,
+ const Arg *BaseArg)
+ : Opt(Opt), BaseArg(BaseArg), Spelling(S), Index(Index), Claimed(false),
+ OwnsValues(false) {
+ Values.push_back(Value0);
+}
+
+Arg::Arg(const Option Opt, StringRef S, unsigned Index, const char *Value0,
+ const char *Value1, const Arg *BaseArg)
+ : Opt(Opt), BaseArg(BaseArg), Spelling(S), Index(Index), Claimed(false),
+ OwnsValues(false) {
+ Values.push_back(Value0);
+ Values.push_back(Value1);
+}
+
+Arg::~Arg() {
+ if (OwnsValues) {
+ for (unsigned i = 0, e = Values.size(); i != e; ++i)
+ delete[] Values[i];
+ }
+}
+
+void Arg::print(raw_ostream& O) const {
+ O << "<";
+
+ O << " Opt:";
+ Opt.print(O);
+
+ O << " Index:" << Index;
+
+ O << " Values: [";
+ for (unsigned i = 0, e = Values.size(); i != e; ++i) {
+ if (i) O << ", ";
+ O << "'" << Values[i] << "'";
+ }
+
+ O << "]>\n";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void Arg::dump() const { print(dbgs()); }
+#endif
+
+std::string Arg::getAsString(const ArgList &Args) const {
+ if (Alias)
+ return Alias->getAsString(Args);
+
+ SmallString<256> Res;
+ raw_svector_ostream OS(Res);
+
+ ArgStringList ASL;
+ render(Args, ASL);
+ for (ArgStringList::iterator
+ it = ASL.begin(), ie = ASL.end(); it != ie; ++it) {
+ if (it != ASL.begin())
+ OS << ' ';
+ OS << *it;
+ }
+
+ return std::string(OS.str());
+}
+
+void Arg::renderAsInput(const ArgList &Args, ArgStringList &Output) const {
+ if (!getOption().hasNoOptAsInput()) {
+ render(Args, Output);
+ return;
+ }
+
+ Output.append(Values.begin(), Values.end());
+}
+
+void Arg::render(const ArgList &Args, ArgStringList &Output) const {
+ switch (getOption().getRenderStyle()) {
+ case Option::RenderValuesStyle:
+ Output.append(Values.begin(), Values.end());
+ break;
+
+ case Option::RenderCommaJoinedStyle: {
+ SmallString<256> Res;
+ raw_svector_ostream OS(Res);
+ OS << getSpelling();
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
+ if (i) OS << ',';
+ OS << getValue(i);
+ }
+ Output.push_back(Args.MakeArgString(OS.str()));
+ break;
+ }
+
+ case Option::RenderJoinedStyle:
+ Output.push_back(Args.GetOrMakeJoinedArgString(
+ getIndex(), getSpelling(), getValue(0)));
+ Output.append(Values.begin() + 1, Values.end());
+ break;
+
+ case Option::RenderSeparateStyle:
+ Output.push_back(Args.MakeArgString(getSpelling()));
+ Output.append(Values.begin(), Values.end());
+ break;
+ }
+}
diff --git a/contrib/libs/llvm12/lib/Option/ArgList.cpp b/contrib/libs/llvm12/lib/Option/ArgList.cpp
new file mode 100644
index 00000000000..ad7be5fbec1
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Option/ArgList.cpp
@@ -0,0 +1,274 @@
+//===- ArgList.cpp - Argument List Management -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Option/OptSpecifier.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+using namespace llvm::opt;
+
+void ArgList::append(Arg *A) {
+ Args.push_back(A);
+
+ // Update ranges for the option and all of its groups.
+ for (Option O = A->getOption().getUnaliasedOption(); O.isValid();
+ O = O.getGroup()) {
+ auto &R =
+ OptRanges.insert(std::make_pair(O.getID(), emptyRange())).first->second;
+ R.first = std::min<unsigned>(R.first, Args.size() - 1);
+ R.second = Args.size();
+ }
+}
+
+void ArgList::eraseArg(OptSpecifier Id) {
+ // Zero out the removed entries but keep them around so that we don't
+ // need to invalidate OptRanges.
+ for (Arg *const &A : filtered(Id)) {
+ // Avoid the need for a non-const filtered iterator variant.
+ Arg **ArgsBegin = Args.data();
+ ArgsBegin[&A - ArgsBegin] = nullptr;
+ }
+ OptRanges.erase(Id.getID());
+}
+
+ArgList::OptRange
+ArgList::getRange(std::initializer_list<OptSpecifier> Ids) const {
+ OptRange R = emptyRange();
+ for (auto Id : Ids) {
+ auto I = OptRanges.find(Id.getID());
+ if (I != OptRanges.end()) {
+ R.first = std::min(R.first, I->second.first);
+ R.second = std::max(R.second, I->second.second);
+ }
+ }
+ // Map an empty {-1, 0} range to {0, 0} so it can be used to form iterators.
+ if (R.first == -1u)
+ R.first = 0;
+ return R;
+}
+
+bool ArgList::hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default) const {
+ if (Arg *A = getLastArg(Pos, Neg))
+ return A->getOption().matches(Pos);
+ return Default;
+}
+
+bool ArgList::hasFlag(OptSpecifier Pos, OptSpecifier PosAlias, OptSpecifier Neg,
+ bool Default) const {
+ if (Arg *A = getLastArg(Pos, PosAlias, Neg))
+ return A->getOption().matches(Pos) || A->getOption().matches(PosAlias);
+ return Default;
+}
+
+StringRef ArgList::getLastArgValue(OptSpecifier Id, StringRef Default) const {
+ if (Arg *A = getLastArg(Id))
+ return A->getValue();
+ return Default;
+}
+
+std::vector<std::string> ArgList::getAllArgValues(OptSpecifier Id) const {
+ SmallVector<const char *, 16> Values;
+ AddAllArgValues(Values, Id);
+ return std::vector<std::string>(Values.begin(), Values.end());
+}
+
+void ArgList::AddAllArgsExcept(ArgStringList &Output,
+ ArrayRef<OptSpecifier> Ids,
+ ArrayRef<OptSpecifier> ExcludeIds) const {
+ for (const Arg *Arg : *this) {
+ bool Excluded = false;
+ for (OptSpecifier Id : ExcludeIds) {
+ if (Arg->getOption().matches(Id)) {
+ Excluded = true;
+ break;
+ }
+ }
+ if (!Excluded) {
+ for (OptSpecifier Id : Ids) {
+ if (Arg->getOption().matches(Id)) {
+ Arg->claim();
+ Arg->render(*this, Output);
+ break;
+ }
+ }
+ }
+ }
+}
+
+/// This is a nicer interface when you don't have a list of Ids to exclude.
+void ArgList::AddAllArgs(ArgStringList &Output,
+ ArrayRef<OptSpecifier> Ids) const {
+ ArrayRef<OptSpecifier> Exclude = None;
+ AddAllArgsExcept(Output, Ids, Exclude);
+}
+
+/// This 3-opt variant of AddAllArgs could be eliminated in favor of one
+/// that accepts a single specifier, given the overload above, which accepts
+/// any number.
+void ArgList::AddAllArgs(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1, OptSpecifier Id2) const {
+ for (auto Arg: filtered(Id0, Id1, Id2)) {
+ Arg->claim();
+ Arg->render(*this, Output);
+ }
+}
+
+void ArgList::AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1, OptSpecifier Id2) const {
+ for (auto Arg : filtered(Id0, Id1, Id2)) {
+ Arg->claim();
+ const auto &Values = Arg->getValues();
+ Output.append(Values.begin(), Values.end());
+ }
+}
+
+void ArgList::AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
+ const char *Translation,
+ bool Joined) const {
+ for (auto Arg: filtered(Id0)) {
+ Arg->claim();
+
+ if (Joined) {
+ Output.push_back(MakeArgString(StringRef(Translation) +
+ Arg->getValue(0)));
+ } else {
+ Output.push_back(Translation);
+ Output.push_back(Arg->getValue(0));
+ }
+ }
+}
+
+void ArgList::ClaimAllArgs(OptSpecifier Id0) const {
+ for (auto *Arg : filtered(Id0))
+ Arg->claim();
+}
+
+void ArgList::ClaimAllArgs() const {
+ for (auto *Arg : *this)
+ if (!Arg->isClaimed())
+ Arg->claim();
+}
+
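+// Reuse the original argument string when it is already exactly LHS + RHS
+// (e.g. an existing "-Ifoo" for LHS "-I" and RHS "foo"); otherwise
+// synthesize a fresh string.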
+const char *ArgList::GetOrMakeJoinedArgString(unsigned Index,
+ StringRef LHS,
+ StringRef RHS) const {
+ StringRef Cur = getArgString(Index);
+ if (Cur.size() == LHS.size() + RHS.size() &&
+ Cur.startswith(LHS) && Cur.endswith(RHS))
+ return Cur.data();
+
+ return MakeArgString(LHS + RHS);
+}
+
+void ArgList::print(raw_ostream &O) const {
+ for (Arg *A : *this) {
+ O << "* ";
+ A->print(O);
+ }
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void ArgList::dump() const { print(dbgs()); }
+#endif
+
+void InputArgList::releaseMemory() {
+ // An InputArgList always owns its arguments.
+ for (Arg *A : *this)
+ delete A;
+}
+
+InputArgList::InputArgList(const char* const *ArgBegin,
+ const char* const *ArgEnd)
+ : NumInputArgStrings(ArgEnd - ArgBegin) {
+ ArgStrings.append(ArgBegin, ArgEnd);
+}
+
+unsigned InputArgList::MakeIndex(StringRef String0) const {
+ unsigned Index = ArgStrings.size();
+
+ // Tuck away so we have a reliable const char *.
+ SynthesizedStrings.push_back(std::string(String0));
+ ArgStrings.push_back(SynthesizedStrings.back().c_str());
+
+ return Index;
+}
+
+unsigned InputArgList::MakeIndex(StringRef String0,
+ StringRef String1) const {
+ unsigned Index0 = MakeIndex(String0);
+ unsigned Index1 = MakeIndex(String1);
+ assert(Index0 + 1 == Index1 && "Unexpected non-consecutive indices!");
+ (void) Index1;
+ return Index0;
+}
+
+const char *InputArgList::MakeArgStringRef(StringRef Str) const {
+ return getArgString(MakeIndex(Str));
+}
+
+DerivedArgList::DerivedArgList(const InputArgList &BaseArgs)
+ : BaseArgs(BaseArgs) {}
+
+const char *DerivedArgList::MakeArgStringRef(StringRef Str) const {
+ return BaseArgs.MakeArgString(Str);
+}
+
+void DerivedArgList::AddSynthesizedArg(Arg *A) {
+ SynthesizedArgs.push_back(std::unique_ptr<Arg>(A));
+}
+
+Arg *DerivedArgList::MakeFlagArg(const Arg *BaseArg, const Option Opt) const {
+ SynthesizedArgs.push_back(
+ std::make_unique<Arg>(Opt, MakeArgString(Opt.getPrefix() + Opt.getName()),
+ BaseArgs.MakeIndex(Opt.getName()), BaseArg));
+ return SynthesizedArgs.back().get();
+}
+
+Arg *DerivedArgList::MakePositionalArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) const {
+ unsigned Index = BaseArgs.MakeIndex(Value);
+ SynthesizedArgs.push_back(
+ std::make_unique<Arg>(Opt, MakeArgString(Opt.getPrefix() + Opt.getName()),
+ Index, BaseArgs.getArgString(Index), BaseArg));
+ return SynthesizedArgs.back().get();
+}
+
+Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) const {
+ unsigned Index = BaseArgs.MakeIndex(Opt.getName(), Value);
+ SynthesizedArgs.push_back(
+ std::make_unique<Arg>(Opt, MakeArgString(Opt.getPrefix() + Opt.getName()),
+ Index, BaseArgs.getArgString(Index + 1), BaseArg));
+ return SynthesizedArgs.back().get();
+}
+
+Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option Opt,
+ StringRef Value) const {
+ unsigned Index = BaseArgs.MakeIndex((Opt.getName() + Value).str());
+ SynthesizedArgs.push_back(std::make_unique<Arg>(
+ Opt, MakeArgString(Opt.getPrefix() + Opt.getName()), Index,
+ BaseArgs.getArgString(Index) + Opt.getName().size(), BaseArg));
+ return SynthesizedArgs.back().get();
+}
diff --git a/contrib/libs/llvm12/lib/Option/OptTable.cpp b/contrib/libs/llvm12/lib/Option/OptTable.cpp
new file mode 100644
index 00000000000..c78c2cee1ed
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Option/OptTable.cpp
@@ -0,0 +1,672 @@
+//===- OptTable.cpp - Option Table Implementation -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Option/OptTable.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/OptSpecifier.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Support/CommandLine.h" // for expandResponseFiles
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cctype>
+#include <cstring>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+using namespace llvm::opt;
+
+namespace llvm {
+namespace opt {
+
+// Ordering on Info. The ordering is *almost* case-insensitive lexicographic,
+// with an exception. '\0' comes at the end of the alphabet instead of the
+// beginning (thus options precede any other options which prefix them).
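+// For example, "foobar" sorts before "foo": once the shorter name is
+// exhausted its terminating '\0' compares greater than any real character,
+// so an option sorts after every option it is a prefix of.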
+static int StrCmpOptionNameIgnoreCase(const char *A, const char *B) {
+ const char *X = A, *Y = B;
+ char a = tolower(*A), b = tolower(*B);
+ while (a == b) {
+ if (a == '\0')
+ return 0;
+
+ a = tolower(*++X);
+ b = tolower(*++Y);
+ }
+
+ if (a == '\0') // A is a prefix of B.
+ return 1;
+ if (b == '\0') // B is a prefix of A.
+ return -1;
+
+ // Otherwise lexicographic.
+ return (a < b) ? -1 : 1;
+}
+
+#ifndef NDEBUG
+static int StrCmpOptionName(const char *A, const char *B) {
+ if (int N = StrCmpOptionNameIgnoreCase(A, B))
+ return N;
+ return strcmp(A, B);
+}
+
+static inline bool operator<(const OptTable::Info &A, const OptTable::Info &B) {
+ if (&A == &B)
+ return false;
+
+ if (int N = StrCmpOptionName(A.Name, B.Name))
+ return N < 0;
+
+ for (const char * const *APre = A.Prefixes,
+ * const *BPre = B.Prefixes;
+ *APre != nullptr && *BPre != nullptr; ++APre, ++BPre){
+ if (int N = StrCmpOptionName(*APre, *BPre))
+ return N < 0;
+ }
+
+ // Names are the same, check that classes are in order; exactly one
+ // should be joined, and it should succeed the other.
+ assert(((A.Kind == Option::JoinedClass) ^ (B.Kind == Option::JoinedClass)) &&
+ "Unexpected classes for options with same name.");
+ return B.Kind == Option::JoinedClass;
+}
+#endif
+
+// Support lower_bound between info and an option name.
+static inline bool operator<(const OptTable::Info &I, const char *Name) {
+ return StrCmpOptionNameIgnoreCase(I.Name, Name) < 0;
+}
+
+} // end namespace opt
+} // end namespace llvm
+
+OptSpecifier::OptSpecifier(const Option *Opt) : ID(Opt->getID()) {}
+
+OptTable::OptTable(ArrayRef<Info> OptionInfos, bool IgnoreCase)
+ : OptionInfos(OptionInfos), IgnoreCase(IgnoreCase) {
+ // Explicitly zero initialize the error to work around a bug in array
+ // value-initialization on MinGW with gcc 4.3.5.
+
+ // Find start of normal options.
+ for (unsigned i = 0, e = getNumOptions(); i != e; ++i) {
+ unsigned Kind = getInfo(i + 1).Kind;
+ if (Kind == Option::InputClass) {
+ assert(!TheInputOptionID && "Cannot have multiple input options!");
+ TheInputOptionID = getInfo(i + 1).ID;
+ } else if (Kind == Option::UnknownClass) {
+ assert(!TheUnknownOptionID && "Cannot have multiple unknown options!");
+ TheUnknownOptionID = getInfo(i + 1).ID;
+ } else if (Kind != Option::GroupClass) {
+ FirstSearchableIndex = i;
+ break;
+ }
+ }
+ assert(FirstSearchableIndex != 0 && "No searchable options?");
+
+#ifndef NDEBUG
+ // Check that everything after the first searchable option is a
+ // regular option class.
+ for (unsigned i = FirstSearchableIndex, e = getNumOptions(); i != e; ++i) {
+ Option::OptionClass Kind = (Option::OptionClass) getInfo(i + 1).Kind;
+ assert((Kind != Option::InputClass && Kind != Option::UnknownClass &&
+ Kind != Option::GroupClass) &&
+ "Special options should be defined first!");
+ }
+
+ // Check that options are in order.
+ for (unsigned i = FirstSearchableIndex + 1, e = getNumOptions(); i != e; ++i){
+ if (!(getInfo(i) < getInfo(i + 1))) {
+ getOption(i).dump();
+ getOption(i + 1).dump();
+ llvm_unreachable("Options are not in order!");
+ }
+ }
+#endif
+
+ // Build prefixes.
+ for (unsigned i = FirstSearchableIndex + 1, e = getNumOptions() + 1;
+ i != e; ++i) {
+ if (const char *const *P = getInfo(i).Prefixes) {
+ for (; *P != nullptr; ++P) {
+ PrefixesUnion.insert(*P);
+ }
+ }
+ }
+
+ // Build prefix chars.
+ for (StringSet<>::const_iterator I = PrefixesUnion.begin(),
+ E = PrefixesUnion.end(); I != E; ++I) {
+ StringRef Prefix = I->getKey();
+ for (StringRef::const_iterator C = Prefix.begin(), CE = Prefix.end();
+ C != CE; ++C)
+ if (!is_contained(PrefixChars, *C))
+ PrefixChars.push_back(*C);
+ }
+}
+
+OptTable::~OptTable() = default;
+
+const Option OptTable::getOption(OptSpecifier Opt) const {
+ unsigned id = Opt.getID();
+ if (id == 0)
+ return Option(nullptr, nullptr);
+ assert((unsigned) (id - 1) < getNumOptions() && "Invalid ID.");
+ return Option(&getInfo(id), this);
+}
+
+static bool isInput(const StringSet<> &Prefixes, StringRef Arg) {
+ if (Arg == "-")
+ return true;
+ for (StringSet<>::const_iterator I = Prefixes.begin(),
+ E = Prefixes.end(); I != E; ++I)
+ if (Arg.startswith(I->getKey()))
+ return false;
+ return true;
+}
+
+/// \returns Matched size. 0 means no match.
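+/// For example (illustrative), an entry with Prefixes {"-", "--"} and Name
+/// "foo" matches "--foobar" with size 5 ("--" + "foo") and does not match
+/// "-bar" at all.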
+static unsigned matchOption(const OptTable::Info *I, StringRef Str,
+ bool IgnoreCase) {
+ for (const char * const *Pre = I->Prefixes; *Pre != nullptr; ++Pre) {
+ StringRef Prefix(*Pre);
+ if (Str.startswith(Prefix)) {
+ StringRef Rest = Str.substr(Prefix.size());
+ bool Matched = IgnoreCase
+ ? Rest.startswith_lower(I->Name)
+ : Rest.startswith(I->Name);
+ if (Matched)
+ return Prefix.size() + StringRef(I->Name).size();
+ }
+ }
+ return 0;
+}
+
+// Returns true if one of the Prefixes + In.Name combinations matches Option
+static bool optionMatches(const OptTable::Info &In, StringRef Option) {
+ if (In.Prefixes) {
+ StringRef InName(In.Name);
+ for (size_t I = 0; In.Prefixes[I]; I++)
+ if (Option.endswith(InName))
+ if (Option.slice(0, Option.size() - InName.size()) == In.Prefixes[I])
+ return true;
+ }
+ return false;
+}
+
+// This function is for flag value completion.
+// E.g. when "-stdlib=" and "l" are passed to this function, it returns the
+// appropriate values for stdlib that start with "l".
+std::vector<std::string>
+OptTable::suggestValueCompletions(StringRef Option, StringRef Arg) const {
+ // Search all options and return possible values.
+ for (size_t I = FirstSearchableIndex, E = OptionInfos.size(); I < E; I++) {
+ const Info &In = OptionInfos[I];
+ if (!In.Values || !optionMatches(In, Option))
+ continue;
+
+ SmallVector<StringRef, 8> Candidates;
+ StringRef(In.Values).split(Candidates, ",", -1, false);
+
+ std::vector<std::string> Result;
+ for (StringRef Val : Candidates)
+ if (Val.startswith(Arg) && Arg.compare(Val))
+ Result.push_back(std::string(Val));
+ return Result;
+ }
+ return {};
+}
+
+std::vector<std::string>
+OptTable::findByPrefix(StringRef Cur, unsigned int DisableFlags) const {
+ std::vector<std::string> Ret;
+ for (size_t I = FirstSearchableIndex, E = OptionInfos.size(); I < E; I++) {
+ const Info &In = OptionInfos[I];
+ if (!In.Prefixes || (!In.HelpText && !In.GroupID))
+ continue;
+ if (In.Flags & DisableFlags)
+ continue;
+
+ for (int I = 0; In.Prefixes[I]; I++) {
+ std::string S = std::string(In.Prefixes[I]) + std::string(In.Name) + "\t";
+ if (In.HelpText)
+ S += In.HelpText;
+ if (StringRef(S).startswith(Cur) && S != std::string(Cur) + "\t")
+ Ret.push_back(S);
+ }
+ }
+ return Ret;
+}
+
+unsigned OptTable::findNearest(StringRef Option, std::string &NearestString,
+ unsigned FlagsToInclude, unsigned FlagsToExclude,
+ unsigned MinimumLength) const {
+ assert(!Option.empty());
+
+ // Consider each [option prefix + option name] pair as a candidate, finding
+ // the closest match.
+ unsigned BestDistance = UINT_MAX;
+ for (const Info &CandidateInfo :
+ ArrayRef<Info>(OptionInfos).drop_front(FirstSearchableIndex)) {
+ StringRef CandidateName = CandidateInfo.Name;
+
+ // We can eliminate some option prefix/name pairs as candidates right away:
+ // * Ignore option candidates with empty names, such as "--", or names
+ // that do not meet the minimum length.
+ if (CandidateName.empty() || CandidateName.size() < MinimumLength)
+ continue;
+
+ // * If FlagsToInclude were specified, ignore options that don't include
+ // those flags.
+ if (FlagsToInclude && !(CandidateInfo.Flags & FlagsToInclude))
+ continue;
+ // * Ignore options that contain the FlagsToExclude.
+ if (CandidateInfo.Flags & FlagsToExclude)
+ continue;
+
+ // * Ignore positional argument option candidates (which do not
+ // have prefixes).
+ if (!CandidateInfo.Prefixes)
+ continue;
+
+ // Now check if the candidate ends with a character commonly used when
+ // delimiting an option from its value, such as '=' or ':'. If it does,
+ // attempt to split the given option based on that delimiter.
+ StringRef LHS, RHS;
+ char Last = CandidateName.back();
+ bool CandidateHasDelimiter = Last == '=' || Last == ':';
+ std::string NormalizedName = std::string(Option);
+ if (CandidateHasDelimiter) {
+ std::tie(LHS, RHS) = Option.split(Last);
+ NormalizedName = std::string(LHS);
+ if (Option.find(Last) == LHS.size())
+ NormalizedName += Last;
+ }
+
+ // Consider each possible prefix for each candidate to find the most
+ // appropriate one. For example, if a user asks for "--helm", suggest
+ // "--help" over "-help".
+ for (int P = 0;
+ const char *const CandidatePrefix = CandidateInfo.Prefixes[P]; P++) {
+ std::string Candidate = (CandidatePrefix + CandidateName).str();
+ StringRef CandidateRef = Candidate;
+ unsigned Distance =
+ CandidateRef.edit_distance(NormalizedName, /*AllowReplacements=*/true,
+ /*MaxEditDistance=*/BestDistance);
+ if (RHS.empty() && CandidateHasDelimiter) {
+ // The Candidate ends with a = or : delimiter, but the option passed in
+ // didn't contain the delimiter (or doesn't have anything after it).
+ // In that case, penalize the correction: `-nodefaultlibs` is more
+ // likely to be a spello for `-nodefaultlib` than `-nodefaultlib:` even
+ // though both have an unmodified editing distance of 1, since the
+ // latter would need an argument.
+ ++Distance;
+ }
+ if (Distance < BestDistance) {
+ BestDistance = Distance;
+ NearestString = (Candidate + RHS).str();
+ }
+ }
+ }
+ return BestDistance;
+}
+
+bool OptTable::addValues(const char *Option, const char *Values) {
+ for (size_t I = FirstSearchableIndex, E = OptionInfos.size(); I < E; I++) {
+ Info &In = OptionInfos[I];
+ if (optionMatches(In, Option)) {
+ In.Values = Values;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Parse a single argument, return the new argument, and update Index. If
+// GroupedShortOptions is true, -a matches "-abc" and the argument in Args will
+// be updated to "-bc". This overload does not support
+// FlagsToInclude/FlagsToExclude or case insensitive options.
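+// For example (a sketch): with flag options -a, -b and -c defined, parsing
+// {"-abc"} yields -a on the first call, rewrites the argument to "-bc",
+// then yields -b and finally -c, with Index advancing only on the last
+// piece.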
+Arg *OptTable::parseOneArgGrouped(InputArgList &Args, unsigned &Index) const {
+ // Anything that doesn't start with PrefixesUnion is an input, as is '-'
+ // itself.
+ const char *CStr = Args.getArgString(Index);
+ StringRef Str(CStr);
+ if (isInput(PrefixesUnion, Str))
+ return new Arg(getOption(TheInputOptionID), Str, Index++, CStr);
+
+ const Info *End = OptionInfos.data() + OptionInfos.size();
+ StringRef Name = Str.ltrim(PrefixChars);
+ const Info *Start = std::lower_bound(
+ OptionInfos.data() + FirstSearchableIndex, End, Name.data());
+ const Info *Fallback = nullptr;
+ unsigned Prev = Index;
+
+ // Search for the option which matches Str.
+ for (; Start != End; ++Start) {
+ unsigned ArgSize = matchOption(Start, Str, IgnoreCase);
+ if (!ArgSize)
+ continue;
+
+ Option Opt(Start, this);
+ if (Arg *A = Opt.accept(Args, StringRef(Args.getArgString(Index), ArgSize),
+ false, Index))
+ return A;
+
+ // If Opt is a Flag of length 2 (e.g. "-a"), we know it is a prefix of
+ // the current argument (e.g. "-abc"). Match it as a fallback if no longer
+ // option (e.g. "-ab") exists.
+ if (ArgSize == 2 && Opt.getKind() == Option::FlagClass)
+ Fallback = Start;
+
+ // Otherwise, see if the argument is missing.
+ if (Prev != Index)
+ return nullptr;
+ }
+ if (Fallback) {
+ Option Opt(Fallback, this);
+ if (Arg *A = Opt.accept(Args, Str.substr(0, 2), true, Index)) {
+ if (Str.size() == 2)
+ ++Index;
+ else
+ Args.replaceArgString(Index, Twine('-') + Str.substr(2));
+ return A;
+ }
+ }
+
+ return new Arg(getOption(TheUnknownOptionID), Str, Index++, CStr);
+}
+
+Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index,
+ unsigned FlagsToInclude,
+ unsigned FlagsToExclude) const {
+ unsigned Prev = Index;
+ const char *Str = Args.getArgString(Index);
+
+ // Anything that doesn't start with PrefixesUnion is an input, as is '-'
+ // itself.
+ if (isInput(PrefixesUnion, Str))
+ return new Arg(getOption(TheInputOptionID), Str, Index++, Str);
+
+ const Info *Start = OptionInfos.data() + FirstSearchableIndex;
+ const Info *End = OptionInfos.data() + OptionInfos.size();
+ StringRef Name = StringRef(Str).ltrim(PrefixChars);
+
+ // Search for the next option which could be a prefix.
+ Start = std::lower_bound(Start, End, Name.data());
+
+ // Options are stored in sorted order, with '\0' at the end of the
+ // alphabet. Since the only options which can accept a string must
+ // prefix it, we iteratively search for the next option which could
+ // be a prefix.
+ //
+ // FIXME: This is searching much more than necessary, but I am
+ // blanking on the simplest way to make it fast. We can solve this
+ // problem when we move to TableGen.
+ for (; Start != End; ++Start) {
+ unsigned ArgSize = 0;
+ // Scan for first option which is a proper prefix.
+ for (; Start != End; ++Start)
+ if ((ArgSize = matchOption(Start, Str, IgnoreCase)))
+ break;
+ if (Start == End)
+ break;
+
+ Option Opt(Start, this);
+
+ if (FlagsToInclude && !Opt.hasFlag(FlagsToInclude))
+ continue;
+ if (Opt.hasFlag(FlagsToExclude))
+ continue;
+
+ // See if this option matches.
+ if (Arg *A = Opt.accept(Args, StringRef(Args.getArgString(Index), ArgSize),
+ false, Index))
+ return A;
+
+ // Otherwise, see if this argument was missing values.
+ if (Prev != Index)
+ return nullptr;
+ }
+
+ // If we failed to find an option and this arg started with /, then it's
+ // probably an input path.
+ if (Str[0] == '/')
+ return new Arg(getOption(TheInputOptionID), Str, Index++, Str);
+
+ return new Arg(getOption(TheUnknownOptionID), Str, Index++, Str);
+}
+
+InputArgList OptTable::ParseArgs(ArrayRef<const char *> ArgArr,
+ unsigned &MissingArgIndex,
+ unsigned &MissingArgCount,
+ unsigned FlagsToInclude,
+ unsigned FlagsToExclude) const {
+ InputArgList Args(ArgArr.begin(), ArgArr.end());
+
+ // FIXME: Handle '@' args (or at least error on them).
+
+ MissingArgIndex = MissingArgCount = 0;
+ unsigned Index = 0, End = ArgArr.size();
+ while (Index < End) {
+ // Ignore nullptrs; they are EOL markers inserted by response files.
+ if (Args.getArgString(Index) == nullptr) {
+ ++Index;
+ continue;
+ }
+ // Ignore empty arguments (other things may still take them as arguments).
+ StringRef Str = Args.getArgString(Index);
+ if (Str == "") {
+ ++Index;
+ continue;
+ }
+
+ unsigned Prev = Index;
+ Arg *A = GroupedShortOptions
+ ? parseOneArgGrouped(Args, Index)
+ : ParseOneArg(Args, Index, FlagsToInclude, FlagsToExclude);
+ assert((Index > Prev || GroupedShortOptions) &&
+ "Parser failed to consume argument.");
+
+ // Check for missing argument error.
+ if (!A) {
+ assert(Index >= End && "Unexpected parser error.");
+ assert(Index - Prev - 1 && "No missing arguments!");
+ MissingArgIndex = Prev;
+ MissingArgCount = Index - Prev - 1;
+ break;
+ }
+
+ Args.append(A);
+ }
+
+ return Args;
+}
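+
+// Illustrative caller pattern (hypothetical tool code; Argc/Argv as received
+// from main): MissingArgIndex and MissingArgCount identify an option whose
+// required values were absent:
+//
+//   unsigned MAI, MAC;
+//   InputArgList Args =
+//       TheTable.ParseArgs(makeArrayRef(Argv, Argc), MAI, MAC);
+//   if (MAC)
+//     errs() << "missing " << MAC << " value(s) after '"
+//            << Args.getArgString(MAI) << "'\n";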
+
+InputArgList OptTable::parseArgs(int Argc, char *const *Argv,
+ OptSpecifier Unknown, StringSaver &Saver,
+ function_ref<void(StringRef)> ErrorFn) const {
+ SmallVector<const char *, 0> NewArgv;
+ // The environment variable specifies initial options which can be overridden
+ // by command-line options.
+ cl::expandResponseFiles(Argc, Argv, EnvVar, Saver, NewArgv);
+
+ unsigned MAI, MAC;
+ opt::InputArgList Args = ParseArgs(makeArrayRef(NewArgv), MAI, MAC);
+ if (MAC)
+ ErrorFn((Twine(Args.getArgString(MAI)) + ": missing argument").str());
+
+ // For each unknown option, call ErrorFn with a formatted error message. The
+ // message includes a suggested alternative option spelling if available.
+ std::string Nearest;
+ for (const opt::Arg *A : Args.filtered(Unknown)) {
+ std::string Spelling = A->getAsString(Args);
+ if (findNearest(Spelling, Nearest) > 1)
+ ErrorFn("unknown argument '" + A->getAsString(Args) + "'");
+ else
+ ErrorFn("unknown argument '" + A->getAsString(Args) +
+ "', did you mean '" + Nearest + "'?");
+ }
+ return Args;
+}
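+
+// Minimal usage sketch (MyOptTable and OPT_UNKNOWN stand in for a
+// tablegen-generated table and its unknown-option id; both are assumptions,
+// not defined in this file):
+//
+//   llvm::BumpPtrAllocator Alloc;
+//   llvm::StringSaver Saver(Alloc);
+//   MyOptTable T;
+//   opt::InputArgList Args = T.parseArgs(
+//       Argc, Argv, OPT_UNKNOWN, Saver,
+//       [](llvm::StringRef Msg) { llvm::errs() << Msg << '\n'; });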
+
+static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
+ const Option O = Opts.getOption(Id);
+ std::string Name = O.getPrefixedName();
+
+ // Add metavar, if used.
+ switch (O.getKind()) {
+ case Option::GroupClass: case Option::InputClass: case Option::UnknownClass:
+ llvm_unreachable("Invalid option with help text.");
+
+ case Option::MultiArgClass:
+ if (const char *MetaVarName = Opts.getOptionMetaVar(Id)) {
+ // For MultiArgs, metavar is full list of all argument names.
+ Name += ' ';
+ Name += MetaVarName;
+ } else {
+ // For MultiArgs<N>, if a metavar was not supplied, print <value> N times.
+ for (unsigned i = 0, e = O.getNumArgs(); i < e; ++i)
+ Name += " <value>";
+ }
+ break;
+
+ case Option::FlagClass:
+ break;
+
+ case Option::ValuesClass:
+ break;
+
+ case Option::SeparateClass: case Option::JoinedOrSeparateClass:
+ case Option::RemainingArgsClass: case Option::RemainingArgsJoinedClass:
+ Name += ' ';
+ LLVM_FALLTHROUGH;
+ case Option::JoinedClass: case Option::CommaJoinedClass:
+ case Option::JoinedAndSeparateClass:
+ if (const char *MetaVarName = Opts.getOptionMetaVar(Id))
+ Name += MetaVarName;
+ else
+ Name += "<value>";
+ break;
+ }
+
+ return Name;
+}
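+
+// For illustration, with typical definitions this yields names such as
+// "-o <value>" for a JoinedOrSeparate option, "-D<value>" for a Joined
+// option, and one " <value>" per argument for a MultiArg option without a
+// metavar.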
+
+namespace {
+struct OptionInfo {
+ std::string Name;
+ StringRef HelpText;
+};
+} // namespace
+
+static void PrintHelpOptionList(raw_ostream &OS, StringRef Title,
+ std::vector<OptionInfo> &OptionHelp) {
+ OS << Title << ":\n";
+
+ // Find the maximum option length.
+ unsigned OptionFieldWidth = 0;
+ for (unsigned i = 0, e = OptionHelp.size(); i != e; ++i) {
+ // Limit the amount of padding we are willing to give up for alignment.
+ unsigned Length = OptionHelp[i].Name.size();
+ if (Length <= 23)
+ OptionFieldWidth = std::max(OptionFieldWidth, Length);
+ }
+
+ const unsigned InitialPad = 2;
+ for (unsigned i = 0, e = OptionHelp.size(); i != e; ++i) {
+ const std::string &Option = OptionHelp[i].Name;
+ int Pad = OptionFieldWidth - int(Option.size());
+ OS.indent(InitialPad) << Option;
+
+ // Break on long option names.
+ if (Pad < 0) {
+ OS << "\n";
+ Pad = OptionFieldWidth + InitialPad;
+ }
+ OS.indent(Pad + 1) << OptionHelp[i].HelpText << '\n';
+ }
+}
+
+static const char *getOptionHelpGroup(const OptTable &Opts, OptSpecifier Id) {
+ unsigned GroupID = Opts.getOptionGroupID(Id);
+
+ // If not in a group, return the default help group.
+ if (!GroupID)
+ return "OPTIONS";
+
+ // Abuse the help text of the option groups to store the "help group"
+ // name.
+ //
+ // FIXME: Split out option groups.
+ if (const char *GroupHelp = Opts.getOptionHelpText(GroupID))
+ return GroupHelp;
+
+ // Otherwise keep looking.
+ return getOptionHelpGroup(Opts, GroupID);
+}
+
+void OptTable::PrintHelp(raw_ostream &OS, const char *Usage, const char *Title,
+ bool ShowHidden, bool ShowAllAliases) const {
+ PrintHelp(OS, Usage, Title, /*Include*/ 0, /*Exclude*/
+ (ShowHidden ? 0 : HelpHidden), ShowAllAliases);
+}
+
+void OptTable::PrintHelp(raw_ostream &OS, const char *Usage, const char *Title,
+ unsigned FlagsToInclude, unsigned FlagsToExclude,
+ bool ShowAllAliases) const {
+ OS << "OVERVIEW: " << Title << "\n\n";
+ OS << "USAGE: " << Usage << "\n\n";
+
+ // Render help text into a map of group-name to a list of (option, help)
+ // pairs.
+ std::map<std::string, std::vector<OptionInfo>> GroupedOptionHelp;
+
+ for (unsigned Id = 1, e = getNumOptions() + 1; Id != e; ++Id) {
+ // FIXME: Split out option groups.
+ if (getOptionKind(Id) == Option::GroupClass)
+ continue;
+
+ unsigned Flags = getInfo(Id).Flags;
+ if (FlagsToInclude && !(Flags & FlagsToInclude))
+ continue;
+ if (Flags & FlagsToExclude)
+ continue;
+
+ // If an alias doesn't have a help text, show a help text for the aliased
+ // option instead.
+ const char *HelpText = getOptionHelpText(Id);
+ if (!HelpText && ShowAllAliases) {
+ const Option Alias = getOption(Id).getAlias();
+ if (Alias.isValid())
+ HelpText = getOptionHelpText(Alias.getID());
+ }
+
+ if (HelpText) {
+ const char *HelpGroup = getOptionHelpGroup(*this, Id);
+ const std::string &OptName = getOptionHelpName(*this, Id);
+ GroupedOptionHelp[HelpGroup].push_back({OptName, HelpText});
+ }
+ }
+
+ for (auto& OptionGroup : GroupedOptionHelp) {
+ if (OptionGroup.first != GroupedOptionHelp.begin()->first)
+ OS << "\n";
+ PrintHelpOptionList(OS, OptionGroup.first, OptionGroup.second);
+ }
+
+ OS.flush();
+}
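+
+// Typical invocation (usage string and title are illustrative):
+//
+//   TheTable.PrintHelp(llvm::outs(), "mytool [options] <inputs>",
+//                      "My Tool", /*ShowHidden=*/false);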
diff --git a/contrib/libs/llvm12/lib/Option/Option.cpp b/contrib/libs/llvm12/lib/Option/Option.cpp
new file mode 100644
index 00000000000..68d074b2702
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Option/Option.cpp
@@ -0,0 +1,291 @@
+//===- Option.cpp - Abstract Driver Options -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Option/Arg.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include "llvm/Option/OptTable.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstring>
+
+using namespace llvm;
+using namespace llvm::opt;
+
+Option::Option(const OptTable::Info *info, const OptTable *owner)
+ : Info(info), Owner(owner) {
+ // Multi-level aliases are not supported. This just simplifies option
+ // tracking, it is not an inherent limitation.
+ assert((!Info || !getAlias().isValid() || !getAlias().getAlias().isValid()) &&
+ "Multi-level aliases are not supported.");
+
+ if (Info && getAliasArgs()) {
+ assert(getAlias().isValid() && "Only alias options can have alias args.");
+ assert(getKind() == FlagClass && "Only Flag aliases can have alias args.");
+ assert(getAlias().getKind() != FlagClass &&
+ "Cannot provide alias args to a flag option.");
+ }
+}
+
+void Option::print(raw_ostream &O) const {
+ O << "<";
+ switch (getKind()) {
+#define P(N) case N: O << #N; break
+ P(GroupClass);
+ P(InputClass);
+ P(UnknownClass);
+ P(FlagClass);
+ P(JoinedClass);
+ P(ValuesClass);
+ P(SeparateClass);
+ P(CommaJoinedClass);
+ P(MultiArgClass);
+ P(JoinedOrSeparateClass);
+ P(JoinedAndSeparateClass);
+ P(RemainingArgsClass);
+ P(RemainingArgsJoinedClass);
+#undef P
+ }
+
+ if (Info->Prefixes) {
+ O << " Prefixes:[";
+ for (const char *const *Pre = Info->Prefixes; *Pre != nullptr; ++Pre) {
+ O << '"' << *Pre << (*(Pre + 1) == nullptr ? "\"" : "\", ");
+ }
+ O << ']';
+ }
+
+ O << " Name:\"" << getName() << '"';
+
+ const Option Group = getGroup();
+ if (Group.isValid()) {
+ O << " Group:";
+ Group.print(O);
+ }
+
+ const Option Alias = getAlias();
+ if (Alias.isValid()) {
+ O << " Alias:";
+ Alias.print(O);
+ }
+
+ if (getKind() == MultiArgClass)
+ O << " NumArgs:" << getNumArgs();
+
+ O << ">\n";
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void Option::dump() const { print(dbgs()); }
+#endif
+
+bool Option::matches(OptSpecifier Opt) const {
+ // Aliases are never considered in matching, look through them.
+ const Option Alias = getAlias();
+ if (Alias.isValid())
+ return Alias.matches(Opt);
+
+ // Check exact match.
+ if (getID() == Opt.getID())
+ return true;
+
+ const Option Group = getGroup();
+ if (Group.isValid())
+ return Group.matches(Opt);
+ return false;
+}
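+
+// Matching example (the ids are hypothetical): if -Wextra is an alias of
+// -Wall and -Wall belongs to group -W, then Option(-Wextra).matches(OPT_Wall)
+// and Option(-Wall).matches(OPT_W_Group) both hold, because aliases are
+// looked through first and group membership is searched upward.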
+
+Arg *Option::acceptInternal(const ArgList &Args, StringRef Spelling,
+ unsigned &Index) const {
+ size_t ArgSize = Spelling.size();
+ switch (getKind()) {
+ case FlagClass: {
+ if (ArgSize != strlen(Args.getArgString(Index)))
+ return nullptr;
+ return new Arg(*this, Spelling, Index++);
+ }
+ case JoinedClass: {
+ const char *Value = Args.getArgString(Index) + ArgSize;
+ return new Arg(*this, Spelling, Index++, Value);
+ }
+ case CommaJoinedClass: {
+ // Always matches.
+ const char *Str = Args.getArgString(Index) + ArgSize;
+ Arg *A = new Arg(*this, Spelling, Index++);
+
+ // Parse out the comma separated values.
+ const char *Prev = Str;
+ for (;; ++Str) {
+ char c = *Str;
+
+ if (!c || c == ',') {
+ if (Prev != Str) {
+ char *Value = new char[Str - Prev + 1];
+ memcpy(Value, Prev, Str - Prev);
+ Value[Str - Prev] = '\0';
+ A->getValues().push_back(Value);
+ }
+
+ if (!c)
+ break;
+
+ Prev = Str + 1;
+ }
+ }
+ A->setOwnsValues(true);
+
+ return A;
+ }
+ case SeparateClass:
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (ArgSize != strlen(Args.getArgString(Index)))
+ return nullptr;
+
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings() ||
+ Args.getArgString(Index - 1) == nullptr)
+ return nullptr;
+
+ return new Arg(*this, Spelling, Index - 2, Args.getArgString(Index - 1));
+ case MultiArgClass: {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (ArgSize != strlen(Args.getArgString(Index)))
+ return nullptr;
+
+ Index += 1 + getNumArgs();
+ if (Index > Args.getNumInputArgStrings())
+ return nullptr;
+
+ Arg *A = new Arg(*this, Spelling, Index - 1 - getNumArgs(),
+ Args.getArgString(Index - getNumArgs()));
+ for (unsigned i = 1; i != getNumArgs(); ++i)
+ A->getValues().push_back(Args.getArgString(Index - getNumArgs() + i));
+ return A;
+ }
+ case JoinedOrSeparateClass: {
+ // If this is not an exact match, it is a joined arg.
+ // FIXME: Avoid strlen.
+ if (ArgSize != strlen(Args.getArgString(Index))) {
+ const char *Value = Args.getArgString(Index) + ArgSize;
+ return new Arg(*this, Spelling, Index++, Value);
+ }
+
+ // Otherwise it must be separate.
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings() ||
+ Args.getArgString(Index - 1) == nullptr)
+ return nullptr;
+
+ return new Arg(*this, Spelling, Index - 2, Args.getArgString(Index - 1));
+ }
+ case JoinedAndSeparateClass:
+ // Always matches.
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings() ||
+ Args.getArgString(Index - 1) == nullptr)
+ return nullptr;
+
+ return new Arg(*this, Spelling, Index - 2,
+ Args.getArgString(Index - 2) + ArgSize,
+ Args.getArgString(Index - 1));
+ case RemainingArgsClass: {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (ArgSize != strlen(Args.getArgString(Index)))
+ return nullptr;
+ Arg *A = new Arg(*this, Spelling, Index++);
+ while (Index < Args.getNumInputArgStrings() &&
+ Args.getArgString(Index) != nullptr)
+ A->getValues().push_back(Args.getArgString(Index++));
+ return A;
+ }
+ case RemainingArgsJoinedClass: {
+ Arg *A = new Arg(*this, Spelling, Index);
+ if (ArgSize != strlen(Args.getArgString(Index))) {
+ // An inexact match means there is a joined arg.
+ A->getValues().push_back(Args.getArgString(Index) + ArgSize);
+ }
+ Index++;
+ while (Index < Args.getNumInputArgStrings() &&
+ Args.getArgString(Index) != nullptr)
+ A->getValues().push_back(Args.getArgString(Index++));
+ return A;
+ }
+
+ default:
+ llvm_unreachable("Invalid option kind!");
+ }
+}
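+
+// Worked example for the JoinedOrSeparate case above: for an option "-o",
+// the input "-ofile.o" is an inexact match and is accepted as a joined arg
+// with value "file.o", while "-o" "file.o" consumes two argv slots and
+// yields the same value.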
+
+Arg *Option::accept(const ArgList &Args, StringRef CurArg,
+ bool GroupedShortOption, unsigned &Index) const {
+ std::unique_ptr<Arg> A(GroupedShortOption && getKind() == FlagClass
+ ? new Arg(*this, CurArg, Index)
+ : acceptInternal(Args, CurArg, Index));
+ if (!A)
+ return nullptr;
+
+ const Option &UnaliasedOption = getUnaliasedOption();
+ if (getID() == UnaliasedOption.getID())
+ return A.release();
+
+ // "A" is an alias for a different flag. For most clients it's more convenient
+ // if this function returns unaliased Args, so create an unaliased arg for
+ // returning.
+
+ // This creates a completely new Arg object for the unaliased Arg because
+ // the alias and the unaliased arg can have different Kinds and different
+ // Values (due to AliasArgs<>).
+
+ // Get the spelling from the unaliased option.
+ StringRef UnaliasedSpelling = Args.MakeArgString(
+ Twine(UnaliasedOption.getPrefix()) + Twine(UnaliasedOption.getName()));
+
+ // It's a bit weird that the aliased and unaliased args share one index, but
+ // the index is mostly used as a memory optimization in render().
+ // Due to this, ArgList::getArgString(A->getIndex()) will return the spelling
+ // of the aliased arg always, while A->getSpelling() returns either the
+ // unaliased or the aliased arg, depending on which Arg object it's called on.
+ Arg *UnaliasedA = new Arg(UnaliasedOption, UnaliasedSpelling, A->getIndex());
+ Arg *RawA = A.get();
+ UnaliasedA->setAlias(std::move(A));
+
+ if (getKind() != FlagClass) {
+ // Values are usually owned by the ArgList. The exception are
+ // CommaJoined flags, where the Arg owns the values. For aliased flags,
+ // make the unaliased Arg the owner of the values.
+ // FIXME: There aren't many uses of CommaJoined -- try removing
+ // CommaJoined in favor of just calling StringRef::split(',') instead.
+ UnaliasedA->getValues() = RawA->getValues();
+ UnaliasedA->setOwnsValues(RawA->getOwnsValues());
+ RawA->setOwnsValues(false);
+ return UnaliasedA;
+ }
+
+ // FlagClass aliases can have AliasArgs<>; add those to the unaliased arg.
+ if (const char *Val = getAliasArgs()) {
+ while (*Val != '\0') {
+ UnaliasedA->getValues().push_back(Val);
+
+ // Move past the '\0' to the next argument.
+ Val += strlen(Val) + 1;
+ }
+ }
+ if (UnaliasedOption.getKind() == JoinedClass && !getAliasArgs())
+ // A Flag alias for a Joined option must provide an argument.
+ UnaliasedA->getValues().push_back("");
+ return UnaliasedA;
+}
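+
+// Alias example (option names are illustrative): if --output is defined as
+// an alias of -o, accept() returns an Arg for the unaliased -o, but the
+// original --output Arg is kept via setAlias() so diagnostics can still show
+// the spelling the user actually typed.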
diff --git a/contrib/libs/llvm12/lib/Passes/PassBuilder.cpp b/contrib/libs/llvm12/lib/Passes/PassBuilder.cpp
new file mode 100644
index 00000000000..6c1a7c75d30
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Passes/PassBuilder.cpp
@@ -0,0 +1,3062 @@
+//===- Parsing, selection, and construction of pass pipelines -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file provides the implementation of the PassBuilder based on our
+/// static pass registry as well as related functionality. It also provides
+/// helpers to aid in analyzing, debugging, and testing passes and pass
+/// pipelines.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Analysis/AliasAnalysisEvaluator.h"
+#include "llvm/Analysis/AliasSetTracker.h"
+#include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/BasicAliasAnalysis.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/CFGPrinter.h"
+#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
+#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
+#include "llvm/Analysis/CGSCCPassManager.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/DDG.h"
+#include "llvm/Analysis/DDGPrinter.h"
+#include "llvm/Analysis/Delinearization.h"
+#include "llvm/Analysis/DemandedBits.h"
+#include "llvm/Analysis/DependenceAnalysis.h"
+#include "llvm/Analysis/DominanceFrontier.h"
+#include "llvm/Analysis/FunctionPropertiesAnalysis.h"
+#include "llvm/Analysis/GlobalsModRef.h"
+#include "llvm/Analysis/IRSimilarityIdentifier.h"
+#include "llvm/Analysis/IVUsers.h"
+#include "llvm/Analysis/InlineAdvisor.h"
+#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
+#include "llvm/Analysis/InstCount.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/Analysis/LazyValueInfo.h"
+#include "llvm/Analysis/Lint.h"
+#include "llvm/Analysis/LoopAccessAnalysis.h"
+#include "llvm/Analysis/LoopCacheAnalysis.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/Analysis/LoopNestAnalysis.h"
+#include "llvm/Analysis/MemDerefPrinter.h"
+#include "llvm/Analysis/MemoryDependenceAnalysis.h"
+#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/Analysis/ModuleDebugInfoPrinter.h"
+#include "llvm/Analysis/ModuleSummaryAnalysis.h"
+#include "llvm/Analysis/MustExecute.h"
+#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
+#include "llvm/Analysis/OptimizationRemarkEmitter.h"
+#include "llvm/Analysis/PhiValues.h"
+#include "llvm/Analysis/PostDominators.h"
+#include "llvm/Analysis/ProfileSummaryInfo.h"
+#include "llvm/Analysis/RegionInfo.h"
+#include "llvm/Analysis/ScalarEvolution.h"
+#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
+#include "llvm/Analysis/ScopedNoAliasAA.h"
+#include "llvm/Analysis/StackLifetime.h"
+#include "llvm/Analysis/StackSafetyAnalysis.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRPrintingPasses.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/IR/PrintPasses.h"
+#include "llvm/IR/SafepointIRVerifier.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
+#include "llvm/Transforms/Coroutines/CoroCleanup.h"
+#include "llvm/Transforms/Coroutines/CoroEarly.h"
+#include "llvm/Transforms/Coroutines/CoroElide.h"
+#include "llvm/Transforms/Coroutines/CoroSplit.h"
+#include "llvm/Transforms/HelloNew/HelloWorld.h"
+#include "llvm/Transforms/IPO/AlwaysInliner.h"
+#include "llvm/Transforms/IPO/Annotation2Metadata.h"
+#include "llvm/Transforms/IPO/ArgumentPromotion.h"
+#include "llvm/Transforms/IPO/Attributor.h"
+#include "llvm/Transforms/IPO/BlockExtractor.h"
+#include "llvm/Transforms/IPO/CalledValuePropagation.h"
+#include "llvm/Transforms/IPO/ConstantMerge.h"
+#include "llvm/Transforms/IPO/CrossDSOCFI.h"
+#include "llvm/Transforms/IPO/DeadArgumentElimination.h"
+#include "llvm/Transforms/IPO/ElimAvailExtern.h"
+#include "llvm/Transforms/IPO/ForceFunctionAttrs.h"
+#include "llvm/Transforms/IPO/FunctionAttrs.h"
+#include "llvm/Transforms/IPO/FunctionImport.h"
+#include "llvm/Transforms/IPO/GlobalDCE.h"
+#include "llvm/Transforms/IPO/GlobalOpt.h"
+#include "llvm/Transforms/IPO/GlobalSplit.h"
+#include "llvm/Transforms/IPO/HotColdSplitting.h"
+#include "llvm/Transforms/IPO/IROutliner.h"
+#include "llvm/Transforms/IPO/InferFunctionAttrs.h"
+#include "llvm/Transforms/IPO/Inliner.h"
+#include "llvm/Transforms/IPO/Internalize.h"
+#include "llvm/Transforms/IPO/LoopExtractor.h"
+#include "llvm/Transforms/IPO/LowerTypeTests.h"
+#include "llvm/Transforms/IPO/MergeFunctions.h"
+#include "llvm/Transforms/IPO/OpenMPOpt.h"
+#include "llvm/Transforms/IPO/PartialInlining.h"
+#include "llvm/Transforms/IPO/SCCP.h"
+#include "llvm/Transforms/IPO/SampleProfile.h"
+#include "llvm/Transforms/IPO/SampleProfileProbe.h"
+#include "llvm/Transforms/IPO/StripDeadPrototypes.h"
+#include "llvm/Transforms/IPO/StripSymbols.h"
+#include "llvm/Transforms/IPO/SyntheticCountsPropagation.h"
+#include "llvm/Transforms/IPO/WholeProgramDevirt.h"
+#include "llvm/Transforms/InstCombine/InstCombine.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/Instrumentation/AddressSanitizer.h"
+#include "llvm/Transforms/Instrumentation/BoundsChecking.h"
+#include "llvm/Transforms/Instrumentation/CGProfile.h"
+#include "llvm/Transforms/Instrumentation/ControlHeightReduction.h"
+#include "llvm/Transforms/Instrumentation/DataFlowSanitizer.h"
+#include "llvm/Transforms/Instrumentation/GCOVProfiler.h"
+#include "llvm/Transforms/Instrumentation/HWAddressSanitizer.h"
+#include "llvm/Transforms/Instrumentation/InstrOrderFile.h"
+#include "llvm/Transforms/Instrumentation/InstrProfiling.h"
+#include "llvm/Transforms/Instrumentation/MemProfiler.h"
+#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
+#include "llvm/Transforms/Instrumentation/PGOInstrumentation.h"
+#include "llvm/Transforms/Instrumentation/PoisonChecking.h"
+#include "llvm/Transforms/Instrumentation/SanitizerCoverage.h"
+#include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
+#include "llvm/Transforms/ObjCARC.h"
+#include "llvm/Transforms/Scalar/ADCE.h"
+#include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
+#include "llvm/Transforms/Scalar/AnnotationRemarks.h"
+#include "llvm/Transforms/Scalar/BDCE.h"
+#include "llvm/Transforms/Scalar/CallSiteSplitting.h"
+#include "llvm/Transforms/Scalar/ConstantHoisting.h"
+#include "llvm/Transforms/Scalar/ConstraintElimination.h"
+#include "llvm/Transforms/Scalar/CorrelatedValuePropagation.h"
+#include "llvm/Transforms/Scalar/DCE.h"
+#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
+#include "llvm/Transforms/Scalar/DivRemPairs.h"
+#include "llvm/Transforms/Scalar/EarlyCSE.h"
+#include "llvm/Transforms/Scalar/Float2Int.h"
+#include "llvm/Transforms/Scalar/GVN.h"
+#include "llvm/Transforms/Scalar/GuardWidening.h"
+#include "llvm/Transforms/Scalar/IVUsersPrinter.h"
+#include "llvm/Transforms/Scalar/IndVarSimplify.h"
+#include "llvm/Transforms/Scalar/InductiveRangeCheckElimination.h"
+#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
+#include "llvm/Transforms/Scalar/InstSimplifyPass.h"
+#include "llvm/Transforms/Scalar/JumpThreading.h"
+#include "llvm/Transforms/Scalar/LICM.h"
+#include "llvm/Transforms/Scalar/LoopAccessAnalysisPrinter.h"
+#include "llvm/Transforms/Scalar/LoopDataPrefetch.h"
+#include "llvm/Transforms/Scalar/LoopDeletion.h"
+#include "llvm/Transforms/Scalar/LoopDistribute.h"
+#include "llvm/Transforms/Scalar/LoopFlatten.h"
+#include "llvm/Transforms/Scalar/LoopFuse.h"
+#include "llvm/Transforms/Scalar/LoopIdiomRecognize.h"
+#include "llvm/Transforms/Scalar/LoopInstSimplify.h"
+#include "llvm/Transforms/Scalar/LoopInterchange.h"
+#include "llvm/Transforms/Scalar/LoopLoadElimination.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+#include "llvm/Transforms/Scalar/LoopPredication.h"
+#include "llvm/Transforms/Scalar/LoopReroll.h"
+#include "llvm/Transforms/Scalar/LoopRotation.h"
+#include "llvm/Transforms/Scalar/LoopSimplifyCFG.h"
+#include "llvm/Transforms/Scalar/LoopSink.h"
+#include "llvm/Transforms/Scalar/LoopStrengthReduce.h"
+#include "llvm/Transforms/Scalar/LoopUnrollAndJamPass.h"
+#include "llvm/Transforms/Scalar/LoopUnrollPass.h"
+#include "llvm/Transforms/Scalar/LoopVersioningLICM.h"
+#include "llvm/Transforms/Scalar/LowerAtomic.h"
+#include "llvm/Transforms/Scalar/LowerConstantIntrinsics.h"
+#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
+#include "llvm/Transforms/Scalar/LowerGuardIntrinsic.h"
+#include "llvm/Transforms/Scalar/LowerMatrixIntrinsics.h"
+#include "llvm/Transforms/Scalar/LowerWidenableCondition.h"
+#include "llvm/Transforms/Scalar/MakeGuardsExplicit.h"
+#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
+#include "llvm/Transforms/Scalar/MergeICmps.h"
+#include "llvm/Transforms/Scalar/MergedLoadStoreMotion.h"
+#include "llvm/Transforms/Scalar/NaryReassociate.h"
+#include "llvm/Transforms/Scalar/NewGVN.h"
+#include "llvm/Transforms/Scalar/PartiallyInlineLibCalls.h"
+#include "llvm/Transforms/Scalar/Reassociate.h"
+#include "llvm/Transforms/Scalar/Reg2Mem.h"
+#include "llvm/Transforms/Scalar/RewriteStatepointsForGC.h"
+#include "llvm/Transforms/Scalar/SCCP.h"
+#include "llvm/Transforms/Scalar/SROA.h"
+#include "llvm/Transforms/Scalar/ScalarizeMaskedMemIntrin.h"
+#include "llvm/Transforms/Scalar/Scalarizer.h"
+#include "llvm/Transforms/Scalar/SeparateConstOffsetFromGEP.h"
+#include "llvm/Transforms/Scalar/SimpleLoopUnswitch.h"
+#include "llvm/Transforms/Scalar/SimplifyCFG.h"
+#include "llvm/Transforms/Scalar/Sink.h"
+#include "llvm/Transforms/Scalar/SpeculateAroundPHIs.h"
+#include "llvm/Transforms/Scalar/SpeculativeExecution.h"
+#include "llvm/Transforms/Scalar/StraightLineStrengthReduce.h"
+#include "llvm/Transforms/Scalar/StructurizeCFG.h"
+#include "llvm/Transforms/Scalar/TailRecursionElimination.h"
+#include "llvm/Transforms/Scalar/WarnMissedTransforms.h"
+#include "llvm/Transforms/Utils/AddDiscriminators.h"
+#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
+#include "llvm/Transforms/Utils/BreakCriticalEdges.h"
+#include "llvm/Transforms/Utils/CanonicalizeAliases.h"
+#include "llvm/Transforms/Utils/CanonicalizeFreezeInLoops.h"
+#include "llvm/Transforms/Utils/EntryExitInstrumenter.h"
+#include "llvm/Transforms/Utils/FixIrreducible.h"
+#include "llvm/Transforms/Utils/InjectTLIMappings.h"
+#include "llvm/Transforms/Utils/InstructionNamer.h"
+#include "llvm/Transforms/Utils/LCSSA.h"
+#include "llvm/Transforms/Utils/LibCallsShrinkWrap.h"
+#include "llvm/Transforms/Utils/LoopSimplify.h"
+#include "llvm/Transforms/Utils/LoopVersioning.h"
+#include "llvm/Transforms/Utils/LowerInvoke.h"
+#include "llvm/Transforms/Utils/LowerSwitch.h"
+#include "llvm/Transforms/Utils/Mem2Reg.h"
+#include "llvm/Transforms/Utils/MetaRenamer.h"
+#include "llvm/Transforms/Utils/NameAnonGlobals.h"
+#include "llvm/Transforms/Utils/StripGCRelocates.h"
+#include "llvm/Transforms/Utils/StripNonLineTableDebugInfo.h"
+#include "llvm/Transforms/Utils/SymbolRewriter.h"
+#include "llvm/Transforms/Utils/UnifyFunctionExitNodes.h"
+#include "llvm/Transforms/Utils/UnifyLoopExits.h"
+#include "llvm/Transforms/Utils/UniqueInternalLinkageNames.h"
+#include "llvm/Transforms/Vectorize/LoadStoreVectorizer.h"
+#include "llvm/Transforms/Vectorize/LoopVectorize.h"
+#include "llvm/Transforms/Vectorize/SLPVectorizer.h"
+#include "llvm/Transforms/Vectorize/VectorCombine.h"
+
+using namespace llvm;
+
+extern cl::opt<unsigned> MaxDevirtIterations;
+
+static cl::opt<InliningAdvisorMode> UseInlineAdvisor(
+ "enable-ml-inliner", cl::init(InliningAdvisorMode::Default), cl::Hidden,
+ cl::desc("Enable ML policy for inliner. Currently trained for -Oz only"),
+ cl::values(clEnumValN(InliningAdvisorMode::Default, "default",
+ "Heuristics-based inliner version."),
+ clEnumValN(InliningAdvisorMode::Development, "development",
+ "Use development mode (runtime-loadable model)."),
+ clEnumValN(InliningAdvisorMode::Release, "release",
+ "Use release mode (AOT-compiled model).")));
+
+static cl::opt<bool> EnableSyntheticCounts(
+ "enable-npm-synthetic-counts", cl::init(false), cl::Hidden, cl::ZeroOrMore,
+ cl::desc("Run synthetic function entry count generation "
+ "pass"));
+
+static const Regex DefaultAliasRegex(
+ "^(default|thinlto-pre-link|thinlto|lto-pre-link|lto)<(O[0123sz])>$");
+
+/// Flag to enable inline deferral during PGO.
+static cl::opt<bool>
+ EnablePGOInlineDeferral("enable-npm-pgo-inline-deferral", cl::init(true),
+ cl::Hidden,
+ cl::desc("Enable inline deferral during PGO"));
+
+static cl::opt<bool> EnableMemProfiler("enable-mem-prof", cl::init(false),
+ cl::Hidden, cl::ZeroOrMore,
+ cl::desc("Enable memory profiler"));
+
+static cl::opt<bool> PerformMandatoryInliningsFirst(
+ "mandatory-inlining-first", cl::init(true), cl::Hidden, cl::ZeroOrMore,
+ cl::desc("Perform mandatory inlinings module-wide, before performing "
+ "inlining."));
+
+PipelineTuningOptions::PipelineTuningOptions() {
+ LoopInterleaving = true;
+ LoopVectorization = true;
+ SLPVectorization = false;
+ LoopUnrolling = true;
+ ForgetAllSCEVInLoopUnroll = ForgetSCEVInLoopUnroll;
+ Coroutines = false;
+ LicmMssaOptCap = SetLicmMssaOptCap;
+ LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap;
+ CallGraphProfile = true;
+ MergeFunctions = false;
+ UniqueLinkageNames = false;
+}
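+
+// A caller can adjust these defaults before constructing a PassBuilder; a
+// minimal sketch (assumes an existing TargetMachine *TM and relies on the
+// remaining constructor parameters taking their defaults):
+//
+//   PipelineTuningOptions PTO;
+//   PTO.SLPVectorization = true; // off by default, as set above
+//   PassBuilder PB(/*DebugLogging=*/false, TM, PTO);
+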
+extern cl::opt<bool> ExtraVectorizerPasses;
+
+extern cl::opt<bool> EnableConstraintElimination;
+extern cl::opt<bool> EnableGVNHoist;
+extern cl::opt<bool> EnableGVNSink;
+extern cl::opt<bool> EnableHotColdSplit;
+extern cl::opt<bool> EnableIROutliner;
+extern cl::opt<bool> EnableOrderFileInstrumentation;
+extern cl::opt<bool> EnableCHR;
+extern cl::opt<bool> EnableUnrollAndJam;
+extern cl::opt<bool> EnableLoopFlatten;
+extern cl::opt<bool> RunNewGVN;
+extern cl::opt<bool> RunPartialInlining;
+
+extern cl::opt<bool> FlattenedProfileUsed;
+
+extern cl::opt<AttributorRunOption> AttributorRun;
+extern cl::opt<bool> EnableKnowledgeRetention;
+
+extern cl::opt<bool> EnableMatrix;
+
+extern cl::opt<bool> DisablePreInliner;
+extern cl::opt<int> PreInlineThreshold;
+
+const PassBuilder::OptimizationLevel PassBuilder::OptimizationLevel::O0 = {
+ /*SpeedLevel*/ 0,
+ /*SizeLevel*/ 0};
+const PassBuilder::OptimizationLevel PassBuilder::OptimizationLevel::O1 = {
+ /*SpeedLevel*/ 1,
+ /*SizeLevel*/ 0};
+const PassBuilder::OptimizationLevel PassBuilder::OptimizationLevel::O2 = {
+ /*SpeedLevel*/ 2,
+ /*SizeLevel*/ 0};
+const PassBuilder::OptimizationLevel PassBuilder::OptimizationLevel::O3 = {
+ /*SpeedLevel*/ 3,
+ /*SizeLevel*/ 0};
+const PassBuilder::OptimizationLevel PassBuilder::OptimizationLevel::Os = {
+ /*SpeedLevel*/ 2,
+ /*SizeLevel*/ 1};
+const PassBuilder::OptimizationLevel PassBuilder::OptimizationLevel::Oz = {
+ /*SpeedLevel*/ 2,
+ /*SizeLevel*/ 2};
+
+namespace {
+
+// The following passes/analyses have custom names; otherwise their names
+// would include `(anonymous namespace)`. They are special in that they exist
+// only for testing purposes and don't live in a header file.
+
+/// No-op module pass which does nothing.
+struct NoOpModulePass : PassInfoMixin<NoOpModulePass> {
+ PreservedAnalyses run(Module &M, ModuleAnalysisManager &) {
+ return PreservedAnalyses::all();
+ }
+
+ static StringRef name() { return "NoOpModulePass"; }
+};
+
+/// No-op module analysis.
+class NoOpModuleAnalysis : public AnalysisInfoMixin<NoOpModuleAnalysis> {
+ friend AnalysisInfoMixin<NoOpModuleAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ struct Result {};
+ Result run(Module &, ModuleAnalysisManager &) { return Result(); }
+ static StringRef name() { return "NoOpModuleAnalysis"; }
+};
+
+/// No-op CGSCC pass which does nothing.
+struct NoOpCGSCCPass : PassInfoMixin<NoOpCGSCCPass> {
+ PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &,
+ LazyCallGraph &, CGSCCUpdateResult &UR) {
+ return PreservedAnalyses::all();
+ }
+ static StringRef name() { return "NoOpCGSCCPass"; }
+};
+
+/// No-op CGSCC analysis.
+class NoOpCGSCCAnalysis : public AnalysisInfoMixin<NoOpCGSCCAnalysis> {
+ friend AnalysisInfoMixin<NoOpCGSCCAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ struct Result {};
+ Result run(LazyCallGraph::SCC &, CGSCCAnalysisManager &, LazyCallGraph &G) {
+ return Result();
+ }
+ static StringRef name() { return "NoOpCGSCCAnalysis"; }
+};
+
+/// No-op function pass which does nothing.
+struct NoOpFunctionPass : PassInfoMixin<NoOpFunctionPass> {
+ PreservedAnalyses run(Function &F, FunctionAnalysisManager &) {
+ return PreservedAnalyses::all();
+ }
+ static StringRef name() { return "NoOpFunctionPass"; }
+};
+
+/// No-op function analysis.
+class NoOpFunctionAnalysis : public AnalysisInfoMixin<NoOpFunctionAnalysis> {
+ friend AnalysisInfoMixin<NoOpFunctionAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ struct Result {};
+ Result run(Function &, FunctionAnalysisManager &) { return Result(); }
+ static StringRef name() { return "NoOpFunctionAnalysis"; }
+};
+
+/// No-op loop pass which does nothing.
+struct NoOpLoopPass : PassInfoMixin<NoOpLoopPass> {
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &,
+ LoopStandardAnalysisResults &, LPMUpdater &) {
+ return PreservedAnalyses::all();
+ }
+ static StringRef name() { return "NoOpLoopPass"; }
+};
+
+/// No-op loop analysis.
+class NoOpLoopAnalysis : public AnalysisInfoMixin<NoOpLoopAnalysis> {
+ friend AnalysisInfoMixin<NoOpLoopAnalysis>;
+ static AnalysisKey Key;
+
+public:
+ struct Result {};
+ Result run(Loop &, LoopAnalysisManager &, LoopStandardAnalysisResults &) {
+ return Result();
+ }
+ static StringRef name() { return "NoOpLoopAnalysis"; }
+};
+
+AnalysisKey NoOpModuleAnalysis::Key;
+AnalysisKey NoOpCGSCCAnalysis::Key;
+AnalysisKey NoOpFunctionAnalysis::Key;
+AnalysisKey NoOpLoopAnalysis::Key;
+
+/// Whether or not we should populate a PassInstrumentationCallbacks'
+/// class-to-pass-name map.
+///
+/// This is for optimization purposes so we don't populate it if we never use
+/// it. This should be updated if new pass instrumentation wants to use the map.
+/// We currently only use this for --print-before/after.
+bool shouldPopulateClassToPassNames() {
+ return !printBeforePasses().empty() || !printAfterPasses().empty();
+}
+
+} // namespace
+
+PassBuilder::PassBuilder(bool DebugLogging, TargetMachine *TM,
+ PipelineTuningOptions PTO, Optional<PGOOptions> PGOOpt,
+ PassInstrumentationCallbacks *PIC)
+ : DebugLogging(DebugLogging), TM(TM), PTO(PTO), PGOOpt(PGOOpt), PIC(PIC) {
+ if (TM)
+ TM->registerPassBuilderCallbacks(*this, DebugLogging);
+ if (PIC && shouldPopulateClassToPassNames()) {
+#define MODULE_PASS(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#define FUNCTION_PASS(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#define LOOP_PASS(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#define CGSCC_PASS(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#define CGSCC_ANALYSIS(NAME, CREATE_PASS) \
+ PIC->addClassToPassName(decltype(CREATE_PASS)::name(), NAME);
+#include "PassRegistry.def"
+ }
+}
+
+void PassBuilder::invokePeepholeEPCallbacks(
+ FunctionPassManager &FPM, PassBuilder::OptimizationLevel Level) {
+ for (auto &C : PeepholeEPCallbacks)
+ C(FPM, Level);
+}
+
+void PassBuilder::registerModuleAnalyses(ModuleAnalysisManager &MAM) {
+#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
+ MAM.registerPass([&] { return CREATE_PASS; });
+#include "PassRegistry.def"
+
+ for (auto &C : ModuleAnalysisRegistrationCallbacks)
+ C(MAM);
+}
+
+void PassBuilder::registerCGSCCAnalyses(CGSCCAnalysisManager &CGAM) {
+#define CGSCC_ANALYSIS(NAME, CREATE_PASS) \
+ CGAM.registerPass([&] { return CREATE_PASS; });
+#include "PassRegistry.def"
+
+ for (auto &C : CGSCCAnalysisRegistrationCallbacks)
+ C(CGAM);
+}
+
+void PassBuilder::registerFunctionAnalyses(FunctionAnalysisManager &FAM) {
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ FAM.registerPass([&] { return CREATE_PASS; });
+#include "PassRegistry.def"
+
+ for (auto &C : FunctionAnalysisRegistrationCallbacks)
+ C(FAM);
+}
+
+void PassBuilder::registerLoopAnalyses(LoopAnalysisManager &LAM) {
+#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
+ LAM.registerPass([&] { return CREATE_PASS; });
+#include "PassRegistry.def"
+
+ for (auto &C : LoopAnalysisRegistrationCallbacks)
+ C(LAM);
+}
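+
+// Putting the four register* hooks together, a typical new-pass-manager
+// setup looks like the following sketch (`M` is some llvm::Module):
+//
+//   PassBuilder PB;
+//   LoopAnalysisManager LAM;
+//   FunctionAnalysisManager FAM;
+//   CGSCCAnalysisManager CGAM;
+//   ModuleAnalysisManager MAM;
+//   PB.registerModuleAnalyses(MAM);
+//   PB.registerCGSCCAnalyses(CGAM);
+//   PB.registerFunctionAnalyses(FAM);
+//   PB.registerLoopAnalyses(LAM);
+//   PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+//   ModulePassManager MPM =
+//       PB.buildPerModuleDefaultPipeline(PassBuilder::OptimizationLevel::O2);
+//   MPM.run(M, MAM);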
+
+// Helper to add AnnotationRemarksPass.
+static void addAnnotationRemarksPass(ModulePassManager &MPM) {
+ FunctionPassManager FPM;
+ FPM.addPass(AnnotationRemarksPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+}
+
+// TODO: Investigate the cost/benefit of tail call elimination on debugging.
+FunctionPassManager
+PassBuilder::buildO1FunctionSimplificationPipeline(OptimizationLevel Level,
+ ThinOrFullLTOPhase Phase) {
+
+ FunctionPassManager FPM(DebugLogging);
+
+ // Form SSA out of local memory accesses after breaking apart aggregates into
+ // scalars.
+ FPM.addPass(SROA());
+
+ // Catch trivial redundancies
+ FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
+
+ // Hoisting of scalars and load expressions.
+ FPM.addPass(SimplifyCFGPass());
+ FPM.addPass(InstCombinePass());
+
+ FPM.addPass(LibCallsShrinkWrapPass());
+
+ invokePeepholeEPCallbacks(FPM, Level);
+
+ FPM.addPass(SimplifyCFGPass());
+
+ // Form canonically associated expression trees, and simplify the trees using
+ // basic mathematical properties. For example, this will form (nearly)
+ // minimal multiplication trees.
+ FPM.addPass(ReassociatePass());
+
+ // Add the primary loop simplification pipeline.
+ // FIXME: Currently this is split into two loop pass pipelines because we run
+ // some function passes in between them. These can and should be removed
+ // and/or replaced by scheduling the loop pass equivalents in the correct
+ // positions. But those equivalent passes aren't powerful enough yet.
+ // Specifically, `SimplifyCFGPass` and `InstCombinePass` are currently still
+ // used. We have `LoopSimplifyCFGPass` which isn't yet powerful enough to
+ // fully replace `SimplifyCFGPass`, and the closest to the other we have is
+ // `LoopInstSimplify`.
+ LoopPassManager LPM1(DebugLogging), LPM2(DebugLogging);
+
+ // Simplify the loop body. We do this initially to clean up after other loop
+ // passes run, either when iterating on a loop or on inner loops with
+ // implications on the outer loop.
+ LPM1.addPass(LoopInstSimplifyPass());
+ LPM1.addPass(LoopSimplifyCFGPass());
+
+ LPM1.addPass(LoopRotatePass(/* Disable header duplication */ true));
+ // TODO: Investigate promotion cap for O1.
+ LPM1.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap));
+ LPM1.addPass(SimpleLoopUnswitchPass());
+
+ LPM2.addPass(LoopIdiomRecognizePass());
+ LPM2.addPass(IndVarSimplifyPass());
+
+ for (auto &C : LateLoopOptimizationsEPCallbacks)
+ C(LPM2, Level);
+
+ LPM2.addPass(LoopDeletionPass());
+ // Do not enable unrolling in the PreLinkThinLTO phase during sample PGO,
+ // because it changes the IR in ways that make profile annotation in the
+ // backend compile inaccurate. The normal unroller doesn't pay attention to
+ // forced full-unroll attributes, so we still need to allow the full unroll
+ // pass, which does, to run.
+ if (Phase != ThinOrFullLTOPhase::ThinLTOPreLink || !PGOOpt ||
+ PGOOpt->Action != PGOOptions::SampleUse)
+ LPM2.addPass(LoopFullUnrollPass(Level.getSpeedupLevel(),
+ /* OnlyWhenForced= */ !PTO.LoopUnrolling,
+ PTO.ForgetAllSCEVInLoopUnroll));
+
+ for (auto &C : LoopOptimizerEndEPCallbacks)
+ C(LPM2, Level);
+
+ // We provide the opt remark emitter pass for LICM to use. We only need to do
+ // this once as it is immutable.
+ FPM.addPass(
+ RequireAnalysisPass<OptimizationRemarkEmitterAnalysis, Function>());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ std::move(LPM1), EnableMSSALoopDependency, /*UseBlockFrequencyInfo=*/true,
+ DebugLogging));
+ FPM.addPass(SimplifyCFGPass());
+ FPM.addPass(InstCombinePass());
+ if (EnableLoopFlatten)
+ FPM.addPass(LoopFlattenPass());
+ // The loop passes in LPM2 (LoopFullUnrollPass) do not preserve MemorySSA.
+ // *All* loop passes must preserve it, in order to be able to use it.
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ std::move(LPM2), /*UseMemorySSA=*/false, /*UseBlockFrequencyInfo=*/false,
+ DebugLogging));
+
+ // Delete small arrays after loop unrolling.
+ FPM.addPass(SROA());
+
+ // Specially optimize memory movement as it doesn't look like dataflow in SSA.
+ FPM.addPass(MemCpyOptPass());
+
+ // Sparse conditional constant propagation.
+ // FIXME: It isn't clear why we do this *after* loop passes rather than
+ // before...
+ FPM.addPass(SCCPPass());
+
+ // Delete dead bit computations (instcombine runs after to fold away the dead
+ // computations, and then ADCE will run later to exploit any new DCE
+ // opportunities that creates).
+ FPM.addPass(BDCEPass());
+
+ // Run instcombine after redundancy and dead bit elimination to exploit
+ // opportunities opened up by them.
+ FPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(FPM, Level);
+
+ if (PTO.Coroutines)
+ FPM.addPass(CoroElidePass());
+
+ for (auto &C : ScalarOptimizerLateEPCallbacks)
+ C(FPM, Level);
+
+ // Finally, do an expensive DCE pass to catch all the dead code exposed by
+ // the simplifications and basic cleanup after all the simplifications.
+ // TODO: Investigate if this is too expensive.
+ FPM.addPass(ADCEPass());
+ FPM.addPass(SimplifyCFGPass());
+ FPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(FPM, Level);
+
+ return FPM;
+}
+
+FunctionPassManager
+PassBuilder::buildFunctionSimplificationPipeline(OptimizationLevel Level,
+ ThinOrFullLTOPhase Phase) {
+ assert(Level != OptimizationLevel::O0 && "Must request optimizations!");
+
+ // The O1 pipeline has a separate pipeline creation function to simplify
+ // construction readability.
+ if (Level.getSpeedupLevel() == 1)
+ return buildO1FunctionSimplificationPipeline(Level, Phase);
+
+ FunctionPassManager FPM(DebugLogging);
+
+ // Form SSA out of local memory accesses after breaking apart aggregates into
+ // scalars.
+ FPM.addPass(SROA());
+
+ // Catch trivial redundancies
+ FPM.addPass(EarlyCSEPass(true /* Enable mem-ssa. */));
+ if (EnableKnowledgeRetention)
+ FPM.addPass(AssumeSimplifyPass());
+
+ // Hoisting of scalars and load expressions.
+ if (EnableGVNHoist)
+ FPM.addPass(GVNHoistPass());
+
+ // Global value numbering based sinking.
+ if (EnableGVNSink) {
+ FPM.addPass(GVNSinkPass());
+ FPM.addPass(SimplifyCFGPass());
+ }
+
+ if (EnableConstraintElimination)
+ FPM.addPass(ConstraintEliminationPass());
+
+ // Speculative execution if the target has divergent branches; otherwise nop.
+ FPM.addPass(SpeculativeExecutionPass(/* OnlyIfDivergentTarget =*/true));
+
+ // Optimize based on known information about branches, and cleanup afterward.
+ FPM.addPass(JumpThreadingPass());
+ FPM.addPass(CorrelatedValuePropagationPass());
+
+ FPM.addPass(SimplifyCFGPass());
+ if (Level == OptimizationLevel::O3)
+ FPM.addPass(AggressiveInstCombinePass());
+ FPM.addPass(InstCombinePass());
+
+ if (!Level.isOptimizingForSize())
+ FPM.addPass(LibCallsShrinkWrapPass());
+
+ invokePeepholeEPCallbacks(FPM, Level);
+
+ // For PGO use pipeline, try to optimize memory intrinsics such as memcpy
+ // using the size value profile. Don't perform this when optimizing for size.
+ if (PGOOpt && PGOOpt->Action == PGOOptions::IRUse &&
+ !Level.isOptimizingForSize())
+ FPM.addPass(PGOMemOPSizeOpt());
+
+ FPM.addPass(TailCallElimPass());
+ FPM.addPass(SimplifyCFGPass());
+
+ // Form canonically associated expression trees, and simplify the trees using
+ // basic mathematical properties. For example, this will form (nearly)
+ // minimal multiplication trees.
+ FPM.addPass(ReassociatePass());
+
+ // Add the primary loop simplification pipeline.
+ // FIXME: Currently this is split into two loop pass pipelines because we run
+ // some function passes in between them. These can and should be removed
+ // and/or replaced by scheduling the loop pass equivalents in the correct
+ // positions. But those equivalent passes aren't powerful enough yet.
+ // Specifically, `SimplifyCFGPass` and `InstCombinePass` are currently still
+ // used. We have `LoopSimplifyCFGPass` which isn't yet powerful enough to
+ // fully replace `SimplifyCFGPass`, and the closest to the other we have is
+ // `LoopInstSimplify`.
+ LoopPassManager LPM1(DebugLogging), LPM2(DebugLogging);
+
+ // Simplify the loop body. We do this initially to clean up after other loop
+ // passes run, either when iterating on a loop or on inner loops with
+ // implications on the outer loop.
+ LPM1.addPass(LoopInstSimplifyPass());
+ LPM1.addPass(LoopSimplifyCFGPass());
+
+ // Disable header duplication in loop rotation at -Oz.
+ LPM1.addPass(LoopRotatePass(Level != OptimizationLevel::Oz));
+ // TODO: Investigate promotion cap for O1.
+ LPM1.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap));
+ LPM1.addPass(
+ SimpleLoopUnswitchPass(/* NonTrivial */ Level == OptimizationLevel::O3));
+ LPM2.addPass(LoopIdiomRecognizePass());
+ LPM2.addPass(IndVarSimplifyPass());
+
+ for (auto &C : LateLoopOptimizationsEPCallbacks)
+ C(LPM2, Level);
+
+ LPM2.addPass(LoopDeletionPass());
+ // Do not enable unrolling in the PreLinkThinLTO phase during sample PGO,
+ // because it changes the IR in ways that make profile annotation in the
+ // backend compile inaccurate. The normal unroller doesn't pay attention to
+ // forced full-unroll attributes, so we still need to allow the full unroll
+ // pass, which does, to run.
+ if (Phase != ThinOrFullLTOPhase::ThinLTOPreLink || !PGOOpt ||
+ PGOOpt->Action != PGOOptions::SampleUse)
+ LPM2.addPass(LoopFullUnrollPass(Level.getSpeedupLevel(),
+ /* OnlyWhenForced= */ !PTO.LoopUnrolling,
+ PTO.ForgetAllSCEVInLoopUnroll));
+
+ for (auto &C : LoopOptimizerEndEPCallbacks)
+ C(LPM2, Level);
+
+ // We provide the opt remark emitter pass for LICM to use. We only need to do
+ // this once as it is immutable.
+ FPM.addPass(
+ RequireAnalysisPass<OptimizationRemarkEmitterAnalysis, Function>());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ std::move(LPM1), EnableMSSALoopDependency, /*UseBlockFrequencyInfo=*/true,
+ DebugLogging));
+ FPM.addPass(SimplifyCFGPass());
+ FPM.addPass(InstCombinePass());
+ if (EnableLoopFlatten)
+ FPM.addPass(LoopFlattenPass());
+ // The loop passes in LPM2 (LoopIdiomRecognizePass, IndVarSimplifyPass,
+ // LoopDeletionPass and LoopFullUnrollPass) do not preserve MemorySSA.
+ // *All* loop passes must preserve it, in order to be able to use it.
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ std::move(LPM2), /*UseMemorySSA=*/false, /*UseBlockFrequencyInfo=*/false,
+ DebugLogging));
+
+ // Delete small arrays after loop unrolling.
+ FPM.addPass(SROA());
+
+ // Eliminate redundancies.
+ FPM.addPass(MergedLoadStoreMotionPass());
+ if (RunNewGVN)
+ FPM.addPass(NewGVNPass());
+ else
+ FPM.addPass(GVN());
+
+ // Specially optimize memory movement as it doesn't look like dataflow in SSA.
+ FPM.addPass(MemCpyOptPass());
+
+ // Sparse conditional constant propagation.
+ // FIXME: It isn't clear why we do this *after* loop passes rather than
+ // before...
+ FPM.addPass(SCCPPass());
+
+ // Delete dead bit computations (instcombine runs after to fold away the dead
+ // computations, and then ADCE will run later to exploit any new DCE
+ // opportunities that creates).
+ FPM.addPass(BDCEPass());
+
+ // Run instcombine after redundancy and dead bit elimination to exploit
+ // opportunities opened up by them.
+ FPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(FPM, Level);
+
+ // Re-consider control flow based optimizations after redundancy elimination,
+ // redo DCE, etc.
+ FPM.addPass(JumpThreadingPass());
+ FPM.addPass(CorrelatedValuePropagationPass());
+
+ // Finally, do an expensive DCE pass to catch all the dead code exposed by
+ // the simplifications and basic cleanup after all the simplifications.
+ // TODO: Investigate if this is too expensive.
+ FPM.addPass(ADCEPass());
+
+ FPM.addPass(DSEPass());
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap),
+ EnableMSSALoopDependency, /*UseBlockFrequencyInfo=*/true, DebugLogging));
+
+ if (PTO.Coroutines)
+ FPM.addPass(CoroElidePass());
+
+ for (auto &C : ScalarOptimizerLateEPCallbacks)
+ C(FPM, Level);
+
+ FPM.addPass(SimplifyCFGPass());
+ FPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(FPM, Level);
+
+ if (EnableCHR && Level == OptimizationLevel::O3 && PGOOpt &&
+ (PGOOpt->Action == PGOOptions::IRUse ||
+ PGOOpt->Action == PGOOptions::SampleUse))
+ FPM.addPass(ControlHeightReductionPass());
+
+ return FPM;
+}
+
+void PassBuilder::addRequiredLTOPreLinkPasses(ModulePassManager &MPM) {
+ MPM.addPass(CanonicalizeAliasesPass());
+ MPM.addPass(NameAnonGlobalPass());
+}
+
+void PassBuilder::addPGOInstrPasses(ModulePassManager &MPM,
+ PassBuilder::OptimizationLevel Level,
+ bool RunProfileGen, bool IsCS,
+ std::string ProfileFile,
+ std::string ProfileRemappingFile) {
+ assert(Level != OptimizationLevel::O0 && "Not expecting O0 here!");
+ if (!IsCS && !DisablePreInliner) {
+ InlineParams IP;
+
+ IP.DefaultThreshold = PreInlineThreshold;
+
+ // FIXME: The hint threshold has the same value used by the regular inliner
+ // when not optimizing for size. This should probably be lowered after
+ // performance testing.
+ // FIXME: this comment is cargo-culted from the old pass manager; revisit.
+ IP.HintThreshold = Level.isOptimizingForSize() ? PreInlineThreshold : 325;
+ ModuleInlinerWrapperPass MIWP(IP, DebugLogging);
+ CGSCCPassManager &CGPipeline = MIWP.getPM();
+
+ FunctionPassManager FPM;
+ FPM.addPass(SROA());
+ FPM.addPass(EarlyCSEPass()); // Catch trivial redundancies.
+ FPM.addPass(SimplifyCFGPass()); // Merge & remove basic blocks.
+ FPM.addPass(InstCombinePass()); // Combine silly sequences.
+ invokePeepholeEPCallbacks(FPM, Level);
+
+ CGPipeline.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
+
+ MPM.addPass(std::move(MIWP));
+
+ // Delete anything that is now dead to make sure that we don't instrument
+ // dead code. Instrumentation can end up keeping dead code around and
+ // dramatically increase code size.
+ MPM.addPass(GlobalDCEPass());
+ }
+
+ if (!RunProfileGen) {
+ assert(!ProfileFile.empty() && "Profile use expecting a profile file!");
+ MPM.addPass(PGOInstrumentationUse(ProfileFile, ProfileRemappingFile, IsCS));
+ // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
+ // RequireAnalysisPass for PSI before subsequent non-module passes.
+ MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
+ return;
+ }
+
+ // Perform PGO instrumentation.
+ MPM.addPass(PGOInstrumentationGen(IsCS));
+
+ FunctionPassManager FPM;
+ // Disable header duplication in loop rotation at -Oz.
+ FPM.addPass(createFunctionToLoopPassAdaptor(
+ LoopRotatePass(Level != OptimizationLevel::Oz), EnableMSSALoopDependency,
+ /*UseBlockFrequencyInfo=*/false, DebugLogging));
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+
+ // Add the profile lowering pass.
+ InstrProfOptions Options;
+ if (!ProfileFile.empty())
+ Options.InstrProfileOutput = ProfileFile;
+ // Do counter promotion at Level greater than O0.
+ Options.DoCounterPromotion = true;
+ Options.UseBFIInPromotion = IsCS;
+ MPM.addPass(InstrProfiling(Options, IsCS));
+}
+
+void PassBuilder::addPGOInstrPassesForO0(ModulePassManager &MPM,
+ bool RunProfileGen, bool IsCS,
+ std::string ProfileFile,
+ std::string ProfileRemappingFile) {
+ if (!RunProfileGen) {
+ assert(!ProfileFile.empty() && "Profile use expecting a profile file!");
+ MPM.addPass(PGOInstrumentationUse(ProfileFile, ProfileRemappingFile, IsCS));
+ // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
+ // RequireAnalysisPass for PSI before subsequent non-module passes.
+ MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
+ return;
+ }
+
+ // Perform PGO instrumentation.
+ MPM.addPass(PGOInstrumentationGen(IsCS));
+ // Add the profile lowering pass.
+ InstrProfOptions Options;
+ if (!ProfileFile.empty())
+ Options.InstrProfileOutput = ProfileFile;
+ // Do not do counter promotion at O0.
+ Options.DoCounterPromotion = false;
+ Options.UseBFIInPromotion = IsCS;
+ MPM.addPass(InstrProfiling(Options, IsCS));
+}
+
+static InlineParams
+getInlineParamsFromOptLevel(PassBuilder::OptimizationLevel Level) {
+ return getInlineParams(Level.getSpeedupLevel(), Level.getSizeLevel());
+}
+
+ModuleInlinerWrapperPass
+PassBuilder::buildInlinerPipeline(OptimizationLevel Level,
+ ThinOrFullLTOPhase Phase) {
+ InlineParams IP = getInlineParamsFromOptLevel(Level);
+ if (Phase == ThinOrFullLTOPhase::ThinLTOPreLink && PGOOpt &&
+ PGOOpt->Action == PGOOptions::SampleUse)
+ IP.HotCallSiteThreshold = 0;
+
+ if (PGOOpt)
+ IP.EnableDeferral = EnablePGOInlineDeferral;
+
+ ModuleInlinerWrapperPass MIWP(IP, DebugLogging,
+ PerformMandatoryInliningsFirst,
+ UseInlineAdvisor, MaxDevirtIterations);
+
+ // Require the GlobalsAA analysis for the module so we can query it within
+ // the CGSCC pipeline.
+ MIWP.addRequiredModuleAnalysis<GlobalsAA>();
+
+ // Require the ProfileSummaryAnalysis for the module so we can query it within
+ // the inliner pass.
+ MIWP.addRequiredModuleAnalysis<ProfileSummaryAnalysis>();
+
+ // Now begin the main postorder CGSCC pipeline.
+ // FIXME: The current CGSCC pipeline has its origins in the legacy pass
+ // manager and trying to emulate its precise behavior. Much of this doesn't
+ // make a lot of sense and we should revisit the core CGSCC structure.
+ CGSCCPassManager &MainCGPipeline = MIWP.getPM();
+
+ // Note: historically, the PruneEH pass was run first to deduce nounwind and
+ // generally clean up exception handling overhead. It isn't clear this is
+ // valuable as the inliner doesn't currently care whether it is inlining an
+ // invoke or a call.
+
+ if (AttributorRun & AttributorRunOption::CGSCC)
+ MainCGPipeline.addPass(AttributorCGSCCPass());
+
+ if (PTO.Coroutines)
+ MainCGPipeline.addPass(CoroSplitPass(Level != OptimizationLevel::O0));
+
+ // Now deduce any function attributes based in the current code.
+ MainCGPipeline.addPass(PostOrderFunctionAttrsPass());
+
+ // When at O3 add argument promotion to the pass pipeline.
+ // FIXME: It isn't at all clear why this should be limited to O3.
+ if (Level == OptimizationLevel::O3)
+ MainCGPipeline.addPass(ArgumentPromotionPass());
+
+ // Try to perform OpenMP specific optimizations. This is a (quick!) no-op if
+ // there are no OpenMP runtime calls present in the module.
+ if (Level == OptimizationLevel::O2 || Level == OptimizationLevel::O3)
+ MainCGPipeline.addPass(OpenMPOptPass());
+
+ for (auto &C : CGSCCOptimizerLateEPCallbacks)
+ C(MainCGPipeline, Level);
+
+ // Lastly, add the core function simplification pipeline nested inside the
+ // CGSCC walk.
+ MainCGPipeline.addPass(createCGSCCToFunctionPassAdaptor(
+ buildFunctionSimplificationPipeline(Level, Phase)));
+
+ return MIWP;
+}
+
+ModulePassManager
+PassBuilder::buildModuleSimplificationPipeline(OptimizationLevel Level,
+ ThinOrFullLTOPhase Phase) {
+ ModulePassManager MPM(DebugLogging);
+
+ // Add the UniqueInternalLinkageNames pass, which renames internal linkage
+ // symbols with unique names.
+ if (PTO.UniqueLinkageNames)
+ MPM.addPass(UniqueInternalLinkageNamesPass());
+
+ // Place pseudo probe instrumentation as the first pass of the pipeline to
+ // minimize the impact of optimization changes.
+ if (PGOOpt && PGOOpt->PseudoProbeForProfiling &&
+ Phase != ThinOrFullLTOPhase::ThinLTOPostLink)
+ MPM.addPass(SampleProfileProbePass(TM));
+
+ bool HasSampleProfile = PGOOpt && (PGOOpt->Action == PGOOptions::SampleUse);
+
+ // In ThinLTO mode, when a flattened profile is used, all the available
+ // profile information will be annotated in the PreLink phase, so there is
+ // no need to load the profile again in PostLink.
+ bool LoadSampleProfile =
+ HasSampleProfile &&
+ !(FlattenedProfileUsed && Phase == ThinOrFullLTOPhase::ThinLTOPostLink);
+
+ // During the ThinLTO backend phase we perform early indirect call promotion
+ // here, before globalopt. Otherwise imported available_externally functions
+ // look unreferenced and are removed. If we are going to load the sample
+ // profile then defer until later.
+ // TODO: See if we can move later and consolidate with the location where
+ // we perform ICP when we are loading a sample profile.
+ // TODO: We pass HasSampleProfile (whether there was a sample profile file
+ // passed to the compile) to the SamplePGO flag of ICP. This is used to
+ // determine whether the new direct calls are annotated with prof metadata.
+ // Ideally this should be determined from whether the IR is annotated with
+ // sample profile, and not whether a sample profile was provided on the
+ // command line. E.g. for flattened profiles where we will not be reloading
+ // the sample profile in the ThinLTO backend, we ideally shouldn't have to
+ // provide the sample profile file.
+ if (Phase == ThinOrFullLTOPhase::ThinLTOPostLink && !LoadSampleProfile)
+ MPM.addPass(PGOIndirectCallPromotion(true /* InLTO */, HasSampleProfile));
+
+ // Do basic inference of function attributes from known properties of system
+ // libraries and other oracles.
+ MPM.addPass(InferFunctionAttrsPass());
+
+ // Create an early function pass manager to cleanup the output of the
+ // frontend.
+ FunctionPassManager EarlyFPM(DebugLogging);
+ EarlyFPM.addPass(SimplifyCFGPass());
+ EarlyFPM.addPass(SROA());
+ EarlyFPM.addPass(EarlyCSEPass());
+ EarlyFPM.addPass(LowerExpectIntrinsicPass());
+ if (PTO.Coroutines)
+ EarlyFPM.addPass(CoroEarlyPass());
+ if (Level == OptimizationLevel::O3)
+ EarlyFPM.addPass(CallSiteSplittingPass());
+
+ // In the SamplePGO ThinLTO backend, we need instcombine before profile
+ // annotation to convert bitcasts to direct calls so that they can be inlined
+ // during the profile annotation preparation step.
+ // More details about SamplePGO design can be found in:
+ // https://research.google.com/pubs/pub45290.html
+ // FIXME: revisit how SampleProfileLoad/Inliner/ICP is structured.
+ if (LoadSampleProfile)
+ EarlyFPM.addPass(InstCombinePass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(EarlyFPM)));
+
+ if (LoadSampleProfile) {
+ // Annotate sample profile right after early FPM to ensure freshness of
+ // the debug info.
+ MPM.addPass(SampleProfileLoaderPass(PGOOpt->ProfileFile,
+ PGOOpt->ProfileRemappingFile, Phase));
+ // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
+ // RequireAnalysisPass for PSI before subsequent non-module passes.
+ MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
+ // Do not invoke ICP in the ThinLTOPreLink phase as it makes it hard
+ // for the profile annotation to be accurate in the ThinLTO backend.
+ if (Phase != ThinOrFullLTOPhase::ThinLTOPreLink)
+ // We perform early indirect call promotion here, before globalopt.
+ // This is important for the ThinLTO backend phase because otherwise
+ // imported available_externally functions look unreferenced and are
+ // removed.
+ MPM.addPass(PGOIndirectCallPromotion(
+ Phase == ThinOrFullLTOPhase::ThinLTOPostLink, true /* SamplePGO */));
+ }
+
+ if (AttributorRun & AttributorRunOption::MODULE)
+ MPM.addPass(AttributorPass());
+
+ // Lower type metadata and the type.test intrinsic in the ThinLTO
+ // post link pipeline after ICP. This is to enable usage of the type
+ // tests in ICP sequences.
+ if (Phase == ThinOrFullLTOPhase::ThinLTOPostLink)
+ MPM.addPass(LowerTypeTestsPass(nullptr, nullptr, true));
+
+ for (auto &C : PipelineEarlySimplificationEPCallbacks)
+ C(MPM, Level);
+
+ // Interprocedural constant propagation now that basic cleanup has occurred
+ // and prior to optimizing globals.
+ // FIXME: This position in the pipeline hasn't been carefully considered in
+ // years, it should be re-analyzed.
+ MPM.addPass(IPSCCPPass());
+
+ // Attach metadata to indirect call sites indicating the set of functions
+ // they may target at run-time. This should follow IPSCCP.
+ MPM.addPass(CalledValuePropagationPass());
+
+ // Optimize globals to try and fold them into constants.
+ MPM.addPass(GlobalOptPass());
+
+ // Promote any localized globals to SSA registers.
+ // FIXME: Should this instead be a run of SROA?
+ // FIXME: We should probably run instcombine and simplify-cfg afterward to
+ // delete control flows that are dead once globals have been folded to
+ // constants.
+ MPM.addPass(createModuleToFunctionPassAdaptor(PromotePass()));
+
+ // Remove any dead arguments exposed by cleanups and constant folding
+ // globals.
+ MPM.addPass(DeadArgumentEliminationPass());
+
+ // Create a small function pass pipeline to cleanup after all the global
+ // optimizations.
+ FunctionPassManager GlobalCleanupPM(DebugLogging);
+ GlobalCleanupPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(GlobalCleanupPM, Level);
+
+ GlobalCleanupPM.addPass(SimplifyCFGPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(GlobalCleanupPM)));
+
+ // Add all the requested passes for instrumentation PGO, if requested.
+ if (PGOOpt && Phase != ThinOrFullLTOPhase::ThinLTOPostLink &&
+ (PGOOpt->Action == PGOOptions::IRInstr ||
+ PGOOpt->Action == PGOOptions::IRUse)) {
+ addPGOInstrPasses(MPM, Level,
+ /* RunProfileGen */ PGOOpt->Action == PGOOptions::IRInstr,
+ /* IsCS */ false, PGOOpt->ProfileFile,
+ PGOOpt->ProfileRemappingFile);
+ MPM.addPass(PGOIndirectCallPromotion(false, false));
+ }
+ if (PGOOpt && Phase != ThinOrFullLTOPhase::ThinLTOPostLink &&
+ PGOOpt->CSAction == PGOOptions::CSIRInstr)
+ MPM.addPass(PGOInstrumentationGenCreateVar(PGOOpt->CSProfileGenFile));
+
+ // Synthesize function entry counts for non-PGO compilation.
+ if (EnableSyntheticCounts && !PGOOpt)
+ MPM.addPass(SyntheticCountsPropagation());
+
+ MPM.addPass(buildInlinerPipeline(Level, Phase));
+
+ if (EnableMemProfiler && Phase != ThinOrFullLTOPhase::ThinLTOPreLink) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(MemProfilerPass()));
+ MPM.addPass(ModuleMemProfilerPass());
+ }
+
+ return MPM;
+}
+
+ModulePassManager
+PassBuilder::buildModuleOptimizationPipeline(OptimizationLevel Level,
+ bool LTOPreLink) {
+ ModulePassManager MPM(DebugLogging);
+
+ // Optimize globals now that the module is fully simplified.
+ MPM.addPass(GlobalOptPass());
+ MPM.addPass(GlobalDCEPass());
+
+ // Run partial inlining pass to partially inline functions that have
+ // large bodies.
+ if (RunPartialInlining)
+ MPM.addPass(PartialInlinerPass());
+
+ // Remove available-externally function and global definitions since we
+ // aren't compiling an object file for later LTO. For LTO we want to
+ // preserve these so they are eligible for inlining at link-time, so they
+ // must be kept during prelinking. Note that if they are unreferenced they
+ // will be removed by GlobalDCE later, so this only impacts referenced
+ // available-externally globals. Eventually they will be suppressed during
+ // codegen, but eliminating them here enables more opportunity for GlobalDCE,
+ // as it may make globals referenced by available-externally functions dead
+ // and saves running the remaining passes on the eliminated functions.
+ if (!LTOPreLink)
+ MPM.addPass(EliminateAvailableExternallyPass());
+
+ if (EnableOrderFileInstrumentation)
+ MPM.addPass(InstrOrderFilePass());
+
+ // Do RPO function attribute inference across the module to forward-propagate
+ // attributes where applicable.
+ // FIXME: Is this really an optimization rather than a canonicalization?
+ MPM.addPass(ReversePostOrderFunctionAttrsPass());
+
+ // Do a post-inline PGO instrumentation and use pass. This is a context
+ // sensitive PGO pass. We don't want to do this in the LTOPreLink phase, as
+ // cross-module inlining has not been done yet. The context sensitive
+ // instrumentation runs after all the inlining is done.
+ if (!LTOPreLink && PGOOpt) {
+ if (PGOOpt->CSAction == PGOOptions::CSIRInstr)
+ addPGOInstrPasses(MPM, Level, /* RunProfileGen */ true,
+ /* IsCS */ true, PGOOpt->CSProfileGenFile,
+ PGOOpt->ProfileRemappingFile);
+ else if (PGOOpt->CSAction == PGOOptions::CSIRUse)
+ addPGOInstrPasses(MPM, Level, /* RunProfileGen */ false,
+ /* IsCS */ true, PGOOpt->ProfileFile,
+ PGOOpt->ProfileRemappingFile);
+ }
+
+ // Re-require GlobalsAA here prior to function passes. This is particularly
+ // useful as the above will have inlined, DCE'ed, and function-attr
+ // propagated everything. We should at this point have a reasonably minimal
+ // and richly annotated call graph. By computing aliasing and mod/ref
+ // information for all local globals here, the late loop passes and notably
+ // the vectorizer will be able to use them to help recognize vectorizable
+ // memory operations.
+ MPM.addPass(RequireAnalysisPass<GlobalsAA, Module>());
+
+ FunctionPassManager OptimizePM(DebugLogging);
+ OptimizePM.addPass(Float2IntPass());
+ OptimizePM.addPass(LowerConstantIntrinsicsPass());
+
+ if (EnableMatrix) {
+ OptimizePM.addPass(LowerMatrixIntrinsicsPass());
+ OptimizePM.addPass(EarlyCSEPass());
+ }
+
+ // FIXME: We need to run some loop optimizations to re-rotate loops after
+ // simplify-cfg and others undo their rotation.
+
+ // Optimize the loop execution. These passes operate on entire loop nests
+ // rather than on each loop in an inside-out manner, and so they are actually
+ // function passes.
+
+ for (auto &C : VectorizerStartEPCallbacks)
+ C(OptimizePM, Level);
+
+ // First rotate loops that may have been un-rotated by prior passes.
+ // Disable header duplication at -Oz.
+ OptimizePM.addPass(createFunctionToLoopPassAdaptor(
+ LoopRotatePass(Level != OptimizationLevel::Oz, LTOPreLink),
+ EnableMSSALoopDependency,
+ /*UseBlockFrequencyInfo=*/false, DebugLogging));
+
+ // Distribute loops to allow partial vectorization, i.e. isolate dependences
+ // that would otherwise inhibit vectorization into a separate loop. This is
+ // currently only performed for loops marked with the metadata
+ // llvm.loop.distribute=true or when -enable-loop-distribute is specified.
+ OptimizePM.addPass(LoopDistributePass());
+
+ // Populates the VFABI attribute with the scalar-to-vector mappings
+ // from the TargetLibraryInfo.
+ OptimizePM.addPass(InjectTLIMappings());
+
+ // Now run the core loop vectorizer.
+ OptimizePM.addPass(LoopVectorizePass(
+ LoopVectorizeOptions(!PTO.LoopInterleaving, !PTO.LoopVectorization)));
+
+ // Eliminate loads by forwarding stores from the previous iteration to loads
+ // of the current iteration.
+ OptimizePM.addPass(LoopLoadEliminationPass());
+
+ // Cleanup after the loop optimization passes.
+ OptimizePM.addPass(InstCombinePass());
+
+ if (Level.getSpeedupLevel() > 1 && ExtraVectorizerPasses) {
+ // At higher optimization levels, try to clean up any runtime overlap and
+ // alignment checks inserted by the vectorizer. We want to track correlated
+ // runtime checks for two inner loops in the same outer loop, fold any
+ // common computations, hoist loop-invariant aspects out of any outer loop,
+ // and unswitch the runtime checks if possible. Once hoisted, we may have
+ // dead (or speculatable) control flows or more combining opportunities.
+ OptimizePM.addPass(EarlyCSEPass());
+ OptimizePM.addPass(CorrelatedValuePropagationPass());
+ OptimizePM.addPass(InstCombinePass());
+ LoopPassManager LPM(DebugLogging);
+ LPM.addPass(LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap));
+ LPM.addPass(
+ SimpleLoopUnswitchPass(/* NonTrivial */ Level == OptimizationLevel::O3));
+ OptimizePM.addPass(RequireAnalysisPass<OptimizationRemarkEmitterAnalysis, Function>());
+ OptimizePM.addPass(createFunctionToLoopPassAdaptor(
+ std::move(LPM), EnableMSSALoopDependency, /*UseBlockFrequencyInfo=*/true,
+ DebugLogging));
+ OptimizePM.addPass(SimplifyCFGPass());
+ OptimizePM.addPass(InstCombinePass());
+ }
+
+ // Now that we've formed fast-to-execute loop structures, we do further
+ // optimizations. These are run afterward as they might block doing complex
+ // analyses and transforms such as those needed for loop vectorization.
+
+ // Cleanup after loop vectorization, etc. Simplification passes like CVP and
+ // GVN, loop transforms, and others have already run, so it's now better to
+ // convert to more optimized IR using more aggressive simplify CFG options.
+ // The extra sinking transform can create larger basic blocks, so do this
+ // before SLP vectorization.
+ // FIXME: study whether hoisting and/or sinking of common instructions should
+ // be delayed until after SLP vectorizer.
+ OptimizePM.addPass(SimplifyCFGPass(SimplifyCFGOptions()
+ .forwardSwitchCondToPhi(true)
+ .convertSwitchToLookupTable(true)
+ .needCanonicalLoops(false)
+ .hoistCommonInsts(true)
+ .sinkCommonInsts(true)));
+
+ // Optimize parallel scalar instruction chains into SIMD instructions.
+ if (PTO.SLPVectorization) {
+ OptimizePM.addPass(SLPVectorizerPass());
+ if (Level.getSpeedupLevel() > 1 && ExtraVectorizerPasses) {
+ OptimizePM.addPass(EarlyCSEPass());
+ }
+ }
+
+ // Enhance/cleanup vector code.
+ OptimizePM.addPass(VectorCombinePass());
+ OptimizePM.addPass(InstCombinePass());
+
+ // Unroll small loops to hide loop backedge latency and saturate any parallel
+ // execution resources of an out-of-order processor. We also then need to
+ // clean up redundancies and loop invariant code.
+ // FIXME: It would be really good to use a loop-integrated instruction
+ // combiner for cleanup here so that the unrolling and LICM can be pipelined
+ // across the loop nests.
+ // We do UnrollAndJam in a separate LPM to ensure it happens before unrolling.
+ if (EnableUnrollAndJam && PTO.LoopUnrolling) {
+ OptimizePM.addPass(LoopUnrollAndJamPass(Level.getSpeedupLevel()));
+ }
+ OptimizePM.addPass(LoopUnrollPass(LoopUnrollOptions(
+ Level.getSpeedupLevel(), /*OnlyWhenForced=*/!PTO.LoopUnrolling,
+ PTO.ForgetAllSCEVInLoopUnroll)));
+ OptimizePM.addPass(WarnMissedTransformationsPass());
+ OptimizePM.addPass(InstCombinePass());
+ OptimizePM.addPass(RequireAnalysisPass<OptimizationRemarkEmitterAnalysis, Function>());
+ OptimizePM.addPass(createFunctionToLoopPassAdaptor(
+ LICMPass(PTO.LicmMssaOptCap, PTO.LicmMssaNoAccForPromotionCap),
+ EnableMSSALoopDependency, /*UseBlockFrequencyInfo=*/true, DebugLogging));
+
+ // Now that we've vectorized and unrolled loops, we may have more refined
+ // alignment information, try to re-derive it here.
+ OptimizePM.addPass(AlignmentFromAssumptionsPass());
+
+ // Split out cold code. Splitting is done late to avoid hiding context from
+ // other optimizations and inadvertently regressing performance. The tradeoff
+ // is that this has a higher code size cost than splitting early.
+ if (EnableHotColdSplit && !LTOPreLink)
+ MPM.addPass(HotColdSplittingPass());
+
+ // Search the code for similar regions of code. If enough similar regions can
+ // be found where extracting the regions into their own functions will decrease
+ // the size of the program, we extract the regions and deduplicate the
+ // structurally similar regions.
+ if (EnableIROutliner)
+ MPM.addPass(IROutlinerPass());
+
+ // Merge functions if requested.
+ if (PTO.MergeFunctions)
+ MPM.addPass(MergeFunctionsPass());
+
+ // The LoopSink pass sinks instructions hoisted by LICM, which serves as a
+ // canonicalization pass that enables other optimizations. As a result,
+ // LoopSink needs to be a very late IR pass to avoid undoing LICM's results
+ // too early.
+ OptimizePM.addPass(LoopSinkPass());
+
+ // And finally clean up LCSSA form before generating code.
+ OptimizePM.addPass(InstSimplifyPass());
+
+ // This hoists/decomposes div/rem ops. It should run after other sink/hoist
+ // passes to avoid re-sinking, but before SimplifyCFG because it can allow
+ // flattening of blocks.
+ OptimizePM.addPass(DivRemPairsPass());
+
+ // LoopSink (and other loop passes since the last simplifyCFG) might have
+ // resulted in single-entry-single-exit or empty blocks. Clean up the CFG.
+ OptimizePM.addPass(SimplifyCFGPass());
+
+ // Optimize PHIs by speculating around them when profitable. Note that this
+ // pass needs to be run after any PRE or similar pass as it is essentially
+ // inserting redundancies into the program. This even includes SimplifyCFG.
+ OptimizePM.addPass(SpeculateAroundPHIsPass());
+
+ if (PTO.Coroutines)
+ OptimizePM.addPass(CoroCleanupPass());
+
+ // Add the core optimizing pipeline.
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(OptimizePM)));
+
+ for (auto &C : OptimizerLastEPCallbacks)
+ C(MPM, Level);
+
+ if (PTO.CallGraphProfile)
+ MPM.addPass(CGProfilePass());
+
+ // Now we need to do some global optimization transforms.
+ // FIXME: It would seem like these should come first in the optimization
+ // pipeline and maybe be the bottom of the canonicalization pipeline? Weird
+ // ordering here.
+ MPM.addPass(GlobalDCEPass());
+ MPM.addPass(ConstantMergePass());
+
+ return MPM;
+}
+
+ModulePassManager
+PassBuilder::buildPerModuleDefaultPipeline(OptimizationLevel Level,
+ bool LTOPreLink) {
+ assert(Level != OptimizationLevel::O0 &&
+ "Must request optimizations for the default pipeline!");
+
+ ModulePassManager MPM(DebugLogging);
+
+ // Convert @llvm.global.annotations to !annotation metadata.
+ MPM.addPass(Annotation2MetadataPass());
+
+ // Force any function attributes we want the rest of the pipeline to observe.
+ MPM.addPass(ForceFunctionAttrsPass());
+
+ // Apply module pipeline start EP callback.
+ for (auto &C : PipelineStartEPCallbacks)
+ C(MPM, Level);
+
+ if (PGOOpt && PGOOpt->DebugInfoForProfiling)
+ MPM.addPass(createModuleToFunctionPassAdaptor(AddDiscriminatorsPass()));
+
+ // Add the core simplification pipeline.
+ MPM.addPass(buildModuleSimplificationPipeline(
+ Level, LTOPreLink ? ThinOrFullLTOPhase::FullLTOPreLink
+ : ThinOrFullLTOPhase::None));
+
+ // Now add the optimization pipeline.
+ MPM.addPass(buildModuleOptimizationPipeline(Level, LTOPreLink));
+
+ if (PGOOpt && PGOOpt->PseudoProbeForProfiling)
+ MPM.addPass(PseudoProbeUpdatePass());
+
+ // Emit annotation remarks.
+ addAnnotationRemarksPass(MPM);
+
+ if (LTOPreLink)
+ addRequiredLTOPreLinkPasses(MPM);
+
+ return MPM;
+}
+
+ModulePassManager
+PassBuilder::buildThinLTOPreLinkDefaultPipeline(OptimizationLevel Level) {
+ assert(Level != OptimizationLevel::O0 &&
+ "Must request optimizations for the default pipeline!");
+
+ ModulePassManager MPM(DebugLogging);
+
+ // Convert @llvm.global.annotations to !annotation metadata.
+ MPM.addPass(Annotation2MetadataPass());
+
+ // Force any function attributes we want the rest of the pipeline to observe.
+ MPM.addPass(ForceFunctionAttrsPass());
+
+ if (PGOOpt && PGOOpt->DebugInfoForProfiling)
+ MPM.addPass(createModuleToFunctionPassAdaptor(AddDiscriminatorsPass()));
+
+ // Apply module pipeline start EP callback.
+ for (auto &C : PipelineStartEPCallbacks)
+ C(MPM, Level);
+
+ // If we are planning to perform ThinLTO later, we don't bloat the code with
+ // unrolling/vectorization/... now. Just simplify the module as much as we
+ // can.
+ MPM.addPass(buildModuleSimplificationPipeline(
+ Level, ThinOrFullLTOPhase::ThinLTOPreLink));
+
+ // Run partial inlining pass to partially inline functions that have
+ // large bodies.
+ // FIXME: It isn't clear whether this is really the right place to run this
+ // in ThinLTO. Because there is another canonicalization and simplification
+ // phase that will run after the thin link, running this here ends up with
+ // less information than will be available later and it may grow functions in
+ // ways that aren't beneficial.
+ if (RunPartialInlining)
+ MPM.addPass(PartialInlinerPass());
+
+ // Reduce the size of the IR as much as possible.
+ MPM.addPass(GlobalOptPass());
+
+ // Module simplification splits coroutines, but does not fully clean up
+ // coroutine intrinsics. To ensure ThinLTO optimization passes don't trip up
+ // on these, we schedule the cleanup here.
+ if (PTO.Coroutines)
+ MPM.addPass(createModuleToFunctionPassAdaptor(CoroCleanupPass()));
+
+ if (PGOOpt && PGOOpt->PseudoProbeForProfiling)
+ MPM.addPass(PseudoProbeUpdatePass());
+
+ // Emit annotation remarks.
+ addAnnotationRemarksPass(MPM);
+
+ addRequiredLTOPreLinkPasses(MPM);
+
+ return MPM;
+}
+
+ModulePassManager PassBuilder::buildThinLTODefaultPipeline(
+ OptimizationLevel Level, const ModuleSummaryIndex *ImportSummary) {
+ ModulePassManager MPM(DebugLogging);
+
+ // Convert @llvm.global.annotations to !annotation metadata.
+ MPM.addPass(Annotation2MetadataPass());
+
+ if (ImportSummary) {
+ // These passes import type identifier resolutions for whole-program
+ // devirtualization and CFI. They must run early because other passes may
+ // disturb the specific instruction patterns that these passes look for,
+ // creating dependencies on resolutions that may not appear in the summary.
+ //
+ // For example, GVN may transform the pattern assume(type.test) appearing in
+ // two basic blocks into assume(phi(type.test, type.test)), which would
+ // transform a dependency on a WPD resolution into a dependency on a type
+ // identifier resolution for CFI.
+ //
+ // Also, WPD has access to more precise information than ICP and can
+ // devirtualize more effectively, so it should operate on the IR first.
+ //
+ // The WPD and LowerTypeTest passes need to run at -O0 to lower type
+ // metadata and intrinsics.
+ MPM.addPass(WholeProgramDevirtPass(nullptr, ImportSummary));
+ MPM.addPass(LowerTypeTestsPass(nullptr, ImportSummary));
+ }
+
+ if (Level == OptimizationLevel::O0)
+ return MPM;
+
+ // Force any function attributes we want the rest of the pipeline to observe.
+ MPM.addPass(ForceFunctionAttrsPass());
+
+ // Add the core simplification pipeline.
+ MPM.addPass(buildModuleSimplificationPipeline(
+ Level, ThinOrFullLTOPhase::ThinLTOPostLink));
+
+ // Now add the optimization pipeline.
+ MPM.addPass(buildModuleOptimizationPipeline(Level));
+
+ // Emit annotation remarks.
+ addAnnotationRemarksPass(MPM);
+
+ return MPM;
+}
+
+ModulePassManager
+PassBuilder::buildLTOPreLinkDefaultPipeline(OptimizationLevel Level) {
+ assert(Level != OptimizationLevel::O0 &&
+ "Must request optimizations for the default pipeline!");
+ // FIXME: We should use a customized pre-link pipeline!
+ return buildPerModuleDefaultPipeline(Level,
+ /* LTOPreLink */ true);
+}
+
+ModulePassManager
+PassBuilder::buildLTODefaultPipeline(OptimizationLevel Level,
+ ModuleSummaryIndex *ExportSummary) {
+ ModulePassManager MPM(DebugLogging);
+
+ // Convert @llvm.global.annotations to !annotation metadata.
+ MPM.addPass(Annotation2MetadataPass());
+
+ if (Level == OptimizationLevel::O0) {
+ // The WPD and LowerTypeTest passes need to run at -O0 to lower type
+ // metadata and intrinsics.
+ MPM.addPass(WholeProgramDevirtPass(ExportSummary, nullptr));
+ MPM.addPass(LowerTypeTestsPass(ExportSummary, nullptr));
+ // Run a second time to clean up any type tests left behind by WPD for use
+ // in ICP.
+ MPM.addPass(LowerTypeTestsPass(nullptr, nullptr, true));
+
+ // Emit annotation remarks.
+ addAnnotationRemarksPass(MPM);
+
+ return MPM;
+ }
+
+ if (PGOOpt && PGOOpt->Action == PGOOptions::SampleUse) {
+ // Load sample profile before running the LTO optimization pipeline.
+ MPM.addPass(SampleProfileLoaderPass(PGOOpt->ProfileFile,
+ PGOOpt->ProfileRemappingFile,
+ ThinOrFullLTOPhase::FullLTOPostLink));
+ // Cache ProfileSummaryAnalysis once to avoid the potential need to insert
+ // RequireAnalysisPass for PSI before subsequent non-module passes.
+ MPM.addPass(RequireAnalysisPass<ProfileSummaryAnalysis, Module>());
+ }
+
+ // Remove unused virtual tables to improve the quality of code generated by
+ // whole-program devirtualization and bitset lowering.
+ MPM.addPass(GlobalDCEPass());
+
+ // Force any function attributes we want the rest of the pipeline to observe.
+ MPM.addPass(ForceFunctionAttrsPass());
+
+ // Do basic inference of function attributes from known properties of system
+ // libraries and other oracles.
+ MPM.addPass(InferFunctionAttrsPass());
+
+ if (Level.getSpeedupLevel() > 1) {
+ FunctionPassManager EarlyFPM(DebugLogging);
+ EarlyFPM.addPass(CallSiteSplittingPass());
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(EarlyFPM)));
+
+ // Indirect call promotion. This should promote all the targets that are
+ // left by the earlier promotion pass that promotes intra-module targets.
+ // This two-step promotion saves compile time. For LTO, it should produce
+ // the same result as doing all of the promotion here.
+ MPM.addPass(PGOIndirectCallPromotion(
+ true /* InLTO */, PGOOpt && PGOOpt->Action == PGOOptions::SampleUse));
+ // Propagate constants at call sites into the functions they call. This
+ // opens opportunities for globalopt (and inlining) by substituting function
+ // pointers passed as arguments to direct uses of functions.
+ MPM.addPass(IPSCCPPass());
+
+ // Attach metadata to indirect call sites indicating the set of functions
+ // they may target at run-time. This should follow IPSCCP.
+ MPM.addPass(CalledValuePropagationPass());
+ }
+
+ // Now deduce any function attributes based on the current code.
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
+ PostOrderFunctionAttrsPass()));
+
+ // Do RPO function attribute inference across the module to forward-propagate
+ // attributes where applicable.
+ // FIXME: Is this really an optimization rather than a canonicalization?
+ MPM.addPass(ReversePostOrderFunctionAttrsPass());
+
+ // Use in-range annotations on GEP indices to split globals where beneficial.
+ MPM.addPass(GlobalSplitPass());
+
+ // Run whole program optimization of virtual call when the list of callees
+ // is fixed.
+ MPM.addPass(WholeProgramDevirtPass(ExportSummary, nullptr));
+
+ // Stop here at -O1.
+ if (Level == OptimizationLevel::O1) {
+ // The LowerTypeTestsPass needs to run to lower type metadata and the
+ // type.test intrinsics. The pass does nothing if CFI is disabled.
+ MPM.addPass(LowerTypeTestsPass(ExportSummary, nullptr));
+ // Run a second time to clean up any type tests left behind by WPD for use
+ // in ICP (which is performed earlier than this in the regular LTO
+ // pipeline).
+ MPM.addPass(LowerTypeTestsPass(nullptr, nullptr, true));
+
+ // Emit annotation remarks.
+ addAnnotationRemarksPass(MPM);
+
+ return MPM;
+ }
+
+ // Optimize globals to try and fold them into constants.
+ MPM.addPass(GlobalOptPass());
+
+ // Promote any localized globals to SSA registers.
+ MPM.addPass(createModuleToFunctionPassAdaptor(PromotePass()));
+
+ // Linking modules together can lead to duplicate global constants; only
+ // keep one copy of each constant.
+ MPM.addPass(ConstantMergePass());
+
+ // Remove unused arguments from functions.
+ MPM.addPass(DeadArgumentEliminationPass());
+
+ // Reduce the code after globalopt and ipsccp. Both can open up significant
+ // simplification opportunities, and both can propagate functions through
+ // function pointers. When this happens, we often have to resolve varargs
+ // calls, etc, so let instcombine do this.
+ FunctionPassManager PeepholeFPM(DebugLogging);
+ if (Level == OptimizationLevel::O3)
+ PeepholeFPM.addPass(AggressiveInstCombinePass());
+ PeepholeFPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(PeepholeFPM, Level);
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(PeepholeFPM)));
+
+ // Note: historically, the PruneEH pass was run first to deduce nounwind and
+ // generally clean up exception handling overhead. It isn't clear this is
+ // valuable as the inliner doesn't currently care whether it is inlining an
+ // invoke or a call.
+ // Run the inliner now.
+ MPM.addPass(ModuleInlinerWrapperPass(getInlineParamsFromOptLevel(Level),
+ DebugLogging));
+
+ // Optimize globals again after we ran the inliner.
+ MPM.addPass(GlobalOptPass());
+
+ // Garbage collect dead functions.
+ // FIXME: Add ArgumentPromotion pass after once it's ported.
+ MPM.addPass(GlobalDCEPass());
+
+ FunctionPassManager FPM(DebugLogging);
+ // The IPO Passes may leave cruft around. Clean up after them.
+ FPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(FPM, Level);
+
+ FPM.addPass(JumpThreadingPass(/*InsertFreezeWhenUnfoldingSelect*/ true));
+
+ // Do a post-inline PGO instrumentation and use pass. This is a context
+ // sensitive PGO pass.
+ if (PGOOpt) {
+ if (PGOOpt->CSAction == PGOOptions::CSIRInstr)
+ addPGOInstrPasses(MPM, Level, /* RunProfileGen */ true,
+ /* IsCS */ true, PGOOpt->CSProfileGenFile,
+ PGOOpt->ProfileRemappingFile);
+ else if (PGOOpt->CSAction == PGOOptions::CSIRUse)
+ addPGOInstrPasses(MPM, Level, /* RunProfileGen */ false,
+ /* IsCS */ true, PGOOpt->ProfileFile,
+ PGOOpt->ProfileRemappingFile);
+ }
+
+ // Break up allocas.
+ FPM.addPass(SROA());
+
+ // LTO provides additional opportunities for tailcall elimination due to
+ // link-time inlining and visibility of the nocapture attribute.
+ FPM.addPass(TailCallElimPass());
+
+ // Run a few AA-driven optimizations here and now to clean up the code.
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(
+ PostOrderFunctionAttrsPass()));
+ // FIXME: here we run IP alias analysis in the legacy PM.
+
+ FunctionPassManager MainFPM(DebugLogging);
+
+ // FIXME: once we fix LoopPass Manager, add LICM here.
+ // FIXME: once we provide support for enabling MLSM, add it here.
+ if (RunNewGVN)
+ MainFPM.addPass(NewGVNPass());
+ else
+ MainFPM.addPass(GVN());
+
+ // Remove dead memcpy calls.
+ MainFPM.addPass(MemCpyOptPass());
+
+ // Nuke dead stores.
+ MainFPM.addPass(DSEPass());
+
+ // FIXME: at this point, we run a bunch of loop passes:
+ // indVarSimplify, loopDeletion, loopInterchange, loopUnroll,
+ // loopVectorize. Enable them once the remaining issues with LPM
+ // are sorted out.
+
+ MainFPM.addPass(InstCombinePass());
+ MainFPM.addPass(SimplifyCFGPass(SimplifyCFGOptions().hoistCommonInsts(true)));
+ MainFPM.addPass(SCCPPass());
+ MainFPM.addPass(InstCombinePass());
+ MainFPM.addPass(BDCEPass());
+
+ // FIXME: We may want to run SLPVectorizer here.
+ // After vectorization, assume intrinsics may tell us more
+ // about pointer alignments.
+#if 0
+ MainFPM.addPass(AlignmentFromAssumptionsPass());
+#endif
+
+ // FIXME: Conditionally run LoadCombine here, after it's ported
+ // (in case we still have this pass, given its questionable usefulness).
+
+ MainFPM.addPass(InstCombinePass());
+ invokePeepholeEPCallbacks(MainFPM, Level);
+ MainFPM.addPass(JumpThreadingPass(/*InsertFreezeWhenUnfoldingSelect*/ true));
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(MainFPM)));
+
+ // Create a function that performs CFI checks for cross-DSO calls with
+ // targets in the current module.
+ MPM.addPass(CrossDSOCFIPass());
+
+ // Lower type metadata and the type.test intrinsic. This pass supports
+ // clang's control flow integrity mechanisms (-fsanitize=cfi*) and needs
+ // to be run at link time if CFI is enabled. This pass does nothing if
+ // CFI is disabled.
+ MPM.addPass(LowerTypeTestsPass(ExportSummary, nullptr));
+ // Run a second time to clean up any type tests left behind by WPD for use
+ // in ICP (which is performed earlier than this in the regular LTO pipeline).
+ MPM.addPass(LowerTypeTestsPass(nullptr, nullptr, true));
+
+ // Enable splitting late in the FullLTO post-link pipeline. This is done in
+ // the same stage in the old pass manager (\ref addLateLTOOptimizationPasses).
+ if (EnableHotColdSplit)
+ MPM.addPass(HotColdSplittingPass());
+
+ // Add late LTO optimization passes.
+ // Delete basic blocks, which optimization passes may have killed.
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ SimplifyCFGPass(SimplifyCFGOptions().hoistCommonInsts(true))));
+
+ // Drop bodies of available-externally objects to improve GlobalDCE.
+ MPM.addPass(EliminateAvailableExternallyPass());
+
+ // Now that we have optimized the program, discard unreachable functions.
+ MPM.addPass(GlobalDCEPass());
+
+ if (PTO.MergeFunctions)
+ MPM.addPass(MergeFunctionsPass());
+
+ // Emit annotation remarks.
+ addAnnotationRemarksPass(MPM);
+
+ return MPM;
+}
+
+ModulePassManager PassBuilder::buildO0DefaultPipeline(OptimizationLevel Level,
+ bool LTOPreLink) {
+ assert(Level == OptimizationLevel::O0 &&
+ "buildO0DefaultPipeline should only be used with O0");
+
+ ModulePassManager MPM(DebugLogging);
+
+ // Add the UniqueInternalLinkageNames pass, which renames internal linkage
+ // symbols with unique names.
+ if (PTO.UniqueLinkageNames)
+ MPM.addPass(UniqueInternalLinkageNamesPass());
+
+ if (PGOOpt && (PGOOpt->Action == PGOOptions::IRInstr ||
+ PGOOpt->Action == PGOOptions::IRUse))
+ addPGOInstrPassesForO0(
+ MPM,
+ /* RunProfileGen */ (PGOOpt->Action == PGOOptions::IRInstr),
+ /* IsCS */ false, PGOOpt->ProfileFile, PGOOpt->ProfileRemappingFile);
+
+ for (auto &C : PipelineStartEPCallbacks)
+ C(MPM, Level);
+ for (auto &C : PipelineEarlySimplificationEPCallbacks)
+ C(MPM, Level);
+
+ // Build a minimal pipeline based on the semantics required by LLVM,
+ // which is just that always-inlining occurs. Further, disable generating
+ // lifetime intrinsics to avoid enabling further optimizations during
+ // code generation. However, we need to insert lifetime intrinsics to avoid
+ // invalid accesses caused by multithreaded coroutines.
+ MPM.addPass(AlwaysInlinerPass(
+ /*InsertLifetimeIntrinsics=*/PTO.Coroutines));
+
+ if (PTO.MergeFunctions)
+ MPM.addPass(MergeFunctionsPass());
+
+ if (EnableMatrix)
+ MPM.addPass(
+ createModuleToFunctionPassAdaptor(LowerMatrixIntrinsicsPass(true)));
+
+ if (!CGSCCOptimizerLateEPCallbacks.empty()) {
+ CGSCCPassManager CGPM(DebugLogging);
+ for (auto &C : CGSCCOptimizerLateEPCallbacks)
+ C(CGPM, Level);
+ if (!CGPM.isEmpty())
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+ }
+ if (!LateLoopOptimizationsEPCallbacks.empty()) {
+ LoopPassManager LPM(DebugLogging);
+ for (auto &C : LateLoopOptimizationsEPCallbacks)
+ C(LPM, Level);
+ if (!LPM.isEmpty()) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ createFunctionToLoopPassAdaptor(std::move(LPM))));
+ }
+ }
+ if (!LoopOptimizerEndEPCallbacks.empty()) {
+ LoopPassManager LPM(DebugLogging);
+ for (auto &C : LoopOptimizerEndEPCallbacks)
+ C(LPM, Level);
+ if (!LPM.isEmpty()) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(
+ createFunctionToLoopPassAdaptor(std::move(LPM))));
+ }
+ }
+ if (!ScalarOptimizerLateEPCallbacks.empty()) {
+ FunctionPassManager FPM(DebugLogging);
+ for (auto &C : ScalarOptimizerLateEPCallbacks)
+ C(FPM, Level);
+ if (!FPM.isEmpty())
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ }
+ if (!VectorizerStartEPCallbacks.empty()) {
+ FunctionPassManager FPM(DebugLogging);
+ for (auto &C : VectorizerStartEPCallbacks)
+ C(FPM, Level);
+ if (!FPM.isEmpty())
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ }
+
+ if (PTO.Coroutines) {
+ MPM.addPass(createModuleToFunctionPassAdaptor(CoroEarlyPass()));
+
+ CGSCCPassManager CGPM(DebugLogging);
+ CGPM.addPass(CoroSplitPass());
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(CoroElidePass()));
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+
+ MPM.addPass(createModuleToFunctionPassAdaptor(CoroCleanupPass()));
+ }
+
+ for (auto &C : OptimizerLastEPCallbacks)
+ C(MPM, Level);
+
+ if (LTOPreLink)
+ addRequiredLTOPreLinkPasses(MPM);
+
+ return MPM;
+}
+
+AAManager PassBuilder::buildDefaultAAPipeline() {
+ AAManager AA;
+
+ // The order in which these are registered determines their priority when
+ // being queried.
+
+ // First we register the basic alias analysis that provides the majority of
+ // per-function local AA logic. This is a stateless, on-demand local set of
+ // AA techniques.
+ AA.registerFunctionAnalysis<BasicAA>();
+
+ // Next we query fast, specialized alias analyses that wrap IR-embedded
+ // information about aliasing.
+ AA.registerFunctionAnalysis<ScopedNoAliasAA>();
+ AA.registerFunctionAnalysis<TypeBasedAA>();
+
+ // Add support for querying global aliasing information when available.
+ // Because the `AAManager` is a function analysis and `GlobalsAA` is a module
+ // analysis, all that the `AAManager` can do is query for any *cached*
+ // results from `GlobalsAA` through a readonly proxy.
+ AA.registerModuleAnalysis<GlobalsAA>();
+
+ // Add target-specific alias analyses.
+ if (TM)
+ TM->registerDefaultAliasAnalyses(AA);
+
+ return AA;
+}
+
+static Optional<int> parseRepeatPassName(StringRef Name) {
+ if (!Name.consume_front("repeat<") || !Name.consume_back(">"))
+ return None;
+ int Count;
+ if (Name.getAsInteger(0, Count) || Count <= 0)
+ return None;
+ return Count;
+}
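+// For example, "repeat<3>" parses to 3, while "repeat<0>" and a plain
+// "repeat" yield None.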
+
+static Optional<int> parseDevirtPassName(StringRef Name) {
+ if (!Name.consume_front("devirt<") || !Name.consume_back(">"))
+ return None;
+ int Count;
+ if (Name.getAsInteger(0, Count) || Count < 0)
+ return None;
+ return Count;
+}
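+// For example, "devirt<4>" parses to 4. Unlike repeat<>, a count of 0 is
+// accepted here.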
+
+static bool checkParametrizedPassName(StringRef Name, StringRef PassName) {
+ if (!Name.consume_front(PassName))
+ return false;
+ // normal pass name w/o parameters == default parameters
+ if (Name.empty())
+ return true;
+ return Name.startswith("<") && Name.endswith(">");
+}
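+// For example, with PassName == "simplifycfg", both "simplifycfg" and
+// "simplifycfg<no-keep-loops>" pass this check, while "simplifycfgs" does not.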
+
+namespace {
+
+/// This performs customized parsing of a pass name with parameters.
+///
+/// We do not need parametrization of passes in textual pipelines very often,
+/// yet on rare occasions the ability to specify parameters right there can be
+/// useful.
+///
+/// \p Name - a parameterized specification of a pass from a textual pipeline
+/// is a string of the form:
+/// PassName '<' parameter-list '>'
+///
+/// The parameter list is parsed by the callable argument \p Parser. It takes
+/// a StringRef of parameters and returns either a StringError or a parameter
+/// list in the form of a custom parameters type, all wrapped into the
+/// Expected<> template class.
+///
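+/// For example, given Name == "loop-unroll<O2;partial>" and PassName ==
+/// "loop-unroll", \p Parser is invoked on the parameter string "O2;partial".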
+template <typename ParametersParseCallableT>
+auto parsePassParameters(ParametersParseCallableT &&Parser, StringRef Name,
+ StringRef PassName) -> decltype(Parser(StringRef{})) {
+ using ParametersT = typename decltype(Parser(StringRef{}))::value_type;
+
+ StringRef Params = Name;
+ if (!Params.consume_front(PassName)) {
+ assert(false &&
+ "unable to strip pass name from parametrized pass specification");
+ }
+ if (Params.empty())
+ return ParametersT{};
+ if (!Params.consume_front("<") || !Params.consume_back(">")) {
+ assert(false && "invalid format for parametrized pass name");
+ }
+
+ Expected<ParametersT> Result = Parser(Params);
+ assert((Result || Result.template errorIsA<StringError>()) &&
+ "Pass parameter parser can only return StringErrors.");
+ return Result;
+}
+
+/// Parser of parameters for LoopUnroll pass.
+Expected<LoopUnrollOptions> parseLoopUnrollOptions(StringRef Params) {
+ LoopUnrollOptions UnrollOpts;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+ int OptLevel = StringSwitch<int>(ParamName)
+ .Case("O0", 0)
+ .Case("O1", 1)
+ .Case("O2", 2)
+ .Case("O3", 3)
+ .Default(-1);
+ if (OptLevel >= 0) {
+ UnrollOpts.setOptLevel(OptLevel);
+ continue;
+ }
+ if (ParamName.consume_front("full-unroll-max=")) {
+ int Count;
+ if (ParamName.getAsInteger(0, Count))
+ return make_error<StringError>(
+ formatv("invalid LoopUnrollPass parameter '{0}' ", ParamName).str(),
+ inconvertibleErrorCode());
+ UnrollOpts.setFullUnrollMaxCount(Count);
+ continue;
+ }
+
+ bool Enable = !ParamName.consume_front("no-");
+ if (ParamName == "partial") {
+ UnrollOpts.setPartial(Enable);
+ } else if (ParamName == "peeling") {
+ UnrollOpts.setPeeling(Enable);
+ } else if (ParamName == "profile-peeling") {
+ UnrollOpts.setProfileBasedPeeling(Enable);
+ } else if (ParamName == "runtime") {
+ UnrollOpts.setRuntime(Enable);
+ } else if (ParamName == "upperbound") {
+ UnrollOpts.setUpperBound(Enable);
+ } else {
+ return make_error<StringError>(
+ formatv("invalid LoopUnrollPass parameter '{0}' ", ParamName).str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return UnrollOpts;
+}
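+// For example, the parameter string "O2;partial;no-runtime;full-unroll-max=16"
+// selects optimization level 2, enables partial unrolling, disables runtime
+// unrolling, and caps full unrolling at 16 iterations.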
+
+Expected<MemorySanitizerOptions> parseMSanPassOptions(StringRef Params) {
+ MemorySanitizerOptions Result;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+
+ if (ParamName == "recover") {
+ Result.Recover = true;
+ } else if (ParamName == "kernel") {
+ Result.Kernel = true;
+ } else if (ParamName.consume_front("track-origins=")) {
+ if (ParamName.getAsInteger(0, Result.TrackOrigins))
+ return make_error<StringError>(
+ formatv("invalid argument to MemorySanitizer pass track-origins "
+ "parameter: '{0}' ",
+ ParamName)
+ .str(),
+ inconvertibleErrorCode());
+ } else {
+ return make_error<StringError>(
+ formatv("invalid MemorySanitizer pass parameter '{0}' ", ParamName)
+ .str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return Result;
+}
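+// For example, the parameter string "recover;track-origins=2" makes the
+// sanitizer recoverable and enables origin tracking at level 2.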
+
+/// Parser of parameters for SimplifyCFG pass.
+Expected<SimplifyCFGOptions> parseSimplifyCFGOptions(StringRef Params) {
+ SimplifyCFGOptions Result;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+
+ bool Enable = !ParamName.consume_front("no-");
+ if (ParamName == "forward-switch-cond") {
+ Result.forwardSwitchCondToPhi(Enable);
+ } else if (ParamName == "switch-to-lookup") {
+ Result.convertSwitchToLookupTable(Enable);
+ } else if (ParamName == "keep-loops") {
+ Result.needCanonicalLoops(Enable);
+ } else if (ParamName == "hoist-common-insts") {
+ Result.hoistCommonInsts(Enable);
+ } else if (ParamName == "sink-common-insts") {
+ Result.sinkCommonInsts(Enable);
+ } else if (Enable && ParamName.consume_front("bonus-inst-threshold=")) {
+ APInt BonusInstThreshold;
+ if (ParamName.getAsInteger(0, BonusInstThreshold))
+ return make_error<StringError>(
+ formatv("invalid argument to SimplifyCFG pass bonus-threshold "
+ "parameter: '{0}' ",
+ ParamName).str(),
+ inconvertibleErrorCode());
+ Result.bonusInstThreshold(BonusInstThreshold.getSExtValue());
+ } else {
+ return make_error<StringError>(
+ formatv("invalid SimplifyCFG pass parameter '{0}' ", ParamName).str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return Result;
+}
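+// For example, the parameter string "no-keep-loops;bonus-inst-threshold=3"
+// disables canonical loop preservation and sets the bonus instruction
+// threshold to 3.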
+
+/// Parser of parameters for LoopVectorize pass.
+Expected<LoopVectorizeOptions> parseLoopVectorizeOptions(StringRef Params) {
+ LoopVectorizeOptions Opts;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+
+ bool Enable = !ParamName.consume_front("no-");
+ if (ParamName == "interleave-forced-only") {
+ Opts.setInterleaveOnlyWhenForced(Enable);
+ } else if (ParamName == "vectorize-forced-only") {
+ Opts.setVectorizeOnlyWhenForced(Enable);
+ } else {
+ return make_error<StringError>(
+ formatv("invalid LoopVectorize parameter '{0}' ", ParamName).str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return Opts;
+}
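+// For example, "interleave-forced-only;no-vectorize-forced-only" interleaves
+// only when forced, but vectorizes even when not forced.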
+
+Expected<bool> parseLoopUnswitchOptions(StringRef Params) {
+ bool Result = false;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+
+ bool Enable = !ParamName.consume_front("no-");
+ if (ParamName == "nontrivial") {
+ Result = Enable;
+ } else {
+ return make_error<StringError>(
+ formatv("invalid LoopUnswitch pass parameter '{0}' ", ParamName)
+ .str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return Result;
+}
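+// For example, "nontrivial" enables non-trivial unswitching and
+// "no-nontrivial" disables it.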
+
+Expected<bool> parseMergedLoadStoreMotionOptions(StringRef Params) {
+ bool Result = false;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+
+ bool Enable = !ParamName.consume_front("no-");
+ if (ParamName == "split-footer-bb") {
+ Result = Enable;
+ } else {
+ return make_error<StringError>(
+ formatv("invalid MergedLoadStoreMotion pass parameter '{0}' ",
+ ParamName)
+ .str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return Result;
+}
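+// For example, "split-footer-bb" enables splitting of the footer basic block,
+// and "no-split-footer-bb" disables it.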
+
+Expected<GVNOptions> parseGVNOptions(StringRef Params) {
+ GVNOptions Result;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+
+ bool Enable = !ParamName.consume_front("no-");
+ if (ParamName == "pre") {
+ Result.setPRE(Enable);
+ } else if (ParamName == "load-pre") {
+ Result.setLoadPRE(Enable);
+ } else if (ParamName == "split-backedge-load-pre") {
+ Result.setLoadPRESplitBackedge(Enable);
+ } else if (ParamName == "memdep") {
+ Result.setMemDep(Enable);
+ } else {
+ return make_error<StringError>(
+ formatv("invalid GVN pass parameter '{0}' ", ParamName).str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return Result;
+}
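+// For example, "no-load-pre;memdep" disables load PRE and explicitly enables
+// the use of MemDep.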
+
+Expected<StackLifetime::LivenessType>
+parseStackLifetimeOptions(StringRef Params) {
+ StackLifetime::LivenessType Result = StackLifetime::LivenessType::May;
+ while (!Params.empty()) {
+ StringRef ParamName;
+ std::tie(ParamName, Params) = Params.split(';');
+
+ if (ParamName == "may") {
+ Result = StackLifetime::LivenessType::May;
+ } else if (ParamName == "must") {
+ Result = StackLifetime::LivenessType::Must;
+ } else {
+ return make_error<StringError>(
+ formatv("invalid StackLifetime parameter '{0}' ", ParamName).str(),
+ inconvertibleErrorCode());
+ }
+ }
+ return Result;
+}
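+// For example, "must" selects must-liveness; "may" (the default) selects
+// may-liveness.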
+
+} // namespace
+
+/// Tests whether a pass name starts with a valid prefix for a default pipeline
+/// alias.
+static bool startsWithDefaultPipelineAliasPrefix(StringRef Name) {
+ return Name.startswith("default") || Name.startswith("thinlto") ||
+ Name.startswith("lto");
+}
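+// Names such as "default<O2>", "thinlto-pre-link<O3>", or "lto<Os>" start with
+// one of these prefixes and are then matched against DefaultAliasRegex below.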
+
+/// Tests whether registered callbacks will accept a given pass name.
+///
+/// When parsing a pipeline text, the type of the outermost pipeline may be
+/// omitted, in which case the type is automatically determined from the first
+/// pass name in the text. This may be a name that is handled through one of the
+/// callbacks. We check this through the ordinary parsing callbacks by setting
+/// up a dummy PassManager in order to not force the client to also handle this
+/// type of query.
+template <typename PassManagerT, typename CallbacksT>
+static bool callbacksAcceptPassName(StringRef Name, CallbacksT &Callbacks) {
+ if (!Callbacks.empty()) {
+ PassManagerT DummyPM;
+ for (auto &CB : Callbacks)
+ if (CB(Name, DummyPM, {}))
+ return true;
+ }
+ return false;
+}
+
+template <typename CallbacksT>
+static bool isModulePassName(StringRef Name, CallbacksT &Callbacks) {
+ // Manually handle aliases for pre-configured pipeline fragments.
+ if (startsWithDefaultPipelineAliasPrefix(Name))
+ return DefaultAliasRegex.match(Name);
+
+ // Explicitly handle pass manager names.
+ if (Name == "module")
+ return true;
+ if (Name == "cgscc")
+ return true;
+ if (Name == "function")
+ return true;
+
+ // Explicitly handle custom-parsed pass names.
+ if (parseRepeatPassName(Name))
+ return true;
+
+#define MODULE_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) \
+ return true;
+#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">") \
+ return true;
+#include "PassRegistry.def"
+
+ return callbacksAcceptPassName<ModulePassManager>(Name, Callbacks);
+}
+
+template <typename CallbacksT>
+static bool isCGSCCPassName(StringRef Name, CallbacksT &Callbacks) {
+ // Explicitly handle pass manager names.
+ if (Name == "cgscc")
+ return true;
+ if (Name == "function")
+ return true;
+
+ // Explicitly handle custom-parsed pass names.
+ if (parseRepeatPassName(Name))
+ return true;
+ if (parseDevirtPassName(Name))
+ return true;
+
+#define CGSCC_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) \
+ return true;
+#define CGSCC_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">") \
+ return true;
+#include "PassRegistry.def"
+
+ return callbacksAcceptPassName<CGSCCPassManager>(Name, Callbacks);
+}
+
+template <typename CallbacksT>
+static bool isFunctionPassName(StringRef Name, CallbacksT &Callbacks) {
+ // Explicitly handle pass manager names.
+ if (Name == "function")
+ return true;
+ if (Name == "loop" || Name == "loop-mssa")
+ return true;
+
+ // Explicitly handle custom-parsed pass names.
+ if (parseRepeatPassName(Name))
+ return true;
+
+#define FUNCTION_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) \
+ return true;
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) \
+ return true;
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">") \
+ return true;
+#include "PassRegistry.def"
+
+ return callbacksAcceptPassName<FunctionPassManager>(Name, Callbacks);
+}
+
+template <typename CallbacksT>
+static bool isLoopPassName(StringRef Name, CallbacksT &Callbacks) {
+ // Explicitly handle pass manager names.
+ if (Name == "loop" || Name == "loop-mssa")
+ return true;
+
+ // Explicitly handle custom-parsed pass names.
+ if (parseRepeatPassName(Name))
+ return true;
+
+#define LOOP_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) \
+ return true;
+#define LOOP_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) \
+ return true;
+#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">" || Name == "invalidate<" NAME ">") \
+ return true;
+#include "PassRegistry.def"
+
+ return callbacksAcceptPassName<LoopPassManager>(Name, Callbacks);
+}
+
+Optional<std::vector<PassBuilder::PipelineElement>>
+PassBuilder::parsePipelineText(StringRef Text) {
+ std::vector<PipelineElement> ResultPipeline;
+
+ SmallVector<std::vector<PipelineElement> *, 4> PipelineStack = {
+ &ResultPipeline};
+ for (;;) {
+ std::vector<PipelineElement> &Pipeline = *PipelineStack.back();
+ size_t Pos = Text.find_first_of(",()");
+ Pipeline.push_back({Text.substr(0, Pos), {}});
+
+ // If we have a single terminating name, we're done.
+ if (Pos == Text.npos)
+ break;
+
+ char Sep = Text[Pos];
+ Text = Text.substr(Pos + 1);
+ if (Sep == ',')
+ // Just a name ending in a comma, continue.
+ continue;
+
+ if (Sep == '(') {
+ // Push the inner pipeline onto the stack to continue processing.
+ PipelineStack.push_back(&Pipeline.back().InnerPipeline);
+ continue;
+ }
+
+ assert(Sep == ')' && "Bogus separator!");
+ // When handling the close parenthesis, we greedily consume them to avoid
+ // empty strings in the pipeline.
+ do {
+ // If we try to pop the outer pipeline we have unbalanced parentheses.
+ if (PipelineStack.size() == 1)
+ return None;
+
+ PipelineStack.pop_back();
+ } while (Text.consume_front(")"));
+
+ // Check if we've finished parsing.
+ if (Text.empty())
+ break;
+
+ // Otherwise, the end of an inner pipeline always has to be followed by
+ // a comma, and then we can continue.
+ if (!Text.consume_front(","))
+ return None;
+ }
+
+ if (PipelineStack.size() > 1)
+ // Unbalanced parentheses.
+ return None;
+
+ assert(PipelineStack.back() == &ResultPipeline &&
+ "Wrong pipeline at the bottom of the stack!");
+ return {std::move(ResultPipeline)};
+}
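+// For example, the text "a(b,c(d)),e" parses into two top-level elements "a"
+// and "e", where "a" carries the inner pipeline "b,c(d)" and "c" in turn
+// carries "d".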
+
+Error PassBuilder::parseModulePass(ModulePassManager &MPM,
+ const PipelineElement &E) {
+ auto &Name = E.Name;
+ auto &InnerPipeline = E.InnerPipeline;
+
+ // First handle complex passes like the pass managers which carry pipelines.
+ if (!InnerPipeline.empty()) {
+ if (Name == "module") {
+ ModulePassManager NestedMPM(DebugLogging);
+ if (auto Err = parseModulePassPipeline(NestedMPM, InnerPipeline))
+ return Err;
+ MPM.addPass(std::move(NestedMPM));
+ return Error::success();
+ }
+ if (Name == "cgscc") {
+ CGSCCPassManager CGPM(DebugLogging);
+ if (auto Err = parseCGSCCPassPipeline(CGPM, InnerPipeline))
+ return Err;
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(std::move(CGPM)));
+ return Error::success();
+ }
+ if (Name == "function") {
+ FunctionPassManager FPM(DebugLogging);
+ if (auto Err = parseFunctionPassPipeline(FPM, InnerPipeline))
+ return Err;
+ MPM.addPass(createModuleToFunctionPassAdaptor(std::move(FPM)));
+ return Error::success();
+ }
+ if (auto Count = parseRepeatPassName(Name)) {
+ ModulePassManager NestedMPM(DebugLogging);
+ if (auto Err = parseModulePassPipeline(NestedMPM, InnerPipeline))
+ return Err;
+ MPM.addPass(createRepeatedPass(*Count, std::move(NestedMPM)));
+ return Error::success();
+ }
+
+ for (auto &C : ModulePipelineParsingCallbacks)
+ if (C(Name, MPM, InnerPipeline))
+ return Error::success();
+
+ // Normal passes can't have pipelines.
+ return make_error<StringError>(
+ formatv("invalid use of '{0}' pass as module pipeline", Name).str(),
+ inconvertibleErrorCode());
+ }
+
+ // Manually handle aliases for pre-configured pipeline fragments.
+ if (startsWithDefaultPipelineAliasPrefix(Name)) {
+ SmallVector<StringRef, 3> Matches;
+ if (!DefaultAliasRegex.match(Name, &Matches))
+ return make_error<StringError>(
+ formatv("unknown default pipeline alias '{0}'", Name).str(),
+ inconvertibleErrorCode());
+
+ assert(Matches.size() == 3 && "Must capture two matched strings!");
+
+ OptimizationLevel L = StringSwitch<OptimizationLevel>(Matches[2])
+ .Case("O0", OptimizationLevel::O0)
+ .Case("O1", OptimizationLevel::O1)
+ .Case("O2", OptimizationLevel::O2)
+ .Case("O3", OptimizationLevel::O3)
+ .Case("Os", OptimizationLevel::Os)
+ .Case("Oz", OptimizationLevel::Oz);
+ if (L == OptimizationLevel::O0 && Matches[1] != "thinlto" &&
+ Matches[1] != "lto") {
+ MPM.addPass(buildO0DefaultPipeline(L, Matches[1] == "thinlto-pre-link" ||
+ Matches[1] == "lto-pre-link"));
+ return Error::success();
+ }
+
+ // This is consistent with the old pass manager invoked via opt, but
+ // inconsistent with clang. Clang doesn't enable loop vectorization
+ // but does enable SLP vectorization at Oz.
+ PTO.LoopVectorization =
+ L.getSpeedupLevel() > 1 && L != OptimizationLevel::Oz;
+ PTO.SLPVectorization =
+ L.getSpeedupLevel() > 1 && L != OptimizationLevel::Oz;
+
+ if (Matches[1] == "default") {
+ MPM.addPass(buildPerModuleDefaultPipeline(L));
+ } else if (Matches[1] == "thinlto-pre-link") {
+ MPM.addPass(buildThinLTOPreLinkDefaultPipeline(L));
+ } else if (Matches[1] == "thinlto") {
+ MPM.addPass(buildThinLTODefaultPipeline(L, nullptr));
+ } else if (Matches[1] == "lto-pre-link") {
+ MPM.addPass(buildLTOPreLinkDefaultPipeline(L));
+ } else {
+ assert(Matches[1] == "lto" && "Not one of the matched options!");
+ MPM.addPass(buildLTODefaultPipeline(L, nullptr));
+ }
+ return Error::success();
+ }
+
+ // Finally expand the basic registered passes from the .inc file.
+#define MODULE_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ MPM.addPass(CREATE_PASS); \
+ return Error::success(); \
+ }
+#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">") { \
+ MPM.addPass( \
+ RequireAnalysisPass< \
+ std::remove_reference<decltype(CREATE_PASS)>::type, Module>()); \
+ return Error::success(); \
+ } \
+ if (Name == "invalidate<" NAME ">") { \
+ MPM.addPass(InvalidateAnalysisPass< \
+ std::remove_reference<decltype(CREATE_PASS)>::type>()); \
+ return Error::success(); \
+ }
+#define CGSCC_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ MPM.addPass(createModuleToPostOrderCGSCCPassAdaptor(CREATE_PASS)); \
+ return Error::success(); \
+ }
+#define FUNCTION_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ MPM.addPass(createModuleToFunctionPassAdaptor(CREATE_PASS)); \
+ return Error::success(); \
+ }
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) { \
+ auto Params = parsePassParameters(PARSER, Name, NAME); \
+ if (!Params) \
+ return Params.takeError(); \
+ MPM.addPass(createModuleToFunctionPassAdaptor(CREATE_PASS(Params.get()))); \
+ return Error::success(); \
+ }
+#define LOOP_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ MPM.addPass( \
+ createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( \
+ CREATE_PASS, false, false, DebugLogging))); \
+ return Error::success(); \
+ }
+#define LOOP_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) { \
+ auto Params = parsePassParameters(PARSER, Name, NAME); \
+ if (!Params) \
+ return Params.takeError(); \
+ MPM.addPass( \
+ createModuleToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( \
+ CREATE_PASS(Params.get()), false, false, DebugLogging))); \
+ return Error::success(); \
+ }
+#include "PassRegistry.def"
+
+ for (auto &C : ModulePipelineParsingCallbacks)
+ if (C(Name, MPM, InnerPipeline))
+ return Error::success();
+ return make_error<StringError>(
+ formatv("unknown module pass '{0}'", Name).str(),
+ inconvertibleErrorCode());
+}
+
+Error PassBuilder::parseCGSCCPass(CGSCCPassManager &CGPM,
+ const PipelineElement &E) {
+ auto &Name = E.Name;
+ auto &InnerPipeline = E.InnerPipeline;
+
+ // First handle complex passes like the pass managers which carry pipelines.
+ if (!InnerPipeline.empty()) {
+ if (Name == "cgscc") {
+ CGSCCPassManager NestedCGPM(DebugLogging);
+ if (auto Err = parseCGSCCPassPipeline(NestedCGPM, InnerPipeline))
+ return Err;
+ // Add the nested pass manager with the appropriate adaptor.
+ CGPM.addPass(std::move(NestedCGPM));
+ return Error::success();
+ }
+ if (Name == "function") {
+ FunctionPassManager FPM(DebugLogging);
+ if (auto Err = parseFunctionPassPipeline(FPM, InnerPipeline))
+ return Err;
+ // Add the nested pass manager with the appropriate adaptor.
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(std::move(FPM)));
+ return Error::success();
+ }
+ if (auto Count = parseRepeatPassName(Name)) {
+ CGSCCPassManager NestedCGPM(DebugLogging);
+ if (auto Err = parseCGSCCPassPipeline(NestedCGPM, InnerPipeline))
+ return Err;
+ CGPM.addPass(createRepeatedPass(*Count, std::move(NestedCGPM)));
+ return Error::success();
+ }
+ if (auto MaxRepetitions = parseDevirtPassName(Name)) {
+ CGSCCPassManager NestedCGPM(DebugLogging);
+ if (auto Err = parseCGSCCPassPipeline(NestedCGPM, InnerPipeline))
+ return Err;
+ CGPM.addPass(
+ createDevirtSCCRepeatedPass(std::move(NestedCGPM), *MaxRepetitions));
+ return Error::success();
+ }
+
+ for (auto &C : CGSCCPipelineParsingCallbacks)
+ if (C(Name, CGPM, InnerPipeline))
+ return Error::success();
+
+ // Normal passes can't have pipelines.
+ return make_error<StringError>(
+ formatv("invalid use of '{0}' pass as cgscc pipeline", Name).str(),
+ inconvertibleErrorCode());
+ }
+
+// Now expand the basic registered passes from the .inc file.
+#define CGSCC_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ CGPM.addPass(CREATE_PASS); \
+ return Error::success(); \
+ }
+#define CGSCC_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">") { \
+ CGPM.addPass(RequireAnalysisPass< \
+ std::remove_reference<decltype(CREATE_PASS)>::type, \
+ LazyCallGraph::SCC, CGSCCAnalysisManager, LazyCallGraph &, \
+ CGSCCUpdateResult &>()); \
+ return Error::success(); \
+ } \
+ if (Name == "invalidate<" NAME ">") { \
+ CGPM.addPass(InvalidateAnalysisPass< \
+ std::remove_reference<decltype(CREATE_PASS)>::type>()); \
+ return Error::success(); \
+ }
+#define FUNCTION_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(CREATE_PASS)); \
+ return Error::success(); \
+ }
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) { \
+ auto Params = parsePassParameters(PARSER, Name, NAME); \
+ if (!Params) \
+ return Params.takeError(); \
+ CGPM.addPass(createCGSCCToFunctionPassAdaptor(CREATE_PASS(Params.get()))); \
+ return Error::success(); \
+ }
+#define LOOP_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ CGPM.addPass( \
+ createCGSCCToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( \
+ CREATE_PASS, false, false, DebugLogging))); \
+ return Error::success(); \
+ }
+#define LOOP_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) { \
+ auto Params = parsePassParameters(PARSER, Name, NAME); \
+ if (!Params) \
+ return Params.takeError(); \
+ CGPM.addPass( \
+ createCGSCCToFunctionPassAdaptor(createFunctionToLoopPassAdaptor( \
+ CREATE_PASS(Params.get()), false, false, DebugLogging))); \
+ return Error::success(); \
+ }
+#include "PassRegistry.def"
+
+ for (auto &C : CGSCCPipelineParsingCallbacks)
+ if (C(Name, CGPM, InnerPipeline))
+ return Error::success();
+ return make_error<StringError>(
+ formatv("unknown cgscc pass '{0}'", Name).str(),
+ inconvertibleErrorCode());
+}
+
+Error PassBuilder::parseFunctionPass(FunctionPassManager &FPM,
+ const PipelineElement &E) {
+ auto &Name = E.Name;
+ auto &InnerPipeline = E.InnerPipeline;
+
+ // First handle complex passes like the pass managers which carry pipelines.
+ if (!InnerPipeline.empty()) {
+ if (Name == "function") {
+ FunctionPassManager NestedFPM(DebugLogging);
+ if (auto Err = parseFunctionPassPipeline(NestedFPM, InnerPipeline))
+ return Err;
+ // Add the nested pass manager with the appropriate adaptor.
+ FPM.addPass(std::move(NestedFPM));
+ return Error::success();
+ }
+ if (Name == "loop" || Name == "loop-mssa") {
+ LoopPassManager LPM(DebugLogging);
+ if (auto Err = parseLoopPassPipeline(LPM, InnerPipeline))
+ return Err;
+ // Add the nested pass manager with the appropriate adaptor.
+ bool UseMemorySSA = (Name == "loop-mssa");
+ bool UseBFI = llvm::any_of(
+ InnerPipeline, [](auto Pipeline) { return Pipeline.Name == "licm"; });
+ FPM.addPass(createFunctionToLoopPassAdaptor(std::move(LPM), UseMemorySSA,
+ UseBFI, DebugLogging));
+ return Error::success();
+ }
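+    // For example (illustrative): "loop-mssa(licm)" takes the branch above
+    // with UseMemorySSA = true and, because the inner pipeline names "licm",
+    // UseBFI = true as well; a plain "loop(loop-rotate)" uses neither.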
+ if (auto Count = parseRepeatPassName(Name)) {
+ FunctionPassManager NestedFPM(DebugLogging);
+ if (auto Err = parseFunctionPassPipeline(NestedFPM, InnerPipeline))
+ return Err;
+ FPM.addPass(createRepeatedPass(*Count, std::move(NestedFPM)));
+ return Error::success();
+ }
+
+ for (auto &C : FunctionPipelineParsingCallbacks)
+ if (C(Name, FPM, InnerPipeline))
+ return Error::success();
+
+ // Normal passes can't have pipelines.
+ return make_error<StringError>(
+ formatv("invalid use of '{0}' pass as function pipeline", Name).str(),
+ inconvertibleErrorCode());
+ }
+
+// Now expand the basic registered passes from the .inc file.
+#define FUNCTION_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ FPM.addPass(CREATE_PASS); \
+ return Error::success(); \
+ }
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) { \
+ auto Params = parsePassParameters(PARSER, Name, NAME); \
+ if (!Params) \
+ return Params.takeError(); \
+ FPM.addPass(CREATE_PASS(Params.get())); \
+ return Error::success(); \
+ }
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">") { \
+ FPM.addPass( \
+ RequireAnalysisPass< \
+ std::remove_reference<decltype(CREATE_PASS)>::type, Function>()); \
+ return Error::success(); \
+ } \
+ if (Name == "invalidate<" NAME ">") { \
+ FPM.addPass(InvalidateAnalysisPass< \
+ std::remove_reference<decltype(CREATE_PASS)>::type>()); \
+ return Error::success(); \
+ }
+// FIXME: UseMemorySSA is set to false. Maybe we could do things like:
+// bool UseMemorySSA = !("canon-freeze" || "loop-predication" ||
+// "guard-widening");
+// The risk is that it may become obsolete if we're not careful.
+#define LOOP_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS, false, false, \
+ DebugLogging)); \
+ return Error::success(); \
+ }
+#define LOOP_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) { \
+ auto Params = parsePassParameters(PARSER, Name, NAME); \
+ if (!Params) \
+ return Params.takeError(); \
+ FPM.addPass(createFunctionToLoopPassAdaptor(CREATE_PASS(Params.get()), \
+ false, false, DebugLogging)); \
+ return Error::success(); \
+ }
+#include "PassRegistry.def"
+
+ for (auto &C : FunctionPipelineParsingCallbacks)
+ if (C(Name, FPM, InnerPipeline))
+ return Error::success();
+ return make_error<StringError>(
+ formatv("unknown function pass '{0}'", Name).str(),
+ inconvertibleErrorCode());
+}
+
+Error PassBuilder::parseLoopPass(LoopPassManager &LPM,
+ const PipelineElement &E) {
+ StringRef Name = E.Name;
+ auto &InnerPipeline = E.InnerPipeline;
+
+ // First handle complex passes like the pass managers which carry pipelines.
+ if (!InnerPipeline.empty()) {
+ if (Name == "loop") {
+ LoopPassManager NestedLPM(DebugLogging);
+ if (auto Err = parseLoopPassPipeline(NestedLPM, InnerPipeline))
+ return Err;
+ // Add the nested pass manager with the appropriate adaptor.
+ LPM.addPass(std::move(NestedLPM));
+ return Error::success();
+ }
+ if (auto Count = parseRepeatPassName(Name)) {
+ LoopPassManager NestedLPM(DebugLogging);
+ if (auto Err = parseLoopPassPipeline(NestedLPM, InnerPipeline))
+ return Err;
+ LPM.addPass(createRepeatedPass(*Count, std::move(NestedLPM)));
+ return Error::success();
+ }
+
+ for (auto &C : LoopPipelineParsingCallbacks)
+ if (C(Name, LPM, InnerPipeline))
+ return Error::success();
+
+ // Normal passes can't have pipelines.
+ return make_error<StringError>(
+ formatv("invalid use of '{0}' pass as loop pipeline", Name).str(),
+ inconvertibleErrorCode());
+ }
+
+// Now expand the basic registered passes from the .inc file.
+#define LOOP_PASS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ LPM.addPass(CREATE_PASS); \
+ return Error::success(); \
+ }
+#define LOOP_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER) \
+ if (checkParametrizedPassName(Name, NAME)) { \
+ auto Params = parsePassParameters(PARSER, Name, NAME); \
+ if (!Params) \
+ return Params.takeError(); \
+ LPM.addPass(CREATE_PASS(Params.get())); \
+ return Error::success(); \
+ }
+#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == "require<" NAME ">") { \
+ LPM.addPass(RequireAnalysisPass< \
+ std::remove_reference<decltype(CREATE_PASS)>::type, Loop, \
+ LoopAnalysisManager, LoopStandardAnalysisResults &, \
+ LPMUpdater &>()); \
+ return Error::success(); \
+ } \
+ if (Name == "invalidate<" NAME ">") { \
+ LPM.addPass(InvalidateAnalysisPass< \
+ std::remove_reference<decltype(CREATE_PASS)>::type>()); \
+ return Error::success(); \
+ }
+#include "PassRegistry.def"
+
+ for (auto &C : LoopPipelineParsingCallbacks)
+ if (C(Name, LPM, InnerPipeline))
+ return Error::success();
+ return make_error<StringError>(formatv("unknown loop pass '{0}'", Name).str(),
+ inconvertibleErrorCode());
+}
+
+bool PassBuilder::parseAAPassName(AAManager &AA, StringRef Name) {
+#define MODULE_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ AA.registerModuleAnalysis< \
+ std::remove_reference<decltype(CREATE_PASS)>::type>(); \
+ return true; \
+ }
+#define FUNCTION_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ if (Name == NAME) { \
+ AA.registerFunctionAnalysis< \
+ std::remove_reference<decltype(CREATE_PASS)>::type>(); \
+ return true; \
+ }
+#include "PassRegistry.def"
+
+ for (auto &C : AAParsingCallbacks)
+ if (C(Name, AA))
+ return true;
+ return false;
+}
+
+Error PassBuilder::parseLoopPassPipeline(LoopPassManager &LPM,
+ ArrayRef<PipelineElement> Pipeline) {
+ for (const auto &Element : Pipeline) {
+ if (auto Err = parseLoopPass(LPM, Element))
+ return Err;
+ }
+ return Error::success();
+}
+
+Error PassBuilder::parseFunctionPassPipeline(
+ FunctionPassManager &FPM, ArrayRef<PipelineElement> Pipeline) {
+ for (const auto &Element : Pipeline) {
+ if (auto Err = parseFunctionPass(FPM, Element))
+ return Err;
+ }
+ return Error::success();
+}
+
+Error PassBuilder::parseCGSCCPassPipeline(CGSCCPassManager &CGPM,
+ ArrayRef<PipelineElement> Pipeline) {
+ for (const auto &Element : Pipeline) {
+ if (auto Err = parseCGSCCPass(CGPM, Element))
+ return Err;
+ }
+ return Error::success();
+}
+
+void PassBuilder::crossRegisterProxies(LoopAnalysisManager &LAM,
+ FunctionAnalysisManager &FAM,
+ CGSCCAnalysisManager &CGAM,
+ ModuleAnalysisManager &MAM) {
+ MAM.registerPass([&] { return FunctionAnalysisManagerModuleProxy(FAM); });
+ MAM.registerPass([&] { return CGSCCAnalysisManagerModuleProxy(CGAM); });
+ CGAM.registerPass([&] { return ModuleAnalysisManagerCGSCCProxy(MAM); });
+ FAM.registerPass([&] { return CGSCCAnalysisManagerFunctionProxy(CGAM); });
+ FAM.registerPass([&] { return ModuleAnalysisManagerFunctionProxy(MAM); });
+ FAM.registerPass([&] { return LoopAnalysisManagerFunctionProxy(LAM); });
+ LAM.registerPass([&] { return FunctionAnalysisManagerLoopProxy(FAM); });
+}
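+// A typical setup around crossRegisterProxies (an illustrative sketch of
+// what a driver such as opt does before running a parsed pipeline):
+//
+//   PassBuilder PB;
+//   LoopAnalysisManager LAM;
+//   FunctionAnalysisManager FAM;
+//   CGSCCAnalysisManager CGAM;
+//   ModuleAnalysisManager MAM;
+//   PB.registerModuleAnalyses(MAM);
+//   PB.registerCGSCCAnalyses(CGAM);
+//   PB.registerFunctionAnalyses(FAM);
+//   PB.registerLoopAnalyses(LAM);
+//   PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);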
+
+Error PassBuilder::parseModulePassPipeline(ModulePassManager &MPM,
+ ArrayRef<PipelineElement> Pipeline) {
+ for (const auto &Element : Pipeline) {
+ if (auto Err = parseModulePass(MPM, Element))
+ return Err;
+ }
+ return Error::success();
+}
+
+// Primary pass pipeline description parsing routine for a \c ModulePassManager
+// FIXME: Should this routine accept a TargetMachine or require the caller to
+// pre-populate the analysis managers with target-specific stuff?
+Error PassBuilder::parsePassPipeline(ModulePassManager &MPM,
+ StringRef PipelineText) {
+ auto Pipeline = parsePipelineText(PipelineText);
+ if (!Pipeline || Pipeline->empty())
+ return make_error<StringError>(
+ formatv("invalid pipeline '{0}'", PipelineText).str(),
+ inconvertibleErrorCode());
+
+ // If the first name isn't at the module layer, wrap the pipeline up
+ // automatically.
+ StringRef FirstName = Pipeline->front().Name;
+
+ if (!isModulePassName(FirstName, ModulePipelineParsingCallbacks)) {
+ if (isCGSCCPassName(FirstName, CGSCCPipelineParsingCallbacks)) {
+ Pipeline = {{"cgscc", std::move(*Pipeline)}};
+ } else if (isFunctionPassName(FirstName,
+ FunctionPipelineParsingCallbacks)) {
+ Pipeline = {{"function", std::move(*Pipeline)}};
+ } else if (isLoopPassName(FirstName, LoopPipelineParsingCallbacks)) {
+ Pipeline = {{"function", {{"loop", std::move(*Pipeline)}}}};
+ } else {
+ for (auto &C : TopLevelPipelineParsingCallbacks)
+ if (C(MPM, *Pipeline, DebugLogging))
+ return Error::success();
+
+ // Unknown pass or pipeline name!
+ auto &InnerPipeline = Pipeline->front().InnerPipeline;
+ return make_error<StringError>(
+ formatv("unknown {0} name '{1}'",
+ (InnerPipeline.empty() ? "pass" : "pipeline"), FirstName)
+ .str(),
+ inconvertibleErrorCode());
+ }
+ }
+
+ if (auto Err = parseModulePassPipeline(MPM, *Pipeline))
+ return Err;
+ return Error::success();
+}
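+// For example (illustrative): parsePassPipeline(MPM, "instcombine,sroa")
+// behaves as if "function(instcombine,sroa)" had been written, and a leading
+// loop pass such as "licm" becomes "function(loop(licm))"; the layer of the
+// first name picks the wrapping.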
+
+// Primary pass pipeline description parsing routine for a \c CGSCCPassManager
+Error PassBuilder::parsePassPipeline(CGSCCPassManager &CGPM,
+ StringRef PipelineText) {
+ auto Pipeline = parsePipelineText(PipelineText);
+ if (!Pipeline || Pipeline->empty())
+ return make_error<StringError>(
+ formatv("invalid pipeline '{0}'", PipelineText).str(),
+ inconvertibleErrorCode());
+
+ StringRef FirstName = Pipeline->front().Name;
+ if (!isCGSCCPassName(FirstName, CGSCCPipelineParsingCallbacks))
+ return make_error<StringError>(
+ formatv("unknown cgscc pass '{0}' in pipeline '{1}'", FirstName,
+ PipelineText)
+ .str(),
+ inconvertibleErrorCode());
+
+ if (auto Err = parseCGSCCPassPipeline(CGPM, *Pipeline))
+ return Err;
+ return Error::success();
+}
+
+// Primary pass pipeline description parsing routine for a \c
+// FunctionPassManager
+Error PassBuilder::parsePassPipeline(FunctionPassManager &FPM,
+ StringRef PipelineText) {
+ auto Pipeline = parsePipelineText(PipelineText);
+ if (!Pipeline || Pipeline->empty())
+ return make_error<StringError>(
+ formatv("invalid pipeline '{0}'", PipelineText).str(),
+ inconvertibleErrorCode());
+
+ StringRef FirstName = Pipeline->front().Name;
+ if (!isFunctionPassName(FirstName, FunctionPipelineParsingCallbacks))
+ return make_error<StringError>(
+ formatv("unknown function pass '{0}' in pipeline '{1}'", FirstName,
+ PipelineText)
+ .str(),
+ inconvertibleErrorCode());
+
+ if (auto Err = parseFunctionPassPipeline(FPM, *Pipeline))
+ return Err;
+ return Error::success();
+}
+
+// Primary pass pipeline description parsing routine for a \c LoopPassManager
+Error PassBuilder::parsePassPipeline(LoopPassManager &LPM,
+                                     StringRef PipelineText) {
+ auto Pipeline = parsePipelineText(PipelineText);
+ if (!Pipeline || Pipeline->empty())
+ return make_error<StringError>(
+ formatv("invalid pipeline '{0}'", PipelineText).str(),
+ inconvertibleErrorCode());
+
+  if (auto Err = parseLoopPassPipeline(LPM, *Pipeline))
+ return Err;
+
+ return Error::success();
+}
+
+Error PassBuilder::parseAAPipeline(AAManager &AA, StringRef PipelineText) {
+ // If the pipeline just consists of the word 'default' just replace the AA
+ // manager with our default one.
+ if (PipelineText == "default") {
+ AA = buildDefaultAAPipeline();
+ return Error::success();
+ }
+
+ while (!PipelineText.empty()) {
+ StringRef Name;
+ std::tie(Name, PipelineText) = PipelineText.split(',');
+ if (!parseAAPassName(AA, Name))
+ return make_error<StringError>(
+ formatv("unknown alias analysis name '{0}'", Name).str(),
+ inconvertibleErrorCode());
+ }
+
+ return Error::success();
+}
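+// For example (illustrative): parseAAPipeline(AA, "basic-aa,tbaa,globals-aa")
+// registers BasicAA and TypeBasedAA at the function level and GlobalsAA at
+// the module level, while "default" selects buildDefaultAAPipeline() instead.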
+
+bool PassBuilder::isAAPassName(StringRef PassName) {
+#define MODULE_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ if (PassName == NAME) \
+ return true;
+#define FUNCTION_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ if (PassName == NAME) \
+ return true;
+#include "PassRegistry.def"
+ return false;
+}
+
+bool PassBuilder::isAnalysisPassName(StringRef PassName) {
+#define MODULE_ANALYSIS(NAME, CREATE_PASS) \
+ if (PassName == NAME) \
+ return true;
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS) \
+ if (PassName == NAME) \
+ return true;
+#define LOOP_ANALYSIS(NAME, CREATE_PASS) \
+ if (PassName == NAME) \
+ return true;
+#define CGSCC_ANALYSIS(NAME, CREATE_PASS) \
+ if (PassName == NAME) \
+ return true;
+#define MODULE_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ if (PassName == NAME) \
+ return true;
+#define FUNCTION_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ if (PassName == NAME) \
+ return true;
+#include "PassRegistry.def"
+ return false;
+}
+
+void PassBuilder::registerParseTopLevelPipelineCallback(
+ const std::function<bool(ModulePassManager &, ArrayRef<PipelineElement>,
+ bool DebugLogging)> &C) {
+ TopLevelPipelineParsingCallbacks.push_back(C);
+}
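+// Illustrative registration of such a callback, e.g. from a plugin:
+//
+//   PB.registerParseTopLevelPipelineCallback(
+//       [](ModulePassManager &MPM, ArrayRef<PassBuilder::PipelineElement> P,
+//          bool DebugLogging) {
+//         // Return true only after fully claiming the pipeline.
+//         return false;
+//       });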
diff --git a/contrib/libs/llvm12/lib/Passes/PassPlugin.cpp b/contrib/libs/llvm12/lib/Passes/PassPlugin.cpp
new file mode 100644
index 00000000000..ceefa25a703
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Passes/PassPlugin.cpp
@@ -0,0 +1,51 @@
+//===- lib/Passes/PassPlugin.cpp - Load Plugins for New PM Passes --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Passes/PassPlugin.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <cstdint>
+
+using namespace llvm;
+
+Expected<PassPlugin> PassPlugin::Load(const std::string &Filename) {
+ std::string Error;
+ auto Library =
+ sys::DynamicLibrary::getPermanentLibrary(Filename.c_str(), &Error);
+ if (!Library.isValid())
+ return make_error<StringError>(Twine("Could not load library '") +
+ Filename + "': " + Error,
+ inconvertibleErrorCode());
+
+ PassPlugin P{Filename, Library};
+ intptr_t getDetailsFn =
+ (intptr_t)Library.SearchForAddressOfSymbol("llvmGetPassPluginInfo");
+
+ if (!getDetailsFn)
+    // If the symbol isn't found, this is probably a legacy plugin, which is
+    // an error.
+ return make_error<StringError>(Twine("Plugin entry point not found in '") +
+ Filename + "'. Is this a legacy plugin?",
+ inconvertibleErrorCode());
+
+ P.Info = reinterpret_cast<decltype(llvmGetPassPluginInfo) *>(getDetailsFn)();
+
+ if (P.Info.APIVersion != LLVM_PLUGIN_API_VERSION)
+ return make_error<StringError>(
+ Twine("Wrong API version on plugin '") + Filename + "'. Got version " +
+ Twine(P.Info.APIVersion) + ", supported version is " +
+ Twine(LLVM_PLUGIN_API_VERSION) + ".",
+ inconvertibleErrorCode());
+
+ if (!P.Info.RegisterPassBuilderCallbacks)
+ return make_error<StringError>(Twine("Empty entry callback in plugin '") +
+                                   Filename + "'.",
+ inconvertibleErrorCode());
+
+ return P;
+}
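+// A minimal plugin entry point matching the contract checked above looks
+// like this (illustrative sketch; "MyPlugin" is a placeholder name):
+//
+//   extern "C" ::llvm::PassPluginLibraryInfo LLVM_ATTRIBUTE_WEAK
+//   llvmGetPassPluginInfo() {
+//     return {LLVM_PLUGIN_API_VERSION, "MyPlugin", LLVM_VERSION_STRING,
+//             [](llvm::PassBuilder &PB) { /* register callbacks on PB */ }};
+//   }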
diff --git a/contrib/libs/llvm12/lib/Passes/PassRegistry.def b/contrib/libs/llvm12/lib/Passes/PassRegistry.def
new file mode 100644
index 00000000000..877cb9ed13b
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Passes/PassRegistry.def
@@ -0,0 +1,421 @@
+//===- PassRegistry.def - Registry of passes --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is used as the registry of passes that are part of the core LLVM
+// libraries. This file describes both transformation passes and analyses.
+// Analyses are registered, while transformation passes have names registered
+// that can be used when providing a textual pass pipeline.
+//
+//===----------------------------------------------------------------------===//
+
+// NOTE: NO INCLUDE GUARD DESIRED!
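+// Consumers define only the macros they care about before including this
+// file, e.g. (illustrative):
+//
+//   #define MODULE_PASS(NAME, CREATE_PASS)                                  \
+//     if (Name == NAME) { MPM.addPass(CREATE_PASS); return Error::success(); }
+//   #include "PassRegistry.def"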
+
+#ifndef MODULE_ANALYSIS
+#define MODULE_ANALYSIS(NAME, CREATE_PASS)
+#endif
+MODULE_ANALYSIS("callgraph", CallGraphAnalysis())
+MODULE_ANALYSIS("lcg", LazyCallGraphAnalysis())
+MODULE_ANALYSIS("module-summary", ModuleSummaryIndexAnalysis())
+MODULE_ANALYSIS("no-op-module", NoOpModuleAnalysis())
+MODULE_ANALYSIS("profile-summary", ProfileSummaryAnalysis())
+MODULE_ANALYSIS("stack-safety", StackSafetyGlobalAnalysis())
+MODULE_ANALYSIS("verify", VerifierAnalysis())
+MODULE_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis(PIC))
+MODULE_ANALYSIS("asan-globals-md", ASanGlobalsMetadataAnalysis())
+MODULE_ANALYSIS("inline-advisor", InlineAdvisorAnalysis())
+MODULE_ANALYSIS("ir-similarity", IRSimilarityAnalysis())
+
+#ifndef MODULE_ALIAS_ANALYSIS
+#define MODULE_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ MODULE_ANALYSIS(NAME, CREATE_PASS)
+#endif
+MODULE_ALIAS_ANALYSIS("globals-aa", GlobalsAA())
+#undef MODULE_ALIAS_ANALYSIS
+#undef MODULE_ANALYSIS
+
+#ifndef MODULE_PASS
+#define MODULE_PASS(NAME, CREATE_PASS)
+#endif
+MODULE_PASS("always-inline", AlwaysInlinerPass())
+MODULE_PASS("attributor", AttributorPass())
+MODULE_PASS("annotation2metadata", Annotation2MetadataPass())
+MODULE_PASS("called-value-propagation", CalledValuePropagationPass())
+MODULE_PASS("canonicalize-aliases", CanonicalizeAliasesPass())
+MODULE_PASS("cg-profile", CGProfilePass())
+MODULE_PASS("constmerge", ConstantMergePass())
+MODULE_PASS("cross-dso-cfi", CrossDSOCFIPass())
+MODULE_PASS("deadargelim", DeadArgumentEliminationPass())
+MODULE_PASS("elim-avail-extern", EliminateAvailableExternallyPass())
+MODULE_PASS("extract-blocks", BlockExtractorPass())
+MODULE_PASS("forceattrs", ForceFunctionAttrsPass())
+MODULE_PASS("function-import", FunctionImportPass())
+MODULE_PASS("globaldce", GlobalDCEPass())
+MODULE_PASS("globalopt", GlobalOptPass())
+MODULE_PASS("globalsplit", GlobalSplitPass())
+MODULE_PASS("hotcoldsplit", HotColdSplittingPass())
+MODULE_PASS("hwasan", HWAddressSanitizerPass(false, false))
+MODULE_PASS("khwasan", HWAddressSanitizerPass(true, true))
+MODULE_PASS("inferattrs", InferFunctionAttrsPass())
+MODULE_PASS("inliner-wrapper", ModuleInlinerWrapperPass())
+MODULE_PASS("inliner-wrapper-no-mandatory-first", ModuleInlinerWrapperPass(
+ getInlineParams(),
+ DebugLogging,
+ false))
+MODULE_PASS("insert-gcov-profiling", GCOVProfilerPass())
+MODULE_PASS("instrorderfile", InstrOrderFilePass())
+MODULE_PASS("instrprof", InstrProfiling())
+MODULE_PASS("internalize", InternalizePass())
+MODULE_PASS("invalidate<all>", InvalidateAllAnalysesPass())
+MODULE_PASS("ipsccp", IPSCCPPass())
+MODULE_PASS("iroutliner", IROutlinerPass())
+MODULE_PASS("print-ir-similarity", IRSimilarityAnalysisPrinterPass(dbgs()))
+MODULE_PASS("loop-extract", LoopExtractorPass())
+MODULE_PASS("lowertypetests", LowerTypeTestsPass())
+MODULE_PASS("metarenamer", MetaRenamerPass())
+MODULE_PASS("mergefunc", MergeFunctionsPass())
+MODULE_PASS("name-anon-globals", NameAnonGlobalPass())
+MODULE_PASS("no-op-module", NoOpModulePass())
+MODULE_PASS("objc-arc-apelim", ObjCARCAPElimPass())
+MODULE_PASS("partial-inliner", PartialInlinerPass())
+MODULE_PASS("pgo-icall-prom", PGOIndirectCallPromotion())
+MODULE_PASS("pgo-instr-gen", PGOInstrumentationGen())
+MODULE_PASS("pgo-instr-use", PGOInstrumentationUse())
+MODULE_PASS("print-profile-summary", ProfileSummaryPrinterPass(dbgs()))
+MODULE_PASS("print-callgraph", CallGraphPrinterPass(dbgs()))
+MODULE_PASS("print", PrintModulePass(dbgs()))
+MODULE_PASS("print-lcg", LazyCallGraphPrinterPass(dbgs()))
+MODULE_PASS("print-lcg-dot", LazyCallGraphDOTPrinterPass(dbgs()))
+MODULE_PASS("print-must-be-executed-contexts", MustBeExecutedContextPrinterPass(dbgs()))
+MODULE_PASS("print-stack-safety", StackSafetyGlobalPrinterPass(dbgs()))
+MODULE_PASS("print<module-debuginfo>", ModuleDebugInfoPrinterPass(dbgs()))
+MODULE_PASS("rewrite-statepoints-for-gc", RewriteStatepointsForGC())
+MODULE_PASS("rewrite-symbols", RewriteSymbolPass())
+MODULE_PASS("rpo-function-attrs", ReversePostOrderFunctionAttrsPass())
+MODULE_PASS("sample-profile", SampleProfileLoaderPass())
+MODULE_PASS("scc-oz-module-inliner",
+ buildInlinerPipeline(OptimizationLevel::Oz, ThinOrFullLTOPhase::None))
+MODULE_PASS("loop-extract-single", LoopExtractorPass(1))
+MODULE_PASS("strip", StripSymbolsPass())
+MODULE_PASS("strip-dead-debug-info", StripDeadDebugInfoPass())
+MODULE_PASS("pseudo-probe", SampleProfileProbePass(TM))
+MODULE_PASS("strip-dead-prototypes", StripDeadPrototypesPass())
+MODULE_PASS("strip-debug-declare", StripDebugDeclarePass())
+MODULE_PASS("strip-nondebug", StripNonDebugSymbolsPass())
+MODULE_PASS("strip-nonlinetable-debuginfo", StripNonLineTableDebugInfoPass())
+MODULE_PASS("synthetic-counts-propagation", SyntheticCountsPropagation())
+MODULE_PASS("unique-internal-linkage-names", UniqueInternalLinkageNamesPass())
+MODULE_PASS("verify", VerifierPass())
+MODULE_PASS("wholeprogramdevirt", WholeProgramDevirtPass())
+MODULE_PASS("dfsan", DataFlowSanitizerPass())
+MODULE_PASS("asan-module", ModuleAddressSanitizerPass(/*CompileKernel=*/false, false, true, false))
+MODULE_PASS("msan-module", MemorySanitizerPass({}))
+MODULE_PASS("tsan-module", ThreadSanitizerPass())
+MODULE_PASS("kasan-module", ModuleAddressSanitizerPass(/*CompileKernel=*/true, false, true, false))
+MODULE_PASS("sancov-module", ModuleSanitizerCoveragePass())
+MODULE_PASS("memprof-module", ModuleMemProfilerPass())
+MODULE_PASS("poison-checking", PoisonCheckingPass())
+MODULE_PASS("pseudo-probe-update", PseudoProbeUpdatePass())
+#undef MODULE_PASS
+
+#ifndef CGSCC_ANALYSIS
+#define CGSCC_ANALYSIS(NAME, CREATE_PASS)
+#endif
+CGSCC_ANALYSIS("no-op-cgscc", NoOpCGSCCAnalysis())
+CGSCC_ANALYSIS("fam-proxy", FunctionAnalysisManagerCGSCCProxy())
+CGSCC_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis(PIC))
+#undef CGSCC_ANALYSIS
+
+#ifndef CGSCC_PASS
+#define CGSCC_PASS(NAME, CREATE_PASS)
+#endif
+CGSCC_PASS("argpromotion", ArgumentPromotionPass())
+CGSCC_PASS("invalidate<all>", InvalidateAllAnalysesPass())
+CGSCC_PASS("function-attrs", PostOrderFunctionAttrsPass())
+CGSCC_PASS("attributor-cgscc", AttributorCGSCCPass())
+CGSCC_PASS("inline", InlinerPass())
+CGSCC_PASS("openmpopt", OpenMPOptPass())
+CGSCC_PASS("coro-split", CoroSplitPass())
+CGSCC_PASS("no-op-cgscc", NoOpCGSCCPass())
+#undef CGSCC_PASS
+
+#ifndef FUNCTION_ANALYSIS
+#define FUNCTION_ANALYSIS(NAME, CREATE_PASS)
+#endif
+FUNCTION_ANALYSIS("aa", AAManager())
+FUNCTION_ANALYSIS("assumptions", AssumptionAnalysis())
+FUNCTION_ANALYSIS("block-freq", BlockFrequencyAnalysis())
+FUNCTION_ANALYSIS("branch-prob", BranchProbabilityAnalysis())
+FUNCTION_ANALYSIS("domtree", DominatorTreeAnalysis())
+FUNCTION_ANALYSIS("postdomtree", PostDominatorTreeAnalysis())
+FUNCTION_ANALYSIS("demanded-bits", DemandedBitsAnalysis())
+FUNCTION_ANALYSIS("domfrontier", DominanceFrontierAnalysis())
+FUNCTION_ANALYSIS("func-properties", FunctionPropertiesAnalysis())
+FUNCTION_ANALYSIS("loops", LoopAnalysis())
+FUNCTION_ANALYSIS("lazy-value-info", LazyValueAnalysis())
+FUNCTION_ANALYSIS("da", DependenceAnalysis())
+FUNCTION_ANALYSIS("inliner-size-estimator", InlineSizeEstimatorAnalysis())
+FUNCTION_ANALYSIS("memdep", MemoryDependenceAnalysis())
+FUNCTION_ANALYSIS("memoryssa", MemorySSAAnalysis())
+FUNCTION_ANALYSIS("phi-values", PhiValuesAnalysis())
+FUNCTION_ANALYSIS("regions", RegionInfoAnalysis())
+FUNCTION_ANALYSIS("no-op-function", NoOpFunctionAnalysis())
+FUNCTION_ANALYSIS("opt-remark-emit", OptimizationRemarkEmitterAnalysis())
+FUNCTION_ANALYSIS("scalar-evolution", ScalarEvolutionAnalysis())
+FUNCTION_ANALYSIS("stack-safety-local", StackSafetyAnalysis())
+FUNCTION_ANALYSIS("targetlibinfo", TargetLibraryAnalysis())
+FUNCTION_ANALYSIS("targetir",
+ TM ? TM->getTargetIRAnalysis() : TargetIRAnalysis())
+FUNCTION_ANALYSIS("verify", VerifierAnalysis())
+FUNCTION_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis(PIC))
+
+#ifndef FUNCTION_ALIAS_ANALYSIS
+#define FUNCTION_ALIAS_ANALYSIS(NAME, CREATE_PASS) \
+ FUNCTION_ANALYSIS(NAME, CREATE_PASS)
+#endif
+FUNCTION_ALIAS_ANALYSIS("basic-aa", BasicAA())
+FUNCTION_ALIAS_ANALYSIS("cfl-anders-aa", CFLAndersAA())
+FUNCTION_ALIAS_ANALYSIS("cfl-steens-aa", CFLSteensAA())
+FUNCTION_ALIAS_ANALYSIS("objc-arc-aa", objcarc::ObjCARCAA())
+FUNCTION_ALIAS_ANALYSIS("scev-aa", SCEVAA())
+FUNCTION_ALIAS_ANALYSIS("scoped-noalias-aa", ScopedNoAliasAA())
+FUNCTION_ALIAS_ANALYSIS("tbaa", TypeBasedAA())
+#undef FUNCTION_ALIAS_ANALYSIS
+#undef FUNCTION_ANALYSIS
+
+#ifndef FUNCTION_PASS
+#define FUNCTION_PASS(NAME, CREATE_PASS)
+#endif
+FUNCTION_PASS("aa-eval", AAEvaluator())
+FUNCTION_PASS("adce", ADCEPass())
+FUNCTION_PASS("add-discriminators", AddDiscriminatorsPass())
+FUNCTION_PASS("aggressive-instcombine", AggressiveInstCombinePass())
+FUNCTION_PASS("assume-builder", AssumeBuilderPass())
+FUNCTION_PASS("assume-simplify", AssumeSimplifyPass())
+FUNCTION_PASS("alignment-from-assumptions", AlignmentFromAssumptionsPass())
+FUNCTION_PASS("annotation-remarks", AnnotationRemarksPass())
+FUNCTION_PASS("bdce", BDCEPass())
+FUNCTION_PASS("bounds-checking", BoundsCheckingPass())
+FUNCTION_PASS("break-crit-edges", BreakCriticalEdgesPass())
+FUNCTION_PASS("callsite-splitting", CallSiteSplittingPass())
+FUNCTION_PASS("consthoist", ConstantHoistingPass())
+FUNCTION_PASS("constraint-elimination", ConstraintEliminationPass())
+FUNCTION_PASS("chr", ControlHeightReductionPass())
+FUNCTION_PASS("coro-early", CoroEarlyPass())
+FUNCTION_PASS("coro-elide", CoroElidePass())
+FUNCTION_PASS("coro-cleanup", CoroCleanupPass())
+FUNCTION_PASS("correlated-propagation", CorrelatedValuePropagationPass())
+FUNCTION_PASS("dce", DCEPass())
+FUNCTION_PASS("div-rem-pairs", DivRemPairsPass())
+FUNCTION_PASS("dse", DSEPass())
+FUNCTION_PASS("dot-cfg", CFGPrinterPass())
+FUNCTION_PASS("dot-cfg-only", CFGOnlyPrinterPass())
+FUNCTION_PASS("early-cse", EarlyCSEPass(/*UseMemorySSA=*/false))
+FUNCTION_PASS("early-cse-memssa", EarlyCSEPass(/*UseMemorySSA=*/true))
+FUNCTION_PASS("ee-instrument", EntryExitInstrumenterPass(/*PostInlining=*/false))
+FUNCTION_PASS("fix-irreducible", FixIrreduciblePass())
+FUNCTION_PASS("make-guards-explicit", MakeGuardsExplicitPass())
+FUNCTION_PASS("post-inline-ee-instrument", EntryExitInstrumenterPass(/*PostInlining=*/true))
+FUNCTION_PASS("gvn-hoist", GVNHoistPass())
+FUNCTION_PASS("gvn-sink", GVNSinkPass())
+FUNCTION_PASS("helloworld", HelloWorldPass())
+FUNCTION_PASS("infer-address-spaces", InferAddressSpacesPass())
+FUNCTION_PASS("instcombine", InstCombinePass())
+FUNCTION_PASS("instcount", InstCountPass())
+FUNCTION_PASS("instsimplify", InstSimplifyPass())
+FUNCTION_PASS("invalidate<all>", InvalidateAllAnalysesPass())
+FUNCTION_PASS("irce", IRCEPass())
+FUNCTION_PASS("float2int", Float2IntPass())
+FUNCTION_PASS("no-op-function", NoOpFunctionPass())
+FUNCTION_PASS("libcalls-shrinkwrap", LibCallsShrinkWrapPass())
+FUNCTION_PASS("lint", LintPass())
+FUNCTION_PASS("inject-tli-mappings", InjectTLIMappings())
+FUNCTION_PASS("instnamer", InstructionNamerPass())
+FUNCTION_PASS("loweratomic", LowerAtomicPass())
+FUNCTION_PASS("lower-expect", LowerExpectIntrinsicPass())
+FUNCTION_PASS("lower-guard-intrinsic", LowerGuardIntrinsicPass())
+FUNCTION_PASS("lower-constant-intrinsics", LowerConstantIntrinsicsPass())
+FUNCTION_PASS("lower-matrix-intrinsics", LowerMatrixIntrinsicsPass())
+FUNCTION_PASS("lower-matrix-intrinsics-minimal", LowerMatrixIntrinsicsPass(true))
+FUNCTION_PASS("lower-widenable-condition", LowerWidenableConditionPass())
+FUNCTION_PASS("guard-widening", GuardWideningPass())
+FUNCTION_PASS("load-store-vectorizer", LoadStoreVectorizerPass())
+FUNCTION_PASS("loop-simplify", LoopSimplifyPass())
+FUNCTION_PASS("loop-sink", LoopSinkPass())
+FUNCTION_PASS("loop-unroll-and-jam", LoopUnrollAndJamPass())
+FUNCTION_PASS("loop-flatten", LoopFlattenPass())
+FUNCTION_PASS("lowerinvoke", LowerInvokePass())
+FUNCTION_PASS("lowerswitch", LowerSwitchPass())
+FUNCTION_PASS("mem2reg", PromotePass())
+FUNCTION_PASS("memcpyopt", MemCpyOptPass())
+FUNCTION_PASS("mergeicmps", MergeICmpsPass())
+FUNCTION_PASS("mergereturn", UnifyFunctionExitNodesPass())
+FUNCTION_PASS("nary-reassociate", NaryReassociatePass())
+FUNCTION_PASS("newgvn", NewGVNPass())
+FUNCTION_PASS("jump-threading", JumpThreadingPass())
+FUNCTION_PASS("partially-inline-libcalls", PartiallyInlineLibCallsPass())
+FUNCTION_PASS("lcssa", LCSSAPass())
+FUNCTION_PASS("loop-data-prefetch", LoopDataPrefetchPass())
+FUNCTION_PASS("loop-load-elim", LoopLoadEliminationPass())
+FUNCTION_PASS("loop-fusion", LoopFusePass())
+FUNCTION_PASS("loop-distribute", LoopDistributePass())
+FUNCTION_PASS("loop-versioning", LoopVersioningPass())
+FUNCTION_PASS("objc-arc", ObjCARCOptPass())
+FUNCTION_PASS("objc-arc-contract", ObjCARCContractPass())
+FUNCTION_PASS("objc-arc-expand", ObjCARCExpandPass())
+FUNCTION_PASS("pgo-memop-opt", PGOMemOPSizeOpt())
+FUNCTION_PASS("print", PrintFunctionPass(dbgs()))
+FUNCTION_PASS("print<assumptions>", AssumptionPrinterPass(dbgs()))
+FUNCTION_PASS("print<block-freq>", BlockFrequencyPrinterPass(dbgs()))
+FUNCTION_PASS("print<branch-prob>", BranchProbabilityPrinterPass(dbgs()))
+FUNCTION_PASS("print<da>", DependenceAnalysisPrinterPass(dbgs()))
+FUNCTION_PASS("print<domtree>", DominatorTreePrinterPass(dbgs()))
+FUNCTION_PASS("print<postdomtree>", PostDominatorTreePrinterPass(dbgs()))
+FUNCTION_PASS("print<delinearization>", DelinearizationPrinterPass(dbgs()))
+FUNCTION_PASS("print<demanded-bits>", DemandedBitsPrinterPass(dbgs()))
+FUNCTION_PASS("print<domfrontier>", DominanceFrontierPrinterPass(dbgs()))
+FUNCTION_PASS("print<func-properties>", FunctionPropertiesPrinterPass(dbgs()))
+FUNCTION_PASS("print<inline-cost>", InlineCostAnnotationPrinterPass(dbgs()))
+FUNCTION_PASS("print<inliner-size-estimator>",
+ InlineSizeEstimatorAnalysisPrinterPass(dbgs()))
+FUNCTION_PASS("print<loops>", LoopPrinterPass(dbgs()))
+FUNCTION_PASS("print<memoryssa>", MemorySSAPrinterPass(dbgs()))
+FUNCTION_PASS("print<phi-values>", PhiValuesPrinterPass(dbgs()))
+FUNCTION_PASS("print<regions>", RegionInfoPrinterPass(dbgs()))
+FUNCTION_PASS("print<scalar-evolution>", ScalarEvolutionPrinterPass(dbgs()))
+FUNCTION_PASS("print<stack-safety-local>", StackSafetyPrinterPass(dbgs()))
+// TODO: rename to print<foo> after NPM switch
+FUNCTION_PASS("print-alias-sets", AliasSetsPrinterPass(dbgs()))
+FUNCTION_PASS("print-predicateinfo", PredicateInfoPrinterPass(dbgs()))
+FUNCTION_PASS("print-mustexecute", MustExecutePrinterPass(dbgs()))
+FUNCTION_PASS("print-memderefs", MemDerefPrinterPass(dbgs()))
+FUNCTION_PASS("reassociate", ReassociatePass())
+FUNCTION_PASS("redundant-dbg-inst-elim", RedundantDbgInstEliminationPass())
+FUNCTION_PASS("reg2mem", RegToMemPass())
+FUNCTION_PASS("scalarize-masked-mem-intrin", ScalarizeMaskedMemIntrinPass())
+FUNCTION_PASS("scalarizer", ScalarizerPass())
+FUNCTION_PASS("separate-const-offset-from-gep", SeparateConstOffsetFromGEPPass())
+FUNCTION_PASS("sccp", SCCPPass())
+FUNCTION_PASS("simplifycfg", SimplifyCFGPass())
+FUNCTION_PASS("sink", SinkingPass())
+FUNCTION_PASS("slp-vectorizer", SLPVectorizerPass())
+FUNCTION_PASS("slsr", StraightLineStrengthReducePass())
+FUNCTION_PASS("speculative-execution", SpeculativeExecutionPass())
+FUNCTION_PASS("spec-phis", SpeculateAroundPHIsPass())
+FUNCTION_PASS("sroa", SROA())
+FUNCTION_PASS("strip-gc-relocates", StripGCRelocates())
+FUNCTION_PASS("structurizecfg", StructurizeCFGPass())
+FUNCTION_PASS("tailcallelim", TailCallElimPass())
+FUNCTION_PASS("unify-loop-exits", UnifyLoopExitsPass())
+FUNCTION_PASS("vector-combine", VectorCombinePass())
+FUNCTION_PASS("verify", VerifierPass())
+FUNCTION_PASS("verify<domtree>", DominatorTreeVerifierPass())
+FUNCTION_PASS("verify<loops>", LoopVerifierPass())
+FUNCTION_PASS("verify<memoryssa>", MemorySSAVerifierPass())
+FUNCTION_PASS("verify<regions>", RegionInfoVerifierPass())
+FUNCTION_PASS("verify<safepoint-ir>", SafepointIRVerifierPass())
+FUNCTION_PASS("verify<scalar-evolution>", ScalarEvolutionVerifierPass())
+FUNCTION_PASS("view-cfg", CFGViewerPass())
+FUNCTION_PASS("view-cfg-only", CFGOnlyViewerPass())
+FUNCTION_PASS("transform-warning", WarnMissedTransformationsPass())
+FUNCTION_PASS("asan", AddressSanitizerPass(false, false, false))
+FUNCTION_PASS("kasan", AddressSanitizerPass(true, false, false))
+FUNCTION_PASS("msan", MemorySanitizerPass({}))
+FUNCTION_PASS("kmsan", MemorySanitizerPass({0, false, /*Kernel=*/true}))
+FUNCTION_PASS("tsan", ThreadSanitizerPass())
+FUNCTION_PASS("memprof", MemProfilerPass())
+#undef FUNCTION_PASS
+
+#ifndef FUNCTION_PASS_WITH_PARAMS
+#define FUNCTION_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER)
+#endif
+FUNCTION_PASS_WITH_PARAMS("loop-unroll",
+ [](LoopUnrollOptions Opts) {
+ return LoopUnrollPass(Opts);
+ },
+ parseLoopUnrollOptions)
+FUNCTION_PASS_WITH_PARAMS("msan",
+ [](MemorySanitizerOptions Opts) {
+ return MemorySanitizerPass(Opts);
+ },
+ parseMSanPassOptions)
+FUNCTION_PASS_WITH_PARAMS("simplify-cfg",
+ [](SimplifyCFGOptions Opts) {
+ return SimplifyCFGPass(Opts);
+ },
+ parseSimplifyCFGOptions)
+FUNCTION_PASS_WITH_PARAMS("loop-vectorize",
+ [](LoopVectorizeOptions Opts) {
+ return LoopVectorizePass(Opts);
+ },
+ parseLoopVectorizeOptions)
+FUNCTION_PASS_WITH_PARAMS("mldst-motion",
+ [](MergedLoadStoreMotionOptions Opts) {
+ return MergedLoadStoreMotionPass(Opts);
+ },
+ parseMergedLoadStoreMotionOptions)
+FUNCTION_PASS_WITH_PARAMS("gvn",
+ [](GVNOptions Opts) {
+ return GVN(Opts);
+ },
+ parseGVNOptions)
+FUNCTION_PASS_WITH_PARAMS("print<stack-lifetime>",
+ [](StackLifetime::LivenessType Type) {
+ return StackLifetimePrinterPass(dbgs(), Type);
+ },
+ parseStackLifetimeOptions)
+#undef FUNCTION_PASS_WITH_PARAMS
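+// In pipeline text, parameterized passes take their options in angle
+// brackets, e.g. (illustrative): "loop-unroll<O2>" or
+// "simplify-cfg<forward-switch-cond>".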
+
+#ifndef LOOP_ANALYSIS
+#define LOOP_ANALYSIS(NAME, CREATE_PASS)
+#endif
+LOOP_ANALYSIS("no-op-loop", NoOpLoopAnalysis())
+LOOP_ANALYSIS("access-info", LoopAccessAnalysis())
+LOOP_ANALYSIS("ddg", DDGAnalysis())
+LOOP_ANALYSIS("iv-users", IVUsersAnalysis())
+LOOP_ANALYSIS("pass-instrumentation", PassInstrumentationAnalysis(PIC))
+#undef LOOP_ANALYSIS
+
+#ifndef LOOP_PASS
+#define LOOP_PASS(NAME, CREATE_PASS)
+#endif
+LOOP_PASS("canon-freeze", CanonicalizeFreezeInLoopsPass())
+LOOP_PASS("dot-ddg", DDGDotPrinterPass())
+LOOP_PASS("invalidate<all>", InvalidateAllAnalysesPass())
+LOOP_PASS("licm", LICMPass())
+LOOP_PASS("loop-idiom", LoopIdiomRecognizePass())
+LOOP_PASS("loop-instsimplify", LoopInstSimplifyPass())
+LOOP_PASS("loop-interchange", LoopInterchangePass())
+LOOP_PASS("loop-rotate", LoopRotatePass())
+LOOP_PASS("no-op-loop", NoOpLoopPass())
+LOOP_PASS("print", PrintLoopPass(dbgs()))
+LOOP_PASS("loop-deletion", LoopDeletionPass())
+LOOP_PASS("loop-simplifycfg", LoopSimplifyCFGPass())
+LOOP_PASS("loop-reduce", LoopStrengthReducePass())
+LOOP_PASS("indvars", IndVarSimplifyPass())
+LOOP_PASS("loop-unroll-full", LoopFullUnrollPass())
+LOOP_PASS("print-access-info", LoopAccessInfoPrinterPass(dbgs()))
+LOOP_PASS("print<ddg>", DDGAnalysisPrinterPass(dbgs()))
+LOOP_PASS("print<iv-users>", IVUsersPrinterPass(dbgs()))
+LOOP_PASS("print<loopnest>", LoopNestPrinterPass(dbgs()))
+LOOP_PASS("print<loop-cache-cost>", LoopCachePrinterPass(dbgs()))
+LOOP_PASS("loop-predication", LoopPredicationPass())
+LOOP_PASS("guard-widening", GuardWideningPass())
+LOOP_PASS("simple-loop-unswitch", SimpleLoopUnswitchPass())
+LOOP_PASS("loop-reroll", LoopRerollPass())
+LOOP_PASS("loop-versioning-licm", LoopVersioningLICMPass())
+#undef LOOP_PASS
+
+#ifndef LOOP_PASS_WITH_PARAMS
+#define LOOP_PASS_WITH_PARAMS(NAME, CREATE_PASS, PARSER)
+#endif
+LOOP_PASS_WITH_PARAMS("unswitch",
+ [](bool NonTrivial) {
+ return SimpleLoopUnswitchPass(NonTrivial);
+ },
+ parseLoopUnswitchOptions)
+#undef LOOP_PASS_WITH_PARAMS
diff --git a/contrib/libs/llvm12/lib/Passes/StandardInstrumentations.cpp b/contrib/libs/llvm12/lib/Passes/StandardInstrumentations.cpp
new file mode 100644
index 00000000000..6795aed7b04
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Passes/StandardInstrumentations.cpp
@@ -0,0 +1,895 @@
+//===- Standard pass instrumentations handling ----------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+///
+/// This file defines IR-printing pass instrumentation callbacks as well as
+/// StandardInstrumentations class that manages standard pass instrumentations.
+///
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Passes/StandardInstrumentations.h"
+#include "llvm/ADT/Any.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassInstrumentation.h"
+#include "llvm/IR/PrintPasses.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/raw_ostream.h"
+#include <unordered_set>
+#include <vector>
+
+using namespace llvm;
+
+cl::opt<bool> PreservedCFGCheckerInstrumentation::VerifyPreservedCFG(
+ "verify-cfg-preserved", cl::Hidden,
+#ifdef NDEBUG
+ cl::init(false));
+#else
+ cl::init(false));
+#endif
+
+// FIXME: Change `-debug-pass-manager` from boolean to enum type. Similar to
+// `-debug-pass` in legacy PM.
+static cl::opt<bool>
+ DebugPMVerbose("debug-pass-manager-verbose", cl::Hidden, cl::init(false),
+ cl::desc("Print all pass management debugging information. "
+ "`-debug-pass-manager` must also be specified"));
+
+// An option that prints out the IR after passes, similar to
+// -print-after-all except that it only prints the IR after passes that
+// change the IR. Passes that do not change the IR are reported as making no
+// changes. In addition, the initial IR is also reported. Other hidden
+// options affect the output of this option: -filter-passes limits the
+// output to the named passes that actually change the IR; other passes are
+// reported as filtered out. The specified passes are either reported as
+// making no changes (with no IR printed) or have their changed IR reported.
+// Similarly, the -filter-print-funcs and -print-module-scope options filter
+// based on function name, reporting changed IRs as functions (or modules if
+// -print-module-scope is specified) for a particular function, or
+// indicating that the IR has been filtered out. These options can be
+// combined, so that only changed IRs for certain passes on certain
+// functions are reported in different formats, with the rest reported as
+// filtered out. The -print-before-changed option prints the IR as it was
+// before each pass that changed it. The optional value 'quiet' reports only
+// when the IR changes, suppressing all other messages, including the
+// initial IR.
+enum ChangePrinter { NoChangePrinter, PrintChangedVerbose, PrintChangedQuiet };
+static cl::opt<ChangePrinter> PrintChanged(
+ "print-changed", cl::desc("Print changed IRs"), cl::Hidden,
+ cl::ValueOptional, cl::init(NoChangePrinter),
+ cl::values(clEnumValN(PrintChangedQuiet, "quiet", "Run in quiet mode"),
+ // Sentinel value for unspecified option.
+ clEnumValN(PrintChangedVerbose, "", "")));
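+// For example (illustrative):
+//   opt -passes=instcombine -print-changed=quiet foo.ll
+// prints only the IR that instcombine actually changed, while plain
+// -print-changed also reports the initial IR and unchanged or filtered
+// passes.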
+
+// An option that supports the -print-changed option. See
+// the description for -print-changed for an explanation of the use
+// of this option. Note that this option has no effect without -print-changed.
+static cl::list<std::string>
+ PrintPassesList("filter-passes", cl::value_desc("pass names"),
+ cl::desc("Only consider IR changes for passes whose names "
+ "match for the print-changed option"),
+ cl::CommaSeparated, cl::Hidden);
+// An option that supports the -print-changed option. See
+// the description for -print-changed for an explanation of the use
+// of this option. Note that this option has no effect without -print-changed.
+static cl::opt<bool>
+ PrintChangedBefore("print-before-changed",
+                       cl::desc("Print the IR before passes that change it"),
+ cl::init(false), cl::Hidden);
+
+namespace {
+
+/// Extracts the Module out of the \p IR unit. Also fills in a textual
+/// description of \p IR for use in the header when printing.
+Optional<std::pair<const Module *, std::string>>
+unwrapModule(Any IR, bool Force = false) {
+ if (any_isa<const Module *>(IR))
+ return std::make_pair(any_cast<const Module *>(IR), std::string());
+
+ if (any_isa<const Function *>(IR)) {
+ const Function *F = any_cast<const Function *>(IR);
+ if (!Force && !isFunctionInPrintList(F->getName()))
+ return None;
+
+ const Module *M = F->getParent();
+ return std::make_pair(M, formatv(" (function: {0})", F->getName()).str());
+ }
+
+ if (any_isa<const LazyCallGraph::SCC *>(IR)) {
+ const LazyCallGraph::SCC *C = any_cast<const LazyCallGraph::SCC *>(IR);
+ for (const LazyCallGraph::Node &N : *C) {
+ const Function &F = N.getFunction();
+ if (Force || (!F.isDeclaration() && isFunctionInPrintList(F.getName()))) {
+ const Module *M = F.getParent();
+ return std::make_pair(M, formatv(" (scc: {0})", C->getName()).str());
+ }
+ }
+ assert(!Force && "Expected to have made a pair when forced.");
+ return None;
+ }
+
+ if (any_isa<const Loop *>(IR)) {
+ const Loop *L = any_cast<const Loop *>(IR);
+ const Function *F = L->getHeader()->getParent();
+ if (!Force && !isFunctionInPrintList(F->getName()))
+ return None;
+ const Module *M = F->getParent();
+ std::string LoopName;
+ raw_string_ostream ss(LoopName);
+ L->getHeader()->printAsOperand(ss, false);
+ return std::make_pair(M, formatv(" (loop: {0})", ss.str()).str());
+ }
+
+ llvm_unreachable("Unknown IR unit");
+}
+
+void printIR(raw_ostream &OS, const Function *F, StringRef Banner,
+ StringRef Extra = StringRef(), bool Brief = false) {
+ if (Brief) {
+ OS << F->getName() << '\n';
+ return;
+ }
+
+ if (!isFunctionInPrintList(F->getName()))
+ return;
+ OS << Banner << Extra << "\n" << static_cast<const Value &>(*F);
+}
+
+void printIR(raw_ostream &OS, const Module *M, StringRef Banner,
+ StringRef Extra = StringRef(), bool Brief = false,
+ bool ShouldPreserveUseListOrder = false) {
+ if (Brief) {
+ OS << M->getName() << '\n';
+ return;
+ }
+
+ if (isFunctionInPrintList("*") || forcePrintModuleIR()) {
+ OS << Banner << Extra << "\n";
+ M->print(OS, nullptr, ShouldPreserveUseListOrder);
+ } else {
+ for (const auto &F : M->functions()) {
+ printIR(OS, &F, Banner, Extra);
+ }
+ }
+}
+
+void printIR(raw_ostream &OS, const LazyCallGraph::SCC *C, StringRef Banner,
+ StringRef Extra = StringRef(), bool Brief = false) {
+ if (Brief) {
+ OS << *C << '\n';
+ return;
+ }
+
+ bool BannerPrinted = false;
+ for (const LazyCallGraph::Node &N : *C) {
+ const Function &F = N.getFunction();
+ if (!F.isDeclaration() && isFunctionInPrintList(F.getName())) {
+ if (!BannerPrinted) {
+ OS << Banner << Extra << "\n";
+ BannerPrinted = true;
+ }
+ F.print(OS);
+ }
+ }
+}
+
+void printIR(raw_ostream &OS, const Loop *L, StringRef Banner,
+ bool Brief = false) {
+ if (Brief) {
+ OS << *L;
+ return;
+ }
+
+ const Function *F = L->getHeader()->getParent();
+ if (!isFunctionInPrintList(F->getName()))
+ return;
+ printLoop(const_cast<Loop &>(*L), OS, std::string(Banner));
+}
+
+/// Generic IR-printing helper that unpacks a pointer to IRUnit wrapped into
+/// llvm::Any and does actual print job.
+void unwrapAndPrint(raw_ostream &OS, Any IR, StringRef Banner,
+ bool ForceModule = false, bool Brief = false,
+ bool ShouldPreserveUseListOrder = false) {
+ if (ForceModule) {
+ if (auto UnwrappedModule = unwrapModule(IR))
+ printIR(OS, UnwrappedModule->first, Banner, UnwrappedModule->second,
+ Brief, ShouldPreserveUseListOrder);
+ return;
+ }
+
+ if (any_isa<const Module *>(IR)) {
+ const Module *M = any_cast<const Module *>(IR);
+ assert(M && "module should be valid for printing");
+ printIR(OS, M, Banner, "", Brief, ShouldPreserveUseListOrder);
+ return;
+ }
+
+ if (any_isa<const Function *>(IR)) {
+ const Function *F = any_cast<const Function *>(IR);
+ assert(F && "function should be valid for printing");
+ printIR(OS, F, Banner, "", Brief);
+ return;
+ }
+
+ if (any_isa<const LazyCallGraph::SCC *>(IR)) {
+ const LazyCallGraph::SCC *C = any_cast<const LazyCallGraph::SCC *>(IR);
+ assert(C && "scc should be valid for printing");
+ std::string Extra = std::string(formatv(" (scc: {0})", C->getName()));
+ printIR(OS, C, Banner, Extra, Brief);
+ return;
+ }
+
+ if (any_isa<const Loop *>(IR)) {
+ const Loop *L = any_cast<const Loop *>(IR);
+ assert(L && "Loop should be valid for printing");
+ printIR(OS, L, Banner, Brief);
+ return;
+ }
+ llvm_unreachable("Unknown wrapped IR type");
+}
+
+// Return true when this is a pass for which changes should be ignored
+bool isIgnored(StringRef PassID) {
+ return isSpecialPass(PassID,
+ {"PassManager", "PassAdaptor", "AnalysisManagerProxy"});
+}
+
+} // namespace
+
+template <typename IRUnitT>
+ChangeReporter<IRUnitT>::~ChangeReporter<IRUnitT>() {
+ assert(BeforeStack.empty() && "Problem with Change Printer stack.");
+}
+
+template <typename IRUnitT>
+bool ChangeReporter<IRUnitT>::isInterestingFunction(const Function &F) {
+ return isFunctionInPrintList(F.getName());
+}
+
+template <typename IRUnitT>
+bool ChangeReporter<IRUnitT>::isInterestingPass(StringRef PassID) {
+ if (isIgnored(PassID))
+ return false;
+
+ static std::unordered_set<std::string> PrintPassNames(PrintPassesList.begin(),
+ PrintPassesList.end());
+ return PrintPassNames.empty() || PrintPassNames.count(PassID.str());
+}
+
+// Return true when this is a pass on IR for which printing
+// of changes is desired.
+template <typename IRUnitT>
+bool ChangeReporter<IRUnitT>::isInteresting(Any IR, StringRef PassID) {
+ if (!isInterestingPass(PassID))
+ return false;
+ if (any_isa<const Function *>(IR))
+ return isInterestingFunction(*any_cast<const Function *>(IR));
+ return true;
+}
+
+template <typename IRUnitT>
+void ChangeReporter<IRUnitT>::saveIRBeforePass(Any IR, StringRef PassID) {
+  // We always need to place something on the stack because invalidated
+  // passes are not given the IR, so we cannot tell whether the pass ran on
+  // something that was filtered out.
+ BeforeStack.emplace_back();
+
+ if (!isInteresting(IR, PassID))
+ return;
+ // Is this the initial IR?
+ if (InitialIR) {
+ InitialIR = false;
+ if (VerboseMode)
+ handleInitialIR(IR);
+ }
+
+ // Save the IR representation on the stack.
+ IRUnitT &Data = BeforeStack.back();
+ generateIRRepresentation(IR, PassID, Data);
+}
+
+template <typename IRUnitT>
+void ChangeReporter<IRUnitT>::handleIRAfterPass(Any IR, StringRef PassID) {
+ assert(!BeforeStack.empty() && "Unexpected empty stack encountered.");
+ std::string Name;
+
+ // unwrapModule has inconsistent handling of names for function IRs.
+ if (any_isa<const Function *>(IR)) {
+ const Function *F = any_cast<const Function *>(IR);
+ Name = formatv(" (function: {0})", F->getName()).str();
+ } else {
+ if (auto UM = unwrapModule(IR))
+ Name = UM->second;
+ }
+  if (Name.empty())
+ Name = " (module)";
+
+ if (isIgnored(PassID)) {
+ if (VerboseMode)
+ handleIgnored(PassID, Name);
+ } else if (!isInteresting(IR, PassID)) {
+ if (VerboseMode)
+ handleFiltered(PassID, Name);
+ } else {
+ // Get the before rep from the stack
+ IRUnitT &Before = BeforeStack.back();
+ // Create the after rep
+ IRUnitT After;
+ generateIRRepresentation(IR, PassID, After);
+
+ // Was there a change in IR?
+ if (same(Before, After)) {
+ if (VerboseMode)
+ omitAfter(PassID, Name);
+ } else
+ handleAfter(PassID, Name, Before, After, IR);
+ }
+ BeforeStack.pop_back();
+}
+
+template <typename IRUnitT>
+void ChangeReporter<IRUnitT>::handleInvalidatedPass(StringRef PassID) {
+ assert(!BeforeStack.empty() && "Unexpected empty stack encountered.");
+
+ // Always flag it as invalidated as we cannot determine when
+ // a pass for a filtered function is invalidated since we do not
+ // get the IR in the call. Also, the output is just alternate
+ // forms of the banner anyway.
+ if (VerboseMode)
+ handleInvalidated(PassID);
+ BeforeStack.pop_back();
+}
+
+template <typename IRUnitT>
+void ChangeReporter<IRUnitT>::registerRequiredCallbacks(
+ PassInstrumentationCallbacks &PIC) {
+ PIC.registerBeforeNonSkippedPassCallback(
+ [this](StringRef P, Any IR) { saveIRBeforePass(IR, P); });
+
+ PIC.registerAfterPassCallback(
+ [this](StringRef P, Any IR, const PreservedAnalyses &) {
+ handleIRAfterPass(IR, P);
+ });
+ PIC.registerAfterPassInvalidatedCallback(
+ [this](StringRef P, const PreservedAnalyses &) {
+ handleInvalidatedPass(P);
+ });
+}
+
+template <typename IRUnitT>
+TextChangeReporter<IRUnitT>::TextChangeReporter(bool Verbose)
+ : ChangeReporter<IRUnitT>(Verbose), Out(dbgs()) {}
+
+template <typename IRUnitT>
+void TextChangeReporter<IRUnitT>::handleInitialIR(Any IR) {
+ // Always print the module.
+ // Unwrap and print directly to avoid filtering problems in general routines.
+ auto UnwrappedModule = unwrapModule(IR, /*Force=*/true);
+ assert(UnwrappedModule && "Expected module to be unwrapped when forced.");
+ Out << "*** IR Dump At Start: ***" << UnwrappedModule->second << "\n";
+ UnwrappedModule->first->print(Out, nullptr,
+ /*ShouldPreserveUseListOrder=*/true);
+}
+
+template <typename IRUnitT>
+void TextChangeReporter<IRUnitT>::omitAfter(StringRef PassID,
+ std::string &Name) {
+ Out << formatv("*** IR Dump After {0}{1} omitted because no change ***\n",
+ PassID, Name);
+}
+
+template <typename IRUnitT>
+void TextChangeReporter<IRUnitT>::handleInvalidated(StringRef PassID) {
+ Out << formatv("*** IR Pass {0} invalidated ***\n", PassID);
+}
+
+template <typename IRUnitT>
+void TextChangeReporter<IRUnitT>::handleFiltered(StringRef PassID,
+ std::string &Name) {
+ SmallString<20> Banner =
+ formatv("*** IR Dump After {0}{1} filtered out ***\n", PassID, Name);
+ Out << Banner;
+}
+
+template <typename IRUnitT>
+void TextChangeReporter<IRUnitT>::handleIgnored(StringRef PassID,
+ std::string &Name) {
+ Out << formatv("*** IR Pass {0}{1} ignored ***\n", PassID, Name);
+}
+
+IRChangedPrinter::~IRChangedPrinter() {}
+
+void IRChangedPrinter::registerCallbacks(PassInstrumentationCallbacks &PIC) {
+ if (PrintChanged != NoChangePrinter)
+ TextChangeReporter<std::string>::registerRequiredCallbacks(PIC);
+}
+
+void IRChangedPrinter::generateIRRepresentation(Any IR, StringRef PassID,
+ std::string &Output) {
+ raw_string_ostream OS(Output);
+  // Use the after banner for all cases so it will match.
+ SmallString<20> Banner = formatv("*** IR Dump After {0} ***", PassID);
+ unwrapAndPrint(OS, IR, Banner, forcePrintModuleIR(),
+ /*Brief=*/false, /*ShouldPreserveUseListOrder=*/true);
+
+ OS.str();
+}
+
+void IRChangedPrinter::handleAfter(StringRef PassID, std::string &Name,
+ const std::string &Before,
+ const std::string &After, Any) {
+ assert(After.find("*** IR Dump") == 0 && "Unexpected banner format.");
+ StringRef AfterRef = After;
+ StringRef Banner =
+ AfterRef.take_until([](char C) -> bool { return C == '\n'; });
+
+ // Report the IR before the changes when requested.
+ if (PrintChangedBefore) {
+ Out << "*** IR Dump Before" << Banner.substr(17);
+ // LazyCallGraph::SCC already has "(scc:..." in banner so only add
+ // in the name if it isn't already there.
+ if (Name.substr(0, 6) != " (scc:" && !forcePrintModuleIR())
+ Out << Name;
+
+ StringRef BeforeRef = Before;
+ Out << BeforeRef.substr(Banner.size());
+ }
+
+ Out << Banner;
+
+ // LazyCallGraph::SCC already has "(scc:..." in banner so only add
+ // in the name if it isn't already there.
+ if (Name.substr(0, 6) != " (scc:" && !forcePrintModuleIR())
+ Out << Name;
+
+ Out << After.substr(Banner.size());
+}
+
+bool IRChangedPrinter::same(const std::string &S1, const std::string &S2) {
+ return S1 == S2;
+}
+
+PrintIRInstrumentation::~PrintIRInstrumentation() {
+ assert(ModuleDescStack.empty() && "ModuleDescStack is not empty at exit");
+}
+
+void PrintIRInstrumentation::pushModuleDesc(StringRef PassID, Any IR) {
+ assert(StoreModuleDesc);
+ const Module *M = nullptr;
+ std::string Extra;
+ if (auto UnwrappedModule = unwrapModule(IR))
+ std::tie(M, Extra) = UnwrappedModule.getValue();
+ ModuleDescStack.emplace_back(M, Extra, PassID);
+}
+
+PrintIRInstrumentation::PrintModuleDesc
+PrintIRInstrumentation::popModuleDesc(StringRef PassID) {
+ assert(!ModuleDescStack.empty() && "empty ModuleDescStack");
+ PrintModuleDesc ModuleDesc = ModuleDescStack.pop_back_val();
+ assert(std::get<2>(ModuleDesc).equals(PassID) && "malformed ModuleDescStack");
+ return ModuleDesc;
+}
+
+void PrintIRInstrumentation::printBeforePass(StringRef PassID, Any IR) {
+ if (isIgnored(PassID))
+ return;
+
+ // Save the Module for AfterPassInvalidated operations.
+ // Note: here we rely on the fact that we do not change modules while
+ // traversing the pipeline, so the latest captured module is valid
+ // for all print operations that have not happened yet.
+ if (StoreModuleDesc && shouldPrintAfterPass(PassID))
+ pushModuleDesc(PassID, IR);
+
+ if (!shouldPrintBeforePass(PassID))
+ return;
+
+ SmallString<20> Banner = formatv("*** IR Dump Before {0} ***", PassID);
+ unwrapAndPrint(dbgs(), IR, Banner, forcePrintModuleIR());
+}
+
+void PrintIRInstrumentation::printAfterPass(StringRef PassID, Any IR) {
+ if (isIgnored(PassID))
+ return;
+
+ if (!shouldPrintAfterPass(PassID))
+ return;
+
+ if (StoreModuleDesc)
+ popModuleDesc(PassID);
+
+ SmallString<20> Banner = formatv("*** IR Dump After {0} ***", PassID);
+ unwrapAndPrint(dbgs(), IR, Banner, forcePrintModuleIR());
+}
+
+void PrintIRInstrumentation::printAfterPassInvalidated(StringRef PassID) {
+ StringRef PassName = PIC->getPassNameForClassName(PassID);
+ if (!StoreModuleDesc || !shouldPrintAfterPass(PassName))
+ return;
+
+ if (isIgnored(PassID))
+ return;
+
+ const Module *M;
+ std::string Extra;
+ StringRef StoredPassID;
+ std::tie(M, Extra, StoredPassID) = popModuleDesc(PassID);
+ // Additional filtering (e.g. -filter-print-func) can lead to module
+ // printing being skipped.
+ if (!M)
+ return;
+
+ SmallString<20> Banner =
+ formatv("*** IR Dump After {0} *** invalidated: ", PassID);
+ printIR(dbgs(), M, Banner, Extra);
+}
+
+bool PrintIRInstrumentation::shouldPrintBeforePass(StringRef PassID) {
+ if (shouldPrintBeforeAll())
+ return true;
+
+ StringRef PassName = PIC->getPassNameForClassName(PassID);
+ for (const auto &P : printBeforePasses()) {
+ if (PassName == P)
+ return true;
+ }
+ return false;
+}
+
+bool PrintIRInstrumentation::shouldPrintAfterPass(StringRef PassID) {
+ if (shouldPrintAfterAll())
+ return true;
+
+ StringRef PassName = PIC->getPassNameForClassName(PassID);
+ for (const auto &P : printAfterPasses()) {
+ if (PassName == P)
+ return true;
+ }
+ return false;
+}
+
+void PrintIRInstrumentation::registerCallbacks(
+ PassInstrumentationCallbacks &PIC) {
+ this->PIC = &PIC;
+
+ // The BeforePass callback is not just for printing; it also saves a Module
+ // for later use in AfterPassInvalidated.
+ StoreModuleDesc = forcePrintModuleIR() && shouldPrintAfterSomePass();
+ if (shouldPrintBeforeSomePass() || StoreModuleDesc)
+ PIC.registerBeforeNonSkippedPassCallback(
+ [this](StringRef P, Any IR) { this->printBeforePass(P, IR); });
+
+ if (shouldPrintAfterSomePass()) {
+ PIC.registerAfterPassCallback(
+ [this](StringRef P, Any IR, const PreservedAnalyses &) {
+ this->printAfterPass(P, IR);
+ });
+ PIC.registerAfterPassInvalidatedCallback(
+ [this](StringRef P, const PreservedAnalyses &) {
+ this->printAfterPassInvalidated(P);
+ });
+ }
+}
+
+void OptNoneInstrumentation::registerCallbacks(
+ PassInstrumentationCallbacks &PIC) {
+ PIC.registerShouldRunOptionalPassCallback(
+ [this](StringRef P, Any IR) { return this->shouldRun(P, IR); });
+}
+
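+// A pass is skippable when its IR unit resolves to a function carrying the
+// optnone attribute; loop passes are mapped to their parent function first.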
+bool OptNoneInstrumentation::shouldRun(StringRef PassID, Any IR) {
+ const Function *F = nullptr;
+ if (any_isa<const Function *>(IR)) {
+ F = any_cast<const Function *>(IR);
+ } else if (any_isa<const Loop *>(IR)) {
+ F = any_cast<const Loop *>(IR)->getHeader()->getParent();
+ }
+ bool ShouldRun = !(F && F->hasOptNone());
+ if (!ShouldRun && DebugLogging) {
+ errs() << "Skipping pass " << PassID << " on " << F->getName()
+ << " due to optnone attribute\n";
+ }
+ return ShouldRun;
+}
+
+static std::string getBisectDescription(Any IR) {
+ if (any_isa<const Module *>(IR)) {
+ const Module *M = any_cast<const Module *>(IR);
+ assert(M && "module should be valid for printing");
+ return "module (" + M->getName().str() + ")";
+ }
+
+ if (any_isa<const Function *>(IR)) {
+ const Function *F = any_cast<const Function *>(IR);
+ assert(F && "function should be valid for printing");
+ return "function (" + F->getName().str() + ")";
+ }
+
+ if (any_isa<const LazyCallGraph::SCC *>(IR)) {
+ const LazyCallGraph::SCC *C = any_cast<const LazyCallGraph::SCC *>(IR);
+ assert(C && "scc should be valid for printing");
+ return "SCC " + C->getName();
+ }
+
+ if (any_isa<const Loop *>(IR)) {
+ return "loop";
+ }
+
+ llvm_unreachable("Unknown wrapped IR type");
+}
+
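+// Implements opt-bisect for the new pass manager: every optional pass asks
+// the global OptBisector whether it may still run; passes on the ignore list
+// are always allowed through.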
+void OptBisectInstrumentation::registerCallbacks(
+ PassInstrumentationCallbacks &PIC) {
+ if (!OptBisector->isEnabled())
+ return;
+ PIC.registerShouldRunOptionalPassCallback([](StringRef PassID, Any IR) {
+ return isIgnored(PassID) ||
+ OptBisector->checkPass(PassID, getBisectDescription(IR));
+ });
+}
+
+void PrintPassInstrumentation::registerCallbacks(
+ PassInstrumentationCallbacks &PIC) {
+ if (!DebugLogging)
+ return;
+
+ std::vector<StringRef> SpecialPasses = {"PassManager"};
+ if (!DebugPMVerbose)
+ SpecialPasses.emplace_back("PassAdaptor");
+
+ PIC.registerBeforeSkippedPassCallback(
+ [SpecialPasses](StringRef PassID, Any IR) {
+ assert(!isSpecialPass(PassID, SpecialPasses) &&
+ "Unexpectedly skipping special pass");
+
+ dbgs() << "Skipping pass: " << PassID << " on ";
+ unwrapAndPrint(dbgs(), IR, "", false, true);
+ });
+
+ PIC.registerBeforeNonSkippedPassCallback(
+ [SpecialPasses](StringRef PassID, Any IR) {
+ if (isSpecialPass(PassID, SpecialPasses))
+ return;
+
+ dbgs() << "Running pass: " << PassID << " on ";
+ unwrapAndPrint(dbgs(), IR, "", false, true);
+ });
+
+ PIC.registerBeforeAnalysisCallback([](StringRef PassID, Any IR) {
+ dbgs() << "Running analysis: " << PassID << " on ";
+ unwrapAndPrint(dbgs(), IR, "", false, true);
+ });
+}
+
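+// Snapshot a function's CFG as a map from each block to its successors,
+// counting edge multiplicity so duplicated successor edges are compared too.
+// When TrackBBLifetime is set, BBGuards observe block deletion so that a
+// stale snapshot can later be recognized as poisoned.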
+PreservedCFGCheckerInstrumentation::CFG::CFG(const Function *F,
+ bool TrackBBLifetime) {
+ if (TrackBBLifetime)
+ BBGuards = DenseMap<intptr_t, BBGuard>(F->size());
+ for (const auto &BB : *F) {
+ if (BBGuards)
+ BBGuards->try_emplace(intptr_t(&BB), &BB);
+ for (auto *Succ : successors(&BB)) {
+ Graph[&BB][Succ]++;
+ if (BBGuards)
+ BBGuards->try_emplace(intptr_t(Succ), Succ);
+ }
+ }
+}
+
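+// Print a stable identifier for a block: its name when it has one, otherwise
+// "entry", a positional "unnamed_N" index, or "unnamed_removed" for a block
+// detached from its function, always suffixed with the block's address.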
+static void printBBName(raw_ostream &out, const BasicBlock *BB) {
+ if (BB->hasName()) {
+ out << BB->getName() << "<" << BB << ">";
+ return;
+ }
+
+ if (!BB->getParent()) {
+ out << "unnamed_removed<" << BB << ">";
+ return;
+ }
+
+ if (BB == &BB->getParent()->getEntryBlock()) {
+ out << "entry"
+ << "<" << BB << ">";
+ return;
+ }
+
+ unsigned FuncOrderBlockNum = 0;
+ for (auto &FuncBB : *BB->getParent()) {
+ if (&FuncBB == BB)
+ break;
+ FuncOrderBlockNum++;
+ }
+ out << "unnamed_" << FuncOrderBlockNum << "<" << BB << ">";
+}
+
+void PreservedCFGCheckerInstrumentation::CFG::printDiff(raw_ostream &out,
+ const CFG &Before,
+ const CFG &After) {
+ assert(!After.isPoisoned());
+
+ // Print function name.
+ const CFG *FuncGraph = nullptr;
+ if (!After.Graph.empty())
+ FuncGraph = &After;
+ else if (!Before.isPoisoned() && !Before.Graph.empty())
+ FuncGraph = &Before;
+
+ if (FuncGraph)
+ out << "In function @"
+ << FuncGraph->Graph.begin()->first->getParent()->getName() << "\n";
+
+ if (Before.isPoisoned()) {
+ out << "Some blocks were deleted\n";
+ return;
+ }
+
+ // Find and print graph differences.
+ if (Before.Graph.size() != After.Graph.size())
+ out << "Different number of non-leaf basic blocks: before="
+ << Before.Graph.size() << ", after=" << After.Graph.size() << "\n";
+
+ for (auto &BB : Before.Graph) {
+ auto BA = After.Graph.find(BB.first);
+ if (BA == After.Graph.end()) {
+ out << "Non-leaf block ";
+ printBBName(out, BB.first);
+ out << " is removed (" << BB.second.size() << " successors)\n";
+ }
+ }
+
+ for (auto &BA : After.Graph) {
+ auto BB = Before.Graph.find(BA.first);
+ if (BB == Before.Graph.end()) {
+ out << "Non-leaf block ";
+ printBBName(out, BA.first);
+ out << " is added (" << BA.second.size() << " successors)\n";
+ continue;
+ }
+
+ if (BB->second == BA.second)
+ continue;
+
+ out << "Different successors of block ";
+ printBBName(out, BA.first);
+ out << " (unordered):\n";
+ out << "- before (" << BB->second.size() << "): ";
+ for (auto &SuccB : BB->second) {
+ printBBName(out, SuccB.first);
+ if (SuccB.second != 1)
+ out << "(" << SuccB.second << "), ";
+ else
+ out << ", ";
+ }
+ out << "\n";
+ out << "- after (" << BA.second.size() << "): ";
+ for (auto &SuccA : BA.second) {
+ printBBName(out, SuccA.first);
+ if (SuccA.second != 1)
+ out << "(" << SuccA.second << "), ";
+ else
+ out << ", ";
+ }
+ out << "\n";
+ }
+}
+
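+// Wire the checker into the pipeline: snapshot the CFG before every
+// non-skipped function pass and, whenever the pass claims CFGAnalyses were
+// preserved, compare the snapshots and abort compilation on any mismatch.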
+void PreservedCFGCheckerInstrumentation::registerCallbacks(
+ PassInstrumentationCallbacks &PIC) {
+ if (!VerifyPreservedCFG)
+ return;
+
+ PIC.registerBeforeNonSkippedPassCallback([this](StringRef P, Any IR) {
+ if (any_isa<const Function *>(IR))
+ GraphStackBefore.emplace_back(P, CFG(any_cast<const Function *>(IR)));
+ else
+ GraphStackBefore.emplace_back(P, None);
+ });
+
+ PIC.registerAfterPassInvalidatedCallback(
+ [this](StringRef P, const PreservedAnalyses &PassPA) {
+ auto Before = GraphStackBefore.pop_back_val();
+ assert(Before.first == P &&
+ "Before and After callbacks must correspond");
+ (void)Before;
+ });
+
+ PIC.registerAfterPassCallback([this](StringRef P, Any IR,
+ const PreservedAnalyses &PassPA) {
+ auto Before = GraphStackBefore.pop_back_val();
+ assert(Before.first == P && "Before and After callbacks must correspond");
+ auto &GraphBefore = Before.second;
+
+ if (!PassPA.allAnalysesInSetPreserved<CFGAnalyses>())
+ return;
+
+ if (any_isa<const Function *>(IR)) {
+ assert(GraphBefore && "Must be built in BeforePassCallback");
+ CFG GraphAfter(any_cast<const Function *>(IR), /*TrackBBLifetime=*/false);
+ if (GraphAfter == *GraphBefore)
+ return;
+
+ dbgs() << "Error: " << P
+ << " reported it preserved CFG, but changes detected:\n";
+ CFG::printDiff(dbgs(), *GraphBefore, GraphAfter);
+ report_fatal_error(Twine("Preserved CFG changed by ", P));
+ }
+ });
+}
+
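+// Run the IR verifier after every pass when VerifyEach is enabled, resolving
+// loop and SCC units to their parent function or module before verifying.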
+void VerifyInstrumentation::registerCallbacks(
+ PassInstrumentationCallbacks &PIC) {
+ PIC.registerAfterPassCallback(
+ [this](StringRef P, Any IR, const PreservedAnalyses &PassPA) {
+ if (isIgnored(P) || P == "VerifierPass")
+ return;
+ if (any_isa<const Function *>(IR) || any_isa<const Loop *>(IR)) {
+ const Function *F;
+ if (any_isa<const Loop *>(IR))
+ F = any_cast<const Loop *>(IR)->getHeader()->getParent();
+ else
+ F = any_cast<const Function *>(IR);
+ if (DebugLogging)
+ dbgs() << "Verifying function " << F->getName() << "\n";
+
+ if (verifyFunction(*F))
+ report_fatal_error("Broken function found, compilation aborted!");
+ } else if (any_isa<const Module *>(IR) ||
+ any_isa<const LazyCallGraph::SCC *>(IR)) {
+ const Module *M;
+ if (any_isa<const LazyCallGraph::SCC *>(IR))
+ M = any_cast<const LazyCallGraph::SCC *>(IR)
+ ->begin()
+ ->getFunction()
+ .getParent();
+ else
+ M = any_cast<const Module *>(IR);
+ if (DebugLogging)
+ dbgs() << "Verifying module " << M->getName() << "\n";
+
+ if (verifyModule(*M))
+ report_fatal_error("Broken module found, compilation aborted!");
+ }
+ });
+}
+
+StandardInstrumentations::StandardInstrumentations(bool DebugLogging,
+ bool VerifyEach)
+ : PrintPass(DebugLogging), OptNone(DebugLogging),
+ PrintChangedIR(PrintChanged != PrintChangedQuiet), Verify(DebugLogging),
+ VerifyEach(VerifyEach) {}
+
+void StandardInstrumentations::registerCallbacks(
+ PassInstrumentationCallbacks &PIC) {
+ PrintIR.registerCallbacks(PIC);
+ PrintPass.registerCallbacks(PIC);
+ TimePasses.registerCallbacks(PIC);
+ OptNone.registerCallbacks(PIC);
+ OptBisect.registerCallbacks(PIC);
+ PreservedCFGChecker.registerCallbacks(PIC);
+ PrintChangedIR.registerCallbacks(PIC);
+ PseudoProbeVerification.registerCallbacks(PIC);
+ if (VerifyEach)
+ Verify.registerCallbacks(PIC);
+}
+
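+// A minimal usage sketch (illustrative comment only, not part of the
+// original file): clients typically create the instrumentation set alongside
+// a PassBuilder and hand both the same PassInstrumentationCallbacks; the
+// exact PassBuilder constructor arguments vary across LLVM versions and are
+// elided here.
+//
+//   PassInstrumentationCallbacks PIC;
+//   StandardInstrumentations SI(/*DebugLogging=*/true, /*VerifyEach=*/false);
+//   SI.registerCallbacks(PIC);
+//   PassBuilder PB(/* ..., */ &PIC);
+//   // Build the analysis managers and a pass pipeline, then run it; every
+//   // pass execution now fires the callbacks registered above.
+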
+namespace llvm {
+
+template class ChangeReporter<std::string>;
+template class TextChangeReporter<std::string>;
+
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/ProfileData/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/ProfileData/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/ProfileData/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/ProfileData/Coverage/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/ProfileData/Coverage/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/ProfileData/Coverage/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Remarks/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Remarks/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Remarks/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Support/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Support/.yandex_meta/licenses.list.txt
deleted file mode 100644
index 02b6dcc8d5f..00000000000
--- a/contrib/libs/llvm12/lib/Support/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,491 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
- * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
- * See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
- * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================BSD-2-Clause====================
-* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
-
-
-====================BSD-2-Clause====================
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following disclaimer
-* in the documentation and/or other materials provided with the
-* distribution.
-*
-* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-====================BSD-3-Clause====================
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
-
-
-====================COPYRIGHT====================
- Copyright (C) 2012-2016, Yann Collet.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 1992 Henry Spencer.
- * Copyright (c) 1992, 1993
- * The Regents of the University of California. All rights reserved.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 1992, 1993, 1994 Henry Spencer.
- * Copyright (c) 1992, 1993, 1994
- * The Regents of the University of California. All rights reserved.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 1994
- * The Regents of the University of California. All rights reserved.
-
-
-====================COPYRIGHT====================
- * Copyright (c) 1998 Todd C. Miller <Todd.Miller@courtesan.com>
-
-
-====================COPYRIGHT====================
- * Copyright 2001-2004 Unicode, Inc.
-
-
-====================COPYRIGHT====================
- * This software was written by Alexander Peslyak in 2001. No copyright is
- * claimed, and the software is hereby placed in the public domain.
- * In case this attempt to disclaim copyright and place the software in the
- * public domain is deemed null and void, then the software is
- * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
- * general public under the following terms:
-
-
-====================COPYRIGHT====================
-Copyright 1992, 1993, 1994 Henry Spencer. All rights reserved.
-
-
-====================File: LICENSE.TXT====================
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
----- LLVM Exceptions to the Apache 2.0 License ----
-
-As an exception, if, as a result of your compiling your source code, portions
-of this Software are embedded into an Object form of such source code, you
-may redistribute such embedded portions in such Object form without complying
-with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
-
-In addition, if you combine or link compiled forms of this Software with
-software that is licensed under the GPLv2 ("Combined Software") and if a
-court of competent jurisdiction determines that the patent provision (Section
-3), the indemnity provision (Section 9) or other Section of the License
-conflicts with the conditions of the GPLv2, you may retroactively and
-prospectively choose to deem waived or otherwise exclude such Section(s) of
-the License, but only in their entirety and only with respect to the Combined
-Software.
-
-==============================================================================
-Software from third parties included in the LLVM Project:
-==============================================================================
-The LLVM Project contains third party software which is under different license
-terms. All such code will be identified clearly using at least one of two
-mechanisms:
-1) It will be in a separate directory tree with its own `LICENSE.txt` or
- `LICENSE` file at the top containing the specific license and restrictions
- which apply to that software, or
-2) It will contain specific license and restriction terms at the top of every
- file.
-
-==============================================================================
-Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
- LLVM Team
-
- University of Illinois at Urbana-Champaign
-
- http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimers.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimers in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the names of the LLVM Team, University of Illinois at
- Urbana-Champaign, nor the names of its contributors may be used to
- endorse or promote products derived from this Software without specific
- prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-
-
-====================File: include/llvm/Support/LICENSE.TXT====================
-LLVM System Interface Library
--------------------------------------------------------------------------------
-The LLVM System Interface Library is licensed under the Illinois Open Source
-License and has the following additional copyright:
-
-Copyright (C) 2004 eXtensible Systems, Inc.
-
-
-====================ISC====================
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-
-
-====================NCSA====================
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-
-
-====================Public-Domain====================
- * Homepage:
- * http://openwall.info/wiki/people/solar/software/public-domain-source-code/md5
- *
- * Author:
- * Alexander Peslyak, better known as Solar Designer <solar at openwall.com>
- *
- * This software was written by Alexander Peslyak in 2001. No copyright is
- * claimed, and the software is hereby placed in the public domain.
- * In case this attempt to disclaim copyright and place the software in the
- * public domain is deemed null and void, then the software is
- * Copyright (c) 2001 Alexander Peslyak and it is hereby released to the
- * general public under the following terms:
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted.
- *
- * There's ABSOLUTELY NO WARRANTY, express or implied.
- *
- * (This is a heavily cut-down "BSD license".)
-
-
-====================Public-Domain====================
-// This code is taken from public domain
-
-
-====================Spencer-94====================
-This software is not subject to any license of the American Telephone
-and Telegraph Company or of the Regents of the University of California.
-
-Permission is granted to anyone to use this software for any purpose on
-any computer system, and to alter it and redistribute it, subject
-to the following restrictions:
-
-1. The author is not responsible for the consequences of use of this
- software, no matter how awful, even if they arise from flaws in it.
-
-2. The origin of this software must not be misrepresented, either by
- explicit claim or by omission. Since few users ever read sources,
- credits must appear in the documentation.
-
-3. Altered versions must be plainly marked as such, and must not be
- misrepresented as being the original software. Since few users
- ever read sources, credits must appear in the documentation.
-
-4. This notice may not be removed or altered.
-
-
-====================Unicode-Mappings====================
- * Disclaimer
- *
- * This source code is provided as is by Unicode, Inc. No claims are
- * made as to fitness for any particular purpose. No warranties of any
- * kind are expressed or implied. The recipient agrees to determine
- * applicability of information provided. If this file has been
- * purchased on magnetic or optical media from Unicode, Inc., the
- * sole remedy for any claim will be exchange of defective media
- * within 90 days of receipt.
- *
- * Limitations on Rights to Redistribute This Code
- *
- * Unicode, Inc. hereby grants the right to freely use the information
- * supplied in this file in the creation of products supporting the
- * Unicode Standard, and to make copies of this file in any form
- * for internal or external distribution as long as this notice
- * remains attached.
diff --git a/contrib/libs/llvm12/lib/TableGen/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/TableGen/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/TableGen/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/AArch64/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/AArch64/.yandex_meta/licenses.list.txt
deleted file mode 100644
index ad3879fc450..00000000000
--- a/contrib/libs/llvm12/lib/Target/AArch64/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,303 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================File: LICENSE.TXT====================
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
----- LLVM Exceptions to the Apache 2.0 License ----
-
-As an exception, if, as a result of your compiling your source code, portions
-of this Software are embedded into an Object form of such source code, you
-may redistribute such embedded portions in such Object form without complying
-with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
-
-In addition, if you combine or link compiled forms of this Software with
-software that is licensed under the GPLv2 ("Combined Software") and if a
-court of competent jurisdiction determines that the patent provision (Section
-3), the indemnity provision (Section 9) or other Section of the License
-conflicts with the conditions of the GPLv2, you may retroactively and
-prospectively choose to deem waived or otherwise exclude such Section(s) of
-the License, but only in their entirety and only with respect to the Combined
-Software.
-
-==============================================================================
-Software from third parties included in the LLVM Project:
-==============================================================================
-The LLVM Project contains third party software which is under different license
-terms. All such code will be identified clearly using at least one of two
-mechanisms:
-1) It will be in a separate directory tree with its own `LICENSE.txt` or
- `LICENSE` file at the top containing the specific license and restrictions
- which apply to that software, or
-2) It will contain specific license and restriction terms at the top of every
- file.
-
-==============================================================================
-Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
- LLVM Team
-
- University of Illinois at Urbana-Champaign
-
- http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimers.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimers in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the names of the LLVM Team, University of Illinois at
- Urbana-Champaign, nor the names of its contributors may be used to
- endorse or promote products derived from this Software without specific
- prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-
-
-====================File: include/llvm/Support/LICENSE.TXT====================
-LLVM System Interface Library
--------------------------------------------------------------------------------
-The LLVM System Interface Library is licensed under the Illinois Open Source
-License and has the following additional copyright:
-
-Copyright (C) 2004 eXtensible Systems, Inc.
-
-
-====================NCSA====================
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm12/lib/Target/AArch64/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/AArch64/AsmParser/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/AArch64/AsmParser/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/AArch64/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/AArch64/Disassembler/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/AArch64/Disassembler/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/AArch64/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/AArch64/MCTargetDesc/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/AArch64/MCTargetDesc/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/AArch64/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/AArch64/TargetInfo/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/AArch64/TargetInfo/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/AArch64/Utils/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/AArch64/Utils/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/AArch64/Utils/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/ARM/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/ARM/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/ARM/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/ARM/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/ARM/AsmParser/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/ARM/AsmParser/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/ARM/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/ARM/Disassembler/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/ARM/Disassembler/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/ARM/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/ARM/MCTargetDesc/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/ARM/MCTargetDesc/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/ARM/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/ARM/TargetInfo/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/ARM/TargetInfo/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/ARM/Utils/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/ARM/Utils/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/ARM/Utils/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/BPF/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/BPF/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/BPF/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/BPF/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/BPF/AsmParser/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/BPF/AsmParser/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/BPF/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/BPF/Disassembler/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/BPF/Disassembler/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/BPF/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/BPF/MCTargetDesc/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/BPF/MCTargetDesc/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/BPF/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/BPF/TargetInfo/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/BPF/TargetInfo/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/NVPTX/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/NVPTX/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/NVPTX/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/NVPTX/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/NVPTX/MCTargetDesc/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/NVPTX/MCTargetDesc/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/NVPTX/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/NVPTX/TargetInfo/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/NVPTX/TargetInfo/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/PowerPC/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/PowerPC/.yandex_meta/licenses.list.txt
deleted file mode 100644
index 2f43d3f2722..00000000000
--- a/contrib/libs/llvm12/lib/Target/PowerPC/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/PowerPC/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/PowerPC/AsmParser/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/PowerPC/AsmParser/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/PowerPC/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/PowerPC/Disassembler/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/PowerPC/Disassembler/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/PowerPC/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/PowerPC/MCTargetDesc/.yandex_meta/licenses.list.txt
deleted file mode 100644
index ad3879fc450..00000000000
--- a/contrib/libs/llvm12/lib/Target/PowerPC/MCTargetDesc/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,303 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================File: LICENSE.TXT====================
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
----- LLVM Exceptions to the Apache 2.0 License ----
-
-As an exception, if, as a result of your compiling your source code, portions
-of this Software are embedded into an Object form of such source code, you
-may redistribute such embedded portions in such Object form without complying
-with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
-
-In addition, if you combine or link compiled forms of this Software with
-software that is licensed under the GPLv2 ("Combined Software") and if a
-court of competent jurisdiction determines that the patent provision (Section
-3), the indemnity provision (Section 9) or other Section of the License
-conflicts with the conditions of the GPLv2, you may retroactively and
-prospectively choose to deem waived or otherwise exclude such Section(s) of
-the License, but only in their entirety and only with respect to the Combined
-Software.
-
-==============================================================================
-Software from third parties included in the LLVM Project:
-==============================================================================
-The LLVM Project contains third party software which is under different license
-terms. All such code will be identified clearly using at least one of two
-mechanisms:
-1) It will be in a separate directory tree with its own `LICENSE.txt` or
- `LICENSE` file at the top containing the specific license and restrictions
- which apply to that software, or
-2) It will contain specific license and restriction terms at the top of every
- file.
-
-==============================================================================
-Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
- LLVM Team
-
- University of Illinois at Urbana-Champaign
-
- http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimers.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimers in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the names of the LLVM Team, University of Illinois at
- Urbana-Champaign, nor the names of its contributors may be used to
- endorse or promote products derived from this Software without specific
- prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-
-
-====================File: include/llvm/Support/LICENSE.TXT====================
-LLVM System Interface Library
--------------------------------------------------------------------------------
-The LLVM System Interface Library is licensed under the Illinois Open Source
-License and has the following additional copyright:
-
-Copyright (C) 2004 eXtensible Systems, Inc.
-
-
-====================NCSA====================
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm12/lib/Target/PowerPC/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/PowerPC/TargetInfo/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/PowerPC/TargetInfo/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/X86/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/X86/.yandex_meta/licenses.list.txt
deleted file mode 100644
index ad3879fc450..00000000000
--- a/contrib/libs/llvm12/lib/Target/X86/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,303 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================File: LICENSE.TXT====================
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
----- LLVM Exceptions to the Apache 2.0 License ----
-
-As an exception, if, as a result of your compiling your source code, portions
-of this Software are embedded into an Object form of such source code, you
-may redistribute such embedded portions in such Object form without complying
-with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
-
-In addition, if you combine or link compiled forms of this Software with
-software that is licensed under the GPLv2 ("Combined Software") and if a
-court of competent jurisdiction determines that the patent provision (Section
-3), the indemnity provision (Section 9) or other Section of the License
-conflicts with the conditions of the GPLv2, you may retroactively and
-prospectively choose to deem waived or otherwise exclude such Section(s) of
-the License, but only in their entirety and only with respect to the Combined
-Software.
-
-==============================================================================
-Software from third parties included in the LLVM Project:
-==============================================================================
-The LLVM Project contains third party software which is under different license
-terms. All such code will be identified clearly using at least one of two
-mechanisms:
-1) It will be in a separate directory tree with its own `LICENSE.txt` or
- `LICENSE` file at the top containing the specific license and restrictions
- which apply to that software, or
-2) It will contain specific license and restriction terms at the top of every
- file.
-
-==============================================================================
-Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
- LLVM Team
-
- University of Illinois at Urbana-Champaign
-
- http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimers.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimers in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the names of the LLVM Team, University of Illinois at
- Urbana-Champaign, nor the names of its contributors may be used to
- endorse or promote products derived from this Software without specific
- prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-
-
-====================File: include/llvm/Support/LICENSE.TXT====================
-LLVM System Interface Library
--------------------------------------------------------------------------------
-The LLVM System Interface Library is licensed under the Illinois Open Source
-License and has the following additional copyright:
-
-Copyright (C) 2004 eXtensible Systems, Inc.
-
-
-====================NCSA====================
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm12/lib/Target/X86/AsmParser/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/X86/AsmParser/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/X86/AsmParser/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/X86/Disassembler/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/X86/Disassembler/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/X86/Disassembler/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/X86/MCTargetDesc/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/X86/MCTargetDesc/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/X86/MCTargetDesc/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Target/X86/TargetInfo/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Target/X86/TargetInfo/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Target/X86/TargetInfo/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/TextAPI/MachO/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/TextAPI/MachO/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/TextAPI/MachO/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/ToolDrivers/llvm-dlltool/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/ToolDrivers/llvm-dlltool/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/ToolDrivers/llvm-dlltool/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/ToolDrivers/llvm-lib/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/ToolDrivers/llvm-lib/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/ToolDrivers/llvm-lib/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/AggressiveInstCombine/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Transforms/CFGuard/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/CFGuard/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/CFGuard/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroCleanup.cpp b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroCleanup.cpp
new file mode 100644
index 00000000000..298149f8b54
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroCleanup.cpp
@@ -0,0 +1,150 @@
+//===- CoroCleanup.cpp - Coroutine Cleanup Pass ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Coroutines/CoroCleanup.h"
+#include "CoroInternal.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Transforms/Scalar.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "coro-cleanup"
+
+namespace {
+// Created on demand if the CoroCleanup pass has work to do.
+struct Lowerer : coro::LowererBase {
+ IRBuilder<> Builder;
+ Lowerer(Module &M) : LowererBase(M), Builder(Context) {}
+ bool lowerRemainingCoroIntrinsics(Function &F);
+};
+}
+
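+// Run a one-off CFG simplification over F through a temporary legacy
+// FunctionPassManager; used below to tidy the function once the remaining
+// coroutine intrinsics have been lowered away.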
+static void simplifyCFG(Function &F) {
+ llvm::legacy::FunctionPassManager FPM(F.getParent());
+ FPM.add(createCFGSimplificationPass());
+
+ FPM.doInitialization();
+ FPM.run(F);
+ FPM.doFinalization();
+}
+
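+// Lower llvm.coro.subfn.addr: model the start of the coroutine frame as a
+// pair of i8* fields (the resume and destroy function pointers), index into
+// it with the intrinsic's Index operand, and replace all uses with the
+// loaded function pointer.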
+static void lowerSubFn(IRBuilder<> &Builder, CoroSubFnInst *SubFn) {
+ Builder.SetInsertPoint(SubFn);
+ Value *FrameRaw = SubFn->getFrame();
+ int Index = SubFn->getIndex();
+
+ auto *FrameTy = StructType::get(
+ SubFn->getContext(), {Builder.getInt8PtrTy(), Builder.getInt8PtrTy()});
+ PointerType *FramePtrTy = FrameTy->getPointerTo();
+
+ Builder.SetInsertPoint(SubFn);
+ auto *FramePtr = Builder.CreateBitCast(FrameRaw, FramePtrTy);
+ auto *Gep = Builder.CreateConstInBoundsGEP2_32(FrameTy, FramePtr, 0, Index);
+ auto *Load = Builder.CreateLoad(FrameTy->getElementType(Index), Gep);
+
+ SubFn->replaceAllUsesWith(Load);
+}
+
+bool Lowerer::lowerRemainingCoroIntrinsics(Function &F) {
+ bool Changed = false;
+
+ for (auto IB = inst_begin(F), E = inst_end(F); IB != E;) {
+ Instruction &I = *IB++;
+ if (auto *II = dyn_cast<IntrinsicInst>(&I)) {
+ switch (II->getIntrinsicID()) {
+ default:
+ continue;
+ case Intrinsic::coro_begin:
+ II->replaceAllUsesWith(II->getArgOperand(1));
+ break;
+ case Intrinsic::coro_free:
+ II->replaceAllUsesWith(II->getArgOperand(1));
+ break;
+ case Intrinsic::coro_alloc:
+ II->replaceAllUsesWith(ConstantInt::getTrue(Context));
+ break;
+ case Intrinsic::coro_id:
+ case Intrinsic::coro_id_retcon:
+ case Intrinsic::coro_id_retcon_once:
+ case Intrinsic::coro_id_async:
+ II->replaceAllUsesWith(ConstantTokenNone::get(Context));
+ break;
+ case Intrinsic::coro_subfn_addr:
+ lowerSubFn(Builder, cast<CoroSubFnInst>(II));
+ break;
+ }
+ II->eraseFromParent();
+ Changed = true;
+ }
+ }
+
+ if (Changed) {
+    // After replacements were made, we can clean up the function body a little.
+ simplifyCFG(F);
+ }
+
+ return Changed;
+}
+
+static bool declaresCoroCleanupIntrinsics(const Module &M) {
+ return coro::declaresIntrinsics(M, {"llvm.coro.alloc", "llvm.coro.begin",
+ "llvm.coro.subfn.addr", "llvm.coro.free",
+ "llvm.coro.id", "llvm.coro.id.retcon",
+ "llvm.coro.id.retcon.once"});
+}
+
+PreservedAnalyses CoroCleanupPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ auto &M = *F.getParent();
+ if (!declaresCoroCleanupIntrinsics(M) ||
+ !Lowerer(M).lowerRemainingCoroIntrinsics(F))
+ return PreservedAnalyses::all();
+
+ return PreservedAnalyses::none();
+}
+
+namespace {
+
+struct CoroCleanupLegacy : FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+
+ CoroCleanupLegacy() : FunctionPass(ID) {
+ initializeCoroCleanupLegacyPass(*PassRegistry::getPassRegistry());
+ }
+
+ std::unique_ptr<Lowerer> L;
+
+ // This pass has work to do only if we find intrinsics we are going to lower
+ // in the module.
+ bool doInitialization(Module &M) override {
+ if (declaresCoroCleanupIntrinsics(M))
+ L = std::make_unique<Lowerer>(M);
+ return false;
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (L)
+ return L->lowerRemainingCoroIntrinsics(F);
+ return false;
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ if (!L)
+ AU.setPreservesAll();
+ }
+ StringRef getPassName() const override { return "Coroutine Cleanup"; }
+};
+}
+
+char CoroCleanupLegacy::ID = 0;
+INITIALIZE_PASS(CoroCleanupLegacy, "coro-cleanup",
+ "Lower all coroutine related intrinsics", false, false)
+
+Pass *llvm::createCoroCleanupLegacyPass() { return new CoroCleanupLegacy(); }
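+
+// Usage sketch (illustrative, not part of the upstream file): with the new
+// pass manager, CoroCleanupPass can be scheduled directly over a function,
+// assuming a FunctionAnalysisManager FAM registered via PassBuilder:
+//   FunctionPassManager FPM;
+//   FPM.addPass(CoroCleanupPass());
+//   PreservedAnalyses PA = FPM.run(F, FAM);
+// With the legacy pass manager, createCoroCleanupLegacyPass() returns an
+// equivalent pass (registered above under the name "coro-cleanup").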
diff --git a/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroEarly.cpp b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroEarly.cpp
new file mode 100644
index 00000000000..5e5e513cdfd
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroEarly.cpp
@@ -0,0 +1,285 @@
+//===- CoroEarly.cpp - Coroutine Early Function Pass ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Coroutines/CoroEarly.h"
+#include "CoroInternal.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Pass.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "coro-early"
+
+namespace {
+// Created on demand if the coro-early pass has work to do.
+class Lowerer : public coro::LowererBase {
+ IRBuilder<> Builder;
+ PointerType *const AnyResumeFnPtrTy;
+ Constant *NoopCoro = nullptr;
+
+ void lowerResumeOrDestroy(CallBase &CB, CoroSubFnInst::ResumeKind);
+ void lowerCoroPromise(CoroPromiseInst *Intrin);
+ void lowerCoroDone(IntrinsicInst *II);
+ void lowerCoroNoop(IntrinsicInst *II);
+
+public:
+ Lowerer(Module &M)
+ : LowererBase(M), Builder(Context),
+ AnyResumeFnPtrTy(FunctionType::get(Type::getVoidTy(Context), Int8Ptr,
+ /*isVarArg=*/false)
+ ->getPointerTo()) {}
+ bool lowerEarlyIntrinsics(Function &F);
+};
+}
+
+// Replace a direct call to coro.resume or coro.destroy with an indirect call
+// to an address returned by the coro.subfn.addr intrinsic. This is done so
+// that CGPassManager recognizes devirtualization when the CoroElide pass
+// replaces a call to coro.subfn.addr with an appropriate function address.
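+//
+// For illustration only (hypothetical IR):
+//   call void @llvm.coro.resume(i8* %hdl)
+// becomes
+//   %0 = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0)  ; ResumeIndex
+//   %1 = bitcast i8* %0 to void (i8*)*
+//   call fastcc void %1(i8* %hdl)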
+void Lowerer::lowerResumeOrDestroy(CallBase &CB,
+ CoroSubFnInst::ResumeKind Index) {
+ Value *ResumeAddr = makeSubFnCall(CB.getArgOperand(0), Index, &CB);
+ CB.setCalledOperand(ResumeAddr);
+ CB.setCallingConv(CallingConv::Fast);
+}
+
+// The coroutine promise field is always at a fixed offset from the beginning
+// of the coroutine frame. The i8* coro.promise(i8*, i1 from) intrinsic adds
+// an offset to a passed pointer to move from the coroutine frame to the
+// coroutine promise and vice versa. Since we don't know exactly which
+// coroutine frame it is, we build a coroutine frame mock-up starting with two
+// function pointers, followed by a properly aligned coroutine promise field.
+// TODO: Handle the case when the coroutine promise alloca has an align
+// override.
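+//
+// For illustration only: on a hypothetical 64-bit target with an 8-byte
+// aligned promise, the mock-up {i8*, i8*, promise} puts the promise at
+// Offset == 16, so coro.promise(%hdl, i1 false) lowers to
+//   %p = getelementptr inbounds i8, i8* %hdl, i32 16
+// and the from-promise direction (i1 true) uses -16 instead.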
+void Lowerer::lowerCoroPromise(CoroPromiseInst *Intrin) {
+ Value *Operand = Intrin->getArgOperand(0);
+ Align Alignment = Intrin->getAlignment();
+ Type *Int8Ty = Builder.getInt8Ty();
+
+ auto *SampleStruct =
+ StructType::get(Context, {AnyResumeFnPtrTy, AnyResumeFnPtrTy, Int8Ty});
+ const DataLayout &DL = TheModule.getDataLayout();
+ int64_t Offset = alignTo(
+ DL.getStructLayout(SampleStruct)->getElementOffset(2), Alignment);
+ if (Intrin->isFromPromise())
+ Offset = -Offset;
+
+ Builder.SetInsertPoint(Intrin);
+ Value *Replacement =
+ Builder.CreateConstInBoundsGEP1_32(Int8Ty, Operand, Offset);
+
+ Intrin->replaceAllUsesWith(Replacement);
+ Intrin->eraseFromParent();
+}
+
+// When a coroutine reaches the final suspend point, it zeros out the
+// ResumeFnAddr in the coroutine frame (it is UB to resume from a final
+// suspend point).
+// The llvm.coro.done intrinsic is used to check whether a coroutine is
+// suspended at the final suspend point or not.
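+//
+// For illustration only (hypothetical IR):
+//   %done = call i1 @llvm.coro.done(i8* %hdl)
+// becomes
+//   %f = bitcast i8* %hdl to i8**
+//   %resume.addr = load i8*, i8** %f
+//   %done = icmp eq i8* %resume.addr, null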
+void Lowerer::lowerCoroDone(IntrinsicInst *II) {
+ Value *Operand = II->getArgOperand(0);
+
+ // ResumeFnAddr is the first pointer sized element of the coroutine frame.
+ static_assert(coro::Shape::SwitchFieldIndex::Resume == 0,
+ "resume function not at offset zero");
+ auto *FrameTy = Int8Ptr;
+ PointerType *FramePtrTy = FrameTy->getPointerTo();
+
+ Builder.SetInsertPoint(II);
+ auto *BCI = Builder.CreateBitCast(Operand, FramePtrTy);
+ auto *Load = Builder.CreateLoad(FrameTy, BCI);
+ auto *Cond = Builder.CreateICmpEQ(Load, NullPtr);
+
+ II->replaceAllUsesWith(Cond);
+ II->eraseFromParent();
+}
+
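+// llvm.coro.noop returns a handle to a coroutine that does nothing on resume
+// or destroy. For illustration only, the module-level objects created below
+// look roughly like:
+//   @NoopCoro.Frame.Const = private constant %NoopCoro.Frame
+//       { void (%NoopCoro.Frame*)* @NoopCoro.ResumeDestroy,
+//         void (%NoopCoro.Frame*)* @NoopCoro.ResumeDestroy }
+// with each llvm.coro.noop call replaced by that constant bitcast to i8*.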
+void Lowerer::lowerCoroNoop(IntrinsicInst *II) {
+ if (!NoopCoro) {
+ LLVMContext &C = Builder.getContext();
+ Module &M = *II->getModule();
+
+ // Create a noop.frame struct type.
+ StructType *FrameTy = StructType::create(C, "NoopCoro.Frame");
+ auto *FramePtrTy = FrameTy->getPointerTo();
+ auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
+ /*isVarArg=*/false);
+ auto *FnPtrTy = FnTy->getPointerTo();
+ FrameTy->setBody({FnPtrTy, FnPtrTy});
+
+ // Create a Noop function that does nothing.
+ Function *NoopFn =
+ Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
+ "NoopCoro.ResumeDestroy", &M);
+ NoopFn->setCallingConv(CallingConv::Fast);
+ auto *Entry = BasicBlock::Create(C, "entry", NoopFn);
+ ReturnInst::Create(C, Entry);
+
+ // Create a constant struct for the frame.
+ Constant* Values[] = {NoopFn, NoopFn};
+ Constant* NoopCoroConst = ConstantStruct::get(FrameTy, Values);
+ NoopCoro = new GlobalVariable(M, NoopCoroConst->getType(), /*isConstant=*/true,
+ GlobalVariable::PrivateLinkage, NoopCoroConst,
+ "NoopCoro.Frame.Const");
+ }
+
+ Builder.SetInsertPoint(II);
+ auto *NoopCoroVoidPtr = Builder.CreateBitCast(NoopCoro, Int8Ptr);
+ II->replaceAllUsesWith(NoopCoroVoidPtr);
+ II->eraseFromParent();
+}
+
+// Prior to CoroSplit, calls to coro.begin need to be marked as NoDuplicate,
+// as CoroSplit assumes there is exactly one coro.begin. After CoroSplit, the
+// NoDuplicate attribute is removed from coro.begin; otherwise, it would
+// interfere with inlining.
+static void setCannotDuplicate(CoroIdInst *CoroId) {
+ for (User *U : CoroId->users())
+ if (auto *CB = dyn_cast<CoroBeginInst>(U))
+ CB->setCannotDuplicate();
+}
+
+bool Lowerer::lowerEarlyIntrinsics(Function &F) {
+ bool Changed = false;
+ CoroIdInst *CoroId = nullptr;
+ SmallVector<CoroFreeInst *, 4> CoroFrees;
+ bool HasCoroSuspend = false;
+ for (auto IB = inst_begin(F), IE = inst_end(F); IB != IE;) {
+ Instruction &I = *IB++;
+ if (auto *CB = dyn_cast<CallBase>(&I)) {
+ switch (CB->getIntrinsicID()) {
+ default:
+ continue;
+ case Intrinsic::coro_free:
+ CoroFrees.push_back(cast<CoroFreeInst>(&I));
+ break;
+ case Intrinsic::coro_suspend:
+ // Make sure that the final suspend point is not duplicated, as the
+ // CoroSplit pass expects that there is at most one final suspend point.
+ if (cast<CoroSuspendInst>(&I)->isFinal())
+ CB->setCannotDuplicate();
+ HasCoroSuspend = true;
+ break;
+ case Intrinsic::coro_end_async:
+ case Intrinsic::coro_end:
+ // Make sure that the fallthrough coro.end is not duplicated, as the
+ // CoroSplit pass expects that there is at most one fallthrough coro.end.
+ if (cast<AnyCoroEndInst>(&I)->isFallthrough())
+ CB->setCannotDuplicate();
+ break;
+ case Intrinsic::coro_noop:
+ lowerCoroNoop(cast<IntrinsicInst>(&I));
+ break;
+ case Intrinsic::coro_id:
+ // Mark a function coming out of the frontend that contains a coro.id
+ // with a coroutine attribute.
+ if (auto *CII = cast<CoroIdInst>(&I)) {
+ if (CII->getInfo().isPreSplit()) {
+ F.addFnAttr(CORO_PRESPLIT_ATTR, UNPREPARED_FOR_SPLIT);
+ setCannotDuplicate(CII);
+ CII->setCoroutineSelf();
+ CoroId = cast<CoroIdInst>(&I);
+ }
+ }
+ break;
+ case Intrinsic::coro_id_retcon:
+ case Intrinsic::coro_id_retcon_once:
+ case Intrinsic::coro_id_async:
+ F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
+ break;
+ case Intrinsic::coro_resume:
+ lowerResumeOrDestroy(*CB, CoroSubFnInst::ResumeIndex);
+ break;
+ case Intrinsic::coro_destroy:
+ lowerResumeOrDestroy(*CB, CoroSubFnInst::DestroyIndex);
+ break;
+ case Intrinsic::coro_promise:
+ lowerCoroPromise(cast<CoroPromiseInst>(&I));
+ break;
+ case Intrinsic::coro_done:
+ lowerCoroDone(cast<IntrinsicInst>(&I));
+ break;
+ }
+ Changed = true;
+ }
+ }
+ // Make sure that all CoroFrees reference the coro.id intrinsic.
+ // The token type is not exposed through the coroutine C/C++ builtins to
+ // plain C, so we allow specifying none and fix it up here.
+ if (CoroId)
+ for (CoroFreeInst *CF : CoroFrees)
+ CF->setArgOperand(0, CoroId);
+ // A coroutine suspension could potentially lead to any argument being
+ // modified outside of the function, hence arguments should not have noalias
+ // attributes.
+ if (HasCoroSuspend)
+ for (Argument &A : F.args())
+ if (A.hasNoAliasAttr())
+ A.removeAttr(Attribute::NoAlias);
+ return Changed;
+}
+
+static bool declaresCoroEarlyIntrinsics(const Module &M) {
+ return coro::declaresIntrinsics(
+ M, {"llvm.coro.id", "llvm.coro.id.retcon", "llvm.coro.id.retcon.once",
+ "llvm.coro.id.async", "llvm.coro.destroy", "llvm.coro.done",
+ "llvm.coro.end", "llvm.coro.end.async", "llvm.coro.noop",
+ "llvm.coro.free", "llvm.coro.promise", "llvm.coro.resume",
+ "llvm.coro.suspend"});
+}
+
+PreservedAnalyses CoroEarlyPass::run(Function &F, FunctionAnalysisManager &) {
+ Module &M = *F.getParent();
+ if (!declaresCoroEarlyIntrinsics(M) || !Lowerer(M).lowerEarlyIntrinsics(F))
+ return PreservedAnalyses::all();
+
+ PreservedAnalyses PA;
+ PA.preserveSet<CFGAnalyses>();
+ return PA;
+}
+
+namespace {
+
+struct CoroEarlyLegacy : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid.
+ CoroEarlyLegacy() : FunctionPass(ID) {
+ initializeCoroEarlyLegacyPass(*PassRegistry::getPassRegistry());
+ }
+
+ std::unique_ptr<Lowerer> L;
+
+ // This pass has work to do only if we find intrinsics we are going to lower
+ // in the module.
+ bool doInitialization(Module &M) override {
+ if (declaresCoroEarlyIntrinsics(M))
+ L = std::make_unique<Lowerer>(M);
+ return false;
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (!L)
+ return false;
+
+ return L->lowerEarlyIntrinsics(F);
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.setPreservesCFG();
+ }
+ StringRef getPassName() const override {
+ return "Lower early coroutine intrinsics";
+ }
+};
+}
+
+char CoroEarlyLegacy::ID = 0;
+INITIALIZE_PASS(CoroEarlyLegacy, "coro-early",
+ "Lower early coroutine intrinsics", false, false)
+
+Pass *llvm::createCoroEarlyLegacyPass() { return new CoroEarlyLegacy(); }
diff --git a/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroElide.cpp b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroElide.cpp
new file mode 100644
index 00000000000..07a183cfc66
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroElide.cpp
@@ -0,0 +1,458 @@
+//===- CoroElide.cpp - Coroutine Frame Allocation Elision Pass ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Coroutines/CoroElide.h"
+#include "CoroInternal.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/InstructionSimplify.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "coro-elide"
+
+namespace {
+// Created on demand if the coro-elide pass has work to do.
+struct Lowerer : coro::LowererBase {
+ SmallVector<CoroIdInst *, 4> CoroIds;
+ SmallVector<CoroBeginInst *, 1> CoroBegins;
+ SmallVector<CoroAllocInst *, 1> CoroAllocs;
+ SmallVector<CoroSubFnInst *, 4> ResumeAddr;
+ DenseMap<CoroBeginInst *, SmallVector<CoroSubFnInst *, 4>> DestroyAddr;
+ SmallVector<CoroFreeInst *, 1> CoroFrees;
+ SmallPtrSet<const SwitchInst *, 4> CoroSuspendSwitches;
+
+ Lowerer(Module &M) : LowererBase(M) {}
+
+ void elideHeapAllocations(Function *F, uint64_t FrameSize, Align FrameAlign,
+ AAResults &AA);
+ bool shouldElide(Function *F, DominatorTree &DT) const;
+ void collectPostSplitCoroIds(Function *F);
+ bool processCoroId(CoroIdInst *, AAResults &AA, DominatorTree &DT);
+ bool hasEscapePath(const CoroBeginInst *,
+ const SmallPtrSetImpl<BasicBlock *> &) const;
+};
+} // end anonymous namespace
+
+// Go through the list of coro.subfn.addr intrinsics and replace them with the
+// provided constant.
+static void replaceWithConstant(Constant *Value,
+ SmallVectorImpl<CoroSubFnInst *> &Users) {
+ if (Users.empty())
+ return;
+
+ // See if we need to bitcast the constant to match the type of the intrinsic
+ // being replaced. Note: All coro.subfn.addr intrinsics return the same type,
+ // so we only need to examine the type of the first one in the list.
+ Type *IntrTy = Users.front()->getType();
+ Type *ValueTy = Value->getType();
+ if (ValueTy != IntrTy) {
+ // May need to tweak the function type to match the type expected at the
+ // use site.
+ assert(ValueTy->isPointerTy() && IntrTy->isPointerTy());
+ Value = ConstantExpr::getBitCast(Value, IntrTy);
+ }
+
+ // Now the value type matches the type of the intrinsic. Replace them all!
+ for (CoroSubFnInst *I : Users)
+ replaceAndRecursivelySimplify(I, Value);
+}
+
+// See if any operand of the call instruction references the coroutine frame.
+static bool operandReferences(CallInst *CI, AllocaInst *Frame, AAResults &AA) {
+ for (Value *Op : CI->operand_values())
+ if (AA.alias(Op, Frame) != NoAlias)
+ return true;
+ return false;
+}
+
+// Look for any tail calls referencing the coroutine frame and remove the tail
+// attribute from them, since the coroutine frame now resides on the stack and
+// a tail call implies that the function does not reference anything on the
+// stack.
+static void removeTailCallAttribute(AllocaInst *Frame, AAResults &AA) {
+ Function &F = *Frame->getFunction();
+ for (Instruction &I : instructions(F))
+ if (auto *Call = dyn_cast<CallInst>(&I))
+ if (Call->isTailCall() && operandReferences(Call, Frame, AA))
+ Call->setTailCall(false);
+}
+
+// Given a resume function @f.resume(%f.frame* %frame), returns the size
+// and expected alignment of the %f.frame type.
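+//
+// For illustration only: a hypothetical resume function declared as
+//   define void @f.resume(%f.frame* dereferenceable(40) align 8 %frame)
+// yields a frame size of 40 bytes and an alignment of 8 directly from the
+// parameter attributes, with no need to inspect %f.frame itself.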
+static std::pair<uint64_t, Align> getFrameLayout(Function *Resume) {
+ // Prefer to pull information from the function attributes.
+ auto Size = Resume->getParamDereferenceableBytes(0);
+ auto Align = Resume->getParamAlign(0);
+
+ // If those aren't given, extract them from the type.
+ if (Size == 0 || !Align) {
+ auto *FrameTy = Resume->arg_begin()->getType()->getPointerElementType();
+
+ const DataLayout &DL = Resume->getParent()->getDataLayout();
+ if (!Size) Size = DL.getTypeAllocSize(FrameTy);
+ if (!Align) Align = DL.getABITypeAlign(FrameTy);
+ }
+
+ return std::make_pair(Size, *Align);
+}
+
+// Finds the first non-alloca instruction in the entry block of a function.
+static Instruction *getFirstNonAllocaInTheEntryBlock(Function *F) {
+ for (Instruction &I : F->getEntryBlock())
+ if (!isa<AllocaInst>(&I))
+ return &I;
+ llvm_unreachable("no terminator in the entry block");
+}
+
+// To elide heap allocations we need to suppress code blocks guarded by
+// llvm.coro.alloc and llvm.coro.free instructions.
+void Lowerer::elideHeapAllocations(Function *F, uint64_t FrameSize,
+ Align FrameAlign, AAResults &AA) {
+ LLVMContext &C = F->getContext();
+ auto *InsertPt =
+ getFirstNonAllocaInTheEntryBlock(CoroIds.front()->getFunction());
+
+ // Replacing llvm.coro.alloc with false will suppress the dynamic
+ // allocation, as the frontend is expected to generate code that
+ // looks like:
+ // id = coro.id(...)
+ // mem = coro.alloc(id) ? malloc(coro.size()) : 0;
+ // coro.begin(id, mem)
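+ //
+ // For illustration only, the hypothetical result after elision is:
+ // id = coro.id(...)
+ // ; coro.alloc folds to false, so the malloc path becomes dead code
+ // %frame = alloca [FrameSize x i8], align FrameAlign
+ // ; coro.begin(id, mem) is replaced by %frame bitcast to i8*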
+ auto *False = ConstantInt::getFalse(C);
+ for (auto *CA : CoroAllocs) {
+ CA->replaceAllUsesWith(False);
+ CA->eraseFromParent();
+ }
+
+ // FIXME: Design how to transmit alignment information for every alloca that
+ // is spilled into the coroutine frame and recreate the alignment information
+ // here. Possibly we will need to do a mini SROA here and break the coroutine
+ // frame into individual AllocaInst recreating the original alignment.
+ const DataLayout &DL = F->getParent()->getDataLayout();
+ auto FrameTy = ArrayType::get(Type::getInt8Ty(C), FrameSize);
+ auto *Frame = new AllocaInst(FrameTy, DL.getAllocaAddrSpace(), "", InsertPt);
+ Frame->setAlignment(FrameAlign);
+ auto *FrameVoidPtr =
+ new BitCastInst(Frame, Type::getInt8PtrTy(C), "vFrame", InsertPt);
+
+ for (auto *CB : CoroBegins) {
+ CB->replaceAllUsesWith(FrameVoidPtr);
+ CB->eraseFromParent();
+ }
+
+ // Since the coroutine frame now lives on the stack, we need to make sure
+ // that any tail call referencing it is made a non-tail call.
+ removeTailCallAttribute(Frame, AA);
+}
+
+bool Lowerer::hasEscapePath(const CoroBeginInst *CB,
+ const SmallPtrSetImpl<BasicBlock *> &TIs) const {
+ const auto &It = DestroyAddr.find(CB);
+ assert(It != DestroyAddr.end());
+
+ // Limit the number of blocks we visit.
+ unsigned Limit = 32 * (1 + It->second.size());
+
+ SmallVector<const BasicBlock *, 32> Worklist;
+ Worklist.push_back(CB->getParent());
+
+ SmallPtrSet<const BasicBlock *, 32> Visited;
+ // Consider the basic block of each coro.destroy as visited, so that we
+ // skip any path passing through coro.destroy.
+ for (auto *DA : It->second)
+ Visited.insert(DA->getParent());
+
+ do {
+ const auto *BB = Worklist.pop_back_val();
+ if (!Visited.insert(BB).second)
+ continue;
+ if (TIs.count(BB))
+ return true;
+
+ // Conservatively say that there is potentially a path.
+ if (!--Limit)
+ return true;
+
+ auto TI = BB->getTerminator();
+ // Although the default dest of coro.suspend switches is the suspend block,
+ // which means an escape path to a normal terminator, it is reasonable to
+ // skip it since the coroutine frame doesn't change outside the coroutine
+ // body.
+ if (isa<SwitchInst>(TI) &&
+ CoroSuspendSwitches.count(cast<SwitchInst>(TI))) {
+ Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(1));
+ Worklist.push_back(cast<SwitchInst>(TI)->getSuccessor(2));
+ } else
+ Worklist.append(succ_begin(BB), succ_end(BB));
+
+ } while (!Worklist.empty());
+
+ // We have exhausted all possible paths and are certain that coro.begin
+ // cannot reach any of the terminators.
+ return false;
+}
+
+bool Lowerer::shouldElide(Function *F, DominatorTree &DT) const {
+ // If no CoroAllocs, we cannot suppress allocation, so elision is not
+ // possible.
+ if (CoroAllocs.empty())
+ return false;
+
+ // Check that, for every coro.begin, there is at least one coro.destroy
+ // directly referencing the SSA value of that coro.begin along each
+ // non-exceptional path.
+ // If the value escaped, then coro.destroy would be referencing a memory
+ // location storing that value rather than the virtual register.
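+ //
+ // For illustration only (hypothetical IR that permits elision): every
+ // non-exceptional path ends with something like
+ //   %d = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 1)  ; destroy index
+ // where %hdl is the SSA value produced by coro.begin. Had %hdl escaped to
+ // memory first, coro.subfn.addr would reference a reload instead of the
+ // SSA value, and elision would be unsafe.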
+
+ SmallPtrSet<BasicBlock *, 8> Terminators;
+ // First gather all of the non-exceptional terminators for the function.
+ // Consider the final coro.suspend as the real terminator when the current
+ // function is a coroutine.
+ for (BasicBlock &B : *F) {
+ auto *TI = B.getTerminator();
+ if (TI->getNumSuccessors() == 0 && !TI->isExceptionalTerminator() &&
+ !isa<UnreachableInst>(TI))
+ Terminators.insert(&B);
+ }
+
+ // Filter out the coro.destroys that lie along exceptional paths.
+ SmallPtrSet<CoroBeginInst *, 8> ReferencedCoroBegins;
+ for (auto &It : DestroyAddr) {
+ for (Instruction *DA : It.second) {
+ for (BasicBlock *TI : Terminators) {
+ if (DT.dominates(DA, TI->getTerminator())) {
+ ReferencedCoroBegins.insert(It.first);
+ break;
+ }
+ }
+ }
+
+ // Check whether there is any path from coro.begin to Terminators that does
+ // not pass through any of the coro.destroys.
+ if (!ReferencedCoroBegins.count(It.first) &&
+ !hasEscapePath(It.first, Terminators))
+ ReferencedCoroBegins.insert(It.first);
+ }
+
+ // If the size of the set is the same as the total number of coro.begins,
+ // that means we found a coro.free or coro.destroy referencing each
+ // coro.begin, so we can perform heap elision.
+ if (ReferencedCoroBegins.size() != CoroBegins.size())
+ return false;
+
+ // If any call in the function is a musttail call, it usually won't work
+ // because we cannot drop the tailcall attribute, and a tail call will reuse
+ // the entire stack where we are going to put the new frame. In theory a more
+ // precise analysis can be done to check whether the new frame aliases with
+ // the call; however, it is challenging to do so before the elision actually
+ // happens.
+ for (BasicBlock &BB : *F)
+ if (BB.getTerminatingMustTailCall())
+ return false;
+
+ return true;
+}
+
+void Lowerer::collectPostSplitCoroIds(Function *F) {
+ CoroIds.clear();
+ CoroSuspendSwitches.clear();
+ for (auto &I : instructions(F)) {
+ if (auto *CII = dyn_cast<CoroIdInst>(&I))
+ if (CII->getInfo().isPostSplit())
+ // If it is the coroutine itself, don't touch it.
+ if (CII->getCoroutine() != CII->getFunction())
+ CoroIds.push_back(CII);
+
+ // Consider a case like:
+ // %0 = call i8 @llvm.coro.suspend(...)
+ // switch i8 %0, label %suspend [i8 0, label %resume
+ // i8 1, label %cleanup]
+ // and collect the SwitchInsts which are used by escape analysis later.
+ if (auto *CSI = dyn_cast<CoroSuspendInst>(&I))
+ if (CSI->hasOneUse() && isa<SwitchInst>(CSI->use_begin()->getUser())) {
+ SwitchInst *SWI = cast<SwitchInst>(CSI->use_begin()->getUser());
+ if (SWI->getNumCases() == 2)
+ CoroSuspendSwitches.insert(SWI);
+ }
+ }
+}
+
+bool Lowerer::processCoroId(CoroIdInst *CoroId, AAResults &AA,
+ DominatorTree &DT) {
+ CoroBegins.clear();
+ CoroAllocs.clear();
+ CoroFrees.clear();
+ ResumeAddr.clear();
+ DestroyAddr.clear();
+
+ // Collect all coro.begin and coro.allocs associated with this coro.id.
+ for (User *U : CoroId->users()) {
+ if (auto *CB = dyn_cast<CoroBeginInst>(U))
+ CoroBegins.push_back(CB);
+ else if (auto *CA = dyn_cast<CoroAllocInst>(U))
+ CoroAllocs.push_back(CA);
+ else if (auto *CF = dyn_cast<CoroFreeInst>(U))
+ CoroFrees.push_back(CF);
+ }
+
+ // Collect all coro.subfn.addrs associated with coro.begin.
+ // Note, we only devirtualize the calls if their coro.subfn.addr refers to
+ // coro.begin directly. If we run into cases where this check is too
+ // conservative, we can consider relaxing the check.
+ for (CoroBeginInst *CB : CoroBegins) {
+ for (User *U : CB->users())
+ if (auto *II = dyn_cast<CoroSubFnInst>(U))
+ switch (II->getIndex()) {
+ case CoroSubFnInst::ResumeIndex:
+ ResumeAddr.push_back(II);
+ break;
+ case CoroSubFnInst::DestroyIndex:
+ DestroyAddr[CB].push_back(II);
+ break;
+ default:
+ llvm_unreachable("unexpected coro.subfn.addr constant");
+ }
+ }
+
+ // PostSplit coro.id refers to an array of subfunctions in its Info
+ // argument.
+ ConstantArray *Resumers = CoroId->getInfo().Resumers;
+ assert(Resumers && "PostSplit coro.id Info argument must refer to an array "
+ "of coroutine subfunctions");
+ auto *ResumeAddrConstant =
+ ConstantExpr::getExtractValue(Resumers, CoroSubFnInst::ResumeIndex);
+
+ replaceWithConstant(ResumeAddrConstant, ResumeAddr);
+
+ bool ShouldElide = shouldElide(CoroId->getFunction(), DT);
+
+ auto *DestroyAddrConstant = ConstantExpr::getExtractValue(
+ Resumers,
+ ShouldElide ? CoroSubFnInst::CleanupIndex : CoroSubFnInst::DestroyIndex);
+
+ for (auto &It : DestroyAddr)
+ replaceWithConstant(DestroyAddrConstant, It.second);
+
+ if (ShouldElide) {
+ auto FrameSizeAndAlign = getFrameLayout(cast<Function>(ResumeAddrConstant));
+ elideHeapAllocations(CoroId->getFunction(), FrameSizeAndAlign.first,
+ FrameSizeAndAlign.second, AA);
+ coro::replaceCoroFree(CoroId, /*Elide=*/true);
+ }
+
+ return true;
+}
+
+// See if there are any coro.subfn.addr instructions referring to the
+// coro.devirt trigger; if so, replace them with a direct call to the devirt
+// trigger function.
+static bool replaceDevirtTrigger(Function &F) {
+ SmallVector<CoroSubFnInst *, 1> DevirtAddr;
+ for (auto &I : instructions(F))
+ if (auto *SubFn = dyn_cast<CoroSubFnInst>(&I))
+ if (SubFn->getIndex() == CoroSubFnInst::RestartTrigger)
+ DevirtAddr.push_back(SubFn);
+
+ if (DevirtAddr.empty())
+ return false;
+
+ Module &M = *F.getParent();
+ Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
+ assert(DevirtFn && "coro.devirt.fn not found");
+ replaceWithConstant(DevirtFn, DevirtAddr);
+
+ return true;
+}
+
+static bool declaresCoroElideIntrinsics(Module &M) {
+ return coro::declaresIntrinsics(M, {"llvm.coro.id", "llvm.coro.id.async"});
+}
+
+PreservedAnalyses CoroElidePass::run(Function &F, FunctionAnalysisManager &AM) {
+ auto &M = *F.getParent();
+ if (!declaresCoroElideIntrinsics(M))
+ return PreservedAnalyses::all();
+
+ Lowerer L(M);
+ L.CoroIds.clear();
+ L.collectPostSplitCoroIds(&F);
+ // If we did not find any coro.id, there is nothing to do.
+ if (L.CoroIds.empty())
+ return PreservedAnalyses::all();
+
+ AAResults &AA = AM.getResult<AAManager>(F);
+ DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
+
+ bool Changed = false;
+ for (auto *CII : L.CoroIds)
+ Changed |= L.processCoroId(CII, AA, DT);
+
+ return Changed ? PreservedAnalyses::none() : PreservedAnalyses::all();
+}
+
+namespace {
+struct CoroElideLegacy : FunctionPass {
+ static char ID;
+ CoroElideLegacy() : FunctionPass(ID) {
+ initializeCoroElideLegacyPass(*PassRegistry::getPassRegistry());
+ }
+
+ std::unique_ptr<Lowerer> L;
+
+ bool doInitialization(Module &M) override {
+ if (declaresCoroElideIntrinsics(M))
+ L = std::make_unique<Lowerer>(M);
+ return false;
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (!L)
+ return false;
+
+ bool Changed = false;
+
+ if (F.hasFnAttribute(CORO_PRESPLIT_ATTR))
+ Changed = replaceDevirtTrigger(F);
+
+ L->CoroIds.clear();
+ L->collectPostSplitCoroIds(&F);
+ // If we did not find any coro.id, there is nothing to do.
+ if (L->CoroIds.empty())
+ return Changed;
+
+ AAResults &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
+ DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
+
+ for (auto *CII : L->CoroIds)
+ Changed |= L->processCoroId(CII, AA, DT);
+
+ return Changed;
+ }
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<AAResultsWrapperPass>();
+ AU.addRequired<DominatorTreeWrapperPass>();
+ }
+ StringRef getPassName() const override { return "Coroutine Elision"; }
+};
+}
+
+char CoroElideLegacy::ID = 0;
+INITIALIZE_PASS_BEGIN(
+ CoroElideLegacy, "coro-elide",
+ "Coroutine frame allocation elision and indirect calls replacement", false,
+ false)
+INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
+INITIALIZE_PASS_END(
+ CoroElideLegacy, "coro-elide",
+ "Coroutine frame allocation elision and indirect calls replacement", false,
+ false)
+
+Pass *llvm::createCoroElideLegacyPass() { return new CoroElideLegacy(); }
diff --git a/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroFrame.cpp b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroFrame.cpp
new file mode 100644
index 00000000000..e1e0d50979d
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -0,0 +1,2373 @@
+//===- CoroFrame.cpp - Builds and manipulates coroutine frame -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file contains classes used to discover if, for a particular value,
+// there is a path from its definition to a use that crosses a suspend point.
+//
+// Using the information discovered we form a Coroutine Frame structure to
+// contain those values. All uses of those values are replaced with appropriate
+// GEP + load from the coroutine frame. At the point of the definition we spill
+// the value into the coroutine frame.
+//
+// TODO: pack values tightly using liveness info.
+//===----------------------------------------------------------------------===//
+
+#include "CoroInternal.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Analysis/PtrUseVisitor.h"
+#include "llvm/Analysis/StackLifetime.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/DIBuilder.h"
+#include "llvm/IR/Dominators.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/OptimizedStructLayout.h"
+#include "llvm/Support/circular_raw_ostream.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/PromoteMemToReg.h"
+#include <algorithm>
+
+using namespace llvm;
+
+// The "coro-suspend-crossing" flag is very noisy. There is another debug type,
+// "coro-frame", which results in leaner debug spew.
+#define DEBUG_TYPE "coro-suspend-crossing"
+
+static cl::opt<bool> EnableReuseStorageInFrame(
+ "reuse-storage-in-coroutine-frame", cl::Hidden,
+ cl::desc(
+ "Enable the optimization which would reuse the storage in the coroutine "
+ "frame for allocas whose lifetime ranges do not overlap, for testing "
+ "purposes"),
+ llvm::cl::init(false));
+
+enum { SmallVectorThreshold = 32 };
+
+// Provides a two-way mapping between blocks and numbers.
+namespace {
+class BlockToIndexMapping {
+ SmallVector<BasicBlock *, SmallVectorThreshold> V;
+
+public:
+ size_t size() const { return V.size(); }
+
+ BlockToIndexMapping(Function &F) {
+ for (BasicBlock &BB : F)
+ V.push_back(&BB);
+ llvm::sort(V);
+ }
+
+ size_t blockToIndex(BasicBlock *BB) const {
+ auto *I = llvm::lower_bound(V, BB);
+ assert(I != V.end() && *I == BB && "BasicBlockNumbering: Unknown block");
+ return I - V.begin();
+ }
+
+ BasicBlock *indexToBlock(unsigned Index) const { return V[Index]; }
+};
+} // end anonymous namespace
+
+// The SuspendCrossingInfo maintains data that allows answering the question
+// whether, given two BasicBlocks A and B, there is a path from A to B that
+// passes through a suspend point.
+//
+// For every basic block 'i' it maintains a BlockData that consists of:
+// Consumes: a bit vector which contains a set of indices of blocks that can
+// reach block 'i'
+// Kills: a bit vector which contains a set of indices of blocks that can
+// reach block 'i', but where at least one path crosses a suspend point
+// Suspend: a boolean indicating whether block 'i' contains a suspend point.
+// End: a boolean indicating whether block 'i' contains a coro.end intrinsic.
+//
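+// For example, in a hypothetical CFG entry -> suspend -> resume, the
+// propagation below ends with Block[resume].Kills containing {entry,
+// suspend}, so a value defined in 'entry' and used in 'resume' crosses a
+// suspend point and must be spilled to the coroutine frame.
+//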
+namespace {
+struct SuspendCrossingInfo {
+ BlockToIndexMapping Mapping;
+
+ struct BlockData {
+ BitVector Consumes;
+ BitVector Kills;
+ bool Suspend = false;
+ bool End = false;
+ };
+ SmallVector<BlockData, SmallVectorThreshold> Block;
+
+ iterator_range<succ_iterator> successors(BlockData const &BD) const {
+ BasicBlock *BB = Mapping.indexToBlock(&BD - &Block[0]);
+ return llvm::successors(BB);
+ }
+
+ BlockData &getBlockData(BasicBlock *BB) {
+ return Block[Mapping.blockToIndex(BB)];
+ }
+
+ void dump() const;
+ void dump(StringRef Label, BitVector const &BV) const;
+
+ SuspendCrossingInfo(Function &F, coro::Shape &Shape);
+
+ bool hasPathCrossingSuspendPoint(BasicBlock *DefBB, BasicBlock *UseBB) const {
+ size_t const DefIndex = Mapping.blockToIndex(DefBB);
+ size_t const UseIndex = Mapping.blockToIndex(UseBB);
+
+ bool const Result = Block[UseIndex].Kills[DefIndex];
+ LLVM_DEBUG(dbgs() << UseBB->getName() << " => " << DefBB->getName()
+ << " answer is " << Result << "\n");
+ return Result;
+ }
+
+ bool isDefinitionAcrossSuspend(BasicBlock *DefBB, User *U) const {
+ auto *I = cast<Instruction>(U);
+
+ // We rewrote PHINodes, so that only the ones with exactly one incoming
+ // value need to be analyzed.
+ if (auto *PN = dyn_cast<PHINode>(I))
+ if (PN->getNumIncomingValues() > 1)
+ return false;
+
+ BasicBlock *UseBB = I->getParent();
+
+ // As a special case, treat uses by an llvm.coro.suspend.retcon or an
+ // llvm.coro.suspend.async as if they were uses in the suspend's single
+ // predecessor: the uses conceptually occur before the suspend.
+ if (isa<CoroSuspendRetconInst>(I) || isa<CoroSuspendAsyncInst>(I)) {
+ UseBB = UseBB->getSinglePredecessor();
+ assert(UseBB && "should have split coro.suspend into its own block");
+ }
+
+ return hasPathCrossingSuspendPoint(DefBB, UseBB);
+ }
+
+ bool isDefinitionAcrossSuspend(Argument &A, User *U) const {
+ return isDefinitionAcrossSuspend(&A.getParent()->getEntryBlock(), U);
+ }
+
+ bool isDefinitionAcrossSuspend(Instruction &I, User *U) const {
+ auto *DefBB = I.getParent();
+
+ // As a special case, treat values produced by an llvm.coro.suspend.*
+ // as if they were defined in the single successor: the uses
+ // conceptually occur after the suspend.
+ if (isa<AnyCoroSuspendInst>(I)) {
+ DefBB = DefBB->getSingleSuccessor();
+ assert(DefBB && "should have split coro.suspend into its own block");
+ }
+
+ return isDefinitionAcrossSuspend(DefBB, U);
+ }
+};
+} // end anonymous namespace
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void SuspendCrossingInfo::dump(StringRef Label,
+ BitVector const &BV) const {
+ dbgs() << Label << ":";
+ for (size_t I = 0, N = BV.size(); I < N; ++I)
+ if (BV[I])
+ dbgs() << " " << Mapping.indexToBlock(I)->getName();
+ dbgs() << "\n";
+}
+
+LLVM_DUMP_METHOD void SuspendCrossingInfo::dump() const {
+ for (size_t I = 0, N = Block.size(); I < N; ++I) {
+ BasicBlock *const B = Mapping.indexToBlock(I);
+ dbgs() << B->getName() << ":\n";
+ dump(" Consumes", Block[I].Consumes);
+ dump(" Kills", Block[I].Kills);
+ }
+ dbgs() << "\n";
+}
+#endif
+
+SuspendCrossingInfo::SuspendCrossingInfo(Function &F, coro::Shape &Shape)
+ : Mapping(F) {
+ const size_t N = Mapping.size();
+ Block.resize(N);
+
+ // Initialize every block so that it consumes itself
+ for (size_t I = 0; I < N; ++I) {
+ auto &B = Block[I];
+ B.Consumes.resize(N);
+ B.Kills.resize(N);
+ B.Consumes.set(I);
+ }
+
+ // Mark all CoroEnd Blocks. We do not propagate Kills beyond coro.ends as
+ // the code beyond coro.end is reachable during the initial invocation of
+ // the coroutine.
+ for (auto *CE : Shape.CoroEnds)
+ getBlockData(CE->getParent()).End = true;
+
+ // Mark all suspend blocks and indicate that they kill everything they
+ // consume. Note that crossing coro.save also requires a spill, as any code
+ // between coro.save and coro.suspend may resume the coroutine and all of the
+ // state needs to be saved by that time.
+ auto markSuspendBlock = [&](IntrinsicInst *BarrierInst) {
+ BasicBlock *SuspendBlock = BarrierInst->getParent();
+ auto &B = getBlockData(SuspendBlock);
+ B.Suspend = true;
+ B.Kills |= B.Consumes;
+ };
+ for (auto *CSI : Shape.CoroSuspends) {
+ markSuspendBlock(CSI);
+ if (auto *Save = CSI->getCoroSave())
+ markSuspendBlock(Save);
+ }
+
+ // Iterate propagating consumes and kills until they stop changing.
+ int Iteration = 0;
+ (void)Iteration;
+
+ bool Changed;
+ do {
+ LLVM_DEBUG(dbgs() << "iteration " << ++Iteration);
+ LLVM_DEBUG(dbgs() << "==============\n");
+
+ Changed = false;
+ for (size_t I = 0; I < N; ++I) {
+ auto &B = Block[I];
+ for (BasicBlock *SI : successors(B)) {
+
+ auto SuccNo = Mapping.blockToIndex(SI);
+
+ // Save the Consumes and Kills bitsets so that it is easy to see
+ // if anything changed after propagation.
+ auto &S = Block[SuccNo];
+ auto SavedConsumes = S.Consumes;
+ auto SavedKills = S.Kills;
+
+ // Propagate Kills and Consumes from block B into its successor S.
+ S.Consumes |= B.Consumes;
+ S.Kills |= B.Kills;
+
+ // If block B is a suspend block, it should propagate kills into its
+ // successor for every block B consumes.
+ if (B.Suspend) {
+ S.Kills |= B.Consumes;
+ }
+ if (S.Suspend) {
+ // If block S is a suspend block, it should kill all of the blocks it
+ // consumes.
+ S.Kills |= S.Consumes;
+ } else if (S.End) {
+ // If block S is an end block, it should not propagate kills as the
+ // blocks following coro.end() are reached during initial invocation
+ // of the coroutine while all the data are still available on the
+ // stack or in the registers.
+ S.Kills.reset();
+ } else {
+ // This is reached when block S is neither a suspend block nor a
+ // coro.end block, and we need to make sure that it is not in its own
+ // kill set.
+ S.Kills.reset(SuccNo);
+ }
+
+ // See if anything changed.
+ Changed |= (S.Kills != SavedKills) || (S.Consumes != SavedConsumes);
+
+ if (S.Kills != SavedKills) {
+ LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI->getName()
+ << "\n");
+ LLVM_DEBUG(dump("S.Kills", S.Kills));
+ LLVM_DEBUG(dump("SavedKills", SavedKills));
+ }
+ if (S.Consumes != SavedConsumes) {
+ LLVM_DEBUG(dbgs() << "\nblock " << I << " follower " << SI << "\n");
+ LLVM_DEBUG(dump("S.Consume", S.Consumes));
+ LLVM_DEBUG(dump("SavedCons", SavedConsumes));
+ }
+ }
+ }
+ } while (Changed);
+ LLVM_DEBUG(dump());
+}
+
+#undef DEBUG_TYPE // "coro-suspend-crossing"
+#define DEBUG_TYPE "coro-frame"
+
+namespace {
+class FrameTypeBuilder;
+// Mapping from a to-be-spilled value to all the users that need a reload.
+using SpillInfo = SmallMapVector<Value *, SmallVector<Instruction *, 2>, 8>;
+struct AllocaInfo {
+ AllocaInst *Alloca;
+ DenseMap<Instruction *, llvm::Optional<APInt>> Aliases;
+ bool MayWriteBeforeCoroBegin;
+ AllocaInfo(AllocaInst *Alloca,
+ DenseMap<Instruction *, llvm::Optional<APInt>> Aliases,
+ bool MayWriteBeforeCoroBegin)
+ : Alloca(Alloca), Aliases(std::move(Aliases)),
+ MayWriteBeforeCoroBegin(MayWriteBeforeCoroBegin) {}
+};
+struct FrameDataInfo {
+ // All the values (that are not allocas) that need to be spilled to the
+ // frame.
+ SpillInfo Spills;
+ // Allocas contains all values defined as allocas that need to live in the
+ // frame.
+ SmallVector<AllocaInfo, 8> Allocas;
+
+ SmallVector<Value *, 8> getAllDefs() const {
+ SmallVector<Value *, 8> Defs;
+ for (const auto &P : Spills)
+ Defs.push_back(P.first);
+ for (const auto &A : Allocas)
+ Defs.push_back(A.Alloca);
+ return Defs;
+ }
+
+ uint32_t getFieldIndex(Value *V) const {
+ auto Itr = FieldIndexMap.find(V);
+ assert(Itr != FieldIndexMap.end() &&
+ "Value does not have a frame field index");
+ return Itr->second;
+ }
+
+ void setFieldIndex(Value *V, uint32_t Index) {
+ assert((LayoutIndexUpdateStarted || FieldIndexMap.count(V) == 0) &&
+ "Cannot set the index for the same field twice.");
+ FieldIndexMap[V] = Index;
+ }
+
+ // Remap the index of every field in the frame, using the final layout index.
+ void updateLayoutIndex(FrameTypeBuilder &B);
+
+private:
+ // LayoutIndexUpdateStarted is used to avoid updating the index of any field
+ // twice by mistake.
+ bool LayoutIndexUpdateStarted = false;
+ // Map from values to their slot indexes on the frame. They will be first set
+ // with their original insertion field index. After the frame is built, their
+ // indexes will be updated into the final layout index.
+ DenseMap<Value *, uint32_t> FieldIndexMap;
+};
+} // namespace
+
+#ifndef NDEBUG
+static void dumpSpills(StringRef Title, const SpillInfo &Spills) {
+ dbgs() << "------------- " << Title << "--------------\n";
+ for (const auto &E : Spills) {
+ E.first->dump();
+ dbgs() << " user: ";
+ for (auto *I : E.second)
+ I->dump();
+ }
+}
+
+static void dumpAllocas(const SmallVectorImpl<AllocaInfo> &Allocas) {
+ dbgs() << "------------- Allocas --------------\n";
+ for (const auto &A : Allocas) {
+ A.Alloca->dump();
+ }
+}
+#endif
+
+namespace {
+using FieldIDType = size_t;
+// We cannot rely solely on the natural alignment of a type when building a
+// coroutine frame; if the alignment specified on the Alloca instruction
+// differs from the natural alignment of the alloca type, we will need to
+// insert padding.
+class FrameTypeBuilder {
+private:
+ struct Field {
+ uint64_t Size;
+ uint64_t Offset;
+ Type *Ty;
+ FieldIDType LayoutFieldIndex;
+ Align Alignment;
+ Align TyAlignment;
+ };
+
+ const DataLayout &DL;
+ LLVMContext &Context;
+ uint64_t StructSize = 0;
+ Align StructAlign;
+ bool IsFinished = false;
+
+ SmallVector<Field, 8> Fields;
+ DenseMap<Value*, unsigned> FieldIndexByKey;
+
+public:
+ FrameTypeBuilder(LLVMContext &Context, DataLayout const &DL)
+ : DL(DL), Context(Context) {}
+
+ /// Add a field to this structure for the storage of an `alloca`
+ /// instruction.
+ LLVM_NODISCARD FieldIDType addFieldForAlloca(AllocaInst *AI,
+ bool IsHeader = false) {
+ Type *Ty = AI->getAllocatedType();
+
+ // Make an array type if this is a static array allocation.
+ if (AI->isArrayAllocation()) {
+ if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
+ Ty = ArrayType::get(Ty, CI->getValue().getZExtValue());
+ else
+ report_fatal_error("Coroutines cannot handle non-static allocas yet");
+ }
+
+ return addField(Ty, AI->getAlign(), IsHeader);
+ }
+
+ /// We want to put the allocas whose lifetime ranges do not overlap
+ /// into one slot of the coroutine frame.
+ /// Consider the example at: https://bugs.llvm.org/show_bug.cgi?id=45566
+ ///
+ /// cppcoro::task<void> alternative_paths(bool cond) {
+ /// if (cond) {
+ /// big_structure a;
+ /// process(a);
+ /// co_await something();
+ /// } else {
+ /// big_structure b;
+ /// process2(b);
+ /// co_await something();
+ /// }
+ /// }
+ ///
+ /// We want to put variable a and variable b in the same slot to
+ /// reduce the size of the coroutine frame.
+ ///
+ /// This function uses the StackLifetime algorithm to partition the
+ /// AllocaInsts in Spills into non-overlapping sets, in order to put allocas
+ /// in the same non-overlapping set into the same slot in the coroutine
+ /// frame. It then adds a field for each non-overlapping set, using the
+ /// largest type in the set as the field type.
+ ///
+ /// Side effects: Because we sort the allocas, the order of allocas in the
+ /// frame may differ from the order in the source code.
+ void addFieldForAllocas(const Function &F, FrameDataInfo &FrameData,
+ coro::Shape &Shape);
+
+ /// Add a field to this structure.
+ LLVM_NODISCARD FieldIDType addField(Type *Ty, MaybeAlign FieldAlignment,
+ bool IsHeader = false) {
+ assert(!IsFinished && "adding fields to a finished builder");
+ assert(Ty && "must provide a type for a field");
+
+ // The field size is always the alloc size of the type.
+ uint64_t FieldSize = DL.getTypeAllocSize(Ty);
+
+ // The field alignment might not be the type alignment, but we need
+ // to remember the type alignment anyway to build the type.
+ Align TyAlignment = DL.getABITypeAlign(Ty);
+ if (!FieldAlignment) FieldAlignment = TyAlignment;
+
+ // Lay out header fields immediately.
+ uint64_t Offset;
+ if (IsHeader) {
+ Offset = alignTo(StructSize, FieldAlignment);
+ StructSize = Offset + FieldSize;
+
+ // Everything else has a flexible offset.
+ } else {
+ Offset = OptimizedStructLayoutField::FlexibleOffset;
+ }
+
+ Fields.push_back({FieldSize, Offset, Ty, 0, *FieldAlignment, TyAlignment});
+ return Fields.size() - 1;
+ }
+
+ /// Finish the layout and set the body on the given type.
+ void finish(StructType *Ty);
+
+ uint64_t getStructSize() const {
+ assert(IsFinished && "not yet finished!");
+ return StructSize;
+ }
+
+ Align getStructAlign() const {
+ assert(IsFinished && "not yet finished!");
+ return StructAlign;
+ }
+
+ FieldIDType getLayoutFieldIndex(FieldIDType Id) const {
+ assert(IsFinished && "not yet finished!");
+ return Fields[Id].LayoutFieldIndex;
+ }
+};
+} // namespace
+
+void FrameDataInfo::updateLayoutIndex(FrameTypeBuilder &B) {
+ auto Updater = [&](Value *I) {
+ setFieldIndex(I, B.getLayoutFieldIndex(getFieldIndex(I)));
+ };
+ LayoutIndexUpdateStarted = true;
+ for (auto &S : Spills)
+ Updater(S.first);
+ for (const auto &A : Allocas)
+ Updater(A.Alloca);
+ LayoutIndexUpdateStarted = false;
+}
+
+void FrameTypeBuilder::addFieldForAllocas(const Function &F,
+ FrameDataInfo &FrameData,
+ coro::Shape &Shape) {
+ DenseMap<AllocaInst *, unsigned int> AllocaIndex;
+ using AllocaSetType = SmallVector<AllocaInst *, 4>;
+ SmallVector<AllocaSetType, 4> NonOverlapedAllocas;
+
+ // We need to add fields for the allocas at the end of this function.
+ // However, this function has multiple exits, so we use this RAII helper to
+ // run that code on every exit and avoid duplicating it.
+ struct RAIIHelper {
+ std::function<void()> func;
+ RAIIHelper(std::function<void()> &&func) : func(func) {}
+ ~RAIIHelper() { func(); }
+ } Helper([&]() {
+ for (auto AllocaList : NonOverlapedAllocas) {
+ auto *LargestAI = *AllocaList.begin();
+ FieldIDType Id = addFieldForAlloca(LargestAI);
+ for (auto *Alloca : AllocaList)
+ FrameData.setFieldIndex(Alloca, Id);
+ }
+ });
+
+ if (!Shape.ReuseFrameSlot && !EnableReuseStorageInFrame) {
+ for (const auto &A : FrameData.Allocas) {
+ AllocaInst *Alloca = A.Alloca;
+ AllocaIndex[Alloca] = NonOverlapedAllocas.size();
+ NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
+ }
+ return;
+ }
+
+ // Because there are paths from the lifetime.start to coro.end
+ // for each alloca, the live ranges of all allocas overlap
+ // in the blocks that contain coro.end and their successor blocks.
+ // So we choose to skip these blocks when we calculate the live range
+ // for each alloca. This should be reasonable, since there shouldn't be uses
+ // in these blocks and the coroutine frame shouldn't be used outside the
+ // coroutine body.
+ //
+ // Note that the user of coro.suspend may not be a SwitchInst. However, this
+ // case seems too complex to handle. And it is harmless to skip these
+ // patterns, since it just prevents putting the allocas into the same
+ // slot.
+ DenseMap<SwitchInst *, BasicBlock *> DefaultSuspendDest;
+ for (auto CoroSuspendInst : Shape.CoroSuspends) {
+ for (auto U : CoroSuspendInst->users()) {
+ if (auto *ConstSWI = dyn_cast<SwitchInst>(U)) {
+ auto *SWI = const_cast<SwitchInst *>(ConstSWI);
+ DefaultSuspendDest[SWI] = SWI->getDefaultDest();
+ SWI->setDefaultDest(SWI->getSuccessor(1));
+ }
+ }
+ }
+
+ auto ExtractAllocas = [&]() {
+ AllocaSetType Allocas;
+ Allocas.reserve(FrameData.Allocas.size());
+ for (const auto &A : FrameData.Allocas)
+ Allocas.push_back(A.Alloca);
+ return Allocas;
+ };
+ StackLifetime StackLifetimeAnalyzer(F, ExtractAllocas(),
+ StackLifetime::LivenessType::May);
+ StackLifetimeAnalyzer.run();
+ auto DoAllocasInterfere = [&](const AllocaInst *AI1, const AllocaInst *AI2) {
+ return StackLifetimeAnalyzer.getLiveRange(AI1).overlaps(
+ StackLifetimeAnalyzer.getLiveRange(AI2));
+ };
+ auto GetAllocaSize = [&](const AllocaInfo &A) {
+ Optional<TypeSize> RetSize = A.Alloca->getAllocationSizeInBits(DL);
+ assert(RetSize && "Variable Length Arrays (VLA) are not supported.\n");
+ assert(!RetSize->isScalable() && "Scalable vectors are not yet supported");
+ return RetSize->getFixedSize();
+ };
+ // Put larger allocas in the front. So the larger allocas have higher
+ // priority to merge, which can save more space potentially. Also each
+ // AllocaSet would be ordered. So we can get the largest Alloca in one
+ // AllocaSet easily.
+ sort(FrameData.Allocas, [&](const auto &Iter1, const auto &Iter2) {
+ return GetAllocaSize(Iter1) > GetAllocaSize(Iter2);
+ });
+ for (const auto &A : FrameData.Allocas) {
+ AllocaInst *Alloca = A.Alloca;
+ bool Merged = false;
+ // Try to find a set in NonOverlapedAllocas that this alloca does not
+ // interfere with; if there is one, insert the alloca into that set.
+ for (auto &AllocaSet : NonOverlapedAllocas) {
+ assert(!AllocaSet.empty() && "Processing Alloca Set is not empty.\n");
+ bool NoInterference = none_of(AllocaSet, [&](auto Iter) {
+ return DoAllocasInterfere(Alloca, Iter);
+ });
+ // If the alignment of A is a multiple of the alignment of B, the address
+ // of A satisfies the alignment requirement of B.
+ //
+ // There may be other, more fine-grained strategies to handle the alignment
+ // information during the merging process, but they seem hard to implement
+ // while providing little benefit.
+ bool Alignable = [&]() -> bool {
+ auto *LargestAlloca = *AllocaSet.begin();
+ return LargestAlloca->getAlign().value() % Alloca->getAlign().value() ==
+ 0;
+ }();
+ bool CouldMerge = NoInterference && Alignable;
+ if (!CouldMerge)
+ continue;
+ AllocaIndex[Alloca] = AllocaIndex[*AllocaSet.begin()];
+ AllocaSet.push_back(Alloca);
+ Merged = true;
+ break;
+ }
+ if (!Merged) {
+ AllocaIndex[Alloca] = NonOverlapedAllocas.size();
+ NonOverlapedAllocas.emplace_back(AllocaSetType(1, Alloca));
+ }
+ }
+ // Restore the default destination saved above for each switch
+ // statement we modified.
+ for (auto SwitchAndDefaultDest : DefaultSuspendDest) {
+ SwitchInst *SWI = SwitchAndDefaultDest.first;
+ BasicBlock *DestBB = SwitchAndDefaultDest.second;
+ SWI->setDefaultDest(DestBB);
+ }
+ // This debug output shows which allocas were merged into one slot.
+ LLVM_DEBUG(for (auto &AllocaSet
+ : NonOverlapedAllocas) {
+ if (AllocaSet.size() > 1) {
+ dbgs() << "In Function:" << F.getName() << "\n";
+ dbgs() << "Find Union Set "
+ << "\n";
+ dbgs() << "\tAllocas are \n";
+ for (auto Alloca : AllocaSet)
+ dbgs() << "\t\t" << *Alloca << "\n";
+ }
+ });
+}
+
+void FrameTypeBuilder::finish(StructType *Ty) {
+ assert(!IsFinished && "already finished!");
+
+ // Prepare the optimal-layout field array.
+ // The Id in the layout field is a pointer to our Field for it.
+ SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
+ LayoutFields.reserve(Fields.size());
+ for (auto &Field : Fields) {
+ LayoutFields.emplace_back(&Field, Field.Size, Field.Alignment,
+ Field.Offset);
+ }
+
+ // Perform layout.
+ auto SizeAndAlign = performOptimizedStructLayout(LayoutFields);
+ StructSize = SizeAndAlign.first;
+ StructAlign = SizeAndAlign.second;
+
+ auto getField = [](const OptimizedStructLayoutField &LayoutField) -> Field & {
+ return *static_cast<Field *>(const_cast<void*>(LayoutField.Id));
+ };
+
+ // We need to produce a packed struct type if there's a field whose
+ // assigned offset isn't a multiple of its natural type alignment.
+ bool Packed = [&] {
+ for (auto &LayoutField : LayoutFields) {
+ auto &F = getField(LayoutField);
+ if (!isAligned(F.TyAlignment, LayoutField.Offset))
+ return true;
+ }
+ return false;
+ }();
+
+ // Build the struct body.
+ SmallVector<Type*, 16> FieldTypes;
+ FieldTypes.reserve(LayoutFields.size() * 3 / 2);
+ uint64_t LastOffset = 0;
+ for (auto &LayoutField : LayoutFields) {
+ auto &F = getField(LayoutField);
+
+ auto Offset = LayoutField.Offset;
+
+ // Add a padding field if there's a padding gap and we're either
+ // building a packed struct or the padding gap is more than we'd
+ // get from aligning to the field type's natural alignment.
+ assert(Offset >= LastOffset);
+ if (Offset != LastOffset) {
+ if (Packed || alignTo(LastOffset, F.TyAlignment) != Offset)
+ FieldTypes.push_back(ArrayType::get(Type::getInt8Ty(Context),
+ Offset - LastOffset));
+ }
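+ //
+ // For illustration only (hypothetical numbers): if the previous field
+ // ended at offset 9 and this field is an i32 assigned to offset 16, then
+ // alignTo(9, 4) == 12 != 16, so a [7 x i8] padding field is emitted above
+ // to keep the IR layout at the assigned offsets.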
+
+ F.Offset = Offset;
+ F.LayoutFieldIndex = FieldTypes.size();
+
+ FieldTypes.push_back(F.Ty);
+ LastOffset = Offset + F.Size;
+ }
+
+ Ty->setBody(FieldTypes, Packed);
+
+#ifndef NDEBUG
+ // Check that the IR layout matches the offsets we expect.
+ auto Layout = DL.getStructLayout(Ty);
+ for (auto &F : Fields) {
+ assert(Ty->getElementType(F.LayoutFieldIndex) == F.Ty);
+ assert(Layout->getElementOffset(F.LayoutFieldIndex) == F.Offset);
+ }
+#endif
+
+ IsFinished = true;
+}
+
+// Build a struct that will keep state for an active coroutine.
+// struct f.frame {
+// ResumeFnTy ResumeFnAddr;
+// ResumeFnTy DestroyFnAddr;
+// int ResumeIndex;
+// ... promise (if present) ...
+// ... spills ...
+// };
+static StructType *buildFrameType(Function &F, coro::Shape &Shape,
+ FrameDataInfo &FrameData) {
+ LLVMContext &C = F.getContext();
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ StructType *FrameTy = [&] {
+ SmallString<32> Name(F.getName());
+ Name.append(".Frame");
+ return StructType::create(C, Name);
+ }();
+
+ FrameTypeBuilder B(C, DL);
+
+ AllocaInst *PromiseAlloca = Shape.getPromiseAlloca();
+ Optional<FieldIDType> SwitchIndexFieldId;
+
+ if (Shape.ABI == coro::ABI::Switch) {
+ auto *FramePtrTy = FrameTy->getPointerTo();
+ auto *FnTy = FunctionType::get(Type::getVoidTy(C), FramePtrTy,
+ /*IsVarArg=*/false);
+ auto *FnPtrTy = FnTy->getPointerTo();
+
+ // Add header fields for the resume and destroy functions.
+ // We can rely on these being perfectly packed.
+ (void)B.addField(FnPtrTy, None, /*header*/ true);
+ (void)B.addField(FnPtrTy, None, /*header*/ true);
+
+ // PromiseAlloca field needs to be explicitly added here because it's
+ // a header field with a fixed offset based on its alignment. Hence it
+ // needs special handling and cannot be added to FrameData.Allocas.
+ if (PromiseAlloca)
+ FrameData.setFieldIndex(
+ PromiseAlloca, B.addFieldForAlloca(PromiseAlloca, /*header*/ true));
+
+ // Add a field to store the suspend index. This doesn't need to
+ // be in the header.
+ unsigned IndexBits = std::max(1U, Log2_64_Ceil(Shape.CoroSuspends.size()));
+ Type *IndexType = Type::getIntNTy(C, IndexBits);
+
+ SwitchIndexFieldId = B.addField(IndexType, None);
+ } else {
+ assert(PromiseAlloca == nullptr && "lowering doesn't support promises");
+ }
+
+ // Because multiple allocas may share the same field slot,
+ // we add the alloca fields here.
+ B.addFieldForAllocas(F, FrameData, Shape);
+ // Add PromiseAlloca to Allocas list so that
+ // 1. updateLayoutIndex could update its index after
+ // `performOptimizedStructLayout`
+ // 2. it is processed in insertSpills.
+ if (Shape.ABI == coro::ABI::Switch && PromiseAlloca)
+ // We assume that the promise alloca won't be modified before
+ // CoroBegin and that no alias will be created before CoroBegin.
+ FrameData.Allocas.emplace_back(
+ PromiseAlloca, DenseMap<Instruction *, llvm::Optional<APInt>>{}, false);
+ // Create an entry for every spilled value.
+ for (auto &S : FrameData.Spills) {
+ Type *FieldType = S.first->getType();
+ // For byval arguments, we need to store the pointed-to value in the frame,
+ // instead of the pointer itself.
+ if (const Argument *A = dyn_cast<Argument>(S.first))
+ if (A->hasByValAttr())
+ FieldType = FieldType->getPointerElementType();
+ FieldIDType Id = B.addField(FieldType, None);
+ FrameData.setFieldIndex(S.first, Id);
+ }
+
+ B.finish(FrameTy);
+ FrameData.updateLayoutIndex(B);
+ Shape.FrameAlign = B.getStructAlign();
+ Shape.FrameSize = B.getStructSize();
+
+ switch (Shape.ABI) {
+ case coro::ABI::Switch:
+ // In the switch ABI, remember the switch-index field.
+ Shape.SwitchLowering.IndexField =
+ B.getLayoutFieldIndex(*SwitchIndexFieldId);
+
+ // Also round the frame size up to a multiple of its alignment, as is
+ // generally expected in C/C++.
+ Shape.FrameSize = alignTo(Shape.FrameSize, Shape.FrameAlign);
+ break;
+
+ // In the retcon ABI, remember whether the frame is inline in the storage.
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce: {
+ auto Id = Shape.getRetconCoroId();
+ Shape.RetconLowering.IsFrameInlineInStorage
+ = (B.getStructSize() <= Id->getStorageSize() &&
+ B.getStructAlign() <= Id->getStorageAlignment());
+ break;
+ }
+ case coro::ABI::Async: {
+ Shape.AsyncLowering.FrameOffset =
+ alignTo(Shape.AsyncLowering.ContextHeaderSize, Shape.FrameAlign);
+ // Also make the final context size a multiple of the context alignment to
+ // make allocation easier for allocators.
+ Shape.AsyncLowering.ContextSize =
+ alignTo(Shape.AsyncLowering.FrameOffset + Shape.FrameSize,
+ Shape.AsyncLowering.getContextAlignment());
+ if (Shape.AsyncLowering.getContextAlignment() < Shape.FrameAlign) {
+ report_fatal_error(
+ "The alignment requirment of frame variables cannot be higher than "
+ "the alignment of the async function context");
+ }
+ break;
+ }
+ }
+
+ return FrameTy;
+}
+
+// We use a pointer use visitor to track how an alloca is being used.
+// The goal is to be able to answer the following three questions:
+// 1. Should this alloca be allocated on the frame instead?
+// 2. Could the content of the alloca be modified prior to CoroBegin, which
+//    would require copying the data from the alloca to the frame after
+//    CoroBegin?
+// 3. Are there any aliases created for this alloca prior to CoroBegin but
+//    used after CoroBegin? If so, we will need to recreate the aliases after
+//    CoroBegin based off the frame.
+//
+// To answer question 1, we track two things:
+//   a. The list of all basic blocks that use this alloca or any of its
+//      aliases. At the end, we check whether any two of those basic blocks
+//      cross suspension points. If so, this alloca must be put on the frame.
+//   b. Whether the alloca or any alias of the alloca is escaped at some
+//      point, either by storing the address somewhere, or by passing the
+//      address to a function call that might capture it. If it is ever
+//      escaped, this alloca must be put on the frame conservatively.
+//
+// To answer question 2, we track the variable MayWriteBeforeCoroBegin.
+// Whenever a potential write happens, either through a store instruction, a
+// function call or any of the memory intrinsics, we check whether the
+// instruction is prior to CoroBegin.
+//
+// To answer question 3, we track the offsets of all aliases created for the
+// alloca prior to CoroBegin but used after CoroBegin. llvm::Optional is used
+// to be able to represent the case when the offset is unknown (e.g. when you
+// have a PHINode that takes in different offset values). We cannot handle
+// unknown offsets and will report a fatal error. This is the known limitation
+// left out; an ideal solution would likely require a significant redesign.
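+//
+// An illustrative example for question 3 (hypothetical IR):
+//
+//   %x = alloca [8 x i8]
+//   %a = getelementptr inbounds [8 x i8], [8 x i8]* %x, i64 0, i64 4
+//   %hdl = call i8* @llvm.coro.begin(...)
+//   ; ...suspend...
+//   store i8 1, i8* %a        ; alias %a (offset 4) used after coro.begin
+//
+// Since %x lives on the frame, %a must be recreated after coro.begin as the
+// frame address of %x plus the recorded offset of 4.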
+namespace {
+struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
+ using Base = PtrUseVisitor<AllocaUseVisitor>;
+ AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
+ const CoroBeginInst &CB, const SuspendCrossingInfo &Checker)
+ : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker) {}
+
+ void visit(Instruction &I) {
+ UserBBs.insert(I.getParent());
+ Base::visit(I);
+ // If the pointer is escaped prior to CoroBegin, we have to assume it would
+ // be written into before CoroBegin as well.
+ if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
+ MayWriteBeforeCoroBegin = true;
+ }
+ }
+  // We need to provide this overload as PtrUseVisitor uses a pointer-based
+  // visiting function.
+ void visit(Instruction *I) { return visit(*I); }
+
+ void visitPHINode(PHINode &I) {
+ enqueueUsers(I);
+ handleAlias(I);
+ }
+
+ void visitSelectInst(SelectInst &I) {
+ enqueueUsers(I);
+ handleAlias(I);
+ }
+
+ void visitStoreInst(StoreInst &SI) {
+    // Regardless of whether the alias of the alloca is the value operand or
+    // the pointer operand, we need to assume the alloca is being written to.
+ handleMayWrite(SI);
+
+ if (SI.getValueOperand() != U->get())
+ return;
+
+ // We are storing the pointer into a memory location, potentially escaping.
+ // As an optimization, we try to detect simple cases where it doesn't
+ // actually escape, for example:
+ // %ptr = alloca ..
+ // %addr = alloca ..
+ // store %ptr, %addr
+ // %x = load %addr
+ // ..
+    // If %addr is only used by loading from it, we could simply treat %x as
+    // another alias of %ptr, and not consider %ptr escaped.
+ auto IsSimpleStoreThenLoad = [&]() {
+ auto *AI = dyn_cast<AllocaInst>(SI.getPointerOperand());
+ // If the memory location we are storing to is not an alloca, it
+ // could be an alias of some other memory locations, which is difficult
+ // to analyze.
+ if (!AI)
+ return false;
+ // StoreAliases contains aliases of the memory location stored into.
+ SmallVector<Instruction *, 4> StoreAliases = {AI};
+ while (!StoreAliases.empty()) {
+ Instruction *I = StoreAliases.pop_back_val();
+ for (User *U : I->users()) {
+ // If we are loading from the memory location, we are creating an
+ // alias of the original pointer.
+ if (auto *LI = dyn_cast<LoadInst>(U)) {
+ enqueueUsers(*LI);
+ handleAlias(*LI);
+ continue;
+ }
+          // If we are overwriting the memory location, the pointer certainly
+          // won't escape.
+ if (auto *S = dyn_cast<StoreInst>(U))
+ if (S->getPointerOperand() == I)
+ continue;
+ if (auto *II = dyn_cast<IntrinsicInst>(U))
+ if (II->isLifetimeStartOrEnd())
+ continue;
+          // BitCastInst creates aliases of the memory location being stored
+          // into.
+ if (auto *BI = dyn_cast<BitCastInst>(U)) {
+ StoreAliases.push_back(BI);
+ continue;
+ }
+ return false;
+ }
+ }
+
+ return true;
+ };
+
+ if (!IsSimpleStoreThenLoad())
+ PI.setEscaped(&SI);
+ }
+
+ // All mem intrinsics modify the data.
+ void visitMemIntrinsic(MemIntrinsic &MI) { handleMayWrite(MI); }
+
+ void visitBitCastInst(BitCastInst &BC) {
+ Base::visitBitCastInst(BC);
+ handleAlias(BC);
+ }
+
+ void visitAddrSpaceCastInst(AddrSpaceCastInst &ASC) {
+ Base::visitAddrSpaceCastInst(ASC);
+ handleAlias(ASC);
+ }
+
+ void visitGetElementPtrInst(GetElementPtrInst &GEPI) {
+ // The base visitor will adjust Offset accordingly.
+ Base::visitGetElementPtrInst(GEPI);
+ handleAlias(GEPI);
+ }
+
+ void visitCallBase(CallBase &CB) {
+ for (unsigned Op = 0, OpCount = CB.getNumArgOperands(); Op < OpCount; ++Op)
+ if (U->get() == CB.getArgOperand(Op) && !CB.doesNotCapture(Op))
+ PI.setEscaped(&CB);
+ handleMayWrite(CB);
+ }
+
+ bool getShouldLiveOnFrame() const {
+ if (!ShouldLiveOnFrame)
+ ShouldLiveOnFrame = computeShouldLiveOnFrame();
+ return ShouldLiveOnFrame.getValue();
+ }
+
+ bool getMayWriteBeforeCoroBegin() const { return MayWriteBeforeCoroBegin; }
+
+ DenseMap<Instruction *, llvm::Optional<APInt>> getAliasesCopy() const {
+ assert(getShouldLiveOnFrame() && "This method should only be called if the "
+ "alloca needs to live on the frame.");
+    for (const auto &P : AliasOffsetMap)
+ if (!P.second)
+ report_fatal_error("Unable to handle an alias with unknown offset "
+ "created before CoroBegin.");
+    return AliasOffsetMap;
+ }
+
+private:
+ const DominatorTree &DT;
+ const CoroBeginInst &CoroBegin;
+ const SuspendCrossingInfo &Checker;
+  // All aliases of the original AllocaInst, created before CoroBegin and used
+  // after CoroBegin. Each entry contains the instruction and the offset into
+  // the original alloca. They need to be recreated after CoroBegin off the
+  // frame.
+  DenseMap<Instruction *, llvm::Optional<APInt>> AliasOffsetMap{};
+ SmallPtrSet<BasicBlock *, 2> UserBBs{};
+ bool MayWriteBeforeCoroBegin{false};
+
+ mutable llvm::Optional<bool> ShouldLiveOnFrame{};
+
+ bool computeShouldLiveOnFrame() const {
+ if (PI.isEscaped())
+ return true;
+
+ for (auto *BB1 : UserBBs)
+ for (auto *BB2 : UserBBs)
+ if (Checker.hasPathCrossingSuspendPoint(BB1, BB2))
+ return true;
+
+ return false;
+ }
+
+ void handleMayWrite(const Instruction &I) {
+ if (!DT.dominates(&CoroBegin, &I))
+ MayWriteBeforeCoroBegin = true;
+ }
+
+ bool usedAfterCoroBegin(Instruction &I) {
+ for (auto &U : I.uses())
+ if (DT.dominates(&CoroBegin, U))
+ return true;
+ return false;
+ }
+
+ void handleAlias(Instruction &I) {
+ // We track all aliases created prior to CoroBegin but used after.
+ // These aliases may need to be recreated after CoroBegin if the alloca
+    // needs to live on the frame.
+ if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
+ return;
+
+    if (!IsOffsetKnown) {
+      AliasOffsetMap[&I].reset();
+    } else {
+      auto Itr = AliasOffsetMap.find(&I);
+      if (Itr == AliasOffsetMap.end()) {
+        AliasOffsetMap[&I] = Offset;
+      } else if (Itr->second.hasValue() && Itr->second.getValue() != Offset) {
+        // If we have seen two different possible values for this alias, we set
+        // it to empty.
+        AliasOffsetMap[&I].reset();
+      }
+    }
+ }
+};
+} // namespace
+
+// We need to make room to insert a spill after the initial PHIs, but before
+// the catchswitch instruction. Placing the spill before the catchswitch would
+// violate the requirement that a catchswitch, like all other EH pads, must be
+// the first non-PHI instruction in a block.
+//
+// Split away catchswitch into a separate block and insert in its place:
+//
+// cleanuppad <InsertPt> cleanupret.
+//
+// cleanupret instruction will act as an insert point for the spill.
+static Instruction *splitBeforeCatchSwitch(CatchSwitchInst *CatchSwitch) {
+ BasicBlock *CurrentBlock = CatchSwitch->getParent();
+ BasicBlock *NewBlock = CurrentBlock->splitBasicBlock(CatchSwitch);
+ CurrentBlock->getTerminator()->eraseFromParent();
+
+ auto *CleanupPad =
+ CleanupPadInst::Create(CatchSwitch->getParentPad(), {}, "", CurrentBlock);
+ auto *CleanupRet =
+ CleanupReturnInst::Create(CleanupPad, NewBlock, CurrentBlock);
+ return CleanupRet;
+}
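+
+// For example (an illustrative sketch; block and value names are made up),
+// a block of the form
+//
+//   bb:
+//     %p = phi ...
+//     %cs = catchswitch within %parent [...] unwind ...
+//
+// becomes:
+//
+//   bb:
+//     %p = phi ...
+//     %pad = cleanuppad within %parent []
+//     cleanupret from %pad unwind label %bb.split  ; <- spill insertion point
+//   bb.split:
+//     %cs = catchswitch within %parent [...] unwind ...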
+
+// Replace all alloca and SSA values that are accessed across suspend points
+// with GetElementPtr from the coroutine frame + loads and stores. Create an
+// AllocaSpillBB that will become the new entry block for the resume parts of
+// the coroutine:
+//
+// %hdl = coro.begin(...)
+// whatever
+//
+// becomes:
+//
+// %hdl = coro.begin(...)
+//    %FramePtr = bitcast i8* %hdl to %f.frame*
+// br label %AllocaSpillBB
+//
+// AllocaSpillBB:
+// ; geps corresponding to allocas that were moved to coroutine frame
+//    br label %PostSpill
+//
+// PostSpill:
+// whatever
+//
+//
+static Instruction *insertSpills(const FrameDataInfo &FrameData,
+ coro::Shape &Shape) {
+ auto *CB = Shape.CoroBegin;
+ LLVMContext &C = CB->getContext();
+ IRBuilder<> Builder(CB->getNextNode());
+ StructType *FrameTy = Shape.FrameTy;
+ PointerType *FramePtrTy = FrameTy->getPointerTo();
+ auto *FramePtr =
+ cast<Instruction>(Builder.CreateBitCast(CB, FramePtrTy, "FramePtr"));
+ DominatorTree DT(*CB->getFunction());
+ SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
+
+ // Create a GEP with the given index into the coroutine frame for the original
+ // value Orig. Appends an extra 0 index for array-allocas, preserving the
+ // original type.
+ auto GetFramePointer = [&](Value *Orig) -> Value * {
+ FieldIDType Index = FrameData.getFieldIndex(Orig);
+ SmallVector<Value *, 3> Indices = {
+ ConstantInt::get(Type::getInt32Ty(C), 0),
+ ConstantInt::get(Type::getInt32Ty(C), Index),
+ };
+
+ if (auto *AI = dyn_cast<AllocaInst>(Orig)) {
+ if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
+ auto Count = CI->getValue().getZExtValue();
+ if (Count > 1) {
+ Indices.push_back(ConstantInt::get(Type::getInt32Ty(C), 0));
+ }
+ } else {
+ report_fatal_error("Coroutines cannot handle non static allocas yet");
+ }
+ }
+
+ auto GEP = cast<GetElementPtrInst>(
+ Builder.CreateInBoundsGEP(FrameTy, FramePtr, Indices));
+ if (isa<AllocaInst>(Orig)) {
+      // If the type of the GEP is not equal to the type of the AllocaInst,
+      // the frame slot may be shared with another AllocaInst, so we cast the
+      // GEP to the type of the AllocaInst here to reuse the frame storage.
+      //
+      // Note: if we change the strategy for dealing with alignment, we need
+      // to refine this casting.
+ if (GEP->getResultElementType() != Orig->getType())
+ return Builder.CreateBitCast(GEP, Orig->getType(),
+ Orig->getName() + Twine(".cast"));
+ }
+ return GEP;
+ };
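+
+  // For a value at field index 2, for instance, the lambda above emits the
+  // equivalent of (illustrative):
+  //
+  //   %addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
+  //                                  i32 0, i32 2
+  //
+  // plus one extra trailing `i32 0` index when the original was an array
+  // alloca, and a bitcast back to the alloca's type when frame storage is
+  // shared between allocas.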
+
+ for (auto const &E : FrameData.Spills) {
+ Value *Def = E.first;
+ // Create a store instruction storing the value into the
+ // coroutine frame.
+ Instruction *InsertPt = nullptr;
+ bool NeedToCopyArgPtrValue = false;
+ if (auto *Arg = dyn_cast<Argument>(Def)) {
+ // For arguments, we will place the store instruction right after
+ // the coroutine frame pointer instruction, i.e. bitcast of
+ // coro.begin from i8* to %f.frame*.
+ InsertPt = FramePtr->getNextNode();
+
+ // If we're spilling an Argument, make sure we clear 'nocapture'
+ // from the coroutine function.
+ Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
+
+ if (Arg->hasByValAttr())
+ NeedToCopyArgPtrValue = true;
+
+ } else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
+ // Don't spill immediately after a suspend; splitting assumes
+ // that the suspend will be followed by a branch.
+ InsertPt = CSI->getParent()->getSingleSuccessor()->getFirstNonPHI();
+ } else {
+ auto *I = cast<Instruction>(Def);
+ if (!DT.dominates(CB, I)) {
+        // If it is not dominated by CoroBegin, the spill should be
+        // inserted immediately after the coroutine frame pointer is computed.
+ InsertPt = FramePtr->getNextNode();
+ } else if (auto *II = dyn_cast<InvokeInst>(I)) {
+ // If we are spilling the result of the invoke instruction, split
+ // the normal edge and insert the spill in the new block.
+ auto *NewBB = SplitEdge(II->getParent(), II->getNormalDest());
+ InsertPt = NewBB->getTerminator();
+ } else if (isa<PHINode>(I)) {
+        // Place the spill after any PHINodes and EH pad instructions.
+ BasicBlock *DefBlock = I->getParent();
+ if (auto *CSI = dyn_cast<CatchSwitchInst>(DefBlock->getTerminator()))
+ InsertPt = splitBeforeCatchSwitch(CSI);
+ else
+ InsertPt = &*DefBlock->getFirstInsertionPt();
+ } else {
+ assert(!I->isTerminator() && "unexpected terminator");
+ // For all other values, the spill is placed immediately after
+ // the definition.
+ InsertPt = I->getNextNode();
+ }
+ }
+
+ auto Index = FrameData.getFieldIndex(Def);
+ Builder.SetInsertPoint(InsertPt);
+ auto *G = Builder.CreateConstInBoundsGEP2_32(
+ FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
+ if (NeedToCopyArgPtrValue) {
+      // For byval arguments, we need to store the pointee value in the frame
+      // instead of the pointer itself.
+ auto *Value =
+ Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
+ Builder.CreateStore(Value, G);
+ } else {
+ Builder.CreateStore(Def, G);
+ }
+
+ BasicBlock *CurrentBlock = nullptr;
+ Value *CurrentReload = nullptr;
+ for (auto *U : E.second) {
+      // If we have not seen the use block yet, compute the address of the
+      // spilled value in the coroutine frame, and create a load (unless we
+      // only need the pointer itself, as for byval argument copies).
+ if (CurrentBlock != U->getParent()) {
+ CurrentBlock = U->getParent();
+ Builder.SetInsertPoint(&*CurrentBlock->getFirstInsertionPt());
+
+ auto *GEP = GetFramePointer(E.first);
+ GEP->setName(E.first->getName() + Twine(".reload.addr"));
+ if (NeedToCopyArgPtrValue)
+ CurrentReload = GEP;
+ else
+ CurrentReload = Builder.CreateLoad(
+ FrameTy->getElementType(FrameData.getFieldIndex(E.first)), GEP,
+ E.first->getName() + Twine(".reload"));
+
+ TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Def);
+ for (DbgDeclareInst *DDI : DIs) {
+ bool AllowUnresolved = false;
+        // This dbg.declare is preserved for all coro-split function
+        // fragments. It will be unreachable in the main function, and
+        // processed by coro::salvageDebugInfo() from within CoroCloner.
+ DIBuilder(*CurrentBlock->getParent()->getParent(), AllowUnresolved)
+ .insertDeclare(CurrentReload, DDI->getVariable(),
+ DDI->getExpression(), DDI->getDebugLoc(),
+ &*Builder.GetInsertPoint());
+ // This dbg.declare is for the main function entry point. It
+ // will be deleted in all coro-split functions.
+ coro::salvageDebugInfo(DbgPtrAllocaCache, DDI);
+ }
+ }
+
+      // If we have a single-edge PHINode, remove it and replace it with a
+      // reload from the coroutine frame. (We already took care of multi-edge
+      // PHINodes by rewriting them in the rewritePHIs function.)
+ if (auto *PN = dyn_cast<PHINode>(U)) {
+ assert(PN->getNumIncomingValues() == 1 &&
+ "unexpected number of incoming "
+ "values in the PHINode");
+ PN->replaceAllUsesWith(CurrentReload);
+ PN->eraseFromParent();
+ continue;
+ }
+
+ // Replace all uses of CurrentValue in the current instruction with
+ // reload.
+ U->replaceUsesOfWith(Def, CurrentReload);
+ }
+ }
+
+ BasicBlock *FramePtrBB = FramePtr->getParent();
+
+ auto SpillBlock =
+ FramePtrBB->splitBasicBlock(FramePtr->getNextNode(), "AllocaSpillBB");
+ SpillBlock->splitBasicBlock(&SpillBlock->front(), "PostSpill");
+ Shape.AllocaSpillBlock = SpillBlock;
+
+ // retcon and retcon.once lowering assumes all uses have been sunk.
+ if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
+ Shape.ABI == coro::ABI::Async) {
+    // If we found any allocas, replace all of their remaining uses with GEPs.
+ Builder.SetInsertPoint(&SpillBlock->front());
+ for (const auto &P : FrameData.Allocas) {
+ AllocaInst *Alloca = P.Alloca;
+ auto *G = GetFramePointer(Alloca);
+
+ // We are not using ReplaceInstWithInst(P.first, cast<Instruction>(G))
+      // here, as we are changing the location of the instruction.
+ G->takeName(Alloca);
+ Alloca->replaceAllUsesWith(G);
+ Alloca->eraseFromParent();
+ }
+ return FramePtr;
+ }
+
+  // If we found any allocas, replace all of their remaining uses with GEP
+  // instructions. Because new dbg.declares have been created for these
+  // allocas, we also delete the original dbg.declares and replace other debug
+  // uses with undef. Note: we cannot replace the allocas with GEP instructions
+  // indiscriminately, as some of the uses may not be dominated by CoroBegin.
+ Builder.SetInsertPoint(&Shape.AllocaSpillBlock->front());
+ SmallVector<Instruction *, 4> UsersToUpdate;
+ for (const auto &A : FrameData.Allocas) {
+ AllocaInst *Alloca = A.Alloca;
+ UsersToUpdate.clear();
+ for (User *U : Alloca->users()) {
+ auto *I = cast<Instruction>(U);
+ if (DT.dominates(CB, I))
+ UsersToUpdate.push_back(I);
+ }
+ if (UsersToUpdate.empty())
+ continue;
+ auto *G = GetFramePointer(Alloca);
+ G->setName(Alloca->getName() + Twine(".reload.addr"));
+
+ SmallPtrSet<BasicBlock *, 4> SeenDbgBBs;
+ TinyPtrVector<DbgDeclareInst *> DIs = FindDbgDeclareUses(Alloca);
+ if (!DIs.empty())
+ DIBuilder(*Alloca->getModule(),
+ /*AllowUnresolved*/ false)
+ .insertDeclare(G, DIs.front()->getVariable(),
+ DIs.front()->getExpression(),
+ DIs.front()->getDebugLoc(), DIs.front());
+ for (auto *DI : FindDbgDeclareUses(Alloca))
+ DI->eraseFromParent();
+ replaceDbgUsesWithUndef(Alloca);
+
+ for (Instruction *I : UsersToUpdate)
+ I->replaceUsesOfWith(Alloca, G);
+ }
+ Builder.SetInsertPoint(FramePtr->getNextNode());
+ for (const auto &A : FrameData.Allocas) {
+ AllocaInst *Alloca = A.Alloca;
+ if (A.MayWriteBeforeCoroBegin) {
+      // MayWriteBeforeCoroBegin means the content of the alloca may have
+      // been modified before CoroBegin, so copy the current value into the
+      // frame here.
+ if (Alloca->isArrayAllocation())
+ report_fatal_error(
+ "Coroutines cannot handle copying of array allocas yet");
+
+ auto *G = GetFramePointer(Alloca);
+ auto *Value = Builder.CreateLoad(Alloca->getAllocatedType(), Alloca);
+ Builder.CreateStore(Value, G);
+ }
+    // For each alias of Alloca created before CoroBegin but used after
+    // CoroBegin, we recreate it after CoroBegin by applying its offset
+    // to the pointer in the frame.
+ for (const auto &Alias : A.Aliases) {
+ auto *FramePtr = GetFramePointer(Alloca);
+ auto *FramePtrRaw =
+ Builder.CreateBitCast(FramePtr, Type::getInt8PtrTy(C));
+ auto *AliasPtr = Builder.CreateGEP(
+ FramePtrRaw,
+ ConstantInt::get(Type::getInt64Ty(C), Alias.second.getValue()));
+ auto *AliasPtrTyped =
+ Builder.CreateBitCast(AliasPtr, Alias.first->getType());
+ Alias.first->replaceUsesWithIf(
+ AliasPtrTyped, [&](Use &U) { return DT.dominates(CB, U); });
+ }
+ }
+ return FramePtr;
+}
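+
+// For example (illustrative): spilling an i32 value %v that lives in frame
+// field <idx> produces, at the definition,
+//
+//   %v.spill.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
+//                                          i32 0, i32 <idx>
+//   store i32 %v, i32* %v.spill.addr
+//
+// and, once per block that uses %v across a suspend,
+//
+//   %v.reload.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr,
+//                                           i32 0, i32 <idx>
+//   %v.reload = load i32, i32* %v.reload.addr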
+
+// Sets the unwind edge of an instruction to a particular successor.
+static void setUnwindEdgeTo(Instruction *TI, BasicBlock *Succ) {
+ if (auto *II = dyn_cast<InvokeInst>(TI))
+ II->setUnwindDest(Succ);
+ else if (auto *CS = dyn_cast<CatchSwitchInst>(TI))
+ CS->setUnwindDest(Succ);
+ else if (auto *CR = dyn_cast<CleanupReturnInst>(TI))
+ CR->setUnwindDest(Succ);
+ else
+ llvm_unreachable("unexpected terminator instruction");
+}
+
+// Replaces all uses of OldPred with the NewPred block in all PHINodes in a
+// block.
+static void updatePhiNodes(BasicBlock *DestBB, BasicBlock *OldPred,
+ BasicBlock *NewPred, PHINode *Until = nullptr) {
+ unsigned BBIdx = 0;
+ for (BasicBlock::iterator I = DestBB->begin(); isa<PHINode>(I); ++I) {
+ PHINode *PN = cast<PHINode>(I);
+
+ // We manually update the LandingPadReplacement PHINode and it is the last
+ // PHI Node. So, if we find it, we are done.
+ if (Until == PN)
+ break;
+
+ // Reuse the previous value of BBIdx if it lines up. In cases where we
+ // have multiple phi nodes with *lots* of predecessors, this is a speed
+ // win because we don't have to scan the PHI looking for TIBB. This
+    // happens because the BB list of PHI nodes is usually in the same
+    // order.
+ if (PN->getIncomingBlock(BBIdx) != OldPred)
+ BBIdx = PN->getBasicBlockIndex(OldPred);
+
+ assert(BBIdx != (unsigned)-1 && "Invalid PHI Index!");
+ PN->setIncomingBlock(BBIdx, NewPred);
+ }
+}
+
+// Uses SplitEdge unless the successor block is an EHPad, in which case do EH
+// specific handling.
+static BasicBlock *ehAwareSplitEdge(BasicBlock *BB, BasicBlock *Succ,
+ LandingPadInst *OriginalPad,
+ PHINode *LandingPadReplacement) {
+ auto *PadInst = Succ->getFirstNonPHI();
+ if (!LandingPadReplacement && !PadInst->isEHPad())
+ return SplitEdge(BB, Succ);
+
+ auto *NewBB = BasicBlock::Create(BB->getContext(), "", BB->getParent(), Succ);
+ setUnwindEdgeTo(BB->getTerminator(), NewBB);
+ updatePhiNodes(Succ, BB, NewBB, LandingPadReplacement);
+
+ if (LandingPadReplacement) {
+ auto *NewLP = OriginalPad->clone();
+ auto *Terminator = BranchInst::Create(Succ, NewBB);
+ NewLP->insertBefore(Terminator);
+ LandingPadReplacement->addIncoming(NewLP, NewBB);
+ return NewBB;
+ }
+ Value *ParentPad = nullptr;
+ if (auto *FuncletPad = dyn_cast<FuncletPadInst>(PadInst))
+ ParentPad = FuncletPad->getParentPad();
+ else if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(PadInst))
+ ParentPad = CatchSwitch->getParentPad();
+ else
+ llvm_unreachable("handling for other EHPads not implemented yet");
+
+ auto *NewCleanupPad = CleanupPadInst::Create(ParentPad, {}, "", NewBB);
+ CleanupReturnInst::Create(NewCleanupPad, Succ, NewBB);
+ return NewBB;
+}
+
+// Moves the values in the PHIs in SuccBB that correspond to PredBB into a new
+// PHI in InsertedBB.
+static void movePHIValuesToInsertedBlock(BasicBlock *SuccBB,
+ BasicBlock *InsertedBB,
+ BasicBlock *PredBB,
+ PHINode *UntilPHI = nullptr) {
+ auto *PN = cast<PHINode>(&SuccBB->front());
+ do {
+ int Index = PN->getBasicBlockIndex(InsertedBB);
+ Value *V = PN->getIncomingValue(Index);
+ PHINode *InputV = PHINode::Create(
+ V->getType(), 1, V->getName() + Twine(".") + SuccBB->getName(),
+ &InsertedBB->front());
+ InputV->addIncoming(V, PredBB);
+ PN->setIncomingValue(Index, InputV);
+ PN = dyn_cast<PHINode>(PN->getNextNode());
+ } while (PN != UntilPHI);
+}
+
+// Rewrites the PHI Nodes in a cleanuppad.
+static void rewritePHIsForCleanupPad(BasicBlock *CleanupPadBB,
+ CleanupPadInst *CleanupPad) {
+  // For every incoming edge to a CleanupPad we will create a new block holding
+  // all incoming values in single-value PHI nodes. We will then create another
+  // block to act as a dispatcher (as all unwind edges for related EH blocks
+  // must be the same).
+ //
+ // cleanuppad:
+ // %2 = phi i32[%0, %catchswitch], [%1, %catch.1]
+ // %3 = cleanuppad within none []
+ //
+ // It will create:
+ //
+ // cleanuppad.corodispatch
+ // %2 = phi i8[0, %catchswitch], [1, %catch.1]
+ // %3 = cleanuppad within none []
+  //     switch i8 %2, label %unreachable
+  //         [i8 0, label %cleanuppad.from.catchswitch
+  //          i8 1, label %cleanuppad.from.catch.1]
+  //   cleanuppad.from.catchswitch:
+  //     %4 = phi i32 [%0, %catchswitch]
+  //     br label %cleanuppad
+  //   cleanuppad.from.catch.1:
+  //     %6 = phi i32 [%1, %catch.1]
+  //     br label %cleanuppad
+ // cleanuppad:
+ // %8 = phi i32 [%4, %cleanuppad.from.catchswitch],
+ // [%6, %cleanuppad.from.catch.1]
+
+ // Unreachable BB, in case switching on an invalid value in the dispatcher.
+ auto *UnreachBB = BasicBlock::Create(
+ CleanupPadBB->getContext(), "unreachable", CleanupPadBB->getParent());
+ IRBuilder<> Builder(UnreachBB);
+ Builder.CreateUnreachable();
+
+ // Create a new cleanuppad which will be the dispatcher.
+ auto *NewCleanupPadBB =
+ BasicBlock::Create(CleanupPadBB->getContext(),
+ CleanupPadBB->getName() + Twine(".corodispatch"),
+ CleanupPadBB->getParent(), CleanupPadBB);
+ Builder.SetInsertPoint(NewCleanupPadBB);
+ auto *SwitchType = Builder.getInt8Ty();
+ auto *SetDispatchValuePN =
+ Builder.CreatePHI(SwitchType, pred_size(CleanupPadBB));
+ CleanupPad->removeFromParent();
+ CleanupPad->insertAfter(SetDispatchValuePN);
+ auto *SwitchOnDispatch = Builder.CreateSwitch(SetDispatchValuePN, UnreachBB,
+ pred_size(CleanupPadBB));
+
+ int SwitchIndex = 0;
+ SmallVector<BasicBlock *, 8> Preds(predecessors(CleanupPadBB));
+ for (BasicBlock *Pred : Preds) {
+ // Create a new cleanuppad and move the PHI values to there.
+ auto *CaseBB = BasicBlock::Create(CleanupPadBB->getContext(),
+ CleanupPadBB->getName() +
+ Twine(".from.") + Pred->getName(),
+ CleanupPadBB->getParent(), CleanupPadBB);
+ updatePhiNodes(CleanupPadBB, Pred, CaseBB);
+ CaseBB->setName(CleanupPadBB->getName() + Twine(".from.") +
+ Pred->getName());
+ Builder.SetInsertPoint(CaseBB);
+ Builder.CreateBr(CleanupPadBB);
+ movePHIValuesToInsertedBlock(CleanupPadBB, CaseBB, NewCleanupPadBB);
+
+ // Update this Pred to the new unwind point.
+ setUnwindEdgeTo(Pred->getTerminator(), NewCleanupPadBB);
+
+ // Setup the switch in the dispatcher.
+ auto *SwitchConstant = ConstantInt::get(SwitchType, SwitchIndex);
+ SetDispatchValuePN->addIncoming(SwitchConstant, Pred);
+ SwitchOnDispatch->addCase(SwitchConstant, CaseBB);
+ SwitchIndex++;
+ }
+}
+
+static void rewritePHIs(BasicBlock &BB) {
+  // For every incoming edge we will create a block holding all
+  // incoming values in single-value PHI nodes.
+ //
+ // loop:
+ // %n.val = phi i32[%n, %entry], [%inc, %loop]
+ //
+ // It will create:
+ //
+ // loop.from.entry:
+  //    %n.loop.pre = phi i32 [%n, %entry]
+  //    br label %loop
+  //  loop.from.loop:
+  //    %inc.loop.pre = phi i32 [%inc, %loop]
+  //    br label %loop
+ //
+ // After this rewrite, further analysis will ignore any phi nodes with more
+ // than one incoming edge.
+
+ // TODO: Simplify PHINodes in the basic block to remove duplicate
+ // predecessors.
+
+ // Special case for CleanupPad: all EH blocks must have the same unwind edge
+ // so we need to create an additional "dispatcher" block.
+ if (auto *CleanupPad =
+ dyn_cast_or_null<CleanupPadInst>(BB.getFirstNonPHI())) {
+ SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
+ for (BasicBlock *Pred : Preds) {
+ if (CatchSwitchInst *CS =
+ dyn_cast<CatchSwitchInst>(Pred->getTerminator())) {
+        // CleanupPad with a CatchSwitch predecessor: therefore this is an
+        // unwind destination that needs to be handled specially.
+ assert(CS->getUnwindDest() == &BB);
+ (void)CS;
+ rewritePHIsForCleanupPad(&BB, CleanupPad);
+ return;
+ }
+ }
+ }
+
+ LandingPadInst *LandingPad = nullptr;
+ PHINode *ReplPHI = nullptr;
+ if ((LandingPad = dyn_cast_or_null<LandingPadInst>(BB.getFirstNonPHI()))) {
+ // ehAwareSplitEdge will clone the LandingPad in all the edge blocks.
+ // We replace the original landing pad with a PHINode that will collect the
+ // results from all of them.
+ ReplPHI = PHINode::Create(LandingPad->getType(), 1, "", LandingPad);
+ ReplPHI->takeName(LandingPad);
+ LandingPad->replaceAllUsesWith(ReplPHI);
+ // We will erase the original landing pad at the end of this function after
+ // ehAwareSplitEdge cloned it in the transition blocks.
+ }
+
+ SmallVector<BasicBlock *, 8> Preds(predecessors(&BB));
+ for (BasicBlock *Pred : Preds) {
+ auto *IncomingBB = ehAwareSplitEdge(Pred, &BB, LandingPad, ReplPHI);
+ IncomingBB->setName(BB.getName() + Twine(".from.") + Pred->getName());
+
+ // Stop the moving of values at ReplPHI, as this is either null or the PHI
+ // that replaced the landing pad.
+ movePHIValuesToInsertedBlock(&BB, IncomingBB, Pred, ReplPHI);
+ }
+
+ if (LandingPad) {
+    // The calls to ehAwareSplitEdge above cloned the original landing pad,
+    // so it is no longer needed.
+ LandingPad->eraseFromParent();
+ }
+}
+
+static void rewritePHIs(Function &F) {
+ SmallVector<BasicBlock *, 8> WorkList;
+
+ for (BasicBlock &BB : F)
+ if (auto *PN = dyn_cast<PHINode>(&BB.front()))
+ if (PN->getNumIncomingValues() > 1)
+ WorkList.push_back(&BB);
+
+ for (BasicBlock *BB : WorkList)
+ rewritePHIs(*BB);
+}
+
+// Check for instructions that we can recreate on resume as opposed to
+// spilling the result into a coroutine frame.
+static bool materializable(Instruction &V) {
+ return isa<CastInst>(&V) || isa<GetElementPtrInst>(&V) ||
+ isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<SelectInst>(&V);
+}
+
+// Check for structural coroutine intrinsics that should not be spilled into
+// the coroutine frame.
+static bool isCoroutineStructureIntrinsic(Instruction &I) {
+ return isa<CoroIdInst>(&I) || isa<CoroSaveInst>(&I) ||
+ isa<CoroSuspendInst>(&I);
+}
+
+// For every use of a value that crosses a suspend point, recreate that value
+// after the suspend point.
+static void rewriteMaterializableInstructions(IRBuilder<> &IRB,
+ const SpillInfo &Spills) {
+ for (const auto &E : Spills) {
+ Value *Def = E.first;
+ BasicBlock *CurrentBlock = nullptr;
+ Instruction *CurrentMaterialization = nullptr;
+ for (Instruction *U : E.second) {
+ // If we have not seen this block, materialize the value.
+ if (CurrentBlock != U->getParent()) {
+ CurrentBlock = U->getParent();
+ CurrentMaterialization = cast<Instruction>(Def)->clone();
+ CurrentMaterialization->setName(Def->getName());
+ CurrentMaterialization->insertBefore(
+ &*CurrentBlock->getFirstInsertionPt());
+ }
+ if (auto *PN = dyn_cast<PHINode>(U)) {
+ assert(PN->getNumIncomingValues() == 1 &&
+ "unexpected number of incoming "
+ "values in the PHINode");
+ PN->replaceAllUsesWith(CurrentMaterialization);
+ PN->eraseFromParent();
+ continue;
+ }
+ // Replace all uses of Def in the current instruction with the
+ // CurrentMaterialization for the block.
+ U->replaceUsesOfWith(Def, CurrentMaterialization);
+ }
+ }
+}
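+
+// For example (illustrative): if `%sum = add i32 %a, %b` is defined before a
+// suspend point and only used after it, the add is cloned into the using
+// block instead of spilling %sum to the frame:
+//
+//   resume.bb:
+//     %sum1 = add i32 %a, %b   ; clone; %a and %b are handled separately
+//     ; ...uses of %sum rewritten to use %sum1...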
+
+// Splits the block at a particular instruction unless it is the first
+// instruction in the block with a single predecessor.
+static BasicBlock *splitBlockIfNotFirst(Instruction *I, const Twine &Name) {
+ auto *BB = I->getParent();
+ if (&BB->front() == I) {
+ if (BB->getSinglePredecessor()) {
+ BB->setName(Name);
+ return BB;
+ }
+ }
+ return BB->splitBasicBlock(I, Name);
+}
+
+// Split above and below a particular instruction so that it
+// ends up alone in its own block.
+static void splitAround(Instruction *I, const Twine &Name) {
+ splitBlockIfNotFirst(I, Name);
+ splitBlockIfNotFirst(I->getNextNode(), "After" + Name);
+}
+
+static bool isSuspendBlock(BasicBlock *BB) {
+ return isa<AnyCoroSuspendInst>(BB->front());
+}
+
+typedef SmallPtrSet<BasicBlock*, 8> VisitedBlocksSet;
+
+/// Does control flow starting at the given block ever reach a suspend
+/// instruction before reaching a block in VisitedOrFreeBBs?
+static bool isSuspendReachableFrom(BasicBlock *From,
+ VisitedBlocksSet &VisitedOrFreeBBs) {
+ // Eagerly try to add this block to the visited set. If it's already
+ // there, stop recursing; this path doesn't reach a suspend before
+ // either looping or reaching a freeing block.
+ if (!VisitedOrFreeBBs.insert(From).second)
+ return false;
+
+ // We assume that we'll already have split suspends into their own blocks.
+ if (isSuspendBlock(From))
+ return true;
+
+ // Recurse on the successors.
+ for (auto Succ : successors(From)) {
+ if (isSuspendReachableFrom(Succ, VisitedOrFreeBBs))
+ return true;
+ }
+
+ return false;
+}
+
+/// Is the given alloca "local", i.e. bounded in lifetime to not cross a
+/// suspend point?
+static bool isLocalAlloca(CoroAllocaAllocInst *AI) {
+ // Seed the visited set with all the basic blocks containing a free
+ // so that we won't pass them up.
+ VisitedBlocksSet VisitedOrFreeBBs;
+ for (auto User : AI->users()) {
+ if (auto FI = dyn_cast<CoroAllocaFreeInst>(User))
+ VisitedOrFreeBBs.insert(FI->getParent());
+ }
+
+ return !isSuspendReachableFrom(AI->getParent(), VisitedOrFreeBBs);
+}
+
+/// After we split the coroutine, will the given basic block be along
+/// an obvious exit path for the resumption function?
+static bool willLeaveFunctionImmediatelyAfter(BasicBlock *BB,
+ unsigned depth = 3) {
+ // If we've bottomed out our depth count, stop searching and assume
+ // that the path might loop back.
+ if (depth == 0) return false;
+
+ // If this is a suspend block, we're about to exit the resumption function.
+ if (isSuspendBlock(BB)) return true;
+
+ // Recurse into the successors.
+ for (auto Succ : successors(BB)) {
+ if (!willLeaveFunctionImmediatelyAfter(Succ, depth - 1))
+ return false;
+ }
+
+ // If none of the successors leads back in a loop, we're on an exit/abort.
+ return true;
+}
+
+static bool localAllocaNeedsStackSave(CoroAllocaAllocInst *AI) {
+ // Look for a free that isn't sufficiently obviously followed by
+ // either a suspend or a termination, i.e. something that will leave
+ // the coro resumption frame.
+ for (auto U : AI->users()) {
+ auto FI = dyn_cast<CoroAllocaFreeInst>(U);
+ if (!FI) continue;
+
+ if (!willLeaveFunctionImmediatelyAfter(FI->getParent()))
+ return true;
+ }
+
+ // If we never found one, we don't need a stack save.
+ return false;
+}
+
+/// Turn each of the given local allocas into a normal (dynamic) alloca
+/// instruction.
+static void lowerLocalAllocas(ArrayRef<CoroAllocaAllocInst*> LocalAllocas,
+ SmallVectorImpl<Instruction*> &DeadInsts) {
+ for (auto AI : LocalAllocas) {
+ auto M = AI->getModule();
+ IRBuilder<> Builder(AI);
+
+ // Save the stack depth. Try to avoid doing this if the stackrestore
+ // is going to immediately precede a return or something.
+ Value *StackSave = nullptr;
+ if (localAllocaNeedsStackSave(AI))
+ StackSave = Builder.CreateCall(
+ Intrinsic::getDeclaration(M, Intrinsic::stacksave));
+
+ // Allocate memory.
+ auto Alloca = Builder.CreateAlloca(Builder.getInt8Ty(), AI->getSize());
+ Alloca->setAlignment(Align(AI->getAlignment()));
+
+ for (auto U : AI->users()) {
+ // Replace gets with the allocation.
+ if (isa<CoroAllocaGetInst>(U)) {
+ U->replaceAllUsesWith(Alloca);
+
+ // Replace frees with stackrestores. This is safe because
+ // alloca.alloc is required to obey a stack discipline, although we
+ // don't enforce that structurally.
+ } else {
+ auto FI = cast<CoroAllocaFreeInst>(U);
+ if (StackSave) {
+ Builder.SetInsertPoint(FI);
+ Builder.CreateCall(
+ Intrinsic::getDeclaration(M, Intrinsic::stackrestore),
+ StackSave);
+ }
+ }
+ DeadInsts.push_back(cast<Instruction>(U));
+ }
+
+ DeadInsts.push_back(AI);
+ }
+}
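+
+// Illustratively (a sketch; the exact IR depends on whether a stack save is
+// needed), each local coro.alloca is lowered to:
+//
+//   %save = call i8* @llvm.stacksave()          ; only if needed
+//   %mem  = alloca i8, i64 <size>, align <align>
+//   ; uses of coro.alloca.get are replaced by %mem
+//   call void @llvm.stackrestore(i8* %save)     ; at each coro.alloca.free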
+
+/// Turn the given coro.alloca.alloc call into a dynamic allocation.
+/// This happens during the all-instructions iteration, so it must not
+/// delete the call.
+static Instruction *lowerNonLocalAlloca(CoroAllocaAllocInst *AI,
+ coro::Shape &Shape,
+ SmallVectorImpl<Instruction*> &DeadInsts) {
+ IRBuilder<> Builder(AI);
+ auto Alloc = Shape.emitAlloc(Builder, AI->getSize(), nullptr);
+
+ for (User *U : AI->users()) {
+ if (isa<CoroAllocaGetInst>(U)) {
+ U->replaceAllUsesWith(Alloc);
+ } else {
+ auto FI = cast<CoroAllocaFreeInst>(U);
+ Builder.SetInsertPoint(FI);
+ Shape.emitDealloc(Builder, Alloc, nullptr);
+ }
+ DeadInsts.push_back(cast<Instruction>(U));
+ }
+
+ // Push this on last so that it gets deleted after all the others.
+ DeadInsts.push_back(AI);
+
+ // Return the new allocation value so that we can check for needed spills.
+ return cast<Instruction>(Alloc);
+}
+
+/// Get the current swifterror value.
+static Value *emitGetSwiftErrorValue(IRBuilder<> &Builder, Type *ValueTy,
+ coro::Shape &Shape) {
+ // Make a fake function pointer as a sort of intrinsic.
+ auto FnTy = FunctionType::get(ValueTy, {}, false);
+ auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
+
+ auto Call = Builder.CreateCall(FnTy, Fn, {});
+ Shape.SwiftErrorOps.push_back(Call);
+
+ return Call;
+}
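+
+// The resulting call looks like (illustrative):
+//
+//   %v = call <value-type> null()
+//
+// i.e. a call through a null function pointer of the appropriate type. It is
+// recorded in Shape.SwiftErrorOps so that later stages of coroutine splitting
+// can rewrite it into real swifterror handling.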
+
+/// Set the given value as the current swifterror value.
+///
+/// Returns a slot that can be used as a swifterror slot.
+static Value *emitSetSwiftErrorValue(IRBuilder<> &Builder, Value *V,
+ coro::Shape &Shape) {
+ // Make a fake function pointer as a sort of intrinsic.
+ auto FnTy = FunctionType::get(V->getType()->getPointerTo(),
+ {V->getType()}, false);
+ auto Fn = ConstantPointerNull::get(FnTy->getPointerTo());
+
+ auto Call = Builder.CreateCall(FnTy, Fn, { V });
+ Shape.SwiftErrorOps.push_back(Call);
+
+ return Call;
+}
+
+/// Set the swifterror value from the given alloca before a call,
+/// then put it back in the alloca afterwards.
+///
+/// Returns an address that will stand in for the swifterror slot
+/// until splitting.
+static Value *emitSetAndGetSwiftErrorValueAround(Instruction *Call,
+ AllocaInst *Alloca,
+ coro::Shape &Shape) {
+ auto ValueTy = Alloca->getAllocatedType();
+ IRBuilder<> Builder(Call);
+
+ // Load the current value from the alloca and set it as the
+ // swifterror value.
+ auto ValueBeforeCall = Builder.CreateLoad(ValueTy, Alloca);
+ auto Addr = emitSetSwiftErrorValue(Builder, ValueBeforeCall, Shape);
+
+ // Move to after the call. Since swifterror only has a guaranteed
+ // value on normal exits, we can ignore implicit and explicit unwind
+ // edges.
+ if (isa<CallInst>(Call)) {
+ Builder.SetInsertPoint(Call->getNextNode());
+ } else {
+ auto Invoke = cast<InvokeInst>(Call);
+ Builder.SetInsertPoint(Invoke->getNormalDest()->getFirstNonPHIOrDbg());
+ }
+
+ // Get the current swifterror value and store it to the alloca.
+ auto ValueAfterCall = emitGetSwiftErrorValue(Builder, ValueTy, Shape);
+ Builder.CreateStore(ValueAfterCall, Alloca);
+
+ return Addr;
+}
+
+/// Eliminate a formerly-swifterror alloca by inserting the get/set
+/// intrinsics and attempting to MemToReg the alloca away.
+static void eliminateSwiftErrorAlloca(Function &F, AllocaInst *Alloca,
+ coro::Shape &Shape) {
+ for (auto UI = Alloca->use_begin(), UE = Alloca->use_end(); UI != UE; ) {
+ // We're likely changing the use list, so use a mutation-safe
+ // iteration pattern.
+ auto &Use = *UI;
+ ++UI;
+
+ // swifterror values can only be used in very specific ways.
+ // We take advantage of that here.
+ auto User = Use.getUser();
+ if (isa<LoadInst>(User) || isa<StoreInst>(User))
+ continue;
+
+ assert(isa<CallInst>(User) || isa<InvokeInst>(User));
+ auto Call = cast<Instruction>(User);
+
+ auto Addr = emitSetAndGetSwiftErrorValueAround(Call, Alloca, Shape);
+
+ // Use the returned slot address as the call argument.
+ Use.set(Addr);
+ }
+
+ // All the uses should be loads and stores now.
+ assert(isAllocaPromotable(Alloca));
+}
+
+/// "Eliminate" a swifterror argument by reducing it to the alloca case
+/// and then loading and storing in the prologue and epilog.
+///
+/// The argument keeps the swifterror flag.
+static void eliminateSwiftErrorArgument(Function &F, Argument &Arg,
+ coro::Shape &Shape,
+ SmallVectorImpl<AllocaInst*> &AllocasToPromote) {
+ IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
+
+ auto ArgTy = cast<PointerType>(Arg.getType());
+ auto ValueTy = ArgTy->getElementType();
+
+ // Reduce to the alloca case:
+
+ // Create an alloca and replace all uses of the arg with it.
+ auto Alloca = Builder.CreateAlloca(ValueTy, ArgTy->getAddressSpace());
+ Arg.replaceAllUsesWith(Alloca);
+
+ // Set an initial value in the alloca. swifterror is always null on entry.
+ auto InitialValue = Constant::getNullValue(ValueTy);
+ Builder.CreateStore(InitialValue, Alloca);
+
+ // Find all the suspends in the function and save and restore around them.
+ for (auto Suspend : Shape.CoroSuspends) {
+ (void) emitSetAndGetSwiftErrorValueAround(Suspend, Alloca, Shape);
+ }
+
+ // Find all the coro.ends in the function and restore the error value.
+ for (auto End : Shape.CoroEnds) {
+ Builder.SetInsertPoint(End);
+ auto FinalValue = Builder.CreateLoad(ValueTy, Alloca);
+ (void) emitSetSwiftErrorValue(Builder, FinalValue, Shape);
+ }
+
+ // Now we can use the alloca logic.
+ AllocasToPromote.push_back(Alloca);
+ eliminateSwiftErrorAlloca(F, Alloca, Shape);
+}
+
+/// Eliminate all problematic uses of swifterror arguments and allocas
+/// from the function. We'll fix them up later when splitting the function.
+static void eliminateSwiftError(Function &F, coro::Shape &Shape) {
+ SmallVector<AllocaInst*, 4> AllocasToPromote;
+
+ // Look for a swifterror argument.
+ for (auto &Arg : F.args()) {
+ if (!Arg.hasSwiftErrorAttr()) continue;
+
+ eliminateSwiftErrorArgument(F, Arg, Shape, AllocasToPromote);
+ break;
+ }
+
+ // Look for swifterror allocas.
+ for (auto &Inst : F.getEntryBlock()) {
+ auto Alloca = dyn_cast<AllocaInst>(&Inst);
+ if (!Alloca || !Alloca->isSwiftError()) continue;
+
+ // Clear the swifterror flag.
+ Alloca->setSwiftError(false);
+
+ AllocasToPromote.push_back(Alloca);
+ eliminateSwiftErrorAlloca(F, Alloca, Shape);
+ }
+
+ // If we have any allocas to promote, compute a dominator tree and
+ // promote them en masse.
+ if (!AllocasToPromote.empty()) {
+ DominatorTree DT(F);
+ PromoteMemToReg(AllocasToPromote, DT);
+ }
+}
+
+/// retcon and retcon.once conventions assume that all spill uses can be sunk
+/// after the coro.begin intrinsic.
+static void sinkSpillUsesAfterCoroBegin(Function &F,
+ const FrameDataInfo &FrameData,
+ CoroBeginInst *CoroBegin) {
+ DominatorTree Dom(F);
+
+ SmallSetVector<Instruction *, 32> ToMove;
+ SmallVector<Instruction *, 32> Worklist;
+
+ // Collect all users that precede coro.begin.
+ for (auto *Def : FrameData.getAllDefs()) {
+ for (User *U : Def->users()) {
+ auto Inst = cast<Instruction>(U);
+ if (Inst->getParent() != CoroBegin->getParent() ||
+ Dom.dominates(CoroBegin, Inst))
+ continue;
+ if (ToMove.insert(Inst))
+ Worklist.push_back(Inst);
+ }
+ }
+ // Recursively collect users before coro.begin.
+ while (!Worklist.empty()) {
+ auto *Def = Worklist.pop_back_val();
+ for (User *U : Def->users()) {
+ auto Inst = cast<Instruction>(U);
+ if (Dom.dominates(CoroBegin, Inst))
+ continue;
+ if (ToMove.insert(Inst))
+ Worklist.push_back(Inst);
+ }
+ }
+
+ // Sort by dominance.
+ SmallVector<Instruction *, 64> InsertionList(ToMove.begin(), ToMove.end());
+ llvm::sort(InsertionList, [&Dom](Instruction *A, Instruction *B) -> bool {
+    // If A dominates B, it should precede (<) B.
+ return Dom.dominates(A, B);
+ });
+
+ Instruction *InsertPt = CoroBegin->getNextNode();
+ for (Instruction *Inst : InsertionList)
+ Inst->moveBefore(InsertPt);
+}
+
+/// For each local variable whose uses are all confined to a single suspended
+/// region, sink its lifetime.start marker to right after the suspend block.
+/// Doing so minimizes the lifetime of each variable, hence minimizing the
+/// amount of data we end up putting on the frame.
+static void sinkLifetimeStartMarkers(Function &F, coro::Shape &Shape,
+ SuspendCrossingInfo &Checker) {
+ DominatorTree DT(F);
+
+ // Collect all possible basic blocks which may dominate all uses of allocas.
+ SmallPtrSet<BasicBlock *, 4> DomSet;
+ DomSet.insert(&F.getEntryBlock());
+ for (auto *CSI : Shape.CoroSuspends) {
+ BasicBlock *SuspendBlock = CSI->getParent();
+ assert(isSuspendBlock(SuspendBlock) && SuspendBlock->getSingleSuccessor() &&
+ "should have split coro.suspend into its own block");
+ DomSet.insert(SuspendBlock->getSingleSuccessor());
+ }
+
+ for (Instruction &I : instructions(F)) {
+ AllocaInst* AI = dyn_cast<AllocaInst>(&I);
+ if (!AI)
+ continue;
+
+ for (BasicBlock *DomBB : DomSet) {
+ bool Valid = true;
+ SmallVector<Instruction *, 1> Lifetimes;
+
+ auto isLifetimeStart = [](Instruction* I) {
+ if (auto* II = dyn_cast<IntrinsicInst>(I))
+ return II->getIntrinsicID() == Intrinsic::lifetime_start;
+ return false;
+ };
+
+ auto collectLifetimeStart = [&](Instruction *U, AllocaInst *AI) {
+ if (isLifetimeStart(U)) {
+ Lifetimes.push_back(U);
+ return true;
+ }
+ if (!U->hasOneUse() || U->stripPointerCasts() != AI)
+ return false;
+ if (isLifetimeStart(U->user_back())) {
+ Lifetimes.push_back(U->user_back());
+ return true;
+ }
+ return false;
+ };
+
+ for (User *U : AI->users()) {
+ Instruction *UI = cast<Instruction>(U);
+        // For all users except lifetime.start markers: if every user is
+        // dominated by one of the candidate basic blocks and no use crosses
+        // a suspend point, then there is no need to spill the instruction.
+ if (!DT.dominates(DomBB, UI->getParent()) ||
+ Checker.isDefinitionAcrossSuspend(DomBB, UI)) {
+ // Skip lifetime.start, GEP and bitcast used by lifetime.start
+ // markers.
+ if (collectLifetimeStart(UI, AI))
+ continue;
+ Valid = false;
+ break;
+ }
+ }
+      // Sink the lifetime.start markers to the dominating block when they
+      // are the only uses outside the region.
+      if (Valid && !Lifetimes.empty()) {
+        // NewBitCast may be AI itself when the type of AI is i8*.
+ auto *NewBitCast = [&](AllocaInst *AI) -> Value* {
+ if (isa<AllocaInst>(Lifetimes[0]->getOperand(1)))
+ return AI;
+ auto *Int8PtrTy = Type::getInt8PtrTy(F.getContext());
+ return CastInst::Create(Instruction::BitCast, AI, Int8PtrTy, "",
+ DomBB->getTerminator());
+ }(AI);
+
+ auto *NewLifetime = Lifetimes[0]->clone();
+ NewLifetime->replaceUsesOfWith(NewLifetime->getOperand(1), NewBitCast);
+ NewLifetime->insertBefore(DomBB->getTerminator());
+
+        // All the original (unsunk) lifetime.start markers are no longer
+        // necessary.
+ for (Instruction *S : Lifetimes)
+ S->eraseFromParent();
+
+ break;
+ }
+ }
+ }
+}
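+
+// For example (illustrative): given
+//
+//   entry:
+//     %x = alloca i8
+//     call void @llvm.lifetime.start.p0i8(i64 1, i8* %x)
+//     ; ...suspend...
+//   await.ready:              ; sole successor of the suspend block
+//     ; all uses of %x are here
+//
+// the lifetime.start is sunk from the entry block into await.ready, so %x is
+// no longer live across the suspend and need not be placed on the frame.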
+
+static void collectFrameAllocas(Function &F, coro::Shape &Shape,
+ const SuspendCrossingInfo &Checker,
+ SmallVectorImpl<AllocaInfo> &Allocas) {
+ // Collect lifetime.start info for each alloca.
+ using LifetimeStart = SmallPtrSet<Instruction *, 2>;
+ llvm::DenseMap<AllocaInst *, std::unique_ptr<LifetimeStart>> LifetimeMap;
+ for (Instruction &I : instructions(F)) {
+ auto *II = dyn_cast<IntrinsicInst>(&I);
+ if (!II || II->getIntrinsicID() != Intrinsic::lifetime_start)
+ continue;
+
+ if (auto *OpInst = dyn_cast<Instruction>(II->getOperand(1))) {
+ if (auto *AI = dyn_cast<AllocaInst>(OpInst->stripPointerCasts())) {
+
+ if (LifetimeMap.find(AI) == LifetimeMap.end())
+ LifetimeMap[AI] = std::make_unique<LifetimeStart>();
+ LifetimeMap[AI]->insert(isa<AllocaInst>(OpInst) ? II : OpInst);
+ }
+ }
+ }
+
+ for (Instruction &I : instructions(F)) {
+ auto *AI = dyn_cast<AllocaInst>(&I);
+ if (!AI)
+ continue;
+ // The PromiseAlloca will be specially handled since it needs to be in a
+ // fixed position in the frame.
+ if (AI == Shape.SwitchLowering.PromiseAlloca) {
+ continue;
+ }
+ bool ShouldLiveOnFrame = false;
+ auto Iter = LifetimeMap.find(AI);
+ if (Iter != LifetimeMap.end()) {
+ // Check against lifetime.start if the instruction has the info.
+ for (User *U : I.users()) {
+ for (auto *S : *Iter->second)
+ if ((ShouldLiveOnFrame = Checker.isDefinitionAcrossSuspend(*S, U)))
+ break;
+ if (ShouldLiveOnFrame)
+ break;
+ }
+ if (!ShouldLiveOnFrame)
+ continue;
+ }
+ // At this point, either ShouldLiveOnFrame is true or we didn't have
+ // lifetime information. We will need to rely on more precise pointer
+ // tracking.
+ DominatorTree DT(F);
+ AllocaUseVisitor Visitor{F.getParent()->getDataLayout(), DT,
+ *Shape.CoroBegin, Checker};
+ Visitor.visitPtr(*AI);
+ if (!Visitor.getShouldLiveOnFrame())
+ continue;
+ Allocas.emplace_back(AI, Visitor.getAliasesCopy(),
+ Visitor.getMayWriteBeforeCoroBegin());
+ }
+}
+
+void coro::salvageDebugInfo(
+ SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
+ DbgDeclareInst *DDI, bool LoadFromFramePtr) {
+ Function *F = DDI->getFunction();
+ IRBuilder<> Builder(F->getContext());
+ auto InsertPt = F->getEntryBlock().getFirstInsertionPt();
+ while (isa<IntrinsicInst>(InsertPt))
+ ++InsertPt;
+ Builder.SetInsertPoint(&F->getEntryBlock(), InsertPt);
+ DIExpression *Expr = DDI->getExpression();
+ // Follow the pointer arithmetic all the way to the incoming
+ // function argument and convert into a DIExpression.
+ Value *Storage = DDI->getAddress();
+ while (Storage) {
+ if (auto *LdInst = dyn_cast<LoadInst>(Storage)) {
+ Storage = LdInst->getOperand(0);
+ } else if (auto *StInst = dyn_cast<StoreInst>(Storage)) {
+ Storage = StInst->getOperand(0);
+ } else if (auto *GEPInst = dyn_cast<GetElementPtrInst>(Storage)) {
+ Expr = llvm::salvageDebugInfoImpl(*GEPInst, Expr,
+ /*WithStackValue=*/false);
+ Storage = GEPInst->getOperand(0);
+ } else if (auto *BCInst = dyn_cast<llvm::BitCastInst>(Storage))
+ Storage = BCInst->getOperand(0);
+ else
+ break;
+ }
+ // Store a pointer to the coroutine frame object in an alloca so it
+ // is available throughout the function when producing unoptimized
+ // code. Extending the lifetime this way is correct because the
+ // variable has been declared by a dbg.declare intrinsic.
+ if (auto Arg = dyn_cast_or_null<llvm::Argument>(Storage)) {
+ auto &Cached = DbgPtrAllocaCache[Storage];
+ if (!Cached) {
+ Cached = Builder.CreateAlloca(Storage->getType(), 0, nullptr,
+ Arg->getName() + ".debug");
+ Builder.CreateStore(Storage, Cached);
+ }
+ Storage = Cached;
+ }
+ // The FramePtr object adds one extra layer of indirection that
+ // needs to be unwrapped.
+ if (LoadFromFramePtr)
+ Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
+ auto &VMContext = DDI->getFunction()->getContext();
+ DDI->setOperand(
+ 0, MetadataAsValue::get(VMContext, ValueAsMetadata::get(Storage)));
+ DDI->setOperand(2, MetadataAsValue::get(VMContext, Expr));
+ if (auto *InsertPt = dyn_cast_or_null<Instruction>(Storage))
+ DDI->moveAfter(InsertPt);
+}
+
+void coro::buildCoroutineFrame(Function &F, Shape &Shape) {
+ eliminateSwiftError(F, Shape);
+
+ if (Shape.ABI == coro::ABI::Switch &&
+ Shape.SwitchLowering.PromiseAlloca) {
+ Shape.getSwitchCoroId()->clearPromise();
+ }
+
+ // Make sure that all coro.save, coro.suspend and the fallthrough coro.end
+ // intrinsics are in their own blocks to simplify the logic of building up
+ // SuspendCrossing data.
+ for (auto *CSI : Shape.CoroSuspends) {
+ if (auto *Save = CSI->getCoroSave())
+ splitAround(Save, "CoroSave");
+ splitAround(CSI, "CoroSuspend");
+ }
+
+ // Put CoroEnds into their own blocks.
+ for (AnyCoroEndInst *CE : Shape.CoroEnds) {
+ splitAround(CE, "CoroEnd");
+
+    // Emit the musttail call function in a new block before the CoroEnd.
+    // We do this here so that the right suspend-crossing info is computed for
+    // the uses of the musttail call. (Arguments to the coro.end instruction
+    // itself would be ignored.)
+ if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(CE)) {
+ auto *MustTailCallFn = AsyncEnd->getMustTailCallFunction();
+ if (!MustTailCallFn)
+ continue;
+ IRBuilder<> Builder(AsyncEnd);
+ SmallVector<Value *, 8> Args(AsyncEnd->args());
+ auto Arguments = ArrayRef<Value *>(Args).drop_front(3);
+ auto *Call = createMustTailCall(AsyncEnd->getDebugLoc(), MustTailCallFn,
+ Arguments, Builder);
+ splitAround(Call, "MustTailCall.Before.CoroEnd");
+ }
+ }
+
+  // Transform multi-edge PHI nodes, so that any value feeding into a PHI will
+  // never have its definition separated from the PHI by a suspend point.
+ rewritePHIs(F);
+
+ // Build suspend crossing info.
+ SuspendCrossingInfo Checker(F, Shape);
+
+ IRBuilder<> Builder(F.getContext());
+ FrameDataInfo FrameData;
+ SmallVector<CoroAllocaAllocInst*, 4> LocalAllocas;
+ SmallVector<Instruction*, 4> DeadInstructions;
+
+ {
+ SpillInfo Spills;
+ for (int Repeat = 0; Repeat < 4; ++Repeat) {
+ // See if there are materializable instructions across suspend points.
+ for (Instruction &I : instructions(F))
+ if (materializable(I))
+ for (User *U : I.users())
+ if (Checker.isDefinitionAcrossSuspend(I, U))
+ Spills[&I].push_back(cast<Instruction>(U));
+
+ if (Spills.empty())
+ break;
+
+ // Rewrite materializable instructions to be materialized at the use
+ // point.
+ LLVM_DEBUG(dumpSpills("Materializations", Spills));
+ rewriteMaterializableInstructions(Builder, Spills);
+ Spills.clear();
+ }
+ }
+
+ sinkLifetimeStartMarkers(F, Shape, Checker);
+ collectFrameAllocas(F, Shape, Checker, FrameData.Allocas);
+ LLVM_DEBUG(dumpAllocas(FrameData.Allocas));
+
+  // Collect the spills for arguments and other non-materializable values.
+ for (Argument &A : F.args())
+ for (User *U : A.users())
+ if (Checker.isDefinitionAcrossSuspend(A, U))
+ FrameData.Spills[&A].push_back(cast<Instruction>(U));
+
+ for (Instruction &I : instructions(F)) {
+ // Values returned from coroutine structure intrinsics should not be part
+ // of the Coroutine Frame.
+ if (isCoroutineStructureIntrinsic(I) || &I == Shape.CoroBegin)
+ continue;
+
+    // The coroutine promise is always included in the coroutine frame; there
+    // is no need to check for suspend crossing.
+ if (Shape.ABI == coro::ABI::Switch &&
+ Shape.SwitchLowering.PromiseAlloca == &I)
+ continue;
+
+ // Handle alloca.alloc specially here.
+ if (auto AI = dyn_cast<CoroAllocaAllocInst>(&I)) {
+ // Check whether the alloca's lifetime is bounded by suspend points.
+ if (isLocalAlloca(AI)) {
+ LocalAllocas.push_back(AI);
+ continue;
+ }
+
+ // If not, do a quick rewrite of the alloca and then add spills of
+ // the rewritten value. The rewrite doesn't invalidate anything in
+ // Spills because the other alloca intrinsics have no other operands
+ // besides AI, and it doesn't invalidate the iteration because we delay
+ // erasing AI.
+ auto Alloc = lowerNonLocalAlloca(AI, Shape, DeadInstructions);
+
+ for (User *U : Alloc->users()) {
+ if (Checker.isDefinitionAcrossSuspend(*Alloc, U))
+ FrameData.Spills[Alloc].push_back(cast<Instruction>(U));
+ }
+ continue;
+ }
+
+ // Ignore alloca.get; we process this as part of coro.alloca.alloc.
+ if (isa<CoroAllocaGetInst>(I))
+ continue;
+
+ if (isa<AllocaInst>(I))
+ continue;
+
+ for (User *U : I.users())
+ if (Checker.isDefinitionAcrossSuspend(I, U)) {
+ // We cannot spill a token.
+ if (I.getType()->isTokenTy())
+ report_fatal_error(
+ "token definition is separated from the use by a suspend point");
+ FrameData.Spills[&I].push_back(cast<Instruction>(U));
+ }
+ }
+ LLVM_DEBUG(dumpSpills("Spills", FrameData.Spills));
+ if (Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
+ Shape.ABI == coro::ABI::Async)
+ sinkSpillUsesAfterCoroBegin(F, FrameData, Shape.CoroBegin);
+ Shape.FrameTy = buildFrameType(F, Shape, FrameData);
+ Shape.FramePtr = insertSpills(FrameData, Shape);
+ lowerLocalAllocas(LocalAllocas, DeadInstructions);
+
+ for (auto I : DeadInstructions)
+ I->eraseFromParent();
+}
diff --git a/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroInstr.h b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroInstr.h
new file mode 100644
index 00000000000..9fa2fd12f80
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroInstr.h
@@ -0,0 +1,691 @@
+//===-- CoroInstr.h - Coroutine Intrinsics Instruction Wrappers -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file defines classes that make it really easy to deal with intrinsic
+// functions with the isa/dyn_cast family of functions. In particular, this
+// allows you to do things like:
+//
+// if (auto *SF = dyn_cast<CoroSubFnInst>(Inst))
+// ... SF->getFrame() ...
+//
+// All intrinsic function calls are instances of the call instruction, so these
+// are all subclasses of the CallInst class. Note that none of these classes
+// has state or virtual methods, which is an important part of this gross/neat
+// hack working.
+//
+// The helpful comment above is borrowed from llvm/IntrinsicInst.h; we keep
+// coroutine intrinsic wrappers here since they are only used by the passes in
+// the Coroutine library.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TRANSFORMS_COROUTINES_COROINSTR_H
+#define LLVM_LIB_TRANSFORMS_COROUTINES_COROINSTR_H
+
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+
+/// This class represents the llvm.coro.subfn.addr instruction.
+class LLVM_LIBRARY_VISIBILITY CoroSubFnInst : public IntrinsicInst {
+ enum { FrameArg, IndexArg };
+
+public:
+ enum ResumeKind {
+ RestartTrigger = -1,
+ ResumeIndex,
+ DestroyIndex,
+ CleanupIndex,
+ IndexLast,
+ IndexFirst = RestartTrigger
+ };
+
+ Value *getFrame() const { return getArgOperand(FrameArg); }
+ ResumeKind getIndex() const {
+ int64_t Index = getRawIndex()->getValue().getSExtValue();
+ assert(Index >= IndexFirst && Index < IndexLast &&
+ "unexpected CoroSubFnInst index argument");
+ return static_cast<ResumeKind>(Index);
+ }
+
+ ConstantInt *getRawIndex() const {
+ return cast<ConstantInt>(getArgOperand(IndexArg));
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_subfn_addr;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
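+
+// Illustrative use of the wrapper above (a hypothetical caller, not code from
+// this library):
+//
+//   if (auto *SF = dyn_cast<CoroSubFnInst>(&I))
+//     if (SF->getIndex() == CoroSubFnInst::ResumeIndex)
+//       /* replace the call with the resume slot of SF->getFrame() */;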
+
+/// This represents the llvm.coro.alloc instruction.
+class LLVM_LIBRARY_VISIBILITY CoroAllocInst : public IntrinsicInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_alloc;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents a common base class for llvm.coro.id instructions.
+class LLVM_LIBRARY_VISIBILITY AnyCoroIdInst : public IntrinsicInst {
+public:
+ CoroAllocInst *getCoroAlloc() {
+ for (User *U : users())
+ if (auto *CA = dyn_cast<CoroAllocInst>(U))
+ return CA;
+ return nullptr;
+ }
+
+ IntrinsicInst *getCoroBegin() {
+ for (User *U : users())
+ if (auto *II = dyn_cast<IntrinsicInst>(U))
+ if (II->getIntrinsicID() == Intrinsic::coro_begin)
+ return II;
+ llvm_unreachable("no coro.begin associated with coro.id");
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ auto ID = I->getIntrinsicID();
+ return ID == Intrinsic::coro_id || ID == Intrinsic::coro_id_retcon ||
+ ID == Intrinsic::coro_id_retcon_once ||
+ ID == Intrinsic::coro_id_async;
+ }
+
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.id instruction.
+class LLVM_LIBRARY_VISIBILITY CoroIdInst : public AnyCoroIdInst {
+ enum { AlignArg, PromiseArg, CoroutineArg, InfoArg };
+
+public:
+ AllocaInst *getPromise() const {
+ Value *Arg = getArgOperand(PromiseArg);
+ return isa<ConstantPointerNull>(Arg)
+ ? nullptr
+ : cast<AllocaInst>(Arg->stripPointerCasts());
+ }
+
+ void clearPromise() {
+ Value *Arg = getArgOperand(PromiseArg);
+ setArgOperand(PromiseArg,
+ ConstantPointerNull::get(Type::getInt8PtrTy(getContext())));
+ if (isa<AllocaInst>(Arg))
+ return;
+ assert((isa<BitCastInst>(Arg) || isa<GetElementPtrInst>(Arg)) &&
+ "unexpected instruction designating the promise");
+ // TODO: Add a check that any remaining users of Inst are after coro.begin
+ // or add code to move the users after coro.begin.
+ auto *Inst = cast<Instruction>(Arg);
+ if (Inst->use_empty()) {
+ Inst->eraseFromParent();
+ return;
+ }
+ Inst->moveBefore(getCoroBegin()->getNextNode());
+ }
+
+  // The Info argument of coro.id is:
+  //   fresh out of the frontend: null ;
+  //   outlined                 : {Init, Return, Susp1, Susp2, ...} ;
+  //   postsplit                : [resume, destroy, cleanup] ;
+  //
+  // If parts of the coroutine were outlined to protect against undesirable
+  // code motion, these functions will be stored in a struct literal referred
+  // to by the Info parameter. Note: this is only needed before the coroutine
+  // is split.
+  //
+  // After the coroutine is split, the resume functions are stored in an array
+  // referred to by this parameter.
+
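+  // For illustration, after splitting, the Info operand typically refers to a
+  // private constant array of the resume functions, e.g. (names assumed):
+  //
+  //   @f.resumers = private constant [3 x void (%f.Frame*)*]
+  //                 [void (%f.Frame*)* @f.resume,
+  //                  void (%f.Frame*)* @f.destroy,
+  //                  void (%f.Frame*)* @f.cleanup]
+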
+ struct Info {
+ ConstantStruct *OutlinedParts = nullptr;
+ ConstantArray *Resumers = nullptr;
+
+ bool hasOutlinedParts() const { return OutlinedParts != nullptr; }
+ bool isPostSplit() const { return Resumers != nullptr; }
+ bool isPreSplit() const { return !isPostSplit(); }
+ };
+ Info getInfo() const {
+ Info Result;
+ auto *GV = dyn_cast<GlobalVariable>(getRawInfo());
+ if (!GV)
+ return Result;
+
+ assert(GV->isConstant() && GV->hasDefinitiveInitializer());
+ Constant *Initializer = GV->getInitializer();
+ if ((Result.OutlinedParts = dyn_cast<ConstantStruct>(Initializer)))
+ return Result;
+
+ Result.Resumers = cast<ConstantArray>(Initializer);
+ return Result;
+ }
+ Constant *getRawInfo() const {
+ return cast<Constant>(getArgOperand(InfoArg)->stripPointerCasts());
+ }
+
+ void setInfo(Constant *C) { setArgOperand(InfoArg, C); }
+
+ Function *getCoroutine() const {
+ return cast<Function>(getArgOperand(CoroutineArg)->stripPointerCasts());
+ }
+ void setCoroutineSelf() {
+ assert(isa<ConstantPointerNull>(getArgOperand(CoroutineArg)) &&
+ "Coroutine argument is already assigned");
+ auto *const Int8PtrTy = Type::getInt8PtrTy(getContext());
+ setArgOperand(CoroutineArg,
+ ConstantExpr::getBitCast(getFunction(), Int8PtrTy));
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_id;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents either the llvm.coro.id.retcon or
+/// llvm.coro.id.retcon.once instruction.
+class LLVM_LIBRARY_VISIBILITY AnyCoroIdRetconInst : public AnyCoroIdInst {
+ enum { SizeArg, AlignArg, StorageArg, PrototypeArg, AllocArg, DeallocArg };
+
+public:
+ void checkWellFormed() const;
+
+ uint64_t getStorageSize() const {
+ return cast<ConstantInt>(getArgOperand(SizeArg))->getZExtValue();
+ }
+
+ Align getStorageAlignment() const {
+ return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
+ }
+
+ Value *getStorage() const {
+ return getArgOperand(StorageArg);
+ }
+
+ /// Return the prototype for the continuation function. The type,
+ /// attributes, and calling convention of the continuation function(s)
+ /// are taken from this declaration.
+ Function *getPrototype() const {
+ return cast<Function>(getArgOperand(PrototypeArg)->stripPointerCasts());
+ }
+
+ /// Return the function to use for allocating memory.
+ Function *getAllocFunction() const {
+ return cast<Function>(getArgOperand(AllocArg)->stripPointerCasts());
+ }
+
+ /// Return the function to use for deallocating memory.
+ Function *getDeallocFunction() const {
+ return cast<Function>(getArgOperand(DeallocArg)->stripPointerCasts());
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ auto ID = I->getIntrinsicID();
+ return ID == Intrinsic::coro_id_retcon
+ || ID == Intrinsic::coro_id_retcon_once;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.id.retcon instruction.
+class LLVM_LIBRARY_VISIBILITY CoroIdRetconInst
+ : public AnyCoroIdRetconInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_id_retcon;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.id.retcon.once instruction.
+class LLVM_LIBRARY_VISIBILITY CoroIdRetconOnceInst
+ : public AnyCoroIdRetconInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_id_retcon_once;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.id.async instruction.
+class LLVM_LIBRARY_VISIBILITY CoroIdAsyncInst : public AnyCoroIdInst {
+ enum { SizeArg, AlignArg, StorageArg, AsyncFuncPtrArg };
+
+public:
+ void checkWellFormed() const;
+
+  /// The initial async function context size, whose fields are reserved for
+  /// use by the frontend. The frame will be allocated as a tail of this
+  /// context.
+ uint64_t getStorageSize() const {
+ return cast<ConstantInt>(getArgOperand(SizeArg))->getZExtValue();
+ }
+
+ /// The alignment of the initial async function context.
+ Align getStorageAlignment() const {
+ return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
+ }
+
+ /// The async context parameter.
+ Value *getStorage() const {
+ return getParent()->getParent()->getArg(getStorageArgumentIndex());
+ }
+
+ unsigned getStorageArgumentIndex() const {
+ auto *Arg = cast<ConstantInt>(getArgOperand(StorageArg));
+ return Arg->getZExtValue();
+ }
+
+  /// Return the async function pointer address. This should be the address of
+  /// an async function pointer struct for the current async function.
+ /// struct async_function_pointer {
+ /// uint32_t context_size;
+ /// uint32_t relative_async_function_pointer;
+ /// };
+ GlobalVariable *getAsyncFunctionPointer() const {
+ return cast<GlobalVariable>(
+ getArgOperand(AsyncFuncPtrArg)->stripPointerCasts());
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ auto ID = I->getIntrinsicID();
+ return ID == Intrinsic::coro_id_async;
+ }
+
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.context.alloc instruction.
+class LLVM_LIBRARY_VISIBILITY CoroAsyncContextAllocInst : public IntrinsicInst {
+ enum { AsyncFuncPtrArg };
+
+public:
+ GlobalVariable *getAsyncFunctionPointer() const {
+ return cast<GlobalVariable>(
+ getArgOperand(AsyncFuncPtrArg)->stripPointerCasts());
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_async_context_alloc;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.context.dealloc instruction.
+class LLVM_LIBRARY_VISIBILITY CoroAsyncContextDeallocInst
+ : public IntrinsicInst {
+ enum { AsyncContextArg };
+
+public:
+ Value *getAsyncContext() const {
+ return getArgOperand(AsyncContextArg)->stripPointerCasts();
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_async_context_dealloc;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.async.resume instruction.
+/// During lowering this is replaced by the resume function of a suspend point
+/// (the continuation function).
+class LLVM_LIBRARY_VISIBILITY CoroAsyncResumeInst : public IntrinsicInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_async_resume;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.frame instruction.
+class LLVM_LIBRARY_VISIBILITY CoroFrameInst : public IntrinsicInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_frame;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.free instruction.
+class LLVM_LIBRARY_VISIBILITY CoroFreeInst : public IntrinsicInst {
+ enum { IdArg, FrameArg };
+
+public:
+ Value *getFrame() const { return getArgOperand(FrameArg); }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_free;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This class represents the llvm.coro.begin instruction.
+class LLVM_LIBRARY_VISIBILITY CoroBeginInst : public IntrinsicInst {
+ enum { IdArg, MemArg };
+
+public:
+ AnyCoroIdInst *getId() const {
+ return cast<AnyCoroIdInst>(getArgOperand(IdArg));
+ }
+
+ Value *getMem() const { return getArgOperand(MemArg); }
+
+  // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_begin;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.save instruction.
+class LLVM_LIBRARY_VISIBILITY CoroSaveInst : public IntrinsicInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_save;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.promise instruction.
+class LLVM_LIBRARY_VISIBILITY CoroPromiseInst : public IntrinsicInst {
+ enum { FrameArg, AlignArg, FromArg };
+
+public:
+ /// Are we translating from the frame to the promise (false) or from
+ /// the promise to the frame (true)?
+ bool isFromPromise() const {
+ return cast<Constant>(getArgOperand(FromArg))->isOneValue();
+ }
+
+ /// The required alignment of the promise. This must match the
+ /// alignment of the promise alloca in the coroutine.
+ Align getAlignment() const {
+ return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_promise;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY AnyCoroSuspendInst : public IntrinsicInst {
+public:
+ CoroSaveInst *getCoroSave() const;
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_suspend ||
+ I->getIntrinsicID() == Intrinsic::coro_suspend_async ||
+ I->getIntrinsicID() == Intrinsic::coro_suspend_retcon;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.suspend instruction.
+class LLVM_LIBRARY_VISIBILITY CoroSuspendInst : public AnyCoroSuspendInst {
+ enum { SaveArg, FinalArg };
+
+public:
+ CoroSaveInst *getCoroSave() const {
+ Value *Arg = getArgOperand(SaveArg);
+ if (auto *SI = dyn_cast<CoroSaveInst>(Arg))
+ return SI;
+ assert(isa<ConstantTokenNone>(Arg));
+ return nullptr;
+ }
+
+ bool isFinal() const {
+ return cast<Constant>(getArgOperand(FinalArg))->isOneValue();
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_suspend;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+inline CoroSaveInst *AnyCoroSuspendInst::getCoroSave() const {
+ if (auto Suspend = dyn_cast<CoroSuspendInst>(this))
+ return Suspend->getCoroSave();
+ return nullptr;
+}
+
+/// This represents the llvm.coro.suspend.async instruction.
+class LLVM_LIBRARY_VISIBILITY CoroSuspendAsyncInst : public AnyCoroSuspendInst {
+ enum { ResumeFunctionArg, AsyncContextProjectionArg, MustTailCallFuncArg };
+
+public:
+ void checkWellFormed() const;
+
+ Function *getAsyncContextProjectionFunction() const {
+ return cast<Function>(
+ getArgOperand(AsyncContextProjectionArg)->stripPointerCasts());
+ }
+
+ CoroAsyncResumeInst *getResumeFunction() const {
+ return cast<CoroAsyncResumeInst>(
+ getArgOperand(ResumeFunctionArg)->stripPointerCasts());
+ }
+
+ Function *getMustTailCallFunction() const {
+ return cast<Function>(
+ getArgOperand(MustTailCallFuncArg)->stripPointerCasts());
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_suspend_async;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.suspend.retcon instruction.
+class LLVM_LIBRARY_VISIBILITY CoroSuspendRetconInst : public AnyCoroSuspendInst {
+public:
+ op_iterator value_begin() { return arg_begin(); }
+ const_op_iterator value_begin() const { return arg_begin(); }
+
+ op_iterator value_end() { return arg_end(); }
+ const_op_iterator value_end() const { return arg_end(); }
+
+ iterator_range<op_iterator> value_operands() {
+ return make_range(value_begin(), value_end());
+ }
+ iterator_range<const_op_iterator> value_operands() const {
+ return make_range(value_begin(), value_end());
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_suspend_retcon;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.size instruction.
+class LLVM_LIBRARY_VISIBILITY CoroSizeInst : public IntrinsicInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_size;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY AnyCoroEndInst : public IntrinsicInst {
+ enum { FrameArg, UnwindArg };
+
+public:
+ bool isFallthrough() const { return !isUnwind(); }
+ bool isUnwind() const {
+ return cast<Constant>(getArgOperand(UnwindArg))->isOneValue();
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ auto ID = I->getIntrinsicID();
+ return ID == Intrinsic::coro_end || ID == Intrinsic::coro_end_async;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.end instruction.
+class LLVM_LIBRARY_VISIBILITY CoroEndInst : public AnyCoroEndInst {
+public:
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_end;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.end instruction.
+class LLVM_LIBRARY_VISIBILITY CoroAsyncEndInst : public AnyCoroEndInst {
+ enum { FrameArg, UnwindArg, MustTailCallFuncArg };
+
+public:
+ void checkWellFormed() const;
+
+ Function *getMustTailCallFunction() const {
+ if (getNumArgOperands() < 3)
+ return nullptr;
+
+ return cast<Function>(
+ getArgOperand(MustTailCallFuncArg)->stripPointerCasts());
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_end_async;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.alloca.alloc instruction.
+class LLVM_LIBRARY_VISIBILITY CoroAllocaAllocInst : public IntrinsicInst {
+ enum { SizeArg, AlignArg };
+public:
+ Value *getSize() const {
+ return getArgOperand(SizeArg);
+ }
+ Align getAlignment() const {
+ return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_alloca_alloc;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.alloca.get instruction.
+class LLVM_LIBRARY_VISIBILITY CoroAllocaGetInst : public IntrinsicInst {
+ enum { AllocArg };
+public:
+ CoroAllocaAllocInst *getAlloc() const {
+ return cast<CoroAllocaAllocInst>(getArgOperand(AllocArg));
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_alloca_get;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+/// This represents the llvm.coro.alloca.free instruction.
+class LLVM_LIBRARY_VISIBILITY CoroAllocaFreeInst : public IntrinsicInst {
+ enum { AllocArg };
+public:
+ CoroAllocaAllocInst *getAlloc() const {
+ return cast<CoroAllocaAllocInst>(getArgOperand(AllocArg));
+ }
+
+ // Methods to support type inquiry through isa, cast, and dyn_cast:
+ static bool classof(const IntrinsicInst *I) {
+ return I->getIntrinsicID() == Intrinsic::coro_alloca_free;
+ }
+ static bool classof(const Value *V) {
+ return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
+ }
+};
+
+} // End namespace llvm.
+
+#endif
diff --git a/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroInternal.h b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroInternal.h
new file mode 100644
index 00000000000..6c0e52f2454
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroInternal.h
@@ -0,0 +1,283 @@
+//===- CoroInternal.h - Internal Coroutine interfaces ---------*- C++ -*---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Common definitions/declarations used internally by coroutine lowering passes.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H
+#define LLVM_LIB_TRANSFORMS_COROUTINES_COROINTERNAL_H
+
+#include "CoroInstr.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/Transforms/Coroutines.h"
+
+namespace llvm {
+
+class CallGraph;
+class CallGraphSCC;
+class PassRegistry;
+
+void initializeCoroEarlyLegacyPass(PassRegistry &);
+void initializeCoroSplitLegacyPass(PassRegistry &);
+void initializeCoroElideLegacyPass(PassRegistry &);
+void initializeCoroCleanupLegacyPass(PassRegistry &);
+
+// The CoroEarly pass marks every function that has a coro.begin with the
+// string attribute "coroutine.presplit"="0". The CoroSplit pass processes a
+// coroutine twice. First, it lets the coroutine go through the complete IPO
+// optimization pipeline as a single function, and forces a restart of the
+// pipeline by inserting an indirect call to an empty function
+// "coro.devirt.trigger", which is devirtualized by the CoroElide pass; that in
+// turn triggers a restart of the pipeline by the CGPassManager. When the
+// CoroSplit pass sees the same coroutine the second time, it splits it up and
+// adds the coroutine subfunctions to the SCC to be processed by the IPO
+// pipeline. Async lowering similarly triggers a restart of the pipeline after
+// it has split the coroutine.
+#define CORO_PRESPLIT_ATTR "coroutine.presplit"
+#define UNPREPARED_FOR_SPLIT "0"
+#define PREPARED_FOR_SPLIT "1"
+#define ASYNC_RESTART_AFTER_SPLIT "2"
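+
+// In IR, a presplit coroutine therefore carries a string attribute, e.g.
+// (sketch):
+//
+//   define void @f() "coroutine.presplit"="0" { ... }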
+
+#define CORO_DEVIRT_TRIGGER_FN "coro.devirt.trigger"
+
+namespace coro {
+
+bool declaresIntrinsics(const Module &M,
+ const std::initializer_list<StringRef>);
+void replaceCoroFree(CoroIdInst *CoroId, bool Elide);
+void updateCallGraph(Function &Caller, ArrayRef<Function *> Funcs,
+ CallGraph &CG, CallGraphSCC &SCC);
+/// Recover a dbg.declare prepared by the frontend and emit an alloca
+/// holding a pointer to the coroutine frame.
+void salvageDebugInfo(
+ SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> &DbgPtrAllocaCache,
+ DbgDeclareInst *DDI, bool LoadFromCoroFrame = false);
+
+// Keeps data and helper functions for lowering coroutine intrinsics.
+struct LowererBase {
+ Module &TheModule;
+ LLVMContext &Context;
+ PointerType *const Int8Ptr;
+ FunctionType *const ResumeFnType;
+ ConstantPointerNull *const NullPtr;
+
+ LowererBase(Module &M);
+ Value *makeSubFnCall(Value *Arg, int Index, Instruction *InsertPt);
+};
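+
+// A hypothetical use of the helper above (sketch; CB names a call site in the
+// caller):
+//
+//   LowererBase LB(M);
+//   Value *ResumeAddr =
+//       LB.makeSubFnCall(Frame, CoroSubFnInst::ResumeIndex, /*InsertPt=*/CB);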
+
+enum class ABI {
+ /// The "resume-switch" lowering, where there are separate resume and
+ /// destroy functions that are shared between all suspend points. The
+ /// coroutine frame implicitly stores the resume and destroy functions,
+ /// the current index, and any promise value.
+ Switch,
+
+ /// The "returned-continuation" lowering, where each suspend point creates a
+ /// single continuation function that is used for both resuming and
+ /// destroying. Does not support promises.
+ Retcon,
+
+ /// The "unique returned-continuation" lowering, where each suspend point
+ /// creates a single continuation function that is used for both resuming
+ /// and destroying. Does not support promises. The function is known to
+ /// suspend at most once during its execution, and the return value of
+ /// the continuation is void.
+ RetconOnce,
+
+ /// The "async continuation" lowering, where each suspend point creates a
+ /// single continuation function. The continuation function is available as an
+ /// intrinsic.
+ Async,
+};
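+
+// For the retcon ABIs, for example, a frontend might emit something like the
+// following (sketch; the @prototype/@allocate/@deallocate names are
+// assumptions):
+//
+//   %id = call token @llvm.coro.id.retcon(i32 8, i32 8, i8* %buffer,
+//           i8* bitcast ({i8*, i32} (i8*, i1)* @prototype to i8*),
+//           i8* bitcast (i8* (i32)* @allocate to i8*),
+//           i8* bitcast (void (i8*)* @deallocate to i8*))
+//
+// where @prototype fixes the type, attributes and calling convention of the
+// continuation functions.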
+
+// Holds the structural coroutine intrinsics for a particular function, along
+// with other values used during the CoroSplit pass.
+struct LLVM_LIBRARY_VISIBILITY Shape {
+ CoroBeginInst *CoroBegin;
+ SmallVector<AnyCoroEndInst *, 4> CoroEnds;
+ SmallVector<CoroSizeInst *, 2> CoroSizes;
+ SmallVector<AnyCoroSuspendInst *, 4> CoroSuspends;
+ SmallVector<CallInst*, 2> SwiftErrorOps;
+
+ // Field indexes for special fields in the switch lowering.
+ struct SwitchFieldIndex {
+ enum {
+ Resume,
+ Destroy
+
+      // The promise field is always at a fixed offset from the start of the
+      // frame given its type, but the index isn't a constant for all
+      // possible frames.
+
+ // The switch-index field isn't at a fixed offset or index, either;
+ // we just work it in where it fits best.
+ };
+ };
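+
+  // A switch-lowering frame thus typically begins as follows (sketch; spills
+  // and the index field are laid out later):
+  //
+  //   %f.Frame = type { void (%f.Frame*)*,   ; Resume
+  //                     void (%f.Frame*)*,   ; Destroy
+  //                     ... }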
+
+ coro::ABI ABI;
+
+ StructType *FrameTy;
+ Align FrameAlign;
+ uint64_t FrameSize;
+ Instruction *FramePtr;
+ BasicBlock *AllocaSpillBlock;
+
+ bool ReuseFrameSlot;
+
+ struct SwitchLoweringStorage {
+ SwitchInst *ResumeSwitch;
+ AllocaInst *PromiseAlloca;
+ BasicBlock *ResumeEntryBlock;
+ unsigned IndexField;
+ bool HasFinalSuspend;
+ };
+
+ struct RetconLoweringStorage {
+ Function *ResumePrototype;
+ Function *Alloc;
+ Function *Dealloc;
+ BasicBlock *ReturnBlock;
+ bool IsFrameInlineInStorage;
+ };
+
+ struct AsyncLoweringStorage {
+ FunctionType *AsyncFuncTy;
+ Value *Context;
+ unsigned ContextArgNo;
+ uint64_t ContextHeaderSize;
+ uint64_t ContextAlignment;
+ uint64_t FrameOffset; // Start of the frame.
+ uint64_t ContextSize; // Includes frame size.
+ GlobalVariable *AsyncFuncPointer;
+
+ Align getContextAlignment() const { return Align(ContextAlignment); }
+ };
+
+ union {
+ SwitchLoweringStorage SwitchLowering;
+ RetconLoweringStorage RetconLowering;
+ AsyncLoweringStorage AsyncLowering;
+ };
+
+ CoroIdInst *getSwitchCoroId() const {
+ assert(ABI == coro::ABI::Switch);
+ return cast<CoroIdInst>(CoroBegin->getId());
+ }
+
+ AnyCoroIdRetconInst *getRetconCoroId() const {
+ assert(ABI == coro::ABI::Retcon ||
+ ABI == coro::ABI::RetconOnce);
+ return cast<AnyCoroIdRetconInst>(CoroBegin->getId());
+ }
+
+ CoroIdAsyncInst *getAsyncCoroId() const {
+ assert(ABI == coro::ABI::Async);
+ return cast<CoroIdAsyncInst>(CoroBegin->getId());
+ }
+
+ unsigned getSwitchIndexField() const {
+ assert(ABI == coro::ABI::Switch);
+ assert(FrameTy && "frame type not assigned");
+ return SwitchLowering.IndexField;
+ }
+ IntegerType *getIndexType() const {
+ assert(ABI == coro::ABI::Switch);
+ assert(FrameTy && "frame type not assigned");
+ return cast<IntegerType>(FrameTy->getElementType(getSwitchIndexField()));
+ }
+ ConstantInt *getIndex(uint64_t Value) const {
+ return ConstantInt::get(getIndexType(), Value);
+ }
+
+ PointerType *getSwitchResumePointerType() const {
+ assert(ABI == coro::ABI::Switch);
+ assert(FrameTy && "frame type not assigned");
+ return cast<PointerType>(FrameTy->getElementType(SwitchFieldIndex::Resume));
+ }
+
+ FunctionType *getResumeFunctionType() const {
+ switch (ABI) {
+ case coro::ABI::Switch: {
+ auto *FnPtrTy = getSwitchResumePointerType();
+ return cast<FunctionType>(FnPtrTy->getPointerElementType());
+ }
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce:
+ return RetconLowering.ResumePrototype->getFunctionType();
+ case coro::ABI::Async:
+ return AsyncLowering.AsyncFuncTy;
+ }
+
+ llvm_unreachable("Unknown coro::ABI enum");
+ }
+
+ ArrayRef<Type*> getRetconResultTypes() const {
+ assert(ABI == coro::ABI::Retcon ||
+ ABI == coro::ABI::RetconOnce);
+ auto FTy = CoroBegin->getFunction()->getFunctionType();
+
+ // The safety of all this is checked by checkWFRetconPrototype.
+ if (auto STy = dyn_cast<StructType>(FTy->getReturnType())) {
+ return STy->elements().slice(1);
+ } else {
+ return ArrayRef<Type*>();
+ }
+ }
+
+ ArrayRef<Type*> getRetconResumeTypes() const {
+ assert(ABI == coro::ABI::Retcon ||
+ ABI == coro::ABI::RetconOnce);
+
+ // The safety of all this is checked by checkWFRetconPrototype.
+ auto FTy = RetconLowering.ResumePrototype->getFunctionType();
+ return FTy->params().slice(1);
+ }
+
+ CallingConv::ID getResumeFunctionCC() const {
+ switch (ABI) {
+ case coro::ABI::Switch:
+ return CallingConv::Fast;
+
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce:
+ return RetconLowering.ResumePrototype->getCallingConv();
+ case coro::ABI::Async:
+ return CallingConv::Swift;
+ }
+ llvm_unreachable("Unknown coro::ABI enum");
+ }
+
+ AllocaInst *getPromiseAlloca() const {
+ if (ABI == coro::ABI::Switch)
+ return SwitchLowering.PromiseAlloca;
+ return nullptr;
+ }
+
+ /// Allocate memory according to the rules of the active lowering.
+ ///
+ /// \param CG - if non-null, will be updated for the new call
+ Value *emitAlloc(IRBuilder<> &Builder, Value *Size, CallGraph *CG) const;
+
+ /// Deallocate memory according to the rules of the active lowering.
+ ///
+ /// \param CG - if non-null, will be updated for the new call
+ void emitDealloc(IRBuilder<> &Builder, Value *Ptr, CallGraph *CG) const;
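+
+  // For instance, a retcon lowering might pair these roughly as (sketch):
+  //   Value *RawFrame =
+  //       Shape.emitAlloc(Builder, Builder.getInt64(Shape.FrameSize), CG);
+  //   ...
+  //   Shape.emitDealloc(Builder, RawFrame, CG);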
+
+ Shape() = default;
+ explicit Shape(Function &F, bool ReuseFrameSlot = false)
+ : ReuseFrameSlot(ReuseFrameSlot) {
+ buildFrom(F);
+ }
+ void buildFrom(Function &F);
+};
+
+void buildCoroutineFrame(Function &F, Shape &Shape);
+CallInst *createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
+ ArrayRef<Value *> Arguments, IRBuilder<> &);
+} // End namespace coro.
+} // End namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroSplit.cpp b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroSplit.cpp
new file mode 100644
index 00000000000..c4d7db9153e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/Coroutines/CoroSplit.cpp
@@ -0,0 +1,2203 @@
+//===- CoroSplit.cpp - Converts a coroutine into a state machine ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This pass builds the coroutine frame and outlines resume and destroy parts
+// of the coroutine into separate functions.
+//
+// We present a coroutine to LLVM as an ordinary function with suspension
+// points marked up with intrinsics. We let the optimizer party on the
+// coroutine as a single function for as long as possible. Shortly before the
+// coroutine is eligible to be inlined into its callers, we split up the
+// coroutine into parts corresponding to the initial, resume and destroy
+// invocations of the coroutine, add them to the current SCC, and restart the
+// IPO pipeline to optimize the coroutine subfunctions we extracted before
+// proceeding to the caller of the coroutine.
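+//
+// For a switch-ABI coroutine @f, for example, this produces @f.resume,
+// @f.destroy and @f.cleanup clones that all share the coroutine frame.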
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Coroutines/CoroSplit.h"
+#include "CoroInstr.h"
+#include "CoroInternal.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/Analysis/LazyCallGraph.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CFG.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/BasicBlockUtils.h"
+#include "llvm/Transforms/Utils/CallGraphUpdater.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+#include <iterator>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "coro-split"
+
+namespace {
+
+/// A little helper class for building the clones (resume, destroy, cleanup or
+/// continuation functions) of a coroutine.
+class CoroCloner {
+public:
+ enum class Kind {
+ /// The shared resume function for a switch lowering.
+ SwitchResume,
+
+ /// The shared unwind function for a switch lowering.
+ SwitchUnwind,
+
+ /// The shared cleanup function for a switch lowering.
+ SwitchCleanup,
+
+ /// An individual continuation function.
+ Continuation,
+
+ /// An async resume function.
+ Async,
+ };
+
+private:
+ Function &OrigF;
+ Function *NewF;
+ const Twine &Suffix;
+ coro::Shape &Shape;
+ Kind FKind;
+ ValueToValueMapTy VMap;
+ IRBuilder<> Builder;
+ Value *NewFramePtr = nullptr;
+
+ /// The active suspend instruction; meaningful only for continuation and async
+ /// ABIs.
+ AnyCoroSuspendInst *ActiveSuspend = nullptr;
+
+public:
+ /// Create a cloner for a switch lowering.
+ CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
+ Kind FKind)
+ : OrigF(OrigF), NewF(nullptr), Suffix(Suffix), Shape(Shape),
+ FKind(FKind), Builder(OrigF.getContext()) {
+ assert(Shape.ABI == coro::ABI::Switch);
+ }
+
+ /// Create a cloner for a continuation lowering.
+ CoroCloner(Function &OrigF, const Twine &Suffix, coro::Shape &Shape,
+ Function *NewF, AnyCoroSuspendInst *ActiveSuspend)
+ : OrigF(OrigF), NewF(NewF), Suffix(Suffix), Shape(Shape),
+ FKind(Shape.ABI == coro::ABI::Async ? Kind::Async : Kind::Continuation),
+ Builder(OrigF.getContext()), ActiveSuspend(ActiveSuspend) {
+ assert(Shape.ABI == coro::ABI::Retcon ||
+ Shape.ABI == coro::ABI::RetconOnce || Shape.ABI == coro::ABI::Async);
+ assert(NewF && "need existing function for continuation");
+ assert(ActiveSuspend && "need active suspend point for continuation");
+ }
+
+ Function *getFunction() const {
+ assert(NewF != nullptr && "declaration not yet set");
+ return NewF;
+ }
+
+ void create();
+
+private:
+ bool isSwitchDestroyFunction() {
+ switch (FKind) {
+ case Kind::Async:
+ case Kind::Continuation:
+ case Kind::SwitchResume:
+ return false;
+ case Kind::SwitchUnwind:
+ case Kind::SwitchCleanup:
+ return true;
+ }
+ llvm_unreachable("Unknown CoroCloner::Kind enum");
+ }
+
+ void replaceEntryBlock();
+ Value *deriveNewFramePointer();
+ void replaceRetconOrAsyncSuspendUses();
+ void replaceCoroSuspends();
+ void replaceCoroEnds();
+ void replaceSwiftErrorOps();
+ void salvageDebugInfo();
+ void handleFinalSuspend();
+};
+
+} // end anonymous namespace
+
+static void maybeFreeRetconStorage(IRBuilder<> &Builder,
+ const coro::Shape &Shape, Value *FramePtr,
+ CallGraph *CG) {
+ assert(Shape.ABI == coro::ABI::Retcon ||
+ Shape.ABI == coro::ABI::RetconOnce);
+ if (Shape.RetconLowering.IsFrameInlineInStorage)
+ return;
+
+ Shape.emitDealloc(Builder, FramePtr, CG);
+}
+
+/// Replace an llvm.coro.end.async.
+/// This will inline the musttail call function, if there is one.
+/// \returns true if cleanup of the coro.end block is needed, false otherwise.
+static bool replaceCoroEndAsync(AnyCoroEndInst *End) {
+ IRBuilder<> Builder(End);
+
+ auto *EndAsync = dyn_cast<CoroAsyncEndInst>(End);
+ if (!EndAsync) {
+ Builder.CreateRetVoid();
+ return true /*needs cleanup of coro.end block*/;
+ }
+
+ auto *MustTailCallFunc = EndAsync->getMustTailCallFunction();
+ if (!MustTailCallFunc) {
+ Builder.CreateRetVoid();
+ return true /*needs cleanup of coro.end block*/;
+ }
+
+ // Move the must tail call from the predecessor block into the end block.
+ auto *CoroEndBlock = End->getParent();
+ auto *MustTailCallFuncBlock = CoroEndBlock->getSinglePredecessor();
+ assert(MustTailCallFuncBlock && "Must have a single predecessor block");
+ auto It = MustTailCallFuncBlock->getTerminator()->getIterator();
+ auto *MustTailCall = cast<CallInst>(&*std::prev(It));
+ CoroEndBlock->getInstList().splice(
+ End->getIterator(), MustTailCallFuncBlock->getInstList(), MustTailCall);
+
+ // Insert the return instruction.
+ Builder.SetInsertPoint(End);
+ Builder.CreateRetVoid();
+ InlineFunctionInfo FnInfo;
+
+ // Remove the rest of the block, by splitting it into an unreachable block.
+ auto *BB = End->getParent();
+ BB->splitBasicBlock(End);
+ BB->getTerminator()->eraseFromParent();
+
+ auto InlineRes = InlineFunction(*MustTailCall, FnInfo);
+ assert(InlineRes.isSuccess() && "Expected inlining to succeed");
+ (void)InlineRes;
+
+ // We have cleaned up the coro.end block above.
+ return false;
+}
+
+/// Replace a non-unwind call to llvm.coro.end.
+static void replaceFallthroughCoroEnd(AnyCoroEndInst *End,
+ const coro::Shape &Shape, Value *FramePtr,
+ bool InResume, CallGraph *CG) {
+ // Start inserting right before the coro.end.
+ IRBuilder<> Builder(End);
+
+ // Create the return instruction.
+ switch (Shape.ABI) {
+ // The cloned functions in switch-lowering always return void.
+ case coro::ABI::Switch:
+ // coro.end doesn't immediately end the coroutine in the main function
+ // in this lowering, because we need to deallocate the coroutine.
+ if (!InResume)
+ return;
+ Builder.CreateRetVoid();
+ break;
+
+ // In async lowering this returns.
+ case coro::ABI::Async: {
+ bool CoroEndBlockNeedsCleanup = replaceCoroEndAsync(End);
+ if (!CoroEndBlockNeedsCleanup)
+ return;
+ break;
+ }
+
+ // In unique continuation lowering, the continuations always return void.
+ // But we may have implicitly allocated storage.
+ case coro::ABI::RetconOnce:
+ maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
+ Builder.CreateRetVoid();
+ break;
+
+ // In non-unique continuation lowering, we signal completion by returning
+ // a null continuation.
+ case coro::ABI::Retcon: {
+ maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
+ auto RetTy = Shape.getResumeFunctionType()->getReturnType();
+ auto RetStructTy = dyn_cast<StructType>(RetTy);
+ PointerType *ContinuationTy =
+ cast<PointerType>(RetStructTy ? RetStructTy->getElementType(0) : RetTy);
+
+ Value *ReturnValue = ConstantPointerNull::get(ContinuationTy);
+ if (RetStructTy) {
+ ReturnValue = Builder.CreateInsertValue(UndefValue::get(RetStructTy),
+ ReturnValue, 0);
+ }
+ Builder.CreateRet(ReturnValue);
+ break;
+ }
+ }
+
+ // Remove the rest of the block, by splitting it into an unreachable block.
+ auto *BB = End->getParent();
+ BB->splitBasicBlock(End);
+ BB->getTerminator()->eraseFromParent();
+}
+
+/// Replace an unwind call to llvm.coro.end.
+static void replaceUnwindCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
+ Value *FramePtr, bool InResume,
+ CallGraph *CG) {
+ IRBuilder<> Builder(End);
+
+ switch (Shape.ABI) {
+ // In switch-lowering, this does nothing in the main function.
+ case coro::ABI::Switch:
+ if (!InResume)
+ return;
+ break;
+ // In async lowering this does nothing.
+ case coro::ABI::Async:
+ break;
+ // In continuation-lowering, this frees the continuation storage.
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce:
+ maybeFreeRetconStorage(Builder, Shape, FramePtr, CG);
+ break;
+ }
+
+ // If coro.end has an associated bundle, add cleanupret instruction.
+ if (auto Bundle = End->getOperandBundle(LLVMContext::OB_funclet)) {
+ auto *FromPad = cast<CleanupPadInst>(Bundle->Inputs[0]);
+ auto *CleanupRet = Builder.CreateCleanupRet(FromPad, nullptr);
+ End->getParent()->splitBasicBlock(End);
+ CleanupRet->getParent()->getTerminator()->eraseFromParent();
+ }
+}
+
+static void replaceCoroEnd(AnyCoroEndInst *End, const coro::Shape &Shape,
+ Value *FramePtr, bool InResume, CallGraph *CG) {
+ if (End->isUnwind())
+ replaceUnwindCoroEnd(End, Shape, FramePtr, InResume, CG);
+ else
+ replaceFallthroughCoroEnd(End, Shape, FramePtr, InResume, CG);
+
+ auto &Context = End->getContext();
+ End->replaceAllUsesWith(InResume ? ConstantInt::getTrue(Context)
+ : ConstantInt::getFalse(Context));
+ End->eraseFromParent();
+}
+
+// Create an entry block for a resume function with a switch that will jump to
+// suspend points.
+static void createResumeEntryBlock(Function &F, coro::Shape &Shape) {
+ assert(Shape.ABI == coro::ABI::Switch);
+ LLVMContext &C = F.getContext();
+
+ // resume.entry:
+ // %index.addr = getelementptr inbounds %f.Frame, %f.Frame* %FramePtr, i32 0,
+ // i32 2
+ // % index = load i32, i32* %index.addr
+ // switch i32 %index, label %unreachable [
+ // i32 0, label %resume.0
+ // i32 1, label %resume.1
+ // ...
+ // ]
+
+ auto *NewEntry = BasicBlock::Create(C, "resume.entry", &F);
+ auto *UnreachBB = BasicBlock::Create(C, "unreachable", &F);
+
+ IRBuilder<> Builder(NewEntry);
+ auto *FramePtr = Shape.FramePtr;
+ auto *FrameTy = Shape.FrameTy;
+ auto *GepIndex = Builder.CreateStructGEP(
+ FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
+ auto *Index = Builder.CreateLoad(Shape.getIndexType(), GepIndex, "index");
+ auto *Switch =
+ Builder.CreateSwitch(Index, UnreachBB, Shape.CoroSuspends.size());
+ Shape.SwitchLowering.ResumeSwitch = Switch;
+
+ size_t SuspendIndex = 0;
+ for (auto *AnyS : Shape.CoroSuspends) {
+ auto *S = cast<CoroSuspendInst>(AnyS);
+ ConstantInt *IndexVal = Shape.getIndex(SuspendIndex);
+
+ // Replace CoroSave with a store to Index:
+ // %index.addr = getelementptr %f.frame... (index field number)
+ // store i32 0, i32* %index.addr1
+ auto *Save = S->getCoroSave();
+ Builder.SetInsertPoint(Save);
+ if (S->isFinal()) {
+ // Final suspend point is represented by storing zero in ResumeFnAddr.
+ auto *GepIndex = Builder.CreateStructGEP(FrameTy, FramePtr,
+ coro::Shape::SwitchFieldIndex::Resume,
+ "ResumeFn.addr");
+ auto *NullPtr = ConstantPointerNull::get(cast<PointerType>(
+ cast<PointerType>(GepIndex->getType())->getElementType()));
+ Builder.CreateStore(NullPtr, GepIndex);
+ } else {
+ auto *GepIndex = Builder.CreateStructGEP(
+ FrameTy, FramePtr, Shape.getSwitchIndexField(), "index.addr");
+ Builder.CreateStore(IndexVal, GepIndex);
+ }
+ Save->replaceAllUsesWith(ConstantTokenNone::get(C));
+ Save->eraseFromParent();
+
+ // Split block before and after coro.suspend and add a jump from an entry
+ // switch:
+ //
+ // whateverBB:
+ // whatever
+ // %0 = call i8 @llvm.coro.suspend(token none, i1 false)
+ // switch i8 %0, label %suspend[i8 0, label %resume
+ // i8 1, label %cleanup]
+ // becomes:
+ //
+ // whateverBB:
+ // whatever
+ // br label %resume.0.landing
+ //
+ // resume.0: ; <--- jump from the switch in the resume.entry
+ // %0 = tail call i8 @llvm.coro.suspend(token none, i1 false)
+ // br label %resume.0.landing
+ //
+ // resume.0.landing:
+ // %1 = phi i8[-1, %whateverBB], [%0, %resume.0]
+ // switch i8 % 1, label %suspend [i8 0, label %resume
+ // i8 1, label %cleanup]
+
+ auto *SuspendBB = S->getParent();
+ auto *ResumeBB =
+ SuspendBB->splitBasicBlock(S, "resume." + Twine(SuspendIndex));
+ auto *LandingBB = ResumeBB->splitBasicBlock(
+ S->getNextNode(), ResumeBB->getName() + Twine(".landing"));
+ Switch->addCase(IndexVal, ResumeBB);
+
+ cast<BranchInst>(SuspendBB->getTerminator())->setSuccessor(0, LandingBB);
+ auto *PN = PHINode::Create(Builder.getInt8Ty(), 2, "", &LandingBB->front());
+ S->replaceAllUsesWith(PN);
+ PN->addIncoming(Builder.getInt8(-1), SuspendBB);
+ PN->addIncoming(S, ResumeBB);
+
+ ++SuspendIndex;
+ }
+
+ Builder.SetInsertPoint(UnreachBB);
+ Builder.CreateUnreachable();
+
+ Shape.SwitchLowering.ResumeEntryBlock = NewEntry;
+}
+
+
+// Rewrite final suspend point handling. We do not use the suspend index to
+// represent the final suspend point. Instead we zero out ResumeFnAddr in the
+// coroutine frame, since it is undefined behavior to resume a coroutine
+// suspended at the final suspend point. Thus, in the resume function, we can
+// simply remove the last case (when coro::Shape is built, the final suspend
+// point (if present) is always the last element of the CoroSuspends array).
+// In the destroy function, we add a code sequence to check if ResumeFnAddr
+// is null, and if so, jump to the appropriate label to handle cleanup from
+// the final suspend point.
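+//
+// In the destroy function this emits, roughly (label names are illustrative):
+//
+//   %resume.addr = getelementptr inbounds %f.Frame, %f.Frame* %frame,
+//                                         i32 0, i32 0
+//   %resume.fn   = load void (%f.Frame*)*, void (%f.Frame*)** %resume.addr
+//   %is.null     = icmp eq void (%f.Frame*)* %resume.fn, null
+//   br i1 %is.null, label %final.cleanup, label %switch.bb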
+void CoroCloner::handleFinalSuspend() {
+ assert(Shape.ABI == coro::ABI::Switch &&
+ Shape.SwitchLowering.HasFinalSuspend);
+ auto *Switch = cast<SwitchInst>(VMap[Shape.SwitchLowering.ResumeSwitch]);
+ auto FinalCaseIt = std::prev(Switch->case_end());
+ BasicBlock *ResumeBB = FinalCaseIt->getCaseSuccessor();
+ Switch->removeCase(FinalCaseIt);
+ if (isSwitchDestroyFunction()) {
+ BasicBlock *OldSwitchBB = Switch->getParent();
+ auto *NewSwitchBB = OldSwitchBB->splitBasicBlock(Switch, "Switch");
+ Builder.SetInsertPoint(OldSwitchBB->getTerminator());
+ auto *GepIndex = Builder.CreateStructGEP(Shape.FrameTy, NewFramePtr,
+ coro::Shape::SwitchFieldIndex::Resume,
+ "ResumeFn.addr");
+ auto *Load = Builder.CreateLoad(Shape.getSwitchResumePointerType(),
+ GepIndex);
+ auto *Cond = Builder.CreateIsNull(Load);
+ Builder.CreateCondBr(Cond, ResumeBB, NewSwitchBB);
+ OldSwitchBB->getTerminator()->eraseFromParent();
+ }
+}
+
+static Function *createCloneDeclaration(Function &OrigF, coro::Shape &Shape,
+ const Twine &Suffix,
+ Module::iterator InsertBefore) {
+ Module *M = OrigF.getParent();
+ auto *FnTy = Shape.getResumeFunctionType();
+
+ Function *NewF =
+ Function::Create(FnTy, GlobalValue::LinkageTypes::InternalLinkage,
+ OrigF.getName() + Suffix);
+ NewF->addParamAttr(0, Attribute::NonNull);
+
+  // For the async lowering ABI we can't guarantee that the context argument is
+  // not accessed via a different pointer not based on the argument.
+ if (Shape.ABI != coro::ABI::Async)
+ NewF->addParamAttr(0, Attribute::NoAlias);
+
+ M->getFunctionList().insert(InsertBefore, NewF);
+
+ return NewF;
+}
+
+/// Replace uses of the active llvm.coro.suspend.retcon/async call with the
+/// arguments to the continuation function.
+///
+/// This assumes that the builder has a meaningful insertion point.
+void CoroCloner::replaceRetconOrAsyncSuspendUses() {
+ assert(Shape.ABI == coro::ABI::Retcon || Shape.ABI == coro::ABI::RetconOnce ||
+ Shape.ABI == coro::ABI::Async);
+
+ auto NewS = VMap[ActiveSuspend];
+ if (NewS->use_empty()) return;
+
+ // Copy out all the continuation arguments after the buffer pointer into
+ // an easily-indexed data structure for convenience.
+ SmallVector<Value*, 8> Args;
+ // The async ABI includes all arguments -- including the first argument.
+ bool IsAsyncABI = Shape.ABI == coro::ABI::Async;
+ for (auto I = IsAsyncABI ? NewF->arg_begin() : std::next(NewF->arg_begin()),
+ E = NewF->arg_end();
+ I != E; ++I)
+ Args.push_back(&*I);
+
+ // If the suspend returns a single scalar value, we can just do a simple
+ // replacement.
+ if (!isa<StructType>(NewS->getType())) {
+ assert(Args.size() == 1);
+ NewS->replaceAllUsesWith(Args.front());
+ return;
+ }
+
+ // Try to peephole extracts of an aggregate return.
+ for (auto UI = NewS->use_begin(), UE = NewS->use_end(); UI != UE; ) {
+ auto EVI = dyn_cast<ExtractValueInst>((UI++)->getUser());
+ if (!EVI || EVI->getNumIndices() != 1)
+ continue;
+
+ EVI->replaceAllUsesWith(Args[EVI->getIndices().front()]);
+ EVI->eraseFromParent();
+ }
+
+ // If we have no remaining uses, we're done.
+ if (NewS->use_empty()) return;
+
+ // Otherwise, we need to create an aggregate.
+ Value *Agg = UndefValue::get(NewS->getType());
+ for (size_t I = 0, E = Args.size(); I != E; ++I)
+ Agg = Builder.CreateInsertValue(Agg, Args[I], I);
+
+ NewS->replaceAllUsesWith(Agg);
+}
+
+void CoroCloner::replaceCoroSuspends() {
+ Value *SuspendResult;
+
+ switch (Shape.ABI) {
+  // In switch lowering, replace coro.suspend with the appropriate value
+  // for the type of function we're extracting.
+  // Replacing coro.suspend with (0) will result in control flow proceeding to
+  // a resume label associated with a suspend point; replacing it with (1)
+  // will result in control flow proceeding to a cleanup label associated with
+  // this suspend point.
+ case coro::ABI::Switch:
+ SuspendResult = Builder.getInt8(isSwitchDestroyFunction() ? 1 : 0);
+ break;
+
+ // In async lowering there are no uses of the result.
+ case coro::ABI::Async:
+ return;
+
+ // In returned-continuation lowering, the arguments from earlier
+ // continuations are theoretically arbitrary, and they should have been
+ // spilled.
+ case coro::ABI::RetconOnce:
+ case coro::ABI::Retcon:
+ return;
+ }
+
+ for (AnyCoroSuspendInst *CS : Shape.CoroSuspends) {
+ // The active suspend was handled earlier.
+ if (CS == ActiveSuspend) continue;
+
+ auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[CS]);
+ MappedCS->replaceAllUsesWith(SuspendResult);
+ MappedCS->eraseFromParent();
+ }
+}
+
+void CoroCloner::replaceCoroEnds() {
+ for (AnyCoroEndInst *CE : Shape.CoroEnds) {
+ // We use a null call graph because there's no call graph node for
+ // the cloned function yet. We'll just be rebuilding that later.
+ auto *NewCE = cast<AnyCoroEndInst>(VMap[CE]);
+ replaceCoroEnd(NewCE, Shape, NewFramePtr, /*in resume*/ true, nullptr);
+ }
+}
+
+static void replaceSwiftErrorOps(Function &F, coro::Shape &Shape,
+ ValueToValueMapTy *VMap) {
+ Value *CachedSlot = nullptr;
+ auto getSwiftErrorSlot = [&](Type *ValueTy) -> Value * {
+ if (CachedSlot) {
+ assert(CachedSlot->getType()->getPointerElementType() == ValueTy &&
+ "multiple swifterror slots in function with different types");
+ return CachedSlot;
+ }
+
+ // Check if the function has a swifterror argument.
+ for (auto &Arg : F.args()) {
+ if (Arg.isSwiftError()) {
+ CachedSlot = &Arg;
+ assert(Arg.getType()->getPointerElementType() == ValueTy &&
+ "swifterror argument does not have expected type");
+ return &Arg;
+ }
+ }
+
+ // Create a swifterror alloca.
+ IRBuilder<> Builder(F.getEntryBlock().getFirstNonPHIOrDbg());
+ auto Alloca = Builder.CreateAlloca(ValueTy);
+ Alloca->setSwiftError(true);
+
+ CachedSlot = Alloca;
+ return Alloca;
+ };
+
+ for (CallInst *Op : Shape.SwiftErrorOps) {
+ auto MappedOp = VMap ? cast<CallInst>((*VMap)[Op]) : Op;
+ IRBuilder<> Builder(MappedOp);
+
+ // If there are no arguments, this is a 'get' operation.
+ Value *MappedResult;
+ if (Op->getNumArgOperands() == 0) {
+ auto ValueTy = Op->getType();
+ auto Slot = getSwiftErrorSlot(ValueTy);
+ MappedResult = Builder.CreateLoad(ValueTy, Slot);
+ } else {
+ assert(Op->getNumArgOperands() == 1);
+ auto Value = MappedOp->getArgOperand(0);
+ auto ValueTy = Value->getType();
+ auto Slot = getSwiftErrorSlot(ValueTy);
+ Builder.CreateStore(Value, Slot);
+ MappedResult = Slot;
+ }
+
+ MappedOp->replaceAllUsesWith(MappedResult);
+ MappedOp->eraseFromParent();
+ }
+
+ // If we're updating the original function, we've invalidated SwiftErrorOps.
+ if (VMap == nullptr) {
+ Shape.SwiftErrorOps.clear();
+ }
+}
+
+void CoroCloner::replaceSwiftErrorOps() {
+ ::replaceSwiftErrorOps(*NewF, Shape, &VMap);
+}
+
+void CoroCloner::salvageDebugInfo() {
+ SmallVector<DbgDeclareInst *, 8> Worklist;
+ SmallDenseMap<llvm::Value *, llvm::AllocaInst *, 4> DbgPtrAllocaCache;
+ for (auto &BB : *NewF)
+ for (auto &I : BB)
+ if (auto *DDI = dyn_cast<DbgDeclareInst>(&I))
+ Worklist.push_back(DDI);
+ for (DbgDeclareInst *DDI : Worklist) {
+ // This is a heuristic that detects declares left by CoroFrame.
+ bool LoadFromFramePtr = !isa<AllocaInst>(DDI->getAddress());
+ coro::salvageDebugInfo(DbgPtrAllocaCache, DDI, LoadFromFramePtr);
+ }
+ // Remove all salvaged dbg.declare intrinsics that became
+ // either unreachable or stale due to the CoroSplit transformation.
+ auto IsUnreachableBlock = [&](BasicBlock *BB) {
+ return BB->hasNPredecessors(0) && BB != &NewF->getEntryBlock();
+ };
+ for (DbgDeclareInst *DDI : Worklist) {
+ if (IsUnreachableBlock(DDI->getParent()))
+ DDI->eraseFromParent();
+ else if (auto *Alloca = dyn_cast_or_null<AllocaInst>(DDI->getAddress())) {
+ // Count all non-debuginfo uses in reachable blocks.
+ unsigned Uses = 0;
+ for (auto *User : DDI->getAddress()->users())
+ if (auto *I = dyn_cast<Instruction>(User))
+ if (!isa<AllocaInst>(I) && !IsUnreachableBlock(I->getParent()))
+ ++Uses;
+ if (!Uses)
+ DDI->eraseFromParent();
+ }
+ }
+}
+
+void CoroCloner::replaceEntryBlock() {
+  // In the original function, the AllocaSpillBlock is a block immediately
+  // following the allocation of the frame object. It defines GEPs for all the
+  // allocas that have been moved into the frame, and it ends by branching to
+  // the original beginning of the coroutine. Make this the entry block of the
+  // cloned function.
+ auto *Entry = cast<BasicBlock>(VMap[Shape.AllocaSpillBlock]);
+ auto *OldEntry = &NewF->getEntryBlock();
+ Entry->setName("entry" + Suffix);
+ Entry->moveBefore(OldEntry);
+ Entry->getTerminator()->eraseFromParent();
+
+ // Clear all predecessors of the new entry block. There should be
+ // exactly one predecessor, which we created when splitting out
+ // AllocaSpillBlock to begin with.
+ assert(Entry->hasOneUse());
+ auto BranchToEntry = cast<BranchInst>(Entry->user_back());
+ assert(BranchToEntry->isUnconditional());
+ Builder.SetInsertPoint(BranchToEntry);
+ Builder.CreateUnreachable();
+ BranchToEntry->eraseFromParent();
+
+ // Branch from the entry to the appropriate place.
+ Builder.SetInsertPoint(Entry);
+ switch (Shape.ABI) {
+ case coro::ABI::Switch: {
+ // In switch-lowering, we built a resume-entry block in the original
+ // function. Make the entry block branch to this.
+ auto *SwitchBB =
+ cast<BasicBlock>(VMap[Shape.SwitchLowering.ResumeEntryBlock]);
+ Builder.CreateBr(SwitchBB);
+ break;
+ }
+ case coro::ABI::Async:
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce: {
+ // In continuation ABIs, we want to branch to immediately after the
+ // active suspend point. Earlier phases will have put the suspend in its
+ // own basic block, so just thread our jump directly to its successor.
+ assert((Shape.ABI == coro::ABI::Async &&
+ isa<CoroSuspendAsyncInst>(ActiveSuspend)) ||
+ ((Shape.ABI == coro::ABI::Retcon ||
+ Shape.ABI == coro::ABI::RetconOnce) &&
+ isa<CoroSuspendRetconInst>(ActiveSuspend)));
+ auto *MappedCS = cast<AnyCoroSuspendInst>(VMap[ActiveSuspend]);
+ auto Branch = cast<BranchInst>(MappedCS->getNextNode());
+ assert(Branch->isUnconditional());
+ Builder.CreateBr(Branch->getSuccessor(0));
+ break;
+ }
+ }
+
+ // Any alloca that's still being used but not reachable from the new entry
+ // needs to be moved to the new entry.
+ Function *F = OldEntry->getParent();
+ DominatorTree DT{*F};
+ for (auto IT = inst_begin(F), End = inst_end(F); IT != End;) {
+ Instruction &I = *IT++;
+ if (!isa<AllocaInst>(&I) || I.use_empty())
+ continue;
+ if (DT.isReachableFromEntry(I.getParent()))
+ continue;
+ I.moveBefore(*Entry, Entry->getFirstInsertionPt());
+ }
+}
+
+/// Derive the value of the new frame pointer.
+Value *CoroCloner::deriveNewFramePointer() {
+ // Builder should be inserting to the front of the new entry block.
+
+ switch (Shape.ABI) {
+ // In switch-lowering, the argument is the frame pointer.
+ case coro::ABI::Switch:
+ return &*NewF->arg_begin();
+ // In async-lowering, one of the arguments is an async context as determined
+ // by the `llvm.coro.id.async` intrinsic. We can retrieve the async context of
+ // the resume function from the async context projection function associated
+ // with the active suspend. The frame is located as a tail to the async
+ // context header.
+ case coro::ABI::Async: {
+ auto *CalleeContext = NewF->getArg(Shape.AsyncLowering.ContextArgNo);
+ auto *FramePtrTy = Shape.FrameTy->getPointerTo();
+ auto *ProjectionFunc = cast<CoroSuspendAsyncInst>(ActiveSuspend)
+ ->getAsyncContextProjectionFunction();
+ auto DbgLoc =
+ cast<CoroSuspendAsyncInst>(VMap[ActiveSuspend])->getDebugLoc();
+ // Calling i8* (i8*)
+ auto *CallerContext = Builder.CreateCall(
+ cast<FunctionType>(ProjectionFunc->getType()->getPointerElementType()),
+ ProjectionFunc, CalleeContext);
+ CallerContext->setCallingConv(ProjectionFunc->getCallingConv());
+ CallerContext->setDebugLoc(DbgLoc);
+ // The frame is located after the async_context header.
+ auto &Context = Builder.getContext();
+ auto *FramePtrAddr = Builder.CreateConstInBoundsGEP1_32(
+ Type::getInt8Ty(Context), CallerContext,
+ Shape.AsyncLowering.FrameOffset, "async.ctx.frameptr");
+ // Inline the projection function.
+ InlineFunctionInfo InlineInfo;
+ auto InlineRes = InlineFunction(*CallerContext, InlineInfo);
+ assert(InlineRes.isSuccess());
+ (void)InlineRes;
+ return Builder.CreateBitCast(FramePtrAddr, FramePtrTy);
+ }
+ // In continuation-lowering, the argument is the opaque storage.
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce: {
+ Argument *NewStorage = &*NewF->arg_begin();
+ auto FramePtrTy = Shape.FrameTy->getPointerTo();
+
+    // If the storage is inline, just bitcast the storage to the frame type.
+ if (Shape.RetconLowering.IsFrameInlineInStorage)
+ return Builder.CreateBitCast(NewStorage, FramePtrTy);
+
+ // Otherwise, load the real frame from the opaque storage.
+ auto FramePtrPtr =
+ Builder.CreateBitCast(NewStorage, FramePtrTy->getPointerTo());
+ return Builder.CreateLoad(FramePtrTy, FramePtrPtr);
+ }
+ }
+ llvm_unreachable("bad ABI");
+}
+
+static void addFramePointerAttrs(AttributeList &Attrs, LLVMContext &Context,
+ unsigned ParamIndex,
+ uint64_t Size, Align Alignment) {
+ AttrBuilder ParamAttrs;
+ ParamAttrs.addAttribute(Attribute::NonNull);
+ ParamAttrs.addAttribute(Attribute::NoAlias);
+ ParamAttrs.addAlignmentAttr(Alignment);
+ ParamAttrs.addDereferenceableAttr(Size);
+ Attrs = Attrs.addParamAttributes(Context, ParamIndex, ParamAttrs);
+}
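+
+// For illustration, a hedged sketch of what addFramePointerAttrs yields on a
+// switch-lowered resume function; the frame type name, alignment, and
+// dereferenceable size are hypothetical values taken from frame layout:
+//
+//   define internal fastcc void @f.resume(
+//       %f.Frame* noalias nonnull align 8 dereferenceable(40) %frame)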
+
+/// Clone the body of the original function into a resume function of
+/// some sort.
+void CoroCloner::create() {
+ // Create the new function if we don't already have one.
+ if (!NewF) {
+ NewF = createCloneDeclaration(OrigF, Shape, Suffix,
+ OrigF.getParent()->end());
+ }
+
+  // Replace all args with undefs. The buildCoroutineFrame algorithm has
+  // already rewritten accesses to the args that occur after suspend points
+  // with loads and stores to/from the coroutine frame.
+ for (Argument &A : OrigF.args())
+ VMap[&A] = UndefValue::get(A.getType());
+
+ SmallVector<ReturnInst *, 4> Returns;
+
+ // Ignore attempts to change certain attributes of the function.
+ // TODO: maybe there should be a way to suppress this during cloning?
+ auto savedVisibility = NewF->getVisibility();
+ auto savedUnnamedAddr = NewF->getUnnamedAddr();
+ auto savedDLLStorageClass = NewF->getDLLStorageClass();
+
+ // NewF's linkage (which CloneFunctionInto does *not* change) might not
+ // be compatible with the visibility of OrigF (which it *does* change),
+ // so protect against that.
+ auto savedLinkage = NewF->getLinkage();
+ NewF->setLinkage(llvm::GlobalValue::ExternalLinkage);
+
+ CloneFunctionInto(NewF, &OrigF, VMap, /*ModuleLevelChanges=*/true, Returns);
+
+ NewF->setLinkage(savedLinkage);
+ NewF->setVisibility(savedVisibility);
+ NewF->setUnnamedAddr(savedUnnamedAddr);
+ NewF->setDLLStorageClass(savedDLLStorageClass);
+
+ auto &Context = NewF->getContext();
+
+ // Replace the attributes of the new function:
+ auto OrigAttrs = NewF->getAttributes();
+ auto NewAttrs = AttributeList();
+
+ switch (Shape.ABI) {
+ case coro::ABI::Switch:
+ // Bootstrap attributes by copying function attributes from the
+ // original function. This should include optimization settings and so on.
+ NewAttrs = NewAttrs.addAttributes(Context, AttributeList::FunctionIndex,
+ OrigAttrs.getFnAttributes());
+
+ addFramePointerAttrs(NewAttrs, Context, 0,
+ Shape.FrameSize, Shape.FrameAlign);
+ break;
+ case coro::ABI::Async:
+ break;
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce:
+ // If we have a continuation prototype, just use its attributes,
+ // full-stop.
+ NewAttrs = Shape.RetconLowering.ResumePrototype->getAttributes();
+
+ addFramePointerAttrs(NewAttrs, Context, 0,
+ Shape.getRetconCoroId()->getStorageSize(),
+ Shape.getRetconCoroId()->getStorageAlignment());
+ break;
+ }
+
+ switch (Shape.ABI) {
+ // In these ABIs, the cloned functions always return 'void', and the
+ // existing return sites are meaningless. Note that for unique
+ // continuations, this includes the returns associated with suspends;
+ // this is fine because we can't suspend twice.
+ case coro::ABI::Switch:
+ case coro::ABI::RetconOnce:
+ // Remove old returns.
+ for (ReturnInst *Return : Returns)
+ changeToUnreachable(Return, /*UseLLVMTrap=*/false);
+ break;
+
+ // With multi-suspend continuations, we'll already have eliminated the
+ // original returns and inserted returns before all the suspend points,
+ // so we want to leave any returns in place.
+ case coro::ABI::Retcon:
+ break;
+  // Async lowering will insert musttail calls at all suspend points followed
+  // by a return.
+  // Don't change returns to unreachable because that will trip up the
+  // verifier. These returns should be unreachable from the clone.
+ case coro::ABI::Async:
+ break;
+ }
+
+ NewF->setAttributes(NewAttrs);
+ NewF->setCallingConv(Shape.getResumeFunctionCC());
+
+ // Set up the new entry block.
+ replaceEntryBlock();
+
+ Builder.SetInsertPoint(&NewF->getEntryBlock().front());
+ NewFramePtr = deriveNewFramePointer();
+
+ // Remap frame pointer.
+ Value *OldFramePtr = VMap[Shape.FramePtr];
+ NewFramePtr->takeName(OldFramePtr);
+ OldFramePtr->replaceAllUsesWith(NewFramePtr);
+
+ // Remap vFrame pointer.
+ auto *NewVFrame = Builder.CreateBitCast(
+ NewFramePtr, Type::getInt8PtrTy(Builder.getContext()), "vFrame");
+ Value *OldVFrame = cast<Value>(VMap[Shape.CoroBegin]);
+ OldVFrame->replaceAllUsesWith(NewVFrame);
+
+ switch (Shape.ABI) {
+ case coro::ABI::Switch:
+    // Rewrite final suspend handling as it is not done via the switch (this
+    // allows us to remove the final case from the switch, since it is
+    // undefined behavior to resume a coroutine suspended at the final
+    // suspend point).
+ if (Shape.SwitchLowering.HasFinalSuspend)
+ handleFinalSuspend();
+ break;
+ case coro::ABI::Async:
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce:
+ // Replace uses of the active suspend with the corresponding
+ // continuation-function arguments.
+ assert(ActiveSuspend != nullptr &&
+ "no active suspend when lowering a continuation-style coroutine");
+ replaceRetconOrAsyncSuspendUses();
+ break;
+ }
+
+ // Handle suspends.
+ replaceCoroSuspends();
+
+ // Handle swifterror.
+ replaceSwiftErrorOps();
+
+ // Remove coro.end intrinsics.
+ replaceCoroEnds();
+
+ // Salvage debug info that points into the coroutine frame.
+ salvageDebugInfo();
+
+ // Eliminate coro.free from the clones, replacing it with 'null' in cleanup,
+ // to suppress deallocation code.
+ if (Shape.ABI == coro::ABI::Switch)
+ coro::replaceCoroFree(cast<CoroIdInst>(VMap[Shape.CoroBegin->getId()]),
+ /*Elide=*/ FKind == CoroCloner::Kind::SwitchCleanup);
+}
+
+// Create a resume clone by cloning the body of the original function, setting
+// a new entry block and replacing coro.suspend with an appropriate value to
+// force resume or cleanup at every suspend point.
+static Function *createClone(Function &F, const Twine &Suffix,
+ coro::Shape &Shape, CoroCloner::Kind FKind) {
+ CoroCloner Cloner(F, Suffix, Shape, FKind);
+ Cloner.create();
+ return Cloner.getFunction();
+}
+
+/// Remove calls to llvm.coro.end in the original function.
+static void removeCoroEnds(const coro::Shape &Shape, CallGraph *CG) {
+ for (auto End : Shape.CoroEnds) {
+ replaceCoroEnd(End, Shape, Shape.FramePtr, /*in resume*/ false, CG);
+ }
+}
+
+static void updateAsyncFuncPointerContextSize(coro::Shape &Shape) {
+ assert(Shape.ABI == coro::ABI::Async);
+
+ auto *FuncPtrStruct = cast<ConstantStruct>(
+ Shape.AsyncLowering.AsyncFuncPointer->getInitializer());
+ auto *OrigRelativeFunOffset = FuncPtrStruct->getOperand(0);
+ auto *OrigContextSize = FuncPtrStruct->getOperand(1);
+ auto *NewContextSize = ConstantInt::get(OrigContextSize->getType(),
+ Shape.AsyncLowering.ContextSize);
+ auto *NewFuncPtrStruct = ConstantStruct::get(
+ FuncPtrStruct->getType(), OrigRelativeFunOffset, NewContextSize);
+
+ Shape.AsyncLowering.AsyncFuncPointer->setInitializer(NewFuncPtrStruct);
+}
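+
+// A rough sketch (field values hypothetical) of the rewrite above: the async
+// function pointer constant
+//
+//   @f.fp = constant <{ i32, i32 }> <{ i32 <relative fn offset>, i32 0 }>
+//
+// keeps its relative function offset but has the second field replaced with
+// the context size computed during frame layout, e.g. 'i32 128'.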
+
+static void replaceFrameSize(coro::Shape &Shape) {
+ if (Shape.ABI == coro::ABI::Async)
+ updateAsyncFuncPointerContextSize(Shape);
+
+ if (Shape.CoroSizes.empty())
+ return;
+
+ // In the same function all coro.sizes should have the same result type.
+ auto *SizeIntrin = Shape.CoroSizes.back();
+ Module *M = SizeIntrin->getModule();
+ const DataLayout &DL = M->getDataLayout();
+ auto Size = DL.getTypeAllocSize(Shape.FrameTy);
+ auto *SizeConstant = ConstantInt::get(SizeIntrin->getType(), Size);
+
+ for (CoroSizeInst *CS : Shape.CoroSizes) {
+ CS->replaceAllUsesWith(SizeConstant);
+ CS->eraseFromParent();
+ }
+}
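+
+// For illustration, with a hypothetical 64-byte frame, replaceFrameSize
+// rewrites every
+//
+//   %size = call i64 @llvm.coro.size.i64()
+//
+// by replacing all uses of %size with the constant 'i64 64' and erasing the
+// call.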
+
+// Create a global constant array containing pointers to the functions provided
+// and set the Info parameter of CoroBegin to point at this constant. Example:
+//
+// @f.resumers = internal constant [2 x void(%f.frame*)*]
+// [void(%f.frame*)* @f.resume, void(%f.frame*)* @f.destroy]
+// define void @f() {
+// ...
+// call i8* @llvm.coro.begin(i8* null, i32 0, i8* null,
+// i8* bitcast([2 x void(%f.frame*)*] * @f.resumers to i8*))
+//
+// Assumes that all the functions have the same signature.
+static void setCoroInfo(Function &F, coro::Shape &Shape,
+ ArrayRef<Function *> Fns) {
+ // This only works under the switch-lowering ABI because coro elision
+ // only works on the switch-lowering ABI.
+ assert(Shape.ABI == coro::ABI::Switch);
+
+ SmallVector<Constant *, 4> Args(Fns.begin(), Fns.end());
+ assert(!Args.empty());
+ Function *Part = *Fns.begin();
+ Module *M = Part->getParent();
+ auto *ArrTy = ArrayType::get(Part->getType(), Args.size());
+
+ auto *ConstVal = ConstantArray::get(ArrTy, Args);
+ auto *GV = new GlobalVariable(*M, ConstVal->getType(), /*isConstant=*/true,
+ GlobalVariable::PrivateLinkage, ConstVal,
+ F.getName() + Twine(".resumers"));
+
+ // Update coro.begin instruction to refer to this constant.
+ LLVMContext &C = F.getContext();
+ auto *BC = ConstantExpr::getPointerCast(GV, Type::getInt8PtrTy(C));
+ Shape.getSwitchCoroId()->setInfo(BC);
+}
+
+// Store addresses of Resume/Destroy/Cleanup functions in the coroutine frame.
+static void updateCoroFrame(coro::Shape &Shape, Function *ResumeFn,
+ Function *DestroyFn, Function *CleanupFn) {
+ assert(Shape.ABI == coro::ABI::Switch);
+
+ IRBuilder<> Builder(Shape.FramePtr->getNextNode());
+ auto *ResumeAddr = Builder.CreateStructGEP(
+ Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Resume,
+ "resume.addr");
+ Builder.CreateStore(ResumeFn, ResumeAddr);
+
+ Value *DestroyOrCleanupFn = DestroyFn;
+
+ CoroIdInst *CoroId = Shape.getSwitchCoroId();
+ if (CoroAllocInst *CA = CoroId->getCoroAlloc()) {
+    // If there is a CoroAlloc and it returns false (meaning we elided the
+    // allocation), use CleanupFn instead of DestroyFn.
+ DestroyOrCleanupFn = Builder.CreateSelect(CA, DestroyFn, CleanupFn);
+ }
+
+ auto *DestroyAddr = Builder.CreateStructGEP(
+ Shape.FrameTy, Shape.FramePtr, coro::Shape::SwitchFieldIndex::Destroy,
+ "destroy.addr");
+ Builder.CreateStore(DestroyOrCleanupFn, DestroyAddr);
+}
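+
+// A hedged sketch of the IR emitted above (names hypothetical; the select
+// only appears when an llvm.coro.alloc is present):
+//
+//   %resume.addr = getelementptr inbounds %f.Frame, %f.Frame* %frame, i32 0, i32 0
+//   store void (%f.Frame*)* @f.resume, void (%f.Frame*)** %resume.addr
+//   %sel = select i1 %alloc, void (%f.Frame*)* @f.destroy,
+//                            void (%f.Frame*)* @f.cleanup
+//   %destroy.addr = getelementptr inbounds %f.Frame, %f.Frame* %frame, i32 0, i32 1
+//   store void (%f.Frame*)* %sel, void (%f.Frame*)** %destroy.addr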
+
+static void postSplitCleanup(Function &F) {
+ removeUnreachableBlocks(F);
+
+ // For now, we do a mandatory verification step because we don't
+ // entirely trust this pass. Note that we don't want to add a verifier
+ // pass to FPM below because it will also verify all the global data.
+ if (verifyFunction(F, &errs()))
+ report_fatal_error("Broken function");
+
+ legacy::FunctionPassManager FPM(F.getParent());
+
+ FPM.add(createSCCPPass());
+ FPM.add(createCFGSimplificationPass());
+ FPM.add(createEarlyCSEPass());
+ FPM.add(createCFGSimplificationPass());
+
+ FPM.doInitialization();
+ FPM.run(F);
+ FPM.doFinalization();
+}
+
+// Assuming we arrived at block NewBlock from the Prev instruction, store the
+// PHIs' incoming values in the ResolvedValues map.
+static void
+scanPHIsAndUpdateValueMap(Instruction *Prev, BasicBlock *NewBlock,
+ DenseMap<Value *, Value *> &ResolvedValues) {
+ auto *PrevBB = Prev->getParent();
+ for (PHINode &PN : NewBlock->phis()) {
+ auto V = PN.getIncomingValueForBlock(PrevBB);
+ // See if we already resolved it.
+ auto VI = ResolvedValues.find(V);
+ if (VI != ResolvedValues.end())
+ V = VI->second;
+ // Remember the value.
+ ResolvedValues[&PN] = V;
+ }
+}
+
+// Replace a sequence of branches leading to a ret with a clone of that ret
+// instruction. Suspend points are represented by a switch; track the PHI
+// values and select the correct case successor when possible.
+static bool simplifyTerminatorLeadingToRet(Instruction *InitialInst) {
+ DenseMap<Value *, Value *> ResolvedValues;
+ BasicBlock *UnconditionalSucc = nullptr;
+
+ Instruction *I = InitialInst;
+ while (I->isTerminator() ||
+ (isa<CmpInst>(I) && I->getNextNode()->isTerminator())) {
+ if (isa<ReturnInst>(I)) {
+ if (I != InitialInst) {
+        // If InitialInst is an unconditional branch, remove PHI values that
+        // come from the basic block of InitialInst.
+ if (UnconditionalSucc)
+ UnconditionalSucc->removePredecessor(InitialInst->getParent(), true);
+ ReplaceInstWithInst(InitialInst, I->clone());
+ }
+ return true;
+ }
+ if (auto *BR = dyn_cast<BranchInst>(I)) {
+ if (BR->isUnconditional()) {
+ BasicBlock *BB = BR->getSuccessor(0);
+ if (I == InitialInst)
+ UnconditionalSucc = BB;
+ scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
+ I = BB->getFirstNonPHIOrDbgOrLifetime();
+ continue;
+ }
+ } else if (auto *CondCmp = dyn_cast<CmpInst>(I)) {
+ auto *BR = dyn_cast<BranchInst>(I->getNextNode());
+ if (BR && BR->isConditional() && CondCmp == BR->getCondition()) {
+        // If the case count of the suspend switch instruction has been
+        // reduced to one, llvm::ConstantFoldTerminator simplifies it to a
+        // CmpInst, and the comparison looks like:
+        //   %cond = icmp eq i8 %V, constant.
+ ConstantInt *CondConst = dyn_cast<ConstantInt>(CondCmp->getOperand(1));
+ if (CondConst && CondCmp->getPredicate() == CmpInst::ICMP_EQ) {
+ Value *V = CondCmp->getOperand(0);
+ auto it = ResolvedValues.find(V);
+ if (it != ResolvedValues.end())
+ V = it->second;
+
+ if (ConstantInt *Cond0 = dyn_cast<ConstantInt>(V)) {
+ BasicBlock *BB = Cond0->equalsInt(CondConst->getZExtValue())
+ ? BR->getSuccessor(0)
+ : BR->getSuccessor(1);
+ scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
+ I = BB->getFirstNonPHIOrDbgOrLifetime();
+ continue;
+ }
+ }
+ }
+ } else if (auto *SI = dyn_cast<SwitchInst>(I)) {
+ Value *V = SI->getCondition();
+ auto it = ResolvedValues.find(V);
+ if (it != ResolvedValues.end())
+ V = it->second;
+ if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
+ BasicBlock *BB = SI->findCaseValue(Cond)->getCaseSuccessor();
+ scanPHIsAndUpdateValueMap(I, BB, ResolvedValues);
+ I = BB->getFirstNonPHIOrDbgOrLifetime();
+ continue;
+ }
+ }
+ return false;
+ }
+ return false;
+}
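+
+// For illustration (hypothetical blocks and values), the walk above
+// recognizes that
+//
+//   %cmp = icmp eq i8 %index.phi, 0
+//   br i1 %cmp, label %exit, label %other
+//   ...
+// exit:
+//   ret void
+//
+// resolves to 'ret void' when %index.phi is known to be constant along the
+// traversed path, and clones that ret over InitialInst.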
+
+// Check whether CI obeys the rules of musttail attribute.
+static bool shouldBeMustTail(const CallInst &CI, const Function &F) {
+ if (CI.isInlineAsm())
+ return false;
+
+ // Match prototypes and calling conventions of resume function.
+ FunctionType *CalleeTy = CI.getFunctionType();
+ if (!CalleeTy->getReturnType()->isVoidTy() || (CalleeTy->getNumParams() != 1))
+ return false;
+
+ Type *CalleeParmTy = CalleeTy->getParamType(0);
+ if (!CalleeParmTy->isPointerTy() ||
+ (CalleeParmTy->getPointerAddressSpace() != 0))
+ return false;
+
+ if (CI.getCallingConv() != F.getCallingConv())
+ return false;
+
+  // CI should not have any ABI-impacting function attributes.
+ static const Attribute::AttrKind ABIAttrs[] = {
+ Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
+ Attribute::Preallocated, Attribute::InReg, Attribute::Returned,
+ Attribute::SwiftSelf, Attribute::SwiftError};
+ AttributeList Attrs = CI.getAttributes();
+ for (auto AK : ABIAttrs)
+ if (Attrs.hasParamAttribute(0, AK))
+ return false;
+
+ return true;
+}
+
+// Add musttail to any resume instructions that are immediately followed by a
+// suspend (i.e. ret). We do this even in -O0 to support guaranteed tail calls
+// for symmetric coroutine control transfer (C++ Coroutines TS extension).
+// This transformation is done only in the resume part of the coroutine, which
+// has a signature and calling convention identical to the coro.resume call.
+static void addMustTailToCoroResumes(Function &F) {
+ bool changed = false;
+
+ // Collect potential resume instructions.
+ SmallVector<CallInst *, 4> Resumes;
+ for (auto &I : instructions(F))
+ if (auto *Call = dyn_cast<CallInst>(&I))
+ if (shouldBeMustTail(*Call, F))
+ Resumes.push_back(Call);
+
+ // Set musttail on those that are followed by a ret instruction.
+ for (CallInst *Call : Resumes)
+ if (simplifyTerminatorLeadingToRet(Call->getNextNode())) {
+ Call->setTailCallKind(CallInst::TCK_MustTail);
+ changed = true;
+ }
+
+ if (changed)
+ removeUnreachableBlocks(F);
+}
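+
+// A hedged sketch of the rewrite (hypothetical IR; fastcc is assumed on both
+// caller and callee): a resume sequence such as
+//
+//   call fastcc void %resume.fn(i8* %hdl)
+//   ret void
+//
+// becomes a guaranteed tail call:
+//
+//   musttail call fastcc void %resume.fn(i8* %hdl)
+//   ret void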
+
+// Coroutine has no suspend points. Remove heap allocation for the coroutine
+// frame if possible.
+static void handleNoSuspendCoroutine(coro::Shape &Shape) {
+ auto *CoroBegin = Shape.CoroBegin;
+ auto *CoroId = CoroBegin->getId();
+ auto *AllocInst = CoroId->getCoroAlloc();
+ switch (Shape.ABI) {
+ case coro::ABI::Switch: {
+ auto SwitchId = cast<CoroIdInst>(CoroId);
+ coro::replaceCoroFree(SwitchId, /*Elide=*/AllocInst != nullptr);
+ if (AllocInst) {
+ IRBuilder<> Builder(AllocInst);
+ auto *Frame = Builder.CreateAlloca(Shape.FrameTy);
+ Frame->setAlignment(Shape.FrameAlign);
+ auto *VFrame = Builder.CreateBitCast(Frame, Builder.getInt8PtrTy());
+ AllocInst->replaceAllUsesWith(Builder.getFalse());
+ AllocInst->eraseFromParent();
+ CoroBegin->replaceAllUsesWith(VFrame);
+ } else {
+ CoroBegin->replaceAllUsesWith(CoroBegin->getMem());
+ }
+ break;
+ }
+ case coro::ABI::Async:
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce:
+ CoroBegin->replaceAllUsesWith(UndefValue::get(CoroBegin->getType()));
+ break;
+ }
+
+ CoroBegin->eraseFromParent();
+}
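+
+// Illustrative effect under switch lowering when llvm.coro.alloc is present
+// (names hypothetical): the dynamic allocation path
+//
+//   %need.alloc = call i1 @llvm.coro.alloc(token %id)
+//   ...
+//   %hdl = call i8* @llvm.coro.begin(token %id, i8* %mem)
+//
+// collapses to a stack slot,
+//
+//   %frame = alloca %f.Frame, align 8
+//   %vFrame = bitcast %f.Frame* %frame to i8*
+//
+// with %need.alloc replaced by 'false' and %hdl replaced by %vFrame.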
+
+// SimplifySuspendPoint needs to check that there are no calls between
+// coro_save and coro_suspend, since any of those calls may potentially resume
+// the coroutine; if that is the case we cannot eliminate the suspend point.
+static bool hasCallsInBlockBetween(Instruction *From, Instruction *To) {
+ for (Instruction *I = From; I != To; I = I->getNextNode()) {
+ // Assume that no intrinsic can resume the coroutine.
+ if (isa<IntrinsicInst>(I))
+ continue;
+
+ if (isa<CallBase>(I))
+ return true;
+ }
+ return false;
+}
+
+static bool hasCallsInBlocksBetween(BasicBlock *SaveBB, BasicBlock *ResDesBB) {
+ SmallPtrSet<BasicBlock *, 8> Set;
+ SmallVector<BasicBlock *, 8> Worklist;
+
+ Set.insert(SaveBB);
+ Worklist.push_back(ResDesBB);
+
+  // Accumulate all blocks between SaveBB and ResDesBB. Because CoroSaveIntr
+  // returns a token consumed by the suspend instruction, all blocks in between
+  // will have to eventually hit SaveBB when going backwards from ResDesBB.
+ while (!Worklist.empty()) {
+ auto *BB = Worklist.pop_back_val();
+ Set.insert(BB);
+ for (auto *Pred : predecessors(BB))
+ if (Set.count(Pred) == 0)
+ Worklist.push_back(Pred);
+ }
+
+ // SaveBB and ResDesBB are checked separately in hasCallsBetween.
+ Set.erase(SaveBB);
+ Set.erase(ResDesBB);
+
+ for (auto *BB : Set)
+ if (hasCallsInBlockBetween(BB->getFirstNonPHI(), nullptr))
+ return true;
+
+ return false;
+}
+
+static bool hasCallsBetween(Instruction *Save, Instruction *ResumeOrDestroy) {
+ auto *SaveBB = Save->getParent();
+ auto *ResumeOrDestroyBB = ResumeOrDestroy->getParent();
+
+ if (SaveBB == ResumeOrDestroyBB)
+ return hasCallsInBlockBetween(Save->getNextNode(), ResumeOrDestroy);
+
+ // Any calls from Save to the end of the block?
+ if (hasCallsInBlockBetween(Save->getNextNode(), nullptr))
+ return true;
+
+  // Any calls from the beginning of the block up to ResumeOrDestroy?
+ if (hasCallsInBlockBetween(ResumeOrDestroyBB->getFirstNonPHI(),
+ ResumeOrDestroy))
+ return true;
+
+ // Any calls in all of the blocks between SaveBB and ResumeOrDestroyBB?
+ if (hasCallsInBlocksBetween(SaveBB, ResumeOrDestroyBB))
+ return true;
+
+ return false;
+}
+
+// If a SuspendIntrin is preceded by Resume or Destroy, we can eliminate the
+// suspend point and replace it with normal control flow.
+static bool simplifySuspendPoint(CoroSuspendInst *Suspend,
+ CoroBeginInst *CoroBegin) {
+ Instruction *Prev = Suspend->getPrevNode();
+ if (!Prev) {
+ auto *Pred = Suspend->getParent()->getSinglePredecessor();
+ if (!Pred)
+ return false;
+ Prev = Pred->getTerminator();
+ }
+
+ CallBase *CB = dyn_cast<CallBase>(Prev);
+ if (!CB)
+ return false;
+
+ auto *Callee = CB->getCalledOperand()->stripPointerCasts();
+
+ // See if the callsite is for resumption or destruction of the coroutine.
+ auto *SubFn = dyn_cast<CoroSubFnInst>(Callee);
+ if (!SubFn)
+ return false;
+
+  // If it does not refer to the current coroutine, we cannot do anything
+  // with it.
+ if (SubFn->getFrame() != CoroBegin)
+ return false;
+
+  // See if the transformation is safe. Specifically, see if there are any
+  // calls in between Save and CB. They can potentially resume the coroutine,
+  // rendering this optimization unsafe.
+ auto *Save = Suspend->getCoroSave();
+ if (hasCallsBetween(Save, CB))
+ return false;
+
+ // Replace llvm.coro.suspend with the value that results in resumption over
+ // the resume or cleanup path.
+ Suspend->replaceAllUsesWith(SubFn->getRawIndex());
+ Suspend->eraseFromParent();
+ Save->eraseFromParent();
+
+ // No longer need a call to coro.resume or coro.destroy.
+ if (auto *Invoke = dyn_cast<InvokeInst>(CB)) {
+ BranchInst::Create(Invoke->getNormalDest(), Invoke);
+ }
+
+ // Grab the CalledValue from CB before erasing the CallInstr.
+ auto *CalledValue = CB->getCalledOperand();
+ CB->eraseFromParent();
+
+  // If it has no more users, remove it. Usually it is a bitcast of SubFn.
+ if (CalledValue != SubFn && CalledValue->user_empty())
+ if (auto *I = dyn_cast<Instruction>(CalledValue))
+ I->eraseFromParent();
+
+ // Now we are good to remove SubFn.
+ if (SubFn->user_empty())
+ SubFn->eraseFromParent();
+
+ return true;
+}
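+
+// For illustration (hypothetical IR): when the suspend is immediately
+// preceded by a resume of the same coroutine,
+//
+//   %addr = call i8* @llvm.coro.subfn.addr(i8* %hdl, i8 0) ; 0 == resume
+//   %fn = bitcast i8* %addr to void (i8*)*
+//   call fastcc void %fn(i8* %hdl)
+//   %sp = call i8 @llvm.coro.suspend(token %save, i1 false)
+//
+// the call and the suspend are removed and %sp is replaced with the raw
+// index 'i8 0', so control flows straight into the resume path.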
+
+// Remove suspend points that are simplified.
+static void simplifySuspendPoints(coro::Shape &Shape) {
+ // Currently, the only simplification we do is switch-lowering-specific.
+ if (Shape.ABI != coro::ABI::Switch)
+ return;
+
+ auto &S = Shape.CoroSuspends;
+ size_t I = 0, N = S.size();
+ if (N == 0)
+ return;
+ while (true) {
+ auto SI = cast<CoroSuspendInst>(S[I]);
+ // Leave final.suspend to handleFinalSuspend since it is undefined behavior
+ // to resume a coroutine suspended at the final suspend point.
+ if (!SI->isFinal() && simplifySuspendPoint(SI, Shape.CoroBegin)) {
+ if (--N == I)
+ break;
+ std::swap(S[I], S[N]);
+ continue;
+ }
+ if (++I == N)
+ break;
+ }
+ S.resize(N);
+}
+
+static void splitSwitchCoroutine(Function &F, coro::Shape &Shape,
+ SmallVectorImpl<Function *> &Clones) {
+ assert(Shape.ABI == coro::ABI::Switch);
+
+ createResumeEntryBlock(F, Shape);
+ auto ResumeClone = createClone(F, ".resume", Shape,
+ CoroCloner::Kind::SwitchResume);
+ auto DestroyClone = createClone(F, ".destroy", Shape,
+ CoroCloner::Kind::SwitchUnwind);
+ auto CleanupClone = createClone(F, ".cleanup", Shape,
+ CoroCloner::Kind::SwitchCleanup);
+
+ postSplitCleanup(*ResumeClone);
+ postSplitCleanup(*DestroyClone);
+ postSplitCleanup(*CleanupClone);
+
+ addMustTailToCoroResumes(*ResumeClone);
+
+  // Store the addresses of the resume/destroy/cleanup functions in the
+  // coroutine frame.
+ updateCoroFrame(Shape, ResumeClone, DestroyClone, CleanupClone);
+
+ assert(Clones.empty());
+ Clones.push_back(ResumeClone);
+ Clones.push_back(DestroyClone);
+ Clones.push_back(CleanupClone);
+
+  // Create a constant array referring to the resume/destroy/cleanup functions
+  // pointed to by the last argument of @llvm.coro.info, so that the CoroElide
+  // pass can determine the correct function to call.
+ setCoroInfo(F, Shape, Clones);
+}
+
+static void replaceAsyncResumeFunction(CoroSuspendAsyncInst *Suspend,
+ Value *Continuation) {
+ auto *ResumeIntrinsic = Suspend->getResumeFunction();
+ auto &Context = Suspend->getParent()->getParent()->getContext();
+ auto *Int8PtrTy = Type::getInt8PtrTy(Context);
+
+ IRBuilder<> Builder(ResumeIntrinsic);
+ auto *Val = Builder.CreateBitOrPointerCast(Continuation, Int8PtrTy);
+ ResumeIntrinsic->replaceAllUsesWith(Val);
+ ResumeIntrinsic->eraseFromParent();
+ Suspend->setOperand(0, UndefValue::get(Int8PtrTy));
+}
+
+/// Coerce the arguments in \p FnArgs according to \p FnTy in \p CallArgs.
+static void coerceArguments(IRBuilder<> &Builder, FunctionType *FnTy,
+ ArrayRef<Value *> FnArgs,
+ SmallVectorImpl<Value *> &CallArgs) {
+ size_t ArgIdx = 0;
+ for (auto paramTy : FnTy->params()) {
+ assert(ArgIdx < FnArgs.size());
+ if (paramTy != FnArgs[ArgIdx]->getType())
+ CallArgs.push_back(
+ Builder.CreateBitOrPointerCast(FnArgs[ArgIdx], paramTy));
+ else
+ CallArgs.push_back(FnArgs[ArgIdx]);
+ ++ArgIdx;
+ }
+}
+
+CallInst *coro::createMustTailCall(DebugLoc Loc, Function *MustTailCallFn,
+ ArrayRef<Value *> Arguments,
+ IRBuilder<> &Builder) {
+ auto *FnTy =
+ cast<FunctionType>(MustTailCallFn->getType()->getPointerElementType());
+  // Coerce the arguments; LLVM optimizations seem to ignore the types in
+  // vararg functions and throw away casts in optimized mode.
+ SmallVector<Value *, 8> CallArgs;
+ coerceArguments(Builder, FnTy, Arguments, CallArgs);
+
+ auto *TailCall = Builder.CreateCall(FnTy, MustTailCallFn, CallArgs);
+ TailCall->setTailCallKind(CallInst::TCK_MustTail);
+ TailCall->setDebugLoc(Loc);
+ TailCall->setCallingConv(MustTailCallFn->getCallingConv());
+ return TailCall;
+}
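+
+// A minimal sketch of what createMustTailCall emits (function, argument, and
+// type names are hypothetical; the bitcast appears only where an argument's
+// type differs from the callee's parameter type):
+//
+//   %ctx.cast = bitcast i8* %ctx to %async.ctx*
+//   musttail call swiftcc void @continuation.fn(%async.ctx* %ctx.cast)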
+
+static void splitAsyncCoroutine(Function &F, coro::Shape &Shape,
+ SmallVectorImpl<Function *> &Clones) {
+ assert(Shape.ABI == coro::ABI::Async);
+ assert(Clones.empty());
+ // Reset various things that the optimizer might have decided it
+ // "knows" about the coroutine function due to not seeing a return.
+ F.removeFnAttr(Attribute::NoReturn);
+ F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
+ F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
+
+ auto &Context = F.getContext();
+ auto *Int8PtrTy = Type::getInt8PtrTy(Context);
+
+ auto *Id = cast<CoroIdAsyncInst>(Shape.CoroBegin->getId());
+ IRBuilder<> Builder(Id);
+
+ auto *FramePtr = Id->getStorage();
+ FramePtr = Builder.CreateBitOrPointerCast(FramePtr, Int8PtrTy);
+ FramePtr = Builder.CreateConstInBoundsGEP1_32(
+ Type::getInt8Ty(Context), FramePtr, Shape.AsyncLowering.FrameOffset,
+ "async.ctx.frameptr");
+
+ // Map all uses of llvm.coro.begin to the allocated frame pointer.
+ {
+ // Make sure we don't invalidate Shape.FramePtr.
+ TrackingVH<Instruction> Handle(Shape.FramePtr);
+ Shape.CoroBegin->replaceAllUsesWith(FramePtr);
+ Shape.FramePtr = Handle.getValPtr();
+ }
+
+ // Create all the functions in order after the main function.
+ auto NextF = std::next(F.getIterator());
+
+ // Create a continuation function for each of the suspend points.
+ Clones.reserve(Shape.CoroSuspends.size());
+ for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
+ auto *Suspend = cast<CoroSuspendAsyncInst>(Shape.CoroSuspends[Idx]);
+
+ // Create the clone declaration.
+ auto *Continuation =
+ createCloneDeclaration(F, Shape, ".resume." + Twine(Idx), NextF);
+ Clones.push_back(Continuation);
+
+ // Insert a branch to a new return block immediately before the suspend
+ // point.
+ auto *SuspendBB = Suspend->getParent();
+ auto *NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
+ auto *Branch = cast<BranchInst>(SuspendBB->getTerminator());
+
+    // Place it before the split-off suspend block.
+ auto *ReturnBB =
+ BasicBlock::Create(F.getContext(), "coro.return", &F, NewSuspendBB);
+ Branch->setSuccessor(0, ReturnBB);
+
+ IRBuilder<> Builder(ReturnBB);
+
+ // Insert the call to the tail call function and inline it.
+ auto *Fn = Suspend->getMustTailCallFunction();
+ SmallVector<Value *, 8> Args(Suspend->args());
+ auto FnArgs = ArrayRef<Value *>(Args).drop_front(3);
+ auto *TailCall =
+ coro::createMustTailCall(Suspend->getDebugLoc(), Fn, FnArgs, Builder);
+ Builder.CreateRetVoid();
+ InlineFunctionInfo FnInfo;
+ auto InlineRes = InlineFunction(*TailCall, FnInfo);
+ assert(InlineRes.isSuccess() && "Expected inlining to succeed");
+ (void)InlineRes;
+
+    // Replace the llvm.coro.async.resume intrinsic call.
+ replaceAsyncResumeFunction(Suspend, Continuation);
+ }
+
+ assert(Clones.size() == Shape.CoroSuspends.size());
+ for (size_t Idx = 0, End = Shape.CoroSuspends.size(); Idx != End; ++Idx) {
+ auto *Suspend = Shape.CoroSuspends[Idx];
+ auto *Clone = Clones[Idx];
+
+ CoroCloner(F, "resume." + Twine(Idx), Shape, Clone, Suspend).create();
+ }
+}
+
+static void splitRetconCoroutine(Function &F, coro::Shape &Shape,
+ SmallVectorImpl<Function *> &Clones) {
+ assert(Shape.ABI == coro::ABI::Retcon ||
+ Shape.ABI == coro::ABI::RetconOnce);
+ assert(Clones.empty());
+
+ // Reset various things that the optimizer might have decided it
+ // "knows" about the coroutine function due to not seeing a return.
+ F.removeFnAttr(Attribute::NoReturn);
+ F.removeAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
+ F.removeAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
+
+ // Allocate the frame.
+ auto *Id = cast<AnyCoroIdRetconInst>(Shape.CoroBegin->getId());
+ Value *RawFramePtr;
+ if (Shape.RetconLowering.IsFrameInlineInStorage) {
+ RawFramePtr = Id->getStorage();
+ } else {
+ IRBuilder<> Builder(Id);
+
+ // Determine the size of the frame.
+ const DataLayout &DL = F.getParent()->getDataLayout();
+ auto Size = DL.getTypeAllocSize(Shape.FrameTy);
+
+ // Allocate. We don't need to update the call graph node because we're
+ // going to recompute it from scratch after splitting.
+ // FIXME: pass the required alignment
+ RawFramePtr = Shape.emitAlloc(Builder, Builder.getInt64(Size), nullptr);
+ RawFramePtr =
+ Builder.CreateBitCast(RawFramePtr, Shape.CoroBegin->getType());
+
+ // Stash the allocated frame pointer in the continuation storage.
+ auto Dest = Builder.CreateBitCast(Id->getStorage(),
+ RawFramePtr->getType()->getPointerTo());
+ Builder.CreateStore(RawFramePtr, Dest);
+ }
+
+ // Map all uses of llvm.coro.begin to the allocated frame pointer.
+ {
+ // Make sure we don't invalidate Shape.FramePtr.
+ TrackingVH<Instruction> Handle(Shape.FramePtr);
+ Shape.CoroBegin->replaceAllUsesWith(RawFramePtr);
+ Shape.FramePtr = Handle.getValPtr();
+ }
+
+ // Create a unique return block.
+ BasicBlock *ReturnBB = nullptr;
+ SmallVector<PHINode *, 4> ReturnPHIs;
+
+ // Create all the functions in order after the main function.
+ auto NextF = std::next(F.getIterator());
+
+ // Create a continuation function for each of the suspend points.
+ Clones.reserve(Shape.CoroSuspends.size());
+ for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
+ auto Suspend = cast<CoroSuspendRetconInst>(Shape.CoroSuspends[i]);
+
+ // Create the clone declaration.
+ auto Continuation =
+ createCloneDeclaration(F, Shape, ".resume." + Twine(i), NextF);
+ Clones.push_back(Continuation);
+
+ // Insert a branch to the unified return block immediately before
+ // the suspend point.
+ auto SuspendBB = Suspend->getParent();
+ auto NewSuspendBB = SuspendBB->splitBasicBlock(Suspend);
+ auto Branch = cast<BranchInst>(SuspendBB->getTerminator());
+
+ // Create the unified return block.
+ if (!ReturnBB) {
+ // Place it before the first suspend.
+ ReturnBB = BasicBlock::Create(F.getContext(), "coro.return", &F,
+ NewSuspendBB);
+ Shape.RetconLowering.ReturnBlock = ReturnBB;
+
+ IRBuilder<> Builder(ReturnBB);
+
+ // Create PHIs for all the return values.
+ assert(ReturnPHIs.empty());
+
+ // First, the continuation.
+ ReturnPHIs.push_back(Builder.CreatePHI(Continuation->getType(),
+ Shape.CoroSuspends.size()));
+
+ // Next, all the directly-yielded values.
+ for (auto ResultTy : Shape.getRetconResultTypes())
+ ReturnPHIs.push_back(Builder.CreatePHI(ResultTy,
+ Shape.CoroSuspends.size()));
+
+ // Build the return value.
+ auto RetTy = F.getReturnType();
+
+ // Cast the continuation value if necessary.
+ // We can't rely on the types matching up because that type would
+ // have to be infinite.
+ auto CastedContinuationTy =
+ (ReturnPHIs.size() == 1 ? RetTy : RetTy->getStructElementType(0));
+ auto *CastedContinuation =
+ Builder.CreateBitCast(ReturnPHIs[0], CastedContinuationTy);
+
+ Value *RetV;
+ if (ReturnPHIs.size() == 1) {
+ RetV = CastedContinuation;
+ } else {
+ RetV = UndefValue::get(RetTy);
+ RetV = Builder.CreateInsertValue(RetV, CastedContinuation, 0);
+ for (size_t I = 1, E = ReturnPHIs.size(); I != E; ++I)
+ RetV = Builder.CreateInsertValue(RetV, ReturnPHIs[I], I);
+ }
+
+ Builder.CreateRet(RetV);
+ }
+
+ // Branch to the return block.
+ Branch->setSuccessor(0, ReturnBB);
+ ReturnPHIs[0]->addIncoming(Continuation, SuspendBB);
+ size_t NextPHIIndex = 1;
+ for (auto &VUse : Suspend->value_operands())
+ ReturnPHIs[NextPHIIndex++]->addIncoming(&*VUse, SuspendBB);
+ assert(NextPHIIndex == ReturnPHIs.size());
+ }
+
+ assert(Clones.size() == Shape.CoroSuspends.size());
+ for (size_t i = 0, e = Shape.CoroSuspends.size(); i != e; ++i) {
+ auto Suspend = Shape.CoroSuspends[i];
+ auto Clone = Clones[i];
+
+ CoroCloner(F, "resume." + Twine(i), Shape, Clone, Suspend).create();
+ }
+}
+
+namespace {
+ class PrettyStackTraceFunction : public PrettyStackTraceEntry {
+ Function &F;
+ public:
+ PrettyStackTraceFunction(Function &F) : F(F) {}
+ void print(raw_ostream &OS) const override {
+ OS << "While splitting coroutine ";
+ F.printAsOperand(OS, /*print type*/ false, F.getParent());
+ OS << "\n";
+ }
+ };
+}
+
+static coro::Shape splitCoroutine(Function &F,
+ SmallVectorImpl<Function *> &Clones,
+ bool ReuseFrameSlot) {
+ PrettyStackTraceFunction prettyStackTrace(F);
+
+  // The suspend-crossing algorithm in buildCoroutineFrame gets tripped
+  // up by uses in unreachable blocks, so remove them as a first pass.
+ removeUnreachableBlocks(F);
+
+ coro::Shape Shape(F, ReuseFrameSlot);
+ if (!Shape.CoroBegin)
+ return Shape;
+
+ simplifySuspendPoints(Shape);
+ buildCoroutineFrame(F, Shape);
+ replaceFrameSize(Shape);
+
+  // If there are no suspend points, no split is required; just remove
+  // the allocation and deallocation blocks, since they are not needed.
+ if (Shape.CoroSuspends.empty()) {
+ handleNoSuspendCoroutine(Shape);
+ } else {
+ switch (Shape.ABI) {
+ case coro::ABI::Switch:
+ splitSwitchCoroutine(F, Shape, Clones);
+ break;
+ case coro::ABI::Async:
+ splitAsyncCoroutine(F, Shape, Clones);
+ break;
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce:
+ splitRetconCoroutine(F, Shape, Clones);
+ break;
+ }
+ }
+
+ // Replace all the swifterror operations in the original function.
+ // This invalidates SwiftErrorOps in the Shape.
+ replaceSwiftErrorOps(F, Shape, nullptr);
+
+ return Shape;
+}
+
+static void
+updateCallGraphAfterCoroutineSplit(Function &F, const coro::Shape &Shape,
+ const SmallVectorImpl<Function *> &Clones,
+ CallGraph &CG, CallGraphSCC &SCC) {
+ if (!Shape.CoroBegin)
+ return;
+
+ removeCoroEnds(Shape, &CG);
+ postSplitCleanup(F);
+
+ // Update call graph and add the functions we created to the SCC.
+ coro::updateCallGraph(F, Clones, CG, SCC);
+}
+
+static void updateCallGraphAfterCoroutineSplit(
+ LazyCallGraph::Node &N, const coro::Shape &Shape,
+ const SmallVectorImpl<Function *> &Clones, LazyCallGraph::SCC &C,
+ LazyCallGraph &CG, CGSCCAnalysisManager &AM, CGSCCUpdateResult &UR,
+ FunctionAnalysisManager &FAM) {
+ if (!Shape.CoroBegin)
+ return;
+
+ for (llvm::AnyCoroEndInst *End : Shape.CoroEnds) {
+ auto &Context = End->getContext();
+ End->replaceAllUsesWith(ConstantInt::getFalse(Context));
+ End->eraseFromParent();
+ }
+
+ if (!Clones.empty()) {
+ switch (Shape.ABI) {
+ case coro::ABI::Switch:
+ // Each clone in the Switch lowering is independent of the other clones.
+ // Let the LazyCallGraph know about each one separately.
+ for (Function *Clone : Clones)
+ CG.addSplitFunction(N.getFunction(), *Clone);
+ break;
+ case coro::ABI::Async:
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce:
+      // Each clone in the Async/Retcon lowering references the other clones.
+      // Let the LazyCallGraph know about all of them at once.
+ CG.addSplitRefRecursiveFunctions(N.getFunction(), Clones);
+ break;
+ }
+
+ // Let the CGSCC infra handle the changes to the original function.
+ updateCGAndAnalysisManagerForCGSCCPass(CG, C, N, AM, UR, FAM);
+ }
+
+ // Do some cleanup and let the CGSCC infra see if we've cleaned up any edges
+ // to the split functions.
+ postSplitCleanup(N.getFunction());
+ updateCGAndAnalysisManagerForFunctionPass(CG, C, N, AM, UR, FAM);
+}
+
+// When we see the coroutine for the first time, we insert an indirect call to
+// a devirt trigger function and mark the coroutine as now ready for split.
+// Async lowering uses this after it has split the function to restart the
+// pipeline.
+static void prepareForSplit(Function &F, CallGraph &CG,
+ bool MarkForAsyncRestart = false) {
+ Module &M = *F.getParent();
+ LLVMContext &Context = F.getContext();
+#ifndef NDEBUG
+ Function *DevirtFn = M.getFunction(CORO_DEVIRT_TRIGGER_FN);
+ assert(DevirtFn && "coro.devirt.trigger function not found");
+#endif
+
+ F.addFnAttr(CORO_PRESPLIT_ATTR, MarkForAsyncRestart
+ ? ASYNC_RESTART_AFTER_SPLIT
+ : PREPARED_FOR_SPLIT);
+
+ // Insert an indirect call sequence that will be devirtualized by CoroElide
+ // pass:
+ // %0 = call i8* @llvm.coro.subfn.addr(i8* null, i8 -1)
+ // %1 = bitcast i8* %0 to void(i8*)*
+ // call void %1(i8* null)
+ coro::LowererBase Lowerer(M);
+ Instruction *InsertPt =
+ MarkForAsyncRestart ? F.getEntryBlock().getFirstNonPHIOrDbgOrLifetime()
+ : F.getEntryBlock().getTerminator();
+ auto *Null = ConstantPointerNull::get(Type::getInt8PtrTy(Context));
+ auto *DevirtFnAddr =
+ Lowerer.makeSubFnCall(Null, CoroSubFnInst::RestartTrigger, InsertPt);
+ FunctionType *FnTy = FunctionType::get(Type::getVoidTy(Context),
+ {Type::getInt8PtrTy(Context)}, false);
+ auto *IndirectCall = CallInst::Create(FnTy, DevirtFnAddr, Null, "", InsertPt);
+
+  // Update the call graph with the indirect call we just added.
+ CG[&F]->addCalledFunction(IndirectCall, CG.getCallsExternalNode());
+}
+
+// Make sure that there is a devirtualization trigger function that the
+// coro-split pass uses to force a restart of the CGSCC pipeline. If the devirt
+// trigger function is not found, we will create one and add it to the current
+// SCC.
+static void createDevirtTriggerFunc(CallGraph &CG, CallGraphSCC &SCC) {
+ Module &M = CG.getModule();
+ if (M.getFunction(CORO_DEVIRT_TRIGGER_FN))
+ return;
+
+ LLVMContext &C = M.getContext();
+ auto *FnTy = FunctionType::get(Type::getVoidTy(C), Type::getInt8PtrTy(C),
+ /*isVarArg=*/false);
+ Function *DevirtFn =
+ Function::Create(FnTy, GlobalValue::LinkageTypes::PrivateLinkage,
+ CORO_DEVIRT_TRIGGER_FN, &M);
+ DevirtFn->addFnAttr(Attribute::AlwaysInline);
+ auto *Entry = BasicBlock::Create(C, "entry", DevirtFn);
+ ReturnInst::Create(C, Entry);
+
+ auto *Node = CG.getOrInsertFunction(DevirtFn);
+
+ SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
+ Nodes.push_back(Node);
+ SCC.initialize(Nodes);
+}
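+
+// The generated trigger function is trivial; roughly (assuming
+// CORO_DEVIRT_TRIGGER_FN expands to "coro.devirt.trigger"):
+//
+//   define private void @coro.devirt.trigger(i8*) #0 {  ; #0 = alwaysinline
+//   entry:
+//     ret void
+//   }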
+
+/// Replace a call to llvm.coro.prepare.retcon.
+static void replacePrepare(CallInst *Prepare, LazyCallGraph &CG,
+ LazyCallGraph::SCC &C) {
+ auto CastFn = Prepare->getArgOperand(0); // as an i8*
+ auto Fn = CastFn->stripPointerCasts(); // as its original type
+
+ // Attempt to peephole this pattern:
+ // %0 = bitcast [[TYPE]] @some_function to i8*
+ // %1 = call @llvm.coro.prepare.retcon(i8* %0)
+ // %2 = bitcast %1 to [[TYPE]]
+ // ==>
+ // %2 = @some_function
+ for (auto UI = Prepare->use_begin(), UE = Prepare->use_end(); UI != UE;) {
+ // Look for bitcasts back to the original function type.
+ auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
+ if (!Cast || Cast->getType() != Fn->getType())
+ continue;
+
+ // Replace and remove the cast.
+ Cast->replaceAllUsesWith(Fn);
+ Cast->eraseFromParent();
+ }
+
+ // Replace any remaining uses with the function as an i8*.
+ // This can never directly be a callee, so we don't need to update CG.
+ Prepare->replaceAllUsesWith(CastFn);
+ Prepare->eraseFromParent();
+
+ // Kill dead bitcasts.
+ while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
+ if (!Cast->use_empty())
+ break;
+ CastFn = Cast->getOperand(0);
+ Cast->eraseFromParent();
+ }
+}
+/// Replace a call to llvm.coro.prepare.retcon.
+static void replacePrepare(CallInst *Prepare, CallGraph &CG) {
+ auto CastFn = Prepare->getArgOperand(0); // as an i8*
+ auto Fn = CastFn->stripPointerCasts(); // as its original type
+
+ // Find call graph nodes for the preparation.
+ CallGraphNode *PrepareUserNode = nullptr, *FnNode = nullptr;
+ if (auto ConcreteFn = dyn_cast<Function>(Fn)) {
+ PrepareUserNode = CG[Prepare->getFunction()];
+ FnNode = CG[ConcreteFn];
+ }
+
+ // Attempt to peephole this pattern:
+ // %0 = bitcast [[TYPE]] @some_function to i8*
+ // %1 = call @llvm.coro.prepare.retcon(i8* %0)
+ // %2 = bitcast %1 to [[TYPE]]
+ // ==>
+ // %2 = @some_function
+ for (auto UI = Prepare->use_begin(), UE = Prepare->use_end();
+ UI != UE; ) {
+ // Look for bitcasts back to the original function type.
+ auto *Cast = dyn_cast<BitCastInst>((UI++)->getUser());
+ if (!Cast || Cast->getType() != Fn->getType()) continue;
+
+ // Check whether the replacement will introduce new direct calls.
+ // If so, we'll need to update the call graph.
+ if (PrepareUserNode) {
+ for (auto &Use : Cast->uses()) {
+ if (auto *CB = dyn_cast<CallBase>(Use.getUser())) {
+ if (!CB->isCallee(&Use))
+ continue;
+ PrepareUserNode->removeCallEdgeFor(*CB);
+ PrepareUserNode->addCalledFunction(CB, FnNode);
+ }
+ }
+ }
+
+ // Replace and remove the cast.
+ Cast->replaceAllUsesWith(Fn);
+ Cast->eraseFromParent();
+ }
+
+ // Replace any remaining uses with the function as an i8*.
+ // This can never directly be a callee, so we don't need to update CG.
+ Prepare->replaceAllUsesWith(CastFn);
+ Prepare->eraseFromParent();
+
+ // Kill dead bitcasts.
+ while (auto *Cast = dyn_cast<BitCastInst>(CastFn)) {
+ if (!Cast->use_empty()) break;
+ CastFn = Cast->getOperand(0);
+ Cast->eraseFromParent();
+ }
+}
+
+static bool replaceAllPrepares(Function *PrepareFn, LazyCallGraph &CG,
+ LazyCallGraph::SCC &C) {
+ bool Changed = false;
+ for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end(); PI != PE;) {
+ // Intrinsics can only be used in calls.
+ auto *Prepare = cast<CallInst>((PI++)->getUser());
+ replacePrepare(Prepare, CG, C);
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+/// Remove calls to llvm.coro.prepare.retcon, a barrier meant to prevent
+/// IPO from operating on calls to a retcon coroutine before it's been
+/// split. This is only safe to do after we've split all retcon
/// coroutines in the module. We can do this in this pass because
+/// this pass does promise to split all retcon coroutines (as opposed to
+/// switch coroutines, which are lowered in multiple stages).
+static bool replaceAllPrepares(Function *PrepareFn, CallGraph &CG) {
+ bool Changed = false;
+ for (auto PI = PrepareFn->use_begin(), PE = PrepareFn->use_end();
+ PI != PE; ) {
+ // Intrinsics can only be used in calls.
+ auto *Prepare = cast<CallInst>((PI++)->getUser());
+ replacePrepare(Prepare, CG);
+ Changed = true;
+ }
+
+ return Changed;
+}
+
+static bool declaresCoroSplitIntrinsics(const Module &M) {
+ return coro::declaresIntrinsics(M, {"llvm.coro.begin",
+ "llvm.coro.prepare.retcon",
+ "llvm.coro.prepare.async"});
+}
+
+static void addPrepareFunction(const Module &M,
+ SmallVectorImpl<Function *> &Fns,
+ StringRef Name) {
+ auto *PrepareFn = M.getFunction(Name);
+ if (PrepareFn && !PrepareFn->use_empty())
+ Fns.push_back(PrepareFn);
+}
+
+PreservedAnalyses CoroSplitPass::run(LazyCallGraph::SCC &C,
+ CGSCCAnalysisManager &AM,
+ LazyCallGraph &CG, CGSCCUpdateResult &UR) {
+ // NB: One invariant of a valid LazyCallGraph::SCC is that it must contain a
+ // non-zero number of nodes, so we assume that here and grab the first
+ // node's function's module.
+ Module &M = *C.begin()->getFunction().getParent();
+ auto &FAM =
+ AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
+
+ if (!declaresCoroSplitIntrinsics(M))
+ return PreservedAnalyses::all();
+
+ // Check for uses of llvm.coro.prepare.retcon/async.
+ SmallVector<Function *, 2> PrepareFns;
+ addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
+ addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
+
+ // Find coroutines for processing.
+ SmallVector<LazyCallGraph::Node *, 4> Coroutines;
+ for (LazyCallGraph::Node &N : C)
+ if (N.getFunction().hasFnAttribute(CORO_PRESPLIT_ATTR))
+ Coroutines.push_back(&N);
+
+ if (Coroutines.empty() && PrepareFns.empty())
+ return PreservedAnalyses::all();
+
+ if (Coroutines.empty()) {
+ for (auto *PrepareFn : PrepareFns) {
+ replaceAllPrepares(PrepareFn, CG, C);
+ }
+ }
+
+ // Split all the coroutines.
+ for (LazyCallGraph::Node *N : Coroutines) {
+ Function &F = N->getFunction();
+ Attribute Attr = F.getFnAttribute(CORO_PRESPLIT_ATTR);
+ StringRef Value = Attr.getValueAsString();
+ LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F.getName()
+ << "' state: " << Value << "\n");
+ if (Value == UNPREPARED_FOR_SPLIT) {
+ // Enqueue a second iteration of the CGSCC pipeline on this SCC.
+ UR.CWorklist.insert(&C);
+ F.addFnAttr(CORO_PRESPLIT_ATTR, PREPARED_FOR_SPLIT);
+ continue;
+ }
+ F.removeFnAttr(CORO_PRESPLIT_ATTR);
+
+ SmallVector<Function *, 4> Clones;
+ const coro::Shape Shape = splitCoroutine(F, Clones, ReuseFrameSlot);
+ updateCallGraphAfterCoroutineSplit(*N, Shape, Clones, C, CG, AM, UR, FAM);
+
+ if ((Shape.ABI == coro::ABI::Async || Shape.ABI == coro::ABI::Retcon ||
+ Shape.ABI == coro::ABI::RetconOnce) &&
+ !Shape.CoroSuspends.empty()) {
+ // Run the CGSCC pipeline on the newly split functions.
+ // All clones will be in the same RefSCC, so choose a random clone.
+ UR.RCWorklist.insert(CG.lookupRefSCC(CG.get(*Clones[0])));
+ }
+ }
+
+ if (!PrepareFns.empty()) {
+ for (auto *PrepareFn : PrepareFns) {
+ replaceAllPrepares(PrepareFn, CG, C);
+ }
+ }
+
+ return PreservedAnalyses::none();
+}
+
+namespace {
+
+// We present a coroutine to LLVM as an ordinary function with suspension
+// points marked up with intrinsics. We let the optimizer party on the coroutine
+// as a single function for as long as possible. Shortly before the coroutine is
+// eligible to be inlined into its callers, we split up the coroutine into parts
+// corresponding to initial, resume and destroy invocations of the coroutine,
+// add them to the current SCC and restart the IPO pipeline to optimize the
+// coroutine subfunctions we extracted before proceeding to the caller of the
+// coroutine.
+struct CoroSplitLegacy : public CallGraphSCCPass {
+ static char ID; // Pass identification, replacement for typeid
+
+ CoroSplitLegacy(bool ReuseFrameSlot = false)
+ : CallGraphSCCPass(ID), ReuseFrameSlot(ReuseFrameSlot) {
+ initializeCoroSplitLegacyPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool Run = false;
+ bool ReuseFrameSlot;
+
+  // A coroutine is identified by the presence of the coro.begin intrinsic; if
+  // we don't have any, this pass has nothing to do.
+ bool doInitialization(CallGraph &CG) override {
+ Run = declaresCoroSplitIntrinsics(CG.getModule());
+ return CallGraphSCCPass::doInitialization(CG);
+ }
+
+ bool runOnSCC(CallGraphSCC &SCC) override {
+ if (!Run)
+ return false;
+
+ // Check for uses of llvm.coro.prepare.retcon.
+ SmallVector<Function *, 2> PrepareFns;
+ auto &M = SCC.getCallGraph().getModule();
+ addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.retcon");
+ addPrepareFunction(M, PrepareFns, "llvm.coro.prepare.async");
+
+ // Find coroutines for processing.
+ SmallVector<Function *, 4> Coroutines;
+ for (CallGraphNode *CGN : SCC)
+ if (auto *F = CGN->getFunction())
+ if (F->hasFnAttribute(CORO_PRESPLIT_ATTR))
+ Coroutines.push_back(F);
+
+ if (Coroutines.empty() && PrepareFns.empty())
+ return false;
+
+ CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
+
+ if (Coroutines.empty()) {
+ bool Changed = false;
+ for (auto *PrepareFn : PrepareFns)
+ Changed |= replaceAllPrepares(PrepareFn, CG);
+ return Changed;
+ }
+
+ createDevirtTriggerFunc(CG, SCC);
+
+ // Split all the coroutines.
+ for (Function *F : Coroutines) {
+ Attribute Attr = F->getFnAttribute(CORO_PRESPLIT_ATTR);
+ StringRef Value = Attr.getValueAsString();
+ LLVM_DEBUG(dbgs() << "CoroSplit: Processing coroutine '" << F->getName()
+ << "' state: " << Value << "\n");
+ // Async lowering marks coroutines to trigger a restart of the pipeline
+ // after it has split them.
+ if (Value == ASYNC_RESTART_AFTER_SPLIT) {
+ F->removeFnAttr(CORO_PRESPLIT_ATTR);
+ continue;
+ }
+ if (Value == UNPREPARED_FOR_SPLIT) {
+ prepareForSplit(*F, CG);
+ continue;
+ }
+ F->removeFnAttr(CORO_PRESPLIT_ATTR);
+
+ SmallVector<Function *, 4> Clones;
+ const coro::Shape Shape = splitCoroutine(*F, Clones, ReuseFrameSlot);
+ updateCallGraphAfterCoroutineSplit(*F, Shape, Clones, CG, SCC);
+ if (Shape.ABI == coro::ABI::Async) {
+ // Restart SCC passes.
+ // Mark function for CoroElide pass. It will devirtualize causing a
+ // restart of the SCC pipeline.
+ prepareForSplit(*F, CG, true /*MarkForAsyncRestart*/);
+ }
+ }
+
+ for (auto *PrepareFn : PrepareFns)
+ replaceAllPrepares(PrepareFn, CG);
+
+ return true;
+ }
+
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ CallGraphSCCPass::getAnalysisUsage(AU);
+ }
+
+ StringRef getPassName() const override { return "Coroutine Splitting"; }
+};
+
+} // end anonymous namespace
+
+char CoroSplitLegacy::ID = 0;
+
+INITIALIZE_PASS_BEGIN(
+ CoroSplitLegacy, "coro-split",
+ "Split coroutine into a set of functions driving its state machine", false,
+ false)
+INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
+INITIALIZE_PASS_END(
+ CoroSplitLegacy, "coro-split",
+ "Split coroutine into a set of functions driving its state machine", false,
+ false)
+
+Pass *llvm::createCoroSplitLegacyPass(bool ReuseFrameSlot) {
+ return new CoroSplitLegacy(ReuseFrameSlot);
+}
diff --git a/contrib/libs/llvm12/lib/Transforms/Coroutines/Coroutines.cpp b/contrib/libs/llvm12/lib/Transforms/Coroutines/Coroutines.cpp
new file mode 100644
index 00000000000..6699a5c4631
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/Coroutines/Coroutines.cpp
@@ -0,0 +1,755 @@
+//===- Coroutines.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the common infrastructure for Coroutine Passes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/Coroutines.h"
+#include "CoroInstr.h"
+#include "CoroInternal.h"
+#include "llvm-c/Transforms/Coroutines.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/CallGraphSCCPass.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Utils/Local.h"
+#include <cassert>
+#include <cstddef>
+#include <utility>
+
+using namespace llvm;
+
+void llvm::initializeCoroutines(PassRegistry &Registry) {
+ initializeCoroEarlyLegacyPass(Registry);
+ initializeCoroSplitLegacyPass(Registry);
+ initializeCoroElideLegacyPass(Registry);
+ initializeCoroCleanupLegacyPass(Registry);
+}
+
+static void addCoroutineOpt0Passes(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createCoroSplitLegacyPass());
+ PM.add(createCoroElideLegacyPass());
+
+ PM.add(createBarrierNoopPass());
+ PM.add(createCoroCleanupLegacyPass());
+}
+
+static void addCoroutineEarlyPasses(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createCoroEarlyLegacyPass());
+}
+
+static void addCoroutineScalarOptimizerPasses(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createCoroElideLegacyPass());
+}
+
+static void addCoroutineSCCPasses(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createCoroSplitLegacyPass(Builder.OptLevel != 0));
+}
+
+static void addCoroutineOptimizerLastPasses(const PassManagerBuilder &Builder,
+ legacy::PassManagerBase &PM) {
+ PM.add(createCoroCleanupLegacyPass());
+}
+
+void llvm::addCoroutinePassesToExtensionPoints(PassManagerBuilder &Builder) {
+ Builder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addCoroutineEarlyPasses);
+ Builder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addCoroutineOpt0Passes);
+ Builder.addExtension(PassManagerBuilder::EP_CGSCCOptimizerLate,
+ addCoroutineSCCPasses);
+ Builder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addCoroutineScalarOptimizerPasses);
+ Builder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addCoroutineOptimizerLastPasses);
+}
+
+// Construct the lowerer base class and initialize its members.
+coro::LowererBase::LowererBase(Module &M)
+ : TheModule(M), Context(M.getContext()),
+ Int8Ptr(Type::getInt8PtrTy(Context)),
+ ResumeFnType(FunctionType::get(Type::getVoidTy(Context), Int8Ptr,
+ /*isVarArg=*/false)),
+ NullPtr(ConstantPointerNull::get(Int8Ptr)) {}
+
+// Creates a sequence of instructions to obtain a resume function address using
+// llvm.coro.subfn.addr. It generates the following sequence:
+//
+// call i8* @llvm.coro.subfn.addr(i8* %Arg, i8 %index)
+// bitcast i8* %2 to void(i8*)*
+
+Value *coro::LowererBase::makeSubFnCall(Value *Arg, int Index,
+ Instruction *InsertPt) {
+ auto *IndexVal = ConstantInt::get(Type::getInt8Ty(Context), Index);
+ auto *Fn = Intrinsic::getDeclaration(&TheModule, Intrinsic::coro_subfn_addr);
+
+ assert(Index >= CoroSubFnInst::IndexFirst &&
+ Index < CoroSubFnInst::IndexLast &&
+ "makeSubFnCall: Index value out of range");
+ auto *Call = CallInst::Create(Fn, {Arg, IndexVal}, "", InsertPt);
+
+ auto *Bitcast =
+ new BitCastInst(Call, ResumeFnType->getPointerTo(), "", InsertPt);
+ return Bitcast;
+}
+
+#ifndef NDEBUG
+static bool isCoroutineIntrinsicName(StringRef Name) {
+ // NOTE: Must be sorted!
+ static const char *const CoroIntrinsics[] = {
+ "llvm.coro.alloc",
+ "llvm.coro.async.context.alloc",
+ "llvm.coro.async.context.dealloc",
+ "llvm.coro.async.store_resume",
+ "llvm.coro.begin",
+ "llvm.coro.destroy",
+ "llvm.coro.done",
+ "llvm.coro.end",
+ "llvm.coro.end.async",
+ "llvm.coro.frame",
+ "llvm.coro.free",
+ "llvm.coro.id",
+ "llvm.coro.id.async",
+ "llvm.coro.id.retcon",
+ "llvm.coro.id.retcon.once",
+ "llvm.coro.noop",
+ "llvm.coro.param",
+ "llvm.coro.prepare.async",
+ "llvm.coro.prepare.retcon",
+ "llvm.coro.promise",
+ "llvm.coro.resume",
+ "llvm.coro.save",
+ "llvm.coro.size",
+ "llvm.coro.subfn.addr",
+ "llvm.coro.suspend",
+ "llvm.coro.suspend.async",
+ "llvm.coro.suspend.retcon",
+ };
+ return Intrinsic::lookupLLVMIntrinsicByName(CoroIntrinsics, Name) != -1;
+}
+#endif
+
+// Verifies whether the module declares any of the named values listed. Also,
+// in debug mode, verifies that the names are coroutine intrinsic names.
+bool coro::declaresIntrinsics(const Module &M,
+ const std::initializer_list<StringRef> List) {
+ for (StringRef Name : List) {
+ assert(isCoroutineIntrinsicName(Name) && "not a coroutine intrinsic");
+ if (M.getNamedValue(Name))
+ return true;
+ }
+
+ return false;
+}
+
+// Replace all coro.frees associated with the provided CoroId with 'null' if
+// Elide is true, and with their frame parameter otherwise.
+void coro::replaceCoroFree(CoroIdInst *CoroId, bool Elide) {
+ SmallVector<CoroFreeInst *, 4> CoroFrees;
+ for (User *U : CoroId->users())
+ if (auto CF = dyn_cast<CoroFreeInst>(U))
+ CoroFrees.push_back(CF);
+
+ if (CoroFrees.empty())
+ return;
+
+ Value *Replacement =
+ Elide ? ConstantPointerNull::get(Type::getInt8PtrTy(CoroId->getContext()))
+ : CoroFrees.front()->getFrame();
+
+ for (CoroFreeInst *CF : CoroFrees) {
+ CF->replaceAllUsesWith(Replacement);
+ CF->eraseFromParent();
+ }
+}
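+
+// For illustration (hypothetical IR): with Elide == true,
+//
+//   %mem = call i8* @llvm.coro.free(token %id, i8* %hdl)
+//
+// is replaced by 'i8* null', suppressing deallocation; with Elide == false it
+// is replaced by its frame argument %hdl.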
+
+// FIXME: This code is stolen from CallGraph::addToCallGraph(Function *F), which
+// happens to be private. It would be better for this functionality to be
+// exposed by CallGraph.
+static void buildCGN(CallGraph &CG, CallGraphNode *Node) {
+ Function *F = Node->getFunction();
+
+ // Look for calls by this function.
+ for (Instruction &I : instructions(F))
+ if (auto *Call = dyn_cast<CallBase>(&I)) {
+ const Function *Callee = Call->getCalledFunction();
+ if (!Callee || !Intrinsic::isLeaf(Callee->getIntrinsicID()))
+ // Indirect calls of intrinsics are not allowed so no need to check.
+ // We can be more precise here by using TargetArg returned by
+ // Intrinsic::isLeaf.
+ Node->addCalledFunction(Call, CG.getCallsExternalNode());
+ else if (!Callee->isIntrinsic())
+ Node->addCalledFunction(Call, CG.getOrInsertFunction(Callee));
+ }
+}
+
+// Rebuild CGN after we extracted parts of the code from ParentFunc into
+// NewFuncs. Builds CGNs for the NewFuncs and adds them to the current SCC.
+void coro::updateCallGraph(Function &ParentFunc, ArrayRef<Function *> NewFuncs,
+ CallGraph &CG, CallGraphSCC &SCC) {
+ // Rebuild CGN from scratch for the ParentFunc
+ auto *ParentNode = CG[&ParentFunc];
+ ParentNode->removeAllCalledFunctions();
+ buildCGN(CG, ParentNode);
+
+ SmallVector<CallGraphNode *, 8> Nodes(SCC.begin(), SCC.end());
+
+ for (Function *F : NewFuncs) {
+ CallGraphNode *Callee = CG.getOrInsertFunction(F);
+ Nodes.push_back(Callee);
+ buildCGN(CG, Callee);
+ }
+
+ SCC.initialize(Nodes);
+}
+
+static void clear(coro::Shape &Shape) {
+ Shape.CoroBegin = nullptr;
+ Shape.CoroEnds.clear();
+ Shape.CoroSizes.clear();
+ Shape.CoroSuspends.clear();
+
+ Shape.FrameTy = nullptr;
+ Shape.FramePtr = nullptr;
+ Shape.AllocaSpillBlock = nullptr;
+}
+
+static CoroSaveInst *createCoroSave(CoroBeginInst *CoroBegin,
+ CoroSuspendInst *SuspendInst) {
+ Module *M = SuspendInst->getModule();
+ auto *Fn = Intrinsic::getDeclaration(M, Intrinsic::coro_save);
+ auto *SaveInst =
+ cast<CoroSaveInst>(CallInst::Create(Fn, CoroBegin, "", SuspendInst));
+ assert(!SuspendInst->getCoroSave());
+ SuspendInst->setArgOperand(0, SaveInst);
+ return SaveInst;
+}
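+
+// Illustrative IR for the save/suspend pairing created above (a sketch):
+//   %save = call token @llvm.coro.save(i8* %hdl)
+//   %code = call i8 @llvm.coro.suspend(token %save, i1 false)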
+
+// Collect "interesting" coroutine intrinsics.
+void coro::Shape::buildFrom(Function &F) {
+ bool HasFinalSuspend = false;
+ size_t FinalSuspendIndex = 0;
+ clear(*this);
+ SmallVector<CoroFrameInst *, 8> CoroFrames;
+ SmallVector<CoroSaveInst *, 2> UnusedCoroSaves;
+
+ for (Instruction &I : instructions(F)) {
+ if (auto II = dyn_cast<IntrinsicInst>(&I)) {
+ switch (II->getIntrinsicID()) {
+ default:
+ continue;
+ case Intrinsic::coro_size:
+ CoroSizes.push_back(cast<CoroSizeInst>(II));
+ break;
+ case Intrinsic::coro_frame:
+ CoroFrames.push_back(cast<CoroFrameInst>(II));
+ break;
+ case Intrinsic::coro_save:
+ // After optimizations, coro_suspends using this coro_save might have
+ // been removed; remember orphaned coro_saves to remove them later.
+ if (II->use_empty())
+ UnusedCoroSaves.push_back(cast<CoroSaveInst>(II));
+ break;
+ case Intrinsic::coro_suspend_async: {
+ auto *Suspend = cast<CoroSuspendAsyncInst>(II);
+ Suspend->checkWellFormed();
+ CoroSuspends.push_back(Suspend);
+ break;
+ }
+ case Intrinsic::coro_suspend_retcon: {
+ auto Suspend = cast<CoroSuspendRetconInst>(II);
+ CoroSuspends.push_back(Suspend);
+ break;
+ }
+ case Intrinsic::coro_suspend: {
+ auto Suspend = cast<CoroSuspendInst>(II);
+ CoroSuspends.push_back(Suspend);
+ if (Suspend->isFinal()) {
+ if (HasFinalSuspend)
+ report_fatal_error(
+ "Only one suspend point can be marked as final");
+ HasFinalSuspend = true;
+ FinalSuspendIndex = CoroSuspends.size() - 1;
+ }
+ break;
+ }
+ case Intrinsic::coro_begin: {
+ auto CB = cast<CoroBeginInst>(II);
+
+ // Ignore coro ids that aren't pre-split.
+ auto Id = dyn_cast<CoroIdInst>(CB->getId());
+ if (Id && !Id->getInfo().isPreSplit())
+ break;
+
+ if (CoroBegin)
+ report_fatal_error(
+ "coroutine should have exactly one defining @llvm.coro.begin");
+ CB->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
+ CB->addAttribute(AttributeList::ReturnIndex, Attribute::NoAlias);
+ CB->removeAttribute(AttributeList::FunctionIndex,
+ Attribute::NoDuplicate);
+ CoroBegin = CB;
+ break;
+ }
+ case Intrinsic::coro_end_async:
+ case Intrinsic::coro_end:
+ CoroEnds.push_back(cast<AnyCoroEndInst>(II));
+ if (auto *AsyncEnd = dyn_cast<CoroAsyncEndInst>(II)) {
+ AsyncEnd->checkWellFormed();
+ }
+ if (CoroEnds.back()->isFallthrough() && isa<CoroEndInst>(II)) {
+ // Make sure that the fallthrough coro.end is the first element in the
+ // CoroEnds vector.
+ // Note: I don't think this is necessary anymore.
+ if (CoroEnds.size() > 1) {
+ if (CoroEnds.front()->isFallthrough())
+ report_fatal_error(
+ "Only one coro.end can be marked as fallthrough");
+ std::swap(CoroEnds.front(), CoroEnds.back());
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ // If, for some reason, we were not able to find coro.begin, bail out.
+ if (!CoroBegin) {
+ // Replace coro.frame instructions, which are supposed to be lowered to the
+ // result of coro.begin, with undef.
+ auto *Undef = UndefValue::get(Type::getInt8PtrTy(F.getContext()));
+ for (CoroFrameInst *CF : CoroFrames) {
+ CF->replaceAllUsesWith(Undef);
+ CF->eraseFromParent();
+ }
+
+ // Replace all coro.suspend with undef and remove related coro.saves if
+ // present.
+ for (AnyCoroSuspendInst *CS : CoroSuspends) {
+ CS->replaceAllUsesWith(UndefValue::get(CS->getType()));
+ CS->eraseFromParent();
+ if (auto *CoroSave = CS->getCoroSave())
+ CoroSave->eraseFromParent();
+ }
+
+ // Replace all coro.ends with unreachable instruction.
+ for (AnyCoroEndInst *CE : CoroEnds)
+ changeToUnreachable(CE, /*UseLLVMTrap=*/false);
+
+ return;
+ }
+
+ auto Id = CoroBegin->getId();
+ switch (auto IdIntrinsic = Id->getIntrinsicID()) {
+ case Intrinsic::coro_id: {
+ auto SwitchId = cast<CoroIdInst>(Id);
+ this->ABI = coro::ABI::Switch;
+ this->SwitchLowering.HasFinalSuspend = HasFinalSuspend;
+ this->SwitchLowering.ResumeSwitch = nullptr;
+ this->SwitchLowering.PromiseAlloca = SwitchId->getPromise();
+ this->SwitchLowering.ResumeEntryBlock = nullptr;
+
+ for (auto AnySuspend : CoroSuspends) {
+ auto Suspend = dyn_cast<CoroSuspendInst>(AnySuspend);
+ if (!Suspend) {
+#ifndef NDEBUG
+ AnySuspend->dump();
+#endif
+ report_fatal_error("coro.id must be paired with coro.suspend");
+ }
+
+ if (!Suspend->getCoroSave())
+ createCoroSave(CoroBegin, Suspend);
+ }
+ break;
+ }
+ case Intrinsic::coro_id_async: {
+ auto *AsyncId = cast<CoroIdAsyncInst>(Id);
+ AsyncId->checkWellFormed();
+ this->ABI = coro::ABI::Async;
+ this->AsyncLowering.Context = AsyncId->getStorage();
+ this->AsyncLowering.ContextArgNo = AsyncId->getStorageArgumentIndex();
+ this->AsyncLowering.ContextHeaderSize = AsyncId->getStorageSize();
+ this->AsyncLowering.ContextAlignment =
+ AsyncId->getStorageAlignment().value();
+ this->AsyncLowering.AsyncFuncPointer = AsyncId->getAsyncFunctionPointer();
+ auto &Context = F.getContext();
+ auto *Int8PtrTy = Type::getInt8PtrTy(Context);
+ auto *VoidTy = Type::getVoidTy(Context);
+ this->AsyncLowering.AsyncFuncTy =
+ FunctionType::get(VoidTy, {Int8PtrTy, Int8PtrTy, Int8PtrTy}, false);
+ break;
+ }
+ case Intrinsic::coro_id_retcon:
+ case Intrinsic::coro_id_retcon_once: {
+ auto ContinuationId = cast<AnyCoroIdRetconInst>(Id);
+ ContinuationId->checkWellFormed();
+ this->ABI = (IdIntrinsic == Intrinsic::coro_id_retcon
+ ? coro::ABI::Retcon
+ : coro::ABI::RetconOnce);
+ auto Prototype = ContinuationId->getPrototype();
+ this->RetconLowering.ResumePrototype = Prototype;
+ this->RetconLowering.Alloc = ContinuationId->getAllocFunction();
+ this->RetconLowering.Dealloc = ContinuationId->getDeallocFunction();
+ this->RetconLowering.ReturnBlock = nullptr;
+ this->RetconLowering.IsFrameInlineInStorage = false;
+
+ // Determine the result value types, and make sure they match up with
+ // the values passed to the suspends.
+ auto ResultTys = getRetconResultTypes();
+ auto ResumeTys = getRetconResumeTypes();
+
+ for (auto AnySuspend : CoroSuspends) {
+ auto Suspend = dyn_cast<CoroSuspendRetconInst>(AnySuspend);
+ if (!Suspend) {
+#ifndef NDEBUG
+ AnySuspend->dump();
+#endif
+ report_fatal_error("coro.id.retcon.* must be paired with "
+ "coro.suspend.retcon");
+ }
+
+ // Check that the argument types of the suspend match the results.
+ auto SI = Suspend->value_begin(), SE = Suspend->value_end();
+ auto RI = ResultTys.begin(), RE = ResultTys.end();
+ for (; SI != SE && RI != RE; ++SI, ++RI) {
+ auto SrcTy = (*SI)->getType();
+ if (SrcTy != *RI) {
+ // The optimizer likes to eliminate bitcasts leading into variadic
+ // calls, but that messes with our invariants. Re-insert the
+ // bitcast and ignore this type mismatch.
+ if (CastInst::isBitCastable(SrcTy, *RI)) {
+ auto BCI = new BitCastInst(*SI, *RI, "", Suspend);
+ SI->set(BCI);
+ continue;
+ }
+
+#ifndef NDEBUG
+ Suspend->dump();
+ Prototype->getFunctionType()->dump();
+#endif
+ report_fatal_error("argument to coro.suspend.retcon does not "
+ "match corresponding prototype function result");
+ }
+ }
+ if (SI != SE || RI != RE) {
+#ifndef NDEBUG
+ Suspend->dump();
+ Prototype->getFunctionType()->dump();
+#endif
+ report_fatal_error("wrong number of arguments to coro.suspend.retcon");
+ }
+
+ // Check that the result type of the suspend matches the resume types.
+ Type *SResultTy = Suspend->getType();
+ ArrayRef<Type*> SuspendResultTys;
+ if (SResultTy->isVoidTy()) {
+ // Leave SuspendResultTys as an empty array.
+ } else if (auto SResultStructTy = dyn_cast<StructType>(SResultTy)) {
+ SuspendResultTys = SResultStructTy->elements();
+ } else {
+ // Forms a one-element ArrayRef over the local SResultTy; be careful.
+ SuspendResultTys = SResultTy;
+ }
+ if (SuspendResultTys.size() != ResumeTys.size()) {
+#ifndef NDEBUG
+ Suspend->dump();
+ Prototype->getFunctionType()->dump();
+#endif
+ report_fatal_error("wrong number of results from coro.suspend.retcon");
+ }
+ for (size_t I = 0, E = ResumeTys.size(); I != E; ++I) {
+ if (SuspendResultTys[I] != ResumeTys[I]) {
+#ifndef NDEBUG
+ Suspend->dump();
+ Prototype->getFunctionType()->dump();
+#endif
+ report_fatal_error("result from coro.suspend.retcon does not "
+ "match corresponding prototype function param");
+ }
+ }
+ }
+ break;
+ }
+
+ default:
+ llvm_unreachable("coro.begin is not dependent on a coro.id call");
+ }
+
+ // The coro.frame intrinsic is always lowered to the result of coro.begin.
+ for (CoroFrameInst *CF : CoroFrames) {
+ CF->replaceAllUsesWith(CoroBegin);
+ CF->eraseFromParent();
+ }
+
+ // Move final suspend to be the last element in the CoroSuspends vector.
+ if (ABI == coro::ABI::Switch &&
+ SwitchLowering.HasFinalSuspend &&
+ FinalSuspendIndex != CoroSuspends.size() - 1)
+ std::swap(CoroSuspends[FinalSuspendIndex], CoroSuspends.back());
+
+ // Remove orphaned coro.saves.
+ for (CoroSaveInst *CoroSave : UnusedCoroSaves)
+ CoroSave->eraseFromParent();
+}
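+
+// For reference, a minimal switch-ABI pattern that buildFrom expects from the
+// frontend (an illustrative sketch, not emitted here):
+//   %id   = call token @llvm.coro.id(i32 0, i8* null, i8* null, i8* null)
+//   %size = call i32 @llvm.coro.size.i32()
+//   %mem  = call i8* @malloc(i32 %size)
+//   %hdl  = call i8* @llvm.coro.begin(token %id, i8* %mem)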
+
+static void propagateCallAttrsFromCallee(CallInst *Call, Function *Callee) {
+ Call->setCallingConv(Callee->getCallingConv());
+ // TODO: attributes?
+}
+
+static void addCallToCallGraph(CallGraph *CG, CallInst *Call, Function *Callee){
+ if (CG)
+ (*CG)[Call->getFunction()]->addCalledFunction(Call, (*CG)[Callee]);
+}
+
+Value *coro::Shape::emitAlloc(IRBuilder<> &Builder, Value *Size,
+ CallGraph *CG) const {
+ switch (ABI) {
+ case coro::ABI::Switch:
+ llvm_unreachable("can't allocate memory in coro switch-lowering");
+
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce: {
+ auto Alloc = RetconLowering.Alloc;
+ Size = Builder.CreateIntCast(Size,
+ Alloc->getFunctionType()->getParamType(0),
+ /*is signed*/ false);
+ auto *Call = Builder.CreateCall(Alloc, Size);
+ propagateCallAttrsFromCallee(Call, Alloc);
+ addCallToCallGraph(CG, Call, Alloc);
+ return Call;
+ }
+ case coro::ABI::Async:
+ llvm_unreachable("can't allocate memory in coro async-lowering");
+ }
+ llvm_unreachable("Unknown coro::ABI enum");
+}
+
+void coro::Shape::emitDealloc(IRBuilder<> &Builder, Value *Ptr,
+ CallGraph *CG) const {
+ switch (ABI) {
+ case coro::ABI::Switch:
+ llvm_unreachable("can't allocate memory in coro switch-lowering");
+
+ case coro::ABI::Retcon:
+ case coro::ABI::RetconOnce: {
+ auto Dealloc = RetconLowering.Dealloc;
+ Ptr = Builder.CreateBitCast(Ptr,
+ Dealloc->getFunctionType()->getParamType(0));
+ auto *Call = Builder.CreateCall(Dealloc, Ptr);
+ propagateCallAttrsFromCallee(Call, Dealloc);
+ addCallToCallGraph(CG, Call, Dealloc);
+ return;
+ }
+ case coro::ABI::Async:
+ llvm_unreachable("can't allocate memory in coro async-lowering");
+ }
+ llvm_unreachable("Unknown coro::ABI enum");
+}
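+
+// Illustrative call sites (a sketch; Builder, SizeVal and FramePtr are assumed
+// names, not values defined in this file):
+//   Value *Frame = Shape.emitAlloc(Builder, SizeVal, /*CG=*/nullptr);
+//   Shape.emitDealloc(Builder, FramePtr, /*CG=*/nullptr);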
+
+LLVM_ATTRIBUTE_NORETURN
+static void fail(const Instruction *I, const char *Reason, Value *V) {
+#ifndef NDEBUG
+ I->dump();
+ if (V) {
+ errs() << " Value: ";
+ V->printAsOperand(llvm::errs());
+ errs() << '\n';
+ }
+#endif
+ report_fatal_error(Reason);
+}
+
+/// Check that the given value is a well-formed prototype for the
+/// llvm.coro.id.retcon.* intrinsics.
+static void checkWFRetconPrototype(const AnyCoroIdRetconInst *I, Value *V) {
+ auto F = dyn_cast<Function>(V->stripPointerCasts());
+ if (!F)
+ fail(I, "llvm.coro.id.retcon.* prototype not a Function", V);
+
+ auto FT = F->getFunctionType();
+
+ if (isa<CoroIdRetconInst>(I)) {
+ bool ResultOkay;
+ if (FT->getReturnType()->isPointerTy()) {
+ ResultOkay = true;
+ } else if (auto SRetTy = dyn_cast<StructType>(FT->getReturnType())) {
+ ResultOkay = (!SRetTy->isOpaque() &&
+ SRetTy->getNumElements() > 0 &&
+ SRetTy->getElementType(0)->isPointerTy());
+ } else {
+ ResultOkay = false;
+ }
+ if (!ResultOkay)
+ fail(I, "llvm.coro.id.retcon prototype must return pointer as first "
+ "result", F);
+
+ if (FT->getReturnType() !=
+ I->getFunction()->getFunctionType()->getReturnType())
+ fail(I, "llvm.coro.id.retcon prototype return type must be same as"
+ "current function return type", F);
+ } else {
+ // No meaningful validation to do here for llvm.coro.id.retcon.once.
+ }
+
+ if (FT->getNumParams() == 0 || !FT->getParamType(0)->isPointerTy())
+ fail(I, "llvm.coro.id.retcon.* prototype must take pointer as "
+ "its first parameter", F);
+}
+
+/// Check that the given value is a well-formed allocator.
+static void checkWFAlloc(const Instruction *I, Value *V) {
+ auto F = dyn_cast<Function>(V->stripPointerCasts());
+ if (!F)
+ fail(I, "llvm.coro.* allocator not a Function", V);
+
+ auto FT = F->getFunctionType();
+ if (!FT->getReturnType()->isPointerTy())
+ fail(I, "llvm.coro.* allocator must return a pointer", F);
+
+ if (FT->getNumParams() != 1 ||
+ !FT->getParamType(0)->isIntegerTy())
+ fail(I, "llvm.coro.* allocator must take integer as only param", F);
+}
+
+/// Check that the given value is a well-formed deallocator.
+static void checkWFDealloc(const Instruction *I, Value *V) {
+ auto F = dyn_cast<Function>(V->stripPointerCasts());
+ if (!F)
+ fail(I, "llvm.coro.* deallocator not a Function", V);
+
+ auto FT = F->getFunctionType();
+ if (!FT->getReturnType()->isVoidTy())
+ fail(I, "llvm.coro.* deallocator must return void", F);
+
+ if (FT->getNumParams() != 1 ||
+ !FT->getParamType(0)->isPointerTy())
+ fail(I, "llvm.coro.* deallocator must take pointer as only param", F);
+}
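+
+// Together, these checks accept, for example, the following illustrative pair:
+//   declare i8* @my_alloc(i32)  ; pointer result, single integer parameter
+//   declare void @my_free(i8*)  ; void result, single pointer parameter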
+
+static void checkConstantInt(const Instruction *I, Value *V,
+ const char *Reason) {
+ if (!isa<ConstantInt>(V)) {
+ fail(I, Reason, V);
+ }
+}
+
+void AnyCoroIdRetconInst::checkWellFormed() const {
+ checkConstantInt(this, getArgOperand(SizeArg),
+ "size argument to coro.id.retcon.* must be constant");
+ checkConstantInt(this, getArgOperand(AlignArg),
+ "alignment argument to coro.id.retcon.* must be constant");
+ checkWFRetconPrototype(this, getArgOperand(PrototypeArg));
+ checkWFAlloc(this, getArgOperand(AllocArg));
+ checkWFDealloc(this, getArgOperand(DeallocArg));
+}
+
+static void checkAsyncFuncPointer(const Instruction *I, Value *V) {
+ auto *AsyncFuncPtrAddr = dyn_cast<GlobalVariable>(V->stripPointerCasts());
+ if (!AsyncFuncPtrAddr)
+ fail(I, "llvm.coro.id.async async function pointer not a global", V);
+
+ auto *StructTy =
+ cast<StructType>(AsyncFuncPtrAddr->getType()->getPointerElementType());
+ if (StructTy->isOpaque() || !StructTy->isPacked() ||
+ StructTy->getNumElements() != 2 ||
+ !StructTy->getElementType(0)->isIntegerTy(32) ||
+ !StructTy->getElementType(1)->isIntegerTy(32))
+ fail(I,
+ "llvm.coro.id.async async function pointer argument's type is not "
+ "<{i32, i32}>",
+ V);
+}
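+
+// An illustrative global that satisfies this check (field values are
+// placeholders):
+//   @my_async_fp = constant <{ i32, i32 }> <{ i32 0, i32 64 }>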
+
+void CoroIdAsyncInst::checkWellFormed() const {
+ checkConstantInt(this, getArgOperand(SizeArg),
+ "size argument to coro.id.async must be constant");
+ checkConstantInt(this, getArgOperand(AlignArg),
+ "alignment argument to coro.id.async must be constant");
+ checkConstantInt(this, getArgOperand(StorageArg),
+ "storage argument offset to coro.id.async must be constant");
+ checkAsyncFuncPointer(this, getArgOperand(AsyncFuncPtrArg));
+}
+
+static void checkAsyncContextProjectFunction(const Instruction *I,
+ Function *F) {
+ auto *FunTy = cast<FunctionType>(F->getType()->getPointerElementType());
+ if (!FunTy->getReturnType()->isPointerTy() ||
+ !FunTy->getReturnType()->getPointerElementType()->isIntegerTy(8))
+ fail(I,
+ "llvm.coro.suspend.async resume function projection function must "
+ "return an i8* type",
+ F);
+ if (FunTy->getNumParams() != 1 || !FunTy->getParamType(0)->isPointerTy() ||
+ !FunTy->getParamType(0)->getPointerElementType()->isIntegerTy(8))
+ fail(I,
+ "llvm.coro.suspend.async resume function projection function must "
+ "take one i8* type as parameter",
+ F);
+}
+
+void CoroSuspendAsyncInst::checkWellFormed() const {
+ checkAsyncContextProjectFunction(this, getAsyncContextProjectionFunction());
+}
+
+void CoroAsyncEndInst::checkWellFormed() const {
+ auto *MustTailCallFunc = getMustTailCallFunction();
+ if (!MustTailCallFunc)
+ return;
+ auto *FnTy =
+ cast<FunctionType>(MustTailCallFunc->getType()->getPointerElementType());
+ if (FnTy->getNumParams() != (getNumArgOperands() - 3))
+ fail(this,
+ "llvm.coro.end.async must tail call function argument type must "
+ "match the tail arguments",
+ MustTailCallFunc);
+}
+
+void LLVMAddCoroEarlyPass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createCoroEarlyLegacyPass());
+}
+
+void LLVMAddCoroSplitPass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createCoroSplitLegacyPass());
+}
+
+void LLVMAddCoroElidePass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createCoroElideLegacyPass());
+}
+
+void LLVMAddCoroCleanupPass(LLVMPassManagerRef PM) {
+ unwrap(PM)->add(createCoroCleanupLegacyPass());
+}
+
+void
+LLVMPassManagerBuilderAddCoroutinePassesToExtensionPoints(LLVMPassManagerBuilderRef PMB) {
+ PassManagerBuilder *Builder = unwrap(PMB);
+ addCoroutinePassesToExtensionPoints(*Builder);
+}
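+
+// Illustrative use of these C bindings (a sketch; Mod is an assumed
+// LLVMModuleRef):
+//   LLVMPassManagerRef PM = LLVMCreatePassManager();
+//   LLVMAddCoroEarlyPass(PM);
+//   LLVMAddCoroSplitPass(PM);
+//   LLVMAddCoroElidePass(PM);
+//   LLVMAddCoroCleanupPass(PM);
+//   LLVMRunPassManager(PM, Mod);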
diff --git a/contrib/libs/llvm12/lib/Transforms/HelloNew/HelloWorld.cpp b/contrib/libs/llvm12/lib/Transforms/HelloNew/HelloWorld.cpp
new file mode 100644
index 00000000000..dea94f8a8f6
--- /dev/null
+++ b/contrib/libs/llvm12/lib/Transforms/HelloNew/HelloWorld.cpp
@@ -0,0 +1,20 @@
+//===-- HelloWorld.cpp - Example Transformations --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Transforms/HelloNew/HelloWorld.h"
+
+using namespace llvm;
+
+PreservedAnalyses HelloWorldPass::run(Function &F,
+ FunctionAnalysisManager &AM) {
+ errs() << F.getName() << "\n";
+ return PreservedAnalyses::all();
+}
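+
+// Example invocation (illustrative): with the new pass manager,
+//   opt -disable-output -passes=helloworld input.ll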
diff --git a/contrib/libs/llvm12/lib/Transforms/IPO/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/IPO/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/IPO/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Transforms/InstCombine/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/InstCombine/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/InstCombine/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Transforms/Instrumentation/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/Instrumentation/.yandex_meta/licenses.list.txt
deleted file mode 100644
index ad3879fc450..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/Instrumentation/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,303 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-
-
-====================File: LICENSE.TXT====================
-==============================================================================
-The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
-==============================================================================
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
----- LLVM Exceptions to the Apache 2.0 License ----
-
-As an exception, if, as a result of your compiling your source code, portions
-of this Software are embedded into an Object form of such source code, you
-may redistribute such embedded portions in such Object form without complying
-with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
-
-In addition, if you combine or link compiled forms of this Software with
-software that is licensed under the GPLv2 ("Combined Software") and if a
-court of competent jurisdiction determines that the patent provision (Section
-3), the indemnity provision (Section 9) or other Section of the License
-conflicts with the conditions of the GPLv2, you may retroactively and
-prospectively choose to deem waived or otherwise exclude such Section(s) of
-the License, but only in their entirety and only with respect to the Combined
-Software.
-
-==============================================================================
-Software from third parties included in the LLVM Project:
-==============================================================================
-The LLVM Project contains third party software which is under different license
-terms. All such code will be identified clearly using at least one of two
-mechanisms:
-1) It will be in a separate directory tree with its own `LICENSE.txt` or
- `LICENSE` file at the top containing the specific license and restrictions
- which apply to that software, or
-2) It will contain specific license and restriction terms at the top of every
- file.
-
-==============================================================================
-Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
-==============================================================================
-University of Illinois/NCSA
-Open Source License
-
-Copyright (c) 2003-2019 University of Illinois at Urbana-Champaign.
-All rights reserved.
-
-Developed by:
-
- LLVM Team
-
- University of Illinois at Urbana-Champaign
-
- http://llvm.org
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal with
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
- * Redistributions of source code must retain the above copyright notice,
- this list of conditions and the following disclaimers.
-
- * Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimers in the
- documentation and/or other materials provided with the distribution.
-
- * Neither the names of the LLVM Team, University of Illinois at
- Urbana-Champaign, nor the names of its contributors may be used to
- endorse or promote products derived from this Software without specific
- prior written permission.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
-SOFTWARE.
-
-
-
-====================File: include/llvm/Support/LICENSE.TXT====================
-LLVM System Interface Library
--------------------------------------------------------------------------------
-The LLVM System Interface Library is licensed under the Illinois Open Source
-License and has the following additional copyright:
-
-Copyright (C) 2004 eXtensible Systems, Inc.
-
-
-====================NCSA====================
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
diff --git a/contrib/libs/llvm12/lib/Transforms/ObjCARC/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/ObjCARC/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/ObjCARC/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Transforms/Scalar/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/Scalar/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/Scalar/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Transforms/Utils/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/Utils/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/Utils/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/Transforms/Vectorize/.yandex_meta/licenses.list.txt b/contrib/libs/llvm12/lib/Transforms/Vectorize/.yandex_meta/licenses.list.txt
deleted file mode 100644
index c62d353021c..00000000000
--- a/contrib/libs/llvm12/lib/Transforms/Vectorize/.yandex_meta/licenses.list.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-====================Apache-2.0 WITH LLVM-exception====================
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-
-
-====================Apache-2.0 WITH LLVM-exception====================
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/contrib/libs/llvm12/lib/WindowsManifest/WindowsManifestMerger.cpp b/contrib/libs/llvm12/lib/WindowsManifest/WindowsManifestMerger.cpp
new file mode 100644
index 00000000000..54adb0ae2b7
--- /dev/null
+++ b/contrib/libs/llvm12/lib/WindowsManifest/WindowsManifestMerger.cpp
@@ -0,0 +1,729 @@
+//===-- WindowsManifestMerger.cpp ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file implements the .manifest merger class.
+//
+//===---------------------------------------------------------------------===//
+
+#include "llvm/WindowsManifest/WindowsManifestMerger.h"
+#include "llvm/Config/config.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+#include <map>
+
+#if LLVM_ENABLE_LIBXML2
+#error #include <libxml/xmlreader.h>
+#endif
+
+#define TO_XML_CHAR(X) reinterpret_cast<const unsigned char *>(X)
+#define FROM_XML_CHAR(X) reinterpret_cast<const char *>(X)
+
+using namespace llvm;
+using namespace windows_manifest;
+
+char WindowsManifestError::ID = 0;
+
+WindowsManifestError::WindowsManifestError(const Twine &Msg) : Msg(Msg.str()) {}
+
+void WindowsManifestError::log(raw_ostream &OS) const { OS << Msg; }
+
+class WindowsManifestMerger::WindowsManifestMergerImpl {
+public:
+ ~WindowsManifestMergerImpl();
+ Error merge(const MemoryBuffer &Manifest);
+ std::unique_ptr<MemoryBuffer> getMergedManifest();
+
+private:
+ static void errorCallback(void *Ctx, const char *Format, ...);
+ Error getParseError();
+#if LLVM_ENABLE_LIBXML2
+ xmlDocPtr CombinedDoc = nullptr;
+ std::vector<xmlDocPtr> MergedDocs;
+
+ bool Merged = false;
+ struct XmlDeleter {
+ void operator()(xmlChar *Ptr) { xmlFree(Ptr); }
+ void operator()(xmlDoc *Ptr) { xmlFreeDoc(Ptr); }
+ };
+ int BufferSize = 0;
+ std::unique_ptr<xmlChar, XmlDeleter> Buffer;
+#endif
+ bool ParseErrorOccurred = false;
+};
+
+#if LLVM_ENABLE_LIBXML2
+
+static constexpr std::pair<StringLiteral, StringLiteral> MtNsHrefsPrefixes[] = {
+ {"urn:schemas-microsoft-com:asm.v1", "ms_asmv1"},
+ {"urn:schemas-microsoft-com:asm.v2", "ms_asmv2"},
+ {"urn:schemas-microsoft-com:asm.v3", "ms_asmv3"},
+ {"http://schemas.microsoft.com/SMI/2005/WindowsSettings",
+ "ms_windowsSettings"},
+ {"urn:schemas-microsoft-com:compatibility.v1", "ms_compatibilityv1"}};
+
+static bool xmlStringsEqual(const unsigned char *A, const unsigned char *B) {
+ // Handle null pointers. Comparing two null pointers returns true because
+ // both indicate the prefix of a default namespace.
+ if (!A || !B)
+ return A == B;
+ return strcmp(FROM_XML_CHAR(A), FROM_XML_CHAR(B)) == 0;
+}
+
+static bool isMergeableElement(const unsigned char *ElementName) {
+ for (StringRef S : {"application", "assembly", "assemblyIdentity",
+ "compatibility", "noInherit", "requestedExecutionLevel",
+ "requestedPrivileges", "security", "trustInfo"}) {
+ if (S == FROM_XML_CHAR(ElementName)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static xmlNodePtr getChildWithName(xmlNodePtr Parent,
+ const unsigned char *ElementName) {
+ for (xmlNodePtr Child = Parent->children; Child; Child = Child->next) {
+ if (xmlStringsEqual(Child->name, ElementName)) {
+ return Child;
+ }
+ }
+ return nullptr;
+}
+
+static xmlAttrPtr getAttribute(xmlNodePtr Node,
+ const unsigned char *AttributeName) {
+ for (xmlAttrPtr Attribute = Node->properties; Attribute != nullptr;
+ Attribute = Attribute->next) {
+ if (xmlStringsEqual(Attribute->name, AttributeName)) {
+ return Attribute;
+ }
+ }
+ return nullptr;
+}
+
+// Check if namespace specified by HRef1 overrides that of HRef2.
+static bool namespaceOverrides(const unsigned char *HRef1,
+ const unsigned char *HRef2) {
+ auto HRef1Position = llvm::find_if(
+ MtNsHrefsPrefixes, [=](const std::pair<StringRef, StringRef> &Element) {
+ return xmlStringsEqual(HRef1, TO_XML_CHAR(Element.first.data()));
+ });
+ auto HRef2Position = llvm::find_if(
+ MtNsHrefsPrefixes, [=](const std::pair<StringRef, StringRef> &Element) {
+ return xmlStringsEqual(HRef2, TO_XML_CHAR(Element.first.data()));
+ });
+ return HRef1Position < HRef2Position;
+}
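+
+// For example, "urn:schemas-microsoft-com:asm.v1" overrides
+// "urn:schemas-microsoft-com:compatibility.v1" because it appears earlier in
+// MtNsHrefsPrefixes; an href missing from the table compares as lowest
+// priority and never overrides a listed one.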
+
+// Search for prefix-defined namespace specified by HRef, starting on Node and
+// continuing recursively upwards. Returns the namespace or nullptr if not
+// found.
+static xmlNsPtr search(const unsigned char *HRef, xmlNodePtr Node) {
+ for (xmlNsPtr Def = Node->nsDef; Def; Def = Def->next) {
+ if (Def->prefix && xmlStringsEqual(Def->href, HRef)) {
+ return Def;
+ }
+ }
+ if (Node->parent) {
+ return search(HRef, Node->parent);
+ }
+ return nullptr;
+}
+
+// Return the prefix that corresponds to the HRef. If HRef is not a recognized
+// URI, then just return the HRef itself to use as the prefix.
+static const unsigned char *getPrefixForHref(const unsigned char *HRef) {
+ for (auto &Ns : MtNsHrefsPrefixes) {
+ if (xmlStringsEqual(HRef, TO_XML_CHAR(Ns.first.data()))) {
+ return TO_XML_CHAR(Ns.second.data());
+ }
+ }
+ return HRef;
+}
+
+// Search for prefix-defined namespace specified by HRef, starting on Node and
+// continuing recursively upwards. If it is found, then return it. If it is
+// not found, then prefix-define that namespace on the node and return a
+// reference to it.
+static Expected<xmlNsPtr> searchOrDefine(const unsigned char *HRef,
+ xmlNodePtr Node) {
+ if (xmlNsPtr Def = search(HRef, Node))
+ return Def;
+ if (xmlNsPtr Def = xmlNewNs(Node, HRef, getPrefixForHref(HRef)))
+ return Def;
+ return make_error<WindowsManifestError>("failed to create new namespace");
+}
+
+// Set the namespace of OriginalAttribute on OriginalNode to that of
+// AdditionalAttribute.
+static Error copyAttributeNamespace(xmlAttrPtr OriginalAttribute,
+ xmlNodePtr OriginalNode,
+ xmlAttrPtr AdditionalAttribute) {
+
+ Expected<xmlNsPtr> ExplicitOrError =
+ searchOrDefine(AdditionalAttribute->ns->href, OriginalNode);
+ if (!ExplicitOrError)
+ return ExplicitOrError.takeError();
+ OriginalAttribute->ns = std::move(ExplicitOrError.get());
+ return Error::success();
+}
+
+// Return the corresponding namespace definition for the prefix, defined on the
+// given Node. Returns nullptr if there is no such definition.
+static xmlNsPtr getNamespaceWithPrefix(const unsigned char *Prefix,
+ xmlNodePtr Node) {
+ if (Node == nullptr)
+ return nullptr;
+ for (xmlNsPtr Def = Node->nsDef; Def; Def = Def->next) {
+ if (xmlStringsEqual(Def->prefix, Prefix)) {
+ return Def;
+ }
+ }
+ return nullptr;
+}
+
+// Search for the closest inheritable default namespace, starting on (and
+// including) the Node and traveling upwards through parent nodes. Returns
+// nullptr if there are no inheritable default namespaces.
+static xmlNsPtr getClosestDefault(xmlNodePtr Node) {
+ if (xmlNsPtr Ret = getNamespaceWithPrefix(nullptr, Node))
+ return Ret;
+ if (Node->parent == nullptr)
+ return nullptr;
+ return getClosestDefault(Node->parent);
+}
+
+// Merge the attributes of AdditionalNode into OriginalNode. Attributes with
+// identical types are not duplicated; instead, if their values are not
+// consistent, an error is thrown. In addition, the higher priority namespace
+// is used for each attribute, EXCEPT when two default namespaces are merged
+// and the lower priority namespace definition occurs closer (further down the
+// tree) than the higher priority one.
+static Error mergeAttributes(xmlNodePtr OriginalNode,
+ xmlNodePtr AdditionalNode) {
+ xmlNsPtr ClosestDefault = getClosestDefault(OriginalNode);
+ for (xmlAttrPtr Attribute = AdditionalNode->properties; Attribute;
+ Attribute = Attribute->next) {
+ if (xmlAttrPtr OriginalAttribute =
+ getAttribute(OriginalNode, Attribute->name)) {
+ if (!xmlStringsEqual(OriginalAttribute->children->content,
+ Attribute->children->content)) {
+ return make_error<WindowsManifestError>(
+ Twine("conflicting attributes for ") +
+ FROM_XML_CHAR(OriginalNode->name));
+ }
+ if (!Attribute->ns) {
+ continue;
+ }
+ if (!OriginalAttribute->ns) {
+ if (auto E = copyAttributeNamespace(OriginalAttribute, OriginalNode,
+ Attribute)) {
+ return E;
+ }
+ continue;
+ }
+ if (namespaceOverrides(OriginalAttribute->ns->href,
+ Attribute->ns->href)) {
+ // In this case, the original attribute has a higher priority namespace
+ // than the incoming attribute; however, the definition of the lower
+ // priority namespace occurs first when traveling upwards in the tree.
+ // Therefore the lower priority namespace is applied.
+ if (!OriginalAttribute->ns->prefix && !Attribute->ns->prefix &&
+ ClosestDefault &&
+ xmlStringsEqual(Attribute->ns->href, ClosestDefault->href)) {
+ if (auto E = copyAttributeNamespace(OriginalAttribute, OriginalNode,
+ Attribute)) {
+ return E;
+ }
+ continue;
+ }
+ continue;
+ }
+ // This covers the case where the incoming attribute has the higher
+ // priority. The higher priority namespace is applied in all cases
+ // EXCEPT when both of the namespaces are default inherited, and the
+ // closest inherited default is the lower priority one.
+ if (Attribute->ns->prefix || OriginalAttribute->ns->prefix ||
+ (ClosestDefault && !xmlStringsEqual(OriginalAttribute->ns->href,
+ ClosestDefault->href))) {
+ if (auto E = copyAttributeNamespace(OriginalAttribute, OriginalNode,
+ Attribute)) {
+ return E;
+ }
+ continue;
+ }
+ continue;
+ }
+ // If the incoming attribute is not already found on the node, append it
+ // to the end of the properties list. Also explicitly apply its
+ // namespace as a prefix because it might be contained in a separate
+ // namespace that doesn't use the attribute.
+ xmlAttrPtr NewProp =
+ xmlNewProp(OriginalNode, Attribute->name, Attribute->children->content);
+ Expected<xmlNsPtr> ExplicitOrError =
+ searchOrDefine(Attribute->ns->href, OriginalNode);
+ if (!ExplicitOrError)
+ return ExplicitOrError.takeError();
+ NewProp->ns = std::move(ExplicitOrError.get());
+ }
+ return Error::success();
+}
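+
+// Illustrative behavior: merging <assemblyIdentity version="1.0"/> into an
+// identical element is a no-op, while version="2.0" against version="1.0"
+// fails with "conflicting attributes for assemblyIdentity".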
+
+// Given two nodes, return the one with the higher priority namespace.
+static xmlNodePtr getDominantNode(xmlNodePtr Node1, xmlNodePtr Node2) {
+
+ if (!Node1 || !Node1->ns)
+ return Node2;
+ if (!Node2 || !Node2->ns)
+ return Node1;
+ if (namespaceOverrides(Node1->ns->href, Node2->ns->href))
+ return Node1;
+ return Node2;
+}
+
+// Checks if this Node's namespace is inherited or one it defined itself.
+static bool hasInheritedNs(xmlNodePtr Node) {
+ return Node->ns && Node->ns != getNamespaceWithPrefix(Node->ns->prefix, Node);
+}
+
+// Check if this Node's namespace is a default namespace that it inherited, as
+// opposed to defining itself.
+static bool hasInheritedDefaultNs(xmlNodePtr Node) {
+ return hasInheritedNs(Node) && Node->ns->prefix == nullptr;
+}
+
+// Check if this Node's namespace is a default namespace it defined itself.
+static bool hasDefinedDefaultNamespace(xmlNodePtr Node) {
+ return Node->ns && (Node->ns == getNamespaceWithPrefix(nullptr, Node));
+}
+
+// For the given explicit prefix-definition of a namespace, travel downwards
+// from a node recursively, and for every implicit, inherited default usage of
+// that namespace replace it with that explicit prefix use. This is important
+// when namespace overriding occurs when merging, so that elements unique to a
+// namespace will still stay in that namespace.
+static void explicateNamespace(xmlNsPtr PrefixDef, xmlNodePtr Node) {
+ // If a node has its own default namespace definition, it clearly cannot have
+ // inherited the given default namespace, and neither will any of its
+ // children.
+ if (hasDefinedDefaultNamespace(Node))
+ return;
+ if (Node->ns && xmlStringsEqual(Node->ns->href, PrefixDef->href) &&
+ hasInheritedDefaultNs(Node))
+ Node->ns = PrefixDef;
+ for (xmlAttrPtr Attribute = Node->properties; Attribute;
+ Attribute = Attribute->next) {
+ if (Attribute->ns &&
+ xmlStringsEqual(Attribute->ns->href, PrefixDef->href)) {
+ Attribute->ns = PrefixDef;
+ }
+ }
+ for (xmlNodePtr Child = Node->children; Child; Child = Child->next) {
+ explicateNamespace(PrefixDef, Child);
+ }
+}
+
+// Perform the namespace merge between two nodes.
+static Error mergeNamespaces(xmlNodePtr OriginalNode,
+ xmlNodePtr AdditionalNode) {
+ // Save the original default namespace definition in case the incoming node
+ // overrides it.
+ const unsigned char *OriginalDefinedDefaultHref = nullptr;
+ if (xmlNsPtr OriginalDefinedDefaultNs =
+ getNamespaceWithPrefix(nullptr, OriginalNode)) {
+ OriginalDefinedDefaultHref = xmlStrdup(OriginalDefinedDefaultNs->href);
+ }
+ const unsigned char *NewDefinedDefaultHref = nullptr;
+ // Copy all namespace definitions. There can only be one default namespace
+ // definition per node, so the higher priority one takes precedence in the
+ // case of collision.
+ for (xmlNsPtr Def = AdditionalNode->nsDef; Def; Def = Def->next) {
+ if (xmlNsPtr OriginalNsDef =
+ getNamespaceWithPrefix(Def->prefix, OriginalNode)) {
+ if (!Def->prefix) {
+ if (namespaceOverrides(Def->href, OriginalNsDef->href)) {
+ NewDefinedDefaultHref = TO_XML_CHAR(strdup(FROM_XML_CHAR(Def->href)));
+ }
+ } else if (!xmlStringsEqual(OriginalNsDef->href, Def->href)) {
+ return make_error<WindowsManifestError>(
+ Twine("conflicting namespace definitions for ") +
+ FROM_XML_CHAR(Def->prefix));
+ }
+ } else {
+ xmlNsPtr NewDef = xmlCopyNamespace(Def);
+ NewDef->next = OriginalNode->nsDef;
+ OriginalNode->nsDef = NewDef;
+ }
+ }
+
+ // Check whether the original node or the incoming node has the higher
+ // priority namespace. Depending on which one is dominant, we will have
+ // to recursively apply namespace changes down to children of the original
+ // node.
+ xmlNodePtr DominantNode = getDominantNode(OriginalNode, AdditionalNode);
+ xmlNodePtr NonDominantNode =
+ DominantNode == OriginalNode ? AdditionalNode : OriginalNode;
+ if (DominantNode == OriginalNode) {
+ if (OriginalDefinedDefaultHref) {
+ xmlNsPtr NonDominantDefinedDefault =
+ getNamespaceWithPrefix(nullptr, NonDominantNode);
+ // In this case, both the nodes defined a default namespace. However
+ // the lower priority node ended up having a higher priority default
+ // definition. This can occur if the higher priority node is prefix
+ // namespace defined. In this case we have to define an explicit
+ // prefix for the overridden definition and apply it to all children
+ // who relied on that definition.
+ if (NonDominantDefinedDefault &&
+ namespaceOverrides(NonDominantDefinedDefault->href,
+ OriginalDefinedDefaultHref)) {
+ Expected<xmlNsPtr> EC =
+ searchOrDefine(OriginalDefinedDefaultHref, DominantNode);
+ if (!EC) {
+ return EC.takeError();
+ }
+ xmlNsPtr PrefixDominantDefinedDefault = std::move(EC.get());
+ explicateNamespace(PrefixDominantDefinedDefault, DominantNode);
+ }
+ } else if (getNamespaceWithPrefix(nullptr, NonDominantNode)) {
+ // In this case the node with the higher priority namespace did not have
+ // a default namespace definition, but the lower priority node did, so the
+ // new default namespace definition is copied. A side effect of this is
+ // that all children suddenly find themselves in a different default
+ // namespace. To maintain correctness we need to ensure that all children
+ // now explicitly refer to the namespace they had previously inherited
+ // implicitly.
+ if (DominantNode->parent) {
+ xmlNsPtr ClosestDefault = getClosestDefault(DominantNode->parent);
+ Expected<xmlNsPtr> EC =
+ searchOrDefine(ClosestDefault->href, DominantNode);
+ if (!EC) {
+ return EC.takeError();
+ }
+ xmlNsPtr ExplicitDefault = std::move(EC.get());
+ explicateNamespace(ExplicitDefault, DominantNode);
+ }
+ }
+ } else {
+ // Covers case where the incoming node has a default namespace definition
+ // that overrides the original node's namespace. This always leads to
+ // the original node receiving that new default namespace.
+ if (hasDefinedDefaultNamespace(DominantNode)) {
+ NonDominantNode->ns = getNamespaceWithPrefix(nullptr, NonDominantNode);
+ } else {
+ // This covers the case where the incoming node either has a prefix
+ // namespace, or an inherited default namespace. Since the namespace
+ // may not yet be defined in the original tree we do a searchOrDefine
+ // for it, and then set the namespace equal to it.
+ Expected<xmlNsPtr> EC =
+ searchOrDefine(DominantNode->ns->href, NonDominantNode);
+ if (!EC) {
+ return EC.takeError();
+ }
+ xmlNsPtr Explicit = std::move(EC.get());
+ NonDominantNode->ns = Explicit;
+ }
+ // This covers cases where the incoming dominant node HAS a default
+ // namespace definition, but MIGHT NOT NECESSARILY be in that namespace.
+ if (xmlNsPtr DominantDefaultDefined =
+ getNamespaceWithPrefix(nullptr, DominantNode)) {
+ if (OriginalDefinedDefaultHref) {
+ if (namespaceOverrides(DominantDefaultDefined->href,
+ OriginalDefinedDefaultHref)) {
+ // In this case, the incoming node's default definition overrides
+ // the original default definition, all children who relied on that
+ // definition must be updated accordingly.
+ Expected<xmlNsPtr> EC =
+ searchOrDefine(OriginalDefinedDefaultHref, NonDominantNode);
+ if (!EC) {
+ return EC.takeError();
+ }
+ xmlNsPtr ExplicitDefault = std::move(EC.get());
+ explicateNamespace(ExplicitDefault, NonDominantNode);
+ }
+ } else {
+ // The original did not define a default definition, however the new
+ // default definition still applies to all children, so they must be
+ // updated to explicitly refer to the namespace they had previously
+ // been inheriting implicitly.
+ xmlNsPtr ClosestDefault = getClosestDefault(NonDominantNode);
+ Expected<xmlNsPtr> EC =
+ searchOrDefine(ClosestDefault->href, NonDominantNode);
+ if (!EC) {
+ return EC.takeError();
+ }
+ xmlNsPtr ExplicitDefault = std::move(EC.get());
+ explicateNamespace(ExplicitDefault, NonDominantNode);
+ }
+ }
+ }
+ if (NewDefinedDefaultHref) {
+ xmlNsPtr OriginalNsDef = getNamespaceWithPrefix(nullptr, OriginalNode);
+ xmlFree(const_cast<unsigned char *>(OriginalNsDef->href));
+ OriginalNsDef->href = NewDefinedDefaultHref;
+ }
+ xmlFree(const_cast<unsigned char *>(OriginalDefinedDefaultHref));
+ return Error::success();
+}
+
+static bool isRecognizedNamespace(const unsigned char *NsHref) {
+ for (auto &Ns : MtNsHrefsPrefixes) {
+ if (xmlStringsEqual(NsHref, TO_XML_CHAR(Ns.first.data()))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool hasRecognizedNamespace(xmlNodePtr Node) {
+ return isRecognizedNamespace(Node->ns->href);
+}
+
+// Ensure a node's inherited namespace is actually defined in the tree it
+// resides in.
+static Error reconcileNamespaces(xmlNodePtr Node) {
+ if (!Node) {
+ return Error::success();
+ }
+ if (hasInheritedNs(Node)) {
+ Expected<xmlNsPtr> ExplicitOrError = searchOrDefine(Node->ns->href, Node);
+ if (!ExplicitOrError) {
+ return ExplicitOrError.takeError();
+ }
+ xmlNsPtr Explicit = std::move(ExplicitOrError.get());
+ Node->ns = Explicit;
+ }
+ for (xmlNodePtr Child = Node->children; Child; Child = Child->next) {
+ if (auto E = reconcileNamespaces(Child)) {
+ return E;
+ }
+ }
+ return Error::success();
+}
+
+// Recursively merge the two given manifest trees, depending on which elements
+// are of a mergeable type, and choose namespaces according to which have
+// higher priority.
+static Error treeMerge(xmlNodePtr OriginalRoot, xmlNodePtr AdditionalRoot) {
+ if (auto E = mergeAttributes(OriginalRoot, AdditionalRoot))
+ return E;
+ if (auto E = mergeNamespaces(OriginalRoot, AdditionalRoot))
+ return E;
+ xmlNodePtr AdditionalFirstChild = AdditionalRoot->children;
+ xmlNode StoreNext;
+ for (xmlNodePtr Child = AdditionalFirstChild; Child; Child = Child->next) {
+ xmlNodePtr OriginalChildWithName;
+ if (!isMergeableElement(Child->name) ||
+ !(OriginalChildWithName =
+ getChildWithName(OriginalRoot, Child->name)) ||
+ !hasRecognizedNamespace(Child)) {
+ StoreNext.next = Child->next;
+ xmlUnlinkNode(Child);
+ if (!xmlAddChild(OriginalRoot, Child)) {
+ return make_error<WindowsManifestError>(Twine("could not merge ") +
+ FROM_XML_CHAR(Child->name));
+ }
+ if (auto E = reconcileNamespaces(Child)) {
+ return E;
+ }
+ Child = &StoreNext;
+ } else if (auto E = treeMerge(OriginalChildWithName, Child)) {
+ return E;
+ }
+ }
+ return Error::success();
+}
+
+static void stripComments(xmlNodePtr Root) {
+ xmlNode StoreNext;
+ for (xmlNodePtr Child = Root->children; Child; Child = Child->next) {
+ if (!xmlStringsEqual(Child->name, TO_XML_CHAR("comment"))) {
+ stripComments(Child);
+ continue;
+ }
+ StoreNext.next = Child->next;
+ xmlNodePtr Remove = Child;
+ Child = &StoreNext;
+ xmlUnlinkNode(Remove);
+ xmlFreeNode(Remove);
+ }
+}
+
+// libxml2 assumes that attributes do not inherit default namespaces, whereas
+// the original mt.exe assumes that they do. This function reconciles the
+// difference by setting all attributes to the inherited default namespace.
+static void setAttributeNamespaces(xmlNodePtr Node) {
+ for (xmlAttrPtr Attribute = Node->properties; Attribute;
+ Attribute = Attribute->next) {
+ if (!Attribute->ns) {
+ Attribute->ns = getClosestDefault(Node);
+ }
+ }
+ for (xmlNodePtr Child = Node->children; Child; Child = Child->next) {
+ setAttributeNamespaces(Child);
+ }
+}
+
+// The merging process may create too many prefix-defined namespaces. This
+// function removes all unnecessary ones from the tree.
+static void checkAndStripPrefixes(xmlNodePtr Node,
+ std::vector<xmlNsPtr> &RequiredPrefixes) {
+ for (xmlNodePtr Child = Node->children; Child; Child = Child->next) {
+ checkAndStripPrefixes(Child, RequiredPrefixes);
+ }
+ if (Node->ns && Node->ns->prefix != nullptr) {
+ xmlNsPtr ClosestDefault = getClosestDefault(Node);
+ if (ClosestDefault &&
+ xmlStringsEqual(ClosestDefault->href, Node->ns->href)) {
+ Node->ns = ClosestDefault;
+ } else if (!llvm::is_contained(RequiredPrefixes, Node->ns)) {
+ RequiredPrefixes.push_back(Node->ns);
+ }
+ }
+ for (xmlAttrPtr Attribute = Node->properties; Attribute;
+ Attribute = Attribute->next) {
+ if (Attribute->ns && Attribute->ns->prefix != nullptr) {
+ xmlNsPtr ClosestDefault = getClosestDefault(Node);
+ if (ClosestDefault &&
+ xmlStringsEqual(ClosestDefault->href, Attribute->ns->href)) {
+ Attribute->ns = ClosestDefault;
+      } else if (!llvm::is_contained(RequiredPrefixes, Attribute->ns)) {
+ RequiredPrefixes.push_back(Attribute->ns);
+ }
+ }
+ }
+ xmlNsPtr Prev;
+ xmlNs Temp;
+ for (xmlNsPtr Def = Node->nsDef; Def; Def = Def->next) {
+ if (!Def->prefix || llvm::is_contained(RequiredPrefixes, Def)) {
+ Prev = Def;
+ continue;
+ }
+ if (Def == Node->nsDef) {
+ Node->nsDef = Def->next;
+ } else {
+ Prev->next = Def->next;
+ }
+ Temp.next = Def->next;
+ xmlFreeNs(Def);
+ Def = &Temp;
+ }
+}
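+
+// For illustration: in a tree like
+//   <root xmlns="urn:a" xmlns:ns="urn:a"><ns:child/></root>
+// the "ns" prefix on the child resolves to the same href as the inherited
+// default, so checkAndStripPrefixes points the child at the default namespace
+// and the now-unused xmlns:ns definition is freed from root's nsDef list.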
+
+WindowsManifestMerger::WindowsManifestMergerImpl::~WindowsManifestMergerImpl() {
+ for (auto &Doc : MergedDocs)
+ xmlFreeDoc(Doc);
+}
+
+Error WindowsManifestMerger::WindowsManifestMergerImpl::merge(
+ const MemoryBuffer &Manifest) {
+ if (Merged)
+ return make_error<WindowsManifestError>(
+ "merge after getMergedManifest is not supported");
+ if (Manifest.getBufferSize() == 0)
+ return make_error<WindowsManifestError>(
+ "attempted to merge empty manifest");
+ xmlSetGenericErrorFunc((void *)this,
+ WindowsManifestMergerImpl::errorCallback);
+ xmlDocPtr ManifestXML = xmlReadMemory(
+ Manifest.getBufferStart(), Manifest.getBufferSize(), "manifest.xml",
+ nullptr, XML_PARSE_NOBLANKS | XML_PARSE_NODICT);
+ xmlSetGenericErrorFunc(nullptr, nullptr);
+ if (auto E = getParseError())
+ return E;
+ xmlNodePtr AdditionalRoot = xmlDocGetRootElement(ManifestXML);
+ stripComments(AdditionalRoot);
+ setAttributeNamespaces(AdditionalRoot);
+ if (CombinedDoc == nullptr) {
+ CombinedDoc = ManifestXML;
+ } else {
+ xmlNodePtr CombinedRoot = xmlDocGetRootElement(CombinedDoc);
+ if (!xmlStringsEqual(CombinedRoot->name, AdditionalRoot->name) ||
+ !isMergeableElement(AdditionalRoot->name) ||
+ !hasRecognizedNamespace(AdditionalRoot)) {
+ return make_error<WindowsManifestError>("multiple root nodes");
+ }
+ if (auto E = treeMerge(CombinedRoot, AdditionalRoot)) {
+ return E;
+ }
+ }
+ MergedDocs.push_back(ManifestXML);
+ return Error::success();
+}
+
+std::unique_ptr<MemoryBuffer>
+WindowsManifestMerger::WindowsManifestMergerImpl::getMergedManifest() {
+ if (!Merged) {
+ Merged = true;
+
+ if (!CombinedDoc)
+ return nullptr;
+
+ xmlNodePtr CombinedRoot = xmlDocGetRootElement(CombinedDoc);
+ std::vector<xmlNsPtr> RequiredPrefixes;
+ checkAndStripPrefixes(CombinedRoot, RequiredPrefixes);
+ std::unique_ptr<xmlDoc, XmlDeleter> OutputDoc(
+ xmlNewDoc((const unsigned char *)"1.0"));
+ xmlDocSetRootElement(OutputDoc.get(), CombinedRoot);
+ assert(0 == xmlDocGetRootElement(CombinedDoc));
+
+ xmlKeepBlanksDefault(0);
+ xmlChar *Buff = nullptr;
+ xmlDocDumpFormatMemoryEnc(OutputDoc.get(), &Buff, &BufferSize, "UTF-8", 1);
+ Buffer.reset(Buff);
+ }
+
+ return BufferSize ? MemoryBuffer::getMemBufferCopy(StringRef(
+ FROM_XML_CHAR(Buffer.get()), (size_t)BufferSize))
+ : nullptr;
+}
+
+bool windows_manifest::isAvailable() { return true; }
+
+#else
+
+WindowsManifestMerger::WindowsManifestMergerImpl::~WindowsManifestMergerImpl() {
+}
+
+Error WindowsManifestMerger::WindowsManifestMergerImpl::merge(
+ const MemoryBuffer &Manifest) {
+ return make_error<WindowsManifestError>("no libxml2");
+}
+
+std::unique_ptr<MemoryBuffer>
+WindowsManifestMerger::WindowsManifestMergerImpl::getMergedManifest() {
+ return nullptr;
+}
+
+bool windows_manifest::isAvailable() { return false; }
+
+#endif
+
+WindowsManifestMerger::WindowsManifestMerger()
+ : Impl(std::make_unique<WindowsManifestMergerImpl>()) {}
+
+WindowsManifestMerger::~WindowsManifestMerger() {}
+
+Error WindowsManifestMerger::merge(const MemoryBuffer &Manifest) {
+ return Impl->merge(Manifest);
+}
+
+std::unique_ptr<MemoryBuffer> WindowsManifestMerger::getMergedManifest() {
+ return Impl->getMergedManifest();
+}
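+
+// Usage sketch (assumes the caller has already loaded each manifest into a
+// MemoryBuffer; error handling elided):
+//
+//   WindowsManifestMerger Merger;
+//   if (Error E = Merger.merge(*ManifestA))
+//     return E;
+//   if (Error E = Merger.merge(*ManifestB))
+//     return E;
+//   std::unique_ptr<MemoryBuffer> Out = Merger.getMergedManifest();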
+
+void WindowsManifestMerger::WindowsManifestMergerImpl::errorCallback(
+ void *Ctx, const char *Format, ...) {
+ auto *Merger = (WindowsManifestMergerImpl *)Ctx;
+ Merger->ParseErrorOccurred = true;
+}
+
+Error WindowsManifestMerger::WindowsManifestMergerImpl::getParseError() {
+ if (!ParseErrorOccurred)
+ return Error::success();
+ return make_error<WindowsManifestError>("invalid xml document");
+}
diff --git a/contrib/libs/llvm12/lib/XRay/BlockIndexer.cpp b/contrib/libs/llvm12/lib/XRay/BlockIndexer.cpp
new file mode 100644
index 00000000000..a99a6815f0d
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/BlockIndexer.cpp
@@ -0,0 +1,97 @@
+//===- BlockIndexer.cpp - FDR Block Indexing Visitor ----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// An implementation of the RecordVisitor which generates a mapping between a
+// thread and a range of records representing a block.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/BlockIndexer.h"
+
+namespace llvm {
+namespace xray {
+
+Error BlockIndexer::visit(BufferExtents &) { return Error::success(); }
+
+Error BlockIndexer::visit(WallclockRecord &R) {
+ CurrentBlock.Records.push_back(&R);
+ CurrentBlock.WallclockTime = &R;
+ return Error::success();
+}
+
+Error BlockIndexer::visit(NewCPUIDRecord &R) {
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(TSCWrapRecord &R) {
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(CustomEventRecord &R) {
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(CustomEventRecordV5 &R) {
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(TypedEventRecord &R) {
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(CallArgRecord &R) {
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(PIDRecord &R) {
+ CurrentBlock.ProcessID = R.pid();
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(NewBufferRecord &R) {
+ if (!CurrentBlock.Records.empty())
+ if (auto E = flush())
+ return E;
+
+ CurrentBlock.ThreadID = R.tid();
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(EndBufferRecord &R) {
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::visit(FunctionRecord &R) {
+ CurrentBlock.Records.push_back(&R);
+ return Error::success();
+}
+
+Error BlockIndexer::flush() {
+ Index::iterator It;
+ std::tie(It, std::ignore) =
+ Indices.insert({{CurrentBlock.ProcessID, CurrentBlock.ThreadID}, {}});
+ It->second.push_back({CurrentBlock.ProcessID, CurrentBlock.ThreadID,
+ CurrentBlock.WallclockTime,
+ std::move(CurrentBlock.Records)});
+ CurrentBlock.ProcessID = 0;
+ CurrentBlock.ThreadID = 0;
+ CurrentBlock.Records = {};
+ CurrentBlock.WallclockTime = nullptr;
+ return Error::success();
+}
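+
+// Example: two NewBuffer records for thread 7 of process 42 produce two
+// flushed blocks, both filed under Indices[{42, 7}], each holding the records
+// seen between its NewBuffer record and the next flush.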
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/BlockPrinter.cpp b/contrib/libs/llvm12/lib/XRay/BlockPrinter.cpp
new file mode 100644
index 00000000000..63a60c3c56a
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/BlockPrinter.cpp
@@ -0,0 +1,113 @@
+//===- BlockPrinter.cpp - FDR Block Pretty Printer Implementation --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/BlockPrinter.h"
+
+namespace llvm {
+namespace xray {
+
+Error BlockPrinter::visit(BufferExtents &R) {
+ OS << "\n[New Block]\n";
+ CurrentState = State::Preamble;
+ return RP.visit(R);
+}
+
+// Preamble printing.
+Error BlockPrinter::visit(NewBufferRecord &R) {
+ if (CurrentState == State::Start)
+ OS << "\n[New Block]\n";
+
+ OS << "Preamble: \n";
+ CurrentState = State::Preamble;
+ return RP.visit(R);
+}
+
+Error BlockPrinter::visit(WallclockRecord &R) {
+ CurrentState = State::Preamble;
+ return RP.visit(R);
+}
+
+Error BlockPrinter::visit(PIDRecord &R) {
+ CurrentState = State::Preamble;
+ return RP.visit(R);
+}
+
+// Metadata printing.
+Error BlockPrinter::visit(NewCPUIDRecord &R) {
+ if (CurrentState == State::Preamble)
+ OS << "\nBody:\n";
+ if (CurrentState == State::Function)
+ OS << "\nMetadata: ";
+ CurrentState = State::Metadata;
+ OS << " ";
+ auto E = RP.visit(R);
+ return E;
+}
+
+Error BlockPrinter::visit(TSCWrapRecord &R) {
+ if (CurrentState == State::Function)
+ OS << "\nMetadata:";
+ CurrentState = State::Metadata;
+ OS << " ";
+ auto E = RP.visit(R);
+ return E;
+}
+
+// Custom events will be rendered like "function" events.
+Error BlockPrinter::visit(CustomEventRecord &R) {
+ if (CurrentState == State::Metadata)
+ OS << "\n";
+ CurrentState = State::CustomEvent;
+ OS << "* ";
+ auto E = RP.visit(R);
+ return E;
+}
+
+Error BlockPrinter::visit(CustomEventRecordV5 &R) {
+ if (CurrentState == State::Metadata)
+ OS << "\n";
+ CurrentState = State::CustomEvent;
+ OS << "* ";
+ auto E = RP.visit(R);
+ return E;
+}
+
+Error BlockPrinter::visit(TypedEventRecord &R) {
+ if (CurrentState == State::Metadata)
+ OS << "\n";
+ CurrentState = State::CustomEvent;
+ OS << "* ";
+ auto E = RP.visit(R);
+ return E;
+}
+
+// Function call printing.
+Error BlockPrinter::visit(FunctionRecord &R) {
+ if (CurrentState == State::Metadata)
+ OS << "\n";
+ CurrentState = State::Function;
+ OS << "- ";
+ auto E = RP.visit(R);
+ return E;
+}
+
+Error BlockPrinter::visit(CallArgRecord &R) {
+ CurrentState = State::Arg;
+ OS << " : ";
+ auto E = RP.visit(R);
+ return E;
+}
+
+Error BlockPrinter::visit(EndBufferRecord &R) {
+ CurrentState = State::End;
+ OS << " *** ";
+ auto E = RP.visit(R);
+ return E;
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/BlockVerifier.cpp b/contrib/libs/llvm12/lib/XRay/BlockVerifier.cpp
new file mode 100644
index 00000000000..9fb49fa9a86
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/BlockVerifier.cpp
@@ -0,0 +1,204 @@
+//===- BlockVerifier.cpp - FDR Block Verifier -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/BlockVerifier.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace xray {
+namespace {
+
+constexpr unsigned long long mask(BlockVerifier::State S) {
+ return 1uLL << static_cast<std::size_t>(S);
+}
+
+constexpr std::size_t number(BlockVerifier::State S) {
+ return static_cast<std::size_t>(S);
+}
+
+StringRef recordToString(BlockVerifier::State R) {
+ switch (R) {
+ case BlockVerifier::State::BufferExtents:
+ return "BufferExtents";
+ case BlockVerifier::State::NewBuffer:
+ return "NewBuffer";
+ case BlockVerifier::State::WallClockTime:
+ return "WallClockTime";
+ case BlockVerifier::State::PIDEntry:
+ return "PIDEntry";
+ case BlockVerifier::State::NewCPUId:
+ return "NewCPUId";
+ case BlockVerifier::State::TSCWrap:
+ return "TSCWrap";
+ case BlockVerifier::State::CustomEvent:
+ return "CustomEvent";
+ case BlockVerifier::State::Function:
+ return "Function";
+ case BlockVerifier::State::CallArg:
+ return "CallArg";
+ case BlockVerifier::State::EndOfBuffer:
+ return "EndOfBuffer";
+ case BlockVerifier::State::TypedEvent:
+ return "TypedEvent";
+ case BlockVerifier::State::StateMax:
+ case BlockVerifier::State::Unknown:
+ return "Unknown";
+ }
+  llvm_unreachable("Unknown state!");
+}
+
+struct Transition {
+ BlockVerifier::State From;
+ std::bitset<number(BlockVerifier::State::StateMax)> ToStates;
+};
+
+} // namespace
+
+Error BlockVerifier::transition(State To) {
+ using ToSet = std::bitset<number(State::StateMax)>;
+ static constexpr std::array<const Transition, number(State::StateMax)>
+ TransitionTable{{{State::Unknown,
+ {mask(State::BufferExtents) | mask(State::NewBuffer)}},
+
+ {State::BufferExtents, {mask(State::NewBuffer)}},
+
+ {State::NewBuffer, {mask(State::WallClockTime)}},
+
+ {State::WallClockTime,
+ {mask(State::PIDEntry) | mask(State::NewCPUId)}},
+
+ {State::PIDEntry, {mask(State::NewCPUId)}},
+
+ {State::NewCPUId,
+ {mask(State::NewCPUId) | mask(State::TSCWrap) |
+ mask(State::CustomEvent) | mask(State::Function) |
+ mask(State::EndOfBuffer) | mask(State::TypedEvent)}},
+
+ {State::TSCWrap,
+ {mask(State::TSCWrap) | mask(State::NewCPUId) |
+ mask(State::CustomEvent) | mask(State::Function) |
+ mask(State::EndOfBuffer) | mask(State::TypedEvent)}},
+
+ {State::CustomEvent,
+ {mask(State::CustomEvent) | mask(State::TSCWrap) |
+ mask(State::NewCPUId) | mask(State::Function) |
+ mask(State::EndOfBuffer) | mask(State::TypedEvent)}},
+
+ {State::TypedEvent,
+ {mask(State::TypedEvent) | mask(State::TSCWrap) |
+ mask(State::NewCPUId) | mask(State::Function) |
+ mask(State::EndOfBuffer) | mask(State::CustomEvent)}},
+
+ {State::Function,
+ {mask(State::Function) | mask(State::TSCWrap) |
+ mask(State::NewCPUId) | mask(State::CustomEvent) |
+ mask(State::CallArg) | mask(State::EndOfBuffer) |
+ mask(State::TypedEvent)}},
+
+ {State::CallArg,
+ {mask(State::CallArg) | mask(State::Function) |
+ mask(State::TSCWrap) | mask(State::NewCPUId) |
+ mask(State::CustomEvent) | mask(State::EndOfBuffer) |
+ mask(State::TypedEvent)}},
+
+ {State::EndOfBuffer, {}}}};
+
+ if (CurrentRecord >= State::StateMax)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "BUG (BlockVerifier): Cannot find transition table entry for %s, "
+ "transitioning to %s.",
+ recordToString(CurrentRecord).data(), recordToString(To).data());
+
+ // If we're at an EndOfBuffer record, we ignore anything that follows that
+ // isn't a NewBuffer record.
+ if (CurrentRecord == State::EndOfBuffer && To != State::NewBuffer)
+ return Error::success();
+
+ auto &Mapping = TransitionTable[number(CurrentRecord)];
+ auto &Destinations = Mapping.ToStates;
+ assert(Mapping.From == CurrentRecord &&
+ "BUG: Wrong index for record mapping.");
+ if ((Destinations & ToSet(mask(To))) == 0)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "BlockVerifier: Invalid transition from %s to %s.",
+ recordToString(CurrentRecord).data(), recordToString(To).data());
+
+ CurrentRecord = To;
+ return Error::success();
+}
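+
+// Example of the transition table above: the record sequence NewBuffer ->
+// WallClockTime -> PIDEntry -> NewCPUId -> Function -> EndOfBuffer is
+// accepted, whereas NewBuffer -> Function is rejected because WallClockTime
+// is the only state reachable from NewBuffer.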
+
+Error BlockVerifier::visit(BufferExtents &) {
+ return transition(State::BufferExtents);
+}
+
+Error BlockVerifier::visit(WallclockRecord &) {
+ return transition(State::WallClockTime);
+}
+
+Error BlockVerifier::visit(NewCPUIDRecord &) {
+ return transition(State::NewCPUId);
+}
+
+Error BlockVerifier::visit(TSCWrapRecord &) {
+ return transition(State::TSCWrap);
+}
+
+Error BlockVerifier::visit(CustomEventRecord &) {
+ return transition(State::CustomEvent);
+}
+
+Error BlockVerifier::visit(CustomEventRecordV5 &) {
+ return transition(State::CustomEvent);
+}
+
+Error BlockVerifier::visit(TypedEventRecord &) {
+ return transition(State::TypedEvent);
+}
+
+Error BlockVerifier::visit(CallArgRecord &) {
+ return transition(State::CallArg);
+}
+
+Error BlockVerifier::visit(PIDRecord &) { return transition(State::PIDEntry); }
+
+Error BlockVerifier::visit(NewBufferRecord &) {
+ return transition(State::NewBuffer);
+}
+
+Error BlockVerifier::visit(EndBufferRecord &) {
+ return transition(State::EndOfBuffer);
+}
+
+Error BlockVerifier::visit(FunctionRecord &) {
+ return transition(State::Function);
+}
+
+Error BlockVerifier::verify() {
+ // The known terminal conditions are the following:
+ switch (CurrentRecord) {
+ case State::EndOfBuffer:
+ case State::NewCPUId:
+ case State::CustomEvent:
+ case State::TypedEvent:
+ case State::Function:
+ case State::CallArg:
+ case State::TSCWrap:
+ return Error::success();
+ default:
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "BlockVerifier: Invalid terminal condition %s, malformed block.",
+ recordToString(CurrentRecord).data());
+ }
+}
+
+void BlockVerifier::reset() { CurrentRecord = State::Unknown; }
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/FDRRecordProducer.cpp b/contrib/libs/llvm12/lib/XRay/FDRRecordProducer.cpp
new file mode 100644
index 00000000000..479b710444b
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/FDRRecordProducer.cpp
@@ -0,0 +1,198 @@
+//===- FDRRecordProducer.cpp - XRay FDR Mode Record Producer --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/FDRRecordProducer.h"
+#include "llvm/Support/DataExtractor.h"
+
+#include <cstdint>
+
+namespace llvm {
+namespace xray {
+
+namespace {
+
+// Keep this in sync with the values written in the XRay FDR mode runtime in
+// compiler-rt.
+enum MetadataRecordKinds : uint8_t {
+ NewBufferKind,
+ EndOfBufferKind,
+ NewCPUIdKind,
+ TSCWrapKind,
+ WalltimeMarkerKind,
+ CustomEventMarkerKind,
+ CallArgumentKind,
+ BufferExtentsKind,
+ TypedEventMarkerKind,
+ PidKind,
+ // This is an end marker, used to identify the upper bound for this enum.
+ EnumEndMarker,
+};
+
+Expected<std::unique_ptr<Record>>
+metadataRecordType(const XRayFileHeader &Header, uint8_t T) {
+
+ if (T >= static_cast<uint8_t>(MetadataRecordKinds::EnumEndMarker))
+ return createStringError(std::make_error_code(std::errc::invalid_argument),
+ "Invalid metadata record type: %d", T);
+ switch (T) {
+ case MetadataRecordKinds::NewBufferKind:
+ return std::make_unique<NewBufferRecord>();
+ case MetadataRecordKinds::EndOfBufferKind:
+ if (Header.Version >= 2)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "End of buffer records are no longer supported starting version "
+ "2 of the log.");
+ return std::make_unique<EndBufferRecord>();
+ case MetadataRecordKinds::NewCPUIdKind:
+ return std::make_unique<NewCPUIDRecord>();
+ case MetadataRecordKinds::TSCWrapKind:
+ return std::make_unique<TSCWrapRecord>();
+ case MetadataRecordKinds::WalltimeMarkerKind:
+ return std::make_unique<WallclockRecord>();
+ case MetadataRecordKinds::CustomEventMarkerKind:
+ if (Header.Version >= 5)
+ return std::make_unique<CustomEventRecordV5>();
+ return std::make_unique<CustomEventRecord>();
+ case MetadataRecordKinds::CallArgumentKind:
+ return std::make_unique<CallArgRecord>();
+ case MetadataRecordKinds::BufferExtentsKind:
+ return std::make_unique<BufferExtents>();
+ case MetadataRecordKinds::TypedEventMarkerKind:
+ return std::make_unique<TypedEventRecord>();
+ case MetadataRecordKinds::PidKind:
+ return std::make_unique<PIDRecord>();
+ case MetadataRecordKinds::EnumEndMarker:
+ llvm_unreachable("Invalid MetadataRecordKind");
+ }
+ llvm_unreachable("Unhandled MetadataRecordKinds enum value");
+}
+
+constexpr bool isMetadataIntroducer(uint8_t FirstByte) {
+ return FirstByte & 0x01u;
+}
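+
+// Example: a first byte of 0x0F (0b00001111) has bit 0 set, so it introduces
+// a metadata record, and 0x0F >> 1 == 7 selects BufferExtentsKind above.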
+
+} // namespace
+
+Expected<std::unique_ptr<Record>>
+FileBasedRecordProducer::findNextBufferExtent() {
+ // We seek one byte at a time until we find a suitable buffer extents metadata
+ // record introducer.
+ std::unique_ptr<Record> R;
+ while (!R) {
+ auto PreReadOffset = OffsetPtr;
+ uint8_t FirstByte = E.getU8(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading one byte from offset %" PRId64 ".", OffsetPtr);
+
+ if (isMetadataIntroducer(FirstByte)) {
+ auto LoadedType = FirstByte >> 1;
+ if (LoadedType == MetadataRecordKinds::BufferExtentsKind) {
+ auto MetadataRecordOrErr = metadataRecordType(Header, LoadedType);
+ if (!MetadataRecordOrErr)
+ return MetadataRecordOrErr.takeError();
+
+ R = std::move(MetadataRecordOrErr.get());
+ RecordInitializer RI(E, OffsetPtr);
+ if (auto Err = R->apply(RI))
+ return std::move(Err);
+ return std::move(R);
+ }
+ }
+ }
+ llvm_unreachable("Must always terminate with either an error or a record.");
+}
+
+Expected<std::unique_ptr<Record>> FileBasedRecordProducer::produce() {
+ // First, we set up our result record.
+ std::unique_ptr<Record> R;
+
+ // Before we do any further reading, we should check whether we're at the end
+  // of the current buffer we've been consuming. In FDR logs version >= 3, we
+ // rely on the buffer extents record to determine how many bytes we should be
+ // considering as valid records.
+ if (Header.Version >= 3 && CurrentBufferBytes == 0) {
+ // Find the next buffer extents record.
+ auto BufferExtentsOrError = findNextBufferExtent();
+ if (!BufferExtentsOrError)
+ return joinErrors(
+ BufferExtentsOrError.takeError(),
+ createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed to find the next BufferExtents record."));
+
+ R = std::move(BufferExtentsOrError.get());
+ assert(R != nullptr);
+ assert(isa<BufferExtents>(R.get()));
+ auto BE = cast<BufferExtents>(R.get());
+ CurrentBufferBytes = BE->size();
+ return std::move(R);
+ }
+
+ //
+  // At the top level, we read one byte to determine the type of the record to
+  // create. This byte comprises the following bits:
+  //
+  // - offset 0: A '1' indicates a metadata record, a '0' indicates a function
+  //   record.
+  // - offsets 1-7: For metadata records, this indicates the kind of metadata
+  //   record that should be loaded.
+  //
+  // We read the first byte, then create the appropriate type of record to
+  // consume the rest of the bytes.
+ auto PreReadOffset = OffsetPtr;
+ uint8_t FirstByte = E.getU8(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading one byte from offset %" PRId64 ".", OffsetPtr);
+
+  // Metadata records are handled specially here.
+ if (isMetadataIntroducer(FirstByte)) {
+ auto LoadedType = FirstByte >> 1;
+ auto MetadataRecordOrErr = metadataRecordType(Header, LoadedType);
+ if (!MetadataRecordOrErr)
+ return joinErrors(
+ MetadataRecordOrErr.takeError(),
+ createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Encountered an unsupported metadata record (%d) "
+ "at offset %" PRId64 ".",
+ LoadedType, PreReadOffset));
+ R = std::move(MetadataRecordOrErr.get());
+ } else {
+ R = std::make_unique<FunctionRecord>();
+ }
+ RecordInitializer RI(E, OffsetPtr);
+
+ if (auto Err = R->apply(RI))
+ return std::move(Err);
+
+ // If we encountered a BufferExtents record, we should record the remaining
+ // bytes for the current buffer, to determine when we should start ignoring
+ // potentially malformed data and looking for buffer extents records.
+ if (auto BE = dyn_cast<BufferExtents>(R.get())) {
+ CurrentBufferBytes = BE->size();
+ } else if (Header.Version >= 3) {
+ if (OffsetPtr - PreReadOffset > CurrentBufferBytes)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Buffer over-read at offset %" PRId64 " (over-read by %" PRId64
+ " bytes); Record Type = %s.",
+ OffsetPtr, (OffsetPtr - PreReadOffset) - CurrentBufferBytes,
+ Record::kindToString(R->getRecordType()).data());
+
+ CurrentBufferBytes -= OffsetPtr - PreReadOffset;
+ }
+ assert(R != nullptr);
+ return std::move(R);
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/FDRRecords.cpp b/contrib/libs/llvm12/lib/XRay/FDRRecords.cpp
new file mode 100644
index 00000000000..ff315d35417
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/FDRRecords.cpp
@@ -0,0 +1,66 @@
+//===- FDRRecords.cpp - XRay Flight Data Recorder Mode Records -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Define types and operations on these types that represent the different kinds
+// of records we encounter in XRay flight data recorder mode traces.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/FDRRecords.h"
+
+namespace llvm {
+namespace xray {
+
+Error BufferExtents::apply(RecordVisitor &V) { return V.visit(*this); }
+Error WallclockRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error NewCPUIDRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error TSCWrapRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error CustomEventRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error CallArgRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error PIDRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error NewBufferRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error EndBufferRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error FunctionRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+Error CustomEventRecordV5::apply(RecordVisitor &V) { return V.visit(*this); }
+Error TypedEventRecord::apply(RecordVisitor &V) { return V.visit(*this); }
+
+StringRef Record::kindToString(RecordKind K) {
+ switch (K) {
+ case RecordKind::RK_Metadata:
+ return "Metadata";
+ case RecordKind::RK_Metadata_BufferExtents:
+ return "Metadata:BufferExtents";
+ case RecordKind::RK_Metadata_WallClockTime:
+ return "Metadata:WallClockTime";
+ case RecordKind::RK_Metadata_NewCPUId:
+ return "Metadata:NewCPUId";
+ case RecordKind::RK_Metadata_TSCWrap:
+ return "Metadata:TSCWrap";
+ case RecordKind::RK_Metadata_CustomEvent:
+ return "Metadata:CustomEvent";
+ case RecordKind::RK_Metadata_CustomEventV5:
+ return "Metadata:CustomEventV5";
+ case RecordKind::RK_Metadata_CallArg:
+ return "Metadata:CallArg";
+ case RecordKind::RK_Metadata_PIDEntry:
+ return "Metadata:PIDEntry";
+ case RecordKind::RK_Metadata_NewBuffer:
+ return "Metadata:NewBuffer";
+ case RecordKind::RK_Metadata_EndOfBuffer:
+ return "Metadata:EndOfBuffer";
+ case RecordKind::RK_Metadata_TypedEvent:
+ return "Metadata:TypedEvent";
+ case RecordKind::RK_Metadata_LastMetadata:
+ return "Metadata:LastMetadata";
+ case RecordKind::RK_Function:
+ return "Function";
+ }
+ return "Unknown";
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/FDRTraceExpander.cpp b/contrib/libs/llvm12/lib/XRay/FDRTraceExpander.cpp
new file mode 100644
index 00000000000..b68e997fe70
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/FDRTraceExpander.cpp
@@ -0,0 +1,131 @@
+//===- FDRTraceExpander.cpp -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/FDRTraceExpander.h"
+
+namespace llvm {
+namespace xray {
+
+void TraceExpander::resetCurrentRecord() {
+ if (BuildingRecord)
+ C(CurrentRecord);
+ BuildingRecord = false;
+ CurrentRecord.CallArgs.clear();
+ CurrentRecord.Data.clear();
+}
+
+Error TraceExpander::visit(BufferExtents &) {
+ resetCurrentRecord();
+ return Error::success();
+}
+
+Error TraceExpander::visit(WallclockRecord &) { return Error::success(); }
+
+Error TraceExpander::visit(NewCPUIDRecord &R) {
+ CPUId = R.cpuid();
+ BaseTSC = R.tsc();
+ return Error::success();
+}
+
+Error TraceExpander::visit(TSCWrapRecord &R) {
+ BaseTSC = R.tsc();
+ return Error::success();
+}
+
+Error TraceExpander::visit(CustomEventRecord &R) {
+ resetCurrentRecord();
+ if (!IgnoringRecords) {
+ CurrentRecord.TSC = R.tsc();
+ CurrentRecord.CPU = R.cpu();
+ CurrentRecord.PId = PID;
+ CurrentRecord.TId = TID;
+ CurrentRecord.Type = RecordTypes::CUSTOM_EVENT;
+ CurrentRecord.Data = std::string(R.data());
+ BuildingRecord = true;
+ }
+ return Error::success();
+}
+
+Error TraceExpander::visit(CustomEventRecordV5 &R) {
+ resetCurrentRecord();
+ if (!IgnoringRecords) {
+ BaseTSC += R.delta();
+ CurrentRecord.TSC = BaseTSC;
+ CurrentRecord.CPU = CPUId;
+ CurrentRecord.PId = PID;
+ CurrentRecord.TId = TID;
+ CurrentRecord.Type = RecordTypes::CUSTOM_EVENT;
+ CurrentRecord.Data = std::string(R.data());
+ BuildingRecord = true;
+ }
+ return Error::success();
+}
+
+Error TraceExpander::visit(TypedEventRecord &R) {
+ resetCurrentRecord();
+ if (!IgnoringRecords) {
+ BaseTSC += R.delta();
+ CurrentRecord.TSC = BaseTSC;
+ CurrentRecord.CPU = CPUId;
+ CurrentRecord.PId = PID;
+ CurrentRecord.TId = TID;
+ CurrentRecord.RecordType = R.eventType();
+ CurrentRecord.Type = RecordTypes::TYPED_EVENT;
+ CurrentRecord.Data = std::string(R.data());
+ BuildingRecord = true;
+ }
+ return Error::success();
+}
+
+Error TraceExpander::visit(CallArgRecord &R) {
+ CurrentRecord.CallArgs.push_back(R.arg());
+ CurrentRecord.Type = RecordTypes::ENTER_ARG;
+ return Error::success();
+}
+
+Error TraceExpander::visit(PIDRecord &R) {
+ PID = R.pid();
+ return Error::success();
+}
+
+Error TraceExpander::visit(NewBufferRecord &R) {
+ if (IgnoringRecords)
+ IgnoringRecords = false;
+ TID = R.tid();
+ if (LogVersion == 2)
+ PID = R.tid();
+ return Error::success();
+}
+
+Error TraceExpander::visit(EndBufferRecord &) {
+ IgnoringRecords = true;
+ resetCurrentRecord();
+ return Error::success();
+}
+
+Error TraceExpander::visit(FunctionRecord &R) {
+ resetCurrentRecord();
+ if (!IgnoringRecords) {
+ BaseTSC += R.delta();
+ CurrentRecord.Type = R.recordType();
+ CurrentRecord.FuncId = R.functionId();
+ CurrentRecord.TSC = BaseTSC;
+ CurrentRecord.PId = PID;
+ CurrentRecord.TId = TID;
+ CurrentRecord.CPU = CPUId;
+ BuildingRecord = true;
+ }
+ return Error::success();
+}
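+
+// Example of the delta expansion above: after a NewCPUIDRecord sets BaseTSC
+// to 1000, FunctionRecords carrying deltas of 10 and 5 expand to absolute
+// TSCs of 1010 and 1015, respectively.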
+
+Error TraceExpander::flush() {
+ resetCurrentRecord();
+ return Error::success();
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/FDRTraceWriter.cpp b/contrib/libs/llvm12/lib/XRay/FDRTraceWriter.cpp
new file mode 100644
index 00000000000..71c09bd4fce
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/FDRTraceWriter.cpp
@@ -0,0 +1,151 @@
+//===- FDRTraceWriter.cpp - XRay FDR Trace Writer ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A utility that can write out XRay FDR Mode formatted trace files.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/FDRTraceWriter.h"
+#include <tuple>
+
+namespace llvm {
+namespace xray {
+
+namespace {
+
+template <size_t Index> struct IndexedWriter {
+ template <
+ class Tuple,
+ std::enable_if_t<(Index <
+ std::tuple_size<std::remove_reference_t<Tuple>>::value),
+ int> = 0>
+ static size_t write(support::endian::Writer &OS, Tuple &&T) {
+ OS.write(std::get<Index>(T));
+ return sizeof(std::get<Index>(T)) + IndexedWriter<Index + 1>::write(OS, T);
+ }
+
+ template <
+ class Tuple,
+ std::enable_if_t<(Index >=
+ std::tuple_size<std::remove_reference_t<Tuple>>::value),
+ int> = 0>
+ static size_t write(support::endian::Writer &OS, Tuple &&) {
+ return 0;
+ }
+};
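+
+// IndexedWriter recursively writes each tuple field in declaration order and
+// sums the field sizes; e.g. IndexedWriter<0>::write(OS,
+// std::make_tuple(uint16_t{1}, uint32_t{2})) emits 2 bytes, then 4 bytes, and
+// returns 6.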
+
+template <uint8_t Kind, class... Values>
+Error writeMetadata(support::endian::Writer &OS, Values &&... Ds) {
+ // The first bit in the first byte of metadata records is always set to 1, so
+ // we ensure this is the case when we write out the first byte of the record.
+ uint8_t FirstByte = (static_cast<uint8_t>(Kind) << 1) | uint8_t{0x01u};
+  auto T = std::make_tuple(std::forward<Values>(Ds)...);
+ // Write in field order.
+ OS.write(FirstByte);
+ auto Bytes = IndexedWriter<0>::write(OS, T);
+ assert(Bytes <= 15 && "Must only ever write at most 16 byte metadata!");
+ // Pad out with appropriate numbers of zero's.
+ for (; Bytes < 15; ++Bytes)
+ OS.write('\0');
+ return Error::success();
+}
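+
+// Worked example: writeMetadata<7u>(OS, R.size()) for a BufferExtents record
+// emits the introducer byte (7 << 1) | 1 == 0x0F, the 8-byte size, and then
+// 7 zero bytes of padding, for a fixed 16-byte metadata record.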
+
+} // namespace
+
+FDRTraceWriter::FDRTraceWriter(raw_ostream &O, const XRayFileHeader &H)
+ : OS(O, support::endianness::native) {
+  // We reconstruct the header by writing the fields we care about for traces,
+  // in the format that the runtime would have written.
+ uint32_t BitField =
+ (H.ConstantTSC ? 0x01 : 0x0) | (H.NonstopTSC ? 0x02 : 0x0);
+
+  // For endian-correctness, we write these fields individually, in the order
+  // they are expected to appear, instead of writing the raw bytes of the
+  // struct.
+ OS.write(H.Version);
+ OS.write(H.Type);
+ OS.write(BitField);
+ OS.write(H.CycleFrequency);
+ ArrayRef<char> FreeFormBytes(H.FreeFormData,
+ sizeof(XRayFileHeader::FreeFormData));
+ OS.write(FreeFormBytes);
+}
+
+FDRTraceWriter::~FDRTraceWriter() {}
+
+Error FDRTraceWriter::visit(BufferExtents &R) {
+ return writeMetadata<7u>(OS, R.size());
+}
+
+Error FDRTraceWriter::visit(WallclockRecord &R) {
+ return writeMetadata<4u>(OS, R.seconds(), R.nanos());
+}
+
+Error FDRTraceWriter::visit(NewCPUIDRecord &R) {
+ return writeMetadata<2u>(OS, R.cpuid(), R.tsc());
+}
+
+Error FDRTraceWriter::visit(TSCWrapRecord &R) {
+ return writeMetadata<3u>(OS, R.tsc());
+}
+
+Error FDRTraceWriter::visit(CustomEventRecord &R) {
+ if (auto E = writeMetadata<5u>(OS, R.size(), R.tsc(), R.cpu()))
+ return E;
+ auto D = R.data();
+ ArrayRef<char> Bytes(D.data(), D.size());
+ OS.write(Bytes);
+ return Error::success();
+}
+
+Error FDRTraceWriter::visit(CustomEventRecordV5 &R) {
+ if (auto E = writeMetadata<5u>(OS, R.size(), R.delta()))
+ return E;
+ auto D = R.data();
+ ArrayRef<char> Bytes(D.data(), D.size());
+ OS.write(Bytes);
+ return Error::success();
+}
+
+Error FDRTraceWriter::visit(TypedEventRecord &R) {
+ if (auto E = writeMetadata<8u>(OS, R.size(), R.delta(), R.eventType()))
+ return E;
+ auto D = R.data();
+ ArrayRef<char> Bytes(D.data(), D.size());
+ OS.write(Bytes);
+ return Error::success();
+}
+
+Error FDRTraceWriter::visit(CallArgRecord &R) {
+ return writeMetadata<6u>(OS, R.arg());
+}
+
+Error FDRTraceWriter::visit(PIDRecord &R) {
+ return writeMetadata<9u>(OS, R.pid());
+}
+
+Error FDRTraceWriter::visit(NewBufferRecord &R) {
+ return writeMetadata<0u>(OS, R.tid());
+}
+
+Error FDRTraceWriter::visit(EndBufferRecord &R) {
+ return writeMetadata<1u>(OS, 0);
+}
+
+Error FDRTraceWriter::visit(FunctionRecord &R) {
+ // Write out the data in "field" order, to be endian-aware.
+ uint32_t TypeRecordFuncId = uint32_t{R.functionId() & ~uint32_t{0x0Fu << 28}};
+ TypeRecordFuncId <<= 3;
+ TypeRecordFuncId |= static_cast<uint32_t>(R.recordType());
+ TypeRecordFuncId <<= 1;
+ TypeRecordFuncId &= ~uint32_t{0x01};
+ OS.write(TypeRecordFuncId);
+ OS.write(R.delta());
+ return Error::success();
+}
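+
+// Worked example of the packing above: for function id 1 and a record type
+// whose numeric value is 0 (ENTER, assuming the default enumerator order),
+// the packed word is ((1 & 0x0FFFFFFF) << 3 | 0) << 1 == 0x10, with bit 0
+// clear marking it as a function record rather than metadata.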
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/FileHeaderReader.cpp b/contrib/libs/llvm12/lib/XRay/FileHeaderReader.cpp
new file mode 100644
index 00000000000..6b6daf9deba
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/FileHeaderReader.cpp
@@ -0,0 +1,73 @@
+//===- FileHeaderReader.cpp - XRay File Header Reader --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/FileHeaderReader.h"
+
+namespace llvm {
+namespace xray {
+
+// Reads the first 32 bytes from the given extractor and returns the parsed
+// XRayFileHeader, advancing OffsetPtr past the header.
+Expected<XRayFileHeader> readBinaryFormatHeader(DataExtractor &HeaderExtractor,
+ uint64_t &OffsetPtr) {
+  // FIXME: Maybe deduce whether the data is little or big-endian using some
+  // magic bytes at the beginning of the file?
+
+ // First 32 bytes of the file will always be the header. We assume a certain
+ // format here:
+ //
+ // (2) uint16 : version
+ // (2) uint16 : type
+ // (4) uint32 : bitfield
+ // (8) uint64 : cycle frequency
+ // (16) - : padding
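+  //
+  // For example, with a little-endian extractor, a buffer beginning with the
+  // bytes 03 00 01 00 decodes to Version == 3 and Type == 1.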
+ XRayFileHeader FileHeader;
+ auto PreReadOffset = OffsetPtr;
+ FileHeader.Version = HeaderExtractor.getU16(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading version from file header at offset %" PRId64 ".",
+ OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ FileHeader.Type = HeaderExtractor.getU16(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading file type from file header at offset %" PRId64 ".",
+ OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ uint32_t Bitfield = HeaderExtractor.getU32(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading flag bits from file header at offset %" PRId64 ".",
+ OffsetPtr);
+
+ FileHeader.ConstantTSC = Bitfield & 1uL;
+  FileHeader.NonstopTSC = Bitfield & (1uL << 1);
+ PreReadOffset = OffsetPtr;
+ FileHeader.CycleFrequency = HeaderExtractor.getU64(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading cycle frequency from file header at offset %" PRId64
+ ".",
+ OffsetPtr);
+
+ std::memcpy(&FileHeader.FreeFormData,
+ HeaderExtractor.getData().bytes_begin() + OffsetPtr, 16);
+
+ // Manually advance the offset pointer 16 bytes, after getting a raw memcpy
+ // from the underlying data.
+ OffsetPtr += 16;
+ return std::move(FileHeader);
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/InstrumentationMap.cpp b/contrib/libs/llvm12/lib/XRay/InstrumentationMap.cpp
new file mode 100644
index 00000000000..e6534e5a7be
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/InstrumentationMap.cpp
@@ -0,0 +1,293 @@
+//===- InstrumentationMap.cpp - XRay Instrumentation Map ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the InstrumentationMap type for XRay sleds.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/XRay/InstrumentationMap.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Object/Binary.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/RelocationResolver.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/YAMLTraits.h"
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <system_error>
+#include <vector>
+
+using namespace llvm;
+using namespace xray;
+
+Optional<int32_t> InstrumentationMap::getFunctionId(uint64_t Addr) const {
+ auto I = FunctionIds.find(Addr);
+ if (I != FunctionIds.end())
+ return I->second;
+ return None;
+}
+
+Optional<uint64_t> InstrumentationMap::getFunctionAddr(int32_t FuncId) const {
+ auto I = FunctionAddresses.find(FuncId);
+ if (I != FunctionAddresses.end())
+ return I->second;
+ return None;
+}
+
+using RelocMap = DenseMap<uint64_t, uint64_t>;
+
+static Error
+loadObj(StringRef Filename, object::OwningBinary<object::ObjectFile> &ObjFile,
+ InstrumentationMap::SledContainer &Sleds,
+ InstrumentationMap::FunctionAddressMap &FunctionAddresses,
+ InstrumentationMap::FunctionAddressReverseMap &FunctionIds) {
+ InstrumentationMap Map;
+
+ // Find the section named "xray_instr_map".
+ if ((!ObjFile.getBinary()->isELF() && !ObjFile.getBinary()->isMachO()) ||
+ !(ObjFile.getBinary()->getArch() == Triple::x86_64 ||
+ ObjFile.getBinary()->getArch() == Triple::ppc64le ||
+ ObjFile.getBinary()->getArch() == Triple::arm ||
+ ObjFile.getBinary()->getArch() == Triple::aarch64))
+ return make_error<StringError>(
+ "File format not supported (only does ELF and Mach-O little endian "
+ "64-bit).",
+ std::make_error_code(std::errc::not_supported));
+
+ StringRef Contents = "";
+ const auto &Sections = ObjFile.getBinary()->sections();
+ uint64_t Address = 0;
+ auto I = llvm::find_if(Sections, [&](object::SectionRef Section) {
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (NameOrErr) {
+ Address = Section.getAddress();
+ return *NameOrErr == "xray_instr_map";
+ }
+ consumeError(NameOrErr.takeError());
+ return false;
+ });
+
+ if (I == Sections.end())
+ return make_error<StringError>(
+ "Failed to find XRay instrumentation map.",
+ std::make_error_code(std::errc::executable_format_error));
+
+ if (Expected<StringRef> E = I->getContents())
+ Contents = *E;
+ else
+ return E.takeError();
+
+ RelocMap Relocs;
+ if (ObjFile.getBinary()->isELF()) {
+ uint32_t RelativeRelocation = [](object::ObjectFile *ObjFile) {
+ if (const auto *ELFObj = dyn_cast<object::ELF32LEObjectFile>(ObjFile))
+ return ELFObj->getELFFile().getRelativeRelocationType();
+ else if (const auto *ELFObj =
+ dyn_cast<object::ELF32BEObjectFile>(ObjFile))
+ return ELFObj->getELFFile().getRelativeRelocationType();
+ else if (const auto *ELFObj =
+ dyn_cast<object::ELF64LEObjectFile>(ObjFile))
+ return ELFObj->getELFFile().getRelativeRelocationType();
+ else if (const auto *ELFObj =
+ dyn_cast<object::ELF64BEObjectFile>(ObjFile))
+ return ELFObj->getELFFile().getRelativeRelocationType();
+ else
+ return static_cast<uint32_t>(0);
+ }(ObjFile.getBinary());
+
+ object::SupportsRelocation Supports;
+ object::RelocationResolver Resolver;
+ std::tie(Supports, Resolver) =
+ object::getRelocationResolver(*ObjFile.getBinary());
+
+ for (const object::SectionRef &Section : Sections) {
+ for (const object::RelocationRef &Reloc : Section.relocations()) {
+ if (ObjFile.getBinary()->getArch() == Triple::arm) {
+ if (Supports && Supports(Reloc.getType())) {
+ Expected<uint64_t> ValueOrErr = Reloc.getSymbol()->getValue();
+ if (!ValueOrErr)
+ return ValueOrErr.takeError();
+ Relocs.insert(
+ {Reloc.getOffset(),
+ object::resolveRelocation(Resolver, Reloc, *ValueOrErr, 0)});
+ }
+ } else if (Supports && Supports(Reloc.getType())) {
+ auto AddendOrErr = object::ELFRelocationRef(Reloc).getAddend();
+ auto A = AddendOrErr ? *AddendOrErr : 0;
+ Expected<uint64_t> ValueOrErr = Reloc.getSymbol()->getValue();
+ if (!ValueOrErr)
+ // TODO: Test this error.
+ return ValueOrErr.takeError();
+ Relocs.insert(
+ {Reloc.getOffset(),
+ object::resolveRelocation(Resolver, Reloc, *ValueOrErr, A)});
+ } else if (Reloc.getType() == RelativeRelocation) {
+ if (auto AddendOrErr = object::ELFRelocationRef(Reloc).getAddend())
+ Relocs.insert({Reloc.getOffset(), *AddendOrErr});
+ }
+ }
+ }
+ }
+
+ // Copy the instrumentation map data into the Sleds data structure.
+ auto C = Contents.bytes_begin();
+ bool Is32Bit = ObjFile.getBinary()->makeTriple().isArch32Bit();
+ size_t ELFSledEntrySize = Is32Bit ? 16 : 32;
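+  // Each sled entry consists of a sled address and a function address (4
+  // bytes each on 32-bit targets, 8 bytes each on 64-bit), a 1-byte kind, a
+  // 1-byte always-instrument flag, and a 1-byte version, zero-padded to the
+  // 16- or 32-byte entry size; the loop below reads the fields in this order.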
+
+  if ((Contents.bytes_end() - C) % ELFSledEntrySize != 0)
+ return make_error<StringError>(
+ Twine("Instrumentation map entries not evenly divisible by size of "
+ "an XRay sled entry."),
+ std::make_error_code(std::errc::executable_format_error));
+
+ auto RelocateOrElse = [&](uint64_t Offset, uint64_t Address) {
+ if (!Address) {
+ uint64_t A = I->getAddress() + C - Contents.bytes_begin() + Offset;
+ RelocMap::const_iterator R = Relocs.find(A);
+ if (R != Relocs.end())
+ return R->second;
+ }
+ return Address;
+ };
+
+ const int WordSize = Is32Bit ? 4 : 8;
+ int32_t FuncId = 1;
+ uint64_t CurFn = 0;
+ for (; C != Contents.bytes_end(); C += ELFSledEntrySize) {
+ DataExtractor Extractor(
+ StringRef(reinterpret_cast<const char *>(C), ELFSledEntrySize), true,
+ 8);
+ Sleds.push_back({});
+ auto &Entry = Sleds.back();
+ uint64_t OffsetPtr = 0;
+ uint64_t AddrOff = OffsetPtr;
+ if (Is32Bit)
+ Entry.Address = RelocateOrElse(AddrOff, Extractor.getU32(&OffsetPtr));
+ else
+ Entry.Address = RelocateOrElse(AddrOff, Extractor.getU64(&OffsetPtr));
+ uint64_t FuncOff = OffsetPtr;
+ if (Is32Bit)
+ Entry.Function = RelocateOrElse(FuncOff, Extractor.getU32(&OffsetPtr));
+ else
+ Entry.Function = RelocateOrElse(FuncOff, Extractor.getU64(&OffsetPtr));
+ auto Kind = Extractor.getU8(&OffsetPtr);
+ static constexpr SledEntry::FunctionKinds Kinds[] = {
+ SledEntry::FunctionKinds::ENTRY, SledEntry::FunctionKinds::EXIT,
+ SledEntry::FunctionKinds::TAIL,
+ SledEntry::FunctionKinds::LOG_ARGS_ENTER,
+ SledEntry::FunctionKinds::CUSTOM_EVENT};
+    if (Kind >= sizeof(Kinds) / sizeof(Kinds[0]))
+ return errorCodeToError(
+ std::make_error_code(std::errc::executable_format_error));
+ Entry.Kind = Kinds[Kind];
+ Entry.AlwaysInstrument = Extractor.getU8(&OffsetPtr) != 0;
+ Entry.Version = Extractor.getU8(&OffsetPtr);
+ if (Entry.Version >= 2) {
+ Entry.Address += C - Contents.bytes_begin() + Address;
+ Entry.Function += C - Contents.bytes_begin() + WordSize + Address;
+ }
+
+    // We replicate the function id generation scheme implemented in the
+    // XRay runtime.
+ // FIXME: Figure out how to keep this consistent with the XRay runtime.
+ if (CurFn == 0) {
+ CurFn = Entry.Function;
+ FunctionAddresses[FuncId] = Entry.Function;
+ FunctionIds[Entry.Function] = FuncId;
+ }
+ if (Entry.Function != CurFn) {
+ ++FuncId;
+ CurFn = Entry.Function;
+ FunctionAddresses[FuncId] = Entry.Function;
+ FunctionIds[Entry.Function] = FuncId;
+ }
+ }
+ return Error::success();
+}
+
+static Error
+loadYAML(sys::fs::file_t Fd, size_t FileSize, StringRef Filename,
+ InstrumentationMap::SledContainer &Sleds,
+ InstrumentationMap::FunctionAddressMap &FunctionAddresses,
+ InstrumentationMap::FunctionAddressReverseMap &FunctionIds) {
+ std::error_code EC;
+ sys::fs::mapped_file_region MappedFile(
+ Fd, sys::fs::mapped_file_region::mapmode::readonly, FileSize, 0, EC);
+ sys::fs::closeFile(Fd);
+ if (EC)
+ return make_error<StringError>(
+ Twine("Failed memory-mapping file '") + Filename + "'.", EC);
+
+ std::vector<YAMLXRaySledEntry> YAMLSleds;
+ yaml::Input In(StringRef(MappedFile.data(), MappedFile.size()));
+ In >> YAMLSleds;
+ if (In.error())
+ return make_error<StringError>(
+ Twine("Failed loading YAML document from '") + Filename + "'.",
+ In.error());
+
+ Sleds.reserve(YAMLSleds.size());
+ for (const auto &Y : YAMLSleds) {
+ FunctionAddresses[Y.FuncId] = Y.Function;
+ FunctionIds[Y.Function] = Y.FuncId;
+ Sleds.push_back(SledEntry{Y.Address, Y.Function, Y.Kind, Y.AlwaysInstrument,
+ Y.Version});
+ }
+ return Error::success();
+}
+
+// FIXME: Create error types that encapsulate a bit more information than what
+// StringError instances contain.
+Expected<InstrumentationMap>
+llvm::xray::loadInstrumentationMap(StringRef Filename) {
+ // At this point we assume the file is an object file -- and if that doesn't
+ // work, we treat it as YAML.
+ // FIXME: Extend to support non-ELF and non-x86_64 binaries.
+
+ InstrumentationMap Map;
+ auto ObjectFileOrError = object::ObjectFile::createObjectFile(Filename);
+ if (!ObjectFileOrError) {
+ auto E = ObjectFileOrError.takeError();
+ // We try to load it as YAML if the ELF load didn't work.
+ Expected<sys::fs::file_t> FdOrErr =
+ sys::fs::openNativeFileForRead(Filename);
+ if (!FdOrErr) {
+      // If we cannot open the file, report the original object-file error.
+ consumeError(FdOrErr.takeError());
+ return std::move(E);
+ }
+
+ uint64_t FileSize;
+ if (sys::fs::file_size(Filename, FileSize))
+ return std::move(E);
+
+ // If the file is empty, we return the original error.
+ if (FileSize == 0)
+ return std::move(E);
+
+    // From this point on, errors can only come from the YAML path, so we
+    // consume the object-file error here.
+ consumeError(std::move(E));
+ if (auto E = loadYAML(*FdOrErr, FileSize, Filename, Map.Sleds,
+ Map.FunctionAddresses, Map.FunctionIds))
+ return std::move(E);
+ } else if (auto E = loadObj(Filename, *ObjectFileOrError, Map.Sleds,
+ Map.FunctionAddresses, Map.FunctionIds)) {
+ return std::move(E);
+ }
+ return Map;
+}
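+
+// Usage sketch (the sleds() accessor is assumed from InstrumentationMap's
+// public interface; error handling elided):
+//
+//   auto MapOrErr = loadInstrumentationMap("a.out");
+//   if (!MapOrErr)
+//     return MapOrErr.takeError();
+//   for (const auto &Sled : MapOrErr->sleds())
+//     ...;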
diff --git a/contrib/libs/llvm12/lib/XRay/LogBuilderConsumer.cpp b/contrib/libs/llvm12/lib/XRay/LogBuilderConsumer.cpp
new file mode 100644
index 00000000000..ffb49f9eb4e
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/LogBuilderConsumer.cpp
@@ -0,0 +1,37 @@
+//===- LogBuilderConsumer.cpp - XRay Flight Data Recorder Mode Records ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/FDRRecordConsumer.h"
+
+namespace llvm {
+namespace xray {
+
+Error LogBuilderConsumer::consume(std::unique_ptr<Record> R) {
+ if (!R)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Must not call RecordConsumer::consume() with a null pointer.");
+ Records.push_back(std::move(R));
+ return Error::success();
+}
+
+Error PipelineConsumer::consume(std::unique_ptr<Record> R) {
+ if (!R)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Must not call RecordConsumer::consume() with a null pointer.");
+
+ // We apply all of the visitors in order, and concatenate errors
+ // appropriately.
+ Error Result = Error::success();
+ for (auto *V : Visitors)
+ Result = joinErrors(std::move(Result), R->apply(*V));
+ return Result;
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/Profile.cpp b/contrib/libs/llvm12/lib/XRay/Profile.cpp
new file mode 100644
index 00000000000..c1a43632b60
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/Profile.cpp
@@ -0,0 +1,403 @@
+//===- Profile.cpp - XRay Profile Abstraction -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the XRay Profile class representing the latency profile generated by
+// XRay's profiling mode.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/Profile.h"
+
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/XRay/Trace.h"
+#include <deque>
+#include <memory>
+
+namespace llvm {
+namespace xray {
+
+Profile::Profile(const Profile &O) {
+  // Re-create all the tries from the original Profile (O) in the Profile
+  // being initialized, based on the Block instances we visit.
+ for (const auto &Block : O) {
+ Blocks.push_back({Block.Thread, {}});
+ auto &B = Blocks.back();
+ for (const auto &PathData : Block.PathData)
+ B.PathData.push_back({internPath(cantFail(O.expandPath(PathData.first))),
+ PathData.second});
+ }
+}
+
+Profile &Profile::operator=(const Profile &O) {
+ Profile P = O;
+ *this = std::move(P);
+ return *this;
+}
+
+namespace {
+
+struct BlockHeader {
+ uint32_t Size;
+ uint32_t Number;
+ uint64_t Thread;
+};
+
+static Expected<BlockHeader> readBlockHeader(DataExtractor &Extractor,
+ uint64_t &Offset) {
+ BlockHeader H;
+ uint64_t CurrentOffset = Offset;
+ H.Size = Extractor.getU32(&Offset);
+ if (Offset == CurrentOffset)
+ return make_error<StringError>(
+ Twine("Error parsing block header size at offset '") +
+ Twine(CurrentOffset) + "'",
+ std::make_error_code(std::errc::invalid_argument));
+ CurrentOffset = Offset;
+ H.Number = Extractor.getU32(&Offset);
+ if (Offset == CurrentOffset)
+ return make_error<StringError>(
+ Twine("Error parsing block header number at offset '") +
+ Twine(CurrentOffset) + "'",
+ std::make_error_code(std::errc::invalid_argument));
+ CurrentOffset = Offset;
+ H.Thread = Extractor.getU64(&Offset);
+ if (Offset == CurrentOffset)
+ return make_error<StringError>(
+ Twine("Error parsing block header thread id at offset '") +
+ Twine(CurrentOffset) + "'",
+ std::make_error_code(std::errc::invalid_argument));
+ return H;
+}
+
+static Expected<std::vector<Profile::FuncID>> readPath(DataExtractor &Extractor,
+ uint64_t &Offset) {
+  // We read a sequence of int32_t values until we find a 0.
+ std::vector<Profile::FuncID> Path;
+ auto CurrentOffset = Offset;
+ int32_t FuncId;
+ do {
+ FuncId = Extractor.getSigned(&Offset, 4);
+ if (CurrentOffset == Offset)
+ return make_error<StringError>(
+ Twine("Error parsing path at offset '") + Twine(CurrentOffset) + "'",
+ std::make_error_code(std::errc::invalid_argument));
+ CurrentOffset = Offset;
+ Path.push_back(FuncId);
+ } while (FuncId != 0);
+ return std::move(Path);
+}
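+
+// Example (assuming a little-endian extractor): the bytes
+//   0D 00 00 00 06 00 00 00 00 00 00 00
+// decode to the path {13, 6, 0}; note that the terminating 0 is retained in
+// the returned vector.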
+
+static Expected<Profile::Data> readData(DataExtractor &Extractor,
+ uint64_t &Offset) {
+ // We expect a certain number of elements for Data:
+ // - A 64-bit CallCount
+ // - A 64-bit CumulativeLocalTime counter
+ Profile::Data D;
+ auto CurrentOffset = Offset;
+ D.CallCount = Extractor.getU64(&Offset);
+ if (CurrentOffset == Offset)
+ return make_error<StringError>(
+ Twine("Error parsing call counts at offset '") + Twine(CurrentOffset) +
+ "'",
+ std::make_error_code(std::errc::invalid_argument));
+ CurrentOffset = Offset;
+ D.CumulativeLocalTime = Extractor.getU64(&Offset);
+ if (CurrentOffset == Offset)
+ return make_error<StringError>(
+ Twine("Error parsing cumulative local time at offset '") +
+ Twine(CurrentOffset) + "'",
+ std::make_error_code(std::errc::invalid_argument));
+ return D;
+}
+
+} // namespace
+
+Error Profile::addBlock(Block &&B) {
+ if (B.PathData.empty())
+ return make_error<StringError>(
+ "Block may not have empty path data.",
+ std::make_error_code(std::errc::invalid_argument));
+
+ Blocks.emplace_back(std::move(B));
+ return Error::success();
+}
+
+Expected<std::vector<Profile::FuncID>> Profile::expandPath(PathID P) const {
+ auto It = PathIDMap.find(P);
+ if (It == PathIDMap.end())
+ return make_error<StringError>(
+ Twine("PathID not found: ") + Twine(P),
+ std::make_error_code(std::errc::invalid_argument));
+ std::vector<Profile::FuncID> Path;
+ for (auto Node = It->second; Node; Node = Node->Caller)
+ Path.push_back(Node->Func);
+ return std::move(Path);
+}
+
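+// Interning a path (given leaf first) walks its root-to-leaf sequence through
+// a trie of caller/callee nodes, creating nodes the first time they are seen,
+// and returns a stable PathID for the leaf. For example (illustrative ids):
+// interning {3, 2, 1} creates the chain 1 -> 2 -> 3 and assigns the leaf an
+// ID; interning {4, 2, 1} afterwards reuses nodes 1 and 2 and only creates a
+// node for 4.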
+Profile::PathID Profile::internPath(ArrayRef<FuncID> P) {
+ if (P.empty())
+ return 0;
+
+ auto RootToLeafPath = reverse(P);
+
+ // Find the root.
+ auto It = RootToLeafPath.begin();
+ auto PathRoot = *It++;
+ auto RootIt =
+ find_if(Roots, [PathRoot](TrieNode *N) { return N->Func == PathRoot; });
+
+ // If we've not seen this root before, remember it.
+ TrieNode *Node = nullptr;
+ if (RootIt == Roots.end()) {
+ NodeStorage.emplace_back();
+ Node = &NodeStorage.back();
+ Node->Func = PathRoot;
+ Roots.push_back(Node);
+ } else {
+ Node = *RootIt;
+ }
+
+ // Now traverse the path, re-creating if necessary.
+ while (It != RootToLeafPath.end()) {
+ auto NodeFuncID = *It++;
+ auto CalleeIt = find_if(Node->Callees, [NodeFuncID](TrieNode *N) {
+ return N->Func == NodeFuncID;
+ });
+ if (CalleeIt == Node->Callees.end()) {
+ NodeStorage.emplace_back();
+ auto NewNode = &NodeStorage.back();
+ NewNode->Func = NodeFuncID;
+ NewNode->Caller = Node;
+ Node->Callees.push_back(NewNode);
+ Node = NewNode;
+ } else {
+ Node = *CalleeIt;
+ }
+ }
+
+ // At this point, Node *must* be pointing at the leaf.
+ assert(Node->Func == P.front());
+ if (Node->ID == 0) {
+ Node->ID = NextID++;
+ PathIDMap.insert({Node->ID, Node});
+ }
+ return Node->ID;
+}
+
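+// Merges two profiles while preserving thread attribution: data for
+// equivalent paths is aggregated only when the paths were recorded on the
+// same thread, and the result contains one block per thread id seen.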
+Profile mergeProfilesByThread(const Profile &L, const Profile &R) {
+ Profile Merged;
+ using PathDataMap = DenseMap<Profile::PathID, Profile::Data>;
+ using PathDataMapPtr = std::unique_ptr<PathDataMap>;
+ using PathDataVector = decltype(Profile::Block::PathData);
+ using ThreadProfileIndexMap = DenseMap<Profile::ThreadID, PathDataMapPtr>;
+ ThreadProfileIndexMap ThreadProfileIndex;
+
+ for (const auto &P : {std::ref(L), std::ref(R)})
+ for (const auto &Block : P.get()) {
+ ThreadProfileIndexMap::iterator It;
+ std::tie(It, std::ignore) = ThreadProfileIndex.insert(
+ {Block.Thread, PathDataMapPtr{new PathDataMap()}});
+ for (const auto &PathAndData : Block.PathData) {
+ auto &PathID = PathAndData.first;
+ auto &Data = PathAndData.second;
+ auto NewPathID =
+ Merged.internPath(cantFail(P.get().expandPath(PathID)));
+ PathDataMap::iterator PathDataIt;
+ bool Inserted;
+ std::tie(PathDataIt, Inserted) = It->second->insert({NewPathID, Data});
+ if (!Inserted) {
+ auto &ExistingData = PathDataIt->second;
+ ExistingData.CallCount += Data.CallCount;
+ ExistingData.CumulativeLocalTime += Data.CumulativeLocalTime;
+ }
+ }
+ }
+
+ for (const auto &IndexedThreadBlock : ThreadProfileIndex) {
+ PathDataVector PathAndData;
+ PathAndData.reserve(IndexedThreadBlock.second->size());
+ copy(*IndexedThreadBlock.second, std::back_inserter(PathAndData));
+ cantFail(
+ Merged.addBlock({IndexedThreadBlock.first, std::move(PathAndData)}));
+ }
+ return Merged;
+}
+
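+// Merges two profiles across threads: data for equivalent paths is aggregated
+// regardless of which thread recorded them, and the result is a single block
+// attributed to thread 0.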
+Profile mergeProfilesByStack(const Profile &L, const Profile &R) {
+ Profile Merged;
+ using PathDataMap = DenseMap<Profile::PathID, Profile::Data>;
+ PathDataMap PathData;
+ using PathDataVector = decltype(Profile::Block::PathData);
+ for (const auto &P : {std::ref(L), std::ref(R)})
+ for (const auto &Block : P.get())
+ for (const auto &PathAndData : Block.PathData) {
+ auto &PathId = PathAndData.first;
+ auto &Data = PathAndData.second;
+ auto NewPathID =
+ Merged.internPath(cantFail(P.get().expandPath(PathId)));
+ PathDataMap::iterator PathDataIt;
+ bool Inserted;
+ std::tie(PathDataIt, Inserted) = PathData.insert({NewPathID, Data});
+ if (!Inserted) {
+ auto &ExistingData = PathDataIt->second;
+ ExistingData.CallCount += Data.CallCount;
+ ExistingData.CumulativeLocalTime += Data.CumulativeLocalTime;
+ }
+ }
+
+ // In the end there's a single Block, for thread 0.
+ PathDataVector Block;
+ Block.reserve(PathData.size());
+ copy(PathData, std::back_inserter(Block));
+ cantFail(Merged.addBlock({0, std::move(Block)}));
+ return Merged;
+}
+
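+// Loads a profile from the given file. As parsed here, the file is a sequence
+// of blocks, each consisting of a BlockHeader followed by one zero-terminated
+// path and one (CallCount, CumulativeLocalTime) pair.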
+Expected<Profile> loadProfile(StringRef Filename) {
+ Expected<sys::fs::file_t> FdOrErr = sys::fs::openNativeFileForRead(Filename);
+ if (!FdOrErr)
+ return FdOrErr.takeError();
+
+ uint64_t FileSize;
+ if (auto EC = sys::fs::file_size(Filename, FileSize))
+ return make_error<StringError>(
+ Twine("Cannot get filesize of '") + Filename + "'", EC);
+
+ std::error_code EC;
+ sys::fs::mapped_file_region MappedFile(
+ *FdOrErr, sys::fs::mapped_file_region::mapmode::readonly, FileSize, 0,
+ EC);
+ sys::fs::closeFile(*FdOrErr);
+ if (EC)
+ return make_error<StringError>(
+ Twine("Cannot mmap profile '") + Filename + "'", EC);
+ StringRef Data(MappedFile.data(), MappedFile.size());
+
+ Profile P;
+ uint64_t Offset = 0;
+ DataExtractor Extractor(Data, true, 8);
+
+ // For each block we get from the file:
+ while (Offset != MappedFile.size()) {
+ auto HeaderOrError = readBlockHeader(Extractor, Offset);
+ if (!HeaderOrError)
+ return HeaderOrError.takeError();
+
+ // TODO: Maybe store this header information for each block, even just for
+ // debugging?
+ const auto &Header = HeaderOrError.get();
+
+ // Read in the path data.
+ auto PathOrError = readPath(Extractor, Offset);
+ if (!PathOrError)
+ return PathOrError.takeError();
+ const auto &Path = PathOrError.get();
+
+ // Read in the data for this path; the path itself is interned into the
+ // profile when we add the block below.
+ auto DataOrError = readData(Extractor, Offset);
+ if (!DataOrError)
+ return DataOrError.takeError();
+ auto &Data = DataOrError.get();
+
+ if (auto E =
+ P.addBlock(Profile::Block{Profile::ThreadID{Header.Thread},
+ {{P.internPath(Path), std::move(Data)}}}))
+ return std::move(E);
+ }
+
+ return P;
+}
+
+namespace {
+
+struct StackEntry {
+ uint64_t Timestamp;
+ Profile::FuncID FuncId;
+};
+
+} // namespace
+
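+// Computes a profile from an in-memory Trace by replaying each thread's
+// function entry/exit events against a shadow stack. A typical use, as a
+// sketch (error handling elided; loadTraceFile is declared in
+// llvm/XRay/Trace.h):
+//
+//   if (auto T = loadTraceFile("xray-log.bin"))
+//     if (auto P = profileFromTrace(*T)) {
+//       // ... consume *P ...
+//     }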
+Expected<Profile> profileFromTrace(const Trace &T) {
+ Profile P;
+
+ // The implementation of the algorithm re-creates the execution of
+ // the functions based on the trace data. To do this, we set up a number of
+ // data structures to track the execution context of every thread in the
+ // Trace.
+ DenseMap<Profile::ThreadID, std::vector<StackEntry>> ThreadStacks;
+ DenseMap<Profile::ThreadID, DenseMap<Profile::PathID, Profile::Data>>
+ ThreadPathData;
+
+ // We then do a pass through the Trace, accounting data on a per-thread basis.
+ for (const auto &E : T) {
+ auto &TSD = ThreadStacks[E.TId];
+ switch (E.Type) {
+ case RecordTypes::ENTER:
+ case RecordTypes::ENTER_ARG:
+
+ // Push entries into the function call stack.
+ TSD.push_back({E.TSC, E.FuncId});
+ break;
+
+ case RecordTypes::EXIT:
+ case RecordTypes::TAIL_EXIT:
+
+ // Exits cause some accounting to happen, based on the state of the stack.
+ // For each function we pop off the stack, we take note of the path and
+ // record the cumulative state for this path. As we're doing this, we
+ // intern the path into the Profile.
+ while (!TSD.empty()) {
+ auto Top = TSD.back();
+ auto FunctionLocalTime = AbsoluteDifference(Top.Timestamp, E.TSC);
+ SmallVector<Profile::FuncID, 16> Path;
+ transform(reverse(TSD), std::back_inserter(Path),
+ std::mem_fn(&StackEntry::FuncId));
+ auto InternedPath = P.internPath(Path);
+ auto &TPD = ThreadPathData[E.TId][InternedPath];
+ ++TPD.CallCount;
+ TPD.CumulativeLocalTime += FunctionLocalTime;
+ TSD.pop_back();
+
+ // If we've matched the corresponding entry event for this function,
+ // then we exit the loop.
+ if (Top.FuncId == E.FuncId)
+ break;
+
+ // FIXME: Consider the intermediate times and the cumulative tree time
+ // as well.
+ }
+
+ break;
+
+ case RecordTypes::CUSTOM_EVENT:
+ case RecordTypes::TYPED_EVENT:
+ // TODO: Support an extension point to allow handling of custom and typed
+ // events in profiles.
+ break;
+ }
+ }
+
+ // Once we've gone through the Trace, we now create one Block per thread in
+ // the Profile.
+ for (const auto &ThreadPaths : ThreadPathData) {
+ const auto &TID = ThreadPaths.first;
+ const auto &PathsData = ThreadPaths.second;
+ if (auto E = P.addBlock({
+ TID,
+ std::vector<std::pair<Profile::PathID, Profile::Data>>(
+ PathsData.begin(), PathsData.end()),
+ }))
+ return std::move(E);
+ }
+
+ return P;
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/RecordInitializer.cpp b/contrib/libs/llvm12/lib/XRay/RecordInitializer.cpp
new file mode 100644
index 00000000000..68ab3db0620
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/RecordInitializer.cpp
@@ -0,0 +1,431 @@
+//===- RecordInitializer.cpp - XRay FDR Mode Record Initializer -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/FDRRecords.h"
+
+namespace llvm {
+namespace xray {
+
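+// Each visit() below decodes one record body from the DataExtractor E at
+// OffsetPtr. Metadata record bodies occupy a fixed kMetadataBodySize bytes,
+// so after reading the fields each visitor advances OffsetPtr to the next
+// record boundary.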
+Error RecordInitializer::visit(BufferExtents &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr, sizeof(uint64_t)))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a buffer extent (%" PRId64 ").", OffsetPtr);
+
+ auto PreReadOffset = OffsetPtr;
+ R.Size = E.getU64(&OffsetPtr);
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(std::make_error_code(std::errc::invalid_argument),
+ "Cannot read buffer extent at offset %" PRId64 ".",
+ OffsetPtr);
+
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - PreReadOffset);
+ return Error::success();
+}
+
+Error RecordInitializer::visit(WallclockRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a wallclock record (%" PRId64 ").", OffsetPtr);
+ auto BeginOffset = OffsetPtr;
+ auto PreReadOffset = OffsetPtr;
+ R.Seconds = E.getU64(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read wall clock 'seconds' field at offset %" PRId64 ".",
+ OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ R.Nanos = E.getU32(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read wall clock 'nanos' field at offset %" PRId64 ".",
+ OffsetPtr);
+
+ // Align to metadata record size boundary.
+ assert(OffsetPtr - BeginOffset <= MetadataRecord::kMetadataBodySize);
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - BeginOffset);
+ return Error::success();
+}
+
+Error RecordInitializer::visit(NewCPUIDRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a new cpu id record (%" PRId64 ").", OffsetPtr);
+ auto BeginOffset = OffsetPtr;
+ auto PreReadOffset = OffsetPtr;
+ R.CPUId = E.getU16(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(std::make_error_code(std::errc::invalid_argument),
+ "Cannot read CPU id at offset %" PRId64 ".",
+ OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ R.TSC = E.getU64(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(std::make_error_code(std::errc::invalid_argument),
+ "Cannot read CPU TSC at offset %" PRId64 ".",
+ OffsetPtr);
+
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - BeginOffset);
+ return Error::success();
+}
+
+Error RecordInitializer::visit(TSCWrapRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a new TSC wrap record (%" PRId64 ").", OffsetPtr);
+
+ auto PreReadOffset = OffsetPtr;
+ R.BaseTSC = E.getU64(&OffsetPtr);
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read TSC wrap record at offset %" PRId64 ".", OffsetPtr);
+
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - PreReadOffset);
+ return Error::success();
+}
+
+Error RecordInitializer::visit(CustomEventRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a custom event record (%" PRId64 ").", OffsetPtr);
+
+ auto BeginOffset = OffsetPtr;
+ auto PreReadOffset = OffsetPtr;
+ R.Size = E.getSigned(&OffsetPtr, sizeof(int32_t));
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a custom event record size field offset %" PRId64 ".",
+ OffsetPtr);
+
+ if (R.Size <= 0)
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid size for custom event (size = %d) at offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ R.TSC = E.getU64(&OffsetPtr);
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a custom event TSC field at offset %" PRId64 ".",
+ OffsetPtr);
+
+ // From version 4 of the FDR log onwards, we also want to capture the CPU ID
+ // of the custom event.
+ if (Version >= 4) {
+ PreReadOffset = OffsetPtr;
+ R.CPU = E.getU16(&OffsetPtr);
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Missing CPU field at offset %" PRId64 ".", OffsetPtr);
+ }
+
+ assert(OffsetPtr > BeginOffset &&
+ OffsetPtr - BeginOffset <= MetadataRecord::kMetadataBodySize);
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - BeginOffset);
+
+ // Next we read in a fixed chunk of data from the given offset.
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr, R.Size))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Cannot read %d bytes of custom event data from offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ std::vector<uint8_t> Buffer;
+ Buffer.resize(R.Size);
+ PreReadOffset = OffsetPtr;
+ if (E.getU8(&OffsetPtr, Buffer.data(), R.Size) != Buffer.data())
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading data into buffer of size %d at offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ assert(OffsetPtr >= PreReadOffset);
+ if (OffsetPtr - PreReadOffset != static_cast<uint32_t>(R.Size))
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading enough bytes for the custom event payload -- read "
+ "%" PRId64 " expecting %d bytes at offset %" PRId64 ".",
+ OffsetPtr - PreReadOffset, R.Size, PreReadOffset);
+
+ R.Data.assign(Buffer.begin(), Buffer.end());
+ return Error::success();
+}
+
+Error RecordInitializer::visit(CustomEventRecordV5 &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a custom event record (%" PRId64 ").", OffsetPtr);
+
+ auto BeginOffset = OffsetPtr;
+ auto PreReadOffset = OffsetPtr;
+
+ R.Size = E.getSigned(&OffsetPtr, sizeof(int32_t));
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a custom event record size field offset %" PRId64 ".",
+ OffsetPtr);
+
+ if (R.Size <= 0)
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid size for custom event (size = %d) at offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ R.Delta = E.getSigned(&OffsetPtr, sizeof(int32_t));
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a custom event record TSC delta field at offset "
+ "%" PRId64 ".",
+ OffsetPtr);
+
+ assert(OffsetPtr > BeginOffset &&
+ OffsetPtr - BeginOffset <= MetadataRecord::kMetadataBodySize);
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - BeginOffset);
+
+ // Next we read in a fixed chunk of data from the given offset.
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr, R.Size))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Cannot read %d bytes of custom event data from offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ std::vector<uint8_t> Buffer;
+ Buffer.resize(R.Size);
+ PreReadOffset = OffsetPtr;
+ if (E.getU8(&OffsetPtr, Buffer.data(), R.Size) != Buffer.data())
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading data into buffer of size %d at offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ assert(OffsetPtr >= PreReadOffset);
+ if (OffsetPtr - PreReadOffset != static_cast<uint32_t>(R.Size))
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading enough bytes for the custom event payload -- read "
+ "%" PRId64 " expecting %d bytes at offset %" PRId64 ".",
+ OffsetPtr - PreReadOffset, R.Size, PreReadOffset);
+
+ R.Data.assign(Buffer.begin(), Buffer.end());
+ return Error::success();
+}
+
+Error RecordInitializer::visit(TypedEventRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a typed event record (%" PRId64 ").", OffsetPtr);
+
+ auto BeginOffset = OffsetPtr;
+ auto PreReadOffset = OffsetPtr;
+
+ R.Size = E.getSigned(&OffsetPtr, sizeof(int32_t));
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a typed event record size field offset %" PRId64 ".",
+ OffsetPtr);
+
+ if (R.Size <= 0)
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid size for typed event (size = %d) at offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ R.Delta = E.getSigned(&OffsetPtr, sizeof(int32_t));
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a typed event record TSC delta field at offset "
+ "%" PRId64 ".",
+ OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ R.EventType = E.getU16(&OffsetPtr);
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a typed event record type field at offset %" PRId64 ".",
+ OffsetPtr);
+
+ assert(OffsetPtr > BeginOffset &&
+ OffsetPtr - BeginOffset <= MetadataRecord::kMetadataBodySize);
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - BeginOffset);
+
+ // Next we read in a fixed chunk of data from the given offset.
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr, R.Size))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Cannot read %d bytes of custom event data from offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ std::vector<uint8_t> Buffer;
+ Buffer.resize(R.Size);
+ PreReadOffset = OffsetPtr;
+ if (E.getU8(&OffsetPtr, Buffer.data(), R.Size) != Buffer.data())
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading data into buffer of size %d at offset %" PRId64 ".",
+ R.Size, OffsetPtr);
+
+ assert(OffsetPtr >= PreReadOffset);
+ if (OffsetPtr - PreReadOffset != static_cast<uint32_t>(R.Size))
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading enough bytes for the typed event payload -- read "
+ "%" PRId64 " expecting %d bytes at offset %" PRId64 ".",
+ OffsetPtr - PreReadOffset, R.Size, PreReadOffset);
+
+ R.Data.assign(Buffer.begin(), Buffer.end());
+ return Error::success();
+}
+
+Error RecordInitializer::visit(CallArgRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a call argument record (%" PRId64 ").",
+ OffsetPtr);
+
+ auto PreReadOffset = OffsetPtr;
+ R.Arg = E.getU64(&OffsetPtr);
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a call arg record at offset %" PRId64 ".", OffsetPtr);
+
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - PreReadOffset);
+ return Error::success();
+}
+
+Error RecordInitializer::visit(PIDRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a process ID record (%" PRId64 ").", OffsetPtr);
+
+ auto PreReadOffset = OffsetPtr;
+ R.PID = E.getSigned(&OffsetPtr, 4);
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a process ID record at offset %" PRId64 ".", OffsetPtr);
+
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - PreReadOffset);
+ return Error::success();
+}
+
+Error RecordInitializer::visit(NewBufferRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a new buffer record (%" PRId64 ").", OffsetPtr);
+
+ auto PreReadOffset = OffsetPtr;
+ R.TID = E.getSigned(&OffsetPtr, sizeof(int32_t));
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Cannot read a new buffer record at offset %" PRId64 ".", OffsetPtr);
+
+ OffsetPtr += MetadataRecord::kMetadataBodySize - (OffsetPtr - PreReadOffset);
+ return Error::success();
+}
+
+Error RecordInitializer::visit(EndBufferRecord &R) {
+ if (!E.isValidOffsetForDataOfSize(OffsetPtr,
+ MetadataRecord::kMetadataBodySize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for an end-of-buffer record (%" PRId64 ").",
+ OffsetPtr);
+
+ OffsetPtr += MetadataRecord::kMetadataBodySize;
+ return Error::success();
+}
+
+Error RecordInitializer::visit(FunctionRecord &R) {
+ // For function records, we need to retreat one byte to read a full
+ // unsigned 32-bit value. The first four bytes will have the following
+ // layout:
+ //
+ // bit 0 : function record indicator (must be 0)
+ // bits 1..3 : function record type
+ // bits 4..31 : function id
+ //
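+ //
+ // As an illustrative example (a made-up word, not from a real log): the
+ // 32-bit value 0x00000032 has indicator bit 0, record type
+ // (0x32 >> 1) & 0x07 == 1, and function id 0x32 >> 4 == 3.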
+ if (OffsetPtr == 0 || !E.isValidOffsetForDataOfSize(
+ --OffsetPtr, FunctionRecord::kFunctionRecordSize))
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Invalid offset for a function record (%" PRId64 ").", OffsetPtr);
+
+ auto BeginOffset = OffsetPtr;
+ auto PreReadOffset = BeginOffset;
+ uint32_t Buffer = E.getU32(&OffsetPtr);
+ if (PreReadOffset == OffsetPtr)
+ return createStringError(
+ std::make_error_code(std::errc::bad_address),
+ "Cannot read function id field from offset %" PRId64 ".", OffsetPtr);
+
+ // To get the function record type, we shift the buffer one bit to the right
+ // (dropping the function record indicator) and mask with 0b0111 to get the
+ // record type as an unsigned value.
+ unsigned FunctionType = (Buffer >> 1) & 0x07u;
+ switch (FunctionType) {
+ case static_cast<unsigned>(RecordTypes::ENTER):
+ case static_cast<unsigned>(RecordTypes::ENTER_ARG):
+ case static_cast<unsigned>(RecordTypes::EXIT):
+ case static_cast<unsigned>(RecordTypes::TAIL_EXIT):
+ R.Kind = static_cast<RecordTypes>(FunctionType);
+ break;
+ default:
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Unknown function record type '%d' at offset %" PRId64 ".",
+ FunctionType, BeginOffset);
+ }
+
+ R.FuncId = Buffer >> 4;
+ PreReadOffset = OffsetPtr;
+ R.Delta = E.getU32(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::invalid_argument),
+ "Failed reading TSC delta from offset %" PRId64 ".", OffsetPtr);
+ assert(FunctionRecord::kFunctionRecordSize == (OffsetPtr - BeginOffset));
+ return Error::success();
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/RecordPrinter.cpp b/contrib/libs/llvm12/lib/XRay/RecordPrinter.cpp
new file mode 100644
index 00000000000..32d42104db9
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/RecordPrinter.cpp
@@ -0,0 +1,108 @@
+//===- RecordPrinter.cpp - FDR Record Printer -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/RecordPrinter.h"
+
+#include "llvm/Support/FormatVariadic.h"
+
+namespace llvm {
+namespace xray {
+
+Error RecordPrinter::visit(BufferExtents &R) {
+ OS << formatv("<Buffer: size = {0} bytes>", R.size()) << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(WallclockRecord &R) {
+ OS << formatv("<Wall Time: seconds = {0}.{1,0+6}>", R.seconds(), R.nanos())
+ << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(NewCPUIDRecord &R) {
+ OS << formatv("<CPU: id = {0}, tsc = {1}>", R.cpuid(), R.tsc()) << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(TSCWrapRecord &R) {
+ OS << formatv("<TSC Wrap: base = {0}>", R.tsc()) << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(CustomEventRecord &R) {
+ OS << formatv(
+ "<Custom Event: tsc = {0}, cpu = {1}, size = {2}, data = '{3}'>",
+ R.tsc(), R.cpu(), R.size(), R.data())
+ << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(CustomEventRecordV5 &R) {
+ OS << formatv("<Custom Event: delta = +{0}, size = {1}, data = '{2}'>",
+ R.delta(), R.size(), R.data())
+ << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(TypedEventRecord &R) {
+ OS << formatv(
+ "<Typed Event: delta = +{0}, type = {1}, size = {2}, data = '{3}'",
+ R.delta(), R.eventType(), R.size(), R.data())
+ << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(CallArgRecord &R) {
+ OS << formatv("<Call Argument: data = {0} (hex = {0:x})>", R.arg()) << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(PIDRecord &R) {
+ OS << formatv("<PID: {0}>", R.pid()) << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(NewBufferRecord &R) {
+ OS << formatv("<Thread ID: {0}>", R.tid()) << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(EndBufferRecord &R) {
+ OS << "<End of Buffer>" << Delim;
+ return Error::success();
+}
+
+Error RecordPrinter::visit(FunctionRecord &R) {
+ // FIXME: Support symbolization here?
+ switch (R.recordType()) {
+ case RecordTypes::ENTER:
+ OS << formatv("<Function Enter: #{0} delta = +{1}>", R.functionId(),
+ R.delta());
+ break;
+ case RecordTypes::ENTER_ARG:
+ OS << formatv("<Function Enter With Arg: #{0} delta = +{1}>",
+ R.functionId(), R.delta());
+ break;
+ case RecordTypes::EXIT:
+ OS << formatv("<Function Exit: #{0} delta = +{1}>", R.functionId(),
+ R.delta());
+ break;
+ case RecordTypes::TAIL_EXIT:
+ OS << formatv("<Function Tail Exit: #{0} delta = +{1}>", R.functionId(),
+ R.delta());
+ break;
+ case RecordTypes::CUSTOM_EVENT:
+ case RecordTypes::TYPED_EVENT:
+ // TODO: Flag as a bug?
+ break;
+ }
+ OS << Delim;
+ return Error::success();
+}
+
+} // namespace xray
+} // namespace llvm
diff --git a/contrib/libs/llvm12/lib/XRay/Trace.cpp b/contrib/libs/llvm12/lib/XRay/Trace.cpp
new file mode 100644
index 00000000000..5ceb269b6d1
--- /dev/null
+++ b/contrib/libs/llvm12/lib/XRay/Trace.cpp
@@ -0,0 +1,477 @@
+//===- Trace.cpp - XRay Trace Loading implementation. ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// XRay log reader implementation.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/XRay/Trace.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/DataExtractor.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/XRay/BlockIndexer.h"
+#include "llvm/XRay/BlockVerifier.h"
+#include "llvm/XRay/FDRRecordConsumer.h"
+#include "llvm/XRay/FDRRecordProducer.h"
+#include "llvm/XRay/FDRRecords.h"
+#include "llvm/XRay/FDRTraceExpander.h"
+#include "llvm/XRay/FileHeaderReader.h"
+#include "llvm/XRay/YAMLXRayRecord.h"
+#include <memory>
+#include <vector>
+
+using namespace llvm;
+using namespace llvm::xray;
+using llvm::yaml::Input;
+
+namespace {
+using XRayRecordStorage =
+ std::aligned_storage<sizeof(XRayRecord), alignof(XRayRecord)>::type;
+
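+// Loads a log written by XRay's basic ("naive") mode: a 32-byte file header
+// followed by a sequence of fixed-size 32-byte records, whose layout is
+// documented in the loop below.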
+Error loadNaiveFormatLog(StringRef Data, bool IsLittleEndian,
+ XRayFileHeader &FileHeader,
+ std::vector<XRayRecord> &Records) {
+ if (Data.size() < 32)
+ return make_error<StringError>(
+ "Not enough bytes for an XRay log.",
+ std::make_error_code(std::errc::invalid_argument));
+
+ if (Data.size() - 32 == 0 || Data.size() % 32 != 0)
+ return make_error<StringError>(
+ "Invalid-sized XRay data.",
+ std::make_error_code(std::errc::invalid_argument));
+
+ DataExtractor Reader(Data, IsLittleEndian, 8);
+ uint64_t OffsetPtr = 0;
+ auto FileHeaderOrError = readBinaryFormatHeader(Reader, OffsetPtr);
+ if (!FileHeaderOrError)
+ return FileHeaderOrError.takeError();
+ FileHeader = std::move(FileHeaderOrError.get());
+
+ // Each record after the header will be 32 bytes, in the following format:
+ //
+ // (2) uint16 : record type
+ // (1) uint8 : cpu id
+ // (1) uint8 : type
+ // (4) sint32 : function id
+ // (8) uint64 : tsc
+ // (4) uint32 : thread id
+ // (4) uint32 : process id
+ // (8) - : padding
+ while (Reader.isValidOffset(OffsetPtr)) {
+ if (!Reader.isValidOffsetForDataOfSize(OffsetPtr, 32))
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Not enough bytes to read a full record at offset %" PRId64 ".",
+ OffsetPtr);
+ auto PreReadOffset = OffsetPtr;
+ auto RecordType = Reader.getU16(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading record type at offset %" PRId64 ".", OffsetPtr);
+
+ switch (RecordType) {
+ case 0: { // Normal records.
+ Records.emplace_back();
+ auto &Record = Records.back();
+ Record.RecordType = RecordType;
+
+ PreReadOffset = OffsetPtr;
+ Record.CPU = Reader.getU8(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading CPU field at offset %" PRId64 ".", OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ auto Type = Reader.getU8(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading record type field at offset %" PRId64 ".",
+ OffsetPtr);
+
+ switch (Type) {
+ case 0:
+ Record.Type = RecordTypes::ENTER;
+ break;
+ case 1:
+ Record.Type = RecordTypes::EXIT;
+ break;
+ case 2:
+ Record.Type = RecordTypes::TAIL_EXIT;
+ break;
+ case 3:
+ Record.Type = RecordTypes::ENTER_ARG;
+ break;
+ default:
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Unknown record type '%d' at offset %" PRId64 ".", Type, OffsetPtr);
+ }
+
+ PreReadOffset = OffsetPtr;
+ Record.FuncId = Reader.getSigned(&OffsetPtr, sizeof(int32_t));
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading function id field at offset %" PRId64 ".",
+ OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ Record.TSC = Reader.getU64(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading TSC field at offset %" PRId64 ".", OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ Record.TId = Reader.getU32(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading thread id field at offset %" PRId64 ".", OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ Record.PId = Reader.getU32(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading process id at offset %" PRId64 ".", OffsetPtr);
+
+ break;
+ }
+ case 1: { // Arg payload record.
+ auto &Record = Records.back();
+
+ // We skip the next two bytes of the record, because we don't need the
+ // record type and CPU fields for arg payloads.
+ OffsetPtr += 2;
+ PreReadOffset = OffsetPtr;
+ int32_t FuncId = Reader.getSigned(&OffsetPtr, sizeof(int32_t));
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading function id field at offset %" PRId64 ".",
+ OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ auto TId = Reader.getU32(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading thread id field at offset %" PRId64 ".", OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ auto PId = Reader.getU32(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading process id field at offset %" PRId64 ".",
+ OffsetPtr);
+
+ // Only check the PId field for version 3 and above.
+ if (Record.FuncId != FuncId || Record.TId != TId ||
+ (FileHeader.Version >= 3 ? Record.PId != PId : false))
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Corrupted log, found arg payload following non-matching "
+ "function+thread record. Record for function %d != %d at offset "
+ "%" PRId64 ".",
+ Record.FuncId, FuncId, OffsetPtr);
+
+ PreReadOffset = OffsetPtr;
+ auto Arg = Reader.getU64(&OffsetPtr);
+ if (OffsetPtr == PreReadOffset)
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Failed reading argument payload at offset %" PRId64 ".",
+ OffsetPtr);
+
+ Record.CallArgs.push_back(Arg);
+ break;
+ }
+ default:
+ return createStringError(
+ std::make_error_code(std::errc::executable_format_error),
+ "Unknown record type '%d' at offset %" PRId64 ".", RecordType,
+ OffsetPtr);
+ }
+ // Advance the offset pointer enough bytes to align to 32-byte records for
+ // basic mode logs.
+ OffsetPtr += 8;
+ }
+ return Error::success();
+}
+
+/// Reads a log in FDR mode for version 1 of this binary format. FDR mode is
+/// defined as part of the compiler-rt project in xray_fdr_logging.h, and such
+/// a log consists of the familiar 32 bit XRayHeader, followed by sequences of
+/// interspersed 16 byte Metadata Records and 8 byte Function Records.
+///
+/// The following is an attempt to document the grammar of the format, which is
+/// parsed by this function for little-endian machines. Since the format makes
+/// use of BitFields, when we support big-endian architectures, we will need to
+/// adjust not only the endianness parameter to llvm's RecordExtractor, but also
+/// the bit twiddling logic, which is consistent with the little-endian
+/// convention that BitFields within a struct will first be packed into the
+/// least significant bits of the address they belong to.
+///
+/// We expect a format complying with the grammar in the following pseudo-EBNF
+/// in Version 1 of the FDR log.
+///
+/// FDRLog: XRayFileHeader ThreadBuffer*
+/// XRayFileHeader: 32 bytes to identify the log as FDR with machine metadata.
+/// Includes BufferSize
+/// ThreadBuffer: NewBuffer WallClockTime NewCPUId FunctionSequence EOB
+/// BufSize: 8 byte unsigned integer indicating how large the buffer is.
+/// NewBuffer: 16 byte metadata record with Thread Id.
+/// WallClockTime: 16 byte metadata record with human readable time.
+/// Pid: 16 byte metadata record with Pid
+/// NewCPUId: 16 byte metadata record with CPUId and a 64 bit TSC reading.
+/// EOB: 16 byte record in a thread buffer plus memory garbage to fill BufSize.
+/// FunctionSequence: NewCPUId | TSCWrap | FunctionRecord
+/// TSCWrap: 16 byte metadata record with a full 64 bit TSC reading.
+/// FunctionRecord: 8 byte record with FunctionId, entry/exit, and TSC delta.
+///
+/// In Version 2, we make the following changes:
+///
+/// ThreadBuffer: BufferExtents NewBuffer WallClockTime NewCPUId
+/// FunctionSequence
+/// BufferExtents: 16 byte metadata record describing how many usable bytes are
+/// in the buffer. This is measured from the start of the buffer
+/// and must always be at least 48 (bytes).
+///
+/// In Version 3, we make the following changes:
+///
+/// ThreadBuffer: BufferExtents NewBuffer WallClockTime Pid NewCPUId
+/// FunctionSequence
+/// EOB: *deprecated*
+///
+/// In Version 4, we make the following changes:
+///
+/// CustomEventRecord now includes the CPU data.
+///
+/// In Version 5, we make the following changes:
+///
+/// CustomEventRecord and TypedEventRecord now use TSC delta encoding similar to
+/// what FunctionRecord instances use, and we no longer need to include the CPU
+/// id in the CustomEventRecord.
+///
+Error loadFDRLog(StringRef Data, bool IsLittleEndian,
+ XRayFileHeader &FileHeader, std::vector<XRayRecord> &Records) {
+
+ if (Data.size() < 32)
+ return createStringError(std::make_error_code(std::errc::invalid_argument),
+ "Not enough bytes for an XRay FDR log.");
+ DataExtractor DE(Data, IsLittleEndian, 8);
+
+ uint64_t OffsetPtr = 0;
+ auto FileHeaderOrError = readBinaryFormatHeader(DE, OffsetPtr);
+ if (!FileHeaderOrError)
+ return FileHeaderOrError.takeError();
+ FileHeader = std::move(FileHeaderOrError.get());
+
+ // First we load the records into memory.
+ std::vector<std::unique_ptr<Record>> FDRRecords;
+
+ {
+ FileBasedRecordProducer P(FileHeader, DE, OffsetPtr);
+ LogBuilderConsumer C(FDRRecords);
+ while (DE.isValidOffsetForDataOfSize(OffsetPtr, 1)) {
+ auto R = P.produce();
+ if (!R)
+ return R.takeError();
+ if (auto E = C.consume(std::move(R.get())))
+ return E;
+ }
+ }
+
+ // Next we index the records into blocks.
+ BlockIndexer::Index Index;
+ {
+ BlockIndexer Indexer(Index);
+ for (auto &R : FDRRecords)
+ if (auto E = R->apply(Indexer))
+ return E;
+ if (auto E = Indexer.flush())
+ return E;
+ }
+
+ // Then we verify the consistency of the blocks.
+ {
+ for (auto &PTB : Index) {
+ auto &Blocks = PTB.second;
+ for (auto &B : Blocks) {
+ BlockVerifier Verifier;
+ for (auto *R : B.Records)
+ if (auto E = R->apply(Verifier))
+ return E;
+ if (auto E = Verifier.verify())
+ return E;
+ }
+ }
+ }
+
+ // This is now the meat of the algorithm. Here we sort the blocks according to
+ // the Walltime record in each of the blocks for the same thread. This allows
+ // us to more consistently recreate the execution trace in temporal order.
+ // After the sort, we then reconstitute `Trace` records using a stateful
+ // visitor associated with a single process+thread pair.
+ {
+ for (auto &PTB : Index) {
+ auto &Blocks = PTB.second;
+ llvm::sort(Blocks, [](const BlockIndexer::Block &L,
+ const BlockIndexer::Block &R) {
+ // Order lexicographically by the (seconds, nanos) wallclock time.
+ return L.WallclockTime->seconds() < R.WallclockTime->seconds() ||
+ (L.WallclockTime->seconds() == R.WallclockTime->seconds() &&
+ L.WallclockTime->nanos() < R.WallclockTime->nanos());
+ });
+ auto Adder = [&](const XRayRecord &R) { Records.push_back(R); };
+ TraceExpander Expander(Adder, FileHeader.Version);
+ for (auto &B : Blocks) {
+ for (auto *R : B.Records)
+ if (auto E = R->apply(Expander))
+ return E;
+ }
+ if (auto E = Expander.flush())
+ return E;
+ }
+ }
+
+ return Error::success();
+}
+
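+// Loads a trace from its YAML representation. The header carries the version,
+// type, TSC flags and cycle frequency; each record carries the same fields as
+// the binary XRayRecord (see the transform below).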
+Error loadYAMLLog(StringRef Data, XRayFileHeader &FileHeader,
+ std::vector<XRayRecord> &Records) {
+ YAMLXRayTrace Trace;
+ Input In(Data);
+ In >> Trace;
+ if (In.error())
+ return make_error<StringError>("Failed loading YAML Data.", In.error());
+
+ FileHeader.Version = Trace.Header.Version;
+ FileHeader.Type = Trace.Header.Type;
+ FileHeader.ConstantTSC = Trace.Header.ConstantTSC;
+ FileHeader.NonstopTSC = Trace.Header.NonstopTSC;
+ FileHeader.CycleFrequency = Trace.Header.CycleFrequency;
+
+ if (FileHeader.Version != 1)
+ return make_error<StringError>(
+ Twine("Unsupported XRay file version: ") + Twine(FileHeader.Version),
+ std::make_error_code(std::errc::invalid_argument));
+
+ Records.clear();
+ std::transform(Trace.Records.begin(), Trace.Records.end(),
+ std::back_inserter(Records), [&](const YAMLXRayRecord &R) {
+ return XRayRecord{R.RecordType, R.CPU, R.Type,
+ R.FuncId, R.TSC, R.TId,
+ R.PId, R.CallArgs, R.Data};
+ });
+ return Error::success();
+}
+} // namespace
+
+Expected<Trace> llvm::xray::loadTraceFile(StringRef Filename, bool Sort) {
+ Expected<sys::fs::file_t> FdOrErr = sys::fs::openNativeFileForRead(Filename);
+ if (!FdOrErr)
+ return FdOrErr.takeError();
+
+ uint64_t FileSize;
+ if (auto EC = sys::fs::file_size(Filename, FileSize)) {
+ return make_error<StringError>(
+ Twine("Cannot read log from '") + Filename + "'", EC);
+ }
+ if (FileSize < 4) {
+ return make_error<StringError>(
+ Twine("File '") + Filename + "' too small for XRay.",
+ std::make_error_code(std::errc::executable_format_error));
+ }
+
+ // Map the opened file into memory and use a StringRef to access it later.
+ std::error_code EC;
+ sys::fs::mapped_file_region MappedFile(
+ *FdOrErr, sys::fs::mapped_file_region::mapmode::readonly, FileSize, 0,
+ EC);
+ sys::fs::closeFile(*FdOrErr);
+ if (EC) {
+ return make_error<StringError>(
+ Twine("Cannot read log from '") + Filename + "'", EC);
+ }
+ auto Data = StringRef(MappedFile.data(), MappedFile.size());
+
+ // TODO: Lift the endianness and implementation selection here.
+ DataExtractor LittleEndianDE(Data, true, 8);
+ auto TraceOrError = loadTrace(LittleEndianDE, Sort);
+ if (!TraceOrError) {
+ DataExtractor BigEndianDE(Data, false, 8);
+ consumeError(TraceOrError.takeError());
+ TraceOrError = loadTrace(BigEndianDE, Sort);
+ }
+ return TraceOrError;
+}
+
+Expected<Trace> llvm::xray::loadTrace(const DataExtractor &DE, bool Sort) {
+ // Attempt to detect the file type using file magic. We have a slight bias
+ // towards the binary format, and we do this by checking that the first 4
+ // bytes of the binary file are some combination of the following byte
+ // patterns (observe that the code loading them assumes they're little endian):
+ //
+ // 0x01 0x00 0x00 0x00 - version 1, "naive" format
+ // 0x01 0x00 0x01 0x00 - version 1, "flight data recorder" format
+ // 0x02 0x00 0x01 0x00 - version 2, "flight data recorder" format
+ //
+ // YAML files don't typically have those first four bytes as valid text so we
+ // try loading assuming YAML if we don't find these bytes.
+ //
+ // Only if we can't load either the binary or the YAML format will we yield an
+ // error.
+ DataExtractor HeaderExtractor(DE.getData(), DE.isLittleEndian(), 8);
+ uint64_t OffsetPtr = 0;
+ uint16_t Version = HeaderExtractor.getU16(&OffsetPtr);
+ uint16_t Type = HeaderExtractor.getU16(&OffsetPtr);
+
+ enum BinaryFormatType { NAIVE_FORMAT = 0, FLIGHT_DATA_RECORDER_FORMAT = 1 };
+
+ Trace T;
+ switch (Type) {
+ case NAIVE_FORMAT:
+ if (Version == 1 || Version == 2 || Version == 3) {
+ if (auto E = loadNaiveFormatLog(DE.getData(), DE.isLittleEndian(),
+ T.FileHeader, T.Records))
+ return std::move(E);
+ } else {
+ return make_error<StringError>(
+ Twine("Unsupported version for Basic/Naive Mode logging: ") +
+ Twine(Version),
+ std::make_error_code(std::errc::executable_format_error));
+ }
+ break;
+ case FLIGHT_DATA_RECORDER_FORMAT:
+ if (Version >= 1 && Version <= 5) {
+ if (auto E = loadFDRLog(DE.getData(), DE.isLittleEndian(), T.FileHeader,
+ T.Records))
+ return std::move(E);
+ } else {
+ return make_error<StringError>(
+ Twine("Unsupported version for FDR Mode logging: ") + Twine(Version),
+ std::make_error_code(std::errc::executable_format_error));
+ }
+ break;
+ default:
+ if (auto E = loadYAMLLog(DE.getData(), T.FileHeader, T.Records))
+ return std::move(E);
+ }
+
+ if (Sort)
+ llvm::stable_sort(T.Records, [&](const XRayRecord &L, const XRayRecord &R) {
+ return L.TSC < R.TSC;
+ });
+
+ return std::move(T);
+}