aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/llvm14/lib/ExecutionEngine
diff options
context:
space:
mode:
authorvitalyisaev <vitalyisaev@yandex-team.com>2023-06-29 10:00:50 +0300
committervitalyisaev <vitalyisaev@yandex-team.com>2023-06-29 10:00:50 +0300
commit6ffe9e53658409f212834330e13564e4952558f6 (patch)
tree85b1e00183517648b228aafa7c8fb07f5276f419 /contrib/libs/llvm14/lib/ExecutionEngine
parent726057070f9c5a91fc10fde0d5024913d10f1ab9 (diff)
downloadydb-6ffe9e53658409f212834330e13564e4952558f6.tar.gz
YQ Connector: support managed ClickHouse
Со стороны dqrun можно обратиться к инстансу коннектора, который работает на streaming стенде, и извлечь данные из облачного CH.
Diffstat (limited to 'contrib/libs/llvm14/lib/ExecutionEngine')
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/ExecutionEngine.cpp1308
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/ExecutionEngineBindings.cpp447
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/GDBRegistrationListener.cpp245
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Execution.cpp2168
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp511
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Interpreter.cpp102
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Interpreter.h235
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/ya.make35
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h116
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp806
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h134
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF.cpp103
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp33
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h530
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp183
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_riscv.cpp566
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp440
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLink.cpp421
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp314
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkGeneric.h161
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp470
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO.cpp90
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp816
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h250
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO_arm64.cpp771
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp516
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MemoryFlags.cpp33
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h126
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/aarch64.cpp30
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/riscv.cpp72
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/x86_64.cpp189
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ya.make49
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/MCJIT.cpp684
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/MCJIT.h336
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/ya.make32
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp382
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/CompileUtils.cpp95
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Core.cpp3030
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp516
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebugUtils.cpp348
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp470
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp828
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp52
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp70
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp71
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp107
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp174
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp319
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp427
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/ExecutionUtils.cpp407
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp197
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/IRCompileLayer.cpp48
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/IRTransformLayer.cpp33
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/IndirectionUtils.cpp453
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp146
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/LLJIT.cpp933
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Layer.cpp223
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/LazyReexports.cpp234
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp82
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/MachOPlatform.cpp988
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Mangling.cpp84
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp205
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp833
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp44
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/OrcABISupport.cpp910
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp1018
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp355
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp44
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/OrcError.cpp120
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp47
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp250
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/ya.make29
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp406
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp306
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/Speculation.cpp143
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp129
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp83
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h36
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp183
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp129
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp261
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp293
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp43
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/ya.make33
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/TaskDispatch.cpp48
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp64
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/Orc/ya.make78
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp507
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents/ya.make32
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp169
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp297
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp1482
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp121
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h61
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp910
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h74
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp2396
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h202
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h584
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp382
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h167
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h376
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h228
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h325
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h315
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp320
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h66
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h541
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h431
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h250
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h238
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/ya.make37
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/SectionMemoryManager.cpp273
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/TargetSelect.cpp96
-rw-r--r--contrib/libs/llvm14/lib/ExecutionEngine/ya.make37
115 files changed, 41046 insertions, 0 deletions
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/ExecutionEngine.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/ExecutionEngine.cpp
new file mode 100644
index 0000000000..a14bd4d2c3
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/ExecutionEngine.cpp
@@ -0,0 +1,1308 @@
+//===-- ExecutionEngine.cpp - Common Implementation shared by EEs ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the common interface used by the various execution engine
+// subclasses.
+//
+// FIXME: This file needs to be updated to support scalable vectors
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetMachine.h"
+#include <cmath>
+#include <cstring>
+#include <mutex>
+using namespace llvm;
+
+#define DEBUG_TYPE "jit"
+
+STATISTIC(NumInitBytes, "Number of bytes of global vars initialized");
+STATISTIC(NumGlobals , "Number of global vars initialized");
+
+ExecutionEngine *(*ExecutionEngine::MCJITCtor)(
+ std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM) = nullptr;
+
+ExecutionEngine *(*ExecutionEngine::InterpCtor)(std::unique_ptr<Module> M,
+ std::string *ErrorStr) =nullptr;
+
+void JITEventListener::anchor() {}
+
+void ObjectCache::anchor() {}
+
+void ExecutionEngine::Init(std::unique_ptr<Module> M) {
+ CompilingLazily = false;
+ GVCompilationDisabled = false;
+ SymbolSearchingDisabled = false;
+
+ // IR module verification is enabled by default in debug builds, and disabled
+ // by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+
+ assert(M && "Module is null?");
+ Modules.push_back(std::move(M));
+}
+
+ExecutionEngine::ExecutionEngine(std::unique_ptr<Module> M)
+ : DL(M->getDataLayout()), LazyFunctionCreator(nullptr) {
+ Init(std::move(M));
+}
+
+ExecutionEngine::ExecutionEngine(DataLayout DL, std::unique_ptr<Module> M)
+ : DL(std::move(DL)), LazyFunctionCreator(nullptr) {
+ Init(std::move(M));
+}
+
+ExecutionEngine::~ExecutionEngine() {
+ clearAllGlobalMappings();
+}
+
+namespace {
+/// Helper class which uses a value handler to automatically deletes the
+/// memory block when the GlobalVariable is destroyed.
+class GVMemoryBlock final : public CallbackVH {
+ GVMemoryBlock(const GlobalVariable *GV)
+ : CallbackVH(const_cast<GlobalVariable*>(GV)) {}
+
+public:
+ /// Returns the address the GlobalVariable should be written into. The
+ /// GVMemoryBlock object prefixes that.
+ static char *Create(const GlobalVariable *GV, const DataLayout& TD) {
+ Type *ElTy = GV->getValueType();
+ size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
+ void *RawMemory = ::operator new(
+ alignTo(sizeof(GVMemoryBlock), TD.getPreferredAlign(GV)) + GVSize);
+ new(RawMemory) GVMemoryBlock(GV);
+ return static_cast<char*>(RawMemory) + sizeof(GVMemoryBlock);
+ }
+
+ void deleted() override {
+ // We allocated with operator new and with some extra memory hanging off the
+ // end, so don't just delete this. I'm not sure if this is actually
+ // required.
+ this->~GVMemoryBlock();
+ ::operator delete(this);
+ }
+};
+} // anonymous namespace
+
+char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
+ return GVMemoryBlock::Create(GV, getDataLayout());
+}
+
+void ExecutionEngine::addObjectFile(std::unique_ptr<object::ObjectFile> O) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
+}
+
+void
+ExecutionEngine::addObjectFile(object::OwningBinary<object::ObjectFile> O) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addObjectFile.");
+}
+
+void ExecutionEngine::addArchive(object::OwningBinary<object::Archive> A) {
+ llvm_unreachable("ExecutionEngine subclass doesn't implement addArchive.");
+}
+
+bool ExecutionEngine::removeModule(Module *M) {
+ for (auto I = Modules.begin(), E = Modules.end(); I != E; ++I) {
+ Module *Found = I->get();
+ if (Found == M) {
+ I->release();
+ Modules.erase(I);
+ clearGlobalMappingsFromModule(M);
+ return true;
+ }
+ }
+ return false;
+}
+
+Function *ExecutionEngine::FindFunctionNamed(StringRef FnName) {
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
+ Function *F = Modules[i]->getFunction(FnName);
+ if (F && !F->isDeclaration())
+ return F;
+ }
+ return nullptr;
+}
+
+GlobalVariable *ExecutionEngine::FindGlobalVariableNamed(StringRef Name, bool AllowInternal) {
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i) {
+ GlobalVariable *GV = Modules[i]->getGlobalVariable(Name,AllowInternal);
+ if (GV && !GV->isDeclaration())
+ return GV;
+ }
+ return nullptr;
+}
+
+uint64_t ExecutionEngineState::RemoveMapping(StringRef Name) {
+ GlobalAddressMapTy::iterator I = GlobalAddressMap.find(Name);
+ uint64_t OldVal;
+
+ // FIXME: This is silly, we shouldn't end up with a mapping -> 0 in the
+ // GlobalAddressMap.
+ if (I == GlobalAddressMap.end())
+ OldVal = 0;
+ else {
+ GlobalAddressReverseMap.erase(I->second);
+ OldVal = I->second;
+ GlobalAddressMap.erase(I);
+ }
+
+ return OldVal;
+}
+
+std::string ExecutionEngine::getMangledName(const GlobalValue *GV) {
+ assert(GV->hasName() && "Global must have name.");
+
+ std::lock_guard<sys::Mutex> locked(lock);
+ SmallString<128> FullName;
+
+ const DataLayout &DL =
+ GV->getParent()->getDataLayout().isDefault()
+ ? getDataLayout()
+ : GV->getParent()->getDataLayout();
+
+ Mangler::getNameWithPrefix(FullName, GV->getName(), DL);
+ return std::string(FullName.str());
+}
+
+void ExecutionEngine::addGlobalMapping(const GlobalValue *GV, void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ addGlobalMapping(getMangledName(GV), (uint64_t) Addr);
+}
+
+void ExecutionEngine::addGlobalMapping(StringRef Name, uint64_t Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ assert(!Name.empty() && "Empty GlobalMapping symbol name!");
+
+ LLVM_DEBUG(dbgs() << "JIT: Map \'" << Name << "\' to [" << Addr << "]\n";);
+ uint64_t &CurVal = EEState.getGlobalAddressMap()[Name];
+ assert((!CurVal || !Addr) && "GlobalMapping already established!");
+ CurVal = Addr;
+
+ // If we are using the reverse mapping, add it too.
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
+ std::string &V = EEState.getGlobalAddressReverseMap()[CurVal];
+ assert((!V.empty() || !Name.empty()) &&
+ "GlobalMapping already established!");
+ V = std::string(Name);
+ }
+}
+
+void ExecutionEngine::clearAllGlobalMappings() {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ EEState.getGlobalAddressMap().clear();
+ EEState.getGlobalAddressReverseMap().clear();
+}
+
+void ExecutionEngine::clearGlobalMappingsFromModule(Module *M) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ for (GlobalObject &GO : M->global_objects())
+ EEState.RemoveMapping(getMangledName(&GO));
+}
+
+uint64_t ExecutionEngine::updateGlobalMapping(const GlobalValue *GV,
+ void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ return updateGlobalMapping(getMangledName(GV), (uint64_t) Addr);
+}
+
+uint64_t ExecutionEngine::updateGlobalMapping(StringRef Name, uint64_t Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ ExecutionEngineState::GlobalAddressMapTy &Map =
+ EEState.getGlobalAddressMap();
+
+ // Deleting from the mapping?
+ if (!Addr)
+ return EEState.RemoveMapping(Name);
+
+ uint64_t &CurVal = Map[Name];
+ uint64_t OldVal = CurVal;
+
+ if (CurVal && !EEState.getGlobalAddressReverseMap().empty())
+ EEState.getGlobalAddressReverseMap().erase(CurVal);
+ CurVal = Addr;
+
+ // If we are using the reverse mapping, add it too.
+ if (!EEState.getGlobalAddressReverseMap().empty()) {
+ std::string &V = EEState.getGlobalAddressReverseMap()[CurVal];
+ assert((!V.empty() || !Name.empty()) &&
+ "GlobalMapping already established!");
+ V = std::string(Name);
+ }
+ return OldVal;
+}
+
+uint64_t ExecutionEngine::getAddressToGlobalIfAvailable(StringRef S) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ uint64_t Address = 0;
+ ExecutionEngineState::GlobalAddressMapTy::iterator I =
+ EEState.getGlobalAddressMap().find(S);
+ if (I != EEState.getGlobalAddressMap().end())
+ Address = I->second;
+ return Address;
+}
+
+
+void *ExecutionEngine::getPointerToGlobalIfAvailable(StringRef S) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ if (void* Address = (void *) getAddressToGlobalIfAvailable(S))
+ return Address;
+ return nullptr;
+}
+
+void *ExecutionEngine::getPointerToGlobalIfAvailable(const GlobalValue *GV) {
+ std::lock_guard<sys::Mutex> locked(lock);
+ return getPointerToGlobalIfAvailable(getMangledName(GV));
+}
+
+const GlobalValue *ExecutionEngine::getGlobalValueAtAddress(void *Addr) {
+ std::lock_guard<sys::Mutex> locked(lock);
+
+ // If we haven't computed the reverse mapping yet, do so first.
+ if (EEState.getGlobalAddressReverseMap().empty()) {
+ for (ExecutionEngineState::GlobalAddressMapTy::iterator
+ I = EEState.getGlobalAddressMap().begin(),
+ E = EEState.getGlobalAddressMap().end(); I != E; ++I) {
+ StringRef Name = I->first();
+ uint64_t Addr = I->second;
+ EEState.getGlobalAddressReverseMap().insert(
+ std::make_pair(Addr, std::string(Name)));
+ }
+ }
+
+ std::map<uint64_t, std::string>::iterator I =
+ EEState.getGlobalAddressReverseMap().find((uint64_t) Addr);
+
+ if (I != EEState.getGlobalAddressReverseMap().end()) {
+ StringRef Name = I->second;
+ for (unsigned i = 0, e = Modules.size(); i != e; ++i)
+ if (GlobalValue *GV = Modules[i]->getNamedValue(Name))
+ return GV;
+ }
+ return nullptr;
+}
+
+namespace {
+class ArgvArray {
+ std::unique_ptr<char[]> Array;
+ std::vector<std::unique_ptr<char[]>> Values;
+public:
+ /// Turn a vector of strings into a nice argv style array of pointers to null
+ /// terminated strings.
+ void *reset(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv);
+};
+} // anonymous namespace
+void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
+ const std::vector<std::string> &InputArgv) {
+ Values.clear(); // Free the old contents.
+ Values.reserve(InputArgv.size());
+ unsigned PtrSize = EE->getDataLayout().getPointerSize();
+ Array = std::make_unique<char[]>((InputArgv.size()+1)*PtrSize);
+
+ LLVM_DEBUG(dbgs() << "JIT: ARGV = " << (void *)Array.get() << "\n");
+ Type *SBytePtr = Type::getInt8PtrTy(C);
+
+ for (unsigned i = 0; i != InputArgv.size(); ++i) {
+ unsigned Size = InputArgv[i].size()+1;
+ auto Dest = std::make_unique<char[]>(Size);
+ LLVM_DEBUG(dbgs() << "JIT: ARGV[" << i << "] = " << (void *)Dest.get()
+ << "\n");
+
+ std::copy(InputArgv[i].begin(), InputArgv[i].end(), Dest.get());
+ Dest[Size-1] = 0;
+
+ // Endian safe: Array[i] = (PointerTy)Dest;
+ EE->StoreValueToMemory(PTOGV(Dest.get()),
+ (GenericValue*)(&Array[i*PtrSize]), SBytePtr);
+ Values.push_back(std::move(Dest));
+ }
+
+ // Null terminate it
+ EE->StoreValueToMemory(PTOGV(nullptr),
+ (GenericValue*)(&Array[InputArgv.size()*PtrSize]),
+ SBytePtr);
+ return Array.get();
+}
+
+void ExecutionEngine::runStaticConstructorsDestructors(Module &module,
+ bool isDtors) {
+ StringRef Name(isDtors ? "llvm.global_dtors" : "llvm.global_ctors");
+ GlobalVariable *GV = module.getNamedGlobal(Name);
+
+ // If this global has internal linkage, or if it has a use, then it must be
+ // an old-style (llvmgcc3) static ctor with __main linked in and in use. If
+ // this is the case, don't execute any of the global ctors, __main will do
+ // it.
+ if (!GV || GV->isDeclaration() || GV->hasLocalLinkage()) return;
+
+ // Should be an array of '{ i32, void ()* }' structs. The first value is
+ // the init priority, which we ignore.
+ ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!InitList)
+ return;
+ for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i));
+ if (!CS) continue;
+
+ Constant *FP = CS->getOperand(1);
+ if (FP->isNullValue())
+ continue; // Found a sentinal value, ignore.
+
+ // Strip off constant expression casts.
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
+ if (CE->isCast())
+ FP = CE->getOperand(0);
+
+ // Execute the ctor/dtor function!
+ if (Function *F = dyn_cast<Function>(FP))
+ runFunction(F, None);
+
+ // FIXME: It is marginally lame that we just do nothing here if we see an
+ // entry we don't recognize. It might not be unreasonable for the verifier
+ // to not even allow this and just assert here.
+ }
+}
+
+void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
+ // Execute global ctors/dtors for each module in the program.
+ for (std::unique_ptr<Module> &M : Modules)
+ runStaticConstructorsDestructors(*M, isDtors);
+}
+
+#ifndef NDEBUG
+/// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
+static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
+ unsigned PtrSize = EE->getDataLayout().getPointerSize();
+ for (unsigned i = 0; i < PtrSize; ++i)
+ if (*(i + (uint8_t*)Loc))
+ return false;
+ return true;
+}
+#endif
+
+int ExecutionEngine::runFunctionAsMain(Function *Fn,
+ const std::vector<std::string> &argv,
+ const char * const * envp) {
+ std::vector<GenericValue> GVArgs;
+ GenericValue GVArgc;
+ GVArgc.IntVal = APInt(32, argv.size());
+
+ // Check main() type
+ unsigned NumArgs = Fn->getFunctionType()->getNumParams();
+ FunctionType *FTy = Fn->getFunctionType();
+ Type* PPInt8Ty = Type::getInt8PtrTy(Fn->getContext())->getPointerTo();
+
+ // Check the argument types.
+ if (NumArgs > 3)
+ report_fatal_error("Invalid number of arguments of main() supplied");
+ if (NumArgs >= 3 && FTy->getParamType(2) != PPInt8Ty)
+ report_fatal_error("Invalid type for third argument of main() supplied");
+ if (NumArgs >= 2 && FTy->getParamType(1) != PPInt8Ty)
+ report_fatal_error("Invalid type for second argument of main() supplied");
+ if (NumArgs >= 1 && !FTy->getParamType(0)->isIntegerTy(32))
+ report_fatal_error("Invalid type for first argument of main() supplied");
+ if (!FTy->getReturnType()->isIntegerTy() &&
+ !FTy->getReturnType()->isVoidTy())
+ report_fatal_error("Invalid return type of main() supplied");
+
+ ArgvArray CArgv;
+ ArgvArray CEnv;
+ if (NumArgs) {
+ GVArgs.push_back(GVArgc); // Arg #0 = argc.
+ if (NumArgs > 1) {
+ // Arg #1 = argv.
+ GVArgs.push_back(PTOGV(CArgv.reset(Fn->getContext(), this, argv)));
+ assert(!isTargetNullPtr(this, GVTOP(GVArgs[1])) &&
+ "argv[0] was null after CreateArgv");
+ if (NumArgs > 2) {
+ std::vector<std::string> EnvVars;
+ for (unsigned i = 0; envp[i]; ++i)
+ EnvVars.emplace_back(envp[i]);
+ // Arg #2 = envp.
+ GVArgs.push_back(PTOGV(CEnv.reset(Fn->getContext(), this, EnvVars)));
+ }
+ }
+ }
+
+ return runFunction(Fn, GVArgs).IntVal.getZExtValue();
+}
+
+EngineBuilder::EngineBuilder() : EngineBuilder(nullptr) {}
+
+EngineBuilder::EngineBuilder(std::unique_ptr<Module> M)
+ : M(std::move(M)), WhichEngine(EngineKind::Either), ErrorStr(nullptr),
+ OptLevel(CodeGenOpt::Default), MemMgr(nullptr), Resolver(nullptr) {
+// IR module verification is enabled by default in debug builds, and disabled
+// by default in release builds.
+#ifndef NDEBUG
+ VerifyModules = true;
+#else
+ VerifyModules = false;
+#endif
+}
+
+EngineBuilder::~EngineBuilder() = default;
+
+EngineBuilder &EngineBuilder::setMCJITMemoryManager(
+ std::unique_ptr<RTDyldMemoryManager> mcjmm) {
+ auto SharedMM = std::shared_ptr<RTDyldMemoryManager>(std::move(mcjmm));
+ MemMgr = SharedMM;
+ Resolver = SharedMM;
+ return *this;
+}
+
+EngineBuilder&
+EngineBuilder::setMemoryManager(std::unique_ptr<MCJITMemoryManager> MM) {
+ MemMgr = std::shared_ptr<MCJITMemoryManager>(std::move(MM));
+ return *this;
+}
+
+EngineBuilder &
+EngineBuilder::setSymbolResolver(std::unique_ptr<LegacyJITSymbolResolver> SR) {
+ Resolver = std::shared_ptr<LegacyJITSymbolResolver>(std::move(SR));
+ return *this;
+}
+
+ExecutionEngine *EngineBuilder::create(TargetMachine *TM) {
+ std::unique_ptr<TargetMachine> TheTM(TM); // Take ownership.
+
+ // Make sure we can resolve symbols in the program as well. The zero arg
+ // to the function tells DynamicLibrary to load the program, not a library.
+ if (sys::DynamicLibrary::LoadLibraryPermanently(nullptr, ErrorStr))
+ return nullptr;
+
+ // If the user specified a memory manager but didn't specify which engine to
+ // create, we assume they only want the JIT, and we fail if they only want
+ // the interpreter.
+ if (MemMgr) {
+ if (WhichEngine & EngineKind::JIT)
+ WhichEngine = EngineKind::JIT;
+ else {
+ if (ErrorStr)
+ *ErrorStr = "Cannot create an interpreter with a memory manager.";
+ return nullptr;
+ }
+ }
+
+ // Unless the interpreter was explicitly selected or the JIT is not linked,
+ // try making a JIT.
+ if ((WhichEngine & EngineKind::JIT) && TheTM) {
+ if (!TM->getTarget().hasJIT()) {
+ errs() << "WARNING: This target JIT is not designed for the host"
+ << " you are running. If bad things happen, please choose"
+ << " a different -march switch.\n";
+ }
+
+ ExecutionEngine *EE = nullptr;
+ if (ExecutionEngine::MCJITCtor)
+ EE = ExecutionEngine::MCJITCtor(std::move(M), ErrorStr, std::move(MemMgr),
+ std::move(Resolver), std::move(TheTM));
+
+ if (EE) {
+ EE->setVerifyModules(VerifyModules);
+ return EE;
+ }
+ }
+
+ // If we can't make a JIT and we didn't request one specifically, try making
+ // an interpreter instead.
+ if (WhichEngine & EngineKind::Interpreter) {
+ if (ExecutionEngine::InterpCtor)
+ return ExecutionEngine::InterpCtor(std::move(M), ErrorStr);
+ if (ErrorStr)
+ *ErrorStr = "Interpreter has not been linked in.";
+ return nullptr;
+ }
+
+ if ((WhichEngine & EngineKind::JIT) && !ExecutionEngine::MCJITCtor) {
+ if (ErrorStr)
+ *ErrorStr = "JIT has not been linked in.";
+ }
+
+ return nullptr;
+}
+
+void *ExecutionEngine::getPointerToGlobal(const GlobalValue *GV) {
+ if (Function *F = const_cast<Function*>(dyn_cast<Function>(GV)))
+ return getPointerToFunction(F);
+
+ std::lock_guard<sys::Mutex> locked(lock);
+ if (void* P = getPointerToGlobalIfAvailable(GV))
+ return P;
+
+ // Global variable might have been added since interpreter started.
+ if (GlobalVariable *GVar =
+ const_cast<GlobalVariable *>(dyn_cast<GlobalVariable>(GV)))
+ emitGlobalVariable(GVar);
+ else
+ llvm_unreachable("Global hasn't had an address allocated yet!");
+
+ return getPointerToGlobalIfAvailable(GV);
+}
+
+/// Converts a Constant* into a GenericValue, including handling of
+/// ConstantExpr values.
+GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
+ // If its undefined, return the garbage.
+ if (isa<UndefValue>(C)) {
+ GenericValue Result;
+ switch (C->getType()->getTypeID()) {
+ default:
+ break;
+ case Type::IntegerTyID:
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ // Although the value is undefined, we still have to construct an APInt
+ // with the correct bit width.
+ Result.IntVal = APInt(C->getType()->getPrimitiveSizeInBits(), 0);
+ break;
+ case Type::StructTyID: {
+ // if the whole struct is 'undef' just reserve memory for the value.
+ if(StructType *STy = dyn_cast<StructType>(C->getType())) {
+ unsigned int elemNum = STy->getNumElements();
+ Result.AggregateVal.resize(elemNum);
+ for (unsigned int i = 0; i < elemNum; ++i) {
+ Type *ElemTy = STy->getElementType(i);
+ if (ElemTy->isIntegerTy())
+ Result.AggregateVal[i].IntVal =
+ APInt(ElemTy->getPrimitiveSizeInBits(), 0);
+ else if (ElemTy->isAggregateType()) {
+ const Constant *ElemUndef = UndefValue::get(ElemTy);
+ Result.AggregateVal[i] = getConstantValue(ElemUndef);
+ }
+ }
+ }
+ }
+ break;
+ case Type::ScalableVectorTyID:
+ report_fatal_error(
+ "Scalable vector support not yet implemented in ExecutionEngine");
+ case Type::FixedVectorTyID:
+ // if the whole vector is 'undef' just reserve memory for the value.
+ auto *VTy = cast<FixedVectorType>(C->getType());
+ Type *ElemTy = VTy->getElementType();
+ unsigned int elemNum = VTy->getNumElements();
+ Result.AggregateVal.resize(elemNum);
+ if (ElemTy->isIntegerTy())
+ for (unsigned int i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].IntVal =
+ APInt(ElemTy->getPrimitiveSizeInBits(), 0);
+ break;
+ }
+ return Result;
+ }
+
+ // Otherwise, if the value is a ConstantExpr...
+ if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
+ Constant *Op0 = CE->getOperand(0);
+ switch (CE->getOpcode()) {
+ case Instruction::GetElementPtr: {
+ // Compute the index
+ GenericValue Result = getConstantValue(Op0);
+ APInt Offset(DL.getPointerSizeInBits(), 0);
+ cast<GEPOperator>(CE)->accumulateConstantOffset(DL, Offset);
+
+ char* tmp = (char*) Result.PointerVal;
+ Result = PTOGV(tmp + Offset.getSExtValue());
+ return Result;
+ }
+ case Instruction::Trunc: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.trunc(BitWidth);
+ return GV;
+ }
+ case Instruction::ZExt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.zext(BitWidth);
+ return GV;
+ }
+ case Instruction::SExt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ GV.IntVal = GV.IntVal.sext(BitWidth);
+ return GV;
+ }
+ case Instruction::FPTrunc: {
+ // FIXME long double
+ GenericValue GV = getConstantValue(Op0);
+ GV.FloatVal = float(GV.DoubleVal);
+ return GV;
+ }
+ case Instruction::FPExt:{
+ // FIXME long double
+ GenericValue GV = getConstantValue(Op0);
+ GV.DoubleVal = double(GV.FloatVal);
+ return GV;
+ }
+ case Instruction::UIToFP: {
+ GenericValue GV = getConstantValue(Op0);
+ if (CE->getType()->isFloatTy())
+ GV.FloatVal = float(GV.IntVal.roundToDouble());
+ else if (CE->getType()->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.roundToDouble();
+ else if (CE->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended());
+ (void)apf.convertFromAPInt(GV.IntVal,
+ false,
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apf.bitcastToAPInt();
+ }
+ return GV;
+ }
+ case Instruction::SIToFP: {
+ GenericValue GV = getConstantValue(Op0);
+ if (CE->getType()->isFloatTy())
+ GV.FloatVal = float(GV.IntVal.signedRoundToDouble());
+ else if (CE->getType()->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.signedRoundToDouble();
+ else if (CE->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat::getZero(APFloat::x87DoubleExtended());
+ (void)apf.convertFromAPInt(GV.IntVal,
+ true,
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apf.bitcastToAPInt();
+ }
+ return GV;
+ }
+ case Instruction::FPToUI: // double->APInt conversion handles sign
+ case Instruction::FPToSI: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t BitWidth = cast<IntegerType>(CE->getType())->getBitWidth();
+ if (Op0->getType()->isFloatTy())
+ GV.IntVal = APIntOps::RoundFloatToAPInt(GV.FloatVal, BitWidth);
+ else if (Op0->getType()->isDoubleTy())
+ GV.IntVal = APIntOps::RoundDoubleToAPInt(GV.DoubleVal, BitWidth);
+ else if (Op0->getType()->isX86_FP80Ty()) {
+ APFloat apf = APFloat(APFloat::x87DoubleExtended(), GV.IntVal);
+ uint64_t v;
+ bool ignored;
+ (void)apf.convertToInteger(makeMutableArrayRef(v), BitWidth,
+ CE->getOpcode()==Instruction::FPToSI,
+ APFloat::rmTowardZero, &ignored);
+ GV.IntVal = v; // endian?
+ }
+ return GV;
+ }
+ case Instruction::PtrToInt: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t PtrWidth = DL.getTypeSizeInBits(Op0->getType());
+ assert(PtrWidth <= 64 && "Bad pointer width");
+ GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
+ uint32_t IntWidth = DL.getTypeSizeInBits(CE->getType());
+ GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
+ return GV;
+ }
+ case Instruction::IntToPtr: {
+ GenericValue GV = getConstantValue(Op0);
+ uint32_t PtrWidth = DL.getTypeSizeInBits(CE->getType());
+ GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
+ assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
+ GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
+ return GV;
+ }
+ case Instruction::BitCast: {
+ GenericValue GV = getConstantValue(Op0);
+ Type* DestTy = CE->getType();
+ switch (Op0->getType()->getTypeID()) {
+ default: llvm_unreachable("Invalid bitcast operand");
+ case Type::IntegerTyID:
+ assert(DestTy->isFloatingPointTy() && "invalid bitcast");
+ if (DestTy->isFloatTy())
+ GV.FloatVal = GV.IntVal.bitsToFloat();
+ else if (DestTy->isDoubleTy())
+ GV.DoubleVal = GV.IntVal.bitsToDouble();
+ break;
+ case Type::FloatTyID:
+ assert(DestTy->isIntegerTy(32) && "Invalid bitcast");
+ GV.IntVal = APInt::floatToBits(GV.FloatVal);
+ break;
+ case Type::DoubleTyID:
+ assert(DestTy->isIntegerTy(64) && "Invalid bitcast");
+ GV.IntVal = APInt::doubleToBits(GV.DoubleVal);
+ break;
+ case Type::PointerTyID:
+ assert(DestTy->isPointerTy() && "Invalid bitcast");
+ break; // getConstantValue(Op0) above already converted it
+ }
+ return GV;
+ }
+ case Instruction::Add:
+ case Instruction::FAdd:
+ case Instruction::Sub:
+ case Instruction::FSub:
+ case Instruction::Mul:
+ case Instruction::FMul:
+ case Instruction::UDiv:
+ case Instruction::SDiv:
+ case Instruction::URem:
+ case Instruction::SRem:
+ case Instruction::And:
+ case Instruction::Or:
+ case Instruction::Xor: {
+ GenericValue LHS = getConstantValue(Op0);
+ GenericValue RHS = getConstantValue(CE->getOperand(1));
+ GenericValue GV;
+ switch (CE->getOperand(0)->getType()->getTypeID()) {
+ default: llvm_unreachable("Bad add type!");
+ case Type::IntegerTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid integer opcode");
+ case Instruction::Add: GV.IntVal = LHS.IntVal + RHS.IntVal; break;
+ case Instruction::Sub: GV.IntVal = LHS.IntVal - RHS.IntVal; break;
+ case Instruction::Mul: GV.IntVal = LHS.IntVal * RHS.IntVal; break;
+ case Instruction::UDiv:GV.IntVal = LHS.IntVal.udiv(RHS.IntVal); break;
+ case Instruction::SDiv:GV.IntVal = LHS.IntVal.sdiv(RHS.IntVal); break;
+ case Instruction::URem:GV.IntVal = LHS.IntVal.urem(RHS.IntVal); break;
+ case Instruction::SRem:GV.IntVal = LHS.IntVal.srem(RHS.IntVal); break;
+ case Instruction::And: GV.IntVal = LHS.IntVal & RHS.IntVal; break;
+ case Instruction::Or: GV.IntVal = LHS.IntVal | RHS.IntVal; break;
+ case Instruction::Xor: GV.IntVal = LHS.IntVal ^ RHS.IntVal; break;
+ }
+ break;
+ case Type::FloatTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid float opcode");
+ case Instruction::FAdd:
+ GV.FloatVal = LHS.FloatVal + RHS.FloatVal; break;
+ case Instruction::FSub:
+ GV.FloatVal = LHS.FloatVal - RHS.FloatVal; break;
+ case Instruction::FMul:
+ GV.FloatVal = LHS.FloatVal * RHS.FloatVal; break;
+ case Instruction::FDiv:
+ GV.FloatVal = LHS.FloatVal / RHS.FloatVal; break;
+ case Instruction::FRem:
+ GV.FloatVal = std::fmod(LHS.FloatVal,RHS.FloatVal); break;
+ }
+ break;
+ case Type::DoubleTyID:
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid double opcode");
+ case Instruction::FAdd:
+ GV.DoubleVal = LHS.DoubleVal + RHS.DoubleVal; break;
+ case Instruction::FSub:
+ GV.DoubleVal = LHS.DoubleVal - RHS.DoubleVal; break;
+ case Instruction::FMul:
+ GV.DoubleVal = LHS.DoubleVal * RHS.DoubleVal; break;
+ case Instruction::FDiv:
+ GV.DoubleVal = LHS.DoubleVal / RHS.DoubleVal; break;
+ case Instruction::FRem:
+ GV.DoubleVal = std::fmod(LHS.DoubleVal,RHS.DoubleVal); break;
+ }
+ break;
+ case Type::X86_FP80TyID:
+ case Type::PPC_FP128TyID:
+ case Type::FP128TyID: {
+ const fltSemantics &Sem = CE->getOperand(0)->getType()->getFltSemantics();
+ APFloat apfLHS = APFloat(Sem, LHS.IntVal);
+ switch (CE->getOpcode()) {
+ default: llvm_unreachable("Invalid long double opcode");
+ case Instruction::FAdd:
+ apfLHS.add(APFloat(Sem, RHS.IntVal), APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FSub:
+ apfLHS.subtract(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FMul:
+ apfLHS.multiply(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FDiv:
+ apfLHS.divide(APFloat(Sem, RHS.IntVal),
+ APFloat::rmNearestTiesToEven);
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ case Instruction::FRem:
+ apfLHS.mod(APFloat(Sem, RHS.IntVal));
+ GV.IntVal = apfLHS.bitcastToAPInt();
+ break;
+ }
+ }
+ break;
+ }
+ return GV;
+ }
+ default:
+ break;
+ }
+
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "ConstantExpr not handled: " << *CE;
+ report_fatal_error(OS.str());
+ }
+
+ // Otherwise, we have a simple constant.
+ GenericValue Result;
+ switch (C->getType()->getTypeID()) {
+ case Type::FloatTyID:
+ Result.FloatVal = cast<ConstantFP>(C)->getValueAPF().convertToFloat();
+ break;
+ case Type::DoubleTyID:
+ Result.DoubleVal = cast<ConstantFP>(C)->getValueAPF().convertToDouble();
+ break;
+ case Type::X86_FP80TyID:
+ case Type::FP128TyID:
+ case Type::PPC_FP128TyID:
+ Result.IntVal = cast <ConstantFP>(C)->getValueAPF().bitcastToAPInt();
+ break;
+ case Type::IntegerTyID:
+ Result.IntVal = cast<ConstantInt>(C)->getValue();
+ break;
+ case Type::PointerTyID:
+ while (auto *A = dyn_cast<GlobalAlias>(C)) {
+ C = A->getAliasee();
+ }
+ if (isa<ConstantPointerNull>(C))
+ Result.PointerVal = nullptr;
+ else if (const Function *F = dyn_cast<Function>(C))
+ Result = PTOGV(getPointerToFunctionOrStub(const_cast<Function*>(F)));
+ else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
+ Result = PTOGV(getOrEmitGlobalVariable(const_cast<GlobalVariable*>(GV)));
+ else
+ llvm_unreachable("Unknown constant pointer type!");
+ break;
+ case Type::ScalableVectorTyID:
+ report_fatal_error(
+ "Scalable vector support not yet implemented in ExecutionEngine");
+ case Type::FixedVectorTyID: {
+ unsigned elemNum;
+ Type* ElemTy;
+ const ConstantDataVector *CDV = dyn_cast<ConstantDataVector>(C);
+ const ConstantVector *CV = dyn_cast<ConstantVector>(C);
+ const ConstantAggregateZero *CAZ = dyn_cast<ConstantAggregateZero>(C);
+
+ if (CDV) {
+ elemNum = CDV->getNumElements();
+ ElemTy = CDV->getElementType();
+ } else if (CV || CAZ) {
+ auto *VTy = cast<FixedVectorType>(C->getType());
+ elemNum = VTy->getNumElements();
+ ElemTy = VTy->getElementType();
+ } else {
+ llvm_unreachable("Unknown constant vector type!");
+ }
+
+ Result.AggregateVal.resize(elemNum);
+ // Check if vector holds floats.
+ if(ElemTy->isFloatTy()) {
+ if (CAZ) {
+ GenericValue floatZero;
+ floatZero.FloatVal = 0.f;
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ floatZero);
+ break;
+ }
+ if(CV) {
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].FloatVal = cast<ConstantFP>(
+ CV->getOperand(i))->getValueAPF().convertToFloat();
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].FloatVal = CDV->getElementAsFloat(i);
+
+ break;
+ }
+ // Check if vector holds doubles.
+ if (ElemTy->isDoubleTy()) {
+ if (CAZ) {
+ GenericValue doubleZero;
+ doubleZero.DoubleVal = 0.0;
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ doubleZero);
+ break;
+ }
+ if(CV) {
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].DoubleVal = cast<ConstantFP>(
+ CV->getOperand(i))->getValueAPF().convertToDouble();
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].DoubleVal = CDV->getElementAsDouble(i);
+
+ break;
+ }
+ // Check if vector holds integers.
+ if (ElemTy->isIntegerTy()) {
+ if (CAZ) {
+ GenericValue intZero;
+ intZero.IntVal = APInt(ElemTy->getScalarSizeInBits(), 0ull);
+ std::fill(Result.AggregateVal.begin(), Result.AggregateVal.end(),
+ intZero);
+ break;
+ }
+ if(CV) {
+ for (unsigned i = 0; i < elemNum; ++i)
+ if (!isa<UndefValue>(CV->getOperand(i)))
+ Result.AggregateVal[i].IntVal = cast<ConstantInt>(
+ CV->getOperand(i))->getValue();
+ else {
+ Result.AggregateVal[i].IntVal =
+ APInt(CV->getOperand(i)->getType()->getPrimitiveSizeInBits(), 0);
+ }
+ break;
+ }
+ if(CDV)
+ for (unsigned i = 0; i < elemNum; ++i)
+ Result.AggregateVal[i].IntVal = APInt(
+ CDV->getElementType()->getPrimitiveSizeInBits(),
+ CDV->getElementAsInteger(i));
+
+ break;
+ }
+ llvm_unreachable("Unknown constant pointer type!");
+ } break;
+
+ default:
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "ERROR: Constant unimplemented for type: " << *C->getType();
+ report_fatal_error(OS.str());
+ }
+
+ return Result;
+}
+
+/// Store Val to the address Ptr using the in-memory representation that the
+/// data layout dictates for type Ty. Bytes are written in host order first;
+/// if host and target endianness differ, the stored bytes are reversed at
+/// the end so memory ends up in target byte order.
+void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
+ GenericValue *Ptr, Type *Ty) {
+ // Store size (may be smaller than the alloc size for padded types).
+ const unsigned StoreBytes = getDataLayout().getTypeStoreSize(Ty);
+
+ switch (Ty->getTypeID()) {
+ default:
+ // Unhandled type: diagnose but continue best-effort.
+ dbgs() << "Cannot store value of type " << *Ty << "!\n";
+ break;
+ case Type::IntegerTyID:
+ StoreIntToMemory(Val.IntVal, (uint8_t*)Ptr, StoreBytes);
+ break;
+ case Type::FloatTyID:
+ *((float*)Ptr) = Val.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ *((double*)Ptr) = Val.DoubleVal;
+ break;
+ case Type::X86_FP80TyID:
+ // x86 long double payload is 80 bits = 10 bytes of the APInt raw words.
+ memcpy(Ptr, Val.IntVal.getRawData(), 10);
+ break;
+ case Type::PointerTyID:
+ // Ensure 64 bit target pointers are fully initialized on 32 bit hosts.
+ if (StoreBytes != sizeof(PointerTy))
+ memset(&(Ptr->PointerVal), 0, StoreBytes);
+
+ *((PointerTy*)Ptr) = Val.PointerVal;
+ break;
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID:
+ // Store elements contiguously, each at its natural width.
+ for (unsigned i = 0; i < Val.AggregateVal.size(); ++i) {
+ if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+ *(((double*)Ptr)+i) = Val.AggregateVal[i].DoubleVal;
+ if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
+ *(((float*)Ptr)+i) = Val.AggregateVal[i].FloatVal;
+ if (cast<VectorType>(Ty)->getElementType()->isIntegerTy()) {
+ unsigned numOfBytes =(Val.AggregateVal[i].IntVal.getBitWidth()+7)/8;
+ StoreIntToMemory(Val.AggregateVal[i].IntVal,
+ (uint8_t*)Ptr + numOfBytes*i, numOfBytes);
+ }
+ }
+ break;
+ }
+
+ if (sys::IsLittleEndianHost != getDataLayout().isLittleEndian())
+ // Host and target are different endian - reverse the stored bytes.
+ std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
+}
+
+/// Load a value of type Ty from the address Ptr into Result; the inverse of
+/// StoreValueToMemory. Bytes are interpreted according to the data layout's
+/// store size for Ty.
+void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
+ GenericValue *Ptr,
+ Type *Ty) {
+ const unsigned LoadBytes = getDataLayout().getTypeStoreSize(Ty);
+
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ // An APInt with all words initially zero.
+ Result.IntVal = APInt(cast<IntegerType>(Ty)->getBitWidth(), 0);
+ LoadIntFromMemory(Result.IntVal, (uint8_t*)Ptr, LoadBytes);
+ break;
+ case Type::FloatTyID:
+ Result.FloatVal = *((float*)Ptr);
+ break;
+ case Type::DoubleTyID:
+ Result.DoubleVal = *((double*)Ptr);
+ break;
+ case Type::PointerTyID:
+ Result.PointerVal = *((PointerTy*)Ptr);
+ break;
+ case Type::X86_FP80TyID: {
+ // This is endian dependent, but it will only work on x86 anyway.
+ // FIXME: Will not trap if loading a signaling NaN.
+ uint64_t y[2];
+ memcpy(y, Ptr, 10);
+ Result.IntVal = APInt(80, y);
+ break;
+ }
+ case Type::ScalableVectorTyID:
+ report_fatal_error(
+ "Scalable vector support not yet implemented in ExecutionEngine");
+ case Type::FixedVectorTyID: {
+ auto *VT = cast<FixedVectorType>(Ty);
+ Type *ElemT = VT->getElementType();
+ const unsigned numElems = VT->getNumElements();
+ if (ElemT->isFloatTy()) {
+ Result.AggregateVal.resize(numElems);
+ for (unsigned i = 0; i < numElems; ++i)
+ Result.AggregateVal[i].FloatVal = *((float*)Ptr+i);
+ }
+ if (ElemT->isDoubleTy()) {
+ Result.AggregateVal.resize(numElems);
+ for (unsigned i = 0; i < numElems; ++i)
+ Result.AggregateVal[i].DoubleVal = *((double*)Ptr+i);
+ }
+ if (ElemT->isIntegerTy()) {
+ // Pre-size each element's APInt so LoadIntFromMemory has the right
+ // bit width to fill in.
+ GenericValue intZero;
+ const unsigned elemBitWidth = cast<IntegerType>(ElemT)->getBitWidth();
+ intZero.IntVal = APInt(elemBitWidth, 0);
+ Result.AggregateVal.resize(numElems, intZero);
+ for (unsigned i = 0; i < numElems; ++i)
+ LoadIntFromMemory(Result.AggregateVal[i].IntVal,
+ (uint8_t*)Ptr+((elemBitWidth+7)/8)*i, (elemBitWidth+7)/8);
+ }
+ break;
+ }
+ default:
+ SmallString<256> Msg;
+ raw_svector_ostream OS(Msg);
+ OS << "Cannot load value of type " << *Ty << "!";
+ report_fatal_error(OS.str());
+ }
+}
+
+/// Recursively copy the constant Init into host memory at Addr, honoring the
+/// target's alloc sizes and struct field offsets for aggregates. Scalars go
+/// through getConstantValue + StoreValueToMemory.
+void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
+ LLVM_DEBUG(dbgs() << "JIT: Initializing " << Addr << " ");
+ LLVM_DEBUG(Init->dump());
+ // Undef: leave the memory contents unspecified.
+ if (isa<UndefValue>(Init))
+ return;
+
+ if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
+ unsigned ElementSize =
+ getDataLayout().getTypeAllocSize(CP->getType()->getElementType());
+ for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
+ InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
+ return;
+ }
+
+ // Zero initializer: a single memset of the full alloc size suffices.
+ if (isa<ConstantAggregateZero>(Init)) {
+ memset(Addr, 0, (size_t)getDataLayout().getTypeAllocSize(Init->getType()));
+ return;
+ }
+
+ if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
+ unsigned ElementSize =
+ getDataLayout().getTypeAllocSize(CPA->getType()->getElementType());
+ for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
+ InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
+ return;
+ }
+
+ if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
+ // Use the struct layout so padding between fields is respected.
+ const StructLayout *SL =
+ getDataLayout().getStructLayout(cast<StructType>(CPS->getType()));
+ for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
+ InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
+ return;
+ }
+
+ if (const ConstantDataSequential *CDS =
+ dyn_cast<ConstantDataSequential>(Init)) {
+ // CDS is already laid out in host memory order.
+ StringRef Data = CDS->getRawDataValues();
+ memcpy(Addr, Data.data(), Data.size());
+ return;
+ }
+
+ if (Init->getType()->isFirstClassType()) {
+ GenericValue Val = getConstantValue(Init);
+ StoreValueToMemory(Val, (GenericValue*)Addr, Init->getType());
+ return;
+ }
+
+ LLVM_DEBUG(dbgs() << "Bad Type: " << *Init->getType() << "\n");
+ llvm_unreachable("Unknown constant type to initialize memory with!");
+}
+
+/// EmitGlobals - Emit all of the global variables to memory, storing their
+/// addresses into GlobalAddress. This must make sure to copy the contents of
+/// their initializers into the memory.
+void ExecutionEngine::emitGlobals() {
+ // Loop over all of the global variables in the program, allocating the memory
+ // to hold them. If there is more than one module, do a prepass over globals
+ // to figure out how the different modules should link together.
+ // Keyed by (name, type) so same-named globals of different types are kept
+ // distinct; the mapped value is the canonical definition chosen below.
+ std::map<std::pair<std::string, Type*>,
+ const GlobalValue*> LinkedGlobalsMap;
+
+ if (Modules.size() != 1) {
+ for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
+ Module &M = *Modules[m];
+ for (const auto &GV : M.globals()) {
+ if (GV.hasLocalLinkage() || GV.isDeclaration() ||
+ GV.hasAppendingLinkage() || !GV.hasName())
+ continue;// Ignore external globals and globals with internal linkage.
+
+ const GlobalValue *&GVEntry = LinkedGlobalsMap[std::make_pair(
+ std::string(GV.getName()), GV.getType())];
+
+ // If this is the first time we've seen this global, it is the canonical
+ // version.
+ if (!GVEntry) {
+ GVEntry = &GV;
+ continue;
+ }
+
+ // If the existing global is strong, never replace it.
+ if (GVEntry->hasExternalLinkage())
+ continue;
+
+ // Otherwise, we know it's linkonce/weak, replace it if this is a strong
+ // symbol. FIXME is this right for common?
+ if (GV.hasExternalLinkage() || GVEntry->hasExternalWeakLinkage())
+ GVEntry = &GV;
+ }
+ }
+ }
+
+ // Second pass: allocate memory for canonical globals and resolve external
+ // declarations via the dynamic loader; remember non-canonical duplicates.
+ std::vector<const GlobalValue*> NonCanonicalGlobals;
+ for (unsigned m = 0, e = Modules.size(); m != e; ++m) {
+ Module &M = *Modules[m];
+ for (const auto &GV : M.globals()) {
+ // In the multi-module case, see what this global maps to.
+ if (!LinkedGlobalsMap.empty()) {
+ if (const GlobalValue *GVEntry = LinkedGlobalsMap[std::make_pair(
+ std::string(GV.getName()), GV.getType())]) {
+ // If something else is the canonical global, ignore this one.
+ if (GVEntry != &GV) {
+ NonCanonicalGlobals.push_back(&GV);
+ continue;
+ }
+ }
+ }
+
+ if (!GV.isDeclaration()) {
+ addGlobalMapping(&GV, getMemoryForGV(&GV));
+ } else {
+ // External variable reference. Try to use the dynamic loader to
+ // get a pointer to it.
+ if (void *SymAddr = sys::DynamicLibrary::SearchForAddressOfSymbol(
+ std::string(GV.getName())))
+ addGlobalMapping(&GV, SymAddr);
+ else {
+ report_fatal_error("Could not resolve external global address: "
+ +GV.getName());
+ }
+ }
+ }
+
+ // If there are multiple modules, map the non-canonical globals to their
+ // canonical location.
+ if (!NonCanonicalGlobals.empty()) {
+ for (const GlobalValue *GV : NonCanonicalGlobals) {
+ const GlobalValue *CGV = LinkedGlobalsMap[std::make_pair(
+ std::string(GV->getName()), GV->getType())];
+ void *Ptr = getPointerToGlobalIfAvailable(CGV);
+ assert(Ptr && "Canonical global wasn't codegen'd!");
+ addGlobalMapping(GV, Ptr);
+ }
+ }
+
+ // Now that all of the globals are set up in memory, loop through them all
+ // and initialize their contents.
+ for (const auto &GV : M.globals()) {
+ if (!GV.isDeclaration()) {
+ if (!LinkedGlobalsMap.empty()) {
+ if (const GlobalValue *GVEntry = LinkedGlobalsMap[std::make_pair(
+ std::string(GV.getName()), GV.getType())])
+ if (GVEntry != &GV) // Not the canonical variable.
+ continue;
+ }
+ emitGlobalVariable(&GV);
+ }
+ }
+ }
+}
+
+// EmitGlobalVariable - This method emits the specified global variable to the
+// address specified in GlobalAddresses, or allocates new memory if it's not
+// already in the map.
+// NOTE(review): GV->getInitializer() is called below, so callers are expected
+// to pass a definition (emitGlobals only calls this for !isDeclaration()).
+void ExecutionEngine::emitGlobalVariable(const GlobalVariable *GV) {
+ void *GA = getPointerToGlobalIfAvailable(GV);
+
+ if (!GA) {
+ // If it's not already specified, allocate memory for the global.
+ GA = getMemoryForGV(GV);
+
+ // If we failed to allocate memory for this global, return.
+ if (!GA) return;
+
+ addGlobalMapping(GV, GA);
+ }
+
+ // Don't initialize if it's thread local, let the client do it.
+ if (!GV->isThreadLocal())
+ InitializeMemory(GV->getInitializer(), GA);
+
+ // Update statistics counters.
+ Type *ElTy = GV->getValueType();
+ size_t GVSize = (size_t)getDataLayout().getTypeAllocSize(ElTy);
+ NumInitBytes += (unsigned)GVSize;
+ ++NumGlobals;
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/ExecutionEngineBindings.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/ExecutionEngineBindings.cpp
new file mode 100644
index 0000000000..672fd7b991
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/ExecutionEngineBindings.cpp
@@ -0,0 +1,447 @@
+//===-- ExecutionEngineBindings.cpp - C bindings for EEs ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C bindings for the ExecutionEngine library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/CodeGenCWrappers.h"
+#include "llvm/Target/TargetOptions.h"
+#include <cstring>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jit"
+
+// Wrapping the C bindings types.
+// Generates wrap()/unwrap() between GenericValue* and the opaque
+// LLVMGenericValueRef handle used by the C API.
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(GenericValue, LLVMGenericValueRef)
+
+
+// Manual wrap() for TargetMachine: strips const and reinterprets as the
+// opaque C handle.
+static LLVMTargetMachineRef wrap(const TargetMachine *P) {
+ return
+ reinterpret_cast<LLVMTargetMachineRef>(const_cast<TargetMachine*>(P));
+}
+
+/*===-- Operations on generic values --------------------------------------===*/
+
+LLVMGenericValueRef LLVMCreateGenericValueOfInt(LLVMTypeRef Ty,
+ unsigned long long N,
+ LLVMBool IsSigned) {
+ GenericValue *GenVal = new GenericValue();
+ GenVal->IntVal = APInt(unwrap<IntegerType>(Ty)->getBitWidth(), N, IsSigned);
+ return wrap(GenVal);
+}
+
+LLVMGenericValueRef LLVMCreateGenericValueOfPointer(void *P) {
+ GenericValue *GenVal = new GenericValue();
+ GenVal->PointerVal = P;
+ return wrap(GenVal);
+}
+
+LLVMGenericValueRef LLVMCreateGenericValueOfFloat(LLVMTypeRef TyRef, double N) {
+ GenericValue *GenVal = new GenericValue();
+ switch (unwrap(TyRef)->getTypeID()) {
+ case Type::FloatTyID:
+ GenVal->FloatVal = N;
+ break;
+ case Type::DoubleTyID:
+ GenVal->DoubleVal = N;
+ break;
+ default:
+ llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
+ }
+ return wrap(GenVal);
+}
+
+unsigned LLVMGenericValueIntWidth(LLVMGenericValueRef GenValRef) {
+ return unwrap(GenValRef)->IntVal.getBitWidth();
+}
+
+unsigned long long LLVMGenericValueToInt(LLVMGenericValueRef GenValRef,
+ LLVMBool IsSigned) {
+ GenericValue *GenVal = unwrap(GenValRef);
+ if (IsSigned)
+ return GenVal->IntVal.getSExtValue();
+ else
+ return GenVal->IntVal.getZExtValue();
+}
+
+void *LLVMGenericValueToPointer(LLVMGenericValueRef GenVal) {
+ return unwrap(GenVal)->PointerVal;
+}
+
+double LLVMGenericValueToFloat(LLVMTypeRef TyRef, LLVMGenericValueRef GenVal) {
+ switch (unwrap(TyRef)->getTypeID()) {
+ case Type::FloatTyID:
+ return unwrap(GenVal)->FloatVal;
+ case Type::DoubleTyID:
+ return unwrap(GenVal)->DoubleVal;
+ default:
+ llvm_unreachable("LLVMGenericValueToFloat supports only float and double.");
+ }
+}
+
+void LLVMDisposeGenericValue(LLVMGenericValueRef GenVal) {
+ delete unwrap(GenVal);
+}
+
+/*===-- Operations on execution engines -----------------------------------===*/
+
+LLVMBool LLVMCreateExecutionEngineForModule(LLVMExecutionEngineRef *OutEE,
+ LLVMModuleRef M,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::Either)
+ .setErrorStr(&Error);
+ if (ExecutionEngine *EE = builder.create()){
+ *OutEE = wrap(EE);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+LLVMBool LLVMCreateInterpreterForModule(LLVMExecutionEngineRef *OutInterp,
+ LLVMModuleRef M,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::Interpreter)
+ .setErrorStr(&Error);
+ if (ExecutionEngine *Interp = builder.create()) {
+ *OutInterp = wrap(Interp);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+LLVMBool LLVMCreateJITCompilerForModule(LLVMExecutionEngineRef *OutJIT,
+ LLVMModuleRef M,
+ unsigned OptLevel,
+ char **OutError) {
+ std::string Error;
+ EngineBuilder builder(std::unique_ptr<Module>(unwrap(M)));
+ builder.setEngineKind(EngineKind::JIT)
+ .setErrorStr(&Error)
+ .setOptLevel((CodeGenOpt::Level)OptLevel);
+ if (ExecutionEngine *JIT = builder.create()) {
+ *OutJIT = wrap(JIT);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+void LLVMInitializeMCJITCompilerOptions(LLVMMCJITCompilerOptions *PassedOptions,
+ size_t SizeOfPassedOptions) {
+ LLVMMCJITCompilerOptions options;
+ memset(&options, 0, sizeof(options)); // Most fields are zero by default.
+ options.CodeModel = LLVMCodeModelJITDefault;
+
+ memcpy(PassedOptions, &options,
+ std::min(sizeof(options), SizeOfPassedOptions));
+}
+
+// Create an MCJIT execution engine for module M from a caller-supplied
+// options struct whose size may differ from ours (older/newer C API users).
+// Returns 0 on success; on failure returns 1 and stores a malloc'd message
+// in *OutError.
+LLVMBool LLVMCreateMCJITCompilerForModule(
+ LLVMExecutionEngineRef *OutJIT, LLVMModuleRef M,
+ LLVMMCJITCompilerOptions *PassedOptions, size_t SizeOfPassedOptions,
+ char **OutError) {
+ LLVMMCJITCompilerOptions options;
+ // If the user passed a larger sized options struct, then they were compiled
+ // against a newer LLVM. Tell them that something is wrong.
+ if (SizeOfPassedOptions > sizeof(options)) {
+ *OutError = strdup(
+ "Refusing to use options struct that is larger than my own; assuming "
+ "LLVM library mismatch.");
+ return 1;
+ }
+
+ // Defend against the user having an old version of the API by ensuring that
+ // any fields they didn't see are cleared. We must defend against fields being
+ // set to the bitwise equivalent of zero, and assume that this means "do the
+ // default" as if that option hadn't been available.
+ LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
+ memcpy(&options, PassedOptions, SizeOfPassedOptions);
+
+ TargetOptions targetOptions;
+ targetOptions.EnableFastISel = options.EnableFastISel;
+ std::unique_ptr<Module> Mod(unwrap(M));
+
+ if (Mod)
+ // Set function attribute "frame-pointer" based on
+ // NoFramePointerElim.
+ for (auto &F : *Mod) {
+ auto Attrs = F.getAttributes();
+ StringRef Value = options.NoFramePointerElim ? "all" : "none";
+ Attrs = Attrs.addFnAttribute(F.getContext(), "frame-pointer", Value);
+ F.setAttributes(Attrs);
+ }
+
+ std::string Error;
+ EngineBuilder builder(std::move(Mod));
+ builder.setEngineKind(EngineKind::JIT)
+ .setErrorStr(&Error)
+ .setOptLevel((CodeGenOpt::Level)options.OptLevel)
+ .setTargetOptions(targetOptions);
+ // Out-parameter filled in by unwrap(); note it is shadowed by the
+ // ExecutionEngine* 'JIT' declared in the if-condition below.
+ bool JIT;
+ if (Optional<CodeModel::Model> CM = unwrap(options.CodeModel, JIT))
+ builder.setCodeModel(*CM);
+ if (options.MCJMM)
+ builder.setMCJITMemoryManager(
+ std::unique_ptr<RTDyldMemoryManager>(unwrap(options.MCJMM)));
+ if (ExecutionEngine *JIT = builder.create()) {
+ *OutJIT = wrap(JIT);
+ return 0;
+ }
+ *OutError = strdup(Error.c_str());
+ return 1;
+}
+
+void LLVMDisposeExecutionEngine(LLVMExecutionEngineRef EE) {
+ delete unwrap(EE);
+}
+
+void LLVMRunStaticConstructors(LLVMExecutionEngineRef EE) {
+ unwrap(EE)->finalizeObject();
+ unwrap(EE)->runStaticConstructorsDestructors(false);
+}
+
+void LLVMRunStaticDestructors(LLVMExecutionEngineRef EE) {
+ unwrap(EE)->finalizeObject();
+ unwrap(EE)->runStaticConstructorsDestructors(true);
+}
+
+int LLVMRunFunctionAsMain(LLVMExecutionEngineRef EE, LLVMValueRef F,
+ unsigned ArgC, const char * const *ArgV,
+ const char * const *EnvP) {
+ unwrap(EE)->finalizeObject();
+
+ std::vector<std::string> ArgVec(ArgV, ArgV + ArgC);
+ return unwrap(EE)->runFunctionAsMain(unwrap<Function>(F), ArgVec, EnvP);
+}
+
+LLVMGenericValueRef LLVMRunFunction(LLVMExecutionEngineRef EE, LLVMValueRef F,
+ unsigned NumArgs,
+ LLVMGenericValueRef *Args) {
+ unwrap(EE)->finalizeObject();
+
+ std::vector<GenericValue> ArgVec;
+ ArgVec.reserve(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ ArgVec.push_back(*unwrap(Args[I]));
+
+ GenericValue *Result = new GenericValue();
+ *Result = unwrap(EE)->runFunction(unwrap<Function>(F), ArgVec);
+ return wrap(Result);
+}
+
+// Kept for C API compatibility; this implementation intentionally does
+// nothing.
+void LLVMFreeMachineCodeForFunction(LLVMExecutionEngineRef EE, LLVMValueRef F) {
+}
+
+void LLVMAddModule(LLVMExecutionEngineRef EE, LLVMModuleRef M){
+ unwrap(EE)->addModule(std::unique_ptr<Module>(unwrap(M)));
+}
+
+LLVMBool LLVMRemoveModule(LLVMExecutionEngineRef EE, LLVMModuleRef M,
+ LLVMModuleRef *OutMod, char **OutError) {
+ Module *Mod = unwrap(M);
+ unwrap(EE)->removeModule(Mod);
+ *OutMod = wrap(Mod);
+ return 0;
+}
+
+LLVMBool LLVMFindFunction(LLVMExecutionEngineRef EE, const char *Name,
+ LLVMValueRef *OutFn) {
+ if (Function *F = unwrap(EE)->FindFunctionNamed(Name)) {
+ *OutFn = wrap(F);
+ return 0;
+ }
+ return 1;
+}
+
+// Kept for C API compatibility; this implementation always returns null.
+void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE,
+ LLVMValueRef Fn) {
+ return nullptr;
+}
+
+// Return the engine's data layout wrapped as an LLVMTargetDataRef. The
+// engine retains ownership; the caller must not dispose it.
+LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
+ return wrap(&unwrap(EE)->getDataLayout());
+}
+
+// Return the engine's target machine wrapped as an LLVMTargetMachineRef.
+// The engine retains ownership; the caller must not dispose it.
+LLVMTargetMachineRef
+LLVMGetExecutionEngineTargetMachine(LLVMExecutionEngineRef EE) {
+ return wrap(unwrap(EE)->getTargetMachine());
+}
+
+void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,
+ void* Addr) {
+ unwrap(EE)->addGlobalMapping(unwrap<GlobalValue>(Global), Addr);
+}
+
+void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) {
+ unwrap(EE)->finalizeObject();
+
+ return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global));
+}
+
+uint64_t LLVMGetGlobalValueAddress(LLVMExecutionEngineRef EE, const char *Name) {
+ return unwrap(EE)->getGlobalValueAddress(Name);
+}
+
+uint64_t LLVMGetFunctionAddress(LLVMExecutionEngineRef EE, const char *Name) {
+ return unwrap(EE)->getFunctionAddress(Name);
+}
+
+LLVMBool LLVMExecutionEngineGetErrMsg(LLVMExecutionEngineRef EE,
+ char **OutError) {
+ assert(OutError && "OutError must be non-null");
+ auto *ExecEngine = unwrap(EE);
+ if (ExecEngine->hasError()) {
+ *OutError = strdup(ExecEngine->getErrorMessage().c_str());
+ ExecEngine->clearErrorMessage();
+ return true;
+ }
+ return false;
+}
+
+/*===-- Operations on memory managers -------------------------------------===*/
+
+namespace {
+
+// Bundle of the four user-supplied C callbacks that drive the memory
+// manager below.
+struct SimpleBindingMMFunctions {
+ LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection;
+ LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection;
+ LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory;
+ LLVMMemoryManagerDestroyCallback Destroy;
+};
+
+// Adapter that forwards the RTDyldMemoryManager hooks to user-supplied C
+// callbacks, passing the user's opaque context pointer through unchanged.
+// The Destroy callback is invoked from the destructor.
+class SimpleBindingMemoryManager : public RTDyldMemoryManager {
+public:
+ SimpleBindingMemoryManager(const SimpleBindingMMFunctions& Functions,
+ void *Opaque);
+ ~SimpleBindingMemoryManager() override;
+
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) override;
+
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID, StringRef SectionName,
+ bool isReadOnly) override;
+
+ bool finalizeMemory(std::string *ErrMsg) override;
+
+private:
+ SimpleBindingMMFunctions Functions;
+ void *Opaque;
+};
+
+// All four callbacks are required; LLVMCreateSimpleMCJITMemoryManager
+// enforces this before construction, and these asserts back it up.
+SimpleBindingMemoryManager::SimpleBindingMemoryManager(
+ const SimpleBindingMMFunctions& Functions,
+ void *Opaque)
+ : Functions(Functions), Opaque(Opaque) {
+ assert(Functions.AllocateCodeSection &&
+ "No AllocateCodeSection function provided!");
+ assert(Functions.AllocateDataSection &&
+ "No AllocateDataSection function provided!");
+ assert(Functions.FinalizeMemory &&
+ "No FinalizeMemory function provided!");
+ assert(Functions.Destroy &&
+ "No Destroy function provided!");
+}
+
+SimpleBindingMemoryManager::~SimpleBindingMemoryManager() {
+ Functions.Destroy(Opaque);
+}
+
+uint8_t *SimpleBindingMemoryManager::allocateCodeSection(
+ uintptr_t Size, unsigned Alignment, unsigned SectionID,
+ StringRef SectionName) {
+ return Functions.AllocateCodeSection(Opaque, Size, Alignment, SectionID,
+ SectionName.str().c_str());
+}
+
+uint8_t *SimpleBindingMemoryManager::allocateDataSection(
+ uintptr_t Size, unsigned Alignment, unsigned SectionID,
+ StringRef SectionName, bool isReadOnly) {
+ return Functions.AllocateDataSection(Opaque, Size, Alignment, SectionID,
+ SectionName.str().c_str(),
+ isReadOnly);
+}
+
+bool SimpleBindingMemoryManager::finalizeMemory(std::string *ErrMsg) {
+ // The C callback reports errors through a malloc'd C string; copy it into
+ // the caller's std::string (if provided) and free the original.
+ char *errMsgCString = nullptr;
+ bool result = Functions.FinalizeMemory(Opaque, &errMsgCString);
+ assert((result || !errMsgCString) &&
+ "Did not expect an error message if FinalizeMemory succeeded");
+ if (errMsgCString) {
+ if (ErrMsg)
+ *ErrMsg = errMsgCString;
+ free(errMsgCString);
+ }
+ return result;
+}
+
+} // anonymous namespace
+
+LLVMMCJITMemoryManagerRef LLVMCreateSimpleMCJITMemoryManager(
+ void *Opaque,
+ LLVMMemoryManagerAllocateCodeSectionCallback AllocateCodeSection,
+ LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
+ LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
+ LLVMMemoryManagerDestroyCallback Destroy) {
+
+ if (!AllocateCodeSection || !AllocateDataSection || !FinalizeMemory ||
+ !Destroy)
+ return nullptr;
+
+ SimpleBindingMMFunctions functions;
+ functions.AllocateCodeSection = AllocateCodeSection;
+ functions.AllocateDataSection = AllocateDataSection;
+ functions.FinalizeMemory = FinalizeMemory;
+ functions.Destroy = Destroy;
+ return wrap(new SimpleBindingMemoryManager(functions, Opaque));
+}
+
+// Destroy a memory manager created by LLVMCreateSimpleMCJITMemoryManager;
+// the destructor invokes the user's Destroy callback.
+void LLVMDisposeMCJITMemoryManager(LLVMMCJITMemoryManagerRef MM) {
+ delete unwrap(MM);
+}
+
+/*===-- JIT Event Listener functions -------------------------------------===*/
+
+
+// Null-returning stubs, compiled only when LLVM was built without the
+// corresponding profiler/JIT-events support, so the C API symbols always
+// exist.
+#if !LLVM_USE_INTEL_JITEVENTS
+LLVMJITEventListenerRef LLVMCreateIntelJITEventListener(void)
+{
+ return nullptr;
+}
+#endif
+
+#if !LLVM_USE_OPROFILE
+LLVMJITEventListenerRef LLVMCreateOProfileJITEventListener(void)
+{
+ return nullptr;
+}
+#endif
+
+#if !LLVM_USE_PERF
+LLVMJITEventListenerRef LLVMCreatePerfJITEventListener(void)
+{
+ return nullptr;
+}
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/GDBRegistrationListener.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/GDBRegistrationListener.cpp
new file mode 100644
index 0000000000..1fb37ce7c5
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/GDBRegistrationListener.cpp
@@ -0,0 +1,245 @@
+//===----- GDBRegistrationListener.cpp - Registers objects with GDB -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/ExecutionEngine.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include <mutex>
+
+using namespace llvm;
+using namespace llvm::object;
+
+// This must be kept in sync with gdb/gdb/jit.h .
extern "C" {

  // Action the JIT asks the debugger to perform for the entry currently in
  // jit_descriptor::relevant_entry.
  typedef enum {
    JIT_NOACTION = 0,
    JIT_REGISTER_FN,
    JIT_UNREGISTER_FN
  } jit_actions_t;

  // One node in the debugger-visible doubly linked list of in-memory object
  // files; symfile_addr/symfile_size describe the object image.
  struct jit_code_entry {
    struct jit_code_entry *next_entry;
    struct jit_code_entry *prev_entry;
    const char *symfile_addr;
    uint64_t symfile_size;
  };

  struct jit_descriptor {
    uint32_t version;
    // This should be jit_actions_t, but we want to be specific about the
    // bit-width.
    uint32_t action_flag;
    struct jit_code_entry *relevant_entry;
    struct jit_code_entry *first_entry;
  };

  // We put information about the JITed function in this global, which the
  // debugger reads. Make sure to specify the version statically, because the
  // debugger checks the version before we can set it during runtime.
  extern struct jit_descriptor __jit_debug_descriptor;

  // Debuggers put a breakpoint in this function; calling it notifies them
  // that the descriptor above has changed.
  extern "C" void __jit_debug_register_code();
}
+
+namespace {
+
// FIXME: lli aims to provide both, RuntimeDyld and JITLink, as the dynamic
// loaders for its JIT implementations. And they both offer debugging via the
// GDB JIT interface, which builds on the two well-known symbol names below.
// As these symbols must be unique across the linked executable, we can only
// define them in one of the libraries and make the other depend on it.
// OrcTargetProcess is a minimal stub for embedding a JIT client in remote
// executors. For the moment it seems reasonable to have the definition there
// and let ExecutionEngine depend on it, until we find a better solution.
//
// Never called for its output; taking the addresses of the two symbols forces
// the linker to keep their definitions (from OrcTargetProcess) alive.
LLVM_ATTRIBUTE_USED void requiredSymbolDefinitionsFromOrcTargetProcess() {
  errs() << (void *)&__jit_debug_register_code
         << (void *)&__jit_debug_descriptor;
}
+
+struct RegisteredObjectInfo {
+ RegisteredObjectInfo() {}
+
+ RegisteredObjectInfo(std::size_t Size, jit_code_entry *Entry,
+ OwningBinary<ObjectFile> Obj)
+ : Size(Size), Entry(Entry), Obj(std::move(Obj)) {}
+
+ std::size_t Size;
+ jit_code_entry *Entry;
+ OwningBinary<ObjectFile> Obj;
+};
+
// Map from a loaded object's key to the registration bookkeeping for the
// in-memory object file in executable memory.
typedef llvm::DenseMap<JITEventListener::ObjectKey, RegisteredObjectInfo>
    RegisteredObjectBufferMap;
+
+/// Global access point for the JIT debugging interface designed for use with a
+/// singleton toolbox. Handles thread-safe registration and deregistration of
+/// object files that are in executable memory managed by the client of this
+/// class.
/// Global access point for the JIT debugging interface designed for use with a
/// singleton toolbox. Handles thread-safe registration and deregistration of
/// object files that are in executable memory managed by the client of this
/// class.  (Thread safety comes from the file-level JITDebugLock, not from
/// this class itself.)
class GDBJITRegistrationListener : public JITEventListener {
  /// A map of in-memory object files that have been registered with the
  /// JIT interface.
  RegisteredObjectBufferMap ObjectBufferMap;

public:
  /// Instantiates the JIT service.
  GDBJITRegistrationListener() {}

  /// Unregisters each object that was previously registered and releases all
  /// internal resources.
  ~GDBJITRegistrationListener() override;

  /// Creates an entry in the JIT registry for the buffer @p Object,
  /// which must contain an object file in executable memory with any
  /// debug information for the debugger.
  void notifyObjectLoaded(ObjectKey K, const ObjectFile &Obj,
                          const RuntimeDyld::LoadedObjectInfo &L) override;

  /// Removes the internal registration of the object loaded under @p K
  /// (if any) and frees associated resources.
  void notifyFreeingObject(ObjectKey K) override;

private:
  /// Deregister the debug info for the given object file from the debugger
  /// and delete any temporary copies. This private method does not remove
  /// the function from Map so that it can be called while iterating over Map.
  void deregisterObjectInternal(RegisteredObjectBufferMap::iterator I);
};
+
/// Lock used to serialize all jit registration events, since they
/// modify global variables (__jit_debug_descriptor and the listener's map).
ManagedStatic<sys::Mutex> JITDebugLock;
+
+/// Do the registration.
+void NotifyDebugger(jit_code_entry* JITCodeEntry) {
+ __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
+
+ // Insert this entry at the head of the list.
+ JITCodeEntry->prev_entry = nullptr;
+ jit_code_entry* NextEntry = __jit_debug_descriptor.first_entry;
+ JITCodeEntry->next_entry = NextEntry;
+ if (NextEntry) {
+ NextEntry->prev_entry = JITCodeEntry;
+ }
+ __jit_debug_descriptor.first_entry = JITCodeEntry;
+ __jit_debug_descriptor.relevant_entry = JITCodeEntry;
+ __jit_debug_register_code();
+}
+
// Unregister every still-registered object from the debugger, then drop all
// bookkeeping.  Must use the internal helper so the map is not mutated while
// being iterated.
GDBJITRegistrationListener::~GDBJITRegistrationListener() {
  // Free all registered object files.
  std::lock_guard<llvm::sys::Mutex> locked(*JITDebugLock);
  for (RegisteredObjectBufferMap::iterator I = ObjectBufferMap.begin(),
                                           E = ObjectBufferMap.end();
       I != E; ++I) {
    // Call the private method that doesn't update the map so our iterator
    // doesn't break.
    deregisterObjectInternal(I);
  }
  ObjectBufferMap.clear();
}
+
+void GDBJITRegistrationListener::notifyObjectLoaded(
+ ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+
+ OwningBinary<ObjectFile> DebugObj = L.getObjectForDebug(Obj);
+
+ // Bail out if debug objects aren't supported.
+ if (!DebugObj.getBinary())
+ return;
+
+ const char *Buffer = DebugObj.getBinary()->getMemoryBufferRef().getBufferStart();
+ size_t Size = DebugObj.getBinary()->getMemoryBufferRef().getBufferSize();
+
+ std::lock_guard<llvm::sys::Mutex> locked(*JITDebugLock);
+ assert(ObjectBufferMap.find(K) == ObjectBufferMap.end() &&
+ "Second attempt to perform debug registration.");
+ jit_code_entry* JITCodeEntry = new jit_code_entry();
+
+ if (!JITCodeEntry) {
+ llvm::report_fatal_error(
+ "Allocation failed when registering a JIT entry!\n");
+ } else {
+ JITCodeEntry->symfile_addr = Buffer;
+ JITCodeEntry->symfile_size = Size;
+
+ ObjectBufferMap[K] =
+ RegisteredObjectInfo(Size, JITCodeEntry, std::move(DebugObj));
+ NotifyDebugger(JITCodeEntry);
+ }
+}
+
+void GDBJITRegistrationListener::notifyFreeingObject(ObjectKey K) {
+ std::lock_guard<llvm::sys::Mutex> locked(*JITDebugLock);
+ RegisteredObjectBufferMap::iterator I = ObjectBufferMap.find(K);
+
+ if (I != ObjectBufferMap.end()) {
+ deregisterObjectInternal(I);
+ ObjectBufferMap.erase(I);
+ }
+}
+
+void GDBJITRegistrationListener::deregisterObjectInternal(
+ RegisteredObjectBufferMap::iterator I) {
+
+ jit_code_entry*& JITCodeEntry = I->second.Entry;
+
+ // Do the unregistration.
+ {
+ __jit_debug_descriptor.action_flag = JIT_UNREGISTER_FN;
+
+ // Remove the jit_code_entry from the linked list.
+ jit_code_entry* PrevEntry = JITCodeEntry->prev_entry;
+ jit_code_entry* NextEntry = JITCodeEntry->next_entry;
+
+ if (NextEntry) {
+ NextEntry->prev_entry = PrevEntry;
+ }
+ if (PrevEntry) {
+ PrevEntry->next_entry = NextEntry;
+ }
+ else {
+ assert(__jit_debug_descriptor.first_entry == JITCodeEntry);
+ __jit_debug_descriptor.first_entry = NextEntry;
+ }
+
+ // Tell the debugger which entry we removed, and unregister the code.
+ __jit_debug_descriptor.relevant_entry = JITCodeEntry;
+ __jit_debug_register_code();
+ }
+
+ delete JITCodeEntry;
+ JITCodeEntry = nullptr;
+}
+
// Lazily constructed singleton listener, handed out (non-owning) by
// JITEventListener::createGDBRegistrationListener() below.
llvm::ManagedStatic<GDBJITRegistrationListener> GDBRegListener;
+
+} // end namespace
+
namespace llvm {

// Return the process-wide GDB registration listener singleton.  The pointer
// is owned by the ManagedStatic above; callers must not delete it.
JITEventListener* JITEventListener::createGDBRegistrationListener() {
  return &*GDBRegListener;
}

} // namespace llvm
+
// C API wrapper around the singleton accessor above; the returned ref is
// non-owning for the same reason.
LLVMJITEventListenerRef LLVMCreateGDBRegistrationListener(void)
{
  return wrap(JITEventListener::createGDBRegistrationListener());
}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Execution.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Execution.cpp
new file mode 100644
index 0000000000..770fc93490
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Execution.cpp
@@ -0,0 +1,2168 @@
+//===-- Execution.cpp - Implement code to simulate the program ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the actual instruction interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/GetElementPtrTypeIterator.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cmath>
+using namespace llvm;
+
+#define DEBUG_TYPE "interpreter"
+
+STATISTIC(NumDynamicInsts, "Number of dynamic instructions executed");
+
+static cl::opt<bool> PrintVolatile("interpreter-print-volatile", cl::Hidden,
+ cl::desc("make the interpreter print every volatile load and store"));
+
+//===----------------------------------------------------------------------===//
+// Various Helper Functions
+//===----------------------------------------------------------------------===//
+
// Bind the SSA value V to the runtime value Val inside the given stack
// frame SF, overwriting any previous binding.
static void SetValue(Value *V, GenericValue Val, ExecutionContext &SF) {
  SF.Values[V] = Val;
}
+
+//===----------------------------------------------------------------------===//
+// Unary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+static void executeFNegInst(GenericValue &Dest, GenericValue Src, Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::FloatTyID:
+ Dest.FloatVal = -Src.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = -Src.DoubleVal;
+ break;
+ default:
+ llvm_unreachable("Unhandled type for FNeg instruction");
+ }
+}
+
// Execute a unary operator (currently only FNeg) in the topmost stack frame
// and bind the result to the instruction.  Vector operands are negated
// element-wise; scalars go through executeFNegInst.
void Interpreter::visitUnaryOperator(UnaryOperator &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src = getOperandValue(I.getOperand(0), SF);
  GenericValue R; // Result

  // First process vector operation
  if (Ty->isVectorTy()) {
    // The result has one slot per element of the operand.
    R.AggregateVal.resize(Src.AggregateVal.size());

    switch(I.getOpcode()) {
    default:
      llvm_unreachable("Don't know how to handle this unary operator");
      break;
    case Instruction::FNeg:
      if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].FloatVal = -Src.AggregateVal[i].FloatVal;
      } else if (cast<VectorType>(Ty)->getElementType()->isDoubleTy()) {
        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
          R.AggregateVal[i].DoubleVal = -Src.AggregateVal[i].DoubleVal;
      } else {
        llvm_unreachable("Unhandled type for FNeg instruction");
      }
      break;
    }
  } else {
    switch (I.getOpcode()) {
    default:
      llvm_unreachable("Don't know how to handle this unary operator");
      break;
    case Instruction::FNeg: executeFNegInst(R, Src, Ty); break;
    }
  }
  SetValue(&I, R, SF);
}
+
+//===----------------------------------------------------------------------===//
+// Binary Instruction Implementations
+//===----------------------------------------------------------------------===//
+
// Expands to one switch case applying C++ operator OP to the Float or Double
// member of the operand GenericValues.
#define IMPLEMENT_BINARY_OPERATOR(OP, TY) \
   case Type::TY##TyID: \
     Dest.TY##Val = Src1.TY##Val OP Src2.TY##Val; \
     break

// Scalar FAdd: Dest = Src1 + Src2 for float/double.
static void executeFAddInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(+, Float);
    IMPLEMENT_BINARY_OPERATOR(+, Double);
  default:
    dbgs() << "Unhandled type for FAdd instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Scalar FSub: Dest = Src1 - Src2 for float/double.
static void executeFSubInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(-, Float);
    IMPLEMENT_BINARY_OPERATOR(-, Double);
  default:
    dbgs() << "Unhandled type for FSub instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Scalar FMul: Dest = Src1 * Src2 for float/double.
static void executeFMulInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(*, Float);
    IMPLEMENT_BINARY_OPERATOR(*, Double);
  default:
    dbgs() << "Unhandled type for FMul instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Scalar FDiv: Dest = Src1 / Src2 for float/double.
static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
    IMPLEMENT_BINARY_OPERATOR(/, Float);
    IMPLEMENT_BINARY_OPERATOR(/, Double);
  default:
    dbgs() << "Unhandled type for FDiv instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}

// Scalar FRem: floating-point remainder via C library fmod.
static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
                            GenericValue Src2, Type *Ty) {
  switch (Ty->getTypeID()) {
  case Type::FloatTyID:
    Dest.FloatVal = fmod(Src1.FloatVal, Src2.FloatVal);
    break;
  case Type::DoubleTyID:
    Dest.DoubleVal = fmod(Src1.DoubleVal, Src2.DoubleVal);
    break;
  default:
    dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
}
+
// Scalar integer compare: calls the named APInt predicate method (eq, ult,
// sgt, ...) and produces an i1 result.
#define IMPLEMENT_INTEGER_ICMP(OP, TY) \
   case Type::IntegerTyID: \
      Dest.IntVal = APInt(1,Src1.IntVal.OP(Src2.IntVal)); \
      break;

// Element-wise integer compare over vectors; one i1 per element.
#define IMPLEMENT_VECTOR_INTEGER_ICMP(OP, TY)                                  \
  case Type::FixedVectorTyID:                                                  \
  case Type::ScalableVectorTyID: {                                             \
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());              \
    Dest.AggregateVal.resize(Src1.AggregateVal.size());                        \
    for (uint32_t _i = 0; _i < Src1.AggregateVal.size(); _i++)                 \
      Dest.AggregateVal[_i].IntVal = APInt(                                    \
          1, Src1.AggregateVal[_i].IntVal.OP(Src2.AggregateVal[_i].IntVal));   \
  } break;

// Handle pointers specially because they must be compared with only as much
// width as the host has. We _do not_ want to be comparing 64 bit values when
// running on a 32-bit target, otherwise the upper 32 bits might mess up
// comparisons if they contain garbage.
#define IMPLEMENT_POINTER_ICMP(OP) \
   case Type::PointerTyID: \
      Dest.IntVal = APInt(1,(void*)(intptr_t)Src1.PointerVal OP \
                            (void*)(intptr_t)Src2.PointerVal); \
      break;
+
//===----------------------------------------------------------------------===//
// Integer comparison helpers.  Each returns an i1 (or vector of i1) for one
// ICmp predicate, dispatching on the operand type via the macros above.
// Note: pointer operands are compared with the plain host operator, so the
// signed/unsigned distinction only applies to integer operands.
//===----------------------------------------------------------------------===//

static GenericValue executeICMP_EQ(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(eq,Ty);
    IMPLEMENT_POINTER_ICMP(==);
  default:
    dbgs() << "Unhandled type for ICMP_EQ predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_NE(GenericValue Src1, GenericValue Src2,
                                   Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ne,Ty);
    IMPLEMENT_POINTER_ICMP(!=);
  default:
    dbgs() << "Unhandled type for ICMP_NE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ult,Ty);
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_ULT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(slt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(slt,Ty);
    // Pointers use the same host '<' as ULT.
    IMPLEMENT_POINTER_ICMP(<);
  default:
    dbgs() << "Unhandled type for ICMP_SLT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ugt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_UGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sgt,Ty);
    IMPLEMENT_POINTER_ICMP(>);
  default:
    dbgs() << "Unhandled type for ICMP_SGT predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(ule,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_ULE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sle,Ty);
    IMPLEMENT_POINTER_ICMP(<=);
  default:
    dbgs() << "Unhandled type for ICMP_SLE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(uge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_UGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeICMP_SGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_VECTOR_INTEGER_ICMP(sge,Ty);
    IMPLEMENT_POINTER_ICMP(>=);
  default:
    dbgs() << "Unhandled type for ICMP_SGE predicate: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
+
// Execute an integer compare instruction: dispatch on the predicate to the
// matching executeICMP_* helper and bind the i1/vector-of-i1 result.
void Interpreter::visitICmpInst(ICmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R; // Result

  switch (I.getPredicate()) {
  case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_ULT: R = executeICMP_ULT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLT: R = executeICMP_SLT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGT: R = executeICMP_UGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGT: R = executeICMP_SGT(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_ULE: R = executeICMP_ULE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SLE: R = executeICMP_SLE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_UGE: R = executeICMP_UGE(Src1, Src2, Ty); break;
  case ICmpInst::ICMP_SGE: R = executeICMP_SGE(Src1, Src2, Ty); break;
  default:
    dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  }

  SetValue(&I, R, SF);
}
+
// Scalar floating-point compare via C++ operator OP; result is an i1.
#define IMPLEMENT_FCMP(OP, TY) \
   case Type::TY##TyID: \
     Dest.IntVal = APInt(1,Src1.TY##Val OP Src2.TY##Val); \
     break

// Element-wise float/double compare body shared by IMPLEMENT_VECTOR_FCMP.
#define IMPLEMENT_VECTOR_FCMP_T(OP, TY)                             \
  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());     \
  Dest.AggregateVal.resize( Src1.AggregateVal.size() );             \
  for( uint32_t _i=0;_i<Src1.AggregateVal.size();_i++)              \
    Dest.AggregateVal[_i].IntVal = APInt(1,                         \
    Src1.AggregateVal[_i].TY##Val OP Src2.AggregateVal[_i].TY##Val);\
  break;

// Vector compare case: picks float vs double from the element type.
#define IMPLEMENT_VECTOR_FCMP(OP)                                   \
  case Type::FixedVectorTyID:                                       \
  case Type::ScalableVectorTyID:                                    \
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {      \
      IMPLEMENT_VECTOR_FCMP_T(OP, Float);                           \
    } else {                                                        \
      IMPLEMENT_VECTOR_FCMP_T(OP, Double);                          \
    }
+
// Ordered equality compare; NaN handling (for the U* predicates) is layered
// on top of this by the callers below.
static GenericValue executeFCMP_OEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(==, Float);
    IMPLEMENT_FCMP(==, Double);
    IMPLEMENT_VECTOR_FCMP(==);
  default:
    dbgs() << "Unhandled type for FCmp EQ instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
+
// Early-return false when either scalar operand is NaN (x != x is the
// classic NaN test, used throughout this file).
#define IMPLEMENT_SCALAR_NANS(TY, X,Y)                                      \
  if (TY->isFloatTy()) {                                                    \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {             \
      Dest.IntVal = APInt(1,false);                                         \
      return Dest;                                                          \
    }                                                                       \
  } else {                                                                  \
    if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) {         \
      Dest.IntVal = APInt(1,false);                                         \
      return Dest;                                                          \
    }                                                                       \
  }

// Per-element NaN mask: Dest[i] = FLAG where either operand element is NaN,
// !FLAG otherwise.  Consumed by executeFCMP_ONE/UNE and the U* helpers.
#define MASK_VECTOR_NANS_T(X,Y, TZ, FLAG)                                   \
  assert(X.AggregateVal.size() == Y.AggregateVal.size());                   \
  Dest.AggregateVal.resize( X.AggregateVal.size() );                        \
  for( uint32_t _i=0;_i<X.AggregateVal.size();_i++) {                       \
    if (X.AggregateVal[_i].TZ##Val != X.AggregateVal[_i].TZ##Val ||         \
        Y.AggregateVal[_i].TZ##Val != Y.AggregateVal[_i].TZ##Val)           \
      Dest.AggregateVal[_i].IntVal = APInt(1,FLAG);                         \
    else {                                                                  \
      Dest.AggregateVal[_i].IntVal = APInt(1,!FLAG);                        \
    }                                                                       \
  }

// Vector wrapper for the NaN mask; no-op for scalar types.
#define MASK_VECTOR_NANS(TY, X,Y, FLAG)                                     \
  if (TY->isVectorTy()) {                                                   \
    if (cast<VectorType>(TY)->getElementType()->isFloatTy()) {              \
      MASK_VECTOR_NANS_T(X, Y, Float, FLAG)                                 \
    } else {                                                                \
      MASK_VECTOR_NANS_T(X, Y, Double, FLAG)                                \
    }                                                                       \
  }                                                                         \
+
+
+
// Ordered not-equal.  NaN != NaN would be true under plain '!=', so for
// vectors a NaN mask (built into Dest by MASK_VECTOR_NANS, then saved in
// DestMask) is applied afterwards to force NaN lanes to false; scalars with
// a NaN operand early-return false via IMPLEMENT_SCALAR_NANS.
static GenericValue executeFCMP_ONE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty)
{
  GenericValue Dest;
  // if input is scalar value and Src1 or Src2 is NaN return false
  IMPLEMENT_SCALAR_NANS(Ty, Src1, Src2)
  // if vector input detect NaNs and fill mask
  MASK_VECTOR_NANS(Ty, Src1, Src2, false)
  GenericValue DestMask = Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(!=, Float);
    IMPLEMENT_FCMP(!=, Double);
    IMPLEMENT_VECTOR_FCMP(!=);
    default:
      dbgs() << "Unhandled type for FCmp NE instruction: " << *Ty << "\n";
      llvm_unreachable(nullptr);
  }
  // in vector case mask out NaN elements
  if (Ty->isVectorTy())
    for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
      if (DestMask.AggregateVal[_i].IntVal == false)
        Dest.AggregateVal[_i].IntVal = APInt(1,false);

  return Dest;
}
+
// Ordered relational compares (<=, >=, <, >).  These rely on IEEE semantics
// of the host operators: any comparison with NaN yields false, which is
// exactly the "ordered" contract, so no explicit NaN handling is needed.

static GenericValue executeFCMP_OLE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<=, Float);
    IMPLEMENT_FCMP(<=, Double);
    IMPLEMENT_VECTOR_FCMP(<=);
  default:
    dbgs() << "Unhandled type for FCmp LE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>=, Float);
    IMPLEMENT_FCMP(>=, Double);
    IMPLEMENT_VECTOR_FCMP(>=);
  default:
    dbgs() << "Unhandled type for FCmp GE instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OLT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(<, Float);
    IMPLEMENT_FCMP(<, Double);
    IMPLEMENT_VECTOR_FCMP(<);
  default:
    dbgs() << "Unhandled type for FCmp LT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}

static GenericValue executeFCMP_OGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  switch (Ty->getTypeID()) {
    IMPLEMENT_FCMP(>, Float);
    IMPLEMENT_FCMP(>, Double);
    IMPLEMENT_VECTOR_FCMP(>);
  default:
    dbgs() << "Unhandled type for FCmp GT instruction: " << *Ty << "\n";
    llvm_unreachable(nullptr);
  }
  return Dest;
}
+
// Scalar unordered compare prelude: if either operand is NaN the unordered
// predicate is satisfied, so return true immediately.
#define IMPLEMENT_UNORDERED(TY, X,Y)                                     \
  if (TY->isFloatTy()) {                                                 \
    if (X.FloatVal != X.FloatVal || Y.FloatVal != Y.FloatVal) {          \
      Dest.IntVal = APInt(1,true);                                       \
      return Dest;                                                       \
    }                                                                    \
  } else if (X.DoubleVal != X.DoubleVal || Y.DoubleVal != Y.DoubleVal) { \
    Dest.IntVal = APInt(1,true);                                         \
    return Dest;                                                         \
  }

// Vector unordered compare: run the ordered helper FUNC, then force lanes
// whose NaN-mask bit (in DestMask, built by MASK_VECTOR_NANS) is set to true.
#define IMPLEMENT_VECTOR_UNORDERED(TY, X, Y, FUNC)                       \
  if (TY->isVectorTy()) {                                                \
    GenericValue DestMask = Dest;                                        \
    Dest = FUNC(Src1, Src2, Ty);                                         \
    for (size_t _i = 0; _i < Src1.AggregateVal.size(); _i++)             \
      if (DestMask.AggregateVal[_i].IntVal == true)                      \
        Dest.AggregateVal[_i].IntVal = APInt(1, true);                   \
    return Dest;                                                         \
  }
+
// Unordered predicates: "true if unordered (either operand NaN) OR the
// ordered comparison holds".  Each helper early-returns true for scalar NaNs
// (IMPLEMENT_UNORDERED), builds a per-lane NaN mask for vectors
// (MASK_VECTOR_NANS into Dest), applies it on top of the ordered result
// (IMPLEMENT_VECTOR_UNORDERED), and otherwise falls through to the ordered
// helper.

static GenericValue executeFCMP_UEQ(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OEQ)
  return executeFCMP_OEQ(Src1, Src2, Ty);

}

static GenericValue executeFCMP_UNE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_ONE)
  return executeFCMP_ONE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLE)
  return executeFCMP_OLE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGE(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGE)
  return executeFCMP_OGE(Src1, Src2, Ty);
}

static GenericValue executeFCMP_ULT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OLT)
  return executeFCMP_OLT(Src1, Src2, Ty);
}

static GenericValue executeFCMP_UGT(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  IMPLEMENT_UNORDERED(Ty, Src1, Src2)
  MASK_VECTOR_NANS(Ty, Src1, Src2, true)
  IMPLEMENT_VECTOR_UNORDERED(Ty, Src1, Src2, executeFCMP_OGT)
  return executeFCMP_OGT(Src1, Src2, Ty);
}
+
// FCMP_ORD: true iff neither operand is NaN (x == x fails only for NaN);
// element-wise for vectors.
static GenericValue executeFCMP_ORD(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].FloatVal ==
        Src1.AggregateVal[_i].FloatVal) &&
        (Src2.AggregateVal[_i].FloatVal ==
        Src2.AggregateVal[_i].FloatVal)));
    } else {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].DoubleVal ==
        Src1.AggregateVal[_i].DoubleVal) &&
        (Src2.AggregateVal[_i].DoubleVal ==
        Src2.AggregateVal[_i].DoubleVal)));
    }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
                           Src2.FloatVal == Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
                           Src2.DoubleVal == Src2.DoubleVal));
  }
  return Dest;
}
+
// FCMP_UNO: true iff at least one operand is NaN — the exact complement of
// FCMP_ORD above; element-wise for vectors.
static GenericValue executeFCMP_UNO(GenericValue Src1, GenericValue Src2,
                                    Type *Ty) {
  GenericValue Dest;
  if(Ty->isVectorTy()) {
    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
    Dest.AggregateVal.resize( Src1.AggregateVal.size() );
    if (cast<VectorType>(Ty)->getElementType()->isFloatTy()) {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].FloatVal !=
        Src1.AggregateVal[_i].FloatVal) ||
        (Src2.AggregateVal[_i].FloatVal !=
        Src2.AggregateVal[_i].FloatVal)));
    } else {
      for( size_t _i=0;_i<Src1.AggregateVal.size();_i++)
        Dest.AggregateVal[_i].IntVal = APInt(1,
        ( (Src1.AggregateVal[_i].DoubleVal !=
        Src1.AggregateVal[_i].DoubleVal) ||
        (Src2.AggregateVal[_i].DoubleVal !=
        Src2.AggregateVal[_i].DoubleVal)));
    }
  } else if (Ty->isFloatTy())
    Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
                           Src2.FloatVal != Src2.FloatVal));
  else {
    Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
                           Src2.DoubleVal != Src2.DoubleVal));
  }
  return Dest;
}
+
+static GenericValue executeFCMP_BOOL(GenericValue Src1, GenericValue Src2,
+ Type *Ty, const bool val) {
+ GenericValue Dest;
+ if(Ty->isVectorTy()) {
+ assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+ Dest.AggregateVal.resize( Src1.AggregateVal.size() );
+ for( size_t _i=0; _i<Src1.AggregateVal.size(); _i++)
+ Dest.AggregateVal[_i].IntVal = APInt(1,val);
+ } else {
+ Dest.IntVal = APInt(1, val);
+ }
+
+ return Dest;
+}
+
// Execute a floating-point compare instruction: dispatch on the predicate to
// the matching executeFCMP_* helper and bind the i1/vector-of-i1 result.
void Interpreter::visitFCmpInst(FCmpInst &I) {
  ExecutionContext &SF = ECStack.back();
  Type *Ty = I.getOperand(0)->getType();
  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
  GenericValue R; // Result

  switch (I.getPredicate()) {
  default:
    dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
    llvm_unreachable(nullptr);
  break;
  case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
  break;
  case FCmpInst::FCMP_TRUE:  R = executeFCMP_BOOL(Src1, Src2, Ty, true);
  break;
  case FCmpInst::FCMP_ORD:   R = executeFCMP_ORD(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNO:   R = executeFCMP_UNO(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UEQ:   R = executeFCMP_UEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OEQ:   R = executeFCMP_OEQ(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UNE:   R = executeFCMP_UNE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ONE:   R = executeFCMP_ONE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULT:   R = executeFCMP_ULT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLT:   R = executeFCMP_OLT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGT:   R = executeFCMP_UGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGT:   R = executeFCMP_OGT(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_ULE:   R = executeFCMP_ULE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OLE:   R = executeFCMP_OLE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_UGE:   R = executeFCMP_UGE(Src1, Src2, Ty); break;
  case FCmpInst::FCMP_OGE:   R = executeFCMP_OGE(Src1, Src2, Ty); break;
  }

  SetValue(&I, R, SF);
}
+
+// Dispatch a comparison by raw predicate value to the matching
+// executeICMP_* / executeFCMP_* helper. Handles both integer and
+// floating-point predicates (callers such as vector-compare paths carry an
+// untyped predicate). Aborts on an unknown predicate.
+//
+// Note: the original declared a local 'GenericValue Result' that was never
+// used — every case returns directly — so it has been removed.
+static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
+                                   GenericValue Src2, Type *Ty) {
+  switch (predicate) {
+  case ICmpInst::ICMP_EQ:    return executeICMP_EQ(Src1, Src2, Ty);
+  case ICmpInst::ICMP_NE:    return executeICMP_NE(Src1, Src2, Ty);
+  case ICmpInst::ICMP_UGT:   return executeICMP_UGT(Src1, Src2, Ty);
+  case ICmpInst::ICMP_SGT:   return executeICMP_SGT(Src1, Src2, Ty);
+  case ICmpInst::ICMP_ULT:   return executeICMP_ULT(Src1, Src2, Ty);
+  case ICmpInst::ICMP_SLT:   return executeICMP_SLT(Src1, Src2, Ty);
+  case ICmpInst::ICMP_UGE:   return executeICMP_UGE(Src1, Src2, Ty);
+  case ICmpInst::ICMP_SGE:   return executeICMP_SGE(Src1, Src2, Ty);
+  case ICmpInst::ICMP_ULE:   return executeICMP_ULE(Src1, Src2, Ty);
+  case ICmpInst::ICMP_SLE:   return executeICMP_SLE(Src1, Src2, Ty);
+  case FCmpInst::FCMP_ORD:   return executeFCMP_ORD(Src1, Src2, Ty);
+  case FCmpInst::FCMP_UNO:   return executeFCMP_UNO(Src1, Src2, Ty);
+  case FCmpInst::FCMP_OEQ:   return executeFCMP_OEQ(Src1, Src2, Ty);
+  case FCmpInst::FCMP_UEQ:   return executeFCMP_UEQ(Src1, Src2, Ty);
+  case FCmpInst::FCMP_ONE:   return executeFCMP_ONE(Src1, Src2, Ty);
+  case FCmpInst::FCMP_UNE:   return executeFCMP_UNE(Src1, Src2, Ty);
+  case FCmpInst::FCMP_OLT:   return executeFCMP_OLT(Src1, Src2, Ty);
+  case FCmpInst::FCMP_ULT:   return executeFCMP_ULT(Src1, Src2, Ty);
+  case FCmpInst::FCMP_OGT:   return executeFCMP_OGT(Src1, Src2, Ty);
+  case FCmpInst::FCMP_UGT:   return executeFCMP_UGT(Src1, Src2, Ty);
+  case FCmpInst::FCMP_OLE:   return executeFCMP_OLE(Src1, Src2, Ty);
+  case FCmpInst::FCMP_ULE:   return executeFCMP_ULE(Src1, Src2, Ty);
+  case FCmpInst::FCMP_OGE:   return executeFCMP_OGE(Src1, Src2, Ty);
+  case FCmpInst::FCMP_UGE:   return executeFCMP_UGE(Src1, Src2, Ty);
+  case FCmpInst::FCMP_FALSE: return executeFCMP_BOOL(Src1, Src2, Ty, false);
+  case FCmpInst::FCMP_TRUE:  return executeFCMP_BOOL(Src1, Src2, Ty, true);
+  default:
+    dbgs() << "Unhandled Cmp predicate\n";
+    llvm_unreachable(nullptr);
+  }
+}
+
+// Execute a binary operator on the current stack frame and record the
+// result for instruction I. Integer vectors go through the
+// INTEGER_VECTOR_* macros, FP vectors through FLOAT_VECTOR_*, and scalars
+// through the second switch. Shifts (shl/lshr/ashr) are handled in their
+// own visitors, not here.
+void Interpreter::visitBinaryOperator(BinaryOperator &I) {
+  ExecutionContext &SF = ECStack.back();
+  Type *Ty = I.getOperand(0)->getType();
+  GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+  GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+  GenericValue R;   // Result
+
+  // First process vector operation
+  if (Ty->isVectorTy()) {
+    assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+    R.AggregateVal.resize(Src1.AggregateVal.size());
+
+    // Macros to execute binary operation 'OP' over integer vectors
+#define INTEGER_VECTOR_OPERATION(OP)                               \
+    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
+      R.AggregateVal[i].IntVal =                                   \
+        Src1.AggregateVal[i].IntVal OP Src2.AggregateVal[i].IntVal;
+
+    // Additional macros to execute binary operations udiv/sdiv/urem/srem since
+    // they have different notation.
+#define INTEGER_VECTOR_FUNCTION(OP)                                \
+    for (unsigned i = 0; i < R.AggregateVal.size(); ++i)           \
+      R.AggregateVal[i].IntVal =                                   \
+        Src1.AggregateVal[i].IntVal.OP(Src2.AggregateVal[i].IntVal);
+
+    // Macros to execute binary operation 'OP' over floating point type TY
+    // (float or double) vectors
+#define FLOAT_VECTOR_FUNCTION(OP, TY)                              \
+      for (unsigned i = 0; i < R.AggregateVal.size(); ++i)         \
+        R.AggregateVal[i].TY =                                     \
+          Src1.AggregateVal[i].TY OP Src2.AggregateVal[i].TY;
+
+    // Macros to choose appropriate TY: float or double and run operation
+    // execution
+#define FLOAT_VECTOR_OP(OP) {                                      \
+  if (cast<VectorType>(Ty)->getElementType()->isFloatTy())         \
+    FLOAT_VECTOR_FUNCTION(OP, FloatVal)                            \
+  else {                                                           \
+    if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())      \
+      FLOAT_VECTOR_FUNCTION(OP, DoubleVal)                         \
+    else {                                                         \
+      dbgs() << "Unhandled type for OP instruction: " << *Ty << "\n"; \
+      llvm_unreachable(0);                                         \
+    }                                                              \
+  }                                                                \
+}
+
+    switch(I.getOpcode()){
+    default:
+      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+      llvm_unreachable(nullptr);
+      break;
+    case Instruction::Add:   INTEGER_VECTOR_OPERATION(+) break;
+    case Instruction::Sub:   INTEGER_VECTOR_OPERATION(-) break;
+    case Instruction::Mul:   INTEGER_VECTOR_OPERATION(*) break;
+    case Instruction::UDiv:  INTEGER_VECTOR_FUNCTION(udiv) break;
+    case Instruction::SDiv:  INTEGER_VECTOR_FUNCTION(sdiv) break;
+    case Instruction::URem:  INTEGER_VECTOR_FUNCTION(urem) break;
+    case Instruction::SRem:  INTEGER_VECTOR_FUNCTION(srem) break;
+    case Instruction::And:   INTEGER_VECTOR_OPERATION(&) break;
+    case Instruction::Or:    INTEGER_VECTOR_OPERATION(|) break;
+    case Instruction::Xor:   INTEGER_VECTOR_OPERATION(^) break;
+    case Instruction::FAdd:  FLOAT_VECTOR_OP(+) break;
+    case Instruction::FSub:  FLOAT_VECTOR_OP(-) break;
+    case Instruction::FMul:  FLOAT_VECTOR_OP(*) break;
+    case Instruction::FDiv:  FLOAT_VECTOR_OP(/) break;
+    case Instruction::FRem:
+      // FRem has no infix operator form, so it cannot use FLOAT_VECTOR_OP;
+      // it is expanded by hand with fmod on each lane.
+      if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
+        for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+          R.AggregateVal[i].FloatVal =
+          fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
+      else {
+        if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
+          for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
+            R.AggregateVal[i].DoubleVal =
+            fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
+        else {
+          dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
+          llvm_unreachable(nullptr);
+        }
+      }
+      break;
+    }
+  } else {
+    // Scalar path: integer ops use APInt arithmetic directly, FP ops are
+    // delegated to the executeF*Inst helpers which pick float vs double.
+    switch (I.getOpcode()) {
+    default:
+      dbgs() << "Don't know how to handle this binary operator!\n-->" << I;
+      llvm_unreachable(nullptr);
+      break;
+    case Instruction::Add:   R.IntVal = Src1.IntVal + Src2.IntVal; break;
+    case Instruction::Sub:   R.IntVal = Src1.IntVal - Src2.IntVal; break;
+    case Instruction::Mul:   R.IntVal = Src1.IntVal * Src2.IntVal; break;
+    case Instruction::FAdd:  executeFAddInst(R, Src1, Src2, Ty); break;
+    case Instruction::FSub:  executeFSubInst(R, Src1, Src2, Ty); break;
+    case Instruction::FMul:  executeFMulInst(R, Src1, Src2, Ty); break;
+    case Instruction::FDiv:  executeFDivInst(R, Src1, Src2, Ty); break;
+    case Instruction::FRem:  executeFRemInst(R, Src1, Src2, Ty); break;
+    case Instruction::UDiv:  R.IntVal = Src1.IntVal.udiv(Src2.IntVal); break;
+    case Instruction::SDiv:  R.IntVal = Src1.IntVal.sdiv(Src2.IntVal); break;
+    case Instruction::URem:  R.IntVal = Src1.IntVal.urem(Src2.IntVal); break;
+    case Instruction::SRem:  R.IntVal = Src1.IntVal.srem(Src2.IntVal); break;
+    case Instruction::And:   R.IntVal = Src1.IntVal & Src2.IntVal; break;
+    case Instruction::Or:    R.IntVal = Src1.IntVal | Src2.IntVal; break;
+    case Instruction::Xor:   R.IntVal = Src1.IntVal ^ Src2.IntVal; break;
+    }
+  }
+  SetValue(&I, R, SF);
+}
+
+// Evaluate a select: yield Src2 where the condition (Src1) is true and
+// Src3 where it is false. Vector selects apply the condition lane by lane.
+static GenericValue executeSelectInst(GenericValue Src1, GenericValue Src2,
+                                      GenericValue Src3, Type *Ty) {
+  GenericValue Res;
+  if (!Ty->isVectorTy()) {
+    Res = (Src1.IntVal == 0) ? Src3 : Src2;
+    return Res;
+  }
+  assert(Src1.AggregateVal.size() == Src2.AggregateVal.size());
+  assert(Src2.AggregateVal.size() == Src3.AggregateVal.size());
+  const size_t NumElts = Src1.AggregateVal.size();
+  Res.AggregateVal.resize(NumElts);
+  for (size_t Elt = 0; Elt != NumElts; ++Elt) {
+    const bool CondSet = Src1.AggregateVal[Elt].IntVal != 0;
+    Res.AggregateVal[Elt] =
+        CondSet ? Src2.AggregateVal[Elt] : Src3.AggregateVal[Elt];
+  }
+  return Res;
+}
+
+// Interpret a 'select' instruction in the current stack frame: evaluate
+// condition, true value, and false value, then store the chosen result.
+void Interpreter::visitSelectInst(SelectInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  Type *CondTy = I.getOperand(0)->getType();
+  GenericValue Cond = getOperandValue(I.getOperand(0), Frame);
+  GenericValue TrueVal = getOperandValue(I.getOperand(1), Frame);
+  GenericValue FalseVal = getOperandValue(I.getOperand(2), Frame);
+  SetValue(&I, executeSelectInst(Cond, TrueVal, FalseVal, CondTy), Frame);
+}
+
+//===----------------------------------------------------------------------===//
+// Terminator Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+// Terminate interpretation because the interpreted program called exit():
+// run registered atexit handlers, then exit the host process with the
+// (32-bit truncated) status from GV. The order below is load-bearing.
+void Interpreter::exitCalled(GenericValue GV) {
+  // runAtExitHandlers() assumes there are no stack frames, but
+  // if exit() was called, then it had a stack frame. Blow away
+  // the stack before interpreting atexit handlers.
+  ECStack.clear();
+  runAtExitHandlers();
+  exit(GV.IntVal.zextOrTrunc(32).getZExtValue());
+}
+
+/// Pop the last stack frame off of ECStack and then copy the result
+/// back into the result variable if we are not returning void. The
+/// result variable may be the ExitValue, or the Value of the calling
+/// CallInst if there was a previous stack frame. This method may
+/// invalidate any ECStack iterators you have. This method also takes
+/// care of switching to the normal destination BB, if we are returning
+/// from an invoke.
+///
+void Interpreter::popStackAndReturnValueToCaller(Type *RetTy,
+                                                 GenericValue Result) {
+  // Pop the current stack frame.
+  ECStack.pop_back();
+
+  if (ECStack.empty()) { // Finished main. Put result into exit code...
+    if (RetTy && !RetTy->isVoidTy()) { // Nonvoid return type?
+      ExitValue = Result; // Capture the exit value of the program
+    } else {
+      // Void return from the outermost frame: report a zeroed exit value.
+      memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+    }
+  } else {
+    // If we have a previous stack frame, and we have a previous call,
+    // fill in the return value...
+    ExecutionContext &CallingSF = ECStack.back();
+    if (CallingSF.Caller) {
+      // Save result...
+      if (!CallingSF.Caller->getType()->isVoidTy())
+        SetValue(CallingSF.Caller, Result, CallingSF);
+      // Returning from an invoke resumes at its normal destination block.
+      if (InvokeInst *II = dyn_cast<InvokeInst>(CallingSF.Caller))
+        SwitchToNewBasicBlock (II->getNormalDest (), CallingSF);
+      CallingSF.Caller = nullptr; // We returned from the call...
+    }
+  }
+}
+
+// Interpret 'ret': capture the returned value (if the instruction has one)
+// and unwind a single stack frame, handing the value to the caller.
+void Interpreter::visitReturnInst(ReturnInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue RetVal;
+  Type *RetTy = Type::getVoidTy(I.getContext());
+
+  // 'ret void' has no operand; otherwise evaluate the returned value.
+  if (Value *RV = I.getReturnValue()) {
+    RetTy = RV->getType();
+    RetVal = getOperandValue(RV, Frame);
+  }
+
+  popStackAndReturnValueToCaller(RetTy, RetVal);
+}
+
+// Reaching 'unreachable' is undefined behavior in the interpreted program;
+// abort interpretation with a fatal error rather than continuing.
+void Interpreter::visitUnreachableInst(UnreachableInst &I) {
+  report_fatal_error("Program executed an 'unreachable' instruction!");
+}
+
+// Interpret a conditional or unconditional branch: pick the successor
+// block and transfer control to it (running its PHI nodes on the way in).
+void Interpreter::visitBranchInst(BranchInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+
+  BasicBlock *Target = I.getSuccessor(0);
+  if (I.isConditional()) {
+    GenericValue CondVal = getOperandValue(I.getCondition(), Frame);
+    if (CondVal.IntVal == 0) // Condition false: take the second successor.
+      Target = I.getSuccessor(1);
+  }
+  SwitchToNewBasicBlock(Target, Frame);
+}
+
+// Interpret 'switch': compare the condition against each case value in
+// order and branch to the first match; fall back to the default block.
+void Interpreter::visitSwitchInst(SwitchInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  Value *Cond = I.getCondition();
+  Type *CondTy = Cond->getType();
+  GenericValue CondVal = getOperandValue(Cond, Frame);
+
+  BasicBlock *Target = I.getDefaultDest(); // Used when no case matches.
+  for (auto Case : I.cases()) {
+    GenericValue CaseVal = getOperandValue(Case.getCaseValue(), Frame);
+    if (executeICMP_EQ(CondVal, CaseVal, CondTy).IntVal != 0) {
+      Target = cast<BasicBlock>(Case.getCaseSuccessor());
+      break;
+    }
+  }
+  SwitchToNewBasicBlock(Target, Frame);
+}
+
+// Interpret 'indirectbr': the address operand evaluates to a block
+// address, which becomes the branch target.
+void Interpreter::visitIndirectBrInst(IndirectBrInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue AddrVal = getOperandValue(I.getAddress(), Frame);
+  SwitchToNewBasicBlock(static_cast<BasicBlock *>(GVTOP(AddrVal)), Frame);
+}
+
+
+// SwitchToNewBasicBlock - This method is used to jump to a new basic block.
+// This function handles the actual updating of block and instruction iterators
+// as well as execution of all of the PHI nodes in the destination block.
+//
+// This method does this because all of the PHI nodes must be executed
+// atomically, reading their inputs before any of the results are updated. Not
+// doing this can cause problems if the PHI nodes depend on other PHI nodes for
+// their inputs. If the input PHI node is updated before it is read, incorrect
+// results can happen. Thus we use a two phase approach.
+//
+void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
+  BasicBlock *PrevBB = SF.CurBB; // Remember where we came from...
+  SF.CurBB = Dest; // Update CurBB to branch destination
+  SF.CurInst = SF.CurBB->begin(); // Update new instruction ptr...
+
+  if (!isa<PHINode>(SF.CurInst)) return; // Nothing fancy to do
+
+  // Loop over all of the PHI nodes in the current block, reading their inputs.
+  // Phase one: read every incoming value before writing any of them.
+  std::vector<GenericValue> ResultValues;
+
+  for (; PHINode *PN = dyn_cast<PHINode>(SF.CurInst); ++SF.CurInst) {
+    // Search for the value corresponding to this previous bb...
+    int i = PN->getBasicBlockIndex(PrevBB);
+    assert(i != -1 && "PHINode doesn't contain entry for predecessor??");
+    Value *IncomingValue = PN->getIncomingValue(i);
+
+    // Save the incoming value for this PHI node...
+    ResultValues.push_back(getOperandValue(IncomingValue, SF));
+  }
+
+  // Now loop over all of the PHI nodes setting their values...
+  // Phase two: rewind to the block start and commit the saved values.
+  SF.CurInst = SF.CurBB->begin();
+  for (unsigned i = 0; isa<PHINode>(SF.CurInst); ++SF.CurInst, ++i) {
+    PHINode *PN = cast<PHINode>(SF.CurInst);
+    SetValue(PN, ResultValues[i], SF);
+  }
+}
+
+//===----------------------------------------------------------------------===//
+// Memory Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+// Interpret 'alloca': the interpreter models stack allocations with heap
+// blocks, recorded in the frame's Allocas list so they are released when
+// the frame is popped.
+void Interpreter::visitAllocaInst(AllocaInst &I) {
+  ExecutionContext &SF = ECStack.back();
+
+  Type *Ty = I.getAllocatedType();  // Type to be allocated
+
+  // Get the number of elements being allocated by the array...
+  unsigned NumElements =
+    getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
+
+  unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
+
+  // Avoid malloc-ing zero bytes, use max()...
+  // NOTE(review): NumElements * TypeSize is 32-bit arithmetic and could
+  // wrap for very large requests — confirm interpreted programs never
+  // allocate that much through a single alloca.
+  unsigned MemToAlloc = std::max(1U, NumElements * TypeSize);
+
+  // Allocate enough memory to hold the type...
+  void *Memory = safe_malloc(MemToAlloc);
+
+  LLVM_DEBUG(dbgs() << "Allocated Type: " << *Ty << " (" << TypeSize
+                    << " bytes) x " << NumElements << " (Total: " << MemToAlloc
+                    << ") at " << uintptr_t(Memory) << '\n');
+
+  GenericValue Result = PTOGV(Memory);
+  assert(Result.PointerVal && "Null pointer returned by malloc!");
+  SetValue(&I, Result, SF);
+
+  // Only genuine allocas are frame-owned; this guards against reuse of this
+  // visitor for other opcodes.
+  if (I.getOpcode() == Instruction::Alloca)
+    ECStack.back().Allocas.add(Memory);
+}
+
+// getElementOffset - The workhorse for getelementptr.
+//
+// Walks the gep type iterators accumulating a byte offset: struct indices
+// are looked up in the DataLayout's StructLayout, while sequential (array/
+// vector/pointer) indices multiply the index by the element's alloc size.
+// Only i32 and i64 index operands are supported.
+GenericValue Interpreter::executeGEPOperation(Value *Ptr, gep_type_iterator I,
+                                              gep_type_iterator E,
+                                              ExecutionContext &SF) {
+  assert(Ptr->getType()->isPointerTy() &&
+         "Cannot getElementOffset of a nonpointer type!");
+
+  uint64_t Total = 0;
+
+  for (; I != E; ++I) {
+    if (StructType *STy = I.getStructTypeOrNull()) {
+      const StructLayout *SLO = getDataLayout().getStructLayout(STy);
+
+      // Struct indices are always constant ints in well-formed IR.
+      const ConstantInt *CPU = cast<ConstantInt>(I.getOperand());
+      unsigned Index = unsigned(CPU->getZExtValue());
+
+      Total += SLO->getElementOffset(Index);
+    } else {
+      // Get the index number for the array... which must be long type...
+      GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
+
+      int64_t Idx;
+      unsigned BitWidth =
+        cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
+      if (BitWidth == 32)
+        // Sign-extend 32-bit indices so negative offsets work.
+        Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
+      else {
+        assert(BitWidth == 64 && "Invalid index type for getelementptr");
+        Idx = (int64_t)IdxGV.IntVal.getZExtValue();
+      }
+      Total += getDataLayout().getTypeAllocSize(I.getIndexedType()) * Idx;
+    }
+  }
+
+  GenericValue Result;
+  Result.PointerVal = ((char*)getOperandValue(Ptr, SF).PointerVal) + Total;
+  LLVM_DEBUG(dbgs() << "GEP Index " << Total << " bytes.\n");
+  return Result;
+}
+
+// Interpret 'getelementptr': compute the addressed location from the base
+// pointer plus the byte offset implied by the index list.
+void Interpreter::visitGetElementPtrInst(GetElementPtrInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue Addr = executeGEPOperation(
+      I.getPointerOperand(), gep_type_begin(I), gep_type_end(I), Frame);
+  SetValue(&I, Addr, Frame);
+}
+
+// Interpret 'load': read a value of the instruction's type from the
+// addressed memory and bind it as the instruction's result.
+void Interpreter::visitLoadInst(LoadInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue AddrVal = getOperandValue(I.getPointerOperand(), Frame);
+  GenericValue *Addr = static_cast<GenericValue *>(GVTOP(AddrVal));
+  GenericValue Loaded;
+  LoadValueFromMemory(Loaded, Addr, I.getType());
+  SetValue(&I, Loaded, Frame);
+  if (PrintVolatile && I.isVolatile())
+    dbgs() << "Volatile load " << I;
+}
+
+// Interpret 'store': write the evaluated value into the addressed memory.
+void Interpreter::visitStoreInst(StoreInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue StoredVal = getOperandValue(I.getOperand(0), Frame);
+  GenericValue AddrVal = getOperandValue(I.getPointerOperand(), Frame);
+  GenericValue *Addr = static_cast<GenericValue *>(GVTOP(AddrVal));
+  StoreValueToMemory(StoredVal, Addr, I.getOperand(0)->getType());
+  if (PrintVolatile && I.isVolatile())
+    dbgs() << "Volatile store: " << I;
+}
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Instruction Implementations
+//===----------------------------------------------------------------------===//
+
+// Interpret 'va_start': the interpreter's va_list is a (frame index,
+// vararg cursor) pair; initialize it to this frame with the cursor at 0.
+void Interpreter::visitVAStartInst(VAStartInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue VAList;
+  VAList.UIntPairVal.first = ECStack.size() - 1;
+  VAList.UIntPairVal.second = 0;
+  SetValue(&I, VAList, Frame);
+}
+
+// va_end needs no action here: the interpreter's va_list representation
+// (a frame-index/offset pair) holds no resources to release.
+void Interpreter::visitVAEndInst(VAEndInst &I) {
+  // va_end is a noop for the interpreter
+}
+
+// Interpret 'va_copy': duplicate the source va_list value as the result.
+void Interpreter::visitVACopyInst(VACopyInst &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue SrcList = getOperandValue(*I.arg_begin(), Frame);
+  SetValue(&I, SrcList, Frame);
+}
+
+// Handle an intrinsic call the interpreter has no native implementation
+// for: IntrinsicLowering rewrites the call into ordinary instructions in
+// place, then the frame's instruction cursor is rewound so the newly
+// inserted instructions are executed next.
+void Interpreter::visitIntrinsicInst(IntrinsicInst &I) {
+  ExecutionContext &SF = ECStack.back();
+
+  // If it is an unknown intrinsic function, use the intrinsic lowering
+  // class to transform it into hopefully tasty LLVM code.
+  //
+  // Remember our position relative to I, since lowering erases I and the
+  // iterator to it would otherwise dangle.
+  BasicBlock::iterator Me(&I);
+  BasicBlock *Parent = I.getParent();
+  bool atBegin(Parent->begin() == Me);
+  if (!atBegin)
+    --Me;
+  IL->LowerIntrinsicCall(&I);
+
+  // Restore the CurInst pointer to the first instruction newly inserted, if
+  // any.
+  if (atBegin) {
+    SF.CurInst = Parent->begin();
+  } else {
+    SF.CurInst = Me;
+    ++SF.CurInst;
+  }
+}
+
+// Interpret a call/invoke: evaluate all arguments, resolve the callee
+// operand (possibly a function pointer), and enter the callee.
+void Interpreter::visitCallBase(CallBase &I) {
+  ExecutionContext &Frame = ECStack.back();
+
+  Frame.Caller = &I; // Remember which instruction receives the result.
+  std::vector<GenericValue> Args;
+  Args.reserve(Frame.Caller->arg_size());
+  for (Value *Arg : Frame.Caller->args())
+    Args.push_back(getOperandValue(Arg, Frame));
+
+  // Direct and indirect calls are handled uniformly: the callee operand
+  // evaluates to a pointer that is reinterpreted as a Function*.
+  GenericValue Callee = getOperandValue(Frame.Caller->getCalledOperand(), Frame);
+  callFunction((Function *)GVTOP(Callee), Args);
+}
+
+// Clamp a shift amount so APInt never performs a shift >= the value's bit
+// width. LLVM IR leaves such shifts undefined; this implementation chooses
+// to mask the requested amount with (NextPowerOf2(width-1) - 1).
+static unsigned getShiftAmount(uint64_t orgShiftAmount,
+                               llvm::APInt valueToShift) {
+  const unsigned valueWidth = valueToShift.getBitWidth();
+  if (orgShiftAmount >= (uint64_t)valueWidth)
+    return (NextPowerOf2(valueWidth - 1) - 1) & orgShiftAmount;
+  return orgShiftAmount;
+}
+
+
+// Interpret 'shl' (elementwise for vectors), clamping oversized shift
+// amounts through getShiftAmount so APInt sees only defined shifts.
+void Interpreter::visitShl(BinaryOperator &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue LHS = getOperandValue(I.getOperand(0), Frame);
+  GenericValue RHS = getOperandValue(I.getOperand(1), Frame);
+  GenericValue Res;
+
+  if (!I.getType()->isVectorTy()) {
+    // Scalar shift.
+    llvm::APInt Val = LHS.IntVal;
+    uint64_t Amt = RHS.IntVal.getZExtValue();
+    Res.IntVal = Val.shl(getShiftAmount(Amt, Val));
+  } else {
+    const size_t NumElts = LHS.AggregateVal.size();
+    assert(NumElts == RHS.AggregateVal.size());
+    for (size_t Elt = 0; Elt != NumElts; ++Elt) {
+      llvm::APInt Val = LHS.AggregateVal[Elt].IntVal;
+      uint64_t Amt = RHS.AggregateVal[Elt].IntVal.getZExtValue();
+      GenericValue EltRes;
+      EltRes.IntVal = Val.shl(getShiftAmount(Amt, Val));
+      Res.AggregateVal.push_back(EltRes);
+    }
+  }
+
+  SetValue(&I, Res, Frame);
+}
+
+// Interpret 'lshr' (logical shift right, elementwise for vectors),
+// clamping oversized shift amounts through getShiftAmount.
+void Interpreter::visitLShr(BinaryOperator &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue LHS = getOperandValue(I.getOperand(0), Frame);
+  GenericValue RHS = getOperandValue(I.getOperand(1), Frame);
+  GenericValue Res;
+
+  if (!I.getType()->isVectorTy()) {
+    // Scalar shift.
+    llvm::APInt Val = LHS.IntVal;
+    uint64_t Amt = RHS.IntVal.getZExtValue();
+    Res.IntVal = Val.lshr(getShiftAmount(Amt, Val));
+  } else {
+    const size_t NumElts = LHS.AggregateVal.size();
+    assert(NumElts == RHS.AggregateVal.size());
+    for (size_t Elt = 0; Elt != NumElts; ++Elt) {
+      llvm::APInt Val = LHS.AggregateVal[Elt].IntVal;
+      uint64_t Amt = RHS.AggregateVal[Elt].IntVal.getZExtValue();
+      GenericValue EltRes;
+      EltRes.IntVal = Val.lshr(getShiftAmount(Amt, Val));
+      Res.AggregateVal.push_back(EltRes);
+    }
+  }
+
+  SetValue(&I, Res, Frame);
+}
+
+// Interpret 'ashr' (arithmetic shift right, elementwise for vectors),
+// clamping oversized shift amounts through getShiftAmount.
+void Interpreter::visitAShr(BinaryOperator &I) {
+  ExecutionContext &Frame = ECStack.back();
+  GenericValue LHS = getOperandValue(I.getOperand(0), Frame);
+  GenericValue RHS = getOperandValue(I.getOperand(1), Frame);
+  GenericValue Res;
+
+  if (!I.getType()->isVectorTy()) {
+    // Scalar shift.
+    llvm::APInt Val = LHS.IntVal;
+    uint64_t Amt = RHS.IntVal.getZExtValue();
+    Res.IntVal = Val.ashr(getShiftAmount(Amt, Val));
+  } else {
+    const size_t NumElts = LHS.AggregateVal.size();
+    assert(NumElts == RHS.AggregateVal.size());
+    for (size_t Elt = 0; Elt != NumElts; ++Elt) {
+      llvm::APInt Val = LHS.AggregateVal[Elt].IntVal;
+      uint64_t Amt = RHS.AggregateVal[Elt].IntVal.getZExtValue();
+      GenericValue EltRes;
+      EltRes.IntVal = Val.ashr(getShiftAmount(Amt, Val));
+      Res.AggregateVal.push_back(EltRes);
+    }
+  }
+
+  SetValue(&I, Res, Frame);
+}
+
+// Interpret 'trunc' to a narrower integer type (elementwise for vectors;
+// source and destination vectors have the same element count).
+GenericValue Interpreter::executeTruncInst(Value *SrcVal, Type *DstTy,
+                                           ExecutionContext &SF) {
+  GenericValue Src = getOperandValue(SrcVal, SF);
+  GenericValue Dest;
+  if (!SrcVal->getType()->isVectorTy()) {
+    Dest.IntVal = Src.IntVal.trunc(cast<IntegerType>(DstTy)->getBitWidth());
+    return Dest;
+  }
+  const unsigned NewWidth =
+      cast<IntegerType>(DstTy->getScalarType())->getBitWidth();
+  const size_t NumElts = Src.AggregateVal.size();
+  Dest.AggregateVal.resize(NumElts);
+  for (size_t Elt = 0; Elt != NumElts; ++Elt)
+    Dest.AggregateVal[Elt].IntVal =
+        Src.AggregateVal[Elt].IntVal.trunc(NewWidth);
+  return Dest;
+}
+
+// Interpret 'sext' to a wider integer type (elementwise for vectors;
+// source and destination vectors have the same element count).
+GenericValue Interpreter::executeSExtInst(Value *SrcVal, Type *DstTy,
+                                          ExecutionContext &SF) {
+  GenericValue Src = getOperandValue(SrcVal, SF);
+  GenericValue Dest;
+  if (!SrcVal->getType()->isVectorTy()) {
+    Dest.IntVal = Src.IntVal.sext(cast<IntegerType>(DstTy)->getBitWidth());
+    return Dest;
+  }
+  const unsigned NewWidth =
+      cast<IntegerType>(DstTy->getScalarType())->getBitWidth();
+  const size_t NumElts = Src.AggregateVal.size();
+  Dest.AggregateVal.resize(NumElts);
+  for (size_t Elt = 0; Elt != NumElts; ++Elt)
+    Dest.AggregateVal[Elt].IntVal =
+        Src.AggregateVal[Elt].IntVal.sext(NewWidth);
+  return Dest;
+}
+
+// Interpret 'zext' to a wider integer type (elementwise for vectors;
+// source and destination vectors have the same element count).
+GenericValue Interpreter::executeZExtInst(Value *SrcVal, Type *DstTy,
+                                          ExecutionContext &SF) {
+  GenericValue Src = getOperandValue(SrcVal, SF);
+  GenericValue Dest;
+  if (!SrcVal->getType()->isVectorTy()) {
+    Dest.IntVal = Src.IntVal.zext(cast<IntegerType>(DstTy)->getBitWidth());
+    return Dest;
+  }
+  const unsigned NewWidth =
+      cast<IntegerType>(DstTy->getScalarType())->getBitWidth();
+  const size_t NumElts = Src.AggregateVal.size();
+  Dest.AggregateVal.resize(NumElts);
+  for (size_t Elt = 0; Elt != NumElts; ++Elt)
+    Dest.AggregateVal[Elt].IntVal =
+        Src.AggregateVal[Elt].IntVal.zext(NewWidth);
+  return Dest;
+}
+
+// Interpret 'fptrunc': narrow double to float (elementwise for vectors).
+GenericValue Interpreter::executeFPTruncInst(Value *SrcVal, Type *DstTy,
+                                             ExecutionContext &SF) {
+  GenericValue Src = getOperandValue(SrcVal, SF);
+  GenericValue Dest;
+
+  if (!isa<VectorType>(SrcVal->getType())) {
+    assert(SrcVal->getType()->isDoubleTy() && DstTy->isFloatTy() &&
+           "Invalid FPTrunc instruction");
+    Dest.FloatVal = (float)Src.DoubleVal;
+    return Dest;
+  }
+
+  assert(SrcVal->getType()->getScalarType()->isDoubleTy() &&
+         DstTy->getScalarType()->isFloatTy() &&
+         "Invalid FPTrunc instruction");
+  // Source and destination vectors have the same element count.
+  const size_t NumElts = Src.AggregateVal.size();
+  Dest.AggregateVal.resize(NumElts);
+  for (size_t Elt = 0; Elt != NumElts; ++Elt)
+    Dest.AggregateVal[Elt].FloatVal = (float)Src.AggregateVal[Elt].DoubleVal;
+  return Dest;
+}
+
+// Interpret 'fpext': widen float to double (elementwise for vectors).
+GenericValue Interpreter::executeFPExtInst(Value *SrcVal, Type *DstTy,
+                                           ExecutionContext &SF) {
+  GenericValue Src = getOperandValue(SrcVal, SF);
+  GenericValue Dest;
+
+  if (!isa<VectorType>(SrcVal->getType())) {
+    assert(SrcVal->getType()->isFloatTy() && DstTy->isDoubleTy() &&
+           "Invalid FPExt instruction");
+    Dest.DoubleVal = (double)Src.FloatVal;
+    return Dest;
+  }
+
+  assert(SrcVal->getType()->getScalarType()->isFloatTy() &&
+         DstTy->getScalarType()->isDoubleTy() && "Invalid FPExt instruction");
+  // Source and destination vectors have the same element count.
+  const size_t NumElts = Src.AggregateVal.size();
+  Dest.AggregateVal.resize(NumElts);
+  for (size_t Elt = 0; Elt != NumElts; ++Elt)
+    Dest.AggregateVal[Elt].DoubleVal = (double)Src.AggregateVal[Elt].FloatVal;
+  return Dest;
+}
+
+// Interpret 'fptoui' (elementwise for vectors): round the source float or
+// double to an integer of the destination width.
+// NOTE(review): this uses the same Round*ToAPInt helpers as the signed
+// conversion below — confirm that is the intended rounding for unsigned
+// destinations (out-of-range values are undefined in LLVM IR anyway).
+GenericValue Interpreter::executeFPToUIInst(Value *SrcVal, Type *DstTy,
+                                            ExecutionContext &SF) {
+  Type *SrcTy = SrcVal->getType();
+  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+  if (isa<VectorType>(SrcTy)) {
+    Type *DstVecTy = DstTy->getScalarType();
+    Type *SrcVecTy = SrcTy->getScalarType();
+    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+    unsigned size = Src.AggregateVal.size();
+    // the sizes of src and dst vectors must be equal.
+    Dest.AggregateVal.resize(size);
+
+    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
+      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToUI instruction");
+      for (unsigned i = 0; i < size; i++)
+        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
+            Src.AggregateVal[i].FloatVal, DBitWidth);
+    } else {
+      for (unsigned i = 0; i < size; i++)
+        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
+            Src.AggregateVal[i].DoubleVal, DBitWidth);
+    }
+  } else {
+    // scalar
+    uint32_t DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+    assert(SrcTy->isFloatingPointTy() && "Invalid FPToUI instruction");
+
+    if (SrcTy->getTypeID() == Type::FloatTyID)
+      Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+    else {
+      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+    }
+  }
+
+  return Dest;
+}
+
+// Interpret 'fptosi' (elementwise for vectors): round the source float or
+// double to a signed integer of the destination width.
+GenericValue Interpreter::executeFPToSIInst(Value *SrcVal, Type *DstTy,
+                                            ExecutionContext &SF) {
+  Type *SrcTy = SrcVal->getType();
+  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+  if (isa<VectorType>(SrcTy)) {
+    Type *DstVecTy = DstTy->getScalarType();
+    Type *SrcVecTy = SrcTy->getScalarType();
+    uint32_t DBitWidth = cast<IntegerType>(DstVecTy)->getBitWidth();
+    unsigned size = Src.AggregateVal.size();
+    // the sizes of src and dst vectors must be equal
+    Dest.AggregateVal.resize(size);
+
+    if (SrcVecTy->getTypeID() == Type::FloatTyID) {
+      assert(SrcVecTy->isFloatingPointTy() && "Invalid FPToSI instruction");
+      for (unsigned i = 0; i < size; i++)
+        Dest.AggregateVal[i].IntVal = APIntOps::RoundFloatToAPInt(
+            Src.AggregateVal[i].FloatVal, DBitWidth);
+    } else {
+      for (unsigned i = 0; i < size; i++)
+        Dest.AggregateVal[i].IntVal = APIntOps::RoundDoubleToAPInt(
+            Src.AggregateVal[i].DoubleVal, DBitWidth);
+    }
+  } else {
+    // scalar
+    unsigned DBitWidth = cast<IntegerType>(DstTy)->getBitWidth();
+    assert(SrcTy->isFloatingPointTy() && "Invalid FPToSI instruction");
+
+    if (SrcTy->getTypeID() == Type::FloatTyID)
+      Dest.IntVal = APIntOps::RoundFloatToAPInt(Src.FloatVal, DBitWidth);
+    else {
+      Dest.IntVal = APIntOps::RoundDoubleToAPInt(Src.DoubleVal, DBitWidth);
+    }
+  }
+  return Dest;
+}
+
+// Interpret 'uitofp' (elementwise for vectors): convert an unsigned
+// integer value to the destination float or double type.
+GenericValue Interpreter::executeUIToFPInst(Value *SrcVal, Type *DstTy,
+                                            ExecutionContext &SF) {
+  GenericValue Src = getOperandValue(SrcVal, SF);
+  GenericValue Dest;
+
+  if (!isa<VectorType>(SrcVal->getType())) {
+    // Scalar conversion.
+    assert(DstTy->isFloatingPointTy() && "Invalid UIToFP instruction");
+    if (DstTy->getTypeID() == Type::FloatTyID)
+      Dest.FloatVal = APIntOps::RoundAPIntToFloat(Src.IntVal);
+    else
+      Dest.DoubleVal = APIntOps::RoundAPIntToDouble(Src.IntVal);
+    return Dest;
+  }
+
+  // Vector conversion; source and destination have equal element counts.
+  Type *DstEltTy = DstTy->getScalarType();
+  const size_t NumElts = Src.AggregateVal.size();
+  Dest.AggregateVal.resize(NumElts);
+  if (DstEltTy->getTypeID() == Type::FloatTyID) {
+    for (size_t Elt = 0; Elt != NumElts; ++Elt)
+      Dest.AggregateVal[Elt].FloatVal =
+          APIntOps::RoundAPIntToFloat(Src.AggregateVal[Elt].IntVal);
+  } else {
+    for (size_t Elt = 0; Elt != NumElts; ++Elt)
+      Dest.AggregateVal[Elt].DoubleVal =
+          APIntOps::RoundAPIntToDouble(Src.AggregateVal[Elt].IntVal);
+  }
+  return Dest;
+}
+
+// Interpret 'sitofp' (elementwise for vectors): convert a signed integer
+// value to the destination float or double type.
+GenericValue Interpreter::executeSIToFPInst(Value *SrcVal, Type *DstTy,
+                                            ExecutionContext &SF) {
+  GenericValue Src = getOperandValue(SrcVal, SF);
+  GenericValue Dest;
+
+  if (!isa<VectorType>(SrcVal->getType())) {
+    // Scalar conversion.
+    assert(DstTy->isFloatingPointTy() && "Invalid SIToFP instruction");
+    if (DstTy->getTypeID() == Type::FloatTyID)
+      Dest.FloatVal = APIntOps::RoundSignedAPIntToFloat(Src.IntVal);
+    else
+      Dest.DoubleVal = APIntOps::RoundSignedAPIntToDouble(Src.IntVal);
+    return Dest;
+  }
+
+  // Vector conversion; source and destination have equal element counts.
+  Type *DstEltTy = DstTy->getScalarType();
+  const size_t NumElts = Src.AggregateVal.size();
+  Dest.AggregateVal.resize(NumElts);
+  if (DstEltTy->getTypeID() == Type::FloatTyID) {
+    for (size_t Elt = 0; Elt != NumElts; ++Elt)
+      Dest.AggregateVal[Elt].FloatVal =
+          APIntOps::RoundSignedAPIntToFloat(Src.AggregateVal[Elt].IntVal);
+  } else {
+    for (size_t Elt = 0; Elt != NumElts; ++Elt)
+      Dest.AggregateVal[Elt].DoubleVal =
+          APIntOps::RoundSignedAPIntToDouble(Src.AggregateVal[Elt].IntVal);
+  }
+  return Dest;
+}
+
+// Interpret 'ptrtoint': reinterpret the pointer's address as an integer
+// of the destination width.
+GenericValue Interpreter::executePtrToIntInst(Value *SrcVal, Type *DstTy,
+                                              ExecutionContext &SF) {
+  assert(SrcVal->getType()->isPointerTy() && "Invalid PtrToInt instruction");
+  GenericValue Src = getOperandValue(SrcVal, SF);
+  GenericValue Dest;
+  Dest.IntVal = APInt(cast<IntegerType>(DstTy)->getBitWidth(),
+                      (intptr_t)Src.PointerVal);
+  return Dest;
+}
+
+// Interpret 'inttoptr': resize the integer operand to the target's pointer
+// width and reinterpret it as an address.
+GenericValue Interpreter::executeIntToPtrInst(Value *SrcVal, Type *DstTy,
+                                              ExecutionContext &SF) {
+  GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+  // Fixed: the assert message previously said "PtrToInt" (copy-paste from
+  // the handler above), which mislabeled failures in this IntToPtr path.
+  assert(DstTy->isPointerTy() && "Invalid IntToPtr instruction");
+
+  // Zero-extend or truncate the value to the target's pointer width.
+  uint32_t PtrSize = getDataLayout().getPointerSizeInBits();
+  if (PtrSize != Src.IntVal.getBitWidth())
+    Src.IntVal = Src.IntVal.zextOrTrunc(PtrSize);
+
+  Dest.PointerVal = PointerTy(intptr_t(Src.IntVal.getZExtValue()));
+  return Dest;
+}
+
+GenericValue Interpreter::executeBitCastInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF) {
+
+ // This instruction supports bitwise conversion of vectors to integers and
+ // to vectors of other types (as long as they have the same size)
+ Type *SrcTy = SrcVal->getType();
+ GenericValue Dest, Src = getOperandValue(SrcVal, SF);
+
+ if (isa<VectorType>(SrcTy) || isa<VectorType>(DstTy)) {
+ // vector src bitcast to vector dst or vector src bitcast to scalar dst or
+ // scalar src bitcast to vector dst
+ bool isLittleEndian = getDataLayout().isLittleEndian();
+ GenericValue TempDst, TempSrc, SrcVec;
+ Type *SrcElemTy;
+ Type *DstElemTy;
+ unsigned SrcBitSize;
+ unsigned DstBitSize;
+ unsigned SrcNum;
+ unsigned DstNum;
+
+ if (isa<VectorType>(SrcTy)) {
+ SrcElemTy = SrcTy->getScalarType();
+ SrcBitSize = SrcTy->getScalarSizeInBits();
+ SrcNum = Src.AggregateVal.size();
+ SrcVec = Src;
+ } else {
+ // if src is scalar value, make it vector <1 x type>
+ SrcElemTy = SrcTy;
+ SrcBitSize = SrcTy->getPrimitiveSizeInBits();
+ SrcNum = 1;
+ SrcVec.AggregateVal.push_back(Src);
+ }
+
+ if (isa<VectorType>(DstTy)) {
+ DstElemTy = DstTy->getScalarType();
+ DstBitSize = DstTy->getScalarSizeInBits();
+ DstNum = (SrcNum * SrcBitSize) / DstBitSize;
+ } else {
+ DstElemTy = DstTy;
+ DstBitSize = DstTy->getPrimitiveSizeInBits();
+ DstNum = 1;
+ }
+
+ if (SrcNum * SrcBitSize != DstNum * DstBitSize)
+ llvm_unreachable("Invalid BitCast");
+
+ // If src is floating point, cast to integer first.
+ TempSrc.AggregateVal.resize(SrcNum);
+ if (SrcElemTy->isFloatTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal =
+ APInt::floatToBits(SrcVec.AggregateVal[i].FloatVal);
+
+ } else if (SrcElemTy->isDoubleTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal =
+ APInt::doubleToBits(SrcVec.AggregateVal[i].DoubleVal);
+ } else if (SrcElemTy->isIntegerTy()) {
+ for (unsigned i = 0; i < SrcNum; i++)
+ TempSrc.AggregateVal[i].IntVal = SrcVec.AggregateVal[i].IntVal;
+ } else {
+ // Pointers are not allowed as the element type of vector.
+ llvm_unreachable("Invalid Bitcast");
+ }
+
+ // now TempSrc is integer type vector
+ if (DstNum < SrcNum) {
+ // Example: bitcast <4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>
+ unsigned Ratio = SrcNum / DstNum;
+ unsigned SrcElt = 0;
+ for (unsigned i = 0; i < DstNum; i++) {
+ GenericValue Elt;
+ Elt.IntVal = 0;
+ Elt.IntVal = Elt.IntVal.zext(DstBitSize);
+ unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize * (Ratio - 1);
+ for (unsigned j = 0; j < Ratio; j++) {
+ APInt Tmp;
+ Tmp = Tmp.zext(SrcBitSize);
+ Tmp = TempSrc.AggregateVal[SrcElt++].IntVal;
+ Tmp = Tmp.zext(DstBitSize);
+ Tmp <<= ShiftAmt;
+ ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
+ Elt.IntVal |= Tmp;
+ }
+ TempDst.AggregateVal.push_back(Elt);
+ }
+ } else {
+ // Example: bitcast <2 x i64> <i64 0, i64 1> to <4 x i32>
+ unsigned Ratio = DstNum / SrcNum;
+ for (unsigned i = 0; i < SrcNum; i++) {
+ unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize * (Ratio - 1);
+ for (unsigned j = 0; j < Ratio; j++) {
+ GenericValue Elt;
+ Elt.IntVal = Elt.IntVal.zext(SrcBitSize);
+ Elt.IntVal = TempSrc.AggregateVal[i].IntVal;
+ Elt.IntVal.lshrInPlace(ShiftAmt);
+ // it could be DstBitSize == SrcBitSize, so check it
+ if (DstBitSize < SrcBitSize)
+ Elt.IntVal = Elt.IntVal.trunc(DstBitSize);
+ ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
+ TempDst.AggregateVal.push_back(Elt);
+ }
+ }
+ }
+
+ // convert result from integer to specified type
+ if (isa<VectorType>(DstTy)) {
+ if (DstElemTy->isDoubleTy()) {
+ Dest.AggregateVal.resize(DstNum);
+ for (unsigned i = 0; i < DstNum; i++)
+ Dest.AggregateVal[i].DoubleVal =
+ TempDst.AggregateVal[i].IntVal.bitsToDouble();
+ } else if (DstElemTy->isFloatTy()) {
+ Dest.AggregateVal.resize(DstNum);
+ for (unsigned i = 0; i < DstNum; i++)
+ Dest.AggregateVal[i].FloatVal =
+ TempDst.AggregateVal[i].IntVal.bitsToFloat();
+ } else {
+ Dest = TempDst;
+ }
+ } else {
+ if (DstElemTy->isDoubleTy())
+ Dest.DoubleVal = TempDst.AggregateVal[0].IntVal.bitsToDouble();
+ else if (DstElemTy->isFloatTy()) {
+ Dest.FloatVal = TempDst.AggregateVal[0].IntVal.bitsToFloat();
+ } else {
+ Dest.IntVal = TempDst.AggregateVal[0].IntVal;
+ }
+ }
+ } else { // if (isa<VectorType>(SrcTy)) || isa<VectorType>(DstTy))
+
+ // scalar src bitcast to scalar dst
+ if (DstTy->isPointerTy()) {
+ assert(SrcTy->isPointerTy() && "Invalid BitCast");
+ Dest.PointerVal = Src.PointerVal;
+ } else if (DstTy->isIntegerTy()) {
+ if (SrcTy->isFloatTy())
+ Dest.IntVal = APInt::floatToBits(Src.FloatVal);
+ else if (SrcTy->isDoubleTy()) {
+ Dest.IntVal = APInt::doubleToBits(Src.DoubleVal);
+ } else if (SrcTy->isIntegerTy()) {
+ Dest.IntVal = Src.IntVal;
+ } else {
+ llvm_unreachable("Invalid BitCast");
+ }
+ } else if (DstTy->isFloatTy()) {
+ if (SrcTy->isIntegerTy())
+ Dest.FloatVal = Src.IntVal.bitsToFloat();
+ else {
+ Dest.FloatVal = Src.FloatVal;
+ }
+ } else if (DstTy->isDoubleTy()) {
+ if (SrcTy->isIntegerTy())
+ Dest.DoubleVal = Src.IntVal.bitsToDouble();
+ else {
+ Dest.DoubleVal = Src.DoubleVal;
+ }
+ } else {
+ llvm_unreachable("Invalid Bitcast");
+ }
+ }
+
+ return Dest;
+}
+
+// Cast-instruction visitors. Each one reads the current (innermost) stack
+// frame, delegates the conversion to the matching execute*Inst helper, and
+// records the result for instruction I in that frame via SetValue.
+void Interpreter::visitTruncInst(TruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitSExtInst(SExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeSExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitZExtInst(ZExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeZExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPTruncInst(FPTruncInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPTruncInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPExtInst(FPExtInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPExtInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitUIToFPInst(UIToFPInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeUIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitSIToFPInst(SIToFPInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeSIToFPInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPToUIInst(FPToUIInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPToUIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitFPToSIInst(FPToSIInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeFPToSIInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitPtrToIntInst(PtrToIntInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executePtrToIntInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitIntToPtrInst(IntToPtrInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeIntToPtrInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+void Interpreter::visitBitCastInst(BitCastInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ SetValue(&I, executeBitCastInst(I.getOperand(0), I.getType(), SF), SF);
+}
+
+// Copies Src's payload member for the named first-class type into Dest.
+#define IMPLEMENT_VAARG(TY) \
+ case Type::TY##TyID: Dest.TY##Val = Src.TY##Val; break
+
+// Executes a va_arg instruction: fetches the next variadic argument that was
+// stashed on the owning frame's VarArgs list.
+void Interpreter::visitVAArgInst(VAArgInst &I) {
+ ExecutionContext &SF = ECStack.back();
+
+ // Get the incoming valist parameter. LLI treats the valist as a
+ // (ec-stack-depth var-arg-index) pair.
+ GenericValue VAList = getOperandValue(I.getOperand(0), SF);
+ GenericValue Dest;
+ GenericValue Src = ECStack[VAList.UIntPairVal.first]
+ .VarArgs[VAList.UIntPairVal.second];
+ Type *Ty = I.getType();
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ Dest.IntVal = Src.IntVal;
+ break;
+ IMPLEMENT_VAARG(Pointer);
+ IMPLEMENT_VAARG(Float);
+ IMPLEMENT_VAARG(Double);
+ default:
+ dbgs() << "Unhandled dest type for vaarg instruction: " << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ }
+
+ // Set the Value of this Instruction.
+ SetValue(&I, Dest, SF);
+
+ // Move the pointer to the next vararg.
+ // NOTE(review): VAList is a local copy of the operand's GenericValue, so
+ // this increment is not written back anywhere visible in this block --
+ // confirm against upstream whether it is intentionally a no-op.
+ ++VAList.UIntPairVal.second;
+}
+
+// Executes extractelement: reads element Src2 (index) out of vector Src1.
+// Only integer/float/double element types are handled.
+void Interpreter::visitExtractElementInst(ExtractElementInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+
+ Type *Ty = I.getType();
+ const unsigned indx = unsigned(Src2.IntVal.getZExtValue());
+
+ if(Src1.AggregateVal.size() > indx) {
+ switch (Ty->getTypeID()) {
+ default:
+ dbgs() << "Unhandled destination type for extractelement instruction: "
+ << *Ty << "\n";
+ llvm_unreachable(nullptr);
+ break;
+ case Type::IntegerTyID:
+ Dest.IntVal = Src1.AggregateVal[indx].IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.FloatVal = Src1.AggregateVal[indx].FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = Src1.AggregateVal[indx].DoubleVal;
+ break;
+ }
+ } else {
+ // Out-of-range index: only logs; Dest stays default-initialized and is
+ // still stored as the instruction's result below.
+ dbgs() << "Invalid index in extractelement instruction\n";
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+// Executes insertelement: copies vector Src1, overwrites element Src3 (index)
+// with scalar Src2, and stores the resulting vector.
+void Interpreter::visitInsertElementInst(InsertElementInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ VectorType *Ty = cast<VectorType>(I.getType());
+
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Src3 = getOperandValue(I.getOperand(2), SF);
+ GenericValue Dest;
+
+ Type *TyContained = Ty->getElementType();
+
+ const unsigned indx = unsigned(Src3.IntVal.getZExtValue());
+ Dest.AggregateVal = Src1.AggregateVal;
+
+ if(Src1.AggregateVal.size() <= indx)
+ llvm_unreachable("Invalid index in insertelement instruction");
+ switch (TyContained->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ case Type::IntegerTyID:
+ Dest.AggregateVal[indx].IntVal = Src2.IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.AggregateVal[indx].FloatVal = Src2.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.AggregateVal[indx].DoubleVal = Src2.DoubleVal;
+ break;
+ }
+ SetValue(&I, Dest, SF);
+}
+
+// Executes shufflevector: builds a result of getShuffleMask().size() elements,
+// where mask value j selects Src1[j] if j < |Src1|, else Src2[j - |Src1|].
+void Interpreter::visitShuffleVectorInst(ShuffleVectorInst &I){
+ ExecutionContext &SF = ECStack.back();
+
+ VectorType *Ty = cast<VectorType>(I.getType());
+
+ GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest;
+
+ // There is no need to check types of src1 and src2, because the compiled
+ // bytecode can't contain different types for src1 and src2 for a
+ // shufflevector instruction.
+
+ Type *TyContained = Ty->getElementType();
+ unsigned src1Size = (unsigned)Src1.AggregateVal.size();
+ unsigned src2Size = (unsigned)Src2.AggregateVal.size();
+ unsigned src3Size = I.getShuffleMask().size();
+
+ Dest.AggregateVal.resize(src3Size);
+
+ switch (TyContained->getTypeID()) {
+ default:
+ // NOTE(review): message says "insertelement" but this is shufflevector;
+ // the text matches upstream, so it is left untouched here.
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ break;
+ case Type::IntegerTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ // getMaskValue returns -1 for an undef mask lane; std::max clamps that
+ // to element 0 of the first operand.
+ unsigned j = std::max(0, I.getMaskValue(i));
+ if(j < src1Size)
+ Dest.AggregateVal[i].IntVal = Src1.AggregateVal[j].IntVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].IntVal = Src2.AggregateVal[j-src1Size].IntVal;
+ else
+ // The selector may not be greater than sum of lengths of first and
+ // second operands and llasm should not allow situation like
+ // %tmp = shufflevector <2 x i32> <i32 3, i32 4>, <2 x i32> undef,
+ // <2 x i32> < i32 0, i32 5 >,
+ // where i32 5 is invalid, but let it be additional check here:
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ case Type::FloatTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ unsigned j = std::max(0, I.getMaskValue(i));
+ if(j < src1Size)
+ Dest.AggregateVal[i].FloatVal = Src1.AggregateVal[j].FloatVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].FloatVal = Src2.AggregateVal[j-src1Size].FloatVal;
+ else
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ case Type::DoubleTyID:
+ for( unsigned i=0; i<src3Size; i++) {
+ unsigned j = std::max(0, I.getMaskValue(i));
+ if(j < src1Size)
+ Dest.AggregateVal[i].DoubleVal = Src1.AggregateVal[j].DoubleVal;
+ else if(j < src1Size + src2Size)
+ Dest.AggregateVal[i].DoubleVal =
+ Src2.AggregateVal[j-src1Size].DoubleVal;
+ else
+ llvm_unreachable("Invalid mask in shufflevector instruction");
+ }
+ break;
+ }
+ SetValue(&I, Dest, SF);
+}
+
+// Executes extractvalue: walks the index chain down into the aggregate operand
+// and copies out the addressed leaf (or sub-aggregate) value.
+void Interpreter::visitExtractValueInst(ExtractValueInst &I) {
+ ExecutionContext &SF = ECStack.back();
+ Value *Agg = I.getAggregateOperand();
+ GenericValue Dest;
+ GenericValue Src = getOperandValue(Agg, SF);
+
+ ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
+ unsigned Num = I.getNumIndices();
+ GenericValue *pSrc = &Src;
+
+ // Descend one AggregateVal level per index.
+ for (unsigned i = 0 ; i < Num; ++i) {
+ pSrc = &pSrc->AggregateVal[*IdxBegin];
+ ++IdxBegin;
+ }
+
+ Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
+ switch (IndexedType->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for extractelement instruction");
+ break;
+ case Type::IntegerTyID:
+ Dest.IntVal = pSrc->IntVal;
+ break;
+ case Type::FloatTyID:
+ Dest.FloatVal = pSrc->FloatVal;
+ break;
+ case Type::DoubleTyID:
+ Dest.DoubleVal = pSrc->DoubleVal;
+ break;
+ case Type::ArrayTyID:
+ case Type::StructTyID:
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID:
+ Dest.AggregateVal = pSrc->AggregateVal;
+ break;
+ case Type::PointerTyID:
+ Dest.PointerVal = pSrc->PointerVal;
+ break;
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+// Executes insertvalue: copies the aggregate operand, then overwrites the
+// member addressed by the index chain with Src2.
+void Interpreter::visitInsertValueInst(InsertValueInst &I) {
+
+ ExecutionContext &SF = ECStack.back();
+ Value *Agg = I.getAggregateOperand();
+
+ GenericValue Src1 = getOperandValue(Agg, SF);
+ GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
+ GenericValue Dest = Src1; // Dest is a slightly changed Src1
+
+ ExtractValueInst::idx_iterator IdxBegin = I.idx_begin();
+ unsigned Num = I.getNumIndices();
+
+ // Descend one AggregateVal level per index.
+ GenericValue *pDest = &Dest;
+ for (unsigned i = 0 ; i < Num; ++i) {
+ pDest = &pDest->AggregateVal[*IdxBegin];
+ ++IdxBegin;
+ }
+ // pDest points to the target value in the Dest now
+
+ Type *IndexedType = ExtractValueInst::getIndexedType(Agg->getType(), I.getIndices());
+
+ switch (IndexedType->getTypeID()) {
+ default:
+ llvm_unreachable("Unhandled dest type for insertelement instruction");
+ break;
+ case Type::IntegerTyID:
+ pDest->IntVal = Src2.IntVal;
+ break;
+ case Type::FloatTyID:
+ pDest->FloatVal = Src2.FloatVal;
+ break;
+ case Type::DoubleTyID:
+ pDest->DoubleVal = Src2.DoubleVal;
+ break;
+ case Type::ArrayTyID:
+ case Type::StructTyID:
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID:
+ pDest->AggregateVal = Src2.AggregateVal;
+ break;
+ case Type::PointerTyID:
+ pDest->PointerVal = Src2.PointerVal;
+ break;
+ }
+
+ SetValue(&I, Dest, SF);
+}
+
+// Evaluates a ConstantExpr by dispatching to the same execute* helpers used
+// for the corresponding instructions. Cast/GEP/cmp/select opcodes return
+// directly; all remaining (binary) opcodes fall through to the second switch.
+GenericValue Interpreter::getConstantExprValue (ConstantExpr *CE,
+ ExecutionContext &SF) {
+ switch (CE->getOpcode()) {
+ case Instruction::Trunc:
+ return executeTruncInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::ZExt:
+ return executeZExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::SExt:
+ return executeSExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPTrunc:
+ return executeFPTruncInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPExt:
+ return executeFPExtInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::UIToFP:
+ return executeUIToFPInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::SIToFP:
+ return executeSIToFPInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPToUI:
+ return executeFPToUIInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::FPToSI:
+ return executeFPToSIInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::PtrToInt:
+ return executePtrToIntInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::IntToPtr:
+ return executeIntToPtrInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::BitCast:
+ return executeBitCastInst(CE->getOperand(0), CE->getType(), SF);
+ case Instruction::GetElementPtr:
+ return executeGEPOperation(CE->getOperand(0), gep_type_begin(CE),
+ gep_type_end(CE), SF);
+ case Instruction::FCmp:
+ case Instruction::ICmp:
+ return executeCmpInst(CE->getPredicate(),
+ getOperandValue(CE->getOperand(0), SF),
+ getOperandValue(CE->getOperand(1), SF),
+ CE->getOperand(0)->getType());
+ case Instruction::Select:
+ return executeSelectInst(getOperandValue(CE->getOperand(0), SF),
+ getOperandValue(CE->getOperand(1), SF),
+ getOperandValue(CE->getOperand(2), SF),
+ CE->getOperand(0)->getType());
+ default :
+ break;
+ }
+
+ // The cases below here require a GenericValue parameter for the result
+ // so we initialize one, compute it and then return it.
+ GenericValue Op0 = getOperandValue(CE->getOperand(0), SF);
+ GenericValue Op1 = getOperandValue(CE->getOperand(1), SF);
+ GenericValue Dest;
+ Type * Ty = CE->getOperand(0)->getType();
+ switch (CE->getOpcode()) {
+ case Instruction::Add: Dest.IntVal = Op0.IntVal + Op1.IntVal; break;
+ case Instruction::Sub: Dest.IntVal = Op0.IntVal - Op1.IntVal; break;
+ case Instruction::Mul: Dest.IntVal = Op0.IntVal * Op1.IntVal; break;
+ case Instruction::FAdd: executeFAddInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FSub: executeFSubInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FMul: executeFMulInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FDiv: executeFDivInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::FRem: executeFRemInst(Dest, Op0, Op1, Ty); break;
+ case Instruction::SDiv: Dest.IntVal = Op0.IntVal.sdiv(Op1.IntVal); break;
+ case Instruction::UDiv: Dest.IntVal = Op0.IntVal.udiv(Op1.IntVal); break;
+ case Instruction::URem: Dest.IntVal = Op0.IntVal.urem(Op1.IntVal); break;
+ case Instruction::SRem: Dest.IntVal = Op0.IntVal.srem(Op1.IntVal); break;
+ case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
+ case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
+ case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
+ case Instruction::Shl:
+ Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
+ break;
+ case Instruction::LShr:
+ Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
+ break;
+ case Instruction::AShr:
+ Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
+ break;
+ default:
+ dbgs() << "Unhandled ConstantExpr: " << *CE << "\n";
+ llvm_unreachable("Unhandled ConstantExpr");
+ }
+ return Dest;
+}
+
+// Resolves an operand to its runtime GenericValue: constant expressions are
+// evaluated, other constants are materialized, and everything else is looked
+// up in the current frame's SSA value map.
+GenericValue Interpreter::getOperandValue(Value *V, ExecutionContext &SF) {
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
+ return getConstantExprValue(CE, SF);
+ } else if (Constant *CPV = dyn_cast<Constant>(V)) {
+ return getConstantValue(CPV);
+ // NOTE(review): GlobalValue derives from Constant, so the branch below
+ // appears unreachable (globals are handled inside getConstantValue);
+ // kept as-is to match upstream.
+ } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ return PTOGV(getPointerToGlobal(GV));
+ } else {
+ return SF.Values[V];
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Dispatch and Execution Code
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// callFunction - Execute the specified function...
+//
+// callFunction - Pushes a new stack frame for F and binds ArgVals to its
+// formal parameters (extras go to VarArgs). External declarations are
+// dispatched to callExternalFunction and returned from immediately.
+void Interpreter::callFunction(Function *F, ArrayRef<GenericValue> ArgVals) {
+ assert((ECStack.empty() || !ECStack.back().Caller ||
+ ECStack.back().Caller->arg_size() == ArgVals.size()) &&
+ "Incorrect number of arguments passed into function call!");
+ // Make a new stack frame... and fill it in.
+ ECStack.emplace_back();
+ ExecutionContext &StackFrame = ECStack.back();
+ StackFrame.CurFunction = F;
+
+ // Special handling for external functions.
+ if (F->isDeclaration()) {
+ GenericValue Result = callExternalFunction (F, ArgVals);
+ // Simulate a 'ret' instruction of the appropriate type.
+ popStackAndReturnValueToCaller (F->getReturnType (), Result);
+ return;
+ }
+
+ // Get pointers to first LLVM BB & Instruction in function.
+ StackFrame.CurBB = &F->front();
+ StackFrame.CurInst = StackFrame.CurBB->begin();
+
+ // Run through the function arguments and initialize their values...
+ assert((ArgVals.size() == F->arg_size() ||
+ (ArgVals.size() > F->arg_size() && F->getFunctionType()->isVarArg()))&&
+ "Invalid number of values passed to function invocation!");
+
+ // Handle non-varargs arguments...
+ unsigned i = 0;
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+ AI != E; ++AI, ++i)
+ SetValue(&*AI, ArgVals[i], StackFrame);
+
+ // Handle varargs arguments... (i now indexes the first surplus ArgVal)
+ StackFrame.VarArgs.assign(ArgVals.begin()+i, ArgVals.end());
+}
+
+
+// Main fetch-execute loop: runs until the execution-context stack is empty
+// (i.e. the outermost function has returned).
+void Interpreter::run() {
+ while (!ECStack.empty()) {
+ // Interpret a single instruction & increment the "PC".
+ ExecutionContext &SF = ECStack.back(); // Current stack frame
+ Instruction &I = *SF.CurInst++; // Increment before execute
+
+ // Track the number of dynamic instructions executed.
+ ++NumDynamicInsts;
+
+ LLVM_DEBUG(dbgs() << "About to interpret: " << I << "\n");
+ visit(I); // Dispatch to one of the visit* methods...
+ }
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
new file mode 100644
index 0000000000..c3ba5ebb36
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/ExternalFunctions.cpp
@@ -0,0 +1,511 @@
+//===-- ExternalFunctions.cpp - Implement External Functions --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains both code to deal with invoking "external" functions, but
+// also contains code that implements "exported" external functions.
+//
+// There are currently two mechanisms for handling external functions in the
+// Interpreter. The first is to implement lle_* wrapper functions that are
+// specific to well-known library functions which manually translate the
+// arguments from GenericValues and make the call. If such a wrapper does
+// not exist, and libffi is available, then the Interpreter will attempt to
+// invoke the function using libffi, after finding its address.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Config/config.h" // Detect libffi
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cmath>
+#include <csignal>
+#include <cstdint>
+#include <cstdio>
+#include <cstring>
+#include <map>
+#include <mutex>
+#include <string>
+#include <utility>
+#include <vector>
+
+#ifdef HAVE_FFI_CALL
+#ifdef HAVE_FFI_H
+#include <ffi.h>
+#define USE_LIBFFI
+#elif HAVE_FFI_FFI_H
+#include <ffi/ffi.h>
+#define USE_LIBFFI
+#endif
+#endif
+
+using namespace llvm;
+
+static ManagedStatic<sys::Mutex> FunctionsLock;
+
+typedef GenericValue (*ExFunc)(FunctionType *, ArrayRef<GenericValue>);
+static ManagedStatic<std::map<const Function *, ExFunc> > ExportedFunctions;
+static ManagedStatic<std::map<std::string, ExFunc> > FuncNames;
+
+#ifdef USE_LIBFFI
+typedef void (*RawFunc)();
+static ManagedStatic<std::map<const Function *, RawFunc> > RawFunctions;
+#endif
+
+static Interpreter *TheInterpreter;
+
+// Maps a Type to the single-character code used in the "lle_*" external
+// function name mangling ('U' = unknown, 'N' = non-standard int width).
+static char getTypeID(Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return 'V';
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 1: return 'o';
+ case 8: return 'B';
+ case 16: return 'S';
+ case 32: return 'I';
+ case 64: return 'L';
+ default: return 'N';
+ }
+ case Type::FloatTyID: return 'F';
+ case Type::DoubleTyID: return 'D';
+ case Type::PointerTyID: return 'P';
+ case Type::FunctionTyID:return 'M';
+ case Type::StructTyID: return 'T';
+ case Type::ArrayTyID: return 'A';
+ default: return 'U';
+ }
+}
+
+// Try to find address of external function given a Function object.
+// Please note, that interpreter doesn't know how to assemble a
+// real call in general case (this is JIT job), that's why it assumes,
+// that all external functions has the same (and pretty "general") signature.
+// The typical example of such functions are "lle_X_" ones.
+// Try to find address of external function given a Function object.
+// Please note, that interpreter doesn't know how to assemble a
+// real call in general case (this is JIT job), that's why it assumes,
+// that all external functions has the same (and pretty "general") signature.
+// The typical example of such functions are "lle_X_" ones.
+// Lookup order: exact mangled "lle_<sig>_<name>" in FuncNames, then the
+// generic "lle_X_<name>", then a dynamic-library symbol of the same name.
+static ExFunc lookupFunction(const Function *F) {
+ // Function not found, look it up... start by figuring out what the
+ // composite function name should be.
+ std::string ExtName = "lle_";
+ FunctionType *FT = F->getFunctionType();
+ ExtName += getTypeID(FT->getReturnType());
+ for (Type *T : FT->params())
+ ExtName += getTypeID(T);
+ ExtName += ("_" + F->getName()).str();
+
+ // FuncNames/ExportedFunctions are shared state; guard the whole lookup.
+ sys::ScopedLock Writer(*FunctionsLock);
+ ExFunc FnPtr = (*FuncNames)[ExtName];
+ if (!FnPtr)
+ FnPtr = (*FuncNames)[("lle_X_" + F->getName()).str()];
+ if (!FnPtr) // Try calling a generic function... if it exists...
+ FnPtr = (ExFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
+ ("lle_X_" + F->getName()).str());
+ if (FnPtr)
+ ExportedFunctions->insert(std::make_pair(F, FnPtr)); // Cache for later
+ return FnPtr;
+}
+
+#ifdef USE_LIBFFI
+// Maps an LLVM first-class type to the corresponding libffi type descriptor;
+// aborts for anything libffi marshalling does not support.
+static ffi_type *ffiTypeFor(Type *Ty) {
+ switch (Ty->getTypeID()) {
+ case Type::VoidTyID: return &ffi_type_void;
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 8: return &ffi_type_sint8;
+ case 16: return &ffi_type_sint16;
+ case 32: return &ffi_type_sint32;
+ case 64: return &ffi_type_sint64;
+ }
+ llvm_unreachable("Unhandled integer type bitwidth");
+ case Type::FloatTyID: return &ffi_type_float;
+ case Type::DoubleTyID: return &ffi_type_double;
+ case Type::PointerTyID: return &ffi_type_pointer;
+ default: break;
+ }
+ // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+ report_fatal_error("Type could not be mapped for use with libffi.");
+ return NULL;
+}
+
+// Serializes one GenericValue argument into the caller-provided scratch slot
+// ArgDataPtr (native layout for its type) and returns that slot, so libffi
+// can read the raw bytes.
+static void *ffiValueFor(Type *Ty, const GenericValue &AV,
+ void *ArgDataPtr) {
+ switch (Ty->getTypeID()) {
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(Ty)->getBitWidth()) {
+ case 8: {
+ int8_t *I8Ptr = (int8_t *) ArgDataPtr;
+ *I8Ptr = (int8_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 16: {
+ int16_t *I16Ptr = (int16_t *) ArgDataPtr;
+ *I16Ptr = (int16_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 32: {
+ int32_t *I32Ptr = (int32_t *) ArgDataPtr;
+ *I32Ptr = (int32_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ case 64: {
+ int64_t *I64Ptr = (int64_t *) ArgDataPtr;
+ *I64Ptr = (int64_t) AV.IntVal.getZExtValue();
+ return ArgDataPtr;
+ }
+ }
+ llvm_unreachable("Unhandled integer type bitwidth");
+ case Type::FloatTyID: {
+ float *FloatPtr = (float *) ArgDataPtr;
+ *FloatPtr = AV.FloatVal;
+ return ArgDataPtr;
+ }
+ case Type::DoubleTyID: {
+ double *DoublePtr = (double *) ArgDataPtr;
+ *DoublePtr = AV.DoubleVal;
+ return ArgDataPtr;
+ }
+ case Type::PointerTyID: {
+ void **PtrPtr = (void **) ArgDataPtr;
+ *PtrPtr = GVTOP(AV);
+ return ArgDataPtr;
+ }
+ default: break;
+ }
+ // TODO: Support other types such as StructTyID, ArrayTyID, OpaqueTyID, etc.
+ report_fatal_error("Type value could not be mapped for use with libffi.")
;
+ return NULL;
+}
+
+// Marshals ArgVals into native storage, prepares a libffi CIF from F's
+// signature, performs the call, and decodes the native return value back into
+// Result. Returns true on success, false if ffi_prep_cif failed.
+static bool ffiInvoke(RawFunc Fn, Function *F, ArrayRef<GenericValue> ArgVals,
+ const DataLayout &TD, GenericValue &Result) {
+ ffi_cif cif;
+ FunctionType *FTy = F->getFunctionType();
+ const unsigned NumArgs = F->arg_size();
+
+ // TODO: We don't have type information about the remaining arguments, because
+ // this information is never passed into ExecutionEngine::runFunction().
+ if (ArgVals.size() > NumArgs && F->isVarArg()) {
+ report_fatal_error("Calling external var arg function '" + F->getName()
+ + "' is not supported by the Interpreter.");
+ }
+
+ unsigned ArgBytes = 0;
+
+ // First pass: collect ffi type descriptors and total scratch-buffer size.
+ std::vector<ffi_type*> args(NumArgs);
+ for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ const unsigned ArgNo = A->getArgNo();
+ Type *ArgTy = FTy->getParamType(ArgNo);
+ args[ArgNo] = ffiTypeFor(ArgTy);
+ ArgBytes += TD.getTypeStoreSize(ArgTy);
+ }
+
+ // Second pass: serialize each argument into its slot of ArgData.
+ SmallVector<uint8_t, 128> ArgData;
+ ArgData.resize(ArgBytes);
+ uint8_t *ArgDataPtr = ArgData.data();
+ SmallVector<void*, 16> values(NumArgs);
+ for (Function::const_arg_iterator A = F->arg_begin(), E = F->arg_end();
+ A != E; ++A) {
+ const unsigned ArgNo = A->getArgNo();
+ Type *ArgTy = FTy->getParamType(ArgNo);
+ values[ArgNo] = ffiValueFor(ArgTy, ArgVals[ArgNo], ArgDataPtr);
+ ArgDataPtr += TD.getTypeStoreSize(ArgTy);
+ }
+
+ Type *RetTy = FTy->getReturnType();
+ ffi_type *rtype = ffiTypeFor(RetTy);
+
+ if (ffi_prep_cif(&cif, FFI_DEFAULT_ABI, NumArgs, rtype, args.data()) ==
+ FFI_OK) {
+ SmallVector<uint8_t, 128> ret;
+ if (RetTy->getTypeID() != Type::VoidTyID)
+ ret.resize(TD.getTypeStoreSize(RetTy));
+ ffi_call(&cif, Fn, ret.data(), values.data());
+ // Decode the raw return bytes back into a GenericValue.
+ switch (RetTy->getTypeID()) {
+ case Type::IntegerTyID:
+ switch (cast<IntegerType>(RetTy)->getBitWidth()) {
+ case 8: Result.IntVal = APInt(8 , *(int8_t *) ret.data()); break;
+ case 16: Result.IntVal = APInt(16, *(int16_t*) ret.data()); break;
+ case 32: Result.IntVal = APInt(32, *(int32_t*) ret.data()); break;
+ case 64: Result.IntVal = APInt(64, *(int64_t*) ret.data()); break;
+ }
+ break;
+ case Type::FloatTyID: Result.FloatVal = *(float *) ret.data(); break;
+ case Type::DoubleTyID: Result.DoubleVal = *(double*) ret.data(); break;
+ case Type::PointerTyID: Result.PointerVal = *(void **) ret.data(); break;
+ default: break;
+ }
+ return true;
+ }
+
+ return false;
+}
+#endif // USE_LIBFFI
+
+// Calls an external (declaration-only) function: first via the cached/looked-up
+// lle_* wrapper, then (when libffi is available) via a raw symbol address.
+// Fails fatally for unknown functions (except the special "__main" stub).
+GenericValue Interpreter::callExternalFunction(Function *F,
+ ArrayRef<GenericValue> ArgVals) {
+ TheInterpreter = this;
+
+ std::unique_lock<sys::Mutex> Guard(*FunctionsLock);
+
+ // Do a lookup to see if the function is in our cache... this should just be a
+ // deferred annotation!
+ std::map<const Function *, ExFunc>::iterator FI = ExportedFunctions->find(F);
+ if (ExFunc Fn = (FI == ExportedFunctions->end()) ? lookupFunction(F)
+ : FI->second) {
+ // Drop the lock before running user code.
+ Guard.unlock();
+ return Fn(F->getFunctionType(), ArgVals);
+ }
+
+#ifdef USE_LIBFFI
+ std::map<const Function *, RawFunc>::iterator RF = RawFunctions->find(F);
+ RawFunc RawFn;
+ if (RF == RawFunctions->end()) {
+ RawFn = (RawFunc)(intptr_t)
+ sys::DynamicLibrary::SearchForAddressOfSymbol(std::string(F->getName()));
+ if (!RawFn)
+ RawFn = (RawFunc)(intptr_t)getPointerToGlobalIfAvailable(F);
+ if (RawFn != 0)
+ RawFunctions->insert(std::make_pair(F, RawFn)); // Cache for later
+ } else {
+ RawFn = RF->second;
+ }
+
+ Guard.unlock();
+
+ GenericValue Result;
+ if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result))
+ return Result;
+#endif // USE_LIBFFI
+
+ if (F->getName() == "__main")
+ errs() << "Tried to execute an unknown external function: "
+ << *F->getType() << " __main\n";
+ else
+ report_fatal_error("Tried to execute an unknown external function: " +
+ F->getName());
+ // NOTE(review): report_fatal_error above does not return, so the hint
+ // below can only print on the "__main" path; matches upstream.
+#ifndef USE_LIBFFI
+ errs() << "Recompiling LLVM with --enable-libffi might help.\n";
+#endif
+ return GenericValue();
+}
+
+//===----------------------------------------------------------------------===//
+// Functions "exported" to the running application...
+//
+
+// int atexit(void (*f)(void)) - registers f as an interpreter-managed at-exit
+// handler; always reports success (returns 0).
+static GenericValue lle_X_atexit(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ assert(Args.size() == 1);
+ TheInterpreter->addAtExitHandler((Function*)GVTOP(Args[0]));
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+// void exit(int) - forwards the exit code to the interpreter, which unwinds
+// all frames; the returned GenericValue is never observed.
+static GenericValue lle_X_exit(FunctionType *FT, ArrayRef<GenericValue> Args) {
+ TheInterpreter->exitCalled(Args[0]);
+ return GenericValue();
+}
+
+// void abort(void) - raises SIGABRT in the hosting lli process itself.
+static GenericValue lle_X_abort(FunctionType *FT, ArrayRef<GenericValue> Args) {
+ //FIXME: should we report or raise here?
+ //report_fatal_error("Interpreted program raised SIGABRT");
+ raise (SIGABRT);
+ return GenericValue();
+}
+
+// int sprintf(char *, const char *, ...) - a very rough implementation to make
+// output useful. Re-parses the format string one directive at a time and
+// formats each argument with the host sprintf. Uses fixed-size stack buffers
+// with no bounds checking, so oversized output is undefined behavior.
+static GenericValue lle_X_sprintf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ char *OutputBuffer = (char *)GVTOP(Args[0]);
+ const char *FmtStr = (const char *)GVTOP(Args[1]);
+ unsigned ArgNo = 2;
+
+ // printf should return # chars printed. This is completely incorrect, but
+ // close enough for now.
+ GenericValue GV;
+ GV.IntVal = APInt(32, strlen(FmtStr));
+ while (true) {
+ switch (*FmtStr) {
+ case 0: return GV; // Null terminator...
+ default: // Normal nonspecial character
+ sprintf(OutputBuffer++, "%c", *FmtStr++);
+ break;
+ case '\\': { // Handle escape codes
+ sprintf(OutputBuffer, "%c%c", *FmtStr, *(FmtStr+1));
+ FmtStr += 2; OutputBuffer += 2;
+ break;
+ }
+ case '%': { // Handle format specifiers
+ char FmtBuf[100] = "", Buffer[1000] = "";
+ char *FB = FmtBuf;
+ *FB++ = *FmtStr++;
+ char Last = *FB++ = *FmtStr++;
+ unsigned HowLong = 0;
+ // Copy the directive into FmtBuf up to and including its conversion
+ // character, counting 'l'/'L' length modifiers along the way.
+ while (Last != 'c' && Last != 'd' && Last != 'i' && Last != 'u' &&
+ Last != 'o' && Last != 'x' && Last != 'X' && Last != 'e' &&
+ Last != 'E' && Last != 'g' && Last != 'G' && Last != 'f' &&
+ Last != 'p' && Last != 's' && Last != '%') {
+ if (Last == 'l' || Last == 'L') HowLong++; // Keep track of l's
+ Last = *FB++ = *FmtStr++;
+ }
+ *FB = 0;
+
+ switch (Last) {
+ case '%':
+ memcpy(Buffer, "%", 2); break;
+ case 'c':
+ sprintf(Buffer, FmtBuf, uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+ break;
+ case 'd': case 'i':
+ case 'u': case 'o':
+ case 'x': case 'X':
+ if (HowLong >= 1) {
+ if (HowLong == 1 &&
+ TheInterpreter->getDataLayout().getPointerSizeInBits() == 64 &&
+ sizeof(long) < sizeof(int64_t)) {
+ // Make sure we use %lld with a 64 bit argument because we might be
+ // compiling LLI on a 32 bit compiler.
+ unsigned Size = strlen(FmtBuf);
+ FmtBuf[Size] = FmtBuf[Size-1];
+ FmtBuf[Size+1] = 0;
+ FmtBuf[Size-1] = 'l';
+ }
+ sprintf(Buffer, FmtBuf, Args[ArgNo++].IntVal.getZExtValue());
+ } else
+ sprintf(Buffer, FmtBuf,uint32_t(Args[ArgNo++].IntVal.getZExtValue()));
+ break;
+ case 'e': case 'E': case 'g': case 'G': case 'f':
+ sprintf(Buffer, FmtBuf, Args[ArgNo++].DoubleVal); break;
+ case 'p':
+ sprintf(Buffer, FmtBuf, (void*)GVTOP(Args[ArgNo++])); break;
+ case 's':
+ sprintf(Buffer, FmtBuf, (char*)GVTOP(Args[ArgNo++])); break;
+ default:
+ errs() << "<unknown printf code '" << *FmtStr << "'!>";
+ ArgNo++; break;
+ }
+ // Append the formatted directive (with its NUL) to the output.
+ size_t Len = strlen(Buffer);
+ memcpy(OutputBuffer, Buffer, Len + 1);
+ OutputBuffer += Len;
+ }
+ break;
+ }
+ }
+ return GV;
+}
+
+// int printf(const char *, ...) - a very rough implementation to make output
+// useful.
+// Implemented by formatting into a local 10000-byte buffer via lle_X_sprintf
+// (prepending the buffer as the destination argument) and writing the result
+// to the host's stdout.
+static GenericValue lle_X_printf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ char Buffer[10000];
+ std::vector<GenericValue> NewArgs;
+ NewArgs.push_back(PTOGV((void*)&Buffer[0]));
+ llvm::append_range(NewArgs, Args);
+ GenericValue GV = lle_X_sprintf(FT, NewArgs);
+ outs() << Buffer;
+ return GV;
+}
+
+// int sscanf(const char *format, ...);
+// Forwards up to 10 guest pointer arguments to the host sscanf.
+// NOTE(review): slots of Args[] beyond args.size() are passed to sscanf
+// uninitialized; harmless in practice when the format consumes fewer
+// arguments, but technically undefined behavior -- confirm against upstream.
+static GenericValue lle_X_sscanf(FunctionType *FT,
+ ArrayRef<GenericValue> args) {
+ assert(args.size() < 10 && "Only handle up to 10 args to sscanf right now!");
+
+ char *Args[10];
+ for (unsigned i = 0; i < args.size(); ++i)
+ Args[i] = (char*)GVTOP(args[i]);
+
+ // Args[0] is the source string, Args[1] the format; the rest are outputs.
+ GenericValue GV;
+ GV.IntVal = APInt(32, sscanf(Args[0], Args[1], Args[2], Args[3], Args[4],
+ Args[5], Args[6], Args[7], Args[8], Args[9]));
+ return GV;
+}
+
+// int scanf(const char *format, ...);
+// Forwards up to 10 guest pointer arguments to the host scanf (reads the
+// host's stdin). Same uninitialized-trailing-slot caveat as lle_X_sscanf.
+static GenericValue lle_X_scanf(FunctionType *FT, ArrayRef<GenericValue> args) {
+ assert(args.size() < 10 && "Only handle up to 10 args to scanf right now!");
+
+ char *Args[10];
+ for (unsigned i = 0; i < args.size(); ++i)
+ Args[i] = (char*)GVTOP(args[i]);
+
+ GenericValue GV;
+ GV.IntVal = APInt(32, scanf( Args[0], Args[1], Args[2], Args[3], Args[4],
+ Args[5], Args[6], Args[7], Args[8], Args[9]));
+ return GV;
+}
+
+// int fprintf(FILE *, const char *, ...) - a very rough implementation to make
+// output useful.
+// Formats into a local buffer via lle_X_sprintf and then fputs the result to
+// the guest-supplied FILE* (Args[0]), which must be a valid host FILE handle.
+static GenericValue lle_X_fprintf(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ assert(Args.size() >= 2);
+ char Buffer[10000];
+ std::vector<GenericValue> NewArgs;
+ NewArgs.push_back(PTOGV(Buffer));
+ // Skip Args[0] (the FILE*): sprintf sees [buffer, format, varargs...].
+ NewArgs.insert(NewArgs.end(), Args.begin()+1, Args.end());
+ GenericValue GV = lle_X_sprintf(FT, NewArgs);
+
+ fputs(Buffer, (FILE *) GVTOP(Args[0]));
+ return GV;
+}
+
+// void *memset(void *s, int c, size_t n) / llvm.memset.* shim: fills the
+// guest buffer via the host memset.
+static GenericValue lle_X_memset(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ int val = (int)Args[1].IntVal.getSExtValue();
+ size_t len = (size_t)Args[2].IntVal.getZExtValue();
+ memset((void *)GVTOP(Args[0]), val, len);
+ // llvm.memset.* returns void, lle_X_* returns GenericValue,
+ // so here we return GenericValue with IntVal set to zero
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+// void *memcpy(void *dst, const void *src, size_t n) / llvm.memcpy.* shim:
+// copies between guest buffers via the host memcpy (regions must not overlap).
+static GenericValue lle_X_memcpy(FunctionType *FT,
+ ArrayRef<GenericValue> Args) {
+ memcpy(GVTOP(Args[0]), GVTOP(Args[1]),
+ (size_t)(Args[2].IntVal.getLimitedValue()));
+
+ // llvm.memcpy* returns void, lle_X_* returns GenericValue,
+ // so here we return GenericValue with IntVal set to zero
+ GenericValue GV;
+ GV.IntVal = 0;
+ return GV;
+}
+
+// Populate the shared FuncNames map with the built-in "lle_X_" libc shims so
+// callExternalFunction can resolve them by name. Guarded by FunctionsLock
+// because the map is shared across interpreter instances.
+void Interpreter::initializeExternalFunctions() {
+ sys::ScopedLock Writer(*FunctionsLock);
+ (*FuncNames)["lle_X_atexit"] = lle_X_atexit;
+ (*FuncNames)["lle_X_exit"] = lle_X_exit;
+ (*FuncNames)["lle_X_abort"] = lle_X_abort;
+
+ (*FuncNames)["lle_X_printf"] = lle_X_printf;
+ (*FuncNames)["lle_X_sprintf"] = lle_X_sprintf;
+ (*FuncNames)["lle_X_sscanf"] = lle_X_sscanf;
+ (*FuncNames)["lle_X_scanf"] = lle_X_scanf;
+ (*FuncNames)["lle_X_fprintf"] = lle_X_fprintf;
+ (*FuncNames)["lle_X_memset"] = lle_X_memset;
+ (*FuncNames)["lle_X_memcpy"] = lle_X_memcpy;
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Interpreter.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Interpreter.cpp
new file mode 100644
index 0000000000..5727f7adb4
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Interpreter.cpp
@@ -0,0 +1,102 @@
+//===- Interpreter.cpp - Top-Level LLVM Interpreter Implementation --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top-level functionality for the LLVM interpreter.
+// This interpreter is designed to be a very simple, portable, inefficient
+// interpreter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Interpreter.h"
+#include "llvm/CodeGen/IntrinsicLowering.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Module.h"
+#include <cstring>
+using namespace llvm;
+
+namespace {
+
+// Static-constructor registration: installs Interpreter::create as the
+// ExecutionEngine interpreter factory at program startup.
+static struct RegisterInterp {
+ RegisterInterp() { Interpreter::Register(); }
+} InterpRegistrator;
+
+}
+
+// Referenced by clients to force this translation unit (and the registration
+// object above) to be linked in.
+extern "C" void LLVMLinkInInterpreter() { }
+
+/// Create a new interpreter object.
+///
+/// Materializes the whole module first (the interpreter needs every function
+/// body available). On failure, stores the error message in *ErrStr (if
+/// non-null) and returns nullptr; otherwise returns a heap-allocated
+/// Interpreter owned by the caller.
+ExecutionEngine *Interpreter::create(std::unique_ptr<Module> M,
+ std::string *ErrStr) {
+ // Tell this Module to materialize everything and release the GVMaterializer.
+ if (Error Err = M->materializeAll()) {
+ std::string Msg;
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ Msg = EIB.message();
+ });
+ if (ErrStr)
+ *ErrStr = Msg;
+ // We got an error, just return 0
+ return nullptr;
+ }
+
+ return new Interpreter(std::move(M));
+}
+
+//===----------------------------------------------------------------------===//
+// Interpreter ctor - Initialize stuff
+//
+Interpreter::Interpreter(std::unique_ptr<Module> M)
+ : ExecutionEngine(std::move(M)) {
+
+ // Zero the raw ExitValue storage so an exit before any return reads zeros.
+ memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
+ // Initialize the "backend"
+ initializeExecutionEngine();
+ initializeExternalFunctions();
+ emitGlobals();
+
+ // Owned; released in ~Interpreter. Lowers intrinsics the interpreter does
+ // not handle directly into plain IR/libcalls.
+ IL = new IntrinsicLowering(getDataLayout());
+}
+
+// Releases the IntrinsicLowering helper allocated in the constructor.
+Interpreter::~Interpreter() {
+ delete IL;
+}
+
+// Run the functions registered via the intercepted atexit() in LIFO order,
+// matching atexit(3) semantics. Each handler is pushed as a new call frame
+// (callFunction) and then executed to completion (run()) with no arguments.
+void Interpreter::runAtExitHandlers () {
+ while (!AtExitHandlers.empty()) {
+ callFunction(AtExitHandlers.back(), None);
+ AtExitHandlers.pop_back();
+ run();
+ }
+}
+
+/// run - Start execution with the specified function and arguments.
+///
+/// Extra trailing arguments beyond the function's declared parameter count are
+/// silently dropped. Returns the value stored into ExitValue when the call
+/// (and everything it invokes) finishes executing.
+GenericValue Interpreter::runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) {
+ assert (F && "Function *F was null at entry to run()");
+
+ // Try extra hard not to pass extra args to a function that isn't
+ // expecting them. C programmers frequently bend the rules and
+ // declare main() with fewer parameters than it actually gets
+ // passed, and the interpreter barfs if you pass a function more
+ // parameters than it is declared to take. This does not attempt to
+ // take into account gratuitous differences in declared types,
+ // though.
+ const size_t ArgCount = F->getFunctionType()->getNumParams();
+ ArrayRef<GenericValue> ActualArgs =
+ ArgValues.slice(0, std::min(ArgValues.size(), ArgCount));
+
+ // Set up the function call.
+ callFunction(F, ActualArgs);
+
+ // Start executing the function.
+ run();
+
+ return ExitValue;
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Interpreter.h b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Interpreter.h
new file mode 100644
index 0000000000..fd7fa21df1
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/Interpreter.h
@@ -0,0 +1,235 @@
+//===-- Interpreter.h ------------------------------------------*- C++ -*--===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This header file defines the interpreter structure
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
+#define LLVM_LIB_EXECUTIONENGINE_INTERPRETER_INTERPRETER_H
+
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/InstVisitor.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+namespace llvm {
+
+class IntrinsicLowering;
+template<typename T> class generic_gep_type_iterator;
+class ConstantExpr;
+typedef generic_gep_type_iterator<User::const_op_iterator> gep_type_iterator;
+
+
+// AllocaHolder - Object to track all of the blocks of memory allocated by
+// alloca. When the function returns, this object is popped off the execution
+// stack, which causes the dtor to be run, which frees all the alloca'd memory.
+//
+class AllocaHolder {
+ std::vector<void *> Allocations;
+
+public:
+ AllocaHolder() {}
+
+ // Make this type move-only.
+ // (Declaring the move operations suppresses the implicit copy operations,
+ // so each allocation is freed exactly once.)
+ AllocaHolder(AllocaHolder &&) = default;
+ AllocaHolder &operator=(AllocaHolder &&RHS) = default;
+
+ ~AllocaHolder() {
+ for (void *Allocation : Allocations)
+ free(Allocation);
+ }
+
+ // Take ownership of a malloc'd block; it will be free()d in the dtor.
+ void add(void *Mem) { Allocations.push_back(Mem); }
+};
+
+typedef std::vector<GenericValue> ValuePlaneTy;
+
+// ExecutionContext struct - This struct represents one stack frame currently
+// executing.
+//
+struct ExecutionContext {
+ Function *CurFunction;// The currently executing function
+ BasicBlock *CurBB; // The currently executing BB
+ BasicBlock::iterator CurInst; // The next instruction to execute
+ CallBase *Caller; // Holds the call that called subframes.
+ // NULL if main func or debugger invoked fn
+ std::map<Value *, GenericValue> Values; // LLVM values used in this invocation
+ std::vector<GenericValue> VarArgs; // Values passed through an ellipsis
+ AllocaHolder Allocas; // Track memory allocated by alloca
+
+ // Caller is left uninitialized here; callFunction is expected to set it.
+ ExecutionContext() : CurFunction(nullptr), CurBB(nullptr), CurInst(nullptr) {}
+};
+
+// Interpreter - This class represents the entirety of the interpreter.
+//
+// It is both an ExecutionEngine (so it can be created through the standard
+// EE factory machinery) and an InstVisitor whose visit* overloads implement
+// one IR opcode each against the current ExecutionContext.
+class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
+ GenericValue ExitValue; // The return value of the called function
+ IntrinsicLowering *IL; // Owned; lowers unhandled intrinsics (see ctor/dtor)
+
+ // The runtime stack of executing code. The top of the stack is the current
+ // function record.
+ std::vector<ExecutionContext> ECStack;
+
+ // AtExitHandlers - List of functions to call when the program exits,
+ // registered with the atexit() library function.
+ std::vector<Function*> AtExitHandlers;
+
+public:
+ explicit Interpreter(std::unique_ptr<Module> M);
+ ~Interpreter() override;
+
+ /// runAtExitHandlers - Run any functions registered by the program's calls to
+ /// atexit(3), which we intercept and store in AtExitHandlers.
+ ///
+ void runAtExitHandlers();
+
+ // Install create() as the ExecutionEngine's interpreter factory.
+ static void Register() {
+ InterpCtor = create;
+ }
+
+ /// Create an interpreter ExecutionEngine.
+ ///
+ static ExecutionEngine *create(std::unique_ptr<Module> M,
+ std::string *ErrorStr = nullptr);
+
+ /// run - Start execution with the specified function and arguments.
+ ///
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override {
+ // FIXME: not implemented.
+ return nullptr;
+ }
+
+ // Methods used to execute code:
+ // Place a call on the stack
+ void callFunction(Function *F, ArrayRef<GenericValue> ArgVals);
+ void run(); // Execute instructions until nothing left to do
+
+ // Opcode Implementations
+ void visitReturnInst(ReturnInst &I);
+ void visitBranchInst(BranchInst &I);
+ void visitSwitchInst(SwitchInst &I);
+ void visitIndirectBrInst(IndirectBrInst &I);
+
+ void visitUnaryOperator(UnaryOperator &I);
+ void visitBinaryOperator(BinaryOperator &I);
+ void visitICmpInst(ICmpInst &I);
+ void visitFCmpInst(FCmpInst &I);
+ void visitAllocaInst(AllocaInst &I);
+ void visitLoadInst(LoadInst &I);
+ void visitStoreInst(StoreInst &I);
+ void visitGetElementPtrInst(GetElementPtrInst &I);
+ // PHIs are resolved eagerly by SwitchToNewBasicBlock, so reaching one
+ // through the visitor is a logic error.
+ void visitPHINode(PHINode &PN) {
+ llvm_unreachable("PHI nodes already handled!");
+ }
+ void visitTruncInst(TruncInst &I);
+ void visitZExtInst(ZExtInst &I);
+ void visitSExtInst(SExtInst &I);
+ void visitFPTruncInst(FPTruncInst &I);
+ void visitFPExtInst(FPExtInst &I);
+ void visitUIToFPInst(UIToFPInst &I);
+ void visitSIToFPInst(SIToFPInst &I);
+ void visitFPToUIInst(FPToUIInst &I);
+ void visitFPToSIInst(FPToSIInst &I);
+ void visitPtrToIntInst(PtrToIntInst &I);
+ void visitIntToPtrInst(IntToPtrInst &I);
+ void visitBitCastInst(BitCastInst &I);
+ void visitSelectInst(SelectInst &I);
+
+ void visitVAStartInst(VAStartInst &I);
+ void visitVAEndInst(VAEndInst &I);
+ void visitVACopyInst(VACopyInst &I);
+ void visitIntrinsicInst(IntrinsicInst &I);
+ void visitCallBase(CallBase &I);
+ void visitUnreachableInst(UnreachableInst &I);
+
+ void visitShl(BinaryOperator &I);
+ void visitLShr(BinaryOperator &I);
+ void visitAShr(BinaryOperator &I);
+
+ void visitVAArgInst(VAArgInst &I);
+ void visitExtractElementInst(ExtractElementInst &I);
+ void visitInsertElementInst(InsertElementInst &I);
+ void visitShuffleVectorInst(ShuffleVectorInst &I);
+
+ void visitExtractValueInst(ExtractValueInst &I);
+ void visitInsertValueInst(InsertValueInst &I);
+
+ // Fallback for any opcode without an explicit overload above.
+ void visitInstruction(Instruction &I) {
+ errs() << I << "\n";
+ llvm_unreachable("Instruction not interpretable yet!");
+ }
+
+ GenericValue callExternalFunction(Function *F,
+ ArrayRef<GenericValue> ArgVals);
+ void exitCalled(GenericValue GV);
+
+ void addAtExitHandler(Function *F) {
+ AtExitHandlers.push_back(F);
+ }
+
+ // Pointer to the first variadic argument of the current (top) frame.
+ GenericValue *getFirstVarArg () {
+ return &(ECStack.back ().VarArgs[0]);
+ }
+
+private: // Helper functions
+ GenericValue executeGEPOperation(Value *Ptr, gep_type_iterator I,
+ gep_type_iterator E, ExecutionContext &SF);
+
+ // SwitchToNewBasicBlock - Start execution in a new basic block and run any
+ // PHI nodes in the top of the block. This is used for intraprocedural
+ // control flow.
+ //
+ void SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF);
+
+ void *getPointerToFunction(Function *F) override { return (void*)F; }
+
+ void initializeExecutionEngine() { }
+ void initializeExternalFunctions();
+ GenericValue getConstantExprValue(ConstantExpr *CE, ExecutionContext &SF);
+ GenericValue getOperandValue(Value *V, ExecutionContext &SF);
+ GenericValue executeTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeZExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPTruncInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPExtInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToUIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeFPToSIInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeUIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeSIToFPInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executePtrToIntInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeIntToPtrInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
+ ExecutionContext &SF);
+ GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
+ Type *Ty, ExecutionContext &SF);
+ void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);
+
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/ya.make
new file mode 100644
index 0000000000..fa037e9ed7
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Interpreter/ya.make
@@ -0,0 +1,35 @@
+# Generated by devtools/yamaker.
+# Build rules for the LLVM IR interpreter library (Execution.cpp,
+# ExternalFunctions.cpp, Interpreter.cpp). Do not edit by hand beyond
+# comments; regeneration will overwrite manual changes.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm14
+ contrib/libs/llvm14/include
+ contrib/libs/llvm14/lib/CodeGen
+ contrib/libs/llvm14/lib/ExecutionEngine
+ contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld
+ contrib/libs/llvm14/lib/IR
+ contrib/libs/llvm14/lib/Support
+ contrib/restricted/libffi
+)
+
+ADDINCL(
+ contrib/libs/llvm14/lib/ExecutionEngine/Interpreter
+ contrib/restricted/libffi/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ Execution.cpp
+ ExternalFunctions.cpp
+ Interpreter.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
new file mode 100644
index 0000000000..159880e4b1
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/DefineExternalSectionStartAndEndSymbols.h
@@ -0,0 +1,116 @@
+//===--------- DefineExternalSectionStartAndEndSymbols.h --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Utility class for recognizing external section start and end symbols and
+// transforming them into defined symbols for the start and end blocks of the
+// associated Section.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_DEFINEEXTERNALSECTIONSTARTANDENDSYMBOLS_H
+#define LLVM_EXECUTIONENGINE_JITLINK_DEFINEEXTERNALSECTIONSTARTANDENDSYMBOLS_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+// Describes an external symbol recognized as a section start/end marker:
+// which Section it refers to and whether it marks the start (true) or end
+// (false). A default-constructed value (Sec == nullptr) means "not a
+// section-range symbol".
+struct SectionRangeSymbolDesc {
+ SectionRangeSymbolDesc() = default;
+ SectionRangeSymbolDesc(Section &Sec, bool IsStart)
+ : Sec(&Sec), IsStart(IsStart) {}
+ Section *Sec = nullptr;
+ bool IsStart = false;
+};
+
+/// Pass implementation for the createDefineExternalSectionStartAndEndSymbols
+/// function.
+template <typename SymbolIdentifierFunction>
+class DefineExternalSectionStartAndEndSymbols {
+public:
+ DefineExternalSectionStartAndEndSymbols(SymbolIdentifierFunction F)
+ : F(std::move(F)) {}
+
+ Error operator()(LinkGraph &G) {
+
+ // This pass will affect the external symbols set, so copy them out into a
+ // vector and iterate over that.
+ std::vector<Symbol *> Externals(G.external_symbols().begin(),
+ G.external_symbols().end());
+
+ for (auto *Sym : Externals) {
+ SectionRangeSymbolDesc D = F(G, *Sym);
+ if (D.Sec) {
+ auto &SR = getSectionRange(*D.Sec);
+ if (D.IsStart) {
+ // Empty section: pin the start symbol to absolute address zero.
+ if (SR.empty())
+ G.makeAbsolute(*Sym, orc::ExecutorAddr());
+ else
+ G.makeDefined(*Sym, *SR.getFirstBlock(), 0, 0, Linkage::Strong,
+ Scope::Local, false);
+ } else {
+ if (SR.empty())
+ G.makeAbsolute(*Sym, orc::ExecutorAddr());
+ else
+ // End symbol points one-past-the-end of the last block.
+ G.makeDefined(*Sym, *SR.getLastBlock(),
+ SR.getLastBlock()->getSize(), 0, Linkage::Strong,
+ Scope::Local, false);
+ }
+ }
+ }
+ return Error::success();
+ }
+
+private:
+ // Lazily build and cache the SectionRange for each Section so repeated
+ // start/end symbols for one section do not recompute it.
+ SectionRange &getSectionRange(Section &Sec) {
+ auto I = SectionRanges.find(&Sec);
+ if (I == SectionRanges.end())
+ I = SectionRanges.insert(std::make_pair(&Sec, SectionRange(Sec))).first;
+ return I->second;
+ }
+
+ DenseMap<Section *, SectionRange> SectionRanges;
+ SymbolIdentifierFunction F;
+};
+
+/// Returns a JITLink pass (as a function class) that uses the given symbol
+/// identification function to identify external section start and end symbols
+/// (and their associated Section*s) and transform the identified externals
+/// into defined symbols pointing to the start of the first block in the
+/// section and the end of the last (start and end symbols for empty sections
+/// will be transformed into absolute symbols at address 0).
+///
+/// The identification function should be callable as
+///
+/// SectionRangeSymbolDesc (LinkGraph &G, Symbol &Sym)
+///
+/// If Sym is not a section range start or end symbol then a default
+/// constructed SectionRangeSymbolDesc should be returned. If Sym is a start
+/// symbol then SectionRangeSymbolDesc(Sec, true), where Sec is a reference to
+/// the target Section. If Sym is an end symbol then
+/// SectionRangeSymbolDesc(Sec, false) should be returned.
+///
+/// This pass should be run in the PostAllocationPass pipeline, at which point
+/// all blocks should have been assigned their final addresses.
+// Factory: wraps the identification function F (see the documentation
+// comment above for its contract) into a pass instance.
+template <typename SymbolIdentifierFunction>
+DefineExternalSectionStartAndEndSymbols<SymbolIdentifierFunction>
+createDefineExternalSectionStartAndEndSymbolsPass(
+ SymbolIdentifierFunction &&F) {
+ return DefineExternalSectionStartAndEndSymbols<SymbolIdentifierFunction>(
+ std::forward<SymbolIdentifierFunction>(F));
+}
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_DEFINEEXTERNALSECTIONSTARTANDENDSYMBOLS_H
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
new file mode 100644
index 0000000000..2ae193595f
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/EHFrameSupport.cpp
@@ -0,0 +1,806 @@
+//===-------- JITLink_EHFrameSupport.cpp - JITLink eh-frame utils ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "EHFrameSupportImpl.h"
+
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+#include "llvm/Support/DynamicLibrary.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+// Remembers the name of the eh-frame section this splitter will operate on.
+EHFrameSplitter::EHFrameSplitter(StringRef EHFrameSectionName)
+ : EHFrameSectionName(EHFrameSectionName) {}
+
+// Pass entry point: split every block of the eh-frame section so that each
+// resulting block holds exactly one CFI record. Succeeds trivially when the
+// section is absent.
+Error EHFrameSplitter::operator()(LinkGraph &G) {
+ auto *EHFrame = G.findSectionByName(EHFrameSectionName);
+
+ if (!EHFrame) {
+ LLVM_DEBUG({
+ dbgs() << "EHFrameSplitter: No " << EHFrameSectionName
+ << " section. Nothing to do\n";
+ });
+ return Error::success();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "EHFrameSplitter: Processing " << EHFrameSectionName << "...\n";
+ });
+
+ DenseMap<Block *, LinkGraph::SplitBlockCache> Caches;
+
+ {
+ // Pre-build the split caches.
+ // Each cache holds the block's symbols sorted by descending offset, as
+ // expected by LinkGraph::splitBlock.
+ for (auto *B : EHFrame->blocks())
+ Caches[B] = LinkGraph::SplitBlockCache::value_type();
+ for (auto *Sym : EHFrame->symbols())
+ Caches[&Sym->getBlock()]->push_back(Sym);
+ for (auto *B : EHFrame->blocks())
+ llvm::sort(*Caches[B], [](const Symbol *LHS, const Symbol *RHS) {
+ return LHS->getOffset() > RHS->getOffset();
+ });
+ }
+
+ // Iterate over blocks (we do this by iterating over Caches entries rather
+ // than EHFrame->blocks() as we will be inserting new blocks along the way,
+ // which would invalidate iterators in the latter sequence.
+ for (auto &KV : Caches) {
+ auto &B = *KV.first;
+ auto &BCache = KV.second;
+ if (auto Err = processBlock(G, B, BCache))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+// Walk one eh-frame block record by record (reading each record's length
+// field, including the 0xffffffff extended-length form) and carve off a new
+// block per record via G.splitBlock. Returns an error for zero-fill blocks
+// or malformed length fields; empty blocks are skipped.
+Error EHFrameSplitter::processBlock(LinkGraph &G, Block &B,
+ LinkGraph::SplitBlockCache &Cache) {
+ LLVM_DEBUG(dbgs() << " Processing block at " << B.getAddress() << "\n");
+
+ // eh-frame should not contain zero-fill blocks.
+ if (B.isZeroFill())
+ return make_error<JITLinkError>("Unexpected zero-fill block in " +
+ EHFrameSectionName + " section");
+
+ if (B.getSize() == 0) {
+ LLVM_DEBUG(dbgs() << " Block is empty. Skipping.\n");
+ return Error::success();
+ }
+
+ BinaryStreamReader BlockReader(
+ StringRef(B.getContent().data(), B.getContent().size()),
+ G.getEndianness());
+
+ while (true) {
+ uint64_t RecordStartOffset = BlockReader.getOffset();
+
+ LLVM_DEBUG({
+ dbgs() << " Processing CFI record at "
+ << formatv("{0:x16}", B.getAddress()) << "\n";
+ });
+
+ // Per the eh-frame format, a length of 0xffffffff means the real length
+ // follows as a 64-bit extended-length field.
+ uint32_t Length;
+ if (auto Err = BlockReader.readInteger(Length))
+ return Err;
+ if (Length != 0xffffffff) {
+ if (auto Err = BlockReader.skip(Length))
+ return Err;
+ } else {
+ uint64_t ExtendedLength;
+ if (auto Err = BlockReader.readInteger(ExtendedLength))
+ return Err;
+ if (auto Err = BlockReader.skip(ExtendedLength))
+ return Err;
+ }
+
+ // If this was the last block then there's nothing to split
+ if (BlockReader.empty()) {
+ LLVM_DEBUG(dbgs() << " Extracted " << B << "\n");
+ return Error::success();
+ }
+
+ uint64_t BlockSize = BlockReader.getOffset() - RecordStartOffset;
+ auto &NewBlock = G.splitBlock(B, BlockSize);
+ (void)NewBlock;
+ LLVM_DEBUG(dbgs() << " Extracted " << NewBlock << "\n");
+ }
+}
+
+// Stores the section name, target pointer size, and the target-specific edge
+// kinds used when synthesizing delta relocations for eh-frame fields.
+EHFrameEdgeFixer::EHFrameEdgeFixer(StringRef EHFrameSectionName,
+ unsigned PointerSize, Edge::Kind Delta64,
+ Edge::Kind Delta32, Edge::Kind NegDelta32)
+ : EHFrameSectionName(EHFrameSectionName), PointerSize(PointerSize),
+ Delta64(Delta64), Delta32(Delta32), NegDelta32(NegDelta32) {}
+
+// Pass entry point: build address->symbol/block maps for the whole graph,
+// then visit the eh-frame blocks in ascending address order (so each CIE is
+// parsed before the FDEs that reference it) fixing up their edges.
+Error EHFrameEdgeFixer::operator()(LinkGraph &G) {
+ auto *EHFrame = G.findSectionByName(EHFrameSectionName);
+
+ if (!EHFrame) {
+ LLVM_DEBUG({
+ dbgs() << "EHFrameEdgeFixer: No " << EHFrameSectionName
+ << " section. Nothing to do\n";
+ });
+ return Error::success();
+ }
+
+ // Check that we support the graph's pointer size.
+ if (G.getPointerSize() != 4 && G.getPointerSize() != 8)
+ return make_error<JITLinkError>(
+ "EHFrameEdgeFixer only supports 32 and 64 bit targets");
+
+ LLVM_DEBUG({
+ dbgs() << "EHFrameEdgeFixer: Processing " << EHFrameSectionName << "...\n";
+ });
+
+ ParseContext PC(G);
+
+ // Build a map of all blocks and symbols in the text sections. We will use
+ // these for finding / building edge targets when processing FDEs.
+ for (auto &Sec : G.sections()) {
+ PC.AddrToSyms.addSymbols(Sec.symbols());
+ if (auto Err = PC.AddrToBlock.addBlocks(Sec.blocks(),
+ BlockAddressMap::includeNonNull))
+ return Err;
+ }
+
+ // Sort eh-frame blocks into address order to ensure we visit CIEs before
+ // their child FDEs.
+ std::vector<Block *> EHFrameBlocks;
+ for (auto *B : EHFrame->blocks())
+ EHFrameBlocks.push_back(B);
+ llvm::sort(EHFrameBlocks, [](const Block *LHS, const Block *RHS) {
+ return LHS->getAddress() < RHS->getAddress();
+ });
+
+ // Loop over the blocks in address order.
+ for (auto *B : EHFrameBlocks)
+ if (auto Err = processBlock(PC, *B))
+ return Err;
+
+ return Error::success();
+}
+
+// Parse one eh-frame block record by record. For each record, read the
+// (possibly extended) length and the CIE-delta field, then dispatch to
+// processCIE (delta == 0) or processFDE (delta != 0). Existing relocation
+// edges are collected up front so the record parsers can reuse them.
+Error EHFrameEdgeFixer::processBlock(ParseContext &PC, Block &B) {
+
+ LLVM_DEBUG({
+ dbgs() << " Processing block at " << formatv("{0:x16}", B.getAddress())
+ << "\n";
+ });
+
+ // eh-frame should not contain zero-fill blocks.
+ if (B.isZeroFill())
+ return make_error<JITLinkError>("Unexpected zero-fill block in " +
+ EHFrameSectionName + " section");
+
+ if (B.getSize() == 0) {
+ LLVM_DEBUG(dbgs() << " Block is empty. Skipping.\n");
+ return Error::success();
+ }
+
+ // Find the offsets of any existing edges from this block.
+ BlockEdgeMap BlockEdges;
+ for (auto &E : B.edges())
+ if (E.isRelocation()) {
+ if (BlockEdges.count(E.getOffset()))
+ return make_error<JITLinkError>(
+ "Multiple relocations at offset " +
+ formatv("{0:x16}", E.getOffset()) + " in " + EHFrameSectionName +
+ " block at address " + formatv("{0:x16}", B.getAddress()));
+
+ BlockEdges[E.getOffset()] = EdgeTarget(E);
+ }
+
+ CIEInfosMap CIEInfos;
+ BinaryStreamReader BlockReader(
+ StringRef(B.getContent().data(), B.getContent().size()),
+ PC.G.getEndianness());
+ while (!BlockReader.empty()) {
+ size_t RecordStartOffset = BlockReader.getOffset();
+
+ LLVM_DEBUG({
+ dbgs() << " Processing CFI record at "
+ << formatv("{0:x16}", B.getAddress() + RecordStartOffset) << "\n";
+ });
+
+ // Get the record length.
+ size_t RecordRemaining;
+ {
+ uint32_t Length;
+ if (auto Err = BlockReader.readInteger(Length))
+ return Err;
+ // If Length < 0xffffffff then use the regular length field, otherwise
+ // read the extended length field.
+ if (Length != 0xffffffff)
+ RecordRemaining = Length;
+ else {
+ uint64_t ExtendedLength;
+ if (auto Err = BlockReader.readInteger(ExtendedLength))
+ return Err;
+ RecordRemaining = ExtendedLength;
+ }
+ }
+
+ if (BlockReader.bytesRemaining() < RecordRemaining)
+ return make_error<JITLinkError>(
+ "Incomplete CFI record at " +
+ formatv("{0:x16}", B.getAddress() + RecordStartOffset));
+
+ // Read the CIE delta for this record.
+ uint64_t CIEDeltaFieldOffset = BlockReader.getOffset() - RecordStartOffset;
+ uint32_t CIEDelta;
+ if (auto Err = BlockReader.readInteger(CIEDelta))
+ return Err;
+
+ // A zero CIE-delta marks the record itself as a CIE; anything else is an
+ // FDE whose delta points back to its parent CIE.
+ if (CIEDelta == 0) {
+ if (auto Err = processCIE(PC, B, RecordStartOffset,
+ CIEDeltaFieldOffset + RecordRemaining,
+ CIEDeltaFieldOffset))
+ return Err;
+ } else {
+ if (auto Err = processFDE(PC, B, RecordStartOffset,
+ CIEDeltaFieldOffset + RecordRemaining,
+ CIEDeltaFieldOffset, CIEDelta, BlockEdges))
+ return Err;
+ }
+
+ // Move to the next record.
+ BlockReader.setOffset(RecordStartOffset + CIEDeltaFieldOffset +
+ RecordRemaining);
+ }
+
+ return Error::success();
+}
+
+// Parse one CIE record: validate version (must be 1), parse the augmentation
+// string ('L'/'P'/'R' fields), enforce the code/data alignment factors this
+// fixer supports, record the pointer encodings, and register the resulting
+// CIEInformation in PC.CIEInfos keyed by the CIE's address so later FDEs can
+// find their parent CIE.
+Error EHFrameEdgeFixer::processCIE(ParseContext &PC, Block &B,
+ size_t RecordOffset, size_t RecordLength,
+ size_t CIEDeltaFieldOffset) {
+
+ LLVM_DEBUG(dbgs() << " Record is CIE\n");
+
+ auto RecordContent = B.getContent().slice(RecordOffset, RecordLength);
+ BinaryStreamReader RecordReader(
+ StringRef(RecordContent.data(), RecordContent.size()),
+ PC.G.getEndianness());
+
+ // Skip past the CIE delta field: we've already processed this far.
+ RecordReader.setOffset(CIEDeltaFieldOffset + 4);
+
+ auto &CIESymbol =
+ PC.G.addAnonymousSymbol(B, RecordOffset, RecordLength, false, false);
+ CIEInformation CIEInfo(CIESymbol);
+
+ uint8_t Version = 0;
+ if (auto Err = RecordReader.readInteger(Version))
+ return Err;
+
+ if (Version != 0x01)
+ return make_error<JITLinkError>("Bad CIE version " + Twine(Version) +
+ " (should be 0x01) in eh-frame");
+
+ auto AugInfo = parseAugmentationString(RecordReader);
+ if (!AugInfo)
+ return AugInfo.takeError();
+
+ // Skip the EH Data field if present.
+ if (AugInfo->EHDataFieldPresent)
+ if (auto Err = RecordReader.skip(PC.G.getPointerSize()))
+ return Err;
+
+ // Read and validate the code alignment factor.
+ {
+ uint64_t CodeAlignmentFactor = 0;
+ if (auto Err = RecordReader.readULEB128(CodeAlignmentFactor))
+ return Err;
+ if (CodeAlignmentFactor != 1)
+ return make_error<JITLinkError>("Unsupported CIE code alignment factor " +
+ Twine(CodeAlignmentFactor) +
+ " (expected 1)");
+ }
+
+ // Read and validate the data alignment factor.
+ {
+ int64_t DataAlignmentFactor = 0;
+ if (auto Err = RecordReader.readSLEB128(DataAlignmentFactor))
+ return Err;
+ if (DataAlignmentFactor != -8)
+ return make_error<JITLinkError>("Unsupported CIE data alignment factor " +
+ Twine(DataAlignmentFactor) +
+ " (expected -8)");
+ }
+
+ // Skip the return address register field.
+ if (auto Err = RecordReader.skip(1))
+ return Err;
+
+ uint64_t AugmentationDataLength = 0;
+ if (auto Err = RecordReader.readULEB128(AugmentationDataLength))
+ return Err;
+
+ uint32_t AugmentationDataStartOffset = RecordReader.getOffset();
+
+ // Walk the augmentation characters gathered by parseAugmentationString and
+ // consume their corresponding augmentation-data entries in order.
+ uint8_t *NextField = &AugInfo->Fields[0];
+ while (uint8_t Field = *NextField++) {
+ switch (Field) {
+ case 'L': {
+ CIEInfo.FDEsHaveLSDAField = true;
+ uint8_t LSDAPointerEncoding;
+ if (auto Err = RecordReader.readInteger(LSDAPointerEncoding))
+ return Err;
+ if (!isSupportedPointerEncoding(LSDAPointerEncoding))
+ return make_error<JITLinkError>(
+ "Unsupported LSDA pointer encoding " +
+ formatv("{0:x2}", LSDAPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ CIEInfo.LSDAPointerEncoding = LSDAPointerEncoding;
+ break;
+ }
+ case 'P': {
+ // Only the indirect|pcrel|sdata4 personality encoding is accepted; the
+ // personality pointer itself is read and discarded here.
+ uint8_t PersonalityPointerEncoding = 0;
+ if (auto Err = RecordReader.readInteger(PersonalityPointerEncoding))
+ return Err;
+ if (PersonalityPointerEncoding !=
+ (dwarf::DW_EH_PE_indirect | dwarf::DW_EH_PE_pcrel |
+ dwarf::DW_EH_PE_sdata4))
+ return make_error<JITLinkError>(
+ "Unspported personality pointer "
+ "encoding " +
+ formatv("{0:x2}", PersonalityPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ uint32_t PersonalityPointerAddress;
+ if (auto Err = RecordReader.readInteger(PersonalityPointerAddress))
+ return Err;
+ break;
+ }
+ case 'R': {
+ uint8_t FDEPointerEncoding;
+ if (auto Err = RecordReader.readInteger(FDEPointerEncoding))
+ return Err;
+ if (!isSupportedPointerEncoding(FDEPointerEncoding))
+ return make_error<JITLinkError>(
+ "Unsupported FDE pointer encoding " +
+ formatv("{0:x2}", FDEPointerEncoding) + " in CIE at " +
+ formatv("{0:x16}", CIESymbol.getAddress()));
+ CIEInfo.FDEPointerEncoding = FDEPointerEncoding;
+ break;
+ }
+ default:
+ llvm_unreachable("Invalid augmentation string field");
+ }
+ }
+
+ if (RecordReader.getOffset() - AugmentationDataStartOffset >
+ AugmentationDataLength)
+ return make_error<JITLinkError>("Read past the end of the augmentation "
+ "data while parsing fields");
+
+ assert(!PC.CIEInfos.count(CIESymbol.getAddress()) &&
+ "Multiple CIEs recorded at the same address?");
+ PC.CIEInfos[CIESymbol.getAddress()] = std::move(CIEInfo);
+
+ return Error::success();
+}
+
+// Process a single FDE record within block B.
+//
+// Adds (or validates) three kinds of edges for the FDE:
+//   1. An FDE-to-CIE edge at the CIE-delta field.
+//   2. An FDE-to-function edge at the PC-begin field (plus a keep-alive edge
+//      back from the function's block so the FDE survives dead-stripping
+//      whenever its function does).
+//   3. An FDE-to-LSDA edge, if the CIE declared that FDEs carry an LSDA.
+//
+// BlockEdges holds relocation-derived edges already present on B, keyed by
+// offset; where an existing edge covers a field we validate it instead of
+// adding a new one.
+Error EHFrameEdgeFixer::processFDE(ParseContext &PC, Block &B,
+                                   size_t RecordOffset, size_t RecordLength,
+                                   size_t CIEDeltaFieldOffset,
+                                   uint32_t CIEDelta,
+                                   BlockEdgeMap &BlockEdges) {
+  LLVM_DEBUG(dbgs() << " Record is FDE\n");
+
+  orc::ExecutorAddr RecordAddress = B.getAddress() + RecordOffset;
+
+  auto RecordContent = B.getContent().slice(RecordOffset, RecordLength);
+  BinaryStreamReader RecordReader(
+      StringRef(RecordContent.data(), RecordContent.size()),
+      PC.G.getEndianness());
+
+  // Skip past the CIE delta field: we've already read this far.
+  RecordReader.setOffset(CIEDeltaFieldOffset + 4);
+
+  auto &FDESymbol =
+      PC.G.addAnonymousSymbol(B, RecordOffset, RecordLength, false, false);
+
+  CIEInformation *CIEInfo = nullptr;
+
+  {
+    // Process the CIE pointer field. The field holds the distance back from
+    // the field itself to the start of the CIE.
+    auto CIEEdgeItr = BlockEdges.find(RecordOffset + CIEDeltaFieldOffset);
+    orc::ExecutorAddr CIEAddress =
+        RecordAddress + orc::ExecutorAddrDiff(CIEDeltaFieldOffset) -
+        orc::ExecutorAddrDiff(CIEDelta);
+    if (CIEEdgeItr == BlockEdges.end()) {
+      // No pre-existing relocation covers this field: add a NegDelta32 edge
+      // to the CIE symbol recorded earlier by processCIE.
+      LLVM_DEBUG({
+        dbgs() << " Adding edge at "
+               << formatv("{0:x16}", RecordAddress + CIEDeltaFieldOffset)
+               << " to CIE at: " << formatv("{0:x16}", CIEAddress) << "\n";
+      });
+      if (auto CIEInfoOrErr = PC.findCIEInfo(CIEAddress))
+        CIEInfo = *CIEInfoOrErr;
+      else
+        return CIEInfoOrErr.takeError();
+      assert(CIEInfo->CIESymbol && "CIEInfo has no CIE symbol set");
+      B.addEdge(NegDelta32, RecordOffset + CIEDeltaFieldOffset,
+                *CIEInfo->CIESymbol, 0);
+    } else {
+      // A relocation already covers this field: look up the CIE it targets
+      // and sanity-check the addend.
+      LLVM_DEBUG({
+        dbgs() << " Already has edge at "
+               << formatv("{0:x16}", RecordAddress + CIEDeltaFieldOffset)
+               << " to CIE at " << formatv("{0:x16}", CIEAddress) << "\n";
+      });
+      auto &EI = CIEEdgeItr->second;
+      if (EI.Addend)
+        return make_error<JITLinkError>(
+            "CIE edge at " +
+            formatv("{0:x16}", RecordAddress + CIEDeltaFieldOffset) +
+            " has non-zero addend");
+      if (auto CIEInfoOrErr = PC.findCIEInfo(EI.Target->getAddress()))
+        CIEInfo = *CIEInfoOrErr;
+      else
+        return CIEInfoOrErr.takeError();
+    }
+  }
+
+  {
+    // Process the PC-Begin field.
+    Block *PCBeginBlock = nullptr;
+    orc::ExecutorAddrDiff PCBeginFieldOffset = RecordReader.getOffset();
+    auto PCEdgeItr = BlockEdges.find(RecordOffset + PCBeginFieldOffset);
+    if (PCEdgeItr == BlockEdges.end()) {
+      // Decode the PC-begin pointer using the encoding from the CIE, then
+      // add a matching edge to the target function's symbol.
+      auto PCBeginPtrInfo =
+          readEncodedPointer(CIEInfo->FDEPointerEncoding,
+                             RecordAddress + PCBeginFieldOffset, RecordReader);
+      if (!PCBeginPtrInfo)
+        return PCBeginPtrInfo.takeError();
+      orc::ExecutorAddr PCBegin = PCBeginPtrInfo->first;
+      Edge::Kind PCBeginEdgeKind = PCBeginPtrInfo->second;
+      LLVM_DEBUG({
+        dbgs() << " Adding edge at "
+               << (RecordAddress + PCBeginFieldOffset) << " to PC at "
+               << formatv("{0:x16}", PCBegin) << "\n";
+      });
+      auto PCBeginSym = getOrCreateSymbol(PC, PCBegin);
+      if (!PCBeginSym)
+        return PCBeginSym.takeError();
+      B.addEdge(PCBeginEdgeKind, RecordOffset + PCBeginFieldOffset, *PCBeginSym,
+                0);
+      PCBeginBlock = &PCBeginSym->getBlock();
+    } else {
+      auto &EI = PCEdgeItr->second;
+      LLVM_DEBUG({
+        dbgs() << " Already has edge at "
+               << formatv("{0:x16}", RecordAddress + PCBeginFieldOffset)
+               << " to PC at " << formatv("{0:x16}", EI.Target->getAddress());
+        if (EI.Addend)
+          dbgs() << " + " << formatv("{0:x16}", EI.Addend);
+        dbgs() << "\n";
+      });
+
+      // Make sure the existing edge points at a defined block.
+      if (!EI.Target->isDefined()) {
+        auto EdgeAddr = RecordAddress + PCBeginFieldOffset;
+        return make_error<JITLinkError>("FDE edge at " +
+                                        formatv("{0:x16}", EdgeAddr) +
+                                        " points at external block");
+      }
+      PCBeginBlock = &EI.Target->getBlock();
+      // The existing edge covers the field, but the reader still has to step
+      // over it to stay in sync with the record layout.
+      if (auto Err = RecordReader.skip(
+              getPointerEncodingDataSize(CIEInfo->FDEPointerEncoding)))
+        return Err;
+    }
+
+    // Add a keep-alive edge from the FDE target to the FDE to ensure that the
+    // FDE is kept alive if its target is.
+    assert(PCBeginBlock && "PC-begin block not recorded");
+    LLVM_DEBUG({
+      dbgs() << " Adding keep-alive edge from target at "
+             << formatv("{0:x16}", PCBeginBlock->getAddress()) << " to FDE at "
+             << formatv("{0:x16}", RecordAddress) << "\n";
+    });
+    PCBeginBlock->addEdge(Edge::KeepAlive, 0, FDESymbol, 0);
+  }
+
+  // Skip over the PC range size field.
+  if (auto Err = RecordReader.skip(
+          getPointerEncodingDataSize(CIEInfo->FDEPointerEncoding)))
+    return Err;
+
+  if (CIEInfo->FDEsHaveLSDAField) {
+    // The CIE's augmentation string contained 'L': this FDE carries an
+    // augmentation-data blob holding an LSDA pointer.
+    uint64_t AugmentationDataSize;
+    if (auto Err = RecordReader.readULEB128(AugmentationDataSize))
+      return Err;
+
+    orc::ExecutorAddrDiff LSDAFieldOffset = RecordReader.getOffset();
+    auto LSDAEdgeItr = BlockEdges.find(RecordOffset + LSDAFieldOffset);
+    if (LSDAEdgeItr == BlockEdges.end()) {
+      auto LSDAPointerInfo =
+          readEncodedPointer(CIEInfo->LSDAPointerEncoding,
+                             RecordAddress + LSDAFieldOffset, RecordReader);
+      if (!LSDAPointerInfo)
+        return LSDAPointerInfo.takeError();
+      orc::ExecutorAddr LSDA = LSDAPointerInfo->first;
+      Edge::Kind LSDAEdgeKind = LSDAPointerInfo->second;
+      auto LSDASym = getOrCreateSymbol(PC, LSDA);
+      if (!LSDASym)
+        return LSDASym.takeError();
+      LLVM_DEBUG({
+        dbgs() << " Adding edge at "
+               << formatv("{0:x16}", RecordAddress + LSDAFieldOffset)
+               << " to LSDA at " << formatv("{0:x16}", LSDA) << "\n";
+      });
+      B.addEdge(LSDAEdgeKind, RecordOffset + LSDAFieldOffset, *LSDASym, 0);
+    } else {
+      LLVM_DEBUG({
+        auto &EI = LSDAEdgeItr->second;
+        dbgs() << " Already has edge at "
+               << formatv("{0:x16}", RecordAddress + LSDAFieldOffset)
+               << " to LSDA at " << formatv("{0:x16}", EI.Target->getAddress());
+        if (EI.Addend)
+          dbgs() << " + " << formatv("{0:x16}", EI.Addend);
+        dbgs() << "\n";
+      });
+      if (auto Err = RecordReader.skip(AugmentationDataSize))
+        return Err;
+    }
+  } else {
+    LLVM_DEBUG(dbgs() << " Record does not have LSDA field.\n");
+  }
+
+  return Error::success();
+}
+
+// Parse a CIE augmentation string from RecordReader.
+//
+// Recognizes 'z' (augmentation data present), "eh" (EH data field present),
+// and 'L'/'P'/'R' (LSDA / personality / FDE pointer-encoding fields), which
+// are recorded in encounter order in AugmentationInfo::Fields. Any other
+// character yields an error.
+Expected<EHFrameEdgeFixer::AugmentationInfo>
+EHFrameEdgeFixer::parseAugmentationString(BinaryStreamReader &RecordReader) {
+  AugmentationInfo AugInfo;
+  uint8_t NextChar;
+  uint8_t *NextField = &AugInfo.Fields[0];
+  // Fields[] has room for three characters plus a null terminator. Keep one
+  // slot in reserve so the terminator that processCIE's field loop relies on
+  // is never overwritten.
+  uint8_t *const FieldsEnd = &AugInfo.Fields[3];
+
+  if (auto Err = RecordReader.readInteger(NextChar))
+    return std::move(Err);
+
+  while (NextChar != 0) {
+    switch (NextChar) {
+    case 'z':
+      AugInfo.AugmentationDataPresent = true;
+      break;
+    case 'e':
+      if (auto Err = RecordReader.readInteger(NextChar))
+        return std::move(Err);
+      if (NextChar != 'h')
+        return make_error<JITLinkError>("Unrecognized substring e" +
+                                        Twine(NextChar) +
+                                        " in augmentation string");
+      AugInfo.EHDataFieldPresent = true;
+      break;
+    case 'L':
+    case 'P':
+    case 'R':
+      // Guard against a malformed string with repeated L/P/R characters,
+      // which would otherwise write past the end of Fields[] and clobber its
+      // null terminator.
+      if (NextField == FieldsEnd)
+        return make_error<JITLinkError>(
+            "Too many fields in augmentation string");
+      *NextField++ = NextChar;
+      break;
+    default:
+      return make_error<JITLinkError>("Unrecognized character " +
+                                      Twine(NextChar) +
+                                      " in augmentation string");
+    }
+
+    if (auto Err = RecordReader.readInteger(NextChar))
+      return std::move(Err);
+  }
+
+  return std::move(AugInfo);
+}
+
+// Returns true if this fixer knows how to decode the given DWARF pointer
+// encoding: PC-relative, non-indirect, with a fixed-width datatype.
+bool EHFrameEdgeFixer::isSupportedPointerEncoding(uint8_t PointerEncoding) {
+  using namespace dwarf;
+
+  // Only PC-relative application is supported for now, and
+  // readEncodedPointer does not handle indirect pointers.
+  bool IsPCRel = (PointerEncoding & 0x70) == DW_EH_PE_pcrel;
+  bool IsIndirect = (PointerEncoding & DW_EH_PE_indirect) != 0;
+  if (!IsPCRel || IsIndirect)
+    return false;
+
+  // The value itself must use one of the fixed-width datatypes we support.
+  uint8_t DataType = PointerEncoding & 0xf;
+  return DataType == DW_EH_PE_absptr || DataType == DW_EH_PE_udata4 ||
+         DataType == DW_EH_PE_udata8 || DataType == DW_EH_PE_sdata4 ||
+         DataType == DW_EH_PE_sdata8;
+}
+
+// Returns the on-disk size, in bytes, of a pointer field with the given
+// (supported) encoding. absptr follows the graph's pointer size.
+unsigned EHFrameEdgeFixer::getPointerEncodingDataSize(uint8_t PointerEncoding) {
+  using namespace dwarf;
+
+  assert(isSupportedPointerEncoding(PointerEncoding) &&
+         "Unsupported pointer encoding");
+
+  uint8_t DataType = PointerEncoding & 0xf;
+  if (DataType == DW_EH_PE_absptr)
+    return PointerSize;
+  if (DataType == DW_EH_PE_udata4 || DataType == DW_EH_PE_sdata4)
+    return 4;
+  if (DataType == DW_EH_PE_udata8 || DataType == DW_EH_PE_sdata8)
+    return 8;
+  llvm_unreachable("Unsupported encoding");
+}
+
+// Decode one encoded pointer field from RecordReader.
+//
+// Returns the decoded absolute address together with the edge kind (Delta32
+// or Delta64) matching the width of the field, so that callers can attach a
+// correctly-sized edge at the field's offset. Only PC-relative, non-indirect
+// encodings are supported (enforced by isSupportedPointerEncoding), so the
+// stored value is always relative to PointerFieldAddress.
+Expected<std::pair<orc::ExecutorAddr, Edge::Kind>>
+EHFrameEdgeFixer::readEncodedPointer(uint8_t PointerEncoding,
+                                     orc::ExecutorAddr PointerFieldAddress,
+                                     BinaryStreamReader &RecordReader) {
+  assert(isSupportedPointerEncoding(PointerEncoding) &&
+         "Unsupported pointer encoding");
+
+  using namespace dwarf;
+
+  // Isolate data type, remap absptr to udata4 or udata8. This relies on us
+  // having verified that the graph uses 32-bit or 64-bit pointers only at the
+  // start of this pass.
+  uint8_t EffectiveType = PointerEncoding & 0xf;
+  if (EffectiveType == DW_EH_PE_absptr)
+    EffectiveType = (PointerSize == 8) ? DW_EH_PE_udata8 : DW_EH_PE_udata4;
+
+  orc::ExecutorAddr Addr;
+  Edge::Kind PointerEdgeKind = Edge::Invalid;
+  switch (EffectiveType) {
+  case DW_EH_PE_udata4: {
+    uint32_t Val;
+    if (auto Err = RecordReader.readInteger(Val))
+      return std::move(Err);
+    Addr = PointerFieldAddress + Val;
+    PointerEdgeKind = Delta32;
+    break;
+  }
+  case DW_EH_PE_udata8: {
+    uint64_t Val;
+    if (auto Err = RecordReader.readInteger(Val))
+      return std::move(Err);
+    Addr = PointerFieldAddress + Val;
+    PointerEdgeKind = Delta64;
+    break;
+  }
+  case DW_EH_PE_sdata4: {
+    int32_t Val;
+    if (auto Err = RecordReader.readInteger(Val))
+      return std::move(Err);
+    Addr = PointerFieldAddress + Val;
+    PointerEdgeKind = Delta32;
+    break;
+  }
+  case DW_EH_PE_sdata8: {
+    int64_t Val;
+    if (auto Err = RecordReader.readInteger(Val))
+      return std::move(Err);
+    Addr = PointerFieldAddress + Val;
+    PointerEdgeKind = Delta64;
+    break;
+  }
+  }
+
+  if (PointerEdgeKind == Edge::Invalid)
+    return make_error<JITLinkError>(
+        "Unsupported edge kind for encoded pointer at " +
+        formatv("{0:x}", PointerFieldAddress));
+
+  // Return the kind selected for the field's width. (Returning Delta64
+  // unconditionally here would make callers apply 8-byte fixups to 4-byte
+  // sdata4/udata4 fields.)
+  return std::make_pair(Addr, PointerEdgeKind);
+}
+
+// Find a symbol to represent the given address, creating a new anonymous
+// symbol on the covering block if no symbol exists there yet.
+Expected<Symbol &> EHFrameEdgeFixer::getOrCreateSymbol(ParseContext &PC,
+                                                       orc::ExecutorAddr Addr) {
+  Symbol *CanonicalSym = nullptr;
+
+  // Pick a canonical symbol when several share the address. The clauses are
+  // chained with '||' (not compared as a lexicographic tuple), preferring
+  // lower linkage/scope enum values, named over unnamed symbols, and the
+  // smaller name. NOTE(review): presumably lower enum values mean
+  // stronger/more-visible symbols, and the name comparison assumes both
+  // symbols are named at that point — confirm against JITLink.h if changing.
+  auto UpdateCanonicalSym = [&](Symbol *Sym) {
+    if (!CanonicalSym || Sym->getLinkage() < CanonicalSym->getLinkage() ||
+        Sym->getScope() < CanonicalSym->getScope() ||
+        (Sym->hasName() && !CanonicalSym->hasName()) ||
+        Sym->getName() < CanonicalSym->getName())
+      CanonicalSym = Sym;
+  };
+
+  if (auto *SymbolsAtAddr = PC.AddrToSyms.getSymbolsAt(Addr))
+    for (auto *Sym : *SymbolsAtAddr)
+      UpdateCanonicalSym(Sym);
+
+  // If we found an existing symbol at the given address then use it.
+  if (CanonicalSym)
+    return *CanonicalSym;
+
+  // Otherwise search for a block covering the address and create a new symbol.
+  auto *B = PC.AddrToBlock.getBlockCovering(Addr);
+  if (!B)
+    return make_error<JITLinkError>("No symbol or block covering address " +
+                                    formatv("{0:x16}", Addr));
+
+  return PC.G.addAnonymousSymbol(*B, Addr - B->getAddress(), 0, false, false);
+}
+
+// Four zero bytes: a zero CFI length field, which terminates the eh-frame
+// record sequence.
+char EHFrameNullTerminator::NullTerminatorBlockContent[4] = {0, 0, 0, 0};
+
+EHFrameNullTerminator::EHFrameNullTerminator(StringRef EHFrameSectionName)
+    : EHFrameSectionName(EHFrameSectionName) {}
+
+// Append the 4-byte null terminator block to the graph's eh-frame section.
+// A no-op if the graph has no section with the configured name.
+Error EHFrameNullTerminator::operator()(LinkGraph &G) {
+  Section *EHFrame = G.findSectionByName(EHFrameSectionName);
+  if (!EHFrame)
+    return Error::success();
+
+  LLVM_DEBUG(dbgs() << "EHFrameNullTerminator adding null terminator to "
+                    << EHFrameSectionName << "\n");
+
+  // Place the terminator at the highest representable address so that it
+  // sorts after every real record in the section.
+  Block &Terminator = G.createContentBlock(
+      *EHFrame, NullTerminatorBlockContent, orc::ExecutorAddr(~uint64_t(4)), 1,
+      0);
+  G.addAnonymousSymbol(Terminator, 0, 4, false, true);
+  return Error::success();
+}
+
+// Out-of-line definition anchors the vtable for this interface.
+EHFrameRegistrar::~EHFrameRegistrar() = default;
+
+// Register the given eh-frame range with the current process's unwinder via
+// orc::registerEHFrameSection.
+Error InProcessEHFrameRegistrar::registerEHFrames(
+    orc::ExecutorAddrRange EHFrameSection) {
+  return orc::registerEHFrameSection(EHFrameSection.Start.toPtr<void *>(),
+                                     EHFrameSection.size());
+}
+
+// Deregister a range previously passed to registerEHFrames via
+// orc::deregisterEHFrameSection.
+Error InProcessEHFrameRegistrar::deregisterEHFrames(
+    orc::ExecutorAddrRange EHFrameSection) {
+  return orc::deregisterEHFrameSection(EHFrameSection.Start.toPtr<void *>(),
+                                       EHFrameSection.size());
+}
+
+// Build a post-layout pass that reports the address range of the graph's
+// eh-frame section through the StoreRangeAddress callback. The section name
+// depends on the object format of the target triple.
+LinkGraphPassFunction
+createEHFrameRecorderPass(const Triple &TT,
+                          StoreFrameRangeFunction StoreRangeAddress) {
+  const char *EHFrameSectionName = nullptr;
+  if (TT.getObjectFormat() == Triple::MachO)
+    EHFrameSectionName = "__TEXT,__eh_frame";
+  else
+    EHFrameSectionName = ".eh_frame";
+
+  // The callback is moved into the returned pass so it stays alive for as
+  // long as the pass does.
+  auto RecordEHFrame =
+      [EHFrameSectionName,
+       StoreFrameRange = std::move(StoreRangeAddress)](LinkGraph &G) -> Error {
+    // Search for a non-empty eh-frame and record the address of the first
+    // symbol in it.
+    orc::ExecutorAddr Addr;
+    size_t Size = 0;
+    if (auto *S = G.findSectionByName(EHFrameSectionName)) {
+      auto R = SectionRange(*S);
+      Addr = R.getStart();
+      Size = R.getSize();
+    }
+    // A null start address with a non-zero size is unrepresentable to the
+    // registrar, so reject it.
+    if (!Addr && Size != 0)
+      return make_error<JITLinkError>(
+          StringRef(EHFrameSectionName) +
+          " section can not have zero address with non-zero size");
+    StoreFrameRange(Addr, Size);
+    return Error::success();
+  };
+
+  return RecordEHFrame;
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
new file mode 100644
index 0000000000..ef4b47b9aa
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/EHFrameSupportImpl.h
@@ -0,0 +1,134 @@
+//===------- EHFrameSupportImpl.h - JITLink eh-frame utils ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// EHFrame registration support for JITLink.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
+
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/BinaryStreamReader.h"
+
+namespace llvm {
+namespace jitlink {
+
+/// A LinkGraph pass that splits blocks in an eh-frame section into sub-blocks
+/// representing individual eh-frames.
+/// EHFrameSplitter should not be run without EHFrameEdgeFixer, which is
+/// responsible for adding FDE-to-CIE edges.
+class EHFrameSplitter {
+public:
+  EHFrameSplitter(StringRef EHFrameSectionName);
+  Error operator()(LinkGraph &G);
+
+private:
+  // Split a single section block at record boundaries; Cache is reused
+  // across calls for the same block.
+  Error processBlock(LinkGraph &G, Block &B, LinkGraph::SplitBlockCache &Cache);
+
+  StringRef EHFrameSectionName;
+};
+
+/// A LinkGraph pass that adds missing FDE-to-CIE, FDE-to-PC and FDE-to-LSDA
+/// edges.
+class EHFrameEdgeFixer {
+public:
+  /// Create an edge fixer for the named eh-frame section. PointerSize is the
+  /// graph's pointer size in bytes; Delta64, Delta32 and NegDelta32 are the
+  /// target-specific edge kinds this pass should use for the edges it adds.
+  EHFrameEdgeFixer(StringRef EHFrameSectionName, unsigned PointerSize,
+                   Edge::Kind Delta64, Edge::Kind Delta32,
+                   Edge::Kind NegDelta32);
+  Error operator()(LinkGraph &G);
+
+private:
+
+  // Decoded form of a CIE augmentation string (see parseAugmentationString).
+  struct AugmentationInfo {
+    bool AugmentationDataPresent = false;
+    bool EHDataFieldPresent = false;
+    // 'L'/'P'/'R' characters in encounter order, null-terminated.
+    uint8_t Fields[4] = {0x0, 0x0, 0x0, 0x0};
+  };
+
+  // Information extracted from a CIE, needed while fixing the FDEs that
+  // reference it.
+  struct CIEInformation {
+    CIEInformation() = default;
+    CIEInformation(Symbol &CIESymbol) : CIESymbol(&CIESymbol) {}
+    Symbol *CIESymbol = nullptr;
+    bool FDEsHaveLSDAField = false;
+    uint8_t FDEPointerEncoding = 0;
+    uint8_t LSDAPointerEncoding = 0;
+  };
+
+  // Lightweight copy of an edge's target and addend, keyed by offset in
+  // BlockEdgeMap.
+  struct EdgeTarget {
+    EdgeTarget() = default;
+    EdgeTarget(const Edge &E) : Target(&E.getTarget()), Addend(E.getAddend()) {}
+
+    Symbol *Target = nullptr;
+    Edge::AddendT Addend = 0;
+  };
+
+  using BlockEdgeMap = DenseMap<Edge::OffsetT, EdgeTarget>;
+  using CIEInfosMap = DenseMap<orc::ExecutorAddr, CIEInformation>;
+
+  // State shared across one run of the pass over a single LinkGraph.
+  struct ParseContext {
+    ParseContext(LinkGraph &G) : G(G) {}
+
+    // Look up the CIEInformation recorded for the CIE at Address, or return
+    // an error if no CIE was parsed there.
+    Expected<CIEInformation *> findCIEInfo(orc::ExecutorAddr Address) {
+      auto I = CIEInfos.find(Address);
+      if (I == CIEInfos.end())
+        return make_error<JITLinkError>("No CIE found at address " +
+                                        formatv("{0:x16}", Address));
+      return &I->second;
+    }
+
+    LinkGraph &G;
+    CIEInfosMap CIEInfos;
+    BlockAddressMap AddrToBlock;
+    SymbolAddressMap AddrToSyms;
+  };
+
+  Error processBlock(ParseContext &PC, Block &B);
+  Error processCIE(ParseContext &PC, Block &B, size_t RecordOffset,
+                   size_t RecordLength, size_t CIEDeltaFieldOffset);
+  Error processFDE(ParseContext &PC, Block &B, size_t RecordOffset,
+                   size_t RecordLength, size_t CIEDeltaFieldOffset,
+                   uint32_t CIEDelta, BlockEdgeMap &BlockEdges);
+
+  Expected<AugmentationInfo>
+  parseAugmentationString(BinaryStreamReader &RecordReader);
+
+  static bool isSupportedPointerEncoding(uint8_t PointerEncoding);
+  unsigned getPointerEncodingDataSize(uint8_t PointerEncoding);
+  Expected<std::pair<orc::ExecutorAddr, Edge::Kind>>
+  readEncodedPointer(uint8_t PointerEncoding,
+                     orc::ExecutorAddr PointerFieldAddress,
+                     BinaryStreamReader &RecordReader);
+
+  Expected<Symbol &> getOrCreateSymbol(ParseContext &PC,
+                                       orc::ExecutorAddr Addr);
+
+  StringRef EHFrameSectionName;
+  unsigned PointerSize;
+  Edge::Kind Delta64;
+  Edge::Kind Delta32;
+  Edge::Kind NegDelta32;
+};
+
+/// Add a 32-bit null-terminator to the end of the eh-frame section.
+class EHFrameNullTerminator {
+public:
+  EHFrameNullTerminator(StringRef EHFrameSectionName);
+  Error operator()(LinkGraph &G);
+
+private:
+  // Four zero bytes: a zero CFI length field terminating the section.
+  static char NullTerminatorBlockContent[];
+  StringRef EHFrameSectionName;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LLVM_LIB_EXECUTIONENGINE_JITLINK_EHFRAMESUPPORTIMPL_H
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF.cpp
new file mode 100644
index 0000000000..eb98e4ba40
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF.cpp
@@ -0,0 +1,103 @@
+//===-------------- ELF.cpp - JIT linker function for ELF -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF jit-link function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF.h"
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_aarch64.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_riscv.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_x86_64.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+// Read e_machine from the ELF header of Buffer.
+//
+// Only little-endian ELF32/ELF64 files are inspected; anything else yields
+// ELF::EM_NONE so that the caller reports an unsupported architecture.
+Expected<uint16_t> readTargetMachineArch(StringRef Buffer) {
+  // EI_CLASS / EI_DATA live beyond the four magic bytes the caller verifies,
+  // so require the full e_ident block before indexing into the buffer.
+  if (Buffer.size() < ELF::EI_NIDENT)
+    return make_error<JITLinkError>("Truncated ELF buffer");
+
+  const char *Data = Buffer.data();
+
+  if (Data[ELF::EI_DATA] == ELF::ELFDATA2LSB) {
+    if (Data[ELF::EI_CLASS] == ELF::ELFCLASS64) {
+      if (auto File = llvm::object::ELF64LEFile::create(Buffer)) {
+        return File->getHeader().e_machine;
+      } else {
+        return File.takeError();
+      }
+    } else if (Data[ELF::EI_CLASS] == ELF::ELFCLASS32) {
+      if (auto File = llvm::object::ELF32LEFile::create(Buffer)) {
+        return File->getHeader().e_machine;
+      } else {
+        return File.takeError();
+      }
+    }
+  }
+
+  return ELF::EM_NONE;
+}
+
+// Create a LinkGraph from an ELF relocatable object buffer, dispatching to
+// the architecture-specific builder selected by the header's e_machine.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject(MemoryBufferRef ObjectBuffer) {
+  StringRef Buffer = ObjectBuffer.getBuffer();
+  // Verify that at least the four ELF magic bytes are present and correct.
+  if (Buffer.size() < ELF::EI_MAG3 + 1)
+    return make_error<JITLinkError>("Truncated ELF buffer");
+
+  if (memcmp(Buffer.data(), ELF::ElfMagic, strlen(ELF::ElfMagic)) != 0)
+    return make_error<JITLinkError>("ELF magic not valid");
+
+  Expected<uint16_t> TargetMachineArch = readTargetMachineArch(Buffer);
+  if (!TargetMachineArch)
+    return TargetMachineArch.takeError();
+
+  switch (*TargetMachineArch) {
+  case ELF::EM_AARCH64:
+    return createLinkGraphFromELFObject_aarch64(ObjectBuffer);
+  case ELF::EM_RISCV:
+    return createLinkGraphFromELFObject_riscv(ObjectBuffer);
+  case ELF::EM_X86_64:
+    return createLinkGraphFromELFObject_x86_64(ObjectBuffer);
+  default:
+    return make_error<JITLinkError>(
+        "Unsupported target machine architecture in ELF object " +
+        ObjectBuffer.getBufferIdentifier());
+  }
+}
+
+// Link the given LinkGraph using the architecture-specific ELF backend
+// chosen from the graph's target triple. Failures are reported through
+// Ctx->notifyFailed rather than returned.
+void link_ELF(std::unique_ptr<LinkGraph> G,
+              std::unique_ptr<JITLinkContext> Ctx) {
+  switch (G->getTargetTriple().getArch()) {
+  case Triple::aarch64:
+    link_ELF_aarch64(std::move(G), std::move(Ctx));
+    return;
+  case Triple::riscv32:
+  case Triple::riscv64:
+    link_ELF_riscv(std::move(G), std::move(Ctx));
+    return;
+  case Triple::x86_64:
+    link_ELF_x86_64(std::move(G), std::move(Ctx));
+    return;
+  default:
+    Ctx->notifyFailed(make_error<JITLinkError>(
+        "Unsupported target machine architecture in ELF link graph " +
+        G->getName()));
+    return;
+  }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp
new file mode 100644
index 0000000000..2194a4fbf1
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.cpp
@@ -0,0 +1,33 @@
+//=----------- ELFLinkGraphBuilder.cpp - ELF LinkGraph builder ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic ELF LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ELFLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+// Names of all DWARF debug sections, expanded from Dwarf.def. Consumed by
+// ELFLinkGraphBuilderBase::isDwarfSection to skip debug sections.
+static const char *DWSecNames[] = {
+#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION)        \
+  ELF_NAME,
+#include "llvm/BinaryFormat/Dwarf.def"
+#undef HANDLE_DWARF_SECTION
+};
+
+namespace llvm {
+namespace jitlink {
+
+// Definitions for the static members declared in ELFLinkGraphBuilder.h.
+StringRef ELFLinkGraphBuilderBase::CommonSectionName(".common");
+ArrayRef<const char *> ELFLinkGraphBuilderBase::DwarfSectionNames = DWSecNames;
+
+// Out-of-line definition anchors the vtable.
+ELFLinkGraphBuilderBase::~ELFLinkGraphBuilderBase() {}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
new file mode 100644
index 0000000000..2ab7ed61f7
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELFLinkGraphBuilder.h
@@ -0,0 +1,530 @@
+//===------- ELFLinkGraphBuilder.h - ELF LinkGraph builder ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic ELF LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_ELFLINKGRAPHBUILDER_H
+#define LIB_EXECUTIONENGINE_JITLINK_ELFLINKGRAPHBUILDER_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+/// Common link-graph building code shared between all ELFFiles.
+class ELFLinkGraphBuilderBase {
+public:
+  ELFLinkGraphBuilderBase(std::unique_ptr<LinkGraph> G) : G(std::move(G)) {}
+  virtual ~ELFLinkGraphBuilderBase();
+
+protected:
+  // Returns true if SectionName names a DWARF debug section. Such sections
+  // are skipped during graph construction.
+  static bool isDwarfSection(StringRef SectionName) {
+    return llvm::is_contained(DwarfSectionNames, SectionName);
+  }
+
+  // Lazily create the read/write ".common" section.
+  Section &getCommonSection() {
+    if (!CommonSection)
+      CommonSection =
+          &G->createSection(CommonSectionName, MemProt::Read | MemProt::Write);
+    return *CommonSection;
+  }
+
+  std::unique_ptr<LinkGraph> G;
+
+private:
+  static StringRef CommonSectionName;
+  static ArrayRef<const char *> DwarfSectionNames;
+
+  Section *CommonSection = nullptr;
+};
+
+/// Link-graph building code that's specific to the given ELFT, but common
+/// across all architectures.
+template <typename ELFT>
+class ELFLinkGraphBuilder : public ELFLinkGraphBuilderBase {
+  using ELFFile = object::ELFFile<ELFT>;
+
+public:
+  ELFLinkGraphBuilder(const object::ELFFile<ELFT> &Obj, Triple TT,
+                      StringRef FileName,
+                      LinkGraph::GetEdgeKindNameFunction GetEdgeKindName);
+
+  /// Attempt to construct and return the LinkGraph.
+  Expected<std::unique_ptr<LinkGraph>> buildGraph();
+
+  /// Call to derived class to handle relocations. These require
+  /// architecture specific knowledge to map to JITLink edge kinds.
+  virtual Error addRelocations() = 0;
+
+protected:
+  using ELFSectionIndex = unsigned;
+  using ELFSymbolIndex = unsigned;
+
+  // Only relocatable objects (ET_REL) can be turned into a LinkGraph.
+  bool isRelocatable() const {
+    return Obj.getHeader().e_type == llvm::ELF::ET_REL;
+  }
+
+  // Record the graph Block created for an ELF section index.
+  void setGraphBlock(ELFSectionIndex SecIndex, Block *B) {
+    assert(!GraphBlocks.count(SecIndex) && "Duplicate section at index");
+    GraphBlocks[SecIndex] = B;
+  }
+
+  // Returns the Block for a section index, or nullptr if none was created
+  // (e.g. for debug or non-SHF_ALLOC sections).
+  Block *getGraphBlock(ELFSectionIndex SecIndex) {
+    auto I = GraphBlocks.find(SecIndex);
+    if (I == GraphBlocks.end())
+      return nullptr;
+    return I->second;
+  }
+
+  // Record the graph Symbol created for an ELF symbol index.
+  void setGraphSymbol(ELFSymbolIndex SymIndex, Symbol &Sym) {
+    assert(!GraphSymbols.count(SymIndex) && "Duplicate symbol at index");
+    GraphSymbols[SymIndex] = &Sym;
+  }
+
+  // Returns the Symbol for an ELF symbol index, or nullptr if none exists.
+  Symbol *getGraphSymbol(ELFSymbolIndex SymIndex) {
+    auto I = GraphSymbols.find(SymIndex);
+    if (I == GraphSymbols.end())
+      return nullptr;
+    return I->second;
+  }
+
+  // Map an ELF symbol's binding/visibility to JITLink Linkage and Scope.
+  Expected<std::pair<Linkage, Scope>>
+  getSymbolLinkageAndScope(const typename ELFT::Sym &Sym, StringRef Name);
+
+  Error prepare();
+  Error graphifySections();
+  Error graphifySymbols();
+
+  /// Traverse all matching relocation records in the given section. The handler
+  /// function Func should be callable with this signature:
+  ///   Error(const typename ELFT::Rela &,
+  ///         const typename ELFT::Shdr &, Section &)
+  ///
+  template <typename RelocHandlerFunction>
+  Error forEachRelocation(const typename ELFT::Shdr &RelSect,
+                          RelocHandlerFunction &&Func,
+                          bool ProcessDebugSections = false);
+
+  /// Traverse all matching relocation records in the given section. Convenience
+  /// wrapper to allow passing a member function for the handler.
+  ///
+  template <typename ClassT, typename RelocHandlerMethod>
+  Error forEachRelocation(const typename ELFT::Shdr &RelSect, ClassT *Instance,
+                          RelocHandlerMethod &&Method,
+                          bool ProcessDebugSections = false) {
+    return forEachRelocation(
+        RelSect,
+        [Instance, Method](const auto &Rel, const auto &Target, auto &GS) {
+          return (Instance->*Method)(Rel, Target, GS);
+        },
+        ProcessDebugSections);
+  }
+
+  const ELFFile &Obj;
+
+  typename ELFFile::Elf_Shdr_Range Sections;
+  const typename ELFFile::Elf_Shdr *SymTabSec = nullptr;
+  StringRef SectionStringTab;
+
+  // Maps ELF section indexes to LinkGraph Blocks.
+  // Only SHF_ALLOC sections will have graph blocks.
+  DenseMap<ELFSectionIndex, Block *> GraphBlocks;
+  DenseMap<ELFSymbolIndex, Symbol *> GraphSymbols;
+  // SHT_SYMTAB_SHNDX extended-index tables, keyed by their symbol table.
+  DenseMap<const typename ELFFile::Elf_Shdr *,
+           ArrayRef<typename ELFFile::Elf_Word>>
+      ShndxTables;
+};
+
+// Construct the builder together with a fresh, empty LinkGraph whose pointer
+// size and endianness are derived from the ELFT instantiation.
+template <typename ELFT>
+ELFLinkGraphBuilder<ELFT>::ELFLinkGraphBuilder(
+    const ELFFile &Obj, Triple TT, StringRef FileName,
+    LinkGraph::GetEdgeKindNameFunction GetEdgeKindName)
+    : ELFLinkGraphBuilderBase(std::make_unique<LinkGraph>(
+          FileName.str(), Triple(std::move(TT)), ELFT::Is64Bits ? 8 : 4,
+          support::endianness(ELFT::TargetEndianness),
+          std::move(GetEdgeKindName))),
+      Obj(Obj) {
+  LLVM_DEBUG(
+      { dbgs() << "Created ELFLinkGraphBuilder for \"" << FileName << "\""; });
+}
+
+// Build the LinkGraph in four phases: locate the symbol/string tables
+// (prepare), create blocks for sections, create symbols, then let the
+// architecture-specific subclass add relocation edges. On success, ownership
+// of the graph transfers to the caller.
+template <typename ELFT>
+Expected<std::unique_ptr<LinkGraph>> ELFLinkGraphBuilder<ELFT>::buildGraph() {
+  if (!isRelocatable())
+    return make_error<JITLinkError>("Object is not a relocatable ELF file");
+
+  if (auto Err = prepare())
+    return std::move(Err);
+
+  if (auto Err = graphifySections())
+    return std::move(Err);
+
+  if (auto Err = graphifySymbols())
+    return std::move(Err);
+
+  if (auto Err = addRelocations())
+    return std::move(Err);
+
+  return std::move(G);
+}
+
+// Map an ELF symbol's binding and visibility to a JITLink (Linkage, Scope)
+// pair. Unrecognized bindings and STV_INTERNAL visibility produce errors.
+template <typename ELFT>
+Expected<std::pair<Linkage, Scope>>
+ELFLinkGraphBuilder<ELFT>::getSymbolLinkageAndScope(
+    const typename ELFT::Sym &Sym, StringRef Name) {
+  Linkage L = Linkage::Strong;
+  Scope S = Scope::Default;
+
+  switch (Sym.getBinding()) {
+  case ELF::STB_LOCAL:
+    S = Scope::Local;
+    break;
+  case ELF::STB_GLOBAL:
+    // Nothing to do here.
+    break;
+  case ELF::STB_WEAK:
+  case ELF::STB_GNU_UNIQUE:
+    L = Linkage::Weak;
+    break;
+  default:
+    return make_error<StringError>(
+        "Unrecognized symbol binding " +
+            Twine(static_cast<int>(Sym.getBinding())) + " for " + Name,
+        inconvertibleErrorCode());
+  }
+
+  switch (Sym.getVisibility()) {
+  case ELF::STV_DEFAULT:
+  case ELF::STV_PROTECTED:
+    // FIXME: Make STV_DEFAULT symbols pre-emptible? This probably needs
+    // Orc support.
+    // Otherwise nothing to do here.
+    break;
+  case ELF::STV_HIDDEN:
+    // Default scope -> Hidden scope. No effect on local scope.
+    if (S == Scope::Default)
+      S = Scope::Hidden;
+    break;
+  case ELF::STV_INTERNAL:
+    return make_error<StringError>(
+        "Unrecognized symbol visibility " +
+            Twine(static_cast<int>(Sym.getVisibility())) + " for " + Name,
+        inconvertibleErrorCode());
+  }
+
+  return std::make_pair(L, S);
+}
+
+// Locate the structures needed to build the graph: the section array, the
+// section-name string table, the (single) SHT_SYMTAB section, and any
+// SHT_SYMTAB_SHNDX extended-index tables.
+template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::prepare() {
+  LLVM_DEBUG(dbgs() << " Preparing to build...\n");
+
+  // Get the sections array.
+  if (auto SectionsOrErr = Obj.sections())
+    Sections = *SectionsOrErr;
+  else
+    return SectionsOrErr.takeError();
+
+  // Get the section string table.
+  if (auto SectionStringTabOrErr = Obj.getSectionStringTable(Sections))
+    SectionStringTab = *SectionStringTabOrErr;
+  else
+    return SectionStringTabOrErr.takeError();
+
+  // Get the SHT_SYMTAB section.
+  for (auto &Sec : Sections) {
+    if (Sec.sh_type == ELF::SHT_SYMTAB) {
+      if (!SymTabSec)
+        SymTabSec = &Sec;
+      else
+        return make_error<JITLinkError>("Multiple SHT_SYMTAB sections in " +
+                                        G->getName());
+    }
+
+    // Extended table.
+    if (Sec.sh_type == ELF::SHT_SYMTAB_SHNDX) {
+      // sh_link names the symbol table this extended-index table belongs to.
+      uint32_t SymtabNdx = Sec.sh_link;
+      if (SymtabNdx >= Sections.size())
+        return make_error<JITLinkError>("sh_link is out of bound");
+
+      auto ShndxTable = Obj.getSHNDXTable(Sec);
+      if (!ShndxTable)
+        return ShndxTable.takeError();
+
+      ShndxTables.insert({&Sections[SymtabNdx], *ShndxTable});
+    }
+  }
+
+  return Error::success();
+}
+
+// Create a LinkGraph Section and Block for every allocatable, non-debug ELF
+// section. SHT_NOBITS sections become zero-fill blocks; everything else gets
+// a content block over the section's bytes.
+template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::graphifySections() {
+  LLVM_DEBUG(dbgs() << " Creating graph sections...\n");
+
+  // For each section...
+  for (ELFSectionIndex SecIndex = 0; SecIndex != Sections.size(); ++SecIndex) {
+
+    auto &Sec = Sections[SecIndex];
+
+    // Start by getting the section name.
+    auto Name = Obj.getSectionName(Sec, SectionStringTab);
+    if (!Name)
+      return Name.takeError();
+
+    // If the name indicates that it's a debug section then skip it: We don't
+    // support those yet.
+    if (isDwarfSection(*Name)) {
+      LLVM_DEBUG({
+        dbgs() << " " << SecIndex << ": \"" << *Name
+               << "\" is a debug section: "
+                  "No graph section will be created.\n";
+      });
+      continue;
+    }
+
+    // Skip non-SHF_ALLOC sections
+    if (!(Sec.sh_flags & ELF::SHF_ALLOC)) {
+      LLVM_DEBUG({
+        dbgs() << " " << SecIndex << ": \"" << *Name
+               << "\" is not an SHF_ALLOC section: "
+                  "No graph section will be created.\n";
+      });
+      continue;
+    }
+
+    LLVM_DEBUG({
+      dbgs() << " " << SecIndex << ": Creating section for \"" << *Name
+             << "\"\n";
+    });
+
+    // Get the section's memory protection flags.
+    MemProt Prot;
+    if (Sec.sh_flags & ELF::SHF_EXECINSTR)
+      Prot = MemProt::Read | MemProt::Exec;
+    else
+      Prot = MemProt::Read | MemProt::Write;
+
+    // Look for existing sections first.
+    auto *GraphSec = G->findSectionByName(*Name);
+    if (!GraphSec)
+      GraphSec = &G->createSection(*Name, Prot);
+    assert(GraphSec->getMemProt() == Prot && "MemProt should match");
+
+    Block *B = nullptr;
+    if (Sec.sh_type != ELF::SHT_NOBITS) {
+      auto Data = Obj.template getSectionContentsAsArray<char>(Sec);
+      if (!Data)
+        return Data.takeError();
+
+      B = &G->createContentBlock(*GraphSec, *Data,
+                                 orc::ExecutorAddr(Sec.sh_addr),
+                                 Sec.sh_addralign, 0);
+    } else
+      // SHT_NOBITS (e.g. .bss) has no file content: use a zero-fill block.
+      B = &G->createZeroFillBlock(*GraphSec, Sec.sh_size,
+                                  orc::ExecutorAddr(Sec.sh_addr),
+                                  Sec.sh_addralign, 0);
+
+    setGraphBlock(SecIndex, B);
+  }
+
+  return Error::success();
+}
+
+template <typename ELFT> Error ELFLinkGraphBuilder<ELFT>::graphifySymbols() {
+ LLVM_DEBUG(dbgs() << " Creating graph symbols...\n");
+
+ // No SYMTAB -- Bail out early.
+ if (!SymTabSec)
+ return Error::success();
+
+ // Get the section content as a Symbols array.
+ auto Symbols = Obj.symbols(SymTabSec);
+ if (!Symbols)
+ return Symbols.takeError();
+
+ // Get the string table for this section.
+ auto StringTab = Obj.getStringTableForSymtab(*SymTabSec, Sections);
+ if (!StringTab)
+ return StringTab.takeError();
+
+ LLVM_DEBUG({
+ StringRef SymTabName;
+
+ if (auto SymTabNameOrErr = Obj.getSectionName(*SymTabSec, SectionStringTab))
+ SymTabName = *SymTabNameOrErr;
+ else {
+ dbgs() << "Could not get ELF SHT_SYMTAB section name for logging: "
+ << toString(SymTabNameOrErr.takeError()) << "\n";
+ SymTabName = "<SHT_SYMTAB section with invalid name>";
+ }
+
+ dbgs() << " Adding symbols from symtab section \"" << SymTabName
+ << "\"\n";
+ });
+
+ for (ELFSymbolIndex SymIndex = 0; SymIndex != Symbols->size(); ++SymIndex) {
+ auto &Sym = (*Symbols)[SymIndex];
+
+ // Check symbol type.
+ switch (Sym.getType()) {
+ case ELF::STT_FILE:
+ LLVM_DEBUG({
+ if (auto Name = Sym.getName(*StringTab))
+ dbgs() << " " << SymIndex << ": Skipping STT_FILE symbol \""
+ << *Name << "\"\n";
+ else {
+ dbgs() << "Could not get STT_FILE symbol name: "
+ << toString(Name.takeError()) << "\n";
+ dbgs() << " " << SymIndex
+ << ": Skipping STT_FILE symbol with invalid name\n";
+ }
+ });
+ continue;
+ break;
+ }
+
+ // Get the symbol name.
+ auto Name = Sym.getName(*StringTab);
+ if (!Name)
+ return Name.takeError();
+
+ // Handle common symbols specially.
+ if (Sym.isCommon()) {
+ Symbol &GSym = G->addCommonSymbol(*Name, Scope::Default,
+ getCommonSection(), orc::ExecutorAddr(),
+ Sym.st_size, Sym.getValue(), false);
+ setGraphSymbol(SymIndex, GSym);
+ continue;
+ }
+
+ // Map Visibility and Binding to Scope and Linkage:
+ Linkage L;
+ Scope S;
+
+ if (auto LSOrErr = getSymbolLinkageAndScope(Sym, *Name))
+ std::tie(L, S) = *LSOrErr;
+ else
+ return LSOrErr.takeError();
+
+ if (Sym.isDefined() &&
+ (Sym.getType() == ELF::STT_NOTYPE || Sym.getType() == ELF::STT_FUNC ||
+ Sym.getType() == ELF::STT_OBJECT ||
+ Sym.getType() == ELF::STT_SECTION || Sym.getType() == ELF::STT_TLS)) {
+ // Handle extended tables.
+ unsigned Shndx = Sym.st_shndx;
+ if (Shndx == ELF::SHN_XINDEX) {
+ auto ShndxTable = ShndxTables.find(SymTabSec);
+ if (ShndxTable == ShndxTables.end())
+ continue;
+ auto NdxOrErr = object::getExtendedSymbolTableIndex<ELFT>(
+ Sym, SymIndex, ShndxTable->second);
+ if (!NdxOrErr)
+ return NdxOrErr.takeError();
+ Shndx = *NdxOrErr;
+ }
+ if (auto *B = getGraphBlock(Shndx)) {
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Creating defined graph symbol for ELF symbol \"" << *Name
+ << "\"\n";
+ });
+
+ // In RISCV, temporary symbols (Used to generate dwarf, eh_frame
+ // sections...) will appear in object code's symbol table, and LLVM does
+ // not use names on these temporary symbols (RISCV gnu toolchain uses
+ // names on these temporary symbols). If the symbol is unnamed, add an
+ // anonymous symbol.
+ auto &GSym =
+ Name->empty()
+ ? G->addAnonymousSymbol(*B, Sym.getValue(), Sym.st_size,
+ false, false)
+ : G->addDefinedSymbol(*B, Sym.getValue(), *Name, Sym.st_size, L,
+ S, Sym.getType() == ELF::STT_FUNC, false);
+ setGraphSymbol(SymIndex, GSym);
+ }
+ } else if (Sym.isUndefined() && Sym.isExternal()) {
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Creating external graph symbol for ELF symbol \"" << *Name
+ << "\"\n";
+ });
+ auto &GSym = G->addExternalSymbol(*Name, Sym.st_size, L);
+ setGraphSymbol(SymIndex, GSym);
+ } else {
+ LLVM_DEBUG({
+ dbgs() << " " << SymIndex
+ << ": Not creating graph symbol for ELF symbol \"" << *Name
+ << "\" with unrecognized type\n";
+ });
+ }
+ }
+
+ return Error::success();
+}
+
+template <typename ELFT>
+template <typename RelocHandlerFunction>
+Error ELFLinkGraphBuilder<ELFT>::forEachRelocation(
+ const typename ELFT::Shdr &RelSect, RelocHandlerFunction &&Func,
+ bool ProcessDebugSections) {
+
+ // Only look into sections that store relocation entries.
+ if (RelSect.sh_type != ELF::SHT_RELA && RelSect.sh_type != ELF::SHT_REL)
+ return Error::success();
+
+ // sh_info contains the section header index of the target (FixupSection),
+ // which is the section to which all relocations in RelSect apply.
+ auto FixupSection = Obj.getSection(RelSect.sh_info);
+ if (!FixupSection)
+ return FixupSection.takeError();
+
+ // Target sections have names in valid ELF object files.
+ Expected<StringRef> Name = Obj.getSectionName(**FixupSection);
+ if (!Name)
+ return Name.takeError();
+ LLVM_DEBUG(dbgs() << " " << *Name << ":\n");
+
+ // Consider skipping these relocations.
+ if (!ProcessDebugSections && isDwarfSection(*Name)) {
+ LLVM_DEBUG(dbgs() << " skipped (dwarf section)\n\n");
+ return Error::success();
+ }
+
+ // Lookup the link-graph node corresponding to the target section name.
+ auto *BlockToFix = getGraphBlock(RelSect.sh_info);
+ if (!BlockToFix)
+ return make_error<StringError>(
+ "Refencing a section that wasn't added to the graph: " + *Name,
+ inconvertibleErrorCode());
+
+ auto RelEntries = Obj.relas(RelSect);
+ if (!RelEntries)
+ return RelEntries.takeError();
+
+ // Let the callee process relocation entries one by one.
+ for (const typename ELFT::Rela &R : *RelEntries)
+ if (Error Err = Func(R, **FixupSection, *BlockToFix))
+ return Err;
+
+ LLVM_DEBUG(dbgs() << "\n");
+ return Error::success();
+}
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_ELFLINKGRAPHBUILDER_H
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
new file mode 100644
index 0000000000..dd3eb97c21
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_aarch64.cpp
@@ -0,0 +1,183 @@
+//===----- ELF_aarch64.cpp - JIT linker implementation for ELF/aarch64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/aarch64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_aarch64.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/MathExtras.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+namespace llvm {
+namespace jitlink {
+
+class ELFJITLinker_aarch64 : public JITLinker<ELFJITLinker_aarch64> {
+ friend class JITLinker<ELFJITLinker_aarch64>;
+
+public:
+ ELFJITLinker_aarch64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ using namespace aarch64;
+ using namespace llvm::support;
+
+ char *BlockWorkingMem = B.getAlreadyMutableContent().data();
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ auto FixupAddress = B.getAddress() + E.getOffset();
+ switch (E.getKind()) {
+ case aarch64::R_AARCH64_CALL26: {
+ assert((FixupAddress.getValue() & 0x3) == 0 &&
+ "Call-inst is not 32-bit aligned");
+ int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+
+ if (static_cast<uint64_t>(Value) & 0x3)
+ return make_error<JITLinkError>("Call target is not 32-bit aligned");
+
+ if (!isInt<28>(Value))
+ return makeTargetOutOfRangeError(G, B, E);
+
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ assert((RawInstr & 0x7fffffff) == 0x14000000 &&
+ "RawInstr isn't a B or BR immediate instruction");
+ uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
+ uint32_t FixedInstr = RawInstr | Imm;
+ *(little32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ }
+ return Error::success();
+ }
+};
+
+template <typename ELFT>
+class ELFLinkGraphBuilder_aarch64 : public ELFLinkGraphBuilder<ELFT> {
+private:
+ static Expected<aarch64::EdgeKind_aarch64>
+ getRelocationKind(const uint32_t Type) {
+ using namespace aarch64;
+ switch (Type) {
+ case ELF::R_AARCH64_CALL26:
+ return EdgeKind_aarch64::R_AARCH64_CALL26;
+ }
+
+ return make_error<JITLinkError>("Unsupported aarch64 relocation:" +
+ formatv("{0:d}", Type));
+ }
+
+ Error addRelocations() override {
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ using Base = ELFLinkGraphBuilder<ELFT>;
+ using Self = ELFLinkGraphBuilder_aarch64<ELFT>;
+ for (const auto &RelSect : Base::Sections)
+ if (Error Err = Base::forEachRelocation(RelSect, this,
+ &Self::addSingleRelocation))
+ return Err;
+
+ return Error::success();
+ }
+
+ Error addSingleRelocation(const typename ELFT::Rela &Rel,
+ const typename ELFT::Shdr &FixupSect,
+ Block &BlockToFix) {
+ using Base = ELFLinkGraphBuilder<ELFT>;
+
+ uint32_t SymbolIndex = Rel.getSymbol(false);
+ auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+ if (!ObjSymbol)
+ return ObjSymbol.takeError();
+
+ Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+ if (!GraphSymbol)
+ return make_error<StringError>(
+ formatv("Could not find symbol at given index, did you add it to "
+ "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+ SymbolIndex, (*ObjSymbol)->st_shndx,
+ Base::GraphSymbols.size()),
+ inconvertibleErrorCode());
+
+ uint32_t Type = Rel.getType(false);
+ Expected<aarch64::EdgeKind_aarch64> Kind = getRelocationKind(Type);
+ if (!Kind)
+ return Kind.takeError();
+
+ int64_t Addend = Rel.r_addend;
+ orc::ExecutorAddr FixupAddress =
+ orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+ Edge GE(*Kind, Offset, *GraphSymbol, Addend);
+ LLVM_DEBUG({
+ dbgs() << " ";
+ printEdge(dbgs(), BlockToFix, GE, aarch64::getEdgeKindName(*Kind));
+ dbgs() << "\n";
+ });
+
+ BlockToFix.addEdge(std::move(GE));
+ return Error::success();
+ }
+
+public:
+ ELFLinkGraphBuilder_aarch64(StringRef FileName,
+ const object::ELFFile<ELFT> &Obj, const Triple T)
+ : ELFLinkGraphBuilder<ELFT>(Obj, std::move(T), FileName,
+ aarch64::getEdgeKindName) {}
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_aarch64(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ assert((*ELFObj)->getArch() == Triple::aarch64 &&
+ "Only AArch64 (little endian) is supported for now");
+
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_aarch64<object::ELF64LE>((*ELFObj)->getFileName(),
+ ELFObjFile.getELFFile(),
+ (*ELFObj)->makeTriple())
+ .buildGraph();
+}
+
+void link_ELF_aarch64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+ const Triple &TT = G->getTargetTriple();
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+ }
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ ELFJITLinker_aarch64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_riscv.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_riscv.cpp
new file mode 100644
index 0000000000..f83001417e
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_riscv.cpp
@@ -0,0 +1,566 @@
+//===------- ELF_riscv.cpp -JIT linker implementation for ELF/riscv -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/riscv jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_riscv.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+#include "PerGraphGOTAndPLTStubsBuilder.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/JITLink/riscv.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "jitlink"
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::riscv;
+
+namespace {
+
+class PerGraphGOTAndPLTStubsBuilder_ELF_riscv
+ : public PerGraphGOTAndPLTStubsBuilder<
+ PerGraphGOTAndPLTStubsBuilder_ELF_riscv> {
+public:
+ static constexpr size_t StubEntrySize = 16;
+ static const uint8_t NullGOTEntryContent[8];
+ static const uint8_t RV64StubContent[StubEntrySize];
+ static const uint8_t RV32StubContent[StubEntrySize];
+
+ using PerGraphGOTAndPLTStubsBuilder<
+ PerGraphGOTAndPLTStubsBuilder_ELF_riscv>::PerGraphGOTAndPLTStubsBuilder;
+
+ bool isRV64() const { return G.getPointerSize() == 8; }
+
+ bool isGOTEdgeToFix(Edge &E) const { return E.getKind() == R_RISCV_GOT_HI20; }
+
+ Symbol &createGOTEntry(Symbol &Target) {
+ Block &GOTBlock =
+ G.createContentBlock(getGOTSection(), getGOTEntryBlockContent(),
+ orc::ExecutorAddr(), G.getPointerSize(), 0);
+ GOTBlock.addEdge(isRV64() ? R_RISCV_64 : R_RISCV_32, 0, Target, 0);
+ return G.addAnonymousSymbol(GOTBlock, 0, G.getPointerSize(), false, false);
+ }
+
+ Symbol &createPLTStub(Symbol &Target) {
+ Block &StubContentBlock = G.createContentBlock(
+ getStubsSection(), getStubBlockContent(), orc::ExecutorAddr(), 4, 0);
+ auto &GOTEntrySymbol = getGOTEntry(Target);
+ StubContentBlock.addEdge(R_RISCV_CALL, 0, GOTEntrySymbol, 0);
+ return G.addAnonymousSymbol(StubContentBlock, 0, StubEntrySize, true,
+ false);
+ }
+
+ void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+ // Replace the relocation pair (R_RISCV_GOT_HI20, R_RISCV_PCREL_LO12)
+ // with (R_RISCV_PCREL_HI20, R_RISCV_PCREL_LO12)
+ // Therefore, here just change the R_RISCV_GOT_HI20 to R_RISCV_PCREL_HI20
+ E.setKind(R_RISCV_PCREL_HI20);
+ E.setTarget(GOTEntry);
+ }
+
+ void fixPLTEdge(Edge &E, Symbol &PLTStubs) {
+ assert(E.getKind() == R_RISCV_CALL_PLT && "Not a R_RISCV_CALL_PLT edge?");
+ E.setKind(R_RISCV_CALL);
+ E.setTarget(PLTStubs);
+ }
+
+ bool isExternalBranchEdge(Edge &E) const {
+ return E.getKind() == R_RISCV_CALL_PLT;
+ }
+
+private:
+ Section &getGOTSection() const {
+ if (!GOTSection)
+ GOTSection = &G.createSection("$__GOT", MemProt::Read);
+ return *GOTSection;
+ }
+
+ Section &getStubsSection() const {
+ if (!StubsSection)
+ StubsSection =
+ &G.createSection("$__STUBS", MemProt::Read | MemProt::Exec);
+ return *StubsSection;
+ }
+
+ ArrayRef<char> getGOTEntryBlockContent() {
+ return {reinterpret_cast<const char *>(NullGOTEntryContent),
+ G.getPointerSize()};
+ }
+
+ ArrayRef<char> getStubBlockContent() {
+ auto StubContent = isRV64() ? RV64StubContent : RV32StubContent;
+ return {reinterpret_cast<const char *>(StubContent), StubEntrySize};
+ }
+
+ mutable Section *GOTSection = nullptr;
+ mutable Section *StubsSection = nullptr;
+};
+
+const uint8_t PerGraphGOTAndPLTStubsBuilder_ELF_riscv::NullGOTEntryContent[8] =
+ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+
+const uint8_t
+ PerGraphGOTAndPLTStubsBuilder_ELF_riscv::RV64StubContent[StubEntrySize] = {
+ 0x17, 0x0e, 0x00, 0x00, // auipc t3, literal
+ 0x03, 0x3e, 0x0e, 0x00, // ld t3, literal(t3)
+ 0x67, 0x00, 0x0e, 0x00, // jr t3
+ 0x13, 0x00, 0x00, 0x00}; // nop
+
+const uint8_t
+ PerGraphGOTAndPLTStubsBuilder_ELF_riscv::RV32StubContent[StubEntrySize] = {
+ 0x17, 0x0e, 0x00, 0x00, // auipc t3, literal
+ 0x03, 0x2e, 0x0e, 0x00, // lw t3, literal(t3)
+ 0x67, 0x00, 0x0e, 0x00, // jr t3
+ 0x13, 0x00, 0x00, 0x00}; // nop
+} // namespace
+namespace llvm {
+namespace jitlink {
+
+static Expected<const Edge &> getRISCVPCRelHi20(const Edge &E) {
+ using namespace riscv;
+ assert((E.getKind() == R_RISCV_PCREL_LO12_I ||
+ E.getKind() == R_RISCV_PCREL_LO12_S) &&
+ "Can only have high relocation for R_RISCV_PCREL_LO12_I or "
+ "R_RISCV_PCREL_LO12_S");
+
+ const Symbol &Sym = E.getTarget();
+ const Block &B = Sym.getBlock();
+ orc::ExecutorAddrDiff Offset = Sym.getOffset();
+
+ struct Comp {
+ bool operator()(const Edge &Lhs, orc::ExecutorAddrDiff Offset) {
+ return Lhs.getOffset() < Offset;
+ }
+ bool operator()(orc::ExecutorAddrDiff Offset, const Edge &Rhs) {
+ return Offset < Rhs.getOffset();
+ }
+ };
+
+ auto Bound =
+ std::equal_range(B.edges().begin(), B.edges().end(), Offset, Comp{});
+
+ for (auto It = Bound.first; It != Bound.second; ++It) {
+ if (It->getKind() == R_RISCV_PCREL_HI20)
+ return *It;
+ }
+
+ return make_error<JITLinkError>(
+ "No HI20 PCREL relocation type be found for LO12 PCREL relocation type");
+}
+
+static uint32_t extractBits(uint32_t Num, unsigned Low, unsigned Size) {
+ return (Num & (((1ULL << (Size + 1)) - 1) << Low)) >> Low;
+}
+
+inline Error checkAlignment(llvm::orc::ExecutorAddr loc, uint64_t v, int n,
+ const Edge &E) {
+ if (v & (n - 1))
+ return make_error<JITLinkError>("0x" + llvm::utohexstr(loc.getValue()) +
+ " improper alignment for relocation " +
+ formatv("{0:d}", E.getKind()) + ": 0x" +
+ llvm::utohexstr(v) + " is not aligned to " +
+ Twine(n) + " bytes");
+ return Error::success();
+}
+
+static inline bool isInRangeForImmS32(int64_t Value) {
+ return (Value >= std::numeric_limits<int32_t>::min() &&
+ Value <= std::numeric_limits<int32_t>::max());
+}
+
+class ELFJITLinker_riscv : public JITLinker<ELFJITLinker_riscv> {
+ friend class JITLinker<ELFJITLinker_riscv>;
+
+public:
+ ELFJITLinker_riscv(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G, PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ using namespace riscv;
+ using namespace llvm::support;
+
+ char *BlockWorkingMem = B.getAlreadyMutableContent().data();
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
+ switch (E.getKind()) {
+ case R_RISCV_32: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_64: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
+ break;
+ }
+ case R_RISCV_BRANCH: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+ Error AlignmentIssue = checkAlignment(FixupAddress, Value, 2, E);
+ if (AlignmentIssue) {
+ return AlignmentIssue;
+ }
+ int64_t Lo = Value & 0xFFF;
+ uint32_t Imm31_25 = extractBits(Lo, 5, 6) << 25 | extractBits(Lo, 12, 1)
+ << 31;
+ uint32_t Imm11_7 = extractBits(Lo, 1, 4) << 8 | extractBits(Lo, 11, 1)
+ << 7;
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm31_25 | Imm11_7;
+ break;
+ }
+ case R_RISCV_HI20: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ int64_t Hi = Value + 0x800;
+ if (LLVM_UNLIKELY(!isInRangeForImmS32(Hi)))
+ return makeTargetOutOfRangeError(G, B, E);
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ *(little32_t *)FixupPtr =
+ (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
+ break;
+ }
+ case R_RISCV_LO12_I: {
+ // FIXME: We assume that R_RISCV_HI20 is present in object code and pairs
+ // with current relocation R_RISCV_LO12_I. So here may need a check.
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ int32_t Lo = Value & 0xFFF;
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ *(little32_t *)FixupPtr =
+ (RawInstr & 0xFFFFF) | (static_cast<uint32_t>(Lo & 0xFFF) << 20);
+ break;
+ }
+ case R_RISCV_CALL: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+ int64_t Hi = Value + 0x800;
+ if (LLVM_UNLIKELY(!isInRangeForImmS32(Hi)))
+ return makeTargetOutOfRangeError(G, B, E);
+ int32_t Lo = Value & 0xFFF;
+ uint32_t RawInstrAuipc = *(little32_t *)FixupPtr;
+ uint32_t RawInstrJalr = *(little32_t *)(FixupPtr + 4);
+ *(little32_t *)FixupPtr =
+ RawInstrAuipc | (static_cast<uint32_t>(Hi & 0xFFFFF000));
+ *(little32_t *)(FixupPtr + 4) =
+ RawInstrJalr | (static_cast<uint32_t>(Lo) << 20);
+ break;
+ }
+ case R_RISCV_PCREL_HI20: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+ int64_t Hi = Value + 0x800;
+ if (LLVM_UNLIKELY(!isInRangeForImmS32(Hi)))
+ return makeTargetOutOfRangeError(G, B, E);
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ *(little32_t *)FixupPtr =
+ (RawInstr & 0xFFF) | (static_cast<uint32_t>(Hi & 0xFFFFF000));
+ break;
+ }
+ case R_RISCV_PCREL_LO12_I: {
+ // FIXME: We assume that R_RISCV_PCREL_HI20 is present in object code and
+ // pairs with current relocation R_RISCV_PCREL_LO12_I. So here may need a
+ // check.
+ auto RelHI20 = getRISCVPCRelHi20(E);
+ if (!RelHI20)
+ return RelHI20.takeError();
+ int64_t Value = RelHI20->getTarget().getAddress() +
+ RelHI20->getAddend() - E.getTarget().getAddress();
+ int64_t Lo = Value & 0xFFF;
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ *(little32_t *)FixupPtr =
+ (RawInstr & 0xFFFFF) | (static_cast<uint32_t>(Lo & 0xFFF) << 20);
+ break;
+ }
+ case R_RISCV_PCREL_LO12_S: {
+ // FIXME: We assume that R_RISCV_PCREL_HI20 is present in object code and
+ // pairs with current relocation R_RISCV_PCREL_LO12_S. So here may need a
+ // check.
+ auto RelHI20 = getRISCVPCRelHi20(E);
+ int64_t Value = RelHI20->getTarget().getAddress() +
+ RelHI20->getAddend() - E.getTarget().getAddress();
+ int64_t Lo = Value & 0xFFF;
+ uint32_t Imm31_25 = extractBits(Lo, 5, 7) << 25;
+ uint32_t Imm11_7 = extractBits(Lo, 0, 5) << 7;
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+
+ *(little32_t *)FixupPtr = (RawInstr & 0x1FFF07F) | Imm31_25 | Imm11_7;
+ break;
+ }
+ case R_RISCV_ADD64: {
+ int64_t Value = (E.getTarget().getAddress() +
+ support::endian::read64le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) +
+ E.getAddend())
+ .getValue();
+ *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
+ break;
+ }
+ case R_RISCV_ADD32: {
+ int64_t Value = (E.getTarget().getAddress() +
+ support::endian::read32le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) +
+ E.getAddend())
+ .getValue();
+ *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_ADD16: {
+ int64_t Value = (E.getTarget().getAddress() +
+ support::endian::read16le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) +
+ E.getAddend())
+ .getValue();
+ *(little16_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_ADD8: {
+ int64_t Value =
+ (E.getTarget().getAddress() +
+ *(reinterpret_cast<const uint8_t *>(FixupAddress.getValue())) +
+ E.getAddend())
+ .getValue();
+ *FixupPtr = static_cast<uint8_t>(Value);
+ break;
+ }
+ case R_RISCV_SUB64: {
+ int64_t Value = support::endian::read64le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) -
+ E.getTarget().getAddress().getValue() - E.getAddend();
+ *(little64_t *)FixupPtr = static_cast<uint64_t>(Value);
+ break;
+ }
+ case R_RISCV_SUB32: {
+ int64_t Value = support::endian::read32le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) -
+ E.getTarget().getAddress().getValue() - E.getAddend();
+ *(little32_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_SUB16: {
+ int64_t Value = support::endian::read16le(reinterpret_cast<const void *>(
+ FixupAddress.getValue())) -
+ E.getTarget().getAddress().getValue() - E.getAddend();
+ *(little16_t *)FixupPtr = static_cast<uint32_t>(Value);
+ break;
+ }
+ case R_RISCV_SUB8: {
+ int64_t Value =
+ *(reinterpret_cast<const uint8_t *>(FixupAddress.getValue())) -
+ E.getTarget().getAddress().getValue() - E.getAddend();
+ *FixupPtr = static_cast<uint8_t>(Value);
+ break;
+ }
+ case R_RISCV_SET6: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ uint32_t RawData = *(little32_t *)FixupPtr;
+ int64_t Word6 = Value & 0x3f;
+ *(little32_t *)FixupPtr = (RawData & 0xffffffc0) | Word6;
+ break;
+ }
+ case R_RISCV_SET8: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ uint32_t RawData = *(little32_t *)FixupPtr;
+ int64_t Word8 = Value & 0xff;
+ *(little32_t *)FixupPtr = (RawData & 0xffffff00) | Word8;
+ break;
+ }
+ case R_RISCV_SET16: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ uint32_t RawData = *(little32_t *)FixupPtr;
+ int64_t Word16 = Value & 0xffff;
+ *(little32_t *)FixupPtr = (RawData & 0xffff0000) | Word16;
+ break;
+ }
+ case R_RISCV_SET32: {
+ int64_t Value = (E.getTarget().getAddress() + E.getAddend()).getValue();
+ int64_t Word32 = Value & 0xffffffff;
+ *(little32_t *)FixupPtr = Word32;
+ break;
+ }
+ case R_RISCV_32_PCREL: {
+ int64_t Value = E.getTarget().getAddress() + E.getAddend() - FixupAddress;
+ int64_t Word32 = Value & 0xffffffff;
+ *(little32_t *)FixupPtr = Word32;
+ break;
+ }
+ }
+ return Error::success();
+ }
+};
+
+template <typename ELFT>
+class ELFLinkGraphBuilder_riscv : public ELFLinkGraphBuilder<ELFT> {
+private:
+ static Expected<riscv::EdgeKind_riscv>
+ getRelocationKind(const uint32_t Type) {
+ using namespace riscv;
+ switch (Type) {
+ case ELF::R_RISCV_32:
+ return EdgeKind_riscv::R_RISCV_32;
+ case ELF::R_RISCV_64:
+ return EdgeKind_riscv::R_RISCV_64;
+ case ELF::R_RISCV_BRANCH:
+ return EdgeKind_riscv::R_RISCV_BRANCH;
+ case ELF::R_RISCV_HI20:
+ return EdgeKind_riscv::R_RISCV_HI20;
+ case ELF::R_RISCV_LO12_I:
+ return EdgeKind_riscv::R_RISCV_LO12_I;
+ case ELF::R_RISCV_CALL:
+ return EdgeKind_riscv::R_RISCV_CALL;
+ case ELF::R_RISCV_PCREL_HI20:
+ return EdgeKind_riscv::R_RISCV_PCREL_HI20;
+ case ELF::R_RISCV_PCREL_LO12_I:
+ return EdgeKind_riscv::R_RISCV_PCREL_LO12_I;
+ case ELF::R_RISCV_PCREL_LO12_S:
+ return EdgeKind_riscv::R_RISCV_PCREL_LO12_S;
+ case ELF::R_RISCV_GOT_HI20:
+ return EdgeKind_riscv::R_RISCV_GOT_HI20;
+ case ELF::R_RISCV_CALL_PLT:
+ return EdgeKind_riscv::R_RISCV_CALL_PLT;
+ case ELF::R_RISCV_ADD64:
+ return EdgeKind_riscv::R_RISCV_ADD64;
+ case ELF::R_RISCV_ADD32:
+ return EdgeKind_riscv::R_RISCV_ADD32;
+ case ELF::R_RISCV_ADD16:
+ return EdgeKind_riscv::R_RISCV_ADD16;
+ case ELF::R_RISCV_ADD8:
+ return EdgeKind_riscv::R_RISCV_ADD8;
+ case ELF::R_RISCV_SUB64:
+ return EdgeKind_riscv::R_RISCV_SUB64;
+ case ELF::R_RISCV_SUB32:
+ return EdgeKind_riscv::R_RISCV_SUB32;
+ case ELF::R_RISCV_SUB16:
+ return EdgeKind_riscv::R_RISCV_SUB16;
+ case ELF::R_RISCV_SUB8:
+ return EdgeKind_riscv::R_RISCV_SUB8;
+ case ELF::R_RISCV_SET6:
+ return EdgeKind_riscv::R_RISCV_SET6;
+ case ELF::R_RISCV_SET8:
+ return EdgeKind_riscv::R_RISCV_SET8;
+ case ELF::R_RISCV_SET16:
+ return EdgeKind_riscv::R_RISCV_SET16;
+ case ELF::R_RISCV_SET32:
+ return EdgeKind_riscv::R_RISCV_SET32;
+ case ELF::R_RISCV_32_PCREL:
+ return EdgeKind_riscv::R_RISCV_32_PCREL;
+ }
+
+ return make_error<JITLinkError>("Unsupported riscv relocation:" +
+ formatv("{0:d}", Type));
+ }
+
+ Error addRelocations() override {
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ using Base = ELFLinkGraphBuilder<ELFT>;
+ using Self = ELFLinkGraphBuilder_riscv<ELFT>;
+ for (const auto &RelSect : Base::Sections)
+ if (Error Err = Base::forEachRelocation(RelSect, this,
+ &Self::addSingleRelocation))
+ return Err;
+
+ return Error::success();
+ }
+
+ Error addSingleRelocation(const typename ELFT::Rela &Rel,
+ const typename ELFT::Shdr &FixupSect,
+ Block &BlockToFix) {
+ using Base = ELFLinkGraphBuilder<ELFT>;
+
+ uint32_t SymbolIndex = Rel.getSymbol(false);
+ auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+ if (!ObjSymbol)
+ return ObjSymbol.takeError();
+
+ Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+ if (!GraphSymbol)
+ return make_error<StringError>(
+ formatv("Could not find symbol at given index, did you add it to "
+ "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+ SymbolIndex, (*ObjSymbol)->st_shndx,
+ Base::GraphSymbols.size()),
+ inconvertibleErrorCode());
+
+ uint32_t Type = Rel.getType(false);
+ Expected<riscv::EdgeKind_riscv> Kind = getRelocationKind(Type);
+ if (!Kind)
+ return Kind.takeError();
+
+ int64_t Addend = Rel.r_addend;
+ auto FixupAddress = orc::ExecutorAddr(FixupSect.sh_addr) + Rel.r_offset;
+ Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+ Edge GE(*Kind, Offset, *GraphSymbol, Addend);
+ LLVM_DEBUG({
+ dbgs() << " ";
+ printEdge(dbgs(), BlockToFix, GE, riscv::getEdgeKindName(*Kind));
+ dbgs() << "\n";
+ });
+
+ BlockToFix.addEdge(std::move(GE));
+ return Error::success();
+ }
+
+public:
+ ELFLinkGraphBuilder_riscv(StringRef FileName,
+ const object::ELFFile<ELFT> &Obj, const Triple T)
+ : ELFLinkGraphBuilder<ELFT>(Obj, std::move(T), FileName,
+ riscv::getEdgeKindName) {}
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_riscv(MemoryBufferRef ObjectBuffer) {
+ LLVM_DEBUG({
+ dbgs() << "Building jitlink graph for new input "
+ << ObjectBuffer.getBufferIdentifier() << "...\n";
+ });
+
+ auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+ if (!ELFObj)
+ return ELFObj.takeError();
+
+ if ((*ELFObj)->getArch() == Triple::riscv64) {
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_riscv<object::ELF64LE>(
+ (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+ (*ELFObj)->makeTriple())
+ .buildGraph();
+ } else {
+ assert((*ELFObj)->getArch() == Triple::riscv32 &&
+ "Invalid triple for RISCV ELF object file");
+ auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF32LE>>(**ELFObj);
+ return ELFLinkGraphBuilder_riscv<object::ELF32LE>(
+ (*ELFObj)->getFileName(), ELFObjFile.getELFFile(),
+ (*ELFObj)->makeTriple())
+ .buildGraph();
+ }
+}
+
+void link_ELF_riscv(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+ PassConfiguration Config;
+ const Triple &TT = G->getTargetTriple();
+ if (Ctx->shouldAddDefaultTargetPasses(TT)) {
+ if (auto MarkLive = Ctx->getMarkLivePass(TT))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+ Config.PostPrunePasses.push_back(
+ PerGraphGOTAndPLTStubsBuilder_ELF_riscv::asPass);
+ }
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ ELFJITLinker_riscv::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
new file mode 100644
index 0000000000..79d2cdbb30
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ELF_x86_64.cpp
@@ -0,0 +1,440 @@
+//===---- ELF_x86_64.cpp -JIT linker implementation for ELF/x86-64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF/x86-64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/ELF_x86_64.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/JITLink/TableManager.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Endian.h"
+
+#include "DefineExternalSectionStartAndEndSymbols.h"
+#include "EHFrameSupportImpl.h"
+#include "ELFLinkGraphBuilder.h"
+#include "JITLinkGeneric.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::ELF_x86_64_Edges;
+
+namespace {
+
+constexpr StringRef ELFGOTSymbolName = "_GLOBAL_OFFSET_TABLE_";
+constexpr StringRef ELFTLSInfoSectionName = "$__TLSINFO";
+
+// Table manager that materializes TLS-info entries for x86-64 ELF TLS
+// relocations. Each entry is a 16-byte record (see TLSInfoEntryContent);
+// edges requesting a TLS descriptor are rewritten to Delta32 edges pointing
+// at the per-target entry in the $__TLSINFO section.
+class TLSInfoTableManager_ELF_x86_64
+    : public TableManager<TLSInfoTableManager_ELF_x86_64> {
+public:
+  static const uint8_t TLSInfoEntryContent[16];
+
+  static StringRef getSectionName() { return ELFTLSInfoSectionName; }
+
+  // Rewrite RequestTLSDescInGOTAndTransformToDelta32 edges to Delta32 edges
+  // targeting this manager's entry for the original target. Returns true if
+  // the edge was rewritten.
+  bool visitEdge(LinkGraph &G, Block *B, Edge &E) {
+    if (E.getKind() == x86_64::RequestTLSDescInGOTAndTransformToDelta32) {
+      LLVM_DEBUG({
+        dbgs() << "  Fixing " << G.getEdgeKindName(E.getKind()) << " edge at "
+               << formatv("{0:x}", B->getFixupAddress(E)) << " ("
+               << formatv("{0:x}", B->getAddress()) << " + "
+               << formatv("{0:x}", E.getOffset()) << ")\n";
+      });
+      E.setKind(x86_64::Delta32);
+      E.setTarget(getEntryForTarget(G, E.getTarget()));
+      return true;
+    }
+    return false;
+  }
+
+  // Create a new TLS-info entry block for Target. The second 8 bytes carry a
+  // Pointer64 edge to the target's data.
+  Symbol &createEntry(LinkGraph &G, Symbol &Target) {
+    // the TLS Info entry's key value will be written by the fixTLVSectionByName
+    // pass, so create mutable content.
+    auto &TLSInfoEntry = G.createMutableContentBlock(
+        getTLSInfoSection(G), G.allocateContent(getTLSInfoEntryContent()),
+        orc::ExecutorAddr(), 8, 0);
+    TLSInfoEntry.addEdge(x86_64::Pointer64, 8, Target, 0);
+    return G.addAnonymousSymbol(TLSInfoEntry, 0, 16, false, false);
+  }
+
+private:
+  // Lazily create the $__TLSINFO section on first use.
+  Section &getTLSInfoSection(LinkGraph &G) {
+    if (!TLSInfoTable)
+      TLSInfoTable = &G.createSection(ELFTLSInfoSectionName, MemProt::Read);
+    return *TLSInfoTable;
+  }
+
+  ArrayRef<char> getTLSInfoEntryContent() const {
+    return {reinterpret_cast<const char *>(TLSInfoEntryContent),
+            sizeof(TLSInfoEntryContent)};
+  }
+
+  Section *TLSInfoTable = nullptr;
+};
+
+// Zero-initialized template for new TLS-info entries; real values are filled
+// in later (see createEntry above).
+const uint8_t TLSInfoTableManager_ELF_x86_64::TLSInfoEntryContent[16] = {
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*pthread key */
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00  /*data address*/
+};
+
+// Post-prune pass: walk all edges once, building GOT entries, PLT stubs and
+// TLS-info entries in-place for any edge that requests them.
+Error buildTables_ELF_x86_64(LinkGraph &G) {
+  LLVM_DEBUG(dbgs() << "Visiting edges in graph:\n");
+
+  x86_64::GOTTableManager GOT;
+  x86_64::PLTTableManager PLT(GOT);
+  TLSInfoTableManager_ELF_x86_64 TLSInfo;
+  visitExistingEdges(G, GOT, PLT, TLSInfo);
+  return Error::success();
+}
+} // namespace
+
+// Map a raw ELF x86-64 relocation type number to its R_X86_64_* name, using
+// the canonical .def list. Unknown values get a fixed fallback string.
+static const char *getELFX86_64RelocName(uint32_t Type) {
+  switch (Type) {
+#define ELF_RELOC(Name, Number)                                                \
+  case Number:                                                                 \
+    return #Name;
+#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
+#undef ELF_RELOC
+  }
+  return "Unrecognized ELF/x86-64 relocation type";
+}
+
+namespace llvm {
+namespace jitlink {
+
+// This should become a template as the ELFFile is so a lot of this could become
+// generic
+// LinkGraph builder for x86-64 ELF objects. Translates ELF Rela relocations
+// into generic x86_64 edge kinds and attaches them to the blocks being fixed.
+class ELFLinkGraphBuilder_x86_64 : public ELFLinkGraphBuilder<object::ELF64LE> {
+private:
+  using ELFT = object::ELF64LE;
+
+  // Map a raw ELF relocation type to the intermediate ELFX86RelocationKind.
+  // Unsupported types produce a JITLinkError naming the relocation.
+  static Expected<ELF_x86_64_Edges::ELFX86RelocationKind>
+  getRelocationKind(const uint32_t Type) {
+    switch (Type) {
+    case ELF::R_X86_64_32S:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::Pointer32Signed;
+    case ELF::R_X86_64_PC32:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel32;
+    case ELF::R_X86_64_PC64:
+    case ELF::R_X86_64_GOTPC64:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::Delta64;
+    case ELF::R_X86_64_64:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::Pointer64;
+    case ELF::R_X86_64_GOTPCREL:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel32GOTLoad;
+    case ELF::R_X86_64_GOTPCRELX:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel32GOTLoadRelaxable;
+    case ELF::R_X86_64_REX_GOTPCRELX:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel32REXGOTLoadRelaxable;
+    case ELF::R_X86_64_GOTPCREL64:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel64GOT;
+    case ELF::R_X86_64_GOT64:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::GOT64;
+    case ELF::R_X86_64_GOTOFF64:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::GOTOFF64;
+    case ELF::R_X86_64_PLT32:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::Branch32;
+    case ELF::R_X86_64_TLSGD:
+      return ELF_x86_64_Edges::ELFX86RelocationKind::PCRel32TLV;
+    }
+    return make_error<JITLinkError>("Unsupported x86-64 relocation type " +
+                                    formatv("{0:d}: ", Type) +
+                                    getELFX86_64RelocName(Type));
+  }
+
+  // Walk every relocation section, rejecting SHT_REL outright (x86-64 ELF
+  // uses RELA only) and delegating each entry to addSingleRelocation.
+  Error addRelocations() override {
+    LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+    using Base = ELFLinkGraphBuilder<ELFT>;
+    using Self = ELFLinkGraphBuilder_x86_64;
+    for (const auto &RelSect : Base::Sections) {
+      // Validate the section to read relocation entries from.
+      if (RelSect.sh_type == ELF::SHT_REL)
+        return make_error<StringError>(
+            "No SHT_REL in valid x64 ELF object files",
+            inconvertibleErrorCode());
+
+      if (Error Err = Base::forEachRelocation(RelSect, this,
+                                              &Self::addSingleRelocation))
+        return Err;
+    }
+
+    return Error::success();
+  }
+
+  // Convert one Rela entry into an Edge on BlockToFix: resolve the target
+  // symbol, map the relocation kind to an x86_64 edge kind, and adjust the
+  // addend where the edge kind's semantics differ from the ELF relocation's.
+  Error addSingleRelocation(const typename ELFT::Rela &Rel,
+                            const typename ELFT::Shdr &FixupSection,
+                            Block &BlockToFix) {
+    using Base = ELFLinkGraphBuilder<ELFT>;
+
+    uint32_t SymbolIndex = Rel.getSymbol(false);
+    auto ObjSymbol = Base::Obj.getRelocationSymbol(Rel, Base::SymTabSec);
+    if (!ObjSymbol)
+      return ObjSymbol.takeError();
+
+    Symbol *GraphSymbol = Base::getGraphSymbol(SymbolIndex);
+    if (!GraphSymbol)
+      return make_error<StringError>(
+          formatv("Could not find symbol at given index, did you add it to "
+                  "JITSymbolTable? index: {0}, shndx: {1} Size of table: {2}",
+                  SymbolIndex, (*ObjSymbol)->st_shndx,
+                  Base::GraphSymbols.size()),
+          inconvertibleErrorCode());
+
+    // Validate the relocation kind.
+    auto ELFRelocKind = getRelocationKind(Rel.getType(false));
+    if (!ELFRelocKind)
+      return ELFRelocKind.takeError();
+
+    int64_t Addend = Rel.r_addend;
+    Edge::Kind Kind = Edge::Invalid;
+    switch (*ELFRelocKind) {
+    case PCRel32:
+      Kind = x86_64::Delta32;
+      break;
+    case Delta64:
+      Kind = x86_64::Delta64;
+      break;
+    case Pointer32Signed:
+      Kind = x86_64::Pointer32Signed;
+      break;
+    case Pointer64:
+      Kind = x86_64::Pointer64;
+      break;
+    case PCRel32GOTLoad: {
+      Kind = x86_64::RequestGOTAndTransformToDelta32;
+      break;
+    }
+    case PCRel32REXGOTLoadRelaxable: {
+      // Relaxable GOT loads carry their addend in the instruction encoding;
+      // the edge kind handles it, so zero it here.
+      Kind = x86_64::RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable;
+      Addend = 0;
+      break;
+    }
+    case PCRel32TLV: {
+      Kind = x86_64::RequestTLSDescInGOTAndTransformToDelta32;
+      break;
+    }
+    case PCRel32GOTLoadRelaxable: {
+      Kind = x86_64::RequestGOTAndTransformToPCRel32GOTLoadRelaxable;
+      Addend = 0;
+      break;
+    }
+    case PCRel64GOT: {
+      Kind = x86_64::RequestGOTAndTransformToDelta64;
+      break;
+    }
+    case GOT64: {
+      Kind = x86_64::RequestGOTAndTransformToDelta64FromGOT;
+      break;
+    }
+    case GOTOFF64: {
+      Kind = x86_64::Delta64FromGOT;
+      break;
+    }
+    case Branch32: {
+      Kind = x86_64::BranchPCRel32;
+      // BranchPCRel32 implicitly handles the '-4' PC adjustment, so we have to
+      // adjust the addend by '+4' to compensate.
+      Addend += 4;
+      break;
+    }
+    }
+
+    auto FixupAddress = orc::ExecutorAddr(FixupSection.sh_addr) + Rel.r_offset;
+    Edge::OffsetT Offset = FixupAddress - BlockToFix.getAddress();
+    Edge GE(Kind, Offset, *GraphSymbol, Addend);
+    LLVM_DEBUG({
+      dbgs() << "    ";
+      printEdge(dbgs(), BlockToFix, GE, x86_64::getEdgeKindName(Kind));
+      dbgs() << "\n";
+    });
+
+    BlockToFix.addEdge(std::move(GE));
+    return Error::success();
+  }
+
+public:
+  // Note: the triple is fixed to x86_64-unknown-linux for all objects built
+  // through this class.
+  ELFLinkGraphBuilder_x86_64(StringRef FileName,
+                             const object::ELFFile<object::ELF64LE> &Obj)
+      : ELFLinkGraphBuilder(Obj, Triple("x86_64-unknown-linux"), FileName,
+                            x86_64::getEdgeKindName) {}
+};
+
+// Concrete JITLinker for x86-64 ELF. Adds a post-allocation pass that locates
+// or synthesizes the _GLOBAL_OFFSET_TABLE_ symbol, which GOT-relative fixups
+// (applied in applyFixup) may reference.
+class ELFJITLinker_x86_64 : public JITLinker<ELFJITLinker_x86_64> {
+  friend class JITLinker<ELFJITLinker_x86_64>;
+
+public:
+  ELFJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
+                      std::unique_ptr<LinkGraph> G,
+                      PassConfiguration PassConfig)
+      : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {
+    getPassConfig().PostAllocationPasses.push_back(
+        [this](LinkGraph &G) { return getOrCreateGOTSymbol(G); });
+  }
+
+private:
+  // Cached _GLOBAL_OFFSET_TABLE_ symbol; may remain null if the graph has no
+  // GOT section and no external reference to the symbol.
+  Symbol *GOTSymbol = nullptr;
+
+  // Resolve GOTSymbol in three steps: (1) bind an *external*
+  // _GLOBAL_OFFSET_TABLE_ reference to the GOT section start, (2) reuse a
+  // symbol already defined in the GOT section, (3) otherwise create one
+  // (absolute if the section is empty, defined at its first block otherwise).
+  Error getOrCreateGOTSymbol(LinkGraph &G) {
+    auto DefineExternalGOTSymbolIfPresent =
+        createDefineExternalSectionStartAndEndSymbolsPass(
+            [&](LinkGraph &LG, Symbol &Sym) -> SectionRangeSymbolDesc {
+              if (Sym.getName() == ELFGOTSymbolName)
+                if (auto *GOTSection = G.findSectionByName(
+                        x86_64::GOTTableManager::getSectionName())) {
+                  GOTSymbol = &Sym;
+                  return {*GOTSection, true};
+                }
+              return {};
+            });
+
+    // Try to attach _GLOBAL_OFFSET_TABLE_ to the GOT if it's defined as an
+    // external.
+    if (auto Err = DefineExternalGOTSymbolIfPresent(G))
+      return Err;
+
+    // If we succeeded then we're done.
+    if (GOTSymbol)
+      return Error::success();
+
+    // Otherwise look for a GOT section: If it already has a start symbol we'll
+    // record it, otherwise we'll create our own.
+    // If there's a GOT section but we didn't find an external GOT symbol...
+    if (auto *GOTSection =
+            G.findSectionByName(x86_64::GOTTableManager::getSectionName())) {
+
+      // Check for an existing defined symbol.
+      for (auto *Sym : GOTSection->symbols())
+        if (Sym->getName() == ELFGOTSymbolName) {
+          GOTSymbol = Sym;
+          return Error::success();
+        }
+
+      // If there's no defined symbol then create one.
+      SectionRange SR(*GOTSection);
+      if (SR.empty())
+        GOTSymbol =
+            &G.addAbsoluteSymbol(ELFGOTSymbolName, orc::ExecutorAddr(), 0,
+                                 Linkage::Strong, Scope::Local, true);
+      else
+        GOTSymbol =
+            &G.addDefinedSymbol(*SR.getFirstBlock(), 0, ELFGOTSymbolName, 0,
+                                Linkage::Strong, Scope::Local, false, true);
+    }
+
+    return Error::success();
+  }
+
+  // Delegate fixup application to the shared x86_64 implementation, passing
+  // the (possibly null) GOT symbol for GOT-relative edge kinds.
+  Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+    return x86_64::applyFixup(G, B, E, GOTSymbol);
+  }
+};
+
+// Build a jitlink LinkGraph from an in-memory x86-64 ELF relocatable object.
+// Unlike the RISC-V variant, only 64-bit LE ELF objects are expected here.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromELFObject_x86_64(MemoryBufferRef ObjectBuffer) {
+  LLVM_DEBUG({
+    dbgs() << "Building jitlink graph for new input "
+           << ObjectBuffer.getBufferIdentifier() << "...\n";
+  });
+
+  auto ELFObj = object::ObjectFile::createELFObjectFile(ObjectBuffer);
+  if (!ELFObj)
+    return ELFObj.takeError();
+
+  auto &ELFObjFile = cast<object::ELFObjectFile<object::ELF64LE>>(**ELFObj);
+  return ELFLinkGraphBuilder_x86_64((*ELFObj)->getFileName(),
+                                    ELFObjFile.getELFFile())
+      .buildGraph();
+}
+
+// Recognize external "__start<section>" / "__end<section>" symbols and map
+// them to the corresponding section's start (true) or end (false) address.
+// Returns an empty descriptor if the symbol doesn't match either pattern or
+// the named section doesn't exist.
+static SectionRangeSymbolDesc
+identifyELFSectionStartAndEndSymbols(LinkGraph &G, Symbol &Sym) {
+  constexpr StringRef StartSymbolPrefix = "__start";
+  constexpr StringRef EndSymbolPrefix = "__end";
+
+  auto SymName = Sym.getName();
+  if (SymName.startswith(StartSymbolPrefix)) {
+    if (auto *Sec =
+            G.findSectionByName(SymName.drop_front(StartSymbolPrefix.size())))
+      return {*Sec, true};
+  } else if (SymName.startswith(EndSymbolPrefix)) {
+    if (auto *Sec =
+            G.findSectionByName(SymName.drop_front(EndSymbolPrefix.size())))
+      return {*Sec, false};
+  }
+  return {};
+}
+
+// Link entry point for x86-64 ELF graphs. Sets up the default pass pipeline
+// (eh-frame handling, mark-live, GOT/PLT/TLS table building, section
+// start/end symbol resolution, GOT/stub optimization), lets the context
+// amend it, then runs the ELFJITLinker_x86_64 driver.
+void link_ELF_x86_64(std::unique_ptr<LinkGraph> G,
+                     std::unique_ptr<JITLinkContext> Ctx) {
+  PassConfiguration Config;
+
+  if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+
+    Config.PrePrunePasses.push_back(EHFrameSplitter(".eh_frame"));
+    Config.PrePrunePasses.push_back(
+        EHFrameEdgeFixer(".eh_frame", x86_64::PointerSize, x86_64::Delta64,
+                         x86_64::Delta32, x86_64::NegDelta32));
+    Config.PrePrunePasses.push_back(EHFrameNullTerminator(".eh_frame"));
+
+    // Construct a JITLinker and run the link function.
+    // Add a mark-live pass.
+    if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+      Config.PrePrunePasses.push_back(std::move(MarkLive));
+    else
+      Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+    // Add an in-place GOT/Stubs/TLSInfoEntry build pass.
+    Config.PostPrunePasses.push_back(buildTables_ELF_x86_64);
+
+    // Resolve any external section start / end symbols.
+    Config.PostAllocationPasses.push_back(
+        createDefineExternalSectionStartAndEndSymbolsPass(
+            identifyELFSectionStartAndEndSymbols));
+
+    // Add GOT/Stubs optimizer pass.
+    Config.PreFixupPasses.push_back(x86_64::optimizeGOTAndStubAccesses);
+  }
+
+  if (auto Err = Ctx->modifyPassConfig(*G, Config))
+    return Ctx->notifyFailed(std::move(Err));
+
+  ELFJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+// Human-readable name for an ELFX86RelocationKind edge value; falls back to
+// the generic edge-kind name for values outside this enum.
+const char *getELFX86RelocationKindName(Edge::Kind R) {
+  switch (R) {
+  case Branch32:
+    return "Branch32";
+  case Pointer32Signed:
+    return "Pointer32Signed";
+  case Pointer64:
+    return "Pointer64";
+  case PCRel32:
+    return "PCRel32";
+  case PCRel32GOTLoad:
+    return "PCRel32GOTLoad";
+  case PCRel32GOTLoadRelaxable:
+    return "PCRel32GOTLoadRelaxable";
+  case PCRel32REXGOTLoadRelaxable:
+    return "PCRel32REXGOTLoad";
+  case PCRel64GOT:
+    return "PCRel64GOT";
+  case Delta64:
+    return "Delta64";
+  case GOT64:
+    return "GOT64";
+  case GOTOFF64:
+    return "GOTOFF64";
+  }
+  return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+}
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLink.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLink.cpp
new file mode 100644
index 0000000000..78a603cfed
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLink.cpp
@@ -0,0 +1,421 @@
+//===------------- JITLink.cpp - Core Run-time JIT linker APIs ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/ExecutionEngine/JITLink/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace {
+
+enum JITLinkErrorCode { GenericJITLinkError = 1 };
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+// std::error_category adapter for JITLink errors (legacy error_code interop).
+class JITLinkerErrorCategory : public std::error_category {
+public:
+  const char *name() const noexcept override { return "runtimedyld"; }
+
+  std::string message(int Condition) const override {
+    switch (static_cast<JITLinkErrorCode>(Condition)) {
+    case GenericJITLinkError:
+      return "Generic JITLink error";
+    }
+    llvm_unreachable("Unrecognized JITLinkErrorCode");
+  }
+};
+
+// Lazily-constructed singleton category instance.
+static ManagedStatic<JITLinkerErrorCategory> JITLinkerErrorCategory;
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+// ErrorInfo anchor and conversions for JITLinkError.
+char JITLinkError::ID = 0;
+
+void JITLinkError::log(raw_ostream &OS) const { OS << ErrMsg; }
+
+// All JITLink errors collapse to the single generic error_code value.
+std::error_code JITLinkError::convertToErrorCode() const {
+  return std::error_code(GenericJITLinkError, *JITLinkerErrorCategory);
+}
+
+// Names for the architecture-independent edge kinds; target-specific kinds
+// fall through to the generic placeholder.
+const char *getGenericEdgeKindName(Edge::Kind K) {
+  switch (K) {
+  case Edge::Invalid:
+    return "INVALID RELOCATION";
+  case Edge::KeepAlive:
+    return "Keep-Alive";
+  default:
+    return "<Unrecognized edge kind>";
+  }
+}
+
+// Printable name for a Linkage value.
+const char *getLinkageName(Linkage L) {
+  switch (L) {
+  case Linkage::Strong:
+    return "strong";
+  case Linkage::Weak:
+    return "weak";
+  }
+  llvm_unreachable("Unrecognized llvm.jitlink.Linkage enum");
+}
+
+// Printable name for a Scope value.
+const char *getScopeName(Scope S) {
+  switch (S) {
+  case Scope::Default:
+    return "default";
+  case Scope::Hidden:
+    return "hidden";
+  case Scope::Local:
+    return "local";
+  }
+  llvm_unreachable("Unrecognized llvm.jitlink.Scope enum");
+}
+
+// Debug-print a Block as "start -- end: size, content-kind, alignment,
+// section".
+raw_ostream &operator<<(raw_ostream &OS, const Block &B) {
+  return OS << B.getAddress() << " -- " << (B.getAddress() + B.getSize())
+            << ": "
+            << "size = " << formatv("{0:x8}", B.getSize()) << ", "
+            << (B.isZeroFill() ? "zero-fill" : "content")
+            << ", align = " << B.getAlignment()
+            << ", align-ofs = " << B.getAlignmentOffset()
+            << ", section = " << B.getSection().getName();
+}
+
+// Debug-print a Symbol: address, defined/addressable, offset, size, linkage,
+// scope, liveness and name (or an anonymous placeholder).
+raw_ostream &operator<<(raw_ostream &OS, const Symbol &Sym) {
+  OS << Sym.getAddress() << " (" << (Sym.isDefined() ? "block" : "addressable")
+     << " + " << formatv("{0:x8}", Sym.getOffset())
+     << "): size: " << formatv("{0:x8}", Sym.getSize())
+     << ", linkage: " << formatv("{0:6}", getLinkageName(Sym.getLinkage()))
+     << ", scope: " << formatv("{0:8}", getScopeName(Sym.getScope())) << ", "
+     << (Sym.isLive() ? "live" : "dead") << "  -   "
+     << (Sym.hasName() ? Sym.getName() : "<anonymous symbol>");
+  return OS;
+}
+
+// Debug-print an edge relative to its containing block B. Named targets are
+// printed by name; anonymous targets are located by scanning the target
+// section for its lowest block address and printing section/block deltas.
+void printEdge(raw_ostream &OS, const Block &B, const Edge &E,
+               StringRef EdgeKindName) {
+  OS << "edge@" << B.getAddress() + E.getOffset() << ": " << B.getAddress()
+     << " + " << formatv("{0:x}", E.getOffset()) << " -- " << EdgeKindName
+     << " -> ";
+
+  auto &TargetSym = E.getTarget();
+  if (TargetSym.hasName())
+    OS << TargetSym.getName();
+  else {
+    auto &TargetBlock = TargetSym.getBlock();
+    auto &TargetSec = TargetBlock.getSection();
+    // Find the lowest block address in the section to compute the target's
+    // section-relative offset.
+    orc::ExecutorAddr SecAddress(~uint64_t(0));
+    for (auto *B : TargetSec.blocks())
+      if (B->getAddress() < SecAddress)
+        SecAddress = B->getAddress();
+
+    orc::ExecutorAddrDiff SecDelta = TargetSym.getAddress() - SecAddress;
+    OS << TargetSym.getAddress() << " (section " << TargetSec.getName();
+    if (SecDelta)
+      OS << " + " << formatv("{0:x}", SecDelta);
+    OS << " / block " << TargetBlock.getAddress();
+    if (TargetSym.getOffset())
+      OS << " + " << formatv("{0:x}", TargetSym.getOffset());
+    OS << ")";
+  }
+
+  if (E.getAddend() != 0)
+    OS << " + " << E.getAddend();
+}
+
+// Symbols and blocks are allocated in the graph's allocator, so the section
+// destructor only runs their destructors explicitly (no deallocation).
+Section::~Section() {
+  for (auto *Sym : Symbols)
+    Sym->~Symbol();
+  for (auto *B : Blocks)
+    B->~Block();
+}
+
+// Split block B at SplitIndex, returning a new block covering the first
+// SplitIndex bytes while B is shrunk to cover the remainder. Edges and
+// symbols are partitioned between the two blocks and their offsets adjusted.
+// An optional Cache of B's symbols (sorted by descending offset) avoids
+// rescanning the section's symbols on repeated splits of the same block.
+Block &LinkGraph::splitBlock(Block &B, size_t SplitIndex,
+                             SplitBlockCache *Cache) {
+
+  assert(SplitIndex > 0 && "splitBlock can not be called with SplitIndex == 0");
+
+  // If the split point covers all of B then just return B.
+  if (SplitIndex == B.getSize())
+    return B;
+
+  assert(SplitIndex < B.getSize() && "SplitIndex out of range");
+
+  // Create the new block covering [ 0, SplitIndex ).
+  auto &NewBlock =
+      B.isZeroFill()
+          ? createZeroFillBlock(B.getSection(), SplitIndex, B.getAddress(),
+                                B.getAlignment(), B.getAlignmentOffset())
+          : createContentBlock(
+                B.getSection(), B.getContent().slice(0, SplitIndex),
+                B.getAddress(), B.getAlignment(), B.getAlignmentOffset());
+
+  // Modify B to cover [ SplitIndex, B.size() ).
+  B.setAddress(B.getAddress() + SplitIndex);
+  B.setContent(B.getContent().slice(SplitIndex));
+  B.setAlignmentOffset((B.getAlignmentOffset() + SplitIndex) %
+                       B.getAlignment());
+
+  // Handle edge transfer/update.
+  {
+    // Copy edges to NewBlock (recording their iterators so that we can remove
+    // them from B), and update of Edges remaining on B.
+    std::vector<Block::edge_iterator> EdgesToRemove;
+    for (auto I = B.edges().begin(); I != B.edges().end();) {
+      if (I->getOffset() < SplitIndex) {
+        NewBlock.addEdge(*I);
+        I = B.removeEdge(I);
+      } else {
+        I->setOffset(I->getOffset() - SplitIndex);
+        ++I;
+      }
+    }
+  }
+
+  // Handle symbol transfer/update.
+  {
+    // Initialize the symbols cache if necessary.
+    SplitBlockCache LocalBlockSymbolsCache;
+    if (!Cache)
+      Cache = &LocalBlockSymbolsCache;
+    if (*Cache == None) {
+      *Cache = SplitBlockCache::value_type();
+      for (auto *Sym : B.getSection().symbols())
+        if (&Sym->getBlock() == &B)
+          (*Cache)->push_back(Sym);
+
+      // Sort descending by offset so we can pop low-offset symbols off the
+      // back below.
+      llvm::sort(**Cache, [](const Symbol *LHS, const Symbol *RHS) {
+        return LHS->getOffset() > RHS->getOffset();
+      });
+    }
+    auto &BlockSymbols = **Cache;
+
+    // Transfer all symbols with offset less than SplitIndex to NewBlock.
+    while (!BlockSymbols.empty() &&
+           BlockSymbols.back()->getOffset() < SplitIndex) {
+      auto *Sym = BlockSymbols.back();
+      // If the symbol extends beyond the split, update the size to be within
+      // the new block.
+      if (Sym->getOffset() + Sym->getSize() > SplitIndex)
+        Sym->setSize(SplitIndex - Sym->getOffset());
+      Sym->setBlock(NewBlock);
+      BlockSymbols.pop_back();
+    }
+
+    // Update offsets for all remaining symbols in B.
+    for (auto *Sym : BlockSymbols)
+      Sym->setOffset(Sym->getOffset() - SplitIndex);
+  }
+
+  return NewBlock;
+}
+
+// Dump the whole graph for debugging: for each section, every block in
+// address order with its symbols and edges, followed by the absolute and
+// external symbol lists.
+void LinkGraph::dump(raw_ostream &OS) {
+  DenseMap<Block *, std::vector<Symbol *>> BlockSymbols;
+
+  // Map from blocks to the symbols pointing at them.
+  for (auto *Sym : defined_symbols())
+    BlockSymbols[&Sym->getBlock()].push_back(Sym);
+
+  // For each block, sort its symbols by something approximating
+  // relevance.
+  for (auto &KV : BlockSymbols)
+    llvm::sort(KV.second, [](const Symbol *LHS, const Symbol *RHS) {
+      if (LHS->getOffset() != RHS->getOffset())
+        return LHS->getOffset() < RHS->getOffset();
+      if (LHS->getLinkage() != RHS->getLinkage())
+        return LHS->getLinkage() < RHS->getLinkage();
+      if (LHS->getScope() != RHS->getScope())
+        return LHS->getScope() < RHS->getScope();
+      if (LHS->hasName()) {
+        if (!RHS->hasName())
+          return true;
+        return LHS->getName() < RHS->getName();
+      }
+      return false;
+    });
+
+  for (auto &Sec : sections()) {
+    OS << "section " << Sec.getName() << ":\n\n";
+
+    std::vector<Block *> SortedBlocks;
+    llvm::copy(Sec.blocks(), std::back_inserter(SortedBlocks));
+    llvm::sort(SortedBlocks, [](const Block *LHS, const Block *RHS) {
+      return LHS->getAddress() < RHS->getAddress();
+    });
+
+    for (auto *B : SortedBlocks) {
+      OS << "  block " << B->getAddress()
+         << " size = " << formatv("{0:x8}", B->getSize())
+         << ", align = " << B->getAlignment()
+         << ", alignment-offset = " << B->getAlignmentOffset();
+      if (B->isZeroFill())
+        OS << ", zero-fill";
+      OS << "\n";
+
+      auto BlockSymsI = BlockSymbols.find(B);
+      if (BlockSymsI != BlockSymbols.end()) {
+        OS << "    symbols:\n";
+        auto &Syms = BlockSymsI->second;
+        for (auto *Sym : Syms)
+          OS << "      " << *Sym << "\n";
+      } else
+        OS << "    no symbols\n";
+
+      if (!B->edges_empty()) {
+        OS << "    edges:\n";
+        std::vector<Edge> SortedEdges;
+        llvm::copy(B->edges(), std::back_inserter(SortedEdges));
+        llvm::sort(SortedEdges, [](const Edge &LHS, const Edge &RHS) {
+          return LHS.getOffset() < RHS.getOffset();
+        });
+        for (auto &E : SortedEdges) {
+          OS << "      " << B->getFixupAddress(E) << " (block + "
+             << formatv("{0:x8}", E.getOffset()) << "), addend = ";
+          if (E.getAddend() >= 0)
+            OS << formatv("+{0:x8}", E.getAddend());
+          else
+            OS << formatv("-{0:x8}", -E.getAddend());
+          OS << ", kind = " << getEdgeKindName(E.getKind()) << ", target = ";
+          if (E.getTarget().hasName())
+            OS << E.getTarget().getName();
+          else
+            OS << "addressable@"
+               << formatv("{0:x16}", E.getTarget().getAddress()) << "+"
+               << formatv("{0:x8}", E.getTarget().getOffset());
+          OS << "\n";
+        }
+      } else
+        OS << "    no edges\n";
+      OS << "\n";
+    }
+  }
+
+  OS << "Absolute symbols:\n";
+  if (!llvm::empty(absolute_symbols())) {
+    for (auto *Sym : absolute_symbols())
+      OS << "  " << Sym->getAddress() << ": " << *Sym << "\n";
+  } else
+    OS << "  none\n";
+
+  OS << "\nExternal symbols:\n";
+  if (!llvm::empty(external_symbols())) {
+    for (auto *Sym : external_symbols())
+      OS << "  " << Sym->getAddress() << ": " << *Sym << "\n";
+  } else
+    OS << "  none\n";
+}
+
+// Debug-print a SymbolLookupFlags value.
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LF) {
+  switch (LF) {
+  case SymbolLookupFlags::RequiredSymbol:
+    return OS << "RequiredSymbol";
+  case SymbolLookupFlags::WeaklyReferencedSymbol:
+    return OS << "WeaklyReferencedSymbol";
+  }
+  llvm_unreachable("Unrecognized lookup flags");
+}
+
+// Out-of-line vtable anchor.
+void JITLinkAsyncLookupContinuation::anchor() {}
+
+JITLinkContext::~JITLinkContext() {}
+
+// Default context behavior: always install the default target passes.
+bool JITLinkContext::shouldAddDefaultTargetPasses(const Triple &TT) const {
+  return true;
+}
+
+// Default: no mark-live pass (callers fall back to markAllSymbolsLive).
+LinkGraphPassFunction JITLinkContext::getMarkLivePass(const Triple &TT) const {
+  return LinkGraphPassFunction();
+}
+
+// Default: leave the pass configuration unmodified.
+Error JITLinkContext::modifyPassConfig(LinkGraph &G,
+                                       PassConfiguration &Config) {
+  return Error::success();
+}
+
+// Fallback mark-live pass: keep every defined symbol (disables pruning).
+Error markAllSymbolsLive(LinkGraph &G) {
+  for (auto *Sym : G.defined_symbols())
+    Sym->setLive(true);
+  return Error::success();
+}
+
+// Build a descriptive out-of-range relocation error for edge E in block B,
+// naming the target and, where possible, the most relevant symbol at the
+// fixup block's start (best = narrowest scope, then strongest linkage).
+Error makeTargetOutOfRangeError(const LinkGraph &G, const Block &B,
+                                const Edge &E) {
+  std::string ErrMsg;
+  {
+    raw_string_ostream ErrStream(ErrMsg);
+    Section &Sec = B.getSection();
+    ErrStream << "In graph " << G.getName() << ", section " << Sec.getName()
+              << ": relocation target ";
+    if (E.getTarget().hasName()) {
+      ErrStream << "\"" << E.getTarget().getName() << "\"";
+    } else
+      ErrStream << E.getTarget().getBlock().getSection().getName() << " + "
+                << formatv("{0:x}", E.getOffset());
+    ErrStream << " at address " << formatv("{0:x}", E.getTarget().getAddress())
+              << " is out of range of " << G.getEdgeKindName(E.getKind())
+              << " fixup at " << formatv("{0:x}", B.getFixupAddress(E)) << " (";
+
+    Symbol *BestSymbolForBlock = nullptr;
+    for (auto *Sym : Sec.symbols())
+      if (&Sym->getBlock() == &B && Sym->hasName() && Sym->getOffset() == 0 &&
+          (!BestSymbolForBlock ||
+           Sym->getScope() < BestSymbolForBlock->getScope() ||
+           Sym->getLinkage() < BestSymbolForBlock->getLinkage()))
+        BestSymbolForBlock = Sym;
+
+    if (BestSymbolForBlock)
+      ErrStream << BestSymbolForBlock->getName() << ", ";
+    else
+      ErrStream << "<anonymous block> @ ";
+
+    ErrStream << formatv("{0:x}", B.getAddress()) << " + "
+              << formatv("{0:x}", E.getOffset()) << ")";
+  }
+  return make_error<JITLinkError>(std::move(ErrMsg));
+}
+
+// Format-dispatching LinkGraph factory: sniff the buffer's magic and forward
+// to the MachO or ELF builder; other formats are unsupported.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromObject(MemoryBufferRef ObjectBuffer) {
+  auto Magic = identify_magic(ObjectBuffer.getBuffer());
+  switch (Magic) {
+  case file_magic::macho_object:
+    return createLinkGraphFromMachOObject(ObjectBuffer);
+  case file_magic::elf_relocatable:
+    return createLinkGraphFromELFObject(ObjectBuffer);
+  default:
+    return make_error<JITLinkError>("Unsupported file format");
+  };
+}
+
+// Generic link entry point: dispatch on the graph's object format. Failures
+// for unsupported formats are reported through the context, not returned.
+void link(std::unique_ptr<LinkGraph> G, std::unique_ptr<JITLinkContext> Ctx) {
+  switch (G->getTargetTriple().getObjectFormat()) {
+  case Triple::MachO:
+    return link_MachO(std::move(G), std::move(Ctx));
+  case Triple::ELF:
+    return link_ELF(std::move(G), std::move(Ctx));
+  default:
+    Ctx->notifyFailed(make_error<JITLinkError>("Unsupported object format"));
+  };
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
new file mode 100644
index 0000000000..35ee050c85
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp
@@ -0,0 +1,314 @@
+//===--------- JITLinkGeneric.cpp - Generic JIT linker utilities ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic JITLinker utility class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "JITLinkGeneric.h"
+
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+// Out-of-line virtual destructor anchors JITLinkerBase's vtable in this TU.
+JITLinkerBase::~JITLinkerBase() {}
+
+// Phase 1: run pre-prune passes, dead-strip the graph, run post-prune
+// passes, then request memory for the surviving content. Ownership of the
+// linker (Self) is threaded through the allocation continuation so the
+// linker stays alive across the asynchronous allocate call.
+void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {
+
+  LLVM_DEBUG({
+    dbgs() << "Starting link phase 1 for graph " << G->getName() << "\n";
+  });
+
+  // Prune and optimize the graph.
+  if (auto Err = runPasses(Passes.PrePrunePasses))
+    return Ctx->notifyFailed(std::move(Err));
+
+  LLVM_DEBUG({
+    dbgs() << "Link graph \"" << G->getName() << "\" pre-pruning:\n";
+    G->dump(dbgs());
+  });
+
+  prune(*G);
+
+  LLVM_DEBUG({
+    dbgs() << "Link graph \"" << G->getName() << "\" post-pruning:\n";
+    G->dump(dbgs());
+  });
+
+  // Run post-pruning passes.
+  if (auto Err = runPasses(Passes.PostPrunePasses))
+    return Ctx->notifyFailed(std::move(Err));
+
+  // TmpSelf is grabbed before the move so that the phase-2 call is not
+  // sequenced against the capture of S (pre-C++17 evaluation-order safety).
+  Ctx->getMemoryManager().allocate(
+      Ctx->getJITLinkDylib(), *G,
+      [S = std::move(Self)](AllocResult AR) mutable {
+        auto *TmpSelf = S.get();
+        TmpSelf->linkPhase2(std::move(S), std::move(AR));
+      });
+}
+
+// Phase 2: record the in-flight allocation, run post-allocation passes,
+// notify the context of resolved (now addressed) defined symbols, and issue
+// an asynchronous lookup for any external symbols. If there are no externals
+// phase 3 is entered directly with an empty lookup result.
+void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
+                               AllocResult AR) {
+
+  // Take ownership of the allocation on success; on failure report and stop.
+  if (AR)
+    Alloc = std::move(*AR);
+  else
+    return Ctx->notifyFailed(AR.takeError());
+
+  LLVM_DEBUG({
+    dbgs() << "Link graph \"" << G->getName()
+           << "\" before post-allocation passes:\n";
+    G->dump(dbgs());
+  });
+
+  // Run post-allocation passes.
+  if (auto Err = runPasses(Passes.PostAllocationPasses))
+    return Ctx->notifyFailed(std::move(Err));
+
+  // Notify client that the defined symbols have been assigned addresses.
+  LLVM_DEBUG(dbgs() << "Resolving symbols defined in " << G->getName() << "\n");
+
+  if (auto Err = Ctx->notifyResolved(*G))
+    return Ctx->notifyFailed(std::move(Err));
+
+  auto ExternalSymbols = getExternalSymbolNames();
+
+  // If there are no external symbols then proceed immediately with phase 3.
+  if (ExternalSymbols.empty()) {
+    LLVM_DEBUG({
+      dbgs() << "No external symbols for " << G->getName()
+             << ". Proceeding immediately with link phase 3.\n";
+    });
+    // FIXME: Once callee expressions are defined to be sequenced before
+    //        argument expressions (c++17) we can simplify this. See below.
+    auto &TmpSelf = *Self;
+    TmpSelf.linkPhase3(std::move(Self), AsyncLookupResult());
+    return;
+  }
+
+  // Otherwise look up the externals.
+  LLVM_DEBUG({
+    dbgs() << "Issuing lookup for external symbols for " << G->getName()
+           << " (may trigger materialization/linking of other graphs)...\n";
+  });
+
+  // We're about to hand off ownership of ourself to the continuation. Grab a
+  // pointer to the context so that we can call it to initiate the lookup.
+  //
+  // FIXME: Once callee expressions are defined to be sequenced before argument
+  // expressions (c++17) we can simplify all this to:
+  //
+  // Ctx->lookup(std::move(UnresolvedExternals),
+  //             [Self=std::move(Self)](Expected<AsyncLookupResult> Result) {
+  //               Self->linkPhase3(std::move(Self), std::move(Result));
+  //             });
+  Ctx->lookup(std::move(ExternalSymbols),
+              createLookupContinuation(
+                  [S = std::move(Self)](
+                      Expected<AsyncLookupResult> LookupResult) mutable {
+                    auto &TmpSelf = *S;
+                    TmpSelf.linkPhase3(std::move(S), std::move(LookupResult));
+                  }));
+}
+
+// Phase 3: apply the external-symbol lookup result, run pre-fixup passes,
+// apply relocation fixups to block content, run post-fixup passes, then ask
+// the allocation to finalize (asynchronously continuing into phase 4). Any
+// failure after allocation must abandon the allocation, not just bail.
+void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self,
+                               Expected<AsyncLookupResult> LR) {
+
+  LLVM_DEBUG({
+    dbgs() << "Starting link phase 3 for graph " << G->getName() << "\n";
+  });
+
+  // If the lookup failed, bail out.
+  if (!LR)
+    return abandonAllocAndBailOut(std::move(Self), LR.takeError());
+
+  // Assign addresses to external addressables.
+  applyLookupResult(*LR);
+
+  LLVM_DEBUG({
+    dbgs() << "Link graph \"" << G->getName()
+           << "\" before pre-fixup passes:\n";
+    G->dump(dbgs());
+  });
+
+  if (auto Err = runPasses(Passes.PreFixupPasses))
+    return abandonAllocAndBailOut(std::move(Self), std::move(Err));
+
+  LLVM_DEBUG({
+    dbgs() << "Link graph \"" << G->getName() << "\" before copy-and-fixup:\n";
+    G->dump(dbgs());
+  });
+
+  // Fix up block content.
+  if (auto Err = fixUpBlocks(*G))
+    return abandonAllocAndBailOut(std::move(Self), std::move(Err));
+
+  LLVM_DEBUG({
+    dbgs() << "Link graph \"" << G->getName() << "\" after copy-and-fixup:\n";
+    G->dump(dbgs());
+  });
+
+  if (auto Err = runPasses(Passes.PostFixupPasses))
+    return abandonAllocAndBailOut(std::move(Self), std::move(Err));
+
+  // Hand ownership of the linker to the finalize continuation (TmpSelf taken
+  // first to avoid unspecified evaluation order pre-C++17).
+  Alloc->finalize([S = std::move(Self)](FinalizeResult FR) mutable {
+    auto *TmpSelf = S.get();
+    TmpSelf->linkPhase4(std::move(S), std::move(FR));
+  });
+}
+
+// Phase 4: hand the finalized allocation (or the finalization error) to the
+// context. After notifyFinalized the context owns the allocation; the linker
+// itself is destroyed when Self goes out of scope here.
+void JITLinkerBase::linkPhase4(std::unique_ptr<JITLinkerBase> Self,
+                               FinalizeResult FR) {
+
+  LLVM_DEBUG({
+    dbgs() << "Starting link phase 4 for graph " << G->getName() << "\n";
+  });
+
+  if (!FR)
+    return Ctx->notifyFailed(FR.takeError());
+
+  Ctx->notifyFinalized(std::move(*FR));
+
+  LLVM_DEBUG({ dbgs() << "Link of graph " << G->getName() << " complete\n"; });
+}
+
+// Run each pass over the graph in order, stopping at the first failure.
+Error JITLinkerBase::runPasses(LinkGraphPassList &Passes) {
+  for (auto &P : Passes)
+    if (auto Err = P(*G))
+      return Err;
+  return Error::success();
+}
+
+// Collect the names of all external (undefined) symbols in the graph,
+// mapping each to the lookup flags implied by its linkage: weak externals
+// may legitimately be absent, all others are required.
+JITLinkContext::LookupMap JITLinkerBase::getExternalSymbolNames() const {
+  // Identify unresolved external symbols.
+  JITLinkContext::LookupMap UnresolvedExternals;
+  for (auto *Sym : G->external_symbols()) {
+    assert(!Sym->getAddress() &&
+           "External has already been assigned an address");
+    assert(Sym->getName() != StringRef() && Sym->getName() != "" &&
+           "Externals must be named");
+    SymbolLookupFlags LookupFlags =
+        Sym->getLinkage() == Linkage::Weak
+            ? SymbolLookupFlags::WeaklyReferencedSymbol
+            : SymbolLookupFlags::RequiredSymbol;
+    UnresolvedExternals[Sym->getName()] = LookupFlags;
+  }
+  return UnresolvedExternals;
+}
+
+// Write the resolved addresses from a lookup result back into the graph's
+// external symbols. An external missing from the result is only acceptable
+// when it is weakly referenced (its address stays null).
+void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
+  for (auto *Sym : G->external_symbols()) {
+    assert(Sym->getOffset() == 0 &&
+           "External symbol is not at the start of its addressable block");
+    assert(!Sym->getAddress() && "Symbol already resolved");
+    assert(!Sym->isDefined() && "Symbol being resolved is already defined");
+    auto ResultI = Result.find(Sym->getName());
+    if (ResultI != Result.end())
+      Sym->getAddressable().setAddress(
+          orc::ExecutorAddr(ResultI->second.getAddress()));
+    else
+      assert(Sym->getLinkage() == Linkage::Weak &&
+             "Failed to resolve non-weak reference");
+  }
+
+  LLVM_DEBUG({
+    dbgs() << "Externals after applying lookup result:\n";
+    for (auto *Sym : G->external_symbols())
+      dbgs() << "  " << Sym->getName() << ": "
+             << formatv("{0:x16}", Sym->getAddress().getValue()) << "\n";
+  });
+}
+
+// Abandon the in-flight allocation and report the combined error (the
+// original failure joined with any error from abandoning) to the context.
+// Ownership of the linker is held by the abandon continuation until it runs.
+void JITLinkerBase::abandonAllocAndBailOut(std::unique_ptr<JITLinkerBase> Self,
+                                           Error Err) {
+  assert(Err && "Should not be bailing out on success value");
+  assert(Alloc && "can not call abandonAllocAndBailOut before allocation");
+  Alloc->abandon([S = std::move(Self), E1 = std::move(Err)](Error E2) mutable {
+    S->Ctx->notifyFailed(joinErrors(std::move(E1), std::move(E2)));
+  });
+}
+
+// Dead-strip the graph: mark everything reachable from the initially-live
+// symbol set via edges, then remove unreachable defined symbols, their
+// blocks, and any external symbols that are no longer referenced.
+void prune(LinkGraph &G) {
+  std::vector<Symbol *> Worklist;
+  DenseSet<Block *> VisitedBlocks;
+
+  // Build the initial worklist from all symbols initially live.
+  for (auto *Sym : G.defined_symbols())
+    if (Sym->isLive())
+      Worklist.push_back(Sym);
+
+  // Propagate live flags to all symbols reachable from the initial live set.
+  while (!Worklist.empty()) {
+    auto *Sym = Worklist.back();
+    Worklist.pop_back();
+
+    auto &B = Sym->getBlock();
+
+    // Skip addressables that we've visited before.
+    if (VisitedBlocks.count(&B))
+      continue;
+
+    VisitedBlocks.insert(&B);
+
+    for (auto &E : Sym->getBlock().edges()) {
+      // If the edge target is a defined symbol that is being newly marked live
+      // then add it to the worklist.
+      if (E.getTarget().isDefined() && !E.getTarget().isLive())
+        Worklist.push_back(&E.getTarget());
+
+      // Mark the target live.
+      E.getTarget().setLive(true);
+    }
+  }
+
+  // Collect all defined symbols to remove, then remove them.
+  // (Two steps because removal invalidates the defined_symbols iteration.)
+  {
+    LLVM_DEBUG(dbgs() << "Dead-stripping defined symbols:\n");
+    std::vector<Symbol *> SymbolsToRemove;
+    for (auto *Sym : G.defined_symbols())
+      if (!Sym->isLive())
+        SymbolsToRemove.push_back(Sym);
+    for (auto *Sym : SymbolsToRemove) {
+      LLVM_DEBUG(dbgs() << "  " << *Sym << "...\n");
+      G.removeDefinedSymbol(*Sym);
+    }
+  }
+
+  // Delete any unused blocks (those never reached during the walk above).
+  {
+    LLVM_DEBUG(dbgs() << "Dead-stripping blocks:\n");
+    std::vector<Block *> BlocksToRemove;
+    for (auto *B : G.blocks())
+      if (!VisitedBlocks.count(B))
+        BlocksToRemove.push_back(B);
+    for (auto *B : BlocksToRemove) {
+      LLVM_DEBUG(dbgs() << "  " << *B << "...\n");
+      G.removeBlock(*B);
+    }
+  }
+
+  // Collect all external symbols to remove, then remove them.
+  {
+    LLVM_DEBUG(dbgs() << "Removing unused external symbols:\n");
+    std::vector<Symbol *> SymbolsToRemove;
+    for (auto *Sym : G.external_symbols())
+      if (!Sym->isLive())
+        SymbolsToRemove.push_back(Sym);
+    for (auto *Sym : SymbolsToRemove) {
+      LLVM_DEBUG(dbgs() << "  " << *Sym << "...\n");
+      G.removeExternalSymbol(*Sym);
+    }
+  }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkGeneric.h b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
new file mode 100644
index 0000000000..1095fa5ce7
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkGeneric.h
@@ -0,0 +1,161 @@
+//===------ JITLinkGeneric.h - Generic JIT linker utilities -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic JITLinker utilities. E.g. graph pruning, eh-frame parsing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+#define LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
+
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+/// Base class for a JIT linker.
+///
+/// A JITLinkerBase instance links one object file into an ongoing JIT
+/// session. Symbol resolution and finalization operations are pluggable,
+/// and called using continuation passing (passing a continuation for the
+/// remaining linker work) to allow them to be performed asynchronously.
+class JITLinkerBase {
+public:
+  // Takes ownership of the context and graph; both must be non-null.
+  JITLinkerBase(std::unique_ptr<JITLinkContext> Ctx,
+                std::unique_ptr<LinkGraph> G, PassConfiguration Passes)
+      : Ctx(std::move(Ctx)), G(std::move(G)), Passes(std::move(Passes)) {
+    assert(this->Ctx && "Ctx can not be null");
+    assert(this->G && "G can not be null");
+  }
+
+  virtual ~JITLinkerBase();
+
+protected:
+  using InFlightAlloc = JITLinkMemoryManager::InFlightAlloc;
+  using AllocResult = Expected<std::unique_ptr<InFlightAlloc>>;
+  using FinalizeResult = Expected<JITLinkMemoryManager::FinalizedAlloc>;
+
+  // Returns the PassConfiguration for this instance. This can be used by
+  // JITLinkerBase implementations to add late passes that reference their
+  // own data structures (e.g. for ELF implementations to locate / construct
+  // a GOT start symbol prior to fixup).
+  PassConfiguration &getPassConfig() { return Passes; }
+
+  // Each linkPhaseN method receives ownership of the linker itself via Self
+  // so that it can be forwarded through asynchronous continuations.
+  //
+  // Phase 1:
+  //   1.1: Run pre-prune passes
+  //   1.2: Prune graph
+  //   1.3: Run post-prune passes
+  //   1.4: Allocate memory.
+  void linkPhase1(std::unique_ptr<JITLinkerBase> Self);
+
+  // Phase 2:
+  //   2.2: Run post-allocation passes
+  //   2.3: Notify context of final assigned symbol addresses
+  //   2.4: Identify external symbols and make an async call to resolve
+  void linkPhase2(std::unique_ptr<JITLinkerBase> Self, AllocResult AR);
+
+  // Phase 3:
+  //   3.1: Apply resolution results
+  //   3.2: Run pre-fixup passes
+  //   3.3: Fix up block contents
+  //   3.4: Run post-fixup passes
+  //   3.5: Make an async call to transfer and finalize memory.
+  void linkPhase3(std::unique_ptr<JITLinkerBase> Self,
+                  Expected<AsyncLookupResult> LookupResult);
+
+  // Phase 4:
+  //   4.1: Call OnFinalized callback, handing off allocation.
+  void linkPhase4(std::unique_ptr<JITLinkerBase> Self, FinalizeResult FR);
+
+private:
+  // Run all passes in the given pass list, bailing out immediately if any pass
+  // returns an error.
+  Error runPasses(LinkGraphPassList &Passes);
+
+  // Copy block contents and apply relocations.
+  // Implemented in JITLinker.
+  virtual Error fixUpBlocks(LinkGraph &G) const = 0;
+
+  JITLinkContext::LookupMap getExternalSymbolNames() const;
+  void applyLookupResult(AsyncLookupResult LR);
+  void abandonAllocAndBailOut(std::unique_ptr<JITLinkerBase> Self, Error Err);
+
+  std::unique_ptr<JITLinkContext> Ctx;
+  std::unique_ptr<LinkGraph> G;
+  PassConfiguration Passes;
+  // In-flight memory allocation; set in linkPhase2, consumed in linkPhase3/4.
+  std::unique_ptr<InFlightAlloc> Alloc;
+};
+
+// CRTP driver: LinkerImpl supplies applyFixup for its target/format, and
+// JITLinker provides the generic fixUpBlocks loop plus the static link()
+// entry point.
+template <typename LinkerImpl> class JITLinker : public JITLinkerBase {
+public:
+  using JITLinkerBase::JITLinkerBase;
+
+  /// Link constructs a LinkerImpl instance and calls linkPhase1.
+  /// Link should be called with the constructor arguments for LinkerImpl, which
+  /// will be forwarded to the constructor.
+  template <typename... ArgTs> static void link(ArgTs &&... Args) {
+    auto L = std::make_unique<LinkerImpl>(std::forward<ArgTs>(Args)...);
+
+    // Ownership of the linker is passed into the linker's doLink function to
+    // allow it to be passed on to async continuations.
+    //
+    // FIXME: Remove LTmp once we have c++17.
+    // C++17 sequencing rules guarantee that function name expressions are
+    // sequenced before arguments, so L->linkPhase1(std::move(L), ...) will be
+    // well formed.
+    auto &LTmp = *L;
+    LTmp.linkPhase1(std::move(L));
+  }
+
+private:
+  // Downcast to the concrete linker for fixup dispatch (CRTP).
+  const LinkerImpl &impl() const {
+    return static_cast<const LinkerImpl &>(*this);
+  }
+
+  // Apply every relocation edge in every block via LinkerImpl::applyFixup,
+  // stopping at the first error.
+  Error fixUpBlocks(LinkGraph &G) const override {
+    LLVM_DEBUG(dbgs() << "Fixing up blocks:\n");
+
+    for (auto *B : G.blocks()) {
+      LLVM_DEBUG(dbgs() << "  " << *B << ":\n");
+
+      // Copy Block data and apply fixups.
+      LLVM_DEBUG(dbgs() << "    Applying fixups.\n");
+      assert((!B->isZeroFill() || B->edges_size() == 0) &&
+             "Edges in zero-fill block?");
+      for (auto &E : B->edges()) {
+
+        // Skip non-relocation edges.
+        if (!E.isRelocation())
+          continue;
+
+        // Dispatch to LinkerImpl for fixup.
+        if (auto Err = impl().applyFixup(G, *B, E))
+          return Err;
+      }
+    }
+
+    return Error::success();
+  }
+};
+
+/// Removes dead symbols/blocks/addressables.
+///
+/// Finds the set of symbols and addressables reachable from any symbol
+/// initially marked live. All symbols/addressables not marked live at the end
+/// of this process are removed.
+void prune(LinkGraph &G);
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE // "jitlink"
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_JITLINKGENERIC_H
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
new file mode 100644
index 0000000000..9315ac4f61
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp
@@ -0,0 +1,470 @@
+//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Process.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+
+namespace llvm {
+namespace jitlink {
+
+JITLinkMemoryManager::~JITLinkMemoryManager() = default;
+JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;
+
+// Group the graph's blocks into segments keyed by (memory protection,
+// dealloc policy), separating content blocks from zero-fill blocks, then
+// compute each segment's content size, zero-fill size and alignment.
+BasicLayout::BasicLayout(LinkGraph &G) : G(G) {
+
+  for (auto &Sec : G.sections()) {
+    // Skip empty sections.
+    if (empty(Sec.blocks()))
+      continue;
+
+    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
+    for (auto *B : Sec.blocks())
+      if (LLVM_LIKELY(!B->isZeroFill()))
+        Seg.ContentBlocks.push_back(B);
+      else
+        Seg.ZeroFillBlocks.push_back(B);
+  }
+
+  // Build Segments map.
+  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
+    // Sort by section, address and size
+    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
+      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
+    if (LHS->getAddress() != RHS->getAddress())
+      return LHS->getAddress() < RHS->getAddress();
+    return LHS->getSize() < RHS->getSize();
+  };
+
+  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
+  for (auto &KV : Segments) {
+    auto &Seg = KV.second;
+
+    llvm::sort(Seg.ContentBlocks, CompareBlocks);
+    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);
+
+    // Content size accumulates block sizes with per-block alignment padding.
+    for (auto *B : Seg.ContentBlocks) {
+      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
+      Seg.ContentSize += B->getSize();
+      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
+    }
+
+    // Zero-fill size is measured from the end of the content area.
+    uint64_t SegEndOffset = Seg.ContentSize;
+    for (auto *B : Seg.ZeroFillBlocks) {
+      SegEndOffset = alignToBlock(SegEndOffset, *B);
+      SegEndOffset += B->getSize();
+      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
+    }
+    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;
+
+    LLVM_DEBUG({
+      dbgs() << "  Seg " << KV.first
+             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
+             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
+             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
+    });
+  }
+}
+
+// Compute total page-rounded sizes for the standard and finalize segment
+// groups, for memory managers that lay segments out contiguously. Fails if
+// any segment requires alignment greater than the page size.
+Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
+BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
+  ContiguousPageBasedLayoutSizes SegsSizes;
+
+  for (auto &KV : segments()) {
+    auto &AG = KV.first;
+    auto &Seg = KV.second;
+
+    if (Seg.Alignment > PageSize)
+      return make_error<StringError>("Segment alignment greater than page size",
+                                     inconvertibleErrorCode());
+
+    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
+    if (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
+      SegsSizes.StandardSegs += SegSize;
+    else
+      SegsSizes.FinalizeSegs += SegSize;
+  }
+
+  return SegsSizes;
+}
+
+// Apply the layout: for each segment (whose Addr/WorkingMem the memory
+// manager must already have set), assign final addresses to all blocks,
+// copy content-block bytes into working memory, and repoint each block's
+// content at its working-memory copy. Block lists are cleared afterwards.
+Error BasicLayout::apply() {
+  for (auto &KV : Segments) {
+    auto &Seg = KV.second;
+
+    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
+           "Empty section recorded?");
+
+    for (auto *B : Seg.ContentBlocks) {
+      // Align addr and working-mem-offset.
+      Seg.Addr = alignToBlock(Seg.Addr, *B);
+      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);
+
+      // Update block addr.
+      B->setAddress(Seg.Addr);
+      Seg.Addr += B->getSize();
+
+      // Copy content to working memory, then update content to point at working
+      // memory.
+      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
+             B->getSize());
+      B->setMutableContent(
+          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
+      Seg.NextWorkingMemOffset += B->getSize();
+    }
+
+    for (auto *B : Seg.ZeroFillBlocks) {
+      // Align addr.
+      Seg.Addr = alignToBlock(Seg.Addr, *B);
+      // Update block addr.
+      B->setAddress(Seg.Addr);
+      Seg.Addr += B->getSize();
+    }
+
+    Seg.ContentBlocks.clear();
+    Seg.ZeroFillBlocks.clear();
+  }
+
+  return Error::success();
+}
+
+// Expose the graph's allocation actions for the memory manager to run.
+orc::shared::AllocActions &BasicLayout::graphAllocActions() {
+  return G.allocActions();
+}
+
+// Asynchronously create a SimpleSegmentAlloc: build a throwaway LinkGraph
+// with one synthetic section/block per requested segment (named by its
+// prot/dealloc-policy group), hand it to the memory manager, and forward
+// the resulting allocation (or error) to OnCreated.
+void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
+                                const JITLinkDylib *JD, SegmentMap Segments,
+                                OnCreatedFunction OnCreated) {
+
+  static_assert(AllocGroup::NumGroups == 16,
+                "AllocGroup has changed. Section names below must be updated");
+  StringRef AGSectionNames[] = {
+      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
+      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
+      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
+      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};
+
+  auto G =
+      std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
+  AllocGroupSmallMap<Block *> ContentBlocks;
+
+  // Placeholder addresses; the memory manager assigns real ones.
+  orc::ExecutorAddr NextAddr(0x100000);
+  for (auto &KV : Segments) {
+    auto &AG = KV.first;
+    auto &Seg = KV.second;
+
+    // Section name index: prot bits in the low 3 bits, dealloc policy bit 3.
+    auto AGSectionName =
+        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
+                       static_cast<bool>(AG.getMemDeallocPolicy()) << 3];
+
+    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
+    Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());
+
+    if (Seg.ContentSize != 0) {
+      NextAddr =
+          orc::ExecutorAddr(alignTo(NextAddr.getValue(), Seg.ContentAlign));
+      auto &B =
+          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
+                                       NextAddr, Seg.ContentAlign.value(), 0);
+      ContentBlocks[AG] = &B;
+      NextAddr += Seg.ContentSize;
+    }
+  }
+
+  // GRef declared separately since order-of-argument-eval isn't specified.
+  auto &GRef = *G;
+  MemMgr.allocate(JD, GRef,
+                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
+                   OnCreated = std::move(OnCreated)](
+                      JITLinkMemoryManager::AllocResult Alloc) mutable {
+                    if (!Alloc)
+                      OnCreated(Alloc.takeError());
+                    else
+                      OnCreated(SimpleSegmentAlloc(std::move(G),
+                                                   std::move(ContentBlocks),
+                                                   std::move(*Alloc)));
+                  });
+}
+
+// Blocking convenience overload: bridges the async Create via a promise /
+// future pair and waits for the result.
+Expected<SimpleSegmentAlloc>
+SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
+                           SegmentMap Segments) {
+  std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
+  auto AllocF = AllocP.get_future();
+  Create(MemMgr, JD, std::move(Segments),
+         [&](Expected<SimpleSegmentAlloc> Result) {
+           AllocP.set_value(std::move(Result));
+         });
+  return AllocF.get();
+}
+
+// Move-only type; defaulted out-of-line so member types can stay incomplete
+// in the header.
+SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
+SimpleSegmentAlloc &
+SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
+SimpleSegmentAlloc::~SimpleSegmentAlloc() {}
+
+// Return the address and mutable working memory for the given alloc group,
+// or a default-constructed (empty) SegmentInfo if the group has no content.
+SimpleSegmentAlloc::SegmentInfo SimpleSegmentAlloc::getSegInfo(AllocGroup AG) {
+  auto I = ContentBlocks.find(AG);
+  if (I != ContentBlocks.end()) {
+    auto &B = *I->second;
+    return {B.getAddress(), B.getAlreadyMutableContent()};
+  }
+  return {};
+}
+
+// Private ctor used by Create: takes ownership of the synthetic graph, the
+// per-group content-block map, and the in-flight allocation.
+SimpleSegmentAlloc::SimpleSegmentAlloc(
+    std::unique_ptr<LinkGraph> G, AllocGroupSmallMap<Block *> ContentBlocks,
+    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
+    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
+      Alloc(std::move(Alloc)) {}
+
+// In-flight allocation for InProcessMemoryManager. Owns two mapped slabs:
+// StandardSegments (kept after finalization) and FinalizationSegments
+// (released once finalize actions have run).
+class InProcessMemoryManager::IPInFlightAlloc
+    : public JITLinkMemoryManager::InFlightAlloc {
+public:
+  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
+                  sys::MemoryBlock StandardSegments,
+                  sys::MemoryBlock FinalizationSegments)
+      : MemMgr(MemMgr), G(G), BL(std::move(BL)),
+        StandardSegments(std::move(StandardSegments)),
+        FinalizationSegments(std::move(FinalizationSegments)) {}
+
+  // Apply final memory protections, run the graph's finalize actions,
+  // release the finalize-lifetime slab, then hand a FinalizedAlloc (which
+  // carries the dealloc actions) to the callback.
+  void finalize(OnFinalizedFunction OnFinalized) override {
+
+    // Apply memory protections to all segments.
+    if (auto Err = applyProtections()) {
+      OnFinalized(std::move(Err));
+      return;
+    }
+
+    // Run finalization actions.
+    auto DeallocActions = runFinalizeActions(G.allocActions());
+    if (!DeallocActions) {
+      OnFinalized(DeallocActions.takeError());
+      return;
+    }
+
+    // Release the finalize segments slab.
+    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
+      OnFinalized(errorCodeToError(EC));
+      return;
+    }
+
+    // Continue with finalized allocation.
+    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
+                                            std::move(*DeallocActions)));
+  }
+
+  // Release both slabs, joining any release errors into one Error for the
+  // callback.
+  void abandon(OnAbandonedFunction OnAbandoned) override {
+    Error Err = Error::success();
+    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
+      Err = joinErrors(std::move(Err), errorCodeToError(EC));
+    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
+      Err = joinErrors(std::move(Err), errorCodeToError(EC));
+    OnAbandoned(std::move(Err));
+  }
+
+private:
+  // Set each segment's final page protections; invalidate the instruction
+  // cache for executable segments.
+  Error applyProtections() {
+    for (auto &KV : BL.segments()) {
+      const auto &AG = KV.first;
+      auto &Seg = KV.second;
+
+      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());
+
+      uint64_t SegSize =
+          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
+      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
+      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
+        return errorCodeToError(EC);
+      if (Prot & sys::Memory::MF_EXEC)
+        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
+    }
+    return Error::success();
+  }
+
+  InProcessMemoryManager &MemMgr;
+  LinkGraph &G;
+  BasicLayout BL;
+  sys::MemoryBlock StandardSegments;
+  sys::MemoryBlock FinalizationSegments;
+};
+
+// Construct an InProcessMemoryManager using the host process's page size,
+// or propagate the error if the page size cannot be determined.
+Expected<std::unique_ptr<InProcessMemoryManager>>
+InProcessMemoryManager::Create() {
+  if (auto PageSize = sys::Process::getPageSize())
+    return std::make_unique<InProcessMemoryManager>(*PageSize);
+  else
+    return PageSize.takeError();
+}
+
+// Allocate host memory for the graph: compute a contiguous page-based
+// layout, map one RW slab covering both standard- and finalize-lifetime
+// segments, assign segment addresses/working memory, apply the layout, and
+// hand back an IPInFlightAlloc (or an error) via OnAllocated.
+void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
+                                      OnAllocatedFunction OnAllocated) {
+
+  // FIXME: Just check this once on startup.
+  if (!isPowerOf2_64((uint64_t)PageSize)) {
+    OnAllocated(make_error<StringError>("Page size is not a power of 2",
+                                        inconvertibleErrorCode()));
+    return;
+  }
+
+  BasicLayout BL(G);
+
+  /// Scan the request and calculate the group and total sizes.
+  /// Check that segment size is no larger than a page.
+  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
+  if (!SegsSizes) {
+    OnAllocated(SegsSizes.takeError());
+    return;
+  }
+
+  /// Check that the total size requested (including zero fill) is not larger
+  /// than a size_t.
+  if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
+    OnAllocated(make_error<JITLinkError>(
+        "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
+        " for graph " + G.getName() + " exceeds address space"));
+    return;
+  }
+
+  // Allocate one slab for the whole thing (to make sure everything is
+  // in-range), then partition into standard and finalization blocks.
+  //
+  // FIXME: Make two separate allocations in the future to reduce
+  // fragmentation: finalization segments will usually be a single page, and
+  // standard segments are likely to be more than one page. Where multiple
+  // allocations are in-flight at once (likely) the current approach will leave
+  // a lot of single-page holes.
+  sys::MemoryBlock Slab;
+  sys::MemoryBlock StandardSegsMem;
+  sys::MemoryBlock FinalizeSegsMem;
+  {
+    const sys::Memory::ProtectionFlags ReadWrite =
+        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+                                                  sys::Memory::MF_WRITE);
+
+    std::error_code EC;
+    Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
+                                             ReadWrite, EC);
+
+    if (EC) {
+      OnAllocated(errorCodeToError(EC));
+      return;
+    }
+
+    // Zero-fill the whole slab up-front.
+    memset(Slab.base(), 0, Slab.allocatedSize());
+
+    // Standard-lifetime segments occupy the front of the slab, finalize-
+    // lifetime segments the tail.
+    StandardSegsMem = {Slab.base(),
+                       static_cast<size_t>(SegsSizes->StandardSegs)};
+    FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
+                       static_cast<size_t>(SegsSizes->FinalizeSegs)};
+  }
+
+  auto NextStandardSegAddr = orc::ExecutorAddr::fromPtr(StandardSegsMem.base());
+  auto NextFinalizeSegAddr = orc::ExecutorAddr::fromPtr(FinalizeSegsMem.base());
+
+  LLVM_DEBUG({
+    dbgs() << "InProcessMemoryManager allocated:\n";
+    if (SegsSizes->StandardSegs)
+      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
+                        NextStandardSegAddr + StandardSegsMem.allocatedSize())
+             // (typo "stardard" is present in upstream LLVM; debug-only text)
+             << " to stardard segs\n";
+    else
+      dbgs() << "  no standard segs\n";
+    if (SegsSizes->FinalizeSegs)
+      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
+                        NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
+             << " to finalize segs\n";
+    else
+      dbgs() << "  no finalize segs\n";
+  });
+
+  // Build ProtMap, assign addresses.
+  for (auto &KV : BL.segments()) {
+    auto &AG = KV.first;
+    auto &Seg = KV.second;
+
+    auto &SegAddr = (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
+                        ? NextStandardSegAddr
+                        : NextFinalizeSegAddr;
+
+    // In-process: the working memory IS the target memory.
+    Seg.WorkingMem = SegAddr.toPtr<char *>();
+    Seg.Addr = SegAddr;
+
+    SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
+  }
+
+  if (auto Err = BL.apply()) {
+    OnAllocated(std::move(Err));
+    return;
+  }
+
+  OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
+                                                std::move(StandardSegsMem),
+                                                std::move(FinalizeSegsMem)));
+}
+
+// Deallocate finalized allocations: under the lock, strip each FinalizedAlloc
+// down to its memory block and dealloc actions and free its bookkeeping
+// record; then (outside the lock) run the dealloc actions and release the
+// standard-segment slabs, merging all errors into one for the callback.
+void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
+                                        OnDeallocatedFunction OnDeallocated) {
+  std::vector<sys::MemoryBlock> StandardSegmentsList;
+  std::vector<std::vector<orc::shared::WrapperFunctionCall>> DeallocActionsList;
+
+  {
+    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
+    for (auto &Alloc : Allocs) {
+      auto *FA = Alloc.release().toPtr<FinalizedAllocInfo *>();
+      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
+      if (!FA->DeallocActions.empty())
+        DeallocActionsList.push_back(std::move(FA->DeallocActions));
+      // Manually destroy and return the record to the allocator pool.
+      FA->~FinalizedAllocInfo();
+      FinalizedAllocInfos.Deallocate(FA);
+    }
+  }
+
+  Error DeallocErr = Error::success();
+
+  while (!DeallocActionsList.empty()) {
+    auto &DeallocActions = DeallocActionsList.back();
+    auto &StandardSegments = StandardSegmentsList.back();
+
+    /// Run any deallocate calls.
+    while (!DeallocActions.empty()) {
+      if (auto Err = DeallocActions.back().runWithSPSRetErrorMerged())
+        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
+      DeallocActions.pop_back();
+    }
+
+    /// Release the standard segments slab.
+    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
+      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));
+
+    DeallocActionsList.pop_back();
+    StandardSegmentsList.pop_back();
+  }
+
+  OnDeallocated(std::move(DeallocErr));
+}
+
+// Record a finalized allocation: placement-construct a FinalizedAllocInfo in
+// the manager's allocator (under the lock) and wrap its address in an opaque
+// FinalizedAlloc handle for later deallocate().
+JITLinkMemoryManager::FinalizedAlloc
+InProcessMemoryManager::createFinalizedAlloc(
+    sys::MemoryBlock StandardSegments,
+    std::vector<orc::shared::WrapperFunctionCall> DeallocActions) {
+  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
+  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
+  new (FA) FinalizedAllocInfo(
+      {std::move(StandardSegments), std::move(DeallocActions)});
+  return FinalizedAlloc(orc::ExecutorAddr::fromPtr(FA));
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO.cpp
new file mode 100644
index 0000000000..e49480c786
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO.cpp
@@ -0,0 +1,90 @@
+//===-------------- MachO.cpp - JIT linker function for MachO -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO jit-link function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SwapByteOrder.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+// Build a LinkGraph from a MachO relocatable object buffer: sniff the magic
+// value and cpu type from the header, then dispatch to the matching
+// architecture-specific graph builder.
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject(MemoryBufferRef ObjectBuffer) {
+  StringRef Data = ObjectBuffer.getBuffer();
+  if (Data.size() < 4)
+    return make_error<JITLinkError>("Truncated MachO buffer \"" +
+                                    ObjectBuffer.getBufferIdentifier() + "\"");
+
+  uint32_t Magic;
+  memcpy(&Magic, Data.data(), sizeof(uint32_t));
+  LLVM_DEBUG({
+    dbgs() << "jitLink_MachO: magic = " << format("0x%08" PRIx32, Magic)
+           << ", identifier = \"" << ObjectBuffer.getBufferIdentifier()
+           << "\"\n";
+  });
+
+  // 32-bit MachO is recognized but explicitly rejected.
+  if (Magic == MachO::MH_MAGIC || Magic == MachO::MH_CIGAM)
+    return make_error<JITLinkError>("MachO 32-bit platforms not supported");
+
+  if (Magic != MachO::MH_MAGIC_64 && Magic != MachO::MH_CIGAM_64)
+    return make_error<JITLinkError>("Unrecognized MachO magic value");
+
+  if (Data.size() < sizeof(MachO::mach_header_64))
+    return make_error<JITLinkError>("Truncated MachO buffer \"" +
+                                    ObjectBuffer.getBufferIdentifier() + "\"");
+
+  // Read the CPU type from the header, byte-swapping for wrong-endian
+  // (MH_CIGAM_64) objects.
+  uint32_t CPUType;
+  memcpy(&CPUType, Data.data() + 4, sizeof(uint32_t));
+  if (Magic == MachO::MH_CIGAM_64)
+    CPUType = ByteSwap_32(CPUType);
+
+  LLVM_DEBUG({
+    dbgs() << "jitLink_MachO: cputype = " << format("0x%08" PRIx32, CPUType)
+           << "\n";
+  });
+
+  switch (CPUType) {
+  case MachO::CPU_TYPE_ARM64:
+    return createLinkGraphFromMachOObject_arm64(ObjectBuffer);
+  case MachO::CPU_TYPE_X86_64:
+    return createLinkGraphFromMachOObject_x86_64(ObjectBuffer);
+  default:
+    return make_error<JITLinkError>("MachO-64 CPU type not valid");
+  }
+}
+
+// Jit-link the given MachO LinkGraph, dispatching on the graph's target
+// architecture. Unsupported architectures are reported through the context's
+// notifyFailed channel (this function has no return value).
+void link_MachO(std::unique_ptr<LinkGraph> G,
+                std::unique_ptr<JITLinkContext> Ctx) {
+
+  auto Arch = G->getTargetTriple().getArch();
+  if (Arch == Triple::aarch64)
+    return link_MachO_arm64(std::move(G), std::move(Ctx));
+  if (Arch == Triple::x86_64)
+    return link_MachO_x86_64(std::move(G), std::move(Ctx));
+  Ctx->notifyFailed(make_error<JITLinkError>("MachO-64 CPU type not valid"));
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
new file mode 100644
index 0000000000..6257460445
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.cpp
@@ -0,0 +1,816 @@
+//=--------- MachOLinkGraphBuilder.cpp - MachO LinkGraph builder ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic MachO LinkGraph buliding code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MachOLinkGraphBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+static const char *CommonSectionName = "__common";
+
+namespace llvm {
+namespace jitlink {
+
+MachOLinkGraphBuilder::~MachOLinkGraphBuilder() {}
+
+// Drive the full object -> LinkGraph conversion pipeline. Each phase returns
+// an Error; the first failure aborts the build and is propagated to the
+// caller. On success, ownership of the completed graph is returned.
+Expected<std::unique_ptr<LinkGraph>> MachOLinkGraphBuilder::buildGraph() {
+
+  // We only operate on relocatable objects.
+  if (!Obj.isRelocatableObject())
+    return make_error<JITLinkError>("Object is not a relocatable MachO");
+
+  // Phase 1: parse and validate section headers.
+  if (auto Err = createNormalizedSections())
+    return std::move(Err);
+
+  // Phase 2: parse the symbol table, validating addresses against sections.
+  if (auto Err = createNormalizedSymbols())
+    return std::move(Err);
+
+  // Phase 3: build graph blocks/symbols for ordinary sections.
+  if (auto Err = graphifyRegularSymbols())
+    return std::move(Err);
+
+  // Phase 4: run registered custom parsers over their sections.
+  if (auto Err = graphifySectionsWithCustomParsers())
+    return std::move(Err);
+
+  // Phase 5: add relocation edges (provided by the format/arch subclass).
+  if (auto Err = addRelocations())
+    return std::move(Err);
+
+  return std::move(G);
+}
+
+// Construct a builder for the given MachO object. An empty LinkGraph is
+// created up front; its name, pointer size and endianness are derived from
+// the object file itself.
+MachOLinkGraphBuilder::MachOLinkGraphBuilder(
+    const object::MachOObjectFile &Obj, Triple TT,
+    LinkGraph::GetEdgeKindNameFunction GetEdgeKindName)
+    : Obj(Obj),
+      G(std::make_unique<LinkGraph>(
+          std::string(Obj.getFileName()), std::move(TT), getPointerSize(Obj),
+          getEndianness(Obj), std::move(GetEdgeKindName))) {}
+
+// Register a parser to be run (by graphifySectionsWithCustomParsers) over
+// the named section instead of the default graphification logic. At most one
+// parser may be registered per section name.
+void MachOLinkGraphBuilder::addCustomSectionParser(
+    StringRef SectionName, SectionParserFunction Parser) {
+  assert(!CustomSectionParserFunctions.count(SectionName) &&
+         "Custom parser for this section already exists");
+  auto &Slot = CustomSectionParserFunctions[SectionName];
+  Slot = std::move(Parser);
+}
+
+// Map an nlist n_desc value to a graph Linkage: weak if either the weak-def
+// or weak-ref bit is set, strong otherwise.
+Linkage MachOLinkGraphBuilder::getLinkage(uint16_t Desc) {
+  return (Desc & (MachO::N_WEAK_DEF | MachO::N_WEAK_REF)) ? Linkage::Weak
+                                                          : Linkage::Strong;
+}
+
+// Map an nlist n_type value (plus the symbol name) to a graph Scope.
+// Non-external symbols are Local; external symbols are Hidden when marked
+// private-extern or when the name uses the "l"-prefix assembler-local
+// convention, and Default otherwise.
+Scope MachOLinkGraphBuilder::getScope(StringRef Name, uint8_t Type) {
+  if (!(Type & MachO::N_EXT))
+    return Scope::Local;
+  if ((Type & MachO::N_PEXT) || Name.startswith("l"))
+    return Scope::Hidden;
+  return Scope::Default;
+}
+
+// True if the symbol carries the N_ALT_ENTRY flag (alternate entry point
+// into a preceding block).
+bool MachOLinkGraphBuilder::isAltEntry(const NormalizedSymbol &NSym) {
+  return (NSym.Desc & MachO::N_ALT_ENTRY) != 0;
+}
+
+// True for DWARF debug sections: the S_ATTR_DEBUG flag must be set and the
+// section must live in the __DWARF segment.
+bool MachOLinkGraphBuilder::isDebugSection(const NormalizedSection &NSec) {
+  if (!(NSec.Flags & MachO::S_ATTR_DEBUG))
+    return false;
+  return strcmp(NSec.SegName, "__DWARF") == 0;
+}
+
+// True if the section's type field marks it as zero-fill (no content bytes
+// in the object file).
+bool MachOLinkGraphBuilder::isZeroFillSection(const NormalizedSection &NSec) {
+  auto SecType = NSec.Flags & MachO::SECTION_TYPE;
+  return SecType == MachO::S_ZEROFILL ||
+         SecType == MachO::S_GB_ZEROFILL ||
+         SecType == MachO::S_THREAD_LOCAL_ZEROFILL;
+}
+
+// Pointer size in bytes for the object's architecture.
+unsigned
+MachOLinkGraphBuilder::getPointerSize(const object::MachOObjectFile &Obj) {
+  if (Obj.is64Bit())
+    return 8;
+  return 4;
+}
+
+// Byte order of the object file, as a support::endianness value.
+support::endianness
+MachOLinkGraphBuilder::getEndianness(const object::MachOObjectFile &Obj) {
+  if (Obj.isLittleEndian())
+    return support::little;
+  return support::big;
+}
+
+// Return the graph section used for common symbols, creating it lazily
+// (read+write, named by CommonSectionName) on first use.
+Section &MachOLinkGraphBuilder::getCommonSection() {
+  if (CommonSection == nullptr)
+    CommonSection =
+        &G->createSection(CommonSectionName, MemProt::Read | MemProt::Write);
+  return *CommonSection;
+}
+
+Error MachOLinkGraphBuilder::createNormalizedSections() {
+  // Build normalized sections. Verifies that section data is in-range (for
+  // sections with content) and that address ranges are non-overlapping.
+
+  LLVM_DEBUG(dbgs() << "Creating normalized sections...\n");
+
+  for (auto &SecRef : Obj.sections()) {
+    NormalizedSection NSec;
+    uint32_t DataOffset = 0;
+
+    auto SecIndex = Obj.getSectionIndex(SecRef.getRawDataRefImpl());
+
+    // Copy the fields we need from the raw 64-bit or 32-bit section header.
+    // segname/sectname are fixed 16-byte fields with no guaranteed
+    // terminator, so the local copies are NUL-terminated explicitly (the
+    // NormalizedSection name buffers carry one extra byte for this).
+    if (Obj.is64Bit()) {
+      const MachO::section_64 &Sec64 =
+          Obj.getSection64(SecRef.getRawDataRefImpl());
+
+      memcpy(&NSec.SectName, &Sec64.sectname, 16);
+      NSec.SectName[16] = '\0';
+      memcpy(&NSec.SegName, Sec64.segname, 16);
+      NSec.SegName[16] = '\0';
+
+      NSec.Address = orc::ExecutorAddr(Sec64.addr);
+      NSec.Size = Sec64.size;
+      NSec.Alignment = 1ULL << Sec64.align;  // align is stored as log2.
+      NSec.Flags = Sec64.flags;
+      DataOffset = Sec64.offset;
+    } else {
+      const MachO::section &Sec32 = Obj.getSection(SecRef.getRawDataRefImpl());
+
+      memcpy(&NSec.SectName, &Sec32.sectname, 16);
+      NSec.SectName[16] = '\0';
+      memcpy(&NSec.SegName, Sec32.segname, 16);
+      NSec.SegName[16] = '\0';
+
+      NSec.Address = orc::ExecutorAddr(Sec32.addr);
+      NSec.Size = Sec32.size;
+      NSec.Alignment = 1ULL << Sec32.align;  // align is stored as log2.
+      NSec.Flags = Sec32.flags;
+      DataOffset = Sec32.offset;
+    }
+
+    LLVM_DEBUG({
+      dbgs() << " " << NSec.SegName << "," << NSec.SectName << ": "
+             << formatv("{0:x16}", NSec.Address) << " -- "
+             << formatv("{0:x16}", NSec.Address + NSec.Size)
+             << ", align: " << NSec.Alignment << ", index: " << SecIndex
+             << "\n";
+    });
+
+    // Get the section data if any. Zero-fill sections have no file content,
+    // so NSec.Data stays null for them.
+    if (!isZeroFillSection(NSec)) {
+      if (DataOffset + NSec.Size > Obj.getData().size())
+        return make_error<JITLinkError>(
+            "Section data extends past end of file");
+
+      NSec.Data = Obj.getData().data() + DataOffset;
+    }
+
+    // Get prot flags.
+    // FIXME: Make sure this test is correct (it's probably missing cases
+    // as-is).
+    MemProt Prot;
+    if (NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS)
+      Prot = MemProt::Read | MemProt::Exec;
+    else
+      Prot = MemProt::Read | MemProt::Write;
+
+    // Graph section names are qualified as "<segment>,<section>"; the string
+    // is allocated in the graph so it outlives this builder.
+    auto FullyQualifiedName =
+        G->allocateString(StringRef(NSec.SegName) + "," + NSec.SectName);
+    NSec.GraphSection = &G->createSection(
+        StringRef(FullyQualifiedName.data(), FullyQualifiedName.size()), Prot);
+
+    IndexToSection.insert(std::make_pair(SecIndex, std::move(NSec)));
+  }
+
+  std::vector<NormalizedSection *> Sections;
+  Sections.reserve(IndexToSection.size());
+  for (auto &KV : IndexToSection)
+    Sections.push_back(&KV.second);
+
+  // If we didn't end up creating any sections then bail out. The code below
+  // assumes that we have at least one section.
+  if (Sections.empty())
+    return Error::success();
+
+  llvm::sort(Sections,
+             [](const NormalizedSection *LHS, const NormalizedSection *RHS) {
+               assert(LHS && RHS && "Null section?");
+               if (LHS->Address != RHS->Address)
+                 return LHS->Address < RHS->Address;
+               return LHS->Size < RHS->Size;
+             });
+
+  // Sections are now sorted by (address, size), so checking each adjacent
+  // pair is sufficient to detect any overlapping address ranges.
+  for (unsigned I = 0, E = Sections.size() - 1; I != E; ++I) {
+    auto &Cur = *Sections[I];
+    auto &Next = *Sections[I + 1];
+    if (Next.Address < Cur.Address + Cur.Size)
+      return make_error<JITLinkError>(
+          "Address range for section " +
+          formatv("\"{0}/{1}\" [ {2:x16} -- {3:x16} ] ", Cur.SegName,
+                  Cur.SectName, Cur.Address, Cur.Address + Cur.Size) +
+          "overlaps section \"" + Next.SegName + "/" + Next.SectName + "\"" +
+          formatv("\"{0}/{1}\" [ {2:x16} -- {3:x16} ] ", Next.SegName,
+                  Next.SectName, Next.Address, Next.Address + Next.Size));
+  }
+
+  return Error::success();
+}
+
+// Parse the nlist symbol table into NormalizedSymbol records, skipping stab
+// (debug) entries and validating that section-relative symbols fall inside
+// their section's address range. Populates IndexToSymbol.
+Error MachOLinkGraphBuilder::createNormalizedSymbols() {
+  LLVM_DEBUG(dbgs() << "Creating normalized symbols...\n");
+
+  for (auto &SymRef : Obj.symbols()) {
+
+    unsigned SymbolIndex = Obj.getSymbolIndex(SymRef.getRawDataRefImpl());
+    uint64_t Value;
+    uint32_t NStrX;
+    uint8_t Type;
+    uint8_t Sect;
+    uint16_t Desc;
+
+    // Pull the raw nlist fields from the 64-bit or 32-bit entry.
+    if (Obj.is64Bit()) {
+      const MachO::nlist_64 &NL64 =
+          Obj.getSymbol64TableEntry(SymRef.getRawDataRefImpl());
+      Value = NL64.n_value;
+      NStrX = NL64.n_strx;
+      Type = NL64.n_type;
+      Sect = NL64.n_sect;
+      Desc = NL64.n_desc;
+    } else {
+      const MachO::nlist &NL32 =
+          Obj.getSymbolTableEntry(SymRef.getRawDataRefImpl());
+      Value = NL32.n_value;
+      NStrX = NL32.n_strx;
+      Type = NL32.n_type;
+      Sect = NL32.n_sect;
+      Desc = NL32.n_desc;
+    }
+
+    // Skip stabs.
+    // FIXME: Are there other symbols we should be skipping?
+    if (Type & MachO::N_STAB)
+      continue;
+
+    // An n_strx of zero means the symbol has no name; Name stays None.
+    Optional<StringRef> Name;
+    if (NStrX) {
+      if (auto NameOrErr = SymRef.getName())
+        Name = *NameOrErr;
+      else
+        return NameOrErr.takeError();
+    }
+
+    LLVM_DEBUG({
+      dbgs() << " ";
+      if (!Name)
+        dbgs() << "<anonymous symbol>";
+      else
+        dbgs() << *Name;
+      dbgs() << ": value = " << formatv("{0:x16}", Value)
+             << ", type = " << formatv("{0:x2}", Type)
+             << ", desc = " << formatv("{0:x4}", Desc) << ", sect = ";
+      if (Sect)
+        dbgs() << static_cast<unsigned>(Sect - 1);
+      else
+        dbgs() << "none";
+      dbgs() << "\n";
+    });
+
+    // If this symbol has a section, verify that the addresses line up.
+    // NOTE(review): Name may be None here (when n_strx == 0); the `*Name`
+    // uses below and in the createNormalizedSymbol call dereference the
+    // Optional without checking — confirm section symbols always carry a
+    // name, or guard these uses.
+    if (Sect != 0) {
+      auto NSec = findSectionByIndex(Sect - 1);
+      if (!NSec)
+        return NSec.takeError();
+
+      if (orc::ExecutorAddr(Value) < NSec->Address ||
+          orc::ExecutorAddr(Value) > NSec->Address + NSec->Size)
+        return make_error<JITLinkError>("Address " + formatv("{0:x}", Value) +
+                                        " for symbol " + *Name +
+                                        " does not fall within section");
+
+      // Symbols in sections that produced no graph section (e.g. skipped
+      // sections) are dropped entirely.
+      if (!NSec->GraphSection) {
+        LLVM_DEBUG({
+          dbgs() << " Skipping: Symbol is in section " << NSec->SegName << "/"
+                 << NSec->SectName
+                 << " which has no associated graph section.\n";
+        });
+        continue;
+      }
+    }
+
+    IndexToSymbol[SymbolIndex] =
+        &createNormalizedSymbol(*Name, Value, Type, Sect, Desc,
+                                getLinkage(Desc), getScope(*Name, Type));
+  }
+
+  return Error::success();
+}
+
+// Create a block covering [Address, Address + Size) at the start of a
+// section — a content block if Data is non-null, a zero-fill block otherwise
+// — plus an anonymous symbol spanning it, and register that symbol as the
+// canonical symbol for the block's start address in the section's
+// CanonicalSymbols map.
+void MachOLinkGraphBuilder::addSectionStartSymAndBlock(
+    unsigned SecIndex, Section &GraphSec, orc::ExecutorAddr Address,
+    const char *Data, orc::ExecutorAddrDiff Size, uint32_t Alignment,
+    bool IsLive) {
+  Block &B =
+      Data ? G->createContentBlock(GraphSec, ArrayRef<char>(Data, Size),
+                                   Address, Alignment, 0)
+           : G->createZeroFillBlock(GraphSec, Size, Address, Alignment, 0);
+  auto &Sym = G->addAnonymousSymbol(B, 0, Size, false, IsLive);
+  auto SecI = IndexToSection.find(SecIndex);
+  assert(SecI != IndexToSection.end() && "SecIndex invalid");
+  auto &NSec = SecI->second;
+  assert(!NSec.CanonicalSymbols.count(Sym.getAddress()) &&
+         "Anonymous block start symbol clashes with existing symbol address");
+  NSec.CanonicalSymbols[Sym.getAddress()] = &Sym;
+}
+
+// Turn the normalized symbols into graph symbols. Commons, externals, and
+// absolutes are created directly; section-relative (N_SECT) symbols are
+// partitioned by section and graphified per-section: each section is carved
+// into blocks at symbol boundaries (alt-entry symbols extend the preceding
+// block), with anonymous blocks/symbols synthesized for any uncovered
+// prefix. Sections with custom parsers and C-string literal sections are
+// routed to their dedicated handlers instead.
+// Fix vs. original: the N_PBUD/N_INDR error messages read "Unupported";
+// corrected to "Unsupported".
+Error MachOLinkGraphBuilder::graphifyRegularSymbols() {
+
+  LLVM_DEBUG(dbgs() << "Creating graph symbols...\n");
+
+  /// We only have 256 section indexes: Use a vector rather than a map.
+  std::vector<std::vector<NormalizedSymbol *>> SecIndexToSymbols;
+  SecIndexToSymbols.resize(256);
+
+  // Create commons, externs, and absolutes, and partition all other symbols by
+  // section.
+  for (auto &KV : IndexToSymbol) {
+    auto &NSym = *KV.second;
+
+    switch (NSym.Type & MachO::N_TYPE) {
+    case MachO::N_UNDF:
+      // N_UNDF with a non-zero value is a common symbol (value = size);
+      // with a zero value it is a plain external reference.
+      if (NSym.Value) {
+        if (!NSym.Name)
+          return make_error<JITLinkError>("Anonymous common symbol at index " +
+                                          Twine(KV.first));
+        NSym.GraphSymbol = &G->addCommonSymbol(
+            *NSym.Name, NSym.S, getCommonSection(), orc::ExecutorAddr(),
+            orc::ExecutorAddrDiff(NSym.Value),
+            1ull << MachO::GET_COMM_ALIGN(NSym.Desc),
+            NSym.Desc & MachO::N_NO_DEAD_STRIP);
+      } else {
+        if (!NSym.Name)
+          return make_error<JITLinkError>("Anonymous external symbol at "
+                                          "index " +
+                                          Twine(KV.first));
+        NSym.GraphSymbol = &G->addExternalSymbol(
+            *NSym.Name, 0,
+            NSym.Desc & MachO::N_WEAK_REF ? Linkage::Weak : Linkage::Strong);
+      }
+      break;
+    case MachO::N_ABS:
+      if (!NSym.Name)
+        return make_error<JITLinkError>("Anonymous absolute symbol at index " +
+                                        Twine(KV.first));
+      NSym.GraphSymbol = &G->addAbsoluteSymbol(
+          *NSym.Name, orc::ExecutorAddr(NSym.Value), 0, Linkage::Strong,
+          Scope::Default, NSym.Desc & MachO::N_NO_DEAD_STRIP);
+      break;
+    case MachO::N_SECT:
+      SecIndexToSymbols[NSym.Sect - 1].push_back(&NSym);
+      break;
+    case MachO::N_PBUD:
+      return make_error<JITLinkError>(
+          "Unsupported N_PBUD symbol " +
+          (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+          " at index " + Twine(KV.first));
+    case MachO::N_INDR:
+      return make_error<JITLinkError>(
+          "Unsupported N_INDR symbol " +
+          (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+          " at index " + Twine(KV.first));
+    default:
+      return make_error<JITLinkError>(
+          "Unrecognized symbol type " + Twine(NSym.Type & MachO::N_TYPE) +
+          " for symbol " +
+          (NSym.Name ? ("\"" + *NSym.Name + "\"") : Twine("<anon>")) +
+          " at index " + Twine(KV.first));
+    }
+  }
+
+  // Loop over sections performing regular graphification for those that
+  // don't have custom parsers.
+  for (auto &KV : IndexToSection) {
+    auto SecIndex = KV.first;
+    auto &NSec = KV.second;
+
+    if (!NSec.GraphSection) {
+      LLVM_DEBUG({
+        dbgs() << " " << NSec.SegName << "/" << NSec.SectName
+               << " has no graph section. Skipping.\n";
+      });
+      continue;
+    }
+
+    // Skip sections with custom parsers.
+    if (CustomSectionParserFunctions.count(NSec.GraphSection->getName())) {
+      LLVM_DEBUG({
+        dbgs() << " Skipping section " << NSec.GraphSection->getName()
+               << " as it has a custom parser.\n";
+      });
+      continue;
+    } else if ((NSec.Flags & MachO::SECTION_TYPE) ==
+               MachO::S_CSTRING_LITERALS) {
+      // C-string literal sections are split per-string by a dedicated
+      // routine; the section's symbols are handed over (moved) to it.
+      if (auto Err = graphifyCStringSection(
+              NSec, std::move(SecIndexToSymbols[SecIndex])))
+        return Err;
+      continue;
+    } else
+      LLVM_DEBUG({
+        dbgs() << " Graphifying regular section "
+               << NSec.GraphSection->getName() << "...\n";
+      });
+
+    bool SectionIsNoDeadStrip = NSec.Flags & MachO::S_ATTR_NO_DEAD_STRIP;
+    bool SectionIsText = NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS;
+
+    auto &SecNSymStack = SecIndexToSymbols[SecIndex];
+
+    // If this section is non-empty but there are no symbols covering it then
+    // create one block and anonymous symbol to cover the entire section.
+    if (SecNSymStack.empty()) {
+      if (NSec.Size > 0) {
+        LLVM_DEBUG({
+          dbgs() << " Section non-empty, but contains no symbols. "
+                    "Creating anonymous block to cover "
+                 << formatv("{0:x16}", NSec.Address) << " -- "
+                 << formatv("{0:x16}", NSec.Address + NSec.Size) << "\n";
+        });
+        addSectionStartSymAndBlock(SecIndex, *NSec.GraphSection, NSec.Address,
+                                   NSec.Data, NSec.Size, NSec.Alignment,
+                                   SectionIsNoDeadStrip);
+      } else
+        LLVM_DEBUG({
+          dbgs() << " Section empty and contains no symbols. Skipping.\n";
+        });
+      continue;
+    }
+
+    // Sort the symbol stack in by address, alt-entry status, scope, and name.
+    // We sort in reverse order so that symbols will be visited in the right
+    // order when we pop off the stack below.
+    llvm::sort(SecNSymStack, [](const NormalizedSymbol *LHS,
+                                const NormalizedSymbol *RHS) {
+      if (LHS->Value != RHS->Value)
+        return LHS->Value > RHS->Value;
+      if (isAltEntry(*LHS) != isAltEntry(*RHS))
+        return isAltEntry(*RHS);
+      if (LHS->S != RHS->S)
+        return static_cast<uint8_t>(LHS->S) < static_cast<uint8_t>(RHS->S);
+      return LHS->Name < RHS->Name;
+    });
+
+    // The first symbol in a section can not be an alt-entry symbol.
+    if (!SecNSymStack.empty() && isAltEntry(*SecNSymStack.back()))
+      return make_error<JITLinkError>(
+          "First symbol in " + NSec.GraphSection->getName() + " is alt-entry");
+
+    // If the section is non-empty but there is no symbol covering the start
+    // address then add an anonymous one.
+    if (orc::ExecutorAddr(SecNSymStack.back()->Value) != NSec.Address) {
+      auto AnonBlockSize =
+          orc::ExecutorAddr(SecNSymStack.back()->Value) - NSec.Address;
+      LLVM_DEBUG({
+        dbgs() << " Section start not covered by symbol. "
+               << "Creating anonymous block to cover [ " << NSec.Address
+               << " -- " << (NSec.Address + AnonBlockSize) << " ]\n";
+      });
+      addSectionStartSymAndBlock(SecIndex, *NSec.GraphSection, NSec.Address,
+                                 NSec.Data, AnonBlockSize, NSec.Alignment,
+                                 SectionIsNoDeadStrip);
+    }
+
+    // Visit section symbols in order by popping off the reverse-sorted stack,
+    // building blocks for each alt-entry chain and creating symbols as we go.
+    while (!SecNSymStack.empty()) {
+      SmallVector<NormalizedSymbol *, 8> BlockSyms;
+
+      // Gather the lead symbol plus any alt-entry / same-address symbols
+      // that belong to the same block.
+      BlockSyms.push_back(SecNSymStack.back());
+      SecNSymStack.pop_back();
+      while (!SecNSymStack.empty() &&
+             (isAltEntry(*SecNSymStack.back()) ||
+              SecNSymStack.back()->Value == BlockSyms.back()->Value)) {
+        BlockSyms.push_back(SecNSymStack.back());
+        SecNSymStack.pop_back();
+      }
+
+      // BlockNSyms now contains the block symbols in reverse canonical order.
+      auto BlockStart = orc::ExecutorAddr(BlockSyms.front()->Value);
+      orc::ExecutorAddr BlockEnd =
+          SecNSymStack.empty() ? NSec.Address + NSec.Size
+                               : orc::ExecutorAddr(SecNSymStack.back()->Value);
+      orc::ExecutorAddrDiff BlockOffset = BlockStart - NSec.Address;
+      orc::ExecutorAddrDiff BlockSize = BlockEnd - BlockStart;
+
+      LLVM_DEBUG({
+        dbgs() << " Creating block for " << formatv("{0:x16}", BlockStart)
+               << " -- " << formatv("{0:x16}", BlockEnd) << ": "
+               << NSec.GraphSection->getName() << " + "
+               << formatv("{0:x16}", BlockOffset) << " with "
+               << BlockSyms.size() << " symbol(s)...\n";
+      });
+
+      Block &B =
+          NSec.Data
+              ? G->createContentBlock(
+                    *NSec.GraphSection,
+                    ArrayRef<char>(NSec.Data + BlockOffset, BlockSize),
+                    BlockStart, NSec.Alignment, BlockStart % NSec.Alignment)
+              : G->createZeroFillBlock(*NSec.GraphSection, BlockSize,
+                                       BlockStart, NSec.Alignment,
+                                       BlockStart % NSec.Alignment);
+
+      // Walk the block's symbols from lowest to highest address, shrinking
+      // SymEnd as each new canonical address is seen so that each symbol's
+      // size runs to the next canonical symbol (or the block end).
+      Optional<orc::ExecutorAddr> LastCanonicalAddr;
+      auto SymEnd = BlockEnd;
+      while (!BlockSyms.empty()) {
+        auto &NSym = *BlockSyms.back();
+        BlockSyms.pop_back();
+
+        bool SymLive =
+            (NSym.Desc & MachO::N_NO_DEAD_STRIP) || SectionIsNoDeadStrip;
+
+        auto &Sym = createStandardGraphSymbol(
+            NSym, B, SymEnd - orc::ExecutorAddr(NSym.Value), SectionIsText,
+            SymLive, LastCanonicalAddr != orc::ExecutorAddr(NSym.Value));
+
+        if (LastCanonicalAddr != Sym.getAddress()) {
+          if (LastCanonicalAddr)
+            SymEnd = *LastCanonicalAddr;
+          LastCanonicalAddr = Sym.getAddress();
+        }
+      }
+    }
+  }
+
+  return Error::success();
+}
+
+// Create the graph symbol for a normalized symbol that lives in block B:
+// a named defined symbol when NSym has a name, an anonymous symbol
+// otherwise. Size is the symbol's extent; IsText/IsNoDeadStrip carry the
+// callable/live flags. When IsCanonical is set, the symbol is registered as
+// the canonical symbol for its address in its section. Also stores the new
+// symbol back into NSym.GraphSymbol.
+Symbol &MachOLinkGraphBuilder::createStandardGraphSymbol(NormalizedSymbol &NSym,
+                                                         Block &B, size_t Size,
+                                                         bool IsText,
+                                                         bool IsNoDeadStrip,
+                                                         bool IsCanonical) {
+
+  LLVM_DEBUG({
+    dbgs() << " " << formatv("{0:x16}", NSym.Value) << " -- "
+           << formatv("{0:x16}", NSym.Value + Size) << ": ";
+    if (!NSym.Name)
+      dbgs() << "<anonymous symbol>";
+    else
+      dbgs() << NSym.Name;
+    if (IsText)
+      dbgs() << " [text]";
+    if (IsNoDeadStrip)
+      dbgs() << " [no-dead-strip]";
+    if (!IsCanonical)
+      dbgs() << " [non-canonical]";
+    dbgs() << "\n";
+  });
+
+  // Offset of the symbol within its containing block.
+  auto SymOffset = orc::ExecutorAddr(NSym.Value) - B.getAddress();
+  auto &Sym =
+      NSym.Name
+          ? G->addDefinedSymbol(B, SymOffset, *NSym.Name, Size, NSym.L, NSym.S,
+                                IsText, IsNoDeadStrip)
+          : G->addAnonymousSymbol(B, SymOffset, Size, IsText, IsNoDeadStrip);
+  NSym.GraphSymbol = &Sym;
+
+  if (IsCanonical)
+    setCanonicalSymbol(getSectionByIndex(NSym.Sect - 1), Sym);
+
+  return Sym;
+}
+
+// Run each registered custom section parser over its section (if that
+// section produced a graph section). The first parser error aborts the walk.
+Error MachOLinkGraphBuilder::graphifySectionsWithCustomParsers() {
+  for (auto &KV : IndexToSection) {
+    auto &NSec = KV.second;
+
+    // Skip non-graph sections.
+    if (!NSec.GraphSection)
+      continue;
+
+    // Skip sections without a registered parser.
+    auto ParserI =
+        CustomSectionParserFunctions.find(NSec.GraphSection->getName());
+    if (ParserI == CustomSectionParserFunctions.end())
+      continue;
+
+    if (auto Err = ParserI->second(NSec))
+      return Err;
+  }
+
+  return Error::success();
+}
+
+// Graphify an S_CSTRING_LITERALS section: scan for NUL terminators and
+// create one content block per null-terminated string, attaching the
+// section's symbols (passed in by value and consumed as a stack) to the
+// blocks they point into, with anonymous canonical symbols synthesized for
+// uncovered block starts.
+Error MachOLinkGraphBuilder::graphifyCStringSection(
+    NormalizedSection &NSec, std::vector<NormalizedSymbol *> NSyms) {
+  assert(NSec.GraphSection && "C string literal section missing graph section");
+  assert(NSec.Data && "C string literal section has no data");
+
+  LLVM_DEBUG({
+    dbgs() << " Graphifying C-string literal section "
+           << NSec.GraphSection->getName() << "\n";
+  });
+
+  // The scan below relies on every string (including the last) being
+  // NUL-terminated within the section.
+  if (NSec.Data[NSec.Size - 1] != '\0')
+    return make_error<JITLinkError>("C string literal section " +
+                                    NSec.GraphSection->getName() +
+                                    " does not end with null terminator");
+
+  /// Sort into reverse order to use as a stack.
+  llvm::sort(NSyms,
+             [](const NormalizedSymbol *LHS, const NormalizedSymbol *RHS) {
+               if (LHS->Value != RHS->Value)
+                 return LHS->Value > RHS->Value;
+               if (LHS->L != RHS->L)
+                 return LHS->L > RHS->L;
+               if (LHS->S != RHS->S)
+                 return LHS->S > RHS->S;
+               if (RHS->Name) {
+                 if (!LHS->Name)
+                   return true;
+                 return *LHS->Name > *RHS->Name;
+               }
+               return false;
+             });
+
+  bool SectionIsNoDeadStrip = NSec.Flags & MachO::S_ATTR_NO_DEAD_STRIP;
+  bool SectionIsText = NSec.Flags & MachO::S_ATTR_PURE_INSTRUCTIONS;
+  orc::ExecutorAddrDiff BlockStart = 0;
+
+  // Scan section for null characters. Each NUL closes the current string
+  // [BlockStart, I] and becomes its own block.
+  for (size_t I = 0; I != NSec.Size; ++I)
+    if (NSec.Data[I] == '\0') {
+      orc::ExecutorAddrDiff BlockEnd = I + 1;
+      size_t BlockSize = BlockEnd - BlockStart;
+      // Create a block for this null terminated string.
+      auto &B = G->createContentBlock(*NSec.GraphSection,
+                                      {NSec.Data + BlockStart, BlockSize},
+                                      NSec.Address + BlockStart, 1, 0);
+
+      LLVM_DEBUG({
+        dbgs() << " Created block " << formatv("{0:x}", B.getAddress())
+               << " -- " << formatv("{0:x}", B.getAddress() + B.getSize())
+               << " for \"" << StringRef(B.getContent().data()) << "\"\n";
+      });
+
+      // If there's no symbol at the start of this block then create one.
+      if (NSyms.empty() ||
+          orc::ExecutorAddr(NSyms.back()->Value) != B.getAddress()) {
+        auto &S = G->addAnonymousSymbol(B, 0, BlockSize, false, false);
+        setCanonicalSymbol(NSec, S);
+        LLVM_DEBUG({
+          dbgs() << " Adding anonymous symbol for c-string block "
+                 << formatv("{0:x16} -- {1:x16}", S.getAddress(),
+                            S.getAddress() + BlockSize)
+                 << "\n";
+        });
+      }
+
+      // Process any remaining symbols that point into this block.
+      // Symbols are popped from the reverse-sorted stack; each extends to
+      // the end of the block, and only the first symbol seen at a given
+      // address becomes canonical.
+      auto LastCanonicalAddr = B.getAddress() + BlockEnd;
+      while (!NSyms.empty() && orc::ExecutorAddr(NSyms.back()->Value) <
+                                   B.getAddress() + BlockSize) {
+        auto &NSym = *NSyms.back();
+        size_t SymSize = (B.getAddress() + BlockSize) -
+                         orc::ExecutorAddr(NSyms.back()->Value);
+        bool SymLive =
+            (NSym.Desc & MachO::N_NO_DEAD_STRIP) || SectionIsNoDeadStrip;
+
+        bool IsCanonical = false;
+        if (LastCanonicalAddr != orc::ExecutorAddr(NSym.Value)) {
+          IsCanonical = true;
+          LastCanonicalAddr = orc::ExecutorAddr(NSym.Value);
+        }
+
+        createStandardGraphSymbol(NSym, B, SymSize, SectionIsText, SymLive,
+                                  IsCanonical);
+
+        NSyms.pop_back();
+      }
+
+      BlockStart += BlockSize;
+    }
+
+  return Error::success();
+}
+
+// LinkGraph pass: split the compact-unwind section into one block per
+// unwind record and add a keep-alive edge from each record's target function
+// block back to the record, so records survive dead-stripping exactly when
+// their functions do. A no-op if the graph has no compact-unwind section.
+Error CompactUnwindSplitter::operator()(LinkGraph &G) {
+  auto *CUSec = G.findSectionByName(CompactUnwindSectionName);
+  if (!CUSec)
+    return Error::success();
+
+  if (!G.getTargetTriple().isOSBinFormatMachO())
+    return make_error<JITLinkError>(
+        "Error linking " + G.getName() +
+        ": compact unwind splitting not supported on non-macho target " +
+        G.getTargetTriple().str());
+
+  unsigned CURecordSize = 0;
+  unsigned PersonalityEdgeOffset = 0;
+  unsigned LSDAEdgeOffset = 0;
+  switch (G.getTargetTriple().getArch()) {
+  case Triple::aarch64:
+  case Triple::x86_64:
+    // 64-bit compact-unwind record format:
+    // Range start: 8 bytes.
+    // Range size: 4 bytes.
+    // CU encoding: 4 bytes.
+    // Personality: 8 bytes.
+    // LSDA: 8 bytes.
+    CURecordSize = 32;
+    PersonalityEdgeOffset = 16;
+    LSDAEdgeOffset = 24;
+    break;
+  default:
+    return make_error<JITLinkError>(
+        "Error linking " + G.getName() +
+        ": compact unwind splitting not supported on " +
+        G.getTargetTriple().getArchName());
+  }
+
+  // Snapshot the block list: splitting mutates the section's block set, so
+  // we can't iterate it directly.
+  std::vector<Block *> OriginalBlocks(CUSec->blocks().begin(),
+                                      CUSec->blocks().end());
+  LLVM_DEBUG({
+    dbgs() << "In " << G.getName() << " splitting compact unwind section "
+           << CompactUnwindSectionName << " containing "
+           << OriginalBlocks.size() << " initial blocks...\n";
+  });
+
+  while (!OriginalBlocks.empty()) {
+    auto *B = OriginalBlocks.back();
+    OriginalBlocks.pop_back();
+
+    if (B->getSize() == 0) {
+      LLVM_DEBUG({
+        dbgs() << " Skipping empty block at "
+               << formatv("{0:x16}", B->getAddress()) << "\n";
+      });
+      continue;
+    }
+
+    LLVM_DEBUG({
+      dbgs() << " Splitting block at " << formatv("{0:x16}", B->getAddress())
+             << " into " << (B->getSize() / CURecordSize)
+             << " compact unwind record(s)\n";
+    });
+
+    // Block size must be an exact multiple of the fixed record size.
+    if (B->getSize() % CURecordSize)
+      return make_error<JITLinkError>(
+          "Error splitting compact unwind record in " + G.getName() +
+          ": block at " + formatv("{0:x}", B->getAddress()) + " has size " +
+          formatv("{0:x}", B->getSize()) +
+          " (not a multiple of CU record size of " +
+          formatv("{0:x}", CURecordSize) + ")");
+
+    unsigned NumBlocks = B->getSize() / CURecordSize;
+    LinkGraph::SplitBlockCache C;
+
+    for (unsigned I = 0; I != NumBlocks; ++I) {
+      // Carve the next CURecordSize bytes off into their own block.
+      auto &CURec = G.splitBlock(*B, CURecordSize, &C);
+      bool AddedKeepAlive = false;
+
+      for (auto &E : CURec.edges()) {
+        // The edge at offset 0 points at the function the record describes;
+        // reverse it into a keep-alive edge on the function's block.
+        if (E.getOffset() == 0) {
+          LLVM_DEBUG({
+            dbgs() << " Updating compact unwind record at "
+                   << formatv("{0:x16}", CURec.getAddress()) << " to point to "
+                   << (E.getTarget().hasName() ? E.getTarget().getName()
+                                               : StringRef())
+                   << " (at " << formatv("{0:x16}", E.getTarget().getAddress())
+                   << ")\n";
+          });
+
+          if (E.getTarget().isExternal())
+            return make_error<JITLinkError>(
+                "Error adding keep-alive edge for compact unwind record at " +
+                formatv("{0:x}", CURec.getAddress()) + ": target " +
+                E.getTarget().getName() + " is an external symbol");
+          auto &TgtBlock = E.getTarget().getBlock();
+          auto &CURecSym =
+              G.addAnonymousSymbol(CURec, 0, CURecordSize, false, false);
+          TgtBlock.addEdge(Edge::KeepAlive, 0, CURecSym, 0);
+          AddedKeepAlive = true;
+        } else if (E.getOffset() != PersonalityEdgeOffset &&
+                   E.getOffset() != LSDAEdgeOffset)
+          // Only personality/LSDA edges are expected elsewhere in a record.
+          return make_error<JITLinkError>("Unexpected edge at offset " +
+                                          formatv("{0:x}", E.getOffset()) +
+                                          " in compact unwind record at " +
+                                          formatv("{0:x}", CURec.getAddress()));
+      }
+
+      if (!AddedKeepAlive)
+        return make_error<JITLinkError>(
+            "Error adding keep-alive edge for compact unwind record at " +
+            formatv("{0:x}", CURec.getAddress()) +
+            ": no outgoing target edge at offset 0");
+    }
+  }
+  return Error::success();
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
new file mode 100644
index 0000000000..2951a85330
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachOLinkGraphBuilder.h
@@ -0,0 +1,250 @@
+//===----- MachOLinkGraphBuilder.h - MachO LinkGraph builder ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic MachO LinkGraph building code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
+#define LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Object/MachO.h"
+
+#include "EHFrameSupportImpl.h"
+#include "JITLinkGeneric.h"
+
+#include <list>
+
+namespace llvm {
+namespace jitlink {
+
+class MachOLinkGraphBuilder {
+public:
+ virtual ~MachOLinkGraphBuilder();
+ Expected<std::unique_ptr<LinkGraph>> buildGraph();
+
+protected:
+
+ struct NormalizedSymbol {
+ friend class MachOLinkGraphBuilder;
+
+ private:
+ NormalizedSymbol(Optional<StringRef> Name, uint64_t Value, uint8_t Type,
+ uint8_t Sect, uint16_t Desc, Linkage L, Scope S)
+ : Name(Name), Value(Value), Type(Type), Sect(Sect), Desc(Desc), L(L),
+ S(S) {
+ assert((!Name || !Name->empty()) && "Name must be none or non-empty");
+ }
+
+ public:
+ NormalizedSymbol(const NormalizedSymbol &) = delete;
+ NormalizedSymbol &operator=(const NormalizedSymbol &) = delete;
+ NormalizedSymbol(NormalizedSymbol &&) = delete;
+ NormalizedSymbol &operator=(NormalizedSymbol &&) = delete;
+
+ Optional<StringRef> Name;
+ uint64_t Value = 0;
+ uint8_t Type = 0;
+ uint8_t Sect = 0;
+ uint16_t Desc = 0;
+ Linkage L = Linkage::Strong;
+ Scope S = Scope::Default;
+ Symbol *GraphSymbol = nullptr;
+ };
+
+ // Normalized section representation. Section and segment names are guaranteed
+ // to be null-terminated, hence the extra bytes on SegName and SectName.
+ class NormalizedSection {
+ friend class MachOLinkGraphBuilder;
+
+ private:
+ NormalizedSection() = default;
+
+ public:
+ char SectName[17];
+ char SegName[17];
+ orc::ExecutorAddr Address;
+ uint64_t Size = 0;
+ uint64_t Alignment = 0;
+ uint32_t Flags = 0;
+ const char *Data = nullptr;
+ Section *GraphSection = nullptr;
+ std::map<orc::ExecutorAddr, Symbol *> CanonicalSymbols;
+ };
+
+ using SectionParserFunction = std::function<Error(NormalizedSection &S)>;
+
+ MachOLinkGraphBuilder(const object::MachOObjectFile &Obj, Triple TT,
+ LinkGraph::GetEdgeKindNameFunction GetEdgeKindName);
+
+ LinkGraph &getGraph() const { return *G; }
+
+ const object::MachOObjectFile &getObject() const { return Obj; }
+
+ void addCustomSectionParser(StringRef SectionName,
+ SectionParserFunction Parse);
+
+ virtual Error addRelocations() = 0;
+
+ /// Create a symbol.
+ template <typename... ArgTs>
+ NormalizedSymbol &createNormalizedSymbol(ArgTs &&... Args) {
+ NormalizedSymbol *Sym = reinterpret_cast<NormalizedSymbol *>(
+ Allocator.Allocate<NormalizedSymbol>());
+ new (Sym) NormalizedSymbol(std::forward<ArgTs>(Args)...);
+ return *Sym;
+ }
+
+ /// Index is zero-based (MachO section indexes are usually one-based) and
+ /// assumed to be in-range. Client is responsible for checking.
+ NormalizedSection &getSectionByIndex(unsigned Index) {
+ auto I = IndexToSection.find(Index);
+ assert(I != IndexToSection.end() && "No section recorded at index");
+ return I->second;
+ }
+
+ /// Try to get the section at the given index. Will return an error if the
+ /// given index is out of range, or if no section has been added for the given
+ /// index.
+ Expected<NormalizedSection &> findSectionByIndex(unsigned Index) {
+ auto I = IndexToSection.find(Index);
+ if (I == IndexToSection.end())
+ return make_error<JITLinkError>("No section recorded for index " +
+ formatv("{0:d}", Index));
+ return I->second;
+ }
+
+ /// Try to get the symbol at the given index. Will return an error if the
+ /// given index is out of range, or if no symbol has been added for the given
+ /// index.
+ Expected<NormalizedSymbol &> findSymbolByIndex(uint64_t Index) {
+ auto I = IndexToSymbol.find(Index);
+ if (I == IndexToSymbol.end())
+ return make_error<JITLinkError>("No symbol at index " +
+ formatv("{0:d}", Index));
+ assert(I->second && "Null symbol at index");
+ return *I->second;
+ }
+
+ /// Returns the symbol with the highest address not greater than the search
+ /// address, or null if no such symbol exists.
+ Symbol *getSymbolByAddress(NormalizedSection &NSec,
+ orc::ExecutorAddr Address) {
+ auto I = NSec.CanonicalSymbols.upper_bound(Address);
+ if (I == NSec.CanonicalSymbols.begin())
+ return nullptr;
+ return std::prev(I)->second;
+ }
+
+ /// Returns the symbol with the highest address not greater than the search
+ /// address, or an error if no such symbol exists.
+ Expected<Symbol &> findSymbolByAddress(NormalizedSection &NSec,
+ orc::ExecutorAddr Address) {
+ auto *Sym = getSymbolByAddress(NSec, Address);
+ if (Sym)
+ if (Address <= Sym->getAddress() + Sym->getSize())
+ return *Sym;
+ return make_error<JITLinkError>("No symbol covering address " +
+ formatv("{0:x16}", Address));
+ }
+
+ static Linkage getLinkage(uint16_t Desc);
+ static Scope getScope(StringRef Name, uint8_t Type);
+ static bool isAltEntry(const NormalizedSymbol &NSym);
+
+ static bool isDebugSection(const NormalizedSection &NSec);
+ static bool isZeroFillSection(const NormalizedSection &NSec);
+
+ MachO::relocation_info
+ getRelocationInfo(const object::relocation_iterator RelItr) {
+ MachO::any_relocation_info ARI =
+ getObject().getRelocation(RelItr->getRawDataRefImpl());
+ MachO::relocation_info RI;
+ RI.r_address = ARI.r_word0;
+ RI.r_symbolnum = ARI.r_word1 & 0xffffff;
+ RI.r_pcrel = (ARI.r_word1 >> 24) & 1;
+ RI.r_length = (ARI.r_word1 >> 25) & 3;
+ RI.r_extern = (ARI.r_word1 >> 27) & 1;
+ RI.r_type = (ARI.r_word1 >> 28);
+ return RI;
+ }
+
+private:
+ static unsigned getPointerSize(const object::MachOObjectFile &Obj);
+ static support::endianness getEndianness(const object::MachOObjectFile &Obj);
+
+ void setCanonicalSymbol(NormalizedSection &NSec, Symbol &Sym) {
+ auto *&CanonicalSymEntry = NSec.CanonicalSymbols[Sym.getAddress()];
+ // There should be no symbol at this address, or, if there is,
+ // it should be a zero-sized symbol from an empty section (which
+ // we can safely override).
+ assert((!CanonicalSymEntry || CanonicalSymEntry->getSize() == 0) &&
+ "Duplicate canonical symbol at address");
+ CanonicalSymEntry = &Sym;
+ }
+
+ Section &getCommonSection();
+ void addSectionStartSymAndBlock(unsigned SecIndex, Section &GraphSec,
+ orc::ExecutorAddr Address, const char *Data,
+ orc::ExecutorAddrDiff Size,
+ uint32_t Alignment, bool IsLive);
+
+ Error createNormalizedSections();
+ Error createNormalizedSymbols();
+
+ /// Create graph blocks and symbols for externals, absolutes, commons and
+ /// all defined symbols in sections without custom parsers.
+ Error graphifyRegularSymbols();
+
+ /// Create and return a graph symbol for the given normalized symbol.
+ ///
+ /// NSym's GraphSymbol member will be updated to point at the newly created
+ /// symbol.
+ Symbol &createStandardGraphSymbol(NormalizedSymbol &Sym, Block &B,
+ size_t Size, bool IsText,
+ bool IsNoDeadStrip, bool IsCanonical);
+
+ /// Create graph blocks and symbols for all sections.
+ Error graphifySectionsWithCustomParsers();
+
+ /// Graphify cstring section.
+ Error graphifyCStringSection(NormalizedSection &NSec,
+ std::vector<NormalizedSymbol *> NSyms);
+
+ // Put the BumpPtrAllocator first so that we don't free any of the underlying
+ // memory until the Symbol/Addressable destructors have been run.
+ BumpPtrAllocator Allocator;
+
+ const object::MachOObjectFile &Obj;
+ std::unique_ptr<LinkGraph> G;
+
+ DenseMap<unsigned, NormalizedSection> IndexToSection;
+ Section *CommonSection = nullptr;
+
+ DenseMap<uint32_t, NormalizedSymbol *> IndexToSymbol;
+ StringMap<SectionParserFunction> CustomSectionParserFunctions;
+};
+
+/// A pass to split up __LD,__compact_unwind sections.
+class CompactUnwindSplitter {
+public:
+ CompactUnwindSplitter(StringRef CompactUnwindSectionName)
+ : CompactUnwindSectionName(CompactUnwindSectionName) {}
+ Error operator()(LinkGraph &G);
+
+private:
+ StringRef CompactUnwindSectionName;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#endif // LIB_EXECUTIONENGINE_JITLINK_MACHOLINKGRAPHBUILDER_H
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO_arm64.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
new file mode 100644
index 0000000000..3ca2e40c72
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO_arm64.cpp
@@ -0,0 +1,771 @@
+//===---- MachO_arm64.cpp - JIT linker implementation for MachO/arm64 -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/arm64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_arm64.h"
+
+#include "MachOLinkGraphBuilder.h"
+#include "PerGraphGOTAndPLTStubsBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::jitlink::MachO_arm64_Edges;
+
+namespace {
+
+class MachOLinkGraphBuilder_arm64 : public MachOLinkGraphBuilder {
+public:
+ MachOLinkGraphBuilder_arm64(const object::MachOObjectFile &Obj)
+ : MachOLinkGraphBuilder(Obj, Triple("arm64-apple-darwin"),
+ getMachOARM64RelocationKindName),
+ NumSymbols(Obj.getSymtabLoadCommand().nsyms) {}
+
+private:
+ static Expected<MachOARM64RelocationKind>
+ getRelocationKind(const MachO::relocation_info &RI) {
+ switch (RI.r_type) {
+ case MachO::ARM64_RELOC_UNSIGNED:
+ if (!RI.r_pcrel) {
+ if (RI.r_length == 3)
+ return RI.r_extern ? Pointer64 : Pointer64Anon;
+ else if (RI.r_length == 2)
+ return Pointer32;
+ }
+ break;
+ case MachO::ARM64_RELOC_SUBTRACTOR:
+ // SUBTRACTOR must be non-pc-rel, extern, with length 2 or 3.
+ // Initially represent SUBTRACTOR relocations with 'Delta<W>'.
+ // They may be turned into NegDelta<W> by parsePairRelocation.
+ if (!RI.r_pcrel && RI.r_extern) {
+ if (RI.r_length == 2)
+ return Delta32;
+ else if (RI.r_length == 3)
+ return Delta64;
+ }
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Branch26;
+ break;
+ case MachO::ARM64_RELOC_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return Page21;
+ break;
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PageOffset12;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return GOTPage21;
+ break;
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return GOTPageOffset12;
+ break;
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return PointerToGOT;
+ break;
+ case MachO::ARM64_RELOC_ADDEND:
+ if (!RI.r_pcrel && !RI.r_extern && RI.r_length == 2)
+ return PairedAddend;
+ break;
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return TLVPage21;
+ break;
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ if (!RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return TLVPageOffset12;
+ break;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported arm64 relocation: address=" +
+ formatv("{0:x8}", RI.r_address) +
+ ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+ ", kind=" + formatv("{0:x1}", RI.r_type) +
+ ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+ ", extern=" + (RI.r_extern ? "true" : "false") +
+ ", length=" + formatv("{0:d}", RI.r_length));
+ }
+
+ using PairRelocInfo =
+ std::tuple<MachOARM64RelocationKind, Symbol *, uint64_t>;
+
+ // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+ // returns the edge kind and addend to be used.
+ Expected<PairRelocInfo>
+ parsePairRelocation(Block &BlockToFix, Edge::Kind SubtractorKind,
+ const MachO::relocation_info &SubRI,
+ orc::ExecutorAddr FixupAddress, const char *FixupContent,
+ object::relocation_iterator &UnsignedRelItr,
+ object::relocation_iterator &RelEnd) {
+ using namespace support;
+
+ assert(((SubtractorKind == Delta32 && SubRI.r_length == 2) ||
+ (SubtractorKind == Delta64 && SubRI.r_length == 3)) &&
+ "Subtractor kind should match length");
+ assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+ assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+ if (UnsignedRelItr == RelEnd)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR without paired "
+ "UNSIGNED relocation");
+
+ auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+ if (SubRI.r_address != UnsignedRI.r_address)
+ return make_error<JITLinkError>("arm64 SUBTRACTOR and paired UNSIGNED "
+ "point to different addresses");
+
+ if (SubRI.r_length != UnsignedRI.r_length)
+ return make_error<JITLinkError>("length of arm64 SUBTRACTOR and paired "
+ "UNSIGNED reloc must match");
+
+ Symbol *FromSymbol;
+ if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+ FromSymbol = FromSymbolOrErr->GraphSymbol;
+ else
+ return FromSymbolOrErr.takeError();
+
+ // Read the current fixup value.
+ uint64_t FixupValue = 0;
+ if (SubRI.r_length == 3)
+ FixupValue = *(const little64_t *)FixupContent;
+ else
+ FixupValue = *(const little32_t *)FixupContent;
+
+ // Find 'ToSymbol' using symbol number or address, depending on whether the
+ // paired UNSIGNED relocation is extern.
+ Symbol *ToSymbol = nullptr;
+ if (UnsignedRI.r_extern) {
+ // Find target symbol by symbol index.
+ if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+ ToSymbol = ToSymbolOrErr->GraphSymbol;
+ else
+ return ToSymbolOrErr.takeError();
+ } else {
+ auto ToSymbolSec = findSectionByIndex(UnsignedRI.r_symbolnum - 1);
+ if (!ToSymbolSec)
+ return ToSymbolSec.takeError();
+ ToSymbol = getSymbolByAddress(*ToSymbolSec, ToSymbolSec->Address);
+ assert(ToSymbol && "No symbol for section");
+ FixupValue -= ToSymbol->getAddress().getValue();
+ }
+
+ MachOARM64RelocationKind DeltaKind;
+ Symbol *TargetSymbol;
+ uint64_t Addend;
+ if (&BlockToFix == &FromSymbol->getAddressable()) {
+ TargetSymbol = ToSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? Delta64 : Delta32;
+ Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+ // FIXME: handle extern 'from'.
+ } else if (&BlockToFix == &ToSymbol->getAddressable()) {
+ TargetSymbol = &*FromSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? NegDelta64 : NegDelta32;
+ Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+ } else {
+ // BlockToFix was neither FromSymbol nor ToSymbol.
+ return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+ "either 'A' or 'B' (or a symbol in one "
+ "of their alt-entry groups)");
+ }
+
+ return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+ }
+
+ Error addRelocations() override {
+ using namespace support;
+ auto &Obj = getObject();
+
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ for (auto &S : Obj.sections()) {
+
+ orc::ExecutorAddr SectionAddress(S.getAddress());
+
+      // Skip relocations for virtual sections.
+ if (S.isVirtual()) {
+ if (S.relocation_begin() != S.relocation_end())
+ return make_error<JITLinkError>("Virtual section contains "
+ "relocations");
+ continue;
+ }
+
+ auto NSec =
+ findSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
+ if (!NSec)
+ return NSec.takeError();
+
+ // Skip relocations for MachO sections without corresponding graph
+ // sections.
+ {
+ if (!NSec->GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping relocations for MachO section "
+ << NSec->SegName << "/" << NSec->SectName
+ << " which has no associated graph section\n";
+ });
+ continue;
+ }
+ }
+
+ for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+ RelItr != RelEnd; ++RelItr) {
+
+ MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+ // Validate the relocation kind.
+ auto Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ // Find the address of the value to fix up.
+ orc::ExecutorAddr FixupAddress =
+ SectionAddress + (uint32_t)RI.r_address;
+ LLVM_DEBUG({
+ dbgs() << " " << NSec->SectName << " + "
+ << formatv("{0:x8}", RI.r_address) << ":\n";
+ });
+
+ // Find the block that the fixup points to.
+ Block *BlockToFix = nullptr;
+ {
+ auto SymbolToFixOrErr = findSymbolByAddress(*NSec, FixupAddress);
+ if (!SymbolToFixOrErr)
+ return SymbolToFixOrErr.takeError();
+ BlockToFix = &SymbolToFixOrErr->getBlock();
+ }
+
+ if (FixupAddress + orc::ExecutorAddrDiff(1ULL << RI.r_length) >
+ BlockToFix->getAddress() + BlockToFix->getContent().size())
+ return make_error<JITLinkError>(
+ "Relocation content extends past end of fixup block");
+
+ // Get a pointer to the fixup content.
+ const char *FixupContent = BlockToFix->getContent().data() +
+ (FixupAddress - BlockToFix->getAddress());
+
+ // The target symbol and addend will be populated by the switch below.
+ Symbol *TargetSymbol = nullptr;
+ uint64_t Addend = 0;
+
+ if (*Kind == PairedAddend) {
+ // If this is an Addend relocation then process it and move to the
+ // paired reloc.
+
+ Addend = SignExtend64(RI.r_symbolnum, 24);
+
+ if (RelItr == RelEnd)
+ return make_error<JITLinkError>("Unpaired Addend reloc at " +
+ formatv("{0:x16}", FixupAddress));
+ ++RelItr;
+ RI = getRelocationInfo(RelItr);
+
+ Kind = getRelocationKind(RI);
+ if (!Kind)
+ return Kind.takeError();
+
+ if (*Kind != Branch26 && *Kind != Page21 && *Kind != PageOffset12)
+ return make_error<JITLinkError>(
+ "Invalid relocation pair: Addend + " +
+ StringRef(getMachOARM64RelocationKindName(*Kind)));
+
+ LLVM_DEBUG({
+ dbgs() << " Addend: value = " << formatv("{0:x6}", Addend)
+ << ", pair is " << getMachOARM64RelocationKindName(*Kind)
+ << "\n";
+ });
+
+ // Find the address of the value to fix up.
+ orc::ExecutorAddr PairedFixupAddress =
+ SectionAddress + (uint32_t)RI.r_address;
+ if (PairedFixupAddress != FixupAddress)
+ return make_error<JITLinkError>("Paired relocation points at "
+ "different target");
+ }
+
+ switch (*Kind) {
+ case Branch26: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0x7fffffff) != 0x14000000)
+ return make_error<JITLinkError>("BRANCH26 target is not a B or BL "
+ "instruction with a zero addend");
+ break;
+ }
+ case Pointer32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ break;
+ case Pointer64:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle64_t *)FixupContent;
+ break;
+ case Pointer64Anon: {
+ orc::ExecutorAddr TargetAddress(*(const ulittle64_t *)FixupContent);
+ auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
+ if (!TargetNSec)
+ return TargetNSec.takeError();
+ if (auto TargetSymbolOrErr =
+ findSymbolByAddress(*TargetNSec, TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ break;
+ }
+ case Page21:
+ case TLVPage21:
+ case GOTPage21: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xffffffe0) != 0x90000000)
+ return make_error<JITLinkError>("PAGE21/GOTPAGE21 target is not an "
+ "ADRP instruction with a zero "
+ "addend");
+ break;
+ }
+ case PageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ uint32_t EncodedAddend = (Instr & 0x003FFC00) >> 10;
+ if (EncodedAddend != 0)
+ return make_error<JITLinkError>("GOTPAGEOFF12 target has non-zero "
+ "encoded addend");
+ break;
+ }
+ case TLVPageOffset12:
+ case GOTPageOffset12: {
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ uint32_t Instr = *(const ulittle32_t *)FixupContent;
+ if ((Instr & 0xfffffc00) != 0xf9400000)
+ return make_error<JITLinkError>("GOTPAGEOFF12 target is not an LDR "
+ "immediate instruction with a zero "
+ "addend");
+ break;
+ }
+ case PointerToGOT:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ break;
+ case Delta32:
+ case Delta64: {
+ // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+ // parsePairRelocation handles the paired reloc, and returns the
+ // edge kind to be used (either Delta32/Delta64, or
+ // NegDelta32/NegDelta64, depending on the direction of the
+ // subtraction) along with the addend.
+ auto PairInfo =
+ parsePairRelocation(*BlockToFix, *Kind, RI, FixupAddress,
+ FixupContent, ++RelItr, RelEnd);
+ if (!PairInfo)
+ return PairInfo.takeError();
+ std::tie(*Kind, TargetSymbol, Addend) = *PairInfo;
+ assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+ break;
+ }
+ default:
+ llvm_unreachable("Special relocation kind should not appear in "
+ "mach-o file");
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " ";
+ Edge GE(*Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE,
+ getMachOARM64RelocationKindName(*Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(*Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+
+ unsigned NumSymbols = 0;
+};
+
+class PerGraphGOTAndPLTStubsBuilder_MachO_arm64
+ : public PerGraphGOTAndPLTStubsBuilder<
+ PerGraphGOTAndPLTStubsBuilder_MachO_arm64> {
+public:
+ using PerGraphGOTAndPLTStubsBuilder<
+ PerGraphGOTAndPLTStubsBuilder_MachO_arm64>::PerGraphGOTAndPLTStubsBuilder;
+
+ bool isGOTEdgeToFix(Edge &E) const {
+ return E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12 ||
+ E.getKind() == TLVPage21 || E.getKind() == TLVPageOffset12 ||
+ E.getKind() == PointerToGOT;
+ }
+
+ Symbol &createGOTEntry(Symbol &Target) {
+ auto &GOTEntryBlock = G.createContentBlock(
+ getGOTSection(), getGOTEntryBlockContent(), orc::ExecutorAddr(), 8, 0);
+ GOTEntryBlock.addEdge(Pointer64, 0, Target, 0);
+ return G.addAnonymousSymbol(GOTEntryBlock, 0, 8, false, false);
+ }
+
+ void fixGOTEdge(Edge &E, Symbol &GOTEntry) {
+ if (E.getKind() == GOTPage21 || E.getKind() == GOTPageOffset12 ||
+ E.getKind() == TLVPage21 || E.getKind() == TLVPageOffset12) {
+ // Update the target, but leave the edge addend as-is.
+ E.setTarget(GOTEntry);
+ } else if (E.getKind() == PointerToGOT) {
+ E.setTarget(GOTEntry);
+ E.setKind(Delta32);
+ } else
+ llvm_unreachable("Not a GOT edge?");
+ }
+
+ bool isExternalBranchEdge(Edge &E) {
+ return E.getKind() == Branch26 && !E.getTarget().isDefined();
+ }
+
+ Symbol &createPLTStub(Symbol &Target) {
+ auto &StubContentBlock = G.createContentBlock(
+ getStubsSection(), getStubBlockContent(), orc::ExecutorAddr(), 1, 0);
+ // Re-use GOT entries for stub targets.
+ auto &GOTEntrySymbol = getGOTEntry(Target);
+ StubContentBlock.addEdge(LDRLiteral19, 0, GOTEntrySymbol, 0);
+ return G.addAnonymousSymbol(StubContentBlock, 0, 8, true, false);
+ }
+
+ void fixPLTEdge(Edge &E, Symbol &Stub) {
+ assert(E.getKind() == Branch26 && "Not a Branch32 edge?");
+ assert(E.getAddend() == 0 && "Branch32 edge has non-zero addend?");
+ E.setTarget(Stub);
+ }
+
+private:
+ Section &getGOTSection() {
+ if (!GOTSection)
+ GOTSection = &G.createSection("$__GOT", MemProt::Read | MemProt::Exec);
+ return *GOTSection;
+ }
+
+ Section &getStubsSection() {
+ if (!StubsSection)
+ StubsSection =
+ &G.createSection("$__STUBS", MemProt::Read | MemProt::Exec);
+ return *StubsSection;
+ }
+
+ ArrayRef<char> getGOTEntryBlockContent() {
+ return {reinterpret_cast<const char *>(NullGOTEntryContent),
+ sizeof(NullGOTEntryContent)};
+ }
+
+ ArrayRef<char> getStubBlockContent() {
+ return {reinterpret_cast<const char *>(StubContent), sizeof(StubContent)};
+ }
+
+ static const uint8_t NullGOTEntryContent[8];
+ static const uint8_t StubContent[8];
+ Section *GOTSection = nullptr;
+ Section *StubsSection = nullptr;
+};
+
+const uint8_t
+ PerGraphGOTAndPLTStubsBuilder_MachO_arm64::NullGOTEntryContent[8] = {
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+const uint8_t PerGraphGOTAndPLTStubsBuilder_MachO_arm64::StubContent[8] = {
+ 0x10, 0x00, 0x00, 0x58, // LDR x16, <literal>
+ 0x00, 0x02, 0x1f, 0xd6 // BR x16
+};
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+class MachOJITLinker_arm64 : public JITLinker<MachOJITLinker_arm64> {
+ friend class JITLinker<MachOJITLinker_arm64>;
+
+public:
+ MachOJITLinker_arm64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+
+ static unsigned getPageOffset12Shift(uint32_t Instr) {
+ constexpr uint32_t LoadStoreImm12Mask = 0x3b000000;
+ constexpr uint32_t Vec128Mask = 0x04800000;
+
+ if ((Instr & LoadStoreImm12Mask) == 0x39000000) {
+ uint32_t ImplicitShift = Instr >> 30;
+ if (ImplicitShift == 0)
+ if ((Instr & Vec128Mask) == Vec128Mask)
+ ImplicitShift = 4;
+
+ return ImplicitShift;
+ }
+
+ return 0;
+ }
+
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ using namespace support;
+
+ char *BlockWorkingMem = B.getAlreadyMutableContent().data();
+ char *FixupPtr = BlockWorkingMem + E.getOffset();
+ orc::ExecutorAddr FixupAddress = B.getAddress() + E.getOffset();
+
+ switch (E.getKind()) {
+ case Branch26: {
+ assert((FixupAddress.getValue() & 0x3) == 0 &&
+ "Branch-inst is not 32-bit aligned");
+
+ int64_t Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+
+ if (static_cast<uint64_t>(Value) & 0x3)
+ return make_error<JITLinkError>("Branch26 target is not 32-bit "
+ "aligned");
+
+ if (Value < -(1 << 27) || Value > ((1 << 27) - 1))
+ return makeTargetOutOfRangeError(G, B, E);
+
+ uint32_t RawInstr = *(little32_t *)FixupPtr;
+ assert((RawInstr & 0x7fffffff) == 0x14000000 &&
+ "RawInstr isn't a B or BR immediate instruction");
+ uint32_t Imm = (static_cast<uint32_t>(Value) & ((1 << 28) - 1)) >> 2;
+ uint32_t FixedInstr = RawInstr | Imm;
+ *(little32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case Pointer32: {
+ uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
+ if (Value > std::numeric_limits<uint32_t>::max())
+ return makeTargetOutOfRangeError(G, B, E);
+ *(ulittle32_t *)FixupPtr = Value;
+ break;
+ }
+ case Pointer64:
+ case Pointer64Anon: {
+ uint64_t Value = E.getTarget().getAddress().getValue() + E.getAddend();
+ *(ulittle64_t *)FixupPtr = Value;
+ break;
+ }
+ case Page21:
+ case TLVPage21:
+ case GOTPage21: {
+ assert((E.getKind() != GOTPage21 || E.getAddend() == 0) &&
+ "GOTPAGE21 with non-zero addend");
+ uint64_t TargetPage =
+ (E.getTarget().getAddress().getValue() + E.getAddend()) &
+ ~static_cast<uint64_t>(4096 - 1);
+ uint64_t PCPage =
+ FixupAddress.getValue() & ~static_cast<uint64_t>(4096 - 1);
+
+ int64_t PageDelta = TargetPage - PCPage;
+ if (PageDelta < -(1 << 30) || PageDelta > ((1 << 30) - 1))
+ return makeTargetOutOfRangeError(G, B, E);
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert((RawInstr & 0xffffffe0) == 0x90000000 &&
+ "RawInstr isn't an ADRP instruction");
+ uint32_t ImmLo = (static_cast<uint64_t>(PageDelta) >> 12) & 0x3;
+ uint32_t ImmHi = (static_cast<uint64_t>(PageDelta) >> 14) & 0x7ffff;
+ uint32_t FixedInstr = RawInstr | (ImmLo << 29) | (ImmHi << 5);
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case PageOffset12: {
+ uint64_t TargetOffset =
+ (E.getTarget().getAddress() + E.getAddend()).getValue() & 0xfff;
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ unsigned ImmShift = getPageOffset12Shift(RawInstr);
+
+ if (TargetOffset & ((1 << ImmShift) - 1))
+ return make_error<JITLinkError>("PAGEOFF12 target is not aligned");
+
+ uint32_t EncodedImm = (TargetOffset >> ImmShift) << 10;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case TLVPageOffset12:
+ case GOTPageOffset12: {
+ assert(E.getAddend() == 0 && "GOTPAGEOF12 with non-zero addend");
+
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert((RawInstr & 0xfffffc00) == 0xf9400000 &&
+ "RawInstr isn't a 64-bit LDR immediate");
+
+ uint32_t TargetOffset = E.getTarget().getAddress().getValue() & 0xfff;
+ assert((TargetOffset & 0x7) == 0 && "GOT entry is not 8-byte aligned");
+ uint32_t EncodedImm = (TargetOffset >> 3) << 10;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case LDRLiteral19: {
+ assert((FixupAddress.getValue() & 0x3) == 0 &&
+ "LDR is not 32-bit aligned");
+ assert(E.getAddend() == 0 && "LDRLiteral19 with non-zero addend");
+ uint32_t RawInstr = *(ulittle32_t *)FixupPtr;
+ assert(RawInstr == 0x58000010 && "RawInstr isn't a 64-bit LDR literal");
+ int64_t Delta = E.getTarget().getAddress() - FixupAddress;
+ if (Delta & 0x3)
+ return make_error<JITLinkError>("LDR literal target is not 32-bit "
+ "aligned");
+ if (Delta < -(1 << 20) || Delta > ((1 << 20) - 1))
+ return makeTargetOutOfRangeError(G, B, E);
+
+ uint32_t EncodedImm =
+ ((static_cast<uint32_t>(Delta) >> 2) & 0x7ffff) << 5;
+ uint32_t FixedInstr = RawInstr | EncodedImm;
+ *(ulittle32_t *)FixupPtr = FixedInstr;
+ break;
+ }
+ case Delta32:
+ case Delta64:
+ case NegDelta32:
+ case NegDelta64: {
+ int64_t Value;
+ if (E.getKind() == Delta32 || E.getKind() == Delta64)
+ Value = E.getTarget().getAddress() - FixupAddress + E.getAddend();
+ else
+ Value = FixupAddress - E.getTarget().getAddress() + E.getAddend();
+
+ if (E.getKind() == Delta32 || E.getKind() == NegDelta32) {
+ if (Value < std::numeric_limits<int32_t>::min() ||
+ Value > std::numeric_limits<int32_t>::max())
+ return makeTargetOutOfRangeError(G, B, E);
+ *(little32_t *)FixupPtr = Value;
+ } else
+ *(little64_t *)FixupPtr = Value;
+ break;
+ }
+ default:
+ llvm_unreachable("Unrecognized edge kind");
+ }
+
+ return Error::success();
+ }
+
+ uint64_t NullValue = 0;
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject_arm64(MemoryBufferRef ObjectBuffer) {
+ auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjectBuffer);
+ if (!MachOObj)
+ return MachOObj.takeError();
+ return MachOLinkGraphBuilder_arm64(**MachOObj).buildGraph();
+}
+
+void link_MachO_arm64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+
+ PassConfiguration Config;
+
+ if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add compact unwind splitter pass.
+ Config.PrePrunePasses.push_back(
+ CompactUnwindSplitter("__LD,__compact_unwind"));
+
+    // Add eh-frame passes.
+ // FIXME: Prune eh-frames for which compact-unwind is available once
+ // we support compact-unwind registration with libunwind.
+ Config.PrePrunePasses.push_back(EHFrameSplitter("__TEXT,__eh_frame"));
+ Config.PrePrunePasses.push_back(
+ EHFrameEdgeFixer("__TEXT,__eh_frame", 8, Delta64, Delta32, NegDelta32));
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back(
+ PerGraphGOTAndPLTStubsBuilder_MachO_arm64::asPass);
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Construct a JITLinker and run the link function.
+ MachOJITLinker_arm64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+const char *getMachOARM64RelocationKindName(Edge::Kind R) {
+ switch (R) {
+ case Branch26:
+ return "Branch26";
+ case Pointer64:
+ return "Pointer64";
+ case Pointer64Anon:
+ return "Pointer64Anon";
+ case Page21:
+ return "Page21";
+ case PageOffset12:
+ return "PageOffset12";
+ case GOTPage21:
+ return "GOTPage21";
+ case GOTPageOffset12:
+ return "GOTPageOffset12";
+ case TLVPage21:
+ return "TLVPage21";
+ case TLVPageOffset12:
+ return "TLVPageOffset12";
+ case PointerToGOT:
+ return "PointerToGOT";
+ case PairedAddend:
+ return "PairedAddend";
+ case LDRLiteral19:
+ return "LDRLiteral19";
+ case Delta32:
+ return "Delta32";
+ case Delta64:
+ return "Delta64";
+ case NegDelta32:
+ return "NegDelta32";
+ case NegDelta64:
+ return "NegDelta64";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(R));
+ }
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
new file mode 100644
index 0000000000..82afaa3aa3
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MachO_x86_64.cpp
@@ -0,0 +1,516 @@
+//===---- MachO_x86_64.cpp -JIT linker implementation for MachO/x86-64 ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO/x86-64 jit-link implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MachO_x86_64.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+
+#include "MachOLinkGraphBuilder.h"
+#include "PerGraphGOTAndPLTStubsBuilder.h"
+
+#define DEBUG_TYPE "jitlink"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+
+namespace {
+
+class MachOLinkGraphBuilder_x86_64 : public MachOLinkGraphBuilder {
+public:
+ MachOLinkGraphBuilder_x86_64(const object::MachOObjectFile &Obj)
+ : MachOLinkGraphBuilder(Obj, Triple("x86_64-apple-darwin"),
+ x86_64::getEdgeKindName) {}
+
+private:
+ enum MachONormalizedRelocationType : unsigned {
+ MachOBranch32,
+ MachOPointer32,
+ MachOPointer64,
+ MachOPointer64Anon,
+ MachOPCRel32,
+ MachOPCRel32Minus1,
+ MachOPCRel32Minus2,
+ MachOPCRel32Minus4,
+ MachOPCRel32Anon,
+ MachOPCRel32Minus1Anon,
+ MachOPCRel32Minus2Anon,
+ MachOPCRel32Minus4Anon,
+ MachOPCRel32GOTLoad,
+ MachOPCRel32GOT,
+ MachOPCRel32TLV,
+ MachOSubtractor32,
+ MachOSubtractor64,
+ };
+
+ static Expected<MachONormalizedRelocationType>
+ getRelocKind(const MachO::relocation_info &RI) {
+ switch (RI.r_type) {
+ case MachO::X86_64_RELOC_UNSIGNED:
+ if (!RI.r_pcrel) {
+ if (RI.r_length == 3)
+ return RI.r_extern ? MachOPointer64 : MachOPointer64Anon;
+ else if (RI.r_extern && RI.r_length == 2)
+ return MachOPointer32;
+ }
+ break;
+ case MachO::X86_64_RELOC_SIGNED:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? MachOPCRel32 : MachOPCRel32Anon;
+ break;
+ case MachO::X86_64_RELOC_BRANCH:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOBranch32;
+ break;
+ case MachO::X86_64_RELOC_GOT_LOAD:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOPCRel32GOTLoad;
+ break;
+ case MachO::X86_64_RELOC_GOT:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOPCRel32GOT;
+ break;
+ case MachO::X86_64_RELOC_SUBTRACTOR:
+ if (!RI.r_pcrel && RI.r_extern) {
+ if (RI.r_length == 2)
+ return MachOSubtractor32;
+ else if (RI.r_length == 3)
+ return MachOSubtractor64;
+ }
+ break;
+ case MachO::X86_64_RELOC_SIGNED_1:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? MachOPCRel32Minus1 : MachOPCRel32Minus1Anon;
+ break;
+ case MachO::X86_64_RELOC_SIGNED_2:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? MachOPCRel32Minus2 : MachOPCRel32Minus2Anon;
+ break;
+ case MachO::X86_64_RELOC_SIGNED_4:
+ if (RI.r_pcrel && RI.r_length == 2)
+ return RI.r_extern ? MachOPCRel32Minus4 : MachOPCRel32Minus4Anon;
+ break;
+ case MachO::X86_64_RELOC_TLV:
+ if (RI.r_pcrel && RI.r_extern && RI.r_length == 2)
+ return MachOPCRel32TLV;
+ break;
+ }
+
+ return make_error<JITLinkError>(
+ "Unsupported x86-64 relocation: address=" +
+ formatv("{0:x8}", RI.r_address) +
+ ", symbolnum=" + formatv("{0:x6}", RI.r_symbolnum) +
+ ", kind=" + formatv("{0:x1}", RI.r_type) +
+ ", pc_rel=" + (RI.r_pcrel ? "true" : "false") +
+ ", extern=" + (RI.r_extern ? "true" : "false") +
+ ", length=" + formatv("{0:d}", RI.r_length));
+ }
+
+ using PairRelocInfo = std::tuple<Edge::Kind, Symbol *, uint64_t>;
+
+ // Parses paired SUBTRACTOR/UNSIGNED relocations and, on success,
+ // returns the edge kind and addend to be used.
+ Expected<PairRelocInfo> parsePairRelocation(
+ Block &BlockToFix, MachONormalizedRelocationType SubtractorKind,
+ const MachO::relocation_info &SubRI, orc::ExecutorAddr FixupAddress,
+ const char *FixupContent, object::relocation_iterator &UnsignedRelItr,
+ object::relocation_iterator &RelEnd) {
+ using namespace support;
+
+ assert(((SubtractorKind == MachOSubtractor32 && SubRI.r_length == 2) ||
+ (SubtractorKind == MachOSubtractor64 && SubRI.r_length == 3)) &&
+ "Subtractor kind should match length");
+ assert(SubRI.r_extern && "SUBTRACTOR reloc symbol should be extern");
+ assert(!SubRI.r_pcrel && "SUBTRACTOR reloc should not be PCRel");
+
+ if (UnsignedRelItr == RelEnd)
+ return make_error<JITLinkError>("x86_64 SUBTRACTOR without paired "
+ "UNSIGNED relocation");
+
+ auto UnsignedRI = getRelocationInfo(UnsignedRelItr);
+
+ if (SubRI.r_address != UnsignedRI.r_address)
+ return make_error<JITLinkError>("x86_64 SUBTRACTOR and paired UNSIGNED "
+ "point to different addresses");
+
+ if (SubRI.r_length != UnsignedRI.r_length)
+ return make_error<JITLinkError>("length of x86_64 SUBTRACTOR and paired "
+ "UNSIGNED reloc must match");
+
+ Symbol *FromSymbol;
+ if (auto FromSymbolOrErr = findSymbolByIndex(SubRI.r_symbolnum))
+ FromSymbol = FromSymbolOrErr->GraphSymbol;
+ else
+ return FromSymbolOrErr.takeError();
+
+ // Read the current fixup value.
+ uint64_t FixupValue = 0;
+ if (SubRI.r_length == 3)
+ FixupValue = *(const little64_t *)FixupContent;
+ else
+ FixupValue = *(const little32_t *)FixupContent;
+
+ // Find 'ToSymbol' using symbol number or address, depending on whether the
+ // paired UNSIGNED relocation is extern.
+ Symbol *ToSymbol = nullptr;
+ if (UnsignedRI.r_extern) {
+ // Find target symbol by symbol index.
+ if (auto ToSymbolOrErr = findSymbolByIndex(UnsignedRI.r_symbolnum))
+ ToSymbol = ToSymbolOrErr->GraphSymbol;
+ else
+ return ToSymbolOrErr.takeError();
+ } else {
+ auto ToSymbolSec = findSectionByIndex(UnsignedRI.r_symbolnum - 1);
+ if (!ToSymbolSec)
+ return ToSymbolSec.takeError();
+ ToSymbol = getSymbolByAddress(*ToSymbolSec, ToSymbolSec->Address);
+ assert(ToSymbol && "No symbol for section");
+ FixupValue -= ToSymbol->getAddress().getValue();
+ }
+
+ Edge::Kind DeltaKind;
+ Symbol *TargetSymbol;
+ uint64_t Addend;
+ if (&BlockToFix == &FromSymbol->getAddressable()) {
+ TargetSymbol = ToSymbol;
+ DeltaKind = (SubRI.r_length == 3) ? x86_64::Delta64 : x86_64::Delta32;
+ Addend = FixupValue + (FixupAddress - FromSymbol->getAddress());
+ // FIXME: handle extern 'from'.
+ } else if (&BlockToFix == &ToSymbol->getAddressable()) {
+ TargetSymbol = FromSymbol;
+ DeltaKind =
+ (SubRI.r_length == 3) ? x86_64::NegDelta64 : x86_64::NegDelta32;
+ Addend = FixupValue - (FixupAddress - ToSymbol->getAddress());
+ } else {
+ // BlockToFix was neither FromSymbol nor ToSymbol.
+ return make_error<JITLinkError>("SUBTRACTOR relocation must fix up "
+ "either 'A' or 'B' (or a symbol in one "
+ "of their alt-entry chains)");
+ }
+
+ return PairRelocInfo(DeltaKind, TargetSymbol, Addend);
+ }
+
+ Error addRelocations() override {
+ using namespace support;
+ auto &Obj = getObject();
+
+ LLVM_DEBUG(dbgs() << "Processing relocations:\n");
+
+ for (auto &S : Obj.sections()) {
+
+ orc::ExecutorAddr SectionAddress(S.getAddress());
+
+ // Skip relocations virtual sections.
+ if (S.isVirtual()) {
+ if (S.relocation_begin() != S.relocation_end())
+ return make_error<JITLinkError>("Virtual section contains "
+ "relocations");
+ continue;
+ }
+
+ auto NSec =
+ findSectionByIndex(Obj.getSectionIndex(S.getRawDataRefImpl()));
+ if (!NSec)
+ return NSec.takeError();
+
+ // Skip relocations for MachO sections without corresponding graph
+ // sections.
+ {
+ if (!NSec->GraphSection) {
+ LLVM_DEBUG({
+ dbgs() << " Skipping relocations for MachO section "
+ << NSec->SegName << "/" << NSec->SectName
+ << " which has no associated graph section\n";
+ });
+ continue;
+ }
+ }
+
+ // Add relocations for section.
+ for (auto RelItr = S.relocation_begin(), RelEnd = S.relocation_end();
+ RelItr != RelEnd; ++RelItr) {
+
+ MachO::relocation_info RI = getRelocationInfo(RelItr);
+
+ // Find the address of the value to fix up.
+ auto FixupAddress = SectionAddress + (uint32_t)RI.r_address;
+
+ LLVM_DEBUG({
+ dbgs() << " " << NSec->SectName << " + "
+ << formatv("{0:x8}", RI.r_address) << ":\n";
+ });
+
+ // Find the block that the fixup points to.
+ Block *BlockToFix = nullptr;
+ {
+ auto SymbolToFixOrErr = findSymbolByAddress(*NSec, FixupAddress);
+ if (!SymbolToFixOrErr)
+ return SymbolToFixOrErr.takeError();
+ BlockToFix = &SymbolToFixOrErr->getBlock();
+ }
+
+ if (FixupAddress + orc::ExecutorAddrDiff(1ULL << RI.r_length) >
+ BlockToFix->getAddress() + BlockToFix->getContent().size())
+ return make_error<JITLinkError>(
+ "Relocation extends past end of fixup block");
+
+ // Get a pointer to the fixup content.
+ const char *FixupContent = BlockToFix->getContent().data() +
+ (FixupAddress - BlockToFix->getAddress());
+
+ size_t FixupOffset = FixupAddress - BlockToFix->getAddress();
+
+ // The target symbol and addend will be populated by the switch below.
+ Symbol *TargetSymbol = nullptr;
+ uint64_t Addend = 0;
+
+ // Validate the relocation kind.
+ auto MachORelocKind = getRelocKind(RI);
+ if (!MachORelocKind)
+ return MachORelocKind.takeError();
+
+ Edge::Kind Kind = Edge::Invalid;
+
+ switch (*MachORelocKind) {
+ case MachOBranch32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const little32_t *)FixupContent;
+ Kind = x86_64::BranchPCRel32;
+ break;
+ case MachOPCRel32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const little32_t *)FixupContent - 4;
+ Kind = x86_64::Delta32;
+ break;
+ case MachOPCRel32GOTLoad:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const little32_t *)FixupContent;
+ Kind = x86_64::RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable;
+ if (FixupOffset < 3)
+ return make_error<JITLinkError>("GOTLD at invalid offset " +
+ formatv("{0}", FixupOffset));
+ break;
+ case MachOPCRel32GOT:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const little32_t *)FixupContent - 4;
+ Kind = x86_64::RequestGOTAndTransformToDelta32;
+ break;
+ case MachOPCRel32TLV:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const little32_t *)FixupContent;
+ Kind = x86_64::RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable;
+ if (FixupOffset < 3)
+ return make_error<JITLinkError>("TLV at invalid offset " +
+ formatv("{0}", FixupOffset));
+ break;
+ case MachOPointer32:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle32_t *)FixupContent;
+ Kind = x86_64::Pointer32;
+ break;
+ case MachOPointer64:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const ulittle64_t *)FixupContent;
+ Kind = x86_64::Pointer64;
+ break;
+ case MachOPointer64Anon: {
+ orc::ExecutorAddr TargetAddress(*(const ulittle64_t *)FixupContent);
+ auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
+ if (!TargetNSec)
+ return TargetNSec.takeError();
+ if (auto TargetSymbolOrErr =
+ findSymbolByAddress(*TargetNSec, TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress();
+ Kind = x86_64::Pointer64;
+ break;
+ }
+ case MachOPCRel32Minus1:
+ case MachOPCRel32Minus2:
+ case MachOPCRel32Minus4:
+ if (auto TargetSymbolOrErr = findSymbolByIndex(RI.r_symbolnum))
+ TargetSymbol = TargetSymbolOrErr->GraphSymbol;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = *(const little32_t *)FixupContent - 4;
+ Kind = x86_64::Delta32;
+ break;
+ case MachOPCRel32Anon: {
+ orc::ExecutorAddr TargetAddress(FixupAddress + 4 +
+ *(const little32_t *)FixupContent);
+ auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
+ if (!TargetNSec)
+ return TargetNSec.takeError();
+ if (auto TargetSymbolOrErr =
+ findSymbolByAddress(*TargetNSec, TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress() - 4;
+ Kind = x86_64::Delta32;
+ break;
+ }
+ case MachOPCRel32Minus1Anon:
+ case MachOPCRel32Minus2Anon:
+ case MachOPCRel32Minus4Anon: {
+ orc::ExecutorAddrDiff Delta =
+ 4 + orc::ExecutorAddrDiff(
+ 1ULL << (*MachORelocKind - MachOPCRel32Minus1Anon));
+ orc::ExecutorAddr TargetAddress =
+ FixupAddress + Delta + *(const little32_t *)FixupContent;
+ auto TargetNSec = findSectionByIndex(RI.r_symbolnum - 1);
+ if (!TargetNSec)
+ return TargetNSec.takeError();
+ if (auto TargetSymbolOrErr =
+ findSymbolByAddress(*TargetNSec, TargetAddress))
+ TargetSymbol = &*TargetSymbolOrErr;
+ else
+ return TargetSymbolOrErr.takeError();
+ Addend = TargetAddress - TargetSymbol->getAddress() - Delta;
+ Kind = x86_64::Delta32;
+ break;
+ }
+ case MachOSubtractor32:
+ case MachOSubtractor64: {
+ // We use Delta32/Delta64 to represent SUBTRACTOR relocations.
+ // parsePairRelocation handles the paired reloc, and returns the
+ // edge kind to be used (either Delta32/Delta64, or
+ // NegDelta32/NegDelta64, depending on the direction of the
+ // subtraction) along with the addend.
+ auto PairInfo =
+ parsePairRelocation(*BlockToFix, *MachORelocKind, RI,
+ FixupAddress, FixupContent, ++RelItr, RelEnd);
+ if (!PairInfo)
+ return PairInfo.takeError();
+ std::tie(Kind, TargetSymbol, Addend) = *PairInfo;
+ assert(TargetSymbol && "No target symbol from parsePairRelocation?");
+ break;
+ }
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " ";
+ Edge GE(Kind, FixupAddress - BlockToFix->getAddress(), *TargetSymbol,
+ Addend);
+ printEdge(dbgs(), *BlockToFix, GE, x86_64::getEdgeKindName(Kind));
+ dbgs() << "\n";
+ });
+ BlockToFix->addEdge(Kind, FixupAddress - BlockToFix->getAddress(),
+ *TargetSymbol, Addend);
+ }
+ }
+ return Error::success();
+ }
+};
+
+Error buildGOTAndStubs_MachO_x86_64(LinkGraph &G) {
+ x86_64::GOTTableManager GOT;
+ x86_64::PLTTableManager PLT(GOT);
+ visitExistingEdges(G, GOT, PLT);
+ return Error::success();
+}
+
+} // namespace
+
+namespace llvm {
+namespace jitlink {
+
+class MachOJITLinker_x86_64 : public JITLinker<MachOJITLinker_x86_64> {
+ friend class JITLinker<MachOJITLinker_x86_64>;
+
+public:
+ MachOJITLinker_x86_64(std::unique_ptr<JITLinkContext> Ctx,
+ std::unique_ptr<LinkGraph> G,
+ PassConfiguration PassConfig)
+ : JITLinker(std::move(Ctx), std::move(G), std::move(PassConfig)) {}
+
+private:
+ Error applyFixup(LinkGraph &G, Block &B, const Edge &E) const {
+ return x86_64::applyFixup(G, B, E, nullptr);
+ }
+};
+
+Expected<std::unique_ptr<LinkGraph>>
+createLinkGraphFromMachOObject_x86_64(MemoryBufferRef ObjectBuffer) {
+ auto MachOObj = object::ObjectFile::createMachOObjectFile(ObjectBuffer);
+ if (!MachOObj)
+ return MachOObj.takeError();
+ return MachOLinkGraphBuilder_x86_64(**MachOObj).buildGraph();
+}
+
+void link_MachO_x86_64(std::unique_ptr<LinkGraph> G,
+ std::unique_ptr<JITLinkContext> Ctx) {
+
+ PassConfiguration Config;
+
+ if (Ctx->shouldAddDefaultTargetPasses(G->getTargetTriple())) {
+ // Add eh-frame passses.
+ Config.PrePrunePasses.push_back(createEHFrameSplitterPass_MachO_x86_64());
+ Config.PrePrunePasses.push_back(createEHFrameEdgeFixerPass_MachO_x86_64());
+
+ // Add compact unwind splitter pass.
+ Config.PrePrunePasses.push_back(
+ CompactUnwindSplitter("__LD,__compact_unwind"));
+
+ // Add a mark-live pass.
+ if (auto MarkLive = Ctx->getMarkLivePass(G->getTargetTriple()))
+ Config.PrePrunePasses.push_back(std::move(MarkLive));
+ else
+ Config.PrePrunePasses.push_back(markAllSymbolsLive);
+
+ // Add an in-place GOT/Stubs pass.
+ Config.PostPrunePasses.push_back(buildGOTAndStubs_MachO_x86_64);
+
+ // Add GOT/Stubs optimizer pass.
+ Config.PreFixupPasses.push_back(x86_64::optimizeGOTAndStubAccesses);
+ }
+
+ if (auto Err = Ctx->modifyPassConfig(*G, Config))
+ return Ctx->notifyFailed(std::move(Err));
+
+ // Construct a JITLinker and run the link function.
+ MachOJITLinker_x86_64::link(std::move(Ctx), std::move(G), std::move(Config));
+}
+
+LinkGraphPassFunction createEHFrameSplitterPass_MachO_x86_64() {
+ return EHFrameSplitter("__TEXT,__eh_frame");
+}
+
+LinkGraphPassFunction createEHFrameEdgeFixerPass_MachO_x86_64() {
+ return EHFrameEdgeFixer("__TEXT,__eh_frame", x86_64::PointerSize,
+ x86_64::Delta64, x86_64::Delta32, x86_64::NegDelta32);
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MemoryFlags.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MemoryFlags.cpp
new file mode 100644
index 0000000000..b73a310b29
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/MemoryFlags.cpp
@@ -0,0 +1,33 @@
+//===------------- MemoryFlags.cpp - Memory allocation flags --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/MemoryFlags.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+raw_ostream &operator<<(raw_ostream &OS, MemProt MP) {
+ return OS << (((MP & MemProt::Read) != MemProt::None) ? 'R' : '-')
+ << (((MP & MemProt::Write) != MemProt::None) ? 'W' : '-')
+ << (((MP & MemProt::Exec) != MemProt::None) ? 'X' : '-');
+}
+
+raw_ostream &operator<<(raw_ostream &OS, MemDeallocPolicy MDP) {
+ return OS << (MDP == MemDeallocPolicy::Standard ? "standard" : "finalize");
+}
+
+raw_ostream &operator<<(raw_ostream &OS, AllocGroup AG) {
+ return OS << '(' << AG.getMemProt() << ", " << AG.getMemDeallocPolicy()
+ << ')';
+}
+
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h
new file mode 100644
index 0000000000..6e325f92ba
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/PerGraphGOTAndPLTStubsBuilder.h
@@ -0,0 +1,126 @@
+//===--------------- PerGraphGOTAndPLTStubBuilder.h -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Construct GOT and PLT entries for each graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_EXECUTIONENGINE_JITLINK_PERGRAPHGOTANDPLTSTUBSBUILDER_H
+#define LLVM_EXECUTIONENGINE_JITLINK_PERGRAPHGOTANDPLTSTUBSBUILDER_H
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+
+/// Per-object GOT and PLT Stub builder.
+///
+/// Constructs GOT entries and PLT stubs in every graph for referenced symbols.
+/// Building these blocks in every graph is likely to lead to duplicate entries
+/// in the JITLinkDylib, but allows graphs to be trivially removed independently
+/// without affecting other graphs (since those other graphs will have their own
+/// copies of any required entries).
+template <typename BuilderImplT>
+class PerGraphGOTAndPLTStubsBuilder {
+public:
+ PerGraphGOTAndPLTStubsBuilder(LinkGraph &G) : G(G) {}
+
+ static Error asPass(LinkGraph &G) { return BuilderImplT(G).run(); }
+
+ Error run() {
+ LLVM_DEBUG(dbgs() << "Running Per-Graph GOT and Stubs builder:\n");
+
+ // We're going to be adding new blocks, but we don't want to iterate over
+ // the new ones, so build a worklist.
+ std::vector<Block *> Worklist(G.blocks().begin(), G.blocks().end());
+
+ for (auto *B : Worklist)
+ for (auto &E : B->edges()) {
+ if (impl().isGOTEdgeToFix(E)) {
+ LLVM_DEBUG({
+ dbgs() << " Fixing " << G.getEdgeKindName(E.getKind())
+ << " edge at " << B->getFixupAddress(E) << " ("
+ << B->getAddress() << " + "
+ << formatv("{0:x}", E.getOffset()) << ")\n";
+ });
+ impl().fixGOTEdge(E, getGOTEntry(E.getTarget()));
+ } else if (impl().isExternalBranchEdge(E)) {
+ LLVM_DEBUG({
+ dbgs() << " Fixing " << G.getEdgeKindName(E.getKind())
+ << " edge at " << B->getFixupAddress(E) << " ("
+ << B->getAddress() << " + "
+ << formatv("{0:x}", E.getOffset()) << ")\n";
+ });
+ impl().fixPLTEdge(E, getPLTStub(E.getTarget()));
+ }
+ }
+
+ return Error::success();
+ }
+
+protected:
+ Symbol &getGOTEntry(Symbol &Target) {
+ assert(Target.hasName() && "GOT edge cannot point to anonymous target");
+
+ auto GOTEntryI = GOTEntries.find(Target.getName());
+
+ // Build the entry if it doesn't exist.
+ if (GOTEntryI == GOTEntries.end()) {
+ auto &GOTEntry = impl().createGOTEntry(Target);
+ LLVM_DEBUG({
+ dbgs() << " Created GOT entry for " << Target.getName() << ": "
+ << GOTEntry << "\n";
+ });
+ GOTEntryI =
+ GOTEntries.insert(std::make_pair(Target.getName(), &GOTEntry)).first;
+ }
+
+ assert(GOTEntryI != GOTEntries.end() && "Could not get GOT entry symbol");
+ LLVM_DEBUG(
+ { dbgs() << " Using GOT entry " << *GOTEntryI->second << "\n"; });
+ return *GOTEntryI->second;
+ }
+
+ Symbol &getPLTStub(Symbol &Target) {
+ assert(Target.hasName() &&
+ "External branch edge can not point to an anonymous target");
+ auto StubI = PLTStubs.find(Target.getName());
+
+ if (StubI == PLTStubs.end()) {
+ auto &StubSymbol = impl().createPLTStub(Target);
+ LLVM_DEBUG({
+ dbgs() << " Created PLT stub for " << Target.getName() << ": "
+ << StubSymbol << "\n";
+ });
+ StubI =
+ PLTStubs.insert(std::make_pair(Target.getName(), &StubSymbol)).first;
+ }
+
+ assert(StubI != PLTStubs.end() && "Count not get stub symbol");
+ LLVM_DEBUG({ dbgs() << " Using PLT stub " << *StubI->second << "\n"; });
+ return *StubI->second;
+ }
+
+ LinkGraph &G;
+
+private:
+ BuilderImplT &impl() { return static_cast<BuilderImplT &>(*this); }
+
+ DenseMap<StringRef, Symbol *> GOTEntries;
+ DenseMap<StringRef, Symbol *> PLTStubs;
+};
+
+} // end namespace jitlink
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif // LLVM_EXECUTIONENGINE_JITLINK_PERGRAPHGOTANDPLTSTUBSBUILDER_H
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/aarch64.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/aarch64.cpp
new file mode 100644
index 0000000000..6dccc48118
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/aarch64.cpp
@@ -0,0 +1,30 @@
+//===---- aarch64.cpp - Generic JITLink aarch64 edge kinds, utilities -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing aarch64 objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/aarch64.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+namespace aarch64 {
+
+const char *getEdgeKindName(Edge::Kind K) {
+ switch (K) {
+ case R_AARCH64_CALL26:
+ return "R_AARCH64_CALL26";
+ }
+ return getGenericEdgeKindName(K);
+}
+} // namespace aarch64
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/riscv.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/riscv.cpp
new file mode 100644
index 0000000000..3ce2cf10a2
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/riscv.cpp
@@ -0,0 +1,72 @@
+//===------ riscv.cpp - Generic JITLink riscv edge kinds, utilities -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing riscv objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/riscv.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+namespace riscv {
+
+const char *getEdgeKindName(Edge::Kind K) {
+ switch (K) {
+ case R_RISCV_32:
+ return "R_RISCV_32";
+ case R_RISCV_64:
+ return "R_RISCV_64";
+ case R_RISCV_BRANCH:
+ return "R_RISCV_BRANCH";
+ case R_RISCV_HI20:
+ return "R_RISCV_HI20";
+ case R_RISCV_LO12_I:
+ return "R_RISCV_LO12_I";
+ case R_RISCV_PCREL_HI20:
+ return "R_RISCV_PCREL_HI20";
+ case R_RISCV_PCREL_LO12_I:
+ return "R_RISCV_PCREL_LO12_I";
+ case R_RISCV_PCREL_LO12_S:
+ return "R_RISCV_PCREL_LO12_S";
+ case R_RISCV_CALL:
+ return "R_RISCV_CALL";
+ case R_RISCV_32_PCREL:
+ return "R_RISCV_32_PCREL";
+ case R_RISCV_ADD64:
+ return "R_RISCV_ADD64";
+ case R_RISCV_ADD32:
+ return "R_RISCV_ADD32";
+ case R_RISCV_ADD16:
+ return "R_RISCV_ADD16";
+ case R_RISCV_ADD8:
+ return "R_RISCV_ADD8";
+ case R_RISCV_SUB64:
+ return "R_RISCV_SUB64";
+ case R_RISCV_SUB32:
+ return "R_RISCV_SUB32";
+ case R_RISCV_SUB16:
+ return "R_RISCV_SUB16";
+ case R_RISCV_SUB8:
+ return "R_RISCV_SUB8";
+ case R_RISCV_SET6:
+ return "R_RISCV_SET6";
+ case R_RISCV_SET8:
+ return "R_RISCV_SET8";
+ case R_RISCV_SET16:
+ return "R_RISCV_SET16";
+ case R_RISCV_SET32:
+ return "R_RISCV_SET32";
+ }
+ return getGenericEdgeKindName(K);
+}
+} // namespace riscv
+} // namespace jitlink
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/x86_64.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/x86_64.cpp
new file mode 100644
index 0000000000..df9979b47e
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/x86_64.cpp
@@ -0,0 +1,189 @@
+//===----- x86_64.cpp - Generic JITLink x86-64 edge kinds, utilities ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic utilities for graphs representing x86-64 objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+
+#define DEBUG_TYPE "jitlink"
+
+namespace llvm {
+namespace jitlink {
+namespace x86_64 {
+
+const char *getEdgeKindName(Edge::Kind K) {
+ switch (K) {
+ case Pointer64:
+ return "Pointer64";
+ case Pointer32:
+ return "Pointer32";
+ case Pointer32Signed:
+ return "Pointer32Signed";
+ case Delta64:
+ return "Delta64";
+ case Delta32:
+ return "Delta32";
+ case NegDelta64:
+ return "NegDelta64";
+ case NegDelta32:
+ return "NegDelta32";
+ case Delta64FromGOT:
+ return "Delta64FromGOT";
+ case BranchPCRel32:
+ return "BranchPCRel32";
+ case BranchPCRel32ToPtrJumpStub:
+ return "BranchPCRel32ToPtrJumpStub";
+ case BranchPCRel32ToPtrJumpStubBypassable:
+ return "BranchPCRel32ToPtrJumpStubBypassable";
+ case RequestGOTAndTransformToDelta32:
+ return "RequestGOTAndTransformToDelta32";
+ case RequestGOTAndTransformToDelta64:
+ return "RequestGOTAndTransformToDelta64";
+ case RequestGOTAndTransformToDelta64FromGOT:
+ return "RequestGOTAndTransformToDelta64FromGOT";
+ case PCRel32GOTLoadREXRelaxable:
+ return "PCRel32GOTLoadREXRelaxable";
+ case RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable:
+ return "RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable";
+ case PCRel32GOTLoadRelaxable:
+ return "PCRel32GOTLoadRelaxable";
+ case RequestGOTAndTransformToPCRel32GOTLoadRelaxable:
+ return "RequestGOTAndTransformToPCRel32GOTLoadRelaxable";
+ case PCRel32TLVPLoadREXRelaxable:
+ return "PCRel32TLVPLoadREXRelaxable";
+ case RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable:
+ return "RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable";
+ default:
+ return getGenericEdgeKindName(static_cast<Edge::Kind>(K));
+ }
+}
+
+const char NullPointerContent[PointerSize] = {0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00};
+
+const char PointerJumpStubContent[6] = {
+ static_cast<char>(0xFFu), 0x25, 0x00, 0x00, 0x00, 0x00};
+
+Error optimizeGOTAndStubAccesses(LinkGraph &G) {
+ LLVM_DEBUG(dbgs() << "Optimizing GOT entries and stubs:\n");
+
+ for (auto *B : G.blocks())
+ for (auto &E : B->edges()) {
+ if (E.getKind() == x86_64::PCRel32GOTLoadRelaxable ||
+ E.getKind() == x86_64::PCRel32GOTLoadREXRelaxable) {
+#ifndef NDEBUG
+ bool REXPrefix = E.getKind() == x86_64::PCRel32GOTLoadREXRelaxable;
+ assert(E.getOffset() >= (REXPrefix ? 3u : 2u) &&
+ "GOT edge occurs too early in block");
+#endif
+ auto *FixupData = reinterpret_cast<uint8_t *>(
+ const_cast<char *>(B->getContent().data())) +
+ E.getOffset();
+ const uint8_t Op = FixupData[-2];
+ const uint8_t ModRM = FixupData[-1];
+
+ auto &GOTEntryBlock = E.getTarget().getBlock();
+ assert(GOTEntryBlock.getSize() == G.getPointerSize() &&
+ "GOT entry block should be pointer sized");
+ assert(GOTEntryBlock.edges_size() == 1 &&
+ "GOT entry should only have one outgoing edge");
+ auto &GOTTarget = GOTEntryBlock.edges().begin()->getTarget();
+ orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();
+ orc::ExecutorAddr EdgeAddr = B->getFixupAddress(E);
+ int64_t Displacement = TargetAddr - EdgeAddr + 4;
+ bool TargetInRangeForImmU32 = isInRangeForImmU32(TargetAddr.getValue());
+ bool DisplacementInRangeForImmS32 = isInRangeForImmS32(Displacement);
+
+ // If both of the Target and displacement is out of range, then
+ // there isn't optimization chance.
+ if (!(TargetInRangeForImmU32 || DisplacementInRangeForImmS32))
+ continue;
+
+ // Transform "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
+ if (Op == 0x8b && DisplacementInRangeForImmS32) {
+ FixupData[-2] = 0x8d;
+ E.setKind(x86_64::Delta32);
+ E.setTarget(GOTTarget);
+ E.setAddend(E.getAddend() - 4);
+ LLVM_DEBUG({
+ dbgs() << " Replaced GOT load wih LEA:\n ";
+ printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ continue;
+ }
+
+ // Transform call/jmp instructions
+ if (Op == 0xff && TargetInRangeForImmU32) {
+ if (ModRM == 0x15) {
+ // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call
+ // foo" But lld convert it to "addr32 call foo, because that makes
+ // result expression to be a single instruction.
+ FixupData[-2] = 0x67;
+ FixupData[-1] = 0xe8;
+ LLVM_DEBUG({
+ dbgs() << " replaced call instruction's memory operand wih imm "
+ "operand:\n ";
+ printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ } else {
+ // Transform "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop"
+ assert(ModRM == 0x25 && "Invalid ModRm for call/jmp instructions");
+ FixupData[-2] = 0xe9;
+ FixupData[3] = 0x90;
+ E.setOffset(E.getOffset() - 1);
+ LLVM_DEBUG({
+ dbgs() << " replaced jmp instruction's memory operand wih imm "
+ "operand:\n ";
+ printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ }
+ E.setKind(x86_64::Pointer32);
+ E.setTarget(GOTTarget);
+ continue;
+ }
+ } else if (E.getKind() == x86_64::BranchPCRel32ToPtrJumpStubBypassable) {
+ auto &StubBlock = E.getTarget().getBlock();
+ assert(StubBlock.getSize() == sizeof(PointerJumpStubContent) &&
+ "Stub block should be stub sized");
+ assert(StubBlock.edges_size() == 1 &&
+ "Stub block should only have one outgoing edge");
+
+ auto &GOTBlock = StubBlock.edges().begin()->getTarget().getBlock();
+ assert(GOTBlock.getSize() == G.getPointerSize() &&
+ "GOT block should be pointer sized");
+ assert(GOTBlock.edges_size() == 1 &&
+ "GOT block should only have one outgoing edge");
+
+ auto &GOTTarget = GOTBlock.edges().begin()->getTarget();
+ orc::ExecutorAddr EdgeAddr = B->getAddress() + E.getOffset();
+ orc::ExecutorAddr TargetAddr = GOTTarget.getAddress();
+
+ int64_t Displacement = TargetAddr - EdgeAddr + 4;
+ if (isInRangeForImmS32(Displacement)) {
+ E.setKind(x86_64::BranchPCRel32);
+ E.setTarget(GOTTarget);
+ LLVM_DEBUG({
+ dbgs() << " Replaced stub branch with direct branch:\n ";
+ printEdge(dbgs(), *B, E, getEdgeKindName(E.getKind()));
+ dbgs() << "\n";
+ });
+ }
+ }
+ }
+
+ return Error::success();
+}
+
+} // end namespace x86_64
+} // end namespace jitlink
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ya.make
new file mode 100644
index 0000000000..0f63dae75b
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/JITLink/ya.make
@@ -0,0 +1,49 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+LICENSE(
+ Apache-2.0 WITH LLVM-exception AND
+ NCSA
+)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm14
+ contrib/libs/llvm14/lib/BinaryFormat
+ contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared
+ contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess
+ contrib/libs/llvm14/lib/Object
+ contrib/libs/llvm14/lib/Support
+)
+
+ADDINCL(
+ contrib/libs/llvm14/lib/ExecutionEngine/JITLink
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ EHFrameSupport.cpp
+ ELF.cpp
+ ELFLinkGraphBuilder.cpp
+ ELF_aarch64.cpp
+ ELF_riscv.cpp
+ ELF_x86_64.cpp
+ JITLink.cpp
+ JITLinkGeneric.cpp
+ JITLinkMemoryManager.cpp
+ MachO.cpp
+ MachOLinkGraphBuilder.cpp
+ MachO_arm64.cpp
+ MachO_x86_64.cpp
+ MemoryFlags.cpp
+ aarch64.cpp
+ riscv.cpp
+ x86_64.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/MCJIT.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/MCJIT.cpp
new file mode 100644
index 0000000000..ed912280ac
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/MCJIT.cpp
@@ -0,0 +1,684 @@
+//===-- MCJIT.cpp - MC-based Just-in-Time Compiler ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCJIT.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/MCJIT.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+#include <mutex>
+
+using namespace llvm;
+
+namespace {
+
+// Static registration hook: constructing this file-local object during static
+// initialization calls MCJIT::Register(), making the MCJIT engine available
+// to clients that link this object file in.
+static struct RegisterJIT {
+  RegisterJIT() { MCJIT::Register(); }
+} JITRegistrator;
+
+}
+
+// Deliberately empty C-linkage anchor: referencing this symbol from client
+// code forces the linker to pull in this object file (and thus run the
+// JITRegistrator static initializer above).
+extern "C" void LLVMLinkInMCJIT() {
+}
+// Factory entry point used to construct an MCJIT execution engine.
+// If either the memory manager or the symbol resolver is null, a single
+// SectionMemoryManager is created and used for both roles.
+// NOTE(review): ErrorStr is accepted but not written to in this path.
+ExecutionEngine *
+MCJIT::createJIT(std::unique_ptr<Module> M, std::string *ErrorStr,
+                 std::shared_ptr<MCJITMemoryManager> MemMgr,
+                 std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+                 std::unique_ptr<TargetMachine> TM) {
+  // Try to register the program as a source of symbols to resolve against.
+  //
+  // FIXME: Don't do this here.
+  sys::DynamicLibrary::LoadLibraryPermanently(nullptr, nullptr);
+
+  if (!MemMgr || !Resolver) {
+    auto RTDyldMM = std::make_shared<SectionMemoryManager>();
+    if (!MemMgr)
+      MemMgr = RTDyldMM;
+    if (!Resolver)
+      Resolver = RTDyldMM;
+  }
+
+  return new MCJIT(std::move(M), std::move(TM), std::move(MemMgr),
+                   std::move(Resolver));
+}
+
+// Constructor: wires the TargetMachine's data layout into the base
+// ExecutionEngine, takes over ownership of the initial module, and registers
+// the GDB JIT-debugging listener.
+MCJIT::MCJIT(std::unique_ptr<Module> M, std::unique_ptr<TargetMachine> TM,
+             std::shared_ptr<MCJITMemoryManager> MemMgr,
+             std::shared_ptr<LegacyJITSymbolResolver> Resolver)
+    : ExecutionEngine(TM->createDataLayout(), std::move(M)), TM(std::move(TM)),
+      Ctx(nullptr), MemMgr(std::move(MemMgr)),
+      Resolver(*this, std::move(Resolver)), Dyld(*this->MemMgr, this->Resolver),
+      ObjCache(nullptr) {
+  // FIXME: We are managing our modules, so we do not want the base class
+  // ExecutionEngine to manage them as well. To avoid double destruction
+  // of the first (and only) module added in ExecutionEngine constructor
+  // we remove it from EE and will destruct it ourselves.
+  //
+  // It may make sense to move our module manager (based on SmallStPtr) back
+  // into EE if the JIT and Interpreter can live with it.
+  // If so, additional functions: addModule, removeModule, FindFunctionNamed,
+  // runStaticConstructorsDestructors could be moved back to EE as well.
+  //
+  std::unique_ptr<Module> First = std::move(Modules[0]);
+  Modules.clear();
+
+  // Modules with a default (empty) layout inherit the target's layout so that
+  // IR and generated code agree on ABI details.
+  if (First->getDataLayout().isDefault())
+    First->setDataLayout(getDataLayout());
+
+  OwnedModules.addModule(std::move(First));
+  RegisterJITEventListener(JITEventListener::createGDBRegistrationListener());
+}
+
+// Destructor: deregisters EH frames, tells every listener about each loaded
+// object being freed, and drops the owned archives. Owned modules are freed
+// by OwningModuleContainer's destructor.
+MCJIT::~MCJIT() {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  Dyld.deregisterEHFrames();
+
+  for (auto &Obj : LoadedObjects)
+    if (Obj)
+      notifyFreeingObject(*Obj);
+
+  Archives.clear();
+}
+
+// Takes ownership of a module and places it in the "added" (not yet
+// compiled) state; its data layout is defaulted to the target's if unset.
+void MCJIT::addModule(std::unique_ptr<Module> M) {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  if (M->getDataLayout().isDefault())
+    M->setDataLayout(getDataLayout());
+
+  OwnedModules.addModule(std::move(M));
+}
+
+// Releases ownership of M (whatever state it is in); returns false if this
+// engine did not own the module.
+bool MCJIT::removeModule(Module *M) {
+  std::lock_guard<sys::Mutex> locked(lock);
+  return OwnedModules.removeModule(M);
+}
+
+// Loads a pre-compiled object file directly into the dynamic linker and
+// takes ownership of it. Aborts via report_fatal_error on a loader error.
+void MCJIT::addObjectFile(std::unique_ptr<object::ObjectFile> Obj) {
+  std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L = Dyld.loadObject(*Obj);
+  if (Dyld.hasError())
+    report_fatal_error(Dyld.getErrorString());
+
+  notifyObjectLoaded(*Obj, *L);
+
+  LoadedObjects.push_back(std::move(Obj));
+}
+
+// OwningBinary overload: splits the binary from its backing buffer, loads
+// the object via the overload above, and keeps the buffer alive in Buffers.
+void MCJIT::addObjectFile(object::OwningBinary<object::ObjectFile> Obj) {
+  std::unique_ptr<object::ObjectFile> ObjFile;
+  std::unique_ptr<MemoryBuffer> MemBuf;
+  std::tie(ObjFile, MemBuf) = Obj.takeBinary();
+  addObjectFile(std::move(ObjFile));
+  Buffers.push_back(std::move(MemBuf));
+}
+
+// Takes ownership of a static archive; its members are searched lazily by
+// findSymbol when a symbol is not found elsewhere.
+void MCJIT::addArchive(object::OwningBinary<object::Archive> A) {
+  Archives.push_back(std::move(A));
+}
+
+// Installs (or clears, with nullptr) the cache consulted before compiling a
+// module and notified after compilation. The cache is not owned by MCJIT.
+void MCJIT::setObjectCache(ObjectCache* NewCache) {
+  std::lock_guard<sys::Mutex> locked(lock);
+  ObjCache = NewCache;
+}
+
+// Compiles module M to an in-memory object file using the TargetMachine's
+// MC emission pipeline and returns the resulting buffer. Also offers the
+// compiled image to the object cache, if one is installed.
+std::unique_ptr<MemoryBuffer> MCJIT::emitObject(Module *M) {
+  assert(M && "Can not emit a null module");
+
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // Materialize all globals in the module if they have not been
+  // materialized already.
+  cantFail(M->materializeAll());
+
+  // This must be a module which has already been added but not loaded to this
+  // MCJIT instance, since these conditions are tested by our caller,
+  // generateCodeForModule.
+
+  legacy::PassManager PM;
+
+  // The RuntimeDyld will take ownership of this shortly
+  SmallVector<char, 4096> ObjBufferSV;
+  raw_svector_ostream ObjStream(ObjBufferSV);
+
+  // Turn the machine code intermediate representation into bytes in memory
+  // that may be executed.
+  if (TM->addPassesToEmitMC(PM, Ctx, ObjStream, !getVerifyModules()))
+    report_fatal_error("Target does not support MC emission!");
+
+  // Initialize passes.
+  PM.run(*M);
+  // Flush the output buffer to get the generated code into memory
+
+  auto CompiledObjBuffer = std::make_unique<SmallVectorMemoryBuffer>(
+      std::move(ObjBufferSV), /*RequiresNullTerminator=*/false);
+
+  // If we have an object cache, tell it about the new object.
+  // Note that we're using the compiled image, not the loaded image (as below).
+  if (ObjCache) {
+    // MemoryBuffer is a thin wrapper around the actual memory, so it's OK
+    // to create a temporary object here and delete it after the call.
+    MemoryBufferRef MB = CompiledObjBuffer->getMemBufferRef();
+    ObjCache->notifyObjectCompiled(M, MB);
+  }
+
+  return CompiledObjBuffer;
+}
+
+// Compiles (or fetches from the object cache) the object for module M and
+// loads it into the dynamic linker, moving M from the "added" to the
+// "loaded" state. Idempotent for modules that are already loaded.
+void MCJIT::generateCodeForModule(Module *M) {
+  // Get a thread lock to make sure we aren't trying to load multiple times
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // This must be a module which has already been added to this MCJIT instance.
+  assert(OwnedModules.ownsModule(M) &&
+         "MCJIT::generateCodeForModule: Unknown module.");
+
+  // Re-compilation is not supported
+  if (OwnedModules.hasModuleBeenLoaded(M))
+    return;
+
+  std::unique_ptr<MemoryBuffer> ObjectToLoad;
+  // Try to load the pre-compiled object from cache if possible
+  if (ObjCache)
+    ObjectToLoad = ObjCache->getObject(M);
+
+  assert(M->getDataLayout() == getDataLayout() && "DataLayout Mismatch");
+
+  // If the cache did not contain a suitable object, compile the object
+  if (!ObjectToLoad) {
+    ObjectToLoad = emitObject(M);
+    assert(ObjectToLoad && "Compilation did not produce an object.");
+  }
+
+  // Load the object into the dynamic linker.
+  // MCJIT now owns the ObjectImage pointer (via its LoadedObjects list).
+  Expected<std::unique_ptr<object::ObjectFile>> LoadedObject =
+      object::ObjectFile::createObjectFile(ObjectToLoad->getMemBufferRef());
+  if (!LoadedObject) {
+    // Render the error into a string for the fatal-error message.
+    std::string Buf;
+    raw_string_ostream OS(Buf);
+    logAllUnhandledErrors(LoadedObject.takeError(), OS);
+    report_fatal_error(Twine(OS.str()));
+  }
+  std::unique_ptr<RuntimeDyld::LoadedObjectInfo> L =
+      Dyld.loadObject(*LoadedObject.get());
+
+  if (Dyld.hasError())
+    report_fatal_error(Dyld.getErrorString());
+
+  notifyObjectLoaded(*LoadedObject.get(), *L);
+
+  // Keep both the raw buffer and the parsed object alive for the JIT's
+  // lifetime; relocated code points into this memory.
+  Buffers.push_back(std::move(ObjectToLoad));
+  LoadedObjects.push_back(std::move(*LoadedObject));
+
+  OwnedModules.markModuleAsLoaded(M);
+}
+
+// Applies relocations, registers EH frames, sets final page permissions, and
+// promotes all "loaded" modules to "finalized". A Dyld error is recorded in
+// ErrMsg rather than aborting.
+void MCJIT::finalizeLoadedModules() {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // Resolve any outstanding relocations.
+  Dyld.resolveRelocations();
+
+  // Check for Dyld error.
+  if (Dyld.hasError())
+    ErrMsg = Dyld.getErrorString().str();
+
+  OwnedModules.markAllLoadedModulesAsFinalized();
+
+  // Register EH frame data for any module we own which has been loaded
+  Dyld.registerEHFrames();
+
+  // Set page permissions.
+  MemMgr->finalizeMemory();
+}
+
+// FIXME: Rename this.
+// Compiles every module still in the "added" state, then finalizes all
+// loaded modules so their code is executable.
+void MCJIT::finalizeObject() {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // Generate code for module is going to move objects out of the 'added' list,
+  // so we need to copy that out before using it:
+  SmallVector<Module*, 16> ModsToAdd;
+  for (auto M : OwnedModules.added())
+    ModsToAdd.push_back(M);
+
+  for (auto M : ModsToAdd)
+    generateCodeForModule(M);
+
+  finalizeLoadedModules();
+}
+
+// Ensures a single owned module is compiled, then finalizes all loaded
+// modules (finalization is global, not per-module).
+void MCJIT::finalizeModule(Module *M) {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // This must be a module which has already been added to this MCJIT instance.
+  assert(OwnedModules.ownsModule(M) && "MCJIT::finalizeModule: Unknown module.");
+
+  // If the module hasn't been compiled, just do that.
+  if (!OwnedModules.hasModuleBeenLoaded(M))
+    generateCodeForModule(M);
+
+  finalizeLoadedModules();
+}
+
+// Looks up a symbol that has already been emitted: first the explicit global
+// mapping maintained by ExecutionEngine, then the RuntimeDyld symbol table.
+// Does not trigger any compilation.
+JITSymbol MCJIT::findExistingSymbol(const std::string &Name) {
+  if (void *Addr = getPointerToGlobalIfAvailable(Name))
+    return JITSymbol(static_cast<uint64_t>(
+                       reinterpret_cast<uintptr_t>(Addr)),
+                     JITSymbolFlags::Exported);
+
+  // If the symbol wasn't found, also search the modules.
+  return Dyld.getSymbol(Name);
+}
+
+// Scans the not-yet-compiled ("added") modules for a definition of Name,
+// stripping the target's global prefix (e.g. '_') first. Returns the owning
+// module or nullptr.
+// NOTE(review): indexing DemangledName[0] assumes Name is non-empty — callers
+// appear to guarantee this; confirm if exposed to arbitrary input.
+Module *MCJIT::findModuleForSymbol(const std::string &Name,
+                                   bool CheckFunctionsOnly) {
+  StringRef DemangledName = Name;
+  if (DemangledName[0] == getDataLayout().getGlobalPrefix())
+    DemangledName = DemangledName.substr(1);
+
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // If it hasn't already been generated, see if it's in one of our modules.
+  for (ModulePtrSet::iterator I = OwnedModules.begin_added(),
+                              E = OwnedModules.end_added();
+       I != E; ++I) {
+    Module *M = *I;
+    Function *F = M->getFunction(DemangledName);
+    if (F && !F->isDeclaration())
+      return M;
+    if (!CheckFunctionsOnly) {
+      GlobalVariable *G = M->getGlobalVariable(DemangledName);
+      if (G && !G->isDeclaration())
+        return M;
+      // FIXME: Do we need to worry about global aliases?
+    }
+  }
+  // We didn't find the symbol in any of our modules.
+  return nullptr;
+}
+
+// Mangles Name per the data layout, resolves it via findSymbol, and returns
+// its address, or 0 when the symbol does not exist. Fatal error if the
+// symbol exists but its address cannot be materialized.
+uint64_t MCJIT::getSymbolAddress(const std::string &Name,
+                                 bool CheckFunctionsOnly) {
+  std::string MangledName;
+  {
+    raw_string_ostream MangledNameStream(MangledName);
+    Mangler::getNameWithPrefix(MangledNameStream, Name, getDataLayout());
+  }
+  if (auto Sym = findSymbol(MangledName, CheckFunctionsOnly)) {
+    if (auto AddrOrErr = Sym.getAddress())
+      return *AddrOrErr;
+    else
+      report_fatal_error(AddrOrErr.takeError());
+  } else if (auto Err = Sym.takeError())
+    // NOTE(review): the error was already moved into Err above, so this second
+    // takeError() yields a success value; upstream LLVM 14 has the same code —
+    // likely intended report_fatal_error(std::move(Err)). Left as-is to match
+    // upstream.
+    report_fatal_error(Sym.takeError());
+  return 0;
+}
+
+// Full symbol resolution: already-emitted symbols, then registered archives
+// (loading matching members on demand), then owned modules (compiling the
+// defining module), and finally the LazyFunctionCreator callback.
+JITSymbol MCJIT::findSymbol(const std::string &Name,
+                            bool CheckFunctionsOnly) {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // First, check to see if we already have this symbol.
+  if (auto Sym = findExistingSymbol(Name))
+    return Sym;
+
+  for (object::OwningBinary<object::Archive> &OB : Archives) {
+    object::Archive *A = OB.getBinary();
+    // Look for our symbols in each Archive
+    auto OptionalChildOrErr = A->findSym(Name);
+    if (!OptionalChildOrErr)
+      report_fatal_error(OptionalChildOrErr.takeError());
+    auto &OptionalChild = *OptionalChildOrErr;
+    if (OptionalChild) {
+      // FIXME: Support nested archives?
+      Expected<std::unique_ptr<object::Binary>> ChildBinOrErr =
+          OptionalChild->getAsBinary();
+      if (!ChildBinOrErr) {
+        // TODO: Actually report errors helpfully.
+        consumeError(ChildBinOrErr.takeError());
+        continue;
+      }
+      std::unique_ptr<object::Binary> &ChildBin = ChildBinOrErr.get();
+      if (ChildBin->isObject()) {
+        // Transfer ownership of the archive member to LoadedObjects.
+        std::unique_ptr<object::ObjectFile> OF(
+            static_cast<object::ObjectFile *>(ChildBin.release()));
+        // This causes the object file to be loaded.
+        addObjectFile(std::move(OF));
+        // The address should be here now.
+        if (auto Sym = findExistingSymbol(Name))
+          return Sym;
+      }
+    }
+  }
+
+  // If it hasn't already been generated, see if it's in one of our modules.
+  Module *M = findModuleForSymbol(Name, CheckFunctionsOnly);
+  if (M) {
+    generateCodeForModule(M);
+
+    // Check the RuntimeDyld table again, it should be there now.
+    return findExistingSymbol(Name);
+  }
+
+  // If a LazyFunctionCreator is installed, use it to get/create the function.
+  // FIXME: Should we instead have a LazySymbolCreator callback?
+  if (LazyFunctionCreator) {
+    auto Addr = static_cast<uint64_t>(
+                  reinterpret_cast<uintptr_t>(LazyFunctionCreator(Name)));
+    return JITSymbol(Addr, JITSymbolFlags::Exported);
+  }
+
+  return nullptr;
+}
+
+// Resolves a global (data) symbol's address; finalizes loaded modules as a
+// side effect when the symbol was found, so the address is usable.
+uint64_t MCJIT::getGlobalValueAddress(const std::string &Name) {
+  std::lock_guard<sys::Mutex> locked(lock);
+  uint64_t Result = getSymbolAddress(Name, false);
+  if (Result != 0)
+    finalizeLoadedModules();
+  return Result;
+}
+
+// Resolves a function symbol's address (functions only); finalizes loaded
+// modules on success so the returned code is executable.
+uint64_t MCJIT::getFunctionAddress(const std::string &Name) {
+  std::lock_guard<sys::Mutex> locked(lock);
+  uint64_t Result = getSymbolAddress(Name, true);
+  if (Result != 0)
+    finalizeLoadedModules();
+  return Result;
+}
+
+// Deprecated. Use getFunctionAddress instead.
+// Returns a host pointer to F's code, compiling F's module on demand.
+// External declarations are resolved through getPointerToNamedFunction.
+void *MCJIT::getPointerToFunction(Function *F) {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // Compute the target-mangled symbol name for F.
+  Mangler Mang;
+  SmallString<128> Name;
+  TM->getNameWithPrefix(Name, F, Mang);
+
+  if (F->isDeclaration() || F->hasAvailableExternallyLinkage()) {
+    bool AbortOnFailure = !F->hasExternalWeakLinkage();
+    void *Addr = getPointerToNamedFunction(Name, AbortOnFailure);
+    updateGlobalMapping(F, Addr);
+    return Addr;
+  }
+
+  Module *M = F->getParent();
+  bool HasBeenAddedButNotLoaded = OwnedModules.hasModuleBeenAddedButNotLoaded(M);
+
+  // Make sure the relevant module has been compiled and loaded.
+  if (HasBeenAddedButNotLoaded)
+    generateCodeForModule(M);
+  else if (!OwnedModules.hasModuleBeenLoaded(M)) {
+    // If this function doesn't belong to one of our modules, we're done.
+    // FIXME: Asking for the pointer to a function that hasn't been registered,
+    //        and isn't a declaration (which is handled above) should probably
+    //        be an assertion.
+    return nullptr;
+  }
+
+  // FIXME: Should the Dyld be retaining module information? Probably not.
+  //
+  // This is the accessor for the target address, so make sure to check the
+  // load address of the symbol, not the local address.
+  return (void*)Dyld.getSymbol(Name).getAddress();
+}
+
+// Helper: runs static ctors (or dtors, per isDtors) for every module in the
+// iterator range [I, E), delegating to the base ExecutionEngine.
+void MCJIT::runStaticConstructorsDestructorsInModulePtrSet(
+    bool isDtors, ModulePtrSet::iterator I, ModulePtrSet::iterator E) {
+  for (; I != E; ++I) {
+    ExecutionEngine::runStaticConstructorsDestructors(**I, isDtors);
+  }
+}
+
+// Runs static constructors or destructors across all owned modules,
+// regardless of state (added, loaded, or finalized).
+void MCJIT::runStaticConstructorsDestructors(bool isDtors) {
+  // Execute global ctors/dtors for each module in the program.
+  runStaticConstructorsDestructorsInModulePtrSet(
+      isDtors, OwnedModules.begin_added(), OwnedModules.end_added());
+  runStaticConstructorsDestructorsInModulePtrSet(
+      isDtors, OwnedModules.begin_loaded(), OwnedModules.end_loaded());
+  runStaticConstructorsDestructorsInModulePtrSet(
+      isDtors, OwnedModules.begin_finalized(), OwnedModules.end_finalized());
+}
+
+// Helper: linear search of [I, E) for a module defining (not merely
+// declaring) a function named FnName.
+Function *MCJIT::FindFunctionNamedInModulePtrSet(StringRef FnName,
+                                                 ModulePtrSet::iterator I,
+                                                 ModulePtrSet::iterator E) {
+  for (; I != E; ++I) {
+    Function *F = (*I)->getFunction(FnName);
+    if (F && !F->isDeclaration())
+      return F;
+  }
+  return nullptr;
+}
+
+// Helper: linear search of [I, E) for a module defining a global variable
+// named Name; AllowInternal also matches internal-linkage globals.
+GlobalVariable *MCJIT::FindGlobalVariableNamedInModulePtrSet(StringRef Name,
+                                                             bool AllowInternal,
+                                                             ModulePtrSet::iterator I,
+                                                             ModulePtrSet::iterator E) {
+  for (; I != E; ++I) {
+    GlobalVariable *GV = (*I)->getGlobalVariable(Name, AllowInternal);
+    if (GV && !GV->isDeclaration())
+      return GV;
+  }
+  return nullptr;
+}
+
+
+// Searches added, then loaded, then finalized modules for a function
+// definition named FnName. O(#modules) — not for hot paths.
+Function *MCJIT::FindFunctionNamed(StringRef FnName) {
+  Function *F = FindFunctionNamedInModulePtrSet(
+      FnName, OwnedModules.begin_added(), OwnedModules.end_added());
+  if (!F)
+    F = FindFunctionNamedInModulePtrSet(FnName, OwnedModules.begin_loaded(),
+                                        OwnedModules.end_loaded());
+  if (!F)
+    F = FindFunctionNamedInModulePtrSet(FnName, OwnedModules.begin_finalized(),
+                                        OwnedModules.end_finalized());
+  return F;
+}
+
+// Searches added, then loaded, then finalized modules for a global variable
+// definition named Name. O(#modules) — not for hot paths.
+GlobalVariable *MCJIT::FindGlobalVariableNamed(StringRef Name, bool AllowInternal) {
+  GlobalVariable *GV = FindGlobalVariableNamedInModulePtrSet(
+      Name, AllowInternal, OwnedModules.begin_added(), OwnedModules.end_added());
+  if (!GV)
+    GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_loaded(),
+                                               OwnedModules.end_loaded());
+  if (!GV)
+    GV = FindGlobalVariableNamedInModulePtrSet(Name, AllowInternal, OwnedModules.begin_finalized(),
+                                               OwnedModules.end_finalized());
+  return GV;
+}
+
+// Compiles and invokes F for a limited set of signatures (main-like
+// prototypes with up to three args, or zero-argument functions of common
+// return types). Anything else is a fatal error — callers should use
+// getFunctionAddress and cast instead.
+GenericValue MCJIT::runFunction(Function *F, ArrayRef<GenericValue> ArgValues) {
+  assert(F && "Function *F was null at entry to run()");
+
+  void *FPtr = getPointerToFunction(F);
+  finalizeModule(F->getParent());
+  assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
+  FunctionType *FTy = F->getFunctionType();
+  Type *RetTy = FTy->getReturnType();
+
+  assert((FTy->getNumParams() == ArgValues.size() ||
+          (FTy->isVarArg() && FTy->getNumParams() <= ArgValues.size())) &&
+         "Wrong number of arguments passed into function!");
+  assert(FTy->getNumParams() == ArgValues.size() &&
+         "This doesn't support passing arguments through varargs (yet)!");
+
+  // Handle some common cases first. These cases correspond to common `main'
+  // prototypes.
+  if (RetTy->isIntegerTy(32) || RetTy->isVoidTy()) {
+    switch (ArgValues.size()) {
+    case 3:
+      // int main(int argc, char **argv, char **envp)
+      if (FTy->getParamType(0)->isIntegerTy(32) &&
+          FTy->getParamType(1)->isPointerTy() &&
+          FTy->getParamType(2)->isPointerTy()) {
+        int (*PF)(int, char **, const char **) =
+          (int(*)(int, char **, const char **))(intptr_t)FPtr;
+
+        // Call the function.
+        GenericValue rv;
+        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+                                 (char **)GVTOP(ArgValues[1]),
+                                 (const char **)GVTOP(ArgValues[2])));
+        return rv;
+      }
+      break;
+    case 2:
+      // int main(int argc, char **argv)
+      if (FTy->getParamType(0)->isIntegerTy(32) &&
+          FTy->getParamType(1)->isPointerTy()) {
+        int (*PF)(int, char **) = (int(*)(int, char **))(intptr_t)FPtr;
+
+        // Call the function.
+        GenericValue rv;
+        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue(),
+                                 (char **)GVTOP(ArgValues[1])));
+        return rv;
+      }
+      break;
+    case 1:
+      // int f(int)
+      if (FTy->getNumParams() == 1 &&
+          FTy->getParamType(0)->isIntegerTy(32)) {
+        GenericValue rv;
+        int (*PF)(int) = (int(*)(int))(intptr_t)FPtr;
+        rv.IntVal = APInt(32, PF(ArgValues[0].IntVal.getZExtValue()));
+        return rv;
+      }
+      break;
+    }
+  }
+
+  // Handle cases where no arguments are passed first.
+  if (ArgValues.empty()) {
+    GenericValue rv;
+    switch (RetTy->getTypeID()) {
+    default: llvm_unreachable("Unknown return type for function call!");
+    case Type::IntegerTyID: {
+      unsigned BitWidth = cast<IntegerType>(RetTy)->getBitWidth();
+      // Dispatch on return width; the callee is invoked through a cast
+      // function pointer of the matching C type.
+      if (BitWidth == 1)
+        rv.IntVal = APInt(BitWidth, ((bool(*)())(intptr_t)FPtr)());
+      else if (BitWidth <= 8)
+        rv.IntVal = APInt(BitWidth, ((char(*)())(intptr_t)FPtr)());
+      else if (BitWidth <= 16)
+        rv.IntVal = APInt(BitWidth, ((short(*)())(intptr_t)FPtr)());
+      else if (BitWidth <= 32)
+        rv.IntVal = APInt(BitWidth, ((int(*)())(intptr_t)FPtr)());
+      else if (BitWidth <= 64)
+        rv.IntVal = APInt(BitWidth, ((int64_t(*)())(intptr_t)FPtr)());
+      else
+        llvm_unreachable("Integer types > 64 bits not supported");
+      return rv;
+    }
+    case Type::VoidTyID:
+      rv.IntVal = APInt(32, ((int(*)())(intptr_t)FPtr)());
+      return rv;
+    case Type::FloatTyID:
+      rv.FloatVal = ((float(*)())(intptr_t)FPtr)();
+      return rv;
+    case Type::DoubleTyID:
+      rv.DoubleVal = ((double(*)())(intptr_t)FPtr)();
+      return rv;
+    case Type::X86_FP80TyID:
+    case Type::FP128TyID:
+    case Type::PPC_FP128TyID:
+      llvm_unreachable("long double not supported yet");
+    case Type::PointerTyID:
+      return PTOGV(((void*(*)())(intptr_t)FPtr)());
+    }
+  }
+
+  report_fatal_error("MCJIT::runFunction does not support full-featured "
+                     "argument passing. Please use "
+                     "ExecutionEngine::getFunctionAddress and cast the result "
+                     "to the desired function pointer type.");
+}
+
+// Resolves an external (library) symbol through the linking resolver, then
+// the LazyFunctionCreator callback. Returns nullptr on failure unless
+// AbortOnFailure is set, in which case it is a fatal error.
+void *MCJIT::getPointerToNamedFunction(StringRef Name, bool AbortOnFailure) {
+  if (!isSymbolSearchingDisabled()) {
+    if (auto Sym = Resolver.findSymbol(std::string(Name))) {
+      if (auto AddrOrErr = Sym.getAddress())
+        return reinterpret_cast<void*>(
+                 static_cast<uintptr_t>(*AddrOrErr));
+    } else if (auto Err = Sym.takeError())
+      report_fatal_error(std::move(Err));
+  }
+
+  /// If a LazyFunctionCreator is installed, use it to get/create the function.
+  if (LazyFunctionCreator)
+    if (void *RP = LazyFunctionCreator(std::string(Name)))
+      return RP;
+
+  if (AbortOnFailure) {
+    report_fatal_error("Program used external function '"+Name+
+                       "' which could not be resolved!");
+  }
+  return nullptr;
+}
+
+// Adds a (non-owned) listener to be notified of object load/free events.
+// A null listener is ignored.
+void MCJIT::RegisterJITEventListener(JITEventListener *L) {
+  if (!L)
+    return;
+  std::lock_guard<sys::Mutex> locked(lock);
+  EventListeners.push_back(L);
+}
+
+// Removes the most recently registered occurrence of L (reverse search).
+// Uses swap-with-back, so the relative order of remaining listeners is not
+// preserved.
+void MCJIT::UnregisterJITEventListener(JITEventListener *L) {
+  if (!L)
+    return;
+  std::lock_guard<sys::Mutex> locked(lock);
+  auto I = find(reverse(EventListeners), L);
+  if (I != EventListeners.rend()) {
+    std::swap(*I, EventListeners.back());
+    EventListeners.pop_back();
+  }
+}
+
+// Informs the memory manager and every registered listener that Obj has been
+// loaded. The listener key is the address of the object's underlying data,
+// which uniquely identifies it for the matching notifyFreeingObject call.
+void MCJIT::notifyObjectLoaded(const object::ObjectFile &Obj,
+                               const RuntimeDyld::LoadedObjectInfo &L) {
+  uint64_t Key =
+      static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Obj.getData().data()));
+  std::lock_guard<sys::Mutex> locked(lock);
+  MemMgr->notifyObjectLoaded(this, Obj);
+  for (unsigned I = 0, S = EventListeners.size(); I < S; ++I) {
+    EventListeners[I]->notifyObjectLoaded(Key, Obj, L);
+  }
+}
+
+// Informs every registered listener that Obj is about to be freed, keyed by
+// the same data-pointer key used in notifyObjectLoaded.
+void MCJIT::notifyFreeingObject(const object::ObjectFile &Obj) {
+  uint64_t Key =
+      static_cast<uint64_t>(reinterpret_cast<uintptr_t>(Obj.getData().data()));
+  std::lock_guard<sys::Mutex> locked(lock);
+  for (JITEventListener *L : EventListeners)
+    L->notifyFreeingObject(Key);
+}
+
+// Resolution order: the owning MCJIT engine's own modules/objects first,
+// then (unless symbol searching is disabled) the client-supplied resolver.
+JITSymbol
+LinkingSymbolResolver::findSymbol(const std::string &Name) {
+  auto Result = ParentEngine.findSymbol(Name, false);
+  if (Result)
+    return Result;
+  if (ParentEngine.isSymbolSearchingDisabled())
+    return nullptr;
+  return ClientResolver->findSymbol(Name);
+}
+
+// Out-of-line anchor to pin LinkingSymbolResolver's vtable to this TU.
+void LinkingSymbolResolver::anchor() {}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/MCJIT.h b/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/MCJIT.h
new file mode 100644
index 0000000000..a5dd420c91
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/MCJIT.h
@@ -0,0 +1,336 @@
+//===-- MCJIT.h - Class definition for the MCJIT ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
+#define LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
+
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+
+namespace llvm {
+class MCJIT;
+class Module;
+class ObjectCache;
+
+// This is a helper class that the MCJIT execution engine uses for linking
+// functions across modules that it owns. It aggregates the memory manager
+// that is passed in to the MCJIT constructor and defers most functionality
+// to that object.
+class LinkingSymbolResolver : public LegacyJITSymbolResolver {
+public:
+  // Parent must outlive this resolver; Resolver is the client-supplied
+  // fallback consulted after the engine's own symbols.
+  LinkingSymbolResolver(MCJIT &Parent,
+                        std::shared_ptr<LegacyJITSymbolResolver> Resolver)
+      : ParentEngine(Parent), ClientResolver(std::move(Resolver)) {}
+
+  // Searches the parent engine first, then the client resolver (see MCJIT.cpp).
+  JITSymbol findSymbol(const std::string &Name) override;
+
+  // MCJIT doesn't support logical dylibs.
+  JITSymbol findSymbolInLogicalDylib(const std::string &Name) override {
+    return nullptr;
+  }
+
+private:
+  MCJIT &ParentEngine;                                    // non-owning back-reference
+  std::shared_ptr<LegacyJITSymbolResolver> ClientResolver; // shared fallback resolver
+  void anchor() override;
+};
+
+// About Module states: added->loaded->finalized.
+//
+// The purpose of the "added" state is having modules in standby. (added=known
+// but not compiled). The idea is that you can add a module to provide function
+// definitions but if nothing in that module is referenced by a module in which
+// a function is executed (note the wording here because it's not exactly the
+// ideal case) then the module never gets compiled. This is sort of lazy
+// compilation.
+//
+// The purpose of the "loaded" state (loaded=compiled and required sections
+// copied into local memory but not yet ready for execution) is to have an
+// intermediate state wherein clients can remap the addresses of sections, using
+// MCJIT::mapSectionAddress, (in preparation for later copying to a new location
+// or an external process) before relocations and page permissions are applied.
+//
+// It might not be obvious at first glance, but the "remote-mcjit" case in the
+// lli tool does this. In that case, the intermediate action is taken by the
+// RemoteMemoryManager in response to the notifyObjectLoaded function being
+// called.
+
+class MCJIT : public ExecutionEngine {
+ MCJIT(std::unique_ptr<Module> M, std::unique_ptr<TargetMachine> tm,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver);
+
+ typedef llvm::SmallPtrSet<Module *, 4> ModulePtrSet;
+
+ class OwningModuleContainer {
+ public:
+ OwningModuleContainer() {
+ }
+ ~OwningModuleContainer() {
+ freeModulePtrSet(AddedModules);
+ freeModulePtrSet(LoadedModules);
+ freeModulePtrSet(FinalizedModules);
+ }
+
+ ModulePtrSet::iterator begin_added() { return AddedModules.begin(); }
+ ModulePtrSet::iterator end_added() { return AddedModules.end(); }
+ iterator_range<ModulePtrSet::iterator> added() {
+ return make_range(begin_added(), end_added());
+ }
+
+ ModulePtrSet::iterator begin_loaded() { return LoadedModules.begin(); }
+ ModulePtrSet::iterator end_loaded() { return LoadedModules.end(); }
+
+ ModulePtrSet::iterator begin_finalized() { return FinalizedModules.begin(); }
+ ModulePtrSet::iterator end_finalized() { return FinalizedModules.end(); }
+
+ void addModule(std::unique_ptr<Module> M) {
+ AddedModules.insert(M.release());
+ }
+
+ bool removeModule(Module *M) {
+ return AddedModules.erase(M) || LoadedModules.erase(M) ||
+ FinalizedModules.erase(M);
+ }
+
+ bool hasModuleBeenAddedButNotLoaded(Module *M) {
+ return AddedModules.contains(M);
+ }
+
+ bool hasModuleBeenLoaded(Module *M) {
+ // If the module is in either the "loaded" or "finalized" sections it
+ // has been loaded.
+ return LoadedModules.contains(M) || FinalizedModules.contains(M);
+ }
+
+ bool hasModuleBeenFinalized(Module *M) {
+ return FinalizedModules.contains(M);
+ }
+
+ bool ownsModule(Module* M) {
+ return AddedModules.contains(M) || LoadedModules.contains(M) ||
+ FinalizedModules.contains(M);
+ }
+
+ void markModuleAsLoaded(Module *M) {
+ // This checks against logic errors in the MCJIT implementation.
+ // This function should never be called with either a Module that MCJIT
+ // does not own or a Module that has already been loaded and/or finalized.
+ assert(AddedModules.count(M) &&
+ "markModuleAsLoaded: Module not found in AddedModules");
+
+ // Remove the module from the "Added" set.
+ AddedModules.erase(M);
+
+ // Add the Module to the "Loaded" set.
+ LoadedModules.insert(M);
+ }
+
+ void markModuleAsFinalized(Module *M) {
+ // This checks against logic errors in the MCJIT implementation.
+ // This function should never be called with either a Module that MCJIT
+ // does not own, a Module that has not been loaded or a Module that has
+ // already been finalized.
+ assert(LoadedModules.count(M) &&
+ "markModuleAsFinalized: Module not found in LoadedModules");
+
+ // Remove the module from the "Loaded" section of the list.
+ LoadedModules.erase(M);
+
+ // Add the Module to the "Finalized" section of the list by inserting it
+ // before the 'end' iterator.
+ FinalizedModules.insert(M);
+ }
+
+ void markAllLoadedModulesAsFinalized() {
+ for (Module *M : LoadedModules)
+ FinalizedModules.insert(M);
+ LoadedModules.clear();
+ }
+
+ private:
+ ModulePtrSet AddedModules;
+ ModulePtrSet LoadedModules;
+ ModulePtrSet FinalizedModules;
+
+ void freeModulePtrSet(ModulePtrSet& MPS) {
+ // Go through the module set and delete everything.
+ for (Module *M : MPS)
+ delete M;
+ MPS.clear();
+ }
+ };
+
+ std::unique_ptr<TargetMachine> TM;
+ MCContext *Ctx;
+ std::shared_ptr<MCJITMemoryManager> MemMgr;
+ LinkingSymbolResolver Resolver;
+ RuntimeDyld Dyld;
+ std::vector<JITEventListener*> EventListeners;
+
+ OwningModuleContainer OwnedModules;
+
+ SmallVector<object::OwningBinary<object::Archive>, 2> Archives;
+ SmallVector<std::unique_ptr<MemoryBuffer>, 2> Buffers;
+
+ SmallVector<std::unique_ptr<object::ObjectFile>, 2> LoadedObjects;
+
+ // An optional ObjectCache to be notified of compiled objects and used to
+ // perform lookup of pre-compiled code to avoid re-compilation.
+ ObjectCache *ObjCache;
+
+ Function *FindFunctionNamedInModulePtrSet(StringRef FnName,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+ GlobalVariable *FindGlobalVariableNamedInModulePtrSet(StringRef Name,
+ bool AllowInternal,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+ void runStaticConstructorsDestructorsInModulePtrSet(bool isDtors,
+ ModulePtrSet::iterator I,
+ ModulePtrSet::iterator E);
+
+public:
+ ~MCJIT() override;
+
+ /// @name ExecutionEngine interface implementation
+ /// @{
+ void addModule(std::unique_ptr<Module> M) override;
+ void addObjectFile(std::unique_ptr<object::ObjectFile> O) override;
+ void addObjectFile(object::OwningBinary<object::ObjectFile> O) override;
+ void addArchive(object::OwningBinary<object::Archive> O) override;
+ bool removeModule(Module *M) override;
+
+ /// FindFunctionNamed - Search all of the active modules to find the function that
+ /// defines FnName. This is very slow operation and shouldn't be used for
+ /// general code.
+ Function *FindFunctionNamed(StringRef FnName) override;
+
+ /// FindGlobalVariableNamed - Search all of the active modules to find the
+ /// global variable that defines Name. This is very slow operation and
+ /// shouldn't be used for general code.
+ GlobalVariable *FindGlobalVariableNamed(StringRef Name,
+ bool AllowInternal = false) override;
+
+ /// Sets the object manager that MCJIT should use to avoid compilation.
+ void setObjectCache(ObjectCache *manager) override;
+
+ void setProcessAllSections(bool ProcessAllSections) override {
+ Dyld.setProcessAllSections(ProcessAllSections);
+ }
+
+ void generateCodeForModule(Module *M) override;
+
+ /// finalizeObject - ensure the module is fully processed and is usable.
+ ///
+ /// It is the user-level function for completing the process of making the
+ /// object usable for execution. It should be called after sections within an
+ /// object have been relocated using mapSectionAddress. When this method is
+ /// called the MCJIT execution engine will reapply relocations for a loaded
+ /// object.
+ /// Is it OK to finalize a set of modules, add modules and finalize again.
+ // FIXME: Do we really need both of these?
+ void finalizeObject() override;
+ virtual void finalizeModule(Module *);
+ void finalizeLoadedModules();
+
+ /// runStaticConstructorsDestructors - This method is used to execute all of
+ /// the static constructors or destructors for a program.
+ ///
+ /// \param isDtors - Run the destructors instead of constructors.
+ void runStaticConstructorsDestructors(bool isDtors) override;
+
+ void *getPointerToFunction(Function *F) override;
+
+ GenericValue runFunction(Function *F,
+ ArrayRef<GenericValue> ArgValues) override;
+
+ /// getPointerToNamedFunction - This method returns the address of the
+ /// specified function by using the dlsym function call. As such it is only
+ /// useful for resolving library symbols, not code generated symbols.
+ ///
+ /// If AbortOnFailure is false and no function with the given name is
+ /// found, this function silently returns a null pointer. Otherwise,
+ /// it prints a message to stderr and aborts.
+ ///
+ void *getPointerToNamedFunction(StringRef Name,
+ bool AbortOnFailure = true) override;
+
+ /// mapSectionAddress - map a section to its target address space value.
+ /// Map the address of a JIT section as returned from the memory manager
+ /// to the address in the target process as the running code will see it.
+ /// This is the address which will be used for relocation resolution.
+ void mapSectionAddress(const void *LocalAddress,
+ uint64_t TargetAddress) override {
+ Dyld.mapSectionAddress(LocalAddress, TargetAddress);
+ }
+ void RegisterJITEventListener(JITEventListener *L) override;
+ void UnregisterJITEventListener(JITEventListener *L) override;
+
+  // If successful, these functions will implicitly finalize all loaded objects.
+ // To get a function address within MCJIT without causing a finalize, use
+ // getSymbolAddress.
+ uint64_t getGlobalValueAddress(const std::string &Name) override;
+ uint64_t getFunctionAddress(const std::string &Name) override;
+
+ TargetMachine *getTargetMachine() override { return TM.get(); }
+
+ /// @}
+ /// @name (Private) Registration Interfaces
+ /// @{
+
+ static void Register() {
+ MCJITCtor = createJIT;
+ }
+
+ static ExecutionEngine *
+ createJIT(std::unique_ptr<Module> M, std::string *ErrorStr,
+ std::shared_ptr<MCJITMemoryManager> MemMgr,
+ std::shared_ptr<LegacyJITSymbolResolver> Resolver,
+ std::unique_ptr<TargetMachine> TM);
+
+ // @}
+
+ // Takes a mangled name and returns the corresponding JITSymbol (if a
+ // definition of that mangled name has been added to the JIT).
+ JITSymbol findSymbol(const std::string &Name, bool CheckFunctionsOnly);
+
+ // DEPRECATED - Please use findSymbol instead.
+ //
+ // This is not directly exposed via the ExecutionEngine API, but it is
+ // used by the LinkingMemoryManager.
+ //
+ // getSymbolAddress takes an unmangled name and returns the corresponding
+ // JITSymbol if a definition of the name has been added to the JIT.
+ uint64_t getSymbolAddress(const std::string &Name,
+ bool CheckFunctionsOnly);
+
+protected:
+ /// emitObject -- Generate a JITed object in memory from the specified module
+ /// Currently, MCJIT only supports a single module and the module passed to
+ /// this function call is expected to be the contained module. The module
+ /// is passed as a parameter here to prepare for multiple module support in
+ /// the future.
+ std::unique_ptr<MemoryBuffer> emitObject(Module *M);
+
+ void notifyObjectLoaded(const object::ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L);
+ void notifyFreeingObject(const object::ObjectFile &Obj);
+
+ JITSymbol findExistingSymbol(const std::string &Name);
+ Module *findModuleForSymbol(const std::string &Name, bool CheckFunctionsOnly);
+};
+
+} // end llvm namespace
+
+#endif // LLVM_LIB_EXECUTIONENGINE_MCJIT_MCJIT_H
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/ya.make
new file mode 100644
index 0000000000..712d16c1d0
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/MCJIT/ya.make
@@ -0,0 +1,32 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm14
+ contrib/libs/llvm14/include
+ contrib/libs/llvm14/lib/ExecutionEngine
+ contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld
+ contrib/libs/llvm14/lib/IR
+ contrib/libs/llvm14/lib/Object
+ contrib/libs/llvm14/lib/Support
+ contrib/libs/llvm14/lib/Target
+)
+
+ADDINCL(
+ contrib/libs/llvm14/lib/ExecutionEngine/MCJIT
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ MCJIT.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
new file mode 100644
index 0000000000..e2a0cadb63
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
@@ -0,0 +1,382 @@
+//===----- CompileOnDemandLayer.cpp - Lazily emit IR on first call --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
+#include "llvm/ADT/Hashing.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/FormatVariadic.h"
+#include <string>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+static ThreadSafeModule extractSubModule(ThreadSafeModule &TSM,
+ StringRef Suffix,
+ GVPredicate ShouldExtract) {
+
+ auto DeleteExtractedDefs = [](GlobalValue &GV) {
+ // Bump the linkage: this global will be provided by the external module.
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+
+ // Delete the definition in the source module.
+ if (isa<Function>(GV)) {
+ auto &F = cast<Function>(GV);
+ F.deleteBody();
+ F.setPersonalityFn(nullptr);
+ } else if (isa<GlobalVariable>(GV)) {
+ cast<GlobalVariable>(GV).setInitializer(nullptr);
+ } else if (isa<GlobalAlias>(GV)) {
+ // We need to turn deleted aliases into function or variable decls based
+ // on the type of their aliasee.
+ auto &A = cast<GlobalAlias>(GV);
+ Constant *Aliasee = A.getAliasee();
+ assert(A.hasName() && "Anonymous alias?");
+ assert(Aliasee->hasName() && "Anonymous aliasee");
+ std::string AliasName = std::string(A.getName());
+
+ if (isa<Function>(Aliasee)) {
+ auto *F = cloneFunctionDecl(*A.getParent(), *cast<Function>(Aliasee));
+ A.replaceAllUsesWith(F);
+ A.eraseFromParent();
+ F->setName(AliasName);
+ } else if (isa<GlobalVariable>(Aliasee)) {
+ auto *G = cloneGlobalVariableDecl(*A.getParent(),
+ *cast<GlobalVariable>(Aliasee));
+ A.replaceAllUsesWith(G);
+ A.eraseFromParent();
+ G->setName(AliasName);
+ } else
+ llvm_unreachable("Alias to unsupported type");
+ } else
+ llvm_unreachable("Unsupported global type");
+ };
+
+ auto NewTSM = cloneToNewContext(TSM, ShouldExtract, DeleteExtractedDefs);
+ NewTSM.withModuleDo([&](Module &M) {
+ M.setModuleIdentifier((M.getModuleIdentifier() + Suffix).str());
+ });
+
+ return NewTSM;
+}
+
+namespace llvm {
+namespace orc {
+
+class PartitioningIRMaterializationUnit : public IRMaterializationUnit {
+public:
+ PartitioningIRMaterializationUnit(ExecutionSession &ES,
+ const IRSymbolMapper::ManglingOptions &MO,
+ ThreadSafeModule TSM,
+ CompileOnDemandLayer &Parent)
+ : IRMaterializationUnit(ES, MO, std::move(TSM)), Parent(Parent) {}
+
+ PartitioningIRMaterializationUnit(
+ ThreadSafeModule TSM, Interface I,
+ SymbolNameToDefinitionMap SymbolToDefinition,
+ CompileOnDemandLayer &Parent)
+ : IRMaterializationUnit(std::move(TSM), std::move(I),
+ std::move(SymbolToDefinition)),
+ Parent(Parent) {}
+
+private:
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ Parent.emitPartition(std::move(R), std::move(TSM),
+ std::move(SymbolToDefinition));
+ }
+
+ void discard(const JITDylib &V, const SymbolStringPtr &Name) override {
+ // All original symbols were materialized by the CODLayer and should be
+ // final. The function bodies provided by M should never be overridden.
+ llvm_unreachable("Discard should never be called on an "
+ "ExtractingIRMaterializationUnit");
+ }
+
+ mutable std::mutex SourceModuleMutex;
+ CompileOnDemandLayer &Parent;
+};
+
+Optional<CompileOnDemandLayer::GlobalValueSet>
+CompileOnDemandLayer::compileRequested(GlobalValueSet Requested) {
+ return std::move(Requested);
+}
+
+Optional<CompileOnDemandLayer::GlobalValueSet>
+CompileOnDemandLayer::compileWholeModule(GlobalValueSet Requested) {
+ return None;
+}
+
+CompileOnDemandLayer::CompileOnDemandLayer(
+ ExecutionSession &ES, IRLayer &BaseLayer, LazyCallThroughManager &LCTMgr,
+ IndirectStubsManagerBuilder BuildIndirectStubsManager)
+ : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+ LCTMgr(LCTMgr),
+ BuildIndirectStubsManager(std::move(BuildIndirectStubsManager)) {}
+
+void CompileOnDemandLayer::setPartitionFunction(PartitionFunction Partition) {
+ this->Partition = std::move(Partition);
+}
+
+void CompileOnDemandLayer::setImplMap(ImplSymbolMap *Imp) {
+ this->AliaseeImpls = Imp;
+}
+void CompileOnDemandLayer::emit(
+ std::unique_ptr<MaterializationResponsibility> R, ThreadSafeModule TSM) {
+ assert(TSM && "Null module");
+
+ auto &ES = getExecutionSession();
+
+ // Sort the callables and non-callables, build re-exports and lodge the
+ // actual module with the implementation dylib.
+ auto &PDR = getPerDylibResources(R->getTargetJITDylib());
+
+ SymbolAliasMap NonCallables;
+ SymbolAliasMap Callables;
+ TSM.withModuleDo([&](Module &M) {
+ // First, do some cleanup on the module:
+ cleanUpModule(M);
+ });
+
+ for (auto &KV : R->getSymbols()) {
+ auto &Name = KV.first;
+ auto &Flags = KV.second;
+ if (Flags.isCallable())
+ Callables[Name] = SymbolAliasMapEntry(Name, Flags);
+ else
+ NonCallables[Name] = SymbolAliasMapEntry(Name, Flags);
+ }
+
+ // Create a partitioning materialization unit and lodge it with the
+ // implementation dylib.
+ if (auto Err = PDR.getImplDylib().define(
+ std::make_unique<PartitioningIRMaterializationUnit>(
+ ES, *getManglingOptions(), std::move(TSM), *this))) {
+ ES.reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+
+ if (!NonCallables.empty())
+ if (auto Err =
+ R->replace(reexports(PDR.getImplDylib(), std::move(NonCallables),
+ JITDylibLookupFlags::MatchAllSymbols))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ if (!Callables.empty()) {
+ if (auto Err = R->replace(
+ lazyReexports(LCTMgr, PDR.getISManager(), PDR.getImplDylib(),
+ std::move(Callables), AliaseeImpls))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ }
+}
+
+CompileOnDemandLayer::PerDylibResources &
+CompileOnDemandLayer::getPerDylibResources(JITDylib &TargetD) {
+ std::lock_guard<std::mutex> Lock(CODLayerMutex);
+
+ auto I = DylibResources.find(&TargetD);
+ if (I == DylibResources.end()) {
+ auto &ImplD =
+ getExecutionSession().createBareJITDylib(TargetD.getName() + ".impl");
+ JITDylibSearchOrder NewLinkOrder;
+ TargetD.withLinkOrderDo([&](const JITDylibSearchOrder &TargetLinkOrder) {
+ NewLinkOrder = TargetLinkOrder;
+ });
+
+ assert(!NewLinkOrder.empty() && NewLinkOrder.front().first == &TargetD &&
+ NewLinkOrder.front().second ==
+ JITDylibLookupFlags::MatchAllSymbols &&
+ "TargetD must be at the front of its own search order and match "
+ "non-exported symbol");
+ NewLinkOrder.insert(std::next(NewLinkOrder.begin()),
+ {&ImplD, JITDylibLookupFlags::MatchAllSymbols});
+ ImplD.setLinkOrder(NewLinkOrder, false);
+ TargetD.setLinkOrder(std::move(NewLinkOrder), false);
+
+ PerDylibResources PDR(ImplD, BuildIndirectStubsManager());
+ I = DylibResources.insert(std::make_pair(&TargetD, std::move(PDR))).first;
+ }
+
+ return I->second;
+}
+
+void CompileOnDemandLayer::cleanUpModule(Module &M) {
+ for (auto &F : M.functions()) {
+ if (F.isDeclaration())
+ continue;
+
+ if (F.hasAvailableExternallyLinkage()) {
+ F.deleteBody();
+ F.setPersonalityFn(nullptr);
+ continue;
+ }
+ }
+}
+
+void CompileOnDemandLayer::expandPartition(GlobalValueSet &Partition) {
+ // Expands the partition to ensure the following rules hold:
+ // (1) If any alias is in the partition, its aliasee is also in the partition.
+ // (2) If any aliasee is in the partition, its aliases are also in the
+  //     partition.
+ // (3) If any global variable is in the partition then all global variables
+ // are in the partition.
+ assert(!Partition.empty() && "Unexpected empty partition");
+
+ const Module &M = *(*Partition.begin())->getParent();
+ bool ContainsGlobalVariables = false;
+ std::vector<const GlobalValue *> GVsToAdd;
+
+ for (auto *GV : Partition)
+ if (isa<GlobalAlias>(GV))
+ GVsToAdd.push_back(
+ cast<GlobalValue>(cast<GlobalAlias>(GV)->getAliasee()));
+ else if (isa<GlobalVariable>(GV))
+ ContainsGlobalVariables = true;
+
+ for (auto &A : M.aliases())
+ if (Partition.count(cast<GlobalValue>(A.getAliasee())))
+ GVsToAdd.push_back(&A);
+
+ if (ContainsGlobalVariables)
+ for (auto &G : M.globals())
+ GVsToAdd.push_back(&G);
+
+ for (auto *GV : GVsToAdd)
+ Partition.insert(GV);
+}
+
+void CompileOnDemandLayer::emitPartition(
+ std::unique_ptr<MaterializationResponsibility> R, ThreadSafeModule TSM,
+ IRMaterializationUnit::SymbolNameToDefinitionMap Defs) {
+
+ // FIXME: Need a 'notify lazy-extracting/emitting' callback to tie the
+ // extracted module key, extracted module, and source module key
+ // together. This could be used, for example, to provide a specific
+ // memory manager instance to the linking layer.
+
+ auto &ES = getExecutionSession();
+ GlobalValueSet RequestedGVs;
+ for (auto &Name : R->getRequestedSymbols()) {
+ if (Name == R->getInitializerSymbol())
+ TSM.withModuleDo([&](Module &M) {
+ for (auto &GV : getStaticInitGVs(M))
+ RequestedGVs.insert(&GV);
+ });
+ else {
+ assert(Defs.count(Name) && "No definition for symbol");
+ RequestedGVs.insert(Defs[Name]);
+ }
+ }
+
+ /// Perform partitioning with the context lock held, since the partition
+ /// function is allowed to access the globals to compute the partition.
+ auto GVsToExtract =
+ TSM.withModuleDo([&](Module &M) { return Partition(RequestedGVs); });
+
+ // Take a 'None' partition to mean the whole module (as opposed to an empty
+ // partition, which means "materialize nothing"). Emit the whole module
+ // unmodified to the base layer.
+ if (GVsToExtract == None) {
+ Defs.clear();
+ BaseLayer.emit(std::move(R), std::move(TSM));
+ return;
+ }
+
+ // If the partition is empty, return the whole module to the symbol table.
+ if (GVsToExtract->empty()) {
+ if (auto Err =
+ R->replace(std::make_unique<PartitioningIRMaterializationUnit>(
+ std::move(TSM),
+ MaterializationUnit::Interface(R->getSymbols(),
+ R->getInitializerSymbol()),
+ std::move(Defs), *this))) {
+ getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ return;
+ }
+
+ // Ok -- we actually need to partition the symbols. Promote the symbol
+ // linkages/names, expand the partition to include any required symbols
+ // (i.e. symbols that can't be separated from our partition), and
+ // then extract the partition.
+ //
+ // FIXME: We apply this promotion once per partitioning. It's safe, but
+ // overkill.
+ auto ExtractedTSM =
+ TSM.withModuleDo([&](Module &M) -> Expected<ThreadSafeModule> {
+ auto PromotedGlobals = PromoteSymbols(M);
+ if (!PromotedGlobals.empty()) {
+
+ MangleAndInterner Mangle(ES, M.getDataLayout());
+ SymbolFlagsMap SymbolFlags;
+ IRSymbolMapper::add(ES, *getManglingOptions(),
+ PromotedGlobals, SymbolFlags);
+
+ if (auto Err = R->defineMaterializing(SymbolFlags))
+ return std::move(Err);
+ }
+
+ expandPartition(*GVsToExtract);
+
+ // Submodule name is given by hashing the names of the globals.
+ std::string SubModuleName;
+ {
+ std::vector<const GlobalValue*> HashGVs;
+ HashGVs.reserve(GVsToExtract->size());
+ for (auto *GV : *GVsToExtract)
+ HashGVs.push_back(GV);
+ llvm::sort(HashGVs, [](const GlobalValue *LHS, const GlobalValue *RHS) {
+ return LHS->getName() < RHS->getName();
+ });
+ hash_code HC(0);
+ for (auto *GV : HashGVs) {
+ assert(GV->hasName() && "All GVs to extract should be named by now");
+ auto GVName = GV->getName();
+ HC = hash_combine(HC, hash_combine_range(GVName.begin(), GVName.end()));
+ }
+ raw_string_ostream(SubModuleName)
+ << ".submodule."
+ << formatv(sizeof(size_t) == 8 ? "{0:x16}" : "{0:x8}",
+ static_cast<size_t>(HC))
+ << ".ll";
+ }
+
+        // Extract the requested partition (plus any necessary aliases) and
+ // put the rest back into the impl dylib.
+ auto ShouldExtract = [&](const GlobalValue &GV) -> bool {
+ return GVsToExtract->count(&GV);
+ };
+
+ return extractSubModule(TSM, SubModuleName , ShouldExtract);
+ });
+
+ if (!ExtractedTSM) {
+ ES.reportError(ExtractedTSM.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ if (auto Err = R->replace(std::make_unique<PartitioningIRMaterializationUnit>(
+ ES, *getManglingOptions(), std::move(TSM), *this))) {
+ ES.reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ BaseLayer.emit(std::move(R), std::move(*ExtractedTSM));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/CompileUtils.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/CompileUtils.cpp
new file mode 100644
index 0000000000..f342470052
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/CompileUtils.cpp
@@ -0,0 +1,95 @@
+//===------ CompileUtils.cpp - Utilities for compiling IR in the JIT ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ExecutionEngine/ObjectCache.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SmallVectorMemoryBuffer.h"
+#include "llvm/Target/TargetMachine.h"
+
+#include <algorithm>
+
+namespace llvm {
+namespace orc {
+
+IRSymbolMapper::ManglingOptions
+irManglingOptionsFromTargetOptions(const TargetOptions &Opts) {
+ IRSymbolMapper::ManglingOptions MO;
+
+ MO.EmulatedTLS = Opts.EmulatedTLS;
+
+ return MO;
+}
+
+/// Compile a Module to an ObjectFile.
+Expected<SimpleCompiler::CompileResult> SimpleCompiler::operator()(Module &M) {
+ CompileResult CachedObject = tryToLoadFromObjectCache(M);
+ if (CachedObject)
+ return std::move(CachedObject);
+
+ SmallVector<char, 0> ObjBufferSV;
+
+ {
+ raw_svector_ostream ObjStream(ObjBufferSV);
+
+ legacy::PassManager PM;
+ MCContext *Ctx;
+ if (TM.addPassesToEmitMC(PM, Ctx, ObjStream))
+ return make_error<StringError>("Target does not support MC emission",
+ inconvertibleErrorCode());
+ PM.run(M);
+ }
+
+ auto ObjBuffer = std::make_unique<SmallVectorMemoryBuffer>(
+ std::move(ObjBufferSV), M.getModuleIdentifier() + "-jitted-objectbuffer",
+ /*RequiresNullTerminator=*/false);
+
+ auto Obj = object::ObjectFile::createObjectFile(ObjBuffer->getMemBufferRef());
+
+ if (!Obj)
+ return Obj.takeError();
+
+ notifyObjectCompiled(M, *ObjBuffer);
+ return std::move(ObjBuffer);
+}
+
+SimpleCompiler::CompileResult
+SimpleCompiler::tryToLoadFromObjectCache(const Module &M) {
+ if (!ObjCache)
+ return CompileResult();
+
+ return ObjCache->getObject(&M);
+}
+
+void SimpleCompiler::notifyObjectCompiled(const Module &M,
+ const MemoryBuffer &ObjBuffer) {
+ if (ObjCache)
+ ObjCache->notifyObjectCompiled(&M, ObjBuffer.getMemBufferRef());
+}
+
+ConcurrentIRCompiler::ConcurrentIRCompiler(JITTargetMachineBuilder JTMB,
+ ObjectCache *ObjCache)
+ : IRCompiler(irManglingOptionsFromTargetOptions(JTMB.getOptions())),
+ JTMB(std::move(JTMB)), ObjCache(ObjCache) {}
+
+Expected<std::unique_ptr<MemoryBuffer>>
+ConcurrentIRCompiler::operator()(Module &M) {
+ auto TM = cantFail(JTMB.createTargetMachine());
+ SimpleCompiler C(*TM, ObjCache);
+ return C(M);
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Core.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Core.cpp
new file mode 100644
index 0000000000..e5cb810391
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Core.cpp
@@ -0,0 +1,3030 @@
+//===--- Core.cpp - Core ORC APIs (MaterializationUnit, JITDylib, etc.) ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+
+#include <condition_variable>
+#include <future>
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+char ResourceTrackerDefunct::ID = 0;
+char FailedToMaterialize::ID = 0;
+char SymbolsNotFound::ID = 0;
+char SymbolsCouldNotBeRemoved::ID = 0;
+char MissingSymbolDefinitions::ID = 0;
+char UnexpectedSymbolDefinitions::ID = 0;
+char MaterializationTask::ID = 0;
+
+RegisterDependenciesFunction NoDependenciesToRegister =
+ RegisterDependenciesFunction();
+
+void MaterializationUnit::anchor() {}
+
+ResourceTracker::ResourceTracker(JITDylibSP JD) {
+ assert((reinterpret_cast<uintptr_t>(JD.get()) & 0x1) == 0 &&
+ "JITDylib must be two byte aligned");
+ JD->Retain();
+ JDAndFlag.store(reinterpret_cast<uintptr_t>(JD.get()));
+}
+
+ResourceTracker::~ResourceTracker() {
+ getJITDylib().getExecutionSession().destroyResourceTracker(*this);
+ getJITDylib().Release();
+}
+
+Error ResourceTracker::remove() {
+ return getJITDylib().getExecutionSession().removeResourceTracker(*this);
+}
+
+void ResourceTracker::transferTo(ResourceTracker &DstRT) {
+ getJITDylib().getExecutionSession().transferResourceTracker(DstRT, *this);
+}
+
+void ResourceTracker::makeDefunct() {
+ uintptr_t Val = JDAndFlag.load();
+ Val |= 0x1U;
+ JDAndFlag.store(Val);
+}
+
+ResourceManager::~ResourceManager() {}
+
+ResourceTrackerDefunct::ResourceTrackerDefunct(ResourceTrackerSP RT)
+ : RT(std::move(RT)) {}
+
+std::error_code ResourceTrackerDefunct::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void ResourceTrackerDefunct::log(raw_ostream &OS) const {
+ OS << "Resource tracker " << (void *)RT.get() << " became defunct";
+}
+
+FailedToMaterialize::FailedToMaterialize(
+ std::shared_ptr<SymbolDependenceMap> Symbols)
+ : Symbols(std::move(Symbols)) {
+ assert(!this->Symbols->empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code FailedToMaterialize::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void FailedToMaterialize::log(raw_ostream &OS) const {
+ OS << "Failed to materialize symbols: " << *Symbols;
+}
+
+SymbolsNotFound::SymbolsNotFound(std::shared_ptr<SymbolStringPool> SSP,
+ SymbolNameSet Symbols)
+ : SSP(std::move(SSP)) {
+ for (auto &Sym : Symbols)
+ this->Symbols.push_back(Sym);
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+SymbolsNotFound::SymbolsNotFound(std::shared_ptr<SymbolStringPool> SSP,
+ SymbolNameVector Symbols)
+ : SSP(std::move(SSP)), Symbols(std::move(Symbols)) {
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code SymbolsNotFound::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void SymbolsNotFound::log(raw_ostream &OS) const {
+ OS << "Symbols not found: " << Symbols;
+}
+
+SymbolsCouldNotBeRemoved::SymbolsCouldNotBeRemoved(
+ std::shared_ptr<SymbolStringPool> SSP, SymbolNameSet Symbols)
+ : SSP(std::move(SSP)), Symbols(std::move(Symbols)) {
+ assert(!this->Symbols.empty() && "Can not fail to resolve an empty set");
+}
+
+std::error_code SymbolsCouldNotBeRemoved::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnknownORCError);
+}
+
+void SymbolsCouldNotBeRemoved::log(raw_ostream &OS) const {
+ OS << "Symbols could not be removed: " << Symbols;
+}
+
+std::error_code MissingSymbolDefinitions::convertToErrorCode() const {
+ return orcError(OrcErrorCode::MissingSymbolDefinitions);
+}
+
+void MissingSymbolDefinitions::log(raw_ostream &OS) const {
+ OS << "Missing definitions in module " << ModuleName
+ << ": " << Symbols;
+}
+
+std::error_code UnexpectedSymbolDefinitions::convertToErrorCode() const {
+ return orcError(OrcErrorCode::UnexpectedSymbolDefinitions);
+}
+
+void UnexpectedSymbolDefinitions::log(raw_ostream &OS) const {
+ OS << "Unexpected definitions in module " << ModuleName
+ << ": " << Symbols;
+}
+
+AsynchronousSymbolQuery::AsynchronousSymbolQuery(
+ const SymbolLookupSet &Symbols, SymbolState RequiredState,
+ SymbolsResolvedCallback NotifyComplete)
+ : NotifyComplete(std::move(NotifyComplete)), RequiredState(RequiredState) {
+ assert(RequiredState >= SymbolState::Resolved &&
+ "Cannot query for a symbols that have not reached the resolve state "
+ "yet");
+
+ OutstandingSymbolsCount = Symbols.size();
+
+ for (auto &KV : Symbols)
+ ResolvedSymbols[KV.first] = nullptr;
+}
+
+void AsynchronousSymbolQuery::notifySymbolMetRequiredState(
+ const SymbolStringPtr &Name, JITEvaluatedSymbol Sym) {
+ auto I = ResolvedSymbols.find(Name);
+ assert(I != ResolvedSymbols.end() &&
+ "Resolving symbol outside the requested set");
+ assert(I->second.getAddress() == 0 && "Redundantly resolving symbol Name");
+
+ // If this is a materialization-side-effects-only symbol then drop it,
+ // otherwise update its map entry with its resolved address.
+ if (Sym.getFlags().hasMaterializationSideEffectsOnly())
+ ResolvedSymbols.erase(I);
+ else
+ I->second = std::move(Sym);
+ --OutstandingSymbolsCount;
+}
+
+void AsynchronousSymbolQuery::handleComplete(ExecutionSession &ES) {
+ assert(OutstandingSymbolsCount == 0 &&
+ "Symbols remain, handleComplete called prematurely");
+
+ class RunQueryCompleteTask : public Task {
+ public:
+ RunQueryCompleteTask(SymbolMap ResolvedSymbols,
+ SymbolsResolvedCallback NotifyComplete)
+ : ResolvedSymbols(std::move(ResolvedSymbols)),
+ NotifyComplete(std::move(NotifyComplete)) {}
+ void printDescription(raw_ostream &OS) override {
+ OS << "Execute query complete callback for " << ResolvedSymbols;
+ }
+ void run() override { NotifyComplete(std::move(ResolvedSymbols)); }
+
+ private:
+ SymbolMap ResolvedSymbols;
+ SymbolsResolvedCallback NotifyComplete;
+ };
+
+ auto T = std::make_unique<RunQueryCompleteTask>(std::move(ResolvedSymbols),
+ std::move(NotifyComplete));
+ NotifyComplete = SymbolsResolvedCallback();
+ ES.dispatchTask(std::move(T));
+}
+
+void AsynchronousSymbolQuery::handleFailed(Error Err) {
+ assert(QueryRegistrations.empty() && ResolvedSymbols.empty() &&
+ OutstandingSymbolsCount == 0 &&
+ "Query should already have been abandoned");
+ NotifyComplete(std::move(Err));
+ NotifyComplete = SymbolsResolvedCallback();
+}
+
+void AsynchronousSymbolQuery::addQueryDependence(JITDylib &JD,
+ SymbolStringPtr Name) {
+ bool Added = QueryRegistrations[&JD].insert(std::move(Name)).second;
+ (void)Added;
+ assert(Added && "Duplicate dependence notification?");
+}
+
+void AsynchronousSymbolQuery::removeQueryDependence(
+ JITDylib &JD, const SymbolStringPtr &Name) {
+ auto QRI = QueryRegistrations.find(&JD);
+ assert(QRI != QueryRegistrations.end() &&
+ "No dependencies registered for JD");
+ assert(QRI->second.count(Name) && "No dependency on Name in JD");
+ QRI->second.erase(Name);
+ if (QRI->second.empty())
+ QueryRegistrations.erase(QRI);
+}
+
+void AsynchronousSymbolQuery::dropSymbol(const SymbolStringPtr &Name) {
+ auto I = ResolvedSymbols.find(Name);
+ assert(I != ResolvedSymbols.end() &&
+ "Redundant removal of weakly-referenced symbol");
+ ResolvedSymbols.erase(I);
+ --OutstandingSymbolsCount;
+}
+
+void AsynchronousSymbolQuery::detach() {
+ ResolvedSymbols.clear();
+ OutstandingSymbolsCount = 0;
+ for (auto &KV : QueryRegistrations)
+ KV.first->detachQueryHelper(*this, KV.second);
+ QueryRegistrations.clear();
+}
+
+AbsoluteSymbolsMaterializationUnit::AbsoluteSymbolsMaterializationUnit(
+ SymbolMap Symbols)
+ : MaterializationUnit(extractFlags(Symbols)), Symbols(std::move(Symbols)) {}
+
+StringRef AbsoluteSymbolsMaterializationUnit::getName() const {
+ return "<Absolute Symbols>";
+}
+
+void AbsoluteSymbolsMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+ // No dependencies, so these calls can't fail.
+ cantFail(R->notifyResolved(Symbols));
+ cantFail(R->notifyEmitted());
+}
+
+void AbsoluteSymbolsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(Symbols.count(Name) && "Symbol is not part of this MU");
+ Symbols.erase(Name);
+}
+
+MaterializationUnit::Interface
+AbsoluteSymbolsMaterializationUnit::extractFlags(const SymbolMap &Symbols) {
+ SymbolFlagsMap Flags;
+ for (const auto &KV : Symbols)
+ Flags[KV.first] = KV.second.getFlags();
+ return MaterializationUnit::Interface(std::move(Flags), nullptr);
+}
+
+ReExportsMaterializationUnit::ReExportsMaterializationUnit(
+ JITDylib *SourceJD, JITDylibLookupFlags SourceJDLookupFlags,
+ SymbolAliasMap Aliases)
+ : MaterializationUnit(extractFlags(Aliases)), SourceJD(SourceJD),
+ SourceJDLookupFlags(SourceJDLookupFlags), Aliases(std::move(Aliases)) {}
+
+StringRef ReExportsMaterializationUnit::getName() const {
+ return "<Reexports>";
+}
+
+void ReExportsMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+
+ auto &ES = R->getTargetJITDylib().getExecutionSession();
+ JITDylib &TgtJD = R->getTargetJITDylib();
+ JITDylib &SrcJD = SourceJD ? *SourceJD : TgtJD;
+
+ // Find the set of requested aliases and aliasees. Return any unrequested
+ // aliases back to the JITDylib so as to not prematurely materialize any
+ // aliasees.
+ auto RequestedSymbols = R->getRequestedSymbols();
+ SymbolAliasMap RequestedAliases;
+
+ for (auto &Name : RequestedSymbols) {
+ auto I = Aliases.find(Name);
+ assert(I != Aliases.end() && "Symbol not found in aliases map?");
+ RequestedAliases[Name] = std::move(I->second);
+ Aliases.erase(I);
+ }
+
+ LLVM_DEBUG({
+ ES.runSessionLocked([&]() {
+ dbgs() << "materializing reexports: target = " << TgtJD.getName()
+ << ", source = " << SrcJD.getName() << " " << RequestedAliases
+ << "\n";
+ });
+ });
+
+ if (!Aliases.empty()) {
+ auto Err = SourceJD ? R->replace(reexports(*SourceJD, std::move(Aliases),
+ SourceJDLookupFlags))
+ : R->replace(symbolAliases(std::move(Aliases)));
+
+ if (Err) {
+ // FIXME: Should this be reported / treated as failure to materialize?
+ // Or should this be treated as a sanctioned bailing-out?
+ ES.reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+ }
+
+  // The OnResolveInfo struct will hold the aliases and responsibility for each
+ // query in the list.
+ struct OnResolveInfo {
+ OnResolveInfo(std::unique_ptr<MaterializationResponsibility> R,
+ SymbolAliasMap Aliases)
+ : R(std::move(R)), Aliases(std::move(Aliases)) {}
+
+ std::unique_ptr<MaterializationResponsibility> R;
+ SymbolAliasMap Aliases;
+ };
+
+ // Build a list of queries to issue. In each round we build a query for the
+ // largest set of aliases that we can resolve without encountering a chain of
+ // aliases (e.g. Foo -> Bar, Bar -> Baz). Such a chain would deadlock as the
+ // query would be waiting on a symbol that it itself had to resolve. Creating
+ // a new query for each link in such a chain eliminates the possibility of
+ // deadlock. In practice chains are likely to be rare, and this algorithm will
+ // usually result in a single query to issue.
+
+ std::vector<std::pair<SymbolLookupSet, std::shared_ptr<OnResolveInfo>>>
+ QueryInfos;
+ while (!RequestedAliases.empty()) {
+ SymbolNameSet ResponsibilitySymbols;
+ SymbolLookupSet QuerySymbols;
+ SymbolAliasMap QueryAliases;
+
+ // Collect as many aliases as we can without including a chain.
+ for (auto &KV : RequestedAliases) {
+ // Chain detected. Skip this symbol for this round.
+ if (&SrcJD == &TgtJD && (QueryAliases.count(KV.second.Aliasee) ||
+ RequestedAliases.count(KV.second.Aliasee)))
+ continue;
+
+ ResponsibilitySymbols.insert(KV.first);
+ QuerySymbols.add(KV.second.Aliasee,
+ KV.second.AliasFlags.hasMaterializationSideEffectsOnly()
+ ? SymbolLookupFlags::WeaklyReferencedSymbol
+ : SymbolLookupFlags::RequiredSymbol);
+ QueryAliases[KV.first] = std::move(KV.second);
+ }
+
+ // Remove the aliases collected this round from the RequestedAliases map.
+ for (auto &KV : QueryAliases)
+ RequestedAliases.erase(KV.first);
+
+ assert(!QuerySymbols.empty() && "Alias cycle detected!");
+
+ auto NewR = R->delegate(ResponsibilitySymbols);
+ if (!NewR) {
+ ES.reportError(NewR.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ auto QueryInfo = std::make_shared<OnResolveInfo>(std::move(*NewR),
+ std::move(QueryAliases));
+ QueryInfos.push_back(
+ make_pair(std::move(QuerySymbols), std::move(QueryInfo)));
+ }
+
+ // Issue the queries.
+ while (!QueryInfos.empty()) {
+ auto QuerySymbols = std::move(QueryInfos.back().first);
+ auto QueryInfo = std::move(QueryInfos.back().second);
+
+ QueryInfos.pop_back();
+
+ auto RegisterDependencies = [QueryInfo,
+ &SrcJD](const SymbolDependenceMap &Deps) {
+ // If there were no materializing symbols, just bail out.
+ if (Deps.empty())
+ return;
+
+ // Otherwise the only deps should be on SrcJD.
+ assert(Deps.size() == 1 && Deps.count(&SrcJD) &&
+ "Unexpected dependencies for reexports");
+
+ auto &SrcJDDeps = Deps.find(&SrcJD)->second;
+ SymbolDependenceMap PerAliasDepsMap;
+ auto &PerAliasDeps = PerAliasDepsMap[&SrcJD];
+
+ for (auto &KV : QueryInfo->Aliases)
+ if (SrcJDDeps.count(KV.second.Aliasee)) {
+ PerAliasDeps = {KV.second.Aliasee};
+ QueryInfo->R->addDependencies(KV.first, PerAliasDepsMap);
+ }
+ };
+
+ auto OnComplete = [QueryInfo](Expected<SymbolMap> Result) {
+ auto &ES = QueryInfo->R->getTargetJITDylib().getExecutionSession();
+ if (Result) {
+ SymbolMap ResolutionMap;
+ for (auto &KV : QueryInfo->Aliases) {
+ assert((KV.second.AliasFlags.hasMaterializationSideEffectsOnly() ||
+ Result->count(KV.second.Aliasee)) &&
+ "Result map missing entry?");
+ // Don't try to resolve materialization-side-effects-only symbols.
+ if (KV.second.AliasFlags.hasMaterializationSideEffectsOnly())
+ continue;
+
+ ResolutionMap[KV.first] = JITEvaluatedSymbol(
+ (*Result)[KV.second.Aliasee].getAddress(), KV.second.AliasFlags);
+ }
+ if (auto Err = QueryInfo->R->notifyResolved(ResolutionMap)) {
+ ES.reportError(std::move(Err));
+ QueryInfo->R->failMaterialization();
+ return;
+ }
+ if (auto Err = QueryInfo->R->notifyEmitted()) {
+ ES.reportError(std::move(Err));
+ QueryInfo->R->failMaterialization();
+ return;
+ }
+ } else {
+ ES.reportError(Result.takeError());
+ QueryInfo->R->failMaterialization();
+ }
+ };
+
+ ES.lookup(LookupKind::Static,
+ JITDylibSearchOrder({{&SrcJD, SourceJDLookupFlags}}),
+ QuerySymbols, SymbolState::Resolved, std::move(OnComplete),
+ std::move(RegisterDependencies));
+ }
+}
+
+// Drops the given alias from this unit: the symbol is being defined by some
+// other means, so this unit no longer needs to materialize it.
+void ReExportsMaterializationUnit::discard(const JITDylib &JD,
+                                           const SymbolStringPtr &Name) {
+  auto AliasItr = Aliases.find(Name);
+  assert(AliasItr != Aliases.end() &&
+         "Symbol not covered by this MaterializationUnit");
+  Aliases.erase(AliasItr);
+}
+
+// Derives this unit's interface from an alias map: each alias is advertised
+// under its own name with the flags recorded in its SymbolAliasMapEntry.
+// Re-exports never provide an init symbol, hence the nullptr.
+MaterializationUnit::Interface
+ReExportsMaterializationUnit::extractFlags(const SymbolAliasMap &Aliases) {
+  SymbolFlagsMap Flags;
+  for (auto &AliasKV : Aliases)
+    Flags[AliasKV.first] = AliasKV.second.AliasFlags;
+  return MaterializationUnit::Interface(std::move(Flags), nullptr);
+}
+
+// Builds an alias map that re-exports each symbol in Symbols from SourceJD
+// under its own name, carrying the flags reported by SourceJD.
+//
+// Returns an error if looking up the symbol flags in SourceJD fails.
+Expected<SymbolAliasMap> buildSimpleReexportsAliasMap(JITDylib &SourceJD,
+                                                      SymbolNameSet Symbols) {
+  // Build the lookup set once and hand it to lookupFlags. Note that Symbols
+  // must stay intact here: it is iterated below to build the result map, so
+  // it must not be moved into the lookup set.
+  SymbolLookupSet LookupSet(Symbols);
+  auto Flags = SourceJD.getExecutionSession().lookupFlags(
+      LookupKind::Static, {{&SourceJD, JITDylibLookupFlags::MatchAllSymbols}},
+      std::move(LookupSet));
+
+  if (!Flags)
+    return Flags.takeError();
+
+  SymbolAliasMap Result;
+  for (auto &Name : Symbols) {
+    assert(Flags->count(Name) && "Missing entry in flags map");
+    Result[Name] = SymbolAliasMapEntry(Name, (*Flags)[Name]);
+  }
+
+  return Result;
+}
+
+// Shared state for an in-progress lookup as it moves through phase one
+// (traversal of the JITDylib search order and definition generation).
+// Subclasses implement complete() / fail() to deliver the outcome for
+// flags-only and full lookups respectively.
+class InProgressLookupState {
+public:
+  InProgressLookupState(LookupKind K, JITDylibSearchOrder SearchOrder,
+                        SymbolLookupSet LookupSet, SymbolState RequiredState)
+      : K(K), SearchOrder(std::move(SearchOrder)),
+        LookupSet(std::move(LookupSet)), RequiredState(RequiredState) {
+    // All looked-up symbols start out as candidates for definition
+    // generation.
+    DefGeneratorCandidates = this->LookupSet;
+  }
+  virtual ~InProgressLookupState() {}
+  virtual void complete(std::unique_ptr<InProgressLookupState> IPLS) = 0;
+  virtual void fail(Error Err) = 0;
+
+  LookupKind K;
+  JITDylibSearchOrder SearchOrder;
+  SymbolLookupSet LookupSet;
+  SymbolState RequiredState;
+
+  // Released by subclasses (see their complete()/fail() overrides) before
+  // completion or failure callbacks run.
+  std::unique_lock<std::mutex> GeneratorLock;
+  size_t CurSearchOrderIndex = 0;
+  bool NewJITDylib = true;
+  SymbolLookupSet DefGeneratorCandidates;
+  SymbolLookupSet DefGeneratorNonCandidates;
+  std::vector<std::weak_ptr<DefinitionGenerator>> CurDefGeneratorStack;
+};
+
+// Lookup state for a flags-only lookup (lookupFlags): no symbol state is
+// required, and the result is delivered as a SymbolFlagsMap.
+class InProgressLookupFlagsState : public InProgressLookupState {
+public:
+  InProgressLookupFlagsState(
+      LookupKind K, JITDylibSearchOrder SearchOrder, SymbolLookupSet LookupSet,
+      unique_function<void(Expected<SymbolFlagsMap>)> OnComplete)
+      : InProgressLookupState(K, std::move(SearchOrder), std::move(LookupSet),
+                              SymbolState::NeverSearched),
+        OnComplete(std::move(OnComplete)) {}
+
+  void complete(std::unique_ptr<InProgressLookupState> IPLS) override {
+    GeneratorLock = {}; // Unlock and release.
+    auto &ES = SearchOrder.front().first->getExecutionSession();
+    ES.OL_completeLookupFlags(std::move(IPLS), std::move(OnComplete));
+  }
+
+  void fail(Error Err) override {
+    GeneratorLock = {}; // Unlock and release.
+    OnComplete(std::move(Err));
+  }
+
+private:
+  unique_function<void(Expected<SymbolFlagsMap>)> OnComplete;
+};
+
+// Lookup state for a full lookup: carries the asynchronous query and the
+// dependence-registration callback through to OL_completeLookup.
+class InProgressFullLookupState : public InProgressLookupState {
+public:
+  InProgressFullLookupState(LookupKind K, JITDylibSearchOrder SearchOrder,
+                            SymbolLookupSet LookupSet,
+                            SymbolState RequiredState,
+                            std::shared_ptr<AsynchronousSymbolQuery> Q,
+                            RegisterDependenciesFunction RegisterDependencies)
+      : InProgressLookupState(K, std::move(SearchOrder), std::move(LookupSet),
+                              RequiredState),
+        Q(std::move(Q)), RegisterDependencies(std::move(RegisterDependencies)) {
+  }
+
+  void complete(std::unique_ptr<InProgressLookupState> IPLS) override {
+    GeneratorLock = {}; // Unlock and release.
+    auto &ES = SearchOrder.front().first->getExecutionSession();
+    ES.OL_completeLookup(std::move(IPLS), std::move(Q),
+                         std::move(RegisterDependencies));
+  }
+
+  void fail(Error Err) override {
+    GeneratorLock = {};
+    // Detach the query from any symbols it is waiting on before failing it.
+    Q->detach();
+    Q->handleFailed(std::move(Err));
+  }
+
+private:
+  std::shared_ptr<AsynchronousSymbolQuery> Q;
+  RegisterDependenciesFunction RegisterDependencies;
+};
+
+// Generator that lazily re-exports symbols from SourceJD into the JITDylib
+// it is attached to. An empty Allow predicate admits all symbols.
+ReexportsGenerator::ReexportsGenerator(JITDylib &SourceJD,
+                                       JITDylibLookupFlags SourceJDLookupFlags,
+                                       SymbolPredicate Allow)
+    : SourceJD(SourceJD), SourceJDLookupFlags(SourceJDLookupFlags),
+      Allow(std::move(Allow)) {}
+
+// Called during lookup when symbols in LookupSet are not defined in JD.
+// Defines re-exports in JD for the subset of LookupSet that SourceJD can
+// provide (filtered through the Allow predicate, if set).
+Error ReexportsGenerator::tryToGenerate(LookupState &LS, LookupKind K,
+                                        JITDylib &JD,
+                                        JITDylibLookupFlags JDLookupFlags,
+                                        const SymbolLookupSet &LookupSet) {
+  assert(&JD != &SourceJD && "Cannot re-export from the same dylib");
+
+  // Use lookupFlags to find the subset of symbols that match our lookup.
+  auto Flags = JD.getExecutionSession().lookupFlags(
+      K, {{&SourceJD, JDLookupFlags}}, LookupSet);
+  if (!Flags)
+    return Flags.takeError();
+
+  // Create an alias map.
+  orc::SymbolAliasMap AliasMap;
+  for (auto &KV : *Flags)
+    if (!Allow || Allow(KV.first))
+      AliasMap[KV.first] = SymbolAliasMapEntry(KV.first, KV.second);
+
+  // Nothing matched (or everything was filtered out): nothing to define.
+  if (AliasMap.empty())
+    return Error::success();
+
+  // Define the re-exports.
+  return JD.define(reexports(SourceJD, AliasMap, SourceJDLookupFlags));
+}
+
+// LookupState is a move-only handle wrapping the in-progress lookup state;
+// the defaulted special members below just forward to the unique_ptr.
+LookupState::LookupState(std::unique_ptr<InProgressLookupState> IPLS)
+    : IPLS(std::move(IPLS)) {}
+
+// Replaces (or clears, with nullptr) the wrapped in-progress state.
+void LookupState::reset(InProgressLookupState *IPLS) { this->IPLS.reset(IPLS); }
+
+LookupState::LookupState() = default;
+LookupState::LookupState(LookupState &&) = default;
+LookupState &LookupState::operator=(LookupState &&) = default;
+LookupState::~LookupState() = default;
+
+// Resumes a suspended lookup, passing Err (success or failure) from the
+// definition generator back into the phase-one lookup machinery.
+void LookupState::continueLookup(Error Err) {
+  assert(IPLS && "Cannot call continueLookup on empty LookupState");
+  auto &ES = IPLS->SearchOrder.begin()->first->getExecutionSession();
+  ES.OL_applyQueryPhase1(std::move(IPLS), std::move(Err));
+}
+
+// Out-of-line virtual destructor anchors DefinitionGenerator's vtable here.
+DefinitionGenerator::~DefinitionGenerator() {}
+
+// Destructor only logs; resource teardown happens via clear() / trackers.
+JITDylib::~JITDylib() {
+  LLVM_DEBUG(dbgs() << "Destroying JITDylib " << getName() << "\n");
+}
+
+// Removes all resources from this JITDylib by removing every tracker
+// (including the default one). Tracker list is snapshotted under the session
+// lock; the removals themselves run outside it. Errors are joined so every
+// tracker is attempted even if an earlier removal fails.
+Error JITDylib::clear() {
+  std::vector<ResourceTrackerSP> TrackersToRemove;
+  ES.runSessionLocked([&]() {
+    assert(State != Closed && "JD is defunct");
+    for (auto &KV : TrackerSymbols)
+      TrackersToRemove.push_back(KV.first);
+    TrackersToRemove.push_back(getDefaultResourceTracker());
+  });
+
+  Error Err = Error::success();
+  for (auto &RT : TrackersToRemove)
+    Err = joinErrors(std::move(Err), RT->remove());
+  return Err;
+}
+
+// Returns this JITDylib's default tracker, creating it lazily on first use
+// (under the session lock).
+ResourceTrackerSP JITDylib::getDefaultResourceTracker() {
+  return ES.runSessionLocked([this] {
+    assert(State != Closed && "JD is defunct");
+    if (!DefaultTracker)
+      DefaultTracker = new ResourceTracker(this);
+    return DefaultTracker;
+  });
+}
+
+// Creates a fresh tracker associated with this JITDylib. Resources attached
+// to it can later be removed as a group via the tracker.
+ResourceTrackerSP JITDylib::createResourceTracker() {
+  return ES.runSessionLocked([this] {
+    assert(State == Open && "JD is defunct");
+    return ResourceTrackerSP(new ResourceTracker(this));
+  });
+}
+
+// Removes the given generator from this JITDylib's generator list. The
+// generator must currently be attached (asserted).
+void JITDylib::removeGenerator(DefinitionGenerator &G) {
+  ES.runSessionLocked([&] {
+    assert(State == Open && "JD is defunct");
+    auto I = llvm::find_if(DefGenerators,
+                           [&](const std::shared_ptr<DefinitionGenerator> &H) {
+                             return H.get() == &G;
+                           });
+    assert(I != DefGenerators.end() && "Generator not found");
+    DefGenerators.erase(I);
+  });
+}
+
+// Adds the given symbols to the symbol table in the Materializing state.
+// Weak definitions that clash with existing entries are silently dropped
+// (removed from the returned map); a clashing strong definition rolls back
+// all additions and returns a DuplicateDefinition error. On success, returns
+// the (possibly pruned) flags map actually defined.
+Expected<SymbolFlagsMap>
+JITDylib::defineMaterializing(SymbolFlagsMap SymbolFlags) {
+
+  return ES.runSessionLocked([&]() -> Expected<SymbolFlagsMap> {
+    std::vector<SymbolTable::iterator> AddedSyms;
+    std::vector<SymbolFlagsMap::iterator> RejectedWeakDefs;
+
+    for (auto SFItr = SymbolFlags.begin(), SFEnd = SymbolFlags.end();
+         SFItr != SFEnd; ++SFItr) {
+
+      auto &Name = SFItr->first;
+      auto &Flags = SFItr->second;
+
+      auto EntryItr = Symbols.find(Name);
+
+      // If the entry already exists...
+      if (EntryItr != Symbols.end()) {
+
+        // If this is a strong definition then error out.
+        if (!Flags.isWeak()) {
+          // Remove any symbols already added.
+          for (auto &SI : AddedSyms)
+            Symbols.erase(SI);
+
+          // FIXME: Return all duplicates.
+          return make_error<DuplicateDefinition>(std::string(*Name));
+        }
+
+        // Otherwise just make a note to discard this symbol after the loop.
+        RejectedWeakDefs.push_back(SFItr);
+        continue;
+      } else
+        EntryItr =
+          Symbols.insert(std::make_pair(Name, SymbolTableEntry(Flags))).first;
+
+      AddedSyms.push_back(EntryItr);
+      EntryItr->second.setState(SymbolState::Materializing);
+    }
+
+    // Remove any rejected weak definitions from the SymbolFlags map.
+    // (Erased back-to-front so earlier iterators stay valid.)
+    while (!RejectedWeakDefs.empty()) {
+      SymbolFlags.erase(RejectedWeakDefs.back());
+      RejectedWeakDefs.pop_back();
+    }
+
+    return SymbolFlags;
+  });
+}
+
+// Replaces the materializer for the symbols covered by MU.
+//
+// Called when FromMR bails out of materializing MU's symbols. If any covered
+// symbol already has queries pending we must run MU now: a new
+// MaterializationResponsibility is created under the session lock and MU is
+// dispatched as a task after the lock is released. Otherwise MU is
+// re-attached to the symbol table entries (as a shared UnmaterializedInfo)
+// so it can run lazily on the next lookup.
+Error JITDylib::replace(MaterializationResponsibility &FromMR,
+                        std::unique_ptr<MaterializationUnit> MU) {
+  assert(MU != nullptr && "Can not replace with a null MaterializationUnit");
+  std::unique_ptr<MaterializationUnit> MustRunMU;
+  std::unique_ptr<MaterializationResponsibility> MustRunMR;
+
+  auto Err =
+      ES.runSessionLocked([&, this]() -> Error {
+        // If the tracker is defunct we need to bail out immediately.
+        if (FromMR.RT->isDefunct())
+          return make_error<ResourceTrackerDefunct>(std::move(FromMR.RT));
+
+#ifndef NDEBUG
+        for (auto &KV : MU->getSymbols()) {
+          auto SymI = Symbols.find(KV.first);
+          assert(SymI != Symbols.end() && "Replacing unknown symbol");
+          assert(SymI->second.getState() == SymbolState::Materializing &&
+                 "Can not replace a symbol that is not materializing");
+          assert(!SymI->second.hasMaterializerAttached() &&
+                 "Symbol should not have materializer attached already");
+          assert(UnmaterializedInfos.count(KV.first) == 0 &&
+                 "Symbol being replaced should have no UnmaterializedInfo");
+        }
+#endif // NDEBUG
+
+        // If any symbol has pending queries against it then we need to
+        // materialize MU immediately.
+        for (auto &KV : MU->getSymbols()) {
+          auto MII = MaterializingInfos.find(KV.first);
+          if (MII != MaterializingInfos.end()) {
+            if (MII->second.hasQueriesPending()) {
+              MustRunMR = ES.createMaterializationResponsibility(
+                  *FromMR.RT, std::move(MU->SymbolFlags),
+                  std::move(MU->InitSymbol));
+              MustRunMU = std::move(MU);
+              return Error::success();
+            }
+          }
+        }
+
+        // Otherwise, make MU responsible for all the symbols.
+        auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU),
+                                                        FromMR.RT.get());
+        for (auto &KV : UMI->MU->getSymbols()) {
+          auto SymI = Symbols.find(KV.first);
+          assert(SymI->second.getState() == SymbolState::Materializing &&
+                 "Can not replace a symbol that is not materializing");
+          assert(!SymI->second.hasMaterializerAttached() &&
+                 "Can not replace a symbol that has a materializer attached");
+          assert(UnmaterializedInfos.count(KV.first) == 0 &&
+                 "Unexpected materializer entry in map");
+          SymI->second.setAddress(SymI->second.getAddress());
+          SymI->second.setMaterializerAttached(true);
+
+          auto &UMIEntry = UnmaterializedInfos[KV.first];
+          assert((!UMIEntry || !UMIEntry->MU) &&
+                 "Replacing symbol with materializer still attached");
+          UMIEntry = UMI;
+        }
+
+        return Error::success();
+      });
+
+  if (Err)
+    return Err;
+
+  // Dispatch MU outside the session lock if pending queries forced it.
+  if (MustRunMU) {
+    assert(MustRunMR && "MustRunMU set implies MustRunMR set");
+    ES.dispatchTask(std::make_unique<MaterializationTask>(
+        std::move(MustRunMU), std::move(MustRunMR)));
+  } else {
+    assert(!MustRunMR && "MustRunMU unset implies MustRunMR unset");
+  }
+
+  return Error::success();
+}
+
+// Creates a new MaterializationResponsibility, on FromMR's tracker, covering
+// the given subset of symbols. Fails if the tracker has become defunct.
+Expected<std::unique_ptr<MaterializationResponsibility>>
+JITDylib::delegate(MaterializationResponsibility &FromMR,
+                   SymbolFlagsMap SymbolFlags, SymbolStringPtr InitSymbol) {
+
+  return ES.runSessionLocked(
+      [&]() -> Expected<std::unique_ptr<MaterializationResponsibility>> {
+        if (FromMR.RT->isDefunct())
+          return make_error<ResourceTrackerDefunct>(std::move(FromMR.RT));
+
+        return ES.createMaterializationResponsibility(
+            *FromMR.RT, std::move(SymbolFlags), std::move(InitSymbol));
+      });
+}
+
+// Returns the subset of SymbolFlags that currently has queries pending
+// against it (i.e. the symbols some caller is actively waiting on). All
+// symbols must already have started materializing.
+SymbolNameSet
+JITDylib::getRequestedSymbols(const SymbolFlagsMap &SymbolFlags) const {
+  return ES.runSessionLocked([&]() {
+    SymbolNameSet RequestedSymbols;
+
+    for (auto &KV : SymbolFlags) {
+      assert(Symbols.count(KV.first) && "JITDylib does not cover this symbol?");
+      assert(Symbols.find(KV.first)->second.getState() !=
+                 SymbolState::NeverSearched &&
+             Symbols.find(KV.first)->second.getState() != SymbolState::Ready &&
+             "getRequestedSymbols can only be called for symbols that have "
+             "started materializing");
+      // No MaterializingInfo means no queries have attached to this symbol.
+      auto I = MaterializingInfos.find(KV.first);
+      if (I == MaterializingInfos.end())
+        continue;
+
+      if (I->second.hasQueriesPending())
+        RequestedSymbols.insert(KV.first);
+    }
+
+    return RequestedSymbols;
+  });
+}
+
+// Registers Dependencies for the materializing symbol Name. Dependencies on
+// already-Ready symbols are dropped; dependencies on emitted symbols are
+// transferred; dependencies on symbols in the error state push Name into the
+// error state too.
+void JITDylib::addDependencies(const SymbolStringPtr &Name,
+                               const SymbolDependenceMap &Dependencies) {
+  ES.runSessionLocked([&]() {
+    assert(Symbols.count(Name) && "Name not in symbol table");
+    assert(Symbols[Name].getState() < SymbolState::Emitted &&
+           "Can not add dependencies for a symbol that is not materializing");
+
+    LLVM_DEBUG({
+      dbgs() << "In " << getName() << " adding dependencies for " << *Name
+             << ": " << Dependencies << "\n";
+    });
+
+    // If Name is already in an error state then just bail out.
+    if (Symbols[Name].getFlags().hasError())
+      return;
+
+    auto &MI = MaterializingInfos[Name];
+    assert(Symbols[Name].getState() != SymbolState::Emitted &&
+           "Can not add dependencies to an emitted symbol");
+
+    bool DependsOnSymbolInErrorState = false;
+
+    // Register dependencies, record whether any dependency is in the error
+    // state.
+    for (auto &KV : Dependencies) {
+      assert(KV.first && "Null JITDylib in dependency?");
+      auto &OtherJITDylib = *KV.first;
+      auto &DepsOnOtherJITDylib = MI.UnemittedDependencies[&OtherJITDylib];
+
+      for (auto &OtherSymbol : KV.second) {
+
+        // Check the sym entry for the dependency.
+        auto OtherSymI = OtherJITDylib.Symbols.find(OtherSymbol);
+
+        // Assert that this symbol exists and has not reached the ready state
+        // already.
+        assert(OtherSymI != OtherJITDylib.Symbols.end() &&
+               "Dependency on unknown symbol");
+
+        auto &OtherSymEntry = OtherSymI->second;
+
+        // If the other symbol is already in the Ready state then there's no
+        // dependency to add.
+        if (OtherSymEntry.getState() == SymbolState::Ready)
+          continue;
+
+        // If the dependency is in an error state then note this and continue,
+        // we will move this symbol to the error state below.
+        if (OtherSymEntry.getFlags().hasError()) {
+          DependsOnSymbolInErrorState = true;
+          continue;
+        }
+
+        // If the dependency was not in the error state then add it to
+        // our list of dependencies.
+        auto &OtherMI = OtherJITDylib.MaterializingInfos[OtherSymbol];
+
+        if (OtherSymEntry.getState() == SymbolState::Emitted)
+          transferEmittedNodeDependencies(MI, Name, OtherMI);
+        else if (&OtherJITDylib != this || OtherSymbol != Name) {
+          // Record the edge in both directions (skip trivial self-edges).
+          OtherMI.Dependants[this].insert(Name);
+          DepsOnOtherJITDylib.insert(OtherSymbol);
+        }
+      }
+
+      if (DepsOnOtherJITDylib.empty())
+        MI.UnemittedDependencies.erase(&OtherJITDylib);
+    }
+
+    // If this symbol depended on any symbols in the error state then move
+    // this symbol to the error state too.
+    if (DependsOnSymbolInErrorState)
+      Symbols[Name].setFlags(Symbols[Name].getFlags() |
+                             JITSymbolFlags::HasError);
+  });
+}
+
+// Records resolved addresses/flags for the given symbols and moves them to
+// the Resolved state. Queries whose required state is now met are notified;
+// fully-satisfied queries are completed outside the session lock. Fails if
+// the tracker is defunct, the JITDylib is closing, or any symbol is already
+// in the error state.
+Error JITDylib::resolve(MaterializationResponsibility &MR,
+                        const SymbolMap &Resolved) {
+  AsynchronousSymbolQuerySet CompletedQueries;
+
+  if (auto Err = ES.runSessionLocked([&, this]() -> Error {
+        if (MR.RT->isDefunct())
+          return make_error<ResourceTrackerDefunct>(MR.RT);
+
+        if (State != Open)
+          return make_error<StringError>("JITDylib " + getName() +
+                                             " is defunct",
+                                         inconvertibleErrorCode());
+
+        struct WorklistEntry {
+          SymbolTable::iterator SymI;
+          JITEvaluatedSymbol ResolvedSym;
+        };
+
+        SymbolNameSet SymbolsInErrorState;
+        std::vector<WorklistEntry> Worklist;
+        Worklist.reserve(Resolved.size());
+
+        // Build worklist and check for any symbols in the error state.
+        for (const auto &KV : Resolved) {
+
+          assert(!KV.second.getFlags().hasError() &&
+                 "Resolution result can not have error flag set");
+
+          auto SymI = Symbols.find(KV.first);
+
+          assert(SymI != Symbols.end() && "Symbol not found");
+          assert(!SymI->second.hasMaterializerAttached() &&
+                 "Resolving symbol with materializer attached?");
+          assert(SymI->second.getState() == SymbolState::Materializing &&
+                 "Symbol should be materializing");
+          assert(SymI->second.getAddress() == 0 &&
+                 "Symbol has already been resolved");
+
+          if (SymI->second.getFlags().hasError())
+            SymbolsInErrorState.insert(KV.first);
+          else {
+            // Weak/Common are resolution-time properties; strip them before
+            // comparing against the declared flags.
+            auto Flags = KV.second.getFlags();
+            Flags &= ~(JITSymbolFlags::Weak | JITSymbolFlags::Common);
+            assert(Flags ==
+                       (SymI->second.getFlags() &
+                        ~(JITSymbolFlags::Weak | JITSymbolFlags::Common)) &&
+                   "Resolved flags should match the declared flags");
+
+            Worklist.push_back(
+                {SymI, JITEvaluatedSymbol(KV.second.getAddress(), Flags)});
+          }
+        }
+
+        // If any symbols were in the error state then bail out.
+        if (!SymbolsInErrorState.empty()) {
+          auto FailedSymbolsDepMap = std::make_shared<SymbolDependenceMap>();
+          (*FailedSymbolsDepMap)[this] = std::move(SymbolsInErrorState);
+          return make_error<FailedToMaterialize>(
+              std::move(FailedSymbolsDepMap));
+        }
+
+        while (!Worklist.empty()) {
+          auto SymI = Worklist.back().SymI;
+          auto ResolvedSym = Worklist.back().ResolvedSym;
+          Worklist.pop_back();
+
+          auto &Name = SymI->first;
+
+          // Resolved symbols can not be weak: discard the weak flag.
+          JITSymbolFlags ResolvedFlags = ResolvedSym.getFlags();
+          SymI->second.setAddress(ResolvedSym.getAddress());
+          SymI->second.setFlags(ResolvedFlags);
+          SymI->second.setState(SymbolState::Resolved);
+
+          auto MII = MaterializingInfos.find(Name);
+          if (MII == MaterializingInfos.end())
+            continue;
+
+          auto &MI = MII->second;
+          for (auto &Q : MI.takeQueriesMeeting(SymbolState::Resolved)) {
+            Q->notifySymbolMetRequiredState(Name, ResolvedSym);
+            Q->removeQueryDependence(*this, Name);
+            if (Q->isComplete())
+              CompletedQueries.insert(std::move(Q));
+          }
+        }
+
+        return Error::success();
+      }))
+    return Err;
+
+  // Otherwise notify all the completed queries.
+  for (auto &Q : CompletedQueries) {
+    assert(Q->isComplete() && "Q not completed");
+    Q->handleComplete(ES);
+  }
+
+  return Error::success();
+}
+
+// Moves the given symbols to the Emitted state, propagates readiness through
+// the dependence graph (a symbol becomes Ready once emitted with no
+// unemitted dependencies), and notifies / completes queries whose required
+// state is now met. Completed queries are run outside the session lock.
+Error JITDylib::emit(MaterializationResponsibility &MR,
+                     const SymbolFlagsMap &Emitted) {
+  AsynchronousSymbolQuerySet CompletedQueries;
+  DenseMap<JITDylib *, SymbolNameVector> ReadySymbols;
+
+  if (auto Err = ES.runSessionLocked([&, this]() -> Error {
+        if (MR.RT->isDefunct())
+          return make_error<ResourceTrackerDefunct>(MR.RT);
+
+        if (State != Open)
+          return make_error<StringError>("JITDylib " + getName() +
+                                             " is defunct",
+                                         inconvertibleErrorCode());
+
+        SymbolNameSet SymbolsInErrorState;
+        std::vector<SymbolTable::iterator> Worklist;
+
+        // Scan to build worklist, record any symbols in the error state.
+        for (const auto &KV : Emitted) {
+          auto &Name = KV.first;
+
+          auto SymI = Symbols.find(Name);
+          assert(SymI != Symbols.end() && "No symbol table entry for Name");
+
+          if (SymI->second.getFlags().hasError())
+            SymbolsInErrorState.insert(Name);
+          else
+            Worklist.push_back(SymI);
+        }
+
+        // If any symbols were in the error state then bail out.
+        if (!SymbolsInErrorState.empty()) {
+          auto FailedSymbolsDepMap = std::make_shared<SymbolDependenceMap>();
+          (*FailedSymbolsDepMap)[this] = std::move(SymbolsInErrorState);
+          return make_error<FailedToMaterialize>(
+              std::move(FailedSymbolsDepMap));
+        }
+
+        // Otherwise update dependencies and move to the emitted state.
+        while (!Worklist.empty()) {
+          auto SymI = Worklist.back();
+          Worklist.pop_back();
+
+          auto &Name = SymI->first;
+          auto &SymEntry = SymI->second;
+
+          // Move symbol to the emitted state.
+          assert(((SymEntry.getFlags().hasMaterializationSideEffectsOnly() &&
+                   SymEntry.getState() == SymbolState::Materializing) ||
+                  SymEntry.getState() == SymbolState::Resolved) &&
+                 "Emitting from state other than Resolved");
+          SymEntry.setState(SymbolState::Emitted);
+
+          auto MII = MaterializingInfos.find(Name);
+
+          // If this symbol has no MaterializingInfo then it's trivially ready.
+          // Update its state and continue.
+          if (MII == MaterializingInfos.end()) {
+            SymEntry.setState(SymbolState::Ready);
+            continue;
+          }
+
+          auto &MI = MII->second;
+
+          // For each dependant, transfer this node's emitted dependencies to
+          // it. If the dependant node is ready (i.e. has no unemitted
+          // dependencies) then notify any pending queries.
+          for (auto &KV : MI.Dependants) {
+            auto &DependantJD = *KV.first;
+            auto &DependantJDReadySymbols = ReadySymbols[&DependantJD];
+            for (auto &DependantName : KV.second) {
+              auto DependantMII =
+                  DependantJD.MaterializingInfos.find(DependantName);
+              assert(DependantMII != DependantJD.MaterializingInfos.end() &&
+                     "Dependant should have MaterializingInfo");
+
+              auto &DependantMI = DependantMII->second;
+
+              // Remove the dependant's dependency on this node.
+              assert(DependantMI.UnemittedDependencies.count(this) &&
+                     "Dependant does not have an unemitted dependencies record "
+                     "for "
+                     "this JITDylib");
+              assert(DependantMI.UnemittedDependencies[this].count(Name) &&
+                     "Dependant does not count this symbol as a dependency?");
+
+              DependantMI.UnemittedDependencies[this].erase(Name);
+              if (DependantMI.UnemittedDependencies[this].empty())
+                DependantMI.UnemittedDependencies.erase(this);
+
+              // Transfer unemitted dependencies from this node to the
+              // dependant.
+              DependantJD.transferEmittedNodeDependencies(DependantMI,
+                                                          DependantName, MI);
+
+              auto DependantSymI = DependantJD.Symbols.find(DependantName);
+              assert(DependantSymI != DependantJD.Symbols.end() &&
+                     "Dependant has no entry in the Symbols table");
+              auto &DependantSymEntry = DependantSymI->second;
+
+              // If the dependant is emitted and this node was the last of its
+              // unemitted dependencies then the dependant node is now ready, so
+              // notify any pending queries on the dependant node.
+              if (DependantSymEntry.getState() == SymbolState::Emitted &&
+                  DependantMI.UnemittedDependencies.empty()) {
+                assert(DependantMI.Dependants.empty() &&
+                       "Dependants should be empty by now");
+
+                // Since this dependant is now ready, we erase its
+                // MaterializingInfo and update its materializing state.
+                DependantSymEntry.setState(SymbolState::Ready);
+                DependantJDReadySymbols.push_back(DependantName);
+
+                for (auto &Q :
+                     DependantMI.takeQueriesMeeting(SymbolState::Ready)) {
+                  Q->notifySymbolMetRequiredState(
+                      DependantName, DependantSymI->second.getSymbol());
+                  if (Q->isComplete())
+                    CompletedQueries.insert(Q);
+                  Q->removeQueryDependence(DependantJD, DependantName);
+                }
+                DependantJD.MaterializingInfos.erase(DependantMII);
+              }
+            }
+          }
+
+          // Finally, check whether this node itself is now ready.
+          auto &ThisJDReadySymbols = ReadySymbols[this];
+          MI.Dependants.clear();
+          if (MI.UnemittedDependencies.empty()) {
+            SymI->second.setState(SymbolState::Ready);
+            ThisJDReadySymbols.push_back(Name);
+            for (auto &Q : MI.takeQueriesMeeting(SymbolState::Ready)) {
+              Q->notifySymbolMetRequiredState(Name, SymI->second.getSymbol());
+              if (Q->isComplete())
+                CompletedQueries.insert(Q);
+              Q->removeQueryDependence(*this, Name);
+            }
+            MaterializingInfos.erase(MII);
+          }
+        }
+
+        return Error::success();
+      }))
+    return Err;
+
+  // Otherwise notify all the completed queries.
+  for (auto &Q : CompletedQueries) {
+    assert(Q->isComplete() && "Q is not complete");
+    Q->handleComplete(ES);
+  }
+
+  return Error::success();
+}
+
+// Removes MR from its tracker's live-MR set; drops the tracker's entry
+// entirely once its last MR is gone.
+void JITDylib::unlinkMaterializationResponsibility(
+    MaterializationResponsibility &MR) {
+  ES.runSessionLocked([&]() {
+    auto I = TrackerMRs.find(MR.RT.get());
+    assert(I != TrackerMRs.end() && "No MRs in TrackerMRs list for RT");
+    assert(I->second.count(&MR) && "MR not in TrackerMRs list for RT");
+    I->second.erase(&MR);
+    if (I->second.empty())
+      TrackerMRs.erase(MR.RT.get());
+  });
+}
+
+// Moves the given symbols (and, transitively, their emitted dependants) into
+// the error state, detaching pending queries as it goes. Returns the set of
+// queries to fail and the full map of failed symbols; the caller is expected
+// to fail the queries outside the session lock.
+std::pair<JITDylib::AsynchronousSymbolQuerySet,
+          std::shared_ptr<SymbolDependenceMap>>
+JITDylib::failSymbols(FailedSymbolsWorklist Worklist) {
+  AsynchronousSymbolQuerySet FailedQueries;
+  auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+
+  while (!Worklist.empty()) {
+    assert(Worklist.back().first && "Failed JITDylib can not be null");
+    auto &JD = *Worklist.back().first;
+    auto Name = std::move(Worklist.back().second);
+    Worklist.pop_back();
+
+    (*FailedSymbolsMap)[&JD].insert(Name);
+
+    // Look up the symbol to fail.
+    auto SymI = JD.Symbols.find(Name);
+
+    // It's possible that this symbol has already been removed, e.g. if a
+    // materialization failure happens concurrently with a ResourceTracker or
+    // JITDylib removal. In that case we can safely skip this symbol and
+    // continue.
+    if (SymI == JD.Symbols.end())
+      continue;
+    auto &Sym = SymI->second;
+
+    // Move the symbol into the error state.
+    // Note that this may be redundant: The symbol might already have been
+    // moved to this state in response to the failure of a dependence.
+    Sym.setFlags(Sym.getFlags() | JITSymbolFlags::HasError);
+
+    // FIXME: Come up with a sane mapping of state to
+    // presence-of-MaterializingInfo so that we can assert presence / absence
+    // here, rather than testing it.
+    auto MII = JD.MaterializingInfos.find(Name);
+
+    if (MII == JD.MaterializingInfos.end())
+      continue;
+
+    auto &MI = MII->second;
+
+    // Move all dependants to the error state and disconnect from them.
+    for (auto &KV : MI.Dependants) {
+      auto &DependantJD = *KV.first;
+      for (auto &DependantName : KV.second) {
+        assert(DependantJD.Symbols.count(DependantName) &&
+               "No symbol table entry for DependantName");
+        auto &DependantSym = DependantJD.Symbols[DependantName];
+        DependantSym.setFlags(DependantSym.getFlags() |
+                              JITSymbolFlags::HasError);
+
+        assert(DependantJD.MaterializingInfos.count(DependantName) &&
+               "No MaterializingInfo for dependant");
+        auto &DependantMI = DependantJD.MaterializingInfos[DependantName];
+
+        auto UnemittedDepI = DependantMI.UnemittedDependencies.find(&JD);
+        assert(UnemittedDepI != DependantMI.UnemittedDependencies.end() &&
+               "No UnemittedDependencies entry for this JITDylib");
+        assert(UnemittedDepI->second.count(Name) &&
+               "No UnemittedDependencies entry for this symbol");
+        UnemittedDepI->second.erase(Name);
+        if (UnemittedDepI->second.empty())
+          DependantMI.UnemittedDependencies.erase(UnemittedDepI);
+
+        // If this symbol is already in the emitted state then we need to
+        // take responsibility for failing its queries, so add it to the
+        // worklist.
+        if (DependantSym.getState() == SymbolState::Emitted) {
+          assert(DependantMI.Dependants.empty() &&
+                 "Emitted symbol should not have dependants");
+          Worklist.push_back(std::make_pair(&DependantJD, DependantName));
+        }
+      }
+    }
+    MI.Dependants.clear();
+
+    // Disconnect from all unemitted dependencies.
+    for (auto &KV : MI.UnemittedDependencies) {
+      auto &UnemittedDepJD = *KV.first;
+      for (auto &UnemittedDepName : KV.second) {
+        auto UnemittedDepMII =
+            UnemittedDepJD.MaterializingInfos.find(UnemittedDepName);
+        assert(UnemittedDepMII != UnemittedDepJD.MaterializingInfos.end() &&
+               "Missing MII for unemitted dependency");
+        assert(UnemittedDepMII->second.Dependants.count(&JD) &&
+               "JD not listed as a dependant of unemitted dependency");
+        assert(UnemittedDepMII->second.Dependants[&JD].count(Name) &&
+               "Name is not listed as a dependant of unemitted dependency");
+        UnemittedDepMII->second.Dependants[&JD].erase(Name);
+        if (UnemittedDepMII->second.Dependants[&JD].empty())
+          UnemittedDepMII->second.Dependants.erase(&JD);
+      }
+    }
+    MI.UnemittedDependencies.clear();
+
+    // Collect queries to be failed for this MII.
+    AsynchronousSymbolQueryList ToDetach;
+    for (auto &Q : MII->second.pendingQueries()) {
+      // Add the query to the list to be failed and detach it.
+      FailedQueries.insert(Q);
+      ToDetach.push_back(Q);
+    }
+    for (auto &Q : ToDetach)
+      Q->detach();
+
+    assert(MI.Dependants.empty() &&
+           "Can not delete MaterializingInfo with dependants still attached");
+    assert(MI.UnemittedDependencies.empty() &&
+           "Can not delete MaterializingInfo with unemitted dependencies "
+           "still attached");
+    assert(!MI.hasQueriesPending() &&
+           "Can not delete MaterializingInfo with queries pending");
+    JD.MaterializingInfos.erase(MII);
+  }
+
+  return std::make_pair(std::move(FailedQueries), std::move(FailedSymbolsMap));
+}
+
+// Replaces this JITDylib's link order. If LinkAgainstThisJITDylibFirst is
+// set, this JITDylib is prepended (unless NewLinkOrder already starts with
+// it) so that local definitions are found first.
+void JITDylib::setLinkOrder(JITDylibSearchOrder NewLinkOrder,
+                            bool LinkAgainstThisJITDylibFirst) {
+  ES.runSessionLocked([&]() {
+    assert(State == Open && "JD is defunct");
+    if (LinkAgainstThisJITDylibFirst) {
+      LinkOrder.clear();
+      if (NewLinkOrder.empty() || NewLinkOrder.front().first != this)
+        LinkOrder.push_back(
+            std::make_pair(this, JITDylibLookupFlags::MatchAllSymbols));
+      llvm::append_range(LinkOrder, NewLinkOrder);
+    } else
+      LinkOrder = std::move(NewLinkOrder);
+  });
+}
+
+// Append JD (with the given lookup flags) to the end of this JITDylib's
+// link order, under the session lock.
+void JITDylib::addToLinkOrder(JITDylib &JD, JITDylibLookupFlags JDLookupFlags) {
+  ES.runSessionLocked([&]() {
+    LinkOrder.push_back(std::make_pair(&JD, JDLookupFlags));
+  });
+}
+
+// Replace the first occurrence of OldJD in this JITDylib's link order with
+// NewJD (using JDLookupFlags). No-op if OldJD does not appear in the order.
+void JITDylib::replaceInLinkOrder(JITDylib &OldJD, JITDylib &NewJD,
+                                  JITDylibLookupFlags JDLookupFlags) {
+  ES.runSessionLocked([&]() {
+    assert(State == Open && "JD is defunct");
+    auto Entry = llvm::find_if(
+        LinkOrder, [&](const JITDylibSearchOrder::value_type &KV) {
+          return KV.first == &OldJD;
+        });
+    if (Entry != LinkOrder.end())
+      *Entry = {&NewJD, JDLookupFlags};
+  });
+}
+
+// Remove the first occurrence of JD from this JITDylib's link order, under
+// the session lock. No-op if JD is not present.
+void JITDylib::removeFromLinkOrder(JITDylib &JD) {
+  ES.runSessionLocked([&]() {
+    assert(State == Open && "JD is defunct");
+    for (auto It = LinkOrder.begin(), End = LinkOrder.end(); It != End; ++It) {
+      if (It->first == &JD) {
+        LinkOrder.erase(It);
+        break;
+      }
+    }
+  });
+}
+
+// Remove the given symbols from this JITDylib.
+//
+// The operation is all-or-nothing: every requested name is validated first
+// and the symbol table is only mutated once all names are known to be
+// removable. Returns SymbolsNotFound if any name is undefined, or
+// SymbolsCouldNotBeRemoved if any name is mid-materialization (in a state
+// other than NeverSearched or Ready). Attached materializers are given a
+// chance to discard removed symbols.
+Error JITDylib::remove(const SymbolNameSet &Names) {
+  return ES.runSessionLocked([&]() -> Error {
+    assert(State == Open && "JD is defunct");
+    using SymbolMaterializerItrPair =
+        std::pair<SymbolTable::iterator, UnmaterializedInfosMap::iterator>;
+    std::vector<SymbolMaterializerItrPair> SymbolsToRemove;
+    SymbolNameSet Missing;
+    SymbolNameSet Materializing;
+
+    for (auto &Name : Names) {
+      auto I = Symbols.find(Name);
+
+      // Note symbol missing.
+      if (I == Symbols.end()) {
+        Missing.insert(Name);
+        continue;
+      }
+
+      // Note symbol materializing.
+      if (I->second.getState() != SymbolState::NeverSearched &&
+          I->second.getState() != SymbolState::Ready) {
+        Materializing.insert(Name);
+        continue;
+      }
+
+      // Only symbols with a materializer still attached have an
+      // UnmaterializedInfo to look up; record end() otherwise.
+      auto UMII = I->second.hasMaterializerAttached()
+                      ? UnmaterializedInfos.find(Name)
+                      : UnmaterializedInfos.end();
+      SymbolsToRemove.push_back(std::make_pair(I, UMII));
+    }
+
+    // If any of the symbols are not defined, return an error.
+    if (!Missing.empty())
+      return make_error<SymbolsNotFound>(ES.getSymbolStringPool(),
+                                         std::move(Missing));
+
+    // If any of the symbols are currently materializing, return an error.
+    if (!Materializing.empty())
+      return make_error<SymbolsCouldNotBeRemoved>(ES.getSymbolStringPool(),
+                                                  std::move(Materializing));
+
+    // Remove the symbols.
+    for (auto &SymbolMaterializerItrPair : SymbolsToRemove) {
+      auto UMII = SymbolMaterializerItrPair.second;
+
+      // If there is a materializer attached, call discard.
+      if (UMII != UnmaterializedInfos.end()) {
+        UMII->second->MU->doDiscard(*this, UMII->first);
+        UnmaterializedInfos.erase(UMII);
+      }
+
+      auto SymI = SymbolMaterializerItrPair.first;
+      Symbols.erase(SymI);
+    }
+
+    return Error::success();
+  });
+}
+
+// Dump a human-readable description of this JITDylib for debugging:
+// state, link order, symbol table (with addresses/flags/states and any
+// attached materializers), and outstanding MaterializingInfo entries.
+void JITDylib::dump(raw_ostream &OS) {
+  ES.runSessionLocked([&, this]() {
+    OS << "JITDylib \"" << getName() << "\" (ES: "
+       << format("0x%016" PRIx64, reinterpret_cast<uintptr_t>(&ES))
+       << ", State = ";
+    switch (State) {
+    case Open:
+      OS << "Open";
+      break;
+    case Closing:
+      OS << "Closing";
+      break;
+    case Closed:
+      OS << "Closed";
+      break;
+    }
+    OS << ")\n";
+    // A closed JITDylib has had all of its tables cleared; nothing to print.
+    if (State == Closed)
+      return;
+    OS << "Link order: " << LinkOrder << "\n"
+       << "Symbol table:\n";
+
+    for (auto &KV : Symbols) {
+      OS << " \"" << *KV.first << "\": ";
+      // NOTE(review): flags are streamed again unconditionally below, so
+      // resolved symbols print their flags twice -- looks unintentional;
+      // confirm against upstream LLVM before changing the output format.
+      if (auto Addr = KV.second.getAddress())
+        OS << format("0x%016" PRIx64, Addr) << ", " << KV.second.getFlags()
+           << " ";
+      else
+        OS << "<not resolved> ";
+
+      OS << KV.second.getFlags() << " " << KV.second.getState();
+
+      if (KV.second.hasMaterializerAttached()) {
+        OS << " (Materializer ";
+        auto I = UnmaterializedInfos.find(KV.first);
+        assert(I != UnmaterializedInfos.end() &&
+               "Lazy symbol should have UnmaterializedInfo");
+        OS << I->second->MU.get() << ", " << I->second->MU->getName() << ")\n";
+      } else
+        OS << "\n";
+    }
+
+    if (!MaterializingInfos.empty())
+      OS << " MaterializingInfos entries:\n";
+    for (auto &KV : MaterializingInfos) {
+      OS << " \"" << *KV.first << "\":\n"
+         << " " << KV.second.pendingQueries().size()
+         << " pending queries: { ";
+      for (const auto &Q : KV.second.pendingQueries())
+        OS << Q.get() << " (" << Q->getRequiredState() << ") ";
+      OS << "}\n Dependants:\n";
+      for (auto &KV2 : KV.second.Dependants)
+        OS << " " << KV2.first->getName() << ": " << KV2.second << "\n";
+      OS << " Unemitted Dependencies:\n";
+      for (auto &KV2 : KV.second.UnemittedDependencies)
+        OS << " " << KV2.first->getName() << ": " << KV2.second << "\n";
+      // A MaterializingInfo for a Ready symbol with no queries and no
+      // dependency edges should have been cleaned up already.
+      assert((Symbols[KV.first].getState() != SymbolState::Ready ||
+              !KV.second.pendingQueries().empty() ||
+              !KV.second.Dependants.empty() ||
+              !KV.second.UnemittedDependencies.empty()) &&
+             "Stale materializing info entry");
+    }
+  });
+}
+
+// Attach Q to this MaterializingInfo's pending-query list.
+//
+// PendingQueries is kept ordered by required state, highest first, so that
+// takeQueriesMeeting() can pop every satisfied query off the back. The
+// lower_bound over the reversed range finds the insertion point that
+// preserves this order (placing Q ahead of existing queries with the same
+// required state).
+void JITDylib::MaterializingInfo::addQuery(
+    std::shared_ptr<AsynchronousSymbolQuery> Q) {
+
+  auto I = std::lower_bound(
+      PendingQueries.rbegin(), PendingQueries.rend(), Q->getRequiredState(),
+      [](const std::shared_ptr<AsynchronousSymbolQuery> &V, SymbolState S) {
+        return V->getRequiredState() <= S;
+      });
+  PendingQueries.insert(I.base(), std::move(Q));
+}
+
+// Detach the given query from this MaterializingInfo. Q must currently be
+// attached (asserted in debug builds).
+void JITDylib::MaterializingInfo::removeQuery(
+    const AsynchronousSymbolQuery &Q) {
+  // FIXME: Implement 'find_as' for shared_ptr<T>/T*.
+  auto Entry = std::find_if(
+      PendingQueries.begin(), PendingQueries.end(),
+      [&Q](const std::shared_ptr<AsynchronousSymbolQuery> &P) {
+        return P.get() == &Q;
+      });
+  assert(Entry != PendingQueries.end() &&
+         "Query is not attached to this MaterializingInfo");
+  PendingQueries.erase(Entry);
+}
+
+// Detach and return all pending queries whose required state is satisfied
+// by RequiredState.
+//
+// Relies on the invariant maintained by addQuery(): PendingQueries is
+// sorted by required state highest-first, so every satisfied query sits at
+// the back of the list and popping stops at the first unsatisfied one.
+JITDylib::AsynchronousSymbolQueryList
+JITDylib::MaterializingInfo::takeQueriesMeeting(SymbolState RequiredState) {
+  AsynchronousSymbolQueryList Result;
+  while (!PendingQueries.empty()) {
+    if (PendingQueries.back()->getRequiredState() > RequiredState)
+      break;
+
+    Result.push_back(std::move(PendingQueries.back()));
+    PendingQueries.pop_back();
+  }
+
+  return Result;
+}
+
+// Construct a JITDylib attached to ES. Every JITDylib starts with itself
+// first in its own link order.
+JITDylib::JITDylib(ExecutionSession &ES, std::string Name)
+    : JITLinkDylib(std::move(Name)), ES(ES) {
+  LinkOrder.push_back(
+      std::make_pair(this, JITDylibLookupFlags::MatchAllSymbols));
+}
+
+// Remove all symbols owned by RT from this JITDylib.
+//
+// Must be called under the session lock. For the default tracker the owned
+// set is every symbol not claimed by some other tracker; for any other
+// tracker it is that tracker's TrackerSymbols entry. In-flight queries on
+// removed symbols are failed via failSymbols(), and the (queries, failed
+// symbols) pair is returned so the caller can notify them outside the lock.
+std::pair<JITDylib::AsynchronousSymbolQuerySet,
+          std::shared_ptr<SymbolDependenceMap>>
+JITDylib::removeTracker(ResourceTracker &RT) {
+  // Note: Should be called under the session lock.
+  assert(State != Closed && "JD is defunct");
+
+  SymbolNameVector SymbolsToRemove;
+  std::vector<std::pair<JITDylib *, SymbolStringPtr>> SymbolsToFail;
+
+  if (&RT == DefaultTracker.get()) {
+    // The default tracker implicitly owns every symbol that no explicit
+    // tracker claims.
+    SymbolNameSet TrackedSymbols;
+    for (auto &KV : TrackerSymbols)
+      for (auto &Sym : KV.second)
+        TrackedSymbols.insert(Sym);
+
+    for (auto &KV : Symbols) {
+      auto &Sym = KV.first;
+      if (!TrackedSymbols.count(Sym))
+        SymbolsToRemove.push_back(Sym);
+    }
+
+    DefaultTracker.reset();
+  } else {
+    // Check for a non-default tracker.
+    auto I = TrackerSymbols.find(&RT);
+    if (I != TrackerSymbols.end()) {
+      SymbolsToRemove = std::move(I->second);
+      TrackerSymbols.erase(I);
+    }
+    // ... if not found this tracker was already defunct. Nothing to do.
+  }
+
+  for (auto &Sym : SymbolsToRemove) {
+    assert(Symbols.count(Sym) && "Symbol not in symbol table");
+
+    // If there is a MaterializingInfo then collect any queries to fail.
+    auto MII = MaterializingInfos.find(Sym);
+    if (MII != MaterializingInfos.end())
+      SymbolsToFail.push_back({this, Sym});
+  }
+
+  // (Removed an unused local AsynchronousSymbolQuerySet here: the failed
+  // queries are returned as part of failSymbols' result.)
+  auto Result = failSymbols(std::move(SymbolsToFail));
+
+  // Removed symbols should be taken out of the table altogether.
+  for (auto &Sym : SymbolsToRemove) {
+    auto I = Symbols.find(Sym);
+    assert(I != Symbols.end() && "Symbol not present in table");
+
+    // Remove Materializer if present.
+    if (I->second.hasMaterializerAttached()) {
+      // FIXME: Should this discard the symbols?
+      UnmaterializedInfos.erase(Sym);
+    } else {
+      assert(!UnmaterializedInfos.count(Sym) &&
+             "Symbol has materializer attached");
+    }
+
+    Symbols.erase(I);
+  }
+
+  return Result;
+}
+
+// Move all resources owned by SrcRT over to DstRT (session lock held).
+//
+// Retargets unmaterialized-info and materialization-responsibility tracker
+// pointers, then merges the TrackerSymbols bookkeeping, with special cases
+// when either side is the default tracker (which never appears in
+// TrackerSymbols).
+void JITDylib::transferTracker(ResourceTracker &DstRT, ResourceTracker &SrcRT) {
+  assert(State != Closed && "JD is defunct");
+  assert(&DstRT != &SrcRT && "No-op transfers shouldn't call transferTracker");
+  assert(&DstRT.getJITDylib() == this && "DstRT is not for this JITDylib");
+  assert(&SrcRT.getJITDylib() == this && "SrcRT is not for this JITDylib");
+
+  // Update trackers for any not-yet materialized units.
+  for (auto &KV : UnmaterializedInfos) {
+    if (KV.second->RT == &SrcRT)
+      KV.second->RT = &DstRT;
+  }
+
+  // Update trackers for any active materialization responsibilities.
+  {
+    auto I = TrackerMRs.find(&SrcRT);
+    if (I != TrackerMRs.end()) {
+      auto &SrcMRs = I->second;
+      auto &DstMRs = TrackerMRs[&DstRT];
+      for (auto *MR : SrcMRs)
+        MR->RT = &DstRT;
+      // Move the whole set if DstRT had none; otherwise merge element-wise.
+      if (DstMRs.empty())
+        DstMRs = std::move(SrcMRs);
+      else
+        for (auto *MR : SrcMRs)
+          DstMRs.insert(MR);
+      // Erase SrcRT entry in TrackerMRs. Use &SrcRT key rather than iterator I
+      // for this, since I may have been invalidated by 'TrackerMRs[&DstRT]'.
+      TrackerMRs.erase(&SrcRT);
+    }
+  }
+
+  // If we're transferring to the default tracker we just need to delete the
+  // tracked symbols for the source tracker, since the default tracker
+  // implicitly owns everything not otherwise tracked.
+  if (&DstRT == DefaultTracker.get()) {
+    TrackerSymbols.erase(&SrcRT);
+    return;
+  }
+
+  // If we're transferring from the default tracker we need to find all
+  // currently untracked symbols.
+  if (&SrcRT == DefaultTracker.get()) {
+    assert(!TrackerSymbols.count(&SrcRT) &&
+           "Default tracker should not appear in TrackerSymbols");
+
+    SymbolNameVector SymbolsToTrack;
+
+    SymbolNameSet CurrentlyTrackedSymbols;
+    for (auto &KV : TrackerSymbols)
+      for (auto &Sym : KV.second)
+        CurrentlyTrackedSymbols.insert(Sym);
+
+    for (auto &KV : Symbols) {
+      auto &Sym = KV.first;
+      if (!CurrentlyTrackedSymbols.count(Sym))
+        SymbolsToTrack.push_back(Sym);
+    }
+
+    TrackerSymbols[&DstRT] = std::move(SymbolsToTrack);
+    return;
+  }
+
+  auto &DstTrackedSymbols = TrackerSymbols[&DstRT];
+
+  // Finally, if neither SrcRT nor DstRT is the default tracker then just
+  // append SrcRT's tracked symbols to DstRT's.
+  auto SI = TrackerSymbols.find(&SrcRT);
+  if (SI == TrackerSymbols.end())
+    return;
+
+  DstTrackedSymbols.reserve(DstTrackedSymbols.size() + SI->second.size());
+  for (auto &Sym : SI->second)
+    DstTrackedSymbols.push_back(std::move(Sym));
+  TrackerSymbols.erase(SI);
+}
+
+// Add MU's symbol definitions to this JITDylib's symbol table.
+//
+// Resolution rules:
+//  * strong new def vs. strong existing (or already-searched) def -> a
+//    DuplicateDefinition error is returned and nothing is modified;
+//  * strong new def vs. weak never-searched existing def -> the existing
+//    def is discarded and overridden;
+//  * weak new def vs. any existing def -> the new def is discarded.
+// On success every surviving MU symbol is entered in the never-searched
+// state with a materializer attached.
+Error JITDylib::defineImpl(MaterializationUnit &MU) {
+
+  LLVM_DEBUG({ dbgs() << " " << MU.getSymbols() << "\n"; });
+
+  SymbolNameSet Duplicates;
+  std::vector<SymbolStringPtr> ExistingDefsOverridden;
+  std::vector<SymbolStringPtr> MUDefsOverridden;
+
+  for (const auto &KV : MU.getSymbols()) {
+    auto I = Symbols.find(KV.first);
+
+    if (I != Symbols.end()) {
+      if (KV.second.isStrong()) {
+        if (I->second.getFlags().isStrong() ||
+            I->second.getState() > SymbolState::NeverSearched)
+          Duplicates.insert(KV.first);
+        else {
+          assert(I->second.getState() == SymbolState::NeverSearched &&
+                 "Overridden existing def should be in the never-searched "
+                 "state");
+          ExistingDefsOverridden.push_back(KV.first);
+        }
+      } else
+        MUDefsOverridden.push_back(KV.first);
+    }
+  }
+
+  // If there were any duplicate definitions then bail out.
+  if (!Duplicates.empty()) {
+    LLVM_DEBUG(
+        { dbgs() << " Error: Duplicate symbols " << Duplicates << "\n"; });
+    return make_error<DuplicateDefinition>(std::string(**Duplicates.begin()));
+  }
+
+  // Discard any overridden defs in this MU.
+  LLVM_DEBUG({
+    if (!MUDefsOverridden.empty())
+      dbgs() << " Defs in this MU overridden: " << MUDefsOverridden << "\n";
+  });
+  for (auto &S : MUDefsOverridden)
+    MU.doDiscard(*this, S);
+
+  // Discard existing overridden defs.
+  LLVM_DEBUG({
+    if (!ExistingDefsOverridden.empty())
+      // Fix: this debug message previously printed MUDefsOverridden by
+      // mistake; it should list the existing defs being overridden.
+      dbgs() << " Existing defs overridden by this MU: "
+             << ExistingDefsOverridden << "\n";
+  });
+  for (auto &S : ExistingDefsOverridden) {
+
+    auto UMII = UnmaterializedInfos.find(S);
+    assert(UMII != UnmaterializedInfos.end() &&
+           "Overridden existing def should have an UnmaterializedInfo");
+    UMII->second->MU->doDiscard(*this, S);
+  }
+
+  // Finally, add the defs from this MU.
+  for (auto &KV : MU.getSymbols()) {
+    auto &SymEntry = Symbols[KV.first];
+    SymEntry.setFlags(KV.second);
+    SymEntry.setState(SymbolState::NeverSearched);
+    SymEntry.setMaterializerAttached(true);
+  }
+
+  return Error::success();
+}
+
+// Record ownership bookkeeping for a successfully defined MU: associate its
+// symbols with the owning tracker (unless it is the default tracker, which
+// owns everything untracked implicitly), then share a single
+// UnmaterializedInfo across all of the MU's symbols.
+void JITDylib::installMaterializationUnit(
+    std::unique_ptr<MaterializationUnit> MU, ResourceTracker &RT) {
+
+  // Precondition: defineImpl succeeded for this MU.
+  if (&RT != DefaultTracker.get()) {
+    auto &TS = TrackerSymbols[&RT];
+    TS.reserve(TS.size() + MU->getSymbols().size());
+    for (auto &KV : MU->getSymbols())
+      TS.push_back(KV.first);
+  }
+
+  auto UMI = std::make_shared<UnmaterializedInfo>(std::move(MU), &RT);
+  for (auto &KV : UMI->MU->getSymbols())
+    UnmaterializedInfos[KV.first] = UMI;
+}
+
+// Detach query Q from the MaterializingInfo of every symbol in
+// QuerySymbols. Each symbol must already have a MaterializingInfo entry.
+void JITDylib::detachQueryHelper(AsynchronousSymbolQuery &Q,
+                                 const SymbolNameSet &QuerySymbols) {
+  for (auto &Sym : QuerySymbols) {
+    assert(MaterializingInfos.count(Sym) &&
+           "QuerySymbol does not have MaterializingInfo");
+    MaterializingInfos[Sym].removeQuery(Q);
+  }
+}
+
+// Propagate an emitted node's remaining unemitted dependencies up to its
+// dependant.
+//
+// For each unemitted dependency of EmittedMI, record DependantName as a
+// dependant of that dependency, and record the dependency in DependantMI's
+// own unemitted-dependency set, skipping self-edges.
+void JITDylib::transferEmittedNodeDependencies(
+    MaterializingInfo &DependantMI, const SymbolStringPtr &DependantName,
+    MaterializingInfo &EmittedMI) {
+  for (auto &KV : EmittedMI.UnemittedDependencies) {
+    auto &DependencyJD = *KV.first;
+    // Lazily-resolved pointer into DependantMI.UnemittedDependencies for
+    // DependencyJD, looked up at most once per JITDylib.
+    SymbolNameSet *UnemittedDependenciesOnDependencyJD = nullptr;
+
+    for (auto &DependencyName : KV.second) {
+      auto &DependencyMI = DependencyJD.MaterializingInfos[DependencyName];
+
+      // Do not add self dependencies.
+      if (&DependencyMI == &DependantMI)
+        continue;
+
+      // If we haven't looked up the dependencies for DependencyJD yet, do it
+      // now and cache the result.
+      if (!UnemittedDependenciesOnDependencyJD)
+        UnemittedDependenciesOnDependencyJD =
+            &DependantMI.UnemittedDependencies[&DependencyJD];
+
+      DependencyMI.Dependants[this].insert(DependantName);
+      UnemittedDependenciesOnDependencyJD->insert(DependencyName);
+    }
+  }
+}
+
+// Out-of-line definition anchors Platform's vtable in this translation unit.
+Platform::~Platform() = default;
+
+// Blocking lookup of each JITDylib's initialization symbols in Ready state.
+//
+// Issues one asynchronous lookup per JITDylib and waits on a condition
+// variable until every callback has fired (or an error is reported).
+// Returns a map from JITDylib to its resolved SymbolMap, or the join of all
+// lookup errors.
+Expected<DenseMap<JITDylib *, SymbolMap>> Platform::lookupInitSymbols(
+    ExecutionSession &ES,
+    const DenseMap<JITDylib *, SymbolLookupSet> &InitSyms) {
+
+  DenseMap<JITDylib *, SymbolMap> CompoundResult;
+  Error CompoundErr = Error::success();
+  std::mutex LookupMutex;
+  std::condition_variable CV;
+  // Number of lookups still outstanding; decremented under LookupMutex.
+  uint64_t Count = InitSyms.size();
+
+  LLVM_DEBUG({
+    dbgs() << "Issuing init-symbol lookup:\n";
+    for (auto &KV : InitSyms)
+      dbgs() << " " << KV.first->getName() << ": " << KV.second << "\n";
+  });
+
+  for (auto &KV : InitSyms) {
+    auto *JD = KV.first;
+    // NOTE(review): InitSyms is a const ref, so this std::move degrades to a
+    // copy of the SymbolLookupSet -- confirm whether mutating the source was
+    // intended.
+    auto Names = std::move(KV.second);
+    ES.lookup(
+        LookupKind::Static,
+        JITDylibSearchOrder({{JD, JITDylibLookupFlags::MatchAllSymbols}}),
+        std::move(Names), SymbolState::Ready,
+        [&, JD](Expected<SymbolMap> Result) {
+          {
+            std::lock_guard<std::mutex> Lock(LookupMutex);
+            --Count;
+            if (Result) {
+              assert(!CompoundResult.count(JD) &&
+                     "Duplicate JITDylib in lookup?");
+              CompoundResult[JD] = std::move(*Result);
+            } else
+              CompoundErr =
+                  joinErrors(std::move(CompoundErr), Result.takeError());
+          }
+          CV.notify_one();
+        },
+        NoDependenciesToRegister);
+  }
+
+  // Wait until all lookups complete, or until any error arrives.
+  std::unique_lock<std::mutex> Lock(LookupMutex);
+  CV.wait(Lock, [&] { return Count == 0 || CompoundErr; });
+
+  if (CompoundErr)
+    return std::move(CompoundErr);
+
+  return std::move(CompoundResult);
+}
+
+// Asynchronous counterpart of lookupInitSymbols.
+//
+// Issues one lookup per JITDylib; a shared TriggerOnComplete object
+// accumulates errors and invokes OnComplete from its destructor once the
+// last in-flight lookup drops its reference.
+void Platform::lookupInitSymbolsAsync(
+    unique_function<void(Error)> OnComplete, ExecutionSession &ES,
+    const DenseMap<JITDylib *, SymbolLookupSet> &InitSyms) {
+
+  // Fires OnComplete with the joined lookup result when the last
+  // shared_ptr referencing it is destroyed.
+  class TriggerOnComplete {
+  public:
+    using OnCompleteFn = unique_function<void(Error)>;
+    TriggerOnComplete(OnCompleteFn OnComplete)
+        : OnComplete(std::move(OnComplete)) {}
+    ~TriggerOnComplete() { OnComplete(std::move(LookupResult)); }
+    void reportResult(Error Err) {
+      std::lock_guard<std::mutex> Lock(ResultMutex);
+      LookupResult = joinErrors(std::move(LookupResult), std::move(Err));
+    }
+
+  private:
+    std::mutex ResultMutex;
+    Error LookupResult{Error::success()};
+    OnCompleteFn OnComplete;
+  };
+
+  LLVM_DEBUG({
+    dbgs() << "Issuing init-symbol lookup:\n";
+    for (auto &KV : InitSyms)
+      dbgs() << " " << KV.first->getName() << ": " << KV.second << "\n";
+  });
+
+  auto TOC = std::make_shared<TriggerOnComplete>(std::move(OnComplete));
+
+  for (auto &KV : InitSyms) {
+    auto *JD = KV.first;
+    // NOTE(review): InitSyms is a const ref, so this std::move degrades to a
+    // copy of the SymbolLookupSet -- confirm whether mutating the source was
+    // intended.
+    auto Names = std::move(KV.second);
+    ES.lookup(
+        LookupKind::Static,
+        JITDylibSearchOrder({{JD, JITDylibLookupFlags::MatchAllSymbols}}),
+        std::move(Names), SymbolState::Ready,
+        [TOC](Expected<SymbolMap> Result) {
+          // Only the error (if any) is recorded; the symbol values are not
+          // needed by callers of the async form.
+          TOC->reportResult(Result.takeError());
+        },
+        NoDependenciesToRegister);
+  }
+}
+
+// Describe this task for debugging: which MaterializationUnit is being
+// materialized, and into which JITDylib.
+void MaterializationTask::printDescription(raw_ostream &OS) {
+  OS << "Materialization task: " << MU->getName();
+  OS << " in " << MR->getTargetJITDylib().getName();
+}
+
+// Run the materialization, handing ownership of the responsibility to the MU.
+void MaterializationTask::run() { MU->materialize(std::move(MR)); }
+
+// Construct an ExecutionSession that owns the given ExecutorProcessControl.
+ExecutionSession::ExecutionSession(std::unique_ptr<ExecutorProcessControl> EPC)
+    : EPC(std::move(EPC)) {
+  // Associate the EPC with this session.
+  this->EPC->ES = this;
+}
+
+// End the session: mark it closed, clear every JITDylib, and disconnect the
+// ExecutorProcessControl, joining all resulting errors.
+Error ExecutionSession::endSession() {
+  LLVM_DEBUG(dbgs() << "Ending ExecutionSession " << this << "\n");
+
+  // Take ownership of the JITDylib list under the lock so clearing can
+  // happen outside it.
+  std::vector<JITDylibSP> JITDylibsToClose = runSessionLocked([&] {
+    SessionOpen = false;
+    return std::move(JDs);
+  });
+
+  // TODO: notify platform? run static deinits?
+
+  Error Err = Error::success();
+  for (auto &JD : JITDylibsToClose)
+    Err = joinErrors(std::move(Err), JD->clear());
+
+  Err = joinErrors(std::move(Err), EPC->disconnect());
+
+  return Err;
+}
+
+// Register RM to receive resource lifecycle notifications, under the
+// session lock.
+void ExecutionSession::registerResourceManager(ResourceManager &RM) {
+  runSessionLocked([this, &RM] { ResourceManagers.push_back(&RM); });
+}
+
+// Deregister RM. Managers are usually deregistered in reverse registration
+// order, so the back of the list is checked first before a linear search.
+void ExecutionSession::deregisterResourceManager(ResourceManager &RM) {
+  runSessionLocked([&] {
+    assert(!ResourceManagers.empty() && "No managers registered");
+    if (ResourceManagers.back() == &RM) {
+      ResourceManagers.pop_back();
+      return;
+    }
+    auto I = llvm::find(ResourceManagers, &RM);
+    assert(I != ResourceManagers.end() && "RM not registered");
+    ResourceManagers.erase(I);
+  });
+}
+
+// Return the JITDylib with the given name, or nullptr if none exists.
+JITDylib *ExecutionSession::getJITDylibByName(StringRef Name) {
+  return runSessionLocked([&, this]() -> JITDylib * {
+    auto I = llvm::find_if(
+        JDs, [&](const JITDylibSP &JD) { return JD->getName() == Name; });
+    return I == JDs.end() ? nullptr : I->get();
+  });
+}
+
+// Create a JITDylib with no Platform setup. The caller is responsible for
+// ensuring the name is unique within the session.
+JITDylib &ExecutionSession::createBareJITDylib(std::string Name) {
+  // NOTE(review): this uniqueness check runs outside the session lock (and
+  // is compiled out in release builds), so it is advisory only.
+  assert(!getJITDylibByName(Name) && "JITDylib with that name already exists");
+  return runSessionLocked([&, this]() -> JITDylib & {
+    JDs.push_back(new JITDylib(*this, std::move(Name)));
+    return *JDs.back();
+  });
+}
+
+// Create a JITDylib and, if a Platform is attached, run its per-JITDylib
+// setup. Returns the setup error (with the bare JITDylib left in place) on
+// failure.
+Expected<JITDylib &> ExecutionSession::createJITDylib(std::string Name) {
+  auto &JD = createBareJITDylib(Name);
+  if (P)
+    if (auto Err = P->setupJITDylib(JD))
+      return std::move(Err);
+  return JD;
+}
+
+// Remove JD from the session.
+//
+// Three phases: (1) under the lock, mark JD Closing and unlink it from the
+// session's JITDylib list; (2) outside the lock, clear JD's contents and
+// notify the Platform; (3) under the lock again, mark JD Closed and drop
+// the remaining data structures. Errors from clear() and the Platform
+// teardown are joined and returned.
+Error ExecutionSession::removeJITDylib(JITDylib &JD) {
+  // Keep JD alive throughout this routine, even if all other references
+  // have been dropped.
+  JITDylibSP JDKeepAlive = &JD;
+
+  // Set JD to 'Closing' state and remove JD from the ExecutionSession.
+  runSessionLocked([&] {
+    assert(JD.State == JITDylib::Open && "JD already closed");
+    JD.State = JITDylib::Closing;
+    auto I = llvm::find(JDs, &JD);
+    assert(I != JDs.end() && "JD does not appear in session JDs");
+    JDs.erase(I);
+  });
+
+  // Clear the JITDylib. Hold on to any error while we clean up the
+  // JITDylib members below.
+  auto Err = JD.clear();
+
+  // Notify the platform of the teardown.
+  if (P)
+    Err = joinErrors(std::move(Err), P->teardownJITDylib(JD));
+
+  // Set JD to closed state. Clear remaining data structures.
+  runSessionLocked([&] {
+    assert(JD.State == JITDylib::Closing && "JD should be closing");
+    JD.State = JITDylib::Closed;
+    assert(JD.Symbols.empty() && "JD.Symbols is not empty after clear");
+    assert(JD.UnmaterializedInfos.empty() &&
+           "JD.UnmaterializedInfos is not empty after clear");
+    assert(JD.MaterializingInfos.empty() &&
+           "JD.MaterializingInfos is not empty after clear");
+    assert(JD.TrackerSymbols.empty() &&
+           "TrackerSymbols is not empty after clear");
+    JD.DefGenerators.clear();
+    JD.LinkOrder.clear();
+  });
+  return Err;
+}
+
+// Compute a depth-first pre-order traversal of the link-order graph rooted
+// at the given JITDylibs, visiting each JITDylib at most once.
+//
+// Returns an error if any reachable JITDylib is defunct. Runs under the
+// session lock of the first root's session.
+Expected<std::vector<JITDylibSP>>
+JITDylib::getDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
+  if (JDs.empty())
+    return std::vector<JITDylibSP>();
+
+  auto &ES = JDs.front()->getExecutionSession();
+  return ES.runSessionLocked([&]() -> Expected<std::vector<JITDylibSP>> {
+    DenseSet<JITDylib *> Visited;
+    std::vector<JITDylibSP> Result;
+
+    for (auto &JD : JDs) {
+
+      if (JD->State != Open)
+        return make_error<StringError>(
+            "Error building link order: " + JD->getName() + " is defunct",
+            inconvertibleErrorCode());
+      if (Visited.count(JD.get()))
+        continue;
+
+      SmallVector<JITDylibSP, 64> WorkStack;
+      WorkStack.push_back(JD);
+      Visited.insert(JD.get());
+
+      while (!WorkStack.empty()) {
+        Result.push_back(std::move(WorkStack.back()));
+        WorkStack.pop_back();
+
+        // Push children in reverse so they are popped (and hence appear in
+        // Result) in link-order. The inner JD shadows the loop variable.
+        for (auto &KV : llvm::reverse(Result.back()->LinkOrder)) {
+          auto &JD = *KV.first;
+          if (Visited.count(&JD))
+            continue;
+          Visited.insert(&JD);
+          WorkStack.push_back(&JD);
+        }
+      }
+    }
+    return Result;
+  });
+}
+
+// Compute the DFS link order for the given roots and return it reversed
+// (leaves first). Propagates any error from getDFSLinkOrder.
+Expected<std::vector<JITDylibSP>>
+JITDylib::getReverseDFSLinkOrder(ArrayRef<JITDylibSP> JDs) {
+  auto Ordered = getDFSLinkOrder(JDs);
+  if (!Ordered)
+    return Ordered.takeError();
+  std::reverse(Ordered->begin(), Ordered->end());
+  return Ordered;
+}
+
+// Convenience overload: DFS link order rooted at this JITDylib only.
+Expected<std::vector<JITDylibSP>> JITDylib::getDFSLinkOrder() {
+  return getDFSLinkOrder({this});
+}
+
+// Convenience overload: reverse DFS link order rooted at this JITDylib only.
+Expected<std::vector<JITDylibSP>> JITDylib::getReverseDFSLinkOrder() {
+  return getReverseDFSLinkOrder({this});
+}
+
+// Asynchronously look up the flags for the given symbols, without
+// triggering materialization, and deliver the result via OnComplete.
+void ExecutionSession::lookupFlags(
+    LookupKind K, JITDylibSearchOrder SearchOrder, SymbolLookupSet LookupSet,
+    unique_function<void(Expected<SymbolFlagsMap>)> OnComplete) {
+
+  OL_applyQueryPhase1(std::make_unique<InProgressLookupFlagsState>(
+                          K, std::move(SearchOrder), std::move(LookupSet),
+                          std::move(OnComplete)),
+                      Error::success());
+}
+
+// Blocking variant of lookupFlags: issues the asynchronous lookup and waits
+// on a promise/future pair for the result.
+// MSVCPExpected is presumably a workaround for MSVC's std::promise
+// requirements on the stored type -- see MSVCErrorWorkarounds.h.
+Expected<SymbolFlagsMap>
+ExecutionSession::lookupFlags(LookupKind K, JITDylibSearchOrder SearchOrder,
+                              SymbolLookupSet LookupSet) {
+
+  std::promise<MSVCPExpected<SymbolFlagsMap>> ResultP;
+  OL_applyQueryPhase1(std::make_unique<InProgressLookupFlagsState>(
+                          K, std::move(SearchOrder), std::move(LookupSet),
+                          [&ResultP](Expected<SymbolFlagsMap> Result) {
+                            ResultP.set_value(std::move(Result));
+                          }),
+                      Error::success());
+
+  auto ResultF = ResultP.get_future();
+  return ResultF.get();
+}
+
+// Core asynchronous lookup: build an AsynchronousSymbolQuery for the
+// requested state and start lookup phase 1.
+void ExecutionSession::lookup(
+    LookupKind K, const JITDylibSearchOrder &SearchOrder,
+    SymbolLookupSet Symbols, SymbolState RequiredState,
+    SymbolsResolvedCallback NotifyComplete,
+    RegisterDependenciesFunction RegisterDependencies) {
+
+  LLVM_DEBUG({
+    runSessionLocked([&]() {
+      dbgs() << "Looking up " << Symbols << " in " << SearchOrder
+             << " (required state: " << RequiredState << ")\n";
+    });
+  });
+
+  // lookup can be re-entered recursively if running on a single thread. Run any
+  // outstanding MUs in case this query depends on them, otherwise this lookup
+  // will starve waiting for a result from an MU that is stuck in the queue.
+  dispatchOutstandingMUs();
+
+  // The query reads Unresolved before it is moved into the lookup state
+  // below, so the ordering here matters.
+  auto Unresolved = std::move(Symbols);
+  auto Q = std::make_shared<AsynchronousSymbolQuery>(Unresolved, RequiredState,
+                                                     std::move(NotifyComplete));
+
+  auto IPLS = std::make_unique<InProgressFullLookupState>(
+      K, SearchOrder, std::move(Unresolved), RequiredState, std::move(Q),
+      std::move(RegisterDependencies));
+
+  OL_applyQueryPhase1(std::move(IPLS), Error::success());
+}
+
+// Blocking lookup. In threaded builds the result is transported through a
+// promise/future; in single-threaded builds the callback is expected to run
+// synchronously during the lookup call, so the result is captured directly.
+Expected<SymbolMap>
+ExecutionSession::lookup(const JITDylibSearchOrder &SearchOrder,
+                         const SymbolLookupSet &Symbols, LookupKind K,
+                         SymbolState RequiredState,
+                         RegisterDependenciesFunction RegisterDependencies) {
+#if LLVM_ENABLE_THREADS
+  // In the threaded case we use promises to return the results.
+  std::promise<SymbolMap> PromisedResult;
+  Error ResolutionError = Error::success();
+
+  auto NotifyComplete = [&](Expected<SymbolMap> R) {
+    if (R)
+      PromisedResult.set_value(std::move(*R));
+    else {
+      // ErrorAsOutParameter marks the error checked even on the error path.
+      ErrorAsOutParameter _(&ResolutionError);
+      ResolutionError = R.takeError();
+      // Still satisfy the future so the waiter below wakes up.
+      PromisedResult.set_value(SymbolMap());
+    }
+  };
+
+#else
+  SymbolMap Result;
+  Error ResolutionError = Error::success();
+
+  auto NotifyComplete = [&](Expected<SymbolMap> R) {
+    ErrorAsOutParameter _(&ResolutionError);
+    if (R)
+      Result = std::move(*R);
+    else
+      ResolutionError = R.takeError();
+  };
+#endif
+
+  // Perform the asynchronous lookup.
+  lookup(K, SearchOrder, Symbols, RequiredState, NotifyComplete,
+         RegisterDependencies);
+
+#if LLVM_ENABLE_THREADS
+  auto ResultFuture = PromisedResult.get_future();
+  auto Result = ResultFuture.get();
+
+  if (ResolutionError)
+    return std::move(ResolutionError);
+
+  return std::move(Result);
+
+#else
+  if (ResolutionError)
+    return std::move(ResolutionError);
+
+  return Result;
+#endif
+}
+
+// Blocking lookup of a single symbol; unwraps the one-entry result map.
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(const JITDylibSearchOrder &SearchOrder,
+                         SymbolStringPtr Name, SymbolState RequiredState) {
+  SymbolLookupSet Names({Name});
+
+  if (auto ResultMap = lookup(SearchOrder, std::move(Names), LookupKind::Static,
+                              RequiredState, NoDependenciesToRegister)) {
+    assert(ResultMap->size() == 1 && "Unexpected number of results");
+    assert(ResultMap->count(Name) && "Missing result for symbol");
+    return std::move(ResultMap->begin()->second);
+  } else
+    return ResultMap.takeError();
+}
+
+// Convenience overload: build a JITDylibSearchOrder from a plain list of
+// JITDylibs, then delegate.
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(ArrayRef<JITDylib *> SearchOrder, SymbolStringPtr Name,
+                         SymbolState RequiredState) {
+  return lookup(makeJITDylibSearchOrder(SearchOrder), Name, RequiredState);
+}
+
+// Convenience overload: intern the string name in the session's symbol
+// string pool, then delegate.
+Expected<JITEvaluatedSymbol>
+ExecutionSession::lookup(ArrayRef<JITDylib *> SearchOrder, StringRef Name,
+                         SymbolState RequiredState) {
+  return lookup(SearchOrder, intern(Name), RequiredState);
+}
+
+// Resolve the tag symbols named in WFs within JD (weakly referenced, so
+// missing tags are skipped), then register each resolved tag address with
+// its handler implementation. Fails if any tag address is already
+// registered.
+Error ExecutionSession::registerJITDispatchHandlers(
+    JITDylib &JD, JITDispatchHandlerAssociationMap WFs) {
+
+  auto TagAddrs = lookup({{&JD, JITDylibLookupFlags::MatchAllSymbols}},
+                         SymbolLookupSet::fromMapKeys(
+                             WFs, SymbolLookupFlags::WeaklyReferencedSymbol));
+  if (!TagAddrs)
+    return TagAddrs.takeError();
+
+  // Associate tag addresses with implementations.
+  std::lock_guard<std::mutex> Lock(JITDispatchHandlersMutex);
+  for (auto &KV : *TagAddrs) {
+    auto TagAddr = KV.second.getAddress();
+    if (JITDispatchHandlers.count(TagAddr))
+      return make_error<StringError>("Tag " + formatv("{0:x16}", TagAddr) +
+                                         " (for " + *KV.first +
+                                         ") already registered",
+                                     inconvertibleErrorCode());
+    auto I = WFs.find(KV.first);
+    assert(I != WFs.end() && I->second &&
+           "JITDispatchHandler implementation missing");
+    // Handlers are held by shared_ptr so in-flight calls can keep one alive.
+    JITDispatchHandlers[KV.second.getAddress()] =
+        std::make_shared<JITDispatchHandlerFunction>(std::move(I->second));
+    LLVM_DEBUG({
+      dbgs() << "Associated function tag \"" << *KV.first << "\" ("
+             << formatv("{0:x}", KV.second.getAddress()) << ") with handler\n";
+    });
+  }
+  return Error::success();
+}
+
+// Invoke the handler registered for HandlerFnTagAddr with ArgBuffer, or
+// send an out-of-band error if no handler is registered for the tag.
+void ExecutionSession::runJITDispatchHandler(
+    SendResultFunction SendResult, JITTargetAddress HandlerFnTagAddr,
+    ArrayRef<char> ArgBuffer) {
+
+  // Copy the shared_ptr under the lock, then run the handler outside it so
+  // long-running handlers do not block other dispatches.
+  std::shared_ptr<JITDispatchHandlerFunction> F;
+  {
+    std::lock_guard<std::mutex> Lock(JITDispatchHandlersMutex);
+    auto I = JITDispatchHandlers.find(HandlerFnTagAddr);
+    if (I != JITDispatchHandlers.end())
+      F = I->second;
+  }
+
+  if (F)
+    (*F)(std::move(SendResult), ArgBuffer.data(), ArgBuffer.size());
+  else
+    SendResult(shared::WrapperFunctionResult::createOutOfBandError(
+        ("No function registered for tag " +
+         formatv("{0:x16}", HandlerFnTagAddr))
+            .str()));
+}
+
+// Dump every JITDylib in the session, under the session lock.
+void ExecutionSession::dump(raw_ostream &OS) {
+  runSessionLocked([&]() {
+    for (auto &JD : JDs)
+      JD->dump(OS);
+  });
+}
+
+// Drain the OutstandingMUs queue, dispatching one MaterializationTask per
+// entry. Each entry is popped under OutstandingMUsMutex but dispatched
+// outside it, since dispatchTask may re-enter and enqueue further MUs.
+void ExecutionSession::dispatchOutstandingMUs() {
+  LLVM_DEBUG(dbgs() << "Dispatching MaterializationUnits...\n");
+  while (true) {
+    Optional<std::pair<std::unique_ptr<MaterializationUnit>,
+                       std::unique_ptr<MaterializationResponsibility>>>
+        JMU;
+
+    {
+      std::lock_guard<std::recursive_mutex> Lock(OutstandingMUsMutex);
+      if (!OutstandingMUs.empty()) {
+        JMU.emplace(std::move(OutstandingMUs.back()));
+        OutstandingMUs.pop_back();
+      }
+    }
+
+    // Queue exhausted.
+    if (!JMU)
+      break;
+
+    assert(JMU->first && "No MU?");
+    LLVM_DEBUG(dbgs() << " Dispatching \"" << JMU->first->getName() << "\"\n");
+    dispatchTask(std::make_unique<MaterializationTask>(std::move(JMU->first),
+                                                       std::move(JMU->second)));
+  }
+  LLVM_DEBUG(dbgs() << "Done dispatching MaterializationUnits.\n");
+}
+
+// Remove a resource tracker: mark it defunct and strip its symbols from its
+// JITDylib under the session lock, then (outside the lock) notify resource
+// managers in reverse registration order and fail any queries that were
+// waiting on the removed symbols.
+Error ExecutionSession::removeResourceTracker(ResourceTracker &RT) {
+  LLVM_DEBUG({
+    dbgs() << "In " << RT.getJITDylib().getName() << " removing tracker "
+           << formatv("{0:x}", RT.getKeyUnsafe()) << "\n";
+  });
+  std::vector<ResourceManager *> CurrentResourceManagers;
+
+  JITDylib::AsynchronousSymbolQuerySet QueriesToFail;
+  std::shared_ptr<SymbolDependenceMap> FailedSymbols;
+
+  runSessionLocked([&] {
+    // Snapshot the manager list so notifications run outside the lock.
+    CurrentResourceManagers = ResourceManagers;
+    RT.makeDefunct();
+    std::tie(QueriesToFail, FailedSymbols) = RT.getJITDylib().removeTracker(RT);
+  });
+
+  Error Err = Error::success();
+
+  for (auto *L : reverse(CurrentResourceManagers))
+    Err =
+        joinErrors(std::move(Err), L->handleRemoveResources(RT.getKeyUnsafe()));
+
+  for (auto &Q : QueriesToFail)
+    Q->handleFailed(make_error<FailedToMaterialize>(FailedSymbols));
+
+  return Err;
+}
+
+// Transfer all resources owned by SrcRT to DstRT (same JITDylib required),
+// marking SrcRT defunct and notifying resource managers in reverse
+// registration order. A no-op self-transfer leaves SrcRT valid.
+void ExecutionSession::transferResourceTracker(ResourceTracker &DstRT,
+                                               ResourceTracker &SrcRT) {
+  LLVM_DEBUG({
+    // Fix: debug message previously misspelled "transfering".
+    dbgs() << "In " << SrcRT.getJITDylib().getName()
+           << " transferring resources from tracker "
+           << formatv("{0:x}", SrcRT.getKeyUnsafe()) << " to tracker "
+           << formatv("{0:x}", DstRT.getKeyUnsafe()) << "\n";
+  });
+
+  // No-op transfers are allowed and do not invalidate the source.
+  if (&DstRT == &SrcRT)
+    return;
+
+  assert(&DstRT.getJITDylib() == &SrcRT.getJITDylib() &&
+         "Can't transfer resources between JITDylibs");
+  runSessionLocked([&]() {
+    SrcRT.makeDefunct();
+    auto &JD = DstRT.getJITDylib();
+    JD.transferTracker(DstRT, SrcRT);
+    for (auto *L : reverse(ResourceManagers))
+      L->handleTransferResources(DstRT.getKeyUnsafe(), SrcRT.getKeyUnsafe());
+  });
+}
+
+// Called when the last reference to a tracker is dropped: if the tracker
+// was never explicitly removed, fold its resources back into its
+// JITDylib's default tracker.
+void ExecutionSession::destroyResourceTracker(ResourceTracker &RT) {
+  runSessionLocked([&]() {
+    LLVM_DEBUG({
+      dbgs() << "In " << RT.getJITDylib().getName() << " destroying tracker "
+             << formatv("{0:x}", RT.getKeyUnsafe()) << "\n";
+    });
+    if (!RT.isDefunct())
+      transferResourceTracker(*RT.getJITDylib().getDefaultResourceTracker(),
+                              RT);
+  });
+}
+
+// (Impl-locked) Prune Candidates against JD's symbol table.
+//
+// Each candidate defined in JD is removed from Candidates; non-exported
+// matches under MatchExportedSymbolsOnly are instead moved to NonCandidates
+// (if provided). Returns an error for strongly-referenced
+// materialization-side-effects-only symbols and for symbols already in the
+// error state.
+Error ExecutionSession::IL_updateCandidatesFor(
+    JITDylib &JD, JITDylibLookupFlags JDLookupFlags,
+    SymbolLookupSet &Candidates, SymbolLookupSet *NonCandidates) {
+  return Candidates.forEachWithRemoval(
+      [&](const SymbolStringPtr &Name,
+          SymbolLookupFlags SymLookupFlags) -> Expected<bool> {
+        // Search for the symbol. If not found then continue without
+        // removal.
+        auto SymI = JD.Symbols.find(Name);
+        if (SymI == JD.Symbols.end())
+          return false;
+
+        // If this is a non-exported symbol and we're matching exported
+        // symbols only then remove this symbol from the candidates list.
+        //
+        // If we're tracking non-candidates then add this to the non-candidate
+        // list.
+        if (!SymI->second.getFlags().isExported() &&
+            JDLookupFlags == JITDylibLookupFlags::MatchExportedSymbolsOnly) {
+          if (NonCandidates)
+            NonCandidates->add(Name, SymLookupFlags);
+          return true;
+        }
+
+        // If we match against a materialization-side-effects only symbol
+        // then make sure it is weakly-referenced. Otherwise bail out with
+        // an error.
+        // FIXME: Use a "materialization-side-effects-only symbols must be
+        // weakly referenced" specific error here to reduce confusion.
+        if (SymI->second.getFlags().hasMaterializationSideEffectsOnly() &&
+            SymLookupFlags != SymbolLookupFlags::WeaklyReferencedSymbol)
+          return make_error<SymbolsNotFound>(getSymbolStringPool(),
+                                             SymbolNameVector({Name}));
+
+        // If we matched against this symbol but it is in the error state
+        // then bail out and treat it as a failure to materialize.
+        if (SymI->second.getFlags().hasError()) {
+          auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+          (*FailedSymbolsMap)[&JD] = {Name};
+          return make_error<FailedToMaterialize>(std::move(FailedSymbolsMap));
+        }
+
+        // Otherwise this is a match. Remove it from the candidate set.
+        return true;
+      });
+}
+
+// Phase 1 of a lookup: walk the search order, filter out symbols that are
+// already defined in each JITDylib, and run that JITDylib's definition
+// generators for the remainder. A generator may capture the LookupState to
+// suspend the lookup; in that case this function returns early and is
+// re-entered (with the same IPLS) when the generator resumes it. On success
+// IPLS->complete() moves the lookup to phase 2; on any error IPLS->fail()
+// is invoked instead, so IPLS is always consumed by this function.
+void ExecutionSession::OL_applyQueryPhase1(
+    std::unique_ptr<InProgressLookupState> IPLS, Error Err) {
+
+  LLVM_DEBUG({
+    dbgs() << "Entering OL_applyQueryPhase1:\n"
+           << "  Lookup kind: " << IPLS->K << "\n"
+           << "  Search order: " << IPLS->SearchOrder
+           << ", Current index = " << IPLS->CurSearchOrderIndex
+           << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+           << "  Lookup set: " << IPLS->LookupSet << "\n"
+           << "  Definition generator candidates: "
+           << IPLS->DefGeneratorCandidates << "\n"
+           << "  Definition generator non-candidates: "
+           << IPLS->DefGeneratorNonCandidates << "\n";
+  });
+
+  // FIXME: We should attach the query as we go: This provides a result in a
+  // single pass in the common case where all symbols have already reached the
+  // required state. The query could be detached again in the 'fail' method on
+  // IPLS. Phase 2 would be reduced to collecting and dispatching the MUs.
+
+  while (IPLS->CurSearchOrderIndex != IPLS->SearchOrder.size()) {
+
+    // If we've been handed an error or received one back from a generator then
+    // fail the query. We don't need to unlink: At this stage the query hasn't
+    // actually been lodged.
+    if (Err)
+      return IPLS->fail(std::move(Err));
+
+    // Get the next JITDylib and lookup flags.
+    auto &KV = IPLS->SearchOrder[IPLS->CurSearchOrderIndex];
+    auto &JD = *KV.first;
+    auto JDLookupFlags = KV.second;
+
+    LLVM_DEBUG({
+      dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+             << ") with lookup set " << IPLS->LookupSet << ":\n";
+    });
+
+    // If we've just reached a new JITDylib then perform some setup.
+    if (IPLS->NewJITDylib) {
+
+      // Acquire the generator lock for this JITDylib.
+      IPLS->GeneratorLock = std::unique_lock<std::mutex>(JD.GeneratorsMutex);
+
+      // Add any non-candidates from the last JITDylib (if any) back on to the
+      // list of definition candidates for this JITDylib, reset definition
+      // non-candidates to the empty set.
+      SymbolLookupSet Tmp;
+      std::swap(IPLS->DefGeneratorNonCandidates, Tmp);
+      IPLS->DefGeneratorCandidates.append(std::move(Tmp));
+
+      LLVM_DEBUG({
+        dbgs() << "  First time visiting " << JD.getName()
+               << ", resetting candidate sets and building generator stack\n";
+      });
+
+      // Build the definition generator stack for this JITDylib.
+      runSessionLocked([&] {
+        IPLS->CurDefGeneratorStack.reserve(JD.DefGenerators.size());
+        for (auto &DG : reverse(JD.DefGenerators))
+          IPLS->CurDefGeneratorStack.push_back(DG);
+      });
+
+      // Flag that we've done our initialization.
+      IPLS->NewJITDylib = false;
+    }
+
+    // Remove any generation candidates that are already defined (and match) in
+    // this JITDylib.
+    runSessionLocked([&] {
+      // Update the list of candidates (and non-candidates) for definition
+      // generation.
+      LLVM_DEBUG(dbgs() << "  Updating candidate set...\n");
+      Err = IL_updateCandidatesFor(
+          JD, JDLookupFlags, IPLS->DefGeneratorCandidates,
+          JD.DefGenerators.empty() ? nullptr
+                                   : &IPLS->DefGeneratorNonCandidates);
+      LLVM_DEBUG({
+        dbgs() << "  Remaining candidates = " << IPLS->DefGeneratorCandidates
+               << "\n";
+      });
+    });
+
+    // If we encountered an error while filtering generation candidates then
+    // bail out.
+    if (Err)
+      return IPLS->fail(std::move(Err));
+
+    /// Apply any definition generators on the stack.
+    // NOTE(review): the nested LLVM_DEBUG calls below are redundant (already
+    // inside an LLVM_DEBUG block) but harmless.
+    LLVM_DEBUG({
+      if (IPLS->CurDefGeneratorStack.empty())
+        LLVM_DEBUG(dbgs() << "  No generators to run for this JITDylib.\n");
+      else if (IPLS->DefGeneratorCandidates.empty())
+        LLVM_DEBUG(dbgs() << "  No candidates to generate.\n");
+      else
+        dbgs() << "  Running " << IPLS->CurDefGeneratorStack.size()
+               << " remaining generators for "
+               << IPLS->DefGeneratorCandidates.size() << " candidates\n";
+    });
+    while (!IPLS->CurDefGeneratorStack.empty() &&
+           !IPLS->DefGeneratorCandidates.empty()) {
+      // Generators are held by weak_ptr; lock() fails if the generator was
+      // destroyed while this lookup was in flight.
+      auto DG = IPLS->CurDefGeneratorStack.back().lock();
+      IPLS->CurDefGeneratorStack.pop_back();
+
+      if (!DG)
+        return IPLS->fail(make_error<StringError>(
+            "DefinitionGenerator removed while lookup in progress",
+            inconvertibleErrorCode()));
+
+      auto K = IPLS->K;
+      auto &LookupSet = IPLS->DefGeneratorCandidates;
+
+      // Run the generator. If the generator takes ownership of QA then this
+      // will break the loop.
+      {
+        LLVM_DEBUG(dbgs() << "  Attempting to generate " << LookupSet << "\n");
+        LookupState LS(std::move(IPLS));
+        Err = DG->tryToGenerate(LS, K, JD, JDLookupFlags, LookupSet);
+        IPLS = std::move(LS.IPLS);
+      }
+
+      // If there was an error then fail the query.
+      if (Err) {
+        LLVM_DEBUG({
+          dbgs() << "  Error attempting to generate " << LookupSet << "\n";
+        });
+        assert(IPLS && "LS cannot be retained if error is returned");
+        return IPLS->fail(std::move(Err));
+      }
+
+      // Otherwise if QA was captured then break the loop.
+      if (!IPLS) {
+        LLVM_DEBUG(
+            { dbgs() << "  LookupState captured. Exiting phase1 for now.\n"; });
+        return;
+      }
+
+      // Otherwise if we're continuing around the loop then update candidates
+      // for the next round.
+      runSessionLocked([&] {
+        LLVM_DEBUG(dbgs() << "  Updating candidate set post-generation\n");
+        Err = IL_updateCandidatesFor(
+            JD, JDLookupFlags, IPLS->DefGeneratorCandidates,
+            JD.DefGenerators.empty() ? nullptr
+                                     : &IPLS->DefGeneratorNonCandidates);
+      });
+
+      // If updating candidates failed then fail the query.
+      if (Err) {
+        LLVM_DEBUG(dbgs() << "  Error encountered while updating candidates\n");
+        return IPLS->fail(std::move(Err));
+      }
+    }
+
+    if (IPLS->DefGeneratorCandidates.empty() &&
+        IPLS->DefGeneratorNonCandidates.empty()) {
+      // Early out if there are no remaining symbols.
+      LLVM_DEBUG(dbgs() << "All symbols matched.\n");
+      IPLS->CurSearchOrderIndex = IPLS->SearchOrder.size();
+      break;
+    } else {
+      // If we get here then we've moved on to the next JITDylib with candidates
+      // remaining.
+      LLVM_DEBUG(dbgs() << "Phase 1 moving to next JITDylib.\n");
+      ++IPLS->CurSearchOrderIndex;
+      IPLS->NewJITDylib = true;
+    }
+  }
+
+  // Remove any weakly referenced candidates that could not be found/generated.
+  IPLS->DefGeneratorCandidates.remove_if(
+      [](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+        return SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol;
+      });
+
+  // If we get here then we've finished searching all JITDylibs.
+  // If we matched all symbols then move to phase 2, otherwise fail the query
+  // with a SymbolsNotFound error.
+  if (IPLS->DefGeneratorCandidates.empty()) {
+    LLVM_DEBUG(dbgs() << "Phase 1 succeeded.\n");
+    IPLS->complete(std::move(IPLS));
+  } else {
+    LLVM_DEBUG(dbgs() << "Phase 1 failed with unresolved symbols.\n");
+    IPLS->fail(make_error<SymbolsNotFound>(
+        getSymbolStringPool(), IPLS->DefGeneratorCandidates.getSymbolNames()));
+  }
+}
+
+// Phase 2 of a lookup: under the session lock, attach the query Q to every
+// matched symbol, collect the MaterializationUnits that must run to satisfy
+// it, and then dispatch those MUs outside the lock. Symbols already in Q's
+// required state are reported to the query immediately; unmatched
+// weakly-referenced symbols are dropped from the query rather than treated
+// as errors. On any lodging error all state changes are rolled back (the
+// query is detached and the collected MUs are re-attached to their symbols).
+void ExecutionSession::OL_completeLookup(
+    std::unique_ptr<InProgressLookupState> IPLS,
+    std::shared_ptr<AsynchronousSymbolQuery> Q,
+    RegisterDependenciesFunction RegisterDependencies) {
+
+  LLVM_DEBUG({
+    dbgs() << "Entering OL_completeLookup:\n"
+           << "  Lookup kind: " << IPLS->K << "\n"
+           << "  Search order: " << IPLS->SearchOrder
+           << ", Current index = " << IPLS->CurSearchOrderIndex
+           << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+           << "  Lookup set: " << IPLS->LookupSet << "\n"
+           << "  Definition generator candidates: "
+           << IPLS->DefGeneratorCandidates << "\n"
+           << "  Definition generator non-candidates: "
+           << IPLS->DefGeneratorNonCandidates << "\n";
+  });
+
+  bool QueryComplete = false;
+  DenseMap<JITDylib *, JITDylib::UnmaterializedInfosList> CollectedUMIs;
+
+  auto LodgingErr = runSessionLocked([&]() -> Error {
+    for (auto &KV : IPLS->SearchOrder) {
+      auto &JD = *KV.first;
+      auto JDLookupFlags = KV.second;
+      LLVM_DEBUG({
+        dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+               << ") with lookup set " << IPLS->LookupSet << ":\n";
+      });
+
+      auto Err = IPLS->LookupSet.forEachWithRemoval(
+          [&](const SymbolStringPtr &Name,
+              SymbolLookupFlags SymLookupFlags) -> Expected<bool> {
+            LLVM_DEBUG({
+              dbgs() << "  Attempting to match \"" << Name << "\" ("
+                     << SymLookupFlags << ")... ";
+            });
+
+            /// Search for the symbol. If not found then continue without
+            /// removal.
+            auto SymI = JD.Symbols.find(Name);
+            if (SymI == JD.Symbols.end()) {
+              LLVM_DEBUG(dbgs() << "skipping: not present\n");
+              return false;
+            }
+
+            // If this is a non-exported symbol and we're matching exported
+            // symbols only then skip this symbol without removal.
+            if (!SymI->second.getFlags().isExported() &&
+                JDLookupFlags ==
+                    JITDylibLookupFlags::MatchExportedSymbolsOnly) {
+              LLVM_DEBUG(dbgs() << "skipping: not exported\n");
+              return false;
+            }
+
+            // If we match against a materialization-side-effects only symbol
+            // then make sure it is weakly-referenced. Otherwise bail out with
+            // an error.
+            // FIXME: Use a "materialization-side-effects-only symbols must be
+            // weakly referenced" specific error here to reduce confusion.
+            if (SymI->second.getFlags().hasMaterializationSideEffectsOnly() &&
+                SymLookupFlags != SymbolLookupFlags::WeaklyReferencedSymbol) {
+              LLVM_DEBUG({
+                dbgs() << "error: "
+                          "required, but symbol is has-side-effects-only\n";
+              });
+              return make_error<SymbolsNotFound>(getSymbolStringPool(),
+                                                 SymbolNameVector({Name}));
+            }
+
+            // If we matched against this symbol but it is in the error state
+            // then bail out and treat it as a failure to materialize.
+            if (SymI->second.getFlags().hasError()) {
+              LLVM_DEBUG(dbgs() << "error: symbol is in error state\n");
+              auto FailedSymbolsMap = std::make_shared<SymbolDependenceMap>();
+              (*FailedSymbolsMap)[&JD] = {Name};
+              return make_error<FailedToMaterialize>(
+                  std::move(FailedSymbolsMap));
+            }
+
+            // Otherwise this is a match.
+
+            // If this symbol is already in the required state then notify the
+            // query, remove the symbol and continue.
+            if (SymI->second.getState() >= Q->getRequiredState()) {
+              LLVM_DEBUG(dbgs()
+                         << "matched, symbol already in required state\n");
+              Q->notifySymbolMetRequiredState(Name, SymI->second.getSymbol());
+              return true;
+            }
+
+            // Otherwise this symbol does not yet meet the required state. Check
+            // whether it has a materializer attached, and if so prepare to run
+            // it.
+            if (SymI->second.hasMaterializerAttached()) {
+              assert(SymI->second.getAddress() == 0 &&
+                     "Symbol not resolved but already has address?");
+              auto UMII = JD.UnmaterializedInfos.find(Name);
+              assert(UMII != JD.UnmaterializedInfos.end() &&
+                     "Lazy symbol should have UnmaterializedInfo");
+
+              auto UMI = UMII->second;
+              assert(UMI->MU && "Materializer should not be null");
+              assert(UMI->RT && "Tracker should not be null");
+              LLVM_DEBUG({
+                dbgs() << "matched, preparing to dispatch MU@" << UMI->MU.get()
+                       << " (" << UMI->MU->getName() << ")\n";
+              });
+
+              // Move all symbols associated with this MaterializationUnit into
+              // materializing state.
+              for (auto &KV : UMI->MU->getSymbols()) {
+                auto SymK = JD.Symbols.find(KV.first);
+                assert(SymK != JD.Symbols.end() &&
+                       "No entry for symbol covered by MaterializationUnit");
+                SymK->second.setMaterializerAttached(false);
+                SymK->second.setState(SymbolState::Materializing);
+                JD.UnmaterializedInfos.erase(KV.first);
+              }
+
+              // Add MU to the list of MaterializationUnits to be materialized.
+              CollectedUMIs[&JD].push_back(std::move(UMI));
+            } else
+              LLVM_DEBUG(dbgs() << "matched, registering query");
+
+            // Add the query to the PendingQueries list and continue, deleting
+            // the element from the lookup set.
+            assert(SymI->second.getState() != SymbolState::NeverSearched &&
+                   SymI->second.getState() != SymbolState::Ready &&
+                   "By this line the symbol should be materializing");
+            auto &MI = JD.MaterializingInfos[Name];
+            MI.addQuery(Q);
+            Q->addQueryDependence(JD, Name);
+
+            return true;
+          });
+
+      // Handle failure.
+      if (Err) {
+
+        LLVM_DEBUG({
+          dbgs() << "Lookup failed. Detaching query and replacing MUs.\n";
+        });
+
+        // Detach the query.
+        Q->detach();
+
+        // Replace the MUs.
+        // (Roll back the symbol state changes made above so the collected
+        // materializers can run for some future query.)
+        for (auto &KV : CollectedUMIs) {
+          auto &JD = *KV.first;
+          for (auto &UMI : KV.second)
+            for (auto &KV2 : UMI->MU->getSymbols()) {
+              assert(!JD.UnmaterializedInfos.count(KV2.first) &&
+                     "Unexpected materializer in map");
+              auto SymI = JD.Symbols.find(KV2.first);
+              assert(SymI != JD.Symbols.end() && "Missing symbol entry");
+              assert(SymI->second.getState() == SymbolState::Materializing &&
+                     "Can not replace symbol that is not materializing");
+              assert(!SymI->second.hasMaterializerAttached() &&
+                     "MaterializerAttached flag should not be set");
+              SymI->second.setMaterializerAttached(true);
+              JD.UnmaterializedInfos[KV2.first] = UMI;
+            }
+        }
+
+        return Err;
+      }
+    }
+
+    LLVM_DEBUG(dbgs() << "Stripping unmatched weakly-referenced symbols\n");
+    IPLS->LookupSet.forEachWithRemoval(
+        [&](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+          if (SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol) {
+            Q->dropSymbol(Name);
+            return true;
+          } else
+            return false;
+        });
+
+    if (!IPLS->LookupSet.empty()) {
+      LLVM_DEBUG(dbgs() << "Failing due to unresolved symbols\n");
+      return make_error<SymbolsNotFound>(getSymbolStringPool(),
+                                         IPLS->LookupSet.getSymbolNames());
+    }
+
+    // Record whether the query completed.
+    QueryComplete = Q->isComplete();
+
+    LLVM_DEBUG({
+      dbgs() << "Query successfully "
+             << (QueryComplete ? "completed" : "lodged") << "\n";
+    });
+
+    // Move the collected MUs to the OutstandingMUs list.
+    if (!CollectedUMIs.empty()) {
+      std::lock_guard<std::recursive_mutex> Lock(OutstandingMUsMutex);
+
+      LLVM_DEBUG(dbgs() << "Adding MUs to dispatch:\n");
+      for (auto &KV : CollectedUMIs) {
+        LLVM_DEBUG({
+          auto &JD = *KV.first;
+          dbgs() << "  For " << JD.getName() << ": Adding " << KV.second.size()
+                 << " MUs.\n";
+        });
+        for (auto &UMI : KV.second) {
+          auto MR = createMaterializationResponsibility(
+              *UMI->RT, std::move(UMI->MU->SymbolFlags),
+              std::move(UMI->MU->InitSymbol));
+          OutstandingMUs.push_back(
+              std::make_pair(std::move(UMI->MU), std::move(MR)));
+        }
+      }
+    } else
+      LLVM_DEBUG(dbgs() << "No MUs to dispatch.\n");
+
+    if (RegisterDependencies && !Q->QueryRegistrations.empty()) {
+      LLVM_DEBUG(dbgs() << "Registering dependencies\n");
+      RegisterDependencies(Q->QueryRegistrations);
+    } else
+      LLVM_DEBUG(dbgs() << "No dependencies to register\n");
+
+    return Error::success();
+  });
+
+  if (LodgingErr) {
+    LLVM_DEBUG(dbgs() << "Failing query\n");
+    Q->detach();
+    Q->handleFailed(std::move(LodgingErr));
+    return;
+  }
+
+  if (QueryComplete) {
+    LLVM_DEBUG(dbgs() << "Completing query\n");
+    Q->handleComplete(*this);
+  }
+
+  // Dispatch collected MUs outside the session lock.
+  dispatchOutstandingMUs();
+}
+
+// Complete a lookup-flags-only query: no materialization is triggered and no
+// query is lodged; we simply collect the flags of every matching symbol.
+// Weakly-referenced symbols that were not found are silently dropped; any
+// other unresolved symbol turns the whole query into a SymbolsNotFound
+// error. The result (or error) is handed to OnComplete outside the lock.
+void ExecutionSession::OL_completeLookupFlags(
+    std::unique_ptr<InProgressLookupState> IPLS,
+    unique_function<void(Expected<SymbolFlagsMap>)> OnComplete) {
+
+  auto Result = runSessionLocked([&]() -> Expected<SymbolFlagsMap> {
+    LLVM_DEBUG({
+      dbgs() << "Entering OL_completeLookupFlags:\n"
+             << "  Lookup kind: " << IPLS->K << "\n"
+             << "  Search order: " << IPLS->SearchOrder
+             << ", Current index = " << IPLS->CurSearchOrderIndex
+             << (IPLS->NewJITDylib ? " (entering new JITDylib)" : "") << "\n"
+             << "  Lookup set: " << IPLS->LookupSet << "\n"
+             << "  Definition generator candidates: "
+             << IPLS->DefGeneratorCandidates << "\n"
+             << "  Definition generator non-candidates: "
+             << IPLS->DefGeneratorNonCandidates << "\n";
+    });
+
+    SymbolFlagsMap Result;
+
+    // Attempt to find flags for each symbol.
+    for (auto &KV : IPLS->SearchOrder) {
+      auto &JD = *KV.first;
+      auto JDLookupFlags = KV.second;
+      LLVM_DEBUG({
+        dbgs() << "Visiting \"" << JD.getName() << "\" (" << JDLookupFlags
+               << ") with lookup set " << IPLS->LookupSet << ":\n";
+      });
+
+      IPLS->LookupSet.forEachWithRemoval([&](const SymbolStringPtr &Name,
+                                             SymbolLookupFlags SymLookupFlags) {
+        LLVM_DEBUG({
+          dbgs() << "  Attempting to match \"" << Name << "\" ("
+                 << SymLookupFlags << ")... ";
+        });
+
+        // Search for the symbol. If not found then continue without removing
+        // from the lookup set.
+        auto SymI = JD.Symbols.find(Name);
+        if (SymI == JD.Symbols.end()) {
+          LLVM_DEBUG(dbgs() << "skipping: not present\n");
+          return false;
+        }
+
+        // If this is a non-exported symbol then it doesn't match. Skip it.
+        if (!SymI->second.getFlags().isExported() &&
+            JDLookupFlags == JITDylibLookupFlags::MatchExportedSymbolsOnly) {
+          LLVM_DEBUG(dbgs() << "skipping: not exported\n");
+          return false;
+        }
+
+        LLVM_DEBUG({
+          dbgs() << "matched, \"" << Name << "\" -> " << SymI->second.getFlags()
+                 << "\n";
+        });
+        Result[Name] = SymI->second.getFlags();
+        return true;
+      });
+    }
+
+    // Remove any weakly referenced symbols that haven't been resolved.
+    IPLS->LookupSet.remove_if(
+        [](const SymbolStringPtr &Name, SymbolLookupFlags SymLookupFlags) {
+          return SymLookupFlags == SymbolLookupFlags::WeaklyReferencedSymbol;
+        });
+
+    if (!IPLS->LookupSet.empty()) {
+      LLVM_DEBUG(dbgs() << "Failing due to unresolved symbols\n");
+      return make_error<SymbolsNotFound>(getSymbolStringPool(),
+                                         IPLS->LookupSet.getSymbolNames());
+    }
+
+    LLVM_DEBUG(dbgs() << "Succeded, result = " << Result << "\n");
+    return Result;
+  });
+
+  // Run the callback on the result.
+  LLVM_DEBUG(dbgs() << "Sending result to handler.\n");
+  OnComplete(std::move(Result));
+}
+
+// Teardown hook for MaterializationResponsibility: by this point every
+// symbol in the responsibility set must already have been emitted or failed,
+// so all that remains is to unlink MR from its JITDylib.
+void ExecutionSession::OL_destroyMaterializationResponsibility(
+    MaterializationResponsibility &MR) {
+
+  assert(MR.SymbolFlags.empty() &&
+         "All symbols should have been explicitly materialized or failed");
+  MR.JD.unlinkMaterializationResponsibility(MR);
+}
+
+// Return the subset of MR's covered symbols that are actually being waited
+// on by some query (delegates to the owning JITDylib).
+SymbolNameSet ExecutionSession::OL_getRequestedSymbols(
+    const MaterializationResponsibility &MR) {
+  return MR.JD.getRequestedSymbols(MR.SymbolFlags);
+}
+
+// Record resolved addresses for Symbols in MR's JITDylib. In debug builds,
+// first verify that each resolved symbol belongs to this responsibility set,
+// is not materialization-side-effects-only, and that its resolved flags
+// equal the declared flags with the Weak and Common bits masked out of the
+// comparison.
+Error ExecutionSession::OL_notifyResolved(MaterializationResponsibility &MR,
+                                          const SymbolMap &Symbols) {
+  LLVM_DEBUG({
+    dbgs() << "In " << MR.JD.getName() << " resolving " << Symbols << "\n";
+  });
+#ifndef NDEBUG
+  for (auto &KV : Symbols) {
+    auto WeakFlags = JITSymbolFlags::Weak | JITSymbolFlags::Common;
+    auto I = MR.SymbolFlags.find(KV.first);
+    assert(I != MR.SymbolFlags.end() &&
+           "Resolving symbol outside this responsibility set");
+    assert(!I->second.hasMaterializationSideEffectsOnly() &&
+           "Can't resolve materialization-side-effects-only symbol");
+    assert((KV.second.getFlags() & ~WeakFlags) == (I->second & ~WeakFlags) &&
+           "Resolving symbol with incorrect flags");
+  }
+#endif
+
+  return MR.JD.resolve(MR, Symbols);
+}
+
+// Notify MR's JITDylib that all symbols covered by MR have been emitted. On
+// success the responsibility set is cleared; on failure the JITDylib's error
+// is forwarded to the caller unchanged and the set is left intact.
+Error ExecutionSession::OL_notifyEmitted(MaterializationResponsibility &MR) {
+  LLVM_DEBUG({
+    dbgs() << "In " << MR.JD.getName() << " emitting " << MR.SymbolFlags
+           << "\n";
+  });
+
+  auto EmitErr = MR.JD.emit(MR, MR.SymbolFlags);
+  if (EmitErr)
+    return EmitErr;
+
+  // Emission succeeded: MR no longer covers any symbols.
+  MR.SymbolFlags.clear();
+  return Error::success();
+}
+
+// Add NewSymbolFlags to MR's JITDylib in the materializing state, and extend
+// MR's responsibility set with whichever definitions the JITDylib accepted.
+// Errors from the JITDylib are returned to the caller.
+Error ExecutionSession::OL_defineMaterializing(
+    MaterializationResponsibility &MR, SymbolFlagsMap NewSymbolFlags) {
+
+  LLVM_DEBUG({
+    dbgs() << "In " << MR.JD.getName() << " defining materializing symbols "
+           << NewSymbolFlags << "\n";
+  });
+  auto AcceptedDefs = MR.JD.defineMaterializing(std::move(NewSymbolFlags));
+  if (!AcceptedDefs)
+    return AcceptedDefs.takeError();
+
+  // Claim responsibility for every accepted definition.
+  for (auto &KV : *AcceptedDefs)
+    MR.SymbolFlags.insert(KV);
+
+  return Error::success();
+}
+
+// Fail materialization for every symbol still covered by MR: build a
+// (JITDylib, symbol) worklist, fail the symbols under the session lock
+// (skipped when MR's tracker is already defunct, which leaves FailedQueries
+// empty), then notify the affected queries outside the lock.
+void ExecutionSession::OL_notifyFailed(MaterializationResponsibility &MR) {
+
+  LLVM_DEBUG({
+    dbgs() << "In " << MR.JD.getName() << " failing materialization for "
+           << MR.SymbolFlags << "\n";
+  });
+
+  JITDylib::FailedSymbolsWorklist Worklist;
+
+  for (auto &KV : MR.SymbolFlags)
+    Worklist.push_back(std::make_pair(&MR.JD, KV.first));
+  MR.SymbolFlags.clear();
+
+  if (Worklist.empty())
+    return;
+
+  JITDylib::AsynchronousSymbolQuerySet FailedQueries;
+  std::shared_ptr<SymbolDependenceMap> FailedSymbols;
+
+  runSessionLocked([&]() {
+    // If the tracker is defunct then there's nothing to do here.
+    if (MR.RT->isDefunct())
+      return;
+
+    std::tie(FailedQueries, FailedSymbols) =
+        JITDylib::failSymbols(std::move(Worklist));
+  });
+
+  // Notify queries outside the session lock.
+  for (auto &Q : FailedQueries)
+    Q->handleFailed(make_error<FailedToMaterialize>(FailedSymbols));
+}
+
+// Hand responsibility for the symbols covered by MU back to MR's JITDylib as
+// a new MaterializationUnit. MR's responsibility set shrinks by MU's symbols
+// (and MR gives up its init symbol if MU covers it).
+Error ExecutionSession::OL_replace(MaterializationResponsibility &MR,
+                                   std::unique_ptr<MaterializationUnit> MU) {
+  for (auto &KV : MU->getSymbols()) {
+    assert(MR.SymbolFlags.count(KV.first) &&
+           "Replacing definition outside this responsibility set");
+    MR.SymbolFlags.erase(KV.first);
+  }
+
+  if (MU->getInitializerSymbol() == MR.InitSymbol)
+    MR.InitSymbol = nullptr;
+
+  LLVM_DEBUG(MR.JD.getExecutionSession().runSessionLocked([&]() {
+    dbgs() << "In " << MR.JD.getName() << " replacing symbols with " << *MU
+           << "\n";
+  }););
+
+  return MR.JD.replace(MR, std::move(MU));
+}
+
+// Split the given Symbols out of MR into a new
+// MaterializationResponsibility covering just those symbols. Every name in
+// Symbols must currently be covered by MR. If the init symbol is among them,
+// ownership of it transfers to the delegated responsibility (via the swap
+// below, which simultaneously clears MR.InitSymbol).
+Expected<std::unique_ptr<MaterializationResponsibility>>
+ExecutionSession::OL_delegate(MaterializationResponsibility &MR,
+                              const SymbolNameSet &Symbols) {
+
+  SymbolStringPtr DelegatedInitSymbol;
+  SymbolFlagsMap DelegatedFlags;
+
+  for (auto &Name : Symbols) {
+    auto I = MR.SymbolFlags.find(Name);
+    assert(I != MR.SymbolFlags.end() &&
+           "Symbol is not tracked by this MaterializationResponsibility "
+           "instance");
+
+    DelegatedFlags[Name] = std::move(I->second);
+    if (Name == MR.InitSymbol)
+      std::swap(MR.InitSymbol, DelegatedInitSymbol);
+
+    MR.SymbolFlags.erase(I);
+  }
+
+  return MR.JD.delegate(MR, std::move(DelegatedFlags),
+                        std::move(DelegatedInitSymbol));
+}
+
+// Register Dependencies for a single symbol covered by MR (delegates to the
+// owning JITDylib).
+void ExecutionSession::OL_addDependencies(
+    MaterializationResponsibility &MR, const SymbolStringPtr &Name,
+    const SymbolDependenceMap &Dependencies) {
+  LLVM_DEBUG({
+    dbgs() << "Adding dependencies for " << Name << ": " << Dependencies
+           << "\n";
+  });
+  assert(MR.SymbolFlags.count(Name) &&
+         "Symbol not covered by this MaterializationResponsibility instance");
+  MR.JD.addDependencies(Name, Dependencies);
+}
+
+// Register the same Dependencies against every symbol currently covered by
+// MR (delegates each to the owning JITDylib).
+void ExecutionSession::OL_addDependenciesForAll(
+    MaterializationResponsibility &MR,
+    const SymbolDependenceMap &Dependencies) {
+  LLVM_DEBUG({
+    dbgs() << "Adding dependencies for all symbols in " << MR.SymbolFlags << ": "
+           << Dependencies << "\n";
+  });
+  for (auto &Entry : MR.SymbolFlags) {
+    const SymbolStringPtr &Sym = Entry.first;
+    MR.JD.addDependencies(Sym, Dependencies);
+  }
+}
+
+#ifndef NDEBUG
+// Debug-build helper: print a one-line description of a task as it is
+// dispatched. Runs under the session lock so output is not interleaved with
+// other session activity.
+void ExecutionSession::dumpDispatchInfo(Task &T) {
+  runSessionLocked([&]() {
+    dbgs() << "Dispatching: ";
+    T.printDescription(dbgs());
+    dbgs() << "\n";
+  });
+}
+#endif // NDEBUG
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp
new file mode 100644
index 0000000000..4ff6b7fd54
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebugObjectManagerPlugin.cpp
@@ -0,0 +1,516 @@
+//===------- DebugObjectManagerPlugin.cpp - JITLink debug objects ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// FIXME: Update Plugin to poke the debug object into a new JITLink section,
+// rather than creating a new allocation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h"
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/JITLinkDylib.h"
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Errc.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include <set>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::jitlink;
+using namespace llvm::object;
+
+namespace llvm {
+namespace orc {
+
+// Interface for one section of a debug object. Concrete implementations
+// patch per-format section metadata once final target addresses are known.
+class DebugObjectSection {
+public:
+  // Called with the section's final address range in target memory.
+  virtual void setTargetMemoryRange(SectionRange Range) = 0;
+  // Print a line describing this section; the default prints nothing.
+  virtual void dump(raw_ostream &OS, StringRef Name) {}
+  virtual ~DebugObjectSection() {}
+};
+
+// Wraps a single ELF section header that lives inside the (writable) debug
+// object buffer, so its sh_addr field can be patched in place.
+template <typename ELFT>
+class ELFDebugObjectSection : public DebugObjectSection {
+public:
+  // BinaryFormat ELF is not meant as a mutable format. We can only make changes
+  // that don't invalidate the file structure.
+  ELFDebugObjectSection(const typename ELFT::Shdr *Header)
+      : Header(const_cast<typename ELFT::Shdr *>(Header)) {}
+
+  void setTargetMemoryRange(SectionRange Range) override;
+  void dump(raw_ostream &OS, StringRef Name) override;
+
+  // Check that the wrapped header and the section data it describes both lie
+  // entirely within Buffer; Name is used only for error messages.
+  Error validateInBounds(StringRef Buffer, const char *Name) const;
+
+private:
+  // Points into the owning ELFDebugObject's writable buffer copy.
+  typename ELFT::Shdr *Header;
+
+  bool isTextOrDataSection() const;
+};
+
+// Record the section's final load address in its header so consumers of the
+// debug object see where the section actually resides in target memory.
+template <typename ELFT>
+void ELFDebugObjectSection<ELFT>::setTargetMemoryRange(SectionRange Range) {
+  // Only patch load-addresses for executable and data sections.
+  if (isTextOrDataSection())
+    Header->sh_addr =
+        static_cast<typename ELFT::uint>(Range.getStart().getValue());
+}
+
+// Return true for sections whose load address should be patched: only
+// SHT_PROGBITS and SHT_X86_64_UNWIND sections qualify, and then only when
+// they are allocated or executable.
+template <typename ELFT>
+bool ELFDebugObjectSection<ELFT>::isTextOrDataSection() const {
+  if (Header->sh_type != ELF::SHT_PROGBITS &&
+      Header->sh_type != ELF::SHT_X86_64_UNWIND)
+    return false;
+  return (Header->sh_flags & (ELF::SHF_EXECINSTR | ELF::SHF_ALLOC)) != 0;
+}
+
+// Verify that both the section header itself and the section data it
+// describes ([sh_offset, sh_offset + sh_size)) fall inside Buffer; Name
+// labels the error messages only.
+template <typename ELFT>
+Error ELFDebugObjectSection<ELFT>::validateInBounds(StringRef Buffer,
+                                                    const char *Name) const {
+  const uint8_t *Start = Buffer.bytes_begin();
+  const uint8_t *End = Buffer.bytes_end();
+  const uint8_t *HeaderPtr = reinterpret_cast<uint8_t *>(Header);
+  if (HeaderPtr < Start || HeaderPtr + sizeof(typename ELFT::Shdr) > End)
+    return make_error<StringError>(
+        formatv("{0} section header at {1:x16} not within bounds of the "
+                "given debug object buffer [{2:x16} - {3:x16}]",
+                Name, &Header->sh_addr, Start, End),
+        inconvertibleErrorCode());
+  if (Header->sh_offset + Header->sh_size > Buffer.size())
+    return make_error<StringError>(
+        formatv("{0} section data [{1:x16} - {2:x16}] not within bounds of "
+                "the given debug object buffer [{3:x16} - {4:x16}]",
+                Name, Start + Header->sh_offset,
+                Start + Header->sh_offset + Header->sh_size, Start, End),
+        inconvertibleErrorCode());
+  return Error::success();
+}
+
+// Print "<addr> <name>" when a load address has been patched into the
+// header, otherwise just the name.
+template <typename ELFT>
+void ELFDebugObjectSection<ELFT>::dump(raw_ostream &OS, StringRef Name) {
+  if (auto Addr = static_cast<JITTargetAddress>(Header->sh_addr)) {
+    OS << formatv("  {0:x16} {1}\n", Addr, Name);
+  } else {
+    OS << formatv("  {0}\n", Name);
+  }
+}
+
+// Requirements a DebugObject can register against the link process (see
+// DebugObject::set/has).
+enum class Requirement {
+  // Request final target memory load-addresses for all sections.
+  ReportFinalSectionLoadAddresses,
+};
+
+/// The plugin creates a debug object from when JITLink starts processing the
+/// corresponding LinkGraph. It provides access to the pass configuration of
+/// the LinkGraph and calls the finalization function, once the resulting link
+/// artifact was emitted.
+///
+class DebugObject {
+public:
+  DebugObject(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
+              ExecutionSession &ES)
+      : MemMgr(MemMgr), JD(JD), ES(ES) {}
+
+  // Register / query requirements this debug object has on the link process.
+  void set(Requirement Req) { Reqs.insert(Req); }
+  bool has(Requirement Req) const { return Reqs.count(Req) > 0; }
+
+  using FinalizeContinuation = std::function<void(Expected<ExecutorAddrRange>)>;
+
+  // Finalize working memory and invoke OnFinalize with the target address
+  // range of the emitted debug object (or an error).
+  void finalizeAsync(FinalizeContinuation OnFinalize);
+
+  virtual ~DebugObject() {
+    // Release target memory for the debug object; errors are reported to the
+    // session rather than propagated (destructors cannot return them).
+    if (Alloc) {
+      std::vector<FinalizedAlloc> Allocs;
+      Allocs.push_back(std::move(Alloc));
+      if (Error Err = MemMgr.deallocate(std::move(Allocs)))
+        ES.reportError(std::move(Err));
+    }
+  }
+
+  // Called per section once its target memory range is known; the default
+  // implementation ignores the notification.
+  virtual void reportSectionTargetMemoryRange(StringRef Name,
+                                              SectionRange TargetMem) {}
+
+protected:
+  using InFlightAlloc = JITLinkMemoryManager::InFlightAlloc;
+  using FinalizedAlloc = JITLinkMemoryManager::FinalizedAlloc;
+
+  // Hand working memory to the memory manager; implemented per object format.
+  virtual Expected<SimpleSegmentAlloc> finalizeWorkingMemory() = 0;
+
+  JITLinkMemoryManager &MemMgr;
+  const JITLinkDylib *JD = nullptr;
+
+private:
+  ExecutionSession &ES;
+  std::set<Requirement> Reqs;
+  // Finalized target allocation; owned until destruction (see ~DebugObject).
+  FinalizedAlloc Alloc;
+};
+
+// Finalize working memory and take ownership of the resulting allocation. Start
+// copying memory over to the target and pass on the result once we're done.
+// Ownership of the allocation remains with us for the rest of our lifetime.
+void DebugObject::finalizeAsync(FinalizeContinuation OnFinalize) {
+  assert(!Alloc && "Cannot finalize more than once");
+
+  if (auto SimpleSegAlloc = finalizeWorkingMemory()) {
+    auto ROSeg = SimpleSegAlloc->getSegInfo(MemProt::Read);
+    ExecutorAddrRange DebugObjRange(ExecutorAddr(ROSeg.Addr),
+                                    ExecutorAddrDiff(ROSeg.WorkingMem.size()));
+    SimpleSegAlloc->finalize(
+        [this, DebugObjRange,
+         OnFinalize = std::move(OnFinalize)](Expected<FinalizedAlloc> FA) {
+          if (FA) {
+            // Keep the finalized allocation alive for our lifetime; it is
+            // released (errors reported) in ~DebugObject().
+            Alloc = std::move(*FA);
+            OnFinalize(DebugObjRange);
+          } else
+            OnFinalize(FA.takeError());
+        });
+  } else
+    OnFinalize(SimpleSegAlloc.takeError());
+}
+
+/// The current implementation of ELFDebugObject replicates the approach used in
+/// RuntimeDyld: It patches executable and data section headers in the given
+/// object buffer with load-addresses of their corresponding sections in target
+/// memory.
+///
+class ELFDebugObject : public DebugObject {
+public:
+  // Build a debug object for Buffer; may legitimately return nullptr (see
+  // CreateArchType) when no debug object should be emitted for this input.
+  static Expected<std::unique_ptr<DebugObject>>
+  Create(MemoryBufferRef Buffer, JITLinkContext &Ctx, ExecutionSession &ES);
+
+  void reportSectionTargetMemoryRange(StringRef Name,
+                                      SectionRange TargetMem) override;
+
+  StringRef getBuffer() const { return Buffer->getMemBufferRef().getBuffer(); }
+
+protected:
+  Expected<SimpleSegmentAlloc> finalizeWorkingMemory() override;
+
+  // Track a named section wrapper of this debug object.
+  template <typename ELFT>
+  Error recordSection(StringRef Name,
+                      std::unique_ptr<ELFDebugObjectSection<ELFT>> Section);
+  DebugObjectSection *getSection(StringRef Name);
+
+private:
+  template <typename ELFT>
+  static Expected<std::unique_ptr<ELFDebugObject>>
+  CreateArchType(MemoryBufferRef Buffer, JITLinkMemoryManager &MemMgr,
+                 const JITLinkDylib *JD, ExecutionSession &ES);
+
+  // Copy Buffer into writable memory; sets Err and returns nullptr on
+  // allocation failure.
+  static std::unique_ptr<WritableMemoryBuffer>
+  CopyBuffer(MemoryBufferRef Buffer, Error &Err);
+
+  ELFDebugObject(std::unique_ptr<WritableMemoryBuffer> Buffer,
+                 JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
+                 ExecutionSession &ES)
+      : DebugObject(MemMgr, JD, ES), Buffer(std::move(Buffer)) {
+    set(Requirement::ReportFinalSectionLoadAddresses);
+  }
+
+  // Writable copy of the input object; section headers are patched in place.
+  std::unique_ptr<WritableMemoryBuffer> Buffer;
+  StringMap<std::unique_ptr<DebugObjectSection>> Sections;
+};
+
+// Canonical DWARF section names, expanded from Dwarf.def; used to decide
+// whether an input object carries debug info worth registering.
+static const std::set<StringRef> DwarfSectionNames = {
+#define HANDLE_DWARF_SECTION(ENUM_NAME, ELF_NAME, CMDLINE_NAME, OPTION)        \
+  ELF_NAME,
+#include "llvm/BinaryFormat/Dwarf.def"
+#undef HANDLE_DWARF_SECTION
+};
+
+// True if SectionName is one of the canonical DWARF section names. A
+// std::set holds each key at most once, so membership is equivalent to the
+// count()==1 test.
+static bool isDwarfSection(StringRef SectionName) {
+  return DwarfSectionNames.find(SectionName) != DwarfSectionNames.end();
+}
+
+// Make a writable copy of Buffer so that section headers can later be
+// patched in place. Returns nullptr and sets Err on allocation failure.
+std::unique_ptr<WritableMemoryBuffer>
+ELFDebugObject::CopyBuffer(MemoryBufferRef Buffer, Error &Err) {
+  ErrorAsOutParameter _(&Err);
+  size_t Size = Buffer.getBufferSize();
+  StringRef Name = Buffer.getBufferIdentifier();
+  if (auto Copy = WritableMemoryBuffer::getNewUninitMemBuffer(Size, Name)) {
+    memcpy(Copy->getBufferStart(), Buffer.getBufferStart(), Size);
+    return Copy;
+  }
+
+  Err = errorCodeToError(make_error_code(errc::not_enough_memory));
+  return nullptr;
+}
+
+// Build an ELFDebugObject for the concrete ELF layout ELFT: copy the input
+// buffer, parse it, and wrap every named section header. Returns nullptr
+// (not an error) when no debug object should be emitted for this input —
+// currently for non-x86_64 targets and for objects without DWARF sections.
+template <typename ELFT>
+Expected<std::unique_ptr<ELFDebugObject>>
+ELFDebugObject::CreateArchType(MemoryBufferRef Buffer,
+                               JITLinkMemoryManager &MemMgr,
+                               const JITLinkDylib *JD, ExecutionSession &ES) {
+  using SectionHeader = typename ELFT::Shdr;
+
+  Error Err = Error::success();
+  std::unique_ptr<ELFDebugObject> DebugObj(
+      new ELFDebugObject(CopyBuffer(Buffer, Err), MemMgr, JD, ES));
+  if (Err)
+    return std::move(Err);
+
+  Expected<ELFFile<ELFT>> ObjRef = ELFFile<ELFT>::create(DebugObj->getBuffer());
+  if (!ObjRef)
+    return ObjRef.takeError();
+
+  // TODO: Add support for other architectures.
+  uint16_t TargetMachineArch = ObjRef->getHeader().e_machine;
+  if (TargetMachineArch != ELF::EM_X86_64)
+    return nullptr;
+
+  Expected<ArrayRef<SectionHeader>> Sections = ObjRef->sections();
+  if (!Sections)
+    return Sections.takeError();
+
+  bool HasDwarfSection = false;
+  for (const SectionHeader &Header : *Sections) {
+    Expected<StringRef> Name = ObjRef->getSectionName(Header);
+    if (!Name)
+      return Name.takeError();
+    if (Name->empty())
+      continue;
+    HasDwarfSection |= isDwarfSection(*Name);
+
+    // Wrap the header (which lives inside DebugObj's buffer copy) so its
+    // load address can be patched later.
+    auto Wrapped = std::make_unique<ELFDebugObjectSection<ELFT>>(&Header);
+    if (Error Err = DebugObj->recordSection(*Name, std::move(Wrapped)))
+      return std::move(Err);
+  }
+
+  if (!HasDwarfSection) {
+    LLVM_DEBUG(dbgs() << "Aborting debug registration for LinkGraph \""
+                      << DebugObj->Buffer->getBufferIdentifier()
+                      << "\": input object contains no debug info\n");
+    return nullptr;
+  }
+
+  return std::move(DebugObj);
+}
+
+Expected<std::unique_ptr<DebugObject>>
+ELFDebugObject::Create(MemoryBufferRef Buffer, JITLinkContext &Ctx,
+ ExecutionSession &ES) {
+ unsigned char Class, Endian;
+ std::tie(Class, Endian) = getElfArchType(Buffer.getBuffer());
+
+ if (Class == ELF::ELFCLASS32) {
+ if (Endian == ELF::ELFDATA2LSB)
+ return CreateArchType<ELF32LE>(Buffer, Ctx.getMemoryManager(),
+ Ctx.getJITLinkDylib(), ES);
+ if (Endian == ELF::ELFDATA2MSB)
+ return CreateArchType<ELF32BE>(Buffer, Ctx.getMemoryManager(),
+ Ctx.getJITLinkDylib(), ES);
+ return nullptr;
+ }
+ if (Class == ELF::ELFCLASS64) {
+ if (Endian == ELF::ELFDATA2LSB)
+ return CreateArchType<ELF64LE>(Buffer, Ctx.getMemoryManager(),
+ Ctx.getJITLinkDylib(), ES);
+ if (Endian == ELF::ELFDATA2MSB)
+ return CreateArchType<ELF64BE>(Buffer, Ctx.getMemoryManager(),
+ Ctx.getJITLinkDylib(), ES);
+ return nullptr;
+ }
+ return nullptr;
+}
+
+Expected<SimpleSegmentAlloc> ELFDebugObject::finalizeWorkingMemory() {
+ LLVM_DEBUG({
+ dbgs() << "Section load-addresses in debug object for \""
+ << Buffer->getBufferIdentifier() << "\":\n";
+ for (const auto &KV : Sections)
+ KV.second->dump(dbgs(), KV.first());
+ });
+
+ // TODO: This works, but what actual alignment requirements do we have?
+ unsigned PageSize = sys::Process::getPageSizeEstimate();
+ size_t Size = Buffer->getBufferSize();
+
+ // Allocate working memory for debug object in read-only segment.
+ auto Alloc = SimpleSegmentAlloc::Create(
+ MemMgr, JD, {{MemProt::Read, {Size, Align(PageSize)}}});
+ if (!Alloc)
+ return Alloc;
+
+ // Initialize working memory with a copy of our object buffer.
+ auto SegInfo = Alloc->getSegInfo(MemProt::Read);
+ memcpy(SegInfo.WorkingMem.data(), Buffer->getBufferStart(), Size);
+ Buffer.reset();
+
+ return Alloc;
+}
+
+void ELFDebugObject::reportSectionTargetMemoryRange(StringRef Name,
+ SectionRange TargetMem) {
+ if (auto *DebugObjSection = getSection(Name))
+ DebugObjSection->setTargetMemoryRange(TargetMem);
+}
+
+template <typename ELFT>
+Error ELFDebugObject::recordSection(
+ StringRef Name, std::unique_ptr<ELFDebugObjectSection<ELFT>> Section) {
+ if (Error Err = Section->validateInBounds(this->getBuffer(), Name.data()))
+ return Err;
+ auto ItInserted = Sections.try_emplace(Name, std::move(Section));
+ if (!ItInserted.second)
+ return make_error<StringError>("Duplicate section",
+ inconvertibleErrorCode());
+ return Error::success();
+}
+
+DebugObjectSection *ELFDebugObject::getSection(StringRef Name) {
+ auto It = Sections.find(Name);
+ return It == Sections.end() ? nullptr : It->second.get();
+}
+
+/// Creates a debug object based on the input object file from
+/// ObjectLinkingLayerJITLinkContext.
+///
+static Expected<std::unique_ptr<DebugObject>>
+createDebugObjectFromBuffer(ExecutionSession &ES, LinkGraph &G,
+ JITLinkContext &Ctx, MemoryBufferRef ObjBuffer) {
+ switch (G.getTargetTriple().getObjectFormat()) {
+ case Triple::ELF:
+ return ELFDebugObject::Create(ObjBuffer, Ctx, ES);
+
+ default:
+ // TODO: Once we add support for other formats, we might want to split this
+ // into multiple files.
+ return nullptr;
+ }
+}
+
+DebugObjectManagerPlugin::DebugObjectManagerPlugin(
+ ExecutionSession &ES, std::unique_ptr<DebugObjectRegistrar> Target)
+ : ES(ES), Target(std::move(Target)) {}
+
+DebugObjectManagerPlugin::~DebugObjectManagerPlugin() = default;
+
+void DebugObjectManagerPlugin::notifyMaterializing(
+ MaterializationResponsibility &MR, LinkGraph &G, JITLinkContext &Ctx,
+ MemoryBufferRef ObjBuffer) {
+ std::lock_guard<std::mutex> Lock(PendingObjsLock);
+ assert(PendingObjs.count(&MR) == 0 &&
+ "Cannot have more than one pending debug object per "
+ "MaterializationResponsibility");
+
+ if (auto DebugObj = createDebugObjectFromBuffer(ES, G, Ctx, ObjBuffer)) {
+ // Not all link artifacts allow debugging.
+ if (*DebugObj != nullptr)
+ PendingObjs[&MR] = std::move(*DebugObj);
+ } else {
+ ES.reportError(DebugObj.takeError());
+ }
+}
+
+void DebugObjectManagerPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, LinkGraph &G,
+ PassConfiguration &PassConfig) {
+ // Not all link artifacts have associated debug objects.
+ std::lock_guard<std::mutex> Lock(PendingObjsLock);
+ auto It = PendingObjs.find(&MR);
+ if (It == PendingObjs.end())
+ return;
+
+ DebugObject &DebugObj = *It->second;
+ if (DebugObj.has(Requirement::ReportFinalSectionLoadAddresses)) {
+ PassConfig.PostAllocationPasses.push_back(
+ [&DebugObj](LinkGraph &Graph) -> Error {
+ for (const Section &GraphSection : Graph.sections())
+ DebugObj.reportSectionTargetMemoryRange(GraphSection.getName(),
+ SectionRange(GraphSection));
+ return Error::success();
+ });
+ }
+}
+
+Error DebugObjectManagerPlugin::notifyEmitted(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(PendingObjsLock);
+ auto It = PendingObjs.find(&MR);
+ if (It == PendingObjs.end())
+ return Error::success();
+
+ // During finalization the debug object is registered with the target.
+ // Materialization must wait for this process to finish. Otherwise we might
+ // start running code before the debugger processed the corresponding debug
+ // info.
+ std::promise<MSVCPError> FinalizePromise;
+ std::future<MSVCPError> FinalizeErr = FinalizePromise.get_future();
+
+ It->second->finalizeAsync(
+ [this, &FinalizePromise, &MR](Expected<ExecutorAddrRange> TargetMem) {
+ // Any failure here will fail materialization.
+ if (!TargetMem) {
+ FinalizePromise.set_value(TargetMem.takeError());
+ return;
+ }
+ if (Error Err = Target->registerDebugObject(*TargetMem)) {
+ FinalizePromise.set_value(std::move(Err));
+ return;
+ }
+
+ // Once our tracking info is updated, notifyEmitted() can return and
+ // finish materialization.
+ FinalizePromise.set_value(MR.withResourceKeyDo([&](ResourceKey K) {
+ assert(PendingObjs.count(&MR) && "We still hold PendingObjsLock");
+ std::lock_guard<std::mutex> Lock(RegisteredObjsLock);
+ RegisteredObjs[K].push_back(std::move(PendingObjs[&MR]));
+ PendingObjs.erase(&MR);
+ }));
+ });
+
+ return FinalizeErr.get();
+}
+
+Error DebugObjectManagerPlugin::notifyFailed(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(PendingObjsLock);
+ PendingObjs.erase(&MR);
+ return Error::success();
+}
+
+void DebugObjectManagerPlugin::notifyTransferringResources(ResourceKey DstKey,
+ ResourceKey SrcKey) {
+ // Debug objects are stored by ResourceKey only after registration.
+ // Thus, pending objects don't need to be updated here.
+ std::lock_guard<std::mutex> Lock(RegisteredObjsLock);
+ auto SrcIt = RegisteredObjs.find(SrcKey);
+ if (SrcIt != RegisteredObjs.end()) {
+ // Resources from distinct MaterializationResponsibilitys can get merged
+ // after emission, so we can have multiple debug objects per resource key.
+ for (std::unique_ptr<DebugObject> &DebugObj : SrcIt->second)
+ RegisteredObjs[DstKey].push_back(std::move(DebugObj));
+ RegisteredObjs.erase(SrcIt);
+ }
+}
+
+Error DebugObjectManagerPlugin::notifyRemovingResources(ResourceKey Key) {
+ // Removing the resource for a pending object fails materialization, so they
+ // get cleaned up in the notifyFailed() handler.
+ std::lock_guard<std::mutex> Lock(RegisteredObjsLock);
+ RegisteredObjs.erase(Key);
+
+ // TODO: Implement unregister notifications.
+ return Error::success();
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebugUtils.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebugUtils.cpp
new file mode 100644
index 0000000000..5b386a458f
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebugUtils.cpp
@@ -0,0 +1,348 @@
+//===---------- DebugUtils.cpp - Utilities for debugging ORC JITs ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+
+namespace {
+
+#ifndef NDEBUG
+
+cl::opt<bool> PrintHidden("debug-orc-print-hidden", cl::init(true),
+ cl::desc("debug print hidden symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+cl::opt<bool> PrintCallable("debug-orc-print-callable", cl::init(true),
+ cl::desc("debug print callable symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+cl::opt<bool> PrintData("debug-orc-print-data", cl::init(true),
+ cl::desc("debug print data symbols defined by "
+ "materialization units"),
+ cl::Hidden);
+
+#endif // NDEBUG
+
+// SetPrinter predicate that prints every element.
+template <typename T> struct PrintAll {
+ bool operator()(const T &E) { return true; }
+};
+
+bool anyPrintSymbolOptionSet() {
+#ifndef NDEBUG
+ return PrintHidden || PrintCallable || PrintData;
+#else
+ return false;
+#endif // NDEBUG
+}
+
+bool flagsMatchCLOpts(const JITSymbolFlags &Flags) {
+#ifndef NDEBUG
+ // Bail out early if this is a hidden symbol and we're not printing hiddens.
+ if (!PrintHidden && !Flags.isExported())
+ return false;
+
+ // Return true if this is callable and we're printing callables.
+ if (PrintCallable && Flags.isCallable())
+ return true;
+
+ // Return true if this is data and we're printing data.
+ if (PrintData && !Flags.isCallable())
+ return true;
+
+ // otherwise return false.
+ return false;
+#else
+ return false;
+#endif // NDEBUG
+}
+
+// Prints a sequence of items, filtered by an user-supplied predicate.
+template <typename Sequence,
+ typename Pred = PrintAll<typename Sequence::value_type>>
+class SequencePrinter {
+public:
+ SequencePrinter(const Sequence &S, char OpenSeq, char CloseSeq,
+ Pred ShouldPrint = Pred())
+ : S(S), OpenSeq(OpenSeq), CloseSeq(CloseSeq),
+ ShouldPrint(std::move(ShouldPrint)) {}
+
+ void printTo(llvm::raw_ostream &OS) const {
+ bool PrintComma = false;
+ OS << OpenSeq;
+ for (auto &E : S) {
+ if (ShouldPrint(E)) {
+ if (PrintComma)
+ OS << ',';
+ OS << ' ' << E;
+ PrintComma = true;
+ }
+ }
+ OS << ' ' << CloseSeq;
+ }
+
+private:
+ const Sequence &S;
+ char OpenSeq;
+ char CloseSeq;
+ mutable Pred ShouldPrint;
+};
+
+template <typename Sequence, typename Pred>
+SequencePrinter<Sequence, Pred> printSequence(const Sequence &S, char OpenSeq,
+ char CloseSeq, Pred P = Pred()) {
+ return SequencePrinter<Sequence, Pred>(S, OpenSeq, CloseSeq, std::move(P));
+}
+
+// Render a SequencePrinter by delegating to its printTo method.
+template <typename Sequence, typename Pred>
+llvm::raw_ostream &operator<<(llvm::raw_ostream &OS,
+ const SequencePrinter<Sequence, Pred> &Printer) {
+ Printer.printTo(OS);
+ return OS;
+}
+
+struct PrintSymbolFlagsMapElemsMatchingCLOpts {
+ bool operator()(const orc::SymbolFlagsMap::value_type &KV) {
+ return flagsMatchCLOpts(KV.second);
+ }
+};
+
+struct PrintSymbolMapElemsMatchingCLOpts {
+ bool operator()(const orc::SymbolMap::value_type &KV) {
+ return flagsMatchCLOpts(KV.second.getFlags());
+ }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolStringPtr &Sym) {
+ return OS << *Sym;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolNameSet &Symbols) {
+ return OS << printSequence(Symbols, '{', '}', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolNameVector &Symbols) {
+ return OS << printSequence(Symbols, '[', ']', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, ArrayRef<SymbolStringPtr> Symbols) {
+ return OS << printSequence(Symbols, '[', ']', PrintAll<SymbolStringPtr>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const JITSymbolFlags &Flags) {
+ if (Flags.hasError())
+ OS << "[*ERROR*]";
+ if (Flags.isCallable())
+ OS << "[Callable]";
+ else
+ OS << "[Data]";
+ if (Flags.isWeak())
+ OS << "[Weak]";
+ else if (Flags.isCommon())
+ OS << "[Common]";
+
+ if (!Flags.isExported())
+ OS << "[Hidden]";
+
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const JITEvaluatedSymbol &Sym) {
+ return OS << format("0x%016" PRIx64, Sym.getAddress()) << " "
+ << Sym.getFlags();
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap::value_type &KV) {
+ return OS << "(\"" << KV.first << "\", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap::value_type &KV) {
+ return OS << "(\"" << KV.first << "\": " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolFlagsMap &SymbolFlags) {
+ return OS << printSequence(SymbolFlags, '{', '}',
+ PrintSymbolFlagsMapElemsMatchingCLOpts());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolMap &Symbols) {
+ return OS << printSequence(Symbols, '{', '}',
+ PrintSymbolMapElemsMatchingCLOpts());
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const SymbolDependenceMap::value_type &KV) {
+ return OS << "(" << KV.first->getName() << ", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolDependenceMap &Deps) {
+ return OS << printSequence(Deps, '{', '}',
+ PrintAll<SymbolDependenceMap::value_type>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const MaterializationUnit &MU) {
+ OS << "MU@" << &MU << " (\"" << MU.getName() << "\"";
+ if (anyPrintSymbolOptionSet())
+ OS << ", " << MU.getSymbols();
+ return OS << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const LookupKind &K) {
+ switch (K) {
+ case LookupKind::Static:
+ return OS << "Static";
+ case LookupKind::DLSym:
+ return OS << "DLSym";
+ }
+ llvm_unreachable("Invalid lookup kind");
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const JITDylibLookupFlags &JDLookupFlags) {
+ switch (JDLookupFlags) {
+ case JITDylibLookupFlags::MatchExportedSymbolsOnly:
+ return OS << "MatchExportedSymbolsOnly";
+ case JITDylibLookupFlags::MatchAllSymbols:
+ return OS << "MatchAllSymbols";
+ }
+ llvm_unreachable("Invalid JITDylib lookup flags");
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupFlags &LookupFlags) {
+ switch (LookupFlags) {
+ case SymbolLookupFlags::RequiredSymbol:
+ return OS << "RequiredSymbol";
+ case SymbolLookupFlags::WeaklyReferencedSymbol:
+ return OS << "WeaklyReferencedSymbol";
+ }
+ llvm_unreachable("Invalid symbol lookup flags");
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const SymbolLookupSet::value_type &KV) {
+ return OS << "(" << KV.first << ", " << KV.second << ")";
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolLookupSet &LookupSet) {
+ return OS << printSequence(LookupSet, '{', '}',
+ PrintAll<SymbolLookupSet::value_type>());
+}
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const JITDylibSearchOrder &SearchOrder) {
+ OS << "[";
+ if (!SearchOrder.empty()) {
+ assert(SearchOrder.front().first &&
+ "JITDylibList entries must not be null");
+ OS << " (\"" << SearchOrder.front().first->getName() << "\", "
+ << SearchOrder.begin()->second << ")";
+ for (auto &KV : llvm::drop_begin(SearchOrder)) {
+ assert(KV.first && "JITDylibList entries must not be null");
+ OS << ", (\"" << KV.first->getName() << "\", " << KV.second << ")";
+ }
+ }
+ OS << " ]";
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolAliasMap &Aliases) {
+ OS << "{";
+ for (auto &KV : Aliases)
+ OS << " " << *KV.first << ": " << KV.second.Aliasee << " "
+ << KV.second.AliasFlags;
+ OS << " }";
+ return OS;
+}
+
+raw_ostream &operator<<(raw_ostream &OS, const SymbolState &S) {
+ switch (S) {
+ case SymbolState::Invalid:
+ return OS << "Invalid";
+ case SymbolState::NeverSearched:
+ return OS << "Never-Searched";
+ case SymbolState::Materializing:
+ return OS << "Materializing";
+ case SymbolState::Resolved:
+ return OS << "Resolved";
+ case SymbolState::Emitted:
+ return OS << "Emitted";
+ case SymbolState::Ready:
+ return OS << "Ready";
+ }
+ llvm_unreachable("Invalid state");
+}
+
+DumpObjects::DumpObjects(std::string DumpDir, std::string IdentifierOverride)
+ : DumpDir(std::move(DumpDir)),
+ IdentifierOverride(std::move(IdentifierOverride)) {
+
+ /// Discard any trailing separators.
+ while (!this->DumpDir.empty() &&
+ sys::path::is_separator(this->DumpDir.back()))
+ this->DumpDir.pop_back();
+}
+
+Expected<std::unique_ptr<MemoryBuffer>>
+DumpObjects::operator()(std::unique_ptr<MemoryBuffer> Obj) {
+ size_t Idx = 1;
+
+ std::string DumpPathStem;
+ raw_string_ostream(DumpPathStem)
+ << DumpDir << (DumpDir.empty() ? "" : "/") << getBufferIdentifier(*Obj);
+
+ std::string DumpPath = DumpPathStem + ".o";
+ while (sys::fs::exists(DumpPath)) {
+ DumpPath.clear();
+ raw_string_ostream(DumpPath) << DumpPathStem << "." << (++Idx) << ".o";
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Dumping object buffer [ " << (const void *)Obj->getBufferStart()
+ << " -- " << (const void *)(Obj->getBufferEnd() - 1) << " ] to "
+ << DumpPath << "\n";
+ });
+
+ std::error_code EC;
+ raw_fd_ostream DumpStream(DumpPath, EC);
+ if (EC)
+ return errorCodeToError(EC);
+ DumpStream.write(Obj->getBufferStart(), Obj->getBufferSize());
+
+ return std::move(Obj);
+}
+
+StringRef DumpObjects::getBufferIdentifier(MemoryBuffer &B) {
+ if (!IdentifierOverride.empty())
+ return IdentifierOverride;
+ StringRef Identifier = B.getBufferIdentifier();
+ Identifier.consume_back(".o");
+ return Identifier;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp
new file mode 100644
index 0000000000..6916ee4a82
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/DebuggerSupportPlugin.cpp
@@ -0,0 +1,470 @@
+//===------- DebuggerSupportPlugin.cpp - Utils for debugger support -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/DebuggerSupportPlugin.h"
+
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/BinaryFormat/MachO.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::orc;
+
+static const char *SynthDebugSectionName = "__jitlink_synth_debug_object";
+
+namespace {
+
+struct MachO64LE {
+ using UIntPtr = uint64_t;
+
+ using Header = MachO::mach_header_64;
+ using SegmentLC = MachO::segment_command_64;
+ using Section = MachO::section_64;
+ using NList = MachO::nlist_64;
+
+ static constexpr support::endianness Endianness = support::little;
+ static constexpr const uint32_t Magic = MachO::MH_MAGIC_64;
+ static constexpr const uint32_t SegmentCmd = MachO::LC_SEGMENT_64;
+};
+
+class MachODebugObjectSynthesizerBase
+ : public GDBJITDebugInfoRegistrationPlugin::DebugSectionSynthesizer {
+public:
+ static bool isDebugSection(Section &Sec) {
+ return Sec.getName().startswith("__DWARF,");
+ }
+
+ MachODebugObjectSynthesizerBase(LinkGraph &G, ExecutorAddr RegisterActionAddr)
+ : G(G), RegisterActionAddr(RegisterActionAddr) {}
+ virtual ~MachODebugObjectSynthesizerBase() {}
+
+ Error preserveDebugSections() {
+ if (G.findSectionByName(SynthDebugSectionName)) {
+ LLVM_DEBUG({
+ dbgs() << "MachODebugObjectSynthesizer skipping graph " << G.getName()
+ << " which contains an unexpected existing "
+ << SynthDebugSectionName << " section.\n";
+ });
+ return Error::success();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "MachODebugObjectSynthesizer visiting graph " << G.getName()
+ << "\n";
+ });
+ for (auto &Sec : G.sections()) {
+ if (!isDebugSection(Sec))
+ continue;
+ // Preserve blocks in this debug section by marking one existing symbol
+ // live for each block, and introducing a new live, anonymous symbol for
+ // each currently unreferenced block.
+ LLVM_DEBUG({
+ dbgs() << " Preserving debug section " << Sec.getName() << "\n";
+ });
+ SmallSet<Block *, 8> PreservedBlocks;
+ for (auto *Sym : Sec.symbols()) {
+ bool NewPreservedBlock =
+ PreservedBlocks.insert(&Sym->getBlock()).second;
+ if (NewPreservedBlock)
+ Sym->setLive(true);
+ }
+ for (auto *B : Sec.blocks())
+ if (!PreservedBlocks.count(B))
+ G.addAnonymousSymbol(*B, 0, 0, false, true);
+ }
+ return Error::success();
+ }
+
+protected:
+ LinkGraph &G;
+ ExecutorAddr RegisterActionAddr;
+};
+
+template <typename MachOTraits>
+class MachODebugObjectSynthesizer : public MachODebugObjectSynthesizerBase {
+private:
+ class MachOStructWriter {
+ public:
+ MachOStructWriter(MutableArrayRef<char> Buffer) : Buffer(Buffer) {}
+
+ size_t getOffset() const { return Offset; }
+
+ template <typename MachOStruct> void write(MachOStruct S) {
+ assert(Offset + sizeof(S) <= Buffer.size() &&
+ "Container block overflow while constructing debug MachO");
+ if (MachOTraits::Endianness != support::endian::system_endianness())
+ MachO::swapStruct(S);
+ memcpy(Buffer.data() + Offset, &S, sizeof(S));
+ Offset += sizeof(S);
+ }
+
+ private:
+ MutableArrayRef<char> Buffer;
+ size_t Offset = 0;
+ };
+
+public:
+ using MachODebugObjectSynthesizerBase::MachODebugObjectSynthesizerBase;
+
+ Error startSynthesis() override {
+ LLVM_DEBUG({
+ dbgs() << "Creating " << SynthDebugSectionName << " for " << G.getName()
+ << "\n";
+ });
+ auto &SDOSec = G.createSection(SynthDebugSectionName, MemProt::Read);
+
+ struct DebugSectionInfo {
+ Section *Sec = nullptr;
+ StringRef SegName;
+ StringRef SecName;
+ uint64_t Alignment = 0;
+ orc::ExecutorAddr StartAddr;
+ uint64_t Size = 0;
+ };
+
+ SmallVector<DebugSectionInfo, 12> DebugSecInfos;
+ size_t NumSections = 0;
+ for (auto &Sec : G.sections()) {
+ if (llvm::empty(Sec.blocks()))
+ continue;
+
+ ++NumSections;
+ if (isDebugSection(Sec)) {
+ size_t SepPos = Sec.getName().find(',');
+ if (SepPos > 16 || (Sec.getName().size() - (SepPos + 1) > 16)) {
+ LLVM_DEBUG({
+ dbgs() << "Skipping debug object synthesis for graph "
+ << G.getName()
+ << ": encountered non-standard DWARF section name \""
+ << Sec.getName() << "\"\n";
+ });
+ return Error::success();
+ }
+ DebugSecInfos.push_back({&Sec, Sec.getName().substr(0, SepPos),
+ Sec.getName().substr(SepPos + 1), 0,
+ orc::ExecutorAddr(), 0});
+ } else {
+ NonDebugSections.push_back(&Sec);
+
+ // If the first block in the section has a non-zero alignment offset
+ // then we need to add a padding block, since the section command in
+ // the header doesn't allow for aligment offsets.
+ SectionRange R(Sec);
+ if (!R.empty()) {
+ auto &FB = *R.getFirstBlock();
+ if (FB.getAlignmentOffset() != 0) {
+ auto Padding = G.allocateBuffer(FB.getAlignmentOffset());
+ memset(Padding.data(), 0, Padding.size());
+ G.createContentBlock(Sec, Padding,
+ FB.getAddress() - FB.getAlignmentOffset(),
+ FB.getAlignment(), 0);
+ }
+ }
+ }
+ }
+
+ // Create container block.
+ size_t SectionsCmdSize =
+ sizeof(typename MachOTraits::Section) * NumSections;
+ size_t SegmentLCSize =
+ sizeof(typename MachOTraits::SegmentLC) + SectionsCmdSize;
+ size_t ContainerBlockSize =
+ sizeof(typename MachOTraits::Header) + SegmentLCSize;
+ auto ContainerBlockContent = G.allocateBuffer(ContainerBlockSize);
+ MachOContainerBlock = &G.createMutableContentBlock(
+ SDOSec, ContainerBlockContent, orc::ExecutorAddr(), 8, 0);
+
+ // Copy debug section blocks and symbols.
+ orc::ExecutorAddr NextBlockAddr(MachOContainerBlock->getSize());
+ for (auto &SI : DebugSecInfos) {
+ assert(!llvm::empty(SI.Sec->blocks()) && "Empty debug info section?");
+
+ // Update addresses in debug section.
+ LLVM_DEBUG({
+ dbgs() << " Appending " << SI.Sec->getName() << " ("
+ << SI.Sec->blocks_size() << " block(s)) at "
+ << formatv("{0:x8}", NextBlockAddr) << "\n";
+ });
+ for (auto *B : SI.Sec->blocks()) {
+ NextBlockAddr = alignToBlock(NextBlockAddr, *B);
+ B->setAddress(NextBlockAddr);
+ NextBlockAddr += B->getSize();
+ }
+
+ auto &FirstBlock = **SI.Sec->blocks().begin();
+ if (FirstBlock.getAlignmentOffset() != 0)
+ return make_error<StringError>(
+ "First block in " + SI.Sec->getName() +
+ " section has non-zero alignment offset",
+ inconvertibleErrorCode());
+ if (FirstBlock.getAlignment() > std::numeric_limits<uint32_t>::max())
+ return make_error<StringError>("First block in " + SI.Sec->getName() +
+ " has alignment >4Gb",
+ inconvertibleErrorCode());
+
+ SI.Alignment = FirstBlock.getAlignment();
+ SI.StartAddr = FirstBlock.getAddress();
+ SI.Size = NextBlockAddr - SI.StartAddr;
+ G.mergeSections(SDOSec, *SI.Sec);
+ SI.Sec = nullptr;
+ }
+ size_t DebugSectionsSize =
+ NextBlockAddr - orc::ExecutorAddr(MachOContainerBlock->getSize());
+
+ // Write MachO header and debug section load commands.
+ MachOStructWriter Writer(MachOContainerBlock->getAlreadyMutableContent());
+ typename MachOTraits::Header Hdr;
+ memset(&Hdr, 0, sizeof(Hdr));
+ Hdr.magic = MachOTraits::Magic;
+ switch (G.getTargetTriple().getArch()) {
+ case Triple::x86_64:
+ Hdr.cputype = MachO::CPU_TYPE_X86_64;
+ Hdr.cpusubtype = MachO::CPU_SUBTYPE_X86_64_ALL;
+ break;
+ case Triple::aarch64:
+ Hdr.cputype = MachO::CPU_TYPE_ARM64;
+ Hdr.cpusubtype = MachO::CPU_SUBTYPE_ARM64_ALL;
+ break;
+ default:
+ llvm_unreachable("Unsupported architecture");
+ }
+ Hdr.filetype = MachO::MH_OBJECT;
+ Hdr.ncmds = 1;
+ Hdr.sizeofcmds = SegmentLCSize;
+ Hdr.flags = 0;
+ Writer.write(Hdr);
+
+ typename MachOTraits::SegmentLC SegLC;
+ memset(&SegLC, 0, sizeof(SegLC));
+ SegLC.cmd = MachOTraits::SegmentCmd;
+ SegLC.cmdsize = SegmentLCSize;
+ SegLC.vmaddr = ContainerBlockSize;
+ SegLC.vmsize = DebugSectionsSize;
+ SegLC.fileoff = ContainerBlockSize;
+ SegLC.filesize = DebugSectionsSize;
+ SegLC.maxprot =
+ MachO::VM_PROT_READ | MachO::VM_PROT_WRITE | MachO::VM_PROT_EXECUTE;
+ SegLC.initprot =
+ MachO::VM_PROT_READ | MachO::VM_PROT_WRITE | MachO::VM_PROT_EXECUTE;
+ SegLC.nsects = NumSections;
+ SegLC.flags = 0;
+ Writer.write(SegLC);
+
+ StringSet<> ExistingLongNames;
+ for (auto &SI : DebugSecInfos) {
+ typename MachOTraits::Section Sec;
+ memset(&Sec, 0, sizeof(Sec));
+ memcpy(Sec.sectname, SI.SecName.data(), SI.SecName.size());
+ memcpy(Sec.segname, SI.SegName.data(), SI.SegName.size());
+ Sec.addr = SI.StartAddr.getValue();
+ Sec.size = SI.Size;
+ Sec.offset = SI.StartAddr.getValue();
+ Sec.align = SI.Alignment;
+ Sec.reloff = 0;
+ Sec.nreloc = 0;
+ Sec.flags = MachO::S_ATTR_DEBUG;
+ Writer.write(Sec);
+ }
+
+ // Set MachOContainerBlock to indicate success to
+ // completeSynthesisAndRegister.
+ NonDebugSectionsStart = Writer.getOffset();
+ return Error::success();
+ }
+
+ Error completeSynthesisAndRegister() override {
+ if (!MachOContainerBlock) {
+ LLVM_DEBUG({
+ dbgs() << "Not writing MachO debug object header for " << G.getName()
+ << " since createDebugSection failed\n";
+ });
+ return Error::success();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << "Writing MachO debug object header for " << G.getName() << "\n";
+ });
+
+ MachOStructWriter Writer(
+ MachOContainerBlock->getAlreadyMutableContent().drop_front(
+ NonDebugSectionsStart));
+
+ unsigned LongSectionNameIdx = 0;
+ for (auto *Sec : NonDebugSections) {
+ size_t SepPos = Sec->getName().find(',');
+ StringRef SegName, SecName;
+ std::string CustomSecName;
+
+ if ((SepPos == StringRef::npos && Sec->getName().size() <= 16)) {
+ // No embedded segment name, short section name.
+ SegName = "__JITLINK_CUSTOM";
+ SecName = Sec->getName();
+ } else if (SepPos < 16 && (Sec->getName().size() - (SepPos + 1) <= 16)) {
+ // Canonical embedded segment and section name.
+ SegName = Sec->getName().substr(0, SepPos);
+ SecName = Sec->getName().substr(SepPos + 1);
+ } else {
+ // Long section name that needs to be truncated.
+ assert(Sec->getName().size() > 16 &&
+ "Short section name should have been handled above");
+ SegName = "__JITLINK_CUSTOM";
+ auto IdxStr = std::to_string(++LongSectionNameIdx);
+ CustomSecName = Sec->getName().substr(0, 15 - IdxStr.size()).str();
+ CustomSecName += ".";
+ CustomSecName += IdxStr;
+ SecName = StringRef(CustomSecName.data(), 16);
+ }
+
+ SectionRange R(*Sec);
+ if (R.getFirstBlock()->getAlignmentOffset() != 0)
+ return make_error<StringError>(
+ "While building MachO debug object for " + G.getName() +
+ " first block has non-zero alignment offset",
+ inconvertibleErrorCode());
+
+ typename MachOTraits::Section SecCmd;
+ memset(&SecCmd, 0, sizeof(SecCmd));
+ memcpy(SecCmd.sectname, SecName.data(), SecName.size());
+ memcpy(SecCmd.segname, SegName.data(), SegName.size());
+ SecCmd.addr = R.getStart().getValue();
+ SecCmd.size = R.getSize();
+ SecCmd.offset = 0;
+ SecCmd.align = R.getFirstBlock()->getAlignment();
+ SecCmd.reloff = 0;
+ SecCmd.nreloc = 0;
+ SecCmd.flags = 0;
+ Writer.write(SecCmd);
+ }
+
+ SectionRange R(MachOContainerBlock->getSection());
+ G.allocActions().push_back({cantFail(shared::WrapperFunctionCall::Create<
+ SPSArgList<SPSExecutorAddrRange>>(
+ RegisterActionAddr, R.getRange())),
+ {}});
+ return Error::success();
+ }
+
+private:
+ Block *MachOContainerBlock = nullptr;
+ SmallVector<Section *, 16> NonDebugSections;
+ size_t NonDebugSectionsStart = 0;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<GDBJITDebugInfoRegistrationPlugin>>
+GDBJITDebugInfoRegistrationPlugin::Create(ExecutionSession &ES,
+ JITDylib &ProcessJD,
+ const Triple &TT) {
+ auto RegisterActionAddr =
+ TT.isOSBinFormatMachO()
+ ? ES.intern("_llvm_orc_registerJITLoaderGDBAllocAction")
+ : ES.intern("llvm_orc_registerJITLoaderGDBAllocAction");
+
+ if (auto Addr = ES.lookup({&ProcessJD}, RegisterActionAddr))
+ return std::make_unique<GDBJITDebugInfoRegistrationPlugin>(
+ ExecutorAddr(Addr->getAddress()));
+ else
+ return Addr.takeError();
+}
+
+Error GDBJITDebugInfoRegistrationPlugin::notifyFailed(
+ MaterializationResponsibility &MR) {
+ return Error::success();
+}
+
+Error GDBJITDebugInfoRegistrationPlugin::notifyRemovingResources(
+ ResourceKey K) {
+ return Error::success();
+}
+
+void GDBJITDebugInfoRegistrationPlugin::notifyTransferringResources(
+ ResourceKey DstKey, ResourceKey SrcKey) {}
+
+void GDBJITDebugInfoRegistrationPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, LinkGraph &LG,
+ PassConfiguration &PassConfig) {
+
+ if (LG.getTargetTriple().getObjectFormat() == Triple::MachO)
+ modifyPassConfigForMachO(MR, LG, PassConfig);
+ else {
+ LLVM_DEBUG({
+ dbgs() << "GDBJITDebugInfoRegistrationPlugin skipping unspported graph "
+ << LG.getName() << "(triple = " << LG.getTargetTriple().str()
+ << "\n";
+ });
+ }
+}
+
+void GDBJITDebugInfoRegistrationPlugin::modifyPassConfigForMachO(
+ MaterializationResponsibility &MR, jitlink::LinkGraph &LG,
+ jitlink::PassConfiguration &PassConfig) {
+
+ switch (LG.getTargetTriple().getArch()) {
+ case Triple::x86_64:
+ case Triple::aarch64:
+ // Supported, continue.
+ assert(LG.getPointerSize() == 8 && "Graph has incorrect pointer size");
+ assert(LG.getEndianness() == support::little &&
+ "Graph has incorrect endianness");
+ break;
+ default:
+ // Unsupported.
+ LLVM_DEBUG({
+ dbgs() << "GDBJITDebugInfoRegistrationPlugin skipping unsupported "
+ << "MachO graph " << LG.getName()
+ << "(triple = " << LG.getTargetTriple().str()
+ << ", pointer size = " << LG.getPointerSize() << ", endianness = "
+ << (LG.getEndianness() == support::big ? "big" : "little")
+ << ")\n";
+ });
+ return;
+ }
+
+ // Scan for debug sections. If we find one then install passes.
+ bool HasDebugSections = false;
+ for (auto &Sec : LG.sections())
+ if (MachODebugObjectSynthesizerBase::isDebugSection(Sec)) {
+ HasDebugSections = true;
+ break;
+ }
+
+ if (HasDebugSections) {
+ LLVM_DEBUG({
+ dbgs() << "GDBJITDebugInfoRegistrationPlugin: Graph " << LG.getName()
+ << " contains debug info. Installing debugger support passes.\n";
+ });
+
+ auto MDOS = std::make_shared<MachODebugObjectSynthesizer<MachO64LE>>(
+ LG, RegisterActionAddr);
+ PassConfig.PrePrunePasses.push_back(
+ [=](LinkGraph &G) { return MDOS->preserveDebugSections(); });
+ PassConfig.PostPrunePasses.push_back(
+ [=](LinkGraph &G) { return MDOS->startSynthesis(); });
+ PassConfig.PreFixupPasses.push_back(
+ [=](LinkGraph &G) { return MDOS->completeSynthesisAndRegister(); });
+ } else {
+ LLVM_DEBUG({
+ dbgs() << "GDBJITDebugInfoRegistrationPlugin: Graph " << LG.getName()
+ << " contains no debug info. Skipping.\n";
+ });
+ }
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp
new file mode 100644
index 0000000000..d02760703f
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ELFNixPlatform.cpp
@@ -0,0 +1,828 @@
+//===------ ELFNixPlatform.cpp - Utilities for executing MachO in Orc -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ELFNixPlatform.h"
+
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/ExecutionEngine/JITLink/ELF_x86_64.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::shared;
+
+namespace {
+
+class DSOHandleMaterializationUnit : public MaterializationUnit {
+public:
+ DSOHandleMaterializationUnit(ELFNixPlatform &ENP,
+ const SymbolStringPtr &DSOHandleSymbol)
+ : MaterializationUnit(
+ createDSOHandleSectionInterface(ENP, DSOHandleSymbol)),
+ ENP(ENP) {}
+
+ StringRef getName() const override { return "DSOHandleMU"; }
+
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ unsigned PointerSize;
+ support::endianness Endianness;
+ jitlink::Edge::Kind EdgeKind;
+ const auto &TT =
+ ENP.getExecutionSession().getExecutorProcessControl().getTargetTriple();
+
+ switch (TT.getArch()) {
+ case Triple::x86_64:
+ PointerSize = 8;
+ Endianness = support::endianness::little;
+ EdgeKind = jitlink::x86_64::Pointer64;
+ break;
+ default:
+ llvm_unreachable("Unrecognized architecture");
+ }
+
+ // void *__dso_handle = &__dso_handle;
+ auto G = std::make_unique<jitlink::LinkGraph>(
+ "<DSOHandleMU>", TT, PointerSize, Endianness,
+ jitlink::getGenericEdgeKindName);
+ auto &DSOHandleSection =
+ G->createSection(".data.__dso_handle", jitlink::MemProt::Read);
+ auto &DSOHandleBlock = G->createContentBlock(
+ DSOHandleSection, getDSOHandleContent(PointerSize), orc::ExecutorAddr(),
+ 8, 0);
+ auto &DSOHandleSymbol = G->addDefinedSymbol(
+ DSOHandleBlock, 0, *R->getInitializerSymbol(), DSOHandleBlock.getSize(),
+ jitlink::Linkage::Strong, jitlink::Scope::Default, false, true);
+ DSOHandleBlock.addEdge(EdgeKind, 0, DSOHandleSymbol, 0);
+
+ ENP.getObjectLinkingLayer().emit(std::move(R), std::move(G));
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Sym) override {}
+
+private:
+ static MaterializationUnit::Interface
+ createDSOHandleSectionInterface(ELFNixPlatform &ENP,
+ const SymbolStringPtr &DSOHandleSymbol) {
+ SymbolFlagsMap SymbolFlags;
+ SymbolFlags[DSOHandleSymbol] = JITSymbolFlags::Exported;
+ return MaterializationUnit::Interface(std::move(SymbolFlags),
+ DSOHandleSymbol);
+ }
+
+ ArrayRef<char> getDSOHandleContent(size_t PointerSize) {
+ static const char Content[8] = {0};
+ assert(PointerSize <= sizeof Content);
+ return {Content, PointerSize};
+ }
+
+ ELFNixPlatform &ENP;
+};
+
+StringRef EHFrameSectionName = ".eh_frame";
+StringRef InitArrayFuncSectionName = ".init_array";
+
+StringRef ThreadBSSSectionName = ".tbss";
+StringRef ThreadDataSectionName = ".tdata";
+
+StringRef InitSectionNames[] = {InitArrayFuncSectionName};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<ELFNixPlatform>>
+ELFNixPlatform::Create(ExecutionSession &ES,
+ ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD, const char *OrcRuntimePath,
+ Optional<SymbolAliasMap> RuntimeAliases) {
+
+ auto &EPC = ES.getExecutorProcessControl();
+
+ // If the target is not supported then bail out immediately.
+ if (!supportedTarget(EPC.getTargetTriple()))
+ return make_error<StringError>("Unsupported ELFNixPlatform triple: " +
+ EPC.getTargetTriple().str(),
+ inconvertibleErrorCode());
+
+ // Create default aliases if the caller didn't supply any.
+ if (!RuntimeAliases)
+ RuntimeAliases = standardPlatformAliases(ES);
+
+ // Define the aliases.
+ if (auto Err = PlatformJD.define(symbolAliases(std::move(*RuntimeAliases))))
+ return std::move(Err);
+
+ // Add JIT-dispatch function support symbols.
+ if (auto Err = PlatformJD.define(absoluteSymbols(
+ {{ES.intern("__orc_rt_jit_dispatch"),
+ {EPC.getJITDispatchInfo().JITDispatchFunction.getValue(),
+ JITSymbolFlags::Exported}},
+ {ES.intern("__orc_rt_jit_dispatch_ctx"),
+ {EPC.getJITDispatchInfo().JITDispatchContext.getValue(),
+ JITSymbolFlags::Exported}}})))
+ return std::move(Err);
+
+ // Create a generator for the ORC runtime archive.
+ auto OrcRuntimeArchiveGenerator = StaticLibraryDefinitionGenerator::Load(
+ ObjLinkingLayer, OrcRuntimePath, EPC.getTargetTriple());
+ if (!OrcRuntimeArchiveGenerator)
+ return OrcRuntimeArchiveGenerator.takeError();
+
+ // Create the instance.
+ Error Err = Error::success();
+ auto P = std::unique_ptr<ELFNixPlatform>(
+ new ELFNixPlatform(ES, ObjLinkingLayer, PlatformJD,
+ std::move(*OrcRuntimeArchiveGenerator), Err));
+ if (Err)
+ return std::move(Err);
+ return std::move(P);
+}
+
+Error ELFNixPlatform::setupJITDylib(JITDylib &JD) {
+ return JD.define(
+ std::make_unique<DSOHandleMaterializationUnit>(*this, DSOHandleSymbol));
+}
+
+Error ELFNixPlatform::teardownJITDylib(JITDylib &JD) {
+ return Error::success();
+}
+
+Error ELFNixPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ const auto &InitSym = MU.getInitializerSymbol();
+ if (!InitSym)
+ return Error::success();
+
+ RegisteredInitSymbols[&JD].add(InitSym,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform: Registered init symbol " << *InitSym
+ << " for MU " << MU.getName() << "\n";
+ });
+ return Error::success();
+}
+
+Error ELFNixPlatform::notifyRemoving(ResourceTracker &RT) {
+ llvm_unreachable("Not supported yet");
+}
+
+static void addAliases(ExecutionSession &ES, SymbolAliasMap &Aliases,
+ ArrayRef<std::pair<const char *, const char *>> AL) {
+ for (auto &KV : AL) {
+ auto AliasName = ES.intern(KV.first);
+ assert(!Aliases.count(AliasName) && "Duplicate symbol name in alias map");
+ Aliases[std::move(AliasName)] = {ES.intern(KV.second),
+ JITSymbolFlags::Exported};
+ }
+}
+
+SymbolAliasMap ELFNixPlatform::standardPlatformAliases(ExecutionSession &ES) {
+ SymbolAliasMap Aliases;
+ addAliases(ES, Aliases, requiredCXXAliases());
+ addAliases(ES, Aliases, standardRuntimeUtilityAliases());
+ return Aliases;
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+ELFNixPlatform::requiredCXXAliases() {
+ static const std::pair<const char *, const char *> RequiredCXXAliases[] = {
+ {"__cxa_atexit", "__orc_rt_elfnix_cxa_atexit"},
+ {"atexit", "__orc_rt_elfnix_atexit"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(RequiredCXXAliases);
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+ELFNixPlatform::standardRuntimeUtilityAliases() {
+ static const std::pair<const char *, const char *>
+ StandardRuntimeUtilityAliases[] = {
+ {"__orc_rt_run_program", "__orc_rt_elfnix_run_program"},
+ {"__orc_rt_log_error", "__orc_rt_log_error_to_stderr"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(
+ StandardRuntimeUtilityAliases);
+}
+
+bool ELFNixPlatform::isInitializerSection(StringRef SecName) {
+ for (auto &Name : InitSectionNames) {
+ if (Name.equals(SecName))
+ return true;
+ }
+ return false;
+}
+
+bool ELFNixPlatform::supportedTarget(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::x86_64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+ELFNixPlatform::ELFNixPlatform(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD,
+ std::unique_ptr<DefinitionGenerator> OrcRuntimeGenerator, Error &Err)
+ : ES(ES), ObjLinkingLayer(ObjLinkingLayer),
+ DSOHandleSymbol(ES.intern("__dso_handle")) {
+ ErrorAsOutParameter _(&Err);
+
+ ObjLinkingLayer.addPlugin(std::make_unique<ELFNixPlatformPlugin>(*this));
+
+ PlatformJD.addGenerator(std::move(OrcRuntimeGenerator));
+
+ // PlatformJD hasn't been 'set-up' by the platform yet (since we're creating
+ // the platform now), so set it up.
+ if (auto E2 = setupJITDylib(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ RegisteredInitSymbols[&PlatformJD].add(
+ DSOHandleSymbol, SymbolLookupFlags::WeaklyReferencedSymbol);
+
+ // Associate wrapper function tags with JIT-side function implementations.
+ if (auto E2 = associateRuntimeSupportFunctions(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ // Lookup addresses of runtime functions callable by the platform,
+ // call the platform bootstrap function to initialize the platform-state
+ // object in the executor.
+ if (auto E2 = bootstrapELFNixRuntime(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+}
+
+Error ELFNixPlatform::associateRuntimeSupportFunctions(JITDylib &PlatformJD) {
+ ExecutionSession::JITDispatchHandlerAssociationMap WFs;
+
+ using GetInitializersSPSSig =
+ SPSExpected<SPSELFNixJITDylibInitializerSequence>(SPSString);
+ WFs[ES.intern("__orc_rt_elfnix_get_initializers_tag")] =
+ ES.wrapAsyncWithSPS<GetInitializersSPSSig>(
+ this, &ELFNixPlatform::rt_getInitializers);
+
+ using GetDeinitializersSPSSig =
+ SPSExpected<SPSELFJITDylibDeinitializerSequence>(SPSExecutorAddr);
+ WFs[ES.intern("__orc_rt_elfnix_get_deinitializers_tag")] =
+ ES.wrapAsyncWithSPS<GetDeinitializersSPSSig>(
+ this, &ELFNixPlatform::rt_getDeinitializers);
+
+ using LookupSymbolSPSSig =
+ SPSExpected<SPSExecutorAddr>(SPSExecutorAddr, SPSString);
+ WFs[ES.intern("__orc_rt_elfnix_symbol_lookup_tag")] =
+ ES.wrapAsyncWithSPS<LookupSymbolSPSSig>(this,
+ &ELFNixPlatform::rt_lookupSymbol);
+
+ return ES.registerJITDispatchHandlers(PlatformJD, std::move(WFs));
+}
+
+void ELFNixPlatform::getInitializersBuildSequencePhase(
+ SendInitializerSequenceFn SendResult, JITDylib &JD,
+ std::vector<JITDylibSP> DFSLinkOrder) {
+ ELFNixJITDylibInitializerSequence FullInitSeq;
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ for (auto &InitJD : reverse(DFSLinkOrder)) {
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform: Appending inits for \"" << InitJD->getName()
+ << "\" to sequence\n";
+ });
+ auto ISItr = InitSeqs.find(InitJD.get());
+ if (ISItr != InitSeqs.end()) {
+ FullInitSeq.emplace_back(std::move(ISItr->second));
+ InitSeqs.erase(ISItr);
+ }
+ }
+ }
+
+ SendResult(std::move(FullInitSeq));
+}
+
+void ELFNixPlatform::getInitializersLookupPhase(
+ SendInitializerSequenceFn SendResult, JITDylib &JD) {
+
+ auto DFSLinkOrder = JD.getDFSLinkOrder();
+ if (!DFSLinkOrder) {
+ SendResult(DFSLinkOrder.takeError());
+ return;
+ }
+
+ DenseMap<JITDylib *, SymbolLookupSet> NewInitSymbols;
+ ES.runSessionLocked([&]() {
+ for (auto &InitJD : *DFSLinkOrder) {
+ auto RISItr = RegisteredInitSymbols.find(InitJD.get());
+ if (RISItr != RegisteredInitSymbols.end()) {
+ NewInitSymbols[InitJD.get()] = std::move(RISItr->second);
+ RegisteredInitSymbols.erase(RISItr);
+ }
+ }
+ });
+
+ // If there are no further init symbols to look up then move on to the next
+ // phase.
+ if (NewInitSymbols.empty()) {
+ getInitializersBuildSequencePhase(std::move(SendResult), JD,
+ std::move(*DFSLinkOrder));
+ return;
+ }
+
+ // Otherwise issue a lookup and re-run this phase when it completes.
+ lookupInitSymbolsAsync(
+ [this, SendResult = std::move(SendResult), &JD](Error Err) mutable {
+ if (Err)
+ SendResult(std::move(Err));
+ else
+ getInitializersLookupPhase(std::move(SendResult), JD);
+ },
+ ES, std::move(NewInitSymbols));
+}
+
+void ELFNixPlatform::rt_getInitializers(SendInitializerSequenceFn SendResult,
+ StringRef JDName) {
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform::rt_getInitializers(\"" << JDName << "\")\n";
+ });
+
+ JITDylib *JD = ES.getJITDylibByName(JDName);
+ if (!JD) {
+ LLVM_DEBUG({
+ dbgs() << " No such JITDylib \"" << JDName << "\". Sending error.\n";
+ });
+ SendResult(make_error<StringError>("No JITDylib named " + JDName,
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ getInitializersLookupPhase(std::move(SendResult), *JD);
+}
+
+void ELFNixPlatform::rt_getDeinitializers(
+ SendDeinitializerSequenceFn SendResult, ExecutorAddr Handle) {
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform::rt_getDeinitializers(\""
+ << formatv("{0:x}", Handle.getValue()) << "\")\n";
+ });
+
+ JITDylib *JD = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HandleAddrToJITDylib.find(Handle);
+ if (I != HandleAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ if (!JD) {
+ LLVM_DEBUG({
+ dbgs() << " No JITDylib for handle "
+ << formatv("{0:x}", Handle.getValue()) << "\n";
+ });
+ SendResult(make_error<StringError>("No JITDylib associated with handle " +
+ formatv("{0:x}", Handle.getValue()),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ SendResult(ELFNixJITDylibDeinitializerSequence());
+}
+
+void ELFNixPlatform::rt_lookupSymbol(SendSymbolAddressFn SendResult,
+ ExecutorAddr Handle,
+ StringRef SymbolName) {
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform::rt_lookupSymbol(\""
+ << formatv("{0:x}", Handle.getValue()) << "\")\n";
+ });
+
+ JITDylib *JD = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HandleAddrToJITDylib.find(Handle);
+ if (I != HandleAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ if (!JD) {
+ LLVM_DEBUG({
+ dbgs() << " No JITDylib for handle "
+ << formatv("{0:x}", Handle.getValue()) << "\n";
+ });
+ SendResult(make_error<StringError>("No JITDylib associated with handle " +
+ formatv("{0:x}", Handle.getValue()),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ // Use functor class to work around XL build compiler issue on AIX.
+ class RtLookupNotifyComplete {
+ public:
+ RtLookupNotifyComplete(SendSymbolAddressFn &&SendResult)
+ : SendResult(std::move(SendResult)) {}
+ void operator()(Expected<SymbolMap> Result) {
+ if (Result) {
+ assert(Result->size() == 1 && "Unexpected result map count");
+ SendResult(ExecutorAddr(Result->begin()->second.getAddress()));
+ } else {
+ SendResult(Result.takeError());
+ }
+ }
+
+ private:
+ SendSymbolAddressFn SendResult;
+ };
+
+ ES.lookup(
+ LookupKind::DLSym, {{JD, JITDylibLookupFlags::MatchExportedSymbolsOnly}},
+ SymbolLookupSet(ES.intern(SymbolName)), SymbolState::Ready,
+ RtLookupNotifyComplete(std::move(SendResult)), NoDependenciesToRegister);
+}
+
+Error ELFNixPlatform::bootstrapELFNixRuntime(JITDylib &PlatformJD) {
+
+ std::pair<const char *, ExecutorAddr *> Symbols[] = {
+ {"__orc_rt_elfnix_platform_bootstrap", &orc_rt_elfnix_platform_bootstrap},
+ {"__orc_rt_elfnix_platform_shutdown", &orc_rt_elfnix_platform_shutdown},
+ {"__orc_rt_elfnix_register_object_sections",
+ &orc_rt_elfnix_register_object_sections},
+ {"__orc_rt_elfnix_create_pthread_key",
+ &orc_rt_elfnix_create_pthread_key}};
+
+ SymbolLookupSet RuntimeSymbols;
+ std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> AddrsToRecord;
+ for (const auto &KV : Symbols) {
+ auto Name = ES.intern(KV.first);
+ RuntimeSymbols.add(Name);
+ AddrsToRecord.push_back({std::move(Name), KV.second});
+ }
+
+ auto RuntimeSymbolAddrs = ES.lookup(
+ {{&PlatformJD, JITDylibLookupFlags::MatchAllSymbols}}, RuntimeSymbols);
+ if (!RuntimeSymbolAddrs)
+ return RuntimeSymbolAddrs.takeError();
+
+ for (const auto &KV : AddrsToRecord) {
+ auto &Name = KV.first;
+ assert(RuntimeSymbolAddrs->count(Name) && "Missing runtime symbol?");
+ KV.second->setValue((*RuntimeSymbolAddrs)[Name].getAddress());
+ }
+
+ auto PJDDSOHandle = ES.lookup(
+ {{&PlatformJD, JITDylibLookupFlags::MatchAllSymbols}}, DSOHandleSymbol);
+ if (!PJDDSOHandle)
+ return PJDDSOHandle.takeError();
+
+ if (auto Err = ES.callSPSWrapper<void(uint64_t)>(
+ orc_rt_elfnix_platform_bootstrap, PJDDSOHandle->getAddress()))
+ return Err;
+
+ // FIXME: Ordering is fuzzy here. We're probably best off saying
+ // "behavior is undefined if code that uses the runtime is added before
+ // the platform constructor returns", then move all this to the constructor.
+ RuntimeBootstrapped = true;
+ std::vector<ELFPerObjectSectionsToRegister> DeferredPOSRs;
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ DeferredPOSRs = std::move(BootstrapPOSRs);
+ }
+
+ for (auto &D : DeferredPOSRs)
+ if (auto Err = registerPerObjectSections(D))
+ return Err;
+
+ return Error::success();
+}
+
+Error ELFNixPlatform::registerInitInfo(
+ JITDylib &JD, ArrayRef<jitlink::Section *> InitSections) {
+
+ std::unique_lock<std::mutex> Lock(PlatformMutex);
+
+ ELFNixJITDylibInitializers *InitSeq = nullptr;
+ {
+ auto I = InitSeqs.find(&JD);
+ if (I == InitSeqs.end()) {
+ // If there's no init sequence entry yet then we need to look up the
+ // header symbol to force creation of one.
+ Lock.unlock();
+
+ auto SearchOrder =
+ JD.withLinkOrderDo([](const JITDylibSearchOrder &SO) { return SO; });
+ if (auto Err = ES.lookup(SearchOrder, DSOHandleSymbol).takeError())
+ return Err;
+
+ Lock.lock();
+ I = InitSeqs.find(&JD);
+ assert(I != InitSeqs.end() &&
+ "Entry missing after header symbol lookup?");
+ }
+ InitSeq = &I->second;
+ }
+
+ for (auto *Sec : InitSections) {
+ // FIXME: Avoid copy here.
+ jitlink::SectionRange R(*Sec);
+ InitSeq->InitSections[Sec->getName()].push_back(
+ {ExecutorAddr(R.getStart()), ExecutorAddr(R.getEnd())});
+ }
+
+ return Error::success();
+}
+
+Error ELFNixPlatform::registerPerObjectSections(
+ const ELFPerObjectSectionsToRegister &POSR) {
+
+ if (!orc_rt_elfnix_register_object_sections)
+ return make_error<StringError>("Attempting to register per-object "
+ "sections, but runtime support has not "
+ "been loaded yet",
+ inconvertibleErrorCode());
+
+ Error ErrResult = Error::success();
+ if (auto Err = ES.callSPSWrapper<shared::SPSError(
+ SPSELFPerObjectSectionsToRegister)>(
+ orc_rt_elfnix_register_object_sections, ErrResult, POSR))
+ return Err;
+ return ErrResult;
+}
+
+Expected<uint64_t> ELFNixPlatform::createPThreadKey() {
+ if (!orc_rt_elfnix_create_pthread_key)
+ return make_error<StringError>(
+ "Attempting to create pthread key in target, but runtime support has "
+ "not been loaded yet",
+ inconvertibleErrorCode());
+
+ Expected<uint64_t> Result(0);
+ if (auto Err = ES.callSPSWrapper<SPSExpected<uint64_t>(void)>(
+ orc_rt_elfnix_create_pthread_key, Result))
+ return std::move(Err);
+ return Result;
+}
+
+void ELFNixPlatform::ELFNixPlatformPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, jitlink::LinkGraph &LG,
+ jitlink::PassConfiguration &Config) {
+
+ // If the initializer symbol is the __dso_handle symbol then just add
+ // the DSO handle support passes.
+ if (MR.getInitializerSymbol() == MP.DSOHandleSymbol) {
+ addDSOHandleSupportPasses(MR, Config);
+ // The DSOHandle materialization unit doesn't require any other
+ // support, so we can bail out early.
+ return;
+ }
+
+ // If the object contains initializers then add passes to record them.
+ if (MR.getInitializerSymbol())
+ addInitializerSupportPasses(MR, Config);
+
+ // Add passes for eh-frame and TLV support.
+ addEHAndTLVSupportPasses(MR, Config);
+}
+
+ObjectLinkingLayer::Plugin::SyntheticSymbolDependenciesMap
+ELFNixPlatform::ELFNixPlatformPlugin::getSyntheticSymbolDependencies(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ auto I = InitSymbolDeps.find(&MR);
+ if (I != InitSymbolDeps.end()) {
+ SyntheticSymbolDependenciesMap Result;
+ Result[MR.getInitializerSymbol()] = std::move(I->second);
+ InitSymbolDeps.erase(&MR);
+ return Result;
+ }
+ return SyntheticSymbolDependenciesMap();
+}
+
+void ELFNixPlatform::ELFNixPlatformPlugin::addInitializerSupportPasses(
+ MaterializationResponsibility &MR, jitlink::PassConfiguration &Config) {
+
+ /// Preserve init sections.
+ Config.PrePrunePasses.push_back([this, &MR](jitlink::LinkGraph &G) -> Error {
+ if (auto Err = preserveInitSections(G, MR))
+ return Err;
+ return Error::success();
+ });
+
+ Config.PostFixupPasses.push_back(
+ [this, &JD = MR.getTargetJITDylib()](jitlink::LinkGraph &G) {
+ return registerInitSections(G, JD);
+ });
+}
+
+void ELFNixPlatform::ELFNixPlatformPlugin::addDSOHandleSupportPasses(
+ MaterializationResponsibility &MR, jitlink::PassConfiguration &Config) {
+
+ Config.PostAllocationPasses.push_back([this, &JD = MR.getTargetJITDylib()](
+ jitlink::LinkGraph &G) -> Error {
+ auto I = llvm::find_if(G.defined_symbols(), [this](jitlink::Symbol *Sym) {
+ return Sym->getName() == *MP.DSOHandleSymbol;
+ });
+ assert(I != G.defined_symbols().end() && "Missing DSO handle symbol");
+ {
+ std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+ auto HandleAddr = (*I)->getAddress();
+ MP.HandleAddrToJITDylib[HandleAddr] = &JD;
+ assert(!MP.InitSeqs.count(&JD) && "InitSeq entry for JD already exists");
+ MP.InitSeqs.insert(std::make_pair(
+ &JD, ELFNixJITDylibInitializers(JD.getName(), HandleAddr)));
+ }
+ return Error::success();
+ });
+}
+
+void ELFNixPlatform::ELFNixPlatformPlugin::addEHAndTLVSupportPasses(
+ MaterializationResponsibility &MR, jitlink::PassConfiguration &Config) {
+
+ // Insert TLV lowering at the start of the PostPrunePasses, since we want
+ // it to run before GOT/PLT lowering.
+
+ // TODO: Check that before the fixTLVSectionsAndEdges pass, the GOT/PLT build
+ // pass has done. Because the TLS descriptor need to be allocate in GOT.
+ Config.PostPrunePasses.push_back(
+ [this, &JD = MR.getTargetJITDylib()](jitlink::LinkGraph &G) {
+ return fixTLVSectionsAndEdges(G, JD);
+ });
+
+ // Add a pass to register the final addresses of the eh-frame and TLV sections
+ // with the runtime.
+ Config.PostFixupPasses.push_back([this](jitlink::LinkGraph &G) -> Error {
+ ELFPerObjectSectionsToRegister POSR;
+
+ if (auto *EHFrameSection = G.findSectionByName(EHFrameSectionName)) {
+ jitlink::SectionRange R(*EHFrameSection);
+ if (!R.empty())
+ POSR.EHFrameSection = {ExecutorAddr(R.getStart()),
+ ExecutorAddr(R.getEnd())};
+ }
+
+ // Get a pointer to the thread data section if there is one. It will be used
+ // below.
+ jitlink::Section *ThreadDataSection =
+ G.findSectionByName(ThreadDataSectionName);
+
+ // Handle thread BSS section if there is one.
+ if (auto *ThreadBSSSection = G.findSectionByName(ThreadBSSSectionName)) {
+ // If there's already a thread data section in this graph then merge the
+ // thread BSS section content into it, otherwise just treat the thread
+ // BSS section as the thread data section.
+ if (ThreadDataSection)
+ G.mergeSections(*ThreadDataSection, *ThreadBSSSection);
+ else
+ ThreadDataSection = ThreadBSSSection;
+ }
+
+ // Having merged thread BSS (if present) and thread data (if present),
+ // record the resulting section range.
+ if (ThreadDataSection) {
+ jitlink::SectionRange R(*ThreadDataSection);
+ if (!R.empty())
+ POSR.ThreadDataSection = {ExecutorAddr(R.getStart()),
+ ExecutorAddr(R.getEnd())};
+ }
+
+ if (POSR.EHFrameSection.Start || POSR.ThreadDataSection.Start) {
+
+ // If we're still bootstrapping the runtime then just record this
+ // frame for now.
+ if (!MP.RuntimeBootstrapped) {
+ std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+ MP.BootstrapPOSRs.push_back(POSR);
+ return Error::success();
+ }
+
+ // Otherwise register it immediately.
+ if (auto Err = MP.registerPerObjectSections(POSR))
+ return Err;
+ }
+
+ return Error::success();
+ });
+}
+
+Error ELFNixPlatform::ELFNixPlatformPlugin::preserveInitSections(
+ jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+
+ JITLinkSymbolSet InitSectionSymbols;
+ for (auto &InitSectionName : InitSectionNames) {
+ // Skip non-init sections.
+ auto *InitSection = G.findSectionByName(InitSectionName);
+ if (!InitSection)
+ continue;
+
+ // Make a pass over live symbols in the section: those blocks are already
+ // preserved.
+ DenseSet<jitlink::Block *> AlreadyLiveBlocks;
+ for (auto &Sym : InitSection->symbols()) {
+ auto &B = Sym->getBlock();
+ if (Sym->isLive() && Sym->getOffset() == 0 &&
+ Sym->getSize() == B.getSize() && !AlreadyLiveBlocks.count(&B)) {
+ InitSectionSymbols.insert(Sym);
+ AlreadyLiveBlocks.insert(&B);
+ }
+ }
+
+ // Add anonymous symbols to preserve any not-already-preserved blocks.
+ for (auto *B : InitSection->blocks())
+ if (!AlreadyLiveBlocks.count(B))
+ InitSectionSymbols.insert(
+ &G.addAnonymousSymbol(*B, 0, B->getSize(), false, true));
+ }
+
+ if (!InitSectionSymbols.empty()) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ InitSymbolDeps[&MR] = std::move(InitSectionSymbols);
+ }
+
+ return Error::success();
+}
+
+Error ELFNixPlatform::ELFNixPlatformPlugin::registerInitSections(
+ jitlink::LinkGraph &G, JITDylib &JD) {
+
+ SmallVector<jitlink::Section *> InitSections;
+
+ LLVM_DEBUG({ dbgs() << "ELFNixPlatform::registerInitSections\n"; });
+
+ for (auto InitSectionName : InitSectionNames) {
+ if (auto *Sec = G.findSectionByName(InitSectionName)) {
+ InitSections.push_back(Sec);
+ }
+ }
+
+ // Dump the scraped inits.
+ LLVM_DEBUG({
+ dbgs() << "ELFNixPlatform: Scraped " << G.getName() << " init sections:\n";
+ for (auto *Sec : InitSections) {
+ jitlink::SectionRange R(*Sec);
+ dbgs() << " " << Sec->getName() << ": "
+ << formatv("[ {0:x} -- {1:x} ]", R.getStart(), R.getEnd()) << "\n";
+ }
+ });
+
+ return MP.registerInitInfo(JD, InitSections);
+}
+
+Error ELFNixPlatform::ELFNixPlatformPlugin::fixTLVSectionsAndEdges(
+ jitlink::LinkGraph &G, JITDylib &JD) {
+
+ // TODO implement TLV support
+ for (auto *Sym : G.external_symbols())
+ if (Sym->getName() == "__tls_get_addr") {
+ Sym->setName("___orc_rt_elfnix_tls_get_addr");
+ }
+
+ auto *TLSInfoEntrySection = G.findSectionByName("$__TLSINFO");
+
+ if (TLSInfoEntrySection) {
+ Optional<uint64_t> Key;
+ {
+ std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+ auto I = MP.JITDylibToPThreadKey.find(&JD);
+ if (I != MP.JITDylibToPThreadKey.end())
+ Key = I->second;
+ }
+ if (!Key) {
+ if (auto KeyOrErr = MP.createPThreadKey())
+ Key = *KeyOrErr;
+ else
+ return KeyOrErr.takeError();
+ }
+
+ uint64_t PlatformKeyBits =
+ support::endian::byte_swap(*Key, G.getEndianness());
+
+ for (auto *B : TLSInfoEntrySection->blocks()) {
+ // FIXME: The TLS descriptor byte length may different with different
+ // ISA
+ assert(B->getSize() == (G.getPointerSize() * 2) &&
+ "TLS descriptor must be 2 words length");
+ auto TLSInfoEntryContent = B->getMutableContent(G);
+ memcpy(TLSInfoEntryContent.data(), &PlatformKeyBits, G.getPointerSize());
+ }
+ }
+
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp
new file mode 100644
index 0000000000..f3fe0555fa
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCDebugObjectRegistrar.cpp
@@ -0,0 +1,52 @@
+//===----- EPCDebugObjectRegistrar.cpp - EPC-based debug registration -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCDebugObjectRegistrar.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/Shared/SimplePackedSerialization.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<EPCDebugObjectRegistrar>>
+createJITLoaderGDBRegistrar(ExecutionSession &ES) {
+ auto &EPC = ES.getExecutorProcessControl();
+ auto ProcessHandle = EPC.loadDylib(nullptr);
+ if (!ProcessHandle)
+ return ProcessHandle.takeError();
+
+ SymbolStringPtr RegisterFn =
+ EPC.getTargetTriple().isOSBinFormatMachO()
+ ? EPC.intern("_llvm_orc_registerJITLoaderGDBWrapper")
+ : EPC.intern("llvm_orc_registerJITLoaderGDBWrapper");
+
+ SymbolLookupSet RegistrationSymbols;
+ RegistrationSymbols.add(RegisterFn);
+
+ auto Result = EPC.lookupSymbols({{*ProcessHandle, RegistrationSymbols}});
+ if (!Result)
+ return Result.takeError();
+
+ assert(Result->size() == 1 && "Unexpected number of dylibs in result");
+ assert((*Result)[0].size() == 1 &&
+ "Unexpected number of addresses in result");
+
+ return std::make_unique<EPCDebugObjectRegistrar>(
+ ES, ExecutorAddr((*Result)[0][0]));
+}
+
+Error EPCDebugObjectRegistrar::registerDebugObject(
+ ExecutorAddrRange TargetMem) {
+ return ES.callSPSWrapper<void(SPSExecutorAddrRange)>(RegisterFn, TargetMem);
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp
new file mode 100644
index 0000000000..ba154aaecd
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.cpp
@@ -0,0 +1,70 @@
+//===---------------- EPCDynamicLibrarySearchGenerator.cpp ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCDynamicLibrarySearchGenerator.h"
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<EPCDynamicLibrarySearchGenerator>>
+EPCDynamicLibrarySearchGenerator::Load(ExecutionSession &ES,
+ const char *LibraryPath,
+ SymbolPredicate Allow) {
+ auto Handle = ES.getExecutorProcessControl().loadDylib(LibraryPath);
+ if (!Handle)
+ return Handle.takeError();
+
+ return std::make_unique<EPCDynamicLibrarySearchGenerator>(ES, *Handle,
+ std::move(Allow));
+}
+
+Error EPCDynamicLibrarySearchGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+
+ if (Symbols.empty())
+ return Error::success();
+
+ SymbolLookupSet LookupSymbols;
+
+ for (auto &KV : Symbols) {
+ // Skip symbols that don't match the filter.
+ if (Allow && !Allow(KV.first))
+ continue;
+ LookupSymbols.add(KV.first, SymbolLookupFlags::WeaklyReferencedSymbol);
+ }
+
+ SymbolMap NewSymbols;
+
+ ExecutorProcessControl::LookupRequest Request(H, LookupSymbols);
+ auto Result = EPC.lookupSymbols(Request);
+ if (!Result)
+ return Result.takeError();
+
+ assert(Result->size() == 1 && "Results for more than one library returned");
+ assert(Result->front().size() == LookupSymbols.size() &&
+ "Result has incorrect number of elements");
+
+ auto ResultI = Result->front().begin();
+ for (auto &KV : LookupSymbols) {
+ if (*ResultI)
+ NewSymbols[KV.first] =
+ JITEvaluatedSymbol(*ResultI, JITSymbolFlags::Exported);
+ ++ResultI;
+ }
+
+ // If there were no resolved symbols bail out.
+ if (NewSymbols.empty())
+ return Error::success();
+
+ // Define resolved symbols.
+ return JD.define(absoluteSymbols(std::move(NewSymbols)));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp
new file mode 100644
index 0000000000..256ce94690
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCEHFrameRegistrar.cpp
@@ -0,0 +1,71 @@
+//===------ EPCEHFrameRegistrar.cpp - EPC-based eh-frame registration -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCEHFrameRegistrar.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+
+// Builds an EPCEHFrameRegistrar by locating the eh-frame register/deregister
+// wrapper functions in the executor process and capturing their addresses.
+Expected<std::unique_ptr<EPCEHFrameRegistrar>>
+EPCEHFrameRegistrar::Create(ExecutionSession &ES) {
+  // FIXME: Proper mangling here -- we really need to decouple linker mangling
+  // from DataLayout.
+
+  // Find the addresses of the registration/deregistration functions in the
+  // executor process.
+  auto &EPC = ES.getExecutorProcessControl();
+  auto ProcessHandle = EPC.loadDylib(nullptr);
+  if (!ProcessHandle)
+    return ProcessHandle.takeError();
+
+  // MachO symbols carry a leading underscore at the linker level.
+  std::string RegisterWrapperName, DeregisterWrapperName;
+  if (EPC.getTargetTriple().isOSBinFormatMachO()) {
+    RegisterWrapperName += '_';
+    DeregisterWrapperName += '_';
+  }
+  RegisterWrapperName += "llvm_orc_registerEHFrameSectionWrapper";
+  DeregisterWrapperName += "llvm_orc_deregisterEHFrameSectionWrapper";
+
+  SymbolLookupSet RegistrationSymbols;
+  RegistrationSymbols.add(EPC.intern(RegisterWrapperName));
+  RegistrationSymbols.add(EPC.intern(DeregisterWrapperName));
+
+  auto Result = EPC.lookupSymbols({{*ProcessHandle, RegistrationSymbols}});
+  if (!Result)
+    return Result.takeError();
+
+  assert(Result->size() == 1 && "Unexpected number of dylibs in result");
+  assert((*Result)[0].size() == 2 &&
+         "Unexpected number of addresses in result");
+
+  // Order matches insertion order into RegistrationSymbols above.
+  auto RegisterEHFrameWrapperFnAddr = (*Result)[0][0];
+  auto DeregisterEHFrameWrapperFnAddr = (*Result)[0][1];
+
+  return std::make_unique<EPCEHFrameRegistrar>(
+      ES, ExecutorAddr(RegisterEHFrameWrapperFnAddr),
+      ExecutorAddr(DeregisterEHFrameWrapperFnAddr));
+}
+
+// Registers an eh-frame section by invoking the executor-side register
+// wrapper via an SPS wrapper-function call.
+Error EPCEHFrameRegistrar::registerEHFrames(ExecutorAddrRange EHFrameSection) {
+  return ES.callSPSWrapper<void(SPSExecutorAddrRange)>(
+      RegisterEHFrameWrapperFnAddr, EHFrameSection);
+}
+
+// Deregisters an eh-frame section via the executor-side deregister wrapper.
+Error EPCEHFrameRegistrar::deregisterEHFrames(
+    ExecutorAddrRange EHFrameSection) {
+  return ES.callSPSWrapper<void(SPSExecutorAddrRange)>(
+      DeregisterEHFrameWrapperFnAddr, EHFrameSection);
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp
new file mode 100644
index 0000000000..6c47c5c5f7
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericDylibManager.cpp
@@ -0,0 +1,107 @@
+//===------- EPCGenericDylibManager.cpp -- Dylib management via EPC -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCGenericDylibManager.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+// SPS serialization for a single SymbolLookupSet entry: encoded as
+// (symbol-name string, bool is-required). Serialization only -- this side
+// never deserializes lookup-set elements.
+template <>
+class SPSSerializationTraits<SPSRemoteSymbolLookupSetElement,
+                             SymbolLookupSet::value_type> {
+public:
+  static size_t size(const SymbolLookupSet::value_type &V) {
+    return SPSArgList<SPSString, bool>::size(
+        *V.first, V.second == SymbolLookupFlags::RequiredSymbol);
+  }
+
+  static bool serialize(SPSOutputBuffer &OB,
+                        const SymbolLookupSet::value_type &V) {
+    return SPSArgList<SPSString, bool>::serialize(
+        OB, *V.first, V.second == SymbolLookupFlags::RequiredSymbol);
+  }
+};
+
+// Opt SymbolLookupSet into element-wise SPS sequence serialization.
+template <>
+class TrivialSPSSequenceSerialization<SPSRemoteSymbolLookupSetElement,
+                                      SymbolLookupSet> {
+public:
+  static constexpr bool available = true;
+};
+
+// SPS serialization for a whole LookupRequest: encoded as
+// (dylib handle address, lookup-symbol sequence).
+template <>
+class SPSSerializationTraits<SPSRemoteSymbolLookup,
+                             ExecutorProcessControl::LookupRequest> {
+  using MemberSerialization =
+      SPSArgList<SPSExecutorAddr, SPSRemoteSymbolLookupSet>;
+
+public:
+  static size_t size(const ExecutorProcessControl::LookupRequest &LR) {
+    return MemberSerialization::size(ExecutorAddr(LR.Handle), LR.Symbols);
+  }
+
+  static bool serialize(SPSOutputBuffer &OB,
+                        const ExecutorProcessControl::LookupRequest &LR) {
+    return MemberSerialization::serialize(OB, ExecutorAddr(LR.Handle),
+                                          LR.Symbols);
+  }
+};
+
+} // end namespace shared
+
+// Resolves the SimpleExecutorDylibManager instance/open/lookup wrapper
+// addresses from the EPC bootstrap symbols and constructs a manager on them.
+Expected<EPCGenericDylibManager>
+EPCGenericDylibManager::CreateWithDefaultBootstrapSymbols(
+    ExecutorProcessControl &EPC) {
+  SymbolAddrs SAs;
+  if (auto Err = EPC.getBootstrapSymbols(
+          {{SAs.Instance, rt::SimpleExecutorDylibManagerInstanceName},
+           {SAs.Open, rt::SimpleExecutorDylibManagerOpenWrapperName},
+           {SAs.Lookup, rt::SimpleExecutorDylibManagerLookupWrapperName}}))
+    return std::move(Err);
+  return EPCGenericDylibManager(EPC, std::move(SAs));
+}
+
+// Opens a dylib in the executor process via the Open wrapper, returning the
+// remote handle (out-parameter H of the SPS call).
+Expected<tpctypes::DylibHandle> EPCGenericDylibManager::open(StringRef Path,
+                                                            uint64_t Mode) {
+  Expected<tpctypes::DylibHandle> H(0);
+  if (auto Err =
+          EPC.callSPSWrapper<rt::SPSSimpleExecutorDylibManagerOpenSignature>(
+              SAs.Open, H, SAs.Instance, Path, Mode))
+    return std::move(Err);
+  return H;
+}
+
+// Looks up Lookup's symbols in remote dylib H via the Lookup wrapper;
+// results come back as one ExecutorAddr per requested symbol.
+Expected<std::vector<ExecutorAddr>>
+EPCGenericDylibManager::lookup(tpctypes::DylibHandle H,
+                               const SymbolLookupSet &Lookup) {
+  Expected<std::vector<ExecutorAddr>> Result((std::vector<ExecutorAddr>()));
+  if (auto Err =
+          EPC.callSPSWrapper<rt::SPSSimpleExecutorDylibManagerLookupSignature>(
+              SAs.Lookup, Result, SAs.Instance, H, Lookup))
+    return std::move(Err);
+  return Result;
+}
+
+// Overload for a pre-encoded RemoteSymbolLookupSet; same wire protocol as
+// the SymbolLookupSet overload above.
+Expected<std::vector<ExecutorAddr>>
+EPCGenericDylibManager::lookup(tpctypes::DylibHandle H,
+                               const RemoteSymbolLookupSet &Lookup) {
+  Expected<std::vector<ExecutorAddr>> Result((std::vector<ExecutorAddr>()));
+  if (auto Err =
+          EPC.callSPSWrapper<rt::SPSSimpleExecutorDylibManagerLookupSignature>(
+              SAs.Lookup, Result, SAs.Instance, H, Lookup))
+    return std::move(Err);
+  return Result;
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp
new file mode 100644
index 0000000000..75cc30753f
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.cpp
@@ -0,0 +1,174 @@
+//===---- EPCGenericJITLinkMemoryManager.cpp -- Mem management via EPC ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h"
+
+#include "llvm/ExecutionEngine/JITLink/JITLink.h"
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+
+#include <limits>
+
+using namespace llvm::jitlink;
+
+namespace llvm {
+namespace orc {
+
+// In-flight allocation for EPCGenericJITLinkMemoryManager: holds the remote
+// base address and per-segment info between allocate() and finalize()/
+// abandon(). Finalization ships segment contents and allocation actions to
+// the executor's SimpleExecutorMemoryManager via async SPS wrapper calls.
+class EPCGenericJITLinkMemoryManager::InFlightAlloc
+    : public jitlink::JITLinkMemoryManager::InFlightAlloc {
+public:
+
+  // FIXME: The C++98 initializer is an attempt to work around compile failures
+  // due to http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#1397.
+  // We should be able to switch this back to member initialization once that
+  // issue is fixed.
+  struct SegInfo {
+    SegInfo() : WorkingMem(nullptr), ContentSize(0), ZeroFillSize(0) {}
+
+    char *WorkingMem;       // Local buffer holding segment content.
+    ExecutorAddr Addr;      // Remote address the segment will live at.
+    uint64_t ContentSize;   // Bytes of real content.
+    uint64_t ZeroFillSize;  // Trailing zero-fill bytes.
+  };
+
+  using SegInfoMap = AllocGroupSmallMap<SegInfo>;
+
+  InFlightAlloc(EPCGenericJITLinkMemoryManager &Parent, LinkGraph &G,
+                ExecutorAddr AllocAddr, SegInfoMap Segs)
+      : Parent(Parent), G(G), AllocAddr(AllocAddr), Segs(std::move(Segs)) {}
+
+  // Build a FinalizeRequest from the segment map and send it to the executor.
+  // On success the callback receives a FinalizedAlloc for the remote address.
+  void finalize(OnFinalizedFunction OnFinalize) override {
+    tpctypes::FinalizeRequest FR;
+    for (auto &KV : Segs) {
+      assert(KV.second.ContentSize <= std::numeric_limits<size_t>::max());
+      FR.Segments.push_back(tpctypes::SegFinalizeRequest{
+          tpctypes::toWireProtectionFlags(
+              toSysMemoryProtectionFlags(KV.first.getMemProt())),
+          KV.second.Addr,
+          alignTo(KV.second.ContentSize + KV.second.ZeroFillSize,
+                  Parent.EPC.getPageSize()),
+          {KV.second.WorkingMem, static_cast<size_t>(KV.second.ContentSize)}});
+    }
+
+    // Transfer allocation actions.
+    std::swap(FR.Actions, G.allocActions());
+
+    Parent.EPC.callSPSWrapperAsync<
+        rt::SPSSimpleExecutorMemoryManagerFinalizeSignature>(
+        Parent.SAs.Finalize,
+        [OnFinalize = std::move(OnFinalize), AllocAddr = this->AllocAddr](
+            Error SerializationErr, Error FinalizeErr) mutable {
+          // FIXME: Release abandoned alloc.
+          if (SerializationErr) {
+            // If serialization failed the remote result is meaningless;
+            // consume it and report the serialization error instead.
+            cantFail(std::move(FinalizeErr));
+            OnFinalize(std::move(SerializationErr));
+          } else if (FinalizeErr)
+            OnFinalize(std::move(FinalizeErr));
+          else
+            OnFinalize(FinalizedAlloc(AllocAddr));
+        },
+        Parent.SAs.Allocator, std::move(FR));
+  }
+
+  // Release the remote reservation without finalizing.
+  void abandon(OnAbandonedFunction OnAbandoned) override {
+    // FIXME: Return memory to pool instead.
+    Parent.EPC.callSPSWrapperAsync<
+        rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
+        Parent.SAs.Deallocate,
+        [OnAbandoned = std::move(OnAbandoned)](Error SerializationErr,
+                                               Error DeallocateErr) mutable {
+          if (SerializationErr) {
+            cantFail(std::move(DeallocateErr));
+            OnAbandoned(std::move(SerializationErr));
+          } else
+            OnAbandoned(std::move(DeallocateErr));
+        },
+        Parent.SAs.Allocator, ArrayRef<ExecutorAddr>(AllocAddr));
+  }
+
+private:
+  EPCGenericJITLinkMemoryManager &Parent;
+  LinkGraph &G;
+  ExecutorAddr AllocAddr;
+  SegInfoMap Segs;
+};
+
+// Reserves one contiguous page-aligned region in the executor sized to hold
+// all of G's segments, then hands off to completeAllocation to lay segments
+// out within it. Asynchronous: the result is delivered via OnAllocated.
+void EPCGenericJITLinkMemoryManager::allocate(const JITLinkDylib *JD,
+                                              LinkGraph &G,
+                                              OnAllocatedFunction OnAllocated) {
+  BasicLayout BL(G);
+
+  auto Pages = BL.getContiguousPageBasedLayoutSizes(EPC.getPageSize());
+  if (!Pages)
+    return OnAllocated(Pages.takeError());
+
+  EPC.callSPSWrapperAsync<rt::SPSSimpleExecutorMemoryManagerReserveSignature>(
+      SAs.Reserve,
+      [this, BL = std::move(BL), OnAllocated = std::move(OnAllocated)](
+          Error SerializationErr, Expected<ExecutorAddr> AllocAddr) mutable {
+        if (SerializationErr) {
+          // Serialization failure supersedes the (meaningless) remote result.
+          cantFail(AllocAddr.takeError());
+          return OnAllocated(std::move(SerializationErr));
+        }
+        if (!AllocAddr)
+          return OnAllocated(AllocAddr.takeError());
+
+        completeAllocation(*AllocAddr, std::move(BL), std::move(OnAllocated));
+      },
+      SAs.Allocator, Pages->total());
+}
+
+// Asynchronously deallocates finalized allocations in the executor, then
+// releases the local FinalizedAlloc handles (ownership has been transferred
+// to the remote deallocate call).
+void EPCGenericJITLinkMemoryManager::deallocate(
+    std::vector<FinalizedAlloc> Allocs, OnDeallocatedFunction OnDeallocated) {
+  EPC.callSPSWrapperAsync<
+      rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
+      SAs.Deallocate,
+      [OnDeallocated = std::move(OnDeallocated)](Error SerErr,
+                                                 Error DeallocErr) mutable {
+        if (SerErr) {
+          cantFail(std::move(DeallocErr));
+          OnDeallocated(std::move(SerErr));
+        } else
+          OnDeallocated(std::move(DeallocErr));
+      },
+      SAs.Allocator, Allocs);
+  for (auto &A : Allocs)
+    A.release();
+}
+
+// Lays segments out sequentially (page-aligned) inside the reserved remote
+// region starting at AllocAddr, allocates local working buffers for their
+// content, applies the layout to the graph, and wraps everything in an
+// InFlightAlloc handed to OnAllocated.
+void EPCGenericJITLinkMemoryManager::completeAllocation(
+    ExecutorAddr AllocAddr, BasicLayout BL, OnAllocatedFunction OnAllocated) {
+
+  InFlightAlloc::SegInfoMap SegInfos;
+
+  ExecutorAddr NextSegAddr = AllocAddr;
+  for (auto &KV : BL.segments()) {
+    const auto &AG = KV.first;
+    auto &Seg = KV.second;
+
+    Seg.Addr = NextSegAddr;
+    KV.second.WorkingMem = BL.getGraph().allocateBuffer(Seg.ContentSize).data();
+    // Each segment occupies a whole number of pages.
+    NextSegAddr += ExecutorAddrDiff(
+        alignTo(Seg.ContentSize + Seg.ZeroFillSize, EPC.getPageSize()));
+
+    auto &SegInfo = SegInfos[AG];
+    SegInfo.ContentSize = Seg.ContentSize;
+    SegInfo.ZeroFillSize = Seg.ZeroFillSize;
+    SegInfo.Addr = ExecutorAddr(Seg.Addr);
+    SegInfo.WorkingMem = Seg.WorkingMem;
+  }
+
+  if (auto Err = BL.apply())
+    return OnAllocated(std::move(Err));
+
+  OnAllocated(std::make_unique<InFlightAlloc>(*this, BL.getGraph(), AllocAddr,
+                                              std::move(SegInfos)));
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
new file mode 100644
index 0000000000..cdac367e11
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
@@ -0,0 +1,319 @@
+//===----- EPCGenericRTDyldMemoryManager.cpp - EPC-based MemMgr -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+
+// Resolves the SimpleExecutorMemoryManager instance and wrapper addresses
+// (reserve/finalize/deallocate plus eh-frame register/deregister) from the
+// EPC bootstrap symbols and constructs a memory manager on them.
+Expected<std::unique_ptr<EPCGenericRTDyldMemoryManager>>
+EPCGenericRTDyldMemoryManager::CreateWithDefaultBootstrapSymbols(
+    ExecutorProcessControl &EPC) {
+  SymbolAddrs SAs;
+  if (auto Err = EPC.getBootstrapSymbols(
+          {{SAs.Instance, rt::SimpleExecutorMemoryManagerInstanceName},
+           {SAs.Reserve, rt::SimpleExecutorMemoryManagerReserveWrapperName},
+           {SAs.Finalize, rt::SimpleExecutorMemoryManagerFinalizeWrapperName},
+           {SAs.Deallocate,
+            rt::SimpleExecutorMemoryManagerDeallocateWrapperName},
+           {SAs.RegisterEHFrame, rt::RegisterEHFrameSectionWrapperName},
+           {SAs.DeregisterEHFrame, rt::DeregisterEHFrameSectionWrapperName}}))
+    return std::move(Err);
+  return std::make_unique<EPCGenericRTDyldMemoryManager>(EPC, std::move(SAs));
+}
+
+// Constructor: just stores the EPC reference and wrapper addresses.
+EPCGenericRTDyldMemoryManager::EPCGenericRTDyldMemoryManager(
+    ExecutorProcessControl &EPC, SymbolAddrs SAs)
+    : EPC(EPC), SAs(std::move(SAs)) {
+  LLVM_DEBUG(dbgs() << "Created remote allocator " << (void *)this << "\n");
+}
+
+EPCGenericRTDyldMemoryManager::~EPCGenericRTDyldMemoryManager() {
+ LLVM_DEBUG(dbgs() << "Destroyed remote allocator " << (void *)this << "\n");
+ if (!ErrMsg.empty())
+ errs() << "Destroying with existing errors:\n" << ErrMsg << "\n";
+
+ Error Err = Error::success();
+ if (auto Err2 = EPC.callSPSWrapper<
+ rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>(
+ SAs.Reserve, Err, SAs.Instance, FinalizedAllocs)) {
+ // FIXME: Report errors through EPC once that functionality is available.
+ logAllUnhandledErrors(std::move(Err2), errs(), "");
+ return;
+ }
+
+ if (Err)
+ logAllUnhandledErrors(std::move(Err), errs(), "");
+}
+
+// Allocates a local working buffer for a code section out of the most
+// recently reserved AllocGroup; the returned pointer is aligned within an
+// over-sized buffer. Requires a prior reserveAllocationSpace call
+// (Unmapped.back() must exist).
+uint8_t *EPCGenericRTDyldMemoryManager::allocateCodeSection(
+    uintptr_t Size, unsigned Alignment, unsigned SectionID,
+    StringRef SectionName) {
+  std::lock_guard<std::mutex> Lock(M);
+  LLVM_DEBUG({
+    dbgs() << "Allocator " << (void *)this << " allocating code section "
+           << SectionName << ": size = " << formatv("{0:x}", Size)
+           << " bytes, alignment = " << Alignment << "\n";
+  });
+  auto &Seg = Unmapped.back().CodeAllocs;
+  Seg.emplace_back(Size, Alignment);
+  return reinterpret_cast<uint8_t *>(
+      alignAddr(Seg.back().Contents.get(), Align(Alignment)));
+}
+
+// Allocates a local working buffer for a data section, routed to the
+// read-only or read-write alloc list of the current AllocGroup.
+uint8_t *EPCGenericRTDyldMemoryManager::allocateDataSection(
+    uintptr_t Size, unsigned Alignment, unsigned SectionID,
+    StringRef SectionName, bool IsReadOnly) {
+  std::lock_guard<std::mutex> Lock(M);
+  LLVM_DEBUG({
+    dbgs() << "Allocator " << (void *)this << " allocating "
+           << (IsReadOnly ? "ro" : "rw") << "-data section " << SectionName
+           << ": size = " << formatv("{0:x}", Size) << " bytes, alignment "
+           << Alignment << ")\n";
+  });
+
+  auto &Seg =
+      IsReadOnly ? Unmapped.back().RODataAllocs : Unmapped.back().RWDataAllocs;
+
+  Seg.emplace_back(Size, Alignment);
+  return reinterpret_cast<uint8_t *>(
+      alignAddr(Seg.back().Contents.get(), Align(Alignment)));
+}
+
+// Reserves one remote region large enough for code + ro-data + rw-data (each
+// rounded up to page size) and records the three sub-ranges in a new
+// AllocGroup on Unmapped. Errors are latched into ErrMsg rather than thrown,
+// matching the RTDyldMemoryManager interface.
+void EPCGenericRTDyldMemoryManager::reserveAllocationSpace(
+    uintptr_t CodeSize, uint32_t CodeAlign, uintptr_t RODataSize,
+    uint32_t RODataAlign, uintptr_t RWDataSize, uint32_t RWDataAlign) {
+
+  {
+    std::lock_guard<std::mutex> Lock(M);
+    // If there's already an error then bail out.
+    if (!ErrMsg.empty())
+      return;
+
+    // Section alignments must be powers of two no larger than a page.
+    if (!isPowerOf2_32(CodeAlign) || CodeAlign > EPC.getPageSize()) {
+      ErrMsg = "Invalid code alignment in reserveAllocationSpace";
+      return;
+    }
+    if (!isPowerOf2_32(RODataAlign) || RODataAlign > EPC.getPageSize()) {
+      ErrMsg = "Invalid ro-data alignment in reserveAllocationSpace";
+      return;
+    }
+    if (!isPowerOf2_32(RWDataAlign) || RWDataAlign > EPC.getPageSize()) {
+      ErrMsg = "Invalid rw-data alignment in reserveAllocationSpace";
+      return;
+    }
+  }
+
+  uint64_t TotalSize = 0;
+  TotalSize += alignTo(CodeSize, EPC.getPageSize());
+  TotalSize += alignTo(RODataSize, EPC.getPageSize());
+  TotalSize += alignTo(RWDataSize, EPC.getPageSize());
+
+  LLVM_DEBUG({
+    dbgs() << "Allocator " << (void *)this << " reserving "
+           << formatv("{0:x}", TotalSize) << " bytes.\n";
+  });
+
+  // The remote reserve call is made without holding the lock; the lock is
+  // re-taken only to record the outcome.
+  Expected<ExecutorAddr> TargetAllocAddr((ExecutorAddr()));
+  if (auto Err = EPC.callSPSWrapper<
+          rt::SPSSimpleExecutorMemoryManagerReserveSignature>(
+          SAs.Reserve, TargetAllocAddr, SAs.Instance, TotalSize)) {
+    std::lock_guard<std::mutex> Lock(M);
+    ErrMsg = toString(std::move(Err));
+    return;
+  }
+  if (!TargetAllocAddr) {
+    std::lock_guard<std::mutex> Lock(M);
+    ErrMsg = toString(TargetAllocAddr.takeError());
+    return;
+  }
+
+  // Carve the reservation into code / ro-data / rw-data sub-ranges.
+  std::lock_guard<std::mutex> Lock(M);
+  Unmapped.push_back(AllocGroup());
+  Unmapped.back().RemoteCode = {
+      *TargetAllocAddr, ExecutorAddrDiff(alignTo(CodeSize, EPC.getPageSize()))};
+  Unmapped.back().RemoteROData = {
+      Unmapped.back().RemoteCode.End,
+      ExecutorAddrDiff(alignTo(RODataSize, EPC.getPageSize()))};
+  Unmapped.back().RemoteRWData = {
+      Unmapped.back().RemoteROData.End,
+      ExecutorAddrDiff(alignTo(RWDataSize, EPC.getPageSize()))};
+}
+
+// This manager always pre-reserves remote space before section allocation.
+bool EPCGenericRTDyldMemoryManager::needsToReserveAllocationSpace() {
+  return true;
+}
+
+// Records an eh-frame range against the unfinalized allocation that contains
+// it; actual remote registration happens during finalizeMemory. Searched
+// most-recent-first. Sets ErrMsg if the range is in no known allocation.
+void EPCGenericRTDyldMemoryManager::registerEHFrames(uint8_t *Addr,
+                                                     uint64_t LoadAddr,
+                                                     size_t Size) {
+  LLVM_DEBUG({
+    dbgs() << "Allocator " << (void *)this << " added unfinalized eh-frame "
+           << formatv("[ {0:x} {1:x} ]", LoadAddr, LoadAddr + Size) << "\n";
+  });
+  std::lock_guard<std::mutex> Lock(M);
+  // Bail out early if there's already an error.
+  if (!ErrMsg.empty())
+    return;
+
+  ExecutorAddr LA(LoadAddr);
+  for (auto &Alloc : llvm::reverse(Unfinalized)) {
+    if (Alloc.RemoteCode.contains(LA) || Alloc.RemoteROData.contains(LA) ||
+        Alloc.RemoteRWData.contains(LA)) {
+      Alloc.UnfinalizedEHFrames.push_back({LA, Size});
+      return;
+    }
+  }
+  ErrMsg = "eh-frame does not lie inside unfinalized alloc";
+}
+
+// Intentionally empty: deregistration is handled by the deallocation action
+// attached during finalizeMemory.
+void EPCGenericRTDyldMemoryManager::deregisterEHFrames() {
+  // This is a no-op for us: We've registered a deallocation action for it.
+}
+
+// Tells RuntimeDyld the remote address of every local section buffer, then
+// moves each AllocGroup from Unmapped to Unfinalized.
+void EPCGenericRTDyldMemoryManager::notifyObjectLoaded(
+    RuntimeDyld &Dyld, const object::ObjectFile &Obj) {
+  std::lock_guard<std::mutex> Lock(M);
+  LLVM_DEBUG(dbgs() << "Allocator " << (void *)this << " applied mappings:\n");
+  for (auto &ObjAllocs : Unmapped) {
+    mapAllocsToRemoteAddrs(Dyld, ObjAllocs.CodeAllocs,
+                           ObjAllocs.RemoteCode.Start);
+    mapAllocsToRemoteAddrs(Dyld, ObjAllocs.RODataAllocs,
+                           ObjAllocs.RemoteROData.Start);
+    mapAllocsToRemoteAddrs(Dyld, ObjAllocs.RWDataAllocs,
+                           ObjAllocs.RemoteRWData.Start);
+    Unfinalized.push_back(std::move(ObjAllocs));
+  }
+  Unmapped.clear();
+}
+
+// Packs each unfinalized AllocGroup's section buffers into three aggregate
+// segments (r-x code, r-- ro-data, rw- rw-data), attaches eh-frame
+// register/deregister actions, and sends a synchronous finalize request per
+// group. Returns true (with *ErrMsg set, if provided) on any error.
+bool EPCGenericRTDyldMemoryManager::finalizeMemory(std::string *ErrMsg) {
+  LLVM_DEBUG(dbgs() << "Allocator " << (void *)this << " finalizing:\n");
+
+  // If there's an error then bail out here.
+  std::vector<AllocGroup> Allocs;
+  {
+    std::lock_guard<std::mutex> Lock(M);
+    if (ErrMsg && !this->ErrMsg.empty()) {
+      *ErrMsg = std::move(this->ErrMsg);
+      return true;
+    }
+    // Take ownership of the pending groups so the remote calls run unlocked.
+    std::swap(Allocs, Unfinalized);
+  }
+
+  // Loop over unfinalized objects to make finalization requests.
+  for (auto &ObjAllocs : Allocs) {
+
+    // Index order (0=code, 1=ro-data, 2=rw-data) is shared by the three
+    // parallel arrays below.
+    tpctypes::WireProtectionFlags SegProts[3] = {
+        tpctypes::toWireProtectionFlags(
+            static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+                                                      sys::Memory::MF_EXEC)),
+        tpctypes::toWireProtectionFlags(sys::Memory::MF_READ),
+        tpctypes::toWireProtectionFlags(
+            static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
+                                                      sys::Memory::MF_WRITE))};
+
+    ExecutorAddrRange *RemoteAddrs[3] = {&ObjAllocs.RemoteCode,
+                                         &ObjAllocs.RemoteROData,
+                                         &ObjAllocs.RemoteRWData};
+
+    std::vector<Alloc> *SegSections[3] = {&ObjAllocs.CodeAllocs,
+                                          &ObjAllocs.RODataAllocs,
+                                          &ObjAllocs.RWDataAllocs};
+
+    tpctypes::FinalizeRequest FR;
+    std::unique_ptr<char[]> AggregateContents[3];
+
+    for (unsigned I = 0; I != 3; ++I) {
+      FR.Segments.push_back({});
+      auto &Seg = FR.Segments.back();
+      Seg.Prot = SegProts[I];
+      Seg.Addr = RemoteAddrs[I]->Start;
+      // First pass: compute the aggregate segment size with alignment gaps.
+      for (auto &SecAlloc : *SegSections[I]) {
+        Seg.Size = alignTo(Seg.Size, SecAlloc.Align);
+        Seg.Size += SecAlloc.Size;
+      }
+      AggregateContents[I] = std::make_unique<char[]>(Seg.Size);
+      size_t SecOffset = 0;
+      // Second pass: copy each section's (aligned) contents into the
+      // aggregate buffer at the same relative offsets.
+      for (auto &SecAlloc : *SegSections[I]) {
+        SecOffset = alignTo(SecOffset, SecAlloc.Align);
+        memcpy(&AggregateContents[I][SecOffset],
+               reinterpret_cast<const char *>(
+                   alignAddr(SecAlloc.Contents.get(), Align(SecAlloc.Align))),
+               SecAlloc.Size);
+        SecOffset += SecAlloc.Size;
+        // FIXME: Can we reset SecAlloc.Content here, now that it's copied into
+        // the aggregated content?
+      }
+      Seg.Content = {AggregateContents[I].get(), SecOffset};
+    }
+
+    // Register eh-frames on finalize; matching deregister runs on dealloc.
+    for (auto &Frame : ObjAllocs.UnfinalizedEHFrames)
+      FR.Actions.push_back(
+          {cantFail(
+               WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+                   SAs.RegisterEHFrame, Frame)),
+           cantFail(
+               WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+                   SAs.DeregisterEHFrame, Frame))});
+
+    // We'll also need to make an extra allocation for the eh-frame wrapper call
+    // arguments.
+    Error FinalizeErr = Error::success();
+    if (auto Err = EPC.callSPSWrapper<
+            rt::SPSSimpleExecutorMemoryManagerFinalizeSignature>(
+            SAs.Finalize, FinalizeErr, SAs.Instance, std::move(FR))) {
+      std::lock_guard<std::mutex> Lock(M);
+      this->ErrMsg = toString(std::move(Err));
+      dbgs() << "Serialization error: " << this->ErrMsg << "\n";
+      if (ErrMsg)
+        *ErrMsg = this->ErrMsg;
+      return true;
+    }
+    if (FinalizeErr) {
+      std::lock_guard<std::mutex> Lock(M);
+      this->ErrMsg = toString(std::move(FinalizeErr));
+      dbgs() << "Finalization error: " << this->ErrMsg << "\n";
+      if (ErrMsg)
+        *ErrMsg = this->ErrMsg;
+      return true;
+    }
+  }
+
+  return false;
+}
+
+// Walks Allocs assigning each a remote address starting at NextAddr (aligned
+// per-alloc) and informs RuntimeDyld of the local-buffer -> remote-address
+// mapping. Caller holds the mutex (called from notifyObjectLoaded).
+void EPCGenericRTDyldMemoryManager::mapAllocsToRemoteAddrs(
+    RuntimeDyld &Dyld, std::vector<Alloc> &Allocs, ExecutorAddr NextAddr) {
+  for (auto &Alloc : Allocs) {
+    NextAddr.setValue(alignTo(NextAddr.getValue(), Alloc.Align));
+    LLVM_DEBUG({
+      dbgs() << "     " << static_cast<void *>(Alloc.Contents.get()) << " -> "
+             << format("0x%016" PRIx64, NextAddr.getValue()) << "\n";
+    });
+    Dyld.mapSectionAddress(reinterpret_cast<const void *>(alignAddr(
+                               Alloc.Contents.get(), Align(Alloc.Align))),
+                           NextAddr.getValue());
+    Alloc.RemoteAddr = NextAddr;
+    // Only advance NextAddr if it was non-null to begin with,
+    // otherwise leave it as null.
+    if (NextAddr)
+      NextAddr += ExecutorAddrDiff(Alloc.Size);
+  }
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
new file mode 100644
index 0000000000..249f02f36b
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/EPCIndirectionUtils.cpp
@@ -0,0 +1,427 @@
+//===------- EPCIndirectionUtils.cpp -- EPC based indirection APIs --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/EPCIndirectionUtils.h"
+
+#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
+#include "llvm/Support/MathExtras.h"
+
+#include <future>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace llvm {
+namespace orc {
+
+// Friend-style access shim: exposes EPCIndirectionUtils::getIndirectStubs to
+// the file-local EPCIndirectStubsManager (which privately inherits it).
+class EPCIndirectionUtilsAccess {
+public:
+  using IndirectStubInfo = EPCIndirectionUtils::IndirectStubInfo;
+  using IndirectStubInfoVector = EPCIndirectionUtils::IndirectStubInfoVector;
+
+  static Expected<IndirectStubInfoVector>
+  getIndirectStubs(EPCIndirectionUtils &EPCIU, unsigned NumStubs) {
+    return EPCIU.getIndirectStubs(NumStubs);
+  };
+};
+
+} // end namespace orc
+} // end namespace llvm
+
+namespace {
+
+// TrampolinePool backed by executor-process memory: grow() writes a page of
+// trampolines via the ABI support object; deallocatePool() releases all
+// trampoline pages.
+class EPCTrampolinePool : public TrampolinePool {
+public:
+  EPCTrampolinePool(EPCIndirectionUtils &EPCIU);
+  Error deallocatePool();
+
+protected:
+  Error grow() override;
+
+  using FinalizedAlloc = jitlink::JITLinkMemoryManager::FinalizedAlloc;
+
+  EPCIndirectionUtils &EPCIU;
+  unsigned TrampolineSize = 0;       // Bytes per trampoline (from ABI).
+  unsigned TrampolinesPerPage = 0;   // Trampolines written per grow() page.
+  std::vector<FinalizedAlloc> TrampolineBlocks;
+};
+
+// IndirectStubsManager that allocates stub/pointer pairs in the executor
+// process through EPCIndirectionUtils and tracks them in a local StringMap.
+class EPCIndirectStubsManager : public IndirectStubsManager,
+                                private EPCIndirectionUtilsAccess {
+public:
+  EPCIndirectStubsManager(EPCIndirectionUtils &EPCIU) : EPCIU(EPCIU) {}
+
+  Error deallocateStubs();
+
+  Error createStub(StringRef StubName, JITTargetAddress StubAddr,
+                   JITSymbolFlags StubFlags) override;
+
+  Error createStubs(const StubInitsMap &StubInits) override;
+
+  JITEvaluatedSymbol findStub(StringRef Name, bool ExportedStubsOnly) override;
+
+  JITEvaluatedSymbol findPointer(StringRef Name) override;
+
+  Error updatePointer(StringRef Name, JITTargetAddress NewAddr) override;
+
+private:
+  // (stub/pointer addresses, symbol flags) per stub name.
+  using StubInfo = std::pair<IndirectStubInfo, JITSymbolFlags>;
+
+  std::mutex ISMMutex;  // Guards StubInfos.
+  EPCIndirectionUtils &EPCIU;
+  StringMap<StubInfo> StubInfos;
+};
+
+// Computes trampoline geometry from the target ABI and executor page size.
+// One pointer-sized slot per page is reserved (not used for trampolines).
+EPCTrampolinePool::EPCTrampolinePool(EPCIndirectionUtils &EPCIU)
+    : EPCIU(EPCIU) {
+  auto &EPC = EPCIU.getExecutorProcessControl();
+  auto &ABI = EPCIU.getABISupport();
+
+  TrampolineSize = ABI.getTrampolineSize();
+  TrampolinesPerPage =
+      (EPC.getPageSize() - ABI.getPointerSize()) / TrampolineSize;
+}
+
+// Synchronously deallocates all trampoline pages: bridges the async
+// deallocate callback through a promise/future pair.
+Error EPCTrampolinePool::deallocatePool() {
+  Error Err = Error::success();
+  std::promise<MSVCPError> DeallocResultP;
+  auto DeallocResultF = DeallocResultP.get_future();
+
+  EPCIU.getExecutorProcessControl().getMemMgr().deallocate(
+      std::move(TrampolineBlocks),
+      [&](Error Err) { DeallocResultP.set_value(std::move(Err)); });
+
+  return DeallocResultF.get();
+}
+
+// Allocates one read+exec page in the executor, fills it with trampolines
+// targeting the resolver block, and adds their addresses to
+// AvailableTrampolines. Requires writeResolverBlock to have run first.
+Error EPCTrampolinePool::grow() {
+  using namespace jitlink;
+
+  assert(AvailableTrampolines.empty() &&
+         "Grow called with trampolines still available");
+
+  auto ResolverAddress = EPCIU.getResolverBlockAddress();
+  assert(ResolverAddress && "Resolver address can not be null");
+
+  auto &EPC = EPCIU.getExecutorProcessControl();
+  auto PageSize = EPC.getPageSize();
+  auto Alloc = SimpleSegmentAlloc::Create(
+      EPC.getMemMgr(), nullptr,
+      {{MemProt::Read | MemProt::Exec, {PageSize, Align(PageSize)}}});
+  if (!Alloc)
+    return Alloc.takeError();
+
+  unsigned NumTrampolines = TrampolinesPerPage;
+
+  // Write the trampolines into the local working memory; finalize() below
+  // copies them to the executor.
+  auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
+  EPCIU.getABISupport().writeTrampolines(SegInfo.WorkingMem.data(),
+                                         SegInfo.Addr.getValue(),
+                                         ResolverAddress, NumTrampolines);
+  for (unsigned I = 0; I < NumTrampolines; ++I)
+    AvailableTrampolines.push_back(SegInfo.Addr.getValue() +
+                                   (I * TrampolineSize));
+
+  auto FA = Alloc->finalize();
+  if (!FA)
+    return FA.takeError();
+
+  TrampolineBlocks.push_back(std::move(*FA));
+
+  return Error::success();
+}
+
+// Single-stub convenience wrapper over createStubs.
+Error EPCIndirectStubsManager::createStub(StringRef StubName,
+                                          JITTargetAddress StubAddr,
+                                          JITSymbolFlags StubFlags) {
+  StubInitsMap SIM;
+  SIM[StubName] = std::make_pair(StubAddr, StubFlags);
+  return createStubs(SIM);
+}
+
+// Grabs one stub/pointer pair per requested entry, records them in
+// StubInfos, then writes each stub's initial target address into its
+// executor-side pointer (32- or 64-bit writes, per the ABI pointer size).
+Error EPCIndirectStubsManager::createStubs(const StubInitsMap &StubInits) {
+  auto AvailableStubInfos = getIndirectStubs(EPCIU, StubInits.size());
+  if (!AvailableStubInfos)
+    return AvailableStubInfos.takeError();
+
+  {
+    std::lock_guard<std::mutex> Lock(ISMMutex);
+    unsigned ASIdx = 0;
+    for (auto &SI : StubInits) {
+      auto &A = (*AvailableStubInfos)[ASIdx++];
+      StubInfos[SI.first()] = std::make_pair(A, SI.second.second);
+    }
+  }
+
+  auto &MemAccess = EPCIU.getExecutorProcessControl().getMemoryAccess();
+  switch (EPCIU.getABISupport().getPointerSize()) {
+  case 4: {
+    unsigned ASIdx = 0;
+    std::vector<tpctypes::UInt32Write> PtrUpdates;
+    for (auto &SI : StubInits)
+      PtrUpdates.push_back(
+          {ExecutorAddr((*AvailableStubInfos)[ASIdx++].PointerAddress),
+           static_cast<uint32_t>(SI.second.first)});
+    return MemAccess.writeUInt32s(PtrUpdates);
+  }
+  case 8: {
+    unsigned ASIdx = 0;
+    std::vector<tpctypes::UInt64Write> PtrUpdates;
+    for (auto &SI : StubInits)
+      PtrUpdates.push_back(
+          {ExecutorAddr((*AvailableStubInfos)[ASIdx++].PointerAddress),
+           static_cast<uint64_t>(SI.second.first)});
+    return MemAccess.writeUInt64s(PtrUpdates);
+  }
+  default:
+    return make_error<StringError>("Unsupported pointer size",
+                                   inconvertibleErrorCode());
+  }
+}
+
+// Returns the stub address for Name, or a null symbol if unknown. Note:
+// the ExportedStubsOnly flag is not consulted here.
+JITEvaluatedSymbol EPCIndirectStubsManager::findStub(StringRef Name,
+                                                     bool ExportedStubsOnly) {
+  std::lock_guard<std::mutex> Lock(ISMMutex);
+  auto I = StubInfos.find(Name);
+  if (I == StubInfos.end())
+    return nullptr;
+  return {I->second.first.StubAddress, I->second.second};
+}
+
+// Returns the pointer (slot) address for Name, or a null symbol if unknown.
+JITEvaluatedSymbol EPCIndirectStubsManager::findPointer(StringRef Name) {
+  std::lock_guard<std::mutex> Lock(ISMMutex);
+  auto I = StubInfos.find(Name);
+  if (I == StubInfos.end())
+    return nullptr;
+  return {I->second.first.PointerAddress, I->second.second};
+}
+
+// Re-points an existing stub by writing NewAddr into its executor-side
+// pointer slot. The lock is dropped before the remote write.
+Error EPCIndirectStubsManager::updatePointer(StringRef Name,
+                                             JITTargetAddress NewAddr) {
+
+  JITTargetAddress PtrAddr = 0;
+  {
+    std::lock_guard<std::mutex> Lock(ISMMutex);
+    auto I = StubInfos.find(Name);
+    if (I == StubInfos.end())
+      return make_error<StringError>("Unknown stub name",
+                                     inconvertibleErrorCode());
+    PtrAddr = I->second.first.PointerAddress;
+  }
+
+  auto &MemAccess = EPCIU.getExecutorProcessControl().getMemoryAccess();
+  switch (EPCIU.getABISupport().getPointerSize()) {
+  case 4: {
+    tpctypes::UInt32Write PUpdate(ExecutorAddr(PtrAddr), NewAddr);
+    return MemAccess.writeUInt32s(PUpdate);
+  }
+  case 8: {
+    tpctypes::UInt64Write PUpdate(ExecutorAddr(PtrAddr), NewAddr);
+    return MemAccess.writeUInt64s(PUpdate);
+  }
+  default:
+    return make_error<StringError>("Unsupported pointer size",
+                                   inconvertibleErrorCode());
+  }
+}
+
+} // end anonymous namespace.
+
+namespace llvm {
+namespace orc {
+
+// Out-of-line virtual destructor anchor for the ABISupport hierarchy.
+EPCIndirectionUtils::ABISupport::~ABISupport() {}
+
+// Selects the ABI support implementation matching the executor's target
+// triple; unsupported architectures produce an Error.
+Expected<std::unique_ptr<EPCIndirectionUtils>>
+EPCIndirectionUtils::Create(ExecutorProcessControl &EPC) {
+  const auto &TT = EPC.getTargetTriple();
+  switch (TT.getArch()) {
+  default:
+    return make_error<StringError>(
+        std::string("No EPCIndirectionUtils available for ") + TT.str(),
+        inconvertibleErrorCode());
+  case Triple::aarch64:
+  case Triple::aarch64_32:
+    return CreateWithABI<OrcAArch64>(EPC);
+
+  case Triple::x86:
+    return CreateWithABI<OrcI386>(EPC);
+
+  case Triple::mips:
+    return CreateWithABI<OrcMips32Be>(EPC);
+
+  case Triple::mipsel:
+    return CreateWithABI<OrcMips32Le>(EPC);
+
+  case Triple::mips64:
+  case Triple::mips64el:
+    return CreateWithABI<OrcMips64>(EPC);
+
+  case Triple::x86_64:
+    // Windows and SysV x86-64 differ in calling convention.
+    if (TT.getOS() == Triple::OSType::Win32)
+      return CreateWithABI<OrcX86_64_Win32>(EPC);
+    else
+      return CreateWithABI<OrcX86_64_SysV>(EPC);
+  }
+}
+
+// Releases all executor-side resources (stub pages, trampoline pool,
+// resolver block), joining any errors into a single Error.
+Error EPCIndirectionUtils::cleanup() {
+
+  auto &MemMgr = EPC.getMemMgr();
+  auto Err = MemMgr.deallocate(std::move(IndirectStubAllocs));
+
+  if (TP)
+    Err = joinErrors(std::move(Err),
+                     static_cast<EPCTrampolinePool &>(*TP).deallocatePool());
+
+  if (ResolverBlock)
+    Err =
+        joinErrors(std::move(Err), MemMgr.deallocate(std::move(ResolverBlock)));
+
+  return Err;
+}
+
+Expected<JITTargetAddress>
+EPCIndirectionUtils::writeResolverBlock(JITTargetAddress ReentryFnAddr,
+ JITTargetAddress ReentryCtxAddr) {
+ using namespace jitlink;
+
+ assert(ABI && "ABI can not be null");
+ auto ResolverSize = ABI->getResolverCodeSize();
+
+ auto Alloc =
+ SimpleSegmentAlloc::Create(EPC.getMemMgr(), nullptr,
+ {{MemProt::Read | MemProt::Exec,
+ {ResolverSize, Align(EPC.getPageSize())}}});
+
+ if (!Alloc)
+ return Alloc.takeError();
+
+ auto SegInfo = Alloc->getSegInfo(MemProt::Read | MemProt::Exec);
+ ResolverBlockAddr = SegInfo.Addr.getValue();
+ ABI->writeResolverCode(SegInfo.WorkingMem.data(), ResolverBlockAddr,
+ ReentryFnAddr, ReentryCtxAddr);
+
+ auto FA = Alloc->finalize();
+ if (!FA)
+ return FA.takeError();
+
+ ResolverBlock = std::move(*FA);
+ return ResolverBlockAddr;
+}
+
+std::unique_ptr<IndirectStubsManager>
+EPCIndirectionUtils::createIndirectStubsManager() {
+ return std::make_unique<EPCIndirectStubsManager>(*this);
+}
+
+TrampolinePool &EPCIndirectionUtils::getTrampolinePool() {
+ if (!TP)
+ TP = std::make_unique<EPCTrampolinePool>(*this);
+ return *TP;
+}
+
+LazyCallThroughManager &EPCIndirectionUtils::createLazyCallThroughManager(
+ ExecutionSession &ES, JITTargetAddress ErrorHandlerAddr) {
+ assert(!LCTM &&
+ "createLazyCallThroughManager can not have been called before");
+ LCTM = std::make_unique<LazyCallThroughManager>(ES, ErrorHandlerAddr,
+ &getTrampolinePool());
+ return *LCTM;
+}
+
+EPCIndirectionUtils::EPCIndirectionUtils(ExecutorProcessControl &EPC,
+ std::unique_ptr<ABISupport> ABI)
+ : EPC(EPC), ABI(std::move(ABI)) {
+ assert(this->ABI && "ABI can not be null");
+
+ assert(EPC.getPageSize() > getABISupport().getStubSize() &&
+ "Stubs larger than one page are not supported");
+}
+
+Expected<EPCIndirectionUtils::IndirectStubInfoVector>
+EPCIndirectionUtils::getIndirectStubs(unsigned NumStubs) {
+ using namespace jitlink;
+
+ std::lock_guard<std::mutex> Lock(EPCUIMutex);
+
+ // If there aren't enough stubs available then allocate some more.
+ if (NumStubs > AvailableIndirectStubs.size()) {
+ auto NumStubsToAllocate = NumStubs;
+ auto PageSize = EPC.getPageSize();
+ auto StubBytes = alignTo(NumStubsToAllocate * ABI->getStubSize(), PageSize);
+ NumStubsToAllocate = StubBytes / ABI->getStubSize();
+ auto PtrBytes =
+ alignTo(NumStubsToAllocate * ABI->getPointerSize(), PageSize);
+
+ auto StubProt = MemProt::Read | MemProt::Exec;
+ auto PtrProt = MemProt::Read | MemProt::Write;
+
+ auto Alloc = SimpleSegmentAlloc::Create(
+ EPC.getMemMgr(), nullptr,
+ {{StubProt, {static_cast<size_t>(StubBytes), Align(PageSize)}},
+ {PtrProt, {static_cast<size_t>(PtrBytes), Align(PageSize)}}});
+
+ if (!Alloc)
+ return Alloc.takeError();
+
+ auto StubSeg = Alloc->getSegInfo(StubProt);
+ auto PtrSeg = Alloc->getSegInfo(PtrProt);
+
+ ABI->writeIndirectStubsBlock(StubSeg.WorkingMem.data(),
+ StubSeg.Addr.getValue(),
+ PtrSeg.Addr.getValue(), NumStubsToAllocate);
+
+ auto FA = Alloc->finalize();
+ if (!FA)
+ return FA.takeError();
+
+ IndirectStubAllocs.push_back(std::move(*FA));
+
+ auto StubExecutorAddr = StubSeg.Addr;
+ auto PtrExecutorAddr = PtrSeg.Addr;
+ for (unsigned I = 0; I != NumStubsToAllocate; ++I) {
+ AvailableIndirectStubs.push_back(IndirectStubInfo(
+ StubExecutorAddr.getValue(), PtrExecutorAddr.getValue()));
+ StubExecutorAddr += ABI->getStubSize();
+ PtrExecutorAddr += ABI->getPointerSize();
+ }
+ }
+
+ assert(NumStubs <= AvailableIndirectStubs.size() &&
+ "Sufficient stubs should have been allocated above");
+
+ IndirectStubInfoVector Result;
+ while (NumStubs--) {
+ Result.push_back(AvailableIndirectStubs.back());
+ AvailableIndirectStubs.pop_back();
+ }
+
+ return std::move(Result);
+}
+
+static JITTargetAddress reentry(JITTargetAddress LCTMAddr,
+ JITTargetAddress TrampolineAddr) {
+ auto &LCTM = *jitTargetAddressToPointer<LazyCallThroughManager *>(LCTMAddr);
+ std::promise<JITTargetAddress> LandingAddrP;
+ auto LandingAddrF = LandingAddrP.get_future();
+ LCTM.resolveTrampolineLandingAddress(
+ TrampolineAddr,
+ [&](JITTargetAddress Addr) { LandingAddrP.set_value(Addr); });
+ return LandingAddrF.get();
+}
+
+Error setUpInProcessLCTMReentryViaEPCIU(EPCIndirectionUtils &EPCIU) {
+ auto &LCTM = EPCIU.getLazyCallThroughManager();
+ return EPCIU
+ .writeResolverBlock(pointerToJITTargetAddress(&reentry),
+ pointerToJITTargetAddress(&LCTM))
+ .takeError();
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ExecutionUtils.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
new file mode 100644
index 0000000000..ae2d47fb8c
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ExecutionUtils.cpp
@@ -0,0 +1,407 @@
+//===---- ExecutionUtils.cpp - Utilities for executing functions in Orc ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/Layer.h"
+#include "llvm/ExecutionEngine/Orc/ObjectFileInterface.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Object/MachOUniversal.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Target/TargetMachine.h"
+#include <string>
+
+namespace llvm {
+namespace orc {
+
+CtorDtorIterator::CtorDtorIterator(const GlobalVariable *GV, bool End)
+ : InitList(
+ GV ? dyn_cast_or_null<ConstantArray>(GV->getInitializer()) : nullptr),
+ I((InitList && End) ? InitList->getNumOperands() : 0) {
+}
+
+bool CtorDtorIterator::operator==(const CtorDtorIterator &Other) const {
+ assert(InitList == Other.InitList && "Incomparable iterators.");
+ return I == Other.I;
+}
+
+bool CtorDtorIterator::operator!=(const CtorDtorIterator &Other) const {
+ return !(*this == Other);
+}
+
+CtorDtorIterator& CtorDtorIterator::operator++() {
+ ++I;
+ return *this;
+}
+
+CtorDtorIterator CtorDtorIterator::operator++(int) {
+ CtorDtorIterator Temp = *this;
+ ++I;
+ return Temp;
+}
+
+CtorDtorIterator::Element CtorDtorIterator::operator*() const {
+ ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(I));
+ assert(CS && "Unrecognized type in llvm.global_ctors/llvm.global_dtors");
+
+ Constant *FuncC = CS->getOperand(1);
+ Function *Func = nullptr;
+
+ // Extract function pointer, pulling off any casts.
+ while (FuncC) {
+ if (Function *F = dyn_cast_or_null<Function>(FuncC)) {
+ Func = F;
+ break;
+ } else if (ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(FuncC)) {
+ if (CE->isCast())
+ FuncC = dyn_cast_or_null<ConstantExpr>(CE->getOperand(0));
+ else
+ break;
+ } else {
+ // This isn't anything we recognize. Bail out with Func left set to null.
+ break;
+ }
+ }
+
+ auto *Priority = cast<ConstantInt>(CS->getOperand(0));
+ Value *Data = CS->getNumOperands() == 3 ? CS->getOperand(2) : nullptr;
+ if (Data && !isa<GlobalValue>(Data))
+ Data = nullptr;
+ return Element(Priority->getZExtValue(), Func, Data);
+}
+
+iterator_range<CtorDtorIterator> getConstructors(const Module &M) {
+ const GlobalVariable *CtorsList = M.getNamedGlobal("llvm.global_ctors");
+ return make_range(CtorDtorIterator(CtorsList, false),
+ CtorDtorIterator(CtorsList, true));
+}
+
+iterator_range<CtorDtorIterator> getDestructors(const Module &M) {
+ const GlobalVariable *DtorsList = M.getNamedGlobal("llvm.global_dtors");
+ return make_range(CtorDtorIterator(DtorsList, false),
+ CtorDtorIterator(DtorsList, true));
+}
+
+bool StaticInitGVIterator::isStaticInitGlobal(GlobalValue &GV) {
+ if (GV.isDeclaration())
+ return false;
+
+ if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
+ GV.getName() == "llvm.global_dtors"))
+ return true;
+
+ if (ObjFmt == Triple::MachO) {
+ // FIXME: These section checks are too strict: We should match first and
+ // second word split by comma.
+ if (GV.hasSection() &&
+ (GV.getSection().startswith("__DATA,__objc_classlist") ||
+ GV.getSection().startswith("__DATA,__objc_selrefs")))
+ return true;
+ }
+
+ return false;
+}
+
+void CtorDtorRunner::add(iterator_range<CtorDtorIterator> CtorDtors) {
+ if (CtorDtors.empty())
+ return;
+
+ MangleAndInterner Mangle(
+ JD.getExecutionSession(),
+ (*CtorDtors.begin()).Func->getParent()->getDataLayout());
+
+ for (auto CtorDtor : CtorDtors) {
+ assert(CtorDtor.Func && CtorDtor.Func->hasName() &&
+ "Ctor/Dtor function must be named to be runnable under the JIT");
+
+ // FIXME: Maybe use a symbol promoter here instead.
+ if (CtorDtor.Func->hasLocalLinkage()) {
+ CtorDtor.Func->setLinkage(GlobalValue::ExternalLinkage);
+ CtorDtor.Func->setVisibility(GlobalValue::HiddenVisibility);
+ }
+
+ if (CtorDtor.Data && cast<GlobalValue>(CtorDtor.Data)->isDeclaration()) {
+ dbgs() << " Skipping because why now?\n";
+ continue;
+ }
+
+ CtorDtorsByPriority[CtorDtor.Priority].push_back(
+ Mangle(CtorDtor.Func->getName()));
+ }
+}
+
+Error CtorDtorRunner::run() {
+ using CtorDtorTy = void (*)();
+
+ SymbolLookupSet LookupSet;
+ for (auto &KV : CtorDtorsByPriority)
+ for (auto &Name : KV.second)
+ LookupSet.add(Name);
+ assert(!LookupSet.containsDuplicates() &&
+ "Ctor/Dtor list contains duplicates");
+
+ auto &ES = JD.getExecutionSession();
+ if (auto CtorDtorMap = ES.lookup(
+ makeJITDylibSearchOrder(&JD, JITDylibLookupFlags::MatchAllSymbols),
+ std::move(LookupSet))) {
+ for (auto &KV : CtorDtorsByPriority) {
+ for (auto &Name : KV.second) {
+ assert(CtorDtorMap->count(Name) && "No entry for Name");
+ auto CtorDtor = reinterpret_cast<CtorDtorTy>(
+ static_cast<uintptr_t>((*CtorDtorMap)[Name].getAddress()));
+ CtorDtor();
+ }
+ }
+ CtorDtorsByPriority.clear();
+ return Error::success();
+ } else
+ return CtorDtorMap.takeError();
+}
+
+void LocalCXXRuntimeOverridesBase::runDestructors() {
+ auto& CXXDestructorDataPairs = DSOHandleOverride;
+ for (auto &P : CXXDestructorDataPairs)
+ P.first(P.second);
+ CXXDestructorDataPairs.clear();
+}
+
+int LocalCXXRuntimeOverridesBase::CXAAtExitOverride(DestructorPtr Destructor,
+ void *Arg,
+ void *DSOHandle) {
+ auto& CXXDestructorDataPairs =
+ *reinterpret_cast<CXXDestructorDataPairList*>(DSOHandle);
+ CXXDestructorDataPairs.push_back(std::make_pair(Destructor, Arg));
+ return 0;
+}
+
+Error LocalCXXRuntimeOverrides::enable(JITDylib &JD,
+ MangleAndInterner &Mangle) {
+ SymbolMap RuntimeInterposes;
+ RuntimeInterposes[Mangle("__dso_handle")] =
+ JITEvaluatedSymbol(toTargetAddress(&DSOHandleOverride),
+ JITSymbolFlags::Exported);
+ RuntimeInterposes[Mangle("__cxa_atexit")] =
+ JITEvaluatedSymbol(toTargetAddress(&CXAAtExitOverride),
+ JITSymbolFlags::Exported);
+
+ return JD.define(absoluteSymbols(std::move(RuntimeInterposes)));
+}
+
+void ItaniumCXAAtExitSupport::registerAtExit(void (*F)(void *), void *Ctx,
+ void *DSOHandle) {
+ std::lock_guard<std::mutex> Lock(AtExitsMutex);
+ AtExitRecords[DSOHandle].push_back({F, Ctx});
+}
+
+void ItaniumCXAAtExitSupport::runAtExits(void *DSOHandle) {
+ std::vector<AtExitRecord> AtExitsToRun;
+
+ {
+ std::lock_guard<std::mutex> Lock(AtExitsMutex);
+ auto I = AtExitRecords.find(DSOHandle);
+ if (I != AtExitRecords.end()) {
+ AtExitsToRun = std::move(I->second);
+ AtExitRecords.erase(I);
+ }
+ }
+
+ while (!AtExitsToRun.empty()) {
+ AtExitsToRun.back().F(AtExitsToRun.back().Ctx);
+ AtExitsToRun.pop_back();
+ }
+}
+
+DynamicLibrarySearchGenerator::DynamicLibrarySearchGenerator(
+ sys::DynamicLibrary Dylib, char GlobalPrefix, SymbolPredicate Allow)
+ : Dylib(std::move(Dylib)), Allow(std::move(Allow)),
+ GlobalPrefix(GlobalPrefix) {}
+
+Expected<std::unique_ptr<DynamicLibrarySearchGenerator>>
+DynamicLibrarySearchGenerator::Load(const char *FileName, char GlobalPrefix,
+ SymbolPredicate Allow) {
+ std::string ErrMsg;
+ auto Lib = sys::DynamicLibrary::getPermanentLibrary(FileName, &ErrMsg);
+ if (!Lib.isValid())
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+ return std::make_unique<DynamicLibrarySearchGenerator>(
+ std::move(Lib), GlobalPrefix, std::move(Allow));
+}
+
+Error DynamicLibrarySearchGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+ orc::SymbolMap NewSymbols;
+
+ bool HasGlobalPrefix = (GlobalPrefix != '\0');
+
+ for (auto &KV : Symbols) {
+ auto &Name = KV.first;
+
+ if ((*Name).empty())
+ continue;
+
+ if (Allow && !Allow(Name))
+ continue;
+
+ if (HasGlobalPrefix && (*Name).front() != GlobalPrefix)
+ continue;
+
+ std::string Tmp((*Name).data() + HasGlobalPrefix,
+ (*Name).size() - HasGlobalPrefix);
+ if (void *Addr = Dylib.getAddressOfSymbol(Tmp.c_str())) {
+ NewSymbols[Name] = JITEvaluatedSymbol(
+ static_cast<JITTargetAddress>(reinterpret_cast<uintptr_t>(Addr)),
+ JITSymbolFlags::Exported);
+ }
+ }
+
+ if (NewSymbols.empty())
+ return Error::success();
+
+ return JD.define(absoluteSymbols(std::move(NewSymbols)));
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Load(
+ ObjectLayer &L, const char *FileName,
+ GetObjectFileInterface GetObjFileInterface) {
+ auto ArchiveBuffer = errorOrToExpected(MemoryBuffer::getFile(FileName));
+
+ if (!ArchiveBuffer)
+ return ArchiveBuffer.takeError();
+
+ return Create(L, std::move(*ArchiveBuffer), std::move(GetObjFileInterface));
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Load(
+ ObjectLayer &L, const char *FileName, const Triple &TT,
+ GetObjectFileInterface GetObjFileInterface) {
+
+ auto B = object::createBinary(FileName);
+ if (!B)
+ return B.takeError();
+
+ // If this is a regular archive then create an instance from it.
+ if (isa<object::Archive>(B->getBinary()))
+ return Create(L, std::move(B->takeBinary().second),
+ std::move(GetObjFileInterface));
+
+ // If this is a universal binary then search for a slice matching the given
+ // Triple.
+ if (auto *UB = cast<object::MachOUniversalBinary>(B->getBinary())) {
+ for (const auto &Obj : UB->objects()) {
+ auto ObjTT = Obj.getTriple();
+ if (ObjTT.getArch() == TT.getArch() &&
+ ObjTT.getSubArch() == TT.getSubArch() &&
+ (TT.getVendor() == Triple::UnknownVendor ||
+ ObjTT.getVendor() == TT.getVendor())) {
+ // We found a match. Create an instance from a buffer covering this
+ // slice.
+ auto SliceBuffer = MemoryBuffer::getFileSlice(FileName, Obj.getSize(),
+ Obj.getOffset());
+ if (!SliceBuffer)
+ return make_error<StringError>(
+ Twine("Could not create buffer for ") + TT.str() + " slice of " +
+ FileName + ": [ " + formatv("{0:x}", Obj.getOffset()) +
+ " .. " + formatv("{0:x}", Obj.getOffset() + Obj.getSize()) +
+ ": " + SliceBuffer.getError().message(),
+ SliceBuffer.getError());
+ return Create(L, std::move(*SliceBuffer),
+ std::move(GetObjFileInterface));
+ }
+ }
+
+ return make_error<StringError>(Twine("Universal binary ") + FileName +
+ " does not contain a slice for " +
+ TT.str(),
+ inconvertibleErrorCode());
+ }
+
+ return make_error<StringError>(Twine("Unrecognized file type for ") +
+ FileName,
+ inconvertibleErrorCode());
+}
+
+Expected<std::unique_ptr<StaticLibraryDefinitionGenerator>>
+StaticLibraryDefinitionGenerator::Create(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer,
+ GetObjectFileInterface GetObjFileInterface) {
+ Error Err = Error::success();
+
+ std::unique_ptr<StaticLibraryDefinitionGenerator> ADG(
+ new StaticLibraryDefinitionGenerator(
+ L, std::move(ArchiveBuffer), std::move(GetObjFileInterface), Err));
+
+ if (Err)
+ return std::move(Err);
+
+ return std::move(ADG);
+}
+
+Error StaticLibraryDefinitionGenerator::tryToGenerate(
+ LookupState &LS, LookupKind K, JITDylib &JD,
+ JITDylibLookupFlags JDLookupFlags, const SymbolLookupSet &Symbols) {
+
+ // Don't materialize symbols from static archives unless this is a static
+ // lookup.
+ if (K != LookupKind::Static)
+ return Error::success();
+
+ // Bail out early if we've already freed the archive.
+ if (!Archive)
+ return Error::success();
+
+ DenseSet<std::pair<StringRef, StringRef>> ChildBufferInfos;
+
+ for (const auto &KV : Symbols) {
+ const auto &Name = KV.first;
+ auto Child = Archive->findSym(*Name);
+ if (!Child)
+ return Child.takeError();
+ if (*Child == None)
+ continue;
+ auto ChildBuffer = (*Child)->getMemoryBufferRef();
+ if (!ChildBuffer)
+ return ChildBuffer.takeError();
+ ChildBufferInfos.insert(
+ {ChildBuffer->getBuffer(), ChildBuffer->getBufferIdentifier()});
+ }
+
+ for (auto ChildBufferInfo : ChildBufferInfos) {
+ MemoryBufferRef ChildBufferRef(ChildBufferInfo.first,
+ ChildBufferInfo.second);
+
+ auto I = GetObjFileInterface(L.getExecutionSession(), ChildBufferRef);
+ if (!I)
+ return I.takeError();
+
+ if (auto Err = L.add(JD, MemoryBuffer::getMemBuffer(ChildBufferRef, false),
+ std::move(*I)))
+ return Err;
+ }
+
+ return Error::success();
+}
+
+StaticLibraryDefinitionGenerator::StaticLibraryDefinitionGenerator(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> ArchiveBuffer,
+ GetObjectFileInterface GetObjFileInterface, Error &Err)
+ : L(L), GetObjFileInterface(std::move(GetObjFileInterface)),
+ ArchiveBuffer(std::move(ArchiveBuffer)),
+ Archive(std::make_unique<object::Archive>(*this->ArchiveBuffer, Err)) {
+
+ if (!this->GetObjFileInterface)
+ this->GetObjFileInterface = getObjectFileInterface;
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp
new file mode 100644
index 0000000000..2eb835551a
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ExecutorProcessControl.cpp
@@ -0,0 +1,197 @@
+//===---- ExecutorProcessControl.cpp -- Executor process control APIs -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
+
+#include "llvm/ExecutionEngine/Orc/Core.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Process.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+ExecutorProcessControl::MemoryAccess::~MemoryAccess() {}
+
+ExecutorProcessControl::~ExecutorProcessControl() {}
+
+SelfExecutorProcessControl::SelfExecutorProcessControl(
+ std::shared_ptr<SymbolStringPool> SSP, std::unique_ptr<TaskDispatcher> D,
+ Triple TargetTriple, unsigned PageSize,
+ std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr)
+ : ExecutorProcessControl(std::move(SSP), std::move(D)) {
+
+ OwnedMemMgr = std::move(MemMgr);
+ if (!OwnedMemMgr)
+ OwnedMemMgr = std::make_unique<jitlink::InProcessMemoryManager>(
+ sys::Process::getPageSizeEstimate());
+
+ this->TargetTriple = std::move(TargetTriple);
+ this->PageSize = PageSize;
+ this->MemMgr = OwnedMemMgr.get();
+ this->MemAccess = this;
+ this->JDI = {ExecutorAddr::fromPtr(jitDispatchViaWrapperFunctionManager),
+ ExecutorAddr::fromPtr(this)};
+ if (this->TargetTriple.isOSBinFormatMachO())
+ GlobalManglingPrefix = '_';
+}
+
+Expected<std::unique_ptr<SelfExecutorProcessControl>>
+SelfExecutorProcessControl::Create(
+ std::shared_ptr<SymbolStringPool> SSP,
+ std::unique_ptr<TaskDispatcher> D,
+ std::unique_ptr<jitlink::JITLinkMemoryManager> MemMgr) {
+
+ if (!SSP)
+ SSP = std::make_shared<SymbolStringPool>();
+
+ if (!D) {
+#if LLVM_ENABLE_THREADS
+ D = std::make_unique<DynamicThreadPoolTaskDispatcher>();
+#else
+ D = std::make_unique<InPlaceTaskDispatcher>();
+#endif
+ }
+
+ auto PageSize = sys::Process::getPageSize();
+ if (!PageSize)
+ return PageSize.takeError();
+
+ Triple TT(sys::getProcessTriple());
+
+ return std::make_unique<SelfExecutorProcessControl>(
+ std::move(SSP), std::move(D), std::move(TT), *PageSize,
+ std::move(MemMgr));
+}
+
+Expected<tpctypes::DylibHandle>
+SelfExecutorProcessControl::loadDylib(const char *DylibPath) {
+ std::string ErrMsg;
+ auto Dylib = std::make_unique<sys::DynamicLibrary>(
+ sys::DynamicLibrary::getPermanentLibrary(DylibPath, &ErrMsg));
+ if (!Dylib->isValid())
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+ DynamicLibraries.push_back(std::move(Dylib));
+ return pointerToJITTargetAddress(DynamicLibraries.back().get());
+}
+
+Expected<std::vector<tpctypes::LookupResult>>
+SelfExecutorProcessControl::lookupSymbols(ArrayRef<LookupRequest> Request) {
+ std::vector<tpctypes::LookupResult> R;
+
+ for (auto &Elem : Request) {
+ auto *Dylib = jitTargetAddressToPointer<sys::DynamicLibrary *>(Elem.Handle);
+ assert(llvm::any_of(DynamicLibraries,
+ [=](const std::unique_ptr<sys::DynamicLibrary> &DL) {
+ return DL.get() == Dylib;
+ }) &&
+ "Invalid handle");
+
+ R.push_back(std::vector<JITTargetAddress>());
+ for (auto &KV : Elem.Symbols) {
+ auto &Sym = KV.first;
+ std::string Tmp((*Sym).data() + !!GlobalManglingPrefix,
+ (*Sym).size() - !!GlobalManglingPrefix);
+ void *Addr = Dylib->getAddressOfSymbol(Tmp.c_str());
+ if (!Addr && KV.second == SymbolLookupFlags::RequiredSymbol) {
+ // FIXME: Collect all failing symbols before erroring out.
+ SymbolNameVector MissingSymbols;
+ MissingSymbols.push_back(Sym);
+ return make_error<SymbolsNotFound>(SSP, std::move(MissingSymbols));
+ }
+ R.back().push_back(pointerToJITTargetAddress(Addr));
+ }
+ }
+
+ return R;
+}
+
+Expected<int32_t>
+SelfExecutorProcessControl::runAsMain(ExecutorAddr MainFnAddr,
+ ArrayRef<std::string> Args) {
+ using MainTy = int (*)(int, char *[]);
+ return orc::runAsMain(MainFnAddr.toPtr<MainTy>(), Args);
+}
+
+void SelfExecutorProcessControl::callWrapperAsync(ExecutorAddr WrapperFnAddr,
+ IncomingWFRHandler SendResult,
+ ArrayRef<char> ArgBuffer) {
+ using WrapperFnTy =
+ shared::CWrapperFunctionResult (*)(const char *Data, size_t Size);
+ auto *WrapperFn = WrapperFnAddr.toPtr<WrapperFnTy>();
+ SendResult(WrapperFn(ArgBuffer.data(), ArgBuffer.size()));
+}
+
+Error SelfExecutorProcessControl::disconnect() {
+ D->shutdown();
+ return Error::success();
+}
+
+void SelfExecutorProcessControl::writeUInt8sAsync(
+ ArrayRef<tpctypes::UInt8Write> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint8_t *>() = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void SelfExecutorProcessControl::writeUInt16sAsync(
+ ArrayRef<tpctypes::UInt16Write> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint16_t *>() = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void SelfExecutorProcessControl::writeUInt32sAsync(
+ ArrayRef<tpctypes::UInt32Write> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint32_t *>() = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void SelfExecutorProcessControl::writeUInt64sAsync(
+ ArrayRef<tpctypes::UInt64Write> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ *W.Addr.toPtr<uint64_t *>() = W.Value;
+ OnWriteComplete(Error::success());
+}
+
+void SelfExecutorProcessControl::writeBuffersAsync(
+ ArrayRef<tpctypes::BufferWrite> Ws, WriteResultFn OnWriteComplete) {
+ for (auto &W : Ws)
+ memcpy(W.Addr.toPtr<char *>(), W.Buffer.data(), W.Buffer.size());
+ OnWriteComplete(Error::success());
+}
+
+shared::CWrapperFunctionResult
+SelfExecutorProcessControl::jitDispatchViaWrapperFunctionManager(
+ void *Ctx, const void *FnTag, const char *Data, size_t Size) {
+
+ LLVM_DEBUG({
+ dbgs() << "jit-dispatch call with tag " << FnTag << " and " << Size
+ << " byte payload.\n";
+ });
+
+ std::promise<shared::WrapperFunctionResult> ResultP;
+ auto ResultF = ResultP.get_future();
+ static_cast<SelfExecutorProcessControl *>(Ctx)
+ ->getExecutionSession()
+ .runJITDispatchHandler(
+ [ResultP = std::move(ResultP)](
+ shared::WrapperFunctionResult Result) mutable {
+ ResultP.set_value(std::move(Result));
+ },
+ pointerToJITTargetAddress(FnTag), {Data, Size});
+
+ return ResultF.get().release();
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IRCompileLayer.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IRCompileLayer.cpp
new file mode 100644
index 0000000000..aadc437c80
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IRCompileLayer.cpp
@@ -0,0 +1,48 @@
+//===--------------- IRCompileLayer.cpp - IR Compiling Layer --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
+
+namespace llvm {
+namespace orc {
+
+IRCompileLayer::IRCompiler::~IRCompiler() {}
+
+IRCompileLayer::IRCompileLayer(ExecutionSession &ES, ObjectLayer &BaseLayer,
+ std::unique_ptr<IRCompiler> Compile)
+ : IRLayer(ES, ManglingOpts), BaseLayer(BaseLayer),
+ Compile(std::move(Compile)) {
+ ManglingOpts = &this->Compile->getManglingOptions();
+}
+
+void IRCompileLayer::setNotifyCompiled(NotifyCompiledFunction NotifyCompiled) {
+ std::lock_guard<std::mutex> Lock(IRLayerMutex);
+ this->NotifyCompiled = std::move(NotifyCompiled);
+}
+
+void IRCompileLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Module must not be null");
+
+ if (auto Obj = TSM.withModuleDo(*Compile)) {
+ {
+ std::lock_guard<std::mutex> Lock(IRLayerMutex);
+ if (NotifyCompiled)
+ NotifyCompiled(*R, std::move(TSM));
+ else
+ TSM = ThreadSafeModule();
+ }
+ BaseLayer.emit(std::move(R), std::move(*Obj));
+ } else {
+ R->failMaterialization();
+ getExecutionSession().reportError(Obj.takeError());
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IRTransformLayer.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IRTransformLayer.cpp
new file mode 100644
index 0000000000..d5b1134927
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IRTransformLayer.cpp
@@ -0,0 +1,33 @@
+//===-------------- IRTransformLayer.cpp - IR Transform Layer -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IRTransformLayer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
+IRTransformLayer::IRTransformLayer(ExecutionSession &ES, IRLayer &BaseLayer,
+ TransformFunction Transform)
+ : IRLayer(ES, BaseLayer.getManglingOptions()), BaseLayer(BaseLayer),
+ Transform(std::move(Transform)) {}
+
+void IRTransformLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+ ThreadSafeModule TSM) {
+ assert(TSM && "Module must not be null");
+
+ if (auto TransformedTSM = Transform(std::move(TSM), *R))
+ BaseLayer.emit(std::move(R), std::move(*TransformedTSM));
+ else {
+ R->failMaterialization();
+ getExecutionSession().reportError(TransformedTSM.takeError());
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IndirectionUtils.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
new file mode 100644
index 0000000000..7a71d2f781
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/IndirectionUtils.cpp
@@ -0,0 +1,453 @@
+//===---- IndirectionUtils.cpp - Utilities for call indirection in Orc ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInstrAnalysis.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <sstream>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+class CompileCallbackMaterializationUnit : public orc::MaterializationUnit {
+public:
+ using CompileFunction = JITCompileCallbackManager::CompileFunction;
+
+ CompileCallbackMaterializationUnit(SymbolStringPtr Name,
+ CompileFunction Compile)
+ : MaterializationUnit(Interface(
+ SymbolFlagsMap({{Name, JITSymbolFlags::Exported}}), nullptr)),
+ Name(std::move(Name)), Compile(std::move(Compile)) {}
+
+ StringRef getName() const override { return "<Compile Callbacks>"; }
+
+private:
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ SymbolMap Result;
+ Result[Name] = JITEvaluatedSymbol(Compile(), JITSymbolFlags::Exported);
+ // No dependencies, so these calls cannot fail.
+ cantFail(R->notifyResolved(Result));
+ cantFail(R->notifyEmitted());
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Name) override {
+ llvm_unreachable("Discard should never occur on a LMU?");
+ }
+
+ SymbolStringPtr Name;
+ CompileFunction Compile;
+};
+
+} // namespace
+
+namespace llvm {
+namespace orc {
+
+TrampolinePool::~TrampolinePool() {}
+void IndirectStubsManager::anchor() {}
+
+Expected<JITTargetAddress>
+JITCompileCallbackManager::getCompileCallback(CompileFunction Compile) {
+ if (auto TrampolineAddr = TP->getTrampoline()) {
+ auto CallbackName =
+ ES.intern(std::string("cc") + std::to_string(++NextCallbackId));
+
+ std::lock_guard<std::mutex> Lock(CCMgrMutex);
+ AddrToSymbol[*TrampolineAddr] = CallbackName;
+ cantFail(
+ CallbacksJD.define(std::make_unique<CompileCallbackMaterializationUnit>(
+ std::move(CallbackName), std::move(Compile))));
+ return *TrampolineAddr;
+ } else
+ return TrampolineAddr.takeError();
+}
+
+JITTargetAddress JITCompileCallbackManager::executeCompileCallback(
+ JITTargetAddress TrampolineAddr) {
+ SymbolStringPtr Name;
+
+ {
+ std::unique_lock<std::mutex> Lock(CCMgrMutex);
+ auto I = AddrToSymbol.find(TrampolineAddr);
+
+ // If this address is not associated with a compile callback then report an
+ // error to the execution session and return ErrorHandlerAddress to the
+ // callee.
+ if (I == AddrToSymbol.end()) {
+ Lock.unlock();
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "No compile callback for trampoline at "
+ << format("0x%016" PRIx64, TrampolineAddr);
+ }
+ ES.reportError(
+ make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode()));
+ return ErrorHandlerAddress;
+ } else
+ Name = I->second;
+ }
+
+ if (auto Sym =
+ ES.lookup(makeJITDylibSearchOrder(
+ &CallbacksJD, JITDylibLookupFlags::MatchAllSymbols),
+ Name))
+ return Sym->getAddress();
+ else {
+ llvm::dbgs() << "Didn't find callback.\n";
+ // If anything goes wrong materializing Sym then report it to the session
+ // and return the ErrorHandlerAddress;
+ ES.reportError(Sym.takeError());
+ return ErrorHandlerAddress;
+ }
+}
+
+Expected<std::unique_ptr<JITCompileCallbackManager>>
+createLocalCompileCallbackManager(const Triple &T, ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddress) {
+ switch (T.getArch()) {
+ default:
+ return make_error<StringError>(
+ std::string("No callback manager available for ") + T.str(),
+ inconvertibleErrorCode());
+ case Triple::aarch64:
+ case Triple::aarch64_32: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcAArch64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::x86: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcI386> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::mips: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips32Be> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+ case Triple::mipsel: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips32Le> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::mips64:
+ case Triple::mips64el: {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcMips64> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+
+ case Triple::x86_64: {
+ if (T.getOS() == Triple::OSType::Win32) {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_Win32> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ } else {
+ typedef orc::LocalJITCompileCallbackManager<orc::OrcX86_64_SysV> CCMgrT;
+ return CCMgrT::Create(ES, ErrorHandlerAddress);
+ }
+ }
+
+ }
+}
+
+std::function<std::unique_ptr<IndirectStubsManager>()>
+createLocalIndirectStubsManagerBuilder(const Triple &T) {
+ switch (T.getArch()) {
+ default:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcGenericABI>>();
+ };
+
+ case Triple::aarch64:
+ case Triple::aarch64_32:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcAArch64>>();
+ };
+
+ case Triple::x86:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcI386>>();
+ };
+
+ case Triple::mips:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips32Be>>();
+ };
+
+ case Triple::mipsel:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips32Le>>();
+ };
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcMips64>>();
+ };
+
+ case Triple::x86_64:
+ if (T.getOS() == Triple::OSType::Win32) {
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_Win32>>();
+ };
+ } else {
+ return [](){
+ return std::make_unique<
+ orc::LocalIndirectStubsManager<orc::OrcX86_64_SysV>>();
+ };
+ }
+
+ }
+}
+
+Constant* createIRTypedAddress(FunctionType &FT, JITTargetAddress Addr) {
+ Constant *AddrIntVal =
+ ConstantInt::get(Type::getInt64Ty(FT.getContext()), Addr);
+ Constant *AddrPtrVal =
+ ConstantExpr::getCast(Instruction::IntToPtr, AddrIntVal,
+ PointerType::get(&FT, 0));
+ return AddrPtrVal;
+}
+
+GlobalVariable* createImplPointer(PointerType &PT, Module &M,
+ const Twine &Name, Constant *Initializer) {
+ auto IP = new GlobalVariable(M, &PT, false, GlobalValue::ExternalLinkage,
+ Initializer, Name, nullptr,
+ GlobalValue::NotThreadLocal, 0, true);
+ IP->setVisibility(GlobalValue::HiddenVisibility);
+ return IP;
+}
+
+void makeStub(Function &F, Value &ImplPointer) {
+ assert(F.isDeclaration() && "Can't turn a definition into a stub.");
+ assert(F.getParent() && "Function isn't in a module.");
+ Module &M = *F.getParent();
+ BasicBlock *EntryBlock = BasicBlock::Create(M.getContext(), "entry", &F);
+ IRBuilder<> Builder(EntryBlock);
+ LoadInst *ImplAddr = Builder.CreateLoad(F.getType(), &ImplPointer);
+ std::vector<Value*> CallArgs;
+ for (auto &A : F.args())
+ CallArgs.push_back(&A);
+ CallInst *Call = Builder.CreateCall(F.getFunctionType(), ImplAddr, CallArgs);
+ Call->setTailCall();
+ Call->setAttributes(F.getAttributes());
+ if (F.getReturnType()->isVoidTy())
+ Builder.CreateRetVoid();
+ else
+ Builder.CreateRet(Call);
+}
+
+std::vector<GlobalValue *> SymbolLinkagePromoter::operator()(Module &M) {
+ std::vector<GlobalValue *> PromotedGlobals;
+
+ for (auto &GV : M.global_values()) {
+ bool Promoted = true;
+
+ // Rename if necessary.
+ if (!GV.hasName())
+ GV.setName("__orc_anon." + Twine(NextId++));
+ else if (GV.getName().startswith("\01L"))
+ GV.setName("__" + GV.getName().substr(1) + "." + Twine(NextId++));
+ else if (GV.hasLocalLinkage())
+ GV.setName("__orc_lcl." + GV.getName() + "." + Twine(NextId++));
+ else
+ Promoted = false;
+
+ if (GV.hasLocalLinkage()) {
+ GV.setLinkage(GlobalValue::ExternalLinkage);
+ GV.setVisibility(GlobalValue::HiddenVisibility);
+ Promoted = true;
+ }
+ GV.setUnnamedAddr(GlobalValue::UnnamedAddr::None);
+
+ if (Promoted)
+ PromotedGlobals.push_back(&GV);
+ }
+
+ return PromotedGlobals;
+}
+
+Function* cloneFunctionDecl(Module &Dst, const Function &F,
+ ValueToValueMapTy *VMap) {
+ Function *NewF =
+ Function::Create(cast<FunctionType>(F.getValueType()),
+ F.getLinkage(), F.getName(), &Dst);
+ NewF->copyAttributesFrom(&F);
+
+ if (VMap) {
+ (*VMap)[&F] = NewF;
+ auto NewArgI = NewF->arg_begin();
+ for (auto ArgI = F.arg_begin(), ArgE = F.arg_end(); ArgI != ArgE;
+ ++ArgI, ++NewArgI)
+ (*VMap)[&*ArgI] = &*NewArgI;
+ }
+
+ return NewF;
+}
+
+void moveFunctionBody(Function &OrigF, ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer,
+ Function *NewF) {
+ assert(!OrigF.isDeclaration() && "Nothing to move");
+ if (!NewF)
+ NewF = cast<Function>(VMap[&OrigF]);
+ else
+ assert(VMap[&OrigF] == NewF && "Incorrect function mapping in VMap.");
+ assert(NewF && "Function mapping missing from VMap.");
+ assert(NewF->getParent() != OrigF.getParent() &&
+ "moveFunctionBody should only be used to move bodies between "
+ "modules.");
+
+ SmallVector<ReturnInst *, 8> Returns; // Ignore returns cloned.
+ CloneFunctionInto(NewF, &OrigF, VMap,
+ CloneFunctionChangeType::DifferentModule, Returns, "",
+ nullptr, nullptr, Materializer);
+ OrigF.deleteBody();
+}
+
+GlobalVariable* cloneGlobalVariableDecl(Module &Dst, const GlobalVariable &GV,
+ ValueToValueMapTy *VMap) {
+ GlobalVariable *NewGV = new GlobalVariable(
+ Dst, GV.getValueType(), GV.isConstant(),
+ GV.getLinkage(), nullptr, GV.getName(), nullptr,
+ GV.getThreadLocalMode(), GV.getType()->getAddressSpace());
+ NewGV->copyAttributesFrom(&GV);
+ if (VMap)
+ (*VMap)[&GV] = NewGV;
+ return NewGV;
+}
+
+void moveGlobalVariableInitializer(GlobalVariable &OrigGV,
+ ValueToValueMapTy &VMap,
+ ValueMaterializer *Materializer,
+ GlobalVariable *NewGV) {
+ assert(OrigGV.hasInitializer() && "Nothing to move");
+ if (!NewGV)
+ NewGV = cast<GlobalVariable>(VMap[&OrigGV]);
+ else
+ assert(VMap[&OrigGV] == NewGV &&
+ "Incorrect global variable mapping in VMap.");
+ assert(NewGV->getParent() != OrigGV.getParent() &&
+ "moveGlobalVariableInitializer should only be used to move "
+ "initializers between modules");
+
+ NewGV->setInitializer(MapValue(OrigGV.getInitializer(), VMap, RF_None,
+ nullptr, Materializer));
+}
+
+GlobalAlias* cloneGlobalAliasDecl(Module &Dst, const GlobalAlias &OrigA,
+ ValueToValueMapTy &VMap) {
+ assert(OrigA.getAliasee() && "Original alias doesn't have an aliasee?");
+ auto *NewA = GlobalAlias::create(OrigA.getValueType(),
+ OrigA.getType()->getPointerAddressSpace(),
+ OrigA.getLinkage(), OrigA.getName(), &Dst);
+ NewA->copyAttributesFrom(&OrigA);
+ VMap[&OrigA] = NewA;
+ return NewA;
+}
+
+void cloneModuleFlagsMetadata(Module &Dst, const Module &Src,
+ ValueToValueMapTy &VMap) {
+ auto *MFs = Src.getModuleFlagsMetadata();
+ if (!MFs)
+ return;
+ for (auto *MF : MFs->operands())
+ Dst.addModuleFlag(MapMetadata(MF, VMap));
+}
+
+Error addFunctionPointerRelocationsToCurrentSymbol(jitlink::Symbol &Sym,
+ jitlink::LinkGraph &G,
+ MCDisassembler &Disassembler,
+ MCInstrAnalysis &MIA) {
+ // AArch64 appears to already come with the necessary relocations. Among other
+ // architectures, only x86_64 is currently implemented here.
+ if (G.getTargetTriple().getArch() != Triple::x86_64)
+ return Error::success();
+
+ raw_null_ostream CommentStream;
+ auto &STI = Disassembler.getSubtargetInfo();
+
+ // Determine the function bounds
+ auto &B = Sym.getBlock();
+ assert(!B.isZeroFill() && "expected content block");
+ auto SymAddress = Sym.getAddress();
+ auto SymStartInBlock =
+ (const uint8_t *)B.getContent().data() + Sym.getOffset();
+ auto SymSize = Sym.getSize() ? Sym.getSize() : B.getSize() - Sym.getOffset();
+ auto Content = makeArrayRef(SymStartInBlock, SymSize);
+
+ LLVM_DEBUG(dbgs() << "Adding self-relocations to " << Sym.getName() << "\n");
+
+ SmallDenseSet<uintptr_t, 8> ExistingRelocations;
+ for (auto &E : B.edges()) {
+ if (E.isRelocation())
+ ExistingRelocations.insert(E.getOffset());
+ }
+
+ size_t I = 0;
+ while (I < Content.size()) {
+ MCInst Instr;
+ uint64_t InstrSize = 0;
+ uint64_t InstrStart = SymAddress.getValue() + I;
+ auto DecodeStatus = Disassembler.getInstruction(
+ Instr, InstrSize, Content.drop_front(I), InstrStart, CommentStream);
+ if (DecodeStatus != MCDisassembler::Success) {
+ LLVM_DEBUG(dbgs() << "Aborting due to disassembly failure at address "
+ << InstrStart);
+ return make_error<StringError>(
+ formatv("failed to disassemble at address {0:x16}", InstrStart),
+ inconvertibleErrorCode());
+ }
+ // Advance to the next instruction.
+ I += InstrSize;
+
+ // Check for a PC-relative address equal to the symbol itself.
+ auto PCRelAddr =
+ MIA.evaluateMemoryOperandAddress(Instr, &STI, InstrStart, InstrSize);
+ if (!PCRelAddr || *PCRelAddr != SymAddress.getValue())
+ continue;
+
+ auto RelocOffInInstr =
+ MIA.getMemoryOperandRelocationOffset(Instr, InstrSize);
+ if (!RelocOffInInstr.hasValue() ||
+ InstrSize - RelocOffInInstr.getValue() != 4) {
+ LLVM_DEBUG(dbgs() << "Skipping unknown self-relocation at "
+ << InstrStart);
+ continue;
+ }
+
+ auto RelocOffInBlock = orc::ExecutorAddr(InstrStart) + *RelocOffInInstr -
+ SymAddress + Sym.getOffset();
+ if (ExistingRelocations.contains(RelocOffInBlock))
+ continue;
+
+ LLVM_DEBUG(dbgs() << "Adding delta32 self-relocation at " << InstrStart);
+ B.addEdge(jitlink::x86_64::Delta32, RelocOffInBlock, Sym, /*Addend=*/-4);
+ }
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp
new file mode 100644
index 0000000000..0fbf79b8a5
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/JITTargetMachineBuilder.cpp
@@ -0,0 +1,146 @@
+//===----- JITTargetMachineBuilder.cpp - Build TargetMachines for JIT -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace llvm {
+namespace orc {
+
+JITTargetMachineBuilder::JITTargetMachineBuilder(Triple TT)
+ : TT(std::move(TT)) {
+ Options.EmulatedTLS = true;
+ Options.ExplicitEmulatedTLS = true;
+}
+
+Expected<JITTargetMachineBuilder> JITTargetMachineBuilder::detectHost() {
+ // FIXME: getProcessTriple is bogus. It returns the host LLVM was compiled on,
+ // rather than a valid triple for the current process.
+ JITTargetMachineBuilder TMBuilder((Triple(sys::getProcessTriple())));
+
+ // Retrieve host CPU name and sub-target features and add them to builder.
+ // Relocation model, code model and codegen opt level are kept to default
+ // values.
+ llvm::StringMap<bool> FeatureMap;
+ llvm::sys::getHostCPUFeatures(FeatureMap);
+ for (auto &Feature : FeatureMap)
+ TMBuilder.getFeatures().AddFeature(Feature.first(), Feature.second);
+
+ TMBuilder.setCPU(std::string(llvm::sys::getHostCPUName()));
+
+ return TMBuilder;
+}
+
+Expected<std::unique_ptr<TargetMachine>>
+JITTargetMachineBuilder::createTargetMachine() {
+
+ std::string ErrMsg;
+ auto *TheTarget = TargetRegistry::lookupTarget(TT.getTriple(), ErrMsg);
+ if (!TheTarget)
+ return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
+
+ auto *TM =
+ TheTarget->createTargetMachine(TT.getTriple(), CPU, Features.getString(),
+ Options, RM, CM, OptLevel, /*JIT*/ true);
+ if (!TM)
+ return make_error<StringError>("Could not allocate target machine",
+ inconvertibleErrorCode());
+
+ return std::unique_ptr<TargetMachine>(TM);
+}
+
+JITTargetMachineBuilder &JITTargetMachineBuilder::addFeatures(
+ const std::vector<std::string> &FeatureVec) {
+ for (const auto &F : FeatureVec)
+ Features.AddFeature(F);
+ return *this;
+}
+
+#ifndef NDEBUG
+void JITTargetMachineBuilderPrinter::print(raw_ostream &OS) const {
+ OS << Indent << "{\n"
+ << Indent << " Triple = \"" << JTMB.TT.str() << "\"\n"
+ << Indent << " CPU = \"" << JTMB.CPU << "\"\n"
+ << Indent << " Features = \"" << JTMB.Features.getString() << "\"\n"
+ << Indent << " Options = <not-printable>\n"
+ << Indent << " Relocation Model = ";
+
+ if (JTMB.RM) {
+ switch (*JTMB.RM) {
+ case Reloc::Static:
+ OS << "Static";
+ break;
+ case Reloc::PIC_:
+ OS << "PIC_";
+ break;
+ case Reloc::DynamicNoPIC:
+ OS << "DynamicNoPIC";
+ break;
+ case Reloc::ROPI:
+ OS << "ROPI";
+ break;
+ case Reloc::RWPI:
+ OS << "RWPI";
+ break;
+ case Reloc::ROPI_RWPI:
+ OS << "ROPI_RWPI";
+ break;
+ }
+ } else
+ OS << "unspecified (will use target default)";
+
+ OS << "\n"
+ << Indent << " Code Model = ";
+
+ if (JTMB.CM) {
+ switch (*JTMB.CM) {
+ case CodeModel::Tiny:
+ OS << "Tiny";
+ break;
+ case CodeModel::Small:
+ OS << "Small";
+ break;
+ case CodeModel::Kernel:
+ OS << "Kernel";
+ break;
+ case CodeModel::Medium:
+ OS << "Medium";
+ break;
+ case CodeModel::Large:
+ OS << "Large";
+ break;
+ }
+ } else
+ OS << "unspecified (will use target default)";
+
+ OS << "\n"
+ << Indent << " Optimization Level = ";
+ switch (JTMB.OptLevel) {
+ case CodeGenOpt::None:
+ OS << "None";
+ break;
+ case CodeGenOpt::Less:
+ OS << "Less";
+ break;
+ case CodeGenOpt::Default:
+ OS << "Default";
+ break;
+ case CodeGenOpt::Aggressive:
+ OS << "Aggressive";
+ break;
+ }
+
+ OS << "\n" << Indent << "}\n";
+}
+#endif // NDEBUG
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LLJIT.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LLJIT.cpp
new file mode 100644
index 0000000000..91949c9d7e
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LLJIT.cpp
@@ -0,0 +1,933 @@
+//===--------- LLJIT.cpp - An ORC-based JIT for compiling LLVM IR ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/Orc/ExecutorProcessControl.h"
+#include "llvm/ExecutionEngine/Orc/MachOPlatform.h"
+#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/DynamicLibrary.h"
+
+#include <map>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+/// Adds helper function decls and wrapper functions that call the helper with
+/// some additional prefix arguments.
+///
+/// E.g. For wrapper "foo" with type i8(i8, i64), helper "bar", and prefix
+/// args i32 4 and i16 12345, this function will add:
+///
+/// declare i8 @bar(i32, i16, i8, i64)
+///
+/// define i8 @foo(i8, i64) {
+/// entry:
+/// %2 = call i8 @bar(i32 4, i16 12345, i8 %0, i64 %1)
+/// ret i8 %2
+/// }
+///
+Function *addHelperAndWrapper(Module &M, StringRef WrapperName,
+ FunctionType *WrapperFnType,
+ GlobalValue::VisibilityTypes WrapperVisibility,
+ StringRef HelperName,
+ ArrayRef<Value *> HelperPrefixArgs) {
+ std::vector<Type *> HelperArgTypes;
+ for (auto *Arg : HelperPrefixArgs)
+ HelperArgTypes.push_back(Arg->getType());
+ for (auto *T : WrapperFnType->params())
+ HelperArgTypes.push_back(T);
+ auto *HelperFnType =
+ FunctionType::get(WrapperFnType->getReturnType(), HelperArgTypes, false);
+ auto *HelperFn = Function::Create(HelperFnType, GlobalValue::ExternalLinkage,
+ HelperName, M);
+
+ auto *WrapperFn = Function::Create(
+ WrapperFnType, GlobalValue::ExternalLinkage, WrapperName, M);
+ WrapperFn->setVisibility(WrapperVisibility);
+
+ auto *EntryBlock = BasicBlock::Create(M.getContext(), "entry", WrapperFn);
+ IRBuilder<> IB(EntryBlock);
+
+ std::vector<Value *> HelperArgs;
+ for (auto *Arg : HelperPrefixArgs)
+ HelperArgs.push_back(Arg);
+ for (auto &Arg : WrapperFn->args())
+ HelperArgs.push_back(&Arg);
+ auto *HelperResult = IB.CreateCall(HelperFn, HelperArgs);
+ if (HelperFn->getReturnType()->isVoidTy())
+ IB.CreateRetVoid();
+ else
+ IB.CreateRet(HelperResult);
+
+ return WrapperFn;
+}
+
+class GenericLLVMIRPlatformSupport;
+
+/// orc::Platform component of Generic LLVM IR Platform support.
+/// Just forwards calls to the GenericLLVMIRPlatformSupport class below.
+class GenericLLVMIRPlatform : public Platform {
+public:
+ GenericLLVMIRPlatform(GenericLLVMIRPlatformSupport &S) : S(S) {}
+ Error setupJITDylib(JITDylib &JD) override;
+ Error teardownJITDylib(JITDylib &JD) override;
+ Error notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) override;
+ Error notifyRemoving(ResourceTracker &RT) override {
+ // Noop -- Nothing to do (yet).
+ return Error::success();
+ }
+
+private:
+ GenericLLVMIRPlatformSupport &S;
+};
+
+/// This transform parses llvm.global_ctors to produce a single initialization
+/// function for the module, records the function, then deletes
+/// llvm.global_ctors.
+class GlobalCtorDtorScraper {
+public:
+ GlobalCtorDtorScraper(GenericLLVMIRPlatformSupport &PS,
+ StringRef InitFunctionPrefix,
+ StringRef DeInitFunctionPrefix)
+ : PS(PS), InitFunctionPrefix(InitFunctionPrefix),
+ DeInitFunctionPrefix(DeInitFunctionPrefix) {}
+ Expected<ThreadSafeModule> operator()(ThreadSafeModule TSM,
+ MaterializationResponsibility &R);
+
+private:
+ GenericLLVMIRPlatformSupport &PS;
+ StringRef InitFunctionPrefix;
+ StringRef DeInitFunctionPrefix;
+};
+
+/// Generic IR Platform Support
+///
+/// Scrapes llvm.global_ctors and llvm.global_dtors and replaces them with
+/// specially named 'init' and 'deinit'. Injects definitions / interposes for
+/// some runtime API, including __cxa_atexit, dlopen, and dlclose.
+class GenericLLVMIRPlatformSupport : public LLJIT::PlatformSupport {
+public:
+ GenericLLVMIRPlatformSupport(LLJIT &J)
+ : J(J), InitFunctionPrefix(J.mangle("__orc_init_func.")),
+ DeInitFunctionPrefix(J.mangle("__orc_deinit_func.")) {
+
+ getExecutionSession().setPlatform(
+ std::make_unique<GenericLLVMIRPlatform>(*this));
+
+ setInitTransform(J, GlobalCtorDtorScraper(*this, InitFunctionPrefix,
+ DeInitFunctionPrefix));
+
+ SymbolMap StdInterposes;
+
+ StdInterposes[J.mangleAndIntern("__lljit.platform_support_instance")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(this),
+ JITSymbolFlags::Exported);
+ StdInterposes[J.mangleAndIntern("__lljit.cxa_atexit_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(registerAtExitHelper),
+ JITSymbolFlags());
+
+ cantFail(
+ J.getMainJITDylib().define(absoluteSymbols(std::move(StdInterposes))));
+ cantFail(setupJITDylib(J.getMainJITDylib()));
+ cantFail(J.addIRModule(J.getMainJITDylib(), createPlatformRuntimeModule()));
+ }
+
+ ExecutionSession &getExecutionSession() { return J.getExecutionSession(); }
+
+ /// Adds a module that defines the __dso_handle global.
+ Error setupJITDylib(JITDylib &JD) {
+
+ // Add per-jitdylib standard interposes.
+ SymbolMap PerJDInterposes;
+ PerJDInterposes[J.mangleAndIntern("__lljit.run_atexits_helper")] =
+ JITEvaluatedSymbol(pointerToJITTargetAddress(runAtExitsHelper),
+ JITSymbolFlags());
+ cantFail(JD.define(absoluteSymbols(std::move(PerJDInterposes))));
+
+ auto Ctx = std::make_unique<LLVMContext>();
+ auto M = std::make_unique<Module>("__standard_lib", *Ctx);
+ M->setDataLayout(J.getDataLayout());
+
+ auto *Int64Ty = Type::getInt64Ty(*Ctx);
+ auto *DSOHandle = new GlobalVariable(
+ *M, Int64Ty, true, GlobalValue::ExternalLinkage,
+ ConstantInt::get(Int64Ty, reinterpret_cast<uintptr_t>(&JD)),
+ "__dso_handle");
+ DSOHandle->setVisibility(GlobalValue::DefaultVisibility);
+ DSOHandle->setInitializer(
+ ConstantInt::get(Int64Ty, pointerToJITTargetAddress(&JD)));
+
+ auto *GenericIRPlatformSupportTy =
+ StructType::create(*Ctx, "lljit.GenericLLJITIRPlatformSupport");
+
+ auto *PlatformInstanceDecl = new GlobalVariable(
+ *M, GenericIRPlatformSupportTy, true, GlobalValue::ExternalLinkage,
+ nullptr, "__lljit.platform_support_instance");
+
+ auto *VoidTy = Type::getVoidTy(*Ctx);
+ addHelperAndWrapper(
+ *M, "__lljit_run_atexits", FunctionType::get(VoidTy, {}, false),
+ GlobalValue::HiddenVisibility, "__lljit.run_atexits_helper",
+ {PlatformInstanceDecl, DSOHandle});
+
+ return J.addIRModule(JD, ThreadSafeModule(std::move(M), std::move(Ctx)));
+ }
+
+ Error notifyAdding(ResourceTracker &RT, const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ if (auto &InitSym = MU.getInitializerSymbol())
+ InitSymbols[&JD].add(InitSym, SymbolLookupFlags::WeaklyReferencedSymbol);
+ else {
+ // If there's no identified init symbol attached, but there is a symbol
+ // with the GenericIRPlatform::InitFunctionPrefix, then treat that as
+ // an init function. Add the symbol to both the InitSymbols map (which
+ // will trigger a lookup to materialize the module) and the InitFunctions
+ // map (which holds the names of the symbols to execute).
+ for (auto &KV : MU.getSymbols())
+ if ((*KV.first).startswith(InitFunctionPrefix)) {
+ InitSymbols[&JD].add(KV.first,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ InitFunctions[&JD].add(KV.first);
+ } else if ((*KV.first).startswith(DeInitFunctionPrefix)) {
+ DeInitFunctions[&JD].add(KV.first);
+ }
+ }
+ return Error::success();
+ }
+
+ Error initialize(JITDylib &JD) override {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport getting initializers to run\n";
+ });
+ if (auto Initializers = getInitializers(JD)) {
+ LLVM_DEBUG(
+ { dbgs() << "GenericLLVMIRPlatformSupport running initializers\n"; });
+ for (auto InitFnAddr : *Initializers) {
+ LLVM_DEBUG({
+ dbgs() << " Running init " << formatv("{0:x16}", InitFnAddr)
+ << "...\n";
+ });
+ auto *InitFn = jitTargetAddressToFunction<void (*)()>(InitFnAddr);
+ InitFn();
+ }
+ } else
+ return Initializers.takeError();
+ return Error::success();
+ }
+
+ Error deinitialize(JITDylib &JD) override {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport getting deinitializers to run\n";
+ });
+ if (auto Deinitializers = getDeinitializers(JD)) {
+ LLVM_DEBUG({
+ dbgs() << "GenericLLVMIRPlatformSupport running deinitializers\n";
+ });
+ for (auto DeinitFnAddr : *Deinitializers) {
+ LLVM_DEBUG({
+ dbgs() << " Running deinit " << formatv("{0:x16}", DeinitFnAddr)
+ << "...\n";
+ });
+ auto *DeinitFn = jitTargetAddressToFunction<void (*)()>(DeinitFnAddr);
+ DeinitFn();
+ }
+ } else
+ return Deinitializers.takeError();
+
+ return Error::success();
+ }
+
+ void registerInitFunc(JITDylib &JD, SymbolStringPtr InitName) {
+ getExecutionSession().runSessionLocked([&]() {
+ InitFunctions[&JD].add(InitName);
+ });
+ }
+
+ void registerDeInitFunc(JITDylib &JD, SymbolStringPtr DeInitName) {
+ getExecutionSession().runSessionLocked(
+ [&]() { DeInitFunctions[&JD].add(DeInitName); });
+ }
+
+private:
+
+ Expected<std::vector<JITTargetAddress>> getInitializers(JITDylib &JD) {
+ if (auto Err = issueInitLookups(JD))
+ return std::move(Err);
+
+ DenseMap<JITDylib *, SymbolLookupSet> LookupSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ if (auto Err = getExecutionSession().runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto IFItr = InitFunctions.find(NextJD.get());
+ if (IFItr != InitFunctions.end()) {
+ LookupSymbols[NextJD.get()] = std::move(IFItr->second);
+ InitFunctions.erase(IFItr);
+ }
+ }
+ return Error::success();
+ }))
+ return std::move(Err);
+
+ LLVM_DEBUG({
+ dbgs() << "JITDylib init order is [ ";
+ for (auto &JD : llvm::reverse(DFSLinkOrder))
+ dbgs() << "\"" << JD->getName() << "\" ";
+ dbgs() << "]\n";
+ dbgs() << "Looking up init functions:\n";
+ for (auto &KV : LookupSymbols)
+ dbgs() << " \"" << KV.first->getName() << "\": " << KV.second << "\n";
+ });
+
+ auto &ES = getExecutionSession();
+ auto LookupResult = Platform::lookupInitSymbols(ES, LookupSymbols);
+
+ if (!LookupResult)
+ return LookupResult.takeError();
+
+ std::vector<JITTargetAddress> Initializers;
+ while (!DFSLinkOrder.empty()) {
+ auto &NextJD = *DFSLinkOrder.back();
+ DFSLinkOrder.pop_back();
+ auto InitsItr = LookupResult->find(&NextJD);
+ if (InitsItr == LookupResult->end())
+ continue;
+ for (auto &KV : InitsItr->second)
+ Initializers.push_back(KV.second.getAddress());
+ }
+
+ return Initializers;
+ }
+
+ Expected<std::vector<JITTargetAddress>> getDeinitializers(JITDylib &JD) {
+ auto &ES = getExecutionSession();
+
+ auto LLJITRunAtExits = J.mangleAndIntern("__lljit_run_atexits");
+
+ DenseMap<JITDylib *, SymbolLookupSet> LookupSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ if (auto Err = ES.runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto &JDLookupSymbols = LookupSymbols[NextJD.get()];
+ auto DIFItr = DeInitFunctions.find(NextJD.get());
+ if (DIFItr != DeInitFunctions.end()) {
+ LookupSymbols[NextJD.get()] = std::move(DIFItr->second);
+ DeInitFunctions.erase(DIFItr);
+ }
+ JDLookupSymbols.add(LLJITRunAtExits,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ }
+ return Error::success();
+ }))
+ return std::move(Err);
+
+ LLVM_DEBUG({
+ dbgs() << "JITDylib deinit order is [ ";
+ for (auto &JD : DFSLinkOrder)
+ dbgs() << "\"" << JD->getName() << "\" ";
+ dbgs() << "]\n";
+ dbgs() << "Looking up deinit functions:\n";
+ for (auto &KV : LookupSymbols)
+ dbgs() << " \"" << KV.first->getName() << "\": " << KV.second << "\n";
+ });
+
+ auto LookupResult = Platform::lookupInitSymbols(ES, LookupSymbols);
+
+ if (!LookupResult)
+ return LookupResult.takeError();
+
+ std::vector<JITTargetAddress> DeInitializers;
+ for (auto &NextJD : DFSLinkOrder) {
+ auto DeInitsItr = LookupResult->find(NextJD.get());
+ assert(DeInitsItr != LookupResult->end() &&
+ "Every JD should have at least __lljit_run_atexits");
+
+ auto RunAtExitsItr = DeInitsItr->second.find(LLJITRunAtExits);
+ if (RunAtExitsItr != DeInitsItr->second.end())
+ DeInitializers.push_back(RunAtExitsItr->second.getAddress());
+
+ for (auto &KV : DeInitsItr->second)
+ if (KV.first != LLJITRunAtExits)
+ DeInitializers.push_back(KV.second.getAddress());
+ }
+
+ return DeInitializers;
+ }
+
+ /// Issue lookups for all init symbols required to initialize JD (and any
+ /// JITDylibs that it depends on).
+ Error issueInitLookups(JITDylib &JD) {
+ DenseMap<JITDylib *, SymbolLookupSet> RequiredInitSymbols;
+ std::vector<JITDylibSP> DFSLinkOrder;
+
+ if (auto Err = getExecutionSession().runSessionLocked([&]() -> Error {
+ if (auto DFSLinkOrderOrErr = JD.getDFSLinkOrder())
+ DFSLinkOrder = std::move(*DFSLinkOrderOrErr);
+ else
+ return DFSLinkOrderOrErr.takeError();
+
+ for (auto &NextJD : DFSLinkOrder) {
+ auto ISItr = InitSymbols.find(NextJD.get());
+ if (ISItr != InitSymbols.end()) {
+ RequiredInitSymbols[NextJD.get()] = std::move(ISItr->second);
+ InitSymbols.erase(ISItr);
+ }
+ }
+ return Error::success();
+ }))
+ return Err;
+
+ return Platform::lookupInitSymbols(getExecutionSession(),
+ RequiredInitSymbols)
+ .takeError();
+ }
+
+ static void registerAtExitHelper(void *Self, void (*F)(void *), void *Ctx,
+ void *DSOHandle) {
+ LLVM_DEBUG({
+ dbgs() << "Registering atexit function " << (void *)F << " for JD "
+ << (*static_cast<JITDylib **>(DSOHandle))->getName() << "\n";
+ });
+ static_cast<GenericLLVMIRPlatformSupport *>(Self)->AtExitMgr.registerAtExit(
+ F, Ctx, DSOHandle);
+ }
+
+ static void runAtExitsHelper(void *Self, void *DSOHandle) {
+ LLVM_DEBUG({
+ dbgs() << "Running atexit functions for JD "
+ << (*static_cast<JITDylib **>(DSOHandle))->getName() << "\n";
+ });
+ static_cast<GenericLLVMIRPlatformSupport *>(Self)->AtExitMgr.runAtExits(
+ DSOHandle);
+ }
+
+ // Constructs an LLVM IR module containing platform runtime globals,
+ // functions, and interposes.
+ ThreadSafeModule createPlatformRuntimeModule() {
+ auto Ctx = std::make_unique<LLVMContext>();
+ auto M = std::make_unique<Module>("__standard_lib", *Ctx);
+ M->setDataLayout(J.getDataLayout());
+
+ auto *GenericIRPlatformSupportTy =
+ StructType::create(*Ctx, "lljit.GenericLLJITIRPlatformSupport");
+
+ auto *PlatformInstanceDecl = new GlobalVariable(
+ *M, GenericIRPlatformSupportTy, true, GlobalValue::ExternalLinkage,
+ nullptr, "__lljit.platform_support_instance");
+
+ auto *Int8Ty = Type::getInt8Ty(*Ctx);
+ auto *IntTy = Type::getIntNTy(*Ctx, sizeof(int) * CHAR_BIT);
+ auto *VoidTy = Type::getVoidTy(*Ctx);
+ auto *BytePtrTy = PointerType::getUnqual(Int8Ty);
+ auto *AtExitCallbackTy = FunctionType::get(VoidTy, {BytePtrTy}, false);
+ auto *AtExitCallbackPtrTy = PointerType::getUnqual(AtExitCallbackTy);
+
+ addHelperAndWrapper(
+ *M, "__cxa_atexit",
+ FunctionType::get(IntTy, {AtExitCallbackPtrTy, BytePtrTy, BytePtrTy},
+ false),
+ GlobalValue::DefaultVisibility, "__lljit.cxa_atexit_helper",
+ {PlatformInstanceDecl});
+
+ return ThreadSafeModule(std::move(M), std::move(Ctx));
+ }
+
+ LLJIT &J;
+ std::string InitFunctionPrefix;
+ std::string DeInitFunctionPrefix;
+ DenseMap<JITDylib *, SymbolLookupSet> InitSymbols;
+ DenseMap<JITDylib *, SymbolLookupSet> InitFunctions;
+ DenseMap<JITDylib *, SymbolLookupSet> DeInitFunctions;
+ ItaniumCXAAtExitSupport AtExitMgr;
+};
+
+Error GenericLLVMIRPlatform::setupJITDylib(JITDylib &JD) {
+ return S.setupJITDylib(JD);
+}
+
+Error GenericLLVMIRPlatform::teardownJITDylib(JITDylib &JD) {
+ return Error::success();
+}
+
+Error GenericLLVMIRPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ return S.notifyAdding(RT, MU);
+}
+
+Expected<ThreadSafeModule>
+GlobalCtorDtorScraper::operator()(ThreadSafeModule TSM,
+ MaterializationResponsibility &R) {
+ auto Err = TSM.withModuleDo([&](Module &M) -> Error {
+ auto &Ctx = M.getContext();
+ auto *GlobalCtors = M.getNamedGlobal("llvm.global_ctors");
+ auto *GlobalDtors = M.getNamedGlobal("llvm.global_dtors");
+
+ auto RegisterCOrDtors = [&](GlobalVariable *GlobalCOrDtors,
+ bool isCtor) -> Error {
+ // If there's no llvm.global_c/dtor or it's just a decl then skip.
+ if (!GlobalCOrDtors || GlobalCOrDtors->isDeclaration())
+ return Error::success();
+ std::string InitOrDeInitFunctionName;
+ if (isCtor)
+ raw_string_ostream(InitOrDeInitFunctionName)
+ << InitFunctionPrefix << M.getModuleIdentifier();
+ else
+ raw_string_ostream(InitOrDeInitFunctionName)
+ << DeInitFunctionPrefix << M.getModuleIdentifier();
+
+ MangleAndInterner Mangle(PS.getExecutionSession(), M.getDataLayout());
+ auto InternedInitOrDeInitName = Mangle(InitOrDeInitFunctionName);
+ if (auto Err = R.defineMaterializing(
+ {{InternedInitOrDeInitName, JITSymbolFlags::Callable}}))
+ return Err;
+
+ auto *InitOrDeInitFunc = Function::Create(
+ FunctionType::get(Type::getVoidTy(Ctx), {}, false),
+ GlobalValue::ExternalLinkage, InitOrDeInitFunctionName, &M);
+ InitOrDeInitFunc->setVisibility(GlobalValue::HiddenVisibility);
+ std::vector<std::pair<Function *, unsigned>> InitsOrDeInits;
+ auto COrDtors = isCtor ? getConstructors(M) : getDestructors(M);
+
+ for (auto E : COrDtors)
+ InitsOrDeInits.push_back(std::make_pair(E.Func, E.Priority));
+ llvm::sort(InitsOrDeInits,
+ [](const std::pair<Function *, unsigned> &LHS,
+ const std::pair<Function *, unsigned> &RHS) {
+ return LHS.first < RHS.first;
+ });
+
+ auto *InitOrDeInitFuncEntryBlock =
+ BasicBlock::Create(Ctx, "entry", InitOrDeInitFunc);
+ IRBuilder<> IB(InitOrDeInitFuncEntryBlock);
+ for (auto &KV : InitsOrDeInits)
+ IB.CreateCall(KV.first);
+ IB.CreateRetVoid();
+
+ if (isCtor)
+ PS.registerInitFunc(R.getTargetJITDylib(), InternedInitOrDeInitName);
+ else
+ PS.registerDeInitFunc(R.getTargetJITDylib(), InternedInitOrDeInitName);
+
+ GlobalCOrDtors->eraseFromParent();
+ return Error::success();
+ };
+
+ if (auto Err = RegisterCOrDtors(GlobalCtors, true))
+ return Err;
+ if (auto Err = RegisterCOrDtors(GlobalDtors, false))
+ return Err;
+
+ return Error::success();
+ });
+
+ if (Err)
+ return std::move(Err);
+
+ return std::move(TSM);
+}
+
+/// Inactive Platform Support
+///
+/// Explicitly disables platform support. JITDylibs are not scanned for special
+/// init/deinit symbols. No runtime API interposes are injected.
+class InactivePlatformSupport : public LLJIT::PlatformSupport {
+public:
+ InactivePlatformSupport() = default;
+
+ Error initialize(JITDylib &JD) override {
+ LLVM_DEBUG(dbgs() << "InactivePlatformSupport: no initializers running for "
+ << JD.getName() << "\n");
+ return Error::success();
+ }
+
+ Error deinitialize(JITDylib &JD) override {
+ LLVM_DEBUG(
+ dbgs() << "InactivePlatformSupport: no deinitializers running for "
+ << JD.getName() << "\n");
+ return Error::success();
+ }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+void LLJIT::PlatformSupport::setInitTransform(
+ LLJIT &J, IRTransformLayer::TransformFunction T) {
+ J.InitHelperTransformLayer->setTransform(std::move(T));
+}
+
+LLJIT::PlatformSupport::~PlatformSupport() {}
+
+Error LLJITBuilderState::prepareForConstruction() {
+
+ LLVM_DEBUG(dbgs() << "Preparing to create LLJIT instance...\n");
+
+ if (!JTMB) {
+ LLVM_DEBUG({
+ dbgs() << " No explicitly set JITTargetMachineBuilder. "
+ "Detecting host...\n";
+ });
+ if (auto JTMBOrErr = JITTargetMachineBuilder::detectHost())
+ JTMB = std::move(*JTMBOrErr);
+ else
+ return JTMBOrErr.takeError();
+ }
+
+ LLVM_DEBUG({
+ dbgs() << " JITTargetMachineBuilder is "
+ << JITTargetMachineBuilderPrinter(*JTMB, " ")
+ << " Pre-constructed ExecutionSession: " << (ES ? "Yes" : "No")
+ << "\n"
+ << " DataLayout: ";
+ if (DL)
+ dbgs() << DL->getStringRepresentation() << "\n";
+ else
+ dbgs() << "None (will be created by JITTargetMachineBuilder)\n";
+
+ dbgs() << " Custom object-linking-layer creator: "
+ << (CreateObjectLinkingLayer ? "Yes" : "No") << "\n"
+ << " Custom compile-function creator: "
+ << (CreateCompileFunction ? "Yes" : "No") << "\n"
+ << " Custom platform-setup function: "
+ << (SetUpPlatform ? "Yes" : "No") << "\n"
+ << " Number of compile threads: " << NumCompileThreads;
+ if (!NumCompileThreads)
+ dbgs() << " (code will be compiled on the execution thread)\n";
+ else
+ dbgs() << "\n";
+ });
+
+ // If neither ES nor EPC has been set then create an EPC instance.
+ if (!ES && !EPC) {
+ LLVM_DEBUG({
+ dbgs() << "ExecutorProcessControl not specified, "
+ "Creating SelfExecutorProcessControl instance\n";
+ });
+ if (auto EPCOrErr = SelfExecutorProcessControl::Create())
+ EPC = std::move(*EPCOrErr);
+ else
+ return EPCOrErr.takeError();
+ } else
+ LLVM_DEBUG({
+ dbgs() << "Using explicitly specified ExecutorProcessControl instance "
+ << EPC.get() << "\n";
+ });
+
+ // If the client didn't configure any linker options then auto-configure the
+ // JIT linker.
+ if (!CreateObjectLinkingLayer) {
+ auto &TT = JTMB->getTargetTriple();
+ if (TT.isOSBinFormatMachO() &&
+ (TT.getArch() == Triple::aarch64 || TT.getArch() == Triple::x86_64)) {
+
+ JTMB->setRelocationModel(Reloc::PIC_);
+ JTMB->setCodeModel(CodeModel::Small);
+ CreateObjectLinkingLayer =
+ [](ExecutionSession &ES,
+ const Triple &) -> Expected<std::unique_ptr<ObjectLayer>> {
+ auto ObjLinkingLayer = std::make_unique<ObjectLinkingLayer>(ES);
+ ObjLinkingLayer->addPlugin(std::make_unique<EHFrameRegistrationPlugin>(
+ ES, std::make_unique<jitlink::InProcessEHFrameRegistrar>()));
+ return std::move(ObjLinkingLayer);
+ };
+ }
+ }
+
+ return Error::success();
+}
+
+LLJIT::~LLJIT() {
+ if (CompileThreads)
+ CompileThreads->wait();
+ if (auto Err = ES->endSession())
+ ES->reportError(std::move(Err));
+}
+
+Error LLJIT::addIRModule(ResourceTrackerSP RT, ThreadSafeModule TSM) {
+ assert(TSM && "Can not add null module");
+
+ if (auto Err =
+ TSM.withModuleDo([&](Module &M) { return applyDataLayout(M); }))
+ return Err;
+
+ return InitHelperTransformLayer->add(std::move(RT), std::move(TSM));
+}
+
+Error LLJIT::addIRModule(JITDylib &JD, ThreadSafeModule TSM) {
+ return addIRModule(JD.getDefaultResourceTracker(), std::move(TSM));
+}
+
+Error LLJIT::addObjectFile(ResourceTrackerSP RT,
+ std::unique_ptr<MemoryBuffer> Obj) {
+ assert(Obj && "Can not add null object");
+
+ return ObjTransformLayer->add(std::move(RT), std::move(Obj));
+}
+
+Error LLJIT::addObjectFile(JITDylib &JD, std::unique_ptr<MemoryBuffer> Obj) {
+ return addObjectFile(JD.getDefaultResourceTracker(), std::move(Obj));
+}
+
+Expected<JITEvaluatedSymbol> LLJIT::lookupLinkerMangled(JITDylib &JD,
+ SymbolStringPtr Name) {
+ return ES->lookup(
+ makeJITDylibSearchOrder(&JD, JITDylibLookupFlags::MatchAllSymbols), Name);
+}
+
+Expected<std::unique_ptr<ObjectLayer>>
+LLJIT::createObjectLinkingLayer(LLJITBuilderState &S, ExecutionSession &ES) {
+
+ // If the config state provided an ObjectLinkingLayer factory then use it.
+ if (S.CreateObjectLinkingLayer)
+ return S.CreateObjectLinkingLayer(ES, S.JTMB->getTargetTriple());
+
+ // Otherwise default to creating an RTDyldObjectLinkingLayer that constructs
+ // a new SectionMemoryManager for each object.
+ auto GetMemMgr = []() { return std::make_unique<SectionMemoryManager>(); };
+ auto Layer =
+ std::make_unique<RTDyldObjectLinkingLayer>(ES, std::move(GetMemMgr));
+
+ if (S.JTMB->getTargetTriple().isOSBinFormatCOFF()) {
+ Layer->setOverrideObjectFlagsWithResponsibilityFlags(true);
+ Layer->setAutoClaimResponsibilityForObjectSymbols(true);
+ }
+
+ // FIXME: Explicit conversion to std::unique_ptr<ObjectLayer> added to silence
+ // errors from some GCC / libstdc++ bots. Remove this conversion (i.e.
+ // just return ObjLinkingLayer) once those bots are upgraded.
+ return std::unique_ptr<ObjectLayer>(std::move(Layer));
+}
+
+Expected<std::unique_ptr<IRCompileLayer::IRCompiler>>
+LLJIT::createCompileFunction(LLJITBuilderState &S,
+ JITTargetMachineBuilder JTMB) {
+
+ /// If there is a custom compile function creator set then use it.
+ if (S.CreateCompileFunction)
+ return S.CreateCompileFunction(std::move(JTMB));
+
+ // Otherwise default to creating a SimpleCompiler, or ConcurrentIRCompiler,
+ // depending on the number of threads requested.
+ if (S.NumCompileThreads > 0)
+ return std::make_unique<ConcurrentIRCompiler>(std::move(JTMB));
+
+ auto TM = JTMB.createTargetMachine();
+ if (!TM)
+ return TM.takeError();
+
+ return std::make_unique<TMOwningSimpleCompiler>(std::move(*TM));
+}
+
+LLJIT::LLJIT(LLJITBuilderState &S, Error &Err)
+ : DL(""), TT(S.JTMB->getTargetTriple()) {
+
+ ErrorAsOutParameter _(&Err);
+
+ assert(!(S.EPC && S.ES) && "EPC and ES should not both be set");
+
+ if (S.EPC) {
+ ES = std::make_unique<ExecutionSession>(std::move(S.EPC));
+ } else if (S.ES)
+ ES = std::move(S.ES);
+ else {
+ if (auto EPC = SelfExecutorProcessControl::Create()) {
+ ES = std::make_unique<ExecutionSession>(std::move(*EPC));
+ } else {
+ Err = EPC.takeError();
+ return;
+ }
+ }
+
+ if (auto MainOrErr = this->ES->createJITDylib("main"))
+ Main = &*MainOrErr;
+ else {
+ Err = MainOrErr.takeError();
+ return;
+ }
+
+ if (S.DL)
+ DL = std::move(*S.DL);
+ else if (auto DLOrErr = S.JTMB->getDefaultDataLayoutForTarget())
+ DL = std::move(*DLOrErr);
+ else {
+ Err = DLOrErr.takeError();
+ return;
+ }
+
+ auto ObjLayer = createObjectLinkingLayer(S, *ES);
+ if (!ObjLayer) {
+ Err = ObjLayer.takeError();
+ return;
+ }
+ ObjLinkingLayer = std::move(*ObjLayer);
+ ObjTransformLayer =
+ std::make_unique<ObjectTransformLayer>(*ES, *ObjLinkingLayer);
+
+ {
+ auto CompileFunction = createCompileFunction(S, std::move(*S.JTMB));
+ if (!CompileFunction) {
+ Err = CompileFunction.takeError();
+ return;
+ }
+ CompileLayer = std::make_unique<IRCompileLayer>(
+ *ES, *ObjTransformLayer, std::move(*CompileFunction));
+ TransformLayer = std::make_unique<IRTransformLayer>(*ES, *CompileLayer);
+ InitHelperTransformLayer =
+ std::make_unique<IRTransformLayer>(*ES, *TransformLayer);
+ }
+
+ if (S.NumCompileThreads > 0) {
+ InitHelperTransformLayer->setCloneToNewContextOnEmit(true);
+ CompileThreads =
+ std::make_unique<ThreadPool>(hardware_concurrency(S.NumCompileThreads));
+ ES->setDispatchTask([this](std::unique_ptr<Task> T) {
+ // FIXME: We should be able to use move-capture here, but ThreadPool's
+ // AsyncTaskTys are std::functions rather than unique_functions
+ // (because MSVC's std::packaged_tasks don't support move-only types).
+ // Fix this when all the above gets sorted out.
+ CompileThreads->async([UnownedT = T.release()]() mutable {
+ std::unique_ptr<Task> T(UnownedT);
+ T->run();
+ });
+ });
+ }
+
+ if (S.SetUpPlatform)
+ Err = S.SetUpPlatform(*this);
+ else
+ setUpGenericLLVMIRPlatform(*this);
+}
+
+std::string LLJIT::mangle(StringRef UnmangledName) const {
+ std::string MangledName;
+ {
+ raw_string_ostream MangledNameStream(MangledName);
+ Mangler::getNameWithPrefix(MangledNameStream, UnmangledName, DL);
+ }
+ return MangledName;
+}
+
+Error LLJIT::applyDataLayout(Module &M) {
+ if (M.getDataLayout().isDefault())
+ M.setDataLayout(DL);
+
+ if (M.getDataLayout() != DL)
+ return make_error<StringError>(
+ "Added modules have incompatible data layouts: " +
+ M.getDataLayout().getStringRepresentation() + " (module) vs " +
+ DL.getStringRepresentation() + " (jit)",
+ inconvertibleErrorCode());
+
+ return Error::success();
+}
+
+void setUpGenericLLVMIRPlatform(LLJIT &J) {
+ LLVM_DEBUG(
+ { dbgs() << "Setting up GenericLLVMIRPlatform support for LLJIT\n"; });
+ J.setPlatformSupport(std::make_unique<GenericLLVMIRPlatformSupport>(J));
+}
+
+Error setUpInactivePlatform(LLJIT &J) {
+ LLVM_DEBUG(
+ { dbgs() << "Explicitly deactivated platform support for LLJIT\n"; });
+ J.setPlatformSupport(std::make_unique<InactivePlatformSupport>());
+ return Error::success();
+}
+
+Error LLLazyJITBuilderState::prepareForConstruction() {
+ if (auto Err = LLJITBuilderState::prepareForConstruction())
+ return Err;
+ TT = JTMB->getTargetTriple();
+ return Error::success();
+}
+
+Error LLLazyJIT::addLazyIRModule(JITDylib &JD, ThreadSafeModule TSM) {
+ assert(TSM && "Can not add null module");
+
+ if (auto Err = TSM.withModuleDo(
+ [&](Module &M) -> Error { return applyDataLayout(M); }))
+ return Err;
+
+ return CODLayer->add(JD, std::move(TSM));
+}
+
+LLLazyJIT::LLLazyJIT(LLLazyJITBuilderState &S, Error &Err) : LLJIT(S, Err) {
+
+ // If LLJIT construction failed then bail out.
+ if (Err)
+ return;
+
+ ErrorAsOutParameter _(&Err);
+
+ /// Take/Create the lazy-compile callthrough manager.
+ if (S.LCTMgr)
+ LCTMgr = std::move(S.LCTMgr);
+ else {
+ if (auto LCTMgrOrErr = createLocalLazyCallThroughManager(
+ S.TT, *ES, S.LazyCompileFailureAddr))
+ LCTMgr = std::move(*LCTMgrOrErr);
+ else {
+ Err = LCTMgrOrErr.takeError();
+ return;
+ }
+ }
+
+ // Take/Create the indirect stubs manager builder.
+ auto ISMBuilder = std::move(S.ISMBuilder);
+
+ // If none was provided, try to build one.
+ if (!ISMBuilder)
+ ISMBuilder = createLocalIndirectStubsManagerBuilder(S.TT);
+
+ // No luck. Bail out.
+ if (!ISMBuilder) {
+ Err = make_error<StringError>("Could not construct "
+ "IndirectStubsManagerBuilder for target " +
+ S.TT.str(),
+ inconvertibleErrorCode());
+ return;
+ }
+
+ // Create the COD layer.
+ CODLayer = std::make_unique<CompileOnDemandLayer>(
+ *ES, *InitHelperTransformLayer, *LCTMgr, std::move(ISMBuilder));
+
+ if (S.NumCompileThreads > 0)
+ CODLayer->setCloneToNewContextOnEmit(true);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Layer.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Layer.cpp
new file mode 100644
index 0000000000..adb8861793
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Layer.cpp
@@ -0,0 +1,223 @@
+//===-------------------- Layer.cpp - Layer interfaces --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Layer.h"
+
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/ObjectFileInterface.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+IRLayer::~IRLayer() {}
+
+Error IRLayer::add(ResourceTrackerSP RT, ThreadSafeModule TSM) {
+ assert(RT && "RT can not be null");
+ auto &JD = RT->getJITDylib();
+ return JD.define(std::make_unique<BasicIRLayerMaterializationUnit>(
+ *this, *getManglingOptions(), std::move(TSM)),
+ std::move(RT));
+}
+
+IRMaterializationUnit::IRMaterializationUnit(
+ ExecutionSession &ES, const IRSymbolMapper::ManglingOptions &MO,
+ ThreadSafeModule TSM)
+ : MaterializationUnit(Interface()), TSM(std::move(TSM)) {
+
+ assert(this->TSM && "Module must not be null");
+
+ MangleAndInterner Mangle(ES, this->TSM.getModuleUnlocked()->getDataLayout());
+ this->TSM.withModuleDo([&](Module &M) {
+ for (auto &G : M.global_values()) {
+ // Skip globals that don't generate symbols.
+
+ if (!G.hasName() || G.isDeclaration() || G.hasLocalLinkage() ||
+ G.hasAvailableExternallyLinkage() || G.hasAppendingLinkage())
+ continue;
+
+ // thread locals generate different symbols depending on whether or not
+ // emulated TLS is enabled.
+ if (G.isThreadLocal() && MO.EmulatedTLS) {
+ auto &GV = cast<GlobalVariable>(G);
+
+ auto Flags = JITSymbolFlags::fromGlobalValue(GV);
+
+ auto EmuTLSV = Mangle(("__emutls_v." + GV.getName()).str());
+ SymbolFlags[EmuTLSV] = Flags;
+ SymbolToDefinition[EmuTLSV] = &GV;
+
+ // If this GV has a non-zero initializer we'll need to emit an
+ // __emutls.t symbol too.
+ if (GV.hasInitializer()) {
+ const auto *InitVal = GV.getInitializer();
+
+ // Skip zero-initializers.
+ if (isa<ConstantAggregateZero>(InitVal))
+ continue;
+ const auto *InitIntValue = dyn_cast<ConstantInt>(InitVal);
+ if (InitIntValue && InitIntValue->isZero())
+ continue;
+
+ auto EmuTLST = Mangle(("__emutls_t." + GV.getName()).str());
+ SymbolFlags[EmuTLST] = Flags;
+ }
+ continue;
+ }
+
+ // Otherwise we just need a normal linker mangling.
+ auto MangledName = Mangle(G.getName());
+ SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(G);
+ SymbolToDefinition[MangledName] = &G;
+ }
+
+ // If we need an init symbol for this module then create one.
+ if (!llvm::empty(getStaticInitGVs(M))) {
+ size_t Counter = 0;
+
+ do {
+ std::string InitSymbolName;
+ raw_string_ostream(InitSymbolName)
+ << "$." << M.getModuleIdentifier() << ".__inits." << Counter++;
+ InitSymbol = ES.intern(InitSymbolName);
+ } while (SymbolFlags.count(InitSymbol));
+
+ SymbolFlags[InitSymbol] = JITSymbolFlags::MaterializationSideEffectsOnly;
+ }
+ });
+}
+
+IRMaterializationUnit::IRMaterializationUnit(
+ ThreadSafeModule TSM, Interface I,
+ SymbolNameToDefinitionMap SymbolToDefinition)
+ : MaterializationUnit(std::move(I)), TSM(std::move(TSM)),
+ SymbolToDefinition(std::move(SymbolToDefinition)) {}
+
+StringRef IRMaterializationUnit::getName() const {
+ if (TSM)
+ return TSM.withModuleDo(
+ [](const Module &M) -> StringRef { return M.getModuleIdentifier(); });
+ return "<null module>";
+}
+
+void IRMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ LLVM_DEBUG(JD.getExecutionSession().runSessionLocked([&]() {
+ dbgs() << "In " << JD.getName() << " discarding " << *Name << " from MU@"
+ << this << " (" << getName() << ")\n";
+ }););
+
+ auto I = SymbolToDefinition.find(Name);
+ assert(I != SymbolToDefinition.end() &&
+ "Symbol not provided by this MU, or previously discarded");
+ assert(!I->second->isDeclaration() &&
+ "Discard should only apply to definitions");
+ I->second->setLinkage(GlobalValue::AvailableExternallyLinkage);
+ SymbolToDefinition.erase(I);
+}
+
+BasicIRLayerMaterializationUnit::BasicIRLayerMaterializationUnit(
+ IRLayer &L, const IRSymbolMapper::ManglingOptions &MO, ThreadSafeModule TSM)
+ : IRMaterializationUnit(L.getExecutionSession(), MO, std::move(TSM)), L(L) {
+}
+
+void BasicIRLayerMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+
+ // Throw away the SymbolToDefinition map: it's not usable after we hand
+ // off the module.
+ SymbolToDefinition.clear();
+
+ // If cloneToNewContextOnEmit is set, clone the module now.
+ if (L.getCloneToNewContextOnEmit())
+ TSM = cloneToNewContext(TSM);
+
+#ifndef NDEBUG
+ auto &ES = R->getTargetJITDylib().getExecutionSession();
+ auto &N = R->getTargetJITDylib().getName();
+#endif // NDEBUG
+
+ LLVM_DEBUG(ES.runSessionLocked(
+ [&]() { dbgs() << "Emitting, for " << N << ", " << *this << "\n"; }););
+ L.emit(std::move(R), std::move(TSM));
+ LLVM_DEBUG(ES.runSessionLocked([&]() {
+ dbgs() << "Finished emitting, for " << N << ", " << *this << "\n";
+ }););
+}
+
+char ObjectLayer::ID;
+
+ObjectLayer::ObjectLayer(ExecutionSession &ES) : ES(ES) {}
+
+ObjectLayer::~ObjectLayer() {}
+
+Error ObjectLayer::add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O,
+ MaterializationUnit::Interface I) {
+ assert(RT && "RT can not be null");
+ auto &JD = RT->getJITDylib();
+ return JD.define(std::make_unique<BasicObjectLayerMaterializationUnit>(
+ *this, std::move(O), std::move(I)),
+ std::move(RT));
+}
+
+Error ObjectLayer::add(ResourceTrackerSP RT, std::unique_ptr<MemoryBuffer> O) {
+ auto I = getObjectFileInterface(getExecutionSession(), O->getMemBufferRef());
+ if (!I)
+ return I.takeError();
+ return add(std::move(RT), std::move(O), std::move(*I));
+}
+
+Error ObjectLayer::add(JITDylib &JD, std::unique_ptr<MemoryBuffer> O) {
+ auto I = getObjectFileInterface(getExecutionSession(), O->getMemBufferRef());
+ if (!I)
+ return I.takeError();
+ return add(JD, std::move(O), std::move(*I));
+}
+
+Expected<std::unique_ptr<BasicObjectLayerMaterializationUnit>>
+BasicObjectLayerMaterializationUnit::Create(ObjectLayer &L,
+ std::unique_ptr<MemoryBuffer> O) {
+
+ auto ObjInterface =
+ getObjectFileInterface(L.getExecutionSession(), O->getMemBufferRef());
+
+ if (!ObjInterface)
+ return ObjInterface.takeError();
+
+ return std::unique_ptr<BasicObjectLayerMaterializationUnit>(
+ new BasicObjectLayerMaterializationUnit(L, std::move(O),
+ std::move(*ObjInterface)));
+}
+
+BasicObjectLayerMaterializationUnit::BasicObjectLayerMaterializationUnit(
+ ObjectLayer &L, std::unique_ptr<MemoryBuffer> O, Interface I)
+ : MaterializationUnit(std::move(I)), L(L), O(std::move(O)) {}
+
+StringRef BasicObjectLayerMaterializationUnit::getName() const {
+ if (O)
+ return O->getBufferIdentifier();
+ return "<null object>";
+}
+
+void BasicObjectLayerMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+ L.emit(std::move(R), std::move(O));
+}
+
+void BasicObjectLayerMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ // This is a no-op for object files: Having removed 'Name' from SymbolFlags
+ // the symbol will be dead-stripped by the JIT linker.
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LazyReexports.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LazyReexports.cpp
new file mode 100644
index 0000000000..66453e6a63
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LazyReexports.cpp
@@ -0,0 +1,234 @@
+//===---------- LazyReexports.cpp - Utilities for lazy reexports ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LazyReexports.h"
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+LazyCallThroughManager::LazyCallThroughManager(
+ ExecutionSession &ES, JITTargetAddress ErrorHandlerAddr, TrampolinePool *TP)
+ : ES(ES), ErrorHandlerAddr(ErrorHandlerAddr), TP(TP) {}
+
+Expected<JITTargetAddress> LazyCallThroughManager::getCallThroughTrampoline(
+ JITDylib &SourceJD, SymbolStringPtr SymbolName,
+ NotifyResolvedFunction NotifyResolved) {
+ assert(TP && "TrampolinePool not set");
+
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto Trampoline = TP->getTrampoline();
+
+ if (!Trampoline)
+ return Trampoline.takeError();
+
+ Reexports[*Trampoline] = ReexportsEntry{&SourceJD, std::move(SymbolName)};
+ Notifiers[*Trampoline] = std::move(NotifyResolved);
+ return *Trampoline;
+}
+
+JITTargetAddress LazyCallThroughManager::reportCallThroughError(Error Err) {
+ ES.reportError(std::move(Err));
+ return ErrorHandlerAddr;
+}
+
+Expected<LazyCallThroughManager::ReexportsEntry>
+LazyCallThroughManager::findReexport(JITTargetAddress TrampolineAddr) {
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto I = Reexports.find(TrampolineAddr);
+ if (I == Reexports.end())
+ return createStringError(inconvertibleErrorCode(),
+ "Missing reexport for trampoline address %p",
+ TrampolineAddr);
+ return I->second;
+}
+
+Error LazyCallThroughManager::notifyResolved(JITTargetAddress TrampolineAddr,
+ JITTargetAddress ResolvedAddr) {
+ NotifyResolvedFunction NotifyResolved;
+ {
+ std::lock_guard<std::mutex> Lock(LCTMMutex);
+ auto I = Notifiers.find(TrampolineAddr);
+ if (I != Notifiers.end()) {
+ NotifyResolved = std::move(I->second);
+ Notifiers.erase(I);
+ }
+ }
+
+ return NotifyResolved ? NotifyResolved(ResolvedAddr) : Error::success();
+}
+
+void LazyCallThroughManager::resolveTrampolineLandingAddress(
+ JITTargetAddress TrampolineAddr,
+ NotifyLandingResolvedFunction NotifyLandingResolved) {
+
+ auto Entry = findReexport(TrampolineAddr);
+ if (!Entry)
+ return NotifyLandingResolved(reportCallThroughError(Entry.takeError()));
+
+ // Declaring SLS and the callback outside of the call to ES.lookup is a
+ // workaround to fix build failures on AIX and on z/OS platforms.
+ SymbolLookupSet SLS({Entry->SymbolName});
+ auto Callback = [this, TrampolineAddr, SymbolName = Entry->SymbolName,
+ NotifyLandingResolved = std::move(NotifyLandingResolved)](
+ Expected<SymbolMap> Result) mutable {
+ if (Result) {
+ assert(Result->size() == 1 && "Unexpected result size");
+ assert(Result->count(SymbolName) && "Unexpected result value");
+ JITTargetAddress LandingAddr = (*Result)[SymbolName].getAddress();
+
+ if (auto Err = notifyResolved(TrampolineAddr, LandingAddr))
+ NotifyLandingResolved(reportCallThroughError(std::move(Err)));
+ else
+ NotifyLandingResolved(LandingAddr);
+ } else {
+ NotifyLandingResolved(reportCallThroughError(Result.takeError()));
+ }
+ };
+
+ ES.lookup(LookupKind::Static,
+ makeJITDylibSearchOrder(Entry->SourceJD,
+ JITDylibLookupFlags::MatchAllSymbols),
+ std::move(SLS), SymbolState::Ready, std::move(Callback),
+ NoDependenciesToRegister);
+}
+
+Expected<std::unique_ptr<LazyCallThroughManager>>
+createLocalLazyCallThroughManager(const Triple &T, ExecutionSession &ES,
+ JITTargetAddress ErrorHandlerAddr) {
+ switch (T.getArch()) {
+ default:
+ return make_error<StringError>(
+ std::string("No callback manager available for ") + T.str(),
+ inconvertibleErrorCode());
+
+ case Triple::aarch64:
+ case Triple::aarch64_32:
+ return LocalLazyCallThroughManager::Create<OrcAArch64>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::x86:
+ return LocalLazyCallThroughManager::Create<OrcI386>(ES, ErrorHandlerAddr);
+
+ case Triple::mips:
+ return LocalLazyCallThroughManager::Create<OrcMips32Be>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::mipsel:
+ return LocalLazyCallThroughManager::Create<OrcMips32Le>(ES,
+ ErrorHandlerAddr);
+
+ case Triple::mips64:
+ case Triple::mips64el:
+ return LocalLazyCallThroughManager::Create<OrcMips64>(ES, ErrorHandlerAddr);
+
+ case Triple::x86_64:
+ if (T.getOS() == Triple::OSType::Win32)
+ return LocalLazyCallThroughManager::Create<OrcX86_64_Win32>(
+ ES, ErrorHandlerAddr);
+ else
+ return LocalLazyCallThroughManager::Create<OrcX86_64_SysV>(
+ ES, ErrorHandlerAddr);
+ }
+}
+
+LazyReexportsMaterializationUnit::LazyReexportsMaterializationUnit(
+ LazyCallThroughManager &LCTManager, IndirectStubsManager &ISManager,
+ JITDylib &SourceJD, SymbolAliasMap CallableAliases, ImplSymbolMap *SrcJDLoc)
+ : MaterializationUnit(extractFlags(CallableAliases)),
+ LCTManager(LCTManager), ISManager(ISManager), SourceJD(SourceJD),
+ CallableAliases(std::move(CallableAliases)), AliaseeTable(SrcJDLoc) {}
+
+StringRef LazyReexportsMaterializationUnit::getName() const {
+ return "<Lazy Reexports>";
+}
+
+void LazyReexportsMaterializationUnit::materialize(
+ std::unique_ptr<MaterializationResponsibility> R) {
+ auto RequestedSymbols = R->getRequestedSymbols();
+
+ SymbolAliasMap RequestedAliases;
+ for (auto &RequestedSymbol : RequestedSymbols) {
+ auto I = CallableAliases.find(RequestedSymbol);
+ assert(I != CallableAliases.end() && "Symbol not found in alias map?");
+ RequestedAliases[I->first] = std::move(I->second);
+ CallableAliases.erase(I);
+ }
+
+ if (!CallableAliases.empty())
+ if (auto Err = R->replace(lazyReexports(LCTManager, ISManager, SourceJD,
+ std::move(CallableAliases),
+ AliaseeTable))) {
+ R->getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+
+ IndirectStubsManager::StubInitsMap StubInits;
+ for (auto &Alias : RequestedAliases) {
+
+ auto CallThroughTrampoline = LCTManager.getCallThroughTrampoline(
+ SourceJD, Alias.second.Aliasee,
+ [&ISManager = this->ISManager,
+ StubSym = Alias.first](JITTargetAddress ResolvedAddr) -> Error {
+ return ISManager.updatePointer(*StubSym, ResolvedAddr);
+ });
+
+ if (!CallThroughTrampoline) {
+ SourceJD.getExecutionSession().reportError(
+ CallThroughTrampoline.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ StubInits[*Alias.first] =
+ std::make_pair(*CallThroughTrampoline, Alias.second.AliasFlags);
+ }
+
+ if (AliaseeTable != nullptr && !RequestedAliases.empty())
+ AliaseeTable->trackImpls(RequestedAliases, &SourceJD);
+
+ if (auto Err = ISManager.createStubs(StubInits)) {
+ SourceJD.getExecutionSession().reportError(std::move(Err));
+ R->failMaterialization();
+ return;
+ }
+
+ SymbolMap Stubs;
+ for (auto &Alias : RequestedAliases)
+ Stubs[Alias.first] = ISManager.findStub(*Alias.first, false);
+
+ // No registered dependencies, so these calls cannot fail.
+ cantFail(R->notifyResolved(Stubs));
+ cantFail(R->notifyEmitted());
+}
+
+void LazyReexportsMaterializationUnit::discard(const JITDylib &JD,
+ const SymbolStringPtr &Name) {
+ assert(CallableAliases.count(Name) &&
+ "Symbol not covered by this MaterializationUnit");
+ CallableAliases.erase(Name);
+}
+
+MaterializationUnit::Interface
+LazyReexportsMaterializationUnit::extractFlags(const SymbolAliasMap &Aliases) {
+ SymbolFlagsMap SymbolFlags;
+ for (auto &KV : Aliases) {
+ assert(KV.second.AliasFlags.isCallable() &&
+ "Lazy re-exports must be callable symbols");
+ SymbolFlags[KV.first] = KV.second.AliasFlags;
+ }
+ return MaterializationUnit::Interface(std::move(SymbolFlags), nullptr);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp
new file mode 100644
index 0000000000..44cb78c773
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/LookupAndRecordAddrs.cpp
@@ -0,0 +1,82 @@
+//===------- LookupAndRecordAddrs.h - Symbol lookup support utility -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+
+#include <future>
+
+namespace llvm {
+namespace orc {
+
+void lookupAndRecordAddrs(
+ unique_function<void(Error)> OnRecorded, ExecutionSession &ES, LookupKind K,
+ const JITDylibSearchOrder &SearchOrder,
+ std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
+ SymbolLookupFlags LookupFlags) {
+
+ SymbolLookupSet Symbols;
+ for (auto &KV : Pairs)
+ Symbols.add(KV.first, LookupFlags);
+
+ ES.lookup(
+ K, SearchOrder, Symbols, SymbolState::Ready,
+ [Pairs = std::move(Pairs),
+ OnRec = std::move(OnRecorded)](Expected<SymbolMap> Result) mutable {
+ if (!Result)
+ return OnRec(Result.takeError());
+ for (auto &KV : Pairs) {
+ auto I = Result->find(KV.first);
+ KV.second->setValue((I != Result->end()) ? I->second.getAddress()
+ : 0);
+ }
+ OnRec(Error::success());
+ },
+ NoDependenciesToRegister);
+}
+
+Error lookupAndRecordAddrs(
+ ExecutionSession &ES, LookupKind K, const JITDylibSearchOrder &SearchOrder,
+ std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
+ SymbolLookupFlags LookupFlags) {
+
+ std::promise<MSVCPError> ResultP;
+ auto ResultF = ResultP.get_future();
+ lookupAndRecordAddrs([&](Error Err) { ResultP.set_value(std::move(Err)); },
+ ES, K, SearchOrder, Pairs, LookupFlags);
+ return ResultF.get();
+}
+
+Error lookupAndRecordAddrs(
+ ExecutorProcessControl &EPC, tpctypes::DylibHandle H,
+ std::vector<std::pair<SymbolStringPtr, ExecutorAddr *>> Pairs,
+ SymbolLookupFlags LookupFlags) {
+
+ SymbolLookupSet Symbols;
+ for (auto &KV : Pairs)
+ Symbols.add(KV.first, LookupFlags);
+
+ ExecutorProcessControl::LookupRequest LR(H, Symbols);
+ auto Result = EPC.lookupSymbols(LR);
+ if (!Result)
+ return Result.takeError();
+
+ if (Result->size() != 1)
+ return make_error<StringError>("Error in lookup result",
+ inconvertibleErrorCode());
+ if (Result->front().size() != Pairs.size())
+ return make_error<StringError>("Error in lookup result elements",
+ inconvertibleErrorCode());
+
+ for (unsigned I = 0; I != Pairs.size(); ++I)
+ Pairs[I].second->setValue(Result->front()[I]);
+
+ return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/MachOPlatform.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/MachOPlatform.cpp
new file mode 100644
index 0000000000..a364719855
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/MachOPlatform.cpp
@@ -0,0 +1,988 @@
+//===------ MachOPlatform.cpp - Utilities for executing MachO in Orc ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/MachOPlatform.h"
+
+#include "llvm/BinaryFormat/MachO.h"
+#include "llvm/ExecutionEngine/JITLink/x86_64.h"
+#include "llvm/ExecutionEngine/Orc/DebugUtils.h"
+#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
+#include "llvm/ExecutionEngine/Orc/LookupAndRecordAddrs.h"
+#include "llvm/Support/BinaryByteStream.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::shared;
+
+namespace {
+
+class MachOHeaderMaterializationUnit : public MaterializationUnit {
+public:
+ MachOHeaderMaterializationUnit(MachOPlatform &MOP,
+ const SymbolStringPtr &HeaderStartSymbol)
+ : MaterializationUnit(createHeaderInterface(MOP, HeaderStartSymbol)),
+ MOP(MOP) {}
+
+ StringRef getName() const override { return "MachOHeaderMU"; }
+
+ void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
+ unsigned PointerSize;
+ support::endianness Endianness;
+ const auto &TT =
+ MOP.getExecutionSession().getExecutorProcessControl().getTargetTriple();
+
+ switch (TT.getArch()) {
+ case Triple::aarch64:
+ case Triple::x86_64:
+ PointerSize = 8;
+ Endianness = support::endianness::little;
+ break;
+ default:
+ llvm_unreachable("Unrecognized architecture");
+ }
+
+ auto G = std::make_unique<jitlink::LinkGraph>(
+ "<MachOHeaderMU>", TT, PointerSize, Endianness,
+ jitlink::getGenericEdgeKindName);
+ auto &HeaderSection = G->createSection("__header", jitlink::MemProt::Read);
+ auto &HeaderBlock = createHeaderBlock(*G, HeaderSection);
+
+ // Init symbol is header-start symbol.
+ G->addDefinedSymbol(HeaderBlock, 0, *R->getInitializerSymbol(),
+ HeaderBlock.getSize(), jitlink::Linkage::Strong,
+ jitlink::Scope::Default, false, true);
+ for (auto &HS : AdditionalHeaderSymbols)
+ G->addDefinedSymbol(HeaderBlock, HS.Offset, HS.Name,
+ HeaderBlock.getSize(), jitlink::Linkage::Strong,
+ jitlink::Scope::Default, false, true);
+
+ MOP.getObjectLinkingLayer().emit(std::move(R), std::move(G));
+ }
+
+ void discard(const JITDylib &JD, const SymbolStringPtr &Sym) override {}
+
+private:
+ struct HeaderSymbol {
+ const char *Name;
+ uint64_t Offset;
+ };
+
+ static constexpr HeaderSymbol AdditionalHeaderSymbols[] = {
+ {"___mh_executable_header", 0}};
+
+ static jitlink::Block &createHeaderBlock(jitlink::LinkGraph &G,
+ jitlink::Section &HeaderSection) {
+ MachO::mach_header_64 Hdr;
+ Hdr.magic = MachO::MH_MAGIC_64;
+ switch (G.getTargetTriple().getArch()) {
+ case Triple::aarch64:
+ Hdr.cputype = MachO::CPU_TYPE_ARM64;
+ Hdr.cpusubtype = MachO::CPU_SUBTYPE_ARM64_ALL;
+ break;
+ case Triple::x86_64:
+ Hdr.cputype = MachO::CPU_TYPE_X86_64;
+ Hdr.cpusubtype = MachO::CPU_SUBTYPE_X86_64_ALL;
+ break;
+ default:
+ llvm_unreachable("Unrecognized architecture");
+ }
+ Hdr.filetype = MachO::MH_DYLIB; // Custom file type?
+ Hdr.ncmds = 0;
+ Hdr.sizeofcmds = 0;
+ Hdr.flags = 0;
+ Hdr.reserved = 0;
+
+ if (G.getEndianness() != support::endian::system_endianness())
+ MachO::swapStruct(Hdr);
+
+ auto HeaderContent = G.allocateString(
+ StringRef(reinterpret_cast<const char *>(&Hdr), sizeof(Hdr)));
+
+ return G.createContentBlock(HeaderSection, HeaderContent, ExecutorAddr(), 8,
+ 0);
+ }
+
+ static MaterializationUnit::Interface
+ createHeaderInterface(MachOPlatform &MOP,
+ const SymbolStringPtr &HeaderStartSymbol) {
+ SymbolFlagsMap HeaderSymbolFlags;
+
+ HeaderSymbolFlags[HeaderStartSymbol] = JITSymbolFlags::Exported;
+ for (auto &HS : AdditionalHeaderSymbols)
+ HeaderSymbolFlags[MOP.getExecutionSession().intern(HS.Name)] =
+ JITSymbolFlags::Exported;
+
+ return MaterializationUnit::Interface(std::move(HeaderSymbolFlags),
+ HeaderStartSymbol);
+ }
+
+ MachOPlatform &MOP;
+};
+
+constexpr MachOHeaderMaterializationUnit::HeaderSymbol
+ MachOHeaderMaterializationUnit::AdditionalHeaderSymbols[];
+
+StringRef EHFrameSectionName = "__TEXT,__eh_frame";
+StringRef ModInitFuncSectionName = "__DATA,__mod_init_func";
+StringRef ObjCClassListSectionName = "__DATA,__objc_classlist";
+StringRef ObjCImageInfoSectionName = "__DATA,__objc_image_info";
+StringRef ObjCSelRefsSectionName = "__DATA,__objc_selrefs";
+StringRef Swift5ProtoSectionName = "__TEXT,__swift5_proto";
+StringRef Swift5ProtosSectionName = "__TEXT,__swift5_protos";
+StringRef Swift5TypesSectionName = "__TEXT,__swift5_types";
+StringRef ThreadBSSSectionName = "__DATA,__thread_bss";
+StringRef ThreadDataSectionName = "__DATA,__thread_data";
+StringRef ThreadVarsSectionName = "__DATA,__thread_vars";
+
+StringRef InitSectionNames[] = {
+ ModInitFuncSectionName, ObjCSelRefsSectionName, ObjCClassListSectionName,
+ Swift5ProtosSectionName, Swift5ProtoSectionName, Swift5TypesSectionName};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+Expected<std::unique_ptr<MachOPlatform>>
+MachOPlatform::Create(ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD, const char *OrcRuntimePath,
+ Optional<SymbolAliasMap> RuntimeAliases) {
+
+ auto &EPC = ES.getExecutorProcessControl();
+
+ // If the target is not supported then bail out immediately.
+ if (!supportedTarget(EPC.getTargetTriple()))
+ return make_error<StringError>("Unsupported MachOPlatform triple: " +
+ EPC.getTargetTriple().str(),
+ inconvertibleErrorCode());
+
+ // Create default aliases if the caller didn't supply any.
+ if (!RuntimeAliases)
+ RuntimeAliases = standardPlatformAliases(ES);
+
+ // Define the aliases.
+ if (auto Err = PlatformJD.define(symbolAliases(std::move(*RuntimeAliases))))
+ return std::move(Err);
+
+ // Add JIT-dispatch function support symbols.
+ if (auto Err = PlatformJD.define(absoluteSymbols(
+ {{ES.intern("___orc_rt_jit_dispatch"),
+ {EPC.getJITDispatchInfo().JITDispatchFunction.getValue(),
+ JITSymbolFlags::Exported}},
+ {ES.intern("___orc_rt_jit_dispatch_ctx"),
+ {EPC.getJITDispatchInfo().JITDispatchContext.getValue(),
+ JITSymbolFlags::Exported}}})))
+ return std::move(Err);
+
+ // Create a generator for the ORC runtime archive.
+ auto OrcRuntimeArchiveGenerator = StaticLibraryDefinitionGenerator::Load(
+ ObjLinkingLayer, OrcRuntimePath, EPC.getTargetTriple());
+ if (!OrcRuntimeArchiveGenerator)
+ return OrcRuntimeArchiveGenerator.takeError();
+
+ // Create the instance.
+ Error Err = Error::success();
+ auto P = std::unique_ptr<MachOPlatform>(
+ new MachOPlatform(ES, ObjLinkingLayer, PlatformJD,
+ std::move(*OrcRuntimeArchiveGenerator), Err));
+ if (Err)
+ return std::move(Err);
+ return std::move(P);
+}
+
+Error MachOPlatform::setupJITDylib(JITDylib &JD) {
+ return JD.define(std::make_unique<MachOHeaderMaterializationUnit>(
+ *this, MachOHeaderStartSymbol));
+}
+
+Error MachOPlatform::teardownJITDylib(JITDylib &JD) { return Error::success(); }
+
+Error MachOPlatform::notifyAdding(ResourceTracker &RT,
+ const MaterializationUnit &MU) {
+ auto &JD = RT.getJITDylib();
+ const auto &InitSym = MU.getInitializerSymbol();
+ if (!InitSym)
+ return Error::success();
+
+ RegisteredInitSymbols[&JD].add(InitSym,
+ SymbolLookupFlags::WeaklyReferencedSymbol);
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Registered init symbol " << *InitSym << " for MU "
+ << MU.getName() << "\n";
+ });
+ return Error::success();
+}
+
+Error MachOPlatform::notifyRemoving(ResourceTracker &RT) {
+ llvm_unreachable("Not supported yet");
+}
+
+static void addAliases(ExecutionSession &ES, SymbolAliasMap &Aliases,
+ ArrayRef<std::pair<const char *, const char *>> AL) {
+ for (auto &KV : AL) {
+ auto AliasName = ES.intern(KV.first);
+ assert(!Aliases.count(AliasName) && "Duplicate symbol name in alias map");
+ Aliases[std::move(AliasName)] = {ES.intern(KV.second),
+ JITSymbolFlags::Exported};
+ }
+}
+
+SymbolAliasMap MachOPlatform::standardPlatformAliases(ExecutionSession &ES) {
+ SymbolAliasMap Aliases;
+ addAliases(ES, Aliases, requiredCXXAliases());
+ addAliases(ES, Aliases, standardRuntimeUtilityAliases());
+ return Aliases;
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+MachOPlatform::requiredCXXAliases() {
+ static const std::pair<const char *, const char *> RequiredCXXAliases[] = {
+ {"___cxa_atexit", "___orc_rt_macho_cxa_atexit"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(RequiredCXXAliases);
+}
+
+ArrayRef<std::pair<const char *, const char *>>
+MachOPlatform::standardRuntimeUtilityAliases() {
+ static const std::pair<const char *, const char *>
+ StandardRuntimeUtilityAliases[] = {
+ {"___orc_rt_run_program", "___orc_rt_macho_run_program"},
+ {"___orc_rt_log_error", "___orc_rt_log_error_to_stderr"}};
+
+ return ArrayRef<std::pair<const char *, const char *>>(
+ StandardRuntimeUtilityAliases);
+}
+
+bool MachOPlatform::isInitializerSection(StringRef SegName,
+ StringRef SectName) {
+ for (auto &Name : InitSectionNames) {
+ if (Name.startswith(SegName) && Name.substr(7) == SectName)
+ return true;
+ }
+ return false;
+}
+
+bool MachOPlatform::supportedTarget(const Triple &TT) {
+ switch (TT.getArch()) {
+ case Triple::aarch64:
+ case Triple::x86_64:
+ return true;
+ default:
+ return false;
+ }
+}
+
+MachOPlatform::MachOPlatform(
+ ExecutionSession &ES, ObjectLinkingLayer &ObjLinkingLayer,
+ JITDylib &PlatformJD,
+ std::unique_ptr<DefinitionGenerator> OrcRuntimeGenerator, Error &Err)
+ : ES(ES), ObjLinkingLayer(ObjLinkingLayer),
+ MachOHeaderStartSymbol(ES.intern("___dso_handle")) {
+ ErrorAsOutParameter _(&Err);
+
+ ObjLinkingLayer.addPlugin(std::make_unique<MachOPlatformPlugin>(*this));
+
+ PlatformJD.addGenerator(std::move(OrcRuntimeGenerator));
+
+ // Force linking of eh-frame registration functions.
+ if (auto Err2 = lookupAndRecordAddrs(
+ ES, LookupKind::Static, makeJITDylibSearchOrder(&PlatformJD),
+ {{ES.intern("___orc_rt_macho_register_ehframe_section"),
+ &orc_rt_macho_register_ehframe_section},
+ {ES.intern("___orc_rt_macho_deregister_ehframe_section"),
+ &orc_rt_macho_deregister_ehframe_section}})) {
+ Err = std::move(Err2);
+ return;
+ }
+
+ State = BootstrapPhase2;
+
+ // PlatformJD hasn't been 'set-up' by the platform yet (since we're creating
+ // the platform now), so set it up.
+ if (auto E2 = setupJITDylib(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ RegisteredInitSymbols[&PlatformJD].add(
+ MachOHeaderStartSymbol, SymbolLookupFlags::WeaklyReferencedSymbol);
+
+ // Associate wrapper function tags with JIT-side function implementations.
+ if (auto E2 = associateRuntimeSupportFunctions(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ // Lookup addresses of runtime functions callable by the platform,
+ // call the platform bootstrap function to initialize the platform-state
+ // object in the executor.
+ if (auto E2 = bootstrapMachORuntime(PlatformJD)) {
+ Err = std::move(E2);
+ return;
+ }
+
+ State = Initialized;
+}
+
+Error MachOPlatform::associateRuntimeSupportFunctions(JITDylib &PlatformJD) {
+ ExecutionSession::JITDispatchHandlerAssociationMap WFs;
+
+ using GetInitializersSPSSig =
+ SPSExpected<SPSMachOJITDylibInitializerSequence>(SPSString);
+ WFs[ES.intern("___orc_rt_macho_get_initializers_tag")] =
+ ES.wrapAsyncWithSPS<GetInitializersSPSSig>(
+ this, &MachOPlatform::rt_getInitializers);
+
+ using GetDeinitializersSPSSig =
+ SPSExpected<SPSMachOJITDylibDeinitializerSequence>(SPSExecutorAddr);
+ WFs[ES.intern("___orc_rt_macho_get_deinitializers_tag")] =
+ ES.wrapAsyncWithSPS<GetDeinitializersSPSSig>(
+ this, &MachOPlatform::rt_getDeinitializers);
+
+ using LookupSymbolSPSSig =
+ SPSExpected<SPSExecutorAddr>(SPSExecutorAddr, SPSString);
+ WFs[ES.intern("___orc_rt_macho_symbol_lookup_tag")] =
+ ES.wrapAsyncWithSPS<LookupSymbolSPSSig>(this,
+ &MachOPlatform::rt_lookupSymbol);
+
+ return ES.registerJITDispatchHandlers(PlatformJD, std::move(WFs));
+}
+
+void MachOPlatform::getInitializersBuildSequencePhase(
+ SendInitializerSequenceFn SendResult, JITDylib &JD,
+ std::vector<JITDylibSP> DFSLinkOrder) {
+ MachOJITDylibInitializerSequence FullInitSeq;
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ for (auto &InitJD : reverse(DFSLinkOrder)) {
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Appending inits for \"" << InitJD->getName()
+ << "\" to sequence\n";
+ });
+ auto ISItr = InitSeqs.find(InitJD.get());
+ if (ISItr != InitSeqs.end()) {
+ FullInitSeq.emplace_back(std::move(ISItr->second));
+ InitSeqs.erase(ISItr);
+ }
+ }
+ }
+
+ SendResult(std::move(FullInitSeq));
+}
+
+void MachOPlatform::getInitializersLookupPhase(
+ SendInitializerSequenceFn SendResult, JITDylib &JD) {
+
+ auto DFSLinkOrder = JD.getDFSLinkOrder();
+ if (!DFSLinkOrder) {
+ SendResult(DFSLinkOrder.takeError());
+ return;
+ }
+
+ DenseMap<JITDylib *, SymbolLookupSet> NewInitSymbols;
+ ES.runSessionLocked([&]() {
+ for (auto &InitJD : *DFSLinkOrder) {
+ auto RISItr = RegisteredInitSymbols.find(InitJD.get());
+ if (RISItr != RegisteredInitSymbols.end()) {
+ NewInitSymbols[InitJD.get()] = std::move(RISItr->second);
+ RegisteredInitSymbols.erase(RISItr);
+ }
+ }
+ });
+
+ // If there are no further init symbols to look up then move on to the next
+ // phase.
+ if (NewInitSymbols.empty()) {
+ getInitializersBuildSequencePhase(std::move(SendResult), JD,
+ std::move(*DFSLinkOrder));
+ return;
+ }
+
+ // Otherwise issue a lookup and re-run this phase when it completes.
+ lookupInitSymbolsAsync(
+ [this, SendResult = std::move(SendResult), &JD](Error Err) mutable {
+ if (Err)
+ SendResult(std::move(Err));
+ else
+ getInitializersLookupPhase(std::move(SendResult), JD);
+ },
+ ES, std::move(NewInitSymbols));
+}
+
+void MachOPlatform::rt_getInitializers(SendInitializerSequenceFn SendResult,
+ StringRef JDName) {
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform::rt_getInitializers(\"" << JDName << "\")\n";
+ });
+
+ JITDylib *JD = ES.getJITDylibByName(JDName);
+ if (!JD) {
+ LLVM_DEBUG({
+ dbgs() << " No such JITDylib \"" << JDName << "\". Sending error.\n";
+ });
+ SendResult(make_error<StringError>("No JITDylib named " + JDName,
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ getInitializersLookupPhase(std::move(SendResult), *JD);
+}
+
+void MachOPlatform::rt_getDeinitializers(SendDeinitializerSequenceFn SendResult,
+ ExecutorAddr Handle) {
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform::rt_getDeinitializers(\""
+ << formatv("{0:x}", Handle.getValue()) << "\")\n";
+ });
+
+ JITDylib *JD = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HeaderAddrToJITDylib.find(Handle);
+ if (I != HeaderAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ if (!JD) {
+ LLVM_DEBUG({
+ dbgs() << " No JITDylib for handle "
+ << formatv("{0:x}", Handle.getValue()) << "\n";
+ });
+ SendResult(make_error<StringError>("No JITDylib associated with handle " +
+ formatv("{0:x}", Handle.getValue()),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ SendResult(MachOJITDylibDeinitializerSequence());
+}
+
+void MachOPlatform::rt_lookupSymbol(SendSymbolAddressFn SendResult,
+ ExecutorAddr Handle, StringRef SymbolName) {
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform::rt_lookupSymbol(\""
+ << formatv("{0:x}", Handle.getValue()) << "\")\n";
+ });
+
+ JITDylib *JD = nullptr;
+
+ {
+ std::lock_guard<std::mutex> Lock(PlatformMutex);
+ auto I = HeaderAddrToJITDylib.find(Handle);
+ if (I != HeaderAddrToJITDylib.end())
+ JD = I->second;
+ }
+
+ if (!JD) {
+ LLVM_DEBUG({
+ dbgs() << " No JITDylib for handle "
+ << formatv("{0:x}", Handle.getValue()) << "\n";
+ });
+ SendResult(make_error<StringError>("No JITDylib associated with handle " +
+ formatv("{0:x}", Handle.getValue()),
+ inconvertibleErrorCode()));
+ return;
+ }
+
+ // Use functor class to work around XL build compiler issue on AIX.
+ class RtLookupNotifyComplete {
+ public:
+ RtLookupNotifyComplete(SendSymbolAddressFn &&SendResult)
+ : SendResult(std::move(SendResult)) {}
+ void operator()(Expected<SymbolMap> Result) {
+ if (Result) {
+ assert(Result->size() == 1 && "Unexpected result map count");
+ SendResult(ExecutorAddr(Result->begin()->second.getAddress()));
+ } else {
+ SendResult(Result.takeError());
+ }
+ }
+
+ private:
+ SendSymbolAddressFn SendResult;
+ };
+
+ // FIXME: Proper mangling.
+ auto MangledName = ("_" + SymbolName).str();
+ ES.lookup(
+ LookupKind::DLSym, {{JD, JITDylibLookupFlags::MatchExportedSymbolsOnly}},
+ SymbolLookupSet(ES.intern(MangledName)), SymbolState::Ready,
+ RtLookupNotifyComplete(std::move(SendResult)), NoDependenciesToRegister);
+}
+
+Error MachOPlatform::bootstrapMachORuntime(JITDylib &PlatformJD) {
+ if (auto Err = lookupAndRecordAddrs(
+ ES, LookupKind::Static, makeJITDylibSearchOrder(&PlatformJD),
+ {{ES.intern("___orc_rt_macho_platform_bootstrap"),
+ &orc_rt_macho_platform_bootstrap},
+ {ES.intern("___orc_rt_macho_platform_shutdown"),
+ &orc_rt_macho_platform_shutdown},
+ {ES.intern("___orc_rt_macho_register_thread_data_section"),
+ &orc_rt_macho_register_thread_data_section},
+ {ES.intern("___orc_rt_macho_deregister_thread_data_section"),
+ &orc_rt_macho_deregister_thread_data_section},
+ {ES.intern("___orc_rt_macho_create_pthread_key"),
+ &orc_rt_macho_create_pthread_key}}))
+ return Err;
+
+ return ES.callSPSWrapper<void()>(orc_rt_macho_platform_bootstrap);
+}
+
+Error MachOPlatform::registerInitInfo(
+ JITDylib &JD, ExecutorAddr ObjCImageInfoAddr,
+ ArrayRef<jitlink::Section *> InitSections) {
+
+ std::unique_lock<std::mutex> Lock(PlatformMutex);
+
+ MachOJITDylibInitializers *InitSeq = nullptr;
+ {
+ auto I = InitSeqs.find(&JD);
+ if (I == InitSeqs.end()) {
+ // If there's no init sequence entry yet then we need to look up the
+ // header symbol to force creation of one.
+ Lock.unlock();
+
+ auto SearchOrder =
+ JD.withLinkOrderDo([](const JITDylibSearchOrder &SO) { return SO; });
+ if (auto Err = ES.lookup(SearchOrder, MachOHeaderStartSymbol).takeError())
+ return Err;
+
+ Lock.lock();
+ I = InitSeqs.find(&JD);
+ assert(I != InitSeqs.end() &&
+ "Entry missing after header symbol lookup?");
+ }
+ InitSeq = &I->second;
+ }
+
+ InitSeq->ObjCImageInfoAddress = ObjCImageInfoAddr;
+
+ for (auto *Sec : InitSections) {
+ // FIXME: Avoid copy here.
+ jitlink::SectionRange R(*Sec);
+ InitSeq->InitSections[Sec->getName()].push_back(
+ {ExecutorAddr(R.getStart()), ExecutorAddr(R.getEnd())});
+ }
+
+ return Error::success();
+}
+
+Expected<uint64_t> MachOPlatform::createPThreadKey() {
+ if (!orc_rt_macho_create_pthread_key)
+ return make_error<StringError>(
+ "Attempting to create pthread key in target, but runtime support has "
+ "not been loaded yet",
+ inconvertibleErrorCode());
+
+ Expected<uint64_t> Result(0);
+ if (auto Err = ES.callSPSWrapper<SPSExpected<uint64_t>(void)>(
+ orc_rt_macho_create_pthread_key, Result))
+ return std::move(Err);
+ return Result;
+}
+
+void MachOPlatform::MachOPlatformPlugin::modifyPassConfig(
+ MaterializationResponsibility &MR, jitlink::LinkGraph &LG,
+ jitlink::PassConfiguration &Config) {
+
+ auto PS = MP.State.load();
+
+ // --- Handle Initializers ---
+ if (auto InitSymbol = MR.getInitializerSymbol()) {
+
+ // If the initializer symbol is the MachOHeader start symbol then just
+ // register it and then bail out -- the header materialization unit
+ // definitely doesn't need any other passes.
+ if (InitSymbol == MP.MachOHeaderStartSymbol) {
+ Config.PostAllocationPasses.push_back([this, &MR](jitlink::LinkGraph &G) {
+ return associateJITDylibHeaderSymbol(G, MR);
+ });
+ return;
+ }
+
+ // If the object contains an init symbol other than the header start symbol
+ // then add passes to preserve, process and register the init
+ // sections/symbols.
+ Config.PrePrunePasses.push_back([this, &MR](jitlink::LinkGraph &G) {
+ if (auto Err = preserveInitSections(G, MR))
+ return Err;
+ return processObjCImageInfo(G, MR);
+ });
+
+ Config.PostFixupPasses.push_back(
+ [this, &JD = MR.getTargetJITDylib()](jitlink::LinkGraph &G) {
+ return registerInitSections(G, JD);
+ });
+ }
+
+ // --- Add passes for eh-frame and TLV support ---
+ if (PS == MachOPlatform::BootstrapPhase1) {
+ Config.PostFixupPasses.push_back(
+ [this](jitlink::LinkGraph &G) { return registerEHSectionsPhase1(G); });
+ return;
+ }
+
+ // Insert TLV lowering at the start of the PostPrunePasses, since we want
+ // it to run before GOT/PLT lowering.
+ Config.PostPrunePasses.insert(
+ Config.PostPrunePasses.begin(),
+ [this, &JD = MR.getTargetJITDylib()](jitlink::LinkGraph &G) {
+ return fixTLVSectionsAndEdges(G, JD);
+ });
+
+ // Add a pass to register the final addresses of the eh-frame and TLV sections
+ // with the runtime.
+ Config.PostFixupPasses.push_back(
+ [this](jitlink::LinkGraph &G) { return registerEHAndTLVSections(G); });
+}
+
+ObjectLinkingLayer::Plugin::SyntheticSymbolDependenciesMap
+MachOPlatform::MachOPlatformPlugin::getSyntheticSymbolDependencies(
+ MaterializationResponsibility &MR) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ auto I = InitSymbolDeps.find(&MR);
+ if (I != InitSymbolDeps.end()) {
+ SyntheticSymbolDependenciesMap Result;
+ Result[MR.getInitializerSymbol()] = std::move(I->second);
+ InitSymbolDeps.erase(&MR);
+ return Result;
+ }
+ return SyntheticSymbolDependenciesMap();
+}
+
+Error MachOPlatform::MachOPlatformPlugin::associateJITDylibHeaderSymbol(
+ jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+
+ auto I = llvm::find_if(G.defined_symbols(), [this](jitlink::Symbol *Sym) {
+ return Sym->getName() == *MP.MachOHeaderStartSymbol;
+ });
+ assert(I != G.defined_symbols().end() && "Missing MachO header start symbol");
+
+ auto &JD = MR.getTargetJITDylib();
+ std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+ auto HeaderAddr = (*I)->getAddress();
+ MP.HeaderAddrToJITDylib[HeaderAddr] = &JD;
+ assert(!MP.InitSeqs.count(&JD) && "InitSeq entry for JD already exists");
+ MP.InitSeqs.insert(
+ std::make_pair(&JD, MachOJITDylibInitializers(JD.getName(), HeaderAddr)));
+ return Error::success();
+}
+
+Error MachOPlatform::MachOPlatformPlugin::preserveInitSections(
+ jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+
+ JITLinkSymbolSet InitSectionSymbols;
+ for (auto &InitSectionName : InitSectionNames) {
+ // Skip non-init sections.
+ auto *InitSection = G.findSectionByName(InitSectionName);
+ if (!InitSection)
+ continue;
+
+ // Make a pass over live symbols in the section: those blocks are already
+ // preserved.
+ DenseSet<jitlink::Block *> AlreadyLiveBlocks;
+ for (auto &Sym : InitSection->symbols()) {
+ auto &B = Sym->getBlock();
+ if (Sym->isLive() && Sym->getOffset() == 0 &&
+ Sym->getSize() == B.getSize() && !AlreadyLiveBlocks.count(&B)) {
+ InitSectionSymbols.insert(Sym);
+ AlreadyLiveBlocks.insert(&B);
+ }
+ }
+
+ // Add anonymous symbols to preserve any not-already-preserved blocks.
+ for (auto *B : InitSection->blocks())
+ if (!AlreadyLiveBlocks.count(B))
+ InitSectionSymbols.insert(
+ &G.addAnonymousSymbol(*B, 0, B->getSize(), false, true));
+ }
+
+ if (!InitSectionSymbols.empty()) {
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+ InitSymbolDeps[&MR] = std::move(InitSectionSymbols);
+ }
+
+ return Error::success();
+}
+
+Error MachOPlatform::MachOPlatformPlugin::processObjCImageInfo(
+ jitlink::LinkGraph &G, MaterializationResponsibility &MR) {
+
+ // If there's an ObjC imagine info then either
+ // (1) It's the first __objc_imageinfo we've seen in this JITDylib. In
+ // this case we name and record it.
+ // OR
+ // (2) We already have a recorded __objc_imageinfo for this JITDylib,
+ // in which case we just verify it.
+ auto *ObjCImageInfo = G.findSectionByName(ObjCImageInfoSectionName);
+ if (!ObjCImageInfo)
+ return Error::success();
+
+ auto ObjCImageInfoBlocks = ObjCImageInfo->blocks();
+
+ // Check that the section is not empty if present.
+ if (llvm::empty(ObjCImageInfoBlocks))
+ return make_error<StringError>("Empty " + ObjCImageInfoSectionName +
+ " section in " + G.getName(),
+ inconvertibleErrorCode());
+
+ // Check that there's only one block in the section.
+ if (std::next(ObjCImageInfoBlocks.begin()) != ObjCImageInfoBlocks.end())
+ return make_error<StringError>("Multiple blocks in " +
+ ObjCImageInfoSectionName +
+ " section in " + G.getName(),
+ inconvertibleErrorCode());
+
+ // Check that the __objc_imageinfo section is unreferenced.
+ // FIXME: We could optimize this check if Symbols had a ref-count.
+ for (auto &Sec : G.sections()) {
+ if (&Sec != ObjCImageInfo)
+ for (auto *B : Sec.blocks())
+ for (auto &E : B->edges())
+ if (E.getTarget().isDefined() &&
+ &E.getTarget().getBlock().getSection() == ObjCImageInfo)
+ return make_error<StringError>(ObjCImageInfoSectionName +
+ " is referenced within file " +
+ G.getName(),
+ inconvertibleErrorCode());
+ }
+
+ auto &ObjCImageInfoBlock = **ObjCImageInfoBlocks.begin();
+ auto *ObjCImageInfoData = ObjCImageInfoBlock.getContent().data();
+ auto Version = support::endian::read32(ObjCImageInfoData, G.getEndianness());
+ auto Flags =
+ support::endian::read32(ObjCImageInfoData + 4, G.getEndianness());
+
+ // Lock the mutex while we verify / update the ObjCImageInfos map.
+ std::lock_guard<std::mutex> Lock(PluginMutex);
+
+ auto ObjCImageInfoItr = ObjCImageInfos.find(&MR.getTargetJITDylib());
+ if (ObjCImageInfoItr != ObjCImageInfos.end()) {
+ // We've already registered an __objc_imageinfo section. Verify the
+ // content of this new section matches, then delete it.
+ if (ObjCImageInfoItr->second.first != Version)
+ return make_error<StringError>(
+ "ObjC version in " + G.getName() +
+ " does not match first registered version",
+ inconvertibleErrorCode());
+ if (ObjCImageInfoItr->second.second != Flags)
+ return make_error<StringError>("ObjC flags in " + G.getName() +
+ " do not match first registered flags",
+ inconvertibleErrorCode());
+
+ // __objc_imageinfo is valid. Delete the block.
+ for (auto *S : ObjCImageInfo->symbols())
+ G.removeDefinedSymbol(*S);
+ G.removeBlock(ObjCImageInfoBlock);
+ } else {
+ // We haven't registered an __objc_imageinfo section yet. Register and
+ // move on. The section should already be marked no-dead-strip.
+ ObjCImageInfos[&MR.getTargetJITDylib()] = std::make_pair(Version, Flags);
+ }
+
+ return Error::success();
+}
+
+Error MachOPlatform::MachOPlatformPlugin::registerInitSections(
+ jitlink::LinkGraph &G, JITDylib &JD) {
+
+ ExecutorAddr ObjCImageInfoAddr;
+ SmallVector<jitlink::Section *> InitSections;
+
+ if (auto *ObjCImageInfoSec = G.findSectionByName(ObjCImageInfoSectionName)) {
+ if (auto Addr = jitlink::SectionRange(*ObjCImageInfoSec).getStart())
+ ObjCImageInfoAddr = Addr;
+ }
+
+ for (auto InitSectionName : InitSectionNames)
+ if (auto *Sec = G.findSectionByName(InitSectionName))
+ InitSections.push_back(Sec);
+
+ // Dump the scraped inits.
+ LLVM_DEBUG({
+ dbgs() << "MachOPlatform: Scraped " << G.getName() << " init sections:\n";
+ if (ObjCImageInfoAddr)
+ dbgs() << " " << ObjCImageInfoSectionName << ": "
+ << formatv("{0:x}", ObjCImageInfoAddr.getValue()) << "\n";
+ for (auto *Sec : InitSections) {
+ jitlink::SectionRange R(*Sec);
+ dbgs() << " " << Sec->getName() << ": "
+ << formatv("[ {0:x} -- {1:x} ]", R.getStart(), R.getEnd()) << "\n";
+ }
+ });
+
+ return MP.registerInitInfo(JD, ObjCImageInfoAddr, InitSections);
+}
+
+Error MachOPlatform::MachOPlatformPlugin::fixTLVSectionsAndEdges(
+ jitlink::LinkGraph &G, JITDylib &JD) {
+
+ // Rename external references to __tlv_bootstrap to ___orc_rt_tlv_get_addr.
+ for (auto *Sym : G.external_symbols())
+ if (Sym->getName() == "__tlv_bootstrap") {
+ Sym->setName("___orc_rt_macho_tlv_get_addr");
+ break;
+ }
+
+ // Store key in __thread_vars struct fields.
+ if (auto *ThreadDataSec = G.findSectionByName(ThreadVarsSectionName)) {
+ Optional<uint64_t> Key;
+ {
+ std::lock_guard<std::mutex> Lock(MP.PlatformMutex);
+ auto I = MP.JITDylibToPThreadKey.find(&JD);
+ if (I != MP.JITDylibToPThreadKey.end())
+ Key = I->second;
+ }
+
+ if (!Key) {
+ if (auto KeyOrErr = MP.createPThreadKey())
+ Key = *KeyOrErr;
+ else
+ return KeyOrErr.takeError();
+ }
+
+ uint64_t PlatformKeyBits =
+ support::endian::byte_swap(*Key, G.getEndianness());
+
+ for (auto *B : ThreadDataSec->blocks()) {
+ if (B->getSize() != 3 * G.getPointerSize())
+ return make_error<StringError>("__thread_vars block at " +
+ formatv("{0:x}", B->getAddress()) +
+ " has unexpected size",
+ inconvertibleErrorCode());
+
+ auto NewBlockContent = G.allocateBuffer(B->getSize());
+ llvm::copy(B->getContent(), NewBlockContent.data());
+ memcpy(NewBlockContent.data() + G.getPointerSize(), &PlatformKeyBits,
+ G.getPointerSize());
+ B->setContent(NewBlockContent);
+ }
+ }
+
+ // Transform any TLV edges into GOT edges.
+ for (auto *B : G.blocks())
+ for (auto &E : B->edges())
+ if (E.getKind() ==
+ jitlink::x86_64::RequestTLVPAndTransformToPCRel32TLVPLoadREXRelaxable)
+ E.setKind(jitlink::x86_64::
+ RequestGOTAndTransformToPCRel32GOTLoadREXRelaxable);
+
+ return Error::success();
+}
+
+// Attach allocation actions to the graph that register (and, on
+// deallocation, deregister) the final address ranges of the eh-frame and
+// thread-data sections with the ORC runtime. Thread-BSS content, if present,
+// is merged into the thread-data section first so a single range is
+// reported.
+Error MachOPlatform::MachOPlatformPlugin::registerEHAndTLVSections(
+    jitlink::LinkGraph &G) {
+
+  // Register the final address range of the eh-frame section (if any and
+  // non-empty) with the runtime via paired register/deregister actions.
+  if (auto *EHFrameSection = G.findSectionByName(EHFrameSectionName)) {
+    jitlink::SectionRange R(*EHFrameSection);
+    if (!R.empty())
+      G.allocActions().push_back(
+          {cantFail(
+               WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+                   MP.orc_rt_macho_register_ehframe_section, R.getRange())),
+           cantFail(
+               WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+                   MP.orc_rt_macho_deregister_ehframe_section, R.getRange()))});
+  }
+
+  // Get a pointer to the thread data section if there is one. It will be used
+  // below.
+  jitlink::Section *ThreadDataSection =
+      G.findSectionByName(ThreadDataSectionName);
+
+  // Handle thread BSS section if there is one.
+  if (auto *ThreadBSSSection = G.findSectionByName(ThreadBSSSectionName)) {
+    // If there's already a thread data section in this graph then merge the
+    // thread BSS section content into it, otherwise just treat the thread
+    // BSS section as the thread data section.
+    if (ThreadDataSection)
+      G.mergeSections(*ThreadDataSection, *ThreadBSSSection);
+    else
+      ThreadDataSection = ThreadBSSSection;
+  }
+
+  // Having merged thread BSS (if present) and thread data (if present),
+  // record the resulting section range.
+  if (ThreadDataSection) {
+    jitlink::SectionRange R(*ThreadDataSection);
+    if (!R.empty()) {
+      // Thread-data registration requires the platform runtime to be fully
+      // bootstrapped.
+      if (MP.State != MachOPlatform::Initialized)
+        return make_error<StringError>("__thread_data section encountered, but "
+                                       "MachOPlatform has not finished booting",
+                                       inconvertibleErrorCode());
+
+      G.allocActions().push_back(
+          {cantFail(
+               WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+                   MP.orc_rt_macho_register_thread_data_section, R.getRange())),
+           cantFail(
+               WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+                   MP.orc_rt_macho_deregister_thread_data_section,
+                   R.getRange()))});
+    }
+  }
+  return Error::success();
+}
+
+// Bootstrap-phase eh-frame registration: used while linking the object that
+// itself contains the runtime's registration code, so the register /
+// deregister function addresses must be found in this graph rather than
+// looked up through the (not-yet-booted) platform.
+Error MachOPlatform::MachOPlatformPlugin::registerEHSectionsPhase1(
+    jitlink::LinkGraph &G) {
+
+  // If there's no eh-frame there's nothing to do.
+  auto *EHFrameSection = G.findSectionByName(EHFrameSectionName);
+  if (!EHFrameSection)
+    return Error::success();
+
+  // If the eh-frame section is empty there's nothing to do.
+  jitlink::SectionRange R(*EHFrameSection);
+  if (R.empty())
+    return Error::success();
+
+  // Since we're linking the object containing the registration code now the
+  // addresses won't be ready in the platform. We'll have to find them in this
+  // graph instead.
+  ExecutorAddr orc_rt_macho_register_ehframe_section;
+  ExecutorAddr orc_rt_macho_deregister_ehframe_section;
+  for (auto *Sym : G.defined_symbols()) {
+    if (!Sym->hasName())
+      continue;
+    if (Sym->getName() == "___orc_rt_macho_register_ehframe_section")
+      orc_rt_macho_register_ehframe_section = ExecutorAddr(Sym->getAddress());
+    else if (Sym->getName() == "___orc_rt_macho_deregister_ehframe_section")
+      orc_rt_macho_deregister_ehframe_section = ExecutorAddr(Sym->getAddress());
+
+    // Stop scanning as soon as both addresses have been found.
+    if (orc_rt_macho_register_ehframe_section &&
+        orc_rt_macho_deregister_ehframe_section)
+      break;
+  }
+
+  // If we failed to find the required functions then bail out.
+  if (!orc_rt_macho_register_ehframe_section ||
+      !orc_rt_macho_deregister_ehframe_section)
+    return make_error<StringError>("Could not find eh-frame registration "
+                                   "functions during platform bootstrap",
+                                   inconvertibleErrorCode());
+
+  // Otherwise, add allocation actions to the graph to register eh-frames for
+  // this object.
+  G.allocActions().push_back(
+      {cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+           orc_rt_macho_register_ehframe_section, R.getRange())),
+       cantFail(WrapperFunctionCall::Create<SPSArgList<SPSExecutorAddrRange>>(
+           orc_rt_macho_deregister_ehframe_section, R.getRange()))});
+
+  return Error::success();
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Mangling.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Mangling.cpp
new file mode 100644
index 0000000000..9c243c9bf1
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Mangling.cpp
@@ -0,0 +1,84 @@
+//===----------- Mangling.cpp -- Name Mangling Utilities for ORC ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Mangling.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+// Captures the session (used for interning) and the data layout (used for
+// name mangling) by reference; both must outlive this object.
+MangleAndInterner::MangleAndInterner(ExecutionSession &ES, const DataLayout &DL)
+    : ES(ES), DL(DL) {}
+
+// Applies the data layout's linker mangling to Name and interns the result
+// in the session's symbol string pool.
+SymbolStringPtr MangleAndInterner::operator()(StringRef Name) {
+  std::string Mangled;
+  {
+    // Scope the stream so the string is fully flushed before interning.
+    raw_string_ostream MangledStream(Mangled);
+    Mangler::getNameWithPrefix(MangledStream, Name, DL);
+  }
+  return ES.intern(Mangled);
+}
+
+// Records JIT symbol flags (and, if SymbolToDefinition is non-null, the
+// name-to-GlobalValue mapping) for every externally-visible definition in
+// GVs. Unnamed values, declarations, and local / available_externally /
+// appending linkage values are skipped. Under emulated TLS a thread-local
+// variable is exposed as __emutls_v.<name> (plus __emutls_t.<name> when it
+// has a non-zero initializer) instead of its plain mangled name.
+void IRSymbolMapper::add(ExecutionSession &ES, const ManglingOptions &MO,
+                         ArrayRef<GlobalValue *> GVs,
+                         SymbolFlagsMap &SymbolFlags,
+                         SymbolNameToDefinitionMap *SymbolToDefinition) {
+  if (GVs.empty())
+    return;
+
+  // All values share one module, so one mangler (seeded from the first
+  // value's data layout) serves the whole batch.
+  MangleAndInterner Mangle(ES, GVs[0]->getParent()->getDataLayout());
+  for (auto *G : GVs) {
+    assert(G && "GVs cannot contain null elements");
+    if (!G->hasName() || G->isDeclaration() || G->hasLocalLinkage() ||
+        G->hasAvailableExternallyLinkage() || G->hasAppendingLinkage())
+      continue;
+
+    if (G->isThreadLocal() && MO.EmulatedTLS) {
+      auto *GV = cast<GlobalVariable>(G);
+
+      auto Flags = JITSymbolFlags::fromGlobalValue(*GV);
+
+      auto EmuTLSV = Mangle(("__emutls_v." + GV->getName()).str());
+      SymbolFlags[EmuTLSV] = Flags;
+      if (SymbolToDefinition)
+        (*SymbolToDefinition)[EmuTLSV] = GV;
+
+      // If this GV has a non-zero initializer we'll need to emit an
+      // __emutls.t symbol too.
+      if (GV->hasInitializer()) {
+        const auto *InitVal = GV->getInitializer();
+
+        // Skip zero-initializers.
+        if (isa<ConstantAggregateZero>(InitVal))
+          continue;
+        const auto *InitIntValue = dyn_cast<ConstantInt>(InitVal);
+        if (InitIntValue && InitIntValue->isZero())
+          continue;
+
+        auto EmuTLST = Mangle(("__emutls_t." + GV->getName()).str());
+        SymbolFlags[EmuTLST] = Flags;
+        if (SymbolToDefinition)
+          (*SymbolToDefinition)[EmuTLST] = GV;
+      }
+      continue;
+    }
+
+    // Otherwise we just need a normal linker mangling.
+    auto MangledName = Mangle(G->getName());
+    SymbolFlags[MangledName] = JITSymbolFlags::fromGlobalValue(*G);
+    if (SymbolToDefinition)
+      (*SymbolToDefinition)[MangledName] = G;
+  }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp
new file mode 100644
index 0000000000..c1ad569dd6
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectFileInterface.cpp
@@ -0,0 +1,205 @@
+//===------ ObjectFileInterface.cpp - MU interface utils for objects ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectFileInterface.h"
+#include "llvm/ExecutionEngine/Orc/ELFNixPlatform.h"
+#include "llvm/ExecutionEngine/Orc/MachOPlatform.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
+// Invents a unique init symbol named "$.<obj-file-name>.__inits.<N>" that
+// does not collide with any symbol already in I, and records it as
+// materialization-side-effects-only: it exists to pull the object's
+// initializers into a materialization, not to be queried for an address.
+void addInitSymbol(MaterializationUnit::Interface &I, ExecutionSession &ES,
+                   StringRef ObjFileName) {
+  assert(!I.InitSymbol && "I already has an init symbol");
+  size_t Counter = 0;
+
+  // Bump the counter until the generated name is free in I.SymbolFlags.
+  do {
+    std::string InitSymString;
+    raw_string_ostream(InitSymString)
+        << "$." << ObjFileName << ".__inits." << Counter++;
+    I.InitSymbol = ES.intern(InitSymString);
+  } while (I.SymbolFlags.count(I.InitSymbol));
+
+  I.SymbolFlags[I.InitSymbol] = JITSymbolFlags::MaterializationSideEffectsOnly;
+}
+
+// Builds a MaterializationUnit interface for a MachO object: collects flags
+// for every defined, global, non-file symbol, and adds an init symbol if the
+// object contains mod-init function pointers or a MachOPlatform initializer
+// section.
+static Expected<MaterializationUnit::Interface>
+getMachOObjectFileSymbolInfo(ExecutionSession &ES,
+                             const object::MachOObjectFile &Obj) {
+  MaterializationUnit::Interface I;
+
+  for (auto &Sym : Obj.symbols()) {
+    Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+    if (!SymFlagsOrErr)
+      // TODO: Test this error.
+      return SymFlagsOrErr.takeError();
+
+    // Skip symbols not defined in this object file.
+    if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+      continue;
+
+    // Skip symbols that are not global.
+    if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
+      continue;
+
+    // Skip symbols that have type SF_File.
+    if (auto SymType = Sym.getType()) {
+      if (*SymType == object::SymbolRef::ST_File)
+        continue;
+    } else
+      return SymType.takeError();
+
+    auto Name = Sym.getName();
+    if (!Name)
+      return Name.takeError();
+    auto InternedName = ES.intern(*Name);
+    auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+    if (!SymFlags)
+      return SymFlags.takeError();
+
+    // Strip the 'exported' flag from MachO linker-private symbols.
+    if (Name->startswith("l"))
+      *SymFlags &= ~JITSymbolFlags::Exported;
+
+    I.SymbolFlags[InternedName] = std::move(*SymFlags);
+  }
+
+  // Scan sections for initializers; the first hit is enough, since a single
+  // init symbol covers the whole object.
+  for (auto &Sec : Obj.sections()) {
+    auto SecType = Obj.getSectionType(Sec);
+    if ((SecType & MachO::SECTION_TYPE) == MachO::S_MOD_INIT_FUNC_POINTERS) {
+      addInitSymbol(I, ES, Obj.getFileName());
+      break;
+    }
+    auto SegName = Obj.getSectionFinalSegmentName(Sec.getRawDataRefImpl());
+    auto SecName = cantFail(Obj.getSectionName(Sec.getRawDataRefImpl()));
+    if (MachOPlatform::isInitializerSection(SegName, SecName)) {
+      addInitSymbol(I, ES, Obj.getFileName());
+      break;
+    }
+  }
+
+  return I;
+}
+
+// Builds a MaterializationUnit interface for an ELF object: collects flags
+// for every defined, global, non-file symbol (mapping STB_GNU_UNIQUE binding
+// to Weak), and adds an init symbol if the object contains an ELFNixPlatform
+// initializer section.
+static Expected<MaterializationUnit::Interface>
+getELFObjectFileSymbolInfo(ExecutionSession &ES,
+                           const object::ELFObjectFileBase &Obj) {
+  MaterializationUnit::Interface I;
+
+  for (auto &Sym : Obj.symbols()) {
+    Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+    if (!SymFlagsOrErr)
+      // TODO: Test this error.
+      return SymFlagsOrErr.takeError();
+
+    // Skip symbols not defined in this object file.
+    if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+      continue;
+
+    // Skip symbols that are not global.
+    if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
+      continue;
+
+    // Skip symbols that have type SF_File.
+    if (auto SymType = Sym.getType()) {
+      if (*SymType == object::SymbolRef::ST_File)
+        continue;
+    } else
+      return SymType.takeError();
+
+    auto Name = Sym.getName();
+    if (!Name)
+      return Name.takeError();
+    auto InternedName = ES.intern(*Name);
+    auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+    if (!SymFlags)
+      return SymFlags.takeError();
+
+    // ELF STB_GNU_UNIQUE should map to Weak for ORC.
+    if (Sym.getBinding() == ELF::STB_GNU_UNIQUE)
+      *SymFlags |= JITSymbolFlags::Weak;
+
+    I.SymbolFlags[InternedName] = std::move(*SymFlags);
+  }
+
+  // Scan for an initializer section; the first hit is enough, since a single
+  // init symbol covers the whole object. (An unused local InitSymbol was
+  // removed here: addInitSymbol writes directly into I.)
+  for (auto &Sec : Obj.sections()) {
+    if (auto SecName = Sec.getName()) {
+      if (ELFNixPlatform::isInitializerSection(*SecName)) {
+        addInitSymbol(I, ES, Obj.getFileName());
+        break;
+      }
+    }
+  }
+
+  return I;
+}
+
+// Format-agnostic fallback: builds a MaterializationUnit interface by
+// collecting flags for every defined, global, non-file symbol. No init
+// symbol detection is performed for unknown formats.
+Expected<MaterializationUnit::Interface>
+getGenericObjectFileSymbolInfo(ExecutionSession &ES,
+                               const object::ObjectFile &Obj) {
+  MaterializationUnit::Interface I;
+
+  for (auto &Sym : Obj.symbols()) {
+    Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+    if (!SymFlagsOrErr)
+      // TODO: Test this error.
+      return SymFlagsOrErr.takeError();
+
+    // Skip symbols not defined in this object file.
+    if (*SymFlagsOrErr & object::BasicSymbolRef::SF_Undefined)
+      continue;
+
+    // Skip symbols that are not global.
+    if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global))
+      continue;
+
+    // Skip symbols that have type SF_File.
+    if (auto SymType = Sym.getType()) {
+      if (*SymType == object::SymbolRef::ST_File)
+        continue;
+    } else
+      return SymType.takeError();
+
+    auto Name = Sym.getName();
+    if (!Name)
+      return Name.takeError();
+    auto InternedName = ES.intern(*Name);
+    auto SymFlags = JITSymbolFlags::fromObjectSymbol(Sym);
+    if (!SymFlags)
+      return SymFlags.takeError();
+
+    I.SymbolFlags[InternedName] = std::move(*SymFlags);
+  }
+
+  return I;
+}
+
+// Parses ObjBuffer into an ObjectFile and dispatches to the format-specific
+// interface builder (MachO, ELF, or the generic fallback).
+Expected<MaterializationUnit::Interface>
+getObjectFileInterface(ExecutionSession &ES, MemoryBufferRef ObjBuffer) {
+  auto ObjOrErr = object::ObjectFile::createObjectFile(ObjBuffer);
+  if (!ObjOrErr)
+    return ObjOrErr.takeError();
+
+  object::ObjectFile &ObjFile = **ObjOrErr;
+
+  if (auto *MachOObj = dyn_cast<object::MachOObjectFile>(&ObjFile))
+    return getMachOObjectFileSymbolInfo(ES, *MachOObj);
+  if (auto *ELFObj = dyn_cast<object::ELFObjectFileBase>(&ObjFile))
+    return getELFObjectFileSymbolInfo(ES, *ELFObj);
+  return getGenericObjectFileSymbolInfo(ES, ObjFile);
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
new file mode 100644
index 0000000000..32c5998a78
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectLinkingLayer.cpp
@@ -0,0 +1,833 @@
+//===------- ObjectLinkingLayer.cpp - JITLink backed ORC ObjectLayer ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"
+#include "llvm/ExecutionEngine/Orc/DebugObjectManagerPlugin.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <string>
+#include <vector>
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::jitlink;
+using namespace llvm::orc;
+
+namespace {
+
+// Wraps a jitlink::LinkGraph as a MaterializationUnit so a graph can be
+// added directly to a JITDylib. The symbol interface is computed up front by
+// scanning the graph; materialization hands the graph to the
+// ObjectLinkingLayer.
+class LinkGraphMaterializationUnit : public MaterializationUnit {
+public:
+  static std::unique_ptr<LinkGraphMaterializationUnit>
+  Create(ObjectLinkingLayer &ObjLinkingLayer, std::unique_ptr<LinkGraph> G) {
+    auto LGI = scanLinkGraph(ObjLinkingLayer.getExecutionSession(), *G);
+    return std::unique_ptr<LinkGraphMaterializationUnit>(
+        new LinkGraphMaterializationUnit(ObjLinkingLayer, std::move(G),
+                                         std::move(LGI)));
+  }
+
+  StringRef getName() const override { return G->getName(); }
+  void materialize(std::unique_ptr<MaterializationResponsibility> MR) override {
+    ObjLinkingLayer.emit(std::move(MR), std::move(G));
+  }
+
+private:
+  // Computes the unit's interface: flags for every named non-local symbol,
+  // plus an init symbol when the graph contains an initializer section for
+  // its object format.
+  static Interface scanLinkGraph(ExecutionSession &ES, LinkGraph &G) {
+
+    Interface LGI;
+
+    for (auto *Sym : G.defined_symbols()) {
+      // Skip local symbols.
+      if (Sym->getScope() == Scope::Local)
+        continue;
+      assert(Sym->hasName() && "Anonymous non-local symbol?");
+
+      JITSymbolFlags Flags;
+      if (Sym->getScope() == Scope::Default)
+        Flags |= JITSymbolFlags::Exported;
+
+      if (Sym->isCallable())
+        Flags |= JITSymbolFlags::Callable;
+
+      LGI.SymbolFlags[ES.intern(Sym->getName())] = Flags;
+    }
+
+    if ((G.getTargetTriple().isOSBinFormatMachO() && hasMachOInitSection(G)) ||
+        (G.getTargetTriple().isOSBinFormatELF() && hasELFInitSection(G)))
+      LGI.InitSymbol = makeInitSymbol(ES, G);
+
+    return LGI;
+  }
+
+  // Checks for MachO section names known to carry initializers / metadata
+  // that must be run or registered at load time.
+  static bool hasMachOInitSection(LinkGraph &G) {
+    for (auto &Sec : G.sections())
+      if (Sec.getName() == "__DATA,__obj_selrefs" ||
+          Sec.getName() == "__DATA,__objc_classlist" ||
+          Sec.getName() == "__TEXT,__swift5_protos" ||
+          Sec.getName() == "__TEXT,__swift5_proto" ||
+          Sec.getName() == "__TEXT,__swift5_types" ||
+          Sec.getName() == "__DATA,__mod_init_func")
+        return true;
+    return false;
+  }
+
+  // ELF initializers live in .init_array.
+  static bool hasELFInitSection(LinkGraph &G) {
+    for (auto &Sec : G.sections())
+      if (Sec.getName() == ".init_array")
+        return true;
+    return false;
+  }
+
+  // Generates a process-unique init symbol name for this graph using the
+  // class-wide atomic Counter.
+  static SymbolStringPtr makeInitSymbol(ExecutionSession &ES, LinkGraph &G) {
+    std::string InitSymString;
+    raw_string_ostream(InitSymString)
+        << "$." << G.getName() << ".__inits" << Counter++;
+    return ES.intern(InitSymString);
+  }
+
+  LinkGraphMaterializationUnit(ObjectLinkingLayer &ObjLinkingLayer,
+                               std::unique_ptr<LinkGraph> G, Interface LGI)
+      : MaterializationUnit(std::move(LGI)), ObjLinkingLayer(ObjLinkingLayer),
+        G(std::move(G)) {}
+
+  // Called when another definition wins: our weak definition is turned into
+  // an external reference so the graph links against the winner.
+  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override {
+    for (auto *Sym : G->defined_symbols())
+      if (Sym->getName() == *Name) {
+        assert(Sym->getLinkage() == Linkage::Weak &&
+               "Discarding non-weak definition");
+        G->makeExternal(*Sym);
+        break;
+      }
+  }
+
+  ObjectLinkingLayer &ObjLinkingLayer;
+  std::unique_ptr<LinkGraph> G;
+  // Shared across all instances to keep init symbol names unique.
+  static std::atomic<uint64_t> Counter;
+};
+
+std::atomic<uint64_t> LinkGraphMaterializationUnit::Counter{0};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+class ObjectLinkingLayerJITLinkContext final : public JITLinkContext {
+public:
+ ObjectLinkingLayerJITLinkContext(
+ ObjectLinkingLayer &Layer,
+ std::unique_ptr<MaterializationResponsibility> MR,
+ std::unique_ptr<MemoryBuffer> ObjBuffer)
+ : JITLinkContext(&MR->getTargetJITDylib()), Layer(Layer),
+ MR(std::move(MR)), ObjBuffer(std::move(ObjBuffer)) {}
+
+ ~ObjectLinkingLayerJITLinkContext() {
+ // If there is an object buffer return function then use it to
+ // return ownership of the buffer.
+ if (Layer.ReturnObjectBuffer && ObjBuffer)
+ Layer.ReturnObjectBuffer(std::move(ObjBuffer));
+ }
+
+ JITLinkMemoryManager &getMemoryManager() override { return Layer.MemMgr; }
+
+ void notifyMaterializing(LinkGraph &G) {
+ for (auto &P : Layer.Plugins)
+ P->notifyMaterializing(*MR, G, *this,
+ ObjBuffer ? ObjBuffer->getMemBufferRef()
+ : MemoryBufferRef());
+ }
+
+ void notifyFailed(Error Err) override {
+ for (auto &P : Layer.Plugins)
+ Err = joinErrors(std::move(Err), P->notifyFailed(*MR));
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ }
+
+ void lookup(const LookupMap &Symbols,
+ std::unique_ptr<JITLinkAsyncLookupContinuation> LC) override {
+
+ JITDylibSearchOrder LinkOrder;
+ MR->getTargetJITDylib().withLinkOrderDo(
+ [&](const JITDylibSearchOrder &LO) { LinkOrder = LO; });
+
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolLookupSet LookupSet;
+ for (auto &KV : Symbols) {
+ orc::SymbolLookupFlags LookupFlags;
+ switch (KV.second) {
+ case jitlink::SymbolLookupFlags::RequiredSymbol:
+ LookupFlags = orc::SymbolLookupFlags::RequiredSymbol;
+ break;
+ case jitlink::SymbolLookupFlags::WeaklyReferencedSymbol:
+ LookupFlags = orc::SymbolLookupFlags::WeaklyReferencedSymbol;
+ break;
+ }
+ LookupSet.add(ES.intern(KV.first), LookupFlags);
+ }
+
+ // OnResolve -- De-intern the symbols and pass the result to the linker.
+ auto OnResolve = [LookupContinuation =
+ std::move(LC)](Expected<SymbolMap> Result) mutable {
+ if (!Result)
+ LookupContinuation->run(Result.takeError());
+ else {
+ AsyncLookupResult LR;
+ for (auto &KV : *Result)
+ LR[*KV.first] = KV.second;
+ LookupContinuation->run(std::move(LR));
+ }
+ };
+
+ for (auto &KV : InternalNamedSymbolDeps) {
+ SymbolDependenceMap InternalDeps;
+ InternalDeps[&MR->getTargetJITDylib()] = std::move(KV.second);
+ MR->addDependencies(KV.first, InternalDeps);
+ }
+
+ ES.lookup(LookupKind::Static, LinkOrder, std::move(LookupSet),
+ SymbolState::Resolved, std::move(OnResolve),
+ [this](const SymbolDependenceMap &Deps) {
+ registerDependencies(Deps);
+ });
+ }
+
+ Error notifyResolved(LinkGraph &G) override {
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolFlagsMap ExtraSymbolsToClaim;
+ bool AutoClaim = Layer.AutoClaimObjectSymbols;
+
+ SymbolMap InternedResult;
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && Sym->getScope() != Scope::Local) {
+ auto InternedName = ES.intern(Sym->getName());
+ JITSymbolFlags Flags;
+
+ if (Sym->isCallable())
+ Flags |= JITSymbolFlags::Callable;
+ if (Sym->getScope() == Scope::Default)
+ Flags |= JITSymbolFlags::Exported;
+
+ InternedResult[InternedName] =
+ JITEvaluatedSymbol(Sym->getAddress().getValue(), Flags);
+ if (AutoClaim && !MR->getSymbols().count(InternedName)) {
+ assert(!ExtraSymbolsToClaim.count(InternedName) &&
+ "Duplicate symbol to claim?");
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+ }
+
+ for (auto *Sym : G.absolute_symbols())
+ if (Sym->hasName()) {
+ auto InternedName = ES.intern(Sym->getName());
+ JITSymbolFlags Flags;
+ Flags |= JITSymbolFlags::Absolute;
+ if (Sym->isCallable())
+ Flags |= JITSymbolFlags::Callable;
+ if (Sym->getLinkage() == Linkage::Weak)
+ Flags |= JITSymbolFlags::Weak;
+ InternedResult[InternedName] =
+ JITEvaluatedSymbol(Sym->getAddress().getValue(), Flags);
+ if (AutoClaim && !MR->getSymbols().count(InternedName)) {
+ assert(!ExtraSymbolsToClaim.count(InternedName) &&
+ "Duplicate symbol to claim?");
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+ }
+
+ if (!ExtraSymbolsToClaim.empty())
+ if (auto Err = MR->defineMaterializing(ExtraSymbolsToClaim))
+ return Err;
+
+ {
+
+ // Check that InternedResult matches up with MR->getSymbols(), overriding
+ // flags if requested.
+ // This guards against faulty transformations / compilers / object caches.
+
+ // First check that there aren't any missing symbols.
+ size_t NumMaterializationSideEffectsOnlySymbols = 0;
+ SymbolNameVector ExtraSymbols;
+ SymbolNameVector MissingSymbols;
+ for (auto &KV : MR->getSymbols()) {
+
+ auto I = InternedResult.find(KV.first);
+
+ // If this is a materialization-side-effects only symbol then bump
+ // the counter and make sure it's *not* defined, otherwise make
+ // sure that it is defined.
+ if (KV.second.hasMaterializationSideEffectsOnly()) {
+ ++NumMaterializationSideEffectsOnlySymbols;
+ if (I != InternedResult.end())
+ ExtraSymbols.push_back(KV.first);
+ continue;
+ } else if (I == InternedResult.end())
+ MissingSymbols.push_back(KV.first);
+ else if (Layer.OverrideObjectFlags)
+ I->second.setFlags(KV.second);
+ }
+
+ // If there were missing symbols then report the error.
+ if (!MissingSymbols.empty())
+ return make_error<MissingSymbolDefinitions>(
+ Layer.getExecutionSession().getSymbolStringPool(), G.getName(),
+ std::move(MissingSymbols));
+
+ // If there are more definitions than expected, add them to the
+ // ExtraSymbols vector.
+ if (InternedResult.size() >
+ MR->getSymbols().size() - NumMaterializationSideEffectsOnlySymbols) {
+ for (auto &KV : InternedResult)
+ if (!MR->getSymbols().count(KV.first))
+ ExtraSymbols.push_back(KV.first);
+ }
+
+ // If there were extra definitions then report the error.
+ if (!ExtraSymbols.empty())
+ return make_error<UnexpectedSymbolDefinitions>(
+ Layer.getExecutionSession().getSymbolStringPool(), G.getName(),
+ std::move(ExtraSymbols));
+ }
+
+ if (auto Err = MR->notifyResolved(InternedResult))
+ return Err;
+
+ Layer.notifyLoaded(*MR);
+ return Error::success();
+ }
+
+ void notifyFinalized(JITLinkMemoryManager::FinalizedAlloc A) override {
+ if (auto Err = Layer.notifyEmitted(*MR, std::move(A))) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ return;
+ }
+ if (auto Err = MR->notifyEmitted()) {
+ Layer.getExecutionSession().reportError(std::move(Err));
+ MR->failMaterialization();
+ }
+ }
+
+ LinkGraphPassFunction getMarkLivePass(const Triple &TT) const override {
+ return [this](LinkGraph &G) { return markResponsibilitySymbolsLive(G); };
+ }
+
+ Error modifyPassConfig(LinkGraph &LG, PassConfiguration &Config) override {
+ // Add passes to mark duplicate defs as should-discard, and to walk the
+ // link graph to build the symbol dependence graph.
+ Config.PrePrunePasses.push_back([this](LinkGraph &G) {
+ return claimOrExternalizeWeakAndCommonSymbols(G);
+ });
+
+ Layer.modifyPassConfig(*MR, LG, Config);
+
+ Config.PostPrunePasses.push_back(
+ [this](LinkGraph &G) { return computeNamedSymbolDependencies(G); });
+
+ return Error::success();
+ }
+
+private:
+ // Symbol name dependencies:
+ // Internal: Defined in this graph.
+ // External: Defined externally.
+ struct BlockSymbolDependencies {
+ SymbolNameSet Internal, External;
+ };
+
+ // Lazily populated map of blocks to BlockSymbolDependencies values.
+ class BlockDependenciesMap {
+ public:
+ BlockDependenciesMap(ExecutionSession &ES,
+ DenseMap<const Block *, DenseSet<Block *>> BlockDeps)
+ : ES(ES), BlockDeps(std::move(BlockDeps)) {}
+
+ const BlockSymbolDependencies &operator[](const Block &B) {
+ // Check the cache first.
+ auto I = BlockTransitiveDepsCache.find(&B);
+ if (I != BlockTransitiveDepsCache.end())
+ return I->second;
+
+ // No value. Populate the cache.
+ BlockSymbolDependencies BTDCacheVal;
+ auto BDI = BlockDeps.find(&B);
+ assert(BDI != BlockDeps.end() && "No block dependencies");
+
+ for (auto *BDep : BDI->second) {
+ auto &BID = getBlockImmediateDeps(*BDep);
+ for (auto &ExternalDep : BID.External)
+ BTDCacheVal.External.insert(ExternalDep);
+ for (auto &InternalDep : BID.Internal)
+ BTDCacheVal.Internal.insert(InternalDep);
+ }
+
+ return BlockTransitiveDepsCache
+ .insert(std::make_pair(&B, std::move(BTDCacheVal)))
+ .first->second;
+ }
+
+ SymbolStringPtr &getInternedName(Symbol &Sym) {
+ auto I = NameCache.find(&Sym);
+ if (I != NameCache.end())
+ return I->second;
+
+ return NameCache.insert(std::make_pair(&Sym, ES.intern(Sym.getName())))
+ .first->second;
+ }
+
+ private:
+ BlockSymbolDependencies &getBlockImmediateDeps(Block &B) {
+ // Check the cache first.
+ auto I = BlockImmediateDepsCache.find(&B);
+ if (I != BlockImmediateDepsCache.end())
+ return I->second;
+
+ BlockSymbolDependencies BIDCacheVal;
+ for (auto &E : B.edges()) {
+ auto &Tgt = E.getTarget();
+ if (Tgt.getScope() != Scope::Local) {
+ if (Tgt.isExternal())
+ BIDCacheVal.External.insert(getInternedName(Tgt));
+ else
+ BIDCacheVal.Internal.insert(getInternedName(Tgt));
+ }
+ }
+
+ return BlockImmediateDepsCache
+ .insert(std::make_pair(&B, std::move(BIDCacheVal)))
+ .first->second;
+ }
+
+ ExecutionSession &ES;
+ DenseMap<const Block *, DenseSet<Block *>> BlockDeps;
+ DenseMap<const Symbol *, SymbolStringPtr> NameCache;
+ DenseMap<const Block *, BlockSymbolDependencies> BlockImmediateDepsCache;
+ DenseMap<const Block *, BlockSymbolDependencies> BlockTransitiveDepsCache;
+ };
+
+ Error claimOrExternalizeWeakAndCommonSymbols(LinkGraph &G) {
+ auto &ES = Layer.getExecutionSession();
+
+ SymbolFlagsMap NewSymbolsToClaim;
+ std::vector<std::pair<SymbolStringPtr, Symbol *>> NameToSym;
+
+ auto ProcessSymbol = [&](Symbol *Sym) {
+ if (Sym->hasName() && Sym->getLinkage() == Linkage::Weak &&
+ Sym->getScope() != Scope::Local) {
+ auto Name = ES.intern(Sym->getName());
+ if (!MR->getSymbols().count(ES.intern(Sym->getName()))) {
+ JITSymbolFlags SF = JITSymbolFlags::Weak;
+ if (Sym->getScope() == Scope::Default)
+ SF |= JITSymbolFlags::Exported;
+ NewSymbolsToClaim[Name] = SF;
+ NameToSym.push_back(std::make_pair(std::move(Name), Sym));
+ }
+ }
+ };
+
+ for (auto *Sym : G.defined_symbols())
+ ProcessSymbol(Sym);
+ for (auto *Sym : G.absolute_symbols())
+ ProcessSymbol(Sym);
+
+ // Attempt to claim all weak defs that we're not already responsible for.
+ // This cannot fail -- any clashes will just result in rejection of our
+ // claim, at which point we'll externalize that symbol.
+ cantFail(MR->defineMaterializing(std::move(NewSymbolsToClaim)));
+
+ for (auto &KV : NameToSym)
+ if (!MR->getSymbols().count(KV.first))
+ G.makeExternal(*KV.second);
+
+ return Error::success();
+ }
+
+ Error markResponsibilitySymbolsLive(LinkGraph &G) const {
+ auto &ES = Layer.getExecutionSession();
+ for (auto *Sym : G.defined_symbols())
+ if (Sym->hasName() && MR->getSymbols().count(ES.intern(Sym->getName())))
+ Sym->setLive(true);
+ return Error::success();
+ }
+
+ Error computeNamedSymbolDependencies(LinkGraph &G) {
+ auto &ES = MR->getTargetJITDylib().getExecutionSession();
+ auto BlockDeps = computeBlockNonLocalDeps(G);
+
+ // Compute dependencies for symbols defined in the JITLink graph.
+ for (auto *Sym : G.defined_symbols()) {
+
+ // Skip local symbols: we do not track dependencies for these.
+ if (Sym->getScope() == Scope::Local)
+ continue;
+ assert(Sym->hasName() &&
+ "Defined non-local jitlink::Symbol should have a name");
+
+ auto &SymDeps = BlockDeps[Sym->getBlock()];
+ if (SymDeps.External.empty() && SymDeps.Internal.empty())
+ continue;
+
+ auto SymName = ES.intern(Sym->getName());
+ if (!SymDeps.External.empty())
+ ExternalNamedSymbolDeps[SymName] = SymDeps.External;
+ if (!SymDeps.Internal.empty())
+ InternalNamedSymbolDeps[SymName] = SymDeps.Internal;
+ }
+
+ for (auto &P : Layer.Plugins) {
+ auto SynthDeps = P->getSyntheticSymbolDependencies(*MR);
+ if (SynthDeps.empty())
+ continue;
+
+ DenseSet<Block *> BlockVisited;
+ for (auto &KV : SynthDeps) {
+ auto &Name = KV.first;
+ auto &DepsForName = KV.second;
+ for (auto *Sym : DepsForName) {
+ if (Sym->getScope() == Scope::Local) {
+ auto &BDeps = BlockDeps[Sym->getBlock()];
+ for (auto &S : BDeps.Internal)
+ InternalNamedSymbolDeps[Name].insert(S);
+ for (auto &S : BDeps.External)
+ ExternalNamedSymbolDeps[Name].insert(S);
+ } else {
+ if (Sym->isExternal())
+ ExternalNamedSymbolDeps[Name].insert(
+ BlockDeps.getInternedName(*Sym));
+ else
+ InternalNamedSymbolDeps[Name].insert(
+ BlockDeps.getInternedName(*Sym));
+ }
+ }
+ }
+ }
+
+ return Error::success();
+ }
+
+ BlockDependenciesMap computeBlockNonLocalDeps(LinkGraph &G) {
+ // First calculate the reachable-via-non-local-symbol blocks for each block.
+ struct BlockInfo {
+ DenseSet<Block *> Dependencies;
+ DenseSet<Block *> Dependants;
+ bool DependenciesChanged = true;
+ };
+ DenseMap<Block *, BlockInfo> BlockInfos;
+ SmallVector<Block *> WorkList;
+
+ // Pre-allocate map entries. This prevents any iterator/reference
+ // invalidation in the next loop.
+ for (auto *B : G.blocks())
+ (void)BlockInfos[B];
+
+ // Build initial worklist, record block dependencies/dependants and
+ // non-local symbol dependencies.
+ for (auto *B : G.blocks()) {
+ auto &BI = BlockInfos[B];
+ for (auto &E : B->edges()) {
+ if (E.getTarget().getScope() == Scope::Local) {
+ auto &TgtB = E.getTarget().getBlock();
+ if (&TgtB != B) {
+ BI.Dependencies.insert(&TgtB);
+ BlockInfos[&TgtB].Dependants.insert(B);
+ }
+ }
+ }
+
+ // If this node has both dependants and dependencies then add it to the
+ // worklist to propagate the dependencies to the dependants.
+ if (!BI.Dependants.empty() && !BI.Dependencies.empty())
+ WorkList.push_back(B);
+ }
+
+ // Propagate block-level dependencies through the block-dependence graph.
+ while (!WorkList.empty()) {
+ auto *B = WorkList.pop_back_val();
+
+ auto &BI = BlockInfos[B];
+ assert(BI.DependenciesChanged &&
+ "Block in worklist has unchanged dependencies");
+ BI.DependenciesChanged = false;
+ for (auto *Dependant : BI.Dependants) {
+ auto &DependantBI = BlockInfos[Dependant];
+ for (auto *Dependency : BI.Dependencies) {
+ if (Dependant != Dependency &&
+ DependantBI.Dependencies.insert(Dependency).second)
+ if (!DependantBI.DependenciesChanged) {
+ DependantBI.DependenciesChanged = true;
+ WorkList.push_back(Dependant);
+ }
+ }
+ }
+ }
+
+ DenseMap<const Block *, DenseSet<Block *>> BlockDeps;
+ for (auto &KV : BlockInfos)
+ BlockDeps[KV.first] = std::move(KV.second.Dependencies);
+
+ return BlockDependenciesMap(Layer.getExecutionSession(),
+ std::move(BlockDeps));
+ }
+
+  // Translate the name-level dependency info gathered during the link into
+  // per-JITDylib SymbolDependenceMaps and register them with MR.
+  //
+  // QueryDeps maps each source JITDylib to the set of symbols that this
+  // materialization's external references resolved against; for each symbol
+  // defined here we keep only the entries its own NameDeps set mentions.
+  void registerDependencies(const SymbolDependenceMap &QueryDeps) {
+    for (auto &NamedDepsEntry : ExternalNamedSymbolDeps) {
+      auto &Name = NamedDepsEntry.first;
+      auto &NameDeps = NamedDepsEntry.second;
+      SymbolDependenceMap SymbolDeps;
+
+      for (const auto &QueryDepsEntry : QueryDeps) {
+        JITDylib &SourceJD = *QueryDepsEntry.first;
+        const SymbolNameSet &Symbols = QueryDepsEntry.second;
+        auto &DepsForJD = SymbolDeps[&SourceJD];
+
+        // Intersect the query's symbols with this name's dependencies.
+        for (const auto &S : Symbols)
+          if (NameDeps.count(S))
+            DepsForJD.insert(S);
+
+        // operator[] above may have created an empty entry; drop it again.
+        if (DepsForJD.empty())
+          SymbolDeps.erase(&SourceJD);
+      }
+
+      MR->addDependencies(Name, SymbolDeps);
+    }
+  }
+
+  // Layer that created this link context.
+  ObjectLinkingLayer &Layer;
+  // Responsibility for the symbols produced by this link.
+  std::unique_ptr<MaterializationResponsibility> MR;
+  // Backing storage for the object being linked (null when linking a bare
+  // LinkGraph -- see ObjectLinkingLayer::emit(…, unique_ptr<LinkGraph>)).
+  std::unique_ptr<MemoryBuffer> ObjBuffer;
+  // Per-symbol-name dependencies discovered during the link, split by
+  // whether the dependee is external to, or defined within, this graph.
+  DenseMap<SymbolStringPtr, SymbolNameSet> ExternalNamedSymbolDeps;
+  DenseMap<SymbolStringPtr, SymbolNameSet> InternalNamedSymbolDeps;
+};
+
+// Out-of-line definition anchors the Plugin class to this translation unit.
+ObjectLinkingLayer::Plugin::~Plugin() {}
+
+// RTTI type-id anchor required by RTTIExtends.
+char ObjectLinkingLayer::ID;
+
+// Shorthand for the RTTIExtends base, used by the constructors below.
+using BaseT = RTTIExtends<ObjectLinkingLayer, ObjectLayer>;
+
+// Construct using the memory manager owned by the session's
+// ExecutorProcessControl. Registers this layer as a resource manager.
+ObjectLinkingLayer::ObjectLinkingLayer(ExecutionSession &ES)
+    : BaseT(ES), MemMgr(ES.getExecutorProcessControl().getMemMgr()) {
+  ES.registerResourceManager(*this);
+}
+
+// Construct with a caller-owned memory manager (must outlive this layer).
+ObjectLinkingLayer::ObjectLinkingLayer(ExecutionSession &ES,
+                                       JITLinkMemoryManager &MemMgr)
+    : BaseT(ES), MemMgr(MemMgr) {
+  ES.registerResourceManager(*this);
+}
+
+// Construct taking ownership of the given memory manager.
+ObjectLinkingLayer::ObjectLinkingLayer(
+    ExecutionSession &ES, std::unique_ptr<JITLinkMemoryManager> MemMgr)
+    : BaseT(ES), MemMgr(*MemMgr), MemMgrOwnership(std::move(MemMgr)) {
+  ES.registerResourceManager(*this);
+}
+
+ObjectLinkingLayer::~ObjectLinkingLayer() {
+  // All finalized allocations must have been released (via resource
+  // removal) before the layer may be destroyed.
+  assert(Allocs.empty() && "Layer destroyed with resources still attached");
+  getExecutionSession().deregisterResourceManager(*this);
+}
+
+// Add a LinkGraph to the JITDylib associated with RT. The graph is wrapped
+// in a LinkGraphMaterializationUnit, so linking is deferred until one of
+// its symbols is actually requested.
+Error ObjectLinkingLayer::add(ResourceTrackerSP RT,
+                              std::unique_ptr<LinkGraph> G) {
+  auto &JD = RT->getJITDylib();
+  return JD.define(LinkGraphMaterializationUnit::Create(*this, std::move(G)),
+                   std::move(RT));
+}
+
+// Materialize from an in-memory object file: parse it into a LinkGraph and
+// hand it to JITLink with a context that reports results back through R.
+void ObjectLinkingLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+                              std::unique_ptr<MemoryBuffer> O) {
+  assert(O && "Object must not be null");
+  // Take a non-owning ref before O is moved into the context below.
+  MemoryBufferRef ObjBuffer = O->getMemBufferRef();
+
+  auto Ctx = std::make_unique<ObjectLinkingLayerJITLinkContext>(
+      *this, std::move(R), std::move(O));
+  if (auto G = createLinkGraphFromObject(ObjBuffer)) {
+    Ctx->notifyMaterializing(**G);
+    link(std::move(*G), std::move(Ctx));
+  } else {
+    // Parsing failed: fail the materialization with the parse error.
+    Ctx->notifyFailed(G.takeError());
+  }
+}
+
+// Materialize a pre-built LinkGraph (no backing object buffer, hence the
+// nullptr ObjBuffer argument).
+void ObjectLinkingLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
+                              std::unique_ptr<LinkGraph> G) {
+  auto Ctx = std::make_unique<ObjectLinkingLayerJITLinkContext>(
+      *this, std::move(R), nullptr);
+  Ctx->notifyMaterializing(*G);
+  link(std::move(G), std::move(Ctx));
+}
+
+// Let every registered plugin adjust the JITLink pass pipeline for this
+// materialization.
+void ObjectLinkingLayer::modifyPassConfig(MaterializationResponsibility &MR,
+                                          LinkGraph &G,
+                                          PassConfiguration &PassConfig) {
+  for (auto &P : Plugins)
+    P->modifyPassConfig(MR, G, PassConfig);
+}
+
+// Fan the loaded notification out to all plugins.
+void ObjectLinkingLayer::notifyLoaded(MaterializationResponsibility &MR) {
+  for (auto &P : Plugins)
+    P->notifyLoaded(MR);
+}
+
+// Run every plugin's notifyEmitted hook, then record the finalized
+// allocation against MR's resource key so it can be freed when that key's
+// resources are removed.
+Error ObjectLinkingLayer::notifyEmitted(MaterializationResponsibility &MR,
+                                        FinalizedAlloc FA) {
+  // Join errors so every plugin is notified even if an earlier one fails.
+  Error Err = Error::success();
+  for (auto &P : Plugins)
+    Err = joinErrors(std::move(Err), P->notifyEmitted(MR));
+
+  // NOTE(review): on this error path FA is dropped without being passed to
+  // MemMgr.deallocate -- confirm FinalizedAlloc's destructor semantics.
+  if (Err)
+    return Err;
+
+  return MR.withResourceKeyDo(
+      [&](ResourceKey K) { Allocs[K].push_back(std::move(FA)); });
+}
+
+// Release all resources (plugin state plus finalized allocations) tracked
+// under resource key K.
+Error ObjectLinkingLayer::handleRemoveResources(ResourceKey K) {
+
+  {
+    // Give plugins a chance to release their per-key state first.
+    Error Err = Error::success();
+    for (auto &P : Plugins)
+      Err = joinErrors(std::move(Err), P->notifyRemovingResources(K));
+    if (Err)
+      return Err;
+  }
+
+  // Detach K's allocations under the session lock, then deallocate them
+  // outside the lock.
+  std::vector<FinalizedAlloc> AllocsToRemove;
+  getExecutionSession().runSessionLocked([&] {
+    auto I = Allocs.find(K);
+    if (I != Allocs.end()) {
+      std::swap(AllocsToRemove, I->second);
+      Allocs.erase(I);
+    }
+  });
+
+  if (AllocsToRemove.empty())
+    return Error::success();
+
+  return MemMgr.deallocate(std::move(AllocsToRemove));
+}
+
+// Move all allocations tracked under SrcKey to DstKey, then notify plugins.
+// NOTE(review): unlike handleRemoveResources this takes no session lock
+// itself -- presumably called with it already held; verify at call sites.
+void ObjectLinkingLayer::handleTransferResources(ResourceKey DstKey,
+                                                 ResourceKey SrcKey) {
+  auto I = Allocs.find(SrcKey);
+  if (I != Allocs.end()) {
+    auto &SrcAllocs = I->second;
+    auto &DstAllocs = Allocs[DstKey];
+    DstAllocs.reserve(DstAllocs.size() + SrcAllocs.size());
+    for (auto &Alloc : SrcAllocs)
+      DstAllocs.push_back(std::move(Alloc));
+
+    // Erase SrcKey entry using value rather than iterator I: I may have been
+    // invalidated when we looked up DstKey.
+    Allocs.erase(SrcKey);
+  }
+
+  for (auto &P : Plugins)
+    P->notifyTransferringResources(DstKey, SrcKey);
+}
+
+// Plugin that registers emitted eh-frame sections with Registrar so that
+// exceptions can unwind through JIT'd code.
+EHFrameRegistrationPlugin::EHFrameRegistrationPlugin(
+    ExecutionSession &ES, std::unique_ptr<EHFrameRegistrar> Registrar)
+    : ES(ES), Registrar(std::move(Registrar)) {}
+
+// Append a post-fixup pass that records the address/size of the graph's
+// eh-frame section (if any) against MR; registration itself happens later
+// in notifyEmitted.
+void EHFrameRegistrationPlugin::modifyPassConfig(
+    MaterializationResponsibility &MR, LinkGraph &G,
+    PassConfiguration &PassConfig) {
+
+  PassConfig.PostFixupPasses.push_back(createEHFrameRecorderPass(
+      G.getTargetTriple(), [this, &MR](ExecutorAddr Addr, size_t Size) {
+        // A null Addr means the graph has no eh-frame section.
+        if (Addr) {
+          std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+          assert(!InProcessLinks.count(&MR) &&
+                 "Link for MR already being tracked?");
+          InProcessLinks[&MR] = {Addr, Size};
+        }
+      }));
+}
+
+// Register the eh-frame range recorded for MR (if any): move it from the
+// in-flight table to the per-resource-key table, then tell the registrar.
+Error EHFrameRegistrationPlugin::notifyEmitted(
+    MaterializationResponsibility &MR) {
+
+  ExecutorAddrRange EmittedRange;
+  {
+    std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+
+    // No recorded range: the link had no eh-frame section; nothing to do.
+    auto EHFrameRangeItr = InProcessLinks.find(&MR);
+    if (EHFrameRangeItr == InProcessLinks.end())
+      return Error::success();
+
+    EmittedRange = EHFrameRangeItr->second;
+    assert(EmittedRange.Start && "eh-frame addr to register can not be null");
+    InProcessLinks.erase(EHFrameRangeItr);
+  }
+
+  // Track the range by resource key before registering, so removal can
+  // deregister it later.
+  if (auto Err = MR.withResourceKeyDo(
+          [&](ResourceKey K) { EHFrameRanges[K].push_back(EmittedRange); }))
+    return Err;
+
+  return Registrar->registerEHFrames(EmittedRange);
+}
+
+// Link failed: drop any recorded (but not yet registered) eh-frame range.
+Error EHFrameRegistrationPlugin::notifyFailed(
+    MaterializationResponsibility &MR) {
+  std::lock_guard<std::mutex> Lock(EHFramePluginMutex);
+  InProcessLinks.erase(&MR);
+  return Error::success();
+}
+
+// Deregister every eh-frame range tracked under resource key K.
+Error EHFrameRegistrationPlugin::notifyRemovingResources(ResourceKey K) {
+  std::vector<ExecutorAddrRange> RangesToRemove;
+
+  // Detach K's ranges under the session lock; deregister outside it.
+  ES.runSessionLocked([&] {
+    auto I = EHFrameRanges.find(K);
+    if (I != EHFrameRanges.end()) {
+      RangesToRemove = std::move(I->second);
+      EHFrameRanges.erase(I);
+    }
+  });
+
+  // Join errors so every range is attempted even if one deregister fails.
+  Error Err = Error::success();
+  while (!RangesToRemove.empty()) {
+    auto RangeToRemove = RangesToRemove.back();
+    RangesToRemove.pop_back();
+    assert(RangeToRemove.Start && "Untracked eh-frame range must not be null");
+    Err = joinErrors(std::move(Err),
+                     Registrar->deregisterEHFrames(RangeToRemove));
+  }
+
+  return Err;
+}
+
+// Re-key SrcKey's eh-frame ranges under DstKey, merging with any ranges
+// DstKey already owns.
+void EHFrameRegistrationPlugin::notifyTransferringResources(
+    ResourceKey DstKey, ResourceKey SrcKey) {
+  auto SI = EHFrameRanges.find(SrcKey);
+  if (SI == EHFrameRanges.end())
+    return;
+
+  auto DI = EHFrameRanges.find(DstKey);
+  if (DI != EHFrameRanges.end()) {
+    auto &SrcRanges = SI->second;
+    auto &DstRanges = DI->second;
+    DstRanges.reserve(DstRanges.size() + SrcRanges.size());
+    for (auto &SrcRange : SrcRanges)
+      DstRanges.push_back(std::move(SrcRange));
+    EHFrameRanges.erase(SI);
+  } else {
+    // We need to move SrcKey's ranges over without invalidating the SI
+    // iterator.
+    auto Tmp = std::move(SI->second);
+    EHFrameRanges.erase(SI);
+    EHFrameRanges[DstKey] = std::move(Tmp);
+  }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp
new file mode 100644
index 0000000000..207a31ec19
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ObjectTransformLayer.cpp
@@ -0,0 +1,44 @@
+//===---------- ObjectTransformLayer.cpp - Object Transform Layer ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+namespace llvm {
+namespace orc {
+
+// RTTI type-id anchor required by RTTIExtends.
+char ObjectTransformLayer::ID;
+
+using BaseT = RTTIExtends<ObjectTransformLayer, ObjectLayer>;
+
+// Wraps BaseLayer, applying Transform to each object buffer before it is
+// emitted downstream.
+ObjectTransformLayer::ObjectTransformLayer(ExecutionSession &ES,
+                                           ObjectLayer &BaseLayer,
+                                           TransformFunction Transform)
+    : BaseT(ES), BaseLayer(BaseLayer), Transform(std::move(Transform)) {}
+
+// Apply the transform (when one is set) to O, then forward the result to
+// the base layer. On transform failure the materialization is failed and
+// the error reported to the session.
+void ObjectTransformLayer::emit(
+    std::unique_ptr<MaterializationResponsibility> R,
+    std::unique_ptr<MemoryBuffer> O) {
+  assert(O && "Module must not be null");
+
+  // If there is a transform set then apply it.
+  if (Transform) {
+    if (auto TransformedObj = Transform(std::move(O)))
+      O = std::move(*TransformedObj);
+    else {
+      // Fail R first (so dependants are notified), then surface the error.
+      R->failMaterialization();
+      getExecutionSession().reportError(TransformedObj.takeError());
+      return;
+    }
+  }
+
+  BaseLayer.emit(std::move(R), std::move(O));
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/OrcABISupport.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/OrcABISupport.cpp
new file mode 100644
index 0000000000..18b3c5e12b
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/OrcABISupport.cpp
@@ -0,0 +1,910 @@
+//===------------- OrcABISupport.cpp - ABI specific support code ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/OrcABISupport.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/raw_ostream.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+
+// Returns true if a block of NumStubs stubs at StubBlockAddr and a parallel
+// block of pointers at PointerBlockAddr are (a) disjoint and (b) close
+// enough that every stub can reach its pointer within the ABI's
+// StubToPointerMaxDisplacement.
+//
+// NOTE(review): LastPointer is computed with ORCABI::StubSize rather than
+// PointerSize; callers static_assert(StubSize == PointerSize), which makes
+// the two equivalent -- confirm if that invariant is ever relaxed.
+template <typename ORCABI>
+bool stubAndPointerRangesOk(JITTargetAddress StubBlockAddr,
+                            JITTargetAddress PointerBlockAddr,
+                            unsigned NumStubs) {
+  constexpr unsigned MaxDisp = ORCABI::StubToPointerMaxDisplacement;
+  JITTargetAddress FirstStub = StubBlockAddr;
+  JITTargetAddress LastStub = FirstStub + ((NumStubs - 1) * ORCABI::StubSize);
+  JITTargetAddress FirstPointer = PointerBlockAddr;
+  JITTargetAddress LastPointer =
+      FirstPointer + ((NumStubs - 1) * ORCABI::StubSize);
+
+  // Stubs below pointers: forward displacement must fit.
+  if (FirstStub < FirstPointer) {
+    if (LastStub >= FirstPointer)
+      return false; // Ranges overlap.
+    return (FirstPointer - FirstStub <= MaxDisp) &&
+           (LastPointer - LastStub <= MaxDisp); // out-of-range.
+  }
+
+  // Pointers below stubs: check overlap/displacement the other way around.
+  if (LastPointer >= FirstStub)
+    return false; // Ranges overlap.
+
+  return (FirstStub - FirstPointer <= MaxDisp) &&
+         (LastStub - LastPointer <= MaxDisp);
+}
+
+namespace llvm {
+namespace orc {
+
+// Write the AArch64 resolver into ResolverWorkingMem (to execute at
+// ResolverTargetAddress). The resolver saves all GPRs and vector registers,
+// calls ReentryFnAddr(ReentryCtxAddr, return-addr - 12), restores state and
+// jumps to the address the reentry function returned (via x17). The two
+// 64-bit literal slots at the end (placeholder words) are patched below.
+void OrcAArch64::writeResolverCode(char *ResolverWorkingMem,
+                                   JITTargetAddress ResolverTargetAddress,
+                                   JITTargetAddress ReentryFnAddr,
+                                   JITTargetAddress ReentryCtxAddr) {
+
+  const uint32_t ResolverCode[] = {
+      // resolver_entry:
+      0xa9bf47fd, // 0x000: stp x29, x17, [sp, #-16]!
+      0x910003fd, // 0x004: mov x29, sp
+      0xa9bf73fb, // 0x008: stp x27, x28, [sp, #-16]!
+      0xa9bf6bf9, // 0x00c: stp x25, x26, [sp, #-16]!
+      0xa9bf63f7, // 0x010: stp x23, x24, [sp, #-16]!
+      0xa9bf5bf5, // 0x014: stp x21, x22, [sp, #-16]!
+      0xa9bf53f3, // 0x018: stp x19, x20, [sp, #-16]!
+      0xa9bf3fee, // 0x01c: stp x14, x15, [sp, #-16]!
+      0xa9bf37ec, // 0x020: stp x12, x13, [sp, #-16]!
+      0xa9bf2fea, // 0x024: stp x10, x11, [sp, #-16]!
+      0xa9bf27e8, // 0x028: stp x8, x9, [sp, #-16]!
+      0xa9bf1fe6, // 0x02c: stp x6, x7, [sp, #-16]!
+      0xa9bf17e4, // 0x030: stp x4, x5, [sp, #-16]!
+      0xa9bf0fe2, // 0x034: stp x2, x3, [sp, #-16]!
+      0xa9bf07e0, // 0x038: stp x0, x1, [sp, #-16]!
+      0xadbf7ffe, // 0x03c: stp q30, q31, [sp, #-32]!
+      0xadbf77fc, // 0x040: stp q28, q29, [sp, #-32]!
+      0xadbf6ffa, // 0x044: stp q26, q27, [sp, #-32]!
+      0xadbf67f8, // 0x048: stp q24, q25, [sp, #-32]!
+      0xadbf5ff6, // 0x04c: stp q22, q23, [sp, #-32]!
+      0xadbf57f4, // 0x050: stp q20, q21, [sp, #-32]!
+      0xadbf4ff2, // 0x054: stp q18, q19, [sp, #-32]!
+      0xadbf47f0, // 0x058: stp q16, q17, [sp, #-32]!
+      0xadbf3fee, // 0x05c: stp q14, q15, [sp, #-32]!
+      0xadbf37ec, // 0x060: stp q12, q13, [sp, #-32]!
+      0xadbf2fea, // 0x064: stp q10, q11, [sp, #-32]!
+      0xadbf27e8, // 0x068: stp q8, q9, [sp, #-32]!
+      0xadbf1fe6, // 0x06c: stp q6, q7, [sp, #-32]!
+      0xadbf17e4, // 0x070: stp q4, q5, [sp, #-32]!
+      0xadbf0fe2, // 0x074: stp q2, q3, [sp, #-32]!
+      0xadbf07e0, // 0x078: stp q0, q1, [sp, #-32]!
+      0x580004e0, // 0x07c: ldr x0, Lreentry_ctx_ptr
+      0xaa1e03e1, // 0x080: mov x1, x30
+      0xd1003021, // 0x084: sub x1, x1, #12
+      0x58000442, // 0x088: ldr x2, Lreentry_fn_ptr
+      0xd63f0040, // 0x08c: blr x2
+      0xaa0003f1, // 0x090: mov x17, x0
+      0xacc107e0, // 0x094: ldp q0, q1, [sp], #32
+      0xacc10fe2, // 0x098: ldp q2, q3, [sp], #32
+      0xacc117e4, // 0x09c: ldp q4, q5, [sp], #32
+      0xacc11fe6, // 0x0a0: ldp q6, q7, [sp], #32
+      0xacc127e8, // 0x0a4: ldp q8, q9, [sp], #32
+      0xacc12fea, // 0x0a8: ldp q10, q11, [sp], #32
+      0xacc137ec, // 0x0ac: ldp q12, q13, [sp], #32
+      0xacc13fee, // 0x0b0: ldp q14, q15, [sp], #32
+      0xacc147f0, // 0x0b4: ldp q16, q17, [sp], #32
+      0xacc14ff2, // 0x0b8: ldp q18, q19, [sp], #32
+      0xacc157f4, // 0x0bc: ldp q20, q21, [sp], #32
+      0xacc15ff6, // 0x0c0: ldp q22, q23, [sp], #32
+      0xacc167f8, // 0x0c4: ldp q24, q25, [sp], #32
+      0xacc16ffa, // 0x0c8: ldp q26, q27, [sp], #32
+      0xacc177fc, // 0x0cc: ldp q28, q29, [sp], #32
+      0xacc17ffe, // 0x0d0: ldp q30, q31, [sp], #32
+      0xa8c107e0, // 0x0d4: ldp x0, x1, [sp], #16
+      0xa8c10fe2, // 0x0d8: ldp x2, x3, [sp], #16
+      0xa8c117e4, // 0x0dc: ldp x4, x5, [sp], #16
+      0xa8c11fe6, // 0x0e0: ldp x6, x7, [sp], #16
+      0xa8c127e8, // 0x0e4: ldp x8, x9, [sp], #16
+      0xa8c12fea, // 0x0e8: ldp x10, x11, [sp], #16
+      0xa8c137ec, // 0x0ec: ldp x12, x13, [sp], #16
+      0xa8c13fee, // 0x0f0: ldp x14, x15, [sp], #16
+      0xa8c153f3, // 0x0f4: ldp x19, x20, [sp], #16
+      0xa8c15bf5, // 0x0f8: ldp x21, x22, [sp], #16
+      0xa8c163f7, // 0x0fc: ldp x23, x24, [sp], #16
+      0xa8c16bf9, // 0x100: ldp x25, x26, [sp], #16
+      0xa8c173fb, // 0x104: ldp x27, x28, [sp], #16
+      0xa8c17bfd, // 0x108: ldp x29, x30, [sp], #16
+      0xd65f0220, // 0x10c: ret x17
+      0x01234567, // 0x110: Lreentry_fn_ptr:
+      0xdeadbeef, // 0x114: .quad 0
+      0x98765432, // 0x118: Lreentry_ctx_ptr:
+      0xcafef00d // 0x11c: .quad 0
+  };
+
+  // Offsets of the two literal slots patched with the real addresses below
+  // (the placeholder words above are overwritten).
+  const unsigned ReentryFnAddrOffset = 0x110;
+  const unsigned ReentryCtxAddrOffset = 0x118;
+
+  memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+  memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+         sizeof(uint64_t));
+  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+         sizeof(uint64_t));
+}
+
+// Write NumTrampolines trampolines followed by an 8-byte (8-aligned)
+// pointer to the resolver. Each trampoline saves the return address in x17,
+// PC-relatively loads the resolver pointer into x16, and calls it.
+void OrcAArch64::writeTrampolines(char *TrampolineBlockWorkingMem,
+                                  JITTargetAddress TrampolineBlockTargetAddress,
+                                  JITTargetAddress ResolverAddr,
+                                  unsigned NumTrampolines) {
+
+  unsigned OffsetToPtr = alignTo(NumTrampolines * TrampolineSize, 8);
+
+  memcpy(TrampolineBlockWorkingMem + OffsetToPtr, &ResolverAddr,
+         sizeof(uint64_t));
+
+  // OffsetToPtr is actually the offset from the PC for the 2nd instruction, so
+  // subtract 32-bits.
+  OffsetToPtr -= 4;
+
+  uint32_t *Trampolines =
+      reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+
+  // The LDR-literal imm19 field (bits 5-23) holds offset/4, so the byte
+  // offset is shifted left by 3 (not 5) when OR'd into 0x58000010.
+  for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize) {
+    Trampolines[3 * I + 0] = 0xaa1e03f1; // mov x17, x30
+    Trampolines[3 * I + 1] = 0x58000010 | (OffsetToPtr << 3); // ldr x16, Lptr
+    Trampolines[3 * I + 2] = 0xd63f0200; // blr x16
+  }
+}
+
+// Write NumStubs indirect stubs. Each stub PC-relatively loads its pointer
+// into x16 and branches to it.
+void OrcAArch64::writeIndirectStubsBlock(
+    char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+    JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+  // Stub format is:
+  //
+  // .section __orc_stubs
+  // stub1:
+  // ldr x16, ptr1 ; PC-rel load of ptr1
+  // br x16 ; Jump to resolver
+  // stub2:
+  // ldr x16, ptr2 ; PC-rel load of ptr2
+  // br x16 ; Jump to resolver
+  //
+  // ...
+  //
+  // .section __orc_ptrs
+  // ptr1:
+  // .quad 0x0
+  // ptr2:
+  // .quad 0x0
+  //
+  // ...
+
+  static_assert(StubSize == PointerSize,
+                "Pointer and stub size must match for algorithm below");
+  assert(stubAndPointerRangesOk<OrcAArch64>(
+             StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+         "PointersBlock is out of range");
+  // Because stubs and pointers advance in lockstep (StubSize == PointerSize)
+  // the stub-to-pointer displacement is identical for every stub, so one
+  // shared imm19 field value (byte displacement / 4, placed at bit 5, i.e.
+  // << 3) works for all of them.
+  uint64_t PtrDisplacement =
+      PointersBlockTargetAddress - StubsBlockTargetAddress;
+  uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+  uint64_t PtrOffsetField = PtrDisplacement << 3;
+
+  // Each stub is one 64-bit little-endian word packing the instruction pair
+  // (ldr x16, Lptr = 0x58000010; br x16 = 0xd61f0200).
+  for (unsigned I = 0; I < NumStubs; ++I)
+    Stub[I] = 0xd61f020058000010 | PtrOffsetField;
+}
+
+// Write NumTrampolines 8-byte trampolines followed by an 8-byte resolver
+// pointer. Each trampoline is `callq *Lptr(%rip)` (bytes ff 15 disp32,
+// little-endian word 0x...15ff) padded with invalid-opcode bytes c4 f1.
+void OrcX86_64_Base::writeTrampolines(
+    char *TrampolineBlockWorkingMem,
+    JITTargetAddress TrampolineBlockTargetAddress,
+    JITTargetAddress ResolverAddr, unsigned NumTrampolines) {
+
+  unsigned OffsetToPtr = NumTrampolines * TrampolineSize;
+
+  memcpy(TrampolineBlockWorkingMem + OffsetToPtr, &ResolverAddr,
+         sizeof(uint64_t));
+
+  uint64_t *Trampolines =
+      reinterpret_cast<uint64_t *>(TrampolineBlockWorkingMem);
+  uint64_t CallIndirPCRel = 0xf1c40000000015ff;
+
+  // disp32 (placed after the two opcode bytes, hence << 16) is measured
+  // from the end of the 6-byte call instruction, hence the -6.
+  for (unsigned I = 0; I < NumTrampolines; ++I, OffsetToPtr -= TrampolineSize)
+    Trampolines[I] = CallIndirPCRel | ((OffsetToPtr - 6) << 16);
+}
+
+// Write NumStubs 8-byte indirect stubs, each a rip-relative `jmpq *ptrN`
+// (bytes ff 25 disp32) padded with invalid-opcode bytes c4 f1.
+void OrcX86_64_Base::writeIndirectStubsBlock(
+    char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+    JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+  // Stub format is:
+  //
+  // .section __orc_stubs
+  // stub1:
+  // jmpq *ptr1(%rip)
+  // .byte 0xC4 ; <- Invalid opcode padding.
+  // .byte 0xF1
+  // stub2:
+  // jmpq *ptr2(%rip)
+  //
+  // ...
+  //
+  // .section __orc_ptrs
+  // ptr1:
+  // .quad 0x0
+  // ptr2:
+  // .quad 0x0
+  //
+  // ...
+
+  // Populate the stubs page stubs and mark it executable.
+  static_assert(StubSize == PointerSize,
+                "Pointer and stub size must match for algorithm below");
+  assert(stubAndPointerRangesOk<OrcX86_64_Base>(
+             StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+         "PointersBlock is out of range");
+  uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+  // Lockstep layout (StubSize == PointerSize) => the same disp32 works for
+  // every stub; -6 because rip-rel disp is from the end of the instruction,
+  // << 16 places it after the two opcode bytes.
+  uint64_t PtrOffsetField =
+      (PointersBlockTargetAddress - StubsBlockTargetAddress - 6) << 16;
+  for (unsigned I = 0; I < NumStubs; ++I)
+    Stub[I] = 0xF1C40000000025ff | PtrOffsetField;
+}
+
+// Write the x86-64 SysV resolver: saves all GPRs plus FP/SSE state
+// (fxsave64), calls ReentryFnAddr(ReentryCtxAddr, return-addr - 6), writes
+// the returned address over the saved return address at 8(%rbp), restores
+// state and returns -- so the final retq jumps to the resolved target.
+void OrcX86_64_SysV::writeResolverCode(char *ResolverWorkingMem,
+                                       JITTargetAddress ResolverTargetAddress,
+                                       JITTargetAddress ReentryFnAddr,
+                                       JITTargetAddress ReentryCtxAddr) {
+
+  LLVM_DEBUG({
+    dbgs() << "Writing resolver code to "
+           << formatv("{0:x16}", ResolverTargetAddress) << "\n";
+  });
+
+  const uint8_t ResolverCode[] = {
+      // resolver_entry:
+      0x55,                         // 0x00: pushq     %rbp
+      0x48, 0x89, 0xe5,             // 0x01: movq      %rsp, %rbp
+      0x50,                         // 0x04: pushq     %rax
+      0x53,                         // 0x05: pushq     %rbx
+      0x51,                         // 0x06: pushq     %rcx
+      0x52,                         // 0x07: pushq     %rdx
+      0x56,                         // 0x08: pushq     %rsi
+      0x57,                         // 0x09: pushq     %rdi
+      0x41, 0x50,                   // 0x0a: pushq     %r8
+      0x41, 0x51,                   // 0x0c: pushq     %r9
+      0x41, 0x52,                   // 0x0e: pushq     %r10
+      0x41, 0x53,                   // 0x10: pushq     %r11
+      0x41, 0x54,                   // 0x12: pushq     %r12
+      0x41, 0x55,                   // 0x14: pushq     %r13
+      0x41, 0x56,                   // 0x16: pushq     %r14
+      0x41, 0x57,                   // 0x18: pushq     %r15
+      0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
+      0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+      0x48, 0xbf,                   // 0x26: movabsq <CBMgr>, %rdi
+
+      // 0x28: JIT re-entry ctx addr.
+      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+      0x48, 0x8b, 0x75, 0x08,       // 0x30: movq      8(%rbp), %rsi
+      0x48, 0x83, 0xee, 0x06,       // 0x34: subq      $6, %rsi
+      0x48, 0xb8,                   // 0x38: movabsq   <REntry>, %rax
+
+      // 0x3a: JIT re-entry fn addr:
+      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+      0xff, 0xd0,                   // 0x42: callq     *%rax
+      0x48, 0x89, 0x45, 0x08,       // 0x44: movq      %rax, 8(%rbp)
+      0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x48: fxrstor64 (%rsp)
+      0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x4d: addq 0x208, %rsp
+      0x41, 0x5f,                   // 0x54: popq      %r15
+      0x41, 0x5e,                   // 0x56: popq      %r14
+      0x41, 0x5d,                   // 0x58: popq      %r13
+      0x41, 0x5c,                   // 0x5a: popq      %r12
+      0x41, 0x5b,                   // 0x5c: popq      %r11
+      0x41, 0x5a,                   // 0x5e: popq      %r10
+      0x41, 0x59,                   // 0x60: popq      %r9
+      0x41, 0x58,                   // 0x62: popq      %r8
+      0x5f,                         // 0x64: popq      %rdi
+      0x5e,                         // 0x65: popq      %rsi
+      0x5a,                         // 0x66: popq      %rdx
+      0x59,                         // 0x67: popq      %rcx
+      0x5b,                         // 0x68: popq      %rbx
+      0x58,                         // 0x69: popq      %rax
+      0x5d,                         // 0x6a: popq      %rbp
+      0xc3,                         // 0x6b: retq
+  };
+
+  // Offsets of the imm64 fields of the two movabsq instructions above.
+  const unsigned ReentryFnAddrOffset = 0x3a;
+  const unsigned ReentryCtxAddrOffset = 0x28;
+
+  memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+  memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+         sizeof(uint64_t));
+  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+         sizeof(uint64_t));
+}
+
+// Write the x86-64 Win32 resolver. Identical in structure to the SysV
+// version, but follows the Windows x64 convention: args in %rcx/%rdx and a
+// 0x20-byte shadow-space allocation around the call.
+void OrcX86_64_Win32::writeResolverCode(char *ResolverWorkingMem,
+                                        JITTargetAddress ResolverTargetAddress,
+                                        JITTargetAddress ReentryFnAddr,
+                                        JITTargetAddress ReentryCtxAddr) {
+
+  // resolverCode is similar to OrcX86_64 with differences specific to windows
+  // x64 calling convention: arguments go into rcx, rdx and come in reverse
+  // order, shadow space allocation on stack
+  const uint8_t ResolverCode[] = {
+      // resolver_entry:
+      0x55,                         // 0x00: pushq     %rbp
+      0x48, 0x89, 0xe5,             // 0x01: movq      %rsp, %rbp
+      0x50,                         // 0x04: pushq     %rax
+      0x53,                         // 0x05: pushq     %rbx
+      0x51,                         // 0x06: pushq     %rcx
+      0x52,                         // 0x07: pushq     %rdx
+      0x56,                         // 0x08: pushq     %rsi
+      0x57,                         // 0x09: pushq     %rdi
+      0x41, 0x50,                   // 0x0a: pushq     %r8
+      0x41, 0x51,                   // 0x0c: pushq     %r9
+      0x41, 0x52,                   // 0x0e: pushq     %r10
+      0x41, 0x53,                   // 0x10: pushq     %r11
+      0x41, 0x54,                   // 0x12: pushq     %r12
+      0x41, 0x55,                   // 0x14: pushq     %r13
+      0x41, 0x56,                   // 0x16: pushq     %r14
+      0x41, 0x57,                   // 0x18: pushq     %r15
+      0x48, 0x81, 0xec, 0x08, 0x02, 0x00, 0x00, // 0x1a: subq 0x208, %rsp
+      0x48, 0x0f, 0xae, 0x04, 0x24, // 0x21: fxsave64 (%rsp)
+
+      0x48, 0xb9,                   // 0x26: movabsq <CBMgr>, %rcx
+      // 0x28: JIT re-entry ctx addr.
+      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+      0x48, 0x8B, 0x55, 0x08,       // 0x30: mov rdx, [rbp+0x8]
+      0x48, 0x83, 0xea, 0x06,       // 0x34: sub rdx, 0x6
+
+      0x48, 0xb8,                   // 0x38: movabsq <REntry>, %rax
+      // 0x3a: JIT re-entry fn addr:
+      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+      // 0x42: sub rsp, 0x20 (Allocate shadow space)
+      0x48, 0x83, 0xEC, 0x20,
+      0xff, 0xd0,                   // 0x46: callq     *%rax
+
+      // 0x48: add rsp, 0x20 (Free shadow space)
+      0x48, 0x83, 0xC4, 0x20,
+
+      0x48, 0x89, 0x45, 0x08,       // 0x4C: movq      %rax, 8(%rbp)
+      0x48, 0x0f, 0xae, 0x0c, 0x24, // 0x50: fxrstor64 (%rsp)
+      0x48, 0x81, 0xc4, 0x08, 0x02, 0x00, 0x00, // 0x55: addq 0x208, %rsp
+      0x41, 0x5f,                   // 0x5C: popq      %r15
+      0x41, 0x5e,                   // 0x5E: popq      %r14
+      0x41, 0x5d,                   // 0x60: popq      %r13
+      0x41, 0x5c,                   // 0x62: popq      %r12
+      0x41, 0x5b,                   // 0x64: popq      %r11
+      0x41, 0x5a,                   // 0x66: popq      %r10
+      0x41, 0x59,                   // 0x68: popq      %r9
+      0x41, 0x58,                   // 0x6a: popq      %r8
+      0x5f,                         // 0x6c: popq      %rdi
+      0x5e,                         // 0x6d: popq      %rsi
+      0x5a,                         // 0x6e: popq      %rdx
+      0x59,                         // 0x6f: popq      %rcx
+      0x5b,                         // 0x70: popq      %rbx
+      0x58,                         // 0x71: popq      %rax
+      0x5d,                         // 0x72: popq      %rbp
+      0xc3,                         // 0x73: retq
+  };
+
+  // Offsets of the imm64 fields of the two movabsq instructions above.
+  const unsigned ReentryFnAddrOffset = 0x3a;
+  const unsigned ReentryCtxAddrOffset = 0x28;
+
+  memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+  memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+         sizeof(uint64_t));
+  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+         sizeof(uint64_t));
+}
+
+// Write the i386 (cdecl) resolver: saves GPRs and FX state, passes
+// ReentryCtxAddr and return-addr-5 on the stack, calls the reentry fn, and
+// writes the result over the saved return address at 0x4(%ebp) so the final
+// retl jumps to the resolved target. Addresses must fit in 32 bits.
+void OrcI386::writeResolverCode(char *ResolverWorkingMem,
+                                JITTargetAddress ResolverTargetAddress,
+                                JITTargetAddress ReentryFnAddr,
+                                JITTargetAddress ReentryCtxAddr) {
+
+  assert((ReentryFnAddr >> 32) == 0 && "ReentryFnAddr out of range");
+  assert((ReentryCtxAddr >> 32) == 0 && "ReentryCtxAddr out of range");
+
+  const uint8_t ResolverCode[] = {
+      // resolver_entry:
+      0x55,                               // 0x00: pushl    %ebp
+      0x89, 0xe5,                         // 0x01: movl     %esp, %ebp
+      0x54,                               // 0x03: pushl    %esp
+      0x83, 0xe4, 0xf0,                   // 0x04: andl     $-0x10, %esp
+      0x50,                               // 0x07: pushl    %eax
+      0x53,                               // 0x08: pushl    %ebx
+      0x51,                               // 0x09: pushl    %ecx
+      0x52,                               // 0x0a: pushl    %edx
+      0x56,                               // 0x0b: pushl    %esi
+      0x57,                               // 0x0c: pushl    %edi
+      0x81, 0xec, 0x18, 0x02, 0x00, 0x00, // 0x0d: subl     $0x218, %esp
+      0x0f, 0xae, 0x44, 0x24, 0x10,       // 0x13: fxsave   0x10(%esp)
+      0x8b, 0x75, 0x04,                   // 0x18: movl     0x4(%ebp), %esi
+      0x83, 0xee, 0x05,                   // 0x1b: subl     $0x5, %esi
+      0x89, 0x74, 0x24, 0x04,             // 0x1e: movl     %esi, 0x4(%esp)
+      0xc7, 0x04, 0x24, 0x00, 0x00, 0x00,
+      0x00,                               // 0x22: movl     <cbmgr>, (%esp)
+      0xb8, 0x00, 0x00, 0x00, 0x00,       // 0x29: movl     <reentry>, %eax
+      0xff, 0xd0,                         // 0x2e: calll    *%eax
+      0x89, 0x45, 0x04,                   // 0x30: movl     %eax, 0x4(%ebp)
+      0x0f, 0xae, 0x4c, 0x24, 0x10,       // 0x33: fxrstor  0x10(%esp)
+      0x81, 0xc4, 0x18, 0x02, 0x00, 0x00, // 0x38: addl     $0x218, %esp
+      0x5f,                               // 0x3e: popl     %edi
+      0x5e,                               // 0x3f: popl     %esi
+      0x5a,                               // 0x40: popl     %edx
+      0x59,                               // 0x41: popl     %ecx
+      0x5b,                               // 0x42: popl     %ebx
+      0x58,                               // 0x43: popl     %eax
+      0x8b, 0x65, 0xfc,                   // 0x44: movl     -0x4(%ebp), %esp
+      0x5d,                               // 0x48: popl     %ebp
+      0xc3                                // 0x49: retl
+  };
+
+  // Offsets of the imm32 fields inside the movl instructions at 0x29 and
+  // 0x22 above.
+  const unsigned ReentryFnAddrOffset = 0x2a;
+  const unsigned ReentryCtxAddrOffset = 0x25;
+
+  memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+  memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnAddr,
+         sizeof(uint32_t));
+  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxAddr,
+         sizeof(uint32_t));
+}
+
+// Write NumTrampolines 8-byte trampolines, each `call rel32` to the
+// resolver (bytes e8 rel32) padded with c4 c4 f1. rel32 is measured from
+// the end of the 5-byte call, and shrinks by TrampolineSize per trampoline.
+void OrcI386::writeTrampolines(char *TrampolineWorkingMem,
+                               JITTargetAddress TrampolineBlockTargetAddress,
+                               JITTargetAddress ResolverAddr,
+                               unsigned NumTrampolines) {
+  assert((ResolverAddr >> 32) == 0 && "ResolverAddr out of range");
+
+  uint64_t CallRelImm = 0xF1C4C400000000e8;
+  uint64_t ResolverRel = ResolverAddr - TrampolineBlockTargetAddress - 5;
+
+  uint64_t *Trampolines = reinterpret_cast<uint64_t *>(TrampolineWorkingMem);
+  // << 8 places rel32 just after the e8 opcode byte.
+  for (unsigned I = 0; I < NumTrampolines; ++I, ResolverRel -= TrampolineSize)
+    Trampolines[I] = CallRelImm | (ResolverRel << 8);
+}
+
+// Write NumStubs 8-byte indirect stubs, each an absolute-indirect
+// `jmpq *ptrN` (bytes ff 25 addr32) padded with c4 f1. Pointers are 32-bit
+// slots, so the target address advances by 4 per stub.
+void OrcI386::writeIndirectStubsBlock(
+    char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+    JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+  assert((StubsBlockTargetAddress >> 32) == 0 &&
+         "StubsBlockTargetAddress is out of range");
+  assert((PointersBlockTargetAddress >> 32) == 0 &&
+         "PointersBlockTargetAddress is out of range");
+
+  // Stub format is:
+  //
+  // .section __orc_stubs
+  // stub1:
+  // jmpq *ptr1
+  // .byte 0xC4 ; <- Invalid opcode padding.
+  // .byte 0xF1
+  // stub2:
+  // jmpq *ptr2
+  //
+  // ...
+  //
+  // .section __orc_ptrs
+  // ptr1:
+  // .quad 0x0
+  // ptr2:
+  // .quad 0x0
+  //
+  // ...
+
+  assert(stubAndPointerRangesOk<OrcI386>(
+             StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+         "PointersBlock is out of range");
+
+  uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlockWorkingMem);
+  uint64_t PtrAddr = PointersBlockTargetAddress;
+  // << 16 places the absolute pointer address after the ff 25 opcode bytes.
+  for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 4)
+    Stub[I] = 0xF1C40000000025ff | (PtrAddr << 16);
+}
+
+// Write the MIPS32 resolver: saves the integer register file, calls
+// ReentryFnAddr(ReentryCtxAddr, $ra - 20), restores registers and jumps to
+// the returned address via $t9 (moved from $v0/$v1 in the jr delay slot --
+// which of the two depends on endianness). The lui/addiu placeholder pairs
+// are patched with the real addresses below.
+void OrcMips32_Base::writeResolverCode(char *ResolverWorkingMem,
+                                       JITTargetAddress ResolverTargetAddress,
+                                       JITTargetAddress ReentryFnAddr,
+                                       JITTargetAddress ReentryCtxAddr,
+                                       bool isBigEndian) {
+
+  const uint32_t ResolverCode[] = {
+      // resolver_entry:
+      0x27bdff98, // 0x00: addiu $sp,$sp,-104
+      0xafa20000, // 0x04: sw $v0,0($sp)
+      0xafa30004, // 0x08: sw $v1,4($sp)
+      0xafa40008, // 0x0c: sw $a0,8($sp)
+      0xafa5000c, // 0x10: sw $a1,12($sp)
+      0xafa60010, // 0x14: sw $a2,16($sp)
+      0xafa70014, // 0x18: sw $a3,20($sp)
+      0xafb00018, // 0x1c: sw $s0,24($sp)
+      0xafb1001c, // 0x20: sw $s1,28($sp)
+      0xafb20020, // 0x24: sw $s2,32($sp)
+      0xafb30024, // 0x28: sw $s3,36($sp)
+      0xafb40028, // 0x2c: sw $s4,40($sp)
+      0xafb5002c, // 0x30: sw $s5,44($sp)
+      0xafb60030, // 0x34: sw $s6,48($sp)
+      0xafb70034, // 0x38: sw $s7,52($sp)
+      0xafa80038, // 0x3c: sw $t0,56($sp)
+      0xafa9003c, // 0x40: sw $t1,60($sp)
+      0xafaa0040, // 0x44: sw $t2,64($sp)
+      0xafab0044, // 0x48: sw $t3,68($sp)
+      0xafac0048, // 0x4c: sw $t4,72($sp)
+      0xafad004c, // 0x50: sw $t5,76($sp)
+      0xafae0050, // 0x54: sw $t6,80($sp)
+      0xafaf0054, // 0x58: sw $t7,84($sp)
+      0xafb80058, // 0x5c: sw $t8,88($sp)
+      0xafb9005c, // 0x60: sw $t9,92($sp)
+      0xafbe0060, // 0x64: sw $fp,96($sp)
+      0xafbf0064, // 0x68: sw $ra,100($sp)
+
+      // JIT re-entry ctx addr.
+      0x00000000, // 0x6c: lui $a0,ctx
+      0x00000000, // 0x70: addiu $a0,$a0,ctx
+
+      0x03e02825, // 0x74: move $a1, $ra
+      0x24a5ffec, // 0x78: addiu $a1,$a1,-20
+
+      // JIT re-entry fn addr:
+      0x00000000, // 0x7c: lui $t9,reentry
+      0x00000000, // 0x80: addiu $t9,$t9,reentry
+
+      0x0320f809, // 0x84: jalr $t9
+      0x00000000, // 0x88: nop
+      0x8fbf0064, // 0x8c: lw $ra,100($sp)
+      0x8fbe0060, // 0x90: lw $fp,96($sp)
+      0x8fb9005c, // 0x94: lw $t9,92($sp)
+      0x8fb80058, // 0x98: lw $t8,88($sp)
+      0x8faf0054, // 0x9c: lw $t7,84($sp)
+      0x8fae0050, // 0xa0: lw $t6,80($sp)
+      0x8fad004c, // 0xa4: lw $t5,76($sp)
+      0x8fac0048, // 0xa8: lw $t4,72($sp)
+      0x8fab0044, // 0xac: lw $t3,68($sp)
+      0x8faa0040, // 0xb0: lw $t2,64($sp)
+      0x8fa9003c, // 0xb4: lw $t1,60($sp)
+      0x8fa80038, // 0xb8: lw $t0,56($sp)
+      0x8fb70034, // 0xbc: lw $s7,52($sp)
+      0x8fb60030, // 0xc0: lw $s6,48($sp)
+      0x8fb5002c, // 0xc4: lw $s5,44($sp)
+      0x8fb40028, // 0xc8: lw $s4,40($sp)
+      0x8fb30024, // 0xcc: lw $s3,36($sp)
+      0x8fb20020, // 0xd0: lw $s2,32($sp)
+      0x8fb1001c, // 0xd4: lw $s1,28($sp)
+      0x8fb00018, // 0xd8: lw $s0,24($sp)
+      0x8fa70014, // 0xdc: lw $a3,20($sp)
+      0x8fa60010, // 0xe0: lw $a2,16($sp)
+      0x8fa5000c, // 0xe4: lw $a1,12($sp)
+      0x8fa40008, // 0xe8: lw $a0,8($sp)
+      0x27bd0068, // 0xec: addiu $sp,$sp,104
+      0x0300f825, // 0xf0: move $ra, $t8
+      0x03200008, // 0xf4: jr $t9
+      0x00000000, // 0xf8: move $t9, $v0/v1
+  };
+
+  const unsigned ReentryFnAddrOffset = 0x7c;  // JIT re-entry fn addr lui
+  const unsigned ReentryCtxAddrOffset = 0x6c; // JIT re-entry context addr lui
+  // Offset of the jr delay slot patched below. ("Offsett" sic -- typo kept
+  // from upstream.)
+  const unsigned Offsett = 0xf8;
+
+  memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));
+
+  // Depending on endian return value will be in v0 or v1.
+  uint32_t MoveVxT9 = isBigEndian ? 0x0060c825 : 0x0040c825;
+  memcpy(ResolverWorkingMem + Offsett, &MoveVxT9, sizeof(MoveVxT9));
+
+  // +0x8000 in the lui compensates for sign-extension of the addiu's 16-bit
+  // immediate (same trick for both address patches below).
+  uint32_t ReentryCtxLUi =
+      0x3c040000 | (((ReentryCtxAddr + 0x8000) >> 16) & 0xFFFF);
+  uint32_t ReentryCtxADDiu = 0x24840000 | ((ReentryCtxAddr)&0xFFFF);
+  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxLUi,
+         sizeof(ReentryCtxLUi));
+  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset + 4, &ReentryCtxADDiu,
+         sizeof(ReentryCtxADDiu));
+
+  uint32_t ReentryFnLUi =
+      0x3c190000 | (((ReentryFnAddr + 0x8000) >> 16) & 0xFFFF);
+  uint32_t ReentryFnADDiu = 0x27390000 | ((ReentryFnAddr)&0xFFFF);
+  memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnLUi,
+         sizeof(ReentryFnLUi));
+  memcpy(ResolverWorkingMem + ReentryFnAddrOffset + 4, &ReentryFnADDiu,
+         sizeof(ReentryFnADDiu));
+}
+
+// Write NumTrampolines 5-word (20-byte) trampolines. Each saves $ra in $t8,
+// materializes the (32-bit) resolver address in $t9 via lui/addiu, and
+// jalr's to it; the final word is the jalr delay-slot nop.
+void OrcMips32_Base::writeTrampolines(
+    char *TrampolineBlockWorkingMem,
+    JITTargetAddress TrampolineBlockTargetAddress,
+    JITTargetAddress ResolverAddr, unsigned NumTrampolines) {
+
+  assert((ResolverAddr >> 32) == 0 && "ResolverAddr out of range");
+
+  uint32_t *Trampolines =
+      reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+  // +0x8000 compensates for sign-extension of the addiu immediate.
+  uint32_t RHiAddr = ((ResolverAddr + 0x8000) >> 16);
+
+  for (unsigned I = 0; I < NumTrampolines; ++I) {
+    // move $t8,$ra
+    // lui $t9,ResolverAddr
+    // addiu $t9,$t9,ResolverAddr
+    // jalr $t9
+    // nop
+    Trampolines[5 * I + 0] = 0x03e0c025;
+    Trampolines[5 * I + 1] = 0x3c190000 | (RHiAddr & 0xFFFF);
+    Trampolines[5 * I + 2] = 0x27390000 | (ResolverAddr & 0xFFFF);
+    Trampolines[5 * I + 3] = 0x0320f809;
+    Trampolines[5 * I + 4] = 0x00000000;
+  }
+}
+
+// Write NumStubs 4-word indirect stubs. Stub N materializes the high half
+// of its pointer slot's address with lui, loads the 32-bit target from that
+// slot, and jumps to it via $t9; the final word is the jr delay-slot nop.
+// Pointer slots are 32-bit words, so the slot address advances by 4.
+void OrcMips32_Base::writeIndirectStubsBlock(
+    char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+    JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+  assert((StubsBlockTargetAddress >> 32) == 0 &&
+         "InitialPtrVal is out of range");
+
+  // Stub format is:
+  //
+  // .section __orc_stubs
+  // stub1:
+  // lui $t9, ptr1
+  // lw $t9, %lo(ptr1)($t9)
+  // jr $t9
+  // stub2:
+  // lui $t9, ptr2
+  // lw $t9,%lo(ptr2)($t9)
+  // jr $t9
+  //
+  // ...
+  //
+  // .section __orc_ptrs
+  // ptr1:
+  // .word 0x0
+  // ptr2:
+  // .word 0x0
+  //
+  // ...
+
+  // Fix for an upstream copy-paste error: range-check the stub/pointer
+  // blocks against this ABI's own layout constants, not OrcAArch64's.
+  assert(stubAndPointerRangesOk<OrcMips32_Base>(
+             StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+         "PointersBlock is out of range");
+
+  // Populate the stubs page stubs and mark it executable.
+  uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+  uint64_t PtrAddr = PointersBlockTargetAddress;
+
+  for (unsigned I = 0; I < NumStubs; ++I) {
+    // +0x8000 compensates for sign-extension of the lw's 16-bit offset.
+    uint32_t HiAddr = ((PtrAddr + 0x8000) >> 16);
+    Stub[4 * I + 0] = 0x3c190000 | (HiAddr & 0xFFFF);  // lui $t9,ptr1
+    Stub[4 * I + 1] = 0x8f390000 | (PtrAddr & 0xFFFF); // lw $t9,%lo(ptr1)($t9)
+    Stub[4 * I + 2] = 0x03200008;                      // jr $t9
+    Stub[4 * I + 3] = 0x00000000;                      // nop
+    PtrAddr += 4;
+  }
+}
+
// Emit the MIPS64 resolver entry block into ResolverWorkingMem, then patch
// the six-instruction address-materialization sequences for the re-entry
// context ($a0) and re-entry function ($t9). The resolver saves $v0/$v1,
// $a0-$a7, $t0-$t3, $s0-$s7, $t8, $t9, $fp and $ra, calls
// ReentryFnAddr(ReentryCtxAddr, adjusted return address), restores the saved
// registers (deliberately NOT $v0, which carries the resolver's result), and
// jumps via $t9 with `move $t9,$v0` in the jr delay slot.
void OrcMips64::writeResolverCode(char *ResolverWorkingMem,
                                  JITTargetAddress ResolverTargetAddress,
                                  JITTargetAddress ReentryFnAddr,
                                  JITTargetAddress ReentryCtxAddr) {

  const uint32_t ResolverCode[] = {
      //resolver_entry:
      0x67bdff30, // 0x00: daddiu $sp,$sp,-208
      0xffa20000, // 0x04: sd v0,0(sp)
      0xffa30008, // 0x08: sd v1,8(sp)
      0xffa40010, // 0x0c: sd a0,16(sp)
      0xffa50018, // 0x10: sd a1,24(sp)
      0xffa60020, // 0x14: sd a2,32(sp)
      0xffa70028, // 0x18: sd a3,40(sp)
      0xffa80030, // 0x1c: sd a4,48(sp)
      0xffa90038, // 0x20: sd a5,56(sp)
      0xffaa0040, // 0x24: sd a6,64(sp)
      0xffab0048, // 0x28: sd a7,72(sp)
      0xffac0050, // 0x2c: sd t0,80(sp)
      0xffad0058, // 0x30: sd t1,88(sp)
      0xffae0060, // 0x34: sd t2,96(sp)
      0xffaf0068, // 0x38: sd t3,104(sp)
      0xffb00070, // 0x3c: sd s0,112(sp)
      0xffb10078, // 0x40: sd s1,120(sp)
      0xffb20080, // 0x44: sd s2,128(sp)
      0xffb30088, // 0x48: sd s3,136(sp)
      0xffb40090, // 0x4c: sd s4,144(sp)
      0xffb50098, // 0x50: sd s5,152(sp)
      0xffb600a0, // 0x54: sd s6,160(sp)
      0xffb700a8, // 0x58: sd s7,168(sp)
      0xffb800b0, // 0x5c: sd t8,176(sp)
      0xffb900b8, // 0x60: sd t9,184(sp)
      0xffbe00c0, // 0x64: sd fp,192(sp)
      0xffbf00c8, // 0x68: sd ra,200(sp)

      // JIT re-entry ctx addr (patched below into $a0).
      0x00000000, // 0x6c: lui $a0,%highest(ctx)
      0x00000000, // 0x70: daddiu $a0,$a0,%higher(ctx)
      0x00000000, // 0x74: dsll $a0,$a0,16
      0x00000000, // 0x78: daddiu $a0,$a0,%hi(ctx)
      0x00000000, // 0x7c: dsll $a0,$a0,16
      0x00000000, // 0x80: daddiu $a0,$a0,%lo(ctx)

      0x03e02825, // 0x84: move $a1, $ra
      // NOTE(review): -36 presumably rebases $ra to the calling trampoline's
      // entry point — confirm against writeTrampolines' layout.
      0x64a5ffdc, // 0x88: daddiu $a1,$a1,-36

      // JIT re-entry fn addr (patched below into $t9):
      0x00000000, // 0x8c: lui $t9,%highest(reentry)
      0x00000000, // 0x90: daddiu $t9,$t9,%higher(reentry)
      0x00000000, // 0x94: dsll $t9,$t9,16
      0x00000000, // 0x98: daddiu $t9,$t9,%hi(reentry)
      0x00000000, // 0x9c: dsll $t9,$t9,16
      0x00000000, // 0xa0: daddiu $t9,$t9,%lo(reentry)
      0x0320f809, // 0xa4: jalr $t9
      0x00000000, // 0xa8: nop
      0xdfbf00c8, // 0xac: ld ra, 200(sp)
      0xdfbe00c0, // 0xb0: ld fp, 192(sp)
      0xdfb900b8, // 0xb4: ld t9, 184(sp)
      0xdfb800b0, // 0xb8: ld t8, 176(sp)
      0xdfb700a8, // 0xbc: ld s7, 168(sp)
      0xdfb600a0, // 0xc0: ld s6, 160(sp)
      0xdfb50098, // 0xc4: ld s5, 152(sp)
      0xdfb40090, // 0xc8: ld s4, 144(sp)
      0xdfb30088, // 0xcc: ld s3, 136(sp)
      0xdfb20080, // 0xd0: ld s2, 128(sp)
      0xdfb10078, // 0xd4: ld s1, 120(sp)
      0xdfb00070, // 0xd8: ld s0, 112(sp)
      0xdfaf0068, // 0xdc: ld t3, 104(sp)
      0xdfae0060, // 0xe0: ld t2, 96(sp)
      0xdfad0058, // 0xe4: ld t1, 88(sp)
      0xdfac0050, // 0xe8: ld t0, 80(sp)
      0xdfab0048, // 0xec: ld a7, 72(sp)
      0xdfaa0040, // 0xf0: ld a6, 64(sp)
      0xdfa90038, // 0xf4: ld a5, 56(sp)
      0xdfa80030, // 0xf8: ld a4, 48(sp)
      0xdfa70028, // 0xfc: ld a3, 40(sp)
      0xdfa60020, // 0x100: ld a2, 32(sp)
      0xdfa50018, // 0x104: ld a1, 24(sp)
      0xdfa40010, // 0x108: ld a0, 16(sp)
      // $v0 is intentionally not reloaded: it holds the implementation
      // address returned by the re-entry function.
      0xdfa30008, // 0x10c: ld v1, 8(sp)
      0x67bd00d0, // 0x110: daddiu $sp,$sp,208
      0x0300f825, // 0x114: move $ra, $t8
      0x03200008, // 0x118: jr $t9
      0x0040c825, // 0x11c: move $t9, $v0 (delay slot)
  };

  const unsigned ReentryFnAddrOffset = 0x8c;  // JIT re-entry fn addr lui
  const unsigned ReentryCtxAddrOffset = 0x6c; // JIT re-entry ctx addr lui

  memcpy(ResolverWorkingMem, ResolverCode, sizeof(ResolverCode));

  // Patch the ctx-address sequence. Each +0x8000-style bias rounds the next
  // 16-bit chunk up to compensate for daddiu sign-extending its immediate.
  uint32_t ReentryCtxLUi =
      0x3c040000 | (((ReentryCtxAddr + 0x800080008000) >> 48) & 0xFFFF);
  uint32_t ReentryCtxDADDiu =
      0x64840000 | (((ReentryCtxAddr + 0x80008000) >> 32) & 0xFFFF);
  uint32_t ReentryCtxDSLL = 0x00042438; // dsll $a0,$a0,16
  uint32_t ReentryCtxDADDiu2 =
      0x64840000 | ((((ReentryCtxAddr + 0x8000) >> 16) & 0xFFFF));
  uint32_t ReentryCtxDSLL2 = 0x00042438; // dsll $a0,$a0,16
  uint32_t ReentryCtxDADDiu3 = 0x64840000 | ((ReentryCtxAddr)&0xFFFF);

  memcpy(ResolverWorkingMem + ReentryCtxAddrOffset, &ReentryCtxLUi,
         sizeof(ReentryCtxLUi));
  memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 4), &ReentryCtxDADDiu,
         sizeof(ReentryCtxDADDiu));
  memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 8), &ReentryCtxDSLL,
         sizeof(ReentryCtxDSLL));
  memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 12), &ReentryCtxDADDiu2,
         sizeof(ReentryCtxDADDiu2));
  memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 16), &ReentryCtxDSLL2,
         sizeof(ReentryCtxDSLL2));
  memcpy(ResolverWorkingMem + (ReentryCtxAddrOffset + 20), &ReentryCtxDADDiu3,
         sizeof(ReentryCtxDADDiu3));

  // Patch the re-entry-function sequence the same way (target reg $t9).
  uint32_t ReentryFnLUi =
      0x3c190000 | (((ReentryFnAddr + 0x800080008000) >> 48) & 0xFFFF);

  uint32_t ReentryFnDADDiu =
      0x67390000 | (((ReentryFnAddr + 0x80008000) >> 32) & 0xFFFF);

  uint32_t ReentryFnDSLL = 0x0019cc38; // dsll $t9,$t9,16

  uint32_t ReentryFnDADDiu2 =
      0x67390000 | (((ReentryFnAddr + 0x8000) >> 16) & 0xFFFF);

  uint32_t ReentryFnDSLL2 = 0x0019cc38; // dsll $t9,$t9,16

  uint32_t ReentryFnDADDiu3 = 0x67390000 | ((ReentryFnAddr)&0xFFFF);

  memcpy(ResolverWorkingMem + ReentryFnAddrOffset, &ReentryFnLUi,
         sizeof(ReentryFnLUi));
  memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 4), &ReentryFnDADDiu,
         sizeof(ReentryFnDADDiu));
  memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 8), &ReentryFnDSLL,
         sizeof(ReentryFnDSLL));
  memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 12), &ReentryFnDADDiu2,
         sizeof(ReentryFnDADDiu2));
  memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 16), &ReentryFnDSLL2,
         sizeof(ReentryFnDSLL2));
  memcpy(ResolverWorkingMem + (ReentryFnAddrOffset + 20), &ReentryFnDADDiu3,
         sizeof(ReentryFnDADDiu3));
}
+
+void OrcMips64::writeTrampolines(char *TrampolineBlockWorkingMem,
+ JITTargetAddress TrampolineBlockTargetAddress,
+ JITTargetAddress ResolverAddr,
+ unsigned NumTrampolines) {
+
+ uint32_t *Trampolines =
+ reinterpret_cast<uint32_t *>(TrampolineBlockWorkingMem);
+
+ uint64_t HeighestAddr = ((ResolverAddr + 0x800080008000) >> 48);
+ uint64_t HeigherAddr = ((ResolverAddr + 0x80008000) >> 32);
+ uint64_t HiAddr = ((ResolverAddr + 0x8000) >> 16);
+
+ for (unsigned I = 0; I < NumTrampolines; ++I) {
+ Trampolines[10 * I + 0] = 0x03e0c025; // move $t8,$ra
+ Trampolines[10 * I + 1] = 0x3c190000 | (HeighestAddr & 0xFFFF); // lui $t9,resolveAddr
+ Trampolines[10 * I + 2] = 0x67390000 | (HeigherAddr & 0xFFFF); // daddiu $t9,$t9,%higher(resolveAddr)
+ Trampolines[10 * I + 3] = 0x0019cc38; // dsll $t9,$t9,16
+ Trampolines[10 * I + 4] = 0x67390000 | (HiAddr & 0xFFFF); // daddiu $t9,$t9,%hi(ptr)
+ Trampolines[10 * I + 5] = 0x0019cc38; // dsll $t9,$t9,16
+ Trampolines[10 * I + 6] =
+ 0x67390000 | (ResolverAddr & 0xFFFF); // daddiu $t9,$t9,%lo(ptr)
+ Trampolines[10 * I + 7] = 0x0320f809; // jalr $t9
+ Trampolines[10 * I + 8] = 0x00000000; // nop
+ Trampolines[10 * I + 9] = 0x00000000; // nop
+ }
+}
+
+void OrcMips64::writeIndirectStubsBlock(
+ char *StubsBlockWorkingMem, JITTargetAddress StubsBlockTargetAddress,
+ JITTargetAddress PointersBlockTargetAddress, unsigned NumStubs) {
+ // Stub format is:
+ //
+ // .section __orc_stubs
+ // stub1:
+ // lui $t9,ptr1
+ // dsll $t9,$t9,16
+ // daddiu $t9,$t9,%hi(ptr)
+ // dsll $t9,$t9,16
+ // ld $t9,%lo(ptr)
+ // jr $t9
+ // stub2:
+ // lui $t9,ptr1
+ // dsll $t9,$t9,16
+ // daddiu $t9,$t9,%hi(ptr)
+ // dsll $t9,$t9,16
+ // ld $t9,%lo(ptr)
+ // jr $t9
+ //
+ // ...
+ //
+ // .section __orc_ptrs
+ // ptr1:
+ // .dword 0x0
+ // ptr2:
+ // .dword 0x0
+ //
+ // ...
+
+ assert(stubAndPointerRangesOk<OrcAArch64>(
+ StubsBlockTargetAddress, PointersBlockTargetAddress, NumStubs) &&
+ "PointersBlock is out of range");
+
+ // Populate the stubs page stubs and mark it executable.
+ uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlockWorkingMem);
+ uint64_t PtrAddr = PointersBlockTargetAddress;
+
+ for (unsigned I = 0; I < NumStubs; ++I, PtrAddr += 8) {
+ uint64_t HeighestAddr = ((PtrAddr + 0x800080008000) >> 48);
+ uint64_t HeigherAddr = ((PtrAddr + 0x80008000) >> 32);
+ uint64_t HiAddr = ((PtrAddr + 0x8000) >> 16);
+ Stub[8 * I + 0] = 0x3c190000 | (HeighestAddr & 0xFFFF); // lui $t9,ptr1
+ Stub[8 * I + 1] = 0x67390000 | (HeigherAddr & 0xFFFF); // daddiu $t9,$t9,%higher(ptr)
+ Stub[8 * I + 2] = 0x0019cc38; // dsll $t9,$t9,16
+ Stub[8 * I + 3] = 0x67390000 | (HiAddr & 0xFFFF); // daddiu $t9,$t9,%hi(ptr)
+ Stub[8 * I + 4] = 0x0019cc38; // dsll $t9,$t9,16
+ Stub[8 * I + 5] = 0xdf390000 | (PtrAddr & 0xFFFF); // ld $t9,%lo(ptr)
+ Stub[8 * I + 6] = 0x03200008; // jr $t9
+ Stub[8 * I + 7] = 0x00000000; // nop
+ }
+}
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
new file mode 100644
index 0000000000..71be8dfdc0
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/OrcV2CBindings.cpp
@@ -0,0 +1,1018 @@
+//===--------------- OrcV2CBindings.cpp - C bindings OrcV2 APIs -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/LLJIT.h"
+#include "llvm-c/Orc.h"
+#include "llvm-c/OrcEE.h"
+#include "llvm-c/TargetMachine.h"
+
+#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
+#include "llvm/ExecutionEngine/Orc/LLJIT.h"
+#include "llvm/ExecutionEngine/Orc/ObjectTransformLayer.h"
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace llvm {
+namespace orc {
+
+class InProgressLookupState;
+
// Helper giving the C bindings access to SymbolStringPtr's private raw
// pool-entry pointer (it is a friend of SymbolStringPtr). Each conversion
// below is explicit about its effect on the entry's ref count: "move"
// transfers an existing reference unchanged, "copy" adds one, and
// retain/release adjust the count by exactly one.
class OrcV2CAPIHelper {
public:
  using PoolEntry = SymbolStringPtr::PoolEntry;
  using PoolEntryPtr = SymbolStringPtr::PoolEntryPtr;

  // Move from SymbolStringPtr to PoolEntryPtr (no change in ref count).
  // S is left null by the swap, so its destructor does not drop the
  // reference now owned by the returned raw pointer.
  static PoolEntryPtr moveFromSymbolStringPtr(SymbolStringPtr S) {
    PoolEntryPtr Result = nullptr;
    std::swap(Result, S.S);
    return Result;
  }

  // Move from a PoolEntryPtr to a SymbolStringPtr (no change in ref count).
  // Assigns the raw field directly, bypassing the ref-incrementing ctor.
  static SymbolStringPtr moveToSymbolStringPtr(PoolEntryPtr P) {
    SymbolStringPtr S;
    S.S = P;
    return S;
  }

  // Copy a pool entry to a SymbolStringPtr (increments ref count).
  static SymbolStringPtr copyToSymbolStringPtr(PoolEntryPtr P) {
    return SymbolStringPtr(P);
  }

  // Borrow the raw entry pointer without touching the ref count.
  static PoolEntryPtr getRawPoolEntryPtr(const SymbolStringPtr &S) {
    return S.S;
  }

  // Net +1 on P's ref count: the ctor adds a reference, and nulling S.S
  // stops the destructor from taking it back.
  static void retainPoolEntry(PoolEntryPtr P) {
    SymbolStringPtr S(P);
    S.S = nullptr;
  }

  // Net -1 on P's ref count: S adopts P without adding a reference, then
  // releases it when destroyed.
  static void releasePoolEntry(PoolEntryPtr P) {
    SymbolStringPtr S;
    S.S = P;
  }

  // Detach the in-progress lookup from LS so it can cross the C boundary.
  static InProgressLookupState *extractLookupState(LookupState &LS) {
    return LS.IPLS.release();
  }

  // Re-attach a lookup state previously extracted above.
  static void resetLookupState(LookupState &LS, InProgressLookupState *IPLS) {
    return LS.reset(IPLS);
  }
};
+
+} // namespace orc
+} // namespace llvm
+
// Generate the standard wrap()/unwrap() casts between each C-API opaque
// reference type and its underlying C++ class.
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ExecutionSession, LLVMOrcExecutionSessionRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(SymbolStringPool, LLVMOrcSymbolStringPoolRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(OrcV2CAPIHelper::PoolEntry,
                                   LLVMOrcSymbolStringPoolEntryRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MaterializationUnit,
                                   LLVMOrcMaterializationUnitRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(MaterializationResponsibility,
                                   LLVMOrcMaterializationResponsibilityRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITDylib, LLVMOrcJITDylibRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ResourceTracker, LLVMOrcResourceTrackerRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DefinitionGenerator,
                                   LLVMOrcDefinitionGeneratorRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(InProgressLookupState, LLVMOrcLookupStateRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ThreadSafeContext,
                                   LLVMOrcThreadSafeContextRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ThreadSafeModule, LLVMOrcThreadSafeModuleRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(JITTargetMachineBuilder,
                                   LLVMOrcJITTargetMachineBuilderRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ObjectLayer, LLVMOrcObjectLayerRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRTransformLayer, LLVMOrcIRTransformLayerRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(ObjectTransformLayer,
                                   LLVMOrcObjectTransformLayerRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(DumpObjects, LLVMOrcDumpObjectsRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IndirectStubsManager,
                                   LLVMOrcIndirectStubsManagerRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LazyCallThroughManager,
                                   LLVMOrcLazyCallThroughManagerRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLJITBuilder, LLVMOrcLLJITBuilderRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLJIT, LLVMOrcLLJITRef)
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(TargetMachine, LLVMTargetMachineRef)
+
+namespace llvm {
+namespace orc {
+
// Adapter exposing a client-supplied C callback as an ORC
// DefinitionGenerator: translates every argument to its C-API equivalent,
// invokes the callback, and re-attaches the (possibly replaced) lookup
// state afterwards.
class CAPIDefinitionGenerator final : public DefinitionGenerator {
public:
  // Ctx is an opaque client pointer passed back on every call; its
  // lifetime is managed by the client.
  CAPIDefinitionGenerator(
      void *Ctx,
      LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction TryToGenerate)
      : Ctx(Ctx), TryToGenerate(TryToGenerate) {}

  Error tryToGenerate(LookupState &LS, LookupKind K, JITDylib &JD,
                      JITDylibLookupFlags JDLookupFlags,
                      const SymbolLookupSet &LookupSet) override {

    // Take the lookup state out of LS so it can be handed across the C
    // boundary via LSR.
    LLVMOrcLookupStateRef LSR = ::wrap(OrcV2CAPIHelper::extractLookupState(LS));

    // Translate the lookup kind.
    LLVMOrcLookupKind CLookupKind;
    switch (K) {
    case LookupKind::Static:
      CLookupKind = LLVMOrcLookupKindStatic;
      break;
    case LookupKind::DLSym:
      CLookupKind = LLVMOrcLookupKindDLSym;
      break;
    }

    // Translate the JITDylibSearchFlags.
    LLVMOrcJITDylibLookupFlags CJDLookupFlags;
    switch (JDLookupFlags) {
    case JITDylibLookupFlags::MatchExportedSymbolsOnly:
      CJDLookupFlags = LLVMOrcJITDylibLookupFlagsMatchExportedSymbolsOnly;
      break;
    case JITDylibLookupFlags::MatchAllSymbols:
      CJDLookupFlags = LLVMOrcJITDylibLookupFlagsMatchAllSymbols;
      break;
    }

    // Translate the lookup set. Names are passed as borrowed raw pool-entry
    // pointers (no ref-count change).
    std::vector<LLVMOrcCLookupSetElement> CLookupSet;
    CLookupSet.reserve(LookupSet.size());
    for (auto &KV : LookupSet) {
      LLVMOrcSymbolLookupFlags SLF;
      LLVMOrcSymbolStringPoolEntryRef Name =
          ::wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(KV.first));
      switch (KV.second) {
      case SymbolLookupFlags::RequiredSymbol:
        SLF = LLVMOrcSymbolLookupFlagsRequiredSymbol;
        break;
      case SymbolLookupFlags::WeaklyReferencedSymbol:
        SLF = LLVMOrcSymbolLookupFlagsWeaklyReferencedSymbol;
        break;
      }
      CLookupSet.push_back({Name, SLF});
    }

    // Run the C TryToGenerate function.
    auto Err = unwrap(TryToGenerate(::wrap(this), Ctx, &LSR, CLookupKind,
                                    ::wrap(&JD), CJDLookupFlags,
                                    CLookupSet.data(), CLookupSet.size()));

    // Restore the lookup state (the callback may have updated LSR).
    OrcV2CAPIHelper::resetLookupState(LS, ::unwrap(LSR));

    return Err;
  }

private:
  void *Ctx; // Opaque client context forwarded to TryToGenerate.
  LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction TryToGenerate;
};
+
+} // end namespace orc
+} // end namespace llvm
+
+namespace {
+
// MaterializationUnit backed by client-supplied C callbacks. Ownership of
// the opaque Ctx pointer is tracked by nulling it: materialize() hands Ctx
// to the Materialize callback and clears the member, so the destructor only
// calls Destroy(Ctx) when materialization never ran.
class OrcCAPIMaterializationUnit : public llvm::orc::MaterializationUnit {
public:
  OrcCAPIMaterializationUnit(
      std::string Name, SymbolFlagsMap InitialSymbolFlags,
      SymbolStringPtr InitSymbol, void *Ctx,
      LLVMOrcMaterializationUnitMaterializeFunction Materialize,
      LLVMOrcMaterializationUnitDiscardFunction Discard,
      LLVMOrcMaterializationUnitDestroyFunction Destroy)
      : llvm::orc::MaterializationUnit(
            Interface(std::move(InitialSymbolFlags), std::move(InitSymbol))),
        Name(std::move(Name)), Ctx(Ctx), Materialize(Materialize),
        Discard(Discard), Destroy(Destroy) {}

  ~OrcCAPIMaterializationUnit() {
    // Ctx is non-null only if materialize() was never invoked.
    if (Ctx)
      Destroy(Ctx);
  }

  StringRef getName() const override { return Name; }

  void materialize(std::unique_ptr<MaterializationResponsibility> R) override {
    // Transfer both Ctx and the responsibility to the C callback, which
    // now owns them.
    void *Tmp = Ctx;
    Ctx = nullptr;
    Materialize(Tmp, wrap(R.release()));
  }

private:
  void discard(const JITDylib &JD, const SymbolStringPtr &Name) override {
    // Name crosses the boundary as a borrowed raw pool-entry pointer.
    Discard(Ctx, wrap(&JD), wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(Name)));
  }

  std::string Name;
  void *Ctx = nullptr;
  LLVMOrcMaterializationUnitMaterializeFunction Materialize = nullptr;
  LLVMOrcMaterializationUnitDiscardFunction Discard = nullptr;
  LLVMOrcMaterializationUnitDestroyFunction Destroy = nullptr;
};
+
+static JITSymbolFlags toJITSymbolFlags(LLVMJITSymbolFlags F) {
+
+ JITSymbolFlags JSF;
+
+ if (F.GenericFlags & LLVMJITSymbolGenericFlagsExported)
+ JSF |= JITSymbolFlags::Exported;
+ if (F.GenericFlags & LLVMJITSymbolGenericFlagsWeak)
+ JSF |= JITSymbolFlags::Weak;
+ if (F.GenericFlags & LLVMJITSymbolGenericFlagsCallable)
+ JSF |= JITSymbolFlags::Callable;
+ if (F.GenericFlags & LLVMJITSymbolGenericFlagsMaterializationSideEffectsOnly)
+ JSF |= JITSymbolFlags::MaterializationSideEffectsOnly;
+
+ JSF.getTargetFlags() = F.TargetFlags;
+
+ return JSF;
+}
+
+static LLVMJITSymbolFlags fromJITSymbolFlags(JITSymbolFlags JSF) {
+ LLVMJITSymbolFlags F = {0, 0};
+ if (JSF & JITSymbolFlags::Exported)
+ F.GenericFlags |= LLVMJITSymbolGenericFlagsExported;
+ if (JSF & JITSymbolFlags::Weak)
+ F.GenericFlags |= LLVMJITSymbolGenericFlagsWeak;
+ if (JSF & JITSymbolFlags::Callable)
+ F.GenericFlags |= LLVMJITSymbolGenericFlagsCallable;
+ if (JSF & JITSymbolFlags::MaterializationSideEffectsOnly)
+ F.GenericFlags |= LLVMJITSymbolGenericFlagsMaterializationSideEffectsOnly;
+
+ F.TargetFlags = JSF.getTargetFlags();
+
+ return F;
+}
+
+static SymbolMap toSymbolMap(LLVMOrcCSymbolMapPairs Syms, size_t NumPairs) {
+ SymbolMap SM;
+ for (size_t I = 0; I != NumPairs; ++I) {
+ JITSymbolFlags Flags = toJITSymbolFlags(Syms[I].Sym.Flags);
+ SM[OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(Syms[I].Name))] =
+ JITEvaluatedSymbol(Syms[I].Sym.Address, Flags);
+ }
+ return SM;
+}
+
+static SymbolDependenceMap
+toSymbolDependenceMap(LLVMOrcCDependenceMapPairs Pairs, size_t NumPairs) {
+ SymbolDependenceMap SDM;
+ for (size_t I = 0; I != NumPairs; ++I) {
+ JITDylib *JD = unwrap(Pairs[I].JD);
+ SymbolNameSet Names;
+
+ for (size_t J = 0; J != Pairs[I].Names.Length; ++J) {
+ auto Sym = Pairs[I].Names.Symbols[J];
+ Names.insert(OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(Sym)));
+ }
+ SDM[JD] = Names;
+ }
+ return SDM;
+}
+
+} // end anonymous namespace
+
+void LLVMOrcExecutionSessionSetErrorReporter(
+ LLVMOrcExecutionSessionRef ES, LLVMOrcErrorReporterFunction ReportError,
+ void *Ctx) {
+ unwrap(ES)->setErrorReporter(
+ [=](Error Err) { ReportError(Ctx, wrap(std::move(Err))); });
+}
+
+LLVMOrcSymbolStringPoolRef
+LLVMOrcExecutionSessionGetSymbolStringPool(LLVMOrcExecutionSessionRef ES) {
+ return wrap(
+ unwrap(ES)->getExecutorProcessControl().getSymbolStringPool().get());
+}
+
+void LLVMOrcSymbolStringPoolClearDeadEntries(LLVMOrcSymbolStringPoolRef SSP) {
+ unwrap(SSP)->clearDeadEntries();
+}
+
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcExecutionSessionIntern(LLVMOrcExecutionSessionRef ES, const char *Name) {
+ return wrap(
+ OrcV2CAPIHelper::moveFromSymbolStringPtr(unwrap(ES)->intern(Name)));
+}
+
+void LLVMOrcRetainSymbolStringPoolEntry(LLVMOrcSymbolStringPoolEntryRef S) {
+ OrcV2CAPIHelper::retainPoolEntry(unwrap(S));
+}
+
+void LLVMOrcReleaseSymbolStringPoolEntry(LLVMOrcSymbolStringPoolEntryRef S) {
+ OrcV2CAPIHelper::releasePoolEntry(unwrap(S));
+}
+
+const char *LLVMOrcSymbolStringPoolEntryStr(LLVMOrcSymbolStringPoolEntryRef S) {
+ return unwrap(S)->getKey().data();
+}
+
LLVMOrcResourceTrackerRef
LLVMOrcJITDylibCreateResourceTracker(LLVMOrcJITDylibRef JD) {
  auto RT = unwrap(JD)->createResourceTracker();
  // Retain the pointer for the C API client: the local SP drops its
  // reference on return, so this extra retain keeps the raw pointer alive
  // until LLVMOrcReleaseResourceTracker is called.
  RT->Retain();
  return wrap(RT.get());
}

LLVMOrcResourceTrackerRef
LLVMOrcJITDylibGetDefaultResourceTracker(LLVMOrcJITDylibRef JD) {
  auto RT = unwrap(JD)->getDefaultResourceTracker();
  // NOTE(review): unlike CreateResourceTracker above, no extra retain is
  // taken here — presumably the default tracker is kept alive by its
  // JITDylib; confirm before relying on the returned handle's lifetime.
  return wrap(RT.get());
}

void LLVMOrcReleaseResourceTracker(LLVMOrcResourceTrackerRef RT) {
  // Adopt the raw pointer into a temporary SP and drop the extra reference
  // retained for the C client; net effect is a single release.
  ResourceTrackerSP TmpRT(unwrap(RT));
  TmpRT->Release();
}

void LLVMOrcResourceTrackerTransferTo(LLVMOrcResourceTrackerRef SrcRT,
                                      LLVMOrcResourceTrackerRef DstRT) {
  // Move everything tracked by SrcRT onto DstRT; the temporary SP keeps
  // SrcRT alive for the duration of the call.
  ResourceTrackerSP TmpRT(unwrap(SrcRT));
  TmpRT->transferTo(*unwrap(DstRT));
}

LLVMErrorRef LLVMOrcResourceTrackerRemove(LLVMOrcResourceTrackerRef RT) {
  // Remove all resources associated with RT, returning any error to the
  // C client.
  ResourceTrackerSP TmpRT(unwrap(RT));
  return wrap(TmpRT->remove());
}
+
+void LLVMOrcDisposeDefinitionGenerator(LLVMOrcDefinitionGeneratorRef DG) {
+ std::unique_ptr<DefinitionGenerator> TmpDG(unwrap(DG));
+}
+
+void LLVMOrcDisposeMaterializationUnit(LLVMOrcMaterializationUnitRef MU) {
+ std::unique_ptr<MaterializationUnit> TmpMU(unwrap(MU));
+}
+
+LLVMOrcMaterializationUnitRef LLVMOrcCreateCustomMaterializationUnit(
+ const char *Name, void *Ctx, LLVMOrcCSymbolFlagsMapPairs Syms,
+ size_t NumSyms, LLVMOrcSymbolStringPoolEntryRef InitSym,
+ LLVMOrcMaterializationUnitMaterializeFunction Materialize,
+ LLVMOrcMaterializationUnitDiscardFunction Discard,
+ LLVMOrcMaterializationUnitDestroyFunction Destroy) {
+ SymbolFlagsMap SFM;
+ for (size_t I = 0; I != NumSyms; ++I)
+ SFM[OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(Syms[I].Name))] =
+ toJITSymbolFlags(Syms[I].Flags);
+
+ auto IS = OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(InitSym));
+
+ return wrap(new OrcCAPIMaterializationUnit(
+ Name, std::move(SFM), std::move(IS), Ctx, Materialize, Discard, Destroy));
+}
+
+LLVMOrcMaterializationUnitRef
+LLVMOrcAbsoluteSymbols(LLVMOrcCSymbolMapPairs Syms, size_t NumPairs) {
+ SymbolMap SM = toSymbolMap(Syms, NumPairs);
+ return wrap(absoluteSymbols(std::move(SM)).release());
+}
+
+LLVMOrcMaterializationUnitRef LLVMOrcLazyReexports(
+ LLVMOrcLazyCallThroughManagerRef LCTM, LLVMOrcIndirectStubsManagerRef ISM,
+ LLVMOrcJITDylibRef SourceJD, LLVMOrcCSymbolAliasMapPairs CallableAliases,
+ size_t NumPairs) {
+
+ SymbolAliasMap SAM;
+ for (size_t I = 0; I != NumPairs; ++I) {
+ auto pair = CallableAliases[I];
+ JITSymbolFlags Flags = toJITSymbolFlags(pair.Entry.Flags);
+ SymbolStringPtr Name =
+ OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(pair.Entry.Name));
+ SAM[OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(pair.Name))] =
+ SymbolAliasMapEntry(Name, Flags);
+ }
+
+ return wrap(lazyReexports(*unwrap(LCTM), *unwrap(ISM), *unwrap(SourceJD),
+ std::move(SAM))
+ .release());
+}
+
+void LLVMOrcDisposeMaterializationResponsibility(
+ LLVMOrcMaterializationResponsibilityRef MR) {
+ std::unique_ptr<MaterializationResponsibility> TmpMR(unwrap(MR));
+}
+
+LLVMOrcJITDylibRef LLVMOrcMaterializationResponsibilityGetTargetDylib(
+ LLVMOrcMaterializationResponsibilityRef MR) {
+ return wrap(&unwrap(MR)->getTargetJITDylib());
+}
+
+LLVMOrcExecutionSessionRef
+LLVMOrcMaterializationResponsibilityGetExecutionSession(
+ LLVMOrcMaterializationResponsibilityRef MR) {
+ return wrap(&unwrap(MR)->getExecutionSession());
+}
+
+LLVMOrcCSymbolFlagsMapPairs LLVMOrcMaterializationResponsibilityGetSymbols(
+ LLVMOrcMaterializationResponsibilityRef MR, size_t *NumPairs) {
+
+ auto Symbols = unwrap(MR)->getSymbols();
+ LLVMOrcCSymbolFlagsMapPairs Result = static_cast<LLVMOrcCSymbolFlagsMapPairs>(
+ safe_malloc(Symbols.size() * sizeof(LLVMOrcCSymbolFlagsMapPair)));
+ size_t I = 0;
+ for (auto const &pair : Symbols) {
+ auto Name = wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(pair.first));
+ auto Flags = pair.second;
+ Result[I] = {Name, fromJITSymbolFlags(Flags)};
+ I++;
+ }
+ *NumPairs = Symbols.size();
+ return Result;
+}
+
+void LLVMOrcDisposeCSymbolFlagsMap(LLVMOrcCSymbolFlagsMapPairs Pairs) {
+ free(Pairs);
+}
+
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcMaterializationResponsibilityGetInitializerSymbol(
+ LLVMOrcMaterializationResponsibilityRef MR) {
+ auto Sym = unwrap(MR)->getInitializerSymbol();
+ return wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(Sym));
+}
+
+LLVMOrcSymbolStringPoolEntryRef *
+LLVMOrcMaterializationResponsibilityGetRequestedSymbols(
+ LLVMOrcMaterializationResponsibilityRef MR, size_t *NumSymbols) {
+
+ auto Symbols = unwrap(MR)->getRequestedSymbols();
+ LLVMOrcSymbolStringPoolEntryRef *Result =
+ static_cast<LLVMOrcSymbolStringPoolEntryRef *>(safe_malloc(
+ Symbols.size() * sizeof(LLVMOrcSymbolStringPoolEntryRef)));
+ size_t I = 0;
+ for (auto &Name : Symbols) {
+ Result[I] = wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(Name));
+ I++;
+ }
+ *NumSymbols = Symbols.size();
+ return Result;
+}
+
+void LLVMOrcDisposeSymbols(LLVMOrcSymbolStringPoolEntryRef *Symbols) {
+ free(Symbols);
+}
+
+LLVMErrorRef LLVMOrcMaterializationResponsibilityNotifyResolved(
+ LLVMOrcMaterializationResponsibilityRef MR, LLVMOrcCSymbolMapPairs Symbols,
+ size_t NumPairs) {
+ SymbolMap SM = toSymbolMap(Symbols, NumPairs);
+ return wrap(unwrap(MR)->notifyResolved(std::move(SM)));
+}
+
+LLVMErrorRef LLVMOrcMaterializationResponsibilityNotifyEmitted(
+ LLVMOrcMaterializationResponsibilityRef MR) {
+ return wrap(unwrap(MR)->notifyEmitted());
+}
+
+LLVMErrorRef LLVMOrcMaterializationResponsibilityDefineMaterializing(
+ LLVMOrcMaterializationResponsibilityRef MR,
+ LLVMOrcCSymbolFlagsMapPairs Syms, size_t NumSyms) {
+ SymbolFlagsMap SFM;
+ for (size_t I = 0; I != NumSyms; ++I)
+ SFM[OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(Syms[I].Name))] =
+ toJITSymbolFlags(Syms[I].Flags);
+
+ return wrap(unwrap(MR)->defineMaterializing(std::move(SFM)));
+}
+
+LLVMErrorRef LLVMOrcMaterializationResponsibilityReplace(
+ LLVMOrcMaterializationResponsibilityRef MR,
+ LLVMOrcMaterializationUnitRef MU) {
+ std::unique_ptr<MaterializationUnit> TmpMU(unwrap(MU));
+ return wrap(unwrap(MR)->replace(std::move(TmpMU)));
+}
+
+LLVMErrorRef LLVMOrcMaterializationResponsibilityDelegate(
+ LLVMOrcMaterializationResponsibilityRef MR,
+ LLVMOrcSymbolStringPoolEntryRef *Symbols, size_t NumSymbols,
+ LLVMOrcMaterializationResponsibilityRef *Result) {
+ SymbolNameSet Syms;
+ for (size_t I = 0; I != NumSymbols; I++) {
+ Syms.insert(OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(Symbols[I])));
+ }
+ auto OtherMR = unwrap(MR)->delegate(Syms);
+
+ if (!OtherMR) {
+ return wrap(OtherMR.takeError());
+ }
+ *Result = wrap(OtherMR->release());
+ return LLVMErrorSuccess;
+}
+
+void LLVMOrcMaterializationResponsibilityAddDependencies(
+ LLVMOrcMaterializationResponsibilityRef MR,
+ LLVMOrcSymbolStringPoolEntryRef Name,
+ LLVMOrcCDependenceMapPairs Dependencies, size_t NumPairs) {
+
+ SymbolDependenceMap SDM = toSymbolDependenceMap(Dependencies, NumPairs);
+ auto Sym = OrcV2CAPIHelper::moveToSymbolStringPtr(unwrap(Name));
+ unwrap(MR)->addDependencies(Sym, SDM);
+}
+
+void LLVMOrcMaterializationResponsibilityAddDependenciesForAll(
+ LLVMOrcMaterializationResponsibilityRef MR,
+ LLVMOrcCDependenceMapPairs Dependencies, size_t NumPairs) {
+
+ SymbolDependenceMap SDM = toSymbolDependenceMap(Dependencies, NumPairs);
+ unwrap(MR)->addDependenciesForAll(SDM);
+}
+
+void LLVMOrcMaterializationResponsibilityFailMaterialization(
+ LLVMOrcMaterializationResponsibilityRef MR) {
+ unwrap(MR)->failMaterialization();
+}
+
+void LLVMOrcIRTransformLayerEmit(LLVMOrcIRTransformLayerRef IRLayer,
+ LLVMOrcMaterializationResponsibilityRef MR,
+ LLVMOrcThreadSafeModuleRef TSM) {
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ unwrap(IRLayer)->emit(
+ std::unique_ptr<MaterializationResponsibility>(unwrap(MR)),
+ std::move(*TmpTSM));
+}
+
+LLVMOrcJITDylibRef
+LLVMOrcExecutionSessionCreateBareJITDylib(LLVMOrcExecutionSessionRef ES,
+ const char *Name) {
+ return wrap(&unwrap(ES)->createBareJITDylib(Name));
+}
+
+LLVMErrorRef
+LLVMOrcExecutionSessionCreateJITDylib(LLVMOrcExecutionSessionRef ES,
+ LLVMOrcJITDylibRef *Result,
+ const char *Name) {
+ auto JD = unwrap(ES)->createJITDylib(Name);
+ if (!JD)
+ return wrap(JD.takeError());
+ *Result = wrap(&*JD);
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcJITDylibRef
+LLVMOrcExecutionSessionGetJITDylibByName(LLVMOrcExecutionSessionRef ES,
+ const char *Name) {
+ return wrap(unwrap(ES)->getJITDylibByName(Name));
+}
+
LLVMErrorRef LLVMOrcJITDylibDefine(LLVMOrcJITDylibRef JD,
                                   LLVMOrcMaterializationUnitRef MU) {
  // Takes ownership of MU on success. On failure the unique_ptr is
  // released so the unit stays with the C client (matching the C API's
  // documented ownership contract).
  std::unique_ptr<MaterializationUnit> TmpMU(unwrap(MU));

  if (auto Err = unwrap(JD)->define(TmpMU)) {
    TmpMU.release();
    return wrap(std::move(Err));
  }
  return LLVMErrorSuccess;
}

LLVMErrorRef LLVMOrcJITDylibClear(LLVMOrcJITDylibRef JD) {
  // Remove all definitions from the dylib.
  return wrap(unwrap(JD)->clear());
}

void LLVMOrcJITDylibAddGenerator(LLVMOrcJITDylibRef JD,
                                 LLVMOrcDefinitionGeneratorRef DG) {
  // Takes ownership of DG; the dylib destroys it when torn down.
  unwrap(JD)->addGenerator(std::unique_ptr<DefinitionGenerator>(unwrap(DG)));
}
+
+LLVMOrcDefinitionGeneratorRef LLVMOrcCreateCustomCAPIDefinitionGenerator(
+ LLVMOrcCAPIDefinitionGeneratorTryToGenerateFunction F, void *Ctx) {
+ auto DG = std::make_unique<CAPIDefinitionGenerator>(Ctx, F);
+ return wrap(DG.release());
+}
+
+LLVMErrorRef LLVMOrcCreateDynamicLibrarySearchGeneratorForProcess(
+ LLVMOrcDefinitionGeneratorRef *Result, char GlobalPrefix,
+ LLVMOrcSymbolPredicate Filter, void *FilterCtx) {
+ assert(Result && "Result can not be null");
+ assert((Filter || !FilterCtx) &&
+ "if Filter is null then FilterCtx must also be null");
+
+ DynamicLibrarySearchGenerator::SymbolPredicate Pred;
+ if (Filter)
+ Pred = [=](const SymbolStringPtr &Name) -> bool {
+ return Filter(FilterCtx, wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(Name)));
+ };
+
+ auto ProcessSymsGenerator =
+ DynamicLibrarySearchGenerator::GetForCurrentProcess(GlobalPrefix, Pred);
+
+ if (!ProcessSymsGenerator) {
+ *Result = nullptr;
+ return wrap(ProcessSymsGenerator.takeError());
+ }
+
+ *Result = wrap(ProcessSymsGenerator->release());
+ return LLVMErrorSuccess;
+}
+
+LLVMErrorRef LLVMOrcCreateDynamicLibrarySearchGeneratorForPath(
+ LLVMOrcDefinitionGeneratorRef *Result, const char *FileName,
+ char GlobalPrefix, LLVMOrcSymbolPredicate Filter, void *FilterCtx) {
+ assert(Result && "Result can not be null");
+ assert(FileName && "FileName can not be null");
+ assert((Filter || !FilterCtx) &&
+ "if Filter is null then FilterCtx must also be null");
+
+ DynamicLibrarySearchGenerator::SymbolPredicate Pred;
+ if (Filter)
+ Pred = [=](const SymbolStringPtr &Name) -> bool {
+ return Filter(FilterCtx, wrap(OrcV2CAPIHelper::getRawPoolEntryPtr(Name)));
+ };
+
+ auto LibrarySymsGenerator =
+ DynamicLibrarySearchGenerator::Load(FileName, GlobalPrefix, Pred);
+
+ if (!LibrarySymsGenerator) {
+ *Result = nullptr;
+ return wrap(LibrarySymsGenerator.takeError());
+ }
+
+ *Result = wrap(LibrarySymsGenerator->release());
+ return LLVMErrorSuccess;
+}
+
+LLVMErrorRef LLVMOrcCreateStaticLibrarySearchGeneratorForPath(
+ LLVMOrcDefinitionGeneratorRef *Result, LLVMOrcObjectLayerRef ObjLayer,
+ const char *FileName, const char *TargetTriple) {
+ assert(Result && "Result can not be null");
+ assert(FileName && "Filename can not be null");
+ assert(ObjLayer && "ObjectLayer can not be null");
+
+ if (TargetTriple) {
+ auto TT = Triple(TargetTriple);
+ auto LibrarySymsGenerator =
+ StaticLibraryDefinitionGenerator::Load(*unwrap(ObjLayer), FileName, TT);
+ if (!LibrarySymsGenerator) {
+ *Result = nullptr;
+ return wrap(LibrarySymsGenerator.takeError());
+ }
+ *Result = wrap(LibrarySymsGenerator->release());
+ return LLVMErrorSuccess;
+ } else {
+ auto LibrarySymsGenerator =
+ StaticLibraryDefinitionGenerator::Load(*unwrap(ObjLayer), FileName);
+ if (!LibrarySymsGenerator) {
+ *Result = nullptr;
+ return wrap(LibrarySymsGenerator.takeError());
+ }
+ *Result = wrap(LibrarySymsGenerator->release());
+ return LLVMErrorSuccess;
+ }
+}
+
+LLVMOrcThreadSafeContextRef LLVMOrcCreateNewThreadSafeContext(void) {
+ return wrap(new ThreadSafeContext(std::make_unique<LLVMContext>()));
+}
+
+LLVMContextRef
+LLVMOrcThreadSafeContextGetContext(LLVMOrcThreadSafeContextRef TSCtx) {
+ return wrap(unwrap(TSCtx)->getContext());
+}
+
+void LLVMOrcDisposeThreadSafeContext(LLVMOrcThreadSafeContextRef TSCtx) {
+ delete unwrap(TSCtx);
+}
+
+LLVMErrorRef
+LLVMOrcThreadSafeModuleWithModuleDo(LLVMOrcThreadSafeModuleRef TSM,
+ LLVMOrcGenericIRModuleOperationFunction F,
+ void *Ctx) {
+ return wrap(unwrap(TSM)->withModuleDo(
+ [&](Module &M) { return unwrap(F(Ctx, wrap(&M))); }));
+}
+
+LLVMOrcThreadSafeModuleRef
+LLVMOrcCreateNewThreadSafeModule(LLVMModuleRef M,
+ LLVMOrcThreadSafeContextRef TSCtx) {
+ return wrap(
+ new ThreadSafeModule(std::unique_ptr<Module>(unwrap(M)), *unwrap(TSCtx)));
+}
+
+void LLVMOrcDisposeThreadSafeModule(LLVMOrcThreadSafeModuleRef TSM) {
+ delete unwrap(TSM);
+}
+
+LLVMErrorRef LLVMOrcJITTargetMachineBuilderDetectHost(
+ LLVMOrcJITTargetMachineBuilderRef *Result) {
+ assert(Result && "Result can not be null");
+
+ auto JTMB = JITTargetMachineBuilder::detectHost();
+ if (!JTMB) {
+ Result = nullptr;
+ return wrap(JTMB.takeError());
+ }
+
+ *Result = wrap(new JITTargetMachineBuilder(std::move(*JTMB)));
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcJITTargetMachineBuilderRef
+LLVMOrcJITTargetMachineBuilderCreateFromTargetMachine(LLVMTargetMachineRef TM) {
+ auto *TemplateTM = unwrap(TM);
+
+ auto JTMB =
+ std::make_unique<JITTargetMachineBuilder>(TemplateTM->getTargetTriple());
+
+ (*JTMB)
+ .setCPU(TemplateTM->getTargetCPU().str())
+ .setRelocationModel(TemplateTM->getRelocationModel())
+ .setCodeModel(TemplateTM->getCodeModel())
+ .setCodeGenOptLevel(TemplateTM->getOptLevel())
+ .setFeatures(TemplateTM->getTargetFeatureString())
+ .setOptions(TemplateTM->Options);
+
+ LLVMDisposeTargetMachine(TM);
+
+ return wrap(JTMB.release());
+}
+
+void LLVMOrcDisposeJITTargetMachineBuilder(
+ LLVMOrcJITTargetMachineBuilderRef JTMB) {
+ delete unwrap(JTMB);
+}
+
+char *LLVMOrcJITTargetMachineBuilderGetTargetTriple(
+ LLVMOrcJITTargetMachineBuilderRef JTMB) {
+ auto Tmp = unwrap(JTMB)->getTargetTriple().str();
+ char *TargetTriple = (char *)malloc(Tmp.size() + 1);
+ strcpy(TargetTriple, Tmp.c_str());
+ return TargetTriple;
+}
+
+void LLVMOrcJITTargetMachineBuilderSetTargetTriple(
+ LLVMOrcJITTargetMachineBuilderRef JTMB, const char *TargetTriple) {
+ unwrap(JTMB)->getTargetTriple() = Triple(TargetTriple);
+}
+
+LLVMErrorRef LLVMOrcObjectLayerAddObjectFile(LLVMOrcObjectLayerRef ObjLayer,
+ LLVMOrcJITDylibRef JD,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(unwrap(ObjLayer)->add(
+ *unwrap(JD), std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddObjectFileWithRT(LLVMOrcObjectLayerRef ObjLayer,
+ LLVMOrcResourceTrackerRef RT,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(
+ unwrap(ObjLayer)->add(ResourceTrackerSP(unwrap(RT)),
+ std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+void LLVMOrcObjectLayerEmit(LLVMOrcObjectLayerRef ObjLayer,
+ LLVMOrcMaterializationResponsibilityRef R,
+ LLVMMemoryBufferRef ObjBuffer) {
+ unwrap(ObjLayer)->emit(
+ std::unique_ptr<MaterializationResponsibility>(unwrap(R)),
+ std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer)));
+}
+
+void LLVMOrcDisposeObjectLayer(LLVMOrcObjectLayerRef ObjLayer) {
+ delete unwrap(ObjLayer);
+}
+
+void LLVMOrcIRTransformLayerSetTransform(
+ LLVMOrcIRTransformLayerRef IRTransformLayer,
+ LLVMOrcIRTransformLayerTransformFunction TransformFunction, void *Ctx) {
+ unwrap(IRTransformLayer)
+ ->setTransform(
+ [=](ThreadSafeModule TSM,
+ MaterializationResponsibility &R) -> Expected<ThreadSafeModule> {
+ LLVMOrcThreadSafeModuleRef TSMRef =
+ wrap(new ThreadSafeModule(std::move(TSM)));
+ if (LLVMErrorRef Err = TransformFunction(Ctx, &TSMRef, wrap(&R))) {
+ assert(!TSMRef && "TSMRef was not reset to null on error");
+ return unwrap(Err);
+ }
+ return std::move(*unwrap(TSMRef));
+ });
+}
+
+void LLVMOrcObjectTransformLayerSetTransform(
+ LLVMOrcObjectTransformLayerRef ObjTransformLayer,
+ LLVMOrcObjectTransformLayerTransformFunction TransformFunction, void *Ctx) {
+ unwrap(ObjTransformLayer)
+ ->setTransform([TransformFunction, Ctx](std::unique_ptr<MemoryBuffer> Obj)
+ -> Expected<std::unique_ptr<MemoryBuffer>> {
+ LLVMMemoryBufferRef ObjBuffer = wrap(Obj.release());
+ if (LLVMErrorRef Err = TransformFunction(Ctx, &ObjBuffer)) {
+ assert(!ObjBuffer && "ObjBuffer was not reset to null on error");
+ return unwrap(Err);
+ }
+ return std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer));
+ });
+}
+
+LLVMOrcDumpObjectsRef LLVMOrcCreateDumpObjects(const char *DumpDir,
+ const char *IdentifierOverride) {
+ assert(DumpDir && "DumpDir should not be null");
+ assert(IdentifierOverride && "IdentifierOverride should not be null");
+ return wrap(new DumpObjects(DumpDir, IdentifierOverride));
+}
+
+void LLVMOrcDisposeDumpObjects(LLVMOrcDumpObjectsRef DumpObjects) {
+ delete unwrap(DumpObjects);
+}
+
+LLVMErrorRef LLVMOrcDumpObjects_CallOperator(LLVMOrcDumpObjectsRef DumpObjects,
+ LLVMMemoryBufferRef *ObjBuffer) {
+ std::unique_ptr<MemoryBuffer> OB(unwrap(*ObjBuffer));
+ if (auto Result = (*unwrap(DumpObjects))(std::move(OB))) {
+ *ObjBuffer = wrap(Result->release());
+ return LLVMErrorSuccess;
+ } else {
+ *ObjBuffer = nullptr;
+ return wrap(Result.takeError());
+ }
+}
+
+LLVMOrcLLJITBuilderRef LLVMOrcCreateLLJITBuilder(void) {
+ return wrap(new LLJITBuilder());
+}
+
+void LLVMOrcDisposeLLJITBuilder(LLVMOrcLLJITBuilderRef Builder) {
+ delete unwrap(Builder);
+}
+
+void LLVMOrcLLJITBuilderSetJITTargetMachineBuilder(
+ LLVMOrcLLJITBuilderRef Builder, LLVMOrcJITTargetMachineBuilderRef JTMB) {
+ unwrap(Builder)->setJITTargetMachineBuilder(std::move(*unwrap(JTMB)));
+ LLVMOrcDisposeJITTargetMachineBuilder(JTMB);
+}
+
+void LLVMOrcLLJITBuilderSetObjectLinkingLayerCreator(
+ LLVMOrcLLJITBuilderRef Builder,
+ LLVMOrcLLJITBuilderObjectLinkingLayerCreatorFunction F, void *Ctx) {
+ unwrap(Builder)->setObjectLinkingLayerCreator(
+ [=](ExecutionSession &ES, const Triple &TT) {
+ auto TTStr = TT.str();
+ return std::unique_ptr<ObjectLayer>(
+ unwrap(F(Ctx, wrap(&ES), TTStr.c_str())));
+ });
+}
+
+LLVMErrorRef LLVMOrcCreateLLJIT(LLVMOrcLLJITRef *Result,
+ LLVMOrcLLJITBuilderRef Builder) {
+ assert(Result && "Result can not be null");
+
+ if (!Builder)
+ Builder = LLVMOrcCreateLLJITBuilder();
+
+ auto J = unwrap(Builder)->create();
+ LLVMOrcDisposeLLJITBuilder(Builder);
+
+ if (!J) {
+ Result = nullptr;
+ return wrap(J.takeError());
+ }
+
+ *Result = wrap(J->release());
+ return LLVMErrorSuccess;
+}
+
+LLVMErrorRef LLVMOrcDisposeLLJIT(LLVMOrcLLJITRef J) {
+ delete unwrap(J);
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcExecutionSessionRef LLVMOrcLLJITGetExecutionSession(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getExecutionSession());
+}
+
+LLVMOrcJITDylibRef LLVMOrcLLJITGetMainJITDylib(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getMainJITDylib());
+}
+
+const char *LLVMOrcLLJITGetTripleString(LLVMOrcLLJITRef J) {
+ return unwrap(J)->getTargetTriple().str().c_str();
+}
+
+char LLVMOrcLLJITGetGlobalPrefix(LLVMOrcLLJITRef J) {
+ return unwrap(J)->getDataLayout().getGlobalPrefix();
+}
+
+LLVMOrcSymbolStringPoolEntryRef
+LLVMOrcLLJITMangleAndIntern(LLVMOrcLLJITRef J, const char *UnmangledName) {
+ return wrap(OrcV2CAPIHelper::moveFromSymbolStringPtr(
+ unwrap(J)->mangleAndIntern(UnmangledName)));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddObjectFile(LLVMOrcLLJITRef J, LLVMOrcJITDylibRef JD,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(unwrap(J)->addObjectFile(
+ *unwrap(JD), std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddObjectFileWithRT(LLVMOrcLLJITRef J,
+ LLVMOrcResourceTrackerRef RT,
+ LLVMMemoryBufferRef ObjBuffer) {
+ return wrap(unwrap(J)->addObjectFile(
+ ResourceTrackerSP(unwrap(RT)),
+ std::unique_ptr<MemoryBuffer>(unwrap(ObjBuffer))));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddLLVMIRModule(LLVMOrcLLJITRef J,
+ LLVMOrcJITDylibRef JD,
+ LLVMOrcThreadSafeModuleRef TSM) {
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ return wrap(unwrap(J)->addIRModule(*unwrap(JD), std::move(*TmpTSM)));
+}
+
+LLVMErrorRef LLVMOrcLLJITAddLLVMIRModuleWithRT(LLVMOrcLLJITRef J,
+ LLVMOrcResourceTrackerRef RT,
+ LLVMOrcThreadSafeModuleRef TSM) {
+ std::unique_ptr<ThreadSafeModule> TmpTSM(unwrap(TSM));
+ return wrap(unwrap(J)->addIRModule(ResourceTrackerSP(unwrap(RT)),
+ std::move(*TmpTSM)));
+}
+
+LLVMErrorRef LLVMOrcLLJITLookup(LLVMOrcLLJITRef J,
+ LLVMOrcJITTargetAddress *Result,
+ const char *Name) {
+ assert(Result && "Result can not be null");
+
+ auto Sym = unwrap(J)->lookup(Name);
+ if (!Sym) {
+ *Result = 0;
+ return wrap(Sym.takeError());
+ }
+
+ *Result = Sym->getAddress();
+ return LLVMErrorSuccess;
+}
+
+LLVMOrcObjectLayerRef LLVMOrcLLJITGetObjLinkingLayer(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getObjLinkingLayer());
+}
+
+LLVMOrcObjectTransformLayerRef
+LLVMOrcLLJITGetObjTransformLayer(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getObjTransformLayer());
+}
+
+LLVMOrcObjectLayerRef
+LLVMOrcCreateRTDyldObjectLinkingLayerWithSectionMemoryManager(
+ LLVMOrcExecutionSessionRef ES) {
+ assert(ES && "ES must not be null");
+ return wrap(new RTDyldObjectLinkingLayer(
+ *unwrap(ES), [] { return std::make_unique<SectionMemoryManager>(); }));
+}
+
+void LLVMOrcRTDyldObjectLinkingLayerRegisterJITEventListener(
+ LLVMOrcObjectLayerRef RTDyldObjLinkingLayer,
+ LLVMJITEventListenerRef Listener) {
+ assert(RTDyldObjLinkingLayer && "RTDyldObjLinkingLayer must not be null");
+ assert(Listener && "Listener must not be null");
+ reinterpret_cast<RTDyldObjectLinkingLayer *>(unwrap(RTDyldObjLinkingLayer))
+ ->registerJITEventListener(*unwrap(Listener));
+}
+
+LLVMOrcIRTransformLayerRef LLVMOrcLLJITGetIRTransformLayer(LLVMOrcLLJITRef J) {
+ return wrap(&unwrap(J)->getIRTransformLayer());
+}
+
+const char *LLVMOrcLLJITGetDataLayoutStr(LLVMOrcLLJITRef J) {
+ return unwrap(J)->getDataLayout().getStringRepresentation().c_str();
+}
+
+LLVMOrcIndirectStubsManagerRef
+LLVMOrcCreateLocalIndirectStubsManager(const char *TargetTriple) {
+ auto builder = createLocalIndirectStubsManagerBuilder(Triple(TargetTriple));
+ return wrap(builder().release());
+}
+
+void LLVMOrcDisposeIndirectStubsManager(LLVMOrcIndirectStubsManagerRef ISM) {
+ std::unique_ptr<IndirectStubsManager> TmpISM(unwrap(ISM));
+}
+
+LLVMErrorRef LLVMOrcCreateLocalLazyCallThroughManager(
+ const char *TargetTriple, LLVMOrcExecutionSessionRef ES,
+ LLVMOrcJITTargetAddress ErrorHandlerAddr,
+ LLVMOrcLazyCallThroughManagerRef *Result) {
+ auto LCTM = createLocalLazyCallThroughManager(Triple(TargetTriple),
+ *unwrap(ES), ErrorHandlerAddr);
+
+ if (!LCTM)
+ return wrap(LCTM.takeError());
+ *Result = wrap(LCTM->release());
+ return LLVMErrorSuccess;
+}
+
+void LLVMOrcDisposeLazyCallThroughManager(
+ LLVMOrcLazyCallThroughManagerRef LCM) {
+ std::unique_ptr<LazyCallThroughManager> TmpLCM(unwrap(LCM));
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp
new file mode 100644
index 0000000000..27044f66a5
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.cpp
@@ -0,0 +1,355 @@
+//===-- RTDyldObjectLinkingLayer.cpp - RuntimeDyld backed ORC ObjectLayer -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
+#include "llvm/Object/COFF.h"
+
+namespace {
+
+using namespace llvm;
+using namespace llvm::orc;
+
+class JITDylibSearchOrderResolver : public JITSymbolResolver {
+public:
+ JITDylibSearchOrderResolver(MaterializationResponsibility &MR) : MR(MR) {}
+
+ void lookup(const LookupSet &Symbols, OnResolvedFunction OnResolved) override {
+ auto &ES = MR.getTargetJITDylib().getExecutionSession();
+ SymbolLookupSet InternedSymbols;
+
+ // Intern the requested symbols: lookup takes interned strings.
+ for (auto &S : Symbols)
+ InternedSymbols.add(ES.intern(S));
+
+ // Build an OnResolve callback to unwrap the interned strings and pass them
+ // to the OnResolved callback.
+ auto OnResolvedWithUnwrap =
+ [OnResolved = std::move(OnResolved)](
+ Expected<SymbolMap> InternedResult) mutable {
+ if (!InternedResult) {
+ OnResolved(InternedResult.takeError());
+ return;
+ }
+
+ LookupResult Result;
+ for (auto &KV : *InternedResult)
+ Result[*KV.first] = std::move(KV.second);
+ OnResolved(Result);
+ };
+
+ // Register dependencies for all symbols contained in this set.
+ auto RegisterDependencies = [&](const SymbolDependenceMap &Deps) {
+ MR.addDependenciesForAll(Deps);
+ };
+
+ JITDylibSearchOrder LinkOrder;
+ MR.getTargetJITDylib().withLinkOrderDo(
+ [&](const JITDylibSearchOrder &LO) { LinkOrder = LO; });
+ ES.lookup(LookupKind::Static, LinkOrder, InternedSymbols,
+ SymbolState::Resolved, std::move(OnResolvedWithUnwrap),
+ RegisterDependencies);
+ }
+
+ Expected<LookupSet> getResponsibilitySet(const LookupSet &Symbols) override {
+ LookupSet Result;
+
+ for (auto &KV : MR.getSymbols()) {
+ if (Symbols.count(*KV.first))
+ Result.insert(*KV.first);
+ }
+
+ return Result;
+ }
+
+private:
+ MaterializationResponsibility &MR;
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+namespace orc {
+
+char RTDyldObjectLinkingLayer::ID;
+
+using BaseT = RTTIExtends<RTDyldObjectLinkingLayer, ObjectLayer>;
+
+RTDyldObjectLinkingLayer::RTDyldObjectLinkingLayer(
+ ExecutionSession &ES, GetMemoryManagerFunction GetMemoryManager)
+ : BaseT(ES), GetMemoryManager(GetMemoryManager) {
+ ES.registerResourceManager(*this);
+}
+
+RTDyldObjectLinkingLayer::~RTDyldObjectLinkingLayer() {
+ assert(MemMgrs.empty() && "Layer destroyed with resources still attached");
+}
+
+void RTDyldObjectLinkingLayer::emit(
+ std::unique_ptr<MaterializationResponsibility> R,
+ std::unique_ptr<MemoryBuffer> O) {
+ assert(O && "Object must not be null");
+
+ auto &ES = getExecutionSession();
+
+ auto Obj = object::ObjectFile::createObjectFile(*O);
+
+ if (!Obj) {
+ getExecutionSession().reportError(Obj.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ // Collect the internal symbols from the object file: We will need to
+ // filter these later.
+ auto InternalSymbols = std::make_shared<std::set<StringRef>>();
+ {
+ for (auto &Sym : (*Obj)->symbols()) {
+
+ // Skip file symbols.
+ if (auto SymType = Sym.getType()) {
+ if (*SymType == object::SymbolRef::ST_File)
+ continue;
+ } else {
+ ES.reportError(SymType.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ Expected<uint32_t> SymFlagsOrErr = Sym.getFlags();
+ if (!SymFlagsOrErr) {
+ // TODO: Test this error.
+ ES.reportError(SymFlagsOrErr.takeError());
+ R->failMaterialization();
+ return;
+ }
+
+ // Don't include symbols that aren't global.
+ if (!(*SymFlagsOrErr & object::BasicSymbolRef::SF_Global)) {
+ if (auto SymName = Sym.getName())
+ InternalSymbols->insert(*SymName);
+ else {
+ ES.reportError(SymName.takeError());
+ R->failMaterialization();
+ return;
+ }
+ }
+ }
+ }
+
+ auto MemMgr = GetMemoryManager();
+ auto &MemMgrRef = *MemMgr;
+
+ // Switch to shared ownership of MR so that it can be captured by both
+ // lambdas below.
+ std::shared_ptr<MaterializationResponsibility> SharedR(std::move(R));
+
+ JITDylibSearchOrderResolver Resolver(*SharedR);
+
+ jitLinkForORC(
+ object::OwningBinary<object::ObjectFile>(std::move(*Obj), std::move(O)),
+ MemMgrRef, Resolver, ProcessAllSections,
+ [this, SharedR, &MemMgrRef, InternalSymbols](
+ const object::ObjectFile &Obj,
+ RuntimeDyld::LoadedObjectInfo &LoadedObjInfo,
+ std::map<StringRef, JITEvaluatedSymbol> ResolvedSymbols) {
+ return onObjLoad(*SharedR, Obj, MemMgrRef, LoadedObjInfo,
+ ResolvedSymbols, *InternalSymbols);
+ },
+ [this, SharedR, MemMgr = std::move(MemMgr)](
+ object::OwningBinary<object::ObjectFile> Obj,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo,
+ Error Err) mutable {
+ onObjEmit(*SharedR, std::move(Obj), std::move(MemMgr),
+ std::move(LoadedObjInfo), std::move(Err));
+ });
+}
+
+void RTDyldObjectLinkingLayer::registerJITEventListener(JITEventListener &L) {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ assert(!llvm::is_contained(EventListeners, &L) &&
+ "Listener has already been registered");
+ EventListeners.push_back(&L);
+}
+
+void RTDyldObjectLinkingLayer::unregisterJITEventListener(JITEventListener &L) {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ auto I = llvm::find(EventListeners, &L);
+ assert(I != EventListeners.end() && "Listener not registered");
+ EventListeners.erase(I);
+}
+
+Error RTDyldObjectLinkingLayer::onObjLoad(
+ MaterializationResponsibility &R, const object::ObjectFile &Obj,
+ RuntimeDyld::MemoryManager &MemMgr,
+ RuntimeDyld::LoadedObjectInfo &LoadedObjInfo,
+ std::map<StringRef, JITEvaluatedSymbol> Resolved,
+ std::set<StringRef> &InternalSymbols) {
+ SymbolFlagsMap ExtraSymbolsToClaim;
+ SymbolMap Symbols;
+
+ // Hack to support COFF constant pool comdats introduced during compilation:
+ // (See http://llvm.org/PR40074)
+ if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(&Obj)) {
+ auto &ES = getExecutionSession();
+
+ // For all resolved symbols that are not already in the responsibilty set:
+ // check whether the symbol is in a comdat section and if so mark it as
+ // weak.
+ for (auto &Sym : COFFObj->symbols()) {
+ // getFlags() on COFF symbols can't fail.
+ uint32_t SymFlags = cantFail(Sym.getFlags());
+ if (SymFlags & object::BasicSymbolRef::SF_Undefined)
+ continue;
+ auto Name = Sym.getName();
+ if (!Name)
+ return Name.takeError();
+ auto I = Resolved.find(*Name);
+
+ // Skip unresolved symbols, internal symbols, and symbols that are
+ // already in the responsibility set.
+ if (I == Resolved.end() || InternalSymbols.count(*Name) ||
+ R.getSymbols().count(ES.intern(*Name)))
+ continue;
+ auto Sec = Sym.getSection();
+ if (!Sec)
+ return Sec.takeError();
+ if (*Sec == COFFObj->section_end())
+ continue;
+ auto &COFFSec = *COFFObj->getCOFFSection(**Sec);
+ if (COFFSec.Characteristics & COFF::IMAGE_SCN_LNK_COMDAT)
+ I->second.setFlags(I->second.getFlags() | JITSymbolFlags::Weak);
+ }
+ }
+
+ for (auto &KV : Resolved) {
+ // Scan the symbols and add them to the Symbols map for resolution.
+
+ // We never claim internal symbols.
+ if (InternalSymbols.count(KV.first))
+ continue;
+
+ auto InternedName = getExecutionSession().intern(KV.first);
+ auto Flags = KV.second.getFlags();
+
+ // Override object flags and claim responsibility for symbols if
+ // requested.
+ if (OverrideObjectFlags || AutoClaimObjectSymbols) {
+ auto I = R.getSymbols().find(InternedName);
+
+ if (OverrideObjectFlags && I != R.getSymbols().end())
+ Flags = I->second;
+ else if (AutoClaimObjectSymbols && I == R.getSymbols().end())
+ ExtraSymbolsToClaim[InternedName] = Flags;
+ }
+
+ Symbols[InternedName] = JITEvaluatedSymbol(KV.second.getAddress(), Flags);
+ }
+
+ if (!ExtraSymbolsToClaim.empty()) {
+ if (auto Err = R.defineMaterializing(ExtraSymbolsToClaim))
+ return Err;
+
+ // If we claimed responsibility for any weak symbols but were rejected then
+ // we need to remove them from the resolved set.
+ for (auto &KV : ExtraSymbolsToClaim)
+ if (KV.second.isWeak() && !R.getSymbols().count(KV.first))
+ Symbols.erase(KV.first);
+ }
+
+ if (auto Err = R.notifyResolved(Symbols)) {
+ R.failMaterialization();
+ return Err;
+ }
+
+ if (NotifyLoaded)
+ NotifyLoaded(R, Obj, LoadedObjInfo);
+
+ return Error::success();
+}
+
+void RTDyldObjectLinkingLayer::onObjEmit(
+ MaterializationResponsibility &R,
+ object::OwningBinary<object::ObjectFile> O,
+ std::unique_ptr<RuntimeDyld::MemoryManager> MemMgr,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> LoadedObjInfo, Error Err) {
+ if (Err) {
+ getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ return;
+ }
+
+ if (auto Err = R.notifyEmitted()) {
+ getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ return;
+ }
+
+ std::unique_ptr<object::ObjectFile> Obj;
+ std::unique_ptr<MemoryBuffer> ObjBuffer;
+ std::tie(Obj, ObjBuffer) = O.takeBinary();
+
+ // Run EventListener notifyLoaded callbacks.
+ {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ for (auto *L : EventListeners)
+ L->notifyObjectLoaded(pointerToJITTargetAddress(MemMgr.get()), *Obj,
+ *LoadedObjInfo);
+ }
+
+ if (NotifyEmitted)
+ NotifyEmitted(R, std::move(ObjBuffer));
+
+ if (auto Err = R.withResourceKeyDo(
+ [&](ResourceKey K) { MemMgrs[K].push_back(std::move(MemMgr)); })) {
+ getExecutionSession().reportError(std::move(Err));
+ R.failMaterialization();
+ }
+}
+
+Error RTDyldObjectLinkingLayer::handleRemoveResources(ResourceKey K) {
+
+ std::vector<MemoryManagerUP> MemMgrsToRemove;
+
+ getExecutionSession().runSessionLocked([&] {
+ auto I = MemMgrs.find(K);
+ if (I != MemMgrs.end()) {
+ std::swap(MemMgrsToRemove, I->second);
+ MemMgrs.erase(I);
+ }
+ });
+
+ {
+ std::lock_guard<std::mutex> Lock(RTDyldLayerMutex);
+ for (auto &MemMgr : MemMgrsToRemove) {
+ for (auto *L : EventListeners)
+ L->notifyFreeingObject(pointerToJITTargetAddress(MemMgr.get()));
+ MemMgr->deregisterEHFrames();
+ }
+ }
+
+ return Error::success();
+}
+
+void RTDyldObjectLinkingLayer::handleTransferResources(ResourceKey DstKey,
+ ResourceKey SrcKey) {
+ auto I = MemMgrs.find(SrcKey);
+ if (I != MemMgrs.end()) {
+ auto &SrcMemMgrs = I->second;
+ auto &DstMemMgrs = MemMgrs[DstKey];
+ DstMemMgrs.reserve(DstMemMgrs.size() + SrcMemMgrs.size());
+ for (auto &MemMgr : SrcMemMgrs)
+ DstMemMgrs.push_back(std::move(MemMgr));
+
+ // Erase SrcKey entry using value rather than iterator I: I may have been
+ // invalidated when we looked up DstKey.
+ MemMgrs.erase(SrcKey);
+ }
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp
new file mode 100644
index 0000000000..91f2899449
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/AllocationActions.cpp
@@ -0,0 +1,44 @@
+//===----- AllocationActions.gpp -- JITLink allocation support calls -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/AllocationActions.h"
+
+namespace llvm {
+namespace orc {
+namespace shared {
+
+Expected<std::vector<WrapperFunctionCall>>
+runFinalizeActions(AllocActions &AAs) {
+ std::vector<WrapperFunctionCall> DeallocActions;
+ DeallocActions.reserve(numDeallocActions(AAs));
+
+ for (auto &AA : AAs) {
+ if (AA.Finalize)
+ if (auto Err = AA.Finalize.runWithSPSRetErrorMerged())
+ return joinErrors(std::move(Err), runDeallocActions(DeallocActions));
+
+ if (AA.Dealloc)
+ DeallocActions.push_back(std::move(AA.Dealloc));
+ }
+
+ AAs.clear();
+ return DeallocActions;
+}
+
+Error runDeallocActions(ArrayRef<WrapperFunctionCall> DAs) {
+ Error Err = Error::success();
+ while (!DAs.empty()) {
+ Err = joinErrors(std::move(Err), DAs.back().runWithSPSRetErrorMerged());
+ DAs = DAs.drop_back();
+ }
+ return Err;
+}
+
+} // namespace shared
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/OrcError.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/OrcError.cpp
new file mode 100644
index 0000000000..fdad90cbcf
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/OrcError.cpp
@@ -0,0 +1,120 @@
+//===---------------- OrcError.cpp - Error codes for ORC ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Error codes for ORC.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcError.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+
+#include <type_traits>
+
+using namespace llvm;
+using namespace llvm::orc;
+
+namespace {
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class OrcErrorCategory : public std::error_category {
+public:
+ const char *name() const noexcept override { return "orc"; }
+
+ std::string message(int condition) const override {
+ switch (static_cast<OrcErrorCode>(condition)) {
+ case OrcErrorCode::UnknownORCError:
+ return "Unknown ORC error";
+ case OrcErrorCode::DuplicateDefinition:
+ return "Duplicate symbol definition";
+ case OrcErrorCode::JITSymbolNotFound:
+ return "JIT symbol not found";
+ case OrcErrorCode::RemoteAllocatorDoesNotExist:
+ return "Remote allocator does not exist";
+ case OrcErrorCode::RemoteAllocatorIdAlreadyInUse:
+ return "Remote allocator Id already in use";
+ case OrcErrorCode::RemoteMProtectAddrUnrecognized:
+ return "Remote mprotect call references unallocated memory";
+ case OrcErrorCode::RemoteIndirectStubsOwnerDoesNotExist:
+ return "Remote indirect stubs owner does not exist";
+ case OrcErrorCode::RemoteIndirectStubsOwnerIdAlreadyInUse:
+ return "Remote indirect stubs owner Id already in use";
+ case OrcErrorCode::RPCConnectionClosed:
+ return "RPC connection closed";
+ case OrcErrorCode::RPCCouldNotNegotiateFunction:
+ return "Could not negotiate RPC function";
+ case OrcErrorCode::RPCResponseAbandoned:
+ return "RPC response abandoned";
+ case OrcErrorCode::UnexpectedRPCCall:
+ return "Unexpected RPC call";
+ case OrcErrorCode::UnexpectedRPCResponse:
+ return "Unexpected RPC response";
+ case OrcErrorCode::UnknownErrorCodeFromRemote:
+ return "Unknown error returned from remote RPC function "
+ "(Use StringError to get error message)";
+ case OrcErrorCode::UnknownResourceHandle:
+ return "Unknown resource handle";
+ case OrcErrorCode::MissingSymbolDefinitions:
+ return "MissingSymbolsDefinitions";
+ case OrcErrorCode::UnexpectedSymbolDefinitions:
+ return "UnexpectedSymbolDefinitions";
+ }
+ llvm_unreachable("Unhandled error code");
+ }
+};
+
+static ManagedStatic<OrcErrorCategory> OrcErrCat;
+} // namespace
+
+namespace llvm {
+namespace orc {
+
+char DuplicateDefinition::ID = 0;
+char JITSymbolNotFound::ID = 0;
+
+std::error_code orcError(OrcErrorCode ErrCode) {
+ typedef std::underlying_type<OrcErrorCode>::type UT;
+ return std::error_code(static_cast<UT>(ErrCode), *OrcErrCat);
+}
+
+DuplicateDefinition::DuplicateDefinition(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+std::error_code DuplicateDefinition::convertToErrorCode() const {
+ return orcError(OrcErrorCode::DuplicateDefinition);
+}
+
+void DuplicateDefinition::log(raw_ostream &OS) const {
+ OS << "Duplicate definition of symbol '" << SymbolName << "'";
+}
+
+const std::string &DuplicateDefinition::getSymbolName() const {
+ return SymbolName;
+}
+
+JITSymbolNotFound::JITSymbolNotFound(std::string SymbolName)
+ : SymbolName(std::move(SymbolName)) {}
+
+std::error_code JITSymbolNotFound::convertToErrorCode() const {
+ typedef std::underlying_type<OrcErrorCode>::type UT;
+ return std::error_code(static_cast<UT>(OrcErrorCode::JITSymbolNotFound),
+ *OrcErrCat);
+}
+
+void JITSymbolNotFound::log(raw_ostream &OS) const {
+ OS << "Could not find symbol '" << SymbolName << "'";
+}
+
+const std::string &JITSymbolNotFound::getSymbolName() const {
+ return SymbolName;
+}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp
new file mode 100644
index 0000000000..5eae33121e
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/OrcRTBridge.cpp
@@ -0,0 +1,47 @@
+//===------ OrcRTBridge.cpp - Executor functions for bootstrap -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+
+namespace llvm {
+namespace orc {
+namespace rt {
+
+const char *SimpleExecutorDylibManagerInstanceName =
+ "__llvm_orc_SimpleExecutorDylibManager_Instance";
+const char *SimpleExecutorDylibManagerOpenWrapperName =
+ "__llvm_orc_SimpleExecutorDylibManager_open_wrapper";
+const char *SimpleExecutorDylibManagerLookupWrapperName =
+ "__llvm_orc_SimpleExecutorDylibManager_lookup_wrapper";
+const char *SimpleExecutorMemoryManagerInstanceName =
+ "__llvm_orc_SimpleExecutorMemoryManager_Instance";
+const char *SimpleExecutorMemoryManagerReserveWrapperName =
+ "__llvm_orc_SimpleExecutorMemoryManager_reserve_wrapper";
+const char *SimpleExecutorMemoryManagerFinalizeWrapperName =
+ "__llvm_orc_SimpleExecutorMemoryManager_finalize_wrapper";
+const char *SimpleExecutorMemoryManagerDeallocateWrapperName =
+ "__llvm_orc_SimpleExecutorMemoryManager_deallocate_wrapper";
+const char *MemoryWriteUInt8sWrapperName =
+ "__llvm_orc_bootstrap_mem_write_uint8s_wrapper";
+const char *MemoryWriteUInt16sWrapperName =
+ "__llvm_orc_bootstrap_mem_write_uint16s_wrapper";
+const char *MemoryWriteUInt32sWrapperName =
+ "__llvm_orc_bootstrap_mem_write_uint32s_wrapper";
+const char *MemoryWriteUInt64sWrapperName =
+ "__llvm_orc_bootstrap_mem_write_uint64s_wrapper";
+const char *MemoryWriteBuffersWrapperName =
+ "__llvm_orc_bootstrap_mem_write_buffers_wrapper";
+const char *RegisterEHFrameSectionWrapperName =
+ "__llvm_orc_bootstrap_register_ehframe_section_wrapper";
+const char *DeregisterEHFrameSectionWrapperName =
+ "__llvm_orc_bootstrap_deregister_ehframe_section_wrapper";
+const char *RunAsMainWrapperName = "__llvm_orc_bootstrap_run_as_main_wrapper";
+
+} // end namespace rt
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp
new file mode 100644
index 0000000000..64fc717b7b
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.cpp
@@ -0,0 +1,250 @@
+//===------ SimpleRemoteEPCUtils.cpp - Utils for Simple Remote EPC --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Message definitions and other utilities for SimpleRemoteEPC and
+// SimpleRemoteEPCServer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Shared/SimpleRemoteEPCUtils.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#else
+#include <io.h>
+#endif
+
+namespace {
+
// Wire layout of a SimpleRemoteEPC message header as sent over a file
// descriptor: four back-to-back little-endian uint64_t fields.
struct FDMsgHeader {
  static constexpr unsigned MsgSizeOffset = 0; // total size, header included
  static constexpr unsigned OpCOffset = MsgSizeOffset + sizeof(uint64_t);
  static constexpr unsigned SeqNoOffset = OpCOffset + sizeof(uint64_t);
  static constexpr unsigned TagAddrOffset = SeqNoOffset + sizeof(uint64_t);
  static constexpr unsigned Size = TagAddrOffset + sizeof(uint64_t);
};
+
+} // namespace
+
+namespace llvm {
+namespace orc {
+namespace SimpleRemoteEPCDefaultBootstrapSymbolNames {
+
+const char *ExecutorSessionObjectName =
+ "__llvm_orc_SimpleRemoteEPC_dispatch_ctx";
+const char *DispatchFnName = "__llvm_orc_SimpleRemoteEPC_dispatch_fn";
+
+} // end namespace SimpleRemoteEPCDefaultBootstrapSymbolNames
+
+SimpleRemoteEPCTransportClient::~SimpleRemoteEPCTransportClient() {}
+SimpleRemoteEPCTransport::~SimpleRemoteEPCTransport() {}
+
// Create an FD-based transport that reads messages from InFD and writes to
// OutFD on behalf of client C. Requires thread support, since incoming
// messages are serviced on a dedicated listener thread (see start()).
Expected<std::unique_ptr<FDSimpleRemoteEPCTransport>>
FDSimpleRemoteEPCTransport::Create(SimpleRemoteEPCTransportClient &C, int InFD,
                                   int OutFD) {
#if LLVM_ENABLE_THREADS
  if (InFD == -1)
    return make_error<StringError>("Invalid input file descriptor " +
                                       Twine(InFD),
                                   inconvertibleErrorCode());
  if (OutFD == -1)
    return make_error<StringError>("Invalid output file descriptor " +
                                       Twine(OutFD),
                                   inconvertibleErrorCode());
  // The constructor is not publicly accessible, so allocate directly rather
  // than via std::make_unique.
  std::unique_ptr<FDSimpleRemoteEPCTransport> FDT(
      new FDSimpleRemoteEPCTransport(C, InFD, OutFD));
  return std::move(FDT);
#else
  return make_error<StringError>("FD-based SimpleRemoteEPC transport requires "
                                 "thread support, but llvm was built with "
                                 "LLVM_ENABLE_THREADS=Off",
                                 inconvertibleErrorCode());
#endif
}
+
+FDSimpleRemoteEPCTransport::~FDSimpleRemoteEPCTransport() {
+#if LLVM_ENABLE_THREADS
+ ListenerThread.join();
+#endif
+}
+
// Launch the listener thread that services incoming messages. With
// LLVM_ENABLE_THREADS=Off this path is unreachable, since Create() refuses
// to construct the transport in that configuration.
Error FDSimpleRemoteEPCTransport::start() {
#if LLVM_ENABLE_THREADS
  ListenerThread = std::thread([this]() { listenLoop(); });
  return Error::success();
#endif
  llvm_unreachable("Should not be called with LLVM_ENABLE_THREADS=Off");
}
+
// Serialize one message (fixed-size little-endian header followed by the raw
// argument bytes) and write it to OutFD. The writes are performed under M so
// that concurrent senders cannot interleave their bytes on the stream.
Error FDSimpleRemoteEPCTransport::sendMessage(SimpleRemoteEPCOpcode OpC,
                                              uint64_t SeqNo,
                                              ExecutorAddr TagAddr,
                                              ArrayRef<char> ArgBytes) {
  char HeaderBuffer[FDMsgHeader::Size];

  // The MsgSize field counts the header itself plus the argument payload.
  *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::MsgSizeOffset)) =
      FDMsgHeader::Size + ArgBytes.size();
  *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::OpCOffset)) =
      static_cast<uint64_t>(OpC);
  *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::SeqNoOffset)) = SeqNo;
  *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::TagAddrOffset)) =
      TagAddr.getValue();

  std::lock_guard<std::mutex> Lock(M);
  if (Disconnected)
    return make_error<StringError>("FD-transport disconnected",
                                   inconvertibleErrorCode());
  if (int ErrNo = writeBytes(HeaderBuffer, FDMsgHeader::Size))
    return errorCodeToError(std::error_code(ErrNo, std::generic_category()));
  if (int ErrNo = writeBytes(ArgBytes.data(), ArgBytes.size()))
    return errorCodeToError(std::error_code(ErrNo, std::generic_category()));
  return Error::success();
}
+
// Mark the transport disconnected (failing subsequent sendMessage calls) and
// close both descriptors. close() is retried while it fails, stopping once
// the descriptor is gone (EBADF).
// NOTE(review): Disconnected is read/written here without holding M, while
// sendMessage/readBytes access it under the lock — presumably tolerated by
// the shutdown sequence; confirm against callers.
void FDSimpleRemoteEPCTransport::disconnect() {
  if (Disconnected)
    return; // Return if already disconnected.

  Disconnected = true;
  // When InFD == OutFD the two names alias one descriptor; close it once.
  bool CloseOutFD = InFD != OutFD;

  // Close InFD.
  while (close(InFD) == -1) {
    if (errno == EBADF)
      break;
  }

  // Close OutFD.
  if (CloseOutFD) {
    while (close(OutFD) == -1) {
      if (errno == EBADF)
        break;
    }
  }
}
+
+static Error makeUnexpectedEOFError() {
+ return make_error<StringError>("Unexpected end-of-file",
+ inconvertibleErrorCode());
+}
+
// Read exactly Size bytes from InFD into Dst, retrying on EAGAIN/EINTR.
// If IsEOF is non-null and the stream ends cleanly *before any byte* of this
// read, *IsEOF is set and success returned; EOF mid-read is an error. A read
// error observed after disconnect() has run is also reported as EOF.
Error FDSimpleRemoteEPCTransport::readBytes(char *Dst, size_t Size,
                                            bool *IsEOF) {
  assert(Dst && "Attempt to read into null.");
  ssize_t Completed = 0;
  while (Completed < static_cast<ssize_t>(Size)) {
    ssize_t Read = ::read(InFD, Dst + Completed, Size - Completed);
    if (Read <= 0) {
      auto ErrNo = errno;
      if (Read == 0) {
        // End-of-file: clean only if nothing of this request was read yet.
        if (Completed == 0 && IsEOF) {
          *IsEOF = true;
          return Error::success();
        } else
          return makeUnexpectedEOFError();
      } else if (ErrNo == EAGAIN || ErrNo == EINTR)
        continue;
      else {
        std::lock_guard<std::mutex> Lock(M);
        if (Disconnected && IsEOF) { // disconnect called, pretend this is EOF.
          *IsEOF = true;
          return Error::success();
        }
        return errorCodeToError(
            std::error_code(ErrNo, std::generic_category()));
      }
    }
    Completed += Read;
  }
  return Error::success();
}
+
+int FDSimpleRemoteEPCTransport::writeBytes(const char *Src, size_t Size) {
+ assert(Src && "Attempt to append from null.");
+ ssize_t Completed = 0;
+ while (Completed < static_cast<ssize_t>(Size)) {
+ ssize_t Written = ::write(OutFD, Src + Completed, Size - Completed);
+ if (Written < 0) {
+ auto ErrNo = errno;
+ if (ErrNo == EAGAIN || ErrNo == EINTR)
+ continue;
+ else
+ return ErrNo;
+ }
+ Completed += Written;
+ }
+ return 0;
+}
+
// Listener-thread body: repeatedly read a header plus argument payload from
// InFD, decode it, and hand the message to the client, until EOF, an error,
// or an EndSession request. On exit the transport is disconnected and the
// client is notified with any accumulated error.
void FDSimpleRemoteEPCTransport::listenLoop() {
  Error Err = Error::success();
  do {

    char HeaderBuffer[FDMsgHeader::Size];
    // Read the header buffer.
    {
      bool IsEOF = false;
      if (auto Err2 = readBytes(HeaderBuffer, FDMsgHeader::Size, &IsEOF)) {
        Err = joinErrors(std::move(Err), std::move(Err2));
        break;
      }
      // Clean EOF between messages: normal shutdown of the session.
      if (IsEOF)
        break;
    }

    // Decode header buffer.
    uint64_t MsgSize;
    SimpleRemoteEPCOpcode OpC;
    uint64_t SeqNo;
    ExecutorAddr TagAddr;

    MsgSize =
        *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::MsgSizeOffset));
    OpC = static_cast<SimpleRemoteEPCOpcode>(static_cast<uint64_t>(
        *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::OpCOffset))));
    SeqNo =
        *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::SeqNoOffset));
    TagAddr.setValue(
        *((support::ulittle64_t *)(HeaderBuffer + FDMsgHeader::TagAddrOffset)));

    // MsgSize counts the header too, so it can never legitimately be smaller
    // than the header.
    if (MsgSize < FDMsgHeader::Size) {
      Err = joinErrors(std::move(Err),
                       make_error<StringError>("Message size too small",
                                               inconvertibleErrorCode()));
      break;
    }

    // Read the argument bytes.
    SimpleRemoteEPCArgBytesVector ArgBytes;
    ArgBytes.resize(MsgSize - FDMsgHeader::Size);
    if (auto Err2 = readBytes(ArgBytes.data(), ArgBytes.size())) {
      Err = joinErrors(std::move(Err), std::move(Err2));
      break;
    }

    if (auto Action = C.handleMessage(OpC, SeqNo, TagAddr, ArgBytes)) {
      if (*Action == SimpleRemoteEPCTransportClient::EndSession)
        break;
    } else {
      Err = joinErrors(std::move(Err), Action.takeError());
      break;
    }
  } while (true);

  // Attempt to close FDs, set Disconnected to true so that subsequent
  // sendMessage calls fail.
  disconnect();

  // Call up to the client to handle the disconnection.
  C.handleDisconnect(std::move(Err));
}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/ya.make
new file mode 100644
index 0000000000..797b646326
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared/ya.make
@@ -0,0 +1,29 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm14
+ contrib/libs/llvm14/lib/Support
+)
+
+ADDINCL(
+ contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ AllocationActions.cpp
+ OrcError.cpp
+ OrcRTBridge.cpp
+ SimpleRemoteEPCUtils.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp
new file mode 100644
index 0000000000..47364a92a4
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/SimpleRemoteEPC.cpp
@@ -0,0 +1,406 @@
+//===------- SimpleRemoteEPC.cpp -- Simple remote executor control --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/SimpleRemoteEPC.h"
+#include "llvm/ExecutionEngine/Orc/EPCGenericJITLinkMemoryManager.h"
+#include "llvm/ExecutionEngine/Orc/EPCGenericMemoryAccess.h"
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+
// Destructor: in debug builds, verify that disconnection completed before
// destruction (handleDisconnect sets Disconnected under SimpleRemoteEPCMutex).
SimpleRemoteEPC::~SimpleRemoteEPC() {
#ifndef NDEBUG
  std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
  assert(Disconnected && "Destroyed without disconnection");
#endif // NDEBUG
}
+
// Ask the remote dylib manager to open DylibPath (with mode flags 0) and
// return the handle used for subsequent symbol lookups.
Expected<tpctypes::DylibHandle>
SimpleRemoteEPC::loadDylib(const char *DylibPath) {
  return DylibMgr->open(DylibPath, 0);
}
+
+Expected<std::vector<tpctypes::LookupResult>>
+SimpleRemoteEPC::lookupSymbols(ArrayRef<LookupRequest> Request) {
+ std::vector<tpctypes::LookupResult> Result;
+
+ for (auto &Element : Request) {
+ if (auto R = DylibMgr->lookup(Element.Handle, Element.Symbols)) {
+ Result.push_back({});
+ Result.back().reserve(R->size());
+ for (auto Addr : *R)
+ Result.back().push_back(Addr.getValue());
+ } else
+ return R.takeError();
+ }
+ return std::move(Result);
+}
+
// Invoke the remote function at MainFnAddr as a main()-style entry point with
// Args, via the run-as-main wrapper discovered during setup(). Returns the
// remote entry point's exit code.
Expected<int32_t> SimpleRemoteEPC::runAsMain(ExecutorAddr MainFnAddr,
                                             ArrayRef<std::string> Args) {
  int64_t Result = 0;
  if (auto Err = callSPSWrapper<rt::SPSRunAsMainSignature>(
          RunAsMainAddr, Result, ExecutorAddr(MainFnAddr), Args))
    return std::move(Err);
  return Result;
}
+
// Send a CallWrapper message for WrapperFnAddr and register OnComplete (keyed
// by a fresh sequence number) to receive the result. If the send fails, the
// handler is failed with an out-of-band error — taking care over the race
// with handleDisconnect described inline.
void SimpleRemoteEPC::callWrapperAsync(ExecutorAddr WrapperFnAddr,
                                       IncomingWFRHandler OnComplete,
                                       ArrayRef<char> ArgBuffer) {
  uint64_t SeqNo;
  {
    std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
    SeqNo = getNextSeqNo();
    assert(!PendingCallWrapperResults.count(SeqNo) && "SeqNo already in use");
    PendingCallWrapperResults[SeqNo] = std::move(OnComplete);
  }

  if (auto Err = sendMessage(SimpleRemoteEPCOpcode::CallWrapper, SeqNo,
                             WrapperFnAddr, ArgBuffer)) {
    IncomingWFRHandler H;

    // We just registered OnComplete, but there may be a race between this
    // thread returning from sendMessage and handleDisconnect being called from
    // the transport's listener thread. If handleDisconnect gets there first
    // then it will have failed 'H' for us. If we get there first (or if
    // handleDisconnect already ran) then we need to take care of it.
    {
      std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
      auto I = PendingCallWrapperResults.find(SeqNo);
      if (I != PendingCallWrapperResults.end()) {
        H = std::move(I->second);
        PendingCallWrapperResults.erase(I);
      }
    }

    if (H)
      H(shared::WrapperFunctionResult::createOutOfBandError("disconnecting"));

    getExecutionSession().reportError(std::move(Err));
  }
}
+
// Shut down the transport and dispatcher, then block until the listener
// thread has run handleDisconnect (which sets Disconnected and signals
// DisconnectCV). Returns any error accumulated during disconnection.
Error SimpleRemoteEPC::disconnect() {
  T->disconnect();
  D->shutdown();
  std::unique_lock<std::mutex> Lock(SimpleRemoteEPCMutex);
  DisconnectCV.wait(Lock, [this] { return Disconnected; });
  return std::move(DisconnectErr);
}
+
// Transport callback: validate and dispatch one incoming message to the
// matching handler. Returns EndSession for a Hangup (after disconnecting the
// transport), ContinueSession otherwise, or an error for unknown opcodes or
// handler failures.
Expected<SimpleRemoteEPCTransportClient::HandleMessageAction>
SimpleRemoteEPC::handleMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
                               ExecutorAddr TagAddr,
                               SimpleRemoteEPCArgBytesVector ArgBytes) {

  LLVM_DEBUG({
    dbgs() << "SimpleRemoteEPC::handleMessage: opc = ";
    switch (OpC) {
    case SimpleRemoteEPCOpcode::Setup:
      dbgs() << "Setup";
      assert(SeqNo == 0 && "Non-zero SeqNo for Setup?");
      assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Setup?");
      break;
    case SimpleRemoteEPCOpcode::Hangup:
      dbgs() << "Hangup";
      assert(SeqNo == 0 && "Non-zero SeqNo for Hangup?");
      assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Hangup?");
      break;
    case SimpleRemoteEPCOpcode::Result:
      dbgs() << "Result";
      assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Result?");
      break;
    case SimpleRemoteEPCOpcode::CallWrapper:
      dbgs() << "CallWrapper";
      break;
    }
    dbgs() << ", seqno = " << SeqNo
           << ", tag-addr = " << formatv("{0:x}", TagAddr.getValue())
           << ", arg-buffer = " << formatv("{0:x}", ArgBytes.size())
           << " bytes\n";
  });

  // Reject opcodes beyond the known range before switching on them.
  using UT = std::underlying_type_t<SimpleRemoteEPCOpcode>;
  if (static_cast<UT>(OpC) > static_cast<UT>(SimpleRemoteEPCOpcode::LastOpC))
    return make_error<StringError>("Unexpected opcode",
                                   inconvertibleErrorCode());

  switch (OpC) {
  case SimpleRemoteEPCOpcode::Setup:
    if (auto Err = handleSetup(SeqNo, TagAddr, std::move(ArgBytes)))
      return std::move(Err);
    break;
  case SimpleRemoteEPCOpcode::Hangup:
    T->disconnect();
    if (auto Err = handleHangup(std::move(ArgBytes)))
      return std::move(Err);
    return EndSession;
  case SimpleRemoteEPCOpcode::Result:
    if (auto Err = handleResult(SeqNo, TagAddr, std::move(ArgBytes)))
      return std::move(Err);
    break;
  case SimpleRemoteEPCOpcode::CallWrapper:
    handleCallWrapper(SeqNo, TagAddr, std::move(ArgBytes));
    break;
  }
  return ContinueSession;
}
+
// Transport callback invoked when the connection goes down: fail every
// pending call-wrapper handler with an out-of-band error, record Err, and
// wake anyone blocked in disconnect().
void SimpleRemoteEPC::handleDisconnect(Error Err) {
  LLVM_DEBUG({
    dbgs() << "SimpleRemoteEPC::handleDisconnect: "
           << (Err ? "failure" : "success") << "\n";
  });

  PendingCallWrapperResultsMap TmpPending;

  // Swap the pending map out under the lock, then run the handlers outside
  // it, so handler code cannot deadlock against this mutex.
  {
    std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
    std::swap(TmpPending, PendingCallWrapperResults);
  }

  for (auto &KV : TmpPending)
    KV.second(
        shared::WrapperFunctionResult::createOutOfBandError("disconnecting"));

  std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
  DisconnectErr = joinErrors(std::move(DisconnectErr), std::move(Err));
  Disconnected = true;
  DisconnectCV.notify_all();
}
+
// Default memory-manager factory: bind the generic EPC JITLink memory manager
// to the executor-side SimpleExecutorMemoryManager via its bootstrap-symbol
// entry points (instance, reserve, finalize, deallocate).
Expected<std::unique_ptr<jitlink::JITLinkMemoryManager>>
SimpleRemoteEPC::createDefaultMemoryManager(SimpleRemoteEPC &SREPC) {
  EPCGenericJITLinkMemoryManager::SymbolAddrs SAs;
  if (auto Err = SREPC.getBootstrapSymbols(
          {{SAs.Allocator, rt::SimpleExecutorMemoryManagerInstanceName},
           {SAs.Reserve, rt::SimpleExecutorMemoryManagerReserveWrapperName},
           {SAs.Finalize, rt::SimpleExecutorMemoryManagerFinalizeWrapperName},
           {SAs.Deallocate,
            rt::SimpleExecutorMemoryManagerDeallocateWrapperName}}))
    return std::move(Err);

  return std::make_unique<EPCGenericJITLinkMemoryManager>(SREPC, SAs);
}
+
// Default memory-access factory. Currently yields a null accessor (no default
// remote memory access is wired up here); callers needing memory access must
// supply their own CreateMemoryAccess in Setup.
Expected<std::unique_ptr<ExecutorProcessControl::MemoryAccess>>
SimpleRemoteEPC::createDefaultMemoryAccess(SimpleRemoteEPC &SREPC) {
  return nullptr;
}
+
// Forward an outgoing message to the transport, with debug logging. Setup
// messages only ever travel executor -> controller, hence the assertion.
Error SimpleRemoteEPC::sendMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
                                   ExecutorAddr TagAddr,
                                   ArrayRef<char> ArgBytes) {
  assert(OpC != SimpleRemoteEPCOpcode::Setup &&
         "SimpleRemoteEPC sending Setup message? That's the wrong direction.");

  LLVM_DEBUG({
    dbgs() << "SimpleRemoteEPC::sendMessage: opc = ";
    switch (OpC) {
    case SimpleRemoteEPCOpcode::Hangup:
      dbgs() << "Hangup";
      assert(SeqNo == 0 && "Non-zero SeqNo for Hangup?");
      assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Hangup?");
      break;
    case SimpleRemoteEPCOpcode::Result:
      dbgs() << "Result";
      assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Result?");
      break;
    case SimpleRemoteEPCOpcode::CallWrapper:
      dbgs() << "CallWrapper";
      break;
    default:
      llvm_unreachable("Invalid opcode");
    }
    dbgs() << ", seqno = " << SeqNo
           << ", tag-addr = " << formatv("{0:x}", TagAddr.getValue())
           << ", arg-buffer = " << formatv("{0:x}", ArgBytes.size())
           << " bytes\n";
  });
  auto Err = T->sendMessage(OpC, SeqNo, TagAddr, ArgBytes);
  LLVM_DEBUG({
    if (Err)
      dbgs() << "  \\--> SimpleRemoteEPC::sendMessage failed\n";
  });
  return Err;
}
+
+Error SimpleRemoteEPC::handleSetup(uint64_t SeqNo, ExecutorAddr TagAddr,
+ SimpleRemoteEPCArgBytesVector ArgBytes) {
+ if (SeqNo != 0)
+ return make_error<StringError>("Setup packet SeqNo not zero",
+ inconvertibleErrorCode());
+
+ if (TagAddr)
+ return make_error<StringError>("Setup packet TagAddr not zero",
+ inconvertibleErrorCode());
+
+ std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
+ auto I = PendingCallWrapperResults.find(0);
+ assert(PendingCallWrapperResults.size() == 1 &&
+ I != PendingCallWrapperResults.end() &&
+ "Setup message handler not connectly set up");
+ auto SetupMsgHandler = std::move(I->second);
+ PendingCallWrapperResults.erase(I);
+
+ auto WFR =
+ shared::WrapperFunctionResult::copyFrom(ArgBytes.data(), ArgBytes.size());
+ SetupMsgHandler(std::move(WFR));
+ return Error::success();
+}
+
// Perform the controller side of the SimpleRemoteEPC handshake: register a
// handler for the executor's Setup packet under sequence number 0, start the
// transport, wait for the packet, then populate the target triple, page size
// and bootstrap symbols, and construct the dylib manager, memory manager and
// memory-access objects (using defaults from S where none were supplied).
Error SimpleRemoteEPC::setup(Setup S) {
  using namespace SimpleRemoteEPCDefaultBootstrapSymbolNames;

  std::promise<MSVCPExpected<SimpleRemoteEPCExecutorInfo>> EIP;
  auto EIF = EIP.get_future();

  // Prepare a handler for the setup packet.
  PendingCallWrapperResults[0] =
    RunInPlace()(
      [&](shared::WrapperFunctionResult SetupMsgBytes) {
        if (const char *ErrMsg = SetupMsgBytes.getOutOfBandError()) {
          EIP.set_value(
              make_error<StringError>(ErrMsg, inconvertibleErrorCode()));
          return;
        }
        using SPSSerialize =
            shared::SPSArgList<shared::SPSSimpleRemoteEPCExecutorInfo>;
        shared::SPSInputBuffer IB(SetupMsgBytes.data(), SetupMsgBytes.size());
        SimpleRemoteEPCExecutorInfo EI;
        if (SPSSerialize::deserialize(IB, EI))
          EIP.set_value(EI);
        else
          EIP.set_value(make_error<StringError>(
              "Could not deserialize setup message", inconvertibleErrorCode()));
      });

  // Start the transport.
  if (auto Err = T->start())
    return Err;

  // Wait for setup packet to arrive.
  auto EI = EIF.get();
  if (!EI) {
    T->disconnect();
    return EI.takeError();
  }

  LLVM_DEBUG({
    dbgs() << "SimpleRemoteEPC received setup message:\n"
           << "  Triple: " << EI->TargetTriple << "\n"
           << "  Page size: " << EI->PageSize << "\n"
           << "  Bootstrap symbols:\n";
    for (const auto &KV : EI->BootstrapSymbols)
      dbgs() << "    " << KV.first() << ": "
             << formatv("{0:x16}", KV.second.getValue()) << "\n";
  });
  TargetTriple = Triple(EI->TargetTriple);
  PageSize = EI->PageSize;
  BootstrapSymbols = std::move(EI->BootstrapSymbols);

  // Resolve the dispatch context/function and run-as-main wrapper addresses.
  if (auto Err = getBootstrapSymbols(
          {{JDI.JITDispatchContext, ExecutorSessionObjectName},
           {JDI.JITDispatchFunction, DispatchFnName},
           {RunAsMainAddr, rt::RunAsMainWrapperName}}))
    return Err;

  if (auto DM =
          EPCGenericDylibManager::CreateWithDefaultBootstrapSymbols(*this))
    DylibMgr = std::make_unique<EPCGenericDylibManager>(std::move(*DM));
  else
    return DM.takeError();

  // Set a default CreateMemoryManager if none is specified.
  if (!S.CreateMemoryManager)
    S.CreateMemoryManager = createDefaultMemoryManager;

  if (auto MemMgr = S.CreateMemoryManager(*this)) {
    OwnedMemMgr = std::move(*MemMgr);
    this->MemMgr = OwnedMemMgr.get();
  } else
    return MemMgr.takeError();

  // Set a default CreateMemoryAccess if none is specified.
  if (!S.CreateMemoryAccess)
    S.CreateMemoryAccess = createDefaultMemoryAccess;

  if (auto MemAccess = S.CreateMemoryAccess(*this)) {
    OwnedMemAccess = std::move(*MemAccess);
    this->MemAccess = OwnedMemAccess.get();
  } else
    return MemAccess.takeError();

  return Error::success();
}
+
// Handle a Result message: find the pending handler registered under SeqNo,
// release the sequence number for reuse, and deliver the result bytes to the
// handler outside the lock.
Error SimpleRemoteEPC::handleResult(uint64_t SeqNo, ExecutorAddr TagAddr,
                                    SimpleRemoteEPCArgBytesVector ArgBytes) {
  IncomingWFRHandler SendResult;

  if (TagAddr)
    return make_error<StringError>("Unexpected TagAddr in result message",
                                   inconvertibleErrorCode());

  {
    std::lock_guard<std::mutex> Lock(SimpleRemoteEPCMutex);
    auto I = PendingCallWrapperResults.find(SeqNo);
    if (I == PendingCallWrapperResults.end())
      return make_error<StringError>("No call for sequence number " +
                                         Twine(SeqNo),
                                     inconvertibleErrorCode());
    SendResult = std::move(I->second);
    PendingCallWrapperResults.erase(I);
    releaseSeqNo(SeqNo);
  }

  auto WFR =
      shared::WrapperFunctionResult::copyFrom(ArgBytes.data(), ArgBytes.size());
  SendResult(std::move(WFR));
  return Error::success();
}
+
// Handle a CallWrapper message from the executor: dispatch the JIT handler
// registered under TagAddr on the dispatcher thread pool, and send the
// handler's result back under the executor's sequence number.
void SimpleRemoteEPC::handleCallWrapper(
    uint64_t RemoteSeqNo, ExecutorAddr TagAddr,
    SimpleRemoteEPCArgBytesVector ArgBytes) {
  assert(ES && "No ExecutionSession attached");
  D->dispatch(makeGenericNamedTask(
      [this, RemoteSeqNo, TagAddr, ArgBytes = std::move(ArgBytes)]() {
        ES->runJITDispatchHandler(
            [this, RemoteSeqNo](shared::WrapperFunctionResult WFR) {
              if (auto Err =
                      sendMessage(SimpleRemoteEPCOpcode::Result, RemoteSeqNo,
                                  ExecutorAddr(), {WFR.data(), WFR.size()}))
                getExecutionSession().reportError(std::move(Err));
            },
            TagAddr.getValue(), ArgBytes);
      },
      "callWrapper task"));
}
+
// Handle a Hangup message: the payload is an SPS-serialized Error describing
// why the executor ended the session. Deserialize and return it (success if
// the executor reported none).
Error SimpleRemoteEPC::handleHangup(SimpleRemoteEPCArgBytesVector ArgBytes) {
  using namespace llvm::orc::shared;
  auto WFR = WrapperFunctionResult::copyFrom(ArgBytes.data(), ArgBytes.size());
  if (const char *ErrMsg = WFR.getOutOfBandError())
    return make_error<StringError>(ErrMsg, inconvertibleErrorCode());

  detail::SPSSerializableError Info;
  SPSInputBuffer IB(WFR.data(), WFR.size());
  if (!SPSArgList<SPSError>::deserialize(IB, Info))
    return make_error<StringError>("Could not deserialize hangup info",
                                   inconvertibleErrorCode());
  return fromSPSSerializable(std::move(Info));
}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
new file mode 100644
index 0000000000..c2fa4466ea
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
@@ -0,0 +1,306 @@
+//===-- SpeculateAnalyses.cpp --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/SpeculateAnalyses.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Analysis/BlockFrequencyInfo.h"
+#include "llvm/Analysis/BranchProbabilityInfo.h"
+#include "llvm/Analysis/CFG.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Passes/PassBuilder.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <algorithm>
+
+namespace {
+using namespace llvm;
// Collect the basic blocks of F that contain at least one call.
// NOTE(review): with the default IndirectCall=false, blocks whose only calls
// are *indirect* are skipped (the ternary yields IndirectCall for indirect
// calls, true for direct ones); confirm that is the intended meaning of the
// flag.
SmallVector<const BasicBlock *, 8> findBBwithCalls(const Function &F,
                                                   bool IndirectCall = false) {
  SmallVector<const BasicBlock *, 8> BBs;

  auto findCallInst = [&IndirectCall](const Instruction &I) {
    if (auto Call = dyn_cast<CallBase>(&I))
      return Call->isIndirectCall() ? IndirectCall : true;
    else
      return false;
  };
  // Check the terminator (covers invokes) as well as the non-debug body.
  for (auto &BB : F)
    if (findCallInst(*BB.getTerminator()) ||
        llvm::any_of(BB.instructionsWithoutDebug(), findCallInst))
      BBs.emplace_back(&BB);

  return BBs;
}
+} // namespace
+
+// Implementations of Queries shouldn't need to lock the resources
+// such as LLVMContext, each argument (function) has a non-shared LLVMContext
+// Plus, if Queries contain states necessary locking scheme should be provided.
+namespace llvm {
+namespace orc {
+
// Collect direct calls only
// Record into CallesNames the names of all functions directly called from BB.
// Callees that are not a Function after stripping pointer casts (i.e. truly
// indirect calls) are ignored. Covers CallInsts in the block body plus an
// InvokeInst terminator, if present.
void SpeculateQuery::findCalles(const BasicBlock *BB,
                                DenseSet<StringRef> &CallesNames) {
  assert(BB != nullptr && "Traversing Null BB to find calls?");

  auto getCalledFunction = [&CallesNames](const CallBase *Call) {
    auto CalledValue = Call->getCalledOperand()->stripPointerCasts();
    if (auto DirectCall = dyn_cast<Function>(CalledValue))
      CallesNames.insert(DirectCall->getName());
  };
  for (auto &I : BB->instructionsWithoutDebug())
    if (auto CI = dyn_cast<CallInst>(&I))
      getCalledFunction(CI);

  // InvokeInst is a terminator, so it is not visited by the loop above.
  if (auto II = dyn_cast<InvokeInst>(BB->getTerminator()))
    getCalledFunction(II);
}
+
+bool SpeculateQuery::isStraightLine(const Function &F) {
+ return llvm::all_of(F.getBasicBlockList(), [](const BasicBlock &BB) {
+ return BB.getSingleSuccessor() != nullptr;
+ });
+}
+
+// BlockFreqQuery Implementations
+
+size_t BlockFreqQuery::numBBToGet(size_t numBB) {
+ // small CFG
+ if (numBB < 4)
+ return numBB;
+ // mid-size CFG
+ else if (numBB < 20)
+ return (numBB / 2);
+ else
+ return (numBB / 2) + (numBB / 4);
+}
+
// Speculation query based on block frequency: rank F's call-containing blocks
// by static block frequency, keep the hottest numBBToGet() of them, and
// return the set of directly-called function names found in those blocks,
// keyed by F's name. Returns None if F contains no (direct) calls.
BlockFreqQuery::ResultTy BlockFreqQuery::operator()(Function &F) {
  DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
  DenseSet<StringRef> Calles;
  SmallVector<std::pair<const BasicBlock *, uint64_t>, 8> BBFreqs;

  // A fresh analysis manager per query: each function has its own context
  // (see the note on locking above the enclosing namespace).
  PassBuilder PB;
  FunctionAnalysisManager FAM;
  PB.registerFunctionAnalyses(FAM);

  auto IBBs = findBBwithCalls(F);

  if (IBBs.empty())
    return None;

  auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);

  for (const auto I : IBBs)
    BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});

  assert(IBBs.size() == BBFreqs.size() && "BB Count Mismatch");

  // Sort hottest-first.
  llvm::sort(BBFreqs, [](decltype(BBFreqs)::const_reference BBF,
                         decltype(BBFreqs)::const_reference BBS) {
    return BBF.second > BBS.second ? true : false;
  });

  // ignoring number of direct calls in a BB
  auto Topk = numBBToGet(BBFreqs.size());

  for (size_t i = 0; i < Topk; i++)
    findCalles(BBFreqs[i].first, Calles);

  assert(!Calles.empty() && "Running Analysis on Function with no calls?");

  CallerAndCalles.insert({F.getName(), std::move(Calles)});

  return CallerAndCalles;
}
+
+// SequenceBBQuery Implementation
+std::size_t SequenceBBQuery::getHottestBlocks(std::size_t TotalBlocks) {
+ if (TotalBlocks == 1)
+ return TotalBlocks;
+ return TotalBlocks / 2;
+}
+
+// FIXME : find good implementation.
+SequenceBBQuery::BlockListTy
+SequenceBBQuery::rearrangeBB(const Function &F, const BlockListTy &BBList) {
+ BlockListTy RearrangedBBSet;
+
+ for (auto &Block : F.getBasicBlockList())
+ if (llvm::is_contained(BBList, &Block))
+ RearrangedBBSet.push_back(&Block);
+
+ assert(RearrangedBBSet.size() == BBList.size() &&
+ "BasicBlock missing while rearranging?");
+ return RearrangedBBSet;
+}
+
// Walk upward from AtBB toward the entry block along hot predecessor edges,
// recording each reached block in VisitedBlocks and flagging blocks that
// contain calls (CallerBlock). Back-edge sources listed in BackEdgesInfo are
// skipped to keep the walk acyclic, and each block is ascended through at
// most once (the Upward hint).
void SequenceBBQuery::traverseToEntryBlock(const BasicBlock *AtBB,
                                           const BlockListTy &CallerBlocks,
                                           const BackEdgesInfoTy &BackEdgesInfo,
                                           const BranchProbabilityInfo *BPI,
                                           VisitedBlocksInfoTy &VisitedBlocks) {
  auto Itr = VisitedBlocks.find(AtBB);
  if (Itr != VisitedBlocks.end()) { // already visited.
    if (!Itr->second.Upward)
      return;
    Itr->second.Upward = false;
  } else {
    // Create hint for newly discoverd blocks.
    WalkDirection BlockHint;
    BlockHint.Upward = false;
    // FIXME: Expensive Check
    if (llvm::is_contained(CallerBlocks, AtBB))
      BlockHint.CallerBlock = true;
    VisitedBlocks.insert(std::make_pair(AtBB, BlockHint));
  }

  const_pred_iterator PIt = pred_begin(AtBB), EIt = pred_end(AtBB);
  // Move this check to top, when we have code setup to launch speculative
  // compiles for function in entry BB, this triggers the speculative compiles
  // before running the program.
  if (PIt == EIt) // No Preds.
    return;

  DenseSet<const BasicBlock *> PredSkipNodes;

  // Since we are checking for predecessor's backedges, this Block
  // occurs in second position.
  for (auto &I : BackEdgesInfo)
    if (I.second == AtBB)
      PredSkipNodes.insert(I.first);

  // Skip predecessors which source of back-edges.
  for (; PIt != EIt; ++PIt)
    // checking EdgeHotness is cheaper
    if (BPI->isEdgeHot(*PIt, AtBB) && !PredSkipNodes.count(*PIt))
      traverseToEntryBlock(*PIt, CallerBlocks, BackEdgesInfo, BPI,
                           VisitedBlocks);
}
+
// Mirror of traverseToEntryBlock: walk downward from AtBB toward the exit
// along hot successor edges, recording reached blocks in VisitedBlocks and
// flagging call-containing blocks (CallerBlock). Back-edge targets listed in
// BackEdgesInfo are skipped; each block is descended through at most once
// (the Downward hint).
void SequenceBBQuery::traverseToExitBlock(const BasicBlock *AtBB,
                                          const BlockListTy &CallerBlocks,
                                          const BackEdgesInfoTy &BackEdgesInfo,
                                          const BranchProbabilityInfo *BPI,
                                          VisitedBlocksInfoTy &VisitedBlocks) {
  auto Itr = VisitedBlocks.find(AtBB);
  if (Itr != VisitedBlocks.end()) { // already visited.
    if (!Itr->second.Downward)
      return;
    Itr->second.Downward = false;
  } else {
    // Create hint for newly discoverd blocks.
    WalkDirection BlockHint;
    BlockHint.Downward = false;
    // FIXME: Expensive Check
    if (llvm::is_contained(CallerBlocks, AtBB))
      BlockHint.CallerBlock = true;
    VisitedBlocks.insert(std::make_pair(AtBB, BlockHint));
  }

  const_succ_iterator PIt = succ_begin(AtBB), EIt = succ_end(AtBB);
  if (PIt == EIt) // No succs.
    return;

  // If there are hot edges, then compute SuccSkipNodes.
  DenseSet<const BasicBlock *> SuccSkipNodes;

  // Since we are checking for successor's backedges, this Block
  // occurs in first position.
  for (auto &I : BackEdgesInfo)
    if (I.first == AtBB)
      SuccSkipNodes.insert(I.second);

  for (; PIt != EIt; ++PIt)
    if (BPI->isEdgeHot(AtBB, *PIt) && !SuccSkipNodes.count(*PIt))
      traverseToExitBlock(*PIt, CallerBlocks, BackEdgesInfo, BPI,
                          VisitedBlocks);
}
+
// Get Block frequencies for blocks and take most frquently executed block,
// walk towards the entry block from those blocks and discover the basic blocks
// with call.
SequenceBBQuery::BlockListTy
SequenceBBQuery::queryCFG(Function &F, const BlockListTy &CallerBlocks) {

  BlockFreqInfoTy BBFreqs;
  VisitedBlocksInfoTy VisitedBlocks;
  BackEdgesInfoTy BackEdgesInfo;

  PassBuilder PB;
  FunctionAnalysisManager FAM;
  PB.registerFunctionAnalyses(FAM);

  auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);

  llvm::FindFunctionBackedges(F, BackEdgesInfo);

  for (const auto I : CallerBlocks)
    BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});

  // Sort hottest-first.
  llvm::sort(BBFreqs, [](decltype(BBFreqs)::const_reference Bbf,
                         decltype(BBFreqs)::const_reference Bbs) {
    return Bbf.second > Bbs.second;
  });

  // Keep only the hottest prefix of the sorted list.
  ArrayRef<std::pair<const BasicBlock *, uint64_t>> HotBlocksRef(BBFreqs);
  HotBlocksRef =
      HotBlocksRef.drop_back(BBFreqs.size() - getHottestBlocks(BBFreqs.size()));

  // NOTE(review): BPI comes from getCachedResult and is dereferenced inside
  // the traversals below — this relies on BranchProbabilityAnalysis having
  // been cached as a dependency of the BlockFrequencyAnalysis run above;
  // confirm it can never be null here.
  BranchProbabilityInfo *BPI =
      FAM.getCachedResult<BranchProbabilityAnalysis>(F);

  // visit NHotBlocks,
  // traverse upwards to entry
  // traverse downwards to end.

  for (auto I : HotBlocksRef) {
    traverseToEntryBlock(I.first, CallerBlocks, BackEdgesInfo, BPI,
                         VisitedBlocks);
    traverseToExitBlock(I.first, CallerBlocks, BackEdgesInfo, BPI,
                        VisitedBlocks);
  }

  // Keep only the visited blocks that contain calls, restored to program
  // order by rearrangeBB.
  BlockListTy MinCallerBlocks;
  for (auto &I : VisitedBlocks)
    if (I.second.CallerBlock)
      MinCallerBlocks.push_back(std::move(I.first));

  return rearrangeBB(F, MinCallerBlocks);
}
+
// Speculation query over block sequences: pick F's call-containing blocks
// (plus, for non-straight-line CFGs, the hot region discovered by queryCFG),
// then return the directly-called function names found in those blocks, keyed
// by F's name. Returns None if F contains no (direct) calls.
SpeculateQuery::ResultTy SequenceBBQuery::operator()(Function &F) {
  // reduce the number of lists!
  DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
  DenseSet<StringRef> Calles;
  BlockListTy SequencedBlocks;
  BlockListTy CallerBlocks;

  CallerBlocks = findBBwithCalls(F);
  if (CallerBlocks.empty())
    return None;

  if (isStraightLine(F))
    SequencedBlocks = rearrangeBB(F, CallerBlocks);
  else
    SequencedBlocks = queryCFG(F, CallerBlocks);

  for (auto BB : SequencedBlocks)
    findCalles(BB, Calles);

  CallerAndCalles.insert({F.getName(), std::move(Calles)});
  return CallerAndCalles;
}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Speculation.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Speculation.cpp
new file mode 100644
index 0000000000..0b4755fe23
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/Speculation.cpp
@@ -0,0 +1,143 @@
+//===---------- Speculation.cpp - Utilities for Speculation ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/Speculation.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Verifier.h"
+
+namespace llvm {
+
+namespace orc {
+
// ImplSymbolMap methods
//
// Record, for every alias in ImplMaps, the symbol that implements it and
// the dylib (SrcJD) that owns the implementation. Thread-safe.
void ImplSymbolMap::trackImpls(SymbolAliasMap ImplMaps, JITDylib *SrcJD) {
  assert(SrcJD && "Tracking on Null Source .impl dylib");
  std::lock_guard<std::mutex> Guard(ConcurrentAccess);
  for (auto &KV : ImplMaps) {
    auto InsertResult = Maps.insert({KV.first, {KV.second.Aliasee, SrcJD}});
    // check rationale when independent dylibs have same symbol name?
    assert(InsertResult.second &&
           "ImplSymbols are already tracked for this Symbol?");
    (void)InsertResult;
  }
}
+
// Trigger Speculative Compiles.
// Static trampoline called from instrumented JIT'd code via the
// __orc_speculate_for symbol; forwards to the given Speculator instance.
void Speculator::speculateForEntryPoint(Speculator *Ptr, uint64_t StubId) {
  Speculator *Self = Ptr;
  assert(Self && " Null Address Received in orc_speculate_for ");
  Self->speculateFor(StubId);
}
+
// Define, in JD, the two absolute symbols that instrumented modules
// reference:
//   __orc_speculator    - address of this Speculator instance (data).
//   __orc_speculate_for - speculateForEntryPoint trampoline (callable).
// Returns any error produced by JD.define().
Error Speculator::addSpeculationRuntime(JITDylib &JD,
                                        MangleAndInterner &Mangle) {
  JITEvaluatedSymbol ThisPtr(pointerToJITTargetAddress(this),
                             JITSymbolFlags::Exported);
  JITEvaluatedSymbol SpeculateForEntryPtr(
      pointerToJITTargetAddress(&speculateForEntryPoint),
      JITSymbolFlags::Exported);
  return JD.define(absoluteSymbols({
      {Mangle("__orc_speculator"), ThisPtr},                // Data Symbol
      {Mangle("__orc_speculate_for"), SpeculateForEntryPtr} // Callable Symbol
  }));
}
+
// If two modules, share the same LLVMContext, different threads must
// not access them concurrently without locking the associated LLVMContext
// this implementation follows this contract.
//
// Instruments each defined function in TSM so that on first entry it calls
// back into the Speculator (__orc_speculate_for) with its own address,
// letting the JIT start compiling likely callees early. The instrumented
// module is then handed to NextLayer for normal materialization.
void IRSpeculationLayer::emit(std::unique_ptr<MaterializationResponsibility> R,
                              ThreadSafeModule TSM) {

  assert(TSM && "Speculation Layer received Null Module ?");
  assert(TSM.getContext().getContext() != nullptr &&
         "Module with null LLVMContext?");

  // Instrumentation of runtime calls, lock the Module
  TSM.withModuleDo([this, &R](Module &M) {
    auto &MContext = M.getContext();
    // Declare the speculator runtime interface in this module:
    //   void __orc_speculate_for(Class.Speculator*, i64)
    // plus the external __orc_speculator global. Both are resolved by
    // Speculator::addSpeculationRuntime at link time.
    auto SpeculatorVTy = StructType::create(MContext, "Class.Speculator");
    auto RuntimeCallTy = FunctionType::get(
        Type::getVoidTy(MContext),
        {SpeculatorVTy->getPointerTo(), Type::getInt64Ty(MContext)}, false);
    auto RuntimeCall =
        Function::Create(RuntimeCallTy, Function::LinkageTypes::ExternalLinkage,
                         "__orc_speculate_for", &M);
    auto SpeclAddr = new GlobalVariable(
        M, SpeculatorVTy, false, GlobalValue::LinkageTypes::ExternalLinkage,
        nullptr, "__orc_speculator");

    IRBuilder<> Mutator(MContext);

    // QueryAnalysis allowed to transform the IR source, one such example is
    // Simplify CFG helps the static branch prediction heuristics!
    for (auto &Fn : M.getFunctionList()) {
      if (!Fn.isDeclaration()) {

        auto IRNames = QueryAnalysis(Fn);
        // Instrument and register if Query has result
        if (IRNames.hasValue()) {

          // Emit globals for each function.
          // One-byte internal guard: 0 = not yet speculated, 1 = done.
          auto LoadValueTy = Type::getInt8Ty(MContext);
          auto SpeculatorGuard = new GlobalVariable(
              M, LoadValueTy, false, GlobalValue::LinkageTypes::InternalLinkage,
              ConstantInt::get(LoadValueTy, 0),
              "__orc_speculate.guard.for." + Fn.getName());
          SpeculatorGuard->setAlignment(Align(1));
          SpeculatorGuard->setUnnamedAddr(GlobalValue::UnnamedAddr::Local);

          BasicBlock &ProgramEntry = Fn.getEntryBlock();
          // Create BasicBlocks before the program's entry basicblock
          BasicBlock *SpeculateBlock = BasicBlock::Create(
              MContext, "__orc_speculate.block", &Fn, &ProgramEntry);
          BasicBlock *SpeculateDecisionBlock = BasicBlock::Create(
              MContext, "__orc_speculate.decision.block", &Fn, SpeculateBlock);

          assert(SpeculateDecisionBlock == &Fn.getEntryBlock() &&
                 "SpeculateDecisionBlock not updated?");
          Mutator.SetInsertPoint(SpeculateDecisionBlock);

          // New entry: load the guard and branch to the speculate block
          // only on the first execution.
          auto LoadGuard =
              Mutator.CreateLoad(LoadValueTy, SpeculatorGuard, "guard.value");
          // if just loaded value equal to 0,return true.
          auto CanSpeculate =
              Mutator.CreateICmpEQ(LoadGuard, ConstantInt::get(LoadValueTy, 0),
                                   "compare.to.speculate");
          Mutator.CreateCondBr(CanSpeculate, SpeculateBlock, &ProgramEntry);

          // Speculate block: call __orc_speculate_for(speculator, fn-addr),
          // set the guard so this happens only once, then fall through to
          // the original entry block.
          Mutator.SetInsertPoint(SpeculateBlock);
          auto ImplAddrToUint =
              Mutator.CreatePtrToInt(&Fn, Type::getInt64Ty(MContext));
          Mutator.CreateCall(RuntimeCallTy, RuntimeCall,
                             {SpeclAddr, ImplAddrToUint});
          Mutator.CreateStore(ConstantInt::get(LoadValueTy, 1),
                              SpeculatorGuard);
          Mutator.CreateBr(&ProgramEntry);

          assert(Mutator.GetInsertBlock()->getParent() == &Fn &&
                 "IR builder association mismatch?");
          // Tell the speculator which likely-callee symbols belong to this
          // function so it can kick off their compilation.
          S.registerSymbols(internToJITSymbols(IRNames.getValue()),
                            &R->getTargetJITDylib());
        }
      }
    }
  });

  // Instrumentation must leave the module valid IR.
  assert(!TSM.withModuleDo([](const Module &M) { return verifyModule(M); }) &&
         "Speculation Instrumentation breaks IR?");

  NextLayer.emit(std::move(R), std::move(TSM));
}
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
new file mode 100644
index 0000000000..ffa2969536
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.cpp
@@ -0,0 +1,129 @@
+//===- JITLoaderGDB.h - Register objects via GDB JIT interface -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/JITLoaderGDB.h"
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/ManagedStatic.h"
+
+#include <cstdint>
+#include <mutex>
+#include <utility>
+
+#define DEBUG_TYPE "orc"
+
// First version as landed in August 2009
static constexpr uint32_t JitDescriptorVersion = 1;

// Keep in sync with gdb/gdb/jit.h
extern "C" {

// Action the debugger should take when the rendezvous breakpoint fires.
typedef enum {
  JIT_NOACTION = 0,
  JIT_REGISTER_FN,
  JIT_UNREGISTER_FN
} jit_actions_t;

// Doubly-linked list node describing one in-memory object file
// (symfile_addr/symfile_size) registered with the debugger.
struct jit_code_entry {
  struct jit_code_entry *next_entry;
  struct jit_code_entry *prev_entry;
  const char *symfile_addr;
  uint64_t symfile_size;
};

struct jit_descriptor {
  uint32_t version;
  // This should be jit_actions_t, but we want to be specific about the
  // bit-width.
  uint32_t action_flag;
  struct jit_code_entry *relevant_entry;
  struct jit_code_entry *first_entry;
};

// We put information about the JITed function in this global, which the
// debugger reads. Make sure to specify the version statically, because the
// debugger checks the version before we can set it during runtime.
struct jit_descriptor __jit_debug_descriptor = {JitDescriptorVersion, 0,
                                                nullptr, nullptr};

// Debuggers that implement the GDB JIT interface put a special breakpoint in
// this function.
LLVM_ATTRIBUTE_NOINLINE void __jit_debug_register_code() {
  // The noinline and the asm prevent calls to this function from being
  // optimized out.
#if !defined(_MSC_VER)
  asm volatile("" ::: "memory");
#endif
}
}
+
+using namespace llvm;
+using namespace llvm::orc;
+
// Serialize rendezvous with the debugger as well as access to shared data.
ManagedStatic<std::mutex> JITDebugLock;

// Add the object in [ObjAddr, ObjAddr+Size) to the head of the GDB JIT
// descriptor list and fire the rendezvous breakpoint so an attached
// debugger picks it up. The jit_code_entry is never freed here -
// presumably intentional, since the debugger keeps referencing it (TODO
// confirm lifetime expectations with callers).
static void registerJITLoaderGDBImpl(const char *ObjAddr, size_t Size) {
  LLVM_DEBUG({
    dbgs() << "Registering debug object with GDB JIT interface "
           << formatv("([{0:x16} -- {1:x16}])",
                      reinterpret_cast<uintptr_t>(ObjAddr),
                      reinterpret_cast<uintptr_t>(ObjAddr + Size))
           << "\n";
  });

  jit_code_entry *E = new jit_code_entry;
  E->symfile_addr = ObjAddr;
  E->symfile_size = Size;
  E->prev_entry = nullptr;

  std::lock_guard<std::mutex> Lock(*JITDebugLock);

  // Insert this entry at the head of the list.
  jit_code_entry *NextEntry = __jit_debug_descriptor.first_entry;
  E->next_entry = NextEntry;
  if (NextEntry) {
    NextEntry->prev_entry = E;
  }

  __jit_debug_descriptor.first_entry = E;
  __jit_debug_descriptor.relevant_entry = E;

  // Run into the rendezvous breakpoint.
  __jit_debug_descriptor.action_flag = JIT_REGISTER_FN;
  __jit_debug_register_code();
}
+
// Allocation-action entry point: deserializes an SPSExecutorAddrRange from
// (Data, Size) and registers the described debug object with the GDB JIT
// interface. Always reports success to the caller.
extern "C" orc::shared::CWrapperFunctionResult
llvm_orc_registerJITLoaderGDBAllocAction(const char *Data, size_t Size) {
  using namespace orc::shared;
  return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
             Data, Size,
             [](ExecutorAddrRange R) {
               registerJITLoaderGDBImpl(R.Start.toPtr<const char *>(),
                                        R.size());
               return Error::success();
             })
      .release();
}
+
// Wrapper-function entry point (same behavior as the alloc action above,
// but with the uint64_t size type used by the wrapper-call convention):
// deserializes an SPSExecutorAddrRange and registers the debug object.
extern "C" orc::shared::CWrapperFunctionResult
llvm_orc_registerJITLoaderGDBWrapper(const char *Data, uint64_t Size) {
  using namespace orc::shared;
  auto RegisterObject = [](ExecutorAddrRange R) {
    registerJITLoaderGDBImpl(R.Start.toPtr<const char *>(), R.size());
    return Error::success();
  };
  return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
             Data, Size, RegisterObject)
      .release();
}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp
new file mode 100644
index 0000000000..909d47deef
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.cpp
@@ -0,0 +1,83 @@
+//===------------------------ OrcRTBootstrap.cpp --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "OrcRTBootstrap.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/ExecutionEngine/Orc/Shared/WrapperFunctionUtils.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+#include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
// Generic scalar-write wrapper: deserializes a sequence of WriteT
// (address/value pairs) and stores each value directly to its target
// address in this process.
template <typename WriteT, typename SPSWriteT>
static llvm::orc::shared::CWrapperFunctionResult
writeUIntsWrapper(const char *ArgData, size_t ArgSize) {
  auto ApplyWrites = [](std::vector<WriteT> Ws) {
    for (WriteT &W : Ws)
      *W.Addr.template toPtr<decltype(W.Value) *>() = W.Value;
  };
  return WrapperFunction<void(SPSSequence<SPSWriteT>)>::handle(ArgData, ArgSize,
                                                               ApplyWrites)
      .release();
}
+
// Buffer-write wrapper: deserializes a sequence of BufferWrites and
// memcpys each byte buffer to its target address in this process.
static llvm::orc::shared::CWrapperFunctionResult
writeBuffersWrapper(const char *ArgData, size_t ArgSize) {
  auto ApplyCopies = [](std::vector<tpctypes::BufferWrite> Ws) {
    for (tpctypes::BufferWrite &W : Ws)
      memcpy(W.Addr.template toPtr<char *>(), W.Buffer.data(),
             W.Buffer.size());
  };
  return WrapperFunction<void(SPSSequence<SPSMemoryAccessBufferWrite>)>::handle(
             ArgData, ArgSize, ApplyCopies)
      .release();
}
+
// Deserializes (entry-point address, argv strings) and invokes the target
// main function in this process, returning its exit code to the caller.
static llvm::orc::shared::CWrapperFunctionResult
runAsMainWrapper(const char *ArgData, size_t ArgSize) {
  auto Invoke = [](ExecutorAddr MainAddr,
                   std::vector<std::string> Args) -> int64_t {
    auto *MainFn = MainAddr.toPtr<int (*)(int, char *[])>();
    return runAsMain(MainFn, Args);
  };
  return WrapperFunction<rt::SPSRunAsMainSignature>::handle(ArgData, ArgSize,
                                                            Invoke)
      .release();
}
+
// Populate M with the executor-side bootstrap entry points: typed scalar
// writes (8/16/32/64-bit), raw buffer writes, eh-frame (de)registration,
// and running a target main function. Keys are the agreed-upon rt::
// wrapper names that the controller process looks up.
void addTo(StringMap<ExecutorAddr> &M) {
  M[rt::MemoryWriteUInt8sWrapperName] = ExecutorAddr::fromPtr(
      &writeUIntsWrapper<tpctypes::UInt8Write,
                         shared::SPSMemoryAccessUInt8Write>);
  M[rt::MemoryWriteUInt16sWrapperName] = ExecutorAddr::fromPtr(
      &writeUIntsWrapper<tpctypes::UInt16Write,
                         shared::SPSMemoryAccessUInt16Write>);
  M[rt::MemoryWriteUInt32sWrapperName] = ExecutorAddr::fromPtr(
      &writeUIntsWrapper<tpctypes::UInt32Write,
                         shared::SPSMemoryAccessUInt32Write>);
  M[rt::MemoryWriteUInt64sWrapperName] = ExecutorAddr::fromPtr(
      &writeUIntsWrapper<tpctypes::UInt64Write,
                         shared::SPSMemoryAccessUInt64Write>);
  M[rt::MemoryWriteBuffersWrapperName] =
      ExecutorAddr::fromPtr(&writeBuffersWrapper);
  M[rt::RegisterEHFrameSectionWrapperName] =
      ExecutorAddr::fromPtr(&llvm_orc_registerEHFrameSectionWrapper);
  M[rt::DeregisterEHFrameSectionWrapperName] =
      ExecutorAddr::fromPtr(&llvm_orc_deregisterEHFrameSectionWrapper);
  M[rt::RunAsMainWrapperName] = ExecutorAddr::fromPtr(&runAsMainWrapper);
}
+
+} // end namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h
new file mode 100644
index 0000000000..92b513d0bb
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/OrcRTBootstrap.h
@@ -0,0 +1,36 @@
+//===----------------------- OrcRTBootstrap.h -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// OrcRTPrelinkImpl provides functions that should be linked into the executor
+// to bootstrap common JIT functionality (e.g. memory allocation and memory
+// access).
+//
+// Call rt_impl::addTo to add these functions to a bootstrap symbols map.
+//
+// FIXME: The functionality in this file should probably be moved to an ORC
+// runtime bootstrap library in compiler-rt.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LIB_EXECUTIONENGINE_ORC_TARGETPROCESS_ORCRTBOOTSTRAP_H
+#define LIB_EXECUTIONENGINE_ORC_TARGETPROCESS_ORCRTBOOTSTRAP_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ExecutionEngine/Orc/Shared/ExecutorAddress.h"
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
+void addTo(StringMap<ExecutorAddr> &M);
+
+} // namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
+
+#endif // LIB_EXECUTIONENGINE_ORC_TARGETPROCESS_ORCRTBOOTSTRAP_H
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
new file mode 100644
index 0000000000..fdae0e45da
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.cpp
@@ -0,0 +1,183 @@
+//===--------- RegisterEHFrames.cpp - Register EH frame sections ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/RegisterEHFrames.h"
+
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/raw_ostream.h"
+
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm;
+using namespace llvm::orc;
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+
#if defined(HAVE_REGISTER_FRAME) && defined(HAVE_DEREGISTER_FRAME) &&          \
    !defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)

// The building compiler's runtime provides __register_frame /
// __deregister_frame directly; bind to them at link time.
extern "C" void __register_frame(const void *);
extern "C" void __deregister_frame(const void *);

Error registerFrameWrapper(const void *P) {
  __register_frame(P);
  return Error::success();
}

Error deregisterFrameWrapper(const void *P) {
  __deregister_frame(P);
  return Error::success();
}

#else

// The building compiler does not have __(de)register_frame but
// it may be found at runtime in a dynamically-loaded library.
// For example, this happens when building LLVM with Visual C++
// but using the MingW runtime.
static Error registerFrameWrapper(const void *P) {
  // Lazily-resolved function pointer, cached across calls.
  static void((*RegisterFrame)(const void *)) = 0;

  if (!RegisterFrame)
    *(void **)&RegisterFrame =
        llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");

  if (RegisterFrame) {
    RegisterFrame(P);
    return Error::success();
  }

  return make_error<StringError>("could not register eh-frame: "
                                 "__register_frame function not found",
                                 inconvertibleErrorCode());
}

static Error deregisterFrameWrapper(const void *P) {
  // Lazily-resolved function pointer, cached across calls.
  static void((*DeregisterFrame)(const void *)) = 0;

  if (!DeregisterFrame)
    *(void **)&DeregisterFrame =
        llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
            "__deregister_frame");

  if (DeregisterFrame) {
    DeregisterFrame(P);
    return Error::success();
  }

  return make_error<StringError>("could not deregister eh-frame: "
                                 "__deregister_frame function not found",
                                 inconvertibleErrorCode());
}
#endif
+
+#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
+
// Walk the CFI records of an in-memory eh-frame section, invoking
// HandleFDE on the start address of each FDE (records whose CIE-offset
// field is non-zero); CIEs are skipped. Handles both the 32-bit and the
// extended 64-bit (0xffffffff escape) length encodings.
//
// NOTE(review): after advancing past a record this reads the next 4-byte
// length before re-checking against End - this assumes the section ends
// with a zero-length terminator record; confirm with callers.
template <typename HandleFDEFn>
Error walkLibunwindEHFrameSection(const char *const SectionStart,
                                  size_t SectionSize, HandleFDEFn HandleFDE) {
  const char *CurCFIRecord = SectionStart;
  const char *End = SectionStart + SectionSize;
  uint64_t Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);

  while (CurCFIRecord != End && Size != 0) {
    // The CIE-offset field follows the length: 12 bytes in for the
    // extended-length form, 4 bytes otherwise.
    const char *OffsetField = CurCFIRecord + (Size == 0xffffffff ? 12 : 4);
    if (Size == 0xffffffff)
      Size = *reinterpret_cast<const uint64_t *>(CurCFIRecord + 4) + 12;
    else
      Size += 4;
    uint32_t Offset = *reinterpret_cast<const uint32_t *>(OffsetField);

    LLVM_DEBUG({
      dbgs() << "Registering eh-frame section:\n";
      dbgs() << "Processing " << (Offset ? "FDE" : "CIE") << " @"
             << (void *)CurCFIRecord << ": [";
      for (unsigned I = 0; I < Size; ++I)
        dbgs() << format(" 0x%02" PRIx8, *(CurCFIRecord + I));
      dbgs() << " ]\n";
    });

    if (Offset != 0)
      if (auto Err = HandleFDE(CurCFIRecord))
        return Err;

    CurCFIRecord += Size;

    Size = *reinterpret_cast<const uint32_t *>(CurCFIRecord);
  }

  return Error::success();
}
+
+#endif // HAVE_UNW_ADD_DYNAMIC_FDE || __APPLE__
+
// Register the eh-frame section at [EHFrameSectionAddr,
// EHFrameSectionAddr + EHFrameSectionSize) with the in-process unwinder.
Error registerEHFrameSection(const void *EHFrameSectionAddr,
                             size_t EHFrameSectionSize) {
  /* libgcc and libunwind __register_frame behave differently. We use the
   * presence of __unw_add_dynamic_fde to detect libunwind. */
#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
  // With libunwind, __register_frame has to be called for each FDE entry.
  return walkLibunwindEHFrameSection(
      static_cast<const char *>(EHFrameSectionAddr), EHFrameSectionSize,
      registerFrameWrapper);
#else
  // With libgcc, __register_frame takes a single argument:
  // a pointer to the start of the .eh_frame section.

  // How can it find the end? Because crtendS.o is linked
  // in and it has an .eh_frame section with four zero chars.
  return registerFrameWrapper(EHFrameSectionAddr);
#endif
}
+
// Undo registerEHFrameSection for the same address range, using the same
// libunwind-vs-libgcc strategy (per-FDE walk vs whole-section call).
Error deregisterEHFrameSection(const void *EHFrameSectionAddr,
                               size_t EHFrameSectionSize) {
#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
  return walkLibunwindEHFrameSection(
      static_cast<const char *>(EHFrameSectionAddr), EHFrameSectionSize,
      deregisterFrameWrapper);
#else
  return deregisterFrameWrapper(EHFrameSectionAddr);
#endif
}
+
+} // end namespace orc
+} // end namespace llvm
+
+static Error registerEHFrameWrapper(ExecutorAddrRange EHFrame) {
+ return llvm::orc::registerEHFrameSection(EHFrame.Start.toPtr<const void *>(),
+ EHFrame.size());
+}
+
+static Error deregisterEHFrameWrapper(ExecutorAddrRange EHFrame) {
+ return llvm::orc::deregisterEHFrameSection(
+ EHFrame.Start.toPtr<const void *>(), EHFrame.size());
+}
+
// C entry point: deserializes an SPSExecutorAddrRange from (Data, Size)
// and registers the eh-frame section it describes.
extern "C" orc::shared::CWrapperFunctionResult
llvm_orc_registerEHFrameSectionWrapper(const char *Data, uint64_t Size) {
  return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
             Data, Size, registerEHFrameWrapper)
      .release();
}

// C entry point: deserializes an SPSExecutorAddrRange from (Data, Size)
// and deregisters the eh-frame section it describes.
extern "C" orc::shared::CWrapperFunctionResult
llvm_orc_deregisterEHFrameSectionWrapper(const char *Data, uint64_t Size) {
  return WrapperFunction<SPSError(SPSExecutorAddrRange)>::handle(
             Data, Size, deregisterEHFrameWrapper)
      .release();
}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp
new file mode 100644
index 0000000000..3c9dd21b08
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.cpp
@@ -0,0 +1,129 @@
+//===--- SimpleExecutorDylibManager.cpp - Executor-side dylib management --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorDylibManager.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
// All dylib handles must have been released via shutdown() before the
// manager is destroyed.
SimpleExecutorDylibManager::~SimpleExecutorDylibManager() {
  assert(Dylibs.empty() && "shutdown not called?");
}
+
// Permanently load the dynamic library at Path (or the process itself for
// an empty Path) and return a fresh handle for it. Only Mode == 0 is
// supported at the moment.
Expected<tpctypes::DylibHandle>
SimpleExecutorDylibManager::open(const std::string &Path, uint64_t Mode) {
  if (Mode != 0)
    return make_error<StringError>("open: non-zero mode bits not yet supported",
                                   inconvertibleErrorCode());

  // An empty path means "the containing process" to DynamicLibrary.
  std::string ErrMsg;
  auto Lib = sys::DynamicLibrary::getPermanentLibrary(
      Path.empty() ? nullptr : Path.c_str(), &ErrMsg);
  if (!Lib.isValid())
    return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());

  std::lock_guard<std::mutex> Lock(M);
  Dylibs[NextId] = std::move(Lib);
  return NextId++;
}
+
// Resolve each symbol in L within the dylib identified by H. Empty names
// yield a null address (unless required). Missing required symbols are an
// error; missing optional ones yield a null address.
Expected<std::vector<ExecutorAddr>>
SimpleExecutorDylibManager::lookup(tpctypes::DylibHandle H,
                                   const RemoteSymbolLookupSet &L) {
  std::vector<ExecutorAddr> Result;

  std::lock_guard<std::mutex> Lock(M);
  auto I = Dylibs.find(H);
  if (I == Dylibs.end())
    return make_error<StringError>("No dylib for handle " + formatv("{0:x}", H),
                                   inconvertibleErrorCode());
  auto &DL = I->second;

  for (const auto &E : L) {

    if (E.Name.empty()) {
      if (E.Required)
        return make_error<StringError>("Required address for empty symbol \"\"",
                                       inconvertibleErrorCode());
      else
        Result.push_back(ExecutorAddr());
    } else {

      // On MachO the linker-level name carries a leading underscore that
      // the dlsym-style lookup does not expect; strip it.
      const char *DemangledSymName = E.Name.c_str();
#ifdef __APPLE__
      if (E.Name.front() != '_')
        return make_error<StringError>(Twine("MachO symbol \"") + E.Name +
                                           "\" missing leading '_'",
                                       inconvertibleErrorCode());
      ++DemangledSymName;
#endif

      void *Addr = DL.getAddressOfSymbol(DemangledSymName);
      if (!Addr && E.Required)
        return make_error<StringError>(Twine("Missing definition for ") +
                                           DemangledSymName,
                                       inconvertibleErrorCode());

      Result.push_back(ExecutorAddr::fromPtr(Addr));
    }
  }

  return Result;
}
+
// Release this manager's references to all tracked dylibs. Takes the map
// under the lock and destroys it outside of it.
Error SimpleExecutorDylibManager::shutdown() {
  DylibsMap Retained;
  {
    std::lock_guard<std::mutex> Lock(M);
    Dylibs.swap(Retained);
  }

  // There is no removal of dylibs at the moment, so nothing to do here.
  return Error::success();
}
+
// Expose this instance and its open/lookup wrapper functions under the
// agreed rt:: bootstrap-symbol names so the controller can call them.
void SimpleExecutorDylibManager::addBootstrapSymbols(
    StringMap<ExecutorAddr> &M) {
  M[rt::SimpleExecutorDylibManagerInstanceName] = ExecutorAddr::fromPtr(this);
  M[rt::SimpleExecutorDylibManagerOpenWrapperName] =
      ExecutorAddr::fromPtr(&openWrapper);
  M[rt::SimpleExecutorDylibManagerLookupWrapperName] =
      ExecutorAddr::fromPtr(&lookupWrapper);
}
+
// SPS entry point: deserializes (instance, path, mode) and forwards to
// the instance's open() method.
llvm::orc::shared::CWrapperFunctionResult
SimpleExecutorDylibManager::openWrapper(const char *ArgData, size_t ArgSize) {
  return shared::
      WrapperFunction<rt::SPSSimpleExecutorDylibManagerOpenSignature>::handle(
             ArgData, ArgSize,
             shared::makeMethodWrapperHandler(
                 &SimpleExecutorDylibManager::open))
          .release();
}

// SPS entry point: deserializes (instance, handle, lookup-set) and
// forwards to the instance's lookup() method.
llvm::orc::shared::CWrapperFunctionResult
SimpleExecutorDylibManager::lookupWrapper(const char *ArgData, size_t ArgSize) {
  return shared::
      WrapperFunction<rt::SPSSimpleExecutorDylibManagerLookupSignature>::handle(
             ArgData, ArgSize,
             shared::makeMethodWrapperHandler(
                 &SimpleExecutorDylibManager::lookup))
          .release();
}
+
+} // namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
new file mode 100644
index 0000000000..7cadf3bb51
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.cpp
@@ -0,0 +1,261 @@
+//===- SimpleExecutorMemoryManager.cpp - Simple executor-side memory mgmt -===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/SimpleExecutorMemoryManager.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
+#include "llvm/Support/FormatVariadic.h"
+
+#define DEBUG_TYPE "orc"
+
+namespace llvm {
+namespace orc {
+namespace rt_bootstrap {
+
// All allocations must have been released (via deallocate()/shutdown())
// before the manager is destroyed.
SimpleExecutorMemoryManager::~SimpleExecutorMemoryManager() {
  assert(Allocations.empty() && "shutdown not called?");
}
+
// Reserve Size bytes of read/write memory and track the new block in
// Allocations, keyed by its base address. Returns the base address on
// success.
Expected<ExecutorAddr> SimpleExecutorMemoryManager::allocate(uint64_t Size) {
  std::error_code EC;
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
      Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  if (EC)
    return errorCodeToError(EC);

  {
    std::lock_guard<std::mutex> Lock(M);
    assert(!Allocations.count(MB.base()) && "Duplicate allocation addr");
    Allocations[MB.base()].Size = Size;
  }
  return ExecutorAddr::fromPtr(MB.base());
}
+
// Finalize a previously-allocated region: copy each segment's content into
// place, zero-fill the remainder, apply the requested protections, and run
// the request's finalization actions. On any failure, BailOut runs the
// deallocation actions for finalization actions that already completed and
// releases the allocation. The allocation is identified by the lowest
// segment address in FR.
Error SimpleExecutorMemoryManager::finalize(tpctypes::FinalizeRequest &FR) {
  ExecutorAddr Base(~0ULL);
  std::vector<shared::WrapperFunctionCall> DeallocationActions;
  size_t SuccessfulFinalizationActions = 0;

  if (FR.Segments.empty()) {
    // NOTE: Finalizing nothing is currently a no-op. Should it be an error?
    if (FR.Actions.empty())
      return Error::success();
    else
      return make_error<StringError>("Finalization actions attached to empty "
                                     "finalization request",
                                     inconvertibleErrorCode());
  }

  // The allocation is keyed by its lowest segment address.
  for (auto &Seg : FR.Segments)
    Base = std::min(Base, Seg.Addr);

  for (auto &ActPair : FR.Actions)
    if (ActPair.Dealloc)
      DeallocationActions.push_back(ActPair.Dealloc);

  // Get the Allocation for this finalization.
  size_t AllocSize = 0;
  {
    std::lock_guard<std::mutex> Lock(M);
    auto I = Allocations.find(Base.toPtr<void *>());
    if (I == Allocations.end())
      return make_error<StringError>("Attempt to finalize unrecognized "
                                     "allocation " +
                                         formatv("{0:x}", Base.getValue()),
                                     inconvertibleErrorCode());
    AllocSize = I->second.Size;
    I->second.DeallocationActions = std::move(DeallocationActions);
  }
  ExecutorAddr AllocEnd = Base + ExecutorAddrDiff(AllocSize);

  // Bail-out function: this will run deallocation actions corresponding to any
  // completed finalization actions, then deallocate memory.
  auto BailOut = [&](Error Err) {
    std::pair<void *, Allocation> AllocToDestroy;

    // Get allocation to destroy.
    {
      std::lock_guard<std::mutex> Lock(M);
      auto I = Allocations.find(Base.toPtr<void *>());

      // Check for missing allocation (effectively a double free).
      if (I == Allocations.end())
        return joinErrors(
            std::move(Err),
            make_error<StringError>("No allocation entry found "
                                    "for " +
                                        formatv("{0:x}", Base.getValue()),
                                    inconvertibleErrorCode()));
      AllocToDestroy = std::move(*I);
      Allocations.erase(I);
    }

    // Run deallocation actions for all completed finalization actions.
    while (SuccessfulFinalizationActions)
      Err =
          joinErrors(std::move(Err), FR.Actions[--SuccessfulFinalizationActions]
                                         .Dealloc.runWithSPSRetErrorMerged());

    // Deallocate memory.
    sys::MemoryBlock MB(AllocToDestroy.first, AllocToDestroy.second.Size);
    if (auto EC = sys::Memory::releaseMappedMemory(MB))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));

    return Err;
  };

  // Copy content and apply permissions.
  for (auto &Seg : FR.Segments) {

    // Check segment ranges.
    if (LLVM_UNLIKELY(Seg.Size < Seg.Content.size()))
      return BailOut(make_error<StringError>(
          formatv("Segment {0:x} content size ({1:x} bytes) "
                  "exceeds segment size ({2:x} bytes)",
                  Seg.Addr.getValue(), Seg.Content.size(), Seg.Size),
          inconvertibleErrorCode()));
    ExecutorAddr SegEnd = Seg.Addr + ExecutorAddrDiff(Seg.Size);
    if (LLVM_UNLIKELY(Seg.Addr < Base || SegEnd > AllocEnd))
      return BailOut(make_error<StringError>(
          formatv("Segment {0:x} -- {1:x} crosses boundary of "
                  "allocation {2:x} -- {3:x}",
                  Seg.Addr.getValue(), SegEnd.getValue(), Base.getValue(),
                  AllocEnd.getValue()),
          inconvertibleErrorCode()));

    // Copy the provided content and zero-fill the rest of the segment.
    char *Mem = Seg.Addr.toPtr<char *>();
    memcpy(Mem, Seg.Content.data(), Seg.Content.size());
    memset(Mem + Seg.Content.size(), 0, Seg.Size - Seg.Content.size());
    assert(Seg.Size <= std::numeric_limits<size_t>::max());
    if (auto EC = sys::Memory::protectMappedMemory(
            {Mem, static_cast<size_t>(Seg.Size)},
            tpctypes::fromWireProtectionFlags(Seg.Prot)))
      return BailOut(errorCodeToError(EC));
    if (Seg.Prot & tpctypes::WPF_Exec)
      sys::Memory::InvalidateInstructionCache(Mem, Seg.Size);
  }

  // Run finalization actions.
  for (auto &ActPair : FR.Actions) {
    if (auto Err = ActPair.Finalize.runWithSPSRetErrorMerged())
      return BailOut(std::move(Err));
    ++SuccessfulFinalizationActions;
  }

  return Error::success();
}
+
// Remove the given allocations from the map under the lock, then run their
// deallocation actions and release the memory outside of it. Unknown base
// addresses are reported as errors but do not stop processing of the
// remaining entries; all errors are joined into the returned Error.
Error SimpleExecutorMemoryManager::deallocate(
    const std::vector<ExecutorAddr> &Bases) {
  std::vector<std::pair<void *, Allocation>> AllocPairs;
  AllocPairs.reserve(Bases.size());

  // Get allocations to destroy.
  Error Err = Error::success();
  {
    std::lock_guard<std::mutex> Lock(M);
    for (auto &Base : Bases) {
      auto I = Allocations.find(Base.toPtr<void *>());

      // Check for missing allocation (effectively a double free).
      if (I != Allocations.end()) {
        AllocPairs.push_back(std::move(*I));
        Allocations.erase(I);
      } else
        Err = joinErrors(
            std::move(Err),
            make_error<StringError>("No allocation entry found "
                                    "for " +
                                        formatv("{0:x}", Base.getValue()),
                                    inconvertibleErrorCode()));
    }
  }

  while (!AllocPairs.empty()) {
    auto &P = AllocPairs.back();
    Err = joinErrors(std::move(Err), deallocateImpl(P.first, P.second));
    AllocPairs.pop_back();
  }

  return Err;
}
+
// Release every remaining allocation. The map is taken over under the
// lock; the (possibly slow) deallocation work then runs outside of it.
Error SimpleExecutorMemoryManager::shutdown() {
  AllocationsMap Remaining;
  {
    std::lock_guard<std::mutex> Lock(M);
    Remaining = std::move(Allocations);
  }

  Error Result = Error::success();
  for (auto &[Base, Alloc] : Remaining)
    Result = joinErrors(std::move(Result), deallocateImpl(Base, Alloc));
  return Result;
}
+
// Expose this instance and its reserve/finalize/deallocate wrappers under
// the agreed rt:: bootstrap-symbol names so the controller can call them.
void SimpleExecutorMemoryManager::addBootstrapSymbols(
    StringMap<ExecutorAddr> &M) {
  M[rt::SimpleExecutorMemoryManagerInstanceName] = ExecutorAddr::fromPtr(this);
  M[rt::SimpleExecutorMemoryManagerReserveWrapperName] =
      ExecutorAddr::fromPtr(&reserveWrapper);
  M[rt::SimpleExecutorMemoryManagerFinalizeWrapperName] =
      ExecutorAddr::fromPtr(&finalizeWrapper);
  M[rt::SimpleExecutorMemoryManagerDeallocateWrapperName] =
      ExecutorAddr::fromPtr(&deallocateWrapper);
}
+
// Run A's deallocation actions in reverse registration order, then unmap
// the underlying memory block. Errors from every step are joined into the
// returned Error.
Error SimpleExecutorMemoryManager::deallocateImpl(void *Base, Allocation &A) {
  Error Result = Error::success();

  // Pop back-to-front so actions run most-recently-added first.
  while (!A.DeallocationActions.empty()) {
    auto Action = std::move(A.DeallocationActions.back());
    A.DeallocationActions.pop_back();
    Result = joinErrors(std::move(Result), Action.runWithSPSRetErrorMerged());
  }

  sys::MemoryBlock MB(Base, A.Size);
  if (auto EC = sys::Memory::releaseMappedMemory(MB))
    Result = joinErrors(std::move(Result), errorCodeToError(EC));

  return Result;
}
+
// SPS entry point: deserializes (instance, size) and forwards to the
// instance's allocate() method.
llvm::orc::shared::CWrapperFunctionResult
SimpleExecutorMemoryManager::reserveWrapper(const char *ArgData,
                                            size_t ArgSize) {
  return shared::WrapperFunction<
             rt::SPSSimpleExecutorMemoryManagerReserveSignature>::
      handle(ArgData, ArgSize,
             shared::makeMethodWrapperHandler(
                 &SimpleExecutorMemoryManager::allocate))
          .release();
}

// SPS entry point: deserializes (instance, finalize-request) and forwards
// to the instance's finalize() method.
llvm::orc::shared::CWrapperFunctionResult
SimpleExecutorMemoryManager::finalizeWrapper(const char *ArgData,
                                             size_t ArgSize) {
  return shared::WrapperFunction<
             rt::SPSSimpleExecutorMemoryManagerFinalizeSignature>::
      handle(ArgData, ArgSize,
             shared::makeMethodWrapperHandler(
                 &SimpleExecutorMemoryManager::finalize))
          .release();
}

// SPS entry point: deserializes (instance, base-address list) and forwards
// to the instance's deallocate() method.
llvm::orc::shared::CWrapperFunctionResult
SimpleExecutorMemoryManager::deallocateWrapper(const char *ArgData,
                                               size_t ArgSize) {
  return shared::WrapperFunction<
             rt::SPSSimpleExecutorMemoryManagerDeallocateSignature>::
      handle(ArgData, ArgSize,
             shared::makeMethodWrapperHandler(
                 &SimpleExecutorMemoryManager::deallocate))
          .release();
}
+
+} // namespace rt_bootstrap
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp
new file mode 100644
index 0000000000..b6b21bde11
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.cpp
@@ -0,0 +1,293 @@
+//===------- SimpleEPCServer.cpp - EPC over simple abstract channel -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/SimpleRemoteEPCServer.h"
+
+#include "llvm/ExecutionEngine/Orc/Shared/TargetProcessControlTypes.h"
+#include "llvm/Support/FormatVariadic.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Process.h"
+
+#include "OrcRTBootstrap.h"
+
+#define DEBUG_TYPE "orc"
+
+using namespace llvm::orc::shared;
+
+namespace llvm {
+namespace orc {
+
+ExecutorBootstrapService::~ExecutorBootstrapService() {}
+
+SimpleRemoteEPCServer::Dispatcher::~Dispatcher() {}
+
+#if LLVM_ENABLE_THREADS
+void SimpleRemoteEPCServer::ThreadDispatcher::dispatch(
+ unique_function<void()> Work) {
+ {
+ std::lock_guard<std::mutex> Lock(DispatchMutex);
+ if (!Running)
+ return;
+ ++Outstanding;
+ }
+
+ std::thread([this, Work = std::move(Work)]() mutable {
+ Work();
+ std::lock_guard<std::mutex> Lock(DispatchMutex);
+ --Outstanding;
+ OutstandingCV.notify_all();
+ }).detach();
+}
+
+void SimpleRemoteEPCServer::ThreadDispatcher::shutdown() {
+ std::unique_lock<std::mutex> Lock(DispatchMutex);
+ Running = false;
+ OutstandingCV.wait(Lock, [this]() { return Outstanding == 0; });
+}
+#endif
+
+StringMap<ExecutorAddr> SimpleRemoteEPCServer::defaultBootstrapSymbols() {
+ StringMap<ExecutorAddr> DBS;
+ rt_bootstrap::addTo(DBS);
+ return DBS;
+}
+
+Expected<SimpleRemoteEPCTransportClient::HandleMessageAction>
+SimpleRemoteEPCServer::handleMessage(SimpleRemoteEPCOpcode OpC, uint64_t SeqNo,
+ ExecutorAddr TagAddr,
+ SimpleRemoteEPCArgBytesVector ArgBytes) {
+
+ LLVM_DEBUG({
+ dbgs() << "SimpleRemoteEPCServer::handleMessage: opc = ";
+ switch (OpC) {
+ case SimpleRemoteEPCOpcode::Setup:
+ dbgs() << "Setup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Setup?");
+ assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Setup?");
+ break;
+ case SimpleRemoteEPCOpcode::Hangup:
+ dbgs() << "Hangup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Hangup?");
+ assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Hangup?");
+ break;
+ case SimpleRemoteEPCOpcode::Result:
+ dbgs() << "Result";
+ assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Result?");
+ break;
+ case SimpleRemoteEPCOpcode::CallWrapper:
+ dbgs() << "CallWrapper";
+ break;
+ }
+ dbgs() << ", seqno = " << SeqNo
+ << ", tag-addr = " << formatv("{0:x}", TagAddr.getValue())
+ << ", arg-buffer = " << formatv("{0:x}", ArgBytes.size())
+ << " bytes\n";
+ });
+
+ using UT = std::underlying_type_t<SimpleRemoteEPCOpcode>;
+ if (static_cast<UT>(OpC) > static_cast<UT>(SimpleRemoteEPCOpcode::LastOpC))
+ return make_error<StringError>("Unexpected opcode",
+ inconvertibleErrorCode());
+
+ // TODO: Clean detach message?
+ switch (OpC) {
+ case SimpleRemoteEPCOpcode::Setup:
+ return make_error<StringError>("Unexpected Setup opcode",
+ inconvertibleErrorCode());
+ case SimpleRemoteEPCOpcode::Hangup:
+ return SimpleRemoteEPCTransportClient::EndSession;
+ case SimpleRemoteEPCOpcode::Result:
+ if (auto Err = handleResult(SeqNo, TagAddr, std::move(ArgBytes)))
+ return std::move(Err);
+ break;
+ case SimpleRemoteEPCOpcode::CallWrapper:
+ handleCallWrapper(SeqNo, TagAddr, std::move(ArgBytes));
+ break;
+ }
+ return ContinueSession;
+}
+
+Error SimpleRemoteEPCServer::waitForDisconnect() {
+ std::unique_lock<std::mutex> Lock(ServerStateMutex);
+ ShutdownCV.wait(Lock, [this]() { return RunState == ServerShutDown; });
+ return std::move(ShutdownErr);
+}
+
+void SimpleRemoteEPCServer::handleDisconnect(Error Err) {
+ PendingJITDispatchResultsMap TmpPending;
+
+ {
+ std::lock_guard<std::mutex> Lock(ServerStateMutex);
+ std::swap(TmpPending, PendingJITDispatchResults);
+ RunState = ServerShuttingDown;
+ }
+
+ // Send out-of-band errors to any waiting threads.
+ for (auto &KV : TmpPending)
+ KV.second->set_value(
+ shared::WrapperFunctionResult::createOutOfBandError("disconnecting"));
+
+ // Wait for dispatcher to clear.
+ D->shutdown();
+
+ // Shut down services.
+ while (!Services.empty()) {
+ ShutdownErr =
+ joinErrors(std::move(ShutdownErr), Services.back()->shutdown());
+ Services.pop_back();
+ }
+
+ std::lock_guard<std::mutex> Lock(ServerStateMutex);
+ ShutdownErr = joinErrors(std::move(ShutdownErr), std::move(Err));
+ RunState = ServerShutDown;
+ ShutdownCV.notify_all();
+}
+
+Error SimpleRemoteEPCServer::sendMessage(SimpleRemoteEPCOpcode OpC,
+ uint64_t SeqNo, ExecutorAddr TagAddr,
+ ArrayRef<char> ArgBytes) {
+
+ LLVM_DEBUG({
+ dbgs() << "SimpleRemoteEPCServer::sendMessage: opc = ";
+ switch (OpC) {
+ case SimpleRemoteEPCOpcode::Setup:
+ dbgs() << "Setup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Setup?");
+ assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Setup?");
+ break;
+ case SimpleRemoteEPCOpcode::Hangup:
+ dbgs() << "Hangup";
+ assert(SeqNo == 0 && "Non-zero SeqNo for Hangup?");
+ assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Hangup?");
+ break;
+ case SimpleRemoteEPCOpcode::Result:
+ dbgs() << "Result";
+ assert(TagAddr.getValue() == 0 && "Non-zero TagAddr for Result?");
+ break;
+ case SimpleRemoteEPCOpcode::CallWrapper:
+ dbgs() << "CallWrapper";
+ break;
+ }
+ dbgs() << ", seqno = " << SeqNo
+ << ", tag-addr = " << formatv("{0:x}", TagAddr.getValue())
+ << ", arg-buffer = " << formatv("{0:x}", ArgBytes.size())
+ << " bytes\n";
+ });
+ auto Err = T->sendMessage(OpC, SeqNo, TagAddr, ArgBytes);
+ LLVM_DEBUG({
+ if (Err)
+ dbgs() << " \\--> SimpleRemoteEPC::sendMessage failed\n";
+ });
+ return Err;
+}
+
+Error SimpleRemoteEPCServer::sendSetupMessage(
+ StringMap<ExecutorAddr> BootstrapSymbols) {
+
+ using namespace SimpleRemoteEPCDefaultBootstrapSymbolNames;
+
+ std::vector<char> SetupPacket;
+ SimpleRemoteEPCExecutorInfo EI;
+ EI.TargetTriple = sys::getProcessTriple();
+ if (auto PageSize = sys::Process::getPageSize())
+ EI.PageSize = *PageSize;
+ else
+ return PageSize.takeError();
+ EI.BootstrapSymbols = std::move(BootstrapSymbols);
+
+ assert(!EI.BootstrapSymbols.count(ExecutorSessionObjectName) &&
+ "Dispatch context name should not be set");
+ assert(!EI.BootstrapSymbols.count(DispatchFnName) &&
+ "Dispatch function name should not be set");
+ EI.BootstrapSymbols[ExecutorSessionObjectName] = ExecutorAddr::fromPtr(this);
+ EI.BootstrapSymbols[DispatchFnName] = ExecutorAddr::fromPtr(jitDispatchEntry);
+
+ using SPSSerialize =
+ shared::SPSArgList<shared::SPSSimpleRemoteEPCExecutorInfo>;
+ auto SetupPacketBytes =
+ shared::WrapperFunctionResult::allocate(SPSSerialize::size(EI));
+ shared::SPSOutputBuffer OB(SetupPacketBytes.data(), SetupPacketBytes.size());
+ if (!SPSSerialize::serialize(OB, EI))
+ return make_error<StringError>("Could not send setup packet",
+ inconvertibleErrorCode());
+
+ return sendMessage(SimpleRemoteEPCOpcode::Setup, 0, ExecutorAddr(),
+ {SetupPacketBytes.data(), SetupPacketBytes.size()});
+}
+
+Error SimpleRemoteEPCServer::handleResult(
+ uint64_t SeqNo, ExecutorAddr TagAddr,
+ SimpleRemoteEPCArgBytesVector ArgBytes) {
+ std::promise<shared::WrapperFunctionResult> *P = nullptr;
+ {
+ std::lock_guard<std::mutex> Lock(ServerStateMutex);
+ auto I = PendingJITDispatchResults.find(SeqNo);
+ if (I == PendingJITDispatchResults.end())
+ return make_error<StringError>("No call for sequence number " +
+ Twine(SeqNo),
+ inconvertibleErrorCode());
+ P = I->second;
+ PendingJITDispatchResults.erase(I);
+ releaseSeqNo(SeqNo);
+ }
+ auto R = shared::WrapperFunctionResult::allocate(ArgBytes.size());
+ memcpy(R.data(), ArgBytes.data(), ArgBytes.size());
+ P->set_value(std::move(R));
+ return Error::success();
+}
+
+void SimpleRemoteEPCServer::handleCallWrapper(
+ uint64_t RemoteSeqNo, ExecutorAddr TagAddr,
+ SimpleRemoteEPCArgBytesVector ArgBytes) {
+ D->dispatch([this, RemoteSeqNo, TagAddr, ArgBytes = std::move(ArgBytes)]() {
+ using WrapperFnTy =
+ shared::CWrapperFunctionResult (*)(const char *, size_t);
+ auto *Fn = TagAddr.toPtr<WrapperFnTy>();
+ shared::WrapperFunctionResult ResultBytes(
+ Fn(ArgBytes.data(), ArgBytes.size()));
+ if (auto Err = sendMessage(SimpleRemoteEPCOpcode::Result, RemoteSeqNo,
+ ExecutorAddr(),
+ {ResultBytes.data(), ResultBytes.size()}))
+ ReportError(std::move(Err));
+ });
+}
+
+shared::WrapperFunctionResult
+SimpleRemoteEPCServer::doJITDispatch(const void *FnTag, const char *ArgData,
+ size_t ArgSize) {
+ uint64_t SeqNo;
+ std::promise<shared::WrapperFunctionResult> ResultP;
+ auto ResultF = ResultP.get_future();
+ {
+ std::lock_guard<std::mutex> Lock(ServerStateMutex);
+ if (RunState != ServerRunning)
+ return shared::WrapperFunctionResult::createOutOfBandError(
+ "jit_dispatch not available (EPC server shut down)");
+
+ SeqNo = getNextSeqNo();
+ assert(!PendingJITDispatchResults.count(SeqNo) && "SeqNo already in use");
+ PendingJITDispatchResults[SeqNo] = &ResultP;
+ }
+
+ if (auto Err = sendMessage(SimpleRemoteEPCOpcode::CallWrapper, SeqNo,
+ ExecutorAddr::fromPtr(FnTag), {ArgData, ArgSize}))
+ ReportError(std::move(Err));
+
+ return ResultF.get();
+}
+
+shared::CWrapperFunctionResult
+SimpleRemoteEPCServer::jitDispatchEntry(void *DispatchCtx, const void *FnTag,
+ const char *ArgData, size_t ArgSize) {
+ return reinterpret_cast<SimpleRemoteEPCServer *>(DispatchCtx)
+ ->doJITDispatch(FnTag, ArgData, ArgSize)
+ .release();
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp
new file mode 100644
index 0000000000..a8e6c049cf
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.cpp
@@ -0,0 +1,43 @@
+//===--- TargetExecutionUtils.cpp - Execution utils for target processes --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TargetProcess/TargetExecutionUtils.h"
+
+#include <vector>
+
+namespace llvm {
+namespace orc {
+
+int runAsMain(int (*Main)(int, char *[]), ArrayRef<std::string> Args,
+ Optional<StringRef> ProgramName) {
+ std::vector<std::unique_ptr<char[]>> ArgVStorage;
+ std::vector<char *> ArgV;
+
+ ArgVStorage.reserve(Args.size() + (ProgramName ? 1 : 0));
+ ArgV.reserve(Args.size() + 1 + (ProgramName ? 1 : 0));
+
+ if (ProgramName) {
+ ArgVStorage.push_back(std::make_unique<char[]>(ProgramName->size() + 1));
+ llvm::copy(*ProgramName, &ArgVStorage.back()[0]);
+ ArgVStorage.back()[ProgramName->size()] = '\0';
+ ArgV.push_back(ArgVStorage.back().get());
+ }
+
+ for (const auto &Arg : Args) {
+ ArgVStorage.push_back(std::make_unique<char[]>(Arg.size() + 1));
+ llvm::copy(Arg, &ArgVStorage.back()[0]);
+ ArgVStorage.back()[Arg.size()] = '\0';
+ ArgV.push_back(ArgVStorage.back().get());
+ }
+ ArgV.push_back(nullptr);
+
+ return Main(Args.size() + !!ProgramName, ArgV.data());
+}
+
+} // End namespace orc.
+} // End namespace llvm.
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/ya.make
new file mode 100644
index 0000000000..faf97a4796
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess/ya.make
@@ -0,0 +1,33 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm14
+ contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared
+ contrib/libs/llvm14/lib/Support
+)
+
+ADDINCL(
+ contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ JITLoaderGDB.cpp
+ OrcRTBootstrap.cpp
+ RegisterEHFrames.cpp
+ SimpleExecutorDylibManager.cpp
+ SimpleExecutorMemoryManager.cpp
+ SimpleRemoteEPCServer.cpp
+ TargetExecutionUtils.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TaskDispatch.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TaskDispatch.cpp
new file mode 100644
index 0000000000..111c84ec87
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/TaskDispatch.cpp
@@ -0,0 +1,48 @@
+//===------------ TaskDispatch.cpp - ORC task dispatch utils --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/TaskDispatch.h"
+
+namespace llvm {
+namespace orc {
+
+char Task::ID = 0;
+char GenericNamedTask::ID = 0;
+const char *GenericNamedTask::DefaultDescription = "Generic Task";
+
+void Task::anchor() {}
+TaskDispatcher::~TaskDispatcher() {}
+
+void InPlaceTaskDispatcher::dispatch(std::unique_ptr<Task> T) { T->run(); }
+
+void InPlaceTaskDispatcher::shutdown() {}
+
+#if LLVM_ENABLE_THREADS
+void DynamicThreadPoolTaskDispatcher::dispatch(std::unique_ptr<Task> T) {
+ {
+ std::lock_guard<std::mutex> Lock(DispatchMutex);
+ ++Outstanding;
+ }
+
+ std::thread([this, T = std::move(T)]() mutable {
+ T->run();
+ std::lock_guard<std::mutex> Lock(DispatchMutex);
+ --Outstanding;
+ OutstandingCV.notify_all();
+ }).detach();
+}
+
+void DynamicThreadPoolTaskDispatcher::shutdown() {
+ std::unique_lock<std::mutex> Lock(DispatchMutex);
+ Running = false;
+ OutstandingCV.wait(Lock, [this]() { return Outstanding == 0; });
+}
+#endif
+
+} // namespace orc
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp
new file mode 100644
index 0000000000..2e128dd237
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ThreadSafeModule.cpp
@@ -0,0 +1,64 @@
+//===-- ThreadSafeModule.cpp - Thread safe Module, Context, and Utilities
+//h-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/Orc/ThreadSafeModule.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+
+namespace llvm {
+namespace orc {
+
+ThreadSafeModule cloneToNewContext(const ThreadSafeModule &TSM,
+ GVPredicate ShouldCloneDef,
+ GVModifier UpdateClonedDefSource) {
+ assert(TSM && "Can not clone null module");
+
+ if (!ShouldCloneDef)
+ ShouldCloneDef = [](const GlobalValue &) { return true; };
+
+ return TSM.withModuleDo([&](Module &M) {
+ SmallVector<char, 1> ClonedModuleBuffer;
+
+ {
+ std::set<GlobalValue *> ClonedDefsInSrc;
+ ValueToValueMapTy VMap;
+ auto Tmp = CloneModule(M, VMap, [&](const GlobalValue *GV) {
+ if (ShouldCloneDef(*GV)) {
+ ClonedDefsInSrc.insert(const_cast<GlobalValue *>(GV));
+ return true;
+ }
+ return false;
+ });
+
+ if (UpdateClonedDefSource)
+ for (auto *GV : ClonedDefsInSrc)
+ UpdateClonedDefSource(*GV);
+
+ BitcodeWriter BCWriter(ClonedModuleBuffer);
+
+ BCWriter.writeModule(*Tmp);
+ BCWriter.writeSymtab();
+ BCWriter.writeStrtab();
+ }
+
+ MemoryBufferRef ClonedModuleBufferRef(
+ StringRef(ClonedModuleBuffer.data(), ClonedModuleBuffer.size()),
+ "cloned module buffer");
+ ThreadSafeContext NewTSCtx(std::make_unique<LLVMContext>());
+
+ auto ClonedModule = cantFail(
+ parseBitcodeFile(ClonedModuleBufferRef, *NewTSCtx.getContext()));
+ ClonedModule->setModuleIdentifier(M.getName());
+ return ThreadSafeModule(std::move(ClonedModule), std::move(NewTSCtx));
+ });
+}
+
+} // end namespace orc
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ya.make
new file mode 100644
index 0000000000..cc129be705
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/Orc/ya.make
@@ -0,0 +1,78 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm14
+ contrib/libs/llvm14/include
+ contrib/libs/llvm14/lib/Analysis
+ contrib/libs/llvm14/lib/Bitcode/Reader
+ contrib/libs/llvm14/lib/Bitcode/Writer
+ contrib/libs/llvm14/lib/ExecutionEngine
+ contrib/libs/llvm14/lib/ExecutionEngine/JITLink
+ contrib/libs/llvm14/lib/ExecutionEngine/Orc/Shared
+ contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess
+ contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld
+ contrib/libs/llvm14/lib/IR
+ contrib/libs/llvm14/lib/MC
+ contrib/libs/llvm14/lib/MC/MCDisassembler
+ contrib/libs/llvm14/lib/Object
+ contrib/libs/llvm14/lib/Passes
+ contrib/libs/llvm14/lib/Support
+ contrib/libs/llvm14/lib/Target
+ contrib/libs/llvm14/lib/Transforms/Utils
+)
+
+ADDINCL(
+ contrib/libs/llvm14/lib/ExecutionEngine/Orc
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ CompileOnDemandLayer.cpp
+ CompileUtils.cpp
+ Core.cpp
+ DebugObjectManagerPlugin.cpp
+ DebugUtils.cpp
+ DebuggerSupportPlugin.cpp
+ ELFNixPlatform.cpp
+ EPCDebugObjectRegistrar.cpp
+ EPCDynamicLibrarySearchGenerator.cpp
+ EPCEHFrameRegistrar.cpp
+ EPCGenericDylibManager.cpp
+ EPCGenericJITLinkMemoryManager.cpp
+ EPCGenericRTDyldMemoryManager.cpp
+ EPCIndirectionUtils.cpp
+ ExecutionUtils.cpp
+ ExecutorProcessControl.cpp
+ IRCompileLayer.cpp
+ IRTransformLayer.cpp
+ IndirectionUtils.cpp
+ JITTargetMachineBuilder.cpp
+ LLJIT.cpp
+ Layer.cpp
+ LazyReexports.cpp
+ LookupAndRecordAddrs.cpp
+ MachOPlatform.cpp
+ Mangling.cpp
+ ObjectFileInterface.cpp
+ ObjectLinkingLayer.cpp
+ ObjectTransformLayer.cpp
+ OrcABISupport.cpp
+ OrcV2CBindings.cpp
+ RTDyldObjectLinkingLayer.cpp
+ SimpleRemoteEPC.cpp
+ SpeculateAnalyses.cpp
+ Speculation.cpp
+ TaskDispatch.cpp
+ ThreadSafeModule.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
new file mode 100644
index 0000000000..4a236e183c
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents/PerfJITEventListener.cpp
@@ -0,0 +1,507 @@
+//===-- PerfJITEventListener.cpp - Tell Linux's perf about JITted code ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a JITEventListener object that tells perf about JITted
+// functions, including source line information.
+//
+// Documentation for perf jit integration is available at:
+// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/perf/Documentation/jitdump-specification.txt
+// https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/tools/perf/Documentation/jit-interface.txt
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Config/config.h"
+#include "llvm/DebugInfo/DWARF/DWARFContext.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/SymbolSize.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Errno.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Threading.h"
+#include "llvm/Support/raw_ostream.h"
+#include <mutex>
+
+#include <sys/mman.h> // mmap()
+#include <time.h> // clock_gettime(), time(), localtime_r() */
+#include <unistd.h> // for read(), close()
+
+using namespace llvm;
+using namespace llvm::object;
+typedef DILineInfoSpecifier::FileLineInfoKind FileLineInfoKind;
+
+namespace {
+
+// language identifier (XXX: should we generate something better from debug
+// info?)
+#define JIT_LANG "llvm-IR"
+#define LLVM_PERF_JIT_MAGIC \
+ ((uint32_t)'J' << 24 | (uint32_t)'i' << 16 | (uint32_t)'T' << 8 | \
+ (uint32_t)'D')
+#define LLVM_PERF_JIT_VERSION 1
+
+// bit 0: set if the jitdump file is using an architecture-specific timestamp
+// clock source
+#define JITDUMP_FLAGS_ARCH_TIMESTAMP (1ULL << 0)
+
+struct LLVMPerfJitHeader;
+
+class PerfJITEventListener : public JITEventListener {
+public:
+ PerfJITEventListener();
+ ~PerfJITEventListener() {
+ if (MarkerAddr)
+ CloseMarker();
+ }
+
+ void notifyObjectLoaded(ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) override;
+ void notifyFreeingObject(ObjectKey K) override;
+
+private:
+ bool InitDebuggingDir();
+ bool OpenMarker();
+ void CloseMarker();
+ static bool FillMachine(LLVMPerfJitHeader &hdr);
+
+ void NotifyCode(Expected<llvm::StringRef> &Symbol, uint64_t CodeAddr,
+ uint64_t CodeSize);
+ void NotifyDebug(uint64_t CodeAddr, DILineInfoTable Lines);
+
+ // cache lookups
+ sys::Process::Pid Pid;
+
+ // base directory for output data
+ std::string JitPath;
+
+ // output data stream, closed via Dumpstream
+ int DumpFd = -1;
+
+ // output data stream
+ std::unique_ptr<raw_fd_ostream> Dumpstream;
+
+ // prevent concurrent dumps from messing up the output file
+ sys::Mutex Mutex;
+
+ // perf mmap marker
+ void *MarkerAddr = NULL;
+
+ // perf support ready
+ bool SuccessfullyInitialized = false;
+
+ // identifier for functions, primarily to identify when moving them around
+ uint64_t CodeGeneration = 1;
+};
+
+// The following are POD struct definitions from the perf jit specification
+
+enum LLVMPerfJitRecordType {
+ JIT_CODE_LOAD = 0,
+ JIT_CODE_MOVE = 1, // not emitted, code isn't moved
+ JIT_CODE_DEBUG_INFO = 2,
+ JIT_CODE_CLOSE = 3, // not emitted, unnecessary
+ JIT_CODE_UNWINDING_INFO = 4, // not emitted
+
+ JIT_CODE_MAX
+};
+
+struct LLVMPerfJitHeader {
+ uint32_t Magic; // characters "JiTD"
+ uint32_t Version; // header version
+ uint32_t TotalSize; // total size of header
+ uint32_t ElfMach; // elf mach target
+ uint32_t Pad1; // reserved
+ uint32_t Pid;
+ uint64_t Timestamp; // timestamp
+ uint64_t Flags; // flags
+};
+
+// record prefix (mandatory in each record)
+struct LLVMPerfJitRecordPrefix {
+ uint32_t Id; // record type identifier
+ uint32_t TotalSize;
+ uint64_t Timestamp;
+};
+
+struct LLVMPerfJitRecordCodeLoad {
+ LLVMPerfJitRecordPrefix Prefix;
+
+ uint32_t Pid;
+ uint32_t Tid;
+ uint64_t Vma;
+ uint64_t CodeAddr;
+ uint64_t CodeSize;
+ uint64_t CodeIndex;
+};
+
+struct LLVMPerfJitDebugEntry {
+ uint64_t Addr;
+ int Lineno; // source line number starting at 1
+ int Discrim; // column discriminator, 0 is default
+ // followed by null terminated filename, \xff\0 if same as previous entry
+};
+
+struct LLVMPerfJitRecordDebugInfo {
+ LLVMPerfJitRecordPrefix Prefix;
+
+ uint64_t CodeAddr;
+ uint64_t NrEntry;
+ // followed by NrEntry LLVMPerfJitDebugEntry records
+};
+
+static inline uint64_t timespec_to_ns(const struct timespec *ts) {
+ const uint64_t NanoSecPerSec = 1000000000;
+ return ((uint64_t)ts->tv_sec * NanoSecPerSec) + ts->tv_nsec;
+}
+
+static inline uint64_t perf_get_timestamp(void) {
+ struct timespec ts;
+ int ret;
+
+ ret = clock_gettime(CLOCK_MONOTONIC, &ts);
+ if (ret)
+ return 0;
+
+ return timespec_to_ns(&ts);
+}
+
+PerfJITEventListener::PerfJITEventListener()
+ : Pid(sys::Process::getProcessId()) {
+ // check if clock-source is supported
+ if (!perf_get_timestamp()) {
+ errs() << "kernel does not support CLOCK_MONOTONIC\n";
+ return;
+ }
+
+ if (!InitDebuggingDir()) {
+ errs() << "could not initialize debugging directory\n";
+ return;
+ }
+
+ std::string Filename;
+ raw_string_ostream FilenameBuf(Filename);
+ FilenameBuf << JitPath << "/jit-" << Pid << ".dump";
+
+ // Need to open ourselves, because we need to hand the FD to OpenMarker() and
+ // raw_fd_ostream doesn't expose the FD.
+ using sys::fs::openFileForWrite;
+ if (auto EC =
+ openFileForReadWrite(FilenameBuf.str(), DumpFd,
+ sys::fs::CD_CreateNew, sys::fs::OF_None)) {
+ errs() << "could not open JIT dump file " << FilenameBuf.str() << ": "
+ << EC.message() << "\n";
+ return;
+ }
+
+ Dumpstream = std::make_unique<raw_fd_ostream>(DumpFd, true);
+
+ LLVMPerfJitHeader Header = {0};
+ if (!FillMachine(Header))
+ return;
+
+ // signal this process emits JIT information
+ if (!OpenMarker())
+ return;
+
+ // emit dumpstream header
+ Header.Magic = LLVM_PERF_JIT_MAGIC;
+ Header.Version = LLVM_PERF_JIT_VERSION;
+ Header.TotalSize = sizeof(Header);
+ Header.Pid = Pid;
+ Header.Timestamp = perf_get_timestamp();
+ Dumpstream->write(reinterpret_cast<const char *>(&Header), sizeof(Header));
+
+ // Everything initialized, can do profiling now.
+ if (!Dumpstream->has_error())
+ SuccessfullyInitialized = true;
+}
+
+void PerfJITEventListener::notifyObjectLoaded(
+ ObjectKey K, const ObjectFile &Obj,
+ const RuntimeDyld::LoadedObjectInfo &L) {
+
+ if (!SuccessfullyInitialized)
+ return;
+
+ OwningBinary<ObjectFile> DebugObjOwner = L.getObjectForDebug(Obj);
+ const ObjectFile &DebugObj = *DebugObjOwner.getBinary();
+
+ // Get the address of the object image for use as a unique identifier
+ std::unique_ptr<DIContext> Context = DWARFContext::create(DebugObj);
+
+ // Use symbol info to iterate over functions in the object.
+ for (const std::pair<SymbolRef, uint64_t> &P : computeSymbolSizes(DebugObj)) {
+ SymbolRef Sym = P.first;
+ std::string SourceFileName;
+
+ Expected<SymbolRef::Type> SymTypeOrErr = Sym.getType();
+ if (!SymTypeOrErr) {
+ // There's not much we can with errors here
+ consumeError(SymTypeOrErr.takeError());
+ continue;
+ }
+ SymbolRef::Type SymType = *SymTypeOrErr;
+ if (SymType != SymbolRef::ST_Function)
+ continue;
+
+ Expected<StringRef> Name = Sym.getName();
+ if (!Name) {
+ consumeError(Name.takeError());
+ continue;
+ }
+
+ Expected<uint64_t> AddrOrErr = Sym.getAddress();
+ if (!AddrOrErr) {
+ consumeError(AddrOrErr.takeError());
+ continue;
+ }
+ uint64_t Size = P.second;
+ object::SectionedAddress Address;
+ Address.Address = *AddrOrErr;
+
+ uint64_t SectionIndex = object::SectionedAddress::UndefSection;
+ if (auto SectOrErr = Sym.getSection())
+ if (*SectOrErr != Obj.section_end())
+ SectionIndex = SectOrErr.get()->getIndex();
+
+ // According to spec debugging info has to come before loading the
+ // corresonding code load.
+ DILineInfoTable Lines = Context->getLineInfoForAddressRange(
+ {*AddrOrErr, SectionIndex}, Size, FileLineInfoKind::AbsoluteFilePath);
+
+ NotifyDebug(*AddrOrErr, Lines);
+ NotifyCode(Name, *AddrOrErr, Size);
+ }
+
+ // avoid races with writes
+ std::lock_guard<sys::Mutex> Guard(Mutex);
+
+ Dumpstream->flush();
+}
+
+void PerfJITEventListener::notifyFreeingObject(ObjectKey K) {
+ // perf currently doesn't have an interface for unloading. But munmap()ing the
+ // code section does, so that's ok.
+}
+
+bool PerfJITEventListener::InitDebuggingDir() {
+ time_t Time;
+ struct tm LocalTime;
+ char TimeBuffer[sizeof("YYYYMMDD")];
+ SmallString<64> Path;
+
+ // search for location to dump data to
+ if (const char *BaseDir = getenv("JITDUMPDIR"))
+ Path.append(BaseDir);
+ else if (!sys::path::home_directory(Path))
+ Path = ".";
+
+ // create debug directory
+ Path += "/.debug/jit/";
+ if (auto EC = sys::fs::create_directories(Path)) {
+ errs() << "could not create jit cache directory " << Path << ": "
+ << EC.message() << "\n";
+ return false;
+ }
+
+ // create unique directory for dump data related to this process
+ time(&Time);
+ localtime_r(&Time, &LocalTime);
+ strftime(TimeBuffer, sizeof(TimeBuffer), "%Y%m%d", &LocalTime);
+ Path += JIT_LANG "-jit-";
+ Path += TimeBuffer;
+
+ SmallString<128> UniqueDebugDir;
+
+ using sys::fs::createUniqueDirectory;
+ if (auto EC = createUniqueDirectory(Path, UniqueDebugDir)) {
+ errs() << "could not create unique jit cache directory " << UniqueDebugDir
+ << ": " << EC.message() << "\n";
+ return false;
+ }
+
+ JitPath = std::string(UniqueDebugDir.str());
+
+ return true;
+}
+
+bool PerfJITEventListener::OpenMarker() {
+ // We mmap the jitdump to create an MMAP RECORD in perf.data file. The mmap
+ // is captured either live (perf record running when we mmap) or in deferred
+ // mode, via /proc/PID/maps. The MMAP record is used as a marker of a jitdump
+ // file for more meta data info about the jitted code. Perf report/annotate
+ // detect this special filename and process the jitdump file.
+ //
+ // Mapping must be PROT_EXEC to ensure it is captured by perf record
+ // even when not using -d option.
+ MarkerAddr = ::mmap(NULL, sys::Process::getPageSizeEstimate(),
+ PROT_READ | PROT_EXEC, MAP_PRIVATE, DumpFd, 0);
+
+ if (MarkerAddr == MAP_FAILED) {
+ errs() << "could not mmap JIT marker\n";
+ return false;
+ }
+ return true;
+}
+
+void PerfJITEventListener::CloseMarker() {
+ if (!MarkerAddr)
+ return;
+
+ munmap(MarkerAddr, sys::Process::getPageSizeEstimate());
+ MarkerAddr = nullptr;
+}
+
+bool PerfJITEventListener::FillMachine(LLVMPerfJitHeader &hdr) {
+ char id[16];
+ struct {
+ uint16_t e_type;
+ uint16_t e_machine;
+ } info;
+
+ size_t RequiredMemory = sizeof(id) + sizeof(info);
+
+ ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
+ MemoryBuffer::getFileSlice("/proc/self/exe",
+ RequiredMemory,
+ 0);
+
+ // This'll not guarantee that enough data was actually read from the
+ // underlying file. Instead the trailing part of the buffer would be
+ // zeroed. Given the ELF signature check below that seems ok though,
+ // it's unlikely that the file ends just after that, and the
+ // consequence would just be that perf wouldn't recognize the
+ // signature.
+ if (auto EC = MB.getError()) {
+ errs() << "could not open /proc/self/exe: " << EC.message() << "\n";
+ return false;
+ }
+
+ memcpy(&id, (*MB)->getBufferStart(), sizeof(id));
+ memcpy(&info, (*MB)->getBufferStart() + sizeof(id), sizeof(info));
+
+ // check ELF signature
+ if (id[0] != 0x7f || id[1] != 'E' || id[2] != 'L' || id[3] != 'F') {
+ errs() << "invalid elf signature\n";
+ return false;
+ }
+
+ hdr.ElfMach = info.e_machine;
+
+ return true;
+}
+
+void PerfJITEventListener::NotifyCode(Expected<llvm::StringRef> &Symbol,
+ uint64_t CodeAddr, uint64_t CodeSize) {
+ assert(SuccessfullyInitialized);
+
+ // 0 length functions can't have samples.
+ if (CodeSize == 0)
+ return;
+
+ LLVMPerfJitRecordCodeLoad rec;
+ rec.Prefix.Id = JIT_CODE_LOAD;
+ rec.Prefix.TotalSize = sizeof(rec) + // debug record itself
+ Symbol->size() + 1 + // symbol name
+ CodeSize; // and code
+ rec.Prefix.Timestamp = perf_get_timestamp();
+
+ rec.CodeSize = CodeSize;
+ rec.Vma = 0;
+ rec.CodeAddr = CodeAddr;
+ rec.Pid = Pid;
+ rec.Tid = get_threadid();
+
+ // avoid interspersing output
+ std::lock_guard<sys::Mutex> Guard(Mutex);
+
+ rec.CodeIndex = CodeGeneration++; // under lock!
+
+ Dumpstream->write(reinterpret_cast<const char *>(&rec), sizeof(rec));
+ Dumpstream->write(Symbol->data(), Symbol->size() + 1);
+ Dumpstream->write(reinterpret_cast<const char *>(CodeAddr), CodeSize);
+}
+
+void PerfJITEventListener::NotifyDebug(uint64_t CodeAddr,
+ DILineInfoTable Lines) {
+ assert(SuccessfullyInitialized);
+
+ // Didn't get useful debug info.
+ if (Lines.empty())
+ return;
+
+ LLVMPerfJitRecordDebugInfo rec;
+ rec.Prefix.Id = JIT_CODE_DEBUG_INFO;
+ rec.Prefix.TotalSize = sizeof(rec); // will be increased further
+ rec.Prefix.Timestamp = perf_get_timestamp();
+ rec.CodeAddr = CodeAddr;
+ rec.NrEntry = Lines.size();
+
+ // compute total size size of record (variable due to filenames)
+ DILineInfoTable::iterator Begin = Lines.begin();
+ DILineInfoTable::iterator End = Lines.end();
+ for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
+ DILineInfo &line = It->second;
+ rec.Prefix.TotalSize += sizeof(LLVMPerfJitDebugEntry);
+ rec.Prefix.TotalSize += line.FileName.size() + 1;
+ }
+
+ // The debug_entry describes the source line information. It is defined as
+ // follows in order:
+ // * uint64_t code_addr: address of function for which the debug information
+ // is generated
+ // * uint32_t line : source file line number (starting at 1)
+ // * uint32_t discrim : column discriminator, 0 is default
+ // * char name[n] : source file name in ASCII, including null termination
+
+ // avoid interspersing output
+ std::lock_guard<sys::Mutex> Guard(Mutex);
+
+ Dumpstream->write(reinterpret_cast<const char *>(&rec), sizeof(rec));
+
+ for (DILineInfoTable::iterator It = Begin; It != End; ++It) {
+ LLVMPerfJitDebugEntry LineInfo;
+ DILineInfo &Line = It->second;
+
+ LineInfo.Addr = It->first;
+ // The function re-created by perf is preceded by a elf
+ // header. Need to adjust for that, otherwise the results are
+ // wrong.
+ LineInfo.Addr += 0x40;
+ LineInfo.Lineno = Line.Line;
+ LineInfo.Discrim = Line.Discriminator;
+
+ Dumpstream->write(reinterpret_cast<const char *>(&LineInfo),
+ sizeof(LineInfo));
+ Dumpstream->write(Line.FileName.c_str(), Line.FileName.size() + 1);
+ }
+}
+
+// There should be only a single event listener per process, otherwise perf gets
+// confused.
+llvm::ManagedStatic<PerfJITEventListener> PerfListener;
+
+} // end anonymous namespace
+
+namespace llvm {
+JITEventListener *JITEventListener::createPerfJITEventListener() {
+ return &*PerfListener;
+}
+
+} // namespace llvm
+
+LLVMJITEventListenerRef LLVMCreatePerfJITEventListener(void)
+{
+ return wrap(JITEventListener::createPerfJITEventListener());
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents/ya.make
new file mode 100644
index 0000000000..77548bc2de
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents/ya.make
@@ -0,0 +1,32 @@
+# Generated by devtools/yamaker.
+#
+# NOTE(review): this file is auto-generated; change the yamaker
+# configuration rather than editing it by hand.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+    contrib/libs/llvm14
+    contrib/libs/llvm14/lib/CodeGen
+    contrib/libs/llvm14/lib/DebugInfo/DWARF
+    contrib/libs/llvm14/lib/ExecutionEngine
+    contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld
+    contrib/libs/llvm14/lib/IR
+    contrib/libs/llvm14/lib/Object
+    contrib/libs/llvm14/lib/Support
+)
+
+ADDINCL(
+    contrib/libs/llvm14/lib/ExecutionEngine/PerfJITEvents
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+    PerfJITEventListener.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp
new file mode 100644
index 0000000000..210fbf6e43
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/JITSymbol.cpp
@@ -0,0 +1,169 @@
+//===----------- JITSymbol.cpp - JITSymbol class implementation -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// JITSymbol class implementation plus helper functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/JITSymbol.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/Object/ObjectFile.h"
+
+using namespace llvm;
+
+// Derive JIT symbol flags (Weak/Common/Exported/Callable) from an IR
+// GlobalValue's linkage, visibility and kind.
+JITSymbolFlags llvm::JITSymbolFlags::fromGlobalValue(const GlobalValue &GV) {
+  assert(GV.hasName() && "Can't get flags for anonymous symbol");
+
+  JITSymbolFlags Flags = JITSymbolFlags::None;
+  if (GV.hasWeakLinkage() || GV.hasLinkOnceLinkage())
+    Flags |= JITSymbolFlags::Weak;
+  if (GV.hasCommonLinkage())
+    Flags |= JITSymbolFlags::Common;
+  if (!GV.hasLocalLinkage() && !GV.hasHiddenVisibility())
+    Flags |= JITSymbolFlags::Exported;
+
+  // Functions are directly callable; an alias is callable iff it aliases a
+  // function.
+  if (isa<Function>(GV))
+    Flags |= JITSymbolFlags::Callable;
+  else if (isa<GlobalAlias>(GV) &&
+           isa<Function>(cast<GlobalAlias>(GV).getAliasee()))
+    Flags |= JITSymbolFlags::Callable;
+
+  // Check for a linker-private-global-prefix on the symbol name, in which
+  // case it must be marked as non-exported.
+  if (auto *M = GV.getParent()) {
+    const auto &DL = M->getDataLayout();
+    StringRef LPGP = DL.getLinkerPrivateGlobalPrefix();
+    if (!LPGP.empty() && GV.getName().front() == '\01' &&
+        GV.getName().substr(1).startswith(LPGP))
+      Flags &= ~JITSymbolFlags::Exported;
+  }
+
+  return Flags;
+}
+
+// Derive JIT symbol flags from a ThinLTO GlobalValueSummary, mirroring the
+// linkage-based logic of fromGlobalValue above.
+JITSymbolFlags llvm::JITSymbolFlags::fromSummary(GlobalValueSummary *S) {
+  JITSymbolFlags Flags = JITSymbolFlags::None;
+  auto L = S->linkage();
+  if (GlobalValue::isWeakLinkage(L) || GlobalValue::isLinkOnceLinkage(L))
+    Flags |= JITSymbolFlags::Weak;
+  if (GlobalValue::isCommonLinkage(L))
+    Flags |= JITSymbolFlags::Common;
+  if (GlobalValue::isExternalLinkage(L) || GlobalValue::isExternalWeakLinkage(L))
+    Flags |= JITSymbolFlags::Exported;
+
+  if (isa<FunctionSummary>(S))
+    Flags |= JITSymbolFlags::Callable;
+
+  return Flags;
+}
+
+// Derive JIT symbol flags from an object-file symbol. May fail because
+// reading the symbol's flags or type from the object can itself fail.
+Expected<JITSymbolFlags>
+llvm::JITSymbolFlags::fromObjectSymbol(const object::SymbolRef &Symbol) {
+  Expected<uint32_t> SymbolFlagsOrErr = Symbol.getFlags();
+  if (!SymbolFlagsOrErr)
+    // TODO: Test this error.
+    return SymbolFlagsOrErr.takeError();
+
+  JITSymbolFlags Flags = JITSymbolFlags::None;
+  if (*SymbolFlagsOrErr & object::BasicSymbolRef::SF_Weak)
+    Flags |= JITSymbolFlags::Weak;
+  if (*SymbolFlagsOrErr & object::BasicSymbolRef::SF_Common)
+    Flags |= JITSymbolFlags::Common;
+  if (*SymbolFlagsOrErr & object::BasicSymbolRef::SF_Exported)
+    Flags |= JITSymbolFlags::Exported;
+
+  auto SymbolType = Symbol.getType();
+  if (!SymbolType)
+    return SymbolType.takeError();
+
+  if (*SymbolType == object::SymbolRef::ST_Function)
+    Flags |= JITSymbolFlags::Callable;
+
+  return Flags;
+}
+
+// ARM-specific target flags: currently only records whether the symbol is a
+// Thumb-mode entry point. Unlike the generic overload above this aborts on
+// error rather than returning Expected.
+ARMJITSymbolFlags
+llvm::ARMJITSymbolFlags::fromObjectSymbol(const object::SymbolRef &Symbol) {
+  Expected<uint32_t> SymbolFlagsOrErr = Symbol.getFlags();
+  if (!SymbolFlagsOrErr)
+    // TODO: Actually report errors helpfully.
+    report_fatal_error(SymbolFlagsOrErr.takeError());
+  ARMJITSymbolFlags Flags;
+  if (*SymbolFlagsOrErr & object::BasicSymbolRef::SF_Thumb)
+    Flags |= ARMJITSymbolFlags::Thumb;
+  return Flags;
+}
+
+/// Performs lookup by, for each symbol, first calling
+/// findSymbolInLogicalDylib and if that fails calling
+/// findSymbol.
+///
+/// The callback is invoked exactly once: with the complete result map on
+/// success, or with the first error encountered (lookup stops there).
+void LegacyJITSymbolResolver::lookup(const LookupSet &Symbols,
+                                     OnResolvedFunction OnResolved) {
+  JITSymbolResolver::LookupResult Result;
+  for (auto &Symbol : Symbols) {
+    std::string SymName = Symbol.str();
+    if (auto Sym = findSymbolInLogicalDylib(SymName)) {
+      if (auto AddrOrErr = Sym.getAddress())
+        Result[Symbol] = JITEvaluatedSymbol(*AddrOrErr, Sym.getFlags());
+      else {
+        OnResolved(AddrOrErr.takeError());
+        return;
+      }
+    } else if (auto Err = Sym.takeError()) {
+      OnResolved(std::move(Err));
+      return;
+    } else {
+      // findSymbolInLogicalDylib failed. Let's try findSymbol.
+      if (auto Sym = findSymbol(SymName)) {
+        if (auto AddrOrErr = Sym.getAddress())
+          Result[Symbol] = JITEvaluatedSymbol(*AddrOrErr, Sym.getFlags());
+        else {
+          OnResolved(AddrOrErr.takeError());
+          return;
+        }
+      } else if (auto Err = Sym.takeError()) {
+        OnResolved(std::move(Err));
+        return;
+      } else {
+        // Neither lookup produced a definition or an error: the symbol is
+        // simply missing.
+        OnResolved(make_error<StringError>("Symbol not found: " + Symbol,
+                                           inconvertibleErrorCode()));
+        return;
+      }
+    }
+  }
+
+  OnResolved(std::move(Result));
+}
+
+/// Performs flags lookup by calling findSymbolInLogicalDylib and
+/// returning the flags value for that symbol.
+///
+/// Returns the subset of Symbols that the caller is responsible for
+/// materializing: those with no existing definition, or only a non-strong
+/// (weak/common) one.
+Expected<JITSymbolResolver::LookupSet>
+LegacyJITSymbolResolver::getResponsibilitySet(const LookupSet &Symbols) {
+  JITSymbolResolver::LookupSet Result;
+
+  for (auto &Symbol : Symbols) {
+    std::string SymName = Symbol.str();
+    if (auto Sym = findSymbolInLogicalDylib(SymName)) {
+      // If there's an existing def but it is not strong, then the caller is
+      // responsible for it.
+      if (!Sym.getFlags().isStrong())
+        Result.insert(Symbol);
+    } else if (auto Err = Sym.takeError())
+      return std::move(Err);
+    else {
+      // If there is no existing definition then the caller is responsible for
+      // it.
+      Result.insert(Symbol);
+    }
+  }
+
+  return std::move(Result);
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
new file mode 100644
index 0000000000..9c8d402364
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RTDyldMemoryManager.cpp
@@ -0,0 +1,297 @@
+//===-- RTDyldMemoryManager.cpp - Memory manager for MC-JIT -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the runtime dynamic memory manager base class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Config/config.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdlib>
+
+#ifdef __linux__
+ // These includes used by RTDyldMemoryManager::getPointerToNamedFunction()
+ // for Glibc trickery. See comments in this function for more information.
+ #ifdef HAVE_SYS_STAT_H
+ #include <sys/stat.h>
+ #endif
+ #include <fcntl.h>
+ #include <unistd.h>
+#endif
+
+namespace llvm {
+
+RTDyldMemoryManager::~RTDyldMemoryManager() {}
+
+#if defined(HAVE_REGISTER_FRAME) && defined(HAVE_DEREGISTER_FRAME) && \
+ !defined(__SEH__) && !defined(__USING_SJLJ_EXCEPTIONS__)
+extern "C" void __register_frame(void *);
+extern "C" void __deregister_frame(void *);
+#else
+// The building compiler does not have __(de)register_frame but
+// it may be found at runtime in a dynamically-loaded library.
+// For example, this happens when building LLVM with Visual C++
+// but using the MingW runtime.
+// Fallback used when the build compiler lacks __register_frame: resolve it
+// once at runtime from any loaded library and cache the result. Silently a
+// no-op if the symbol cannot be found.
+static void __register_frame(void *p) {
+  static bool Searched = false;
+  static void((*rf)(void *)) = 0;
+
+  if (!Searched) {
+    Searched = true;
+    *(void **)&rf =
+        llvm::sys::DynamicLibrary::SearchForAddressOfSymbol("__register_frame");
+  }
+  if (rf)
+    rf(p);
+}
+
+// Runtime-resolved counterpart of the __register_frame fallback above;
+// same search-once-and-cache strategy, no-op if unavailable.
+static void __deregister_frame(void *p) {
+  static bool Searched = false;
+  static void((*df)(void *)) = 0;
+
+  if (!Searched) {
+    Searched = true;
+    *(void **)&df = llvm::sys::DynamicLibrary::SearchForAddressOfSymbol(
+        "__deregister_frame");
+  }
+  if (df)
+    df(p);
+}
+#endif
+
+/* libgcc and libunwind __register_frame behave differently. We use the presence
+ * of __unw_add_dynamic_fde to detect libunwind. */
+#if defined(HAVE_UNW_ADD_DYNAMIC_FDE) || defined(__APPLE__)
+
+// (De)registers a single .eh_frame entry with libunwind and returns a
+// pointer past the entry. Entries whose CIE-pointer field is zero are CIEs,
+// not FDEs, and are skipped.
+static const char *processFDE(const char *Entry, bool isDeregister) {
+  const char *P = Entry;
+  uint32_t Length = *((const uint32_t *)P);   // entry length, excludes this field
+  P += 4;
+  uint32_t Offset = *((const uint32_t *)P);   // 0 => CIE, nonzero => FDE
+  if (Offset != 0) {
+    if (isDeregister)
+      __deregister_frame(const_cast<char *>(Entry));
+    else
+      __register_frame(const_cast<char *>(Entry));
+  }
+  return P + Length;
+}
+
+// This implementation handles frame registration for local targets.
+// Memory managers for remote targets should re-implement this function
+// and use the LoadAddr parameter.
+void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
+                                                    size_t Size) {
+  // On OS X, __register_frame takes a single FDE as an argument.
+  // See http://lists.llvm.org/pipermail/llvm-dev/2013-April/061737.html
+  // and projects/libunwind/src/UnwindLevel1-gcc-ext.c.
+  // So walk the section and register each FDE individually.
+  const char *P = (const char *)Addr;
+  const char *End = P + Size;
+  do {
+    P = processFDE(P, false);
+  } while(P != End);
+}
+
+// Mirror of registerEHFramesInProcess: walk the section and deregister each
+// FDE individually (libunwind-style API).
+void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
+                                                      size_t Size) {
+  const char *P = (const char *)Addr;
+  const char *End = P + Size;
+  do {
+    P = processFDE(P, true);
+  } while(P != End);
+}
+
+#else
+
+void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
+                                                    size_t Size) {
+  // On Linux __register_frame takes a single argument:
+  // a pointer to the start of the .eh_frame section.
+
+  // How can it find the end? Because crtendS.o is linked
+  // in and it has an .eh_frame section with four zero chars.
+  // Size is therefore unused on this (libgcc) path.
+  __register_frame(Addr);
+}
+
+// libgcc-style deregistration: one call for the whole .eh_frame section.
+void RTDyldMemoryManager::deregisterEHFramesInProcess(uint8_t *Addr,
+                                                      size_t Size) {
+  __deregister_frame(Addr);
+}
+
+#endif
+
+// Registers the frames in-process and remembers them so that
+// deregisterEHFrames() can undo the registration later. LoadAddr is unused
+// here (local-target implementation).
+void RTDyldMemoryManager::registerEHFrames(uint8_t *Addr, uint64_t LoadAddr,
+                                           size_t Size) {
+  registerEHFramesInProcess(Addr, Size);
+  EHFrames.push_back({Addr, Size});
+}
+
+// Deregisters every frame range previously recorded by registerEHFrames()
+// and clears the bookkeeping list.
+void RTDyldMemoryManager::deregisterEHFrames() {
+  for (auto &Frame : EHFrames)
+    deregisterEHFramesInProcess(Frame.Addr, Frame.Size);
+  EHFrames.clear();
+}
+
+// Do-nothing stand-in returned for "__main" so that JITed code does not run
+// the host's constructors/destructors again (see getSymbolAddressInProcess).
+static int jit_noop() {
+  return 0;
+}
+
+// ARM math functions are statically linked on Android from libgcc.a, but not
+// available at runtime for dynamic linking. On Linux these are usually placed
+// in libgcc_s.so so can be found by normal dynamic lookup.
+#if defined(__BIONIC__) && defined(__arm__)
+// List of functions which are statically linked on Android and can be generated
+// by LLVM. This is done as a nested macro which is used once to declare the
+// imported functions with ARM_MATH_DECL and once to compare them to the
+// user-requested symbol in getSymbolAddress with ARM_MATH_CHECK. The test
+// assumes that all functions start with __aeabi_ and getSymbolAddress must be
+// modified if that changes.
+#define ARM_MATH_IMPORTS(PP) \
+ PP(__aeabi_d2f) \
+ PP(__aeabi_d2iz) \
+ PP(__aeabi_d2lz) \
+ PP(__aeabi_d2uiz) \
+ PP(__aeabi_d2ulz) \
+ PP(__aeabi_dadd) \
+ PP(__aeabi_dcmpeq) \
+ PP(__aeabi_dcmpge) \
+ PP(__aeabi_dcmpgt) \
+ PP(__aeabi_dcmple) \
+ PP(__aeabi_dcmplt) \
+ PP(__aeabi_dcmpun) \
+ PP(__aeabi_ddiv) \
+ PP(__aeabi_dmul) \
+ PP(__aeabi_dsub) \
+ PP(__aeabi_f2d) \
+ PP(__aeabi_f2iz) \
+ PP(__aeabi_f2lz) \
+ PP(__aeabi_f2uiz) \
+ PP(__aeabi_f2ulz) \
+ PP(__aeabi_fadd) \
+ PP(__aeabi_fcmpeq) \
+ PP(__aeabi_fcmpge) \
+ PP(__aeabi_fcmpgt) \
+ PP(__aeabi_fcmple) \
+ PP(__aeabi_fcmplt) \
+ PP(__aeabi_fcmpun) \
+ PP(__aeabi_fdiv) \
+ PP(__aeabi_fmul) \
+ PP(__aeabi_fsub) \
+ PP(__aeabi_i2d) \
+ PP(__aeabi_i2f) \
+ PP(__aeabi_idiv) \
+ PP(__aeabi_idivmod) \
+ PP(__aeabi_l2d) \
+ PP(__aeabi_l2f) \
+ PP(__aeabi_lasr) \
+ PP(__aeabi_ldivmod) \
+ PP(__aeabi_llsl) \
+ PP(__aeabi_llsr) \
+ PP(__aeabi_lmul) \
+ PP(__aeabi_ui2d) \
+ PP(__aeabi_ui2f) \
+ PP(__aeabi_uidiv) \
+ PP(__aeabi_uidivmod) \
+ PP(__aeabi_ul2d) \
+ PP(__aeabi_ul2f) \
+ PP(__aeabi_uldivmod)
+
+// Declare statically linked math functions on ARM. The function declarations
+// here do not have the correct prototypes for each function in
+// ARM_MATH_IMPORTS, but it doesn't matter because only the symbol addresses are
+// needed. In particular the __aeabi_*divmod functions do not have calling
+// conventions which match any C prototype.
+#define ARM_MATH_DECL(name) extern "C" void name();
+ARM_MATH_IMPORTS(ARM_MATH_DECL)
+#undef ARM_MATH_DECL
+#endif
+
+#if defined(__linux__) && defined(__GLIBC__) && \
+ (defined(__i386__) || defined(__x86_64__))
+extern "C" LLVM_ATTRIBUTE_WEAK void __morestack();
+#endif
+
+// Resolves a symbol name to an address in the host process. Handles a few
+// special cases (glibc stat-family stubs, __morestack, Android ARM math
+// helpers, "__main") before falling back to a dynamic-library search.
+// Returns 0 if the symbol cannot be found.
+uint64_t
+RTDyldMemoryManager::getSymbolAddressInProcess(const std::string &Name) {
+  // This implementation assumes that the host program is the target.
+  // Clients generating code for a remote target should implement their own
+  // memory manager.
+#if defined(__linux__) && defined(__GLIBC__)
+  //===--------------------------------------------------------------------===//
+  // Function stubs that are invoked instead of certain library calls
+  //
+  // Force the following functions to be linked in to anything that uses the
+  // JIT. This is a hack designed to work around the all-too-clever Glibc
+  // strategy of making these functions work differently when inlined vs. when
+  // not inlined, and hiding their real definitions in a separate archive file
+  // that the dynamic linker can't see. For more info, search for
+  // 'libc_nonshared.a' on Google, or read http://llvm.org/PR274.
+  if (Name == "stat") return (uint64_t)&stat;
+  if (Name == "fstat") return (uint64_t)&fstat;
+  if (Name == "lstat") return (uint64_t)&lstat;
+  if (Name == "stat64") return (uint64_t)&stat64;
+  if (Name == "fstat64") return (uint64_t)&fstat64;
+  if (Name == "lstat64") return (uint64_t)&lstat64;
+  if (Name == "atexit") return (uint64_t)&atexit;
+  if (Name == "mknod") return (uint64_t)&mknod;
+
+#if defined(__i386__) || defined(__x86_64__)
+  // __morestack lives in libgcc, a static library.
+  if (&__morestack && Name == "__morestack")
+    return (uint64_t)&__morestack;
+#endif
+#endif // __linux__ && __GLIBC__
+
+  // See ARM_MATH_IMPORTS definition for explanation
+#if defined(__BIONIC__) && defined(__arm__)
+  if (Name.compare(0, 8, "__aeabi_") == 0) {
+    // Check if the user has requested any of the functions listed in
+    // ARM_MATH_IMPORTS, and if so redirect to the statically linked symbol.
+#define ARM_MATH_CHECK(fn) if (Name == #fn) return (uint64_t)&fn;
+    ARM_MATH_IMPORTS(ARM_MATH_CHECK)
+#undef ARM_MATH_CHECK
+  }
+#endif
+
+  // We should not invoke parent's ctors/dtors from generated main()!
+  // On Mingw and Cygwin, the symbol __main is resolved to
+  // callee's(eg. tools/lli) one, to invoke wrong duplicated ctors
+  // (and register wrong callee's dtors with atexit(3)).
+  // We expect ExecutionEngine::runStaticConstructorsDestructors()
+  // is called before ExecutionEngine::runFunctionAsMain() is called.
+  if (Name == "__main") return (uint64_t)&jit_noop;
+
+  const char *NameStr = Name.c_str();
+
+  // DynamicLibrary::SearchForAddresOfSymbol expects an unmangled 'C' symbol
+  // name so if we're on Darwin, strip the leading '_' off.
+#ifdef __APPLE__
+  if (NameStr[0] == '_')
+    ++NameStr;
+#endif
+
+  return (uint64_t)sys::DynamicLibrary::SearchForAddressOfSymbol(NameStr);
+}
+
+// Convenience wrapper over getSymbolAddress(): returns the symbol address as
+// a pointer, optionally aborting the process when the symbol is unresolved.
+void *RTDyldMemoryManager::getPointerToNamedFunction(const std::string &Name,
+                                                     bool AbortOnFailure) {
+  uint64_t Addr = getSymbolAddress(Name);
+
+  if (!Addr && AbortOnFailure)
+    report_fatal_error(Twine("Program used external function '") + Name +
+                       "' which could not be resolved!");
+
+  return (void*)Addr;
+}
+
+// Out-of-line anchor methods pin these classes' vtables to this translation
+// unit (avoids the vtable being emitted in every TU that uses the class).
+void RTDyldMemoryManager::anchor() {}
+void MCJITMemoryManager::anchor() {}
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
new file mode 100644
index 0000000000..3f38d26869
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -0,0 +1,1482 @@
+//===-- RuntimeDyld.cpp - Run-time dynamic linker for MC-JIT ----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "RuntimeDyldCOFF.h"
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldImpl.h"
+#include "RuntimeDyldMachO.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Support/Alignment.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MathExtras.h"
+#include <mutex>
+
+#include <future>
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+enum RuntimeDyldErrorCode {
+ GenericRTDyldError = 1
+};
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+// std::error_category used to map RuntimeDyld errors onto std::error_code
+// (see the FIXME above: transitional support only).
+class RuntimeDyldErrorCategory : public std::error_category {
+public:
+  const char *name() const noexcept override { return "runtimedyld"; }
+
+  std::string message(int Condition) const override {
+    switch (static_cast<RuntimeDyldErrorCode>(Condition)) {
+      case GenericRTDyldError: return "Generic RuntimeDyld error";
+    }
+    llvm_unreachable("Unrecognized RuntimeDyldErrorCode");
+  }
+};
+
+static ManagedStatic<RuntimeDyldErrorCategory> RTDyldErrorCategory;
+
+}
+
+char RuntimeDyldError::ID = 0;
+
+// Prints the stored error message (llvm::Error logging hook).
+void RuntimeDyldError::log(raw_ostream &OS) const {
+  OS << ErrMsg << "\n";
+}
+
+// All RuntimeDyld errors collapse to the single generic code; the detailed
+// message is only available via log() above.
+std::error_code RuntimeDyldError::convertToErrorCode() const {
+  return std::error_code(GenericRTDyldError, *RTDyldErrorCategory);
+}
+
+// Empty out-of-line virtual destructor as the key function.
+RuntimeDyldImpl::~RuntimeDyldImpl() {}
+
+// Pin LoadedObjectInfo's vtables to this file.
+void RuntimeDyld::LoadedObjectInfo::anchor() {}
+
+namespace llvm {
+
+void RuntimeDyldImpl::registerEHFrames() {}
+
+void RuntimeDyldImpl::deregisterEHFrames() {
+ MemMgr.deregisterEHFrames();
+}
+
+#ifndef NDEBUG
+// Debug helper: hex-dumps a section's contents to dbgs(), 16 bytes per row,
+// with rows aligned to the section's load address. State labels the dump
+// (e.g. "before relocations").
+static void dumpSectionMemory(const SectionEntry &S, StringRef State) {
+  dbgs() << "----- Contents of section " << S.getName() << " " << State
+         << " -----";
+
+  if (S.getAddress() == nullptr) {
+    dbgs() << "\n          <section not emitted>\n";
+    return;
+  }
+
+  const unsigned ColsPerRow = 16;
+
+  uint8_t *DataAddr = S.getAddress();
+  uint64_t LoadAddr = S.getLoadAddress();
+
+  // Pad the first row so that byte columns line up with the 16-byte-aligned
+  // row addresses.
+  unsigned StartPadding = LoadAddr & (ColsPerRow - 1);
+  unsigned BytesRemaining = S.getSize();
+
+  if (StartPadding) {
+    dbgs() << "\n" << format("0x%016" PRIx64,
+                             LoadAddr & ~(uint64_t)(ColsPerRow - 1)) << ":";
+    while (StartPadding--)
+      dbgs() << "   ";
+  }
+
+  while (BytesRemaining > 0) {
+    if ((LoadAddr & (ColsPerRow - 1)) == 0)
+      dbgs() << "\n" << format("0x%016" PRIx64, LoadAddr) << ":";
+
+    dbgs() << " " << format("%02x", *DataAddr);
+
+    ++DataAddr;
+    ++LoadAddr;
+    --BytesRemaining;
+  }
+
+  dbgs() << "\n";
+}
+#endif
+
+// Resolve the relocations for all symbols we currently know about.
+void RuntimeDyldImpl::resolveRelocations() {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // Print out the sections prior to relocation.
+  LLVM_DEBUG({
+    for (SectionEntry &S : Sections)
+      dumpSectionMemory(S, "before relocations");
+  });
+
+  // First, resolve relocations associated with external symbols.
+  // NOTE(review): a resolution failure only records the error; local
+  // relocations below are still applied afterwards.
+  if (auto Err = resolveExternalSymbols()) {
+    HasError = true;
+    ErrorStr = toString(std::move(Err));
+  }
+
+  resolveLocalRelocations();
+
+  // Print out sections after relocation.
+  LLVM_DEBUG({
+    for (SectionEntry &S : Sections)
+      dumpSectionMemory(S, "after relocations");
+  });
+}
+
+// Applies every queued section-relative relocation and then drops the queue;
+// a second call is therefore a no-op until new relocations are added.
+void RuntimeDyldImpl::resolveLocalRelocations() {
+  // Iterate over all outstanding relocations
+  for (const auto &Rel : Relocations) {
+    // The Section here (Sections[i]) refers to the section in which the
+    // symbol for the relocation is located.  The SectionID in the relocation
+    // entry provides the section to which the relocation will be applied.
+    unsigned Idx = Rel.first;
+    uint64_t Addr = getSectionLoadAddress(Idx);
+    LLVM_DEBUG(dbgs() << "Resolving relocations Section #" << Idx << "\t"
+                      << format("%p", (uintptr_t)Addr) << "\n");
+    resolveRelocationList(Rel.second, Addr);
+  }
+  Relocations.clear();
+}
+
+// Finds the section emitted at LocalAddress and reassigns its target load
+// address. Asserts (unreachable) if no section matches: callers must pass an
+// address previously handed out for a section start.
+void RuntimeDyldImpl::mapSectionAddress(const void *LocalAddress,
+                                        uint64_t TargetAddress) {
+  std::lock_guard<sys::Mutex> locked(lock);
+  for (unsigned i = 0, e = Sections.size(); i != e; ++i) {
+    if (Sections[i].getAddress() == LocalAddress) {
+      reassignSectionAddress(i, TargetAddress);
+      return;
+    }
+  }
+  llvm_unreachable("Attempting to remap address of unknown section!");
+}
+
+// Computes a symbol's offset relative to the start of its section, writing it
+// to Result. Fails only if the symbol's address cannot be read.
+static Error getOffset(const SymbolRef &Sym, SectionRef Sec,
+                       uint64_t &Result) {
+  Expected<uint64_t> AddressOrErr = Sym.getAddress();
+  if (!AddressOrErr)
+    return AddressOrErr.takeError();
+  Result = *AddressOrErr - Sec.getAddress();
+  return Error::success();
+}
+
+// Loads one object file: reserves memory (if the memory manager wants a
+// single up-front reservation), builds the global symbol table, emits the
+// needed sections, queues relocations, and lets the subclass finalize.
+// Returns the mapping from object sections to internal section IDs.
+Expected<RuntimeDyldImpl::ObjSectionToIDMap>
+RuntimeDyldImpl::loadObjectImpl(const object::ObjectFile &Obj) {
+  std::lock_guard<sys::Mutex> locked(lock);
+
+  // Save information about our target
+  Arch = (Triple::ArchType)Obj.getArch();
+  IsTargetLittleEndian = Obj.isLittleEndian();
+  setMipsABI(Obj);
+
+  // Compute the memory size required to load all sections to be loaded
+  // and pass this information to the memory manager
+  if (MemMgr.needsToReserveAllocationSpace()) {
+    uint64_t CodeSize = 0, RODataSize = 0, RWDataSize = 0;
+    uint32_t CodeAlign = 1, RODataAlign = 1, RWDataAlign = 1;
+    if (auto Err = computeTotalAllocSize(Obj,
+                                         CodeSize, CodeAlign,
+                                         RODataSize, RODataAlign,
+                                         RWDataSize, RWDataAlign))
+      return std::move(Err);
+    MemMgr.reserveAllocationSpace(CodeSize, CodeAlign, RODataSize, RODataAlign,
+                                  RWDataSize, RWDataAlign);
+  }
+
+  // Used sections from the object file
+  ObjSectionToIDMap LocalSections;
+
+  // Common symbols requiring allocation, with their sizes and alignments
+  CommonSymbolList CommonSymbolsToAllocate;
+
+  uint64_t CommonSize = 0;
+  uint32_t CommonAlign = 0;
+
+  // First, collect all weak and common symbols. We need to know if stronger
+  // definitions occur elsewhere.
+  JITSymbolResolver::LookupSet ResponsibilitySet;
+  {
+    JITSymbolResolver::LookupSet Symbols;
+    for (auto &Sym : Obj.symbols()) {
+      Expected<uint32_t> FlagsOrErr = Sym.getFlags();
+      if (!FlagsOrErr)
+        // TODO: Test this error.
+        return FlagsOrErr.takeError();
+      if ((*FlagsOrErr & SymbolRef::SF_Common) ||
+          (*FlagsOrErr & SymbolRef::SF_Weak)) {
+        // Get symbol name.
+        if (auto NameOrErr = Sym.getName())
+          Symbols.insert(*NameOrErr);
+        else
+          return NameOrErr.takeError();
+      }
+    }
+
+    if (auto ResultOrErr = Resolver.getResponsibilitySet(Symbols))
+      ResponsibilitySet = std::move(*ResultOrErr);
+    else
+      return ResultOrErr.takeError();
+  }
+
+  // Parse symbols
+  LLVM_DEBUG(dbgs() << "Parse symbols:\n");
+  for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
+       ++I) {
+    Expected<uint32_t> FlagsOrErr = I->getFlags();
+    if (!FlagsOrErr)
+      // TODO: Test this error.
+      return FlagsOrErr.takeError();
+
+    // Skip undefined symbols.
+    if (*FlagsOrErr & SymbolRef::SF_Undefined)
+      continue;
+
+    // Get the symbol type.
+    object::SymbolRef::Type SymType;
+    if (auto SymTypeOrErr = I->getType())
+      SymType = *SymTypeOrErr;
+    else
+      return SymTypeOrErr.takeError();
+
+    // Get symbol name.
+    StringRef Name;
+    if (auto NameOrErr = I->getName())
+      Name = *NameOrErr;
+    else
+      return NameOrErr.takeError();
+
+    // Compute JIT symbol flags.
+    auto JITSymFlags = getJITSymbolFlags(*I);
+    if (!JITSymFlags)
+      return JITSymFlags.takeError();
+
+    // If this is a weak definition, check to see if there's a strong one.
+    // If there is, skip this symbol (we won't be providing it: the strong
+    // definition will). If there's no strong definition, make this definition
+    // strong.
+    if (JITSymFlags->isWeak() || JITSymFlags->isCommon()) {
+      // First check whether there's already a definition in this instance.
+      if (GlobalSymbolTable.count(Name))
+        continue;
+
+      // If we're not responsible for this symbol, skip it.
+      if (!ResponsibilitySet.count(Name))
+        continue;
+
+      // Otherwise update the flags on the symbol to make this definition
+      // strong.
+      if (JITSymFlags->isWeak())
+        *JITSymFlags &= ~JITSymbolFlags::Weak;
+      if (JITSymFlags->isCommon()) {
+        *JITSymFlags &= ~JITSymbolFlags::Common;
+        uint32_t Align = I->getAlignment();
+        uint64_t Size = I->getCommonSize();
+        if (!CommonAlign)
+          CommonAlign = Align;
+        CommonSize = alignTo(CommonSize, Align) + Size;
+        CommonSymbolsToAllocate.push_back(*I);
+      }
+    }
+
+    // Absolute symbols get a dedicated pseudo-section ID; everything else is
+    // recorded against the section it lives in (emitting that section first
+    // if necessary).
+    if (*FlagsOrErr & SymbolRef::SF_Absolute &&
+        SymType != object::SymbolRef::ST_File) {
+      uint64_t Addr = 0;
+      if (auto AddrOrErr = I->getAddress())
+        Addr = *AddrOrErr;
+      else
+        return AddrOrErr.takeError();
+
+      unsigned SectionID = AbsoluteSymbolSection;
+
+      LLVM_DEBUG(dbgs() << "\tType: " << SymType << " (absolute) Name: " << Name
+                        << " SID: " << SectionID
+                        << " Offset: " << format("%p", (uintptr_t)Addr)
+                        << " flags: " << *FlagsOrErr << "\n");
+      if (!Name.empty()) // Skip absolute symbol relocations.
+        GlobalSymbolTable[Name] =
+            SymbolTableEntry(SectionID, Addr, *JITSymFlags);
+    } else if (SymType == object::SymbolRef::ST_Function ||
+               SymType == object::SymbolRef::ST_Data ||
+               SymType == object::SymbolRef::ST_Unknown ||
+               SymType == object::SymbolRef::ST_Other) {
+
+      section_iterator SI = Obj.section_end();
+      if (auto SIOrErr = I->getSection())
+        SI = *SIOrErr;
+      else
+        return SIOrErr.takeError();
+
+      if (SI == Obj.section_end())
+        continue;
+
+      // Get symbol offset.
+      uint64_t SectOffset;
+      if (auto Err = getOffset(*I, *SI, SectOffset))
+        return std::move(Err);
+
+      bool IsCode = SI->isText();
+      unsigned SectionID;
+      if (auto SectionIDOrErr =
+              findOrEmitSection(Obj, *SI, IsCode, LocalSections))
+        SectionID = *SectionIDOrErr;
+      else
+        return SectionIDOrErr.takeError();
+
+      LLVM_DEBUG(dbgs() << "\tType: " << SymType << " Name: " << Name
+                        << " SID: " << SectionID
+                        << " Offset: " << format("%p", (uintptr_t)SectOffset)
+                        << " flags: " << *FlagsOrErr << "\n");
+      if (!Name.empty()) // Skip absolute symbol relocations
+        GlobalSymbolTable[Name] =
+            SymbolTableEntry(SectionID, SectOffset, *JITSymFlags);
+    }
+  }
+
+  // Allocate common symbols
+  if (auto Err = emitCommonSymbols(Obj, CommonSymbolsToAllocate, CommonSize,
+                                   CommonAlign))
+    return std::move(Err);
+
+  // Parse and process relocations
+  LLVM_DEBUG(dbgs() << "Parse relocations:\n");
+  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+       SI != SE; ++SI) {
+    StubMap Stubs;
+
+    Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+    if (!RelSecOrErr)
+      return RelSecOrErr.takeError();
+
+    section_iterator RelocatedSection = *RelSecOrErr;
+    if (RelocatedSection == SE)
+      continue;
+
+    relocation_iterator I = SI->relocation_begin();
+    relocation_iterator E = SI->relocation_end();
+
+    if (I == E && !ProcessAllSections)
+      continue;
+
+    bool IsCode = RelocatedSection->isText();
+    unsigned SectionID = 0;
+    if (auto SectionIDOrErr = findOrEmitSection(Obj, *RelocatedSection, IsCode,
+                                                LocalSections))
+      SectionID = *SectionIDOrErr;
+    else
+      return SectionIDOrErr.takeError();
+
+    LLVM_DEBUG(dbgs() << "\tSectionID: " << SectionID << "\n");
+
+    for (; I != E;)
+      if (auto IOrErr = processRelocationRef(SectionID, I, Obj, LocalSections, Stubs))
+        I = *IOrErr;
+      else
+        return IOrErr.takeError();
+
+    // If there is a NotifyStubEmitted callback set, call it to register any
+    // stubs created for this section.
+    if (NotifyStubEmitted) {
+      StringRef FileName = Obj.getFileName();
+      StringRef SectionName = Sections[SectionID].getName();
+      for (auto &KV : Stubs) {
+
+        auto &VR = KV.first;
+        uint64_t StubAddr = KV.second;
+
+        // If this is a named stub, just call NotifyStubEmitted.
+        if (VR.SymbolName) {
+          NotifyStubEmitted(FileName, SectionName, VR.SymbolName, SectionID,
+                            StubAddr);
+          continue;
+        }
+
+        // Otherwise we will have to try a reverse lookup on the global symbol table.
+        for (auto &GSTMapEntry : GlobalSymbolTable) {
+          StringRef SymbolName = GSTMapEntry.first();
+          auto &GSTEntry = GSTMapEntry.second;
+          if (GSTEntry.getSectionID() == VR.SectionID &&
+              GSTEntry.getOffset() == VR.Offset) {
+            NotifyStubEmitted(FileName, SectionName, SymbolName, SectionID,
+                              StubAddr);
+            break;
+          }
+        }
+      }
+    }
+  }
+
+  // Process remaining sections
+  if (ProcessAllSections) {
+    LLVM_DEBUG(dbgs() << "Process remaining sections:\n");
+    for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+         SI != SE; ++SI) {
+
+      /* Ignore already loaded sections */
+      if (LocalSections.find(*SI) != LocalSections.end())
+        continue;
+
+      bool IsCode = SI->isText();
+      if (auto SectionIDOrErr =
+              findOrEmitSection(Obj, *SI, IsCode, LocalSections))
+        LLVM_DEBUG(dbgs() << "\tSectionID: " << (*SectionIDOrErr) << "\n");
+      else
+        return SectionIDOrErr.takeError();
+    }
+  }
+
+  // Give the subclasses a chance to tie-up any loose ends.
+  if (auto Err = finalizeLoad(Obj, LocalSections))
+    return std::move(Err);
+
+//   for (auto E : LocalSections)
+//     llvm::dbgs() << "Added: " << E.first.getRawDataRefImpl() << " -> " << E.second << "\n";
+
+  return LocalSections;
+}
+
+// A helper method for computeTotalAllocSize.
+// Computes the memory size required to allocate sections with the given sizes,
+// assuming that all sections are allocated with the given alignment
+// (each section is rounded up to a multiple of Alignment independently).
+static uint64_t
+computeAllocationSizeForSections(std::vector<uint64_t> &SectionSizes,
+                                 uint64_t Alignment) {
+  uint64_t TotalSize = 0;
+  for (uint64_t SectionSize : SectionSizes) {
+    uint64_t AlignedSize =
+        (SectionSize + Alignment - 1) / Alignment * Alignment;
+    TotalSize += AlignedSize;
+  }
+  return TotalSize;
+}
+
+// Returns true if this section must be loaded for the program to run:
+// SHF_ALLOC for ELF, content-bearing non-discardable sections for COFF,
+// and everything for Mach-O.
+static bool isRequiredForExecution(const SectionRef Section) {
+  const ObjectFile *Obj = Section.getObject();
+  if (isa<object::ELFObjectFileBase>(Obj))
+    return ELFSectionRef(Section).getFlags() & ELF::SHF_ALLOC;
+  if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj)) {
+    const coff_section *CoffSection = COFFObj->getCOFFSection(Section);
+    // Avoid loading zero-sized COFF sections.
+    // In PE files, VirtualSize gives the section size, and SizeOfRawData
+    // may be zero for sections with content. In Obj files, SizeOfRawData
+    // gives the section size, and VirtualSize is always zero. Hence
+    // the need to check for both cases below.
+    bool HasContent =
+        (CoffSection->VirtualSize > 0) || (CoffSection->SizeOfRawData > 0);
+    bool IsDiscardable =
+        CoffSection->Characteristics &
+        (COFF::IMAGE_SCN_MEM_DISCARDABLE | COFF::IMAGE_SCN_LNK_INFO);
+    return HasContent && !IsDiscardable;
+  }
+
+  assert(isa<MachOObjectFile>(Obj));
+  return true;
+}
+
+// Returns true if the section holds read-only data: not writable/executable
+// for ELF; initialized-data + read but not write for COFF. Mach-O sections
+// are conservatively treated as not read-only.
+static bool isReadOnlyData(const SectionRef Section) {
+  const ObjectFile *Obj = Section.getObject();
+  if (isa<object::ELFObjectFileBase>(Obj))
+    return !(ELFSectionRef(Section).getFlags() &
+             (ELF::SHF_WRITE | ELF::SHF_EXECINSTR));
+  if (auto *COFFObj = dyn_cast<object::COFFObjectFile>(Obj))
+    return ((COFFObj->getCOFFSection(Section)->Characteristics &
+             (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
+             | COFF::IMAGE_SCN_MEM_READ
+             | COFF::IMAGE_SCN_MEM_WRITE))
+             ==
+             (COFF::IMAGE_SCN_CNT_INITIALIZED_DATA
+             | COFF::IMAGE_SCN_MEM_READ));
+
+  assert(isa<MachOObjectFile>(Obj));
+  return false;
+}
+
+// Returns true for bss-style sections whose contents are implicitly zero:
+// ELF SHT_NOBITS, COFF uninitialized data, and MachO zero-fill types.
+static bool isZeroInit(const SectionRef Section) {
+  const ObjectFile *OF = Section.getObject();
+  if (isa<object::ELFObjectFileBase>(OF))
+    return ELFSectionRef(Section).getType() == ELF::SHT_NOBITS;
+  if (auto *COFF = dyn_cast<object::COFFObjectFile>(OF))
+    return COFF->getCOFFSection(Section)->Characteristics &
+           COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA;
+
+  unsigned Type = cast<MachOObjectFile>(OF)->getSectionType(Section);
+  return Type == MachO::S_ZEROFILL || Type == MachO::S_GB_ZEROFILL;
+}
+
+// Returns true if the section holds thread-local storage. Only ELF encodes
+// this (SHF_TLS); all other object formats report false.
+static bool isTLS(const SectionRef Section) {
+  const ObjectFile *OF = Section.getObject();
+  if (!isa<object::ELFObjectFileBase>(OF))
+    return false;
+  return ELFSectionRef(Section).getFlags() & ELF::SHF_TLS;
+}
+
+// Compute an upper bound of the memory size that is required to load all
+// sections
+//
+// On success, the six out-parameters receive the total size and maximum
+// alignment for each section class (code / read-only data / read-write
+// data). Returns an error if a section name or a symbol's flags cannot be
+// read.
+Error RuntimeDyldImpl::computeTotalAllocSize(const ObjectFile &Obj,
+                                             uint64_t &CodeSize,
+                                             uint32_t &CodeAlign,
+                                             uint64_t &RODataSize,
+                                             uint32_t &RODataAlign,
+                                             uint64_t &RWDataSize,
+                                             uint32_t &RWDataAlign) {
+  // Compute the size of all sections required for execution
+  std::vector<uint64_t> CodeSectionSizes;
+  std::vector<uint64_t> ROSectionSizes;
+  std::vector<uint64_t> RWSectionSizes;
+
+  // Collect sizes of all sections to be loaded;
+  // also determine the max alignment of all sections
+  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+       SI != SE; ++SI) {
+    const SectionRef &Section = *SI;
+
+    bool IsRequired = isRequiredForExecution(Section) || ProcessAllSections;
+
+    // Consider only the sections that are required to be loaded for execution
+    if (IsRequired) {
+      uint64_t DataSize = Section.getSize();
+      uint64_t Alignment64 = Section.getAlignment();
+      unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+      bool IsCode = Section.isText();
+      bool IsReadOnly = isReadOnlyData(Section);
+      bool IsTLS = isTLS(Section);
+
+      Expected<StringRef> NameOrErr = Section.getName();
+      if (!NameOrErr)
+        return NameOrErr.takeError();
+      StringRef Name = *NameOrErr;
+
+      uint64_t StubBufSize = computeSectionStubBufSize(Obj, Section);
+
+      // Padding mirrors what emitSection will add: 4 zero bytes after
+      // .eh_frame and worst-case slack before the stub buffer.
+      uint64_t PaddingSize = 0;
+      if (Name == ".eh_frame")
+        PaddingSize += 4;
+      if (StubBufSize != 0)
+        PaddingSize += getStubAlignment() - 1;
+
+      uint64_t SectionSize = DataSize + PaddingSize + StubBufSize;
+
+      // The .eh_frame section (at least on Linux) needs an extra four bytes
+      // padded
+      // with zeroes added at the end. For MachO objects, this section has a
+      // slightly different name, so this won't have any effect for MachO
+      // objects.
+      if (Name == ".eh_frame")
+        SectionSize += 4;
+
+      // Zero-sized sections still get one byte so they receive a distinct
+      // address.
+      if (!SectionSize)
+        SectionSize = 1;
+
+      if (IsCode) {
+        CodeAlign = std::max(CodeAlign, Alignment);
+        CodeSectionSizes.push_back(SectionSize);
+      } else if (IsReadOnly) {
+        RODataAlign = std::max(RODataAlign, Alignment);
+        ROSectionSizes.push_back(SectionSize);
+      } else if (!IsTLS) {
+        // TLS sections are allocated through allocateTLSSection and are not
+        // counted against the RW-data budget here.
+        RWDataAlign = std::max(RWDataAlign, Alignment);
+        RWSectionSizes.push_back(SectionSize);
+      }
+    }
+  }
+
+  // Compute Global Offset Table size. If it is not zero we
+  // also update alignment, which is equal to a size of a
+  // single GOT entry.
+  if (unsigned GotSize = computeGOTSize(Obj)) {
+    RWSectionSizes.push_back(GotSize);
+    RWDataAlign = std::max<uint32_t>(RWDataAlign, getGOTEntrySize());
+  }
+
+  // Compute the size of all common symbols
+  uint64_t CommonSize = 0;
+  uint32_t CommonAlign = 1;
+  for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
+       ++I) {
+    Expected<uint32_t> FlagsOrErr = I->getFlags();
+    if (!FlagsOrErr)
+      // TODO: Test this error.
+      return FlagsOrErr.takeError();
+    if (*FlagsOrErr & SymbolRef::SF_Common) {
+      // Add the common symbols to a list. We'll allocate them all below.
+      uint64_t Size = I->getCommonSize();
+      uint32_t Align = I->getAlignment();
+      // If this is the first common symbol, use its alignment as the alignment
+      // for the common symbols section.
+      if (CommonSize == 0)
+        CommonAlign = Align;
+      CommonSize = alignTo(CommonSize, Align) + Size;
+    }
+  }
+  if (CommonSize != 0) {
+    RWSectionSizes.push_back(CommonSize);
+    RWDataAlign = std::max(RWDataAlign, CommonAlign);
+  }
+
+  // Compute the required allocation space for each different type of sections
+  // (code, read-only data, read-write data) assuming that all sections are
+  // allocated with the max alignment. Note that we cannot compute with the
+  // individual alignments of the sections, because then the required size
+  // depends on the order, in which the sections are allocated.
+  CodeSize = computeAllocationSizeForSections(CodeSectionSizes, CodeAlign);
+  RODataSize = computeAllocationSizeForSections(ROSectionSizes, RODataAlign);
+  RWDataSize = computeAllocationSizeForSections(RWSectionSizes, RWDataAlign);
+
+  return Error::success();
+}
+
+// Upper bound on the GOT size for this object: one entry for every
+// relocation that needs a GOT slot, across all sections. Targets without
+// GOT support (entry size 0) report 0.
+unsigned RuntimeDyldImpl::computeGOTSize(const ObjectFile &Obj) {
+  size_t EntrySize = getGOTEntrySize();
+  if (!EntrySize)
+    return 0;
+
+  size_t NumEntries = 0;
+  for (const SectionRef &Sec : Obj.sections())
+    for (const RelocationRef &Reloc : Sec.relocations())
+      if (relocationNeedsGot(Reloc))
+        ++NumEntries;
+
+  return NumEntries * EntrySize;
+}
+
+// compute stub buffer size for the given section
+//
+// Counts one maximum-size stub per relocation (targeting this section) that
+// needs one, plus slack to align the stub buffer to the stub alignment.
+unsigned RuntimeDyldImpl::computeSectionStubBufSize(const ObjectFile &Obj,
+                                                    const SectionRef &Section) {
+  if (!MemMgr.allowStubAllocation()) {
+    return 0;
+  }
+
+  unsigned StubSize = getMaxStubSize();
+  if (StubSize == 0) {
+    return 0;
+  }
+  // FIXME: this is an inefficient way to handle this. We should computed the
+  // necessary section allocation size in loadObject by walking all the sections
+  // once.
+  unsigned StubBufSize = 0;
+  // Walk every relocation section and count the relocations that apply to
+  // our Section.
+  for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+       SI != SE; ++SI) {
+
+    Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+    if (!RelSecOrErr)
+      report_fatal_error(Twine(toString(RelSecOrErr.takeError())));
+
+    section_iterator RelSecI = *RelSecOrErr;
+    if (!(RelSecI == Section))
+      continue;
+
+    for (const RelocationRef &Reloc : SI->relocations())
+      if (relocationNeedsStub(Reloc))
+        StubBufSize += StubSize;
+  }
+
+  // Get section data size and alignment
+  uint64_t DataSize = Section.getSize();
+  uint64_t Alignment64 = Section.getAlignment();
+
+  // Add stubbuf size alignment
+  unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+  unsigned StubAlignment = getStubAlignment();
+  // (x & -x) isolates the lowest set bit of x = DataSize | Alignment, i.e.
+  // the largest power of two the end of the section data is guaranteed to be
+  // aligned to; pad up if that is weaker than the stub alignment.
+  unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
+  if (StubAlignment > EndAlignment)
+    StubBufSize += StubAlignment - EndAlignment;
+  return StubBufSize;
+}
+
+// Read a Size-byte integer from Src using the *target's* byte order,
+// independent of host alignment and endianness.
+uint64_t RuntimeDyldImpl::readBytesUnaligned(uint8_t *Src,
+                                             unsigned Size) const {
+  uint64_t Value = 0;
+  if (IsTargetLittleEndian) {
+    // Accumulate from the most-significant (last) byte downwards.
+    for (uint8_t *P = Src + Size; P != Src;)
+      Value = (Value << 8) | *--P;
+  } else {
+    for (unsigned I = 0; I != Size; ++I)
+      Value = (Value << 8) | Src[I];
+  }
+  return Value;
+}
+
+// Store the low Size bytes of Value at Dst using the *target's* byte order,
+// independent of host alignment and endianness.
+void RuntimeDyldImpl::writeBytesUnaligned(uint64_t Value, uint8_t *Dst,
+                                          unsigned Size) const {
+  if (IsTargetLittleEndian) {
+    for (unsigned I = 0; I != Size; ++I) {
+      Dst[I] = uint8_t(Value);
+      Value >>= 8;
+    }
+  } else {
+    // Big-endian: least-significant byte lands at the highest address.
+    for (unsigned I = Size; I != 0;) {
+      Dst[--I] = uint8_t(Value);
+      Value >>= 8;
+    }
+  }
+}
+
+// Default symbol-flag computation: derive the JIT symbol flags directly from
+// the object symbol. Format-specific subclasses may override this.
+Expected<JITSymbolFlags>
+RuntimeDyldImpl::getJITSymbolFlags(const SymbolRef &SR) {
+  return JITSymbolFlags::fromObjectSymbol(SR);
+}
+
+// Allocate one zero-initialized "<common symbols>" data section of
+// CommonSize bytes (size/alignment precomputed by the caller) and lay the
+// common symbols out inside it, honouring each symbol's alignment and
+// registering every named symbol in the global symbol table.
+Error RuntimeDyldImpl::emitCommonSymbols(const ObjectFile &Obj,
+                                         CommonSymbolList &SymbolsToAllocate,
+                                         uint64_t CommonSize,
+                                         uint32_t CommonAlign) {
+  if (SymbolsToAllocate.empty())
+    return Error::success();
+
+  // Allocate memory for the section
+  unsigned SectionID = Sections.size();
+  uint8_t *Addr = MemMgr.allocateDataSection(CommonSize, CommonAlign, SectionID,
+                                             "<common symbols>", false);
+  if (!Addr)
+    report_fatal_error("Unable to allocate memory for common symbols!");
+  uint64_t Offset = 0;
+  Sections.push_back(
+      SectionEntry("<common symbols>", Addr, CommonSize, CommonSize, 0));
+  memset(Addr, 0, CommonSize);
+
+  LLVM_DEBUG(dbgs() << "emitCommonSection SectionID: " << SectionID
+                    << " new addr: " << format("%p", Addr)
+                    << " DataSize: " << CommonSize << "\n");
+
+  // Assign the address of each symbol. Addr and Offset advance in lockstep:
+  // Addr tracks the host pointer, Offset the position within the section.
+  for (auto &Sym : SymbolsToAllocate) {
+    uint32_t Alignment = Sym.getAlignment();
+    uint64_t Size = Sym.getCommonSize();
+    StringRef Name;
+    if (auto NameOrErr = Sym.getName())
+      Name = *NameOrErr;
+    else
+      return NameOrErr.takeError();
+    if (Alignment) {
+      // This symbol has an alignment requirement.
+      uint64_t AlignOffset =
+          offsetToAlignment((uint64_t)Addr, Align(Alignment));
+      Addr += AlignOffset;
+      Offset += AlignOffset;
+    }
+    auto JITSymFlags = getJITSymbolFlags(Sym);
+
+    if (!JITSymFlags)
+      return JITSymFlags.takeError();
+
+    LLVM_DEBUG(dbgs() << "Allocating common symbol " << Name << " address "
+                      << format("%p", Addr) << "\n");
+    if (!Name.empty()) // Skip absolute symbol relocations.
+      GlobalSymbolTable[Name] =
+          SymbolTableEntry(SectionID, Offset, std::move(*JITSymFlags));
+    Offset += Size;
+    Addr += Size;
+  }
+
+  return Error::success();
+}
+
+// Allocate memory for one object section through the memory manager, copy or
+// zero-fill its contents, and append padding plus stub-buffer space. Records
+// a SectionEntry and returns the new SectionID. Sections not required for
+// execution (e.g. debug info) are recorded without allocation unless
+// ProcessAllSections is set.
+Expected<unsigned>
+RuntimeDyldImpl::emitSection(const ObjectFile &Obj,
+                             const SectionRef &Section,
+                             bool IsCode) {
+  StringRef data;
+  uint64_t Alignment64 = Section.getAlignment();
+
+  unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+  unsigned PaddingSize = 0;
+  unsigned StubBufSize = 0;
+  bool IsRequired = isRequiredForExecution(Section);
+  bool IsVirtual = Section.isVirtual();
+  bool IsZeroInit = isZeroInit(Section);
+  bool IsReadOnly = isReadOnlyData(Section);
+  bool IsTLS = isTLS(Section);
+  uint64_t DataSize = Section.getSize();
+
+  // An alignment of 0 (at least with ELF) is identical to an alignment of 1,
+  // while being more "polite". Other formats do not support 0-aligned sections
+  // anyway, so we should guarantee that the alignment is always at least 1.
+  Alignment = std::max(1u, Alignment);
+
+  Expected<StringRef> NameOrErr = Section.getName();
+  if (!NameOrErr)
+    return NameOrErr.takeError();
+  StringRef Name = *NameOrErr;
+
+  StubBufSize = computeSectionStubBufSize(Obj, Section);
+
+  // The .eh_frame section (at least on Linux) needs an extra four bytes padded
+  // with zeroes added at the end. For MachO objects, this section has a
+  // slightly different name, so this won't have any effect for MachO objects.
+  if (Name == ".eh_frame")
+    PaddingSize = 4;
+
+  uintptr_t Allocate;
+  unsigned SectionID = Sections.size();
+  uint8_t *Addr;
+  uint64_t LoadAddress = 0;
+  const char *pData = nullptr;
+
+  // If this section contains any bits (i.e. isn't a virtual or bss section),
+  // grab a reference to them.
+  if (!IsVirtual && !IsZeroInit) {
+    // In either case, set the location of the unrelocated section in memory,
+    // since we still process relocations for it even if we're not applying them.
+    if (Expected<StringRef> E = Section.getContents())
+      data = *E;
+    else
+      return E.takeError();
+    pData = data.data();
+  }
+
+  // If there are any stubs then the section alignment needs to be at least as
+  // high as stub alignment or padding calculations may by incorrect when the
+  // section is remapped.
+  if (StubBufSize != 0) {
+    Alignment = std::max(Alignment, getStubAlignment());
+    PaddingSize += getStubAlignment() - 1;
+  }
+
+  // Some sections, such as debug info, don't need to be loaded for execution.
+  // Process those only if explicitly requested.
+  if (IsRequired || ProcessAllSections) {
+    Allocate = DataSize + PaddingSize + StubBufSize;
+    if (!Allocate)
+      Allocate = 1;
+    // TLS sections get an initialization image plus an offset; code and data
+    // go through the corresponding memory-manager hooks.
+    if (IsTLS) {
+      auto TLSSection =
+          MemMgr.allocateTLSSection(Allocate, Alignment, SectionID, Name);
+      Addr = TLSSection.InitializationImage;
+      LoadAddress = TLSSection.Offset;
+    } else if (IsCode) {
+      Addr = MemMgr.allocateCodeSection(Allocate, Alignment, SectionID, Name);
+    } else {
+      Addr = MemMgr.allocateDataSection(Allocate, Alignment, SectionID, Name,
+                                        IsReadOnly);
+    }
+    if (!Addr)
+      report_fatal_error("Unable to allocate section memory!");
+
+    // Zero-initialize or copy the data from the image
+    if (IsZeroInit || IsVirtual)
+      memset(Addr, 0, DataSize);
+    else
+      memcpy(Addr, pData, DataSize);
+
+    // Fill in any extra bytes we allocated for padding
+    if (PaddingSize != 0) {
+      memset(Addr + DataSize, 0, PaddingSize);
+      // Update the DataSize variable to include padding.
+      DataSize += PaddingSize;
+
+      // Align DataSize to stub alignment if we have any stubs (PaddingSize will
+      // have been increased above to account for this).
+      if (StubBufSize > 0)
+        DataSize &= -(uint64_t)getStubAlignment();
+    }
+
+    LLVM_DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: "
+                      << Name << " obj addr: " << format("%p", pData)
+                      << " new addr: " << format("%p", Addr) << " DataSize: "
+                      << DataSize << " StubBufSize: " << StubBufSize
+                      << " Allocate: " << Allocate << "\n");
+  } else {
+    // Even if we didn't load the section, we need to record an entry for it
+    // to handle later processing (and by 'handle' I mean don't do anything
+    // with these sections).
+    Allocate = 0;
+    Addr = nullptr;
+    LLVM_DEBUG(
+        dbgs() << "emitSection SectionID: " << SectionID << " Name: " << Name
+               << " obj addr: " << format("%p", data.data()) << " new addr: 0"
+               << " DataSize: " << DataSize << " StubBufSize: " << StubBufSize
+               << " Allocate: " << Allocate << "\n");
+  }
+
+  Sections.push_back(
+      SectionEntry(Name, Addr, DataSize, Allocate, (uintptr_t)pData));
+
+  // The load address of a TLS section is not equal to the address of its
+  // initialization image
+  if (IsTLS)
+    Sections.back().setLoadAddress(LoadAddress);
+  // Debug info sections are linked as if their load address was zero
+  if (!IsRequired)
+    Sections.back().setLoadAddress(0);
+
+  return SectionID;
+}
+
+// Return the SectionID for Section, emitting it on first use. LocalSections
+// caches the mapping for the object currently being loaded so each section
+// is emitted at most once.
+Expected<unsigned>
+RuntimeDyldImpl::findOrEmitSection(const ObjectFile &Obj,
+                                   const SectionRef &Section,
+                                   bool IsCode,
+                                   ObjSectionToIDMap &LocalSections) {
+  // Fast path: this section was already emitted for this object.
+  auto Known = LocalSections.find(Section);
+  if (Known != LocalSections.end())
+    return Known->second;
+
+  auto SectionIDOrErr = emitSection(Obj, Section, IsCode);
+  if (!SectionIDOrErr)
+    return SectionIDOrErr.takeError();
+  LocalSections[Section] = *SectionIDOrErr;
+  return *SectionIDOrErr;
+}
+
+// Record a relocation whose target is the given section; it is applied later
+// once section load addresses are final.
+void RuntimeDyldImpl::addRelocationForSection(const RelocationEntry &RE,
+                                              unsigned SectionID) {
+  Relocations[SectionID].push_back(RE);
+}
+
+// Record a relocation against a named symbol. Locally defined symbols are
+// rewritten immediately into a section-relative relocation; unknown symbols
+// are deferred to external-symbol resolution.
+void RuntimeDyldImpl::addRelocationForSymbol(const RelocationEntry &RE,
+                                             StringRef SymbolName) {
+  // Relocation by symbol. If the symbol is found in the global symbol table,
+  // create an appropriate section relocation. Otherwise, add it to
+  // ExternalSymbolRelocations.
+  RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(SymbolName);
+  if (Loc == GlobalSymbolTable.end()) {
+    ExternalSymbolRelocations[SymbolName].push_back(RE);
+  } else {
+    assert(!SymbolName.empty() &&
+           "Empty symbol should not be in GlobalSymbolTable");
+    // Copy the RE since we want to modify its addend.
+    RelocationEntry RECopy = RE;
+    const auto &SymInfo = Loc->second;
+    // Fold the symbol's offset within its section into the addend so the
+    // relocation can be resolved against the section address alone.
+    RECopy.Addend += SymInfo.getOffset();
+    Relocations[SymInfo.getSectionID()].push_back(RECopy);
+  }
+}
+
+// Emit a far-branch stub at Addr for the current target architecture and
+// return the location the caller should apply the stub's relocation to
+// (Addr for most targets; Addr+4 for ARM, whose stub loads the literal word
+// that follows the instruction). The actual target address is patched in by
+// the caller afterwards. AbiVariant selects between ABI flavours, e.g.
+// PPC64 ELFv1 vs ELFv2 and MIPS pre-R6 vs R6 jump encodings.
+uint8_t *RuntimeDyldImpl::createStubFunction(uint8_t *Addr,
+                                             unsigned AbiVariant) {
+  if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be ||
+      Arch == Triple::aarch64_32) {
+    // This stub has to be able to access the full address space,
+    // since symbol lookup won't necessarily find a handy, in-range,
+    // PLT stub for functions which could be anywhere.
+    // Stub can use ip0 (== x16) to calculate address
+    writeBytesUnaligned(0xd2e00010, Addr,    4); // movz ip0, #:abs_g3:<addr>
+    writeBytesUnaligned(0xf2c00010, Addr+4,  4); // movk ip0, #:abs_g2_nc:<addr>
+    writeBytesUnaligned(0xf2a00010, Addr+8,  4); // movk ip0, #:abs_g1_nc:<addr>
+    writeBytesUnaligned(0xf2800010, Addr+12, 4); // movk ip0, #:abs_g0_nc:<addr>
+    writeBytesUnaligned(0xd61f0200, Addr+16, 4); // br ip0
+
+    return Addr;
+  } else if (Arch == Triple::arm || Arch == Triple::armeb) {
+    // TODO: There is only ARM far stub now. We should add the Thumb stub,
+    // and stubs for branches Thumb - ARM and ARM - Thumb.
+    writeBytesUnaligned(0xe51ff004, Addr, 4); // ldr pc, [pc, #-4]
+    return Addr + 4;
+  } else if (IsMipsO32ABI || IsMipsN32ABI) {
+    // 0:   3c190000        lui     t9,%hi(addr).
+    // 4:   27390000        addiu   t9,t9,%lo(addr).
+    // 8:   03200008        jr      t9.
+    // c:   00000000        nop.
+    const unsigned LuiT9Instr = 0x3c190000, AdduiT9Instr = 0x27390000;
+    const unsigned NopInstr = 0x0;
+    unsigned JrT9Instr = 0x03200008;
+    // R6 replaces the delay-slot jr with jr.hb (hazard barrier) encoding.
+    if ((AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_32R6 ||
+        (AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_64R6)
+      JrT9Instr = 0x03200009;
+
+    writeBytesUnaligned(LuiT9Instr, Addr, 4);
+    writeBytesUnaligned(AdduiT9Instr, Addr + 4, 4);
+    writeBytesUnaligned(JrT9Instr, Addr + 8, 4);
+    writeBytesUnaligned(NopInstr, Addr + 12, 4);
+    return Addr;
+  } else if (IsMipsN64ABI) {
+    // 0:   3c190000        lui     t9,%highest(addr).
+    // 4:   67390000        daddiu  t9,t9,%higher(addr).
+    // 8:   0019CC38        dsll    t9,t9,16.
+    // c:   67390000        daddiu  t9,t9,%hi(addr).
+    // 10:  0019CC38        dsll    t9,t9,16.
+    // 14:  67390000        daddiu  t9,t9,%lo(addr).
+    // 18:  03200008        jr      t9.
+    // 1c:  00000000        nop.
+    const unsigned LuiT9Instr = 0x3c190000, DaddiuT9Instr = 0x67390000,
+                   DsllT9Instr = 0x19CC38;
+    const unsigned NopInstr = 0x0;
+    unsigned JrT9Instr = 0x03200008;
+    if ((AbiVariant & ELF::EF_MIPS_ARCH) == ELF::EF_MIPS_ARCH_64R6)
+      JrT9Instr = 0x03200009;
+
+    writeBytesUnaligned(LuiT9Instr, Addr, 4);
+    writeBytesUnaligned(DaddiuT9Instr, Addr + 4, 4);
+    writeBytesUnaligned(DsllT9Instr, Addr + 8, 4);
+    writeBytesUnaligned(DaddiuT9Instr, Addr + 12, 4);
+    writeBytesUnaligned(DsllT9Instr, Addr + 16, 4);
+    writeBytesUnaligned(DaddiuT9Instr, Addr + 20, 4);
+    writeBytesUnaligned(JrT9Instr, Addr + 24, 4);
+    writeBytesUnaligned(NopInstr, Addr + 28, 4);
+    return Addr;
+  } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
+    // Depending on which version of the ELF ABI is in use, we need to
+    // generate one of two variants of the stub.  They both start with
+    // the same sequence to load the target address into r12.
+    writeInt32BE(Addr,    0x3D800000); // lis   r12, highest(addr)
+    writeInt32BE(Addr+4,  0x618C0000); // ori   r12, higher(addr)
+    writeInt32BE(Addr+8,  0x798C07C6); // sldi  r12, r12, 32
+    writeInt32BE(Addr+12, 0x658C0000); // oris  r12, r12, h(addr)
+    writeInt32BE(Addr+16, 0x618C0000); // ori   r12, r12, l(addr)
+    if (AbiVariant == 2) {
+      // PowerPC64 stub ELFv2 ABI: The address points to the function itself.
+      // The address is already in r12 as required by the ABI.  Branch to it.
+      writeInt32BE(Addr+20, 0xF8410018); // std   r2,  24(r1)
+      writeInt32BE(Addr+24, 0x7D8903A6); // mtctr r12
+      writeInt32BE(Addr+28, 0x4E800420); // bctr
+    } else {
+      // PowerPC64 stub ELFv1 ABI: The address points to a function descriptor.
+      // Load the function address on r11 and sets it to control register. Also
+      // loads the function TOC in r2 and environment pointer to r11.
+      writeInt32BE(Addr+20, 0xF8410028); // std   r2,  40(r1)
+      writeInt32BE(Addr+24, 0xE96C0000); // ld    r11, 0(r12)
+      writeInt32BE(Addr+28, 0xE84C0008); // ld    r2,  0(r12)
+      writeInt32BE(Addr+32, 0x7D6903A6); // mtctr r11
+      writeInt32BE(Addr+36, 0xE96C0010); // ld    r11, 16(r2)
+      writeInt32BE(Addr+40, 0x4E800420); // bctr
+    }
+    return Addr;
+  } else if (Arch == Triple::systemz) {
+    writeInt16BE(Addr,    0xC418);     // lgrl %r1,.+8
+    writeInt16BE(Addr+2,  0x0000);
+    writeInt16BE(Addr+4,  0x0004);
+    writeInt16BE(Addr+6,  0x07F1);     // brc 15,%r1
+    // 8-byte address stored at Addr + 8
+    return Addr;
+  } else if (Arch == Triple::x86_64) {
+    *Addr      = 0xFF; // jmp
+    *(Addr+1)  = 0x25; // rip
+    // 32-bit PC-relative address of the GOT entry will be stored at Addr+2
+  } else if (Arch == Triple::x86) {
+    *Addr      = 0xE9; // 32-bit pc-relative jump.
+  }
+  return Addr;
+}
+
+// Assign an address to a symbol name and resolve all the relocations
+// associated with it. (Sections[SectionID] must already exist.)
+void RuntimeDyldImpl::reassignSectionAddress(unsigned SectionID,
+                                             uint64_t Addr) {
+  // The address to use for relocation resolution is not
+  // the address of the local section buffer. We must be doing
+  // a remote execution environment of some sort. Relocations can't
+  // be applied until all the sections have been moved.  The client must
+  // trigger this with a call to MCJIT::finalize() or
+  // RuntimeDyld::resolveRelocations().
+  //
+  // Addr is a uint64_t because we can't assume the pointer width
+  // of the target is the same as that of the host. Just use a generic
+  // "big enough" type.
+  LLVM_DEBUG(
+      dbgs() << "Reassigning address for section " << SectionID << " ("
+             << Sections[SectionID].getName() << "): "
+             << format("0x%016" PRIx64, Sections[SectionID].getLoadAddress())
+             << " -> " << format("0x%016" PRIx64, Addr) << "\n");
+  Sections[SectionID].setLoadAddress(Addr);
+}
+
+// Resolve every relocation in Relocs against the symbol address Value,
+// skipping entries whose target section was never actually loaded.
+void RuntimeDyldImpl::resolveRelocationList(const RelocationList &Relocs,
+                                            uint64_t Value) {
+  for (const RelocationEntry &RE : Relocs) {
+    // Ignore relocations for sections that were not loaded
+    if (RE.SectionID != AbsoluteSymbolSection &&
+        Sections[RE.SectionID].getAddress() == nullptr)
+      continue;
+    resolveRelocation(RE, Value);
+  }
+}
+
+// Resolve all recorded external-symbol relocations. Each symbol's address
+// comes either from our own global symbol table (defined by a previously
+// loaded object) or from ExternalSymbolMap (resolver results). Empty names
+// denote absolute relocations and resolve against address zero. Clears
+// ExternalSymbolRelocations when done.
+void RuntimeDyldImpl::applyExternalSymbolRelocations(
+    const StringMap<JITEvaluatedSymbol> ExternalSymbolMap) {
+  for (auto &RelocKV : ExternalSymbolRelocations) {
+    StringRef Name = RelocKV.first();
+    RelocationList &Relocs = RelocKV.second;
+    if (Name.size() == 0) {
+      // This is an absolute symbol, use an address of zero.
+      LLVM_DEBUG(dbgs() << "Resolving absolute relocations."
+                        << "\n");
+      resolveRelocationList(Relocs, 0);
+    } else {
+      uint64_t Addr = 0;
+      JITSymbolFlags Flags;
+      RTDyldSymbolTable::const_iterator Loc = GlobalSymbolTable.find(Name);
+      if (Loc == GlobalSymbolTable.end()) {
+        auto RRI = ExternalSymbolMap.find(Name);
+        assert(RRI != ExternalSymbolMap.end() && "No result for symbol");
+        Addr = RRI->second.getAddress();
+        Flags = RRI->second.getFlags();
+      } else {
+        // We found the symbol in our global table.  It was probably in a
+        // Module that we loaded previously.
+        const auto &SymInfo = Loc->second;
+        Addr = getSectionLoadAddress(SymInfo.getSectionID()) +
+               SymInfo.getOffset();
+        Flags = SymInfo.getFlags();
+      }
+
+      // FIXME: Implement error handling that doesn't kill the host program!
+      if (!Addr && !Resolver.allowsZeroSymbols())
+        report_fatal_error(Twine("Program used external function '") + Name +
+                           "' which could not be resolved!");
+
+      // If Resolver returned UINT64_MAX, the client wants to handle this symbol
+      // manually and we shouldn't resolve its relocations.
+      if (Addr != UINT64_MAX) {
+
+        // Tweak the address based on the symbol flags if necessary.
+        // For example, this is used by RuntimeDyldMachOARM to toggle the low bit
+        // if the target symbol is Thumb.
+        Addr = modifyAddressBasedOnFlags(Addr, Flags);
+
+        LLVM_DEBUG(dbgs() << "Resolving relocations Name: " << Name << "\t"
+                          << format("0x%lx", Addr) << "\n");
+        resolveRelocationList(Relocs, Addr);
+      }
+    }
+  }
+  ExternalSymbolRelocations.clear();
+}
+
+// Resolve every still-unknown external symbol through the attached resolver,
+// then apply the corresponding relocations. The async lookup is made
+// synchronous here via a promise/future pair.
+Error RuntimeDyldImpl::resolveExternalSymbols() {
+  StringMap<JITEvaluatedSymbol> ExternalSymbolMap;
+
+  // Resolution can trigger emission of more symbols, so iterate until
+  // we've resolved *everything*.
+  {
+    JITSymbolResolver::LookupSet ResolvedSymbols;
+
+    while (true) {
+      JITSymbolResolver::LookupSet NewSymbols;
+
+      // Collect names not yet resolved and not defined locally.
+      for (auto &RelocKV : ExternalSymbolRelocations) {
+        StringRef Name = RelocKV.first();
+        if (!Name.empty() && !GlobalSymbolTable.count(Name) &&
+            !ResolvedSymbols.count(Name))
+          NewSymbols.insert(Name);
+      }
+
+      if (NewSymbols.empty())
+        break;
+
+#ifdef _MSC_VER
+      // NOTE(review): MSVCPExpected presumably works around MSVC's
+      // std::promise requiring a default-constructible payload — confirm
+      // against MSVCErrorWorkarounds.h.
+      using ExpectedLookupResult =
+          MSVCPExpected<JITSymbolResolver::LookupResult>;
+#else
+      using ExpectedLookupResult = Expected<JITSymbolResolver::LookupResult>;
+#endif
+
+      auto NewSymbolsP = std::make_shared<std::promise<ExpectedLookupResult>>();
+      auto NewSymbolsF = NewSymbolsP->get_future();
+      Resolver.lookup(NewSymbols,
+                      [=](Expected<JITSymbolResolver::LookupResult> Result) {
+                        NewSymbolsP->set_value(std::move(Result));
+                      });
+
+      // Block until the resolver delivers its result.
+      auto NewResolverResults = NewSymbolsF.get();
+
+      if (!NewResolverResults)
+        return NewResolverResults.takeError();
+
+      assert(NewResolverResults->size() == NewSymbols.size() &&
+             "Should have errored on unresolved symbols");
+
+      for (auto &RRKV : *NewResolverResults) {
+        assert(!ResolvedSymbols.count(RRKV.first) && "Redundant resolution?");
+        ExternalSymbolMap.insert(RRKV);
+        ResolvedSymbols.insert(RRKV.first);
+      }
+    }
+  }
+
+  applyExternalSymbolRelocations(ExternalSymbolMap);
+
+  return Error::success();
+}
+
+// Asynchronously finish linking: look up all external symbols through the
+// resolver, then (in the continuation) apply relocations, register EH
+// frames, finalize memory and invoke OnEmitted exactly once with the
+// outcome. Ownership of 'This' is kept alive via a shared_ptr captured by
+// the continuation.
+void RuntimeDyldImpl::finalizeAsync(
+    std::unique_ptr<RuntimeDyldImpl> This,
+    unique_function<void(object::OwningBinary<object::ObjectFile>,
+                         std::unique_ptr<RuntimeDyld::LoadedObjectInfo>, Error)>
+        OnEmitted,
+    object::OwningBinary<object::ObjectFile> O,
+    std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info) {
+
+  auto SharedThis = std::shared_ptr<RuntimeDyldImpl>(std::move(This));
+  auto PostResolveContinuation =
+      [SharedThis, OnEmitted = std::move(OnEmitted), O = std::move(O),
+       Info = std::move(Info)](
+          Expected<JITSymbolResolver::LookupResult> Result) mutable {
+        if (!Result) {
+          OnEmitted(std::move(O), std::move(Info), Result.takeError());
+          return;
+        }
+
+        /// Copy the result into a StringMap, where the keys are held by value.
+        StringMap<JITEvaluatedSymbol> Resolved;
+        for (auto &KV : *Result)
+          Resolved[KV.first] = KV.second;
+
+        SharedThis->applyExternalSymbolRelocations(Resolved);
+        SharedThis->resolveLocalRelocations();
+        SharedThis->registerEHFrames();
+        std::string ErrMsg;
+        if (SharedThis->MemMgr.finalizeMemory(&ErrMsg))
+          OnEmitted(std::move(O), std::move(Info),
+                    make_error<StringError>(std::move(ErrMsg),
+                                            inconvertibleErrorCode()));
+        else
+          OnEmitted(std::move(O), std::move(Info), Error::success());
+      };
+
+  JITSymbolResolver::LookupSet Symbols;
+
+  for (auto &RelocKV : SharedThis->ExternalSymbolRelocations) {
+    StringRef Name = RelocKV.first();
+    if (Name.empty()) // Skip absolute symbol relocations.
+      continue;
+    assert(!SharedThis->GlobalSymbolTable.count(Name) &&
+           "Name already processed. RuntimeDyld instances can not be re-used "
+           "when finalizing with finalizeAsync.");
+    Symbols.insert(Name);
+  }
+
+  // With no external symbols, run the continuation inline with an empty
+  // result rather than going through the resolver.
+  if (!Symbols.empty()) {
+    SharedThis->Resolver.lookup(Symbols, std::move(PostResolveContinuation));
+  } else
+    PostResolveContinuation(std::map<StringRef, JITEvaluatedSymbol>());
+}
+
+//===----------------------------------------------------------------------===//
+// RuntimeDyld class implementation
+
+// Map an object section to the address it was (or will be) loaded at;
+// sections that were never emitted report address 0.
+uint64_t RuntimeDyld::LoadedObjectInfo::getSectionLoadAddress(
+    const object::SectionRef &Sec) const {
+  auto Entry = ObjSecToIDMap.find(Sec);
+  if (Entry == ObjSecToIDMap.end())
+    return 0;
+  return RTDyld.Sections[Entry->second].getLoadAddress();
+}
+
+// Default implementation: TLS allocation is an optional memory-manager
+// capability and is fatal unless a subclass overrides this hook.
+RuntimeDyld::MemoryManager::TLSSection
+RuntimeDyld::MemoryManager::allocateTLSSection(uintptr_t Size,
+                                               unsigned Alignment,
+                                               unsigned SectionID,
+                                               StringRef SectionName) {
+  report_fatal_error("allocation of TLS not implemented");
+}
+
+// Out-of-line anchor methods pin these interfaces' vtables to this
+// translation unit (see the LLVM coding standards on vtable anchors).
+void RuntimeDyld::MemoryManager::anchor() {}
+void JITSymbolResolver::anchor() {}
+void LegacyJITSymbolResolver::anchor() {}
+
+RuntimeDyld::RuntimeDyld(RuntimeDyld::MemoryManager &MemMgr,
+                         JITSymbolResolver &Resolver)
+    : MemMgr(MemMgr), Resolver(Resolver) {
+  // FIXME: There's a potential issue lurking here if a single instance of
+  // RuntimeDyld is used to load multiple objects. The current implementation
+  // associates a single memory manager with a RuntimeDyld instance. Even
+  // though the public class spawns a new 'impl' instance for each load,
+  // they share a single memory manager. This can become a problem when page
+  // permissions are applied.
+  // The concrete impl is created lazily in loadObject() once the object
+  // format is known.
+  Dyld = nullptr;
+  ProcessAllSections = false;
+}
+
+RuntimeDyld::~RuntimeDyld() {}
+
+// Build a COFF dyld for Arch and propagate the shared configuration onto it.
+static std::unique_ptr<RuntimeDyldCOFF>
+createRuntimeDyldCOFF(
+    Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+    JITSymbolResolver &Resolver, bool ProcessAllSections,
+    RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+  auto Dyld = RuntimeDyldCOFF::create(Arch, MM, Resolver);
+  Dyld->setProcessAllSections(ProcessAllSections);
+  Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+  return Dyld;
+}
+
+// Build an ELF dyld for Arch and propagate the shared configuration onto it.
+static std::unique_ptr<RuntimeDyldELF>
+createRuntimeDyldELF(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+                     JITSymbolResolver &Resolver, bool ProcessAllSections,
+                     RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+  auto Dyld = RuntimeDyldELF::create(Arch, MM, Resolver);
+  Dyld->setProcessAllSections(ProcessAllSections);
+  Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+  return Dyld;
+}
+
+// Build a MachO dyld for Arch and propagate the shared configuration onto it.
+static std::unique_ptr<RuntimeDyldMachO>
+createRuntimeDyldMachO(
+    Triple::ArchType Arch, RuntimeDyld::MemoryManager &MM,
+    JITSymbolResolver &Resolver,
+    bool ProcessAllSections,
+    RuntimeDyld::NotifyStubEmittedFunction NotifyStubEmitted) {
+  auto Dyld = RuntimeDyldMachO::create(Arch, MM, Resolver);
+  Dyld->setProcessAllSections(ProcessAllSections);
+  Dyld->setNotifyStubEmitted(std::move(NotifyStubEmitted));
+  return Dyld;
+}
+
+// Lazily create the format-specific impl on first use (based on Obj's
+// format), verify compatibility, then delegate the load and notify the
+// memory manager.
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyld::loadObject(const ObjectFile &Obj) {
+  if (!Dyld) {
+    if (Obj.isELF())
+      Dyld =
+          createRuntimeDyldELF(static_cast<Triple::ArchType>(Obj.getArch()),
+                               MemMgr, Resolver, ProcessAllSections,
+                               std::move(NotifyStubEmitted));
+    else if (Obj.isMachO())
+      Dyld = createRuntimeDyldMachO(
+          static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver,
+          ProcessAllSections, std::move(NotifyStubEmitted));
+    else if (Obj.isCOFF())
+      Dyld = createRuntimeDyldCOFF(
+          static_cast<Triple::ArchType>(Obj.getArch()), MemMgr, Resolver,
+          ProcessAllSections, std::move(NotifyStubEmitted));
+    else
+      report_fatal_error("Incompatible object format!");
+  }
+
+  // A pre-existing impl must match the new object's format/arch.
+  if (!Dyld->isCompatibleFile(Obj))
+    report_fatal_error("Incompatible object format!");
+
+  auto LoadedObjInfo = Dyld->loadObject(Obj);
+  MemMgr.notifyObjectLoaded(*this, Obj);
+  return LoadedObjInfo;
+}
+
+// Address of Name's storage within this process (not the target load
+// address); null before any object has been loaded.
+void *RuntimeDyld::getSymbolLocalAddress(StringRef Name) const {
+  return Dyld ? Dyld->getSymbolLocalAddress(Name) : nullptr;
+}
+
+// SectionID of the section defining Name; requires that an object has
+// already been loaded (asserted).
+unsigned RuntimeDyld::getSymbolSectionID(StringRef Name) const {
+  assert(Dyld && "No RuntimeDyld instance attached");
+  return Dyld->getSymbolSectionID(Name);
+}
+
+// Look up Name across loaded objects; returns a null symbol (constructed
+// from nullptr) before any object has been loaded.
+JITEvaluatedSymbol RuntimeDyld::getSymbol(StringRef Name) const {
+  if (!Dyld)
+    return nullptr;
+  return Dyld->getSymbol(Name);
+}
+
+// Snapshot of all symbols defined by the loaded objects; empty before the
+// first load.
+std::map<StringRef, JITEvaluatedSymbol> RuntimeDyld::getSymbolTable() const {
+  if (!Dyld)
+    return {};
+  return Dyld->getSymbolTable();
+}
+
+void RuntimeDyld::resolveRelocations() { Dyld->resolveRelocations(); }
+
+// Forward to the impl: rebind SectionID to a new target load address.
+void RuntimeDyld::reassignSectionAddress(unsigned SectionID, uint64_t Addr) {
+  Dyld->reassignSectionAddress(SectionID, Addr);
+}
+
+// Tell the impl that the section containing LocalAddress will reside at
+// TargetAddress in the executing process.
+void RuntimeDyld::mapSectionAddress(const void *LocalAddress,
+                                    uint64_t TargetAddress) {
+  Dyld->mapSectionAddress(LocalAddress, TargetAddress);
+}
+
+// Error-state forwarders to the impl; only valid after loadObject().
+bool RuntimeDyld::hasError() { return Dyld->hasError(); }
+
+StringRef RuntimeDyld::getErrorString() { return Dyld->getErrorString(); }
+
+// Resolve relocations and register EH frames. Memory is finalized only if
+// the memory manager was not already finalization-locked by an outer caller,
+// which avoids finalizing twice when calls nest.
+void RuntimeDyld::finalizeWithMemoryManagerLocking() {
+  bool MemoryFinalizationLocked = MemMgr.FinalizationLocked;
+  MemMgr.FinalizationLocked = true;
+  resolveRelocations();
+  registerEHFrames();
+  if (!MemoryFinalizationLocked) {
+    MemMgr.finalizeMemory();
+    MemMgr.FinalizationLocked = false;
+  }
+}
+
+// Raw bytes of the loaded section; requires a loaded object (asserted).
+StringRef RuntimeDyld::getSectionContent(unsigned SectionID) const {
+  assert(Dyld && "No Dyld instance attached");
+  return Dyld->getSectionContent(SectionID);
+}
+
+// Target load address of the section; requires a loaded object (asserted).
+uint64_t RuntimeDyld::getSectionLoadAddress(unsigned SectionID) const {
+  assert(Dyld && "No Dyld instance attached");
+  return Dyld->getSectionLoadAddress(SectionID);
+}
+
+// Register loaded EH frames with the process; no-op before any load.
+void RuntimeDyld::registerEHFrames() {
+  if (Dyld)
+    Dyld->registerEHFrames();
+}
+
+// Undo registerEHFrames; no-op before any load.
+void RuntimeDyld::deregisterEHFrames() {
+  if (Dyld)
+    Dyld->deregisterEHFrames();
+}
+// FIXME: Kill this with fire once we have a new JIT linker: this is only here
+// so that we can re-use RuntimeDyld's implementation without twisting the
+// interface any further for ORC's purposes.
+//
+// Loads O with a fresh RuntimeDyld, reports the pre-finalization symbol
+// table through OnLoaded, then hands off to finalizeAsync, which resolves
+// external symbols and calls OnEmitted exactly once with the outcome.
+void jitLinkForORC(
+    object::OwningBinary<object::ObjectFile> O,
+    RuntimeDyld::MemoryManager &MemMgr, JITSymbolResolver &Resolver,
+    bool ProcessAllSections,
+    unique_function<Error(const object::ObjectFile &Obj,
+                          RuntimeDyld::LoadedObjectInfo &LoadedObj,
+                          std::map<StringRef, JITEvaluatedSymbol>)>
+        OnLoaded,
+    unique_function<void(object::OwningBinary<object::ObjectFile>,
+                         std::unique_ptr<RuntimeDyld::LoadedObjectInfo>, Error)>
+        OnEmitted) {
+
+  RuntimeDyld RTDyld(MemMgr, Resolver);
+  RTDyld.setProcessAllSections(ProcessAllSections);
+
+  auto Info = RTDyld.loadObject(*O.getBinary());
+
+  if (RTDyld.hasError()) {
+    OnEmitted(std::move(O), std::move(Info),
+              make_error<StringError>(RTDyld.getErrorString(),
+                                      inconvertibleErrorCode()));
+    return;
+  }
+
+  if (auto Err = OnLoaded(*O.getBinary(), *Info, RTDyld.getSymbolTable())) {
+    OnEmitted(std::move(O), std::move(Info), std::move(Err));
+    // Bail out: without this return we would fall through and call
+    // finalizeAsync with moved-from O/Info/OnEmitted, invoking OnEmitted a
+    // second time on an already-consumed callback.
+    return;
+  }
+
+  RuntimeDyldImpl::finalizeAsync(std::move(RTDyld.Dyld), std::move(OnEmitted),
+                                 std::move(O), std::move(Info));
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
new file mode 100644
index 0000000000..1d8f1ac8ac
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.cpp
@@ -0,0 +1,121 @@
+//===-- RuntimeDyldCOFF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of COFF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldCOFF.h"
+#include "Targets/RuntimeDyldCOFFAArch64.h"
+#include "Targets/RuntimeDyldCOFFI386.h"
+#include "Targets/RuntimeDyldCOFFThumb.h"
+#include "Targets/RuntimeDyldCOFFX86_64.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/FormatVariadic.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+class LoadedCOFFObjectInfo final
+ : public LoadedObjectInfoHelper<LoadedCOFFObjectInfo,
+ RuntimeDyld::LoadedObjectInfo> {
+public:
+ LoadedCOFFObjectInfo(
+ RuntimeDyldImpl &RTDyld,
+ RuntimeDyld::LoadedObjectInfo::ObjSectionToIDMap ObjSecToIDMap)
+ : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+ OwningBinary<ObjectFile>
+ getObjectForDebug(const ObjectFile &Obj) const override {
+ return OwningBinary<ObjectFile>();
+ }
+};
+}
+
+namespace llvm {
+
+std::unique_ptr<RuntimeDyldCOFF>
+llvm::RuntimeDyldCOFF::create(Triple::ArchType Arch,
+ RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver) {
+ switch (Arch) {
+ default: llvm_unreachable("Unsupported target for RuntimeDyldCOFF.");
+ case Triple::x86:
+ return std::make_unique<RuntimeDyldCOFFI386>(MemMgr, Resolver);
+ case Triple::thumb:
+ return std::make_unique<RuntimeDyldCOFFThumb>(MemMgr, Resolver);
+ case Triple::x86_64:
+ return std::make_unique<RuntimeDyldCOFFX86_64>(MemMgr, Resolver);
+ case Triple::aarch64:
+ return std::make_unique<RuntimeDyldCOFFAArch64>(MemMgr, Resolver);
+ }
+}
+
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldCOFF::loadObject(const object::ObjectFile &O) {
+ if (auto ObjSectionToIDOrErr = loadObjectImpl(O)) {
+ return std::make_unique<LoadedCOFFObjectInfo>(*this, *ObjSectionToIDOrErr);
+ } else {
+ HasError = true;
+ raw_string_ostream ErrStream(ErrorStr);
+ logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+ return nullptr;
+ }
+}
+
+uint64_t RuntimeDyldCOFF::getSymbolOffset(const SymbolRef &Sym) {
+ // The value in a relocatable COFF object is the offset.
+ return cantFail(Sym.getValue());
+}
+
+uint64_t RuntimeDyldCOFF::getDLLImportOffset(unsigned SectionID, StubMap &Stubs,
+ StringRef Name,
+ bool SetSectionIDMinus1) {
+ LLVM_DEBUG(dbgs() << "Getting DLLImport entry for " << Name << "... ");
+ assert(Name.startswith(getImportSymbolPrefix()) && "Not a DLLImport symbol?");
+ RelocationValueRef Reloc;
+ Reloc.SymbolName = Name.data();
+ auto I = Stubs.find(Reloc);
+ if (I != Stubs.end()) {
+ LLVM_DEBUG(dbgs() << format("{0:x8}", I->second) << "\n");
+ return I->second;
+ }
+
+ assert(SectionID < Sections.size() && "SectionID out of range");
+ auto &Sec = Sections[SectionID];
+ auto EntryOffset = alignTo(Sec.getStubOffset(), PointerSize);
+ Sec.advanceStubOffset(EntryOffset + PointerSize - Sec.getStubOffset());
+ Stubs[Reloc] = EntryOffset;
+
+ RelocationEntry RE(SectionID, EntryOffset, PointerReloc, 0, false,
+ Log2_64(PointerSize));
+ // Hack to tell I386/Thumb resolveRelocation that this isn't section relative.
+ if (SetSectionIDMinus1)
+ RE.Sections.SectionA = -1;
+ addRelocationForSymbol(RE, Name.drop_front(getImportSymbolPrefix().size()));
+
+ LLVM_DEBUG({
+ dbgs() << "Creating entry at "
+ << formatv("{0:x16} + {1:x8} ( {2:x16} )", Sec.getLoadAddress(),
+ EntryOffset, Sec.getLoadAddress() + EntryOffset)
+ << "\n";
+ });
+ return EntryOffset;
+}
+
+bool RuntimeDyldCOFF::isCompatibleFile(const object::ObjectFile &Obj) const {
+ return Obj.isCOFF();
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h
new file mode 100644
index 0000000000..41ee06c154
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCOFF.h
@@ -0,0 +1,61 @@
+//===-- RuntimeDyldCOFF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_RUNTIME_DYLD_COFF_H
+#define LLVM_RUNTIME_DYLD_COFF_H
+
+#include "RuntimeDyldImpl.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm;
+
+namespace llvm {
+
+// Common base class for COFF dynamic linker support.
+// Concrete subclasses for each target can be found in ./Targets.
+class RuntimeDyldCOFF : public RuntimeDyldImpl {
+
+public:
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &Obj) override;
+ bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+
+ static std::unique_ptr<RuntimeDyldCOFF>
+ create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+
+protected:
+ RuntimeDyldCOFF(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver, unsigned PointerSize,
+ uint32_t PointerReloc)
+ : RuntimeDyldImpl(MemMgr, Resolver), PointerSize(PointerSize),
+ PointerReloc(PointerReloc) {
+ assert((PointerSize == 4 || PointerSize == 8) && "Unexpected pointer size");
+ }
+
+ uint64_t getSymbolOffset(const SymbolRef &Sym);
+ uint64_t getDLLImportOffset(unsigned SectionID, StubMap &Stubs,
+ StringRef Name, bool SetSectionIDMinus1 = false);
+
+ static constexpr StringRef getImportSymbolPrefix() { return "__imp_"; }
+
+private:
+ unsigned PointerSize;
+ uint32_t PointerReloc;
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
new file mode 100644
index 0000000000..33db23408c
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldChecker.cpp
@@ -0,0 +1,910 @@
+//===--- RuntimeDyldChecker.cpp - RuntimeDyld tester framework --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "RuntimeDyldCheckerImpl.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MSVCErrorWorkarounds.h"
+#include "llvm/Support/Path.h"
+#include <cctype>
+#include <memory>
+#include <utility>
+
+#define DEBUG_TYPE "rtdyld"
+
+using namespace llvm;
+
+namespace llvm {
+
+// Helper class that implements the language evaluated by RuntimeDyldChecker.
+class RuntimeDyldCheckerExprEval {
+public:
+ RuntimeDyldCheckerExprEval(const RuntimeDyldCheckerImpl &Checker,
+ raw_ostream &ErrStream)
+ : Checker(Checker) {}
+
+ bool evaluate(StringRef Expr) const {
+ // Expect equality expression of the form 'LHS = RHS'.
+ Expr = Expr.trim();
+ size_t EQIdx = Expr.find('=');
+
+ ParseContext OutsideLoad(false);
+
+ // Evaluate LHS.
+ StringRef LHSExpr = Expr.substr(0, EQIdx).rtrim();
+ StringRef RemainingExpr;
+ EvalResult LHSResult;
+ std::tie(LHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(LHSExpr, OutsideLoad), OutsideLoad);
+ if (LHSResult.hasError())
+ return handleError(Expr, LHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, LHSExpr, ""));
+
+ // Evaluate RHS.
+ StringRef RHSExpr = Expr.substr(EQIdx + 1).ltrim();
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RHSExpr, OutsideLoad), OutsideLoad);
+ if (RHSResult.hasError())
+ return handleError(Expr, RHSResult);
+ if (RemainingExpr != "")
+ return handleError(Expr, unexpectedToken(RemainingExpr, RHSExpr, ""));
+
+ if (LHSResult.getValue() != RHSResult.getValue()) {
+ Checker.ErrStream << "Expression '" << Expr << "' is false: "
+ << format("0x%" PRIx64, LHSResult.getValue())
+ << " != " << format("0x%" PRIx64, RHSResult.getValue())
+ << "\n";
+ return false;
+ }
+ return true;
+ }
+
+private:
+ // RuntimeDyldCheckerExprEval requires some context when parsing exprs. In
+ // particular, it needs to know whether a symbol is being evaluated in the
+ // context of a load, in which case we want the linker's local address for
+ // the symbol, or outside of a load, in which case we want the symbol's
+ // address in the remote target.
+
+ struct ParseContext {
+ bool IsInsideLoad;
+ ParseContext(bool IsInsideLoad) : IsInsideLoad(IsInsideLoad) {}
+ };
+
+ const RuntimeDyldCheckerImpl &Checker;
+
+ enum class BinOpToken : unsigned {
+ Invalid,
+ Add,
+ Sub,
+ BitwiseAnd,
+ BitwiseOr,
+ ShiftLeft,
+ ShiftRight
+ };
+
+ class EvalResult {
+ public:
+ EvalResult() : Value(0) {}
+ EvalResult(uint64_t Value) : Value(Value) {}
+ EvalResult(std::string ErrorMsg)
+ : Value(0), ErrorMsg(std::move(ErrorMsg)) {}
+ uint64_t getValue() const { return Value; }
+ bool hasError() const { return ErrorMsg != ""; }
+ const std::string &getErrorMsg() const { return ErrorMsg; }
+
+ private:
+ uint64_t Value;
+ std::string ErrorMsg;
+ };
+
+ StringRef getTokenForError(StringRef Expr) const {
+ if (Expr.empty())
+ return "";
+
+ StringRef Token, Remaining;
+ if (isalpha(Expr[0]))
+ std::tie(Token, Remaining) = parseSymbol(Expr);
+ else if (isdigit(Expr[0]))
+ std::tie(Token, Remaining) = parseNumberString(Expr);
+ else {
+ unsigned TokLen = 1;
+ if (Expr.startswith("<<") || Expr.startswith(">>"))
+ TokLen = 2;
+ Token = Expr.substr(0, TokLen);
+ }
+ return Token;
+ }
+
+ EvalResult unexpectedToken(StringRef TokenStart, StringRef SubExpr,
+ StringRef ErrText) const {
+ std::string ErrorMsg("Encountered unexpected token '");
+ ErrorMsg += getTokenForError(TokenStart);
+ if (SubExpr != "") {
+ ErrorMsg += "' while parsing subexpression '";
+ ErrorMsg += SubExpr;
+ }
+ ErrorMsg += "'";
+ if (ErrText != "") {
+ ErrorMsg += " ";
+ ErrorMsg += ErrText;
+ }
+ return EvalResult(std::move(ErrorMsg));
+ }
+
+ bool handleError(StringRef Expr, const EvalResult &R) const {
+ assert(R.hasError() && "Not an error result.");
+ Checker.ErrStream << "Error evaluating expression '" << Expr
+ << "': " << R.getErrorMsg() << "\n";
+ return false;
+ }
+
+ std::pair<BinOpToken, StringRef> parseBinOpToken(StringRef Expr) const {
+ if (Expr.empty())
+ return std::make_pair(BinOpToken::Invalid, "");
+
+ // Handle the two 2-character tokens.
+ if (Expr.startswith("<<"))
+ return std::make_pair(BinOpToken::ShiftLeft, Expr.substr(2).ltrim());
+ if (Expr.startswith(">>"))
+ return std::make_pair(BinOpToken::ShiftRight, Expr.substr(2).ltrim());
+
+ // Handle one-character tokens.
+ BinOpToken Op;
+ switch (Expr[0]) {
+ default:
+ return std::make_pair(BinOpToken::Invalid, Expr);
+ case '+':
+ Op = BinOpToken::Add;
+ break;
+ case '-':
+ Op = BinOpToken::Sub;
+ break;
+ case '&':
+ Op = BinOpToken::BitwiseAnd;
+ break;
+ case '|':
+ Op = BinOpToken::BitwiseOr;
+ break;
+ }
+
+ return std::make_pair(Op, Expr.substr(1).ltrim());
+ }
+
+ EvalResult computeBinOpResult(BinOpToken Op, const EvalResult &LHSResult,
+ const EvalResult &RHSResult) const {
+ switch (Op) {
+ default:
+ llvm_unreachable("Tried to evaluate unrecognized operation.");
+ case BinOpToken::Add:
+ return EvalResult(LHSResult.getValue() + RHSResult.getValue());
+ case BinOpToken::Sub:
+ return EvalResult(LHSResult.getValue() - RHSResult.getValue());
+ case BinOpToken::BitwiseAnd:
+ return EvalResult(LHSResult.getValue() & RHSResult.getValue());
+ case BinOpToken::BitwiseOr:
+ return EvalResult(LHSResult.getValue() | RHSResult.getValue());
+ case BinOpToken::ShiftLeft:
+ return EvalResult(LHSResult.getValue() << RHSResult.getValue());
+ case BinOpToken::ShiftRight:
+ return EvalResult(LHSResult.getValue() >> RHSResult.getValue());
+ }
+ }
+
+ // Parse a symbol and return a (string, string) pair representing the symbol
+ // name and expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseSymbol(StringRef Expr) const {
+ size_t FirstNonSymbol = Expr.find_first_not_of("0123456789"
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ ":_.$");
+ return std::make_pair(Expr.substr(0, FirstNonSymbol),
+ Expr.substr(FirstNonSymbol).ltrim());
+ }
+
+ // Evaluate a call to decode_operand. Decode the instruction operand at the
+ // given symbol and get the value of the requested operand.
+ // Returns an error if the instruction cannot be decoded, or the requested
+ // operand is not an immediate.
+ // On success, returns a pair containing the value of the operand, plus
+ // the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalDecodeOperand(StringRef Expr) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(
+ EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
+ "");
+
+ // if there is an offset number expr
+ int64_t Offset = 0;
+ BinOpToken BinOp;
+ std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
+ switch (BinOp) {
+ case BinOpToken::Add: {
+ EvalResult Number;
+ std::tie(Number, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ Offset = Number.getValue();
+ break;
+ }
+ case BinOpToken::Invalid:
+ break;
+ default:
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr,
+ "expected '+' for offset or ',' if no offset"),
+ "");
+ }
+
+ if (!RemainingExpr.startswith(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult OpIdxExpr;
+ std::tie(OpIdxExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (OpIdxExpr.hasError())
+ return std::make_pair(OpIdxExpr, "");
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t Size;
+ if (!decodeInst(Symbol, Inst, Size, Offset))
+ return std::make_pair(
+ EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
+ "");
+
+ unsigned OpIdx = OpIdxExpr.getValue();
+ if (OpIdx >= Inst.getNumOperands()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Invalid operand index '" << format("%i", OpIdx)
+ << "' for instruction '" << Symbol
+ << "'. Instruction has only "
+ << format("%i", Inst.getNumOperands())
+ << " operands.\nInstruction is:\n ";
+ Inst.dump_pretty(ErrMsgStream, Checker.InstPrinter);
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+
+ const MCOperand &Op = Inst.getOperand(OpIdx);
+ if (!Op.isImm()) {
+ std::string ErrMsg;
+ raw_string_ostream ErrMsgStream(ErrMsg);
+ ErrMsgStream << "Operand '" << format("%i", OpIdx) << "' of instruction '"
+ << Symbol << "' is not an immediate.\nInstruction is:\n ";
+ Inst.dump_pretty(ErrMsgStream, Checker.InstPrinter);
+
+ return std::make_pair(EvalResult(ErrMsgStream.str()), "");
+ }
+
+ return std::make_pair(EvalResult(Op.getImm()), RemainingExpr);
+ }
+
+ // Evaluate a call to next_pc.
+ // Decode the instruction at the given symbol and return the following program
+ // counter.
+ // Returns an error if the instruction cannot be decoded.
+  // On success, returns a pair containing the next PC, plus the
+ // expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef> evalNextPC(StringRef Expr,
+ ParseContext PCtx) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!Checker.isSymbolValid(Symbol))
+ return std::make_pair(
+ EvalResult(("Cannot decode unknown symbol '" + Symbol + "'").str()),
+ "");
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ MCInst Inst;
+ uint64_t InstSize;
+ if (!decodeInst(Symbol, Inst, InstSize, 0))
+ return std::make_pair(
+ EvalResult(("Couldn't decode instruction at '" + Symbol + "'").str()),
+ "");
+
+ uint64_t SymbolAddr = PCtx.IsInsideLoad
+ ? Checker.getSymbolLocalAddr(Symbol)
+ : Checker.getSymbolRemoteAddr(Symbol);
+ uint64_t NextPC = SymbolAddr + InstSize;
+
+ return std::make_pair(EvalResult(NextPC), RemainingExpr);
+ }
+
+ // Evaluate a call to stub_addr/got_addr.
+ // Look up and return the address of the stub for the given
+ // (<file name>, <section name>, <symbol name>) tuple.
+ // On success, returns a pair containing the stub address, plus the expression
+ // remaining to be evaluated.
+ std::pair<EvalResult, StringRef>
+ evalStubOrGOTAddr(StringRef Expr, ParseContext PCtx, bool IsStubAddr) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Handle file-name specially, as it may contain characters that aren't
+ // legal for symbols.
+ StringRef StubContainerName;
+ size_t ComaIdx = RemainingExpr.find(',');
+ StubContainerName = RemainingExpr.substr(0, ComaIdx).rtrim();
+ RemainingExpr = RemainingExpr.substr(ComaIdx).ltrim();
+
+ if (!RemainingExpr.startswith(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ StringRef Symbol;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(RemainingExpr);
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ uint64_t StubAddr;
+ std::string ErrorMsg;
+ std::tie(StubAddr, ErrorMsg) = Checker.getStubOrGOTAddrFor(
+ StubContainerName, Symbol, PCtx.IsInsideLoad, IsStubAddr);
+
+ if (ErrorMsg != "")
+ return std::make_pair(EvalResult(ErrorMsg), "");
+
+ return std::make_pair(EvalResult(StubAddr), RemainingExpr);
+ }
+
+ std::pair<EvalResult, StringRef> evalSectionAddr(StringRef Expr,
+ ParseContext PCtx) const {
+ if (!Expr.startswith("("))
+ return std::make_pair(unexpectedToken(Expr, Expr, "expected '('"), "");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Handle file-name specially, as it may contain characters that aren't
+ // legal for symbols.
+ StringRef FileName;
+ size_t ComaIdx = RemainingExpr.find(',');
+ FileName = RemainingExpr.substr(0, ComaIdx).rtrim();
+ RemainingExpr = RemainingExpr.substr(ComaIdx).ltrim();
+
+ if (!RemainingExpr.startswith(","))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ','"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ StringRef SectionName;
+ size_t CloseParensIdx = RemainingExpr.find(')');
+ SectionName = RemainingExpr.substr(0, CloseParensIdx).rtrim();
+ RemainingExpr = RemainingExpr.substr(CloseParensIdx).ltrim();
+
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ uint64_t StubAddr;
+ std::string ErrorMsg;
+ std::tie(StubAddr, ErrorMsg) = Checker.getSectionAddr(
+ FileName, SectionName, PCtx.IsInsideLoad);
+
+ if (ErrorMsg != "")
+ return std::make_pair(EvalResult(ErrorMsg), "");
+
+ return std::make_pair(EvalResult(StubAddr), RemainingExpr);
+ }
+
+  // Evaluate an identifier expr, which may be a symbol, or a call to
+ // one of the builtin functions: get_insn_opcode or get_insn_length.
+ // Return the result, plus the expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalIdentifierExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ StringRef Symbol;
+ StringRef RemainingExpr;
+ std::tie(Symbol, RemainingExpr) = parseSymbol(Expr);
+
+ // Check for builtin function calls.
+ if (Symbol == "decode_operand")
+ return evalDecodeOperand(RemainingExpr);
+ else if (Symbol == "next_pc")
+ return evalNextPC(RemainingExpr, PCtx);
+ else if (Symbol == "stub_addr")
+ return evalStubOrGOTAddr(RemainingExpr, PCtx, true);
+ else if (Symbol == "got_addr")
+ return evalStubOrGOTAddr(RemainingExpr, PCtx, false);
+ else if (Symbol == "section_addr")
+ return evalSectionAddr(RemainingExpr, PCtx);
+
+ if (!Checker.isSymbolValid(Symbol)) {
+ std::string ErrMsg("No known address for symbol '");
+ ErrMsg += Symbol;
+ ErrMsg += "'";
+ if (Symbol.startswith("L"))
+ ErrMsg += " (this appears to be an assembler local label - "
+ " perhaps drop the 'L'?)";
+
+ return std::make_pair(EvalResult(ErrMsg), "");
+ }
+
+ // The value for the symbol depends on the context we're evaluating in:
+ // Inside a load this is the address in the linker's memory, outside a
+ // load it's the address in the target processes memory.
+ uint64_t Value = PCtx.IsInsideLoad ? Checker.getSymbolLocalAddr(Symbol)
+ : Checker.getSymbolRemoteAddr(Symbol);
+
+ // Looks like a plain symbol reference.
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Parse a number (hexadecimal or decimal) and return a (string, string)
+ // pair representing the number and the expression remaining to be parsed.
+ std::pair<StringRef, StringRef> parseNumberString(StringRef Expr) const {
+ size_t FirstNonDigit = StringRef::npos;
+ if (Expr.startswith("0x")) {
+ FirstNonDigit = Expr.find_first_not_of("0123456789abcdefABCDEF", 2);
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ } else {
+ FirstNonDigit = Expr.find_first_not_of("0123456789");
+ if (FirstNonDigit == StringRef::npos)
+ FirstNonDigit = Expr.size();
+ }
+ return std::make_pair(Expr.substr(0, FirstNonDigit),
+ Expr.substr(FirstNonDigit));
+ }
+
+ // Evaluate a constant numeric expression (hexadecimal or decimal) and
+ // return a pair containing the result, and the expression remaining to be
+ // evaluated.
+ std::pair<EvalResult, StringRef> evalNumberExpr(StringRef Expr) const {
+ StringRef ValueStr;
+ StringRef RemainingExpr;
+ std::tie(ValueStr, RemainingExpr) = parseNumberString(Expr);
+
+ if (ValueStr.empty() || !isdigit(ValueStr[0]))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected number"), "");
+ uint64_t Value;
+ ValueStr.getAsInteger(0, Value);
+ return std::make_pair(EvalResult(Value), RemainingExpr);
+ }
+
+ // Evaluate an expression of the form "(<expr>)" and return a pair
+ // containing the result of evaluating <expr>, plus the expression
+ // remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalParensExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ assert(Expr.startswith("(") && "Not a parenthesized expression");
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(Expr.substr(1).ltrim(), PCtx), PCtx);
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, "");
+ if (!RemainingExpr.startswith(")"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, Expr, "expected ')'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate an expression in one of the following forms:
+ // *{<number>}<expr>
+ // Return a pair containing the result, plus the expression remaining to be
+ // parsed.
+ std::pair<EvalResult, StringRef> evalLoadExpr(StringRef Expr) const {
+ assert(Expr.startswith("*") && "Not a load expression");
+ StringRef RemainingExpr = Expr.substr(1).ltrim();
+
+ // Parse read size.
+ if (!RemainingExpr.startswith("{"))
+ return std::make_pair(EvalResult("Expected '{' following '*'."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+ EvalResult ReadSizeExpr;
+ std::tie(ReadSizeExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+ if (ReadSizeExpr.hasError())
+ return std::make_pair(ReadSizeExpr, RemainingExpr);
+ uint64_t ReadSize = ReadSizeExpr.getValue();
+ if (ReadSize < 1 || ReadSize > 8)
+ return std::make_pair(EvalResult("Invalid size for dereference."), "");
+ if (!RemainingExpr.startswith("}"))
+ return std::make_pair(EvalResult("Missing '}' for dereference."), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ // Evaluate the expression representing the load address.
+ ParseContext LoadCtx(true);
+ EvalResult LoadAddrExprResult;
+ std::tie(LoadAddrExprResult, RemainingExpr) =
+ evalComplexExpr(evalSimpleExpr(RemainingExpr, LoadCtx), LoadCtx);
+
+ if (LoadAddrExprResult.hasError())
+ return std::make_pair(LoadAddrExprResult, "");
+
+ uint64_t LoadAddr = LoadAddrExprResult.getValue();
+
+ // If there is no error but the content pointer is null then this is a
+ // zero-fill symbol/section.
+ if (LoadAddr == 0)
+ return std::make_pair(0, RemainingExpr);
+
+ return std::make_pair(
+ EvalResult(Checker.readMemoryAtAddr(LoadAddr, ReadSize)),
+ RemainingExpr);
+ }
+
+ // Evaluate a "simple" expression. This is any expression that _isn't_ an
+ // un-parenthesized binary expression.
+ //
+ // "Simple" expressions can be optionally bit-sliced. See evalSlicedExpr.
+ //
+ // Returns a pair containing the result of the evaluation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef> evalSimpleExpr(StringRef Expr,
+ ParseContext PCtx) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+
+ if (Expr.empty())
+ return std::make_pair(EvalResult("Unexpected end of expression"), "");
+
+ if (Expr[0] == '(')
+ std::tie(SubExprResult, RemainingExpr) = evalParensExpr(Expr, PCtx);
+ else if (Expr[0] == '*')
+ std::tie(SubExprResult, RemainingExpr) = evalLoadExpr(Expr);
+ else if (isalpha(Expr[0]) || Expr[0] == '_')
+ std::tie(SubExprResult, RemainingExpr) = evalIdentifierExpr(Expr, PCtx);
+ else if (isdigit(Expr[0]))
+ std::tie(SubExprResult, RemainingExpr) = evalNumberExpr(Expr);
+ else
+ return std::make_pair(
+ unexpectedToken(Expr, Expr,
+ "expected '(', '*', identifier, or number"), "");
+
+ if (SubExprResult.hasError())
+ return std::make_pair(SubExprResult, RemainingExpr);
+
+ // Evaluate bit-slice if present.
+ if (RemainingExpr.startswith("["))
+ std::tie(SubExprResult, RemainingExpr) =
+ evalSliceExpr(std::make_pair(SubExprResult, RemainingExpr));
+
+ return std::make_pair(SubExprResult, RemainingExpr);
+ }
+
+ // Evaluate a bit-slice of an expression.
+ // A bit-slice has the form "<expr>[high:low]". The result of evaluating a
+ // slice is the bits between high and low (inclusive) in the original
+ // expression, right shifted so that the "low" bit is in position 0 in the
+ // result.
+ // Returns a pair containing the result of the slice operation, plus the
+ // expression remaining to be parsed.
+ std::pair<EvalResult, StringRef>
+ evalSliceExpr(const std::pair<EvalResult, StringRef> &Ctx) const {
+ EvalResult SubExprResult;
+ StringRef RemainingExpr;
+ std::tie(SubExprResult, RemainingExpr) = Ctx;
+
+ assert(RemainingExpr.startswith("[") && "Not a slice expr.");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult HighBitExpr;
+ std::tie(HighBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (HighBitExpr.hasError())
+ return std::make_pair(HighBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.startswith(":"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ':'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ EvalResult LowBitExpr;
+ std::tie(LowBitExpr, RemainingExpr) = evalNumberExpr(RemainingExpr);
+
+ if (LowBitExpr.hasError())
+ return std::make_pair(LowBitExpr, RemainingExpr);
+
+ if (!RemainingExpr.startswith("]"))
+ return std::make_pair(
+ unexpectedToken(RemainingExpr, RemainingExpr, "expected ']'"), "");
+ RemainingExpr = RemainingExpr.substr(1).ltrim();
+
+ unsigned HighBit = HighBitExpr.getValue();
+ unsigned LowBit = LowBitExpr.getValue();
+ uint64_t Mask = ((uint64_t)1 << (HighBit - LowBit + 1)) - 1;
+ uint64_t SlicedValue = (SubExprResult.getValue() >> LowBit) & Mask;
+ return std::make_pair(EvalResult(SlicedValue), RemainingExpr);
+ }
+
+ // Evaluate a "complex" expression.
+ // Takes an already evaluated subexpression and checks for the presence of a
+ // binary operator, computing the result of the binary operation if one is
+ // found. Used to make arithmetic expressions left-associative.
+ // Returns a pair containing the ultimate result of evaluating the
+ // expression, plus the expression remaining to be evaluated.
+ std::pair<EvalResult, StringRef>
+ evalComplexExpr(const std::pair<EvalResult, StringRef> &LHSAndRemaining,
+ ParseContext PCtx) const {
+ EvalResult LHSResult;
+ StringRef RemainingExpr;
+ std::tie(LHSResult, RemainingExpr) = LHSAndRemaining;
+
+ // If there was an error, or there's nothing left to evaluate, return the
+ // result.
+ if (LHSResult.hasError() || RemainingExpr == "")
+ return std::make_pair(LHSResult, RemainingExpr);
+
+    // Otherwise check if this is a binary expression.
+ BinOpToken BinOp;
+ std::tie(BinOp, RemainingExpr) = parseBinOpToken(RemainingExpr);
+
+ // If this isn't a recognized expression just return.
+ if (BinOp == BinOpToken::Invalid)
+ return std::make_pair(LHSResult, RemainingExpr);
+
+ // This is a recognized bin-op. Evaluate the RHS, then evaluate the binop.
+ EvalResult RHSResult;
+ std::tie(RHSResult, RemainingExpr) = evalSimpleExpr(RemainingExpr, PCtx);
+
+ // If there was an error evaluating the RHS, return it.
+ if (RHSResult.hasError())
+ return std::make_pair(RHSResult, RemainingExpr);
+
+ // This is a binary expression - evaluate and try to continue as a
+ // complex expr.
+ EvalResult ThisResult(computeBinOpResult(BinOp, LHSResult, RHSResult));
+
+ return evalComplexExpr(std::make_pair(ThisResult, RemainingExpr), PCtx);
+ }
+
+ bool decodeInst(StringRef Symbol, MCInst &Inst, uint64_t &Size,
+ int64_t Offset) const {
+ MCDisassembler *Dis = Checker.Disassembler;
+ StringRef SymbolMem = Checker.getSymbolContent(Symbol);
+ ArrayRef<uint8_t> SymbolBytes(SymbolMem.bytes_begin() + Offset,
+ SymbolMem.size() - Offset);
+
+ MCDisassembler::DecodeStatus S =
+ Dis->getInstruction(Inst, Size, SymbolBytes, 0, nulls());
+
+ return (S == MCDisassembler::Success);
+ }
+};
+} // namespace llvm
+
+RuntimeDyldCheckerImpl::RuntimeDyldCheckerImpl(
+ IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+ GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+ GetGOTInfoFunction GetGOTInfo, support::endianness Endianness,
+ MCDisassembler *Disassembler, MCInstPrinter *InstPrinter,
+ raw_ostream &ErrStream)
+ : IsSymbolValid(std::move(IsSymbolValid)),
+ GetSymbolInfo(std::move(GetSymbolInfo)),
+ GetSectionInfo(std::move(GetSectionInfo)),
+ GetStubInfo(std::move(GetStubInfo)), GetGOTInfo(std::move(GetGOTInfo)),
+ Endianness(Endianness), Disassembler(Disassembler),
+ InstPrinter(InstPrinter), ErrStream(ErrStream) {}
+
+// Evaluate a single rtdyld-check expression. Returns true if the expression
+// evaluated successfully and held.
+bool RuntimeDyldCheckerImpl::check(StringRef CheckExpr) const {
+  CheckExpr = CheckExpr.trim();
+  LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: Checking '" << CheckExpr
+                    << "'...\n");
+  RuntimeDyldCheckerExprEval P(*this, ErrStream);
+  bool Result = P.evaluate(CheckExpr);
+  (void)Result;
+  LLVM_DEBUG(dbgs() << "RuntimeDyldChecker: '" << CheckExpr << "' "
+                    << (Result ? "passed" : "FAILED") << ".\n");
+  return Result;
+}
+
+// Scan MemBuf line by line, collect every line beginning with RulePrefix into
+// a check expression (lines ending in '\' continue onto the next rule line),
+// and run each completed expression through check(). Returns true only if at
+// least one rule was found and all rules passed.
+bool RuntimeDyldCheckerImpl::checkAllRulesInBuffer(StringRef RulePrefix,
+                                                   MemoryBuffer *MemBuf) const {
+  bool DidAllTestsPass = true;
+  unsigned NumRules = 0;
+
+  std::string CheckExpr;
+  const char *LineStart = MemBuf->getBufferStart();
+
+  // Eat whitespace.
+  while (LineStart != MemBuf->getBufferEnd() && isSpace(*LineStart))
+    ++LineStart;
+
+  while (LineStart != MemBuf->getBufferEnd() && *LineStart != '\0') {
+    const char *LineEnd = LineStart;
+    while (LineEnd != MemBuf->getBufferEnd() && *LineEnd != '\r' &&
+           *LineEnd != '\n')
+      ++LineEnd;
+
+    StringRef Line(LineStart, LineEnd - LineStart);
+    if (Line.startswith(RulePrefix))
+      CheckExpr += Line.substr(RulePrefix.size()).str();
+
+    // If there's a check expr string...
+    if (!CheckExpr.empty()) {
+      // ... and it's complete then run it, otherwise remove the trailing '\'.
+      if (CheckExpr.back() != '\\') {
+        DidAllTestsPass &= check(CheckExpr);
+        CheckExpr.clear();
+        ++NumRules;
+      } else
+        CheckExpr.pop_back();
+    }
+
+    // Eat whitespace.
+    LineStart = LineEnd;
+    while (LineStart != MemBuf->getBufferEnd() && isSpace(*LineStart))
+      ++LineStart;
+  }
+  return DidAllTestsPass && (NumRules != 0);
+}
+
+// Forward symbol-existence queries to the user-supplied callback.
+bool RuntimeDyldCheckerImpl::isSymbolValid(StringRef Symbol) const {
+  return IsSymbolValid(Symbol);
+}
+
+// Return the address of the symbol's content in the checker's own (local)
+// address space, or 0 if the symbol is unknown or zero-filled. Lookup errors
+// are logged to errs() rather than propagated.
+uint64_t RuntimeDyldCheckerImpl::getSymbolLocalAddr(StringRef Symbol) const {
+  auto SymInfo = GetSymbolInfo(Symbol);
+  if (!SymInfo) {
+    logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+    return 0;
+  }
+
+  if (SymInfo->isZeroFill())
+    return 0;
+
+  return static_cast<uint64_t>(
+      reinterpret_cast<uintptr_t>(SymInfo->getContent().data()));
+}
+
+// Return the symbol's address in the target (execution) address space, or 0
+// on lookup failure (the error is logged to errs()).
+uint64_t RuntimeDyldCheckerImpl::getSymbolRemoteAddr(StringRef Symbol) const {
+  auto SymInfo = GetSymbolInfo(Symbol);
+  if (!SymInfo) {
+    logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+    return 0;
+  }
+
+  return SymInfo->getTargetAddress();
+}
+
+// Read a 1, 2, 4 or 8 byte integer from SrcAddr (a local address) using the
+// configured target endianness. SrcAddr must fit in a host pointer.
+uint64_t RuntimeDyldCheckerImpl::readMemoryAtAddr(uint64_t SrcAddr,
+                                                  unsigned Size) const {
+  uintptr_t PtrSizedAddr = static_cast<uintptr_t>(SrcAddr);
+  assert(PtrSizedAddr == SrcAddr && "Linker memory pointer out-of-range.");
+  void *Ptr = reinterpret_cast<void*>(PtrSizedAddr);
+
+  switch (Size) {
+  case 1:
+    return support::endian::read<uint8_t>(Ptr, Endianness);
+  case 2:
+    return support::endian::read<uint16_t>(Ptr, Endianness);
+  case 4:
+    return support::endian::read<uint32_t>(Ptr, Endianness);
+  case 8:
+    return support::endian::read<uint64_t>(Ptr, Endianness);
+  }
+  llvm_unreachable("Unsupported read size");
+}
+
+// Return a view of the symbol's content bytes, or an empty StringRef on
+// lookup failure (the error is logged to errs()).
+StringRef RuntimeDyldCheckerImpl::getSymbolContent(StringRef Symbol) const {
+  auto SymInfo = GetSymbolInfo(Symbol);
+  if (!SymInfo) {
+    logAllUnhandledErrors(SymInfo.takeError(), errs(), "RTDyldChecker: ");
+    return StringRef();
+  }
+  return {SymInfo->getContent().data(), SymInfo->getContent().size()};
+}
+
+// Look up a section's address. If IsInsideLoad is true, return the local
+// (content) address (0 for zero-filled sections); otherwise return the target
+// address. On failure returns {0, <error message>}; on success the string is
+// empty.
+std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getSectionAddr(
+    StringRef FileName, StringRef SectionName, bool IsInsideLoad) const {
+
+  auto SecInfo = GetSectionInfo(FileName, SectionName);
+  if (!SecInfo) {
+    std::string ErrMsg;
+    {
+      raw_string_ostream ErrMsgStream(ErrMsg);
+      logAllUnhandledErrors(SecInfo.takeError(), ErrMsgStream,
+                            "RTDyldChecker: ");
+    }
+    return std::make_pair(0, std::move(ErrMsg));
+  }
+
+  // If this address is being looked up in "load" mode, return the content
+  // pointer, otherwise return the target address.
+
+  uint64_t Addr = 0;
+
+  if (IsInsideLoad) {
+    if (SecInfo->isZeroFill())
+      Addr = 0;
+    else
+      Addr = pointerToJITTargetAddress(SecInfo->getContent().data());
+  } else
+    Addr = SecInfo->getTargetAddress();
+
+  return std::make_pair(Addr, "");
+}
+
+// Look up the address of a stub (IsStubAddr == true) or GOT entry for
+// SymbolName within StubContainerName. IsInsideLoad selects the local content
+// address vs. the target address, as in getSectionAddr. Returns
+// {0, <error message>} on failure or for zero-filled entries in load mode.
+std::pair<uint64_t, std::string> RuntimeDyldCheckerImpl::getStubOrGOTAddrFor(
+    StringRef StubContainerName, StringRef SymbolName, bool IsInsideLoad,
+    bool IsStubAddr) const {
+
+  auto StubInfo = IsStubAddr ? GetStubInfo(StubContainerName, SymbolName)
+                             : GetGOTInfo(StubContainerName, SymbolName);
+
+  if (!StubInfo) {
+    std::string ErrMsg;
+    {
+      raw_string_ostream ErrMsgStream(ErrMsg);
+      logAllUnhandledErrors(StubInfo.takeError(), ErrMsgStream,
+                            "RTDyldChecker: ");
+    }
+    return std::make_pair((uint64_t)0, std::move(ErrMsg));
+  }
+
+  uint64_t Addr = 0;
+
+  if (IsInsideLoad) {
+    if (StubInfo->isZeroFill())
+      return std::make_pair((uint64_t)0, "Detected zero-filled stub/GOT entry");
+    Addr = pointerToJITTargetAddress(StubInfo->getContent().data());
+  } else
+    Addr = StubInfo->getTargetAddress();
+
+  return std::make_pair(Addr, "");
+}
+
+// Public RuntimeDyldChecker interface: thin pimpl wrappers that forward to
+// RuntimeDyldCheckerImpl.
+RuntimeDyldChecker::RuntimeDyldChecker(
+    IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+    GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+    GetGOTInfoFunction GetGOTInfo, support::endianness Endianness,
+    MCDisassembler *Disassembler, MCInstPrinter *InstPrinter,
+    raw_ostream &ErrStream)
+    : Impl(::std::make_unique<RuntimeDyldCheckerImpl>(
+          std::move(IsSymbolValid), std::move(GetSymbolInfo),
+          std::move(GetSectionInfo), std::move(GetStubInfo),
+          std::move(GetGOTInfo), Endianness, Disassembler, InstPrinter,
+          ErrStream)) {}
+
+// Out-of-line destructor so the unique_ptr<Impl> destructor is instantiated
+// where RuntimeDyldCheckerImpl is a complete type.
+RuntimeDyldChecker::~RuntimeDyldChecker() {}
+
+bool RuntimeDyldChecker::check(StringRef CheckExpr) const {
+  return Impl->check(CheckExpr);
+}
+
+bool RuntimeDyldChecker::checkAllRulesInBuffer(StringRef RulePrefix,
+                                               MemoryBuffer *MemBuf) const {
+  return Impl->checkAllRulesInBuffer(RulePrefix, MemBuf);
+}
+
+std::pair<uint64_t, std::string>
+RuntimeDyldChecker::getSectionAddr(StringRef FileName, StringRef SectionName,
+                                   bool LocalAddress) {
+  return Impl->getSectionAddr(FileName, SectionName, LocalAddress);
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
new file mode 100644
index 0000000000..ac9d4d4602
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldCheckerImpl.h
@@ -0,0 +1,74 @@
+//===-- RuntimeDyldCheckerImpl.h -- RuntimeDyld test framework --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDCHECKERIMPL_H
+
+#include "RuntimeDyldImpl.h"
+
+namespace llvm {
+
+/// Implementation class for RuntimeDyldChecker (pimpl). Holds the
+/// user-supplied query functors and answers the symbol/section/stub/GOT and
+/// memory queries used by the rtdyld-check expression evaluator.
+class RuntimeDyldCheckerImpl {
+  friend class RuntimeDyldChecker;
+  friend class RuntimeDyldCheckerExprEval;
+
+  using IsSymbolValidFunction =
+    RuntimeDyldChecker::IsSymbolValidFunction;
+  using GetSymbolInfoFunction = RuntimeDyldChecker::GetSymbolInfoFunction;
+  using GetSectionInfoFunction = RuntimeDyldChecker::GetSectionInfoFunction;
+  using GetStubInfoFunction = RuntimeDyldChecker::GetStubInfoFunction;
+  using GetGOTInfoFunction = RuntimeDyldChecker::GetGOTInfoFunction;
+
+public:
+  RuntimeDyldCheckerImpl(
+      IsSymbolValidFunction IsSymbolValid, GetSymbolInfoFunction GetSymbolInfo,
+      GetSectionInfoFunction GetSectionInfo, GetStubInfoFunction GetStubInfo,
+      GetGOTInfoFunction GetGOTInfo, support::endianness Endianness,
+      MCDisassembler *Disassembler, MCInstPrinter *InstPrinter,
+      llvm::raw_ostream &ErrStream);
+
+  bool check(StringRef CheckExpr) const;
+  bool checkAllRulesInBuffer(StringRef RulePrefix, MemoryBuffer *MemBuf) const;
+
+private:
+
+  // StubMap typedefs.
+
+  Expected<JITSymbolResolver::LookupResult>
+  lookup(const JITSymbolResolver::LookupSet &Symbols) const;
+
+  bool isSymbolValid(StringRef Symbol) const;
+  uint64_t getSymbolLocalAddr(StringRef Symbol) const;
+  uint64_t getSymbolRemoteAddr(StringRef Symbol) const;
+  uint64_t readMemoryAtAddr(uint64_t Addr, unsigned Size) const;
+
+  StringRef getSymbolContent(StringRef Symbol) const;
+
+  std::pair<uint64_t, std::string> getSectionAddr(StringRef FileName,
+                                                  StringRef SectionName,
+                                                  bool IsInsideLoad) const;
+
+  std::pair<uint64_t, std::string>
+  getStubOrGOTAddrFor(StringRef StubContainerName, StringRef Symbol,
+                      bool IsInsideLoad, bool IsStubAddr) const;
+
+  Optional<uint64_t> getSectionLoadAddress(void *LocalAddr) const;
+
+  // Query functors supplied by the client; see RuntimeDyldChecker.
+  IsSymbolValidFunction IsSymbolValid;
+  GetSymbolInfoFunction GetSymbolInfo;
+  GetSectionInfoFunction GetSectionInfo;
+  GetStubInfoFunction GetStubInfo;
+  GetGOTInfoFunction GetGOTInfo;
+  support::endianness Endianness;
+  MCDisassembler *Disassembler;   // Borrowed, not owned.
+  MCInstPrinter *InstPrinter;     // Borrowed, not owned.
+  llvm::raw_ostream &ErrStream;
+};
+}
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
new file mode 100644
index 0000000000..f92618afdf
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -0,0 +1,2396 @@
+//===-- RuntimeDyldELF.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of ELF support for the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldELF.h"
+#include "RuntimeDyldCheckerImpl.h"
+#include "Targets/RuntimeDyldELFMips.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/BinaryFormat/ELF.h"
+#include "llvm/Object/ELFObjectFile.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace llvm;
+using namespace llvm::object;
+using namespace llvm::support::endian;
+
+#define DEBUG_TYPE "dyld"
+
+// OR V into the little-endian 32-bit word at P (used to patch instruction
+// immediate fields in place).
+static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+// OR the low 12 bits of Imm into bits [21:10] of the AArch64 instruction at
+// L (the immediate field of ADD/LD/ST-immediate encodings).
+static void or32AArch64Imm(void *L, uint64_t Imm) {
+  or32le(L, (Imm & 0xFFF) << 10);
+}
+
+// Write V at P in big- or little-endian byte order depending on isBE.
+template <class T> static void write(bool isBE, void *P, T V) {
+  isBE ? write<T, support::big>(P, V) : write<T, support::little>(P, V);
+}
+
+// Patch the immediate of an AArch64 ADR/ADRP instruction at L: bits [1:0] of
+// Imm go to instruction bits [30:29] (immlo), bits [20:2] to [23:5] (immhi).
+static void write32AArch64Addr(void *L, uint64_t Imm) {
+  uint32_t ImmLo = (Imm & 0x3) << 29;
+  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+  write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
+}
+
+// Return the bits [Start, End] from Val shifted Start bits.
+// For instance, getBits(0xF0, 4, 8) returns 0xF.
+static uint64_t getBits(uint64_t Val, int Start, int End) {
+  uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
+  return (Val >> Start) & Mask;
+}
+
+namespace {
+
+// An ELFObjectFile wrapper used by the dynamic linker: it allows section and
+// symbol addresses in the (copied) object headers to be rewritten to their
+// loaded addresses, so debuggers see a consistent image.
+template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> {
+  LLVM_ELF_IMPORT_TYPES_ELFT(ELFT)
+
+  typedef typename ELFT::uint addr_type;
+
+  DyldELFObject(ELFObjectFile<ELFT> &&Obj);
+
+public:
+  static Expected<std::unique_ptr<DyldELFObject>>
+  create(MemoryBufferRef Wrapper);
+
+  // Rewrite sh_addr of Sec in the wrapped headers to Addr.
+  void updateSectionAddress(const SectionRef &Sec, uint64_t Addr);
+
+  // Rewrite st_value of SymRef in the wrapped headers to Addr.
+  void updateSymbolAddress(const SymbolRef &SymRef, uint64_t Addr);
+
+  // Methods for type inquiry through isa, cast and dyn_cast
+  static bool classof(const Binary *v) {
+    return (isa<ELFObjectFile<ELFT>>(v) &&
+            classof(cast<ELFObjectFile<ELFT>>(v)));
+  }
+  static bool classof(const ELFObjectFile<ELFT> *v) {
+    return v->isDyldType();
+  }
+};
+
+
+
+// The MemoryBuffer passed into this constructor is just a wrapper around the
+// actual memory.  Ultimately, the Binary parent class will take ownership of
+// this MemoryBuffer object but not the underlying memory.
+template <class ELFT>
+DyldELFObject<ELFT>::DyldELFObject(ELFObjectFile<ELFT> &&Obj)
+    : ELFObjectFile<ELFT>(std::move(Obj)) {
+  this->isDyldELFObject = true;
+}
+
+// Factory: parse Wrapper as an ELF object and wrap it in a DyldELFObject.
+template <class ELFT>
+Expected<std::unique_ptr<DyldELFObject<ELFT>>>
+DyldELFObject<ELFT>::create(MemoryBufferRef Wrapper) {
+  auto Obj = ELFObjectFile<ELFT>::create(Wrapper);
+  if (auto E = Obj.takeError())
+    return std::move(E);
+  std::unique_ptr<DyldELFObject<ELFT>> Ret(
+      new DyldELFObject<ELFT>(std::move(*Obj)));
+  return std::move(Ret);
+}
+
+template <class ELFT>
+void DyldELFObject<ELFT>::updateSectionAddress(const SectionRef &Sec,
+                                               uint64_t Addr) {
+  DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
+  Elf_Shdr *shdr =
+      const_cast<Elf_Shdr *>(reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+
+  // This assumes the address passed in matches the target address bitness
+  // The template-based type cast handles everything else.
+  shdr->sh_addr = static_cast<addr_type>(Addr);
+}
+
+template <class ELFT>
+void DyldELFObject<ELFT>::updateSymbolAddress(const SymbolRef &SymRef,
+                                              uint64_t Addr) {
+
+  Elf_Sym *sym = const_cast<Elf_Sym *>(
+      ELFObjectFile<ELFT>::getSymbol(SymRef.getRawDataRefImpl()));
+
+  // This assumes the address passed in matches the target address bitness
+  // The template-based type cast handles everything else.
+  sym->st_value = static_cast<addr_type>(Addr);
+}
+
+// LoadedObjectInfo for ELF objects: records the section-to-ID mapping of a
+// loaded object and can produce a debug copy of it with relocated addresses.
+class LoadedELFObjectInfo final
+    : public LoadedObjectInfoHelper<LoadedELFObjectInfo,
+                                    RuntimeDyld::LoadedObjectInfo> {
+public:
+  LoadedELFObjectInfo(RuntimeDyldImpl &RTDyld, ObjSectionToIDMap ObjSecToIDMap)
+      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+  OwningBinary<ObjectFile>
+  getObjectForDebug(const ObjectFile &Obj) const override;
+};
+
+// Build a DyldELFObject over Buffer (a copy of SourceObject's bytes) and
+// patch each named section's sh_addr to its load address recorded in L, so
+// the resulting object reflects where the code was actually placed.
+template <typename ELFT>
+static Expected<std::unique_ptr<DyldELFObject<ELFT>>>
+createRTDyldELFObject(MemoryBufferRef Buffer, const ObjectFile &SourceObject,
+                      const LoadedELFObjectInfo &L) {
+  typedef typename ELFT::Shdr Elf_Shdr;
+  typedef typename ELFT::uint addr_type;
+
+  Expected<std::unique_ptr<DyldELFObject<ELFT>>> ObjOrErr =
+      DyldELFObject<ELFT>::create(Buffer);
+  if (Error E = ObjOrErr.takeError())
+    return std::move(E);
+
+  std::unique_ptr<DyldELFObject<ELFT>> Obj = std::move(*ObjOrErr);
+
+  // Iterate over all sections in the object.
+  auto SI = SourceObject.section_begin();
+  for (const auto &Sec : Obj->sections()) {
+    Expected<StringRef> NameOrErr = Sec.getName();
+    if (!NameOrErr) {
+      consumeError(NameOrErr.takeError());
+      continue;
+    }
+
+    if (*NameOrErr != "") {
+      DataRefImpl ShdrRef = Sec.getRawDataRefImpl();
+      Elf_Shdr *shdr = const_cast<Elf_Shdr *>(
+          reinterpret_cast<const Elf_Shdr *>(ShdrRef.p));
+
+      if (uint64_t SecLoadAddr = L.getSectionLoadAddress(*SI)) {
+        // This assumes that the address passed in matches the target address
+        // bitness. The template-based type cast handles everything else.
+        shdr->sh_addr = static_cast<addr_type>(SecLoadAddr);
+      }
+    }
+    ++SI;
+  }
+
+  return std::move(Obj);
+}
+
+// Create a debug copy of an ELF object with section addresses patched to
+// their load addresses, dispatching on the object's word size / endianness.
+static OwningBinary<ObjectFile>
+createELFDebugObject(const ObjectFile &Obj, const LoadedELFObjectInfo &L) {
+  assert(Obj.isELF() && "Not an ELF object file.");
+
+  std::unique_ptr<MemoryBuffer> Buffer =
+      MemoryBuffer::getMemBufferCopy(Obj.getData(), Obj.getFileName());
+
+  // The initial takeError() marks the Expected as checked before it is
+  // overwritten below.
+  Expected<std::unique_ptr<ObjectFile>> DebugObj(nullptr);
+  handleAllErrors(DebugObj.takeError());
+  if (Obj.getBytesInAddress() == 4 && Obj.isLittleEndian())
+    DebugObj =
+        createRTDyldELFObject<ELF32LE>(Buffer->getMemBufferRef(), Obj, L);
+  else if (Obj.getBytesInAddress() == 4 && !Obj.isLittleEndian())
+    DebugObj =
+        createRTDyldELFObject<ELF32BE>(Buffer->getMemBufferRef(), Obj, L);
+  else if (Obj.getBytesInAddress() == 8 && !Obj.isLittleEndian())
+    DebugObj =
+        createRTDyldELFObject<ELF64BE>(Buffer->getMemBufferRef(), Obj, L);
+  else if (Obj.getBytesInAddress() == 8 && Obj.isLittleEndian())
+    DebugObj =
+        createRTDyldELFObject<ELF64LE>(Buffer->getMemBufferRef(), Obj, L);
+  else
+    llvm_unreachable("Unexpected ELF format");
+
+  handleAllErrors(DebugObj.takeError());
+  return OwningBinary<ObjectFile>(std::move(*DebugObj), std::move(Buffer));
+}
+
+OwningBinary<ObjectFile>
+LoadedELFObjectInfo::getObjectForDebug(const ObjectFile &Obj) const {
+  return createELFDebugObject(Obj, *this);
+}
+
+} // anonymous namespace
+
+namespace llvm {
+
+RuntimeDyldELF::RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
+                               JITSymbolResolver &Resolver)
+    : RuntimeDyldImpl(MemMgr, Resolver), GOTSectionID(0), CurrentGOTIndex(0) {}
+RuntimeDyldELF::~RuntimeDyldELF() {}
+
+// Hand every pending .eh_frame section to the memory manager (for unwinder
+// registration) and clear the pending list.
+void RuntimeDyldELF::registerEHFrames() {
+  for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
+    SID EHFrameSID = UnregisteredEHFrameSections[i];
+    uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
+    uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
+    size_t EHFrameSize = Sections[EHFrameSID].getSize();
+    MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
+  }
+  UnregisteredEHFrameSections.clear();
+}
+
+// Factory: MIPS targets get the MIPS-specific subclass; everything else gets
+// the generic ELF dynamic linker.
+std::unique_ptr<RuntimeDyldELF>
+llvm::RuntimeDyldELF::create(Triple::ArchType Arch,
+                             RuntimeDyld::MemoryManager &MemMgr,
+                             JITSymbolResolver &Resolver) {
+  switch (Arch) {
+  default:
+    return std::make_unique<RuntimeDyldELF>(MemMgr, Resolver);
+  case Triple::mips:
+  case Triple::mipsel:
+  case Triple::mips64:
+  case Triple::mips64el:
+    return std::make_unique<RuntimeDyldELFMips>(MemMgr, Resolver);
+  }
+}
+
+// Load an object; on failure record the error string and set HasError, then
+// return null (callers check for null rather than an Error).
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldELF::loadObject(const object::ObjectFile &O) {
+  if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
+    return std::make_unique<LoadedELFObjectInfo>(*this, *ObjSectionToIDOrErr);
+  else {
+    HasError = true;
+    raw_string_ostream ErrStream(ErrorStr);
+    logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+    return nullptr;
+  }
+}
+
+// Apply an x86-64 ELF relocation of the given Type at Section+Offset.
+// Value is the resolved symbol address (S), Addend is the explicit addend (A),
+// and FinalAddress (P) is the target load address of the fixup site, so
+// PC-relative types compute S + A - P per the x86-64 System V psABI.
+void RuntimeDyldELF::resolveX86_64Relocation(const SectionEntry &Section,
+                                             uint64_t Offset, uint64_t Value,
+                                             uint32_t Type, int64_t Addend,
+                                             uint64_t SymOffset) {
+  switch (Type) {
+  default:
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  case ELF::R_X86_64_NONE:
+    break;
+  case ELF::R_X86_64_8: {
+    Value += Addend;
+    assert((int64_t)Value <= INT8_MAX && (int64_t)Value >= INT8_MIN);
+    uint8_t TruncatedAddr = (Value & 0xFF);
+    *Section.getAddressWithOffset(Offset) = TruncatedAddr;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_X86_64_16: {
+    Value += Addend;
+    assert((int64_t)Value <= INT16_MAX && (int64_t)Value >= INT16_MIN);
+    uint16_t TruncatedAddr = (Value & 0xFFFF);
+    support::ulittle16_t::ref(Section.getAddressWithOffset(Offset)) =
+        TruncatedAddr;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_X86_64_64: {
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+        Value + Addend;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_X86_64_32:
+  case ELF::R_X86_64_32S: {
+    Value += Addend;
+    // _32 is zero-extended (unsigned range); _32S is sign-extended.
+    assert((Type == ELF::R_X86_64_32 && (Value <= UINT32_MAX)) ||
+           (Type == ELF::R_X86_64_32S &&
+            ((int64_t)Value <= INT32_MAX && (int64_t)Value >= INT32_MIN)));
+    uint32_t TruncatedAddr = (Value & 0xFFFFFFFF);
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        TruncatedAddr;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", TruncatedAddr) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_X86_64_PC8: {
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    int64_t RealOffset = Value + Addend - FinalAddress;
+    assert(isInt<8>(RealOffset));
+    int8_t TruncOffset = (RealOffset & 0xFF);
+    Section.getAddress()[Offset] = TruncOffset;
+    break;
+  }
+  case ELF::R_X86_64_PC32: {
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    int64_t RealOffset = Value + Addend - FinalAddress;
+    assert(isInt<32>(RealOffset));
+    int32_t TruncOffset = (RealOffset & 0xFFFFFFFF);
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        TruncOffset;
+    break;
+  }
+  case ELF::R_X86_64_PC64: {
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    int64_t RealOffset = Value + Addend - FinalAddress;
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+        RealOffset;
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", RealOffset) << " at "
+                      << format("%p\n", FinalAddress));
+    break;
+  }
+  case ELF::R_X86_64_GOTOFF64: {
+    // Compute Value - GOTBase.
+    uint64_t GOTBase = 0;
+    for (const auto &Section : Sections) {
+      if (Section.getName() == ".got") {
+        GOTBase = Section.getLoadAddressWithOffset(0);
+        break;
+      }
+    }
+    assert(GOTBase != 0 && "missing GOT");
+    int64_t GOTOffset = Value - GOTBase + Addend;
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = GOTOffset;
+    break;
+  }
+  case ELF::R_X86_64_DTPMOD64: {
+    // We only have one DSO, so the module id is always 1.
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) = 1;
+    break;
+  }
+  case ELF::R_X86_64_DTPOFF64:
+  case ELF::R_X86_64_TPOFF64: {
+    // DTPOFF64 should resolve to the offset in the TLS block, TPOFF64 to the
+    // offset in the *initial* TLS block. Since we are statically linking, all
+    // TLS blocks already exist in the initial block, so resolve both
+    // relocations equally.
+    support::ulittle64_t::ref(Section.getAddressWithOffset(Offset)) =
+        Value + Addend;
+    break;
+  }
+  case ELF::R_X86_64_DTPOFF32:
+  case ELF::R_X86_64_TPOFF32: {
+    // As for the (D)TPOFF64 relocations above, both DTPOFF32 and TPOFF32 can
+    // be resolved equally.
+    int64_t RealValue = Value + Addend;
+    assert(RealValue >= INT32_MIN && RealValue <= INT32_MAX);
+    int32_t TruncValue = RealValue;
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        TruncValue;
+    break;
+  }
+  }
+}
+
+// Apply a 32-bit x86 (i386) ELF relocation at Section+Offset. Only the types
+// emitted by the LLVM ELF object writer are supported.
+void RuntimeDyldELF::resolveX86Relocation(const SectionEntry &Section,
+                                          uint64_t Offset, uint32_t Value,
+                                          uint32_t Type, int32_t Addend) {
+  switch (Type) {
+  case ELF::R_386_32: {
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        Value + Addend;
+    break;
+  }
+  // Handle R_386_PLT32 like R_386_PC32 since it should be able to
+  // reach any 32 bit address.
+  case ELF::R_386_PLT32:
+  case ELF::R_386_PC32: {
+    uint32_t FinalAddress =
+        Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
+    uint32_t RealOffset = Value + Addend - FinalAddress;
+    support::ulittle32_t::ref(Section.getAddressWithOffset(Offset)) =
+        RealOffset;
+    break;
+  }
+  default:
+    // There are other relocation types, but it appears these are the
+    // only ones currently used by the LLVM ELF object writer
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  }
+}
+
+// Apply an AArch64 ELF relocation at Section+Offset. Value is the resolved
+// symbol address (S), Addend the addend (A), FinalAddress the fixup site's
+// load address (P); operations follow "ELF for the Arm 64-bit Architecture".
+void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section,
+                                              uint64_t Offset, uint64_t Value,
+                                              uint32_t Type, int64_t Addend) {
+  uint32_t *TargetPtr =
+      reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+  uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+  // Data should use target endian. Code should always use little endian.
+  bool isBE = Arch == Triple::aarch64_be;
+
+  LLVM_DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x"
+                    << format("%llx", Section.getAddressWithOffset(Offset))
+                    << " FinalAddress: 0x" << format("%llx", FinalAddress)
+                    << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+                    << format("%x", Type) << " Addend: 0x"
+                    << format("%llx", Addend) << "\n");
+
+  switch (Type) {
+  default:
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  case ELF::R_AARCH64_NONE:
+    break;
+  case ELF::R_AARCH64_ABS16: {
+    uint64_t Result = Value + Addend;
+    assert(static_cast<int64_t>(Result) >= INT16_MIN && Result < UINT16_MAX);
+    write(isBE, TargetPtr, static_cast<uint16_t>(Result & 0xffffU));
+    break;
+  }
+  case ELF::R_AARCH64_ABS32: {
+    uint64_t Result = Value + Addend;
+    assert(static_cast<int64_t>(Result) >= INT32_MIN && Result < UINT32_MAX);
+    write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
+    break;
+  }
+  case ELF::R_AARCH64_ABS64:
+    write(isBE, TargetPtr, Value + Addend);
+    break;
+  case ELF::R_AARCH64_PLT32: {
+    uint64_t Result = Value + Addend - FinalAddress;
+    assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+           static_cast<int64_t>(Result) <= INT32_MAX);
+    write(isBE, TargetPtr, static_cast<uint32_t>(Result));
+    break;
+  }
+  case ELF::R_AARCH64_PREL32: {
+    uint64_t Result = Value + Addend - FinalAddress;
+    assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+           static_cast<int64_t>(Result) <= UINT32_MAX);
+    write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU));
+    break;
+  }
+  case ELF::R_AARCH64_PREL64:
+    write(isBE, TargetPtr, Value + Addend - FinalAddress);
+    break;
+  case ELF::R_AARCH64_CONDBR19: {
+    uint64_t BranchImm = Value + Addend - FinalAddress;
+
+    assert(isInt<21>(BranchImm));
+    *TargetPtr &= 0xff00001fU;
+    // Immediate:20:2 goes in bits 23:5 of Bcc, CBZ, CBNZ
+    or32le(TargetPtr, (BranchImm & 0x001FFFFC) << 3);
+    break;
+  }
+  case ELF::R_AARCH64_TSTBR14: {
+    uint64_t BranchImm = Value + Addend - FinalAddress;
+
+    assert(isInt<16>(BranchImm));
+
+    *TargetPtr &= 0xfff8001fU;
+    // Immediate:15:2 goes in bits 18:5 of TBZ, TBNZ
+    or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) << 3);
+    break;
+  }
+  case ELF::R_AARCH64_CALL26: // fallthrough
+  case ELF::R_AARCH64_JUMP26: {
+    // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the
+    // calculation.
+    uint64_t BranchImm = Value + Addend - FinalAddress;
+
+    // "Check that -2^27 <= result < 2^27".
+    assert(isInt<28>(BranchImm));
+    or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2);
+    break;
+  }
+  case ELF::R_AARCH64_MOVW_UABS_G3:
+    or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43);
+    break;
+  case ELF::R_AARCH64_MOVW_UABS_G2_NC:
+    or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27);
+    break;
+  case ELF::R_AARCH64_MOVW_UABS_G1_NC:
+    or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11);
+    break;
+  case ELF::R_AARCH64_MOVW_UABS_G0_NC:
+    or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5);
+    break;
+  case ELF::R_AARCH64_ADR_PREL_PG_HI21: {
+    // Operation: Page(S+A) - Page(P)
+    uint64_t Result =
+        ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL);
+
+    // Check that -2^32 <= X < 2^32
+    assert(isInt<33>(Result) && "overflow check failed for relocation");
+
+    // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken
+    // from bits 32:12 of X.
+    write32AArch64Addr(TargetPtr, Result >> 12);
+    break;
+  }
+  case ELF::R_AARCH64_ADD_ABS_LO12_NC:
+    // Operation: S + A
+    // Immediate goes in bits 21:10 of LD/ST instruction, taken
+    // from bits 11:0 of X
+    or32AArch64Imm(TargetPtr, Value + Addend);
+    break;
+  case ELF::R_AARCH64_LDST8_ABS_LO12_NC:
+    // Operation: S + A
+    // Immediate goes in bits 21:10 of LD/ST instruction, taken
+    // from bits 11:0 of X
+    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 0, 11));
+    break;
+  case ELF::R_AARCH64_LDST16_ABS_LO12_NC:
+    // Operation: S + A
+    // Immediate goes in bits 21:10 of LD/ST instruction, taken
+    // from bits 11:1 of X
+    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 1, 11));
+    break;
+  case ELF::R_AARCH64_LDST32_ABS_LO12_NC:
+    // Operation: S + A
+    // Immediate goes in bits 21:10 of LD/ST instruction, taken
+    // from bits 11:2 of X
+    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11));
+    break;
+  case ELF::R_AARCH64_LDST64_ABS_LO12_NC:
+    // Operation: S + A
+    // Immediate goes in bits 21:10 of LD/ST instruction, taken
+    // from bits 11:3 of X
+    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11));
+    break;
+  case ELF::R_AARCH64_LDST128_ABS_LO12_NC:
+    // Operation: S + A
+    // Immediate goes in bits 21:10 of LD/ST instruction, taken
+    // from bits 11:4 of X
+    or32AArch64Imm(TargetPtr, getBits(Value + Addend, 4, 11));
+    break;
+  case ELF::R_AARCH64_LD_PREL_LO19: {
+    // Operation: S + A - P
+    uint64_t Result = Value + Addend - FinalAddress;
+
+    // "Check that -2^20 <= result < 2^20".
+    assert(isInt<21>(Result));
+
+    *TargetPtr &= 0xff00001fU;
+    // Immediate goes in bits 23:5 of LD imm instruction, taken
+    // from bits 20:2 of X
+    *TargetPtr |= ((Result & 0xffc) << (5 - 2));
+    break;
+  }
+  case ELF::R_AARCH64_ADR_PREL_LO21: {
+    // Operation: S + A - P
+    uint64_t Result = Value + Addend - FinalAddress;
+
+    // "Check that -2^20 <= result < 2^20".
+    assert(isInt<21>(Result));
+
+    *TargetPtr &= 0x9f00001fU;
+    // Immediate goes in bits 23:5, 30:29 of ADR imm instruction, taken
+    // from bits 20:0 of X
+    *TargetPtr |= ((Result & 0xffc) << (5 - 2));
+    *TargetPtr |= (Result & 0x3) << 29;
+    break;
+  }
+  }
+}
+
+// Apply a 32-bit ARM (AArch32, ARM mode) ELF relocation at Section+Offset.
+// Value is S, Addend is A (folded into Value immediately), FinalAddress is P.
+void RuntimeDyldELF::resolveARMRelocation(const SectionEntry &Section,
+                                          uint64_t Offset, uint32_t Value,
+                                          uint32_t Type, int32_t Addend) {
+  // TODO: Add Thumb relocations.
+  uint32_t *TargetPtr =
+      reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset));
+  uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset) & 0xFFFFFFFF;
+  Value += Addend;
+
+  LLVM_DEBUG(dbgs() << "resolveARMRelocation, LocalAddress: "
+                    << Section.getAddressWithOffset(Offset)
+                    << " FinalAddress: " << format("%p", FinalAddress)
+                    << " Value: " << format("%x", Value)
+                    << " Type: " << format("%x", Type)
+                    << " Addend: " << format("%x", Addend) << "\n");
+
+  switch (Type) {
+  default:
+    llvm_unreachable("Not implemented relocation type!");
+
+  case ELF::R_ARM_NONE:
+    break;
+  // Write a 31bit signed offset
+  case ELF::R_ARM_PREL31:
+    support::ulittle32_t::ref{TargetPtr} =
+        (support::ulittle32_t::ref{TargetPtr} & 0x80000000) |
+        ((Value - FinalAddress) & ~0x80000000);
+    break;
+  case ELF::R_ARM_TARGET1:
+  case ELF::R_ARM_ABS32:
+    support::ulittle32_t::ref{TargetPtr} = Value;
+    break;
+  // Write first 16 bit of 32 bit value to the mov instruction.
+  // Last 4 bit should be shifted.
+  case ELF::R_ARM_MOVW_ABS_NC:
+  case ELF::R_ARM_MOVT_ABS:
+    if (Type == ELF::R_ARM_MOVW_ABS_NC)
+      Value = Value & 0xFFFF;
+    else if (Type == ELF::R_ARM_MOVT_ABS)
+      Value = (Value >> 16) & 0xFFFF;
+    support::ulittle32_t::ref{TargetPtr} =
+        (support::ulittle32_t::ref{TargetPtr} & ~0x000F0FFF) | (Value & 0xFFF) |
+        (((Value >> 12) & 0xF) << 16);
+    break;
+  // Write 24 bit relative value to the branch instruction.
+  case ELF::R_ARM_PC24: // Fall through.
+  case ELF::R_ARM_CALL: // Fall through.
+  case ELF::R_ARM_JUMP24:
+    // -8 accounts for the ARM pipeline: PC reads as instruction address + 8.
+    int32_t RelValue = static_cast<int32_t>(Value - FinalAddress - 8);
+    RelValue = (RelValue & 0x03FFFFFC) >> 2;
+    assert((support::ulittle32_t::ref{TargetPtr} & 0xFFFFFF) == 0xFFFFFE);
+    support::ulittle32_t::ref{TargetPtr} =
+        (support::ulittle32_t::ref{TargetPtr} & 0xFF000000) | RelValue;
+    break;
+  }
+}
+
+// Record which MIPS ABI (O32 / N32 / N64) the object was built for, based on
+// its ELF header flags and file format name; clears all flags for non-MIPS.
+void RuntimeDyldELF::setMipsABI(const ObjectFile &Obj) {
+  if (Arch == Triple::UnknownArch ||
+      !StringRef(Triple::getArchTypePrefix(Arch)).equals("mips")) {
+    IsMipsO32ABI = false;
+    IsMipsN32ABI = false;
+    IsMipsN64ABI = false;
+    return;
+  }
+  if (auto *E = dyn_cast<ELFObjectFileBase>(&Obj)) {
+    unsigned AbiVariant = E->getPlatformFlags();
+    IsMipsO32ABI = AbiVariant & ELF::EF_MIPS_ABI_O32;
+    IsMipsN32ABI = AbiVariant & ELF::EF_MIPS_ABI2;
+  }
+  IsMipsN64ABI = Obj.getFileFormatName().equals("elf64-mips");
+}
+
+// Return the .TOC. section and offset.
+Error RuntimeDyldELF::findPPC64TOCSection(const ELFObjectFileBase &Obj,
+                                          ObjSectionToIDMap &LocalSections,
+                                          RelocationValueRef &Rel) {
+  // Set a default SectionID in case we do not find a TOC section below.
+  // This may happen for references to TOC base (sym@toc, .opd
+  // relocation) without a .toc directive. In this case just use the
+  // first section (which is usually the .opd) since the code won't
+  // reference the .toc base directly.
+  Rel.SymbolName = nullptr;
+  Rel.SectionID = 0;
+
+  // The TOC consists of sections .got, .toc, .tocbss, .plt in that
+  // order. The TOC starts where the first of these sections starts.
+  for (auto &Section : Obj.sections()) {
+    Expected<StringRef> NameOrErr = Section.getName();
+    if (!NameOrErr)
+      return NameOrErr.takeError();
+    StringRef SectionName = *NameOrErr;
+
+    if (SectionName == ".got"
+        || SectionName == ".toc"
+        || SectionName == ".tocbss"
+        || SectionName == ".plt") {
+      if (auto SectionIDOrErr =
+              findOrEmitSection(Obj, Section, false, LocalSections))
+        Rel.SectionID = *SectionIDOrErr;
+      else
+        return SectionIDOrErr.takeError();
+      break;
+    }
+  }
+
+  // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
+  // thus permitting a full 64 Kbytes segment.
+  Rel.Addend = 0x8000;
+
+  return Error::success();
+}
+
+// Returns the sections and offset associated with the ODP entry referenced
+// by Symbol.
+//
+// Scans every relocation section targeting ".opd" for an
+// R_PPC64_ADDR64 / R_PPC64_TOC relocation pair whose offset matches
+// Rel.Addend; on a match, rewrites Rel to point at the function's actual
+// code section and addend. Aborts (llvm_unreachable) if no entry matches.
+Error RuntimeDyldELF::findOPDEntrySection(const ELFObjectFileBase &Obj,
+                                          ObjSectionToIDMap &LocalSections,
+                                          RelocationValueRef &Rel) {
+  // Get the ELF symbol value (st_value) to compare with Relocation offset in
+  // .opd entries
+  for (section_iterator si = Obj.section_begin(), se = Obj.section_end();
+       si != se; ++si) {
+
+    Expected<section_iterator> RelSecOrErr = si->getRelocatedSection();
+    if (!RelSecOrErr)
+      report_fatal_error(Twine(toString(RelSecOrErr.takeError())));
+
+    section_iterator RelSecI = *RelSecOrErr;
+    if (RelSecI == Obj.section_end())
+      continue;
+
+    Expected<StringRef> NameOrErr = RelSecI->getName();
+    if (!NameOrErr)
+      return NameOrErr.takeError();
+    StringRef RelSectionName = *NameOrErr;
+
+    // Only relocation sections that apply to .opd are of interest.
+    if (RelSectionName != ".opd")
+      continue;
+
+    for (elf_relocation_iterator i = si->relocation_begin(),
+                                 e = si->relocation_end();
+         i != e;) {
+      // The R_PPC64_ADDR64 relocation indicates the first field
+      // of a .opd entry
+      uint64_t TypeFunc = i->getType();
+      if (TypeFunc != ELF::R_PPC64_ADDR64) {
+        ++i;
+        continue;
+      }
+
+      uint64_t TargetSymbolOffset = i->getOffset();
+      symbol_iterator TargetSymbol = i->getSymbol();
+      int64_t Addend;
+      if (auto AddendOrErr = i->getAddend())
+        Addend = *AddendOrErr;
+      else
+        return AddendOrErr.takeError();
+
+      ++i;
+      if (i == e)
+        break;
+
+      // Just check if following relocation is a R_PPC64_TOC
+      uint64_t TypeTOC = i->getType();
+      if (TypeTOC != ELF::R_PPC64_TOC)
+        continue;
+
+      // Finally compares the Symbol value and the target symbol offset
+      // to check if this .opd entry refers to the symbol the relocation
+      // points to.
+      if (Rel.Addend != (int64_t)TargetSymbolOffset)
+        continue;
+
+      section_iterator TSI = Obj.section_end();
+      if (auto TSIOrErr = TargetSymbol->getSection())
+        TSI = *TSIOrErr;
+      else
+        return TSIOrErr.takeError();
+      assert(TSI != Obj.section_end() && "TSI should refer to a valid section");
+
+      bool IsCode = TSI->isText();
+      // Emit (or look up) the section holding the function body and redirect
+      // the relocation value at it.
+      if (auto SectionIDOrErr = findOrEmitSection(Obj, *TSI, IsCode,
+                                                  LocalSections))
+        Rel.SectionID = *SectionIDOrErr;
+      else
+        return SectionIDOrErr.takeError();
+      Rel.Addend = (intptr_t)Addend;
+      return Error::success();
+    }
+  }
+  llvm_unreachable("Attempting to get address of ODP entry!");
+}
+
+// Relocation masks following the #lo(value), #hi(value), #ha(value),
+// #higher(value), #highera(value), #highest(value), and #highesta(value)
+// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
+// document.
+
+// #lo(value): bits 0-15.
+static inline uint16_t applyPPClo(uint64_t value) { return value & 0xffff; }
+
+// #hi(value): bits 16-31.
+static inline uint16_t applyPPChi(uint64_t value) {
+  return (value >> 16) & 0xffff;
+}
+
+// #ha(value): bits 16-31, adjusted so that #ha + sign-extended #lo == value.
+static inline uint16_t applyPPCha (uint64_t value) {
+  return ((value + 0x8000) >> 16) & 0xffff;
+}
+
+// #higher(value): bits 32-47.
+static inline uint16_t applyPPChigher(uint64_t value) {
+  return (value >> 32) & 0xffff;
+}
+
+// #highera(value): bits 32-47 with the #lo carry-adjustment bias.
+static inline uint16_t applyPPChighera (uint64_t value) {
+  return ((value + 0x8000) >> 32) & 0xffff;
+}
+
+// #highest(value): bits 48-63.
+static inline uint16_t applyPPChighest(uint64_t value) {
+  return (value >> 48) & 0xffff;
+}
+
+// #highesta(value): bits 48-63 with the #lo carry-adjustment bias.
+static inline uint16_t applyPPChighesta (uint64_t value) {
+  return ((value + 0x8000) >> 48) & 0xffff;
+}
+
+// Apply a 32-bit PowerPC relocation of the given Type at Section+Offset.
+// Only the ADDR16 LO/HI/HA forms are supported; anything else is fatal.
+void RuntimeDyldELF::resolvePPC32Relocation(const SectionEntry &Section,
+                                            uint64_t Offset, uint64_t Value,
+                                            uint32_t Type, int64_t Addend) {
+  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+  switch (Type) {
+  default:
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  case ELF::R_PPC_ADDR16_LO:
+    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+    break;
+  case ELF::R_PPC_ADDR16_HI:
+    writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+    break;
+  case ELF::R_PPC_ADDR16_HA:
+    writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+    break;
+  }
+}
+
+// Apply a 64-bit PowerPC relocation of the given Type at Section+Offset.
+// Value is the resolved target address; Addend is added before the
+// per-type mask/shift is applied. Fields are written big-endian.
+void RuntimeDyldELF::resolvePPC64Relocation(const SectionEntry &Section,
+                                            uint64_t Offset, uint64_t Value,
+                                            uint32_t Type, int64_t Addend) {
+  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+  switch (Type) {
+  default:
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  case ELF::R_PPC64_ADDR16:
+    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+    break;
+  case ELF::R_PPC64_ADDR16_DS:
+    // DS-form variants keep the low two bits clear.
+    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
+    break;
+  case ELF::R_PPC64_ADDR16_LO:
+    writeInt16BE(LocalAddress, applyPPClo(Value + Addend));
+    break;
+  case ELF::R_PPC64_ADDR16_LO_DS:
+    writeInt16BE(LocalAddress, applyPPClo(Value + Addend) & ~3);
+    break;
+  case ELF::R_PPC64_ADDR16_HI:
+  case ELF::R_PPC64_ADDR16_HIGH:
+    writeInt16BE(LocalAddress, applyPPChi(Value + Addend));
+    break;
+  case ELF::R_PPC64_ADDR16_HA:
+  case ELF::R_PPC64_ADDR16_HIGHA:
+    writeInt16BE(LocalAddress, applyPPCha(Value + Addend));
+    break;
+  case ELF::R_PPC64_ADDR16_HIGHER:
+    writeInt16BE(LocalAddress, applyPPChigher(Value + Addend));
+    break;
+  case ELF::R_PPC64_ADDR16_HIGHERA:
+    writeInt16BE(LocalAddress, applyPPChighera(Value + Addend));
+    break;
+  case ELF::R_PPC64_ADDR16_HIGHEST:
+    writeInt16BE(LocalAddress, applyPPChighest(Value + Addend));
+    break;
+  case ELF::R_PPC64_ADDR16_HIGHESTA:
+    writeInt16BE(LocalAddress, applyPPChighesta(Value + Addend));
+    break;
+  case ELF::R_PPC64_ADDR14: {
+    assert(((Value + Addend) & 3) == 0);
+    // Preserve the AA/LK bits in the branch instruction
+    uint8_t aalk = *(LocalAddress + 3);
+    writeInt16BE(LocalAddress + 2, (aalk & 3) | ((Value + Addend) & 0xfffc));
+  } break;
+  case ELF::R_PPC64_REL16_LO: {
+    // PC-relative variants compute the delta from the section's final
+    // (target process) load address.
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    uint64_t Delta = Value - FinalAddress + Addend;
+    writeInt16BE(LocalAddress, applyPPClo(Delta));
+  } break;
+  case ELF::R_PPC64_REL16_HI: {
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    uint64_t Delta = Value - FinalAddress + Addend;
+    writeInt16BE(LocalAddress, applyPPChi(Delta));
+  } break;
+  case ELF::R_PPC64_REL16_HA: {
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    uint64_t Delta = Value - FinalAddress + Addend;
+    writeInt16BE(LocalAddress, applyPPCha(Delta));
+  } break;
+  case ELF::R_PPC64_ADDR32: {
+    int64_t Result = static_cast<int64_t>(Value + Addend);
+    if (SignExtend64<32>(Result) != Result)
+      llvm_unreachable("Relocation R_PPC64_ADDR32 overflow");
+    writeInt32BE(LocalAddress, Result);
+  } break;
+  case ELF::R_PPC64_REL24: {
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
+    if (SignExtend64<26>(delta) != delta)
+      llvm_unreachable("Relocation R_PPC64_REL24 overflow");
+    // We preserve bits other than LI field, i.e. PO and AA/LK fields.
+    uint32_t Inst = readBytesUnaligned(LocalAddress, 4);
+    writeInt32BE(LocalAddress, (Inst & 0xFC000003) | (delta & 0x03FFFFFC));
+  } break;
+  case ELF::R_PPC64_REL32: {
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    int64_t delta = static_cast<int64_t>(Value - FinalAddress + Addend);
+    if (SignExtend64<32>(delta) != delta)
+      llvm_unreachable("Relocation R_PPC64_REL32 overflow");
+    writeInt32BE(LocalAddress, delta);
+  } break;
+  case ELF::R_PPC64_REL64: {
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+    uint64_t Delta = Value - FinalAddress + Addend;
+    writeInt64BE(LocalAddress, Delta);
+  } break;
+  case ELF::R_PPC64_ADDR64:
+    writeInt64BE(LocalAddress, Value + Addend);
+    break;
+  }
+}
+
+// Apply a SystemZ (s390x) relocation at Section+Offset. The *DBL variants
+// store the PC-relative delta divided by two (halfword-scaled), as the
+// assertions below check. All fields are written big-endian.
+void RuntimeDyldELF::resolveSystemZRelocation(const SectionEntry &Section,
+                                              uint64_t Offset, uint64_t Value,
+                                              uint32_t Type, int64_t Addend) {
+  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+  switch (Type) {
+  default:
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  case ELF::R_390_PC16DBL:
+  case ELF::R_390_PLT16DBL: {
+    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+    assert(int16_t(Delta / 2) * 2 == Delta && "R_390_PC16DBL overflow");
+    writeInt16BE(LocalAddress, Delta / 2);
+    break;
+  }
+  case ELF::R_390_PC32DBL:
+  case ELF::R_390_PLT32DBL: {
+    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+    assert(int32_t(Delta / 2) * 2 == Delta && "R_390_PC32DBL overflow");
+    writeInt32BE(LocalAddress, Delta / 2);
+    break;
+  }
+  case ELF::R_390_PC16: {
+    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+    assert(int16_t(Delta) == Delta && "R_390_PC16 overflow");
+    writeInt16BE(LocalAddress, Delta);
+    break;
+  }
+  case ELF::R_390_PC32: {
+    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+    assert(int32_t(Delta) == Delta && "R_390_PC32 overflow");
+    writeInt32BE(LocalAddress, Delta);
+    break;
+  }
+  case ELF::R_390_PC64: {
+    int64_t Delta = (Value + Addend) - Section.getLoadAddressWithOffset(Offset);
+    writeInt64BE(LocalAddress, Delta);
+    break;
+  }
+  // Absolute 8/16/32/64-bit relocations: store Value + Addend directly.
+  case ELF::R_390_8:
+    *LocalAddress = (uint8_t)(Value + Addend);
+    break;
+  case ELF::R_390_16:
+    writeInt16BE(LocalAddress, Value + Addend);
+    break;
+  case ELF::R_390_32:
+    writeInt32BE(LocalAddress, Value + Addend);
+    break;
+  case ELF::R_390_64:
+    writeInt64BE(LocalAddress, Value + Addend);
+    break;
+  }
+}
+
+// Apply a BPF relocation at Section+Offset. Endianness of the write follows
+// the target triple (bpfeb writes big-endian). Several BPF relocation types
+// are deliberately no-ops here.
+void RuntimeDyldELF::resolveBPFRelocation(const SectionEntry &Section,
+                                          uint64_t Offset, uint64_t Value,
+                                          uint32_t Type, int64_t Addend) {
+  bool isBE = Arch == Triple::bpfeb;
+
+  switch (Type) {
+  default:
+    report_fatal_error("Relocation type not implemented yet!");
+    break;
+  // These types require no patching of the loaded image.
+  case ELF::R_BPF_NONE:
+  case ELF::R_BPF_64_64:
+  case ELF::R_BPF_64_32:
+  case ELF::R_BPF_64_NODYLD32:
+    break;
+  case ELF::R_BPF_64_ABS64: {
+    write(isBE, Section.getAddressWithOffset(Offset), Value + Addend);
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", (Value + Addend)) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  case ELF::R_BPF_64_ABS32: {
+    Value += Addend;
+    // The 32-bit absolute form must fit in an unsigned 32-bit field.
+    assert(Value <= UINT32_MAX);
+    write(isBE, Section.getAddressWithOffset(Offset), static_cast<uint32_t>(Value));
+    LLVM_DEBUG(dbgs() << "Writing " << format("%p", Value) << " at "
+                      << format("%p\n", Section.getAddressWithOffset(Offset)));
+    break;
+  }
+  }
+}
+
+// The target location for the relocation is described by RE.SectionID and
+// RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
+// SectionEntry has three members describing its location.
+// SectionEntry::Address is the address at which the section has been loaded
+// into memory in the current (host) process. SectionEntry::LoadAddress is the
+// address that the section will have in the target process.
+// SectionEntry::ObjAddress is the address of the bits for this section in the
+// original emitted object image (also in the current address space).
+//
+// Relocations will be applied as if the section were loaded at
+// SectionEntry::LoadAddress, but they will be applied at an address based
+// on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer to
+// Target memory contents if they are required for value calculations.
+//
+// The Value parameter here is the load address of the symbol for the
+// relocation to be applied. For relocations which refer to symbols in the
+// current object Value will be the LoadAddress of the section in which
+// the symbol resides (RE.Addend provides additional information about the
+// symbol location). For external symbols, Value will be the address of the
+// symbol in the target address space.
+//
+// This overload just looks up the SectionEntry and forwards to the
+// per-architecture overload below.
+void RuntimeDyldELF::resolveRelocation(const RelocationEntry &RE,
+                                       uint64_t Value) {
+  const SectionEntry &Section = Sections[RE.SectionID];
+  return resolveRelocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+                           RE.SymOffset, RE.SectionID);
+}
+
+// Dispatch a relocation to the resolver for the current target architecture.
+// 32-bit targets (x86, ARM) receive Value and Addend truncated to 32 bits.
+void RuntimeDyldELF::resolveRelocation(const SectionEntry &Section,
+                                       uint64_t Offset, uint64_t Value,
+                                       uint32_t Type, int64_t Addend,
+                                       uint64_t SymOffset, SID SectionID) {
+  switch (Arch) {
+  case Triple::x86_64:
+    resolveX86_64Relocation(Section, Offset, Value, Type, Addend, SymOffset);
+    break;
+  case Triple::x86:
+    resolveX86Relocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
+                         (uint32_t)(Addend & 0xffffffffL));
+    break;
+  case Triple::aarch64:
+  case Triple::aarch64_be:
+    resolveAArch64Relocation(Section, Offset, Value, Type, Addend);
+    break;
+  case Triple::arm: // Fall through.
+  case Triple::armeb:
+  case Triple::thumb:
+  case Triple::thumbeb:
+    resolveARMRelocation(Section, Offset, (uint32_t)(Value & 0xffffffffL), Type,
+                         (uint32_t)(Addend & 0xffffffffL));
+    break;
+  case Triple::ppc: // Fall through.
+  case Triple::ppcle:
+    resolvePPC32Relocation(Section, Offset, Value, Type, Addend);
+    break;
+  case Triple::ppc64: // Fall through.
+  case Triple::ppc64le:
+    resolvePPC64Relocation(Section, Offset, Value, Type, Addend);
+    break;
+  case Triple::systemz:
+    resolveSystemZRelocation(Section, Offset, Value, Type, Addend);
+    break;
+  case Triple::bpfel:
+  case Triple::bpfeb:
+    resolveBPFRelocation(Section, Offset, Value, Type, Addend);
+    break;
+  default:
+    llvm_unreachable("Unsupported CPU type!");
+  }
+}
+
+// Address of Offset within the section's bytes in the *original* object
+// image (ObjAddress), used to read placeholder values embedded in the
+// instruction stream before relocation.
+void *RuntimeDyldELF::computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const {
+  return (void *)(Sections[SectionID].getObjAddress() + Offset);
+}
+
+// Record a relocation that needs no stub or GOT handling: queue it against
+// the named symbol when Value.SymbolName is set, otherwise against the
+// section identified by Value.SectionID.
+void RuntimeDyldELF::processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value) {
+  RelocationEntry RE(SectionID, Offset, RelType, Value.Addend, Value.Offset);
+  if (Value.SymbolName)
+    addRelocationForSymbol(RE, Value.SymbolName);
+  else
+    addRelocationForSection(RE, Value.SectionID);
+}
+
+// For a MIPS HI16/PCHI16/GOT16 relocation, return the LO16-style relocation
+// type it pairs with. GOT16 forms only pair when the symbol is local;
+// unpaired types yield R_MIPS_NONE.
+uint32_t RuntimeDyldELF::getMatchingLoRelocation(uint32_t RelType,
+                                                 bool IsLocal) const {
+  switch (RelType) {
+  case ELF::R_MICROMIPS_GOT16:
+    if (IsLocal)
+      return ELF::R_MICROMIPS_LO16;
+    break;
+  case ELF::R_MICROMIPS_HI16:
+    return ELF::R_MICROMIPS_LO16;
+  case ELF::R_MIPS_GOT16:
+    if (IsLocal)
+      return ELF::R_MIPS_LO16;
+    break;
+  case ELF::R_MIPS_HI16:
+    return ELF::R_MIPS_LO16;
+  case ELF::R_MIPS_PCHI16:
+    return ELF::R_MIPS_PCLO16;
+  default:
+    break;
+  }
+  // No matching LO relocation for this type (or symbol is not local).
+  return ELF::R_MIPS_NONE;
+}
+
+// Sometimes we don't need to create thunk for a branch.
+// This typically happens when branch target is located
+// in the same object file. In such case target is either
+// a weak symbol or symbol in a different executable section.
+// This function checks if branch target is located in the
+// same object file and if distance between source and target
+// fits R_AARCH64_CALL26 relocation. If both conditions are
+// met, it emits direct jump to the target and returns true.
+// Otherwise false is returned and thunk is created.
+bool RuntimeDyldELF::resolveAArch64ShortBranch(
+    unsigned SectionID, relocation_iterator RelI,
+    const RelocationValueRef &Value) {
+  uint64_t Address;
+  if (Value.SymbolName) {
+    auto Loc = GlobalSymbolTable.find(Value.SymbolName);
+
+    // Don't create direct branch for external symbols.
+    if (Loc == GlobalSymbolTable.end())
+      return false;
+
+    // Target load address = its section's load address + symbol offset.
+    const auto &SymInfo = Loc->second;
+    Address =
+        uint64_t(Sections[SymInfo.getSectionID()].getLoadAddressWithOffset(
+            SymInfo.getOffset()));
+  } else {
+    Address = uint64_t(Sections[Value.SectionID].getLoadAddress());
+  }
+  uint64_t Offset = RelI->getOffset();
+  uint64_t SourceAddress = Sections[SectionID].getLoadAddressWithOffset(Offset);
+
+  // R_AARCH64_CALL26 requires immediate to be in range -2^27 <= imm < 2^27
+  // If distance between source and target is out of range then we should
+  // create thunk.
+  if (!isInt<28>(Address + Value.Addend - SourceAddress))
+    return false;
+
+  // In range: patch the branch directly, no thunk needed.
+  resolveRelocation(Sections[SectionID], Offset, Address, RelI->getType(),
+                    Value.Addend);
+
+  return true;
+}
+
+// Resolve an AArch64 branch relocation, reusing or creating a far-branch
+// stub when a direct (short) branch cannot reach the target.
+void RuntimeDyldELF::resolveAArch64Branch(unsigned SectionID,
+                                          const RelocationValueRef &Value,
+                                          relocation_iterator RelI,
+                                          StubMap &Stubs) {
+
+  LLVM_DEBUG(dbgs() << "\t\tThis is an AArch64 branch relocation.");
+  SectionEntry &Section = Sections[SectionID];
+
+  uint64_t Offset = RelI->getOffset();
+  unsigned RelType = RelI->getType();
+  // Look for an existing stub.
+  StubMap::const_iterator i = Stubs.find(Value);
+  if (i != Stubs.end()) {
+    // Reuse the already-emitted stub: point the branch at it.
+    resolveRelocation(Section, Offset,
+                      (uint64_t)Section.getAddressWithOffset(i->second),
+                      RelType, 0);
+    LLVM_DEBUG(dbgs() << " Stub function found\n");
+  } else if (!resolveAArch64ShortBranch(SectionID, RelI, Value)) {
+    // Create a new stub function.
+    LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+    Stubs[Value] = Section.getStubOffset();
+    uint8_t *StubTargetAddr = createStubFunction(
+        Section.getAddressWithOffset(Section.getStubOffset()));
+
+    // The stub materializes a 64-bit absolute target address via four
+    // 16-bit MOVZ/MOVK pieces (G3..G0), each patched by its own relocation
+    // at consecutive 4-byte instruction slots.
+    RelocationEntry REmovz_g3(SectionID, StubTargetAddr - Section.getAddress(),
+                              ELF::R_AARCH64_MOVW_UABS_G3, Value.Addend);
+    RelocationEntry REmovk_g2(SectionID,
+                              StubTargetAddr - Section.getAddress() + 4,
+                              ELF::R_AARCH64_MOVW_UABS_G2_NC, Value.Addend);
+    RelocationEntry REmovk_g1(SectionID,
+                              StubTargetAddr - Section.getAddress() + 8,
+                              ELF::R_AARCH64_MOVW_UABS_G1_NC, Value.Addend);
+    RelocationEntry REmovk_g0(SectionID,
+                              StubTargetAddr - Section.getAddress() + 12,
+                              ELF::R_AARCH64_MOVW_UABS_G0_NC, Value.Addend);
+
+    if (Value.SymbolName) {
+      addRelocationForSymbol(REmovz_g3, Value.SymbolName);
+      addRelocationForSymbol(REmovk_g2, Value.SymbolName);
+      addRelocationForSymbol(REmovk_g1, Value.SymbolName);
+      addRelocationForSymbol(REmovk_g0, Value.SymbolName);
+    } else {
+      addRelocationForSection(REmovz_g3, Value.SectionID);
+      addRelocationForSection(REmovk_g2, Value.SectionID);
+      addRelocationForSection(REmovk_g1, Value.SectionID);
+      addRelocationForSection(REmovk_g0, Value.SectionID);
+    }
+    // Point the original branch at the new stub and reserve its space.
+    resolveRelocation(Section, Offset,
+                      reinterpret_cast<uint64_t>(Section.getAddressWithOffset(
+                          Section.getStubOffset())),
+                      RelType, 0);
+    Section.advanceStubOffset(getMaxStubSize());
+  }
+}
+
+Expected<relocation_iterator>
+RuntimeDyldELF::processRelocationRef(
+ unsigned SectionID, relocation_iterator RelI, const ObjectFile &O,
+ ObjSectionToIDMap &ObjSectionToID, StubMap &Stubs) {
+ const auto &Obj = cast<ELFObjectFileBase>(O);
+ uint64_t RelType = RelI->getType();
+ int64_t Addend = 0;
+ if (Expected<int64_t> AddendOrErr = ELFRelocationRef(*RelI).getAddend())
+ Addend = *AddendOrErr;
+ else
+ consumeError(AddendOrErr.takeError());
+ elf_symbol_iterator Symbol = RelI->getSymbol();
+
+ // Obtain the symbol name which is referenced in the relocation
+ StringRef TargetName;
+ if (Symbol != Obj.symbol_end()) {
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+ }
+ LLVM_DEBUG(dbgs() << "\t\tRelType: " << RelType << " Addend: " << Addend
+ << " TargetName: " << TargetName << "\n");
+ RelocationValueRef Value;
+ // First search for the symbol in the local symbol table
+ SymbolRef::Type SymType = SymbolRef::ST_Unknown;
+
+ // Search for the symbol in the global symbol table
+ RTDyldSymbolTable::const_iterator gsi = GlobalSymbolTable.end();
+ if (Symbol != Obj.symbol_end()) {
+ gsi = GlobalSymbolTable.find(TargetName.data());
+ Expected<SymbolRef::Type> SymTypeOrErr = Symbol->getType();
+ if (!SymTypeOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+ SymType = *SymTypeOrErr;
+ }
+ if (gsi != GlobalSymbolTable.end()) {
+ const auto &SymInfo = gsi->second;
+ Value.SectionID = SymInfo.getSectionID();
+ Value.Offset = SymInfo.getOffset();
+ Value.Addend = SymInfo.getOffset() + Addend;
+ } else {
+ switch (SymType) {
+ case SymbolRef::ST_Debug: {
+ // TODO: Now ELF SymbolRef::ST_Debug = STT_SECTION, it's not obviously
+ // and can be changed by another developers. Maybe best way is add
+ // a new symbol type ST_Section to SymbolRef and use it.
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SectionOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+ section_iterator si = *SectionOrErr;
+ if (si == Obj.section_end())
+ llvm_unreachable("Symbol section not found, bad object file format!");
+ LLVM_DEBUG(dbgs() << "\t\tThis is section symbol\n");
+ bool isCode = si->isText();
+ if (auto SectionIDOrErr = findOrEmitSection(Obj, (*si), isCode,
+ ObjSectionToID))
+ Value.SectionID = *SectionIDOrErr;
+ else
+ return SectionIDOrErr.takeError();
+ Value.Addend = Addend;
+ break;
+ }
+ case SymbolRef::ST_Data:
+ case SymbolRef::ST_Function:
+ case SymbolRef::ST_Unknown: {
+ Value.SymbolName = TargetName.data();
+ Value.Addend = Addend;
+
+ // Absolute relocations will have a zero symbol ID (STN_UNDEF), which
+ // will manifest here as a NULL symbol name.
+ // We can set this as a valid (but empty) symbol name, and rely
+ // on addRelocationForSymbol to handle this.
+ if (!Value.SymbolName)
+ Value.SymbolName = "";
+ break;
+ }
+ default:
+ llvm_unreachable("Unresolved symbol type!");
+ break;
+ }
+ }
+
+ uint64_t Offset = RelI->getOffset();
+
+ LLVM_DEBUG(dbgs() << "\t\tSectionID: " << SectionID << " Offset: " << Offset
+ << "\n");
+ if ((Arch == Triple::aarch64 || Arch == Triple::aarch64_be)) {
+ if ((RelType == ELF::R_AARCH64_CALL26 ||
+ RelType == ELF::R_AARCH64_JUMP26) &&
+ MemMgr.allowStubAllocation()) {
+ resolveAArch64Branch(SectionID, Value, RelI, Stubs);
+ } else if (RelType == ELF::R_AARCH64_ADR_GOT_PAGE) {
+ // Create new GOT entry or find existing one. If GOT entry is
+ // to be created, then we also emit ABS64 relocation for it.
+ uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_AARCH64_ADR_PREL_PG_HI21);
+
+ } else if (RelType == ELF::R_AARCH64_LD64_GOT_LO12_NC) {
+ uint64_t GOTOffset = findOrAllocGOTEntry(Value, ELF::R_AARCH64_ABS64);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_AARCH64_LDST64_ABS_LO12_NC);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (Arch == Triple::arm) {
+ if (RelType == ELF::R_ARM_PC24 || RelType == ELF::R_ARM_CALL ||
+ RelType == ELF::R_ARM_JUMP24) {
+ // This is an ARM branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is an ARM branch relocation.\n");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ resolveRelocation(
+ Section, Offset,
+ reinterpret_cast<uint64_t>(Section.getAddressWithOffset(i->second)),
+ RelType, 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()));
+ RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_ARM_ABS32, Value.Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
+ Section.getAddressWithOffset(
+ Section.getStubOffset())),
+ RelType, 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else {
+ uint32_t *Placeholder =
+ reinterpret_cast<uint32_t*>(computePlaceholderAddress(SectionID, Offset));
+ if (RelType == ELF::R_ARM_PREL31 || RelType == ELF::R_ARM_TARGET1 ||
+ RelType == ELF::R_ARM_ABS32) {
+ Value.Addend += *Placeholder;
+ } else if (RelType == ELF::R_ARM_MOVW_ABS_NC || RelType == ELF::R_ARM_MOVT_ABS) {
+ // See ELF for ARM documentation
+ Value.Addend += (int16_t)((*Placeholder & 0xFFF) | (((*Placeholder >> 16) & 0xF) << 12));
+ }
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (IsMipsO32ABI) {
+ uint8_t *Placeholder = reinterpret_cast<uint8_t *>(
+ computePlaceholderAddress(SectionID, Offset));
+ uint32_t Opcode = readBytesUnaligned(Placeholder, 4);
+ if (RelType == ELF::R_MIPS_26) {
+ // This is an Mips branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Extract the addend from the instruction.
+ // We shift up by two since the Value will be down shifted again
+ // when applying the relocation.
+ uint32_t Addend = (Opcode & 0x03ffffff) << 2;
+
+ Value.Addend += Addend;
+
+ // Look up for existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, i->second);
+ addRelocationForSection(RE, SectionID);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+
+ unsigned AbiVariant = Obj.getPlatformFlags();
+
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
+
+ // Creating Hi and Lo relocations for the filled stub instructions.
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+
+ RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
+ addRelocationForSection(RE, SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else if (RelType == ELF::R_MIPS_HI16 || RelType == ELF::R_MIPS_PCHI16) {
+ int64_t Addend = (Opcode & 0x0000ffff) << 16;
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ PendingRelocs.push_back(std::make_pair(Value, RE));
+ } else if (RelType == ELF::R_MIPS_LO16 || RelType == ELF::R_MIPS_PCLO16) {
+ int64_t Addend = Value.Addend + SignExtend32<16>(Opcode & 0x0000ffff);
+ for (auto I = PendingRelocs.begin(); I != PendingRelocs.end();) {
+ const RelocationValueRef &MatchingValue = I->first;
+ RelocationEntry &Reloc = I->second;
+ if (MatchingValue == Value &&
+ RelType == getMatchingLoRelocation(Reloc.RelType) &&
+ SectionID == Reloc.SectionID) {
+ Reloc.Addend += Addend;
+ if (Value.SymbolName)
+ addRelocationForSymbol(Reloc, Value.SymbolName);
+ else
+ addRelocationForSection(Reloc, Value.SectionID);
+ I = PendingRelocs.erase(I);
+ } else
+ ++I;
+ }
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else {
+ if (RelType == ELF::R_MIPS_32)
+ Value.Addend += Opcode;
+ else if (RelType == ELF::R_MIPS_PC16)
+ Value.Addend += SignExtend32<18>((Opcode & 0x0000ffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC19_S2)
+ Value.Addend += SignExtend32<21>((Opcode & 0x0007ffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC21_S2)
+ Value.Addend += SignExtend32<23>((Opcode & 0x001fffff) << 2);
+ else if (RelType == ELF::R_MIPS_PC26_S2)
+ Value.Addend += SignExtend32<28>((Opcode & 0x03ffffff) << 2);
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else if (IsMipsN32ABI || IsMipsN64ABI) {
+ uint32_t r_type = RelType & 0xff;
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+ if (r_type == ELF::R_MIPS_CALL16 || r_type == ELF::R_MIPS_GOT_PAGE
+ || r_type == ELF::R_MIPS_GOT_DISP) {
+ StringMap<uint64_t>::iterator i = GOTSymbolOffsets.find(TargetName);
+ if (i != GOTSymbolOffsets.end())
+ RE.SymOffset = i->second;
+ else {
+ RE.SymOffset = allocateGOTEntries(1);
+ GOTSymbolOffsets[TargetName] = RE.SymOffset;
+ }
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_MIPS_26) {
+ // This is an Mips branch relocation, need to use a stub function.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a Mips branch relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look up for existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ RelocationEntry RE(SectionID, Offset, RelType, i->second);
+ addRelocationForSection(RE, SectionID);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+
+ unsigned AbiVariant = Obj.getPlatformFlags();
+
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()), AbiVariant);
+
+ if (IsMipsN32ABI) {
+ // Creating Hi and Lo relocations for the filled stub instructions.
+ RelocationEntry REHi(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_LO16, Value.Addend);
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+ } else {
+ // Creating Highest, Higher, Hi and Lo relocations for the filled stub
+ // instructions.
+ RelocationEntry REHighest(SectionID,
+ StubTargetAddr - Section.getAddress(),
+ ELF::R_MIPS_HIGHEST, Value.Addend);
+ RelocationEntry REHigher(SectionID,
+ StubTargetAddr - Section.getAddress() + 4,
+ ELF::R_MIPS_HIGHER, Value.Addend);
+ RelocationEntry REHi(SectionID,
+ StubTargetAddr - Section.getAddress() + 12,
+ ELF::R_MIPS_HI16, Value.Addend);
+ RelocationEntry RELo(SectionID,
+ StubTargetAddr - Section.getAddress() + 20,
+ ELF::R_MIPS_LO16, Value.Addend);
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REHighest, Value.SymbolName);
+ addRelocationForSymbol(REHigher, Value.SymbolName);
+ addRelocationForSymbol(REHi, Value.SymbolName);
+ addRelocationForSymbol(RELo, Value.SymbolName);
+ } else {
+ addRelocationForSection(REHighest, Value.SectionID);
+ addRelocationForSection(REHigher, Value.SectionID);
+ addRelocationForSection(REHi, Value.SectionID);
+ addRelocationForSection(RELo, Value.SectionID);
+ }
+ }
+ RelocationEntry RE(SectionID, Offset, RelType, Section.getStubOffset());
+ addRelocationForSection(RE, SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+
+ } else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
+ if (RelType == ELF::R_PPC64_REL24) {
+ // Determine ABI variant in use for this object.
+ unsigned AbiVariant = Obj.getPlatformFlags();
+ AbiVariant &= ELF::EF_PPC64_ABI;
+ // A PPC branch relocation will need a stub function if the target is
+ // an external symbol (either Value.SymbolName is set, or SymType is
+ // Symbol::ST_Unknown) or if the target address is not within the
+ // signed 24-bits branch address.
+ SectionEntry &Section = Sections[SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(Offset);
+ bool RangeOverflow = false;
+ bool IsExtern = Value.SymbolName || SymType == SymbolRef::ST_Unknown;
+ if (!IsExtern) {
+ if (AbiVariant != 2) {
+ // In the ELFv1 ABI, a function call may point to the .opd entry,
+ // so the final symbol value is calculated based on the relocation
+ // values in the .opd section.
+ if (auto Err = findOPDEntrySection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ } else {
+ // In the ELFv2 ABI, a function symbol may provide a local entry
+ // point, which must be used for direct calls.
+ if (Value.SectionID == SectionID){
+ uint8_t SymOther = Symbol->getOther();
+ Value.Addend += ELF::decodePPC64LocalEntryOffset(SymOther);
+ }
+ }
+ uint8_t *RelocTarget =
+ Sections[Value.SectionID].getAddressWithOffset(Value.Addend);
+ int64_t delta = static_cast<int64_t>(Target - RelocTarget);
+ // If it is within 26-bits branch range, just set the branch target
+ if (SignExtend64<26>(delta) != delta) {
+ RangeOverflow = true;
+ } else if ((AbiVariant != 2) ||
+ (AbiVariant == 2 && Value.SectionID == SectionID)) {
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ }
+ if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID) ||
+ RangeOverflow) {
+ // It is an external symbol (either Value.SymbolName is set, or
+ // SymType is SymbolRef::ST_Unknown) or out of range.
+ StubMap::const_iterator i = Stubs.find(Value);
+ if (i != Stubs.end()) {
+ // Symbol function stub already created, just relocate to it
+ resolveRelocation(Section, Offset,
+ reinterpret_cast<uint64_t>(
+ Section.getAddressWithOffset(i->second)),
+ RelType, 0);
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *StubTargetAddr = createStubFunction(
+ Section.getAddressWithOffset(Section.getStubOffset()),
+ AbiVariant);
+ RelocationEntry RE(SectionID, StubTargetAddr - Section.getAddress(),
+ ELF::R_PPC64_ADDR64, Value.Addend);
+
+ // Generates the 64-bits address loads as exemplified in section
+ // 4.5.1 in PPC64 ELF ABI. Note that the relocations need to
+ // apply to the low part of the instructions, so we have to update
+ // the offset according to the target endianness.
+ uint64_t StubRelocOffset = StubTargetAddr - Section.getAddress();
+ if (!IsTargetLittleEndian)
+ StubRelocOffset += 2;
+
+ RelocationEntry REhst(SectionID, StubRelocOffset + 0,
+ ELF::R_PPC64_ADDR16_HIGHEST, Value.Addend);
+ RelocationEntry REhr(SectionID, StubRelocOffset + 4,
+ ELF::R_PPC64_ADDR16_HIGHER, Value.Addend);
+ RelocationEntry REh(SectionID, StubRelocOffset + 12,
+ ELF::R_PPC64_ADDR16_HI, Value.Addend);
+ RelocationEntry REl(SectionID, StubRelocOffset + 16,
+ ELF::R_PPC64_ADDR16_LO, Value.Addend);
+
+ if (Value.SymbolName) {
+ addRelocationForSymbol(REhst, Value.SymbolName);
+ addRelocationForSymbol(REhr, Value.SymbolName);
+ addRelocationForSymbol(REh, Value.SymbolName);
+ addRelocationForSymbol(REl, Value.SymbolName);
+ } else {
+ addRelocationForSection(REhst, Value.SectionID);
+ addRelocationForSection(REhr, Value.SectionID);
+ addRelocationForSection(REh, Value.SectionID);
+ addRelocationForSection(REl, Value.SectionID);
+ }
+
+ resolveRelocation(Section, Offset, reinterpret_cast<uint64_t>(
+ Section.getAddressWithOffset(
+ Section.getStubOffset())),
+ RelType, 0);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ if (IsExtern || (AbiVariant == 2 && Value.SectionID != SectionID)) {
+ // Restore the TOC for external calls
+ if (AbiVariant == 2)
+ writeInt32BE(Target + 4, 0xE8410018); // ld r2,24(r1)
+ else
+ writeInt32BE(Target + 4, 0xE8410028); // ld r2,40(r1)
+ }
+ }
+ } else if (RelType == ELF::R_PPC64_TOC16 ||
+ RelType == ELF::R_PPC64_TOC16_DS ||
+ RelType == ELF::R_PPC64_TOC16_LO ||
+ RelType == ELF::R_PPC64_TOC16_LO_DS ||
+ RelType == ELF::R_PPC64_TOC16_HI ||
+ RelType == ELF::R_PPC64_TOC16_HA) {
+ // These relocations are supposed to subtract the TOC address from
+ // the final value. This does not fit cleanly into the RuntimeDyld
+ // scheme, since there may be *two* sections involved in determining
+ // the relocation value (the section of the symbol referred to by the
+ // relocation, and the TOC section associated with the current module).
+ //
+ // Fortunately, these relocations are currently only ever generated
+ // referring to symbols that themselves reside in the TOC, which means
+ // that the two sections are actually the same. Thus they cancel out
+ // and we can immediately resolve the relocation right now.
+ switch (RelType) {
+ case ELF::R_PPC64_TOC16: RelType = ELF::R_PPC64_ADDR16; break;
+ case ELF::R_PPC64_TOC16_DS: RelType = ELF::R_PPC64_ADDR16_DS; break;
+ case ELF::R_PPC64_TOC16_LO: RelType = ELF::R_PPC64_ADDR16_LO; break;
+ case ELF::R_PPC64_TOC16_LO_DS: RelType = ELF::R_PPC64_ADDR16_LO_DS; break;
+ case ELF::R_PPC64_TOC16_HI: RelType = ELF::R_PPC64_ADDR16_HI; break;
+ case ELF::R_PPC64_TOC16_HA: RelType = ELF::R_PPC64_ADDR16_HA; break;
+ default: llvm_unreachable("Wrong relocation type.");
+ }
+
+ RelocationValueRef TOCValue;
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, TOCValue))
+ return std::move(Err);
+ if (Value.SymbolName || Value.SectionID != TOCValue.SectionID)
+ llvm_unreachable("Unsupported TOC relocation.");
+ Value.Addend -= TOCValue.Addend;
+ resolveRelocation(Sections[SectionID], Offset, Value.Addend, RelType, 0);
+ } else {
+ // There are two ways to refer to the TOC address directly: either
+ // via a ELF::R_PPC64_TOC relocation (where both symbol and addend are
+ // ignored), or via any relocation that refers to the magic ".TOC."
+ // symbols (in which case the addend is respected).
+ if (RelType == ELF::R_PPC64_TOC) {
+ RelType = ELF::R_PPC64_ADDR64;
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ } else if (TargetName == ".TOC.") {
+ if (auto Err = findPPC64TOCSection(Obj, ObjSectionToID, Value))
+ return std::move(Err);
+ Value.Addend += Addend;
+ }
+
+ RelocationEntry RE(SectionID, Offset, RelType, Value.Addend);
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+ } else if (Arch == Triple::systemz &&
+ (RelType == ELF::R_390_PLT32DBL || RelType == ELF::R_390_GOTENT)) {
+ // Create function stubs for both PLT and GOT references, regardless of
+ // whether the GOT reference is to data or code. The stub contains the
+ // full address of the symbol, as needed by GOT references, and the
+ // executable part only adds an overhead of 8 bytes.
+ //
+ // We could try to conserve space by allocating the code and data
+ // parts of the stub separately. However, as things stand, we allocate
+ // a stub for every relocation, so using a GOT in JIT code should be
+ // no less space efficient than using an explicit constant pool.
+ LLVM_DEBUG(dbgs() << "\t\tThis is a SystemZ indirect relocation.");
+ SectionEntry &Section = Sections[SectionID];
+
+ // Look for an existing stub.
+ StubMap::const_iterator i = Stubs.find(Value);
+ uintptr_t StubAddress;
+ if (i != Stubs.end()) {
+ StubAddress = uintptr_t(Section.getAddressWithOffset(i->second));
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function.
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ uintptr_t StubAlignment = getStubAlignment();
+ StubAddress =
+ (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+
+ Stubs[Value] = StubOffset;
+ createStubFunction((uint8_t *)StubAddress);
+ RelocationEntry RE(SectionID, StubOffset + 8, ELF::R_390_64,
+ Value.Offset);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+
+ if (RelType == ELF::R_390_GOTENT)
+ resolveRelocation(Section, Offset, StubAddress + 8, ELF::R_390_PC32DBL,
+ Addend);
+ else
+ resolveRelocation(Section, Offset, StubAddress, RelType, Addend);
+ } else if (Arch == Triple::x86_64) {
+ if (RelType == ELF::R_X86_64_PLT32) {
+ // The way the PLT relocations normally work is that the linker allocates
+ // the
+ // PLT and this relocation makes a PC-relative call into the PLT. The PLT
+ // entry will then jump to an address provided by the GOT. On first call,
+ // the
+ // GOT address will point back into PLT code that resolves the symbol. After
+ // the first call, the GOT entry points to the actual function.
+ //
+ // For local functions we're ignoring all of that here and just replacing
+ // the PLT32 relocation type with PC32, which will translate the relocation
+ // into a PC-relative call directly to the function. For external symbols we
+ // can't be sure the function will be within 2^32 bytes of the call site, so
+ // we need to create a stub, which calls into the GOT. This case is
+ // equivalent to the usual PLT implementation except that we use the stub
+ // mechanism in RuntimeDyld (which puts stubs at the end of the section)
+ // rather than allocating a PLT section.
+ if (Value.SymbolName && MemMgr.allowStubAllocation()) {
+ // This is a call to an external function.
+ // Look for an existing stub.
+ SectionEntry *Section = &Sections[SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ uintptr_t StubAddress;
+ if (i != Stubs.end()) {
+ StubAddress = uintptr_t(Section->getAddress()) + i->second;
+ LLVM_DEBUG(dbgs() << " Stub function found\n");
+ } else {
+ // Create a new stub function (equivalent to a PLT entry).
+ LLVM_DEBUG(dbgs() << " Create a new stub function\n");
+
+ uintptr_t BaseAddress = uintptr_t(Section->getAddress());
+ uintptr_t StubAlignment = getStubAlignment();
+ StubAddress =
+ (BaseAddress + Section->getStubOffset() + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ createStubFunction((uint8_t *)StubAddress);
+
+ // Bump our stub offset counter
+ Section->advanceStubOffset(getMaxStubSize());
+
+ // Allocate a GOT Entry
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ // This potentially creates a new Section which potentially
+ // invalidates the Section pointer, so reload it.
+ Section = &Sections[SectionID];
+
+ // The load of the GOT address has an addend of -4
+ resolveGOTOffsetRelocation(SectionID, StubOffset + 2, GOTOffset - 4,
+ ELF::R_X86_64_PC32);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ addRelocationForSymbol(
+ computeGOTOffsetRE(GOTOffset, 0, ELF::R_X86_64_64),
+ Value.SymbolName);
+ }
+
+ // Make the target call a call into the stub table.
+ resolveRelocation(*Section, Offset, StubAddress, ELF::R_X86_64_PC32,
+ Addend);
+ } else {
+ Value.Addend += support::ulittle32_t::ref(
+ computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, ELF::R_X86_64_PC32, Value);
+ }
+ } else if (RelType == ELF::R_X86_64_GOTPCREL ||
+ RelType == ELF::R_X86_64_GOTPCRELX ||
+ RelType == ELF::R_X86_64_REX_GOTPCRELX) {
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+ ELF::R_X86_64_PC32);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_X86_64_GOT64) {
+ // Fill in a 64-bit GOT offset.
+ uint64_t GOTOffset = allocateGOTEntries(1);
+ resolveRelocation(Sections[SectionID], Offset, GOTOffset,
+ ELF::R_X86_64_64, 0);
+
+ // Fill in the value of the symbol we're targeting into the GOT
+ RelocationEntry RE =
+ computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_64);
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ } else if (RelType == ELF::R_X86_64_GOTPC32) {
+ // Materialize the address of the base of the GOT relative to the PC.
+ // This doesn't create a GOT entry, but it does mean we need a GOT
+ // section.
+ (void)allocateGOTEntries(0);
+ resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC32);
+ } else if (RelType == ELF::R_X86_64_GOTPC64) {
+ (void)allocateGOTEntries(0);
+ resolveGOTOffsetRelocation(SectionID, Offset, Addend, ELF::R_X86_64_PC64);
+ } else if (RelType == ELF::R_X86_64_GOTOFF64) {
+ // GOTOFF relocations ultimately require a section difference relocation.
+ (void)allocateGOTEntries(0);
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_PC32) {
+ Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_PC64) {
+ Value.Addend += support::ulittle64_t::ref(computePlaceholderAddress(SectionID, Offset));
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ } else if (RelType == ELF::R_X86_64_GOTTPOFF) {
+ processX86_64GOTTPOFFRelocation(SectionID, Offset, Value, Addend);
+ } else if (RelType == ELF::R_X86_64_TLSGD ||
+ RelType == ELF::R_X86_64_TLSLD) {
+ // The next relocation must be the relocation for __tls_get_addr.
+ ++RelI;
+ auto &GetAddrRelocation = *RelI;
+ processX86_64TLSRelocation(SectionID, Offset, RelType, Value, Addend,
+ GetAddrRelocation);
+ } else {
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ } else {
+ if (Arch == Triple::x86) {
+ Value.Addend += support::ulittle32_t::ref(computePlaceholderAddress(SectionID, Offset));
+ }
+ processSimpleRelocation(SectionID, Offset, RelType, Value);
+ }
+ return ++RelI;
+}
+
+// Handle an R_X86_64_GOTTPOFF (Initial Exec TLS) relocation. Following the
+// "x86-64 Linker Optimizations" section of the TLS spec, the two known
+// Initial Exec instruction sequences are pattern-matched at the relocation
+// site; on a match the code is rewritten in place so the thread-pointer
+// offset is used directly, and a TPOFF32 relocation is recorded for the
+// rewritten instruction. If neither sequence matches, fall back to
+// allocating a real GOT entry that holds a TPOFF64 value.
+//
+// \param SectionID Section containing the relocation site.
+// \param Offset    Offset of the relocation within that section.
+// \param Value     Resolved target (symbol name, or section id + addend).
+// \param Addend    Addend of the original PC-relative GOTTPOFF relocation.
+void RuntimeDyldELF::processX86_64GOTTPOFFRelocation(unsigned SectionID,
+                                                     uint64_t Offset,
+                                                     RelocationValueRef Value,
+                                                     int64_t Addend) {
+  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
+  // to replace the GOTTPOFF relocation with a TPOFF relocation. The spec
+  // only mentions one optimization even though there are two different
+  // code sequences for the Initial Exec TLS Model. We match the code to
+  // find out which one was used.
+
+  // A possible TLS code sequence and its replacement
+  struct CodeSequence {
+    // The expected code sequence
+    ArrayRef<uint8_t> ExpectedCodeSequence;
+    // The negative offset of the GOTTPOFF relocation to the beginning of
+    // the sequence
+    uint64_t TLSSequenceOffset;
+    // The new code sequence
+    ArrayRef<uint8_t> NewCodeSequence;
+    // The offset of the new TPOFF relocation
+    uint64_t TpoffRelocationOffset;
+  };
+
+  std::array<CodeSequence, 2> CodeSequences;
+
+  // Initial Exec Code Model Sequence
+  {
+    static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
+        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+        0x00, // mov %fs:0, %rax
+        0x48, 0x03, 0x05, 0x00, 0x00, 0x00, 0x00 // add x@gotpoff(%rip),
+                                                 // %rax
+    };
+    CodeSequences[0].ExpectedCodeSequence =
+        ArrayRef<uint8_t>(ExpectedCodeSequenceList);
+    CodeSequences[0].TLSSequenceOffset = 12;
+
+    static const std::initializer_list<uint8_t> NewCodeSequenceList = {
+        0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0, %rax
+        0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax), %rax
+    };
+    CodeSequences[0].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
+    CodeSequences[0].TpoffRelocationOffset = 12;
+  }
+
+  // Initial Exec Code Model Sequence, II
+  {
+    static const std::initializer_list<uint8_t> ExpectedCodeSequenceList = {
+        0x48, 0x8b, 0x05, 0x00, 0x00, 0x00, 0x00, // mov x@gotpoff(%rip), %rax
+        0x64, 0x48, 0x8b, 0x00, 0x00, 0x00, 0x00  // mov %fs:(%rax), %rax
+    };
+    CodeSequences[1].ExpectedCodeSequence =
+        ArrayRef<uint8_t>(ExpectedCodeSequenceList);
+    CodeSequences[1].TLSSequenceOffset = 3;
+
+    static const std::initializer_list<uint8_t> NewCodeSequenceList = {
+        0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00,             // 6 byte nop
+        0x64, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:x@tpoff, %rax
+    };
+    CodeSequences[1].NewCodeSequence = ArrayRef<uint8_t>(NewCodeSequenceList);
+    CodeSequences[1].TpoffRelocationOffset = 10;
+  }
+
+  bool Resolved = false;
+  auto &Section = Sections[SectionID];
+  for (const auto &C : CodeSequences) {
+    assert(C.ExpectedCodeSequence.size() == C.NewCodeSequence.size() &&
+           "Old and new code sequences must have the same size");
+
+    if (Offset < C.TLSSequenceOffset ||
+        (Offset - C.TLSSequenceOffset + C.NewCodeSequence.size()) >
+            Section.getSize()) {
+      // This can't be a matching sequence as it doesn't fit in the current
+      // section
+      continue;
+    }
+
+    auto TLSSequenceStartOffset = Offset - C.TLSSequenceOffset;
+    auto *TLSSequence = Section.getAddressWithOffset(TLSSequenceStartOffset);
+    if (ArrayRef<uint8_t>(TLSSequence, C.ExpectedCodeSequence.size()) !=
+        C.ExpectedCodeSequence) {
+      continue;
+    }
+
+    // Found a matching sequence: rewrite the instructions in place.
+    memcpy(TLSSequence, C.NewCodeSequence.data(), C.NewCodeSequence.size());
+
+    // The original GOTTPOFF relocation has an addend as it is PC relative,
+    // so it needs to be corrected. The TPOFF32 relocation is used as an
+    // absolute value (which is an offset from %fs:0), so remove the addend
+    // again.
+    RelocationEntry RE(SectionID,
+                       TLSSequenceStartOffset + C.TpoffRelocationOffset,
+                       ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
+
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+
+    Resolved = true;
+    break;
+  }
+
+  if (!Resolved) {
+    // The GOTTPOFF relocation was not used in one of the sequences
+    // described in the spec, so we can't optimize it to a TPOFF
+    // relocation.
+    uint64_t GOTOffset = allocateGOTEntries(1);
+    resolveGOTOffsetRelocation(SectionID, Offset, GOTOffset + Addend,
+                               ELF::R_X86_64_PC32);
+    RelocationEntry RE =
+        computeGOTOffsetRE(GOTOffset, Value.Offset, ELF::R_X86_64_TPOFF64);
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+  }
+}
+
+// Rewrite General/Local Dynamic TLS code sequences (R_X86_64_TLSGD /
+// R_X86_64_TLSLD) into their Local Exec form, eliminating the call to
+// __tls_get_addr. \p GetAddrRelocation is the companion relocation for that
+// call; its type reveals whether the small or large code model was used
+// (and, for the small model, whether the call goes through the PLT or
+// indirectly through the GOT). The matched sequence is overwritten in place
+// and, for TLSGD, a TPOFF32 relocation is recorded for the new code.
+// Aborts with report_fatal_error if the bytes at the relocation site do not
+// match any known sequence.
+void RuntimeDyldELF::processX86_64TLSRelocation(
+    unsigned SectionID, uint64_t Offset, uint64_t RelType,
+    RelocationValueRef Value, int64_t Addend,
+    const RelocationRef &GetAddrRelocation) {
+  // Since we are statically linking and have no additional DSOs, we can resolve
+  // the relocation directly without using __tls_get_addr.
+  // Use the approach from "x86-64 Linker Optimizations" from the TLS spec
+  // to replace it with the Local Exec relocation variant.
+
+  // Find out whether the code was compiled with the large or small memory
+  // model. For this we look at the next relocation which is the relocation
+  // for the __tls_get_addr function. If it's a 32 bit relocation, it's the
+  // small code model, with a 64 bit relocation it's the large code model.
+  bool IsSmallCodeModel;
+  // Is the relocation for the __tls_get_addr a PC-relative GOT relocation?
+  bool IsGOTPCRel = false;
+
+  switch (GetAddrRelocation.getType()) {
+  case ELF::R_X86_64_GOTPCREL:
+  case ELF::R_X86_64_REX_GOTPCRELX:
+  case ELF::R_X86_64_GOTPCRELX:
+    IsGOTPCRel = true;
+    LLVM_FALLTHROUGH;
+  case ELF::R_X86_64_PLT32:
+    IsSmallCodeModel = true;
+    break;
+  case ELF::R_X86_64_PLTOFF64:
+    IsSmallCodeModel = false;
+    break;
+  default:
+    report_fatal_error(
+        "invalid TLS relocations for General/Local Dynamic TLS Model: "
+        "expected PLT or GOT relocation for __tls_get_addr function");
+  }
+
+  // The negative offset to the start of the TLS code sequence relative to
+  // the offset of the TLSGD/TLSLD relocation
+  uint64_t TLSSequenceOffset;
+  // The expected start of the code sequence
+  ArrayRef<uint8_t> ExpectedCodeSequence;
+  // The new TLS code sequence that will replace the existing code
+  ArrayRef<uint8_t> NewCodeSequence;
+
+  if (RelType == ELF::R_X86_64_TLSGD) {
+    // The offset of the new TPOFF32 relocation (offset starting from the
+    // beginning of the whole TLS sequence)
+    uint64_t TpoffRelocOffset;
+
+    if (IsSmallCodeModel) {
+      if (!IsGOTPCRel) {
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x66, // data16 (no-op prefix)
+            0x48, 0x8d, 0x3d, 0x00, 0x00,
+            0x00, 0x00, // lea <disp32>(%rip), %rdi
+            0x66, 0x66, // two data16 prefixes
+            0x48,       // rex64 (no-op prefix)
+            0xe8, 0x00, 0x00, 0x00, 0x00 // call __tls_get_addr@plt
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 4;
+      } else {
+        // This code sequence is not described in the TLS spec but gcc
+        // generates it sometimes.
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x66, // data16 (no-op prefix)
+            0x48, 0x8d, 0x3d, 0x00, 0x00,
+            0x00, 0x00, // lea <disp32>(%rip), %rdi
+            0x66,       // data16 prefix (no-op prefix)
+            0x48,       // rex64 (no-op prefix)
+            0xff, 0x15, 0x00, 0x00, 0x00,
+            0x00 // call *__tls_get_addr@gotpcrel(%rip)
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 4;
+      }
+
+      // The replacement code for the small code model. It's the same for
+      // both sequences.
+      static const std::initializer_list<uint8_t> SmallSequence = {
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+          0x00, // mov %fs:0, %rax
+          0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 // lea x@tpoff(%rax),
+                                                   // %rax
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      TpoffRelocOffset = 12;
+    } else {
+      static const std::initializer_list<uint8_t> CodeSequence = {
+          0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
+                                                    // %rdi
+          0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+          0x00,             // movabs $__tls_get_addr@pltoff, %rax
+          0x48, 0x01, 0xd8, // add %rbx, %rax
+          0xff, 0xd0        // call *%rax
+      };
+      ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+      TLSSequenceOffset = 3;
+
+      // The replacement code for the large code model
+      static const std::initializer_list<uint8_t> LargeSequence = {
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00,
+          0x00, // mov %fs:0, %rax
+          0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00, // lea x@tpoff(%rax),
+                                                    // %rax
+          0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 // nopw 0x0(%rax,%rax,1)
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
+      TpoffRelocOffset = 12;
+    }
+
+    // The TLSGD/TLSLD relocations are PC-relative, so they have an addend.
+    // The new TPOFF32 relocations is used as an absolute offset from
+    // %fs:0, so remove the TLSGD/TLSLD addend again.
+    RelocationEntry RE(SectionID, Offset - TLSSequenceOffset + TpoffRelocOffset,
+                       ELF::R_X86_64_TPOFF32, Value.Addend - Addend);
+    if (Value.SymbolName)
+      addRelocationForSymbol(RE, Value.SymbolName);
+    else
+      addRelocationForSection(RE, Value.SectionID);
+  } else if (RelType == ELF::R_X86_64_TLSLD) {
+    if (IsSmallCodeModel) {
+      if (!IsGOTPCRel) {
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
+            0x00, 0xe8, 0x00, 0x00, 0x00, 0x00  // call __tls_get_addr@plt
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 3;
+
+        // The replacement code for the small code model
+        static const std::initializer_list<uint8_t> SmallSequence = {
+            0x66, 0x66, 0x66, // three data16 prefixes (no-op)
+            0x64, 0x48, 0x8b, 0x04, 0x25,
+            0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
+        };
+        NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      } else {
+        // This code sequence is not described in the TLS spec but gcc
+        // generates it sometimes.
+        static const std::initializer_list<uint8_t> CodeSequence = {
+            0x48, 0x8d, 0x3d, 0x00,
+            0x00, 0x00, 0x00, // leaq <disp32>(%rip), %rdi
+            0xff, 0x15, 0x00, 0x00,
+            0x00, 0x00 // call
+                       // *__tls_get_addr@gotpcrel(%rip)
+        };
+        ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+        TLSSequenceOffset = 3;
+
+        // The replacement code is just like above but it needs to be
+        // one byte longer.
+        static const std::initializer_list<uint8_t> SmallSequence = {
+            0x0f, 0x1f, 0x40, 0x00, // 4 byte nop
+            0x64, 0x48, 0x8b, 0x04, 0x25,
+            0x00, 0x00, 0x00, 0x00 // mov %fs:0, %rax
+        };
+        NewCodeSequence = ArrayRef<uint8_t>(SmallSequence);
+      }
+    } else {
+      // This is the same sequence as for the TLSGD sequence with the large
+      // memory model above
+      static const std::initializer_list<uint8_t> CodeSequence = {
+          0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00, // lea <disp32>(%rip),
+                                                    // %rdi
+          0x48, 0xb8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+          0x48,       // movabs $__tls_get_addr@pltoff, %rax
+          0x01, 0xd8, // add %rbx, %rax
+          0xff, 0xd0  // call *%rax
+      };
+      ExpectedCodeSequence = ArrayRef<uint8_t>(CodeSequence);
+      TLSSequenceOffset = 3;
+
+      // The replacement code for the large code model
+      static const std::initializer_list<uint8_t> LargeSequence = {
+          0x66, 0x66, 0x66, // three data16 prefixes (no-op)
+          0x66, 0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00,
+          0x00, // 10 byte nop
+          0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00 // mov %fs:0,%rax
+      };
+      NewCodeSequence = ArrayRef<uint8_t>(LargeSequence);
+    }
+  } else {
+    llvm_unreachable("both TLS relocations handled above");
+  }
+
+  assert(ExpectedCodeSequence.size() == NewCodeSequence.size() &&
+         "Old and new code sequences must have the same size");
+
+  auto &Section = Sections[SectionID];
+  if (Offset < TLSSequenceOffset ||
+      (Offset - TLSSequenceOffset + NewCodeSequence.size()) >
+          Section.getSize()) {
+    report_fatal_error("unexpected end of section in TLS sequence");
+  }
+
+  auto *TLSSequence = Section.getAddressWithOffset(Offset - TLSSequenceOffset);
+  if (ArrayRef<uint8_t>(TLSSequence, ExpectedCodeSequence.size()) !=
+      ExpectedCodeSequence) {
+    report_fatal_error(
+        "invalid TLS sequence for Global/Local Dynamic TLS Model");
+  }
+
+  // Sequence verified: overwrite it with the Local Exec form.
+  memcpy(TLSSequence, NewCodeSequence.data(), NewCodeSequence.size());
+}
+
+// Size in bytes of a single GOT slot for the current target architecture.
+size_t RuntimeDyldELF::getGOTEntrySize() {
+  // We don't use the GOT in all of these cases, but it's essentially free
+  // to put them all here.
+  switch (Arch) {
+  case Triple::x86_64:
+  case Triple::aarch64:
+  case Triple::aarch64_be:
+  case Triple::ppc64:
+  case Triple::ppc64le:
+  case Triple::systemz:
+    return sizeof(uint64_t);
+  case Triple::x86:
+  case Triple::arm:
+  case Triple::thumb:
+    return sizeof(uint32_t);
+  case Triple::mips:
+  case Triple::mipsel:
+  case Triple::mips64:
+  case Triple::mips64el:
+    // For MIPS the slot width follows the ABI in use, not the raw triple.
+    if (IsMipsO32ABI || IsMipsN32ABI)
+      return sizeof(uint32_t);
+    if (IsMipsN64ABI)
+      return sizeof(uint64_t);
+    llvm_unreachable("Mips ABI not handled");
+  default:
+    llvm_unreachable("Unsupported CPU type!");
+  }
+}
+
+// Reserve \p no consecutive GOT slots and return the byte offset of the
+// first one within the (virtual) GOT section. Passing 0 merely ensures the
+// GOT section exists. Actual storage is allocated later, in finalizeLoad(),
+// once the total number of entries is known.
+uint64_t RuntimeDyldELF::allocateGOTEntries(unsigned no) {
+  if (GOTSectionID == 0) {
+    // First GOT request for this object: claim a section id now; the memory
+    // for it is provided during finalizeLoad().
+    GOTSectionID = Sections.size();
+    Sections.push_back(SectionEntry(".got", nullptr, 0, 0, 0));
+  }
+  const uint64_t FirstSlotOffset = CurrentGOTIndex * getGOTEntrySize();
+  CurrentGOTIndex += no;
+  return FirstSlotOffset;
+}
+
+// Return the GOT offset for \p Value, creating a new entry (plus the
+// relocation of type \p GOTRelType that will fill it in) on first use.
+// Results are memoized in GOTOffsetMap so each distinct value gets exactly
+// one slot.
+uint64_t RuntimeDyldELF::findOrAllocGOTEntry(const RelocationValueRef &Value,
+                                             unsigned GOTRelType) {
+  auto Inserted = GOTOffsetMap.insert({Value, 0});
+  if (!Inserted.second)
+    return Inserted.first->second;
+
+  // First request for this value: reserve one slot and record the
+  // relocation that will write the target's address into it.
+  uint64_t GOTOffset = allocateGOTEntries(1);
+  RelocationEntry RE = computeGOTOffsetRE(GOTOffset, Value.Offset, GOTRelType);
+  if (Value.SymbolName)
+    addRelocationForSymbol(RE, Value.SymbolName);
+  else
+    addRelocationForSection(RE, Value.SectionID);
+
+  Inserted.first->second = GOTOffset;
+  return GOTOffset;
+}
+
+// Record a relocation of type \p Type at (\p SectionID, \p Offset) whose
+// target is the GOT section itself, displaced by \p GOTOffset.
+void RuntimeDyldELF::resolveGOTOffsetRelocation(unsigned SectionID,
+                                                uint64_t Offset,
+                                                uint64_t GOTOffset,
+                                                uint32_t Type) {
+  // The addend carries the position of the entry inside the GOT section, so
+  // the relocation resolves to the address of that slot.
+  RelocationEntry Entry(SectionID, Offset, Type, GOTOffset);
+  addRelocationForSection(Entry, GOTSectionID);
+}
+
+// Build the relocation that fills a GOT slot: it lives in the GOT section
+// at \p GOTOffset and, once applied with type \p Type, writes the target's
+// value displaced by \p SymbolOffset into that slot.
+RelocationEntry RuntimeDyldELF::computeGOTOffsetRE(uint64_t GOTOffset,
+                                                   uint64_t SymbolOffset,
+                                                   uint32_t Type) {
+  return RelocationEntry(GOTSectionID, GOTOffset, Type, SymbolOffset);
+}
+
+// Per-object post-load work: verify no MIPS O32 LO16 relocations were left
+// unmatched, allocate and zero-fill the GOT section that was reserved while
+// processing relocations (recording the section->GOT mapping needed by the
+// MIPS N32/N64 ABIs), remember the .eh_frame section for later
+// registration, and reset the per-object GOT counters.
+Error RuntimeDyldELF::finalizeLoad(const ObjectFile &Obj,
+                                   ObjSectionToIDMap &SectionMap) {
+  if (IsMipsO32ABI)
+    if (!PendingRelocs.empty())
+      return make_error<RuntimeDyldError>("Can't find matching LO16 reloc");
+
+  // If necessary, allocate the global offset table
+  if (GOTSectionID != 0) {
+    // Allocate memory for the section
+    size_t TotalSize = CurrentGOTIndex * getGOTEntrySize();
+    uint8_t *Addr = MemMgr.allocateDataSection(TotalSize, getGOTEntrySize(),
+                                               GOTSectionID, ".got", false);
+    if (!Addr)
+      return make_error<RuntimeDyldError>("Unable to allocate memory for GOT!");
+
+    // Replace the placeholder entry reserved by allocateGOTEntries() with
+    // the real, sized section.
+    Sections[GOTSectionID] =
+        SectionEntry(".got", Addr, TotalSize, TotalSize, 0);
+
+    // For now, initialize all GOT entries to zero. We'll fill them in as
+    // needed when GOT-based relocations are applied.
+    memset(Addr, 0, TotalSize);
+    if (IsMipsN32ABI || IsMipsN64ABI) {
+      // To correctly resolve Mips GOT relocations, we need a mapping from
+      // object's sections to GOTs.
+      for (section_iterator SI = Obj.section_begin(), SE = Obj.section_end();
+           SI != SE; ++SI) {
+        if (SI->relocation_begin() != SI->relocation_end()) {
+          Expected<section_iterator> RelSecOrErr = SI->getRelocatedSection();
+          if (!RelSecOrErr)
+            return make_error<RuntimeDyldError>(
+                toString(RelSecOrErr.takeError()));
+
+          section_iterator RelocatedSection = *RelSecOrErr;
+          ObjSectionToIDMap::iterator i = SectionMap.find(*RelocatedSection);
+          assert (i != SectionMap.end());
+          SectionToGOTMap[i->second] = GOTSectionID;
+        }
+      }
+      GOTSymbolOffsets.clear();
+    }
+  }
+
+  // Look for and record the EH frame section.
+  ObjSectionToIDMap::iterator i, e;
+  for (i = SectionMap.begin(), e = SectionMap.end(); i != e; ++i) {
+    const SectionRef &Section = i->first;
+
+    StringRef Name;
+    Expected<StringRef> NameOrErr = Section.getName();
+    if (NameOrErr)
+      Name = *NameOrErr;
+    else
+      consumeError(NameOrErr.takeError());
+
+    if (Name == ".eh_frame") {
+      UnregisteredEHFrameSections.push_back(i->second);
+      break;
+    }
+  }
+
+  // Reset the GOT state so the next object starts with a fresh GOT.
+  GOTSectionID = 0;
+  CurrentGOTIndex = 0;
+
+  return Error::success();
+}
+
+// This dynamic loader handles exactly the ELF object-file format.
+bool RuntimeDyldELF::isCompatibleFile(const object::ObjectFile &Obj) const {
+  return Obj.isELF();
+}
+
+// Return true if relocation \p R requires a GOT entry on the current
+// architecture. Only AArch64 and x86-64 GOT-forming relocation types are
+// recognized; everything else is GOT-free.
+bool RuntimeDyldELF::relocationNeedsGot(const RelocationRef &R) const {
+  const unsigned RelTy = R.getType();
+
+  if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be) {
+    switch (RelTy) {
+    case ELF::R_AARCH64_ADR_GOT_PAGE:
+    case ELF::R_AARCH64_LD64_GOT_LO12_NC:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  if (Arch == Triple::x86_64) {
+    switch (RelTy) {
+    case ELF::R_X86_64_GOTPCREL:
+    case ELF::R_X86_64_GOTPCRELX:
+    case ELF::R_X86_64_GOT64:
+    case ELF::R_X86_64_REX_GOTPCRELX:
+      return true;
+    default:
+      return false;
+    }
+  }
+
+  return false;
+}
+
+// Conservatively decide whether relocation \p R may require a stub
+// function. Only a known-safe subset of x86-64 relocation types is reported
+// as stub-free; every other type (and every other architecture) answers
+// true to stay on the safe side.
+bool RuntimeDyldELF::relocationNeedsStub(const RelocationRef &R) const {
+  if (Arch != Triple::x86_64)
+    return true; // Conservative answer
+
+  switch (R.getType()) {
+  case ELF::R_X86_64_GOTPCREL:
+  case ELF::R_X86_64_GOTPCRELX:
+  case ELF::R_X86_64_REX_GOTPCRELX:
+  case ELF::R_X86_64_GOTPC64:
+  case ELF::R_X86_64_GOT64:
+  case ELF::R_X86_64_GOTOFF64:
+  case ELF::R_X86_64_PC32:
+  case ELF::R_X86_64_PC64:
+  case ELF::R_X86_64_64:
+    // We know that these relocation types won't need a stub function. This
+    // list can be extended as needed.
+    return false;
+  default:
+    return true; // Conservative answer
+  }
+}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
new file mode 100644
index 0000000000..1251036f4c
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -0,0 +1,202 @@
+//===-- RuntimeDyldELF.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ELF support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDELF_H
+
+#include "RuntimeDyldImpl.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace llvm;
+
+namespace llvm {
+namespace object {
+class ELFObjectFileBase;
+}
+
+class RuntimeDyldELF : public RuntimeDyldImpl {
+
+ void resolveRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset = 0, SID SectionID = 0);
+
+ void resolveX86_64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset);
+
+ void resolveX86Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolveAArch64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ bool resolveAArch64ShortBranch(unsigned SectionID, relocation_iterator RelI,
+ const RelocationValueRef &Value);
+
+ void resolveAArch64Branch(unsigned SectionID, const RelocationValueRef &Value,
+ relocation_iterator RelI, StubMap &Stubs);
+
+ void resolveARMRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+
+ void resolvePPC32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolvePPC64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveSystemZRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ void resolveBPFRelocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend);
+
+ unsigned getMaxStubSize() const override {
+ if (Arch == Triple::aarch64 || Arch == Triple::aarch64_be)
+ return 20; // movz; movk; movk; movk; br
+ if (Arch == Triple::arm || Arch == Triple::thumb)
+ return 8; // 32-bit instruction and 32-bit address
+ else if (IsMipsO32ABI || IsMipsN32ABI)
+ return 16;
+ else if (IsMipsN64ABI)
+ return 32;
+ else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le)
+ return 44;
+ else if (Arch == Triple::x86_64)
+ return 6; // 2-byte jmp instruction + 32-bit relative address
+ else if (Arch == Triple::systemz)
+ return 16;
+ else
+ return 0;
+ }
+
+ unsigned getStubAlignment() override {
+ if (Arch == Triple::systemz)
+ return 8;
+ else
+ return 1;
+ }
+
+ void setMipsABI(const ObjectFile &Obj) override;
+
+ Error findPPC64TOCSection(const object::ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
+ Error findOPDEntrySection(const object::ELFObjectFileBase &Obj,
+ ObjSectionToIDMap &LocalSections,
+ RelocationValueRef &Rel);
+
+protected:
+ size_t getGOTEntrySize() override;
+
+private:
+ SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+ // Allocate the given number ('no') of GOT entries for use in the given section.
+ uint64_t allocateGOTEntries(unsigned no);
+
+ // Find GOT entry corresponding to relocation or create new one.
+ uint64_t findOrAllocGOTEntry(const RelocationValueRef &Value,
+ unsigned GOTRelType);
+
+ // Resolve the relative address of GOTOffset in Section ID and place
+ // it at the given Offset
+ void resolveGOTOffsetRelocation(unsigned SectionID, uint64_t Offset,
+ uint64_t GOTOffset, uint32_t Type);
+
+ // For a GOT entry referenced from SectionID, compute a relocation entry
+ // that will place the final resolved value in the GOT slot
+ RelocationEntry computeGOTOffsetRE(uint64_t GOTOffset, uint64_t SymbolOffset,
+ unsigned Type);
+
+ // Compute the address in memory where we can find the placeholder
+ void *computePlaceholderAddress(unsigned SectionID, uint64_t Offset) const;
+
+ // Split out common case for creating the RelocationEntry for when the relocation requires
+ // no particular advanced processing.
+ void processSimpleRelocation(unsigned SectionID, uint64_t Offset, unsigned RelType, RelocationValueRef Value);
+
+ // Return matching *LO16 relocation (Mips specific)
+ uint32_t getMatchingLoRelocation(uint32_t RelType,
+ bool IsLocal = false) const;
+
+ // The tentative ID for the GOT section
+ unsigned GOTSectionID;
+
+ // Records the current number of allocated slots in the GOT
+ // (This would be equivalent to GOTEntries.size() were it not for relocations
+ // that consume more than one slot)
+ unsigned CurrentGOTIndex;
+
+protected:
+ // A map from section to a GOT section that has entries for section's GOT
+ // relocations. (Mips64 specific)
+ DenseMap<SID, SID> SectionToGOTMap;
+
+private:
+ // A map to avoid duplicate got entries (Mips64 specific)
+ StringMap<uint64_t> GOTSymbolOffsets;
+
+ // *HI16 relocations will be added for resolving when we find matching
+ // *LO16 part. (Mips specific)
+ SmallVector<std::pair<RelocationValueRef, RelocationEntry>, 8> PendingRelocs;
+
+ // When a module is loaded we save the SectionID of the EH frame section
+ // in a table until we receive a request to register all unregistered
+ // EH frame sections with the memory manager.
+ SmallVector<SID, 2> UnregisteredEHFrameSections;
+
+ // Map between GOT relocation value and corresponding GOT offset
+ std::map<RelocationValueRef, uint64_t> GOTOffsetMap;
+
+ bool relocationNeedsGot(const RelocationRef &R) const override;
+ bool relocationNeedsStub(const RelocationRef &R) const override;
+
+ // Process a GOTTPOFF TLS relocation for x86-64
+ // NOLINTNEXTLINE(readability-identifier-naming)
+ void processX86_64GOTTPOFFRelocation(unsigned SectionID, uint64_t Offset,
+ RelocationValueRef Value,
+ int64_t Addend);
+ // Process a TLSLD/TLSGD relocation for x86-64
+ // NOLINTNEXTLINE(readability-identifier-naming)
+ void processX86_64TLSRelocation(unsigned SectionID, uint64_t Offset,
+ uint64_t RelType, RelocationValueRef Value,
+ int64_t Addend,
+ const RelocationRef &GetAddrRelocation);
+
+public:
+ RuntimeDyldELF(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+ ~RuntimeDyldELF() override;
+
+ static std::unique_ptr<RuntimeDyldELF>
+ create(Triple::ArchType Arch, RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver);
+
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &O) override;
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override;
+ bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+ void registerEHFrames() override;
+ Error finalizeLoad(const ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
new file mode 100644
index 0000000000..a5bc181f8a
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -0,0 +1,584 @@
+//===-- RuntimeDyldImpl.h - Run-time dynamic linker for MC-JIT --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Interface for the implementations of runtime dynamic linker facilities.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDIMPL_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/RTDyldMemoryManager.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/ExecutionEngine/RuntimeDyldChecker.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/SwapByteOrder.h"
+#include <deque>
+#include <map>
+#include <system_error>
+#include <unordered_map>
+
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+
+#define UNIMPLEMENTED_RELOC(RelType) \
+ case RelType: \
+ return make_error<RuntimeDyldError>("Unimplemented relocation: " #RelType)
+
+/// SectionEntry - represents a section emitted into memory by the dynamic
+/// linker.
+class SectionEntry {
+ /// Name - section name.
+ std::string Name;
+
+ /// Address - address in the linker's memory where the section resides.
+ uint8_t *Address;
+
+ /// Size - section size. Doesn't include the stubs.
+ size_t Size;
+
+ /// LoadAddress - the address of the section in the target process's memory.
+ /// Used for situations in which JIT-ed code is being executed in the address
+ /// space of a separate process. If the code executes in the same address
+ /// space where it was JIT-ed, this just equals Address.
+ uint64_t LoadAddress;
+
+ /// StubOffset - used for architectures with stub functions for far
+ /// relocations (like ARM).
+ uintptr_t StubOffset;
+
+ /// The total amount of space allocated for this section. This includes the
+ /// section size and the maximum amount of space that the stubs can occupy.
+ size_t AllocationSize;
+
+ /// ObjAddress - address of the section in the in-memory object file. Used
+ /// for calculating relocations in some object formats (like MachO).
+ uintptr_t ObjAddress;
+
+public:
+ SectionEntry(StringRef name, uint8_t *address, size_t size,
+ size_t allocationSize, uintptr_t objAddress)
+ : Name(std::string(name)), Address(address), Size(size),
+ LoadAddress(reinterpret_cast<uintptr_t>(address)), StubOffset(size),
+ AllocationSize(allocationSize), ObjAddress(objAddress) {
+ // AllocationSize is used only in asserts, prevent an "unused private field"
+ // warning:
+ (void)AllocationSize;
+ }
+
+ StringRef getName() const { return Name; }
+
+ uint8_t *getAddress() const { return Address; }
+
+ /// Return the address of this section with an offset.
+ uint8_t *getAddressWithOffset(unsigned OffsetBytes) const {
+ assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
+ return Address + OffsetBytes;
+ }
+
+ size_t getSize() const { return Size; }
+
+ uint64_t getLoadAddress() const { return LoadAddress; }
+ void setLoadAddress(uint64_t LA) { LoadAddress = LA; }
+
+ /// Return the load address of this section with an offset.
+ uint64_t getLoadAddressWithOffset(unsigned OffsetBytes) const {
+ assert(OffsetBytes <= AllocationSize && "Offset out of bounds!");
+ return LoadAddress + OffsetBytes;
+ }
+
+ uintptr_t getStubOffset() const { return StubOffset; }
+
+ void advanceStubOffset(unsigned StubSize) {
+ StubOffset += StubSize;
+ assert(StubOffset <= AllocationSize && "Not enough space allocated!");
+ }
+
+ uintptr_t getObjAddress() const { return ObjAddress; }
+};
+
+/// RelocationEntry - used to represent relocations internally in the dynamic
+/// linker.
+class RelocationEntry {
+public:
+ /// SectionID - the section this relocation points to.
+ unsigned SectionID;
+
+ /// Offset - offset into the section.
+ uint64_t Offset;
+
+ /// RelType - relocation type.
+ uint32_t RelType;
+
+ /// Addend - the relocation addend encoded in the instruction itself. Also
+ /// used to make a relocation section relative instead of symbol relative.
+ int64_t Addend;
+
+ struct SectionPair {
+ uint32_t SectionA;
+ uint32_t SectionB;
+ };
+
+ /// SymOffset - Section offset of the relocation entry's symbol (used for GOT
+ /// lookup).
+ union {
+ uint64_t SymOffset;
+ SectionPair Sections;
+ };
+
+ /// True if this is a PCRel relocation (MachO specific).
+ bool IsPCRel;
+
+ /// The size of this relocation (MachO specific).
+ unsigned Size;
+
+ // ARM (MachO and COFF) specific.
+ bool IsTargetThumbFunc = false;
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend)
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(0), IsPCRel(false), Size(0), IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ uint64_t symoffset)
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(symoffset), IsPCRel(false), Size(0),
+ IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ bool IsPCRel, unsigned Size)
+ : SectionID(id), Offset(offset), RelType(type), Addend(addend),
+ SymOffset(0), IsPCRel(IsPCRel), Size(Size), IsTargetThumbFunc(false) {}
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+ uint64_t SectionBOffset, bool IsPCRel, unsigned Size)
+ : SectionID(id), Offset(offset), RelType(type),
+ Addend(SectionAOffset - SectionBOffset + addend), IsPCRel(IsPCRel),
+ Size(Size), IsTargetThumbFunc(false) {
+ Sections.SectionA = SectionA;
+ Sections.SectionB = SectionB;
+ }
+
+ RelocationEntry(unsigned id, uint64_t offset, uint32_t type, int64_t addend,
+ unsigned SectionA, uint64_t SectionAOffset, unsigned SectionB,
+ uint64_t SectionBOffset, bool IsPCRel, unsigned Size,
+ bool IsTargetThumbFunc)
+ : SectionID(id), Offset(offset), RelType(type),
+ Addend(SectionAOffset - SectionBOffset + addend), IsPCRel(IsPCRel),
+ Size(Size), IsTargetThumbFunc(IsTargetThumbFunc) {
+ Sections.SectionA = SectionA;
+ Sections.SectionB = SectionB;
+ }
+};
+
+class RelocationValueRef {
+public:
+ unsigned SectionID = 0;
+ uint64_t Offset = 0;
+ int64_t Addend = 0;
+ const char *SymbolName = nullptr;
+ bool IsStubThumb = false;
+
+ inline bool operator==(const RelocationValueRef &Other) const {
+ return SectionID == Other.SectionID && Offset == Other.Offset &&
+ Addend == Other.Addend && SymbolName == Other.SymbolName &&
+ IsStubThumb == Other.IsStubThumb;
+ }
+ inline bool operator<(const RelocationValueRef &Other) const {
+ if (SectionID != Other.SectionID)
+ return SectionID < Other.SectionID;
+ if (Offset != Other.Offset)
+ return Offset < Other.Offset;
+ if (Addend != Other.Addend)
+ return Addend < Other.Addend;
+ if (IsStubThumb != Other.IsStubThumb)
+ return IsStubThumb < Other.IsStubThumb;
+ return SymbolName < Other.SymbolName;
+ }
+};
+
+/// Symbol info for RuntimeDyld.
+class SymbolTableEntry {
+public:
+ SymbolTableEntry() = default;
+
+ SymbolTableEntry(unsigned SectionID, uint64_t Offset, JITSymbolFlags Flags)
+ : Offset(Offset), SectionID(SectionID), Flags(Flags) {}
+
+ unsigned getSectionID() const { return SectionID; }
+ uint64_t getOffset() const { return Offset; }
+ void setOffset(uint64_t NewOffset) { Offset = NewOffset; }
+
+ JITSymbolFlags getFlags() const { return Flags; }
+
+private:
+ uint64_t Offset = 0;
+ unsigned SectionID = 0;
+ JITSymbolFlags Flags = JITSymbolFlags::None;
+};
+
+typedef StringMap<SymbolTableEntry> RTDyldSymbolTable;
+
+class RuntimeDyldImpl {
+ friend class RuntimeDyld::LoadedObjectInfo;
+protected:
+ static const unsigned AbsoluteSymbolSection = ~0U;
+
+ // The MemoryManager to load objects into.
+ RuntimeDyld::MemoryManager &MemMgr;
+
+ // The symbol resolver to use for external symbols.
+ JITSymbolResolver &Resolver;
+
+ // A list of all sections emitted by the dynamic linker. These sections are
+ // referenced in the code by means of their index in this list - SectionID.
+ // Because references may be kept while the list grows, use a container that
+ // guarantees reference stability.
+ typedef std::deque<SectionEntry> SectionList;
+ SectionList Sections;
+
+ typedef unsigned SID; // Type for SectionIDs
+#define RTDYLD_INVALID_SECTION_ID ((RuntimeDyldImpl::SID)(-1))
+
+ // Keep a map of sections from object file to the SectionID which
+ // references it.
+ typedef std::map<SectionRef, unsigned> ObjSectionToIDMap;
+
+ // A global symbol table for symbols from all loaded modules.
+ RTDyldSymbolTable GlobalSymbolTable;
+
+ // Keep a map of common symbols to their info pairs
+ typedef std::vector<SymbolRef> CommonSymbolList;
+
+ // For each symbol, keep a list of relocations based on it. Anytime
+ // its address is reassigned (the JIT re-compiled the function, e.g.),
+ // the relocations get re-resolved.
+ // The symbol (or section) the relocation is sourced from is the Key
+ // in the relocation list where it's stored.
+ typedef SmallVector<RelocationEntry, 64> RelocationList;
+ // Relocations to sections already loaded. Indexed by SectionID which is the
+ // source of the address. The target where the address will be written is
+ // SectionID/Offset in the relocation itself.
+ std::unordered_map<unsigned, RelocationList> Relocations;
+
+ // Relocations to external symbols that are not yet resolved. Symbols are
+ // external when they aren't found in the global symbol table of all loaded
+ // modules. This map is indexed by symbol name.
+ StringMap<RelocationList> ExternalSymbolRelocations;
+
+
+ typedef std::map<RelocationValueRef, uintptr_t> StubMap;
+
+ Triple::ArchType Arch;
+ bool IsTargetLittleEndian;
+ bool IsMipsO32ABI;
+ bool IsMipsN32ABI;
+ bool IsMipsN64ABI;
+
+ // True if all sections should be passed to the memory manager, false if only
+ // sections containing relocations should be. Defaults to 'false'.
+ bool ProcessAllSections;
+
+ // This mutex prevents simultaneously loading objects from two different
+ // threads. This keeps us from having to protect individual data structures
+ // and guarantees that section allocation requests to the memory manager
+ // won't be interleaved between modules. It is also used in mapSectionAddress
+ // and resolveRelocations to protect write access to internal data structures.
+ //
+ // loadObject may be called on the same thread during the handling of
+ // processRelocations, and that's OK. The handling of the relocation lists
+ // is written in such a way as to work correctly if new elements are added to
+ // the end of the list while the list is being processed.
+ sys::Mutex lock;
+
+ using NotifyStubEmittedFunction =
+ RuntimeDyld::NotifyStubEmittedFunction;
+ NotifyStubEmittedFunction NotifyStubEmitted;
+
+ virtual unsigned getMaxStubSize() const = 0;
+ virtual unsigned getStubAlignment() = 0;
+
+ bool HasError;
+ std::string ErrorStr;
+
+ void writeInt16BE(uint8_t *Addr, uint16_t Value) {
+ llvm::support::endian::write<uint16_t, llvm::support::unaligned>(
+ Addr, Value, IsTargetLittleEndian ? support::little : support::big);
+ }
+
+ void writeInt32BE(uint8_t *Addr, uint32_t Value) {
+ llvm::support::endian::write<uint32_t, llvm::support::unaligned>(
+ Addr, Value, IsTargetLittleEndian ? support::little : support::big);
+ }
+
+ void writeInt64BE(uint8_t *Addr, uint64_t Value) {
+ llvm::support::endian::write<uint64_t, llvm::support::unaligned>(
+ Addr, Value, IsTargetLittleEndian ? support::little : support::big);
+ }
+
+ virtual void setMipsABI(const ObjectFile &Obj) {
+ IsMipsO32ABI = false;
+ IsMipsN32ABI = false;
+ IsMipsN64ABI = false;
+ }
+
+ /// Endian-aware read. Read the least significant Size bytes from Src.
+ uint64_t readBytesUnaligned(uint8_t *Src, unsigned Size) const;
+
+ /// Endian-aware write. Write the least significant Size bytes from Value to
+ /// Dst.
+ void writeBytesUnaligned(uint64_t Value, uint8_t *Dst, unsigned Size) const;
+
+ /// Generate JITSymbolFlags from a libObject symbol.
+ virtual Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &Sym);
+
+ /// Modify the given target address based on the given symbol flags.
+ /// This can be used by subclasses to tweak addresses based on symbol flags,
+ /// For example: the MachO/ARM target uses it to set the low bit if the target
+ /// is a thumb symbol.
+ virtual uint64_t modifyAddressBasedOnFlags(uint64_t Addr,
+ JITSymbolFlags Flags) const {
+ return Addr;
+ }
+
+ /// Given the common symbols discovered in the object file, emit a
+ /// new section for them and update the symbol mappings in the object and
+ /// symbol table.
+ Error emitCommonSymbols(const ObjectFile &Obj,
+ CommonSymbolList &CommonSymbols, uint64_t CommonSize,
+ uint32_t CommonAlign);
+
+ /// Emits section data from the object file to the MemoryManager.
+ /// \param IsCode if it's true then allocateCodeSection() will be
+ /// used for emits, else allocateDataSection() will be used.
+ /// \return SectionID.
+ Expected<unsigned> emitSection(const ObjectFile &Obj,
+ const SectionRef &Section,
+ bool IsCode);
+
+ /// Find Section in LocalSections. If the section is not found - emit
+ /// it and store in LocalSections.
+ /// \param IsCode if it's true then allocateCodeSection() will be
+ /// used for emits, else allocateDataSection() will be used.
+ /// \return SectionID.
+ Expected<unsigned> findOrEmitSection(const ObjectFile &Obj,
+ const SectionRef &Section, bool IsCode,
+ ObjSectionToIDMap &LocalSections);
+
+ // Add a relocation entry that uses the given section.
+ void addRelocationForSection(const RelocationEntry &RE, unsigned SectionID);
+
+ // Add a relocation entry that uses the given symbol. This symbol may
+ // be found in the global symbol table, or it may be external.
+ void addRelocationForSymbol(const RelocationEntry &RE, StringRef SymbolName);
+
+ /// Emits long jump instruction to Addr.
+ /// \return Pointer to the memory area for emitting target address.
+ uint8_t *createStubFunction(uint8_t *Addr, unsigned AbiVariant = 0);
+
+ /// Resolves relocations from Relocs list with address from Value.
+ void resolveRelocationList(const RelocationList &Relocs, uint64_t Value);
+
+ /// An object file specific relocation resolver
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address to apply the relocation action
+ virtual void resolveRelocation(const RelocationEntry &RE, uint64_t Value) = 0;
+
+ /// Parses one or more object file relocations (some object files use
+ /// relocation pairs) and stores it to Relocations or SymbolRelocations
+ /// (this depends on the object file type).
+ /// \return Iterator to the next relocation that needs to be parsed.
+ virtual Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &Obj, ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) = 0;
+
+ void applyExternalSymbolRelocations(
+ const StringMap<JITEvaluatedSymbol> ExternalSymbolMap);
+
+ /// Resolve relocations to external symbols.
+ Error resolveExternalSymbols();
+
+ // Compute an upper bound of the memory that is required to load all
+ // sections
+ Error computeTotalAllocSize(const ObjectFile &Obj,
+ uint64_t &CodeSize, uint32_t &CodeAlign,
+ uint64_t &RODataSize, uint32_t &RODataAlign,
+ uint64_t &RWDataSize, uint32_t &RWDataAlign);
+
+ // Compute GOT size
+ unsigned computeGOTSize(const ObjectFile &Obj);
+
+ // Compute the stub buffer size required for a section
+ unsigned computeSectionStubBufSize(const ObjectFile &Obj,
+ const SectionRef &Section);
+
+ // Implementation of the generic part of the loadObject algorithm.
+ Expected<ObjSectionToIDMap> loadObjectImpl(const object::ObjectFile &Obj);
+
+ // Return size of Global Offset Table (GOT) entry
+ virtual size_t getGOTEntrySize() { return 0; }
+
+ // Return true if the relocation R may require allocating a GOT entry.
+ virtual bool relocationNeedsGot(const RelocationRef &R) const {
+ return false;
+ }
+
+ // Return true if the relocation R may require allocating a stub.
+ virtual bool relocationNeedsStub(const RelocationRef &R) const {
+ return true; // Conservative answer
+ }
+
+public:
+ RuntimeDyldImpl(RuntimeDyld::MemoryManager &MemMgr,
+ JITSymbolResolver &Resolver)
+ : MemMgr(MemMgr), Resolver(Resolver),
+ ProcessAllSections(false), HasError(false) {
+ }
+
+ virtual ~RuntimeDyldImpl();
+
+ void setProcessAllSections(bool ProcessAllSections) {
+ this->ProcessAllSections = ProcessAllSections;
+ }
+
+ virtual std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+ loadObject(const object::ObjectFile &Obj) = 0;
+
+ uint64_t getSectionLoadAddress(unsigned SectionID) const {
+ if (SectionID == AbsoluteSymbolSection)
+ return 0;
+ else
+ return Sections[SectionID].getLoadAddress();
+ }
+
+ uint8_t *getSectionAddress(unsigned SectionID) const {
+ if (SectionID == AbsoluteSymbolSection)
+ return nullptr;
+ else
+ return Sections[SectionID].getAddress();
+ }
+
+ StringRef getSectionContent(unsigned SectionID) const {
+ if (SectionID == AbsoluteSymbolSection)
+ return {};
+ else
+ return StringRef(
+ reinterpret_cast<char *>(Sections[SectionID].getAddress()),
+ Sections[SectionID].getStubOffset() + getMaxStubSize());
+ }
+
+ uint8_t* getSymbolLocalAddress(StringRef Name) const {
+ // FIXME: Just look up as a function for now. Overly simple of course.
+ // Work in progress.
+ RTDyldSymbolTable::const_iterator pos = GlobalSymbolTable.find(Name);
+ if (pos == GlobalSymbolTable.end())
+ return nullptr;
+ const auto &SymInfo = pos->second;
+ // Absolute symbols do not have a local address.
+ if (SymInfo.getSectionID() == AbsoluteSymbolSection)
+ return nullptr;
+ return getSectionAddress(SymInfo.getSectionID()) + SymInfo.getOffset();
+ }
+
+ unsigned getSymbolSectionID(StringRef Name) const {
+ auto GSTItr = GlobalSymbolTable.find(Name);
+ if (GSTItr == GlobalSymbolTable.end())
+ return ~0U;
+ return GSTItr->second.getSectionID();
+ }
+
+ JITEvaluatedSymbol getSymbol(StringRef Name) const {
+ // FIXME: Just look up as a function for now. Overly simple of course.
+ // Work in progress.
+ RTDyldSymbolTable::const_iterator pos = GlobalSymbolTable.find(Name);
+ if (pos == GlobalSymbolTable.end())
+ return nullptr;
+ const auto &SymEntry = pos->second;
+ uint64_t SectionAddr = 0;
+ if (SymEntry.getSectionID() != AbsoluteSymbolSection)
+ SectionAddr = getSectionLoadAddress(SymEntry.getSectionID());
+ uint64_t TargetAddr = SectionAddr + SymEntry.getOffset();
+
+ // FIXME: Have getSymbol should return the actual address and the client
+ // modify it based on the flags. This will require clients to be
+ // aware of the target architecture, which we should build
+ // infrastructure for.
+ TargetAddr = modifyAddressBasedOnFlags(TargetAddr, SymEntry.getFlags());
+ return JITEvaluatedSymbol(TargetAddr, SymEntry.getFlags());
+ }
+
+ std::map<StringRef, JITEvaluatedSymbol> getSymbolTable() const {
+ std::map<StringRef, JITEvaluatedSymbol> Result;
+
+ for (auto &KV : GlobalSymbolTable) {
+ auto SectionID = KV.second.getSectionID();
+ uint64_t SectionAddr = getSectionLoadAddress(SectionID);
+ Result[KV.first()] =
+ JITEvaluatedSymbol(SectionAddr + KV.second.getOffset(), KV.second.getFlags());
+ }
+
+ return Result;
+ }
+
+ void resolveRelocations();
+
+ void resolveLocalRelocations();
+
+ static void finalizeAsync(
+ std::unique_ptr<RuntimeDyldImpl> This,
+ unique_function<void(object::OwningBinary<object::ObjectFile>,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo>,
+ Error)>
+ OnEmitted,
+ object::OwningBinary<object::ObjectFile> O,
+ std::unique_ptr<RuntimeDyld::LoadedObjectInfo> Info);
+
+ void reassignSectionAddress(unsigned SectionID, uint64_t Addr);
+
+ void mapSectionAddress(const void *LocalAddress, uint64_t TargetAddress);
+
+ // Is the linker in an error state?
+ bool hasError() { return HasError; }
+
+ // Mark the error condition as handled and continue.
+ void clearError() { HasError = false; }
+
+ // Get the error message.
+ StringRef getErrorString() { return ErrorStr; }
+
+ virtual bool isCompatibleFile(const ObjectFile &Obj) const = 0;
+
+ void setNotifyStubEmitted(NotifyStubEmittedFunction NotifyStubEmitted) {
+ this->NotifyStubEmitted = std::move(NotifyStubEmitted);
+ }
+
+ virtual void registerEHFrames();
+
+ void deregisterEHFrames();
+
+ virtual Error finalizeLoad(const ObjectFile &ObjImg,
+ ObjSectionToIDMap &SectionMap) {
+ return Error::success();
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
new file mode 100644
index 0000000000..9ca76602ea
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.cpp
@@ -0,0 +1,382 @@
+//===-- RuntimeDyldMachO.cpp - Run-time dynamic linker for MC-JIT -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldMachO.h"
+#include "Targets/RuntimeDyldMachOAArch64.h"
+#include "Targets/RuntimeDyldMachOARM.h"
+#include "Targets/RuntimeDyldMachOI386.h"
+#include "Targets/RuntimeDyldMachOX86_64.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+#define DEBUG_TYPE "dyld"
+
+namespace {
+
+// LoadedObjectInfo implementation for MachO objects. MachO keeps debug info
+// in separate .dSYM bundles rather than in a modified copy of the object, so
+// getObjectForDebug deliberately returns an empty OwningBinary.
+class LoadedMachOObjectInfo final
+    : public LoadedObjectInfoHelper<LoadedMachOObjectInfo,
+                                    RuntimeDyld::LoadedObjectInfo> {
+public:
+  LoadedMachOObjectInfo(RuntimeDyldImpl &RTDyld,
+                        ObjSectionToIDMap ObjSecToIDMap)
+      : LoadedObjectInfoHelper(RTDyld, std::move(ObjSecToIDMap)) {}
+
+  OwningBinary<ObjectFile>
+  getObjectForDebug(const ObjectFile &Obj) const override {
+    // No debug-adjusted object is produced for MachO.
+    return OwningBinary<ObjectFile>();
+  }
+};
+
+} // end anonymous namespace
+
+namespace llvm {
+
+// Read the addend stored in-place at the relocation's fixup address.
+// The width is 2^RE.Size bytes (MachO encodes the length field as log2).
+int64_t RuntimeDyldMachO::memcpyAddend(const RelocationEntry &RE) const {
+  unsigned NumBytes = 1 << RE.Size;
+  uint8_t *Src = Sections[RE.SectionID].getAddress() + RE.Offset;
+
+  return static_cast<int64_t>(readBytesUnaligned(Src, NumBytes));
+}
+
+// Process a scattered VANILLA relocation. Scattered relocations identify
+// their target by an absolute address rather than a symbol index, so we
+// locate the section containing that address, emit it if necessary, and
+// record a section-relative relocation (the absolute addend read from the
+// fixup location is rebased to be relative to the target section).
+Expected<relocation_iterator>
+RuntimeDyldMachO::processScatteredVANILLA(
+    unsigned SectionID, relocation_iterator RelI,
+    const ObjectFile &BaseObjT,
+    RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID,
+    bool TargetIsLocalThumbFunc) {
+  const MachOObjectFile &Obj =
+    static_cast<const MachOObjectFile&>(BaseObjT);
+  MachO::any_relocation_info RE =
+    Obj.getRelocation(RelI->getRawDataRefImpl());
+
+  SectionEntry &Section = Sections[SectionID];
+  uint32_t RelocType = Obj.getAnyRelocationType(RE);
+  bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
+  unsigned Size = Obj.getAnyRelocationLength(RE);
+  uint64_t Offset = RelI->getOffset();
+  uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+  // Size is stored as log2 of the byte count.
+  unsigned NumBytes = 1 << Size;
+  int64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
+
+  // The scattered relocation value is the target's address in the object.
+  unsigned SymbolBaseAddr = Obj.getScatteredRelocationValue(RE);
+  section_iterator TargetSI = getSectionByAddress(Obj, SymbolBaseAddr);
+  assert(TargetSI != Obj.section_end() && "Can't find section for symbol");
+  uint64_t SectionBaseAddr = TargetSI->getAddress();
+  SectionRef TargetSection = *TargetSI;
+  bool IsCode = TargetSection.isText();
+  uint32_t TargetSectionID = ~0U;
+  if (auto TargetSectionIDOrErr =
+        findOrEmitSection(Obj, TargetSection, IsCode, ObjSectionToID))
+    TargetSectionID = *TargetSectionIDOrErr;
+  else
+    return TargetSectionIDOrErr.takeError();
+
+  // Rebase the addend so it is relative to the start of the target section.
+  Addend -= SectionBaseAddr;
+  RelocationEntry R(SectionID, Offset, RelocType, Addend, IsPCRel, Size);
+  R.IsTargetThumbFunc = TargetIsLocalThumbFunc;
+
+  addRelocationForSection(R, TargetSectionID);
+
+  return ++RelI;
+}
+
+
+// Describe the target of a (non-scattered) relocation. External relocations
+// are resolved through the global symbol table when possible, otherwise the
+// symbol name is recorded for later resolution; internal relocations are
+// resolved to a (SectionID, section-relative offset) pair. The addend is
+// NOT made PC-relative here (see makeValueAddendPCRel).
+Expected<RelocationValueRef>
+RuntimeDyldMachO::getRelocationValueRef(
+    const ObjectFile &BaseTObj, const relocation_iterator &RI,
+    const RelocationEntry &RE, ObjSectionToIDMap &ObjSectionToID) {
+
+  const MachOObjectFile &Obj =
+      static_cast<const MachOObjectFile &>(BaseTObj);
+  MachO::any_relocation_info RelInfo =
+      Obj.getRelocation(RI->getRawDataRefImpl());
+  RelocationValueRef Value;
+
+  bool IsExternal = Obj.getPlainRelocationExternal(RelInfo);
+  if (IsExternal) {
+    symbol_iterator Symbol = RI->getSymbol();
+    StringRef TargetName;
+    if (auto TargetNameOrErr = Symbol->getName())
+      TargetName = *TargetNameOrErr;
+    else
+      return TargetNameOrErr.takeError();
+    RTDyldSymbolTable::const_iterator SI =
+      GlobalSymbolTable.find(TargetName.data());
+    if (SI != GlobalSymbolTable.end()) {
+      // Known symbol: resolve to its owning section and offset now.
+      const auto &SymInfo = SI->second;
+      Value.SectionID = SymInfo.getSectionID();
+      Value.Offset = SymInfo.getOffset() + RE.Addend;
+    } else {
+      // Unknown symbol: defer resolution; keep the name and raw addend.
+      Value.SymbolName = TargetName.data();
+      Value.Offset = RE.Addend;
+    }
+  } else {
+    // Internal relocation: target is a section; emit it if needed and make
+    // the addend relative to that section's start.
+    SectionRef Sec = Obj.getAnyRelocationSection(RelInfo);
+    bool IsCode = Sec.isText();
+    if (auto SectionIDOrErr = findOrEmitSection(Obj, Sec, IsCode,
+                                                ObjSectionToID))
+      Value.SectionID = *SectionIDOrErr;
+    else
+      return SectionIDOrErr.takeError();
+    uint64_t Addr = Sec.getAddress();
+    Value.Offset = RE.Addend - Addr;
+  }
+
+  return Value;
+}
+
+// Convert the addend to a PC-relative form by adding the object-file address
+// of the fixup (section address + relocation offset) plus the distance to
+// the "next PC" used by the target's PC-relative encoding.
+void RuntimeDyldMachO::makeValueAddendPCRel(RelocationValueRef &Value,
+                                            const relocation_iterator &RI,
+                                            unsigned OffsetToNextPC) {
+  auto &O = *cast<MachOObjectFile>(RI->getObject());
+  section_iterator SecI = O.getRelocationRelocatedSection(RI);
+  Value.Offset += RI->getOffset() + OffsetToNextPC + SecI->getAddress();
+}
+
+// Debug-logging helper: print the relocation entry being resolved, its local
+// (host) and final (target) addresses, and the value being applied.
+void RuntimeDyldMachO::dumpRelocationToResolve(const RelocationEntry &RE,
+                                               uint64_t Value) const {
+  const SectionEntry &Section = Sections[RE.SectionID];
+  uint8_t *LocalAddress = Section.getAddress() + RE.Offset;
+  uint64_t FinalAddress = Section.getLoadAddress() + RE.Offset;
+
+  dbgs() << "resolveRelocation Section: " << RE.SectionID
+         << " LocalAddress: " << format("%p", LocalAddress)
+         << " FinalAddress: " << format("0x%016" PRIx64, FinalAddress)
+         << " Value: " << format("0x%016" PRIx64, Value) << " Addend: " << RE.Addend
+         << " isPCRel: " << RE.IsPCRel << " MachoType: " << RE.RelType
+         << " Size: " << (1 << RE.Size) << "\n";
+}
+
+// Linear scan over the object's sections for the one whose [start, start+size)
+// range contains Addr; returns section_end() if no section contains it.
+section_iterator
+RuntimeDyldMachO::getSectionByAddress(const MachOObjectFile &Obj,
+                                      uint64_t Addr) {
+  section_iterator SI = Obj.section_begin();
+  section_iterator SE = Obj.section_end();
+
+  for (; SI != SE; ++SI) {
+    uint64_t SAddr = SI->getAddress();
+    uint64_t SSize = SI->getSize();
+    if ((Addr >= SAddr) && (Addr < SAddr + SSize))
+      return SI;
+  }
+
+  return SE;
+}
+
+
+// Populate a __pointers (indirect symbol pointer) section: for each 4-byte
+// pointer slot, look up the referenced symbol in the indirect symbol table
+// and register a VANILLA relocation so the slot is filled with the symbol's
+// final address. 32-bit MachO only (slots are fixed at 4 bytes).
+Error RuntimeDyldMachO::populateIndirectSymbolPointersSection(
+                                                    const MachOObjectFile &Obj,
+                                                    const SectionRef &PTSection,
+                                                    unsigned PTSectionID) {
+  assert(!Obj.is64Bit() &&
+         "Pointer table section not supported in 64-bit MachO.");
+
+  MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+  MachO::section Sec32 = Obj.getSection(PTSection.getRawDataRefImpl());
+  uint32_t PTSectionSize = Sec32.size;
+  // reserved1 holds the section's first index into the indirect symbol table.
+  unsigned FirstIndirectSymbol = Sec32.reserved1;
+  const unsigned PTEntrySize = 4;
+  unsigned NumPTEntries = PTSectionSize / PTEntrySize;
+  unsigned PTEntryOffset = 0;
+
+  assert((PTSectionSize % PTEntrySize) == 0 &&
+         "Pointers section does not contain a whole number of stubs?");
+
+  LLVM_DEBUG(dbgs() << "Populating pointer table section "
+                    << Sections[PTSectionID].getName() << ", Section ID "
+                    << PTSectionID << ", " << NumPTEntries << " entries, "
+                    << PTEntrySize << " bytes each:\n");
+
+  for (unsigned i = 0; i < NumPTEntries; ++i) {
+    unsigned SymbolIndex =
+      Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+    symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+    StringRef IndirectSymbolName;
+    if (auto IndirectSymbolNameOrErr = SI->getName())
+      IndirectSymbolName = *IndirectSymbolNameOrErr;
+    else
+      return IndirectSymbolNameOrErr.takeError();
+    LLVM_DEBUG(dbgs() << "  " << IndirectSymbolName << ": index " << SymbolIndex
+                      << ", PT offset: " << PTEntryOffset << "\n");
+    // Size field 2 == log2(4): a 4-byte absolute pointer slot.
+    RelocationEntry RE(PTSectionID, PTEntryOffset,
+                       MachO::GENERIC_RELOC_VANILLA, 0, false, 2);
+    addRelocationForSymbol(RE, IndirectSymbolName);
+    PTEntryOffset += PTEntrySize;
+  }
+  return Error::success();
+}
+
+// This linker handles exactly the MachO object-file format.
+bool RuntimeDyldMachO::isCompatibleFile(const object::ObjectFile &Obj) const {
+  return Obj.isMachO();
+}
+
+// Post-load pass: force emission of the __text, __eh_frame and
+// __gcc_except_tab sections (recording their IDs for later EH-frame
+// registration) and let the target-specific impl finalize every other
+// section that was already emitted.
+template <typename Impl>
+Error
+RuntimeDyldMachOCRTPBase<Impl>::finalizeLoad(const ObjectFile &Obj,
+                                             ObjSectionToIDMap &SectionMap) {
+  unsigned EHFrameSID = RTDYLD_INVALID_SECTION_ID;
+  unsigned TextSID = RTDYLD_INVALID_SECTION_ID;
+  unsigned ExceptTabSID = RTDYLD_INVALID_SECTION_ID;
+
+  for (const auto &Section : Obj.sections()) {
+    StringRef Name;
+    if (Expected<StringRef> NameOrErr = Section.getName())
+      Name = *NameOrErr;
+    else
+      consumeError(NameOrErr.takeError());
+
+    // Force emission of the __text, __eh_frame, and __gcc_except_tab sections
+    // if they're present. Otherwise call down to the impl to handle other
+    // sections that have already been emitted.
+    if (Name == "__text") {
+      if (auto TextSIDOrErr = findOrEmitSection(Obj, Section, true, SectionMap))
+        TextSID = *TextSIDOrErr;
+      else
+        return TextSIDOrErr.takeError();
+    } else if (Name == "__eh_frame") {
+      // Emitted as data (IsCode = false).
+      if (auto EHFrameSIDOrErr = findOrEmitSection(Obj, Section, false,
+                                                   SectionMap))
+        EHFrameSID = *EHFrameSIDOrErr;
+      else
+        return EHFrameSIDOrErr.takeError();
+    } else if (Name == "__gcc_except_tab") {
+      if (auto ExceptTabSIDOrErr = findOrEmitSection(Obj, Section, true,
+                                                     SectionMap))
+        ExceptTabSID = *ExceptTabSIDOrErr;
+      else
+        return ExceptTabSIDOrErr.takeError();
+    } else {
+      auto I = SectionMap.find(Section);
+      if (I != SectionMap.end())
+        if (auto Err = impl().finalizeSection(Obj, I->second, Section))
+          return Err;
+    }
+  }
+  // Remember the trio so registerEHFrames() can fix up and register it later.
+  UnregisteredEHFrameSections.push_back(
+    EHFrameRelatedSections(EHFrameSID, TextSID, ExceptTabSID));
+
+  return Error::success();
+}
+
+// Rewrite one CIE/FDE record in a relocated __eh_frame section and return a
+// pointer past it. For FDEs, the function-start pointer is shifted by
+// DeltaForText and the LSDA pointer (if an augmentation is present) by
+// DeltaForEH, translating object-file addresses into loaded addresses.
+// NOTE(review): assumes pointers are stored as raw absolute TargetPtrT
+// values and that the length field is the 32-bit form (no 0xffffffff
+// extended length) — true for the frames this linker emits.
+template <typename Impl>
+unsigned char *RuntimeDyldMachOCRTPBase<Impl>::processFDE(uint8_t *P,
+                                                          int64_t DeltaForText,
+                                                          int64_t DeltaForEH) {
+  typedef typename Impl::TargetPtrT TargetPtrT;
+
+  LLVM_DEBUG(dbgs() << "Processing FDE: Delta for text: " << DeltaForText
+                    << ", Delta for EH: " << DeltaForEH << "\n");
+  uint32_t Length = readBytesUnaligned(P, 4);
+  P += 4;
+  uint8_t *Ret = P + Length;
+  uint32_t Offset = readBytesUnaligned(P, 4);
+  if (Offset == 0) // is a CIE
+    return Ret;
+
+  P += 4;
+  // Relocate the FDE's function-start (PC-begin) pointer.
+  TargetPtrT FDELocation = readBytesUnaligned(P, sizeof(TargetPtrT));
+  TargetPtrT NewLocation = FDELocation - DeltaForText;
+  writeBytesUnaligned(NewLocation, P, sizeof(TargetPtrT));
+
+  P += sizeof(TargetPtrT);
+
+  // Skip the FDE address range
+  P += sizeof(TargetPtrT);
+
+  uint8_t Augmentationsize = *P;
+  P += 1;
+  if (Augmentationsize != 0) {
+    // Relocate the LSDA pointer held in the augmentation data.
+    TargetPtrT LSDA = readBytesUnaligned(P, sizeof(TargetPtrT));
+    TargetPtrT NewLSDA = LSDA - DeltaForEH;
+    writeBytesUnaligned(NewLSDA, P, sizeof(TargetPtrT));
+  }
+
+  return Ret;
+}
+
+// Difference between two sections' object-file distance and their loaded
+// (memory) distance. Subtracting this delta from an object-file address
+// relative to B yields the corresponding loaded address (see processFDE).
+static int64_t computeDelta(SectionEntry *A, SectionEntry *B) {
+  int64_t ObjDistance = static_cast<int64_t>(A->getObjAddress()) -
+                        static_cast<int64_t>(B->getObjAddress());
+  int64_t MemDistance = A->getLoadAddress() - B->getLoadAddress();
+  return ObjDistance - MemDistance;
+}
+
+// Walk every EH-frame section recorded by finalizeLoad, rewrite each of its
+// FDEs for the final load addresses, then hand the section to the memory
+// manager for registration with the unwinder.
+template <typename Impl>
+void RuntimeDyldMachOCRTPBase<Impl>::registerEHFrames() {
+
+  for (int i = 0, e = UnregisteredEHFrameSections.size(); i != e; ++i) {
+    EHFrameRelatedSections &SectionInfo = UnregisteredEHFrameSections[i];
+    // Without both an __eh_frame and a __text section there is nothing to fix.
+    if (SectionInfo.EHFrameSID == RTDYLD_INVALID_SECTION_ID ||
+        SectionInfo.TextSID == RTDYLD_INVALID_SECTION_ID)
+      continue;
+    SectionEntry *Text = &Sections[SectionInfo.TextSID];
+    SectionEntry *EHFrame = &Sections[SectionInfo.EHFrameSID];
+    SectionEntry *ExceptTab = nullptr;
+    if (SectionInfo.ExceptTabSID != RTDYLD_INVALID_SECTION_ID)
+      ExceptTab = &Sections[SectionInfo.ExceptTabSID];
+
+    int64_t DeltaForText = computeDelta(Text, EHFrame);
+    int64_t DeltaForEH = 0;
+    if (ExceptTab)
+      DeltaForEH = computeDelta(ExceptTab, EHFrame);
+
+    // Fix up every CIE/FDE record in the section in place.
+    uint8_t *P = EHFrame->getAddress();
+    uint8_t *End = P + EHFrame->getSize();
+    while (P != End) {
+      P = processFDE(P, DeltaForText, DeltaForEH);
+    }
+
+    MemMgr.registerEHFrames(EHFrame->getAddress(), EHFrame->getLoadAddress(),
+                            EHFrame->getSize());
+  }
+  UnregisteredEHFrameSections.clear();
+}
+
+// Factory: pick the target-specific MachO linker for the given architecture.
+// Note aarch64_32 (ILP32 watchOS) shares the AArch64 implementation.
+std::unique_ptr<RuntimeDyldMachO>
+RuntimeDyldMachO::create(Triple::ArchType Arch,
+                         RuntimeDyld::MemoryManager &MemMgr,
+                         JITSymbolResolver &Resolver) {
+  switch (Arch) {
+  default:
+    llvm_unreachable("Unsupported target for RuntimeDyldMachO.");
+    break;
+  case Triple::arm:
+    return std::make_unique<RuntimeDyldMachOARM>(MemMgr, Resolver);
+  case Triple::aarch64:
+    return std::make_unique<RuntimeDyldMachOAArch64>(MemMgr, Resolver);
+  case Triple::aarch64_32:
+    return std::make_unique<RuntimeDyldMachOAArch64>(MemMgr, Resolver);
+  case Triple::x86:
+    return std::make_unique<RuntimeDyldMachOI386>(MemMgr, Resolver);
+  case Triple::x86_64:
+    return std::make_unique<RuntimeDyldMachOX86_64>(MemMgr, Resolver);
+  }
+}
+
+// Load a MachO object. On failure, the error is recorded in ErrorStr, the
+// linker is put into the error state (HasError), and nullptr is returned —
+// callers are expected to check hasError()/getErrorString().
+std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+RuntimeDyldMachO::loadObject(const object::ObjectFile &O) {
+  if (auto ObjSectionToIDOrErr = loadObjectImpl(O))
+    return std::make_unique<LoadedMachOObjectInfo>(*this,
+                                                   *ObjSectionToIDOrErr);
+  else {
+    HasError = true;
+    raw_string_ostream ErrStream(ErrorStr);
+    logAllUnhandledErrors(ObjSectionToIDOrErr.takeError(), ErrStream);
+    return nullptr;
+  }
+}
+
+} // end namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
new file mode 100644
index 0000000000..650e7b79fb
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldMachO.h
@@ -0,0 +1,167 @@
+//===-- RuntimeDyldMachO.h - Run-time dynamic linker for MC-JIT ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MachO support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_RUNTIMEDYLDMACHO_H
+
+#include "RuntimeDyldImpl.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/Support/Format.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm;
+using namespace llvm::object;
+
+namespace llvm {
+/// Base class for the MachO flavour of the MC-JIT dynamic linker. Holds the
+/// format-generic relocation helpers; target-specific subclasses live under
+/// ./Targets and are selected by create().
+class RuntimeDyldMachO : public RuntimeDyldImpl {
+protected:
+  struct SectionOffsetPair {
+    unsigned SectionID;
+    uint64_t Offset;
+  };
+
+  /// The (EH frame, text, exception table) section-ID triple gathered per
+  /// loaded module; consumed by registerEHFrames().
+  struct EHFrameRelatedSections {
+    EHFrameRelatedSections()
+        : EHFrameSID(RTDYLD_INVALID_SECTION_ID),
+          TextSID(RTDYLD_INVALID_SECTION_ID),
+          ExceptTabSID(RTDYLD_INVALID_SECTION_ID) {}
+
+    EHFrameRelatedSections(SID EH, SID T, SID Ex)
+        : EHFrameSID(EH), TextSID(T), ExceptTabSID(Ex) {}
+    SID EHFrameSID;
+    SID TextSID;
+    SID ExceptTabSID;
+  };
+
+  // When a module is loaded we save the SectionID of the EH frame section
+  // in a table until we receive a request to register all unregistered
+  // EH frame sections with the memory manager.
+  SmallVector<EHFrameRelatedSections, 2> UnregisteredEHFrameSections;
+
+  RuntimeDyldMachO(RuntimeDyld::MemoryManager &MemMgr,
+                   JITSymbolResolver &Resolver)
+      : RuntimeDyldImpl(MemMgr, Resolver) {}
+
+  /// This convenience method uses memcpy to extract a contiguous addend (the
+  /// addend size and offset are taken from the corresponding fields of the RE).
+  int64_t memcpyAddend(const RelocationEntry &RE) const;
+
+  /// Given a relocation_iterator for a non-scattered relocation, construct a
+  /// RelocationEntry and fill in the common fields. The 'Addend' field is *not*
+  /// filled in, since immediate encodings are highly target/opcode specific.
+  /// For targets/opcodes with simple, contiguous immediates (e.g. X86) the
+  /// memcpyAddend method can be used to read the immediate.
+  RelocationEntry getRelocationEntry(unsigned SectionID,
+                                     const ObjectFile &BaseTObj,
+                                     const relocation_iterator &RI) const {
+    const MachOObjectFile &Obj =
+      static_cast<const MachOObjectFile &>(BaseTObj);
+    MachO::any_relocation_info RelInfo =
+      Obj.getRelocation(RI->getRawDataRefImpl());
+
+    bool IsPCRel = Obj.getAnyRelocationPCRel(RelInfo);
+    unsigned Size = Obj.getAnyRelocationLength(RelInfo);
+    uint64_t Offset = RI->getOffset();
+    MachO::RelocationInfoType RelType =
+      static_cast<MachO::RelocationInfoType>(Obj.getAnyRelocationType(RelInfo));
+
+    return RelocationEntry(SectionID, Offset, RelType, 0, IsPCRel, Size);
+  }
+
+  /// Process a scattered vanilla relocation.
+  Expected<relocation_iterator>
+  processScatteredVANILLA(unsigned SectionID, relocation_iterator RelI,
+                          const ObjectFile &BaseObjT,
+                          RuntimeDyldMachO::ObjSectionToIDMap &ObjSectionToID,
+                          bool TargetIsLocalThumbFunc = false);
+
+  /// Construct a RelocationValueRef representing the relocation target.
+  /// For Symbols in known sections, this will return a RelocationValueRef
+  /// representing a (SectionID, Offset) pair.
+  /// For Symbols whose section is not known, this will return a
+  /// (SymbolName, Offset) pair, where the Offset is taken from the instruction
+  /// immediate (held in RE.Addend).
+  /// In both cases the Addend field is *NOT* fixed up to be PC-relative. That
+  /// should be done by the caller where appropriate by calling makePCRel on
+  /// the RelocationValueRef.
+  Expected<RelocationValueRef>
+  getRelocationValueRef(const ObjectFile &BaseTObj,
+                        const relocation_iterator &RI,
+                        const RelocationEntry &RE,
+                        ObjSectionToIDMap &ObjSectionToID);
+
+  /// Make the RelocationValueRef addend PC-relative.
+  void makeValueAddendPCRel(RelocationValueRef &Value,
+                            const relocation_iterator &RI,
+                            unsigned OffsetToNextPC);
+
+  /// Dump information about the relocation entry (RE) and resolved value.
+  void dumpRelocationToResolve(const RelocationEntry &RE, uint64_t Value) const;
+
+  // Return a section iterator for the section containing the given address.
+  static section_iterator getSectionByAddress(const MachOObjectFile &Obj,
+                                              uint64_t Addr);
+
+
+  // Populate __pointers section.
+  Error populateIndirectSymbolPointersSection(const MachOObjectFile &Obj,
+                                              const SectionRef &PTSection,
+                                              unsigned PTSectionID);
+
+public:
+
+  /// Create a RuntimeDyldMachO instance for the given target architecture.
+  static std::unique_ptr<RuntimeDyldMachO>
+  create(Triple::ArchType Arch,
+         RuntimeDyld::MemoryManager &MemMgr,
+         JITSymbolResolver &Resolver);
+
+  std::unique_ptr<RuntimeDyld::LoadedObjectInfo>
+  loadObject(const object::ObjectFile &O) override;
+
+  /// Direct access to a loaded section by ID (no bounds checking).
+  SectionEntry &getSection(unsigned SectionID) { return Sections[SectionID]; }
+
+  bool isCompatibleFile(const object::ObjectFile &Obj) const override;
+};
+
+/// RuntimeDyldMachOTarget - Templated base class for generic MachO linker
+/// algorithms and data structures.
+///
+/// Concrete, target specific sub-classes can be accessed via the impl()
+/// methods. (i.e. the RuntimeDyldMachO hierarchy uses the Curiously
+/// Recurring Template Idiom). Concrete subclasses for each target
+/// can be found in ./Targets.
+template <typename Impl>
+class RuntimeDyldMachOCRTPBase : public RuntimeDyldMachO {
+private:
+  Impl &impl() { return static_cast<Impl &>(*this); }
+  const Impl &impl() const { return static_cast<const Impl &>(*this); }
+
+  /// Rewrite one CIE/FDE record for final load addresses and return a
+  /// pointer past it (see the definition in RuntimeDyldMachO.cpp).
+  unsigned char *processFDE(uint8_t *P, int64_t DeltaForText,
+                            int64_t DeltaForEH);
+
+public:
+  RuntimeDyldMachOCRTPBase(RuntimeDyld::MemoryManager &MemMgr,
+                           JITSymbolResolver &Resolver)
+    : RuntimeDyldMachO(MemMgr, Resolver) {}
+
+  Error finalizeLoad(const ObjectFile &Obj,
+                     ObjSectionToIDMap &SectionMap) override;
+  void registerEHFrames() override;
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
new file mode 100644
index 0000000000..14510e56b3
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
@@ -0,0 +1,376 @@
+//===-- RuntimeDyldCOFFAArch64.h --- COFF/AArch64 specific code ---*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF AArch64 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFAARCH64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFAARCH64_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "dyld"
+
+using namespace llvm::support::endian;
+
+namespace llvm {
+
+// This relocation type is used for handling long branch instructions
+// through the stub. The value (0x111) is outside the range of real
+// IMAGE_REL_ARM64_* relocation types, so it cannot collide with them.
+enum InternalRelocationType : unsigned {
+  INTERNAL_REL_ARM64_LONG_BRANCH26 = 0x111,
+};
+
+// Read-modify-write helpers for little-endian instruction/data patching:
+// add16 adds v to a 16-bit LE value; or32le ORs V into a 32-bit LE value.
+static void add16(uint8_t *p, int16_t v) { write16le(p, read16le(p) + v); }
+static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); }
+
+// Patch the 12-bit immediate field (bits 21:10) of an AArch64 instruction.
+// rangeLimit narrows the immediate mask; write32AArch64Ldr uses it to drop
+// the low bits of scaled load/store offsets.
+static void write32AArch64Imm(uint8_t *T, uint64_t imm, uint32_t rangeLimit) {
+  uint32_t orig = read32le(T);
+  orig &= ~(0xFFF << 10);
+  write32le(T, orig | ((imm & (0xFFF >> rangeLimit)) << 10));
+}
+
+// Patch the scaled unsigned-immediate offset of an LDR/STR instruction.
+// The access size is decoded from the instruction itself (size bits 31:30,
+// widened for SIMD/FP 128-bit accesses) and the byte offset must be a
+// multiple of that size.
+static void write32AArch64Ldr(uint8_t *T, uint64_t imm) {
+  uint32_t orig = read32le(T);
+  uint32_t size = orig >> 30;
+  // 0x04000000 indicates SIMD/FP registers
+  // 0x00800000 indicates 128 bit
+  if ((orig & 0x04800000) == 0x04800000)
+    size += 4;
+  if ((imm & ((1 << size) - 1)) != 0)
+    assert(0 && "misaligned ldr/str offset");
+  // Store the offset scaled down by the access size.
+  write32AArch64Imm(T, imm >> size, size);
+}
+
+// Patch an ADR/ADRP-style 21-bit immediate: the delta between target s and
+// place p (both shifted right; shift==12 gives ADRP page granularity) is
+// split into immlo (bits 30:29) and immhi (bits 23:5) of the instruction.
+static void write32AArch64Addr(void *T, uint64_t s, uint64_t p, int shift) {
+  uint64_t Imm = (s >> shift) - (p >> shift);
+  uint32_t ImmLo = (Imm & 0x3) << 29;
+  uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
+  uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
+  write32le(T, (read32le(T) & ~Mask) | ImmLo | ImmHi);
+}
+
+/// COFF/AArch64 (ARM64 Windows) target for the MC-JIT dynamic linker.
+class RuntimeDyldCOFFAArch64 : public RuntimeDyldCOFF {
+
+private:
+  // When a module is loaded we save the SectionID of the unwind
+  // sections in a table until we receive a request to register all
+  // unregistered EH frame sections with the memory manager.
+  SmallVector<SID, 2> UnregisteredEHFrameSections;
+  SmallVector<SID, 2> RegisteredEHFrameSections;
+  uint64_t ImageBase;
+
+  // Fake an __ImageBase pointer by returning the section with the lowest
+  // address (computed lazily and cached on first use).
+  uint64_t getImageBase() {
+    if (!ImageBase) {
+      ImageBase = std::numeric_limits<uint64_t>::max();
+      for (const SectionEntry &Section : Sections)
+        // The Sections list may contain sections that weren't loaded for
+        // whatever reason: they may be debug sections, and ProcessAllSections
+        // is false, or they may be sections that contain 0 bytes. If the
+        // section isn't loaded, the load address will be 0, and it should not
+        // be included in the ImageBase calculation.
+        if (Section.getLoadAddress() != 0)
+          ImageBase = std::min(ImageBase, Section.getLoadAddress());
+    }
+    return ImageBase;
+  }
+
+public:
+  RuntimeDyldCOFFAArch64(RuntimeDyld::MemoryManager &MM,
+                         JITSymbolResolver &Resolver)
+      : RuntimeDyldCOFF(MM, Resolver, 8, COFF::IMAGE_REL_ARM64_ADDR64),
+        ImageBase(0) {}
+
+  unsigned getStubAlignment() override { return 8; }
+
+  unsigned getMaxStubSize() const override { return 20; }
+
+  /// Redirect an out-of-range branch through a (possibly shared) stub in the
+  /// same section. Returns the adjusted (Offset, RelType, Addend) triple: the
+  /// relocation is rewritten to the internal LONG_BRANCH26 type so that
+  /// resolution patches the stub's MOVZ/MOVK sequence instead of the branch.
+  std::tuple<uint64_t, uint64_t, uint64_t>
+  generateRelocationStub(unsigned SectionID, StringRef TargetName,
+                         uint64_t Offset, uint64_t RelType, uint64_t Addend,
+                         StubMap &Stubs) {
+    uintptr_t StubOffset;
+    SectionEntry &Section = Sections[SectionID];
+
+    RelocationValueRef OriginalRelValueRef;
+    OriginalRelValueRef.SectionID = SectionID;
+    OriginalRelValueRef.Offset = Offset;
+    OriginalRelValueRef.Addend = Addend;
+    OriginalRelValueRef.SymbolName = TargetName.data();
+
+    auto Stub = Stubs.find(OriginalRelValueRef);
+    if (Stub == Stubs.end()) {
+      LLVM_DEBUG(dbgs() << " Create a new stub function for "
+                        << TargetName.data() << "\n");
+
+      StubOffset = Section.getStubOffset();
+      Stubs[OriginalRelValueRef] = StubOffset;
+      createStubFunction(Section.getAddressWithOffset(StubOffset));
+      Section.advanceStubOffset(getMaxStubSize());
+    } else {
+      LLVM_DEBUG(dbgs() << " Stub function found for " << TargetName.data()
+                        << "\n");
+      StubOffset = Stub->second;
+    }
+
+    // Resolve original relocation to stub function.
+    const RelocationEntry RE(SectionID, Offset, RelType, Addend);
+    resolveRelocation(RE, Section.getLoadAddressWithOffset(StubOffset));
+
+    // adjust relocation info so resolution writes to the stub function
+    // Here an internal relocation type is used for resolving long branch via
+    // stub instruction.
+    Addend = 0;
+    Offset = StubOffset;
+    RelType = INTERNAL_REL_ARM64_LONG_BRANCH26;
+
+    return std::make_tuple(Offset, RelType, Addend);
+  }
+
+  /// Decode one relocation: extract the implicit addend from the instruction
+  /// bytes in the object, then record it either against a symbol (external)
+  /// or against the target section (internal / dllimport).
+  Expected<object::relocation_iterator>
+  processRelocationRef(unsigned SectionID, object::relocation_iterator RelI,
+                       const object::ObjectFile &Obj,
+                       ObjSectionToIDMap &ObjSectionToID,
+                       StubMap &Stubs) override {
+
+    auto Symbol = RelI->getSymbol();
+    if (Symbol == Obj.symbol_end())
+      report_fatal_error("Unknown symbol in relocation");
+
+    Expected<StringRef> TargetNameOrErr = Symbol->getName();
+    if (!TargetNameOrErr)
+      return TargetNameOrErr.takeError();
+    StringRef TargetName = *TargetNameOrErr;
+
+    auto SectionOrErr = Symbol->getSection();
+    if (!SectionOrErr)
+      return SectionOrErr.takeError();
+    auto Section = *SectionOrErr;
+
+    uint64_t RelType = RelI->getType();
+    uint64_t Offset = RelI->getOffset();
+
+    // If there is no section, this must be an external reference.
+    bool IsExtern = Section == Obj.section_end();
+
+    // Determine the Addend used to adjust the relocation value.
+    uint64_t Addend = 0;
+    SectionEntry &AddendSection = Sections[SectionID];
+    uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+    uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+    unsigned TargetSectionID = -1;
+    uint64_t TargetOffset = -1;
+
+    if (TargetName.startswith(getImportSymbolPrefix())) {
+      // __imp_ symbol: route through the DLL import table entry instead.
+      TargetSectionID = SectionID;
+      TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName);
+      TargetName = StringRef();
+      IsExtern = false;
+    } else if (!IsExtern) {
+      if (auto TargetSectionIDOrErr = findOrEmitSection(
+              Obj, *Section, Section->isText(), ObjSectionToID))
+        TargetSectionID = *TargetSectionIDOrErr;
+      else
+        return TargetSectionIDOrErr.takeError();
+
+      TargetOffset = getSymbolOffset(*Symbol);
+    }
+
+    // Extract the implicit addend stored in the instruction's immediate field.
+    switch (RelType) {
+    case COFF::IMAGE_REL_ARM64_ADDR32:
+    case COFF::IMAGE_REL_ARM64_ADDR32NB:
+    case COFF::IMAGE_REL_ARM64_REL32:
+    case COFF::IMAGE_REL_ARM64_SECREL:
+      Addend = read32le(Displacement);
+      break;
+    case COFF::IMAGE_REL_ARM64_BRANCH26: {
+      uint32_t orig = read32le(Displacement);
+      Addend = (orig & 0x03FFFFFF) << 2;
+
+      // B/BL may be out of range for external targets; branch via a stub.
+      if (IsExtern)
+        std::tie(Offset, RelType, Addend) = generateRelocationStub(
+            SectionID, TargetName, Offset, RelType, Addend, Stubs);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_BRANCH19: {
+      uint32_t orig = read32le(Displacement);
+      Addend = (orig & 0x00FFFFE0) >> 3;
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_BRANCH14: {
+      uint32_t orig = read32le(Displacement);
+      Addend = (orig & 0x000FFFE0) >> 3;
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_REL21:
+    case COFF::IMAGE_REL_ARM64_PAGEBASE_REL21: {
+      uint32_t orig = read32le(Displacement);
+      Addend = ((orig >> 29) & 0x3) | ((orig >> 3) & 0x1FFFFC);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12L:
+    case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12A: {
+      uint32_t orig = read32le(Displacement);
+      Addend = ((orig >> 10) & 0xFFF);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_ADDR64: {
+      Addend = read64le(Displacement);
+      break;
+    }
+    default:
+      break;
+    }
+
+#if !defined(NDEBUG)
+    SmallString<32> RelTypeName;
+    RelI->getTypeName(RelTypeName);
+
+    LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+                      << " RelType: " << RelTypeName << " TargetName: "
+                      << TargetName << " Addend " << Addend << "\n");
+#endif
+
+    if (IsExtern) {
+      RelocationEntry RE(SectionID, Offset, RelType, Addend);
+      addRelocationForSymbol(RE, TargetName);
+    } else {
+      RelocationEntry RE(SectionID, Offset, RelType, TargetOffset + Addend);
+      addRelocationForSection(RE, TargetSectionID);
+    }
+    return ++RelI;
+  }
+
+  /// Patch the instruction/data at the relocation site with the resolved
+  /// Value, encoding it per the COFF ARM64 relocation type.
+  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+    // NOTE(review): this copies the SectionEntry; `const auto &` would avoid
+    // the copy (kept as-is to match upstream LLVM).
+    const auto Section = Sections[RE.SectionID];
+    uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+    uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+
+    switch (RE.RelType) {
+    default:
+      llvm_unreachable("unsupported relocation type");
+    case COFF::IMAGE_REL_ARM64_ABSOLUTE: {
+      // This relocation is ignored.
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_PAGEBASE_REL21: {
+      // The page base of the target, for ADRP instruction.
+      Value += RE.Addend;
+      write32AArch64Addr(Target, Value, FinalAddress, 12);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_REL21: {
+      // The 12-bit relative displacement to the target, for instruction ADR
+      Value += RE.Addend;
+      write32AArch64Addr(Target, Value, FinalAddress, 0);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12A: {
+      // The 12-bit page offset of the target,
+      // for instructions ADD/ADDS (immediate) with zero shift.
+      Value += RE.Addend;
+      write32AArch64Imm(Target, Value & 0xFFF, 0);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_PAGEOFFSET_12L: {
+      // The 12-bit page offset of the target,
+      // for instruction LDR (indexed, unsigned immediate).
+      Value += RE.Addend;
+      write32AArch64Ldr(Target, Value & 0xFFF);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_ADDR32: {
+      // The 32-bit VA of the target.
+      uint32_t VA = Value + RE.Addend;
+      write32le(Target, VA);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_ADDR32NB: {
+      // The target's 32-bit RVA.
+      uint64_t RVA = Value + RE.Addend - getImageBase();
+      write32le(Target, RVA);
+      break;
+    }
+    case INTERNAL_REL_ARM64_LONG_BRANCH26: {
+      // Encode the immediate value into the generated stub's MOVZ/MOVK
+      // sequence, 16 bits per instruction.
+      or32le(Target + 12, ((Value + RE.Addend) & 0xFFFF) << 5);
+      or32le(Target + 8, ((Value + RE.Addend) & 0xFFFF0000) >> 11);
+      or32le(Target + 4, ((Value + RE.Addend) & 0xFFFF00000000) >> 27);
+      or32le(Target + 0, ((Value + RE.Addend) & 0xFFFF000000000000) >> 43);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_BRANCH26: {
+      // The 26-bit relative displacement to the target, for B and BL
+      // instructions.
+      uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
+      assert(isInt<28>(PCRelVal) && "Branch target is out of range.");
+      write32le(Target, (read32le(Target) & ~(0x03FFFFFF)) |
+                            (PCRelVal & 0x0FFFFFFC) >> 2);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_BRANCH19: {
+      // The 19-bit offset to the relocation target,
+      // for conditional B instruction.
+      uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
+      assert(isInt<21>(PCRelVal) && "Branch target is out of range.");
+      write32le(Target, (read32le(Target) & ~(0x00FFFFE0)) |
+                            (PCRelVal & 0x001FFFFC) << 3);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_BRANCH14: {
+      // The 14-bit offset to the relocation target,
+      // for instructions TBZ and TBNZ.
+      uint64_t PCRelVal = Value + RE.Addend - FinalAddress;
+      assert(isInt<16>(PCRelVal) && "Branch target is out of range.");
+      write32le(Target, (read32le(Target) & ~(0x000FFFE0)) |
+                            (PCRelVal & 0x0000FFFC) << 3);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_ADDR64: {
+      // The 64-bit VA of the relocation target.
+      write64le(Target, Value + RE.Addend);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_SECTION: {
+      // 16-bit section index of the section that contains the target.
+      assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+             "relocation overflow");
+      add16(Target, RE.SectionID);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_SECREL: {
+      // 32-bit offset of the target from the beginning of its section.
+      assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+             "Relocation overflow");
+      assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+             "Relocation underflow");
+      write32le(Target, RE.Addend);
+      break;
+    }
+    case COFF::IMAGE_REL_ARM64_REL32: {
+      // The 32-bit relative address from the byte following the relocation.
+      uint64_t Result = Value - FinalAddress - 4;
+      write32le(Target, Result + RE.Addend);
+      break;
+    }
+    }
+  }
+
+  // COFF/ARM64 unwind info registration is not implemented here.
+  void registerEHFrames() override {}
+};
+
+} // End namespace llvm
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
new file mode 100644
index 0000000000..03c38260be
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
@@ -0,0 +1,228 @@
+//===--- RuntimeDyldCOFFI386.h --- COFF/X86_64 specific code ---*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF x86 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFI386_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFI386_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldCOFFI386 : public RuntimeDyldCOFF {
+public:
+ RuntimeDyldCOFFI386(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 4, COFF::IMAGE_REL_I386_DIR32) {}
+
+ unsigned getMaxStubSize() const override {
+ return 8; // 2-byte jmp instruction + 32-bit relative address + 2 byte pad
+ }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+
+ auto Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
+ auto Section = *SectionOrErr;
+ bool IsExtern = Section == Obj.section_end();
+
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+
+ unsigned TargetSectionID = -1;
+ uint64_t TargetOffset = -1;
+ if (TargetName.startswith(getImportSymbolPrefix())) {
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName, true);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr = findOrEmitSection(
+ Obj, *Section, Section->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ if (RelType != COFF::IMAGE_REL_I386_SECTION)
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t Addend = 0;
+ SectionEntry &AddendSection = Sections[SectionID];
+ uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_I386_DIR32:
+ case COFF::IMAGE_REL_I386_DIR32NB:
+ case COFF::IMAGE_REL_I386_SECREL:
+ case COFF::IMAGE_REL_I386_REL32: {
+ Addend = readBytesUnaligned(Displacement, 4);
+ break;
+ }
+ default:
+ break;
+ }
+
+#if !defined(NDEBUG)
+ SmallString<32> RelTypeName;
+ RelI->getTypeName(RelTypeName);
+#endif
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, 0, -1, 0, 0, 0, false, 0);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_I386_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_I386_DIR32:
+ case COFF::IMAGE_REL_I386_DIR32NB:
+ case COFF::IMAGE_REL_I386_REL32: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECTION: {
+ RelocationEntry RE =
+ RelocationEntry(TargetSectionID, Offset, RelType, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECREL: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const auto Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+ case COFF::IMAGE_REL_I386_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_I386_DIR32: {
+ // The target's 32-bit VA.
+ uint64_t Result =
+ RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddressWithOffset(
+ RE.Addend);
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_DIR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_DIR32NB: {
+ // The target's 32-bit RVA.
+ // NOTE: use Section[0].getLoadAddress() as an approximation of ImageBase
+ uint64_t Result =
+ Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend) -
+ Sections[0].getLoadAddress();
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_DIR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_REL32: {
+ // 32-bit relative displacement to the target.
+ uint64_t Result = RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddress();
+ Result = Result - Section.getLoadAddress() + RE.Addend - 4 - RE.Offset;
+ assert(static_cast<int64_t>(Result) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(Result) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_REL32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_I386_SECTION:
+ // 16-bit section index of the section that contains the target.
+ assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_SECTION Value: "
+ << RE.SectionID << '\n');
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ case COFF::IMAGE_REL_I386_SECREL:
+ // 32-bit offset of the target from the beginning of its section.
+ assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_I386_SECREL Value: "
+ << RE.Addend << '\n');
+ writeBytesUnaligned(RE.Addend, Target, 4);
+ break;
+ default:
+ llvm_unreachable("unsupported relocation type");
+ }
+ }
+
+ void registerEHFrames() override {}
+};
+
+}
+
+#endif
+
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
new file mode 100644
index 0000000000..dd66ff7ecf
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
@@ -0,0 +1,325 @@
+//===--- RuntimeDyldCOFFThumb.h --- COFF/Thumb specific code ---*- C++ --*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF thumb support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFTHUMB_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFFTHUMB_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+static bool isThumbFunc(object::symbol_iterator Symbol,
+ const object::ObjectFile &Obj,
+ object::section_iterator Section) {
+ Expected<object::SymbolRef::Type> SymTypeOrErr = Symbol->getType();
+ if (!SymTypeOrErr) {
+ std::string Buf;
+ raw_string_ostream OS(Buf);
+ logAllUnhandledErrors(SymTypeOrErr.takeError(), OS);
+ report_fatal_error(Twine(OS.str()));
+ }
+
+ if (*SymTypeOrErr != object::SymbolRef::ST_Function)
+ return false;
+
+ // We check the IMAGE_SCN_MEM_16BIT flag in the section of the symbol to tell
+ // if it's thumb or not
+ return cast<object::COFFObjectFile>(Obj)
+ .getCOFFSection(*Section)
+ ->Characteristics &
+ COFF::IMAGE_SCN_MEM_16BIT;
+}
+
+class RuntimeDyldCOFFThumb : public RuntimeDyldCOFF {
+public:
+ RuntimeDyldCOFFThumb(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 4, COFF::IMAGE_REL_ARM_ADDR32) {}
+
+ unsigned getMaxStubSize() const override {
+ return 16; // 8-byte load instructions, 4-byte jump, 4-byte padding
+ }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ auto Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+ StringRef TargetName = *TargetNameOrErr;
+
+ auto SectionOrErr = Symbol->getSection();
+ if (!SectionOrErr)
+ return SectionOrErr.takeError();
+ auto Section = *SectionOrErr;
+
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t Addend = 0;
+ SectionEntry &AddendSection = Sections[SectionID];
+ uintptr_t ObjTarget = AddendSection.getObjAddress() + Offset;
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+
+ switch (RelType) {
+ case COFF::IMAGE_REL_ARM_ADDR32:
+ case COFF::IMAGE_REL_ARM_ADDR32NB:
+ case COFF::IMAGE_REL_ARM_SECREL:
+ Addend = readBytesUnaligned(Displacement, 4);
+ break;
+ default:
+ break;
+ }
+
+#if !defined(NDEBUG)
+ SmallString<32> RelTypeName;
+ RelI->getTypeName(RelTypeName);
+#endif
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelTypeName << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ bool IsExtern = Section == Obj.section_end();
+ unsigned TargetSectionID = -1;
+ uint64_t TargetOffset = -1;
+
+ if (TargetName.startswith(getImportSymbolPrefix())) {
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName, true);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *Section, Section->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ if (RelType != COFF::IMAGE_REL_ARM_SECTION)
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, 0, -1, 0, 0, 0, false, 0);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+
+ // We need to find out if the relocation is relative to a thumb function
+ // so that we include the ISA selection bit when resolve the relocation
+ bool IsTargetThumbFunc = isThumbFunc(Symbol, Obj, Section);
+
+ switch (RelType) {
+ default: llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_ARM_ADDR32: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0, IsTargetThumbFunc);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_ADDR32NB: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECTION: {
+ RelocationEntry RE =
+ RelocationEntry(TargetSectionID, Offset, RelType, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECREL: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_MOV32T: {
+ RelocationEntry RE =
+ RelocationEntry(SectionID, Offset, RelType, Addend, TargetSectionID,
+ TargetOffset, 0, 0, false, 0, IsTargetThumbFunc);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH20T:
+ case COFF::IMAGE_REL_ARM_BRANCH24T:
+ case COFF::IMAGE_REL_ARM_BLX23T: {
+ RelocationEntry RE = RelocationEntry(SectionID, Offset, RelType,
+ TargetOffset + Addend, true, 0);
+ addRelocationForSection(RE, TargetSectionID);
+ break;
+ }
+ }
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const auto Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+ int ISASelectionBit = RE.IsTargetThumbFunc ? 1 : 0;
+
+ switch (RE.RelType) {
+ default: llvm_unreachable("unsupported relocation type");
+ case COFF::IMAGE_REL_ARM_ABSOLUTE:
+ // This relocation is ignored.
+ break;
+ case COFF::IMAGE_REL_ARM_ADDR32: {
+ // The target's 32-bit VA.
+ uint64_t Result =
+ RE.Sections.SectionA == static_cast<uint32_t>(-1)
+ ? Value
+ : Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
+ Result |= ISASelectionBit;
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_ADDR32NB: {
+ // The target's 32-bit RVA.
+ // NOTE: use Section[0].getLoadAddress() as an approximation of ImageBase
+ uint64_t Result = Sections[RE.Sections.SectionA].getLoadAddress() -
+ Sections[0].getLoadAddress() + RE.Addend;
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_ADDR32NB"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+ Result |= ISASelectionBit;
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_SECTION:
+ // 16-bit section index of the section that contains the target.
+ assert(static_cast<uint32_t>(RE.SectionID) <= UINT16_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECTION Value: "
+ << RE.SectionID << '\n');
+ writeBytesUnaligned(RE.SectionID, Target, 2);
+ break;
+ case COFF::IMAGE_REL_ARM_SECREL:
+ // 32-bit offset of the target from the beginning of its section.
+ assert(static_cast<uint64_t>(RE.Addend) <= UINT32_MAX &&
+ "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_SECREL Value: " << RE.Addend
+ << '\n');
+ writeBytesUnaligned(RE.Addend, Target, 2);
+ break;
+ case COFF::IMAGE_REL_ARM_MOV32T: {
+ // 32-bit VA of the target applied to a contiguous MOVW+MOVT pair.
+ uint64_t Result =
+ Sections[RE.Sections.SectionA].getLoadAddressWithOffset(RE.Addend);
+ assert(Result <= UINT32_MAX && "relocation overflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_MOV32T"
+ << " TargetSection: " << RE.Sections.SectionA
+ << " Value: " << format("0x%08" PRIx32, Result)
+ << '\n');
+
+ // MOVW(T3): |11110|i|10|0|1|0|0|imm4|0|imm3|Rd|imm8|
+ // imm32 = zext imm4:i:imm3:imm8
+ // MOVT(T1): |11110|i|10|1|1|0|0|imm4|0|imm3|Rd|imm8|
+ // imm16 = imm4:i:imm3:imm8
+
+ auto EncodeImmediate = [](uint8_t *Bytes, uint16_t Immediate) {
+ Bytes[0] |= ((Immediate & 0xf000) >> 12);
+ Bytes[1] |= ((Immediate & 0x0800) >> 11);
+ Bytes[2] |= ((Immediate & 0x00ff) >> 0);
+ Bytes[3] |= (((Immediate & 0x0700) >> 8) << 4);
+ };
+
+ EncodeImmediate(&Target[0],
+ (static_cast<uint32_t>(Result) >> 00) | ISASelectionBit);
+ EncodeImmediate(&Target[4], static_cast<uint32_t>(Result) >> 16);
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH20T: {
+ // The most significant 20-bits of the signed 21-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH20T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BRANCH24T: {
+ // The most significant 24-bits of the signed 25-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BRANCH24T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ case COFF::IMAGE_REL_ARM_BLX23T: {
+ // The most significant 24-bits of the signed 25-bit relative displacement
+ uint64_t Value =
+ RE.Addend - (Sections[RE.SectionID].getLoadAddress() + RE.Offset) - 4;
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX &&
+ "relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN &&
+ "relocation underflow");
+ LLVM_DEBUG(dbgs() << "\t\tOffset: " << RE.Offset
+ << " RelType: IMAGE_REL_ARM_BLX23T"
+ << " Value: " << static_cast<int32_t>(Value) << '\n');
+ static_cast<void>(Value);
+ llvm_unreachable("unimplemented relocation");
+ break;
+ }
+ }
+ }
+
+ void registerEHFrames() override {}
+};
+
+}
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
new file mode 100644
index 0000000000..9df3e2e3c3
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
@@ -0,0 +1,315 @@
+//===-- RuntimeDyldCOFFX86_64.h --- COFF/X86_64 specific code ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// COFF x86_x64 support for MC-JIT runtime dynamic linker.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFF86_64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDCOFF86_64_H
+
+#include "../RuntimeDyldCOFF.h"
+#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/Object/COFF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldCOFFX86_64 : public RuntimeDyldCOFF {
+
+private:
+ // When a module is loaded we save the SectionID of the unwind
+ // sections in a table until we receive a request to register all
+ // unregisteredEH frame sections with the memory manager.
+ SmallVector<SID, 2> UnregisteredEHFrameSections;
+ SmallVector<SID, 2> RegisteredEHFrameSections;
+ uint64_t ImageBase;
+
+ // Fake an __ImageBase pointer by returning the section with the lowest adress
+ uint64_t getImageBase() {
+ if (!ImageBase) {
+ ImageBase = std::numeric_limits<uint64_t>::max();
+ for (const SectionEntry &Section : Sections)
+ // The Sections list may contain sections that weren't loaded for
+ // whatever reason: they may be debug sections, and ProcessAllSections
+ // is false, or they may be sections that contain 0 bytes. If the
+ // section isn't loaded, the load address will be 0, and it should not
+ // be included in the ImageBase calculation.
+ if (Section.getLoadAddress() != 0)
+ ImageBase = std::min(ImageBase, Section.getLoadAddress());
+ }
+ return ImageBase;
+ }
+
+ void write32BitOffset(uint8_t *Target, int64_t Addend, uint64_t Delta) {
+ uint64_t Result = Addend + Delta;
+ assert(Result <= UINT32_MAX && "Relocation overflow");
+ writeBytesUnaligned(Result, Target, 4);
+ }
+
+public:
+ RuntimeDyldCOFFX86_64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldCOFF(MM, Resolver, 8, COFF::IMAGE_REL_AMD64_ADDR64),
+ ImageBase(0) {}
+
+ unsigned getStubAlignment() override { return 1; }
+
+ // 2-byte jmp instruction + 32-bit relative address + 64-bit absolute jump
+ unsigned getMaxStubSize() const override { return 14; }
+
+ // The target location for the relocation is described by RE.SectionID and
+ // RE.Offset. RE.SectionID can be used to find the SectionEntry. Each
+ // SectionEntry has three members describing its location.
+ // SectionEntry::Address is the address at which the section has been loaded
+ // into memory in the current (host) process. SectionEntry::LoadAddress is
+ // the address that the section will have in the target process.
+ // SectionEntry::ObjAddress is the address of the bits for this section in the
+ // original emitted object image (also in the current address space).
+ //
+ // Relocations will be applied as if the section were loaded at
+ // SectionEntry::LoadAddress, but they will be applied at an address based
+ // on SectionEntry::Address. SectionEntry::ObjAddress will be used to refer
+ // to Target memory contents if they are required for value calculations.
+ //
+ // The Value parameter here is the load address of the symbol for the
+ // relocation to be applied. For relocations which refer to symbols in the
+ // current object Value will be the LoadAddress of the section in which
+ // the symbol resides (RE.Addend provides additional information about the
+ // symbol location). For external symbols, Value will be the address of the
+ // symbol in the target address space.
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *Target = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+
+ case COFF::IMAGE_REL_AMD64_REL32:
+ case COFF::IMAGE_REL_AMD64_REL32_1:
+ case COFF::IMAGE_REL_AMD64_REL32_2:
+ case COFF::IMAGE_REL_AMD64_REL32_3:
+ case COFF::IMAGE_REL_AMD64_REL32_4:
+ case COFF::IMAGE_REL_AMD64_REL32_5: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ // Delta is the distance from the start of the reloc to the end of the
+ // instruction with the reloc.
+ uint64_t Delta = 4 + (RE.RelType - COFF::IMAGE_REL_AMD64_REL32);
+ Value -= FinalAddress + Delta;
+ uint64_t Result = Value + RE.Addend;
+ assert(((int64_t)Result <= INT32_MAX) && "Relocation overflow");
+ assert(((int64_t)Result >= INT32_MIN) && "Relocation underflow");
+ writeBytesUnaligned(Result, Target, 4);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR32NB: {
+ // ADDR32NB requires an offset less than 2GB from 'ImageBase'.
+ // The MemoryManager can make sure this is always true by forcing the
+ // memory layout to be: CodeSection < ReadOnlySection < ReadWriteSection.
+ const uint64_t ImageBase = getImageBase();
+ if (Value < ImageBase || ((Value - ImageBase) > UINT32_MAX))
+ report_fatal_error("IMAGE_REL_AMD64_ADDR32NB relocation requires an "
+ "ordered section layout");
+ else {
+ write32BitOffset(Target, RE.Addend, Value - ImageBase);
+ }
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR64: {
+ writeBytesUnaligned(Value + RE.Addend, Target, 8);
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_SECREL: {
+ assert(static_cast<int64_t>(RE.Addend) <= INT32_MAX && "Relocation overflow");
+ assert(static_cast<int64_t>(RE.Addend) >= INT32_MIN && "Relocation underflow");
+ writeBytesUnaligned(RE.Addend, Target, 4);
+ break;
+ }
+
+ default:
+ llvm_unreachable("Relocation type not implemented yet!");
+ break;
+ }
+ }
+
+ std::tuple<uint64_t, uint64_t, uint64_t>
+ generateRelocationStub(unsigned SectionID, StringRef TargetName,
+ uint64_t Offset, uint64_t RelType, uint64_t Addend,
+ StubMap &Stubs) {
+ uintptr_t StubOffset;
+ SectionEntry &Section = Sections[SectionID];
+
+ RelocationValueRef OriginalRelValueRef;
+ OriginalRelValueRef.SectionID = SectionID;
+ OriginalRelValueRef.Offset = Offset;
+ OriginalRelValueRef.Addend = Addend;
+ OriginalRelValueRef.SymbolName = TargetName.data();
+
+ auto Stub = Stubs.find(OriginalRelValueRef);
+ if (Stub == Stubs.end()) {
+ LLVM_DEBUG(dbgs() << " Create a new stub function for "
+ << TargetName.data() << "\n");
+
+ StubOffset = Section.getStubOffset();
+ Stubs[OriginalRelValueRef] = StubOffset;
+ createStubFunction(Section.getAddressWithOffset(StubOffset));
+ Section.advanceStubOffset(getMaxStubSize());
+ } else {
+ LLVM_DEBUG(dbgs() << " Stub function found for " << TargetName.data()
+ << "\n");
+ StubOffset = Stub->second;
+ }
+
+ // FIXME: If RelType == COFF::IMAGE_REL_AMD64_ADDR32NB we should be able
+ // to ignore the __ImageBase requirement and just forward to the stub
+ // directly as an offset of this section:
+ // write32BitOffset(Section.getAddressWithOffset(Offset), 0, StubOffset);
+ // .xdata exception handler's aren't having this though.
+
+ // Resolve original relocation to stub function.
+ const RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ resolveRelocation(RE, Section.getLoadAddressWithOffset(StubOffset));
+
+ // adjust relocation info so resolution writes to the stub function
+ Addend = 0;
+ Offset = StubOffset + 6;
+ RelType = COFF::IMAGE_REL_AMD64_ADDR64;
+
+ return std::make_tuple(Offset, RelType, Addend);
+ }
+
+ Expected<object::relocation_iterator>
+ processRelocationRef(unsigned SectionID,
+ object::relocation_iterator RelI,
+ const object::ObjectFile &Obj,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ // If possible, find the symbol referred to in the relocation,
+ // and the section that contains it.
+ object::symbol_iterator Symbol = RelI->getSymbol();
+ if (Symbol == Obj.symbol_end())
+ report_fatal_error("Unknown symbol in relocation");
+ auto SectionOrError = Symbol->getSection();
+ if (!SectionOrError)
+ return SectionOrError.takeError();
+ object::section_iterator SecI = *SectionOrError;
+ // If there is no section, this must be an external reference.
+ bool IsExtern = SecI == Obj.section_end();
+
+ // Determine the Addend used to adjust the relocation value.
+ uint64_t RelType = RelI->getType();
+ uint64_t Offset = RelI->getOffset();
+ uint64_t Addend = 0;
+ SectionEntry &Section = Sections[SectionID];
+ uintptr_t ObjTarget = Section.getObjAddress() + Offset;
+
+ Expected<StringRef> TargetNameOrErr = Symbol->getName();
+ if (!TargetNameOrErr)
+ return TargetNameOrErr.takeError();
+
+ StringRef TargetName = *TargetNameOrErr;
+ unsigned TargetSectionID = 0;
+ uint64_t TargetOffset = 0;
+
+ if (TargetName.startswith(getImportSymbolPrefix())) {
+ assert(IsExtern && "DLLImport not marked extern?");
+ TargetSectionID = SectionID;
+ TargetOffset = getDLLImportOffset(SectionID, Stubs, TargetName);
+ TargetName = StringRef();
+ IsExtern = false;
+ } else if (!IsExtern) {
+ if (auto TargetSectionIDOrErr =
+ findOrEmitSection(Obj, *SecI, SecI->isText(), ObjSectionToID))
+ TargetSectionID = *TargetSectionIDOrErr;
+ else
+ return TargetSectionIDOrErr.takeError();
+ TargetOffset = getSymbolOffset(*Symbol);
+ }
+
+ switch (RelType) {
+
+ case COFF::IMAGE_REL_AMD64_REL32:
+ case COFF::IMAGE_REL_AMD64_REL32_1:
+ case COFF::IMAGE_REL_AMD64_REL32_2:
+ case COFF::IMAGE_REL_AMD64_REL32_3:
+ case COFF::IMAGE_REL_AMD64_REL32_4:
+ case COFF::IMAGE_REL_AMD64_REL32_5:
+ case COFF::IMAGE_REL_AMD64_ADDR32NB: {
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+ Addend = readBytesUnaligned(Displacement, 4);
+
+ if (IsExtern)
+ std::tie(Offset, RelType, Addend) = generateRelocationStub(
+ SectionID, TargetName, Offset, RelType, Addend, Stubs);
+
+ break;
+ }
+
+ case COFF::IMAGE_REL_AMD64_ADDR64: {
+ uint8_t *Displacement = (uint8_t *)ObjTarget;
+ Addend = readBytesUnaligned(Displacement, 8);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ LLVM_DEBUG(dbgs() << "\t\tIn Section " << SectionID << " Offset " << Offset
+ << " RelType: " << RelType << " TargetName: "
+ << TargetName << " Addend " << Addend << "\n");
+
+ if (IsExtern) {
+ RelocationEntry RE(SectionID, Offset, RelType, Addend);
+ addRelocationForSymbol(RE, TargetName);
+ } else {
+ RelocationEntry RE(SectionID, Offset, RelType, TargetOffset + Addend);
+ addRelocationForSection(RE, TargetSectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void registerEHFrames() override {
+ for (auto const &EHFrameSID : UnregisteredEHFrameSections) {
+ uint8_t *EHFrameAddr = Sections[EHFrameSID].getAddress();
+ uint64_t EHFrameLoadAddr = Sections[EHFrameSID].getLoadAddress();
+ size_t EHFrameSize = Sections[EHFrameSID].getSize();
+ MemMgr.registerEHFrames(EHFrameAddr, EHFrameLoadAddr, EHFrameSize);
+ RegisteredEHFrameSections.push_back(EHFrameSID);
+ }
+ UnregisteredEHFrameSections.clear();
+ }
+
+ Error finalizeLoad(const object::ObjectFile &Obj,
+ ObjSectionToIDMap &SectionMap) override {
+ // Look for and record the EH frame section IDs.
+ for (const auto &SectionPair : SectionMap) {
+ const object::SectionRef &Section = SectionPair.first;
+ Expected<StringRef> NameOrErr = Section.getName();
+ if (!NameOrErr)
+ return NameOrErr.takeError();
+
+ // Note unwind info is stored in .pdata but often points to .xdata
+ // with an IMAGE_REL_AMD64_ADDR32NB relocation. Using a memory manager
+ // that keeps sections ordered in relation to __ImageBase is necessary.
+ if ((*NameOrErr) == ".pdata")
+ UnregisteredEHFrameSections.push_back(SectionPair.second);
+ }
+ return Error::success();
+ }
+};
+
+} // end namespace llvm
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
new file mode 100644
index 0000000000..17cbe612fb
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.cpp
@@ -0,0 +1,320 @@
+//===-- RuntimeDyldELFMips.cpp ---- ELF/Mips specific code. -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "RuntimeDyldELFMips.h"
+#include "llvm/BinaryFormat/ELF.h"
+
+#define DEBUG_TYPE "dyld"
+
+void RuntimeDyldELFMips::resolveRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ if (IsMipsO32ABI)
+ resolveMIPSO32Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend);
+ else if (IsMipsN32ABI) {
+ resolveMIPSN32Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+ } else if (IsMipsN64ABI)
+ resolveMIPSN64Relocation(Section, RE.Offset, Value, RE.RelType, RE.Addend,
+ RE.SymOffset, RE.SectionID);
+ else
+ llvm_unreachable("Mips ABI not handled");
+}
+
+uint64_t RuntimeDyldELFMips::evaluateRelocation(const RelocationEntry &RE,
+ uint64_t Value,
+ uint64_t Addend) {
+ if (IsMipsN32ABI) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ Value = evaluateMIPS64Relocation(Section, RE.Offset, Value, RE.RelType,
+ Addend, RE.SymOffset, RE.SectionID);
+ return Value;
+ }
+ llvm_unreachable("Not reachable");
+}
+
+void RuntimeDyldELFMips::applyRelocation(const RelocationEntry &RE,
+ uint64_t Value) {
+ if (IsMipsN32ABI) {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ applyMIPSRelocation(Section.getAddressWithOffset(RE.Offset), Value,
+ RE.RelType);
+ return;
+ }
+ llvm_unreachable("Not reachable");
+}
+
+int64_t
+RuntimeDyldELFMips::evaluateMIPS32Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type) {
+
+ LLVM_DEBUG(dbgs() << "evaluateMIPS32Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Unknown relocation type!");
+ return Value;
+ case ELF::R_MIPS_32:
+ return Value;
+ case ELF::R_MIPS_26:
+ return Value >> 2;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ return (Value + 0x8000) >> 16;
+ case ELF::R_MIPS_LO16:
+ return Value;
+ case ELF::R_MIPS_PC32: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value - FinalAddress;
+ }
+ case ELF::R_MIPS_PC16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PC19_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - (FinalAddress & ~0x3)) >> 2;
+ }
+ case ELF::R_MIPS_PC21_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PC26_S2: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress) >> 2;
+ }
+ case ELF::R_MIPS_PCHI16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value - FinalAddress + 0x8000) >> 16;
+ }
+ case ELF::R_MIPS_PCLO16: {
+ uint32_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value - FinalAddress;
+ }
+ }
+}
+
+int64_t RuntimeDyldELFMips::evaluateMIPS64Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+
+ LLVM_DEBUG(dbgs() << "evaluateMIPS64Relocation, LocalAddress: 0x"
+ << format("%llx", Section.getAddressWithOffset(Offset))
+ << " FinalAddress: 0x"
+ << format("%llx", Section.getLoadAddressWithOffset(Offset))
+ << " Value: 0x" << format("%llx", Value) << " Type: 0x"
+ << format("%x", Type) << " Addend: 0x"
+ << format("%llx", Addend)
+ << " Offset: " << format("%llx" PRIx64, Offset)
+ << " SID: " << format("%d", SectionID)
+ << " SymOffset: " << format("%x", SymOffset) << "\n");
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Not implemented relocation type!");
+ break;
+ case ELF::R_MIPS_JALR:
+ case ELF::R_MIPS_NONE:
+ break;
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_64:
+ return Value + Addend;
+ case ELF::R_MIPS_26:
+ return ((Value + Addend) >> 2) & 0x3ffffff;
+ case ELF::R_MIPS_GPREL16: {
+ uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
+ return Value + Addend - (GOTAddr + 0x7ff0);
+ }
+ case ELF::R_MIPS_SUB:
+ return Value - Addend;
+ case ELF::R_MIPS_HI16:
+ // Get the higher 16-bits. Also add 1 if bit 15 is 1.
+ return ((Value + Addend + 0x8000) >> 16) & 0xffff;
+ case ELF::R_MIPS_LO16:
+ return (Value + Addend) & 0xffff;
+ case ELF::R_MIPS_HIGHER:
+ return ((Value + Addend + 0x80008000) >> 32) & 0xffff;
+ case ELF::R_MIPS_HIGHEST:
+ return ((Value + Addend + 0x800080008000) >> 48) & 0xffff;
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE: {
+ uint8_t *LocalGOTAddr =
+ getSectionAddress(SectionToGOTMap[SectionID]) + SymOffset;
+ uint64_t GOTEntry = readBytesUnaligned(LocalGOTAddr, getGOTEntrySize());
+
+ Value += Addend;
+ if (Type == ELF::R_MIPS_GOT_PAGE)
+ Value = (Value + 0x8000) & ~0xffff;
+
+ if (GOTEntry)
+ assert(GOTEntry == Value &&
+ "GOT entry has two different addresses.");
+ else
+ writeBytesUnaligned(Value, LocalGOTAddr, getGOTEntrySize());
+
+ return (SymOffset - 0x7ff0) & 0xffff;
+ }
+ case ELF::R_MIPS_GOT_OFST: {
+ int64_t page = (Value + Addend + 0x8000) & ~0xffff;
+ return (Value + Addend - page) & 0xffff;
+ }
+ case ELF::R_MIPS_GPREL32: {
+ uint64_t GOTAddr = getSectionLoadAddress(SectionToGOTMap[SectionID]);
+ return Value + Addend - (GOTAddr + 0x7ff0);
+ }
+ case ELF::R_MIPS_PC16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0xffff;
+ }
+ case ELF::R_MIPS_PC32: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return Value + Addend - FinalAddress;
+ }
+ case ELF::R_MIPS_PC18_S3: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - (FinalAddress & ~0x7)) >> 3) & 0x3ffff;
+ }
+ case ELF::R_MIPS_PC19_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - (FinalAddress & ~0x3)) >> 2) & 0x7ffff;
+ }
+ case ELF::R_MIPS_PC21_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0x1fffff;
+ }
+ case ELF::R_MIPS_PC26_S2: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress) >> 2) & 0x3ffffff;
+ }
+ case ELF::R_MIPS_PCHI16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return ((Value + Addend - FinalAddress + 0x8000) >> 16) & 0xffff;
+ }
+ case ELF::R_MIPS_PCLO16: {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset);
+ return (Value + Addend - FinalAddress) & 0xffff;
+ }
+ }
+ return 0;
+}
+
+void RuntimeDyldELFMips::applyMIPSRelocation(uint8_t *TargetPtr, int64_t Value,
+ uint32_t Type) {
+ uint32_t Insn = readBytesUnaligned(TargetPtr, 4);
+
+ switch (Type) {
+ default:
+ llvm_unreachable("Unknown relocation type!");
+ break;
+ case ELF::R_MIPS_GPREL16:
+ case ELF::R_MIPS_HI16:
+ case ELF::R_MIPS_LO16:
+ case ELF::R_MIPS_HIGHER:
+ case ELF::R_MIPS_HIGHEST:
+ case ELF::R_MIPS_PC16:
+ case ELF::R_MIPS_PCHI16:
+ case ELF::R_MIPS_PCLO16:
+ case ELF::R_MIPS_CALL16:
+ case ELF::R_MIPS_GOT_DISP:
+ case ELF::R_MIPS_GOT_PAGE:
+ case ELF::R_MIPS_GOT_OFST:
+ Insn = (Insn & 0xffff0000) | (Value & 0x0000ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC18_S3:
+ Insn = (Insn & 0xfffc0000) | (Value & 0x0003ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC19_S2:
+ Insn = (Insn & 0xfff80000) | (Value & 0x0007ffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_PC21_S2:
+ Insn = (Insn & 0xffe00000) | (Value & 0x001fffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_26:
+ case ELF::R_MIPS_PC26_S2:
+ Insn = (Insn & 0xfc000000) | (Value & 0x03ffffff);
+ writeBytesUnaligned(Insn, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_32:
+ case ELF::R_MIPS_GPREL32:
+ case ELF::R_MIPS_PC32:
+ writeBytesUnaligned(Value & 0xffffffff, TargetPtr, 4);
+ break;
+ case ELF::R_MIPS_64:
+ case ELF::R_MIPS_SUB:
+ writeBytesUnaligned(Value, TargetPtr, 8);
+ break;
+ }
+}
+
+void RuntimeDyldELFMips::resolveMIPSN32Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+ int64_t CalculatedValue = evaluateMIPS64Relocation(
+ Section, Offset, Value, Type, Addend, SymOffset, SectionID);
+ applyMIPSRelocation(Section.getAddressWithOffset(Offset), CalculatedValue,
+ Type);
+}
+
+void RuntimeDyldELFMips::resolveMIPSN64Relocation(
+ const SectionEntry &Section, uint64_t Offset, uint64_t Value, uint32_t Type,
+ int64_t Addend, uint64_t SymOffset, SID SectionID) {
+ uint32_t r_type = Type & 0xff;
+ uint32_t r_type2 = (Type >> 8) & 0xff;
+ uint32_t r_type3 = (Type >> 16) & 0xff;
+
+ // RelType is used to keep information for which relocation type we are
+ // applying relocation.
+ uint32_t RelType = r_type;
+ int64_t CalculatedValue = evaluateMIPS64Relocation(Section, Offset, Value,
+ RelType, Addend,
+ SymOffset, SectionID);
+ if (r_type2 != ELF::R_MIPS_NONE) {
+ RelType = r_type2;
+ CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
+ CalculatedValue, SymOffset,
+ SectionID);
+ }
+ if (r_type3 != ELF::R_MIPS_NONE) {
+ RelType = r_type3;
+ CalculatedValue = evaluateMIPS64Relocation(Section, Offset, 0, RelType,
+ CalculatedValue, SymOffset,
+ SectionID);
+ }
+ applyMIPSRelocation(Section.getAddressWithOffset(Offset), CalculatedValue,
+ RelType);
+}
+
+void RuntimeDyldELFMips::resolveMIPSO32Relocation(const SectionEntry &Section,
+ uint64_t Offset,
+ uint32_t Value, uint32_t Type,
+ int32_t Addend) {
+ uint8_t *TargetPtr = Section.getAddressWithOffset(Offset);
+ Value += Addend;
+
+ LLVM_DEBUG(dbgs() << "resolveMIPSO32Relocation, LocalAddress: "
+ << Section.getAddressWithOffset(Offset) << " FinalAddress: "
+ << format("%p", Section.getLoadAddressWithOffset(Offset))
+ << " Value: " << format("%x", Value) << " Type: "
+ << format("%x", Type) << " Addend: " << format("%x", Addend)
+ << " SymOffset: " << format("%x", Offset) << "\n");
+
+ Value = evaluateMIPS32Relocation(Section, Offset, Value, Type);
+
+ applyMIPSRelocation(TargetPtr, Value, Type);
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
new file mode 100644
index 0000000000..f03acb41d6
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldELFMips.h
@@ -0,0 +1,66 @@
+//===-- RuntimeDyldELFMips.h ---- ELF/Mips specific code. -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDELFMIPS_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDELFMIPS_H
+
+#include "../RuntimeDyldELF.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldELFMips : public RuntimeDyldELF {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldELFMips(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldELF(MM, Resolver) {}
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override;
+
+protected:
+ void resolveMIPSO32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint32_t Value, uint32_t Type, int32_t Addend);
+ void resolveMIPSN32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+ void resolveMIPSN64Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+
+private:
+ /// A object file specific relocation resolver
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address to apply the relocation action
+ uint64_t evaluateRelocation(const RelocationEntry &RE, uint64_t Value,
+ uint64_t Addend);
+
+ /// A object file specific relocation resolver
+ /// \param RE The relocation to be resolved
+ /// \param Value Target symbol address to apply the relocation action
+ void applyRelocation(const RelocationEntry &RE, uint64_t Value);
+
+ int64_t evaluateMIPS32Relocation(const SectionEntry &Section, uint64_t Offset,
+ uint64_t Value, uint32_t Type);
+ int64_t evaluateMIPS64Relocation(const SectionEntry &Section,
+ uint64_t Offset, uint64_t Value,
+ uint32_t Type, int64_t Addend,
+ uint64_t SymOffset, SID SectionID);
+
+ void applyMIPSRelocation(uint8_t *TargetPtr, int64_t CalculatedValue,
+ uint32_t Type);
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
new file mode 100644
index 0000000000..f2ee1b06d4
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -0,0 +1,541 @@
+//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
+
+#include "../RuntimeDyldMachO.h"
+#include "llvm/Support/Endian.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOAArch64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldMachOAArch64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ unsigned getStubAlignment() override { return 8; }
+
+ /// Extract the addend encoded in the instruction / memory location.
+ Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+ unsigned NumBytes = 1 << RE.Size;
+ int64_t Addend = 0;
+ // Verify that the relocation has the correct size and alignment.
+ switch (RE.RelType) {
+ default: {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Unsupported relocation type: "
+ << getRelocName(RE.RelType);
+ }
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ if (NumBytes != 4 && NumBytes != 8) {
+ std::string ErrMsg;
+ {
+ raw_string_ostream ErrStream(ErrMsg);
+ ErrStream << "Invalid relocation size for relocation "
+ << getRelocName(RE.RelType);
+ }
+ return make_error<StringError>(std::move(ErrMsg),
+ inconvertibleErrorCode());
+ }
+ break;
+ }
+ case MachO::ARM64_RELOC_BRANCH26:
+ case MachO::ARM64_RELOC_PAGE21:
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ assert(NumBytes == 4 && "Invalid relocation size.");
+ assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+ "Instruction address is not aligned to 4 bytes.");
+ break;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ // This could be an unaligned memory location.
+ if (NumBytes == 4)
+ Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
+ else
+ Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
+ break;
+ case MachO::ARM64_RELOC_BRANCH26: {
+ // Verify that the relocation points to a B/BL instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert(((*p & 0xFC000000) == 0x14000000 ||
+ (*p & 0xFC000000) == 0x94000000) &&
+ "Expected branch instruction.");
+
+ // Get the 26 bit addend encoded in the branch instruction and sign-extend
+ // to 64 bit. The lower 2 bits are always zeros and are therefore implicit
+ // (<< 2).
+ Addend = (*p & 0x03FFFFFF) << 2;
+ Addend = SignExtend64(Addend, 28);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ // Verify that the relocation points to the expected adrp instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+ // Get the 21 bit addend encoded in the adrp instruction and sign-extend
+ // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
+ // therefore implicit (<< 12).
+ Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3) << 12;
+ Addend = SignExtend64(Addend, 33);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ (void)p;
+ assert((*p & 0x3B000000) == 0x39000000 &&
+ "Only expected load / store instructions.");
+ LLVM_FALLTHROUGH;
+ }
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // or add / sub instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((((*p & 0x3B000000) == 0x39000000) ||
+ ((*p & 0x11C00000) == 0x11000000) ) &&
+ "Expected load / store or add/sub instruction.");
+
+ // Get the 12 bit addend encoded in the instruction.
+ Addend = (*p & 0x003FFC00) >> 10;
+
+ // Check which instruction we are decoding to obtain the implicit shift
+ // factor of the instruction.
+ int ImplicitShift = 0;
+ if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+ // For load / store instructions the size is encoded in bits 31:30.
+ ImplicitShift = ((*p >> 30) & 0x3);
+ if (ImplicitShift == 0) {
+ // Check if this a vector op to get the correct shift value.
+ if ((*p & 0x04800000) == 0x04800000)
+ ImplicitShift = 4;
+ }
+ }
+ // Compensate for implicit shift.
+ Addend <<= ImplicitShift;
+ break;
+ }
+ }
+ return Addend;
+ }
+
+ /// Extract the addend encoded in the instruction.
+ void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
+ MachO::RelocationInfoType RelType, int64_t Addend) const {
+ // Verify that the relocation has the correct alignment.
+ switch (RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
+ break;
+ case MachO::ARM64_RELOC_BRANCH26:
+ case MachO::ARM64_RELOC_PAGE21:
+ case MachO::ARM64_RELOC_PAGEOFF12:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ assert(NumBytes == 4 && "Invalid relocation size.");
+ assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
+ "Instruction address is not aligned to 4 bytes.");
+ break;
+ }
+
+ switch (RelType) {
+ default:
+ llvm_unreachable("Unsupported relocation type!");
+ case MachO::ARM64_RELOC_POINTER_TO_GOT:
+ case MachO::ARM64_RELOC_UNSIGNED:
+ // This could be an unaligned memory location.
+ if (NumBytes == 4)
+ *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
+ else
+ *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
+ break;
+ case MachO::ARM64_RELOC_BRANCH26: {
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ // Verify that the relocation points to the expected branch instruction.
+ assert(((*p & 0xFC000000) == 0x14000000 ||
+ (*p & 0xFC000000) == 0x94000000) &&
+ "Expected branch instruction.");
+
+ // Verify addend value.
+ assert((Addend & 0x3) == 0 && "Branch target is not aligned");
+ assert(isInt<28>(Addend) && "Branch target is out of range.");
+
+ // Encode the addend as 26 bit immediate in the branch instruction.
+ *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ // Verify that the relocation points to the expected adrp instruction.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");
+
+ // Check that the addend fits into 21 bits (+ 12 lower bits).
+ assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
+ assert(isInt<33>(Addend) && "Invalid page reloc value.");
+
+ // Encode the addend into the instruction.
+ uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
+ uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
+ *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((*p & 0x3B000000) == 0x39000000 &&
+ "Only expected load / store instructions.");
+ (void)p;
+ LLVM_FALLTHROUGH;
+ }
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ // Verify that the relocation points to one of the expected load / store
+ // or add / sub instructions.
+ auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
+ assert((((*p & 0x3B000000) == 0x39000000) ||
+ ((*p & 0x11C00000) == 0x11000000) ) &&
+ "Expected load / store or add/sub instruction.");
+
+ // Check which instruction we are decoding to obtain the implicit shift
+ // factor of the instruction and verify alignment.
+ int ImplicitShift = 0;
+ if ((*p & 0x3B000000) == 0x39000000) { // << load / store
+ // For load / store instructions the size is encoded in bits 31:30.
+ ImplicitShift = ((*p >> 30) & 0x3);
+ switch (ImplicitShift) {
+ case 0:
+ // Check if this a vector op to get the correct shift value.
+ if ((*p & 0x04800000) == 0x04800000) {
+ ImplicitShift = 4;
+ assert(((Addend & 0xF) == 0) &&
+ "128-bit LDR/STR not 16-byte aligned.");
+ }
+ break;
+ case 1:
+ assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
+ break;
+ case 2:
+ assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
+ break;
+ case 3:
+ assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
+ break;
+ }
+ }
+ // Compensate for implicit shift.
+ Addend >>= ImplicitShift;
+ assert(isUInt<12>(Addend) && "Addend cannot be encoded.");
+
+ // Encode the addend into the instruction.
+ *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
+ break;
+ }
+ }
+ }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ if (Obj.isRelocationScattered(RelInfo))
+ return make_error<RuntimeDyldError>("Scattered relocations not supported "
+ "for MachO AArch64");
+
+ // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
+ // addend for the following relocation. If found: (1) store the associated
+ // addend, (2) consume the next relocation, and (3) use the stored addend to
+ // override the addend.
+ int64_t ExplicitAddend = 0;
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
+ assert(!Obj.getPlainRelocationExternal(RelInfo));
+ assert(!Obj.getAnyRelocationPCRel(RelInfo));
+ assert(Obj.getAnyRelocationLength(RelInfo) == 2);
+ int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
+ // Sign-extend the 24-bit to 64-bit.
+ ExplicitAddend = SignExtend64(RawAddend, 24);
+ ++RelI;
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+ }
+
+ if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_SUBTRACTOR)
+ return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+
+ if (RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT) {
+ bool Valid =
+ (RE.Size == 2 && RE.IsPCRel) || (RE.Size == 3 && !RE.IsPCRel);
+ if (!Valid)
+ return make_error<StringError>("ARM64_RELOC_POINTER_TO_GOT supports "
+ "32-bit pc-rel or 64-bit absolute only",
+ inconvertibleErrorCode());
+ }
+
+ if (auto Addend = decodeAddend(RE))
+ RE.Addend = *Addend;
+ else
+ return Addend.takeError();
+
+ assert((ExplicitAddend == 0 || RE.Addend == 0) && "Relocation has "\
+ "ARM64_RELOC_ADDEND and embedded addend in the instruction.");
+ if (ExplicitAddend)
+ RE.Addend = ExplicitAddend;
+
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT) {
+ // We'll take care of the offset in processGOTRelocation.
+ Value.Offset = 0;
+ } else if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ RE.Addend = Value.Offset;
+
+ if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
+ RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12 ||
+ RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+ MachO::RelocationInfoType RelType =
+ static_cast<MachO::RelocationInfoType>(RE.RelType);
+
+ switch (RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::ARM64_RELOC_UNSIGNED: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
+ // Mask in the target value a byte at a time (we don't have an alignment
+ // guarantee for the target address, so this is safest).
+ if (RE.Size < 2)
+ llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");
+
+ encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_POINTER_TO_GOT: {
+ assert(((RE.Size == 2 && RE.IsPCRel) || (RE.Size == 3 && !RE.IsPCRel)) &&
+ "ARM64_RELOC_POINTER_TO_GOT only supports 32-bit pc-rel or 64-bit "
+ "absolute");
+ // Addend is the GOT entry address and RE.Offset the target of the
+ // relocation.
+ uint64_t Result =
+ RE.IsPCRel ? (RE.Addend - RE.Offset) : (Value + RE.Addend);
+ encodeAddend(LocalAddress, 1 << RE.Size, RelType, Result);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_BRANCH26: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
+ // Check if branch is in range.
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ int64_t PCRelVal = Value - FinalAddress + RE.Addend;
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_PAGE21: {
+ assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
+ // Adjust for PC-relative relocation and offset.
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ int64_t PCRelVal =
+ ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
+ break;
+ }
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
+ case MachO::ARM64_RELOC_PAGEOFF12: {
+ assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF21 not supported");
+ // Add the offset from the symbol.
+ Value += RE.Addend;
+ // Mask out the page address and only use the lower 12 bits.
+ Value &= 0xFFF;
+ encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
+ break;
+ }
+ case MachO::ARM64_RELOC_SUBTRACTOR: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SUBTRACTOR relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
+ llvm_unreachable("Relocation type not yet implemented!");
+ case MachO::ARM64_RELOC_ADDEND:
+ llvm_unreachable("ARM64_RELOC_ADDEND should have been handeled by "
+ "processRelocationRef!");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ return Error::success();
+ }
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ assert((RE.RelType == MachO::ARM64_RELOC_POINTER_TO_GOT &&
+ (RE.Size == 2 || RE.Size == 3)) ||
+ RE.Size == 2);
+ SectionEntry &Section = Sections[RE.SectionID];
+ StubMap::const_iterator i = Stubs.find(Value);
+ int64_t Offset;
+ if (i != Stubs.end())
+ Offset = static_cast<int64_t>(i->second);
+ else {
+ // FIXME: There must be a better way to do this then to check and fix the
+ // alignment every time!!!
+ uintptr_t BaseAddress = uintptr_t(Section.getAddress());
+ uintptr_t StubAlignment = getStubAlignment();
+ uintptr_t StubAddress =
+ (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
+ -StubAlignment;
+ unsigned StubOffset = StubAddress - BaseAddress;
+ Stubs[Value] = StubOffset;
+ assert(((StubAddress % getStubAlignment()) == 0) &&
+ "GOT entry not aligned");
+ RelocationEntry GOTRE(RE.SectionID, StubOffset,
+ MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
+ /*IsPCRel=*/false, /*Size=*/3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ Offset = static_cast<int64_t>(StubOffset);
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
+ RE.IsPCRel, RE.Size);
+ addRelocationForSection(TargetRE, RE.SectionID);
+ }
+
+ Expected<relocation_iterator>
+ processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+
+ Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
+ if (!SubtrahendNameOrErr)
+ return SubtrahendNameOrErr.takeError();
+ auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
+ unsigned SectionBID = SubtrahendI->second.getSectionID();
+ uint64_t SectionBOffset = SubtrahendI->second.getOffset();
+ int64_t Addend =
+ SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
+
+ ++RelI;
+ Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
+ if (!MinuendNameOrErr)
+ return MinuendNameOrErr.takeError();
+ auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
+ unsigned SectionAID = MinuendI->second.getSectionID();
+ uint64_t SectionAOffset = MinuendI->second.getOffset();
+
+ RelocationEntry R(SectionID, Offset, MachO::ARM64_RELOC_SUBTRACTOR, (uint64_t)Addend,
+ SectionAID, SectionAOffset, SectionBID, SectionBOffset,
+ false, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+ static const char *getRelocName(uint32_t RelocType) {
+ switch (RelocType) {
+ case MachO::ARM64_RELOC_UNSIGNED: return "ARM64_RELOC_UNSIGNED";
+ case MachO::ARM64_RELOC_SUBTRACTOR: return "ARM64_RELOC_SUBTRACTOR";
+ case MachO::ARM64_RELOC_BRANCH26: return "ARM64_RELOC_BRANCH26";
+ case MachO::ARM64_RELOC_PAGE21: return "ARM64_RELOC_PAGE21";
+ case MachO::ARM64_RELOC_PAGEOFF12: return "ARM64_RELOC_PAGEOFF12";
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGE21: return "ARM64_RELOC_GOT_LOAD_PAGE21";
+ case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: return "ARM64_RELOC_GOT_LOAD_PAGEOFF12";
+ case MachO::ARM64_RELOC_POINTER_TO_GOT: return "ARM64_RELOC_POINTER_TO_GOT";
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21: return "ARM64_RELOC_TLVP_LOAD_PAGE21";
+ case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12: return "ARM64_RELOC_TLVP_LOAD_PAGEOFF12";
+ case MachO::ARM64_RELOC_ADDEND: return "ARM64_RELOC_ADDEND";
+ }
+ return "Unrecognized arm64 addend";
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
new file mode 100644
index 0000000000..fcf723aaea
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -0,0 +1,431 @@
+//===----- RuntimeDyldMachOARM.h ---- MachO/ARM specific code. ----*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOARM_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOARM
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> {
+private:
+ typedef RuntimeDyldMachOCRTPBase<RuntimeDyldMachOARM> ParentT;
+
+public:
+
+ typedef uint32_t TargetPtrT;
+
+ RuntimeDyldMachOARM(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ unsigned getStubAlignment() override { return 4; }
+
+ Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &SR) override {
+ auto Flags = RuntimeDyldImpl::getJITSymbolFlags(SR);
+ if (!Flags)
+ return Flags.takeError();
+ Flags->getTargetFlags() = ARMJITSymbolFlags::fromObjectSymbol(SR);
+ return Flags;
+ }
+
+ uint64_t modifyAddressBasedOnFlags(uint64_t Addr,
+ JITSymbolFlags Flags) const override {
+ if (Flags.getTargetFlags() & ARMJITSymbolFlags::Thumb)
+ Addr |= 0x1;
+ return Addr;
+ }
+
+ bool isAddrTargetThumb(unsigned SectionID, uint64_t Offset) {
+ auto TargetObjAddr = Sections[SectionID].getObjAddress() + Offset;
+ for (auto &KV : GlobalSymbolTable) {
+ auto &Entry = KV.second;
+ auto SymbolObjAddr =
+ Sections[Entry.getSectionID()].getObjAddress() + Entry.getOffset();
+ if (TargetObjAddr == SymbolObjAddr)
+ return (Entry.getFlags().getTargetFlags() & ARMJITSymbolFlags::Thumb);
+ }
+ return false;
+ }
+
+ Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ switch (RE.RelType) {
+ default:
+ return memcpyAddend(RE);
+ case MachO::ARM_RELOC_BR24: {
+ uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
+ Temp &= 0x00ffffff; // Mask out the opcode.
+ // Now we've got the shifted immediate, shift by 2, sign extend and ret.
+ return SignExtend32<26>(Temp << 2);
+ }
+
+ case MachO::ARM_THUMB_RELOC_BR22: {
+ // This is a pair of instructions whose operands combine to provide 22
+ // bits of displacement:
+ // Encoding for high bits 1111 0XXX XXXX XXXX
+ // Encoding for low bits 1111 1XXX XXXX XXXX
+ uint16_t HighInsn = readBytesUnaligned(LocalAddress, 2);
+ if ((HighInsn & 0xf800) != 0xf000)
+ return make_error<StringError>("Unrecognized thumb branch encoding "
+ "(BR22 high bits)",
+ inconvertibleErrorCode());
+
+ uint16_t LowInsn = readBytesUnaligned(LocalAddress + 2, 2);
+ if ((LowInsn & 0xf800) != 0xf800)
+ return make_error<StringError>("Unrecognized thumb branch encoding "
+ "(BR22 low bits)",
+ inconvertibleErrorCode());
+
+ return SignExtend64<23>(((HighInsn & 0x7ff) << 12) |
+ ((LowInsn & 0x7ff) << 1));
+ }
+ }
+ }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ // Set to true for thumb functions in this (or previous) TUs.
+ // Will be used to set the TargetIsThumbFunc member on the relocation entry.
+ bool TargetIsLocalThumbFunc = false;
+ if (Obj.getPlainRelocationExternal(RelInfo)) {
+ auto Symbol = RelI->getSymbol();
+ StringRef TargetName;
+ if (auto TargetNameOrErr = Symbol->getName())
+ TargetName = *TargetNameOrErr;
+ else
+ return TargetNameOrErr.takeError();
+
+ // If the target is external but the value doesn't have a name then we've
+ // converted the value to a section/offset pair, but we still need to set
+      // the IsTargetThumbFunc bit, so look the value up in the global symbol table.
+ auto EntryItr = GlobalSymbolTable.find(TargetName);
+ if (EntryItr != GlobalSymbolTable.end()) {
+ TargetIsLocalThumbFunc =
+ EntryItr->second.getFlags().getTargetFlags() &
+ ARMJITSymbolFlags::Thumb;
+ }
+ }
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::ARM_RELOC_HALF_SECTDIFF)
+ return processHALFSECTDIFFRelocation(SectionID, RelI, Obj,
+ ObjSectionToID);
+ else if (RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processScatteredVANILLA(SectionID, RelI, Obj, ObjSectionToID,
+ TargetIsLocalThumbFunc);
+ else
+ return ++RelI;
+ }
+
+ // Validate the relocation type.
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_PAIR);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_SECTDIFF);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_LOCAL_SECTDIFF);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_PB_LA_PTR);
+ UNIMPLEMENTED_RELOC(MachO::ARM_THUMB_32BIT_BRANCH);
+ UNIMPLEMENTED_RELOC(MachO::ARM_RELOC_HALF);
+ default:
+ if (RelType > MachO::ARM_RELOC_HALF_SECTDIFF)
+ return make_error<RuntimeDyldError>(("MachO ARM relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ if (auto AddendOrErr = decodeAddend(RE))
+ RE.Addend = *AddendOrErr;
+ else
+ return AddendOrErr.takeError();
+ RE.IsTargetThumbFunc = TargetIsLocalThumbFunc;
+
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ // If this is a branch from a thumb function (BR22) then make sure we mark
+ // the value as being a thumb stub: we don't want to mix it up with an ARM
+ // stub targeting the same function.
+ if (RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ Value.IsStubThumb = true;
+
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI,
+ (RE.RelType == MachO::ARM_THUMB_RELOC_BR22) ? 4 : 8);
+
+ // If this is a non-external branch target check whether Value points to a
+ // thumb func.
+ if (!Value.SymbolName && (RelType == MachO::ARM_RELOC_BR24 ||
+ RelType == MachO::ARM_THUMB_RELOC_BR22))
+ RE.IsTargetThumbFunc = isAddrTargetThumb(Value.SectionID, Value.Offset);
+
+ if (RE.RelType == MachO::ARM_RELOC_BR24 ||
+ RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ processBranchRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Offset;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress;
+ // ARM PCRel relocations have an effective-PC offset of two instructions
+ // (four bytes in Thumb mode, 8 bytes in ARM mode).
+ Value -= (RE.RelType == MachO::ARM_THUMB_RELOC_BR22) ? 4 : 8;
+ }
+
+ switch (RE.RelType) {
+ case MachO::ARM_THUMB_RELOC_BR22: {
+ Value += RE.Addend;
+ uint16_t HighInsn = readBytesUnaligned(LocalAddress, 2);
+ assert((HighInsn & 0xf800) == 0xf000 &&
+ "Unrecognized thumb branch encoding (BR22 high bits)");
+ HighInsn = (HighInsn & 0xf800) | ((Value >> 12) & 0x7ff);
+
+ uint16_t LowInsn = readBytesUnaligned(LocalAddress + 2, 2);
+ assert((LowInsn & 0xf800) == 0xf800 &&
+ "Unrecognized thumb branch encoding (BR22 low bits)");
+ LowInsn = (LowInsn & 0xf800) | ((Value >> 1) & 0x7ff);
+
+ writeBytesUnaligned(HighInsn, LocalAddress, 2);
+ writeBytesUnaligned(LowInsn, LocalAddress + 2, 2);
+ break;
+ }
+
+ case MachO::ARM_RELOC_VANILLA:
+ if (RE.IsTargetThumbFunc)
+ Value |= 0x01;
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::ARM_RELOC_BR24: {
+ // Mask the value into the target address. We know instructions are
+ // 32-bit aligned, so we can do it all at once.
+ Value += RE.Addend;
+ // The low two bits of the value are not encoded.
+ Value >>= 2;
+ // Mask the value to 24 bits.
+ uint64_t FinalValue = Value & 0xffffff;
+ // FIXME: If the destination is a Thumb function (and the instruction
+ // is a non-predicated BL instruction), we need to change it to a BLX
+ // instruction instead.
+
+ // Insert the value into the instruction.
+ uint32_t Temp = readBytesUnaligned(LocalAddress, 4);
+ writeBytesUnaligned((Temp & ~0xffffff) | FinalValue, LocalAddress, 4);
+
+ break;
+ }
+ case MachO::ARM_RELOC_HALF_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected HALFSECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ if (RE.Size & 0x1) // :upper16:
+ Value = (Value >> 16);
+
+ bool IsThumb = RE.Size & 0x2;
+
+ Value &= 0xffff;
+
+ uint32_t Insn = readBytesUnaligned(LocalAddress, 4);
+
+ if (IsThumb)
+ Insn = (Insn & 0x8f00fbf0) | ((Value & 0xf000) >> 12) |
+ ((Value & 0x0800) >> 1) | ((Value & 0x0700) << 20) |
+ ((Value & 0x00ff) << 16);
+ else
+ Insn = (Insn & 0xfff0f000) | ((Value & 0xf000) << 4) | (Value & 0x0fff);
+ writeBytesUnaligned(Insn, LocalAddress, 4);
+ break;
+ }
+
+ default:
+ llvm_unreachable("Invalid relocation type");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == "__nl_symbol_ptr")
+ return populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
+ Section, SectionID);
+ return Error::success();
+ }
+
+private:
+
+ void processBranchRelocation(const RelocationEntry &RE,
+ const RelocationValueRef &Value,
+ StubMap &Stubs) {
+ // This is an ARM branch relocation, need to use a stub function.
+ // Look up for existing stub.
+ SectionEntry &Section = Sections[RE.SectionID];
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.getAddressWithOffset(i->second);
+ } else {
+ // Create a new stub function.
+ assert(Section.getStubOffset() % 4 == 0 && "Misaligned stub");
+ Stubs[Value] = Section.getStubOffset();
+ uint32_t StubOpcode = 0;
+ if (RE.RelType == MachO::ARM_RELOC_BR24)
+ StubOpcode = 0xe51ff004; // ldr pc, [pc, #-4]
+ else if (RE.RelType == MachO::ARM_THUMB_RELOC_BR22)
+ StubOpcode = 0xf000f8df; // ldr pc, [pc]
+ else
+ llvm_unreachable("Unrecognized relocation");
+ Addr = Section.getAddressWithOffset(Section.getStubOffset());
+ writeBytesUnaligned(StubOpcode, Addr, 4);
+ uint8_t *StubTargetAddr = Addr + 4;
+ RelocationEntry StubRE(
+ RE.SectionID, StubTargetAddr - Section.getAddress(),
+ MachO::GENERIC_RELOC_VANILLA, Value.Offset, false, 2);
+ StubRE.IsTargetThumbFunc = RE.IsTargetThumbFunc;
+ if (Value.SymbolName)
+ addRelocationForSymbol(StubRE, Value.SymbolName);
+ else
+ addRelocationForSection(StubRE, Value.SectionID);
+ Section.advanceStubOffset(getMaxStubSize());
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, 0,
+ RE.IsPCRel, RE.Size);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+
+ Expected<relocation_iterator>
+ processHALFSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseTObj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &MachO =
+ static_cast<const MachOObjectFile&>(BaseTObj);
+ MachO::any_relocation_info RE =
+ MachO.getRelocation(RelI->getRawDataRefImpl());
+
+ // For a half-diff relocation the length bits actually record whether this
+ // is a movw/movt, and whether this is arm or thumb.
+ // Bit 0 indicates movw (b0 == 0) or movt (b0 == 1).
+ // Bit 1 indicates arm (b1 == 0) or thumb (b1 == 1).
+ unsigned HalfDiffKindBits = MachO.getAnyRelocationLength(RE);
+ bool IsThumb = HalfDiffKindBits & 0x2;
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = MachO.getAnyRelocationType(RE);
+ bool IsPCRel = MachO.getAnyRelocationPCRel(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ int64_t Immediate = readBytesUnaligned(LocalAddress, 4); // Copy the whole instruction out.
+
+ if (IsThumb)
+ Immediate = ((Immediate & 0x0000000f) << 12) |
+ ((Immediate & 0x00000400) << 1) |
+ ((Immediate & 0x70000000) >> 20) |
+ ((Immediate & 0x00ff0000) >> 16);
+ else
+ Immediate = ((Immediate >> 4) & 0xf000) | (Immediate & 0xfff);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ MachO.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t AddrA = MachO.getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(MachO, AddrA);
+ assert(SAI != MachO.section_end() && "Can't find section for address A");
+ uint64_t SectionABase = SAI->getAddress();
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode = SectionA.isText();
+ uint32_t SectionAID = ~0U;
+ if (auto SectionAIDOrErr =
+ findOrEmitSection(MachO, SectionA, IsCode, ObjSectionToID))
+ SectionAID = *SectionAIDOrErr;
+ else
+ return SectionAIDOrErr.takeError();
+
+ uint32_t AddrB = MachO.getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(MachO, AddrB);
+ assert(SBI != MachO.section_end() && "Can't find section for address B");
+ uint64_t SectionBBase = SBI->getAddress();
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID = ~0U;
+ if (auto SectionBIDOrErr =
+ findOrEmitSection(MachO, SectionB, IsCode, ObjSectionToID))
+ SectionBID = *SectionBIDOrErr;
+ else
+ return SectionBIDOrErr.takeError();
+
+ uint32_t OtherHalf = MachO.getAnyRelocationAddress(RE2) & 0xffff;
+ unsigned Shift = (HalfDiffKindBits & 0x1) ? 16 : 0;
+ uint32_t FullImmVal = (Immediate << Shift) | (OtherHalf << (16 - Shift));
+ int64_t Addend = FullImmVal - (AddrA - AddrB);
+
+ // addend = Encoded - Expected
+ // = Encoded - (AddrA - AddrB)
+
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset, IsPCRel,
+ HalfDiffKindBits);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
new file mode 100644
index 0000000000..d029d3266f
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -0,0 +1,250 @@
+//===---- RuntimeDyldMachOI386.h ---- MachO/I386 specific code. ---*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOI386_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOI386
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOI386> {
+public:
+
+ typedef uint32_t TargetPtrT;
+
+ RuntimeDyldMachOI386(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 0; }
+
+ unsigned getStubAlignment() override { return 1; }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (Obj.isRelocationScattered(RelInfo)) {
+ if (RelType == MachO::GENERIC_RELOC_SECTDIFF ||
+ RelType == MachO::GENERIC_RELOC_LOCAL_SECTDIFF)
+ return processSECTDIFFRelocation(SectionID, RelI, Obj,
+ ObjSectionToID);
+ else if (RelType == MachO::GENERIC_RELOC_VANILLA)
+ return processScatteredVANILLA(SectionID, RelI, Obj, ObjSectionToID);
+ return make_error<RuntimeDyldError>(("Unhandled I386 scattered relocation "
+ "type: " + Twine(RelType)).str());
+ }
+
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_PAIR);
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_PB_LA_PTR);
+ UNIMPLEMENTED_RELOC(MachO::GENERIC_RELOC_TLV);
+ default:
+ if (RelType > MachO::GENERIC_RELOC_TLV)
+ return make_error<RuntimeDyldError>(("MachO I386 relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ RE.Addend = memcpyAddend(RE);
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ // Addends for external, PC-rel relocations on i386 point back to the zero
+ // offset. Calculate the final offset from the relocation target instead.
+ // This allows us to use the same logic for both external and internal
+ // relocations in resolveI386RelocationRef.
+ // bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ // if (IsExtern && RE.IsPCRel) {
+ // uint64_t RelocAddr = 0;
+ // RelI->getAddress(RelocAddr);
+ // Value.Addend += RelocAddr + 4;
+ // }
+ if (RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ RE.Addend = Value.Offset;
+
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ if (RE.IsPCRel) {
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress + 4; // see MachOX86_64::resolveRelocation.
+ }
+
+ switch (RE.RelType) {
+ case MachO::GENERIC_RELOC_VANILLA:
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::GENERIC_RELOC_SECTDIFF:
+ case MachO::GENERIC_RELOC_LOCAL_SECTDIFF: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SECTDIFF relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ StringRef Name;
+ if (Expected<StringRef> NameOrErr = Section.getName())
+ Name = *NameOrErr;
+ else
+ consumeError(NameOrErr.takeError());
+
+ if (Name == "__jump_table")
+ return populateJumpTable(cast<MachOObjectFile>(Obj), Section, SectionID);
+ else if (Name == "__pointers")
+ return populateIndirectSymbolPointersSection(cast<MachOObjectFile>(Obj),
+ Section, SectionID);
+ return Error::success();
+ }
+
+private:
+ Expected<relocation_iterator>
+ processSECTDIFFRelocation(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObjT);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ SectionEntry &Section = Sections[SectionID];
+ uint32_t RelocType = Obj.getAnyRelocationType(RE);
+ bool IsPCRel = Obj.getAnyRelocationPCRel(RE);
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Section.getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+ uint64_t Addend = readBytesUnaligned(LocalAddress, NumBytes);
+
+ ++RelI;
+ MachO::any_relocation_info RE2 =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ uint32_t AddrA = Obj.getScatteredRelocationValue(RE);
+ section_iterator SAI = getSectionByAddress(Obj, AddrA);
+ assert(SAI != Obj.section_end() && "Can't find section for address A");
+ uint64_t SectionABase = SAI->getAddress();
+ uint64_t SectionAOffset = AddrA - SectionABase;
+ SectionRef SectionA = *SAI;
+ bool IsCode = SectionA.isText();
+ uint32_t SectionAID = ~0U;
+ if (auto SectionAIDOrErr =
+ findOrEmitSection(Obj, SectionA, IsCode, ObjSectionToID))
+ SectionAID = *SectionAIDOrErr;
+ else
+ return SectionAIDOrErr.takeError();
+
+ uint32_t AddrB = Obj.getScatteredRelocationValue(RE2);
+ section_iterator SBI = getSectionByAddress(Obj, AddrB);
+ assert(SBI != Obj.section_end() && "Can't find section for address B");
+ uint64_t SectionBBase = SBI->getAddress();
+ uint64_t SectionBOffset = AddrB - SectionBBase;
+ SectionRef SectionB = *SBI;
+ uint32_t SectionBID = ~0U;
+ if (auto SectionBIDOrErr =
+ findOrEmitSection(Obj, SectionB, IsCode, ObjSectionToID))
+ SectionBID = *SectionBIDOrErr;
+ else
+ return SectionBIDOrErr.takeError();
+
+ // Compute the addend 'C' from the original expression 'A - B + C'.
+ Addend -= AddrA - AddrB;
+
+ LLVM_DEBUG(dbgs() << "Found SECTDIFF: AddrA: " << AddrA
+ << ", AddrB: " << AddrB << ", Addend: " << Addend
+ << ", SectionA ID: " << SectionAID << ", SectionAOffset: "
+ << SectionAOffset << ", SectionB ID: " << SectionBID
+ << ", SectionBOffset: " << SectionBOffset << "\n");
+ RelocationEntry R(SectionID, Offset, RelocType, Addend, SectionAID,
+ SectionAOffset, SectionBID, SectionBOffset,
+ IsPCRel, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+ // Populate stubs in __jump_table section.
+ Error populateJumpTable(const MachOObjectFile &Obj,
+ const SectionRef &JTSection,
+ unsigned JTSectionID) {
+ MachO::dysymtab_command DySymTabCmd = Obj.getDysymtabLoadCommand();
+ MachO::section Sec32 = Obj.getSection(JTSection.getRawDataRefImpl());
+ uint32_t JTSectionSize = Sec32.size;
+ unsigned FirstIndirectSymbol = Sec32.reserved1;
+ unsigned JTEntrySize = Sec32.reserved2;
+ unsigned NumJTEntries = JTSectionSize / JTEntrySize;
+ uint8_t *JTSectionAddr = getSectionAddress(JTSectionID);
+ unsigned JTEntryOffset = 0;
+
+ if (JTSectionSize % JTEntrySize != 0)
+ return make_error<RuntimeDyldError>("Jump-table section does not contain "
+ "a whole number of stubs?");
+
+ for (unsigned i = 0; i < NumJTEntries; ++i) {
+ unsigned SymbolIndex =
+ Obj.getIndirectSymbolTableEntry(DySymTabCmd, FirstIndirectSymbol + i);
+ symbol_iterator SI = Obj.getSymbolByIndex(SymbolIndex);
+ Expected<StringRef> IndirectSymbolName = SI->getName();
+ if (!IndirectSymbolName)
+ return IndirectSymbolName.takeError();
+ uint8_t *JTEntryAddr = JTSectionAddr + JTEntryOffset;
+ createStubFunction(JTEntryAddr);
+ RelocationEntry RE(JTSectionID, JTEntryOffset + 1,
+ MachO::GENERIC_RELOC_VANILLA, 0, true, 2);
+ addRelocationForSymbol(RE, *IndirectSymbolName);
+ JTEntryOffset += JTEntrySize;
+ }
+
+ return Error::success();
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
new file mode 100644
index 0000000000..a4d91cf338
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -0,0 +1,238 @@
+//===-- RuntimeDyldMachOX86_64.h ---- MachO/X86_64 specific code. -*- C++ -*-=//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
+#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOX86_64_H
+
+#include "../RuntimeDyldMachO.h"
+
+#define DEBUG_TYPE "dyld"
+
+namespace llvm {
+
+class RuntimeDyldMachOX86_64
+ : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOX86_64> {
+public:
+
+ typedef uint64_t TargetPtrT;
+
+ RuntimeDyldMachOX86_64(RuntimeDyld::MemoryManager &MM,
+ JITSymbolResolver &Resolver)
+ : RuntimeDyldMachOCRTPBase(MM, Resolver) {}
+
+ unsigned getMaxStubSize() const override { return 8; }
+
+ unsigned getStubAlignment() override { return 8; }
+
+ Expected<relocation_iterator>
+ processRelocationRef(unsigned SectionID, relocation_iterator RelI,
+ const ObjectFile &BaseObjT,
+ ObjSectionToIDMap &ObjSectionToID,
+ StubMap &Stubs) override {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile &>(BaseObjT);
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+ uint32_t RelType = Obj.getAnyRelocationType(RelInfo);
+
+ if (RelType == MachO::X86_64_RELOC_SUBTRACTOR)
+ return processSubtractRelocation(SectionID, RelI, Obj, ObjSectionToID);
+
+ assert(!Obj.isRelocationScattered(RelInfo) &&
+ "Scattered relocations not supported on X86_64");
+
+ RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
+ RE.Addend = memcpyAddend(RE);
+ RelocationValueRef Value;
+ if (auto ValueOrErr = getRelocationValueRef(Obj, RelI, RE, ObjSectionToID))
+ Value = *ValueOrErr;
+ else
+ return ValueOrErr.takeError();
+
+ bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
+ if (!IsExtern && RE.IsPCRel)
+ makeValueAddendPCRel(Value, RelI, 1 << RE.Size);
+
+ switch (RelType) {
+ UNIMPLEMENTED_RELOC(MachO::X86_64_RELOC_TLV);
+ default:
+ if (RelType > MachO::X86_64_RELOC_TLV)
+ return make_error<RuntimeDyldError>(("MachO X86_64 relocation type " +
+ Twine(RelType) +
+ " is out of range").str());
+ break;
+ }
+
+ if (RE.RelType == MachO::X86_64_RELOC_GOT ||
+ RE.RelType == MachO::X86_64_RELOC_GOT_LOAD)
+ processGOTRelocation(RE, Value, Stubs);
+ else {
+ RE.Addend = Value.Offset;
+ if (Value.SymbolName)
+ addRelocationForSymbol(RE, Value.SymbolName);
+ else
+ addRelocationForSection(RE, Value.SectionID);
+ }
+
+ return ++RelI;
+ }
+
+ void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
+ LLVM_DEBUG(dumpRelocationToResolve(RE, Value));
+ const SectionEntry &Section = Sections[RE.SectionID];
+ uint8_t *LocalAddress = Section.getAddressWithOffset(RE.Offset);
+
+ // If the relocation is PC-relative, the value to be encoded is the
+ // pointer difference.
+ if (RE.IsPCRel) {
+ // FIXME: It seems this value needs to be adjusted by 4 for an effective
+ // PC address. Is that expected? Only for branches, perhaps?
+ uint64_t FinalAddress = Section.getLoadAddressWithOffset(RE.Offset);
+ Value -= FinalAddress + 4;
+ }
+
+ switch (RE.RelType) {
+ default:
+ llvm_unreachable("Invalid relocation type!");
+ case MachO::X86_64_RELOC_SIGNED_1:
+ case MachO::X86_64_RELOC_SIGNED_2:
+ case MachO::X86_64_RELOC_SIGNED_4:
+ case MachO::X86_64_RELOC_SIGNED:
+ case MachO::X86_64_RELOC_UNSIGNED:
+ case MachO::X86_64_RELOC_BRANCH:
+ writeBytesUnaligned(Value + RE.Addend, LocalAddress, 1 << RE.Size);
+ break;
+ case MachO::X86_64_RELOC_SUBTRACTOR: {
+ uint64_t SectionABase = Sections[RE.Sections.SectionA].getLoadAddress();
+ uint64_t SectionBBase = Sections[RE.Sections.SectionB].getLoadAddress();
+ assert((Value == SectionABase || Value == SectionBBase) &&
+ "Unexpected SUBTRACTOR relocation value.");
+ Value = SectionABase - SectionBBase + RE.Addend;
+ writeBytesUnaligned(Value, LocalAddress, 1 << RE.Size);
+ break;
+ }
+ }
+ }
+
+ Error finalizeSection(const ObjectFile &Obj, unsigned SectionID,
+ const SectionRef &Section) {
+ return Error::success();
+ }
+
+private:
+ void processGOTRelocation(const RelocationEntry &RE,
+ RelocationValueRef &Value, StubMap &Stubs) {
+ SectionEntry &Section = Sections[RE.SectionID];
+ assert(RE.IsPCRel);
+ assert(RE.Size == 2);
+ Value.Offset -= RE.Addend;
+ RuntimeDyldMachO::StubMap::const_iterator i = Stubs.find(Value);
+ uint8_t *Addr;
+ if (i != Stubs.end()) {
+ Addr = Section.getAddressWithOffset(i->second);
+ } else {
+ Stubs[Value] = Section.getStubOffset();
+ uint8_t *GOTEntry = Section.getAddressWithOffset(Section.getStubOffset());
+ RelocationEntry GOTRE(RE.SectionID, Section.getStubOffset(),
+ MachO::X86_64_RELOC_UNSIGNED, Value.Offset, false,
+ 3);
+ if (Value.SymbolName)
+ addRelocationForSymbol(GOTRE, Value.SymbolName);
+ else
+ addRelocationForSection(GOTRE, Value.SectionID);
+ Section.advanceStubOffset(8);
+ Addr = GOTEntry;
+ }
+ RelocationEntry TargetRE(RE.SectionID, RE.Offset,
+ MachO::X86_64_RELOC_UNSIGNED, RE.Addend, true, 2);
+ resolveRelocation(TargetRE, (uint64_t)Addr);
+ }
+
+ Expected<relocation_iterator>
+ processSubtractRelocation(unsigned SectionID, relocation_iterator RelI,
+ const MachOObjectFile &BaseObj,
+ ObjSectionToIDMap &ObjSectionToID) {
+ const MachOObjectFile &Obj =
+ static_cast<const MachOObjectFile&>(BaseObj);
+ MachO::any_relocation_info RE =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ unsigned Size = Obj.getAnyRelocationLength(RE);
+ uint64_t Offset = RelI->getOffset();
+ uint8_t *LocalAddress = Sections[SectionID].getAddressWithOffset(Offset);
+ unsigned NumBytes = 1 << Size;
+ int64_t Addend =
+ SignExtend64(readBytesUnaligned(LocalAddress, NumBytes), NumBytes * 8);
+
+ unsigned SectionBID = ~0U;
+ uint64_t SectionBOffset = 0;
+
+ MachO::any_relocation_info RelInfo =
+ Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ bool AIsExternal = BaseObj.getPlainRelocationExternal(RelInfo);
+
+ if (AIsExternal) {
+ Expected<StringRef> SubtrahendNameOrErr = RelI->getSymbol()->getName();
+ if (!SubtrahendNameOrErr)
+ return SubtrahendNameOrErr.takeError();
+ auto SubtrahendI = GlobalSymbolTable.find(*SubtrahendNameOrErr);
+ SectionBID = SubtrahendI->second.getSectionID();
+ SectionBOffset = SubtrahendI->second.getOffset();
+ } else {
+ SectionRef SecB = Obj.getAnyRelocationSection(RelInfo);
+ bool IsCode = SecB.isText();
+ Expected<unsigned> SectionBIDOrErr =
+ findOrEmitSection(Obj, SecB, IsCode, ObjSectionToID);
+ if (!SectionBIDOrErr)
+ return SectionBIDOrErr.takeError();
+ SectionBID = *SectionBIDOrErr;
+ Addend += SecB.getAddress();
+ }
+
+ ++RelI;
+
+ unsigned SectionAID = ~0U;
+ uint64_t SectionAOffset = 0;
+
+ RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
+
+ bool BIsExternal = BaseObj.getPlainRelocationExternal(RelInfo);
+ if (BIsExternal) {
+ Expected<StringRef> MinuendNameOrErr = RelI->getSymbol()->getName();
+ if (!MinuendNameOrErr)
+ return MinuendNameOrErr.takeError();
+ auto MinuendI = GlobalSymbolTable.find(*MinuendNameOrErr);
+ SectionAID = MinuendI->second.getSectionID();
+ SectionAOffset = MinuendI->second.getOffset();
+ } else {
+ SectionRef SecA = Obj.getAnyRelocationSection(RelInfo);
+ bool IsCode = SecA.isText();
+ Expected<unsigned> SectionAIDOrErr =
+ findOrEmitSection(Obj, SecA, IsCode, ObjSectionToID);
+ if (!SectionAIDOrErr)
+ return SectionAIDOrErr.takeError();
+ SectionAID = *SectionAIDOrErr;
+ Addend -= SecA.getAddress();
+ }
+
+ RelocationEntry R(SectionID, Offset, MachO::X86_64_RELOC_SUBTRACTOR, (uint64_t)Addend,
+ SectionAID, SectionAOffset, SectionBID, SectionBOffset,
+ false, Size);
+
+ addRelocationForSection(R, SectionAID);
+
+ return ++RelI;
+ }
+
+};
+}
+
+#undef DEBUG_TYPE
+
+#endif
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/ya.make
new file mode 100644
index 0000000000..67afc5f76f
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld/ya.make
@@ -0,0 +1,37 @@
# Generated by devtools/yamaker.

LIBRARY()

LICENSE(Apache-2.0 WITH LLVM-exception)

LICENSE_TEXTS(.yandex_meta/licenses.list.txt)

# Modules this library links against.
PEERDIR(
    contrib/libs/llvm14
    contrib/libs/llvm14/include
    contrib/libs/llvm14/lib/IR
    contrib/libs/llvm14/lib/MC
    contrib/libs/llvm14/lib/Object
    contrib/libs/llvm14/lib/Support
)

# Extra include search path for the library's own private headers.
ADDINCL(
    contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld
)

NO_COMPILER_WARNINGS()

NO_UTIL()

# Translation units of LLVM's RuntimeDyld (runtime dynamic linker).
SRCS(
    JITSymbol.cpp
    RTDyldMemoryManager.cpp
    RuntimeDyld.cpp
    RuntimeDyldCOFF.cpp
    RuntimeDyldChecker.cpp
    RuntimeDyldELF.cpp
    RuntimeDyldMachO.cpp
    Targets/RuntimeDyldELFMips.cpp
)

END()
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/SectionMemoryManager.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/SectionMemoryManager.cpp
new file mode 100644
index 0000000000..56b232b9db
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/SectionMemoryManager.cpp
@@ -0,0 +1,273 @@
+//===- SectionMemoryManager.cpp - Memory manager for MCJIT/RtDyld *- C++ -*-==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the section-based memory manager used by the MCJIT
+// execution engine and RuntimeDyld
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ExecutionEngine/SectionMemoryManager.h"
+#include "llvm/Config/config.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/Process.h"
+
+namespace llvm {
+
+uint8_t *SectionMemoryManager::allocateDataSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName,
+ bool IsReadOnly) {
+ if (IsReadOnly)
+ return allocateSection(SectionMemoryManager::AllocationPurpose::ROData,
+ Size, Alignment);
+ return allocateSection(SectionMemoryManager::AllocationPurpose::RWData, Size,
+ Alignment);
+}
+
+uint8_t *SectionMemoryManager::allocateCodeSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID,
+ StringRef SectionName) {
+ return allocateSection(SectionMemoryManager::AllocationPurpose::Code, Size,
+ Alignment);
+}
+
// Hand out at least Size bytes aligned to Alignment from the memory group
// matching Purpose.  The request is satisfied from an existing free block
// when one is large enough; otherwise a fresh region is mapped from the OS
// through MMapper.  Returns nullptr if mapping fails (error is swallowed —
// see FIXME below).
uint8_t *SectionMemoryManager::allocateSection(
    SectionMemoryManager::AllocationPurpose Purpose, uintptr_t Size,
    unsigned Alignment) {
  if (!Alignment)
    Alignment = 16;

  assert(!(Alignment & (Alignment - 1)) && "Alignment must be a power of two.");

  // Over-allocate by one extra alignment unit so that the aligned start
  // address plus Size is guaranteed to fit inside the block.
  uintptr_t RequiredSize = Alignment * ((Size + Alignment - 1) / Alignment + 1);
  uintptr_t Addr = 0;

  // Select the memory group corresponding to the allocation purpose.
  MemoryGroup &MemGroup = [&]() -> MemoryGroup & {
    switch (Purpose) {
    case AllocationPurpose::Code:
      return CodeMem;
    case AllocationPurpose::ROData:
      return RODataMem;
    case AllocationPurpose::RWData:
      return RWDataMem;
    }
    llvm_unreachable("Unknown SectionMemoryManager::AllocationPurpose");
  }();

  // Look in the list of free memory regions and use a block there if one
  // is available.
  for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
    if (FreeMB.Free.allocatedSize() >= RequiredSize) {
      Addr = (uintptr_t)FreeMB.Free.base();
      uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize();
      // Align the address.
      Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

      if (FreeMB.PendingPrefixIndex == (unsigned)-1) {
        // The part of the block we're giving out to the user is now pending
        MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

        // Remember this pending block, such that future allocations can just
        // modify it rather than creating a new one
        FreeMB.PendingPrefixIndex = MemGroup.PendingMem.size() - 1;
      } else {
        // This free block already fronts a pending block: grow that pending
        // block to cover the new allocation instead of adding another entry.
        sys::MemoryBlock &PendingMB =
            MemGroup.PendingMem[FreeMB.PendingPrefixIndex];
        PendingMB = sys::MemoryBlock(PendingMB.base(),
                                     Addr + Size - (uintptr_t)PendingMB.base());
      }

      // Remember how much free space is now left in this block
      FreeMB.Free =
          sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size);
      return (uint8_t *)Addr;
    }
  }

  // No pre-allocated free block was large enough. Allocate a new memory region.
  // Note that all sections get allocated as read-write. The permissions will
  // be updated later based on memory group.
  //
  // FIXME: It would be useful to define a default allocation size (or add
  // it as a constructor parameter) to minimize the number of allocations.
  //
  // FIXME: Initialize the Near member for each memory group to avoid
  // interleaving.
  std::error_code ec;
  sys::MemoryBlock MB = MMapper.allocateMappedMemory(
      Purpose, RequiredSize, &MemGroup.Near,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, ec);
  if (ec) {
    // FIXME: Add error propagation to the interface.
    return nullptr;
  }

  // Save this address as the basis for our next request
  MemGroup.Near = MB;

  // Copy the address to all the other groups, if they have not
  // been initialized.
  if (CodeMem.Near.base() == nullptr)
    CodeMem.Near = MB;
  if (RODataMem.Near.base() == nullptr)
    RODataMem.Near = MB;
  if (RWDataMem.Near.base() == nullptr)
    RWDataMem.Near = MB;

  // Remember that we allocated this memory
  MemGroup.AllocatedMem.push_back(MB);
  Addr = (uintptr_t)MB.base();
  uintptr_t EndOfBlock = Addr + MB.allocatedSize();

  // Align the address.
  Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1);

  // The part of the block we're giving out to the user is now pending
  MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size));

  // The allocateMappedMemory may allocate much more memory than we need. In
  // this case, we store the unused memory as a free memory block.
  unsigned FreeSize = EndOfBlock - Addr - Size;
  if (FreeSize > 16) {
    FreeMemBlock FreeMB;
    FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize);
    FreeMB.PendingPrefixIndex = (unsigned)-1;
    MemGroup.FreeMem.push_back(FreeMB);
  }

  // Return aligned address
  return (uint8_t *)Addr;
}
+
+bool SectionMemoryManager::finalizeMemory(std::string *ErrMsg) {
+ // FIXME: Should in-progress permissions be reverted if an error occurs?
+ std::error_code ec;
+
+ // Make code memory executable.
+ ec = applyMemoryGroupPermissions(CodeMem,
+ sys::Memory::MF_READ | sys::Memory::MF_EXEC);
+ if (ec) {
+ if (ErrMsg) {
+ *ErrMsg = ec.message();
+ }
+ return true;
+ }
+
+ // Make read-only data memory read-only.
+ ec = applyMemoryGroupPermissions(RODataMem, sys::Memory::MF_READ);
+ if (ec) {
+ if (ErrMsg) {
+ *ErrMsg = ec.message();
+ }
+ return true;
+ }
+
+ // Read-write data memory already has the correct permissions
+
+ // Some platforms with separate data cache and instruction cache require
+ // explicit cache flush, otherwise JIT code manipulations (like resolved
+ // relocations) will get to the data cache but not to the instruction cache.
+ invalidateInstructionCache();
+
+ return false;
+}
+
+static sys::MemoryBlock trimBlockToPageSize(sys::MemoryBlock M) {
+ static const size_t PageSize = sys::Process::getPageSizeEstimate();
+
+ size_t StartOverlap =
+ (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize;
+
+ size_t TrimmedSize = M.allocatedSize();
+ TrimmedSize -= StartOverlap;
+ TrimmedSize -= TrimmedSize % PageSize;
+
+ sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap),
+ TrimmedSize);
+
+ assert(((uintptr_t)Trimmed.base() % PageSize) == 0);
+ assert((Trimmed.allocatedSize() % PageSize) == 0);
+ assert(M.base() <= Trimmed.base() &&
+ Trimmed.allocatedSize() <= M.allocatedSize());
+
+ return Trimmed;
+}
+
+std::error_code
+SectionMemoryManager::applyMemoryGroupPermissions(MemoryGroup &MemGroup,
+ unsigned Permissions) {
+ for (sys::MemoryBlock &MB : MemGroup.PendingMem)
+ if (std::error_code EC = MMapper.protectMappedMemory(MB, Permissions))
+ return EC;
+
+ MemGroup.PendingMem.clear();
+
+ // Now go through free blocks and trim any of them that don't span the entire
+ // page because one of the pending blocks may have overlapped it.
+ for (FreeMemBlock &FreeMB : MemGroup.FreeMem) {
+ FreeMB.Free = trimBlockToPageSize(FreeMB.Free);
+ // We cleared the PendingMem list, so all these pointers are now invalid
+ FreeMB.PendingPrefixIndex = (unsigned)-1;
+ }
+
+ // Remove all blocks which are now empty
+ erase_if(MemGroup.FreeMem, [](FreeMemBlock &FreeMB) {
+ return FreeMB.Free.allocatedSize() == 0;
+ });
+
+ return std::error_code();
+}
+
+void SectionMemoryManager::invalidateInstructionCache() {
+ for (sys::MemoryBlock &Block : CodeMem.PendingMem)
+ sys::Memory::InvalidateInstructionCache(Block.base(),
+ Block.allocatedSize());
+}
+
+SectionMemoryManager::~SectionMemoryManager() {
+ for (MemoryGroup *Group : {&CodeMem, &RWDataMem, &RODataMem}) {
+ for (sys::MemoryBlock &Block : Group->AllocatedMem)
+ MMapper.releaseMappedMemory(Block);
+ }
+}
+
// Out-of-line destructor anchors MemoryMapper's vtable in this file.
SectionMemoryManager::MemoryMapper::~MemoryMapper() {}

// Anchors SectionMemoryManager's vtable in this translation unit.
void SectionMemoryManager::anchor() {}
+
namespace {
// Trivial implementation of SectionMemoryManager::MemoryMapper that just calls
// into sys::Memory.
class DefaultMMapper final : public SectionMemoryManager::MemoryMapper {
public:
  // Map a fresh region near NearBlock with the given protection Flags.
  // Purpose is ignored: sys::Memory does not distinguish code from data
  // at allocation time.
  sys::MemoryBlock
  allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose,
                       size_t NumBytes, const sys::MemoryBlock *const NearBlock,
                       unsigned Flags, std::error_code &EC) override {
    return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC);
  }

  // Change the protection flags on an already-mapped block.
  std::error_code protectMappedMemory(const sys::MemoryBlock &Block,
                                      unsigned Flags) override {
    return sys::Memory::protectMappedMemory(Block, Flags);
  }

  // Unmap a block previously returned by allocateMappedMemory.
  std::error_code releaseMappedMemory(sys::MemoryBlock &M) override {
    return sys::Memory::releaseMappedMemory(M);
  }
};

// Shared fallback mapper used when no custom mapper is supplied.
DefaultMMapper DefaultMMapperInstance;
} // namespace
+
// Use the caller-provided mapper when one is given; otherwise fall back to
// the process-wide DefaultMMapper, which delegates directly to sys::Memory.
SectionMemoryManager::SectionMemoryManager(MemoryMapper *MM)
    : MMapper(MM ? *MM : DefaultMMapperInstance) {}
+
+} // namespace llvm
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/TargetSelect.cpp b/contrib/libs/llvm14/lib/ExecutionEngine/TargetSelect.cpp
new file mode 100644
index 0000000000..c67a1a7661
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/TargetSelect.cpp
@@ -0,0 +1,96 @@
+//===-- TargetSelect.cpp - Target Chooser Code ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This just asks the TargetRegistry for the appropriate target to use, and
+// allows the user to specify a specific one on the commandline with -march=x,
+// -mcpu=y, and -mattr=a,-b,+c. Clients should initialize targets prior to
+// calling selectTarget().
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/ExecutionEngine/ExecutionEngine.h"
+#include "llvm/IR/Module.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+TargetMachine *EngineBuilder::selectTarget() {
+ Triple TT;
+
+ // MCJIT can generate code for remote targets, but the old JIT and Interpreter
+ // must use the host architecture.
+ if (WhichEngine != EngineKind::Interpreter && M)
+ TT.setTriple(M->getTargetTriple());
+
+ return selectTarget(TT, MArch, MCPU, MAttrs);
+}
+
+/// selectTarget - Pick a target either via -march or by guessing the native
+/// arch. Add any CPU features specified via -mcpu or -mattr.
+TargetMachine *EngineBuilder::selectTarget(const Triple &TargetTriple,
+ StringRef MArch,
+ StringRef MCPU,
+ const SmallVectorImpl<std::string>& MAttrs) {
+ Triple TheTriple(TargetTriple);
+ if (TheTriple.getTriple().empty())
+ TheTriple.setTriple(sys::getProcessTriple());
+
+ // Adjust the triple to match what the user requested.
+ const Target *TheTarget = nullptr;
+ if (!MArch.empty()) {
+ auto I = find_if(TargetRegistry::targets(),
+ [&](const Target &T) { return MArch == T.getName(); });
+
+ if (I == TargetRegistry::targets().end()) {
+ if (ErrorStr)
+ *ErrorStr = "No available targets are compatible with this -march, "
+ "see -version for the available targets.\n";
+ return nullptr;
+ }
+
+ TheTarget = &*I;
+
+ // Adjust the triple to match (if known), otherwise stick with the
+ // requested/host triple.
+ Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
+ if (Type != Triple::UnknownArch)
+ TheTriple.setArch(Type);
+ } else {
+ std::string Error;
+ TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
+ if (!TheTarget) {
+ if (ErrorStr)
+ *ErrorStr = Error;
+ return nullptr;
+ }
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (!MAttrs.empty()) {
+ SubtargetFeatures Features;
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ // Allocate a target...
+ TargetMachine *Target =
+ TheTarget->createTargetMachine(TheTriple.getTriple(), MCPU, FeaturesStr,
+ Options, RelocModel, CMModel, OptLevel,
+ /*JIT*/ true);
+ Target->Options.EmulatedTLS = EmulatedTLS;
+ Target->Options.ExplicitEmulatedTLS = true;
+
+ assert(Target && "Could not allocate target machine!");
+ return Target;
+}
diff --git a/contrib/libs/llvm14/lib/ExecutionEngine/ya.make b/contrib/libs/llvm14/lib/ExecutionEngine/ya.make
new file mode 100644
index 0000000000..7fa308cc44
--- /dev/null
+++ b/contrib/libs/llvm14/lib/ExecutionEngine/ya.make
@@ -0,0 +1,37 @@
# Generated by devtools/yamaker.

LIBRARY()

LICENSE(Apache-2.0 WITH LLVM-exception)

LICENSE_TEXTS(.yandex_meta/licenses.list.txt)

# Modules this library links against.
PEERDIR(
    contrib/libs/llvm14
    contrib/libs/llvm14/include
    contrib/libs/llvm14/lib/ExecutionEngine/Orc/TargetProcess
    contrib/libs/llvm14/lib/ExecutionEngine/RuntimeDyld
    contrib/libs/llvm14/lib/IR
    contrib/libs/llvm14/lib/MC
    contrib/libs/llvm14/lib/Object
    contrib/libs/llvm14/lib/Support
    contrib/libs/llvm14/lib/Target
)

# Extra include search path for the library's own private headers.
ADDINCL(
    contrib/libs/llvm14/lib/ExecutionEngine
)

NO_COMPILER_WARNINGS()

NO_UTIL()

# Translation units of the common ExecutionEngine layer.
SRCS(
    ExecutionEngine.cpp
    ExecutionEngineBindings.cpp
    GDBRegistrationListener.cpp
    SectionMemoryManager.cpp
    TargetSelect.cpp
)

END()