aboutsummaryrefslogtreecommitdiffstats
path: root/contrib/libs/llvm14/lib/Bitcode
diff options
context:
space:
mode:
authorvitalyisaev <vitalyisaev@yandex-team.com>2023-06-29 10:00:50 +0300
committervitalyisaev <vitalyisaev@yandex-team.com>2023-06-29 10:00:50 +0300
commit6ffe9e53658409f212834330e13564e4952558f6 (patch)
tree85b1e00183517648b228aafa7c8fb07f5276f419 /contrib/libs/llvm14/lib/Bitcode
parent726057070f9c5a91fc10fde0d5024913d10f1ab9 (diff)
downloadydb-6ffe9e53658409f212834330e13564e4952558f6.tar.gz
YQ Connector: support managed ClickHouse
Со стороны dqrun можно обратиться к инстансу коннектора, который работает на streaming стенде, и извлечь данные из облачного CH.
Diffstat (limited to 'contrib/libs/llvm14/lib/Bitcode')
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Reader/BitReader.cpp133
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Reader/BitcodeAnalyzer.cpp985
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Reader/BitcodeReader.cpp7144
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Reader/MetadataLoader.cpp2379
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Reader/MetadataLoader.h83
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Reader/ValueList.cpp216
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Reader/ValueList.h96
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Reader/ya.make33
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Writer/BitWriter.cpp49
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Writer/BitcodeWriter.cpp4975
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Writer/BitcodeWriterPass.cpp86
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Writer/ValueEnumerator.cpp1181
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Writer/ValueEnumerator.h305
-rw-r--r--contrib/libs/llvm14/lib/Bitcode/Writer/ya.make34
14 files changed, 17699 insertions, 0 deletions
diff --git a/contrib/libs/llvm14/lib/Bitcode/Reader/BitReader.cpp b/contrib/libs/llvm14/lib/Bitcode/Reader/BitReader.cpp
new file mode 100644
index 0000000000..5ac893aef1
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Reader/BitReader.cpp
@@ -0,0 +1,133 @@
+//===-- BitReader.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/BitReader.h"
+#include "llvm-c/Core.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstring>
+#include <string>
+
+using namespace llvm;
+
+/* Builds a module from the bitcode in the specified memory buffer, returning a
+ reference to the module via the OutModule parameter. Returns 0 on success.
+ Optionally returns a human-readable error message via OutMessage. */
+LLVMBool LLVMParseBitcode(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutModule,
+ char **OutMessage) {
+ return LLVMParseBitcodeInContext(LLVMGetGlobalContext(), MemBuf, OutModule,
+ OutMessage);
+}
+
+LLVMBool LLVMParseBitcode2(LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutModule) {
+ return LLVMParseBitcodeInContext2(LLVMGetGlobalContext(), MemBuf, OutModule);
+}
+
+LLVMBool LLVMParseBitcodeInContext(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutModule,
+ char **OutMessage) {
+ MemoryBufferRef Buf = unwrap(MemBuf)->getMemBufferRef();
+ LLVMContext &Ctx = *unwrap(ContextRef);
+
+ Expected<std::unique_ptr<Module>> ModuleOrErr = parseBitcodeFile(Buf, Ctx);
+ if (Error Err = ModuleOrErr.takeError()) {
+ std::string Message;
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ Message = EIB.message();
+ });
+ if (OutMessage)
+ *OutMessage = strdup(Message.c_str());
+ *OutModule = wrap((Module *)nullptr);
+ return 1;
+ }
+
+ *OutModule = wrap(ModuleOrErr.get().release());
+ return 0;
+}
+
+LLVMBool LLVMParseBitcodeInContext2(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutModule) {
+ MemoryBufferRef Buf = unwrap(MemBuf)->getMemBufferRef();
+ LLVMContext &Ctx = *unwrap(ContextRef);
+
+ ErrorOr<std::unique_ptr<Module>> ModuleOrErr =
+ expectedToErrorOrAndEmitErrors(Ctx, parseBitcodeFile(Buf, Ctx));
+ if (ModuleOrErr.getError()) {
+ *OutModule = wrap((Module *)nullptr);
+ return 1;
+ }
+
+ *OutModule = wrap(ModuleOrErr.get().release());
+ return 0;
+}
+
+/* Reads a module from the specified path, returning via the OutModule parameter
+ a module provider which performs lazy deserialization. Returns 0 on success.
+ Optionally returns a human-readable error message via OutMessage. */
+LLVMBool LLVMGetBitcodeModuleInContext(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutM, char **OutMessage) {
+ LLVMContext &Ctx = *unwrap(ContextRef);
+ std::unique_ptr<MemoryBuffer> Owner(unwrap(MemBuf));
+ Expected<std::unique_ptr<Module>> ModuleOrErr =
+ getOwningLazyBitcodeModule(std::move(Owner), Ctx);
+ // Release the buffer if we didn't take ownership of it since we never owned
+ // it anyway.
+ (void)Owner.release();
+
+ if (Error Err = ModuleOrErr.takeError()) {
+ std::string Message;
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ Message = EIB.message();
+ });
+ if (OutMessage)
+ *OutMessage = strdup(Message.c_str());
+ *OutM = wrap((Module *)nullptr);
+ return 1;
+ }
+
+ *OutM = wrap(ModuleOrErr.get().release());
+
+ return 0;
+}
+
+LLVMBool LLVMGetBitcodeModuleInContext2(LLVMContextRef ContextRef,
+ LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutM) {
+ LLVMContext &Ctx = *unwrap(ContextRef);
+ std::unique_ptr<MemoryBuffer> Owner(unwrap(MemBuf));
+
+ ErrorOr<std::unique_ptr<Module>> ModuleOrErr = expectedToErrorOrAndEmitErrors(
+ Ctx, getOwningLazyBitcodeModule(std::move(Owner), Ctx));
+ Owner.release();
+
+ if (ModuleOrErr.getError()) {
+ *OutM = wrap((Module *)nullptr);
+ return 1;
+ }
+
+ *OutM = wrap(ModuleOrErr.get().release());
+ return 0;
+}
+
+LLVMBool LLVMGetBitcodeModule(LLVMMemoryBufferRef MemBuf, LLVMModuleRef *OutM,
+ char **OutMessage) {
+ return LLVMGetBitcodeModuleInContext(LLVMGetGlobalContext(), MemBuf, OutM,
+ OutMessage);
+}
+
+LLVMBool LLVMGetBitcodeModule2(LLVMMemoryBufferRef MemBuf,
+ LLVMModuleRef *OutM) {
+ return LLVMGetBitcodeModuleInContext2(LLVMGetGlobalContext(), MemBuf, OutM);
+}
diff --git a/contrib/libs/llvm14/lib/Bitcode/Reader/BitcodeAnalyzer.cpp b/contrib/libs/llvm14/lib/Bitcode/Reader/BitcodeAnalyzer.cpp
new file mode 100644
index 0000000000..ffef352999
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Reader/BitcodeAnalyzer.cpp
@@ -0,0 +1,985 @@
+//===- BitcodeAnalyzer.cpp - Internal BitcodeAnalyzer implementation ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Bitcode/BitcodeAnalyzer.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/LLVMBitCodes.h"
+#include "llvm/Bitstream/BitCodes.h"
+#include "llvm/Bitstream/BitstreamReader.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/SHA1.h"
+
+using namespace llvm;
+
+static Error reportError(StringRef Message) {
+ return createStringError(std::errc::illegal_byte_sequence, Message.data());
+}
+
+/// Return a symbolic block name if known, otherwise return null.
+static Optional<const char *> GetBlockName(unsigned BlockID,
+ const BitstreamBlockInfo &BlockInfo,
+ CurStreamTypeType CurStreamType) {
+ // Standard blocks for all bitcode files.
+ if (BlockID < bitc::FIRST_APPLICATION_BLOCKID) {
+ if (BlockID == bitc::BLOCKINFO_BLOCK_ID)
+ return "BLOCKINFO_BLOCK";
+ return None;
+ }
+
+ // Check to see if we have a blockinfo record for this block, with a name.
+ if (const BitstreamBlockInfo::BlockInfo *Info =
+ BlockInfo.getBlockInfo(BlockID)) {
+ if (!Info->Name.empty())
+ return Info->Name.c_str();
+ }
+
+ if (CurStreamType != LLVMIRBitstream)
+ return None;
+
+ switch (BlockID) {
+ default:
+ return None;
+ case bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID:
+ return "OPERAND_BUNDLE_TAGS_BLOCK";
+ case bitc::MODULE_BLOCK_ID:
+ return "MODULE_BLOCK";
+ case bitc::PARAMATTR_BLOCK_ID:
+ return "PARAMATTR_BLOCK";
+ case bitc::PARAMATTR_GROUP_BLOCK_ID:
+ return "PARAMATTR_GROUP_BLOCK_ID";
+ case bitc::TYPE_BLOCK_ID_NEW:
+ return "TYPE_BLOCK_ID";
+ case bitc::CONSTANTS_BLOCK_ID:
+ return "CONSTANTS_BLOCK";
+ case bitc::FUNCTION_BLOCK_ID:
+ return "FUNCTION_BLOCK";
+ case bitc::IDENTIFICATION_BLOCK_ID:
+ return "IDENTIFICATION_BLOCK_ID";
+ case bitc::VALUE_SYMTAB_BLOCK_ID:
+ return "VALUE_SYMTAB";
+ case bitc::METADATA_BLOCK_ID:
+ return "METADATA_BLOCK";
+ case bitc::METADATA_KIND_BLOCK_ID:
+ return "METADATA_KIND_BLOCK";
+ case bitc::METADATA_ATTACHMENT_ID:
+ return "METADATA_ATTACHMENT_BLOCK";
+ case bitc::USELIST_BLOCK_ID:
+ return "USELIST_BLOCK_ID";
+ case bitc::GLOBALVAL_SUMMARY_BLOCK_ID:
+ return "GLOBALVAL_SUMMARY_BLOCK";
+ case bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID:
+ return "FULL_LTO_GLOBALVAL_SUMMARY_BLOCK";
+ case bitc::MODULE_STRTAB_BLOCK_ID:
+ return "MODULE_STRTAB_BLOCK";
+ case bitc::STRTAB_BLOCK_ID:
+ return "STRTAB_BLOCK";
+ case bitc::SYMTAB_BLOCK_ID:
+ return "SYMTAB_BLOCK";
+ }
+}
+
+/// Return a symbolic code name if known, otherwise return null.
+static Optional<const char *> GetCodeName(unsigned CodeID, unsigned BlockID,
+ const BitstreamBlockInfo &BlockInfo,
+ CurStreamTypeType CurStreamType) {
+ // Standard blocks for all bitcode files.
+ if (BlockID < bitc::FIRST_APPLICATION_BLOCKID) {
+ if (BlockID == bitc::BLOCKINFO_BLOCK_ID) {
+ switch (CodeID) {
+ default:
+ return None;
+ case bitc::BLOCKINFO_CODE_SETBID:
+ return "SETBID";
+ case bitc::BLOCKINFO_CODE_BLOCKNAME:
+ return "BLOCKNAME";
+ case bitc::BLOCKINFO_CODE_SETRECORDNAME:
+ return "SETRECORDNAME";
+ }
+ }
+ return None;
+ }
+
+ // Check to see if we have a blockinfo record for this record, with a name.
+ if (const BitstreamBlockInfo::BlockInfo *Info =
+ BlockInfo.getBlockInfo(BlockID)) {
+ for (const std::pair<unsigned, std::string> &RN : Info->RecordNames)
+ if (RN.first == CodeID)
+ return RN.second.c_str();
+ }
+
+ if (CurStreamType != LLVMIRBitstream)
+ return None;
+
+#define STRINGIFY_CODE(PREFIX, CODE) \
+ case bitc::PREFIX##_##CODE: \
+ return #CODE;
+ switch (BlockID) {
+ default:
+ return None;
+ case bitc::MODULE_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(MODULE_CODE, VERSION)
+ STRINGIFY_CODE(MODULE_CODE, TRIPLE)
+ STRINGIFY_CODE(MODULE_CODE, DATALAYOUT)
+ STRINGIFY_CODE(MODULE_CODE, ASM)
+ STRINGIFY_CODE(MODULE_CODE, SECTIONNAME)
+ STRINGIFY_CODE(MODULE_CODE, DEPLIB) // Deprecated, present in old bitcode
+ STRINGIFY_CODE(MODULE_CODE, GLOBALVAR)
+ STRINGIFY_CODE(MODULE_CODE, FUNCTION)
+ STRINGIFY_CODE(MODULE_CODE, ALIAS)
+ STRINGIFY_CODE(MODULE_CODE, GCNAME)
+ STRINGIFY_CODE(MODULE_CODE, COMDAT)
+ STRINGIFY_CODE(MODULE_CODE, VSTOFFSET)
+ STRINGIFY_CODE(MODULE_CODE, METADATA_VALUES_UNUSED)
+ STRINGIFY_CODE(MODULE_CODE, SOURCE_FILENAME)
+ STRINGIFY_CODE(MODULE_CODE, HASH)
+ }
+ case bitc::IDENTIFICATION_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(IDENTIFICATION_CODE, STRING)
+ STRINGIFY_CODE(IDENTIFICATION_CODE, EPOCH)
+ }
+ case bitc::PARAMATTR_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ // FIXME: Should these be different?
+ case bitc::PARAMATTR_CODE_ENTRY_OLD:
+ return "ENTRY";
+ case bitc::PARAMATTR_CODE_ENTRY:
+ return "ENTRY";
+ }
+ case bitc::PARAMATTR_GROUP_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ case bitc::PARAMATTR_GRP_CODE_ENTRY:
+ return "ENTRY";
+ }
+ case bitc::TYPE_BLOCK_ID_NEW:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(TYPE_CODE, NUMENTRY)
+ STRINGIFY_CODE(TYPE_CODE, VOID)
+ STRINGIFY_CODE(TYPE_CODE, FLOAT)
+ STRINGIFY_CODE(TYPE_CODE, DOUBLE)
+ STRINGIFY_CODE(TYPE_CODE, LABEL)
+ STRINGIFY_CODE(TYPE_CODE, OPAQUE)
+ STRINGIFY_CODE(TYPE_CODE, INTEGER)
+ STRINGIFY_CODE(TYPE_CODE, POINTER)
+ STRINGIFY_CODE(TYPE_CODE, HALF)
+ STRINGIFY_CODE(TYPE_CODE, ARRAY)
+ STRINGIFY_CODE(TYPE_CODE, VECTOR)
+ STRINGIFY_CODE(TYPE_CODE, X86_FP80)
+ STRINGIFY_CODE(TYPE_CODE, FP128)
+ STRINGIFY_CODE(TYPE_CODE, PPC_FP128)
+ STRINGIFY_CODE(TYPE_CODE, METADATA)
+ STRINGIFY_CODE(TYPE_CODE, X86_MMX)
+ STRINGIFY_CODE(TYPE_CODE, STRUCT_ANON)
+ STRINGIFY_CODE(TYPE_CODE, STRUCT_NAME)
+ STRINGIFY_CODE(TYPE_CODE, STRUCT_NAMED)
+ STRINGIFY_CODE(TYPE_CODE, FUNCTION)
+ STRINGIFY_CODE(TYPE_CODE, TOKEN)
+ STRINGIFY_CODE(TYPE_CODE, BFLOAT)
+ }
+
+ case bitc::CONSTANTS_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(CST_CODE, SETTYPE)
+ STRINGIFY_CODE(CST_CODE, NULL)
+ STRINGIFY_CODE(CST_CODE, UNDEF)
+ STRINGIFY_CODE(CST_CODE, INTEGER)
+ STRINGIFY_CODE(CST_CODE, WIDE_INTEGER)
+ STRINGIFY_CODE(CST_CODE, FLOAT)
+ STRINGIFY_CODE(CST_CODE, AGGREGATE)
+ STRINGIFY_CODE(CST_CODE, STRING)
+ STRINGIFY_CODE(CST_CODE, CSTRING)
+ STRINGIFY_CODE(CST_CODE, CE_BINOP)
+ STRINGIFY_CODE(CST_CODE, CE_CAST)
+ STRINGIFY_CODE(CST_CODE, CE_GEP)
+ STRINGIFY_CODE(CST_CODE, CE_INBOUNDS_GEP)
+ STRINGIFY_CODE(CST_CODE, CE_SELECT)
+ STRINGIFY_CODE(CST_CODE, CE_EXTRACTELT)
+ STRINGIFY_CODE(CST_CODE, CE_INSERTELT)
+ STRINGIFY_CODE(CST_CODE, CE_SHUFFLEVEC)
+ STRINGIFY_CODE(CST_CODE, CE_CMP)
+ STRINGIFY_CODE(CST_CODE, INLINEASM)
+ STRINGIFY_CODE(CST_CODE, CE_SHUFVEC_EX)
+ STRINGIFY_CODE(CST_CODE, CE_UNOP)
+ STRINGIFY_CODE(CST_CODE, DSO_LOCAL_EQUIVALENT)
+ STRINGIFY_CODE(CST_CODE, NO_CFI_VALUE)
+ case bitc::CST_CODE_BLOCKADDRESS:
+ return "CST_CODE_BLOCKADDRESS";
+ STRINGIFY_CODE(CST_CODE, DATA)
+ }
+ case bitc::FUNCTION_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(FUNC_CODE, DECLAREBLOCKS)
+ STRINGIFY_CODE(FUNC_CODE, INST_BINOP)
+ STRINGIFY_CODE(FUNC_CODE, INST_CAST)
+ STRINGIFY_CODE(FUNC_CODE, INST_GEP_OLD)
+ STRINGIFY_CODE(FUNC_CODE, INST_INBOUNDS_GEP_OLD)
+ STRINGIFY_CODE(FUNC_CODE, INST_SELECT)
+ STRINGIFY_CODE(FUNC_CODE, INST_EXTRACTELT)
+ STRINGIFY_CODE(FUNC_CODE, INST_INSERTELT)
+ STRINGIFY_CODE(FUNC_CODE, INST_SHUFFLEVEC)
+ STRINGIFY_CODE(FUNC_CODE, INST_CMP)
+ STRINGIFY_CODE(FUNC_CODE, INST_RET)
+ STRINGIFY_CODE(FUNC_CODE, INST_BR)
+ STRINGIFY_CODE(FUNC_CODE, INST_SWITCH)
+ STRINGIFY_CODE(FUNC_CODE, INST_INVOKE)
+ STRINGIFY_CODE(FUNC_CODE, INST_UNOP)
+ STRINGIFY_CODE(FUNC_CODE, INST_UNREACHABLE)
+ STRINGIFY_CODE(FUNC_CODE, INST_CLEANUPRET)
+ STRINGIFY_CODE(FUNC_CODE, INST_CATCHRET)
+ STRINGIFY_CODE(FUNC_CODE, INST_CATCHPAD)
+ STRINGIFY_CODE(FUNC_CODE, INST_PHI)
+ STRINGIFY_CODE(FUNC_CODE, INST_ALLOCA)
+ STRINGIFY_CODE(FUNC_CODE, INST_LOAD)
+ STRINGIFY_CODE(FUNC_CODE, INST_VAARG)
+ STRINGIFY_CODE(FUNC_CODE, INST_STORE)
+ STRINGIFY_CODE(FUNC_CODE, INST_EXTRACTVAL)
+ STRINGIFY_CODE(FUNC_CODE, INST_INSERTVAL)
+ STRINGIFY_CODE(FUNC_CODE, INST_CMP2)
+ STRINGIFY_CODE(FUNC_CODE, INST_VSELECT)
+ STRINGIFY_CODE(FUNC_CODE, DEBUG_LOC_AGAIN)
+ STRINGIFY_CODE(FUNC_CODE, INST_CALL)
+ STRINGIFY_CODE(FUNC_CODE, DEBUG_LOC)
+ STRINGIFY_CODE(FUNC_CODE, INST_GEP)
+ STRINGIFY_CODE(FUNC_CODE, OPERAND_BUNDLE)
+ STRINGIFY_CODE(FUNC_CODE, INST_FENCE)
+ STRINGIFY_CODE(FUNC_CODE, INST_ATOMICRMW)
+ STRINGIFY_CODE(FUNC_CODE, INST_LOADATOMIC)
+ STRINGIFY_CODE(FUNC_CODE, INST_STOREATOMIC)
+ STRINGIFY_CODE(FUNC_CODE, INST_CMPXCHG)
+ STRINGIFY_CODE(FUNC_CODE, INST_CALLBR)
+ }
+ case bitc::VALUE_SYMTAB_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(VST_CODE, ENTRY)
+ STRINGIFY_CODE(VST_CODE, BBENTRY)
+ STRINGIFY_CODE(VST_CODE, FNENTRY)
+ STRINGIFY_CODE(VST_CODE, COMBINED_ENTRY)
+ }
+ case bitc::MODULE_STRTAB_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(MST_CODE, ENTRY)
+ STRINGIFY_CODE(MST_CODE, HASH)
+ }
+ case bitc::GLOBALVAL_SUMMARY_BLOCK_ID:
+ case bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(FS, PERMODULE)
+ STRINGIFY_CODE(FS, PERMODULE_PROFILE)
+ STRINGIFY_CODE(FS, PERMODULE_RELBF)
+ STRINGIFY_CODE(FS, PERMODULE_GLOBALVAR_INIT_REFS)
+ STRINGIFY_CODE(FS, PERMODULE_VTABLE_GLOBALVAR_INIT_REFS)
+ STRINGIFY_CODE(FS, COMBINED)
+ STRINGIFY_CODE(FS, COMBINED_PROFILE)
+ STRINGIFY_CODE(FS, COMBINED_GLOBALVAR_INIT_REFS)
+ STRINGIFY_CODE(FS, ALIAS)
+ STRINGIFY_CODE(FS, COMBINED_ALIAS)
+ STRINGIFY_CODE(FS, COMBINED_ORIGINAL_NAME)
+ STRINGIFY_CODE(FS, VERSION)
+ STRINGIFY_CODE(FS, FLAGS)
+ STRINGIFY_CODE(FS, TYPE_TESTS)
+ STRINGIFY_CODE(FS, TYPE_TEST_ASSUME_VCALLS)
+ STRINGIFY_CODE(FS, TYPE_CHECKED_LOAD_VCALLS)
+ STRINGIFY_CODE(FS, TYPE_TEST_ASSUME_CONST_VCALL)
+ STRINGIFY_CODE(FS, TYPE_CHECKED_LOAD_CONST_VCALL)
+ STRINGIFY_CODE(FS, VALUE_GUID)
+ STRINGIFY_CODE(FS, CFI_FUNCTION_DEFS)
+ STRINGIFY_CODE(FS, CFI_FUNCTION_DECLS)
+ STRINGIFY_CODE(FS, TYPE_ID)
+ STRINGIFY_CODE(FS, TYPE_ID_METADATA)
+ STRINGIFY_CODE(FS, BLOCK_COUNT)
+ STRINGIFY_CODE(FS, PARAM_ACCESS)
+ }
+ case bitc::METADATA_ATTACHMENT_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(METADATA, ATTACHMENT)
+ }
+ case bitc::METADATA_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(METADATA, STRING_OLD)
+ STRINGIFY_CODE(METADATA, VALUE)
+ STRINGIFY_CODE(METADATA, NODE)
+ STRINGIFY_CODE(METADATA, NAME)
+ STRINGIFY_CODE(METADATA, DISTINCT_NODE)
+ STRINGIFY_CODE(METADATA, KIND) // Older bitcode has it in a MODULE_BLOCK
+ STRINGIFY_CODE(METADATA, LOCATION)
+ STRINGIFY_CODE(METADATA, OLD_NODE)
+ STRINGIFY_CODE(METADATA, OLD_FN_NODE)
+ STRINGIFY_CODE(METADATA, NAMED_NODE)
+ STRINGIFY_CODE(METADATA, GENERIC_DEBUG)
+ STRINGIFY_CODE(METADATA, SUBRANGE)
+ STRINGIFY_CODE(METADATA, ENUMERATOR)
+ STRINGIFY_CODE(METADATA, BASIC_TYPE)
+ STRINGIFY_CODE(METADATA, FILE)
+ STRINGIFY_CODE(METADATA, DERIVED_TYPE)
+ STRINGIFY_CODE(METADATA, COMPOSITE_TYPE)
+ STRINGIFY_CODE(METADATA, SUBROUTINE_TYPE)
+ STRINGIFY_CODE(METADATA, COMPILE_UNIT)
+ STRINGIFY_CODE(METADATA, SUBPROGRAM)
+ STRINGIFY_CODE(METADATA, LEXICAL_BLOCK)
+ STRINGIFY_CODE(METADATA, LEXICAL_BLOCK_FILE)
+ STRINGIFY_CODE(METADATA, NAMESPACE)
+ STRINGIFY_CODE(METADATA, TEMPLATE_TYPE)
+ STRINGIFY_CODE(METADATA, TEMPLATE_VALUE)
+ STRINGIFY_CODE(METADATA, GLOBAL_VAR)
+ STRINGIFY_CODE(METADATA, LOCAL_VAR)
+ STRINGIFY_CODE(METADATA, EXPRESSION)
+ STRINGIFY_CODE(METADATA, OBJC_PROPERTY)
+ STRINGIFY_CODE(METADATA, IMPORTED_ENTITY)
+ STRINGIFY_CODE(METADATA, MODULE)
+ STRINGIFY_CODE(METADATA, MACRO)
+ STRINGIFY_CODE(METADATA, MACRO_FILE)
+ STRINGIFY_CODE(METADATA, STRINGS)
+ STRINGIFY_CODE(METADATA, GLOBAL_DECL_ATTACHMENT)
+ STRINGIFY_CODE(METADATA, GLOBAL_VAR_EXPR)
+ STRINGIFY_CODE(METADATA, INDEX_OFFSET)
+ STRINGIFY_CODE(METADATA, INDEX)
+ STRINGIFY_CODE(METADATA, ARG_LIST)
+ }
+ case bitc::METADATA_KIND_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ STRINGIFY_CODE(METADATA, KIND)
+ }
+ case bitc::USELIST_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ case bitc::USELIST_CODE_DEFAULT:
+ return "USELIST_CODE_DEFAULT";
+ case bitc::USELIST_CODE_BB:
+ return "USELIST_CODE_BB";
+ }
+
+ case bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ case bitc::OPERAND_BUNDLE_TAG:
+ return "OPERAND_BUNDLE_TAG";
+ }
+ case bitc::STRTAB_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ case bitc::STRTAB_BLOB:
+ return "BLOB";
+ }
+ case bitc::SYMTAB_BLOCK_ID:
+ switch (CodeID) {
+ default:
+ return None;
+ case bitc::SYMTAB_BLOB:
+ return "BLOB";
+ }
+ }
+#undef STRINGIFY_CODE
+}
+
+static void printSize(raw_ostream &OS, double Bits) {
+ OS << format("%.2f/%.2fB/%luW", Bits, Bits / 8, (unsigned long)(Bits / 32));
+}
+static void printSize(raw_ostream &OS, uint64_t Bits) {
+ OS << format("%lub/%.2fB/%luW", (unsigned long)Bits, (double)Bits / 8,
+ (unsigned long)(Bits / 32));
+}
+
+static Expected<CurStreamTypeType> ReadSignature(BitstreamCursor &Stream) {
+ auto tryRead = [&Stream](char &Dest, size_t size) -> Error {
+ if (Expected<SimpleBitstreamCursor::word_t> MaybeWord = Stream.Read(size))
+ Dest = MaybeWord.get();
+ else
+ return MaybeWord.takeError();
+ return Error::success();
+ };
+
+ char Signature[6];
+ if (Error Err = tryRead(Signature[0], 8))
+ return std::move(Err);
+ if (Error Err = tryRead(Signature[1], 8))
+ return std::move(Err);
+
+ // Autodetect the file contents, if it is one we know.
+ if (Signature[0] == 'C' && Signature[1] == 'P') {
+ if (Error Err = tryRead(Signature[2], 8))
+ return std::move(Err);
+ if (Error Err = tryRead(Signature[3], 8))
+ return std::move(Err);
+ if (Signature[2] == 'C' && Signature[3] == 'H')
+ return ClangSerializedASTBitstream;
+ } else if (Signature[0] == 'D' && Signature[1] == 'I') {
+ if (Error Err = tryRead(Signature[2], 8))
+ return std::move(Err);
+ if (Error Err = tryRead(Signature[3], 8))
+ return std::move(Err);
+ if (Signature[2] == 'A' && Signature[3] == 'G')
+ return ClangSerializedDiagnosticsBitstream;
+ } else if (Signature[0] == 'R' && Signature[1] == 'M') {
+ if (Error Err = tryRead(Signature[2], 8))
+ return std::move(Err);
+ if (Error Err = tryRead(Signature[3], 8))
+ return std::move(Err);
+ if (Signature[2] == 'R' && Signature[3] == 'K')
+ return LLVMBitstreamRemarks;
+ } else {
+ if (Error Err = tryRead(Signature[2], 4))
+ return std::move(Err);
+ if (Error Err = tryRead(Signature[3], 4))
+ return std::move(Err);
+ if (Error Err = tryRead(Signature[4], 4))
+ return std::move(Err);
+ if (Error Err = tryRead(Signature[5], 4))
+ return std::move(Err);
+ if (Signature[0] == 'B' && Signature[1] == 'C' && Signature[2] == 0x0 &&
+ Signature[3] == 0xC && Signature[4] == 0xE && Signature[5] == 0xD)
+ return LLVMIRBitstream;
+ }
+ return UnknownBitstream;
+}
+
+static Expected<CurStreamTypeType> analyzeHeader(Optional<BCDumpOptions> O,
+ BitstreamCursor &Stream) {
+ ArrayRef<uint8_t> Bytes = Stream.getBitcodeBytes();
+ const unsigned char *BufPtr = (const unsigned char *)Bytes.data();
+ const unsigned char *EndBufPtr = BufPtr + Bytes.size();
+
+ // If we have a wrapper header, parse it and ignore the non-bc file
+ // contents. The magic number is 0x0B17C0DE stored in little endian.
+ if (isBitcodeWrapper(BufPtr, EndBufPtr)) {
+ if (Bytes.size() < BWH_HeaderSize)
+ return reportError("Invalid bitcode wrapper header");
+
+ if (O) {
+ unsigned Magic = support::endian::read32le(&BufPtr[BWH_MagicField]);
+ unsigned Version = support::endian::read32le(&BufPtr[BWH_VersionField]);
+ unsigned Offset = support::endian::read32le(&BufPtr[BWH_OffsetField]);
+ unsigned Size = support::endian::read32le(&BufPtr[BWH_SizeField]);
+ unsigned CPUType = support::endian::read32le(&BufPtr[BWH_CPUTypeField]);
+
+ O->OS << "<BITCODE_WRAPPER_HEADER"
+ << " Magic=" << format_hex(Magic, 10)
+ << " Version=" << format_hex(Version, 10)
+ << " Offset=" << format_hex(Offset, 10)
+ << " Size=" << format_hex(Size, 10)
+ << " CPUType=" << format_hex(CPUType, 10) << "/>\n";
+ }
+
+ if (SkipBitcodeWrapperHeader(BufPtr, EndBufPtr, true))
+ return reportError("Invalid bitcode wrapper header");
+ }
+
+ // Use the cursor modified by skipping the wrapper header.
+ Stream = BitstreamCursor(ArrayRef<uint8_t>(BufPtr, EndBufPtr));
+
+ return ReadSignature(Stream);
+}
+
+static bool canDecodeBlob(unsigned Code, unsigned BlockID) {
+ return BlockID == bitc::METADATA_BLOCK_ID && Code == bitc::METADATA_STRINGS;
+}
+
+Error BitcodeAnalyzer::decodeMetadataStringsBlob(StringRef Indent,
+ ArrayRef<uint64_t> Record,
+ StringRef Blob,
+ raw_ostream &OS) {
+ if (Blob.empty())
+ return reportError("Cannot decode empty blob.");
+
+ if (Record.size() != 2)
+ return reportError(
+ "Decoding metadata strings blob needs two record entries.");
+
+ unsigned NumStrings = Record[0];
+ unsigned StringsOffset = Record[1];
+ OS << " num-strings = " << NumStrings << " {\n";
+
+ StringRef Lengths = Blob.slice(0, StringsOffset);
+ SimpleBitstreamCursor R(Lengths);
+ StringRef Strings = Blob.drop_front(StringsOffset);
+ do {
+ if (R.AtEndOfStream())
+ return reportError("bad length");
+
+ uint32_t Size;
+ if (Error E = R.ReadVBR(6).moveInto(Size))
+ return E;
+ if (Strings.size() < Size)
+ return reportError("truncated chars");
+
+ OS << Indent << " '";
+ OS.write_escaped(Strings.slice(0, Size), /*hex=*/true);
+ OS << "'\n";
+ Strings = Strings.drop_front(Size);
+ } while (--NumStrings);
+
+ OS << Indent << " }";
+ return Error::success();
+}
+
+BitcodeAnalyzer::BitcodeAnalyzer(StringRef Buffer,
+ Optional<StringRef> BlockInfoBuffer)
+ : Stream(Buffer) {
+ if (BlockInfoBuffer)
+ BlockInfoStream.emplace(*BlockInfoBuffer);
+}
+
+Error BitcodeAnalyzer::analyze(Optional<BCDumpOptions> O,
+ Optional<StringRef> CheckHash) {
+ if (Error E = analyzeHeader(O, Stream).moveInto(CurStreamType))
+ return E;
+
+ Stream.setBlockInfo(&BlockInfo);
+
+ // Read block info from BlockInfoStream, if specified.
+ // The block info must be a top-level block.
+ if (BlockInfoStream) {
+ BitstreamCursor BlockInfoCursor(*BlockInfoStream);
+ if (Error E = analyzeHeader(O, BlockInfoCursor).takeError())
+ return E;
+
+ while (!BlockInfoCursor.AtEndOfStream()) {
+ Expected<unsigned> MaybeCode = BlockInfoCursor.ReadCode();
+ if (!MaybeCode)
+ return MaybeCode.takeError();
+ if (MaybeCode.get() != bitc::ENTER_SUBBLOCK)
+ return reportError("Invalid record at top-level in block info file");
+
+ Expected<unsigned> MaybeBlockID = BlockInfoCursor.ReadSubBlockID();
+ if (!MaybeBlockID)
+ return MaybeBlockID.takeError();
+ if (MaybeBlockID.get() == bitc::BLOCKINFO_BLOCK_ID) {
+ Optional<BitstreamBlockInfo> NewBlockInfo;
+ if (Error E =
+ BlockInfoCursor.ReadBlockInfoBlock(/*ReadBlockInfoNames=*/true)
+ .moveInto(NewBlockInfo))
+ return E;
+ if (!NewBlockInfo)
+ return reportError("Malformed BlockInfoBlock in block info file");
+ BlockInfo = std::move(*NewBlockInfo);
+ break;
+ }
+
+ if (Error Err = BlockInfoCursor.SkipBlock())
+ return Err;
+ }
+ }
+
+ // Parse the top-level structure. We only allow blocks at the top-level.
+ while (!Stream.AtEndOfStream()) {
+ Expected<unsigned> MaybeCode = Stream.ReadCode();
+ if (!MaybeCode)
+ return MaybeCode.takeError();
+ if (MaybeCode.get() != bitc::ENTER_SUBBLOCK)
+ return reportError("Invalid record at top-level");
+
+ Expected<unsigned> MaybeBlockID = Stream.ReadSubBlockID();
+ if (!MaybeBlockID)
+ return MaybeBlockID.takeError();
+
+ if (Error E = parseBlock(MaybeBlockID.get(), 0, O, CheckHash))
+ return E;
+ ++NumTopBlocks;
+ }
+
+ return Error::success();
+}
+
+void BitcodeAnalyzer::printStats(BCDumpOptions O,
+ Optional<StringRef> Filename) {
+ uint64_t BufferSizeBits = Stream.getBitcodeBytes().size() * CHAR_BIT;
+ // Print a summary of the read file.
+ O.OS << "Summary ";
+ if (Filename)
+ O.OS << "of " << Filename->data() << ":\n";
+ O.OS << " Total size: ";
+ printSize(O.OS, BufferSizeBits);
+ O.OS << "\n";
+ O.OS << " Stream type: ";
+ switch (CurStreamType) {
+ case UnknownBitstream:
+ O.OS << "unknown\n";
+ break;
+ case LLVMIRBitstream:
+ O.OS << "LLVM IR\n";
+ break;
+ case ClangSerializedASTBitstream:
+ O.OS << "Clang Serialized AST\n";
+ break;
+ case ClangSerializedDiagnosticsBitstream:
+ O.OS << "Clang Serialized Diagnostics\n";
+ break;
+ case LLVMBitstreamRemarks:
+ O.OS << "LLVM Remarks\n";
+ break;
+ }
+ O.OS << " # Toplevel Blocks: " << NumTopBlocks << "\n";
+ O.OS << "\n";
+
+ // Emit per-block stats.
+ O.OS << "Per-block Summary:\n";
+ for (const auto &Stat : BlockIDStats) {
+ O.OS << " Block ID #" << Stat.first;
+ if (Optional<const char *> BlockName =
+ GetBlockName(Stat.first, BlockInfo, CurStreamType))
+ O.OS << " (" << *BlockName << ")";
+ O.OS << ":\n";
+
+ const PerBlockIDStats &Stats = Stat.second;
+ O.OS << " Num Instances: " << Stats.NumInstances << "\n";
+ O.OS << " Total Size: ";
+ printSize(O.OS, Stats.NumBits);
+ O.OS << "\n";
+ double pct = (Stats.NumBits * 100.0) / BufferSizeBits;
+ O.OS << " Percent of file: " << format("%2.4f%%", pct) << "\n";
+ if (Stats.NumInstances > 1) {
+ O.OS << " Average Size: ";
+ printSize(O.OS, Stats.NumBits / (double)Stats.NumInstances);
+ O.OS << "\n";
+ O.OS << " Tot/Avg SubBlocks: " << Stats.NumSubBlocks << "/"
+ << Stats.NumSubBlocks / (double)Stats.NumInstances << "\n";
+ O.OS << " Tot/Avg Abbrevs: " << Stats.NumAbbrevs << "/"
+ << Stats.NumAbbrevs / (double)Stats.NumInstances << "\n";
+ O.OS << " Tot/Avg Records: " << Stats.NumRecords << "/"
+ << Stats.NumRecords / (double)Stats.NumInstances << "\n";
+ } else {
+ O.OS << " Num SubBlocks: " << Stats.NumSubBlocks << "\n";
+ O.OS << " Num Abbrevs: " << Stats.NumAbbrevs << "\n";
+ O.OS << " Num Records: " << Stats.NumRecords << "\n";
+ }
+ if (Stats.NumRecords) {
+ double pct = (Stats.NumAbbreviatedRecords * 100.0) / Stats.NumRecords;
+ O.OS << " Percent Abbrevs: " << format("%2.4f%%", pct) << "\n";
+ }
+ O.OS << "\n";
+
+ // Print a histogram of the codes we see.
+ if (O.Histogram && !Stats.CodeFreq.empty()) {
+ std::vector<std::pair<unsigned, unsigned>> FreqPairs; // <freq,code>
+ for (unsigned i = 0, e = Stats.CodeFreq.size(); i != e; ++i)
+ if (unsigned Freq = Stats.CodeFreq[i].NumInstances)
+ FreqPairs.push_back(std::make_pair(Freq, i));
+ llvm::stable_sort(FreqPairs);
+ std::reverse(FreqPairs.begin(), FreqPairs.end());
+
+ O.OS << "\tRecord Histogram:\n";
+ O.OS << "\t\t Count # Bits b/Rec % Abv Record Kind\n";
+ for (const auto &FreqPair : FreqPairs) {
+ const PerRecordStats &RecStats = Stats.CodeFreq[FreqPair.second];
+
+ O.OS << format("\t\t%7d %9lu", RecStats.NumInstances,
+ (unsigned long)RecStats.TotalBits);
+
+ if (RecStats.NumInstances > 1)
+ O.OS << format(" %9.1f",
+ (double)RecStats.TotalBits / RecStats.NumInstances);
+ else
+ O.OS << " ";
+
+ if (RecStats.NumAbbrev)
+ O.OS << format(" %7.2f", (double)RecStats.NumAbbrev /
+ RecStats.NumInstances * 100);
+ else
+ O.OS << " ";
+
+ O.OS << " ";
+ if (Optional<const char *> CodeName = GetCodeName(
+ FreqPair.second, Stat.first, BlockInfo, CurStreamType))
+ O.OS << *CodeName << "\n";
+ else
+ O.OS << "UnknownCode" << FreqPair.second << "\n";
+ }
+ O.OS << "\n";
+ }
+ }
+}
+
+Error BitcodeAnalyzer::parseBlock(unsigned BlockID, unsigned IndentLevel,
+ Optional<BCDumpOptions> O,
+ Optional<StringRef> CheckHash) {
+ std::string Indent(IndentLevel * 2, ' ');
+ uint64_t BlockBitStart = Stream.GetCurrentBitNo();
+
+ // Get the statistics for this BlockID.
+ PerBlockIDStats &BlockStats = BlockIDStats[BlockID];
+
+ BlockStats.NumInstances++;
+
+ // BLOCKINFO is a special part of the stream.
+ bool DumpRecords = O.hasValue();
+ if (BlockID == bitc::BLOCKINFO_BLOCK_ID) {
+ if (O && !O->DumpBlockinfo)
+ O->OS << Indent << "<BLOCKINFO_BLOCK/>\n";
+ Optional<BitstreamBlockInfo> NewBlockInfo;
+ if (Error E = Stream.ReadBlockInfoBlock(/*ReadBlockInfoNames=*/true)
+ .moveInto(NewBlockInfo))
+ return E;
+ if (!NewBlockInfo)
+ return reportError("Malformed BlockInfoBlock");
+ BlockInfo = std::move(*NewBlockInfo);
+ if (Error Err = Stream.JumpToBit(BlockBitStart))
+ return Err;
+ // It's not really interesting to dump the contents of the blockinfo
+ // block, so only do it if the user explicitly requests it.
+ DumpRecords = O && O->DumpBlockinfo;
+ }
+
+ unsigned NumWords = 0;
+ if (Error Err = Stream.EnterSubBlock(BlockID, &NumWords))
+ return Err;
+
+ // Keep it for later, when we see a MODULE_HASH record
+ uint64_t BlockEntryPos = Stream.getCurrentByteNo();
+
+ Optional<const char *> BlockName = None;
+ if (DumpRecords) {
+ O->OS << Indent << "<";
+ if ((BlockName = GetBlockName(BlockID, BlockInfo, CurStreamType)))
+ O->OS << *BlockName;
+ else
+ O->OS << "UnknownBlock" << BlockID;
+
+ if (!O->Symbolic && BlockName)
+ O->OS << " BlockID=" << BlockID;
+
+ O->OS << " NumWords=" << NumWords
+ << " BlockCodeSize=" << Stream.getAbbrevIDWidth() << ">\n";
+ }
+
+ SmallVector<uint64_t, 64> Record;
+
+ // Keep the offset to the metadata index if seen.
+ uint64_t MetadataIndexOffset = 0;
+
+ // Read all the records for this block.
+ while (true) {
+ if (Stream.AtEndOfStream())
+ return reportError("Premature end of bitstream");
+
+ uint64_t RecordStartBit = Stream.GetCurrentBitNo();
+
+ BitstreamEntry Entry;
+ if (Error E = Stream.advance(BitstreamCursor::AF_DontAutoprocessAbbrevs)
+ .moveInto(Entry))
+ return E;
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::Error:
+ return reportError("malformed bitcode file");
+ case BitstreamEntry::EndBlock: {
+ uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
+ BlockStats.NumBits += BlockBitEnd - BlockBitStart;
+ if (DumpRecords) {
+ O->OS << Indent << "</";
+ if (BlockName)
+ O->OS << *BlockName << ">\n";
+ else
+ O->OS << "UnknownBlock" << BlockID << ">\n";
+ }
+ return Error::success();
+ }
+
+ case BitstreamEntry::SubBlock: {
+ uint64_t SubBlockBitStart = Stream.GetCurrentBitNo();
+ if (Error E = parseBlock(Entry.ID, IndentLevel + 1, O, CheckHash))
+ return E;
+ ++BlockStats.NumSubBlocks;
+ uint64_t SubBlockBitEnd = Stream.GetCurrentBitNo();
+
+ // Don't include subblock sizes in the size of this block.
+ BlockBitStart += SubBlockBitEnd - SubBlockBitStart;
+ continue;
+ }
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ if (Entry.ID == bitc::DEFINE_ABBREV) {
+ if (Error Err = Stream.ReadAbbrevRecord())
+ return Err;
+ ++BlockStats.NumAbbrevs;
+ continue;
+ }
+
+ Record.clear();
+
+ ++BlockStats.NumRecords;
+
+ StringRef Blob;
+ uint64_t CurrentRecordPos = Stream.GetCurrentBitNo();
+ unsigned Code;
+ if (Error E = Stream.readRecord(Entry.ID, Record, &Blob).moveInto(Code))
+ return E;
+
+ // Increment the # occurrences of this code.
+ if (BlockStats.CodeFreq.size() <= Code)
+ BlockStats.CodeFreq.resize(Code + 1);
+ BlockStats.CodeFreq[Code].NumInstances++;
+ BlockStats.CodeFreq[Code].TotalBits +=
+ Stream.GetCurrentBitNo() - RecordStartBit;
+ if (Entry.ID != bitc::UNABBREV_RECORD) {
+ BlockStats.CodeFreq[Code].NumAbbrev++;
+ ++BlockStats.NumAbbreviatedRecords;
+ }
+
+ if (DumpRecords) {
+ O->OS << Indent << " <";
+ Optional<const char *> CodeName =
+ GetCodeName(Code, BlockID, BlockInfo, CurStreamType);
+ if (CodeName)
+ O->OS << *CodeName;
+ else
+ O->OS << "UnknownCode" << Code;
+ if (!O->Symbolic && CodeName)
+ O->OS << " codeid=" << Code;
+ const BitCodeAbbrev *Abbv = nullptr;
+ if (Entry.ID != bitc::UNABBREV_RECORD) {
+ Abbv = Stream.getAbbrev(Entry.ID);
+ O->OS << " abbrevid=" << Entry.ID;
+ }
+
+ for (unsigned i = 0, e = Record.size(); i != e; ++i)
+ O->OS << " op" << i << "=" << (int64_t)Record[i];
+
+ // If we found a metadata index, let's verify that we had an offset
+ // before and validate its forward reference offset was correct!
+ if (BlockID == bitc::METADATA_BLOCK_ID) {
+ if (Code == bitc::METADATA_INDEX_OFFSET) {
+ if (Record.size() != 2)
+ O->OS << "(Invalid record)";
+ else {
+ auto Offset = Record[0] + (Record[1] << 32);
+ MetadataIndexOffset = Stream.GetCurrentBitNo() + Offset;
+ }
+ }
+ if (Code == bitc::METADATA_INDEX) {
+ O->OS << " (offset ";
+ if (MetadataIndexOffset == RecordStartBit)
+ O->OS << "match)";
+ else
+ O->OS << "mismatch: " << MetadataIndexOffset << " vs "
+ << RecordStartBit << ")";
+ }
+ }
+
+ // If we found a module hash, let's verify that it matches!
+ if (BlockID == bitc::MODULE_BLOCK_ID && Code == bitc::MODULE_CODE_HASH &&
+ CheckHash.hasValue()) {
+ if (Record.size() != 5)
+ O->OS << " (invalid)";
+ else {
+ // Recompute the hash and compare it to the one in the bitcode
+ SHA1 Hasher;
+ StringRef Hash;
+ Hasher.update(*CheckHash);
+ {
+ int BlockSize = (CurrentRecordPos / 8) - BlockEntryPos;
+ auto Ptr = Stream.getPointerToByte(BlockEntryPos, BlockSize);
+ Hasher.update(ArrayRef<uint8_t>(Ptr, BlockSize));
+ Hash = Hasher.result();
+ }
+ std::array<char, 20> RecordedHash;
+ int Pos = 0;
+ for (auto &Val : Record) {
+ assert(!(Val >> 32) && "Unexpected high bits set");
+ support::endian::write32be(&RecordedHash[Pos], Val);
+ Pos += 4;
+ }
+ if (Hash == StringRef(RecordedHash.data(), RecordedHash.size()))
+ O->OS << " (match)";
+ else
+ O->OS << " (!mismatch!)";
+ }
+ }
+
+ O->OS << "/>";
+
+ if (Abbv) {
+ for (unsigned i = 1, e = Abbv->getNumOperandInfos(); i != e; ++i) {
+ const BitCodeAbbrevOp &Op = Abbv->getOperandInfo(i);
+ if (!Op.isEncoding() || Op.getEncoding() != BitCodeAbbrevOp::Array)
+ continue;
+ assert(i + 2 == e && "Array op not second to last");
+ std::string Str;
+ bool ArrayIsPrintable = true;
+ for (unsigned j = i - 1, je = Record.size(); j != je; ++j) {
+ if (!isPrint(static_cast<unsigned char>(Record[j]))) {
+ ArrayIsPrintable = false;
+ break;
+ }
+ Str += (char)Record[j];
+ }
+ if (ArrayIsPrintable)
+ O->OS << " record string = '" << Str << "'";
+ break;
+ }
+ }
+
+ if (Blob.data()) {
+ if (canDecodeBlob(Code, BlockID)) {
+ if (Error E = decodeMetadataStringsBlob(Indent, Record, Blob, O->OS))
+ return E;
+ } else {
+ O->OS << " blob data = ";
+ if (O->ShowBinaryBlobs) {
+ O->OS << "'";
+ O->OS.write_escaped(Blob, /*hex=*/true) << "'";
+ } else {
+ bool BlobIsPrintable = true;
+ for (char C : Blob)
+ if (!isPrint(static_cast<unsigned char>(C))) {
+ BlobIsPrintable = false;
+ break;
+ }
+
+ if (BlobIsPrintable)
+ O->OS << "'" << Blob << "'";
+ else
+ O->OS << "unprintable, " << Blob.size() << " bytes.";
+ }
+ }
+ }
+
+ O->OS << "\n";
+ }
+
+ // Make sure that we can skip the current record.
+ if (Error Err = Stream.JumpToBit(CurrentRecordPos))
+ return Err;
+ if (Expected<unsigned> Skipped = Stream.skipRecord(Entry.ID))
+ ; // Do nothing.
+ else
+ return Skipped.takeError();
+ }
+}
+
diff --git a/contrib/libs/llvm14/lib/Bitcode/Reader/BitcodeReader.cpp b/contrib/libs/llvm14/lib/Bitcode/Reader/BitcodeReader.cpp
new file mode 100644
index 0000000000..720ab560f9
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -0,0 +1,7144 @@
+//===- BitcodeReader.cpp - Internal BitcodeReader implementation ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "MetadataLoader.h"
+#include "ValueList.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Bitcode/BitcodeCommon.h"
+#include "llvm/Bitcode/LLVMBitCodes.h"
+#include "llvm/Bitstream/BitstreamReader.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GVMaterializer.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/Verifier.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ErrorOr.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+static cl::opt<bool> PrintSummaryGUIDs(
+ "print-summary-global-ids", cl::init(false), cl::Hidden,
+ cl::desc(
+ "Print the global id for each value when reading the module summary"));
+
+namespace {
+
+enum {
+ SWITCH_INST_MAGIC = 0x4B5 // May 2012 => 1205 => Hex
+};
+
+} // end anonymous namespace
+
+static Error error(const Twine &Message) {
+ return make_error<StringError>(
+ Message, make_error_code(BitcodeError::CorruptedBitcode));
+}
+
+static Error hasInvalidBitcodeHeader(BitstreamCursor &Stream) {
+ if (!Stream.canSkipToPos(4))
+ return createStringError(std::errc::illegal_byte_sequence,
+ "file too small to contain bitcode header");
+ for (unsigned C : {'B', 'C'})
+ if (Expected<SimpleBitstreamCursor::word_t> Res = Stream.Read(8)) {
+ if (Res.get() != C)
+ return createStringError(std::errc::illegal_byte_sequence,
+ "file doesn't start with bitcode header");
+ } else
+ return Res.takeError();
+ for (unsigned C : {0x0, 0xC, 0xE, 0xD})
+ if (Expected<SimpleBitstreamCursor::word_t> Res = Stream.Read(4)) {
+ if (Res.get() != C)
+ return createStringError(std::errc::illegal_byte_sequence,
+ "file doesn't start with bitcode header");
+ } else
+ return Res.takeError();
+ return Error::success();
+}
+
+static Expected<BitstreamCursor> initStream(MemoryBufferRef Buffer) {
+ const unsigned char *BufPtr = (const unsigned char *)Buffer.getBufferStart();
+ const unsigned char *BufEnd = BufPtr + Buffer.getBufferSize();
+
+ if (Buffer.getBufferSize() & 3)
+ return error("Invalid bitcode signature");
+
+ // If we have a wrapper header, parse it and ignore the non-bc file contents.
+ // The magic number is 0x0B17C0DE stored in little endian.
+ if (isBitcodeWrapper(BufPtr, BufEnd))
+ if (SkipBitcodeWrapperHeader(BufPtr, BufEnd, true))
+ return error("Invalid bitcode wrapper header");
+
+ BitstreamCursor Stream(ArrayRef<uint8_t>(BufPtr, BufEnd));
+ if (Error Err = hasInvalidBitcodeHeader(Stream))
+ return std::move(Err);
+
+ return std::move(Stream);
+}
+
+/// Convert a string from a record into an std::string, return true on failure.
+template <typename StrTy>
+static bool convertToString(ArrayRef<uint64_t> Record, unsigned Idx,
+ StrTy &Result) {
+ if (Idx > Record.size())
+ return true;
+
+ Result.append(Record.begin() + Idx, Record.end());
+ return false;
+}
+
+// Strip all the TBAA attachment for the module.
+static void stripTBAA(Module *M) {
+ for (auto &F : *M) {
+ if (F.isMaterializable())
+ continue;
+ for (auto &I : instructions(F))
+ I.setMetadata(LLVMContext::MD_tbaa, nullptr);
+ }
+}
+
+/// Read the "IDENTIFICATION_BLOCK_ID" block, do some basic enforcement on the
+/// "epoch" encoded in the bitcode, and return the producer name if any.
+static Expected<std::string> readIdentificationBlock(BitstreamCursor &Stream) {
+ if (Error Err = Stream.EnterSubBlock(bitc::IDENTIFICATION_BLOCK_ID))
+ return std::move(Err);
+
+ // Read all the records.
+ SmallVector<uint64_t, 64> Record;
+
+ std::string ProducerIdentification;
+
+ while (true) {
+ BitstreamEntry Entry;
+ if (Error E = Stream.advance().moveInto(Entry))
+ return std::move(E);
+
+ switch (Entry.Kind) {
+ default:
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return ProducerIdentification;
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Record.clear();
+ Expected<unsigned> MaybeBitCode = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeBitCode)
+ return MaybeBitCode.takeError();
+ switch (MaybeBitCode.get()) {
+ default: // Default behavior: reject
+ return error("Invalid value");
+ case bitc::IDENTIFICATION_CODE_STRING: // IDENTIFICATION: [strchr x N]
+ convertToString(Record, 0, ProducerIdentification);
+ break;
+ case bitc::IDENTIFICATION_CODE_EPOCH: { // EPOCH: [epoch#]
+ unsigned epoch = (unsigned)Record[0];
+ if (epoch != bitc::BITCODE_CURRENT_EPOCH) {
+ return error(
+ Twine("Incompatible epoch: Bitcode '") + Twine(epoch) +
+ "' vs current: '" + Twine(bitc::BITCODE_CURRENT_EPOCH) + "'");
+ }
+ }
+ }
+ }
+}
+
+static Expected<std::string> readIdentificationCode(BitstreamCursor &Stream) {
+ // We expect a number of well-defined blocks, though we don't necessarily
+ // need to understand them all.
+ while (true) {
+ if (Stream.AtEndOfStream())
+ return "";
+
+ BitstreamEntry Entry;
+ if (Error E = Stream.advance().moveInto(Entry))
+ return std::move(E);
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::EndBlock:
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+
+ case BitstreamEntry::SubBlock:
+ if (Entry.ID == bitc::IDENTIFICATION_BLOCK_ID)
+ return readIdentificationBlock(Stream);
+
+ // Ignore other sub-blocks.
+ if (Error Err = Stream.SkipBlock())
+ return std::move(Err);
+ continue;
+ case BitstreamEntry::Record:
+ if (Error E = Stream.skipRecord(Entry.ID).takeError())
+ return std::move(E);
+ continue;
+ }
+ }
+}
+
+static Expected<bool> hasObjCCategoryInModule(BitstreamCursor &Stream) {
+ if (Error Err = Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+ return std::move(Err);
+
+ SmallVector<uint64_t, 64> Record;
+ // Read all the records for this module.
+
+ while (true) {
+ Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return false;
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeRecord)
+ return MaybeRecord.takeError();
+ switch (MaybeRecord.get()) {
+ default:
+ break; // Default behavior, ignore unknown content.
+ case bitc::MODULE_CODE_SECTIONNAME: { // SECTIONNAME: [strchr x N]
+ std::string S;
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
+ // Check for the i386 and other (x86_64, ARM) conventions
+ if (S.find("__DATA,__objc_catlist") != std::string::npos ||
+ S.find("__OBJC,__category") != std::string::npos)
+ return true;
+ break;
+ }
+ }
+ Record.clear();
+ }
+ llvm_unreachable("Exit infinite loop");
+}
+
+static Expected<bool> hasObjCCategory(BitstreamCursor &Stream) {
+ // We expect a number of well-defined blocks, though we don't necessarily
+ // need to understand them all.
+ while (true) {
+ BitstreamEntry Entry;
+ if (Error E = Stream.advance().moveInto(Entry))
+ return std::move(E);
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return false;
+
+ case BitstreamEntry::SubBlock:
+ if (Entry.ID == bitc::MODULE_BLOCK_ID)
+ return hasObjCCategoryInModule(Stream);
+
+ // Ignore other sub-blocks.
+ if (Error Err = Stream.SkipBlock())
+ return std::move(Err);
+ continue;
+
+ case BitstreamEntry::Record:
+ if (Error E = Stream.skipRecord(Entry.ID).takeError())
+ return std::move(E);
+ continue;
+ }
+ }
+}
+
+static Expected<std::string> readModuleTriple(BitstreamCursor &Stream) {
+ if (Error Err = Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+ return std::move(Err);
+
+ SmallVector<uint64_t, 64> Record;
+
+ std::string Triple;
+
+ // Read all the records for this module.
+ while (true) {
+ Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return Triple;
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeRecord)
+ return MaybeRecord.takeError();
+ switch (MaybeRecord.get()) {
+ default: break; // Default behavior, ignore unknown content.
+ case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
+ std::string S;
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
+ Triple = S;
+ break;
+ }
+ }
+ Record.clear();
+ }
+ llvm_unreachable("Exit infinite loop");
+}
+
+static Expected<std::string> readTriple(BitstreamCursor &Stream) {
+ // We expect a number of well-defined blocks, though we don't necessarily
+ // need to understand them all.
+ while (true) {
+ Expected<BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return "";
+
+ case BitstreamEntry::SubBlock:
+ if (Entry.ID == bitc::MODULE_BLOCK_ID)
+ return readModuleTriple(Stream);
+
+ // Ignore other sub-blocks.
+ if (Error Err = Stream.SkipBlock())
+ return std::move(Err);
+ continue;
+
+ case BitstreamEntry::Record:
+ if (llvm::Expected<unsigned> Skipped = Stream.skipRecord(Entry.ID))
+ continue;
+ else
+ return Skipped.takeError();
+ }
+ }
+}
+
+namespace {
+
+class BitcodeReaderBase {
+protected:
+ BitcodeReaderBase(BitstreamCursor Stream, StringRef Strtab)
+ : Stream(std::move(Stream)), Strtab(Strtab) {
+ this->Stream.setBlockInfo(&BlockInfo);
+ }
+
+ BitstreamBlockInfo BlockInfo;
+ BitstreamCursor Stream;
+ StringRef Strtab;
+
+ /// In version 2 of the bitcode we store names of global values and comdats in
+ /// a string table rather than in the VST.
+ bool UseStrtab = false;
+
+ Expected<unsigned> parseVersionRecord(ArrayRef<uint64_t> Record);
+
+ /// If this module uses a string table, pop the reference to the string table
+ /// and return the referenced string and the rest of the record. Otherwise
+ /// just return the record itself.
+ std::pair<StringRef, ArrayRef<uint64_t>>
+ readNameFromStrtab(ArrayRef<uint64_t> Record);
+
+ bool readBlockInfo();
+
+ // Contains an arbitrary and optional string identifying the bitcode producer
+ std::string ProducerIdentification;
+
+ Error error(const Twine &Message);
+};
+
+} // end anonymous namespace
+
+Error BitcodeReaderBase::error(const Twine &Message) {
+ std::string FullMsg = Message.str();
+ if (!ProducerIdentification.empty())
+ FullMsg += " (Producer: '" + ProducerIdentification + "' Reader: 'LLVM " +
+ LLVM_VERSION_STRING "')";
+ return ::error(FullMsg);
+}
+
+Expected<unsigned>
+BitcodeReaderBase::parseVersionRecord(ArrayRef<uint64_t> Record) {
+ if (Record.empty())
+ return error("Invalid record");
+ unsigned ModuleVersion = Record[0];
+ if (ModuleVersion > 2)
+ return error("Invalid value");
+ UseStrtab = ModuleVersion >= 2;
+ return ModuleVersion;
+}
+
+std::pair<StringRef, ArrayRef<uint64_t>>
+BitcodeReaderBase::readNameFromStrtab(ArrayRef<uint64_t> Record) {
+ if (!UseStrtab)
+ return {"", Record};
+ // Invalid reference. Let the caller complain about the record being empty.
+ if (Record[0] + Record[1] > Strtab.size())
+ return {"", {}};
+ return {StringRef(Strtab.data() + Record[0], Record[1]), Record.slice(2)};
+}
+
+namespace {
+
+class BitcodeReader : public BitcodeReaderBase, public GVMaterializer {
+ LLVMContext &Context;
+ Module *TheModule = nullptr;
+ // Next offset to start scanning for lazy parsing of function bodies.
+ uint64_t NextUnreadBit = 0;
+ // Last function offset found in the VST.
+ uint64_t LastFunctionBlockBit = 0;
+ bool SeenValueSymbolTable = false;
+ uint64_t VSTOffset = 0;
+
+ std::vector<std::string> SectionTable;
+ std::vector<std::string> GCTable;
+
+ std::vector<Type*> TypeList;
+ DenseMap<Function *, FunctionType *> FunctionTypes;
+ BitcodeReaderValueList ValueList;
+ Optional<MetadataLoader> MDLoader;
+ std::vector<Comdat *> ComdatList;
+ DenseSet<GlobalObject *> ImplicitComdatObjects;
+ SmallVector<Instruction *, 64> InstructionList;
+
+ std::vector<std::pair<GlobalVariable *, unsigned>> GlobalInits;
+ std::vector<std::pair<GlobalValue *, unsigned>> IndirectSymbolInits;
+
+ struct FunctionOperandInfo {
+ Function *F;
+ unsigned PersonalityFn;
+ unsigned Prefix;
+ unsigned Prologue;
+ };
+ std::vector<FunctionOperandInfo> FunctionOperands;
+
+ /// The set of attributes by index. Index zero in the file is for null, and
+ /// is thus not represented here. As such all indices are off by one.
+ std::vector<AttributeList> MAttributes;
+
+ /// The set of attribute groups.
+ std::map<unsigned, AttributeList> MAttributeGroups;
+
+ /// While parsing a function body, this is a list of the basic blocks for the
+ /// function.
+ std::vector<BasicBlock*> FunctionBBs;
+
+ // When reading the module header, this list is populated with functions that
+ // have bodies later in the file.
+ std::vector<Function*> FunctionsWithBodies;
+
+ // When intrinsic functions are encountered which require upgrading they are
+ // stored here with their replacement function.
+ using UpdatedIntrinsicMap = DenseMap<Function *, Function *>;
+ UpdatedIntrinsicMap UpgradedIntrinsics;
+ // Intrinsics which were remangled because of types rename
+ UpdatedIntrinsicMap RemangledIntrinsics;
+
+ // Several operations happen after the module header has been read, but
+ // before function bodies are processed. This keeps track of whether
+ // we've done this yet.
+ bool SeenFirstFunctionBody = false;
+
+ /// When function bodies are initially scanned, this map contains info about
+ /// where to find deferred function body in the stream.
+ DenseMap<Function*, uint64_t> DeferredFunctionInfo;
+
+ /// When Metadata block is initially scanned when parsing the module, we may
+ /// choose to defer parsing of the metadata. This vector contains info about
+ /// which Metadata blocks are deferred.
+ std::vector<uint64_t> DeferredMetadataInfo;
+
+ /// These are basic blocks forward-referenced by block addresses. They are
+ /// inserted lazily into functions when they're loaded. The basic block ID is
+ /// its index into the vector.
+ DenseMap<Function *, std::vector<BasicBlock *>> BasicBlockFwdRefs;
+ std::deque<Function *> BasicBlockFwdRefQueue;
+
+ /// Indicates that we are using a new encoding for instruction operands where
+ /// most operands in the current FUNCTION_BLOCK are encoded relative to the
+ /// instruction number, for a more compact encoding. Some instruction
+ /// operands are not relative to the instruction ID: basic block numbers, and
+ /// types. Once the old style function blocks have been phased out, we would
+ /// not need this flag.
+ bool UseRelativeIDs = false;
+
+ /// True if all functions will be materialized, negating the need to process
+ /// (e.g.) blockaddress forward references.
+ bool WillMaterializeAllForwardRefs = false;
+
+ bool StripDebugInfo = false;
+ TBAAVerifier TBAAVerifyHelper;
+
+ std::vector<std::string> BundleTags;
+ SmallVector<SyncScope::ID, 8> SSIDs;
+
+public:
+ BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
+ StringRef ProducerIdentification, LLVMContext &Context);
+
+ Error materializeForwardReferencedFunctions();
+
+ Error materialize(GlobalValue *GV) override;
+ Error materializeModule() override;
+ std::vector<StructType *> getIdentifiedStructTypes() const override;
+
+ /// Main interface to parsing a bitcode buffer.
+ /// \returns true if an error occurred.
+ Error parseBitcodeInto(
+ Module *M, bool ShouldLazyLoadMetadata = false, bool IsImporting = false,
+ DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });
+
+ static uint64_t decodeSignRotatedValue(uint64_t V);
+
+ /// Materialize any deferred Metadata block.
+ Error materializeMetadata() override;
+
+ void setStripDebugInfo() override;
+
+private:
+ std::vector<StructType *> IdentifiedStructTypes;
+ StructType *createIdentifiedStructType(LLVMContext &Context, StringRef Name);
+ StructType *createIdentifiedStructType(LLVMContext &Context);
+
+ Type *getTypeByID(unsigned ID);
+
+ Value *getFnValueByID(unsigned ID, Type *Ty) {
+ if (Ty && Ty->isMetadataTy())
+ return MetadataAsValue::get(Ty->getContext(), getFnMetadataByID(ID));
+ return ValueList.getValueFwdRef(ID, Ty);
+ }
+
+ Metadata *getFnMetadataByID(unsigned ID) {
+ return MDLoader->getMetadataFwdRefOrLoad(ID);
+ }
+
+ BasicBlock *getBasicBlock(unsigned ID) const {
+ if (ID >= FunctionBBs.size()) return nullptr; // Invalid ID
+ return FunctionBBs[ID];
+ }
+
+ AttributeList getAttributes(unsigned i) const {
+ if (i-1 < MAttributes.size())
+ return MAttributes[i-1];
+ return AttributeList();
+ }
+
+ /// Read a value/type pair out of the specified record from slot 'Slot'.
+ /// Increment Slot past the number of slots used in the record. Return true on
+ /// failure.
+ bool getValueTypePair(const SmallVectorImpl<uint64_t> &Record, unsigned &Slot,
+ unsigned InstNum, Value *&ResVal) {
+ if (Slot == Record.size()) return true;
+ unsigned ValNo = (unsigned)Record[Slot++];
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
+ if (ValNo < InstNum) {
+ // If this is not a forward reference, just return the value we already
+ // have.
+ ResVal = getFnValueByID(ValNo, nullptr);
+ return ResVal == nullptr;
+ }
+ if (Slot == Record.size())
+ return true;
+
+ unsigned TypeNo = (unsigned)Record[Slot++];
+ ResVal = getFnValueByID(ValNo, getTypeByID(TypeNo));
+ return ResVal == nullptr;
+ }
+
+ /// Read a value out of the specified record from slot 'Slot'. Increment Slot
+ /// past the number of slots used by the value in the record. Return true if
+ /// there is an error.
+ bool popValue(const SmallVectorImpl<uint64_t> &Record, unsigned &Slot,
+ unsigned InstNum, Type *Ty, Value *&ResVal) {
+ if (getValue(Record, Slot, InstNum, Ty, ResVal))
+ return true;
+ // All values currently take a single record slot.
+ ++Slot;
+ return false;
+ }
+
+ /// Like popValue, but does not increment the Slot number.
+ bool getValue(const SmallVectorImpl<uint64_t> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty, Value *&ResVal) {
+ ResVal = getValue(Record, Slot, InstNum, Ty);
+ return ResVal == nullptr;
+ }
+
+ /// Version of getValue that returns ResVal directly, or 0 if there is an
+ /// error.
+ Value *getValue(const SmallVectorImpl<uint64_t> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty) {
+ if (Slot == Record.size()) return nullptr;
+ unsigned ValNo = (unsigned)Record[Slot];
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
+ return getFnValueByID(ValNo, Ty);
+ }
+
+ /// Like getValue, but decodes signed VBRs.
+ Value *getValueSigned(const SmallVectorImpl<uint64_t> &Record, unsigned Slot,
+ unsigned InstNum, Type *Ty) {
+ if (Slot == Record.size()) return nullptr;
+ unsigned ValNo = (unsigned)decodeSignRotatedValue(Record[Slot]);
+ // Adjust the ValNo, if it was encoded relative to the InstNum.
+ if (UseRelativeIDs)
+ ValNo = InstNum - ValNo;
+ return getFnValueByID(ValNo, Ty);
+ }
+
+ /// Upgrades old-style typeless byval/sret/inalloca attributes by adding the
+ /// corresponding argument's pointee type. Also upgrades intrinsics that now
+ /// require an elementtype attribute.
+ void propagateAttributeTypes(CallBase *CB, ArrayRef<Type *> ArgsTys);
+
+ /// Converts alignment exponent (i.e. power of two (or zero)) to the
+ /// corresponding alignment to use. If alignment is too large, returns
+ /// a corresponding error code.
+ Error parseAlignmentValue(uint64_t Exponent, MaybeAlign &Alignment);
+ Error parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind);
+ Error parseModule(
+ uint64_t ResumeBit, bool ShouldLazyLoadMetadata = false,
+ DataLayoutCallbackTy DataLayoutCallback = [](StringRef) { return None; });
+
+ Error parseComdatRecord(ArrayRef<uint64_t> Record);
+ Error parseGlobalVarRecord(ArrayRef<uint64_t> Record);
+ Error parseFunctionRecord(ArrayRef<uint64_t> Record);
+ Error parseGlobalIndirectSymbolRecord(unsigned BitCode,
+ ArrayRef<uint64_t> Record);
+
+ Error parseAttributeBlock();
+ Error parseAttributeGroupBlock();
+ Error parseTypeTable();
+ Error parseTypeTableBody();
+ Error parseOperandBundleTags();
+ Error parseSyncScopeNames();
+
+ Expected<Value *> recordValue(SmallVectorImpl<uint64_t> &Record,
+ unsigned NameIndex, Triple &TT);
+ void setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta, Function *F,
+ ArrayRef<uint64_t> Record);
+ Error parseValueSymbolTable(uint64_t Offset = 0);
+ Error parseGlobalValueSymbolTable();
+ Error parseConstants();
+ Error rememberAndSkipFunctionBodies();
+ Error rememberAndSkipFunctionBody();
+ /// Save the positions of the Metadata blocks and skip parsing the blocks.
+ Error rememberAndSkipMetadata();
+ Error typeCheckLoadStoreInst(Type *ValType, Type *PtrType);
+ Error parseFunctionBody(Function *F);
+ Error globalCleanup();
+ Error resolveGlobalAndIndirectSymbolInits();
+ Error parseUseLists();
+ Error findFunctionInStream(
+ Function *F,
+ DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator);
+
+ SyncScope::ID getDecodedSyncScopeID(unsigned Val);
+};
+
+/// Class to manage reading and parsing function summary index bitcode
+/// files/sections.
+class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
+ /// The module index built during parsing.
+ ModuleSummaryIndex &TheIndex;
+
+ /// Indicates whether we have encountered a global value summary section
+ /// yet during parsing.
+ bool SeenGlobalValSummary = false;
+
+ /// Indicates whether we have already parsed the VST, used for error checking.
+ bool SeenValueSymbolTable = false;
+
+ /// Set to the offset of the VST recorded in the MODULE_CODE_VSTOFFSET record.
+ /// Used to enable on-demand parsing of the VST.
+ uint64_t VSTOffset = 0;
+
+ // Map to save ValueId to ValueInfo association that was recorded in the
+ // ValueSymbolTable. It is used after the VST is parsed to convert
+ // call graph edges read from the function summary from referencing
+ // callees by their ValueId to using the ValueInfo instead, which is how
+ // they are recorded in the summary index being built.
+ // We save a GUID which refers to the same global as the ValueInfo, but
+ // ignoring the linkage, i.e. for values other than local linkage they are
+ // identical.
+ DenseMap<unsigned, std::pair<ValueInfo, GlobalValue::GUID>>
+ ValueIdToValueInfoMap;
+
+ /// Map populated during module path string table parsing, from the
+ /// module ID to a string reference owned by the index's module
+ /// path string table, used to correlate with combined index
+ /// summary records.
+ DenseMap<uint64_t, StringRef> ModuleIdMap;
+
+ /// Original source file name recorded in a bitcode record.
+ std::string SourceFileName;
+
+ /// The string identifier given to this module by the client, normally the
+ /// path to the bitcode file.
+ StringRef ModulePath;
+
+ /// For per-module summary indexes, the unique numerical identifier given to
+ /// this module by the client.
+ unsigned ModuleId;
+
+public:
+ ModuleSummaryIndexBitcodeReader(BitstreamCursor Stream, StringRef Strtab,
+ ModuleSummaryIndex &TheIndex,
+ StringRef ModulePath, unsigned ModuleId);
+
+ Error parseModule();
+
+private:
+ void setValueGUID(uint64_t ValueID, StringRef ValueName,
+ GlobalValue::LinkageTypes Linkage,
+ StringRef SourceFileName);
+ Error parseValueSymbolTable(
+ uint64_t Offset,
+ DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap);
+ std::vector<ValueInfo> makeRefList(ArrayRef<uint64_t> Record);
+ std::vector<FunctionSummary::EdgeTy> makeCallList(ArrayRef<uint64_t> Record,
+ bool IsOldProfileFormat,
+ bool HasProfile,
+ bool HasRelBF);
+ Error parseEntireSummary(unsigned ID);
+ Error parseModuleStringTable();
+ void parseTypeIdCompatibleVtableSummaryRecord(ArrayRef<uint64_t> Record);
+ void parseTypeIdCompatibleVtableInfo(ArrayRef<uint64_t> Record, size_t &Slot,
+ TypeIdCompatibleVtableInfo &TypeId);
+ std::vector<FunctionSummary::ParamAccess>
+ parseParamAccesses(ArrayRef<uint64_t> Record);
+
+ std::pair<ValueInfo, GlobalValue::GUID>
+ getValueInfoFromValueId(unsigned ValueId);
+
+ void addThisModule();
+ ModuleSummaryIndex::ModuleInfo *getThisModule();
+};
+
+} // end anonymous namespace
+
+std::error_code llvm::errorToErrorCodeAndEmitErrors(LLVMContext &Ctx,
+ Error Err) {
+ if (Err) {
+ std::error_code EC;
+ handleAllErrors(std::move(Err), [&](ErrorInfoBase &EIB) {
+ EC = EIB.convertToErrorCode();
+ Ctx.emitError(EIB.message());
+ });
+ return EC;
+ }
+ return std::error_code();
+}
+
/// Construct a bitcode reader over the given bitstream cursor.  The value
/// list is sized from the stream length; the producer identification string
/// is retained for use in diagnostics about version-mismatched bitcode.
// NOTE(review): `Stream` is moved into the base class first, then
// `Stream.SizeInBytes()` is read in a later initializer.  This relies on
// BitstreamCursor's move leaving the source readable — confirm upstream.
BitcodeReader::BitcodeReader(BitstreamCursor Stream, StringRef Strtab,
                             StringRef ProducerIdentification,
                             LLVMContext &Context)
    : BitcodeReaderBase(std::move(Stream), Strtab), Context(Context),
      ValueList(Context, Stream.SizeInBytes()) {
  this->ProducerIdentification = std::string(ProducerIdentification);
}
+
/// Materialize every function that was forward-referenced via a blockaddress
/// constant, draining BasicBlockFwdRefQueue.  Returns an error if a queued
/// function can never get a body (unresolvable blockaddress).
Error BitcodeReader::materializeForwardReferencedFunctions() {
  if (WillMaterializeAllForwardRefs)
    return Error::success();

  // Prevent recursion.
  WillMaterializeAllForwardRefs = true;

  while (!BasicBlockFwdRefQueue.empty()) {
    Function *F = BasicBlockFwdRefQueue.front();
    BasicBlockFwdRefQueue.pop_front();
    assert(F && "Expected valid function");
    if (!BasicBlockFwdRefs.count(F))
      // Already materialized.
      continue;

    // Check for a function that isn't materializable to prevent an infinite
    // loop. When parsing a blockaddress stored in a global variable, there
    // isn't a trivial way to check if a function will have a body without a
    // linear search through FunctionsWithBodies, so just check it here.
    if (!F->isMaterializable())
      return error("Never resolved function from blockaddress");

    // Try to materialize F.
    if (Error Err = materialize(F))
      return Err;
  }
  // Materializing each function resolves its entries, so the map must drain
  // in lock-step with the queue.
  assert(BasicBlockFwdRefs.empty() && "Function missing from queue");

  // Reset state.
  WillMaterializeAllForwardRefs = false;
  return Error::success();
}
+
+//===----------------------------------------------------------------------===//
+// Helper functions to implement forward reference resolution, etc.
+//===----------------------------------------------------------------------===//
+
/// Old bitcode used linkage codes that implied membership in a comdat group.
/// Returns true for those legacy weak/linkonce (any/ODR) encodings.
static bool hasImplicitComdat(size_t Val) {
  return Val == 1 ||  // Old WeakAnyLinkage
         Val == 4 ||  // Old LinkOnceAnyLinkage
         Val == 10 || // Old WeakODRLinkage
         Val == 11;   // Old LinkOnceODRLinkage
}
+
/// Map a numeric linkage code from a bitcode record onto the current
/// GlobalValue::LinkageTypes enum.  Several codes are obsolete encodings kept
/// so old bitcode still loads; unknown codes decode to external linkage.
static GlobalValue::LinkageTypes getDecodedLinkage(unsigned Val) {
  switch (Val) {
  default: // Map unknown/new linkages to external
  case 0:
    return GlobalValue::ExternalLinkage;
  case 2:
    return GlobalValue::AppendingLinkage;
  case 3:
    return GlobalValue::InternalLinkage;
  case 5:
    return GlobalValue::ExternalLinkage; // Obsolete DLLImportLinkage
  case 6:
    return GlobalValue::ExternalLinkage; // Obsolete DLLExportLinkage
  case 7:
    return GlobalValue::ExternalWeakLinkage;
  case 8:
    return GlobalValue::CommonLinkage;
  case 9:
    return GlobalValue::PrivateLinkage;
  case 12:
    return GlobalValue::AvailableExternallyLinkage;
  case 13:
    return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateLinkage
  case 14:
    return GlobalValue::PrivateLinkage; // Obsolete LinkerPrivateWeakLinkage
  case 15:
    return GlobalValue::ExternalLinkage; // Obsolete LinkOnceODRAutoHideLinkage
  // The old codes below additionally implied a comdat; see
  // hasImplicitComdat().
  case 1: // Old value with implicit comdat.
  case 16:
    return GlobalValue::WeakAnyLinkage;
  case 10: // Old value with implicit comdat.
  case 17:
    return GlobalValue::WeakODRLinkage;
  case 4: // Old value with implicit comdat.
  case 18:
    return GlobalValue::LinkOnceAnyLinkage;
  case 11: // Old value with implicit comdat.
  case 19:
    return GlobalValue::LinkOnceODRLinkage;
  }
}
+
+static FunctionSummary::FFlags getDecodedFFlags(uint64_t RawFlags) {
+ FunctionSummary::FFlags Flags;
+ Flags.ReadNone = RawFlags & 0x1;
+ Flags.ReadOnly = (RawFlags >> 1) & 0x1;
+ Flags.NoRecurse = (RawFlags >> 2) & 0x1;
+ Flags.ReturnDoesNotAlias = (RawFlags >> 3) & 0x1;
+ Flags.NoInline = (RawFlags >> 4) & 0x1;
+ Flags.AlwaysInline = (RawFlags >> 5) & 0x1;
+ Flags.NoUnwind = (RawFlags >> 6) & 0x1;
+ Flags.MayThrow = (RawFlags >> 7) & 0x1;
+ Flags.HasUnknownCall = (RawFlags >> 8) & 0x1;
+ Flags.MustBeUnreachable = (RawFlags >> 9) & 0x1;
+ return Flags;
+}
+
// Decode the flags for GlobalValue in the summary. The bits for each attribute:
//
// linkage: [0,4), notEligibleToImport: 4, live: 5, local: 6, canAutoHide: 7,
// visibility: [8, 10).
static GlobalValueSummary::GVFlags getDecodedGVSummaryFlags(uint64_t RawFlags,
                                                            uint64_t Version) {
  // Summary were not emitted before LLVM 3.9, we don't need to upgrade Linkage
  // like getDecodedLinkage() above. Any future change to the linkage enum and
  // to getDecodedLinkage() will need to be taken into account here as above.
  auto Linkage = GlobalValue::LinkageTypes(RawFlags & 0xF); // 4 bits
  // Visibility must be read from the *unshifted* word; the shift below
  // discards the low linkage bits only.
  auto Visibility = GlobalValue::VisibilityTypes((RawFlags >> 8) & 3); // 2 bits
  RawFlags = RawFlags >> 4;
  bool NotEligibleToImport = (RawFlags & 0x1) || Version < 3;
  // The Live flag wasn't introduced until version 3. For dead stripping
  // to work correctly on earlier versions, we must conservatively treat all
  // values as live.
  bool Live = (RawFlags & 0x2) || Version < 3;
  bool Local = (RawFlags & 0x4);
  bool AutoHide = (RawFlags & 0x8);

  return GlobalValueSummary::GVFlags(Linkage, Visibility, NotEligibleToImport,
                                     Live, Local, AutoHide);
}
+
// Decode the flags for GlobalVariable in the summary.  Bits 0-2 are boolean
// flags (in emission order); the remaining high bits encode the vcall
// visibility enum.
static GlobalVarSummary::GVarFlags getDecodedGVarFlags(uint64_t RawFlags) {
  return GlobalVarSummary::GVarFlags(
      (RawFlags & 0x1) ? true : false, (RawFlags & 0x2) ? true : false,
      (RawFlags & 0x4) ? true : false,
      (GlobalObject::VCallVisibility)(RawFlags >> 3));
}
+
/// Decode a bitcode visibility code; unknown codes fall back to default
/// visibility so newer bitcode degrades gracefully.
static GlobalValue::VisibilityTypes getDecodedVisibility(unsigned Val) {
  switch (Val) {
  default: // Map unknown visibilities to default.
  case 0: return GlobalValue::DefaultVisibility;
  case 1: return GlobalValue::HiddenVisibility;
  case 2: return GlobalValue::ProtectedVisibility;
  }
}
+
/// Decode a bitcode DLL storage class code; unknown codes map to the default
/// storage class.
static GlobalValue::DLLStorageClassTypes
getDecodedDLLStorageClass(unsigned Val) {
  switch (Val) {
  default: // Map unknown values to default.
  case 0: return GlobalValue::DefaultStorageClass;
  case 1: return GlobalValue::DLLImportStorageClass;
  case 2: return GlobalValue::DLLExportStorageClass;
  }
}
+
/// Decode the dso_local flag from a bitcode record.  Only the value 1 means
/// DSO-local; 0 and any unknown value conservatively decode to preemptable.
static bool getDecodedDSOLocal(unsigned Val) {
  return Val == 1;
}
+
/// Decode a bitcode thread-local mode code; unknown non-zero codes map to the
/// most conservative TLS model (general dynamic).
static GlobalVariable::ThreadLocalMode getDecodedThreadLocalMode(unsigned Val) {
  switch (Val) {
  case 0: return GlobalVariable::NotThreadLocal;
  default: // Map unknown non-zero value to general dynamic.
  case 1: return GlobalVariable::GeneralDynamicTLSModel;
  case 2: return GlobalVariable::LocalDynamicTLSModel;
  case 3: return GlobalVariable::InitialExecTLSModel;
  case 4: return GlobalVariable::LocalExecTLSModel;
  }
}
+
/// Decode a bitcode unnamed_addr code; unknown codes map to None (the
/// address is significant), the conservative choice.
static GlobalVariable::UnnamedAddr getDecodedUnnamedAddrType(unsigned Val) {
  switch (Val) {
  default: // Map unknown to UnnamedAddr::None.
  case 0: return GlobalVariable::UnnamedAddr::None;
  case 1: return GlobalVariable::UnnamedAddr::Global;
  case 2: return GlobalVariable::UnnamedAddr::Local;
  }
}
+
/// Translate a bitcode cast code into the corresponding Instruction opcode,
/// or -1 for unrecognized codes (caller reports the parse error).
static int getDecodedCastOpcode(unsigned Val) {
  switch (Val) {
  default: return -1;
  case bitc::CAST_TRUNC   : return Instruction::Trunc;
  case bitc::CAST_ZEXT    : return Instruction::ZExt;
  case bitc::CAST_SEXT    : return Instruction::SExt;
  case bitc::CAST_FPTOUI  : return Instruction::FPToUI;
  case bitc::CAST_FPTOSI  : return Instruction::FPToSI;
  case bitc::CAST_UITOFP  : return Instruction::UIToFP;
  case bitc::CAST_SITOFP  : return Instruction::SIToFP;
  case bitc::CAST_FPTRUNC : return Instruction::FPTrunc;
  case bitc::CAST_FPEXT   : return Instruction::FPExt;
  case bitc::CAST_PTRTOINT: return Instruction::PtrToInt;
  case bitc::CAST_INTTOPTR: return Instruction::IntToPtr;
  case bitc::CAST_BITCAST : return Instruction::BitCast;
  case bitc::CAST_ADDRSPACECAST: return Instruction::AddrSpaceCast;
  }
}
+
/// Translate a bitcode unary-op code into an Instruction opcode, taking the
/// operand type into account (FNeg is only valid on FP types).  Returns -1
/// for invalid code/type combinations.
static int getDecodedUnaryOpcode(unsigned Val, Type *Ty) {
  bool IsFP = Ty->isFPOrFPVectorTy();
  // UnOps are only valid for int/fp or vector of int/fp types
  if (!IsFP && !Ty->isIntOrIntVectorTy())
    return -1;

  switch (Val) {
  default:
    return -1;
  case bitc::UNOP_FNEG:
    return IsFP ? Instruction::FNeg : -1;
  }
}
+
/// Translate a bitcode binary-op code into an Instruction opcode.  The same
/// bitcode code is shared by the integer and FP variants; the operand type
/// selects which one.  Returns -1 for invalid code/type combinations (e.g.
/// a bitwise op on an FP type).
static int getDecodedBinaryOpcode(unsigned Val, Type *Ty) {
  bool IsFP = Ty->isFPOrFPVectorTy();
  // BinOps are only valid for int/fp or vector of int/fp types
  if (!IsFP && !Ty->isIntOrIntVectorTy())
    return -1;

  switch (Val) {
  default:
    return -1;
  case bitc::BINOP_ADD:
    return IsFP ? Instruction::FAdd : Instruction::Add;
  case bitc::BINOP_SUB:
    return IsFP ? Instruction::FSub : Instruction::Sub;
  case bitc::BINOP_MUL:
    return IsFP ? Instruction::FMul : Instruction::Mul;
  case bitc::BINOP_UDIV:
    return IsFP ? -1 : Instruction::UDiv;
  case bitc::BINOP_SDIV:
    return IsFP ? Instruction::FDiv : Instruction::SDiv;
  case bitc::BINOP_UREM:
    return IsFP ? -1 : Instruction::URem;
  case bitc::BINOP_SREM:
    return IsFP ? Instruction::FRem : Instruction::SRem;
  case bitc::BINOP_SHL:
    return IsFP ? -1 : Instruction::Shl;
  case bitc::BINOP_LSHR:
    return IsFP ? -1 : Instruction::LShr;
  case bitc::BINOP_ASHR:
    return IsFP ? -1 : Instruction::AShr;
  case bitc::BINOP_AND:
    return IsFP ? -1 : Instruction::And;
  case bitc::BINOP_OR:
    return IsFP ? -1 : Instruction::Or;
  case bitc::BINOP_XOR:
    return IsFP ? -1 : Instruction::Xor;
  }
}
+
/// Translate a bitcode atomicrmw operation code into AtomicRMWInst::BinOp;
/// unknown codes yield BAD_BINOP, which the caller rejects.
static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
  switch (Val) {
  default: return AtomicRMWInst::BAD_BINOP;
  case bitc::RMW_XCHG: return AtomicRMWInst::Xchg;
  case bitc::RMW_ADD: return AtomicRMWInst::Add;
  case bitc::RMW_SUB: return AtomicRMWInst::Sub;
  case bitc::RMW_AND: return AtomicRMWInst::And;
  case bitc::RMW_NAND: return AtomicRMWInst::Nand;
  case bitc::RMW_OR: return AtomicRMWInst::Or;
  case bitc::RMW_XOR: return AtomicRMWInst::Xor;
  case bitc::RMW_MAX: return AtomicRMWInst::Max;
  case bitc::RMW_MIN: return AtomicRMWInst::Min;
  case bitc::RMW_UMAX: return AtomicRMWInst::UMax;
  case bitc::RMW_UMIN: return AtomicRMWInst::UMin;
  case bitc::RMW_FADD: return AtomicRMWInst::FAdd;
  case bitc::RMW_FSUB: return AtomicRMWInst::FSub;
  }
}
+
/// Translate a bitcode atomic-ordering code into AtomicOrdering.  Unknown
/// codes map to sequentially-consistent, the strongest (safest) ordering.
static AtomicOrdering getDecodedOrdering(unsigned Val) {
  switch (Val) {
  case bitc::ORDERING_NOTATOMIC: return AtomicOrdering::NotAtomic;
  case bitc::ORDERING_UNORDERED: return AtomicOrdering::Unordered;
  case bitc::ORDERING_MONOTONIC: return AtomicOrdering::Monotonic;
  case bitc::ORDERING_ACQUIRE: return AtomicOrdering::Acquire;
  case bitc::ORDERING_RELEASE: return AtomicOrdering::Release;
  case bitc::ORDERING_ACQREL: return AtomicOrdering::AcquireRelease;
  default: // Map unknown orderings to sequentially-consistent.
  case bitc::ORDERING_SEQCST: return AtomicOrdering::SequentiallyConsistent;
  }
}
+
/// Translate a bitcode comdat selection-kind code into Comdat::SelectionKind;
/// unknown codes map to Any.
static Comdat::SelectionKind getDecodedComdatSelectionKind(unsigned Val) {
  switch (Val) {
  default: // Map unknown selection kinds to any.
  case bitc::COMDAT_SELECTION_KIND_ANY:
    return Comdat::Any;
  case bitc::COMDAT_SELECTION_KIND_EXACT_MATCH:
    return Comdat::ExactMatch;
  case bitc::COMDAT_SELECTION_KIND_LARGEST:
    return Comdat::Largest;
  case bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES:
    return Comdat::NoDeduplicate;
  case bitc::COMDAT_SELECTION_KIND_SAME_SIZE:
    return Comdat::SameSize;
  }
}
+
/// Decode the fast-math flag bits attached to an FP instruction record.
/// The legacy UnsafeAlgebra bit sets all flags at once; the remaining bits
/// each toggle one individual flag.
static FastMathFlags getDecodedFastMathFlags(unsigned Val) {
  FastMathFlags FMF;
  if (0 != (Val & bitc::UnsafeAlgebra))
    FMF.setFast();
  if (0 != (Val & bitc::AllowReassoc))
    FMF.setAllowReassoc();
  if (0 != (Val & bitc::NoNaNs))
    FMF.setNoNaNs();
  if (0 != (Val & bitc::NoInfs))
    FMF.setNoInfs();
  if (0 != (Val & bitc::NoSignedZeros))
    FMF.setNoSignedZeros();
  if (0 != (Val & bitc::AllowReciprocal))
    FMF.setAllowReciprocal();
  if (0 != (Val & bitc::AllowContract))
    FMF.setAllowContract(true);
  if (0 != (Val & bitc::ApproxFunc))
    FMF.setApproxFunc();
  return FMF;
}
+
/// Upgrade the obsolete DLLImport/DLLExport *linkage* codes (5 and 6) to the
/// modern representation as a DLL storage class on the global.  Other codes
/// are left untouched.
static void upgradeDLLImportExportLinkage(GlobalValue *GV, unsigned Val) {
  switch (Val) {
  case 5: GV->setDLLStorageClass(GlobalValue::DLLImportStorageClass); break;
  case 6: GV->setDLLStorageClass(GlobalValue::DLLExportStorageClass); break;
  }
}
+
/// Look up a type by its index in the bitcode type table.  Returns nullptr
/// for out-of-range IDs.  An in-range but not-yet-parsed entry is resolved
/// to a placeholder identified struct that is filled in later.
Type *BitcodeReader::getTypeByID(unsigned ID) {
  // The type table size is always specified correctly.
  if (ID >= TypeList.size())
    return nullptr;

  if (Type *Ty = TypeList[ID])
    return Ty;

  // If we have a forward reference, the only possible case is when it is to a
  // named struct. Just create a placeholder for now.
  return TypeList[ID] = createIdentifiedStructType(Context);
}
+
/// Create a named identified struct type and record it so its element types
/// can be resolved/upgraded after the whole type table is parsed.
StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context,
                                                      StringRef Name) {
  auto *Ret = StructType::create(Context, Name);
  IdentifiedStructTypes.push_back(Ret);
  return Ret;
}
+
/// Create an anonymous identified struct type (used as a forward-reference
/// placeholder) and record it alongside the named ones.
StructType *BitcodeReader::createIdentifiedStructType(LLVMContext &Context) {
  auto *Ret = StructType::create(Context);
  IdentifiedStructTypes.push_back(Ret);
  return Ret;
}
+
+//===----------------------------------------------------------------------===//
+// Functions for parsing blocks from the bitcode file
+//===----------------------------------------------------------------------===//
+
/// Return the bit (or bit-field) assigned to \p Val in the legacy "raw"
/// attribute encoding used by very old bitcode.  Alignment and StackAlignment
/// occupy multi-bit fields (5 and 3 bits respectively); attributes added
/// after the 64-bit mask filled up return 0 (not representable here).
static uint64_t getRawAttributeMask(Attribute::AttrKind Val) {
  switch (Val) {
  case Attribute::EndAttrKinds:
  case Attribute::EmptyKey:
  case Attribute::TombstoneKey:
    llvm_unreachable("Synthetic enumerators which should never get here");

  case Attribute::None: return 0;
  case Attribute::ZExt: return 1 << 0;
  case Attribute::SExt: return 1 << 1;
  case Attribute::NoReturn: return 1 << 2;
  case Attribute::InReg: return 1 << 3;
  case Attribute::StructRet: return 1 << 4;
  case Attribute::NoUnwind: return 1 << 5;
  case Attribute::NoAlias: return 1 << 6;
  case Attribute::ByVal: return 1 << 7;
  case Attribute::Nest: return 1 << 8;
  case Attribute::ReadNone: return 1 << 9;
  case Attribute::ReadOnly: return 1 << 10;
  case Attribute::NoInline: return 1 << 11;
  case Attribute::AlwaysInline: return 1 << 12;
  case Attribute::OptimizeForSize: return 1 << 13;
  case Attribute::StackProtect: return 1 << 14;
  case Attribute::StackProtectReq: return 1 << 15;
  // 5-bit field holding the log2-encoded alignment.
  case Attribute::Alignment: return 31 << 16;
  case Attribute::NoCapture: return 1 << 21;
  case Attribute::NoRedZone: return 1 << 22;
  case Attribute::NoImplicitFloat: return 1 << 23;
  case Attribute::Naked: return 1 << 24;
  case Attribute::InlineHint: return 1 << 25;
  // 3-bit field holding the log2-encoded stack alignment.
  case Attribute::StackAlignment: return 7 << 26;
  case Attribute::ReturnsTwice: return 1 << 29;
  case Attribute::UWTable: return 1 << 30;
  case Attribute::NonLazyBind: return 1U << 31;
  case Attribute::SanitizeAddress: return 1ULL << 32;
  case Attribute::MinSize: return 1ULL << 33;
  case Attribute::NoDuplicate: return 1ULL << 34;
  case Attribute::StackProtectStrong: return 1ULL << 35;
  case Attribute::SanitizeThread: return 1ULL << 36;
  case Attribute::SanitizeMemory: return 1ULL << 37;
  case Attribute::NoBuiltin: return 1ULL << 38;
  case Attribute::Returned: return 1ULL << 39;
  case Attribute::Cold: return 1ULL << 40;
  case Attribute::Builtin: return 1ULL << 41;
  case Attribute::OptimizeNone: return 1ULL << 42;
  case Attribute::InAlloca: return 1ULL << 43;
  case Attribute::NonNull: return 1ULL << 44;
  case Attribute::JumpTable: return 1ULL << 45;
  case Attribute::Convergent: return 1ULL << 46;
  case Attribute::SafeStack: return 1ULL << 47;
  case Attribute::NoRecurse: return 1ULL << 48;
  case Attribute::InaccessibleMemOnly: return 1ULL << 49;
  case Attribute::InaccessibleMemOrArgMemOnly: return 1ULL << 50;
  case Attribute::SwiftSelf: return 1ULL << 51;
  case Attribute::SwiftError: return 1ULL << 52;
  case Attribute::WriteOnly: return 1ULL << 53;
  case Attribute::Speculatable: return 1ULL << 54;
  case Attribute::StrictFP: return 1ULL << 55;
  case Attribute::SanitizeHWAddress: return 1ULL << 56;
  case Attribute::NoCfCheck: return 1ULL << 57;
  case Attribute::OptForFuzzing: return 1ULL << 58;
  case Attribute::ShadowCallStack: return 1ULL << 59;
  case Attribute::SpeculativeLoadHardening:
    return 1ULL << 60;
  case Attribute::ImmArg:
    return 1ULL << 61;
  case Attribute::WillReturn:
    return 1ULL << 62;
  case Attribute::NoFree:
    return 1ULL << 63;
  default:
    // Other attributes are not supported in the raw format,
    // as we ran out of space.
    return 0;
  }
  // Not reached: every switch path returns.
  llvm_unreachable("Unsupported attribute type");
}
+
/// Expand a legacy raw attribute bitmask into individual attributes on \p B.
/// Alignment fields are decoded from their log2 encodings; type attributes
/// are added with a null type to be filled in by the auto-upgrader.
static void addRawAttributeValue(AttrBuilder &B, uint64_t Val) {
  if (!Val) return;

  // Probe each known attribute kind against its raw-format mask.
  for (Attribute::AttrKind I = Attribute::None; I != Attribute::EndAttrKinds;
       I = Attribute::AttrKind(I + 1)) {
    if (uint64_t A = (Val & getRawAttributeMask(I))) {
      if (I == Attribute::Alignment)
        B.addAlignmentAttr(1ULL << ((A >> 16) - 1));
      else if (I == Attribute::StackAlignment)
        B.addStackAlignmentAttr(1ULL << ((A >> 26)-1));
      else if (Attribute::isTypeAttrKind(I))
        B.addTypeAttr(I, nullptr); // Type will be auto-upgraded.
      else
        B.addAttribute(I);
    }
  }
}
+
/// This fills an AttrBuilder object with the LLVM attributes that have
/// been decoded from the given integer. This function must stay in sync with
/// 'encodeLLVMAttributesForBitcode'.
static void decodeLLVMAttributesForBitcode(AttrBuilder &B,
                                           uint64_t EncodedAttrs) {
  // The alignment is stored as a 16-bit raw value from bits 31--16. We shift
  // the bits above 31 down by 11 bits.
  unsigned Alignment = (EncodedAttrs & (0xffffULL << 16)) >> 16;
  assert((!Alignment || isPowerOf2_32(Alignment)) &&
         "Alignment must be a power of two.");

  if (Alignment)
    B.addAlignmentAttr(Alignment);
  // Reassemble the raw mask: high 20 bits (originally at [32,52)) shifted
  // down to sit just above the low 16 attribute bits, skipping the
  // alignment field handled separately above.
  addRawAttributeValue(B, ((EncodedAttrs & (0xfffffULL << 32)) >> 11) |
                          (EncodedAttrs & 0xffff));
}
+
/// Parse the PARAMATTR block, filling MAttributes with one AttributeList per
/// entry record.  Only one such block may appear in a module.  The new-style
/// records reference attribute groups, so parseAttributeGroupBlock must have
/// populated MAttributeGroups before the group indices are used.
Error BitcodeReader::parseAttributeBlock() {
  if (Error Err = Stream.EnterSubBlock(bitc::PARAMATTR_BLOCK_ID))
    return Err;

  if (!MAttributes.empty())
    return error("Invalid multiple blocks");

  SmallVector<uint64_t, 64> Record;

  // Attribute sets accumulated for the entry currently being parsed.
  SmallVector<AttributeList, 8> Attrs;

  // Read all the records.
  while (true) {
    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
    if (!MaybeEntry)
      return MaybeEntry.takeError();
    BitstreamEntry Entry = MaybeEntry.get();

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      return Error::success();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Read a record.
    Record.clear();
    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
    if (!MaybeRecord)
      return MaybeRecord.takeError();
    switch (MaybeRecord.get()) {
    default: // Default behavior: ignore.
      break;
    case bitc::PARAMATTR_CODE_ENTRY_OLD: // ENTRY: [paramidx0, attr0, ...]
      // Deprecated, but still needed to read old bitcode files.
      // Pairs of (parameter index, raw attribute mask).
      if (Record.size() & 1)
        return error("Invalid record");

      for (unsigned i = 0, e = Record.size(); i != e; i += 2) {
        AttrBuilder B(Context);
        decodeLLVMAttributesForBitcode(B, Record[i+1]);
        Attrs.push_back(AttributeList::get(Context, Record[i], B));
      }

      MAttributes.push_back(AttributeList::get(Context, Attrs));
      Attrs.clear();
      break;
    case bitc::PARAMATTR_CODE_ENTRY: // ENTRY: [attrgrp0, attrgrp1, ...]
      // New style: each element is an index into MAttributeGroups.
      for (unsigned i = 0, e = Record.size(); i != e; ++i)
        Attrs.push_back(MAttributeGroups[Record[i]]);

      MAttributes.push_back(AttributeList::get(Context, Attrs));
      Attrs.clear();
      break;
    }
  }
}
+
// Translate a bitcode ATTR_KIND_* code into the in-memory Attribute::AttrKind.
// Returns Attribute::None on unrecognized codes.
static Attribute::AttrKind getAttrFromCode(uint64_t Code) {
  switch (Code) {
  default:
    return Attribute::None;
  case bitc::ATTR_KIND_ALIGNMENT:
    return Attribute::Alignment;
  case bitc::ATTR_KIND_ALWAYS_INLINE:
    return Attribute::AlwaysInline;
  case bitc::ATTR_KIND_ARGMEMONLY:
    return Attribute::ArgMemOnly;
  case bitc::ATTR_KIND_BUILTIN:
    return Attribute::Builtin;
  case bitc::ATTR_KIND_BY_VAL:
    return Attribute::ByVal;
  case bitc::ATTR_KIND_IN_ALLOCA:
    return Attribute::InAlloca;
  case bitc::ATTR_KIND_COLD:
    return Attribute::Cold;
  case bitc::ATTR_KIND_CONVERGENT:
    return Attribute::Convergent;
  case bitc::ATTR_KIND_DISABLE_SANITIZER_INSTRUMENTATION:
    return Attribute::DisableSanitizerInstrumentation;
  case bitc::ATTR_KIND_ELEMENTTYPE:
    return Attribute::ElementType;
  case bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY:
    return Attribute::InaccessibleMemOnly;
  case bitc::ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY:
    return Attribute::InaccessibleMemOrArgMemOnly;
  case bitc::ATTR_KIND_INLINE_HINT:
    return Attribute::InlineHint;
  case bitc::ATTR_KIND_IN_REG:
    return Attribute::InReg;
  case bitc::ATTR_KIND_JUMP_TABLE:
    return Attribute::JumpTable;
  case bitc::ATTR_KIND_MIN_SIZE:
    return Attribute::MinSize;
  case bitc::ATTR_KIND_NAKED:
    return Attribute::Naked;
  case bitc::ATTR_KIND_NEST:
    return Attribute::Nest;
  case bitc::ATTR_KIND_NO_ALIAS:
    return Attribute::NoAlias;
  case bitc::ATTR_KIND_NO_BUILTIN:
    return Attribute::NoBuiltin;
  case bitc::ATTR_KIND_NO_CALLBACK:
    return Attribute::NoCallback;
  case bitc::ATTR_KIND_NO_CAPTURE:
    return Attribute::NoCapture;
  case bitc::ATTR_KIND_NO_DUPLICATE:
    return Attribute::NoDuplicate;
  case bitc::ATTR_KIND_NOFREE:
    return Attribute::NoFree;
  case bitc::ATTR_KIND_NO_IMPLICIT_FLOAT:
    return Attribute::NoImplicitFloat;
  case bitc::ATTR_KIND_NO_INLINE:
    return Attribute::NoInline;
  case bitc::ATTR_KIND_NO_RECURSE:
    return Attribute::NoRecurse;
  case bitc::ATTR_KIND_NO_MERGE:
    return Attribute::NoMerge;
  case bitc::ATTR_KIND_NON_LAZY_BIND:
    return Attribute::NonLazyBind;
  case bitc::ATTR_KIND_NON_NULL:
    return Attribute::NonNull;
  case bitc::ATTR_KIND_DEREFERENCEABLE:
    return Attribute::Dereferenceable;
  case bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL:
    return Attribute::DereferenceableOrNull;
  case bitc::ATTR_KIND_ALLOC_SIZE:
    return Attribute::AllocSize;
  case bitc::ATTR_KIND_NO_RED_ZONE:
    return Attribute::NoRedZone;
  case bitc::ATTR_KIND_NO_RETURN:
    return Attribute::NoReturn;
  case bitc::ATTR_KIND_NOSYNC:
    return Attribute::NoSync;
  case bitc::ATTR_KIND_NOCF_CHECK:
    return Attribute::NoCfCheck;
  case bitc::ATTR_KIND_NO_PROFILE:
    return Attribute::NoProfile;
  case bitc::ATTR_KIND_NO_UNWIND:
    return Attribute::NoUnwind;
  case bitc::ATTR_KIND_NO_SANITIZE_COVERAGE:
    return Attribute::NoSanitizeCoverage;
  case bitc::ATTR_KIND_NULL_POINTER_IS_VALID:
    return Attribute::NullPointerIsValid;
  case bitc::ATTR_KIND_OPT_FOR_FUZZING:
    return Attribute::OptForFuzzing;
  case bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE:
    return Attribute::OptimizeForSize;
  case bitc::ATTR_KIND_OPTIMIZE_NONE:
    return Attribute::OptimizeNone;
  case bitc::ATTR_KIND_READ_NONE:
    return Attribute::ReadNone;
  case bitc::ATTR_KIND_READ_ONLY:
    return Attribute::ReadOnly;
  case bitc::ATTR_KIND_RETURNED:
    return Attribute::Returned;
  case bitc::ATTR_KIND_RETURNS_TWICE:
    return Attribute::ReturnsTwice;
  case bitc::ATTR_KIND_S_EXT:
    return Attribute::SExt;
  case bitc::ATTR_KIND_SPECULATABLE:
    return Attribute::Speculatable;
  case bitc::ATTR_KIND_STACK_ALIGNMENT:
    return Attribute::StackAlignment;
  case bitc::ATTR_KIND_STACK_PROTECT:
    return Attribute::StackProtect;
  case bitc::ATTR_KIND_STACK_PROTECT_REQ:
    return Attribute::StackProtectReq;
  case bitc::ATTR_KIND_STACK_PROTECT_STRONG:
    return Attribute::StackProtectStrong;
  case bitc::ATTR_KIND_SAFESTACK:
    return Attribute::SafeStack;
  case bitc::ATTR_KIND_SHADOWCALLSTACK:
    return Attribute::ShadowCallStack;
  case bitc::ATTR_KIND_STRICT_FP:
    return Attribute::StrictFP;
  case bitc::ATTR_KIND_STRUCT_RET:
    return Attribute::StructRet;
  case bitc::ATTR_KIND_SANITIZE_ADDRESS:
    return Attribute::SanitizeAddress;
  case bitc::ATTR_KIND_SANITIZE_HWADDRESS:
    return Attribute::SanitizeHWAddress;
  case bitc::ATTR_KIND_SANITIZE_THREAD:
    return Attribute::SanitizeThread;
  case bitc::ATTR_KIND_SANITIZE_MEMORY:
    return Attribute::SanitizeMemory;
  case bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING:
    return Attribute::SpeculativeLoadHardening;
  case bitc::ATTR_KIND_SWIFT_ERROR:
    return Attribute::SwiftError;
  case bitc::ATTR_KIND_SWIFT_SELF:
    return Attribute::SwiftSelf;
  case bitc::ATTR_KIND_SWIFT_ASYNC:
    return Attribute::SwiftAsync;
  case bitc::ATTR_KIND_UW_TABLE:
    return Attribute::UWTable;
  case bitc::ATTR_KIND_VSCALE_RANGE:
    return Attribute::VScaleRange;
  case bitc::ATTR_KIND_WILLRETURN:
    return Attribute::WillReturn;
  case bitc::ATTR_KIND_WRITEONLY:
    return Attribute::WriteOnly;
  case bitc::ATTR_KIND_Z_EXT:
    return Attribute::ZExt;
  case bitc::ATTR_KIND_IMMARG:
    return Attribute::ImmArg;
  case bitc::ATTR_KIND_SANITIZE_MEMTAG:
    return Attribute::SanitizeMemTag;
  case bitc::ATTR_KIND_PREALLOCATED:
    return Attribute::Preallocated;
  case bitc::ATTR_KIND_NOUNDEF:
    return Attribute::NoUndef;
  case bitc::ATTR_KIND_BYREF:
    return Attribute::ByRef;
  case bitc::ATTR_KIND_MUSTPROGRESS:
    return Attribute::MustProgress;
  case bitc::ATTR_KIND_HOT:
    return Attribute::Hot;
  }
}
+
/// Decode an alignment exponent from a bitcode record into a MaybeAlign.
/// Bitcode stores log2(alignment) + 1 so that 0 can mean "default"; values
/// past the maximum exponent are rejected as malformed.
Error BitcodeReader::parseAlignmentValue(uint64_t Exponent,
                                         MaybeAlign &Alignment) {
  // Note: Alignment in bitcode files is incremented by 1, so that zero
  // can be used for default alignment.
  if (Exponent > Value::MaxAlignmentExponent + 1)
    return error("Invalid alignment value");
  Alignment = decodeMaybeAlign(Exponent);
  return Error::success();
}
+
/// Decode a bitcode attribute-kind code into *Kind, turning the
/// "unrecognized" sentinel from getAttrFromCode into a parse error.
Error BitcodeReader::parseAttrKind(uint64_t Code, Attribute::AttrKind *Kind) {
  *Kind = getAttrFromCode(Code);
  if (*Kind == Attribute::None)
    return error("Unknown attribute kind (" + Twine(Code) + ")");
  return Error::success();
}
+
+Error BitcodeReader::parseAttributeGroupBlock() {
+ if (Error Err = Stream.EnterSubBlock(bitc::PARAMATTR_GROUP_BLOCK_ID))
+ return Err;
+
+ if (!MAttributeGroups.empty())
+ return error("Invalid multiple blocks");
+
+ SmallVector<uint64_t, 64> Record;
+
+ // Read all the records.
+ while (true) {
+ Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return Error::success();
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Record.clear();
+ Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeRecord)
+ return MaybeRecord.takeError();
+ switch (MaybeRecord.get()) {
+ default: // Default behavior: ignore.
+ break;
+ case bitc::PARAMATTR_GRP_CODE_ENTRY: { // ENTRY: [grpid, idx, a0, a1, ...]
+ if (Record.size() < 3)
+ return error("Invalid record");
+
+ uint64_t GrpID = Record[0];
+ uint64_t Idx = Record[1]; // Index of the object this attribute refers to.
+
+ AttrBuilder B(Context);
+ for (unsigned i = 2, e = Record.size(); i != e; ++i) {
+ if (Record[i] == 0) { // Enum attribute
+ Attribute::AttrKind Kind;
+ if (Error Err = parseAttrKind(Record[++i], &Kind))
+ return Err;
+
+ // Upgrade old-style byval attribute to one with a type, even if it's
+ // nullptr. We will have to insert the real type when we associate
+ // this AttributeList with a function.
+ if (Kind == Attribute::ByVal)
+ B.addByValAttr(nullptr);
+ else if (Kind == Attribute::StructRet)
+ B.addStructRetAttr(nullptr);
+ else if (Kind == Attribute::InAlloca)
+ B.addInAllocaAttr(nullptr);
+ else if (Attribute::isEnumAttrKind(Kind))
+ B.addAttribute(Kind);
+ else
+ return error("Not an enum attribute");
+ } else if (Record[i] == 1) { // Integer attribute
+ Attribute::AttrKind Kind;
+ if (Error Err = parseAttrKind(Record[++i], &Kind))
+ return Err;
+ if (!Attribute::isIntAttrKind(Kind))
+ return error("Not an int attribute");
+ if (Kind == Attribute::Alignment)
+ B.addAlignmentAttr(Record[++i]);
+ else if (Kind == Attribute::StackAlignment)
+ B.addStackAlignmentAttr(Record[++i]);
+ else if (Kind == Attribute::Dereferenceable)
+ B.addDereferenceableAttr(Record[++i]);
+ else if (Kind == Attribute::DereferenceableOrNull)
+ B.addDereferenceableOrNullAttr(Record[++i]);
+ else if (Kind == Attribute::AllocSize)
+ B.addAllocSizeAttrFromRawRepr(Record[++i]);
+ else if (Kind == Attribute::VScaleRange)
+ B.addVScaleRangeAttrFromRawRepr(Record[++i]);
+ } else if (Record[i] == 3 || Record[i] == 4) { // String attribute
+ bool HasValue = (Record[i++] == 4);
+ SmallString<64> KindStr;
+ SmallString<64> ValStr;
+
+ while (Record[i] != 0 && i != e)
+ KindStr += Record[i++];
+ assert(Record[i] == 0 && "Kind string not null terminated");
+
+ if (HasValue) {
+ // Has a value associated with it.
+ ++i; // Skip the '0' that terminates the "kind" string.
+ while (Record[i] != 0 && i != e)
+ ValStr += Record[i++];
+ assert(Record[i] == 0 && "Value string not null terminated");
+ }
+
+ B.addAttribute(KindStr.str(), ValStr.str());
+ } else {
+ assert((Record[i] == 5 || Record[i] == 6) &&
+ "Invalid attribute group entry");
+ bool HasType = Record[i] == 6;
+ Attribute::AttrKind Kind;
+ if (Error Err = parseAttrKind(Record[++i], &Kind))
+ return Err;
+ if (!Attribute::isTypeAttrKind(Kind))
+ return error("Not a type attribute");
+
+ B.addTypeAttr(Kind, HasType ? getTypeByID(Record[++i]) : nullptr);
+ }
+ }
+
+ UpgradeAttributes(B);
+ MAttributeGroups[GrpID] = AttributeList::get(Context, Idx, B);
+ break;
+ }
+ }
+ }
+}
+
/// Enter the new-style type table block and delegate record parsing to
/// parseTypeTableBody().
Error BitcodeReader::parseTypeTable() {
  if (Error Err = Stream.EnterSubBlock(bitc::TYPE_BLOCK_ID_NEW))
    return Err;

  return parseTypeTableBody();
}
+
+/// Parse the body of the new-style type table and populate TypeList with one
+/// Type* per record.
+///
+/// The entry count is announced up front by a TYPE_CODE_NUMENTRY record;
+/// only named structs may be forward referenced (as placeholder entries
+/// already present in TypeList).
+Error BitcodeReader::parseTypeTableBody() {
+  // A non-empty TypeList means a type table was already parsed for this
+  // module; duplicate type blocks are malformed.
+  if (!TypeList.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+  // Index of the next TypeList slot to fill; must match TypeList.size()
+  // when the block ends, or some announced entries were never defined.
+  unsigned NumRecords = 0;
+
+  // Name carried over from the most recent STRUCT_NAME record; consumed by
+  // the next STRUCT_NAMED or OPAQUE record.
+  SmallString<64> TypeName;
+
+  // Read all the records for this type table.
+  while (true) {
+    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    BitstreamEntry Entry = MaybeEntry.get();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      // Every slot announced by TYPE_CODE_NUMENTRY must have been filled.
+      if (NumRecords != TypeList.size())
+        return error("Malformed block");
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    Type *ResultTy = nullptr;
+    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeRecord)
+      return MaybeRecord.takeError();
+    switch (MaybeRecord.get()) {
+    default:
+      return error("Invalid value");
+    case bitc::TYPE_CODE_NUMENTRY: // TYPE_CODE_NUMENTRY: [numentries]
+      // TYPE_CODE_NUMENTRY contains a count of the number of types in the
+      // type list. This allows us to reserve space.
+      if (Record.empty())
+        return error("Invalid record");
+      TypeList.resize(Record[0]);
+      continue;
+    case bitc::TYPE_CODE_VOID: // VOID
+      ResultTy = Type::getVoidTy(Context);
+      break;
+    case bitc::TYPE_CODE_HALF: // HALF
+      ResultTy = Type::getHalfTy(Context);
+      break;
+    case bitc::TYPE_CODE_BFLOAT: // BFLOAT
+      ResultTy = Type::getBFloatTy(Context);
+      break;
+    case bitc::TYPE_CODE_FLOAT: // FLOAT
+      ResultTy = Type::getFloatTy(Context);
+      break;
+    case bitc::TYPE_CODE_DOUBLE: // DOUBLE
+      ResultTy = Type::getDoubleTy(Context);
+      break;
+    case bitc::TYPE_CODE_X86_FP80: // X86_FP80
+      ResultTy = Type::getX86_FP80Ty(Context);
+      break;
+    case bitc::TYPE_CODE_FP128: // FP128
+      ResultTy = Type::getFP128Ty(Context);
+      break;
+    case bitc::TYPE_CODE_PPC_FP128: // PPC_FP128
+      ResultTy = Type::getPPC_FP128Ty(Context);
+      break;
+    case bitc::TYPE_CODE_LABEL: // LABEL
+      ResultTy = Type::getLabelTy(Context);
+      break;
+    case bitc::TYPE_CODE_METADATA: // METADATA
+      ResultTy = Type::getMetadataTy(Context);
+      break;
+    case bitc::TYPE_CODE_X86_MMX: // X86_MMX
+      ResultTy = Type::getX86_MMXTy(Context);
+      break;
+    case bitc::TYPE_CODE_X86_AMX: // X86_AMX
+      ResultTy = Type::getX86_AMXTy(Context);
+      break;
+    case bitc::TYPE_CODE_TOKEN: // TOKEN
+      ResultTy = Type::getTokenTy(Context);
+      break;
+    case bitc::TYPE_CODE_INTEGER: { // INTEGER: [width]
+      if (Record.empty())
+        return error("Invalid record");
+
+      uint64_t NumBits = Record[0];
+      if (NumBits < IntegerType::MIN_INT_BITS ||
+          NumBits > IntegerType::MAX_INT_BITS)
+        return error("Bitwidth for integer type out of range");
+      ResultTy = IntegerType::get(Context, NumBits);
+      break;
+    }
+    case bitc::TYPE_CODE_POINTER: { // POINTER: [pointee type] or
+                                    // [pointee type, address space]
+      if (Record.empty())
+        return error("Invalid record");
+      // Address space defaults to 0 when not encoded.
+      unsigned AddressSpace = 0;
+      if (Record.size() == 2)
+        AddressSpace = Record[1];
+      ResultTy = getTypeByID(Record[0]);
+      if (!ResultTy ||
+          !PointerType::isValidElementType(ResultTy))
+        return error("Invalid type");
+      ResultTy = PointerType::get(ResultTy, AddressSpace);
+      break;
+    }
+    case bitc::TYPE_CODE_OPAQUE_POINTER: { // OPAQUE_POINTER: [addrspace]
+      if (Record.size() != 1)
+        return error("Invalid record");
+      // Opaque pointer types are rejected unless the context is in
+      // opaque-pointers mode.
+      if (Context.supportsTypedPointers())
+        return error(
+            "Opaque pointers are only supported in -opaque-pointers mode");
+      unsigned AddressSpace = Record[0];
+      ResultTy = PointerType::get(Context, AddressSpace);
+      break;
+    }
+    case bitc::TYPE_CODE_FUNCTION_OLD: {
+      // Deprecated, but still needed to read old bitcode files.
+      // FUNCTION: [vararg, attrid, retty, paramty x N]
+      if (Record.size() < 3)
+        return error("Invalid record");
+      SmallVector<Type*, 8> ArgTys;
+      for (unsigned i = 3, e = Record.size(); i != e; ++i) {
+        if (Type *T = getTypeByID(Record[i]))
+          ArgTys.push_back(T);
+        else
+          break;
+      }
+
+      ResultTy = getTypeByID(Record[2]);
+      // A short ArgTys means some parameter type ID failed to resolve.
+      if (!ResultTy || ArgTys.size() < Record.size()-3)
+        return error("Invalid type");
+
+      ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
+      break;
+    }
+    case bitc::TYPE_CODE_FUNCTION: {
+      // FUNCTION: [vararg, retty, paramty x N]
+      if (Record.size() < 2)
+        return error("Invalid record");
+      SmallVector<Type*, 8> ArgTys;
+      for (unsigned i = 2, e = Record.size(); i != e; ++i) {
+        if (Type *T = getTypeByID(Record[i])) {
+          if (!FunctionType::isValidArgumentType(T))
+            return error("Invalid function argument type");
+          ArgTys.push_back(T);
+        }
+        else
+          break;
+      }
+
+      ResultTy = getTypeByID(Record[1]);
+      // A short ArgTys means some parameter type ID failed to resolve.
+      if (!ResultTy || ArgTys.size() < Record.size()-2)
+        return error("Invalid type");
+
+      ResultTy = FunctionType::get(ResultTy, ArgTys, Record[0]);
+      break;
+    }
+    case bitc::TYPE_CODE_STRUCT_ANON: { // STRUCT: [ispacked, eltty x N]
+      if (Record.empty())
+        return error("Invalid record");
+      SmallVector<Type*, 8> EltTys;
+      for (unsigned i = 1, e = Record.size(); i != e; ++i) {
+        if (Type *T = getTypeByID(Record[i]))
+          EltTys.push_back(T);
+        else
+          break;
+      }
+      if (EltTys.size() != Record.size()-1)
+        return error("Invalid type");
+      ResultTy = StructType::get(Context, EltTys, Record[0]);
+      break;
+    }
+    case bitc::TYPE_CODE_STRUCT_NAME: // STRUCT_NAME: [strchr x N]
+      // Stash the name for the next STRUCT_NAMED/OPAQUE record; this record
+      // does not itself define a TypeList entry.
+      if (convertToString(Record, 0, TypeName))
+        return error("Invalid record");
+      continue;
+
+    case bitc::TYPE_CODE_STRUCT_NAMED: { // STRUCT: [ispacked, eltty x N]
+      if (Record.empty())
+        return error("Invalid record");
+
+      if (NumRecords >= TypeList.size())
+        return error("Invalid TYPE table");
+
+      // Check to see if this was forward referenced, if so fill in the temp.
+      StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
+      if (Res) {
+        Res->setName(TypeName);
+        // Clear the slot so the post-switch forward-reference check passes.
+        TypeList[NumRecords] = nullptr;
+      } else // Otherwise, create a new struct.
+        Res = createIdentifiedStructType(Context, TypeName);
+      TypeName.clear();
+
+      SmallVector<Type*, 8> EltTys;
+      for (unsigned i = 1, e = Record.size(); i != e; ++i) {
+        if (Type *T = getTypeByID(Record[i]))
+          EltTys.push_back(T);
+        else
+          break;
+      }
+      if (EltTys.size() != Record.size()-1)
+        return error("Invalid record");
+      Res->setBody(EltTys, Record[0]);
+      ResultTy = Res;
+      break;
+    }
+    case bitc::TYPE_CODE_OPAQUE: { // OPAQUE: []
+      if (Record.size() != 1)
+        return error("Invalid record");
+
+      if (NumRecords >= TypeList.size())
+        return error("Invalid TYPE table");
+
+      // Check to see if this was forward referenced, if so fill in the temp.
+      StructType *Res = cast_or_null<StructType>(TypeList[NumRecords]);
+      if (Res) {
+        Res->setName(TypeName);
+        TypeList[NumRecords] = nullptr;
+      } else // Otherwise, create a new struct with no body.
+        Res = createIdentifiedStructType(Context, TypeName);
+      TypeName.clear();
+      ResultTy = Res;
+      break;
+    }
+    case bitc::TYPE_CODE_ARRAY: // ARRAY: [numelts, eltty]
+      if (Record.size() < 2)
+        return error("Invalid record");
+      ResultTy = getTypeByID(Record[1]);
+      if (!ResultTy || !ArrayType::isValidElementType(ResultTy))
+        return error("Invalid type");
+      ResultTy = ArrayType::get(ResultTy, Record[0]);
+      break;
+    case bitc::TYPE_CODE_VECTOR: // VECTOR: [numelts, eltty] or
+                                 // [numelts, eltty, scalable]
+      if (Record.size() < 2)
+        return error("Invalid record");
+      if (Record[0] == 0)
+        return error("Invalid vector length");
+      ResultTy = getTypeByID(Record[1]);
+      if (!ResultTy || !VectorType::isValidElementType(ResultTy))
+        return error("Invalid type");
+      bool Scalable = Record.size() > 2 ? Record[2] : false;
+      ResultTy = VectorType::get(ResultTy, Record[0], Scalable);
+      break;
+    }
+
+    // Each record (other than NUMENTRY/STRUCT_NAME, which `continue` above)
+    // fills exactly one TypeList slot, in order. Only named-struct forward
+    // references may pre-occupy a slot, and those were cleared above.
+    if (NumRecords >= TypeList.size())
+      return error("Invalid TYPE table");
+    if (TypeList[NumRecords])
+      return error(
+          "Invalid TYPE table: Only named structs can be forward referenced");
+    assert(ResultTy && "Didn't read a type?");
+    TypeList[NumRecords++] = ResultTy;
+  }
+}
+
+/// Parse the OPERAND_BUNDLE_TAGS block, appending each tag string to
+/// BundleTags. Tags are implicitly numbered by their order of appearance.
+Error BitcodeReader::parseOperandBundleTags() {
+  if (Error Err = Stream.EnterSubBlock(bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID))
+    return Err;
+
+  // Seeing this block twice is malformed bitcode.
+  if (!BundleTags.empty())
+    return error("Invalid multiple blocks");
+
+  SmallVector<uint64_t, 64> Record;
+  while (true) {
+    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    const BitstreamEntry &Entry = MaybeEntry.get();
+
+    // Only records are expected here; a nested subblock or a stream error
+    // means the block is malformed.
+    if (Entry.Kind == BitstreamEntry::EndBlock)
+      return Error::success();
+    if (Entry.Kind != BitstreamEntry::Record)
+      return error("Malformed block");
+
+    // OPERAND_BUNDLE_TAG: [strchr x N] -- the only record kind allowed.
+    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeRecord)
+      return MaybeRecord.takeError();
+    if (MaybeRecord.get() != bitc::OPERAND_BUNDLE_TAG)
+      return error("Invalid record");
+
+    // The tag's index in BundleTags is its implicit numeric ID.
+    BundleTags.emplace_back();
+    if (convertToString(Record, 0, BundleTags.back()))
+      return error("Invalid record");
+    Record.clear();
+  }
+}
+
+/// Parse the SYNC_SCOPE_NAMES block. Each name is mapped, in order of
+/// appearance, to a synchronization scope ID which is appended to SSIDs.
+Error BitcodeReader::parseSyncScopeNames() {
+  if (Error Err = Stream.EnterSubBlock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID))
+    return Err;
+
+  // The block may appear at most once per module.
+  if (!SSIDs.empty())
+    return error("Invalid multiple synchronization scope names blocks");
+
+  SmallVector<uint64_t, 64> Record;
+  while (true) {
+    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    const BitstreamEntry &Entry = MaybeEntry.get();
+
+    if (Entry.Kind == BitstreamEntry::EndBlock) {
+      // An empty block is malformed: it must define at least one scope.
+      if (SSIDs.empty())
+        return error("Invalid empty synchronization scope names block");
+      return Error::success();
+    }
+    // Anything other than a record (nested subblock, stream error) is
+    // malformed.
+    if (Entry.Kind != BitstreamEntry::Record)
+      return error("Malformed block");
+
+    // SYNC_SCOPE_NAME: [strchr x N] -- the only record kind allowed here.
+    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeRecord)
+      return MaybeRecord.takeError();
+    if (MaybeRecord.get() != bitc::SYNC_SCOPE_NAME)
+      return error("Invalid record");
+
+    SmallString<16> SSN;
+    if (convertToString(Record, 0, SSN))
+      return error("Invalid record");
+
+    // Scope names map to IDs implicitly by their order of appearance.
+    SSIDs.push_back(Context.getOrInsertSyncScopeID(SSN));
+    Record.clear();
+  }
+}
+
+/// Associate a value with its name from the given index in the provided record.
+///
+/// \param Record VST record: Record[0] is the value ID and the name's
+///        characters start at index \p NameIndex.
+/// \param NameIndex index into \p Record where the name characters begin.
+/// \param TT the module's target triple, used to decide whether COMDATs are
+///        supported.
+/// \returns the renamed value, or an Error for a malformed record or a name
+///          containing a NUL byte.
+Expected<Value *> BitcodeReader::recordValue(SmallVectorImpl<uint64_t> &Record,
+                                             unsigned NameIndex, Triple &TT) {
+  SmallString<128> ValueName;
+  if (convertToString(Record, NameIndex, ValueName))
+    return error("Invalid record");
+  unsigned ValueID = Record[0];
+  if (ValueID >= ValueList.size() || !ValueList[ValueID])
+    return error("Invalid record");
+  Value *V = ValueList[ValueID];
+
+  // Embedded NUL bytes are not permitted in value names.
+  StringRef NameStr(ValueName.data(), ValueName.size());
+  if (NameStr.find_first_of(0) != StringRef::npos)
+    return error("Invalid value name");
+  V->setName(NameStr);
+  // Objects recorded as needing an implicit comdat get one named after the
+  // value -- but only when the target triple supports COMDATs.
+  auto *GO = dyn_cast<GlobalObject>(V);
+  if (GO && ImplicitComdatObjects.contains(GO) && TT.supportsCOMDAT())
+    GO->setComdat(TheModule->getOrInsertComdat(V->getName()));
+  return V;
+}
+
+/// Helper to note and return the current location, and jump to the given
+/// offset.
+///
+/// \param Offset offset of the value symbol table, in 32-bit words.
+/// \param Stream the bitstream cursor to reposition.
+/// \returns the bit position prior to the jump, so the caller can return
+///          there after reading the VST, or the error from the failed jump.
+static Expected<uint64_t> jumpToValueSymbolTable(uint64_t Offset,
+                                                 BitstreamCursor &Stream) {
+  // Save the current parsing location so we can jump back at the end
+  // of the VST read.
+  uint64_t CurrentBit = Stream.GetCurrentBitNo();
+  // The stored offset counts 32-bit words, hence the * 32.
+  if (Error JumpFailed = Stream.JumpToBit(Offset * 32))
+    return std::move(JumpFailed);
+  Expected<BitstreamEntry> MaybeEntry = Stream.advance();
+  if (!MaybeEntry)
+    return MaybeEntry.takeError();
+  // The offset must land on the ENTER_SUBBLOCK of the VST block.
+  assert(MaybeEntry.get().Kind == BitstreamEntry::SubBlock);
+  assert(MaybeEntry.get().ID == bitc::VALUE_SYMTAB_BLOCK_ID);
+  return CurrentBit;
+}
+
+/// Record the bit offset at which the body of function \p F can later be
+/// lazily materialized.
+///
+/// \param FuncBitcodeOffsetDelta adjustment (in bits) between the word
+///        offsets stored in the VST and the position the lazy reader's
+///        EnterSubBlock expects.
+/// \param F the function whose body parsing is being deferred.
+/// \param Record VST record; Record[1] holds the function's word offset.
+void BitcodeReader::setDeferredFunctionInfo(unsigned FuncBitcodeOffsetDelta,
+                                            Function *F,
+                                            ArrayRef<uint64_t> Record) {
+  // The stored offset is relative to one word before the start of the
+  // identification or module block, which was historically always the start
+  // of the regular bitcode header; subtract 1 to compensate.
+  const uint64_t WordOffset = Record[1] - 1;
+  const uint64_t BitOffset = WordOffset * 32;
+  DeferredFunctionInfo[F] = BitOffset + FuncBitcodeOffsetDelta;
+  // Track the furthest function block seen, so that when parsing resumes
+  // after function materialization the last function block can be skipped.
+  if (LastFunctionBlockBit < BitOffset)
+    LastFunctionBlockBit = BitOffset;
+}
+
+/// Read a new-style GlobalValue symbol table.
+///
+/// In this form the names live in the string table, so the block only
+/// carries function bit offsets used for deferred (lazy) materialization.
+Error BitcodeReader::parseGlobalValueSymbolTable() {
+  // Capture the offset delta before EnterSubBlock, which changes the
+  // stream's abbrev ID width (see the comment in parseValueSymbolTable).
+  unsigned FuncBitcodeOffsetDelta =
+      Stream.getAbbrevIDWidth() + bitc::BlockIDWidth;
+
+  if (Error Err = Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
+    return Err;
+
+  SmallVector<uint64_t, 64> Record;
+  while (true) {
+    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    BitstreamEntry Entry = MaybeEntry.get();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock:
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      break;
+    }
+
+    Record.clear();
+    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeRecord)
+      return MaybeRecord.takeError();
+    // Record kinds other than FNENTRY are silently ignored here.
+    switch (MaybeRecord.get()) {
+    case bitc::VST_CODE_FNENTRY: // [valueid, offset]
+      setDeferredFunctionInfo(FuncBitcodeOffsetDelta,
+                              cast<Function>(ValueList[Record[0]]), Record);
+      break;
+    }
+  }
+}
+
+/// Parse the value symbol table at either the current parsing location or
+/// at the given bit offset if provided.
+///
+/// \param Offset if non-zero, the 32-bit-word offset of the module-level VST
+///        to jump to; 0 means parse a function-level VST in place.
+Error BitcodeReader::parseValueSymbolTable(uint64_t Offset) {
+  // Only initialized -- and later restored -- when Offset > 0.
+  uint64_t CurrentBit;
+  // Pass in the Offset to distinguish between calling for the module-level
+  // VST (where we want to jump to the VST offset) and the function-level
+  // VST (where we don't).
+  if (Offset > 0) {
+    Expected<uint64_t> MaybeCurrentBit = jumpToValueSymbolTable(Offset, Stream);
+    if (!MaybeCurrentBit)
+      return MaybeCurrentBit.takeError();
+    CurrentBit = MaybeCurrentBit.get();
+    // If this module uses a string table, read this as a module-level VST.
+    if (UseStrtab) {
+      if (Error Err = parseGlobalValueSymbolTable())
+        return Err;
+      if (Error JumpFailed = Stream.JumpToBit(CurrentBit))
+        return JumpFailed;
+      return Error::success();
+    }
+    // Otherwise, the VST will be in a similar format to a function-level VST,
+    // and will contain symbol names.
+  }
+
+  // Compute the delta between the bitcode indices in the VST (the word offset
+  // to the word-aligned ENTER_SUBBLOCK for the function block, and that
+  // expected by the lazy reader. The reader's EnterSubBlock expects to have
+  // already read the ENTER_SUBBLOCK code (size getAbbrevIDWidth) and BlockID
+  // (size BlockIDWidth). Note that we access the stream's AbbrevID width here
+  // just before entering the VST subblock because: 1) the EnterSubBlock
+  // changes the AbbrevID width; 2) the VST block is nested within the same
+  // outer MODULE_BLOCK as the FUNCTION_BLOCKs and therefore have the same
+  // AbbrevID width before calling EnterSubBlock; and 3) when we want to
+  // jump to the FUNCTION_BLOCK using this offset later, we don't want
+  // to rely on the stream's AbbrevID width being that of the MODULE_BLOCK.
+  unsigned FuncBitcodeOffsetDelta =
+      Stream.getAbbrevIDWidth() + bitc::BlockIDWidth;
+
+  if (Error Err = Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
+    return Err;
+
+  SmallVector<uint64_t, 64> Record;
+
+  Triple TT(TheModule->getTargetTriple());
+
+  // Read all the records for this value table.
+  SmallString<128> ValueName;
+
+  while (true) {
+    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    BitstreamEntry Entry = MaybeEntry.get();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      // If we jumped here for a module-level VST, restore the saved position.
+      if (Offset > 0)
+        if (Error JumpFailed = Stream.JumpToBit(CurrentBit))
+          return JumpFailed;
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeRecord)
+      return MaybeRecord.takeError();
+    switch (MaybeRecord.get()) {
+    default: // Default behavior: unknown type.
+      break;
+    case bitc::VST_CODE_ENTRY: { // VST_CODE_ENTRY: [valueid, namechar x N]
+      Expected<Value *> ValOrErr = recordValue(Record, 1, TT);
+      if (Error Err = ValOrErr.takeError())
+        return Err;
+      // Result intentionally unused; recordValue already applied the name.
+      ValOrErr.get();
+      break;
+    }
+    case bitc::VST_CODE_FNENTRY: {
+      // VST_CODE_FNENTRY: [valueid, offset, namechar x N]
+      Expected<Value *> ValOrErr = recordValue(Record, 2, TT);
+      if (Error Err = ValOrErr.takeError())
+        return Err;
+      Value *V = ValOrErr.get();
+
+      // Ignore function offsets emitted for aliases of functions in older
+      // versions of LLVM.
+      if (auto *F = dyn_cast<Function>(V))
+        setDeferredFunctionInfo(FuncBitcodeOffsetDelta, F, Record);
+      break;
+    }
+    case bitc::VST_CODE_BBENTRY: {
+      // Names a basic block: Record[0] is the block ID, the name follows.
+      if (convertToString(Record, 1, ValueName))
+        return error("Invalid record");
+      BasicBlock *BB = getBasicBlock(Record[0]);
+      if (!BB)
+        return error("Invalid record");
+
+      BB->setName(StringRef(ValueName.data(), ValueName.size()));
+      ValueName.clear();
+      break;
+    }
+    }
+  }
+}
+
+/// Decode a signed value stored with the sign bit in the LSB for dense VBR
+/// encoding.
+uint64_t BitcodeReader::decodeSignRotatedValue(uint64_t V) {
+  // Even encodings are non-negative: the payload is simply V / 2.
+  if (!(V & 1))
+    return V >> 1;
+  // Odd encodings are negative. The payload 0 ("-0") has no integer
+  // counterpart, so it is reserved to represent the most negative value.
+  return V != 1 ? -(V >> 1) : (1ULL << 63);
+}
+
+/// Resolve all of the initializers for global values and aliases that we can.
+///
+/// Entries whose value ID is not yet available (it refers to something later
+/// in the file) are re-queued on the pending lists to be retried by a later
+/// call.
+Error BitcodeReader::resolveGlobalAndIndirectSymbolInits() {
+  std::vector<std::pair<GlobalVariable *, unsigned>> GlobalInitWorklist;
+  std::vector<std::pair<GlobalValue *, unsigned>> IndirectSymbolInitWorklist;
+  std::vector<FunctionOperandInfo> FunctionOperandWorklist;
+
+  // Take ownership of the pending lists; unresolved entries get re-queued.
+  GlobalInitWorklist.swap(GlobalInits);
+  IndirectSymbolInitWorklist.swap(IndirectSymbolInits);
+  FunctionOperandWorklist.swap(FunctionOperands);
+
+  while (!GlobalInitWorklist.empty()) {
+    unsigned ValID = GlobalInitWorklist.back().second;
+    if (ValID >= ValueList.size()) {
+      // Not ready to resolve this yet, it requires something later in the file.
+      GlobalInits.push_back(GlobalInitWorklist.back());
+    } else {
+      if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+        GlobalInitWorklist.back().first->setInitializer(C);
+      else
+        return error("Expected a constant");
+    }
+    GlobalInitWorklist.pop_back();
+  }
+
+  while (!IndirectSymbolInitWorklist.empty()) {
+    unsigned ValID = IndirectSymbolInitWorklist.back().second;
+    if (ValID >= ValueList.size()) {
+      // Not ready yet; retry after more of the file has been parsed.
+      IndirectSymbolInits.push_back(IndirectSymbolInitWorklist.back());
+    } else {
+      Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]);
+      if (!C)
+        return error("Expected a constant");
+      GlobalValue *GV = IndirectSymbolInitWorklist.back().first;
+      if (auto *GA = dyn_cast<GlobalAlias>(GV)) {
+        if (C->getType() != GV->getType())
+          return error("Alias and aliasee types don't match");
+        GA->setAliasee(C);
+      } else if (auto *GI = dyn_cast<GlobalIFunc>(GV)) {
+        Type *ResolverFTy =
+            GlobalIFunc::getResolverFunctionType(GI->getValueType());
+        // Transparently fix up the type for compatiblity with older bitcode
+        GI->setResolver(
+            ConstantExpr::getBitCast(C, ResolverFTy->getPointerTo()));
+      } else {
+        return error("Expected an alias or an ifunc");
+      }
+    }
+    IndirectSymbolInitWorklist.pop_back();
+  }
+
+  while (!FunctionOperandWorklist.empty()) {
+    FunctionOperandInfo &Info = FunctionOperandWorklist.back();
+    // Each operand slot stores (value ID + 1); 0 means unset/resolved.
+    if (Info.PersonalityFn) {
+      unsigned ValID = Info.PersonalityFn - 1;
+      if (ValID < ValueList.size()) {
+        if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+          Info.F->setPersonalityFn(C);
+        else
+          return error("Expected a constant");
+        Info.PersonalityFn = 0;
+      }
+    }
+    if (Info.Prefix) {
+      unsigned ValID = Info.Prefix - 1;
+      if (ValID < ValueList.size()) {
+        if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+          Info.F->setPrefixData(C);
+        else
+          return error("Expected a constant");
+        Info.Prefix = 0;
+      }
+    }
+    if (Info.Prologue) {
+      unsigned ValID = Info.Prologue - 1;
+      if (ValID < ValueList.size()) {
+        if (Constant *C = dyn_cast_or_null<Constant>(ValueList[ValID]))
+          Info.F->setPrologueData(C);
+        else
+          return error("Expected a constant");
+        Info.Prologue = 0;
+      }
+    }
+    // Anything still unresolved goes back on the pending list.
+    if (Info.PersonalityFn || Info.Prefix || Info.Prologue)
+      FunctionOperands.push_back(Info);
+    FunctionOperandWorklist.pop_back();
+  }
+
+  return Error::success();
+}
+
+/// Reassemble a wide integer from its sign-rotated 64-bit chunks.
+APInt llvm::readWideAPInt(ArrayRef<uint64_t> Vals, unsigned TypeBits) {
+  SmallVector<uint64_t, 8> Words;
+  Words.reserve(Vals.size());
+  // Each chunk was stored with the dense sign-rotated VBR encoding.
+  for (uint64_t V : Vals)
+    Words.push_back(BitcodeReader::decodeSignRotatedValue(V));
+  return APInt(TypeBits, Words);
+}
+
+Error BitcodeReader::parseConstants() {
+ if (Error Err = Stream.EnterSubBlock(bitc::CONSTANTS_BLOCK_ID))
+ return Err;
+
+ SmallVector<uint64_t, 64> Record;
+
+ // Read all the records for this value table.
+ Type *CurTy = Type::getInt32Ty(Context);
+ unsigned NextCstNo = ValueList.size();
+
+ struct DelayedShufTy {
+ VectorType *OpTy;
+ VectorType *RTy;
+ uint64_t Op0Idx;
+ uint64_t Op1Idx;
+ uint64_t Op2Idx;
+ unsigned CstNo;
+ };
+ std::vector<DelayedShufTy> DelayedShuffles;
+ struct DelayedSelTy {
+ Type *OpTy;
+ uint64_t Op0Idx;
+ uint64_t Op1Idx;
+ uint64_t Op2Idx;
+ unsigned CstNo;
+ };
+ std::vector<DelayedSelTy> DelayedSelectors;
+
+ while (true) {
+ Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ // Once all the constants have been read, go through and resolve forward
+ // references.
+ //
+ // We have to treat shuffles specially because they don't have three
+ // operands anymore. We need to convert the shuffle mask into an array,
+ // and we can't convert a forward reference.
+ for (auto &DelayedShuffle : DelayedShuffles) {
+ VectorType *OpTy = DelayedShuffle.OpTy;
+ VectorType *RTy = DelayedShuffle.RTy;
+ uint64_t Op0Idx = DelayedShuffle.Op0Idx;
+ uint64_t Op1Idx = DelayedShuffle.Op1Idx;
+ uint64_t Op2Idx = DelayedShuffle.Op2Idx;
+ uint64_t CstNo = DelayedShuffle.CstNo;
+ Constant *Op0 = ValueList.getConstantFwdRef(Op0Idx, OpTy);
+ Constant *Op1 = ValueList.getConstantFwdRef(Op1Idx, OpTy);
+ Type *ShufTy =
+ VectorType::get(Type::getInt32Ty(Context), RTy->getElementCount());
+ Constant *Op2 = ValueList.getConstantFwdRef(Op2Idx, ShufTy);
+ if (!ShuffleVectorInst::isValidOperands(Op0, Op1, Op2))
+ return error("Invalid shufflevector operands");
+ SmallVector<int, 16> Mask;
+ ShuffleVectorInst::getShuffleMask(Op2, Mask);
+ Value *V = ConstantExpr::getShuffleVector(Op0, Op1, Mask);
+ ValueList.assignValue(V, CstNo);
+ }
+ for (auto &DelayedSelector : DelayedSelectors) {
+ Type *OpTy = DelayedSelector.OpTy;
+ Type *SelectorTy = Type::getInt1Ty(Context);
+ uint64_t Op0Idx = DelayedSelector.Op0Idx;
+ uint64_t Op1Idx = DelayedSelector.Op1Idx;
+ uint64_t Op2Idx = DelayedSelector.Op2Idx;
+ uint64_t CstNo = DelayedSelector.CstNo;
+ Constant *Op1 = ValueList.getConstantFwdRef(Op1Idx, OpTy);
+ Constant *Op2 = ValueList.getConstantFwdRef(Op2Idx, OpTy);
+ // The selector might be an i1 or an <n x i1>
+ // Get the type from the ValueList before getting a forward ref.
+ if (VectorType *VTy = dyn_cast<VectorType>(OpTy)) {
+ Value *V = ValueList[Op0Idx];
+ assert(V);
+ if (SelectorTy != V->getType())
+ SelectorTy = VectorType::get(SelectorTy, VTy->getElementCount());
+ }
+ Constant *Op0 = ValueList.getConstantFwdRef(Op0Idx, SelectorTy);
+ Value *V = ConstantExpr::getSelect(Op0, Op1, Op2);
+ ValueList.assignValue(V, CstNo);
+ }
+
+ if (NextCstNo != ValueList.size())
+ return error("Invalid constant reference");
+
+ ValueList.resolveConstantForwardRefs();
+ return Error::success();
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Record.clear();
+ Type *VoidType = Type::getVoidTy(Context);
+ Value *V = nullptr;
+ Expected<unsigned> MaybeBitCode = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeBitCode)
+ return MaybeBitCode.takeError();
+ switch (unsigned BitCode = MaybeBitCode.get()) {
+ default: // Default behavior: unknown constant
+ case bitc::CST_CODE_UNDEF: // UNDEF
+ V = UndefValue::get(CurTy);
+ break;
+ case bitc::CST_CODE_POISON: // POISON
+ V = PoisonValue::get(CurTy);
+ break;
+ case bitc::CST_CODE_SETTYPE: // SETTYPE: [typeid]
+ if (Record.empty())
+ return error("Invalid record");
+ if (Record[0] >= TypeList.size() || !TypeList[Record[0]])
+ return error("Invalid record");
+ if (TypeList[Record[0]] == VoidType)
+ return error("Invalid constant type");
+ CurTy = TypeList[Record[0]];
+ continue; // Skip the ValueList manipulation.
+ case bitc::CST_CODE_NULL: // NULL
+ if (CurTy->isVoidTy() || CurTy->isFunctionTy() || CurTy->isLabelTy())
+ return error("Invalid type for a constant null value");
+ V = Constant::getNullValue(CurTy);
+ break;
+ case bitc::CST_CODE_INTEGER: // INTEGER: [intval]
+ if (!CurTy->isIntegerTy() || Record.empty())
+ return error("Invalid record");
+ V = ConstantInt::get(CurTy, decodeSignRotatedValue(Record[0]));
+ break;
+ case bitc::CST_CODE_WIDE_INTEGER: {// WIDE_INTEGER: [n x intval]
+ if (!CurTy->isIntegerTy() || Record.empty())
+ return error("Invalid record");
+
+ APInt VInt =
+ readWideAPInt(Record, cast<IntegerType>(CurTy)->getBitWidth());
+ V = ConstantInt::get(Context, VInt);
+
+ break;
+ }
+ case bitc::CST_CODE_FLOAT: { // FLOAT: [fpval]
+ if (Record.empty())
+ return error("Invalid record");
+ if (CurTy->isHalfTy())
+ V = ConstantFP::get(Context, APFloat(APFloat::IEEEhalf(),
+ APInt(16, (uint16_t)Record[0])));
+ else if (CurTy->isBFloatTy())
+ V = ConstantFP::get(Context, APFloat(APFloat::BFloat(),
+ APInt(16, (uint32_t)Record[0])));
+ else if (CurTy->isFloatTy())
+ V = ConstantFP::get(Context, APFloat(APFloat::IEEEsingle(),
+ APInt(32, (uint32_t)Record[0])));
+ else if (CurTy->isDoubleTy())
+ V = ConstantFP::get(Context, APFloat(APFloat::IEEEdouble(),
+ APInt(64, Record[0])));
+ else if (CurTy->isX86_FP80Ty()) {
+ // Bits are not stored the same way as a normal i80 APInt, compensate.
+ uint64_t Rearrange[2];
+ Rearrange[0] = (Record[1] & 0xffffLL) | (Record[0] << 16);
+ Rearrange[1] = Record[0] >> 48;
+ V = ConstantFP::get(Context, APFloat(APFloat::x87DoubleExtended(),
+ APInt(80, Rearrange)));
+ } else if (CurTy->isFP128Ty())
+ V = ConstantFP::get(Context, APFloat(APFloat::IEEEquad(),
+ APInt(128, Record)));
+ else if (CurTy->isPPC_FP128Ty())
+ V = ConstantFP::get(Context, APFloat(APFloat::PPCDoubleDouble(),
+ APInt(128, Record)));
+ else
+ V = UndefValue::get(CurTy);
+ break;
+ }
+
+ case bitc::CST_CODE_AGGREGATE: {// AGGREGATE: [n x value number]
+ if (Record.empty())
+ return error("Invalid record");
+
+ unsigned Size = Record.size();
+ SmallVector<Constant*, 16> Elts;
+
+ if (StructType *STy = dyn_cast<StructType>(CurTy)) {
+ for (unsigned i = 0; i != Size; ++i)
+ Elts.push_back(ValueList.getConstantFwdRef(Record[i],
+ STy->getElementType(i)));
+ V = ConstantStruct::get(STy, Elts);
+ } else if (ArrayType *ATy = dyn_cast<ArrayType>(CurTy)) {
+ Type *EltTy = ATy->getElementType();
+ for (unsigned i = 0; i != Size; ++i)
+ Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
+ V = ConstantArray::get(ATy, Elts);
+ } else if (VectorType *VTy = dyn_cast<VectorType>(CurTy)) {
+ Type *EltTy = VTy->getElementType();
+ for (unsigned i = 0; i != Size; ++i)
+ Elts.push_back(ValueList.getConstantFwdRef(Record[i], EltTy));
+ V = ConstantVector::get(Elts);
+ } else {
+ V = UndefValue::get(CurTy);
+ }
+ break;
+ }
+ case bitc::CST_CODE_STRING: // STRING: [values]
+ case bitc::CST_CODE_CSTRING: { // CSTRING: [values]
+ if (Record.empty())
+ return error("Invalid record");
+
+ SmallString<16> Elts(Record.begin(), Record.end());
+ V = ConstantDataArray::getString(Context, Elts,
+ BitCode == bitc::CST_CODE_CSTRING);
+ break;
+ }
+ case bitc::CST_CODE_DATA: {// DATA: [n x value]
+ if (Record.empty())
+ return error("Invalid record");
+
+ Type *EltTy;
+ if (auto *Array = dyn_cast<ArrayType>(CurTy))
+ EltTy = Array->getElementType();
+ else
+ EltTy = cast<VectorType>(CurTy)->getElementType();
+ if (EltTy->isIntegerTy(8)) {
+ SmallVector<uint8_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isIntegerTy(16)) {
+ SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isIntegerTy(32)) {
+ SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isIntegerTy(64)) {
+ SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::get(Context, Elts);
+ else
+ V = ConstantDataArray::get(Context, Elts);
+ } else if (EltTy->isHalfTy()) {
+ SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::getFP(EltTy, Elts);
+ else
+ V = ConstantDataArray::getFP(EltTy, Elts);
+ } else if (EltTy->isBFloatTy()) {
+ SmallVector<uint16_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::getFP(EltTy, Elts);
+ else
+ V = ConstantDataArray::getFP(EltTy, Elts);
+ } else if (EltTy->isFloatTy()) {
+ SmallVector<uint32_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::getFP(EltTy, Elts);
+ else
+ V = ConstantDataArray::getFP(EltTy, Elts);
+ } else if (EltTy->isDoubleTy()) {
+ SmallVector<uint64_t, 16> Elts(Record.begin(), Record.end());
+ if (isa<VectorType>(CurTy))
+ V = ConstantDataVector::getFP(EltTy, Elts);
+ else
+ V = ConstantDataArray::getFP(EltTy, Elts);
+ } else {
+ return error("Invalid type for value");
+ }
+ break;
+ }
+ case bitc::CST_CODE_CE_UNOP: { // CE_UNOP: [opcode, opval]
+ if (Record.size() < 2)
+ return error("Invalid record");
+ int Opc = getDecodedUnaryOpcode(Record[0], CurTy);
+ if (Opc < 0) {
+ V = UndefValue::get(CurTy); // Unknown unop.
+ } else {
+ Constant *LHS = ValueList.getConstantFwdRef(Record[1], CurTy);
+ unsigned Flags = 0;
+ V = ConstantExpr::get(Opc, LHS, Flags);
+ }
+ break;
+ }
+ case bitc::CST_CODE_CE_BINOP: { // CE_BINOP: [opcode, opval, opval]
+ if (Record.size() < 3)
+ return error("Invalid record");
+ int Opc = getDecodedBinaryOpcode(Record[0], CurTy);
+ if (Opc < 0) {
+ V = UndefValue::get(CurTy); // Unknown binop.
+ } else {
+ Constant *LHS = ValueList.getConstantFwdRef(Record[1], CurTy);
+ Constant *RHS = ValueList.getConstantFwdRef(Record[2], CurTy);
+ unsigned Flags = 0;
+ if (Record.size() >= 4) {
+ if (Opc == Instruction::Add ||
+ Opc == Instruction::Sub ||
+ Opc == Instruction::Mul ||
+ Opc == Instruction::Shl) {
+ if (Record[3] & (1 << bitc::OBO_NO_SIGNED_WRAP))
+ Flags |= OverflowingBinaryOperator::NoSignedWrap;
+ if (Record[3] & (1 << bitc::OBO_NO_UNSIGNED_WRAP))
+ Flags |= OverflowingBinaryOperator::NoUnsignedWrap;
+ } else if (Opc == Instruction::SDiv ||
+ Opc == Instruction::UDiv ||
+ Opc == Instruction::LShr ||
+ Opc == Instruction::AShr) {
+ if (Record[3] & (1 << bitc::PEO_EXACT))
+ Flags |= SDivOperator::IsExact;
+ }
+ }
+ V = ConstantExpr::get(Opc, LHS, RHS, Flags);
+ }
+ break;
+ }
+ case bitc::CST_CODE_CE_CAST: { // CE_CAST: [opcode, opty, opval]
+ if (Record.size() < 3)
+ return error("Invalid record");
+ int Opc = getDecodedCastOpcode(Record[0]);
+ if (Opc < 0) {
+ V = UndefValue::get(CurTy); // Unknown cast.
+ } else {
+ Type *OpTy = getTypeByID(Record[1]);
+ if (!OpTy)
+ return error("Invalid record");
+ Constant *Op = ValueList.getConstantFwdRef(Record[2], OpTy);
+ V = UpgradeBitCastExpr(Opc, Op, CurTy);
+ if (!V) V = ConstantExpr::getCast(Opc, Op, CurTy);
+ }
+ break;
+ }
+ case bitc::CST_CODE_CE_INBOUNDS_GEP: // [ty, n x operands]
+ case bitc::CST_CODE_CE_GEP: // [ty, n x operands]
+ case bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX: { // [ty, flags, n x
+ // operands]
+ unsigned OpNum = 0;
+ Type *PointeeType = nullptr;
+ if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX ||
+ Record.size() % 2)
+ PointeeType = getTypeByID(Record[OpNum++]);
+
+ bool InBounds = false;
+ Optional<unsigned> InRangeIndex;
+ if (BitCode == bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX) {
+ uint64_t Op = Record[OpNum++];
+ InBounds = Op & 1;
+ InRangeIndex = Op >> 1;
+ } else if (BitCode == bitc::CST_CODE_CE_INBOUNDS_GEP)
+ InBounds = true;
+
+ SmallVector<Constant*, 16> Elts;
+ Type *Elt0FullTy = nullptr;
+ while (OpNum != Record.size()) {
+ if (!Elt0FullTy)
+ Elt0FullTy = getTypeByID(Record[OpNum]);
+ Type *ElTy = getTypeByID(Record[OpNum++]);
+ if (!ElTy)
+ return error("Invalid record");
+ Elts.push_back(ValueList.getConstantFwdRef(Record[OpNum++], ElTy));
+ }
+
+ if (Elts.size() < 1)
+ return error("Invalid gep with no operands");
+
+ PointerType *OrigPtrTy = cast<PointerType>(Elt0FullTy->getScalarType());
+ if (!PointeeType)
+ PointeeType = OrigPtrTy->getPointerElementType();
+ else if (!OrigPtrTy->isOpaqueOrPointeeTypeMatches(PointeeType))
+ return error("Explicit gep operator type does not match pointee type "
+ "of pointer operand");
+
+ ArrayRef<Constant *> Indices(Elts.begin() + 1, Elts.end());
+ V = ConstantExpr::getGetElementPtr(PointeeType, Elts[0], Indices,
+ InBounds, InRangeIndex);
+ break;
+ }
+ case bitc::CST_CODE_CE_SELECT: { // CE_SELECT: [opval#, opval#, opval#]
+ if (Record.size() < 3)
+ return error("Invalid record");
+
+ DelayedSelectors.push_back(
+ {CurTy, Record[0], Record[1], Record[2], NextCstNo});
+ (void)ValueList.getConstantFwdRef(NextCstNo, CurTy);
+ ++NextCstNo;
+ continue;
+ }
+ case bitc::CST_CODE_CE_EXTRACTELT
+ : { // CE_EXTRACTELT: [opty, opval, opty, opval]
+ if (Record.size() < 3)
+ return error("Invalid record");
+ VectorType *OpTy =
+ dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
+ if (!OpTy)
+ return error("Invalid record");
+ Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
+ Constant *Op1 = nullptr;
+ if (Record.size() == 4) {
+ Type *IdxTy = getTypeByID(Record[2]);
+ if (!IdxTy)
+ return error("Invalid record");
+ Op1 = ValueList.getConstantFwdRef(Record[3], IdxTy);
+ } else {
+ // Deprecated, but still needed to read old bitcode files.
+ Op1 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
+ }
+ if (!Op1)
+ return error("Invalid record");
+ V = ConstantExpr::getExtractElement(Op0, Op1);
+ break;
+ }
+ case bitc::CST_CODE_CE_INSERTELT
+ : { // CE_INSERTELT: [opval, opval, opty, opval]
+ VectorType *OpTy = dyn_cast<VectorType>(CurTy);
+ if (Record.size() < 3 || !OpTy)
+ return error("Invalid record");
+ Constant *Op0 = ValueList.getConstantFwdRef(Record[0], OpTy);
+ Constant *Op1 = ValueList.getConstantFwdRef(Record[1],
+ OpTy->getElementType());
+ Constant *Op2 = nullptr;
+ if (Record.size() == 4) {
+ Type *IdxTy = getTypeByID(Record[2]);
+ if (!IdxTy)
+ return error("Invalid record");
+ Op2 = ValueList.getConstantFwdRef(Record[3], IdxTy);
+ } else {
+ // Deprecated, but still needed to read old bitcode files.
+ Op2 = ValueList.getConstantFwdRef(Record[2], Type::getInt32Ty(Context));
+ }
+ if (!Op2)
+ return error("Invalid record");
+ V = ConstantExpr::getInsertElement(Op0, Op1, Op2);
+ break;
+ }
+ case bitc::CST_CODE_CE_SHUFFLEVEC: { // CE_SHUFFLEVEC: [opval, opval, opval]
+ VectorType *OpTy = dyn_cast<VectorType>(CurTy);
+ if (Record.size() < 3 || !OpTy)
+ return error("Invalid record");
+ DelayedShuffles.push_back(
+ {OpTy, OpTy, Record[0], Record[1], Record[2], NextCstNo});
+ ++NextCstNo;
+ continue;
+ }
+ case bitc::CST_CODE_CE_SHUFVEC_EX: { // [opty, opval, opval, opval]
+ VectorType *RTy = dyn_cast<VectorType>(CurTy);
+ VectorType *OpTy =
+ dyn_cast_or_null<VectorType>(getTypeByID(Record[0]));
+ if (Record.size() < 4 || !RTy || !OpTy)
+ return error("Invalid record");
+ DelayedShuffles.push_back(
+ {OpTy, RTy, Record[1], Record[2], Record[3], NextCstNo});
+ ++NextCstNo;
+ continue;
+ }
+ case bitc::CST_CODE_CE_CMP: { // CE_CMP: [opty, opval, opval, pred]
+ if (Record.size() < 4)
+ return error("Invalid record");
+ Type *OpTy = getTypeByID(Record[0]);
+ if (!OpTy)
+ return error("Invalid record");
+ Constant *Op0 = ValueList.getConstantFwdRef(Record[1], OpTy);
+ Constant *Op1 = ValueList.getConstantFwdRef(Record[2], OpTy);
+
+ if (OpTy->isFPOrFPVectorTy())
+ V = ConstantExpr::getFCmp(Record[3], Op0, Op1);
+ else
+ V = ConstantExpr::getICmp(Record[3], Op0, Op1);
+ break;
+ }
+ // This maintains backward compatibility, pre-asm dialect keywords.
+ // Deprecated, but still needed to read old bitcode files.
+ case bitc::CST_CODE_INLINEASM_OLD: {
+ if (Record.size() < 2)
+ return error("Invalid record");
+ std::string AsmStr, ConstrStr;
+ bool HasSideEffects = Record[0] & 1;
+ bool IsAlignStack = Record[0] >> 1;
+ unsigned AsmStrSize = Record[1];
+ if (2+AsmStrSize >= Record.size())
+ return error("Invalid record");
+ unsigned ConstStrSize = Record[2+AsmStrSize];
+ if (3+AsmStrSize+ConstStrSize > Record.size())
+ return error("Invalid record");
+
+ for (unsigned i = 0; i != AsmStrSize; ++i)
+ AsmStr += (char)Record[2+i];
+ for (unsigned i = 0; i != ConstStrSize; ++i)
+ ConstrStr += (char)Record[3+AsmStrSize+i];
+ UpgradeInlineAsmString(&AsmStr);
+ // FIXME: support upgrading in opaque pointers mode.
+ V = InlineAsm::get(cast<FunctionType>(CurTy->getPointerElementType()),
+ AsmStr, ConstrStr, HasSideEffects, IsAlignStack);
+ break;
+ }
+ // This version adds support for the asm dialect keywords (e.g.,
+ // inteldialect).
+ case bitc::CST_CODE_INLINEASM_OLD2: {
+ if (Record.size() < 2)
+ return error("Invalid record");
+ std::string AsmStr, ConstrStr;
+ bool HasSideEffects = Record[0] & 1;
+ bool IsAlignStack = (Record[0] >> 1) & 1;
+ unsigned AsmDialect = Record[0] >> 2;
+ unsigned AsmStrSize = Record[1];
+ if (2+AsmStrSize >= Record.size())
+ return error("Invalid record");
+ unsigned ConstStrSize = Record[2+AsmStrSize];
+ if (3+AsmStrSize+ConstStrSize > Record.size())
+ return error("Invalid record");
+
+ for (unsigned i = 0; i != AsmStrSize; ++i)
+ AsmStr += (char)Record[2+i];
+ for (unsigned i = 0; i != ConstStrSize; ++i)
+ ConstrStr += (char)Record[3+AsmStrSize+i];
+ UpgradeInlineAsmString(&AsmStr);
+ // FIXME: support upgrading in opaque pointers mode.
+ V = InlineAsm::get(cast<FunctionType>(CurTy->getPointerElementType()),
+ AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+ InlineAsm::AsmDialect(AsmDialect));
+ break;
+ }
+ // This version adds support for the unwind keyword.
+ case bitc::CST_CODE_INLINEASM_OLD3: {
+ if (Record.size() < 2)
+ return error("Invalid record");
+ unsigned OpNum = 0;
+ std::string AsmStr, ConstrStr;
+ bool HasSideEffects = Record[OpNum] & 1;
+ bool IsAlignStack = (Record[OpNum] >> 1) & 1;
+ unsigned AsmDialect = (Record[OpNum] >> 2) & 1;
+ bool CanThrow = (Record[OpNum] >> 3) & 1;
+ ++OpNum;
+ unsigned AsmStrSize = Record[OpNum];
+ ++OpNum;
+ if (OpNum + AsmStrSize >= Record.size())
+ return error("Invalid record");
+ unsigned ConstStrSize = Record[OpNum + AsmStrSize];
+ if (OpNum + 1 + AsmStrSize + ConstStrSize > Record.size())
+ return error("Invalid record");
+
+ for (unsigned i = 0; i != AsmStrSize; ++i)
+ AsmStr += (char)Record[OpNum + i];
+ ++OpNum;
+ for (unsigned i = 0; i != ConstStrSize; ++i)
+ ConstrStr += (char)Record[OpNum + AsmStrSize + i];
+ UpgradeInlineAsmString(&AsmStr);
+ // FIXME: support upgrading in opaque pointers mode.
+ V = InlineAsm::get(cast<FunctionType>(CurTy->getPointerElementType()),
+ AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+ InlineAsm::AsmDialect(AsmDialect), CanThrow);
+ break;
+ }
+ // This version adds explicit function type.
+ case bitc::CST_CODE_INLINEASM: {
+ if (Record.size() < 3)
+ return error("Invalid record");
+ unsigned OpNum = 0;
+ auto *FnTy = dyn_cast_or_null<FunctionType>(getTypeByID(Record[OpNum]));
+ ++OpNum;
+ if (!FnTy)
+ return error("Invalid record");
+ std::string AsmStr, ConstrStr;
+ bool HasSideEffects = Record[OpNum] & 1;
+ bool IsAlignStack = (Record[OpNum] >> 1) & 1;
+ unsigned AsmDialect = (Record[OpNum] >> 2) & 1;
+ bool CanThrow = (Record[OpNum] >> 3) & 1;
+ ++OpNum;
+ unsigned AsmStrSize = Record[OpNum];
+ ++OpNum;
+ if (OpNum + AsmStrSize >= Record.size())
+ return error("Invalid record");
+ unsigned ConstStrSize = Record[OpNum + AsmStrSize];
+ if (OpNum + 1 + AsmStrSize + ConstStrSize > Record.size())
+ return error("Invalid record");
+
+ for (unsigned i = 0; i != AsmStrSize; ++i)
+ AsmStr += (char)Record[OpNum + i];
+ ++OpNum;
+ for (unsigned i = 0; i != ConstStrSize; ++i)
+ ConstrStr += (char)Record[OpNum + AsmStrSize + i];
+ UpgradeInlineAsmString(&AsmStr);
+ V = InlineAsm::get(FnTy, AsmStr, ConstrStr, HasSideEffects, IsAlignStack,
+ InlineAsm::AsmDialect(AsmDialect), CanThrow);
+ break;
+ }
+ case bitc::CST_CODE_BLOCKADDRESS:{
+ if (Record.size() < 3)
+ return error("Invalid record");
+ Type *FnTy = getTypeByID(Record[0]);
+ if (!FnTy)
+ return error("Invalid record");
+ Function *Fn =
+ dyn_cast_or_null<Function>(ValueList.getConstantFwdRef(Record[1],FnTy));
+ if (!Fn)
+ return error("Invalid record");
+
+ // If the function is already parsed we can insert the block address right
+ // away.
+ BasicBlock *BB;
+ unsigned BBID = Record[2];
+ if (!BBID)
+ // Invalid reference to entry block.
+ return error("Invalid ID");
+ if (!Fn->empty()) {
+ Function::iterator BBI = Fn->begin(), BBE = Fn->end();
+ for (size_t I = 0, E = BBID; I != E; ++I) {
+ if (BBI == BBE)
+ return error("Invalid ID");
+ ++BBI;
+ }
+ BB = &*BBI;
+ } else {
+ // Otherwise insert a placeholder and remember it so it can be inserted
+ // when the function is parsed.
+ auto &FwdBBs = BasicBlockFwdRefs[Fn];
+ if (FwdBBs.empty())
+ BasicBlockFwdRefQueue.push_back(Fn);
+ if (FwdBBs.size() < BBID + 1)
+ FwdBBs.resize(BBID + 1);
+ if (!FwdBBs[BBID])
+ FwdBBs[BBID] = BasicBlock::Create(Context);
+ BB = FwdBBs[BBID];
+ }
+ V = BlockAddress::get(Fn, BB);
+ break;
+ }
+ case bitc::CST_CODE_DSO_LOCAL_EQUIVALENT: {
+ if (Record.size() < 2)
+ return error("Invalid record");
+ Type *GVTy = getTypeByID(Record[0]);
+ if (!GVTy)
+ return error("Invalid record");
+ GlobalValue *GV = dyn_cast_or_null<GlobalValue>(
+ ValueList.getConstantFwdRef(Record[1], GVTy));
+ if (!GV)
+ return error("Invalid record");
+
+ V = DSOLocalEquivalent::get(GV);
+ break;
+ }
+ case bitc::CST_CODE_NO_CFI_VALUE: {
+ if (Record.size() < 2)
+ return error("Invalid record");
+ Type *GVTy = getTypeByID(Record[0]);
+ if (!GVTy)
+ return error("Invalid record");
+ GlobalValue *GV = dyn_cast_or_null<GlobalValue>(
+ ValueList.getConstantFwdRef(Record[1], GVTy));
+ if (!GV)
+ return error("Invalid record");
+ V = NoCFIValue::get(GV);
+ break;
+ }
+ }
+
+ ValueList.assignValue(V, NextCstNo);
+ ++NextCstNo;
+ }
+}
+
+// Parse the USELIST_BLOCK. Each record describes the writer's use-list order
+// for one value (or one basic block), so the reader can sort the materialized
+// uses back into that order for deterministic round-tripping.
+Error BitcodeReader::parseUseLists() {
+ if (Error Err = Stream.EnterSubBlock(bitc::USELIST_BLOCK_ID))
+ return Err;
+
+ // Read all the records.
+ SmallVector<uint64_t, 64> Record;
+
+ while (true) {
+ Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return Error::success();
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a use list record.
+ Record.clear();
+ bool IsBB = false;
+ Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeRecord)
+ return MaybeRecord.takeError();
+ switch (MaybeRecord.get()) {
+ default: // Default behavior: unknown type.
+ break;
+ case bitc::USELIST_CODE_BB:
+ IsBB = true;
+ LLVM_FALLTHROUGH;
+ case bitc::USELIST_CODE_DEFAULT: {
+ unsigned RecordLength = Record.size();
+ if (RecordLength < 3)
+ // Records should have at least an ID and two indexes.
+ return error("Invalid record");
+ // The last operand is the value (or basic block) ID; the remaining
+ // operands are the per-use ordering indexes.
+ unsigned ID = Record.pop_back_val();
+
+ Value *V;
+ if (IsBB) {
+ assert(ID < FunctionBBs.size() && "Basic block not found");
+ V = FunctionBBs[ID];
+ } else
+ V = ValueList[ID];
+ unsigned NumUses = 0;
+ SmallDenseMap<const Use *, unsigned, 16> Order;
+ // Map each currently-materialized use to its recorded position.
+ for (const Use &U : V->materialized_uses()) {
+ if (++NumUses > Record.size())
+ break;
+ Order[&U] = Record[NumUses - 1];
+ }
+ if (Order.size() != Record.size() || NumUses > Record.size())
+ // Mismatches can happen if the functions are being materialized lazily
+ // (out-of-order), or a value has been upgraded.
+ break;
+
+ V->sortUseList([&](const Use &L, const Use &R) {
+ return Order.lookup(&L) < Order.lookup(&R);
+ });
+ break;
+ }
+ }
+ }
+}
+
+/// When we see the block for metadata, remember where it is and then skip it.
+/// This lets us lazily deserialize the metadata.
+Error BitcodeReader::rememberAndSkipMetadata() {
+ // Save the current stream state. The saved bit offset is replayed later by
+ // materializeMetadata(), which walks DeferredMetadataInfo.
+ uint64_t CurBit = Stream.GetCurrentBitNo();
+ DeferredMetadataInfo.push_back(CurBit);
+
+ // Skip over the block for now.
+ if (Error Err = Stream.SkipBlock())
+ return Err;
+ return Error::success();
+}
+
+// Replay every metadata block whose bit position was recorded by
+// rememberAndSkipMetadata() and parse it for real, then apply the
+// "Linker Options" module-flag upgrade that requires the metadata to exist.
+Error BitcodeReader::materializeMetadata() {
+ for (uint64_t BitPos : DeferredMetadataInfo) {
+ // Move the bit stream to the saved position.
+ if (Error JumpFailed = Stream.JumpToBit(BitPos))
+ return JumpFailed;
+ if (Error Err = MDLoader->parseModuleMetadata())
+ return Err;
+ }
+
+ // Upgrade "Linker Options" module flag to "llvm.linker.options" module-level
+ // metadata. Only upgrade if the new option doesn't exist to avoid upgrade
+ // multiple times.
+ if (!TheModule->getNamedMetadata("llvm.linker.options")) {
+ if (Metadata *Val = TheModule->getModuleFlag("Linker Options")) {
+ NamedMDNode *LinkerOpts =
+ TheModule->getOrInsertNamedMetadata("llvm.linker.options")
+ for (const MDOperand &MDOptions : cast<MDNode>(Val)->operands())
+ LinkerOpts->addOperand(cast<MDNode>(MDOptions));
+ }
+ }
+
+ // All deferred blocks have now been parsed; don't parse them again.
+ DeferredMetadataInfo.clear();
+ return Error::success();
+}
+
+// Request stripping of debug info (sets the reader's StripDebugInfo flag).
+void BitcodeReader::setStripDebugInfo() { StripDebugInfo = true; }
+
+/// When we see the block for a function body, remember where it is and then
+/// skip it. This lets us lazily deserialize the functions.
+Error BitcodeReader::rememberAndSkipFunctionBody() {
+ // Get the function we are talking about. Bodies appear in the stream in the
+ // order of FunctionsWithBodies (reversed by the caller), so pop the next one.
+ if (FunctionsWithBodies.empty())
+ return error("Insufficient function protos");
+
+ Function *Fn = FunctionsWithBodies.back();
+ FunctionsWithBodies.pop_back();
+
+ // Save the current stream state so the body can be parsed on demand later.
+ uint64_t CurBit = Stream.GetCurrentBitNo();
+ assert(
+ (DeferredFunctionInfo[Fn] == 0 || DeferredFunctionInfo[Fn] == CurBit) &&
+ "Mismatch between VST and scanned function offsets");
+ DeferredFunctionInfo[Fn] = CurBit;
+
+ // Skip over the function block for now.
+ if (Error Err = Stream.SkipBlock())
+ return Err;
+ return Error::success();
+}
+
+// Finalize module-level state once all global records have been read:
+// resolve pending global/alias initializers, upgrade intrinsics and function
+// attributes, and swap in renamed global variables.
+Error BitcodeReader::globalCleanup() {
+ // Patch the initializers for globals and aliases up.
+ if (Error Err = resolveGlobalAndIndirectSymbolInits())
+ return Err;
+ if (!GlobalInits.empty() || !IndirectSymbolInits.empty())
+ return error("Malformed global initializer set");
+
+ // Look for intrinsic functions which need to be upgraded at some point
+ // and functions that need to have their function attributes upgraded.
+ for (Function &F : *TheModule) {
+ MDLoader->upgradeDebugIntrinsics(F);
+ Function *NewFn;
+ if (UpgradeIntrinsicFunction(&F, NewFn))
+ UpgradedIntrinsics[&F] = NewFn;
+ else if (auto Remangled = Intrinsic::remangleIntrinsicFunction(&F))
+ // Some types could be renamed during loading if several modules are
+ // loaded in the same LLVMContext (LTO scenario). In this case we should
+ // remangle intrinsics names as well.
+ RemangledIntrinsics[&F] = Remangled.getValue();
+ // Look for functions that rely on old function attribute behavior.
+ UpgradeFunctionAttributes(F);
+ }
+
+ // Look for global variables which need to be renamed.
+ std::vector<std::pair<GlobalVariable *, GlobalVariable *>> UpgradedVariables;
+ for (GlobalVariable &GV : TheModule->globals())
+ if (GlobalVariable *Upgraded = UpgradeGlobalVariable(&GV))
+ UpgradedVariables.emplace_back(&GV, Upgraded);
+ // Erase originals only after the scan, to avoid invalidating the iteration.
+ for (auto &Pair : UpgradedVariables) {
+ Pair.first->eraseFromParent();
+ TheModule->getGlobalList().push_back(Pair.second);
+ }
+
+ // Force deallocation of memory for these vectors to favor the client that
+ // want lazy deserialization.
+ std::vector<std::pair<GlobalVariable *, unsigned>>().swap(GlobalInits);
+ std::vector<std::pair<GlobalValue *, unsigned>>().swap(IndirectSymbolInits);
+ return Error::success();
+}
+
+/// Support for lazy parsing of function bodies. This is required if we
+/// either have an old bitcode file without a VST forward declaration record,
+/// or if we have an anonymous function being materialized, since anonymous
+/// functions do not have a name and are therefore not in the VST.
+Error BitcodeReader::rememberAndSkipFunctionBodies() {
+ // Resume scanning at the first bit not yet consumed by a previous scan.
+ if (Error JumpFailed = Stream.JumpToBit(NextUnreadBit))
+ return JumpFailed;
+
+ if (Stream.AtEndOfStream())
+ return error("Could not find function in stream");
+
+ if (!SeenFirstFunctionBody)
+ return error("Trying to materialize functions before seeing function blocks")
+
+ // An old bitcode file with the symbol table at the end would have
+ // finished the parse greedily.
+ assert(SeenValueSymbolTable);
+
+ SmallVector<uint64_t, 64> Record;
+
+ // Scan forward until the next FUNCTION_BLOCK; record its offset and stop.
+ while (true) {
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ default:
+ return error("Expect SubBlock");
+ case BitstreamEntry::SubBlock:
+ switch (Entry.ID) {
+ default:
+ return error("Expect function block");
+ case bitc::FUNCTION_BLOCK_ID:
+ if (Error Err = rememberAndSkipFunctionBody())
+ return Err;
+ NextUnreadBit = Stream.GetCurrentBitNo();
+ return Error::success();
+ }
+ }
+ }
+}
+
+// Read the stream's BLOCKINFO block into BlockInfo. Returns true on failure
+// (note: inverted convention relative to the Error-returning parsers here).
+bool BitcodeReaderBase::readBlockInfo() {
+ Expected<Optional<BitstreamBlockInfo>> MaybeNewBlockInfo =
+ Stream.ReadBlockInfoBlock();
+ if (!MaybeNewBlockInfo)
+ return true; // FIXME Handle the error.
+ Optional<BitstreamBlockInfo> NewBlockInfo =
+ std::move(MaybeNewBlockInfo.get());
+ if (!NewBlockInfo)
+ return true;
+ BlockInfo = std::move(*NewBlockInfo);
+ return false;
+}
+
+// Parse a single MODULE_CODE_COMDAT record and register the comdat with the
+// module. v2 records carry the name via the string table; v1 records carry
+// it inline.
+Error BitcodeReader::parseComdatRecord(ArrayRef<uint64_t> Record) {
+ // v1: [selection_kind, name]
+ // v2: [strtab_offset, strtab_size, selection_kind]
+ StringRef Name;
+ std::tie(Name, Record) = readNameFromStrtab(Record);
+
+ if (Record.empty())
+ return error("Invalid record");
+ Comdat::SelectionKind SK = getDecodedComdatSelectionKind(Record[0]);
+ std::string OldFormatName;
+ if (!UseStrtab) {
+ // v1 records carry the name inline as [size, chars...] after the kind.
+ if (Record.size() < 2)
+ return error("Invalid record");
+ unsigned ComdatNameSize = Record[1];
+ // Reject a declared name length that runs past the end of the record;
+ // otherwise the copy loop below would read Record out of bounds on
+ // malformed (e.g. fuzzed) bitcode.
+ if (ComdatNameSize > Record.size() - 2)
+ return error("Comdat name size too large");
+ OldFormatName.reserve(ComdatNameSize);
+ for (unsigned i = 0; i != ComdatNameSize; ++i)
+ OldFormatName += (char)Record[2 + i];
+ Name = OldFormatName;
+ }
+ Comdat *C = TheModule->getOrInsertComdat(Name);
+ C->setSelectionKind(SK);
+ ComdatList.push_back(C);
+ return Error::success();
+}
+
+// Derive the dso_local flag for a freshly parsed global when the bitcode did
+// not encode it. Called after each global/function/alias record below; this
+// only ever sets the flag, it never clears one set from the record.
+static void inferDSOLocal(GlobalValue *GV) {
+ // infer dso_local from linkage and visibility if it is not encoded.
+ if (GV->hasLocalLinkage() ||
+ (!GV->hasDefaultVisibility() && !GV->hasExternalWeakLinkage()))
+ GV->setDSOLocal(true);
+}
+
+// Parse one MODULE_CODE_GLOBALVAR record, create the GlobalVariable and queue
+// its initializer (resolved later via GlobalInits). Trailing record fields
+// are optional; each is guarded by a Record.size() check.
+Error BitcodeReader::parseGlobalVarRecord(ArrayRef<uint64_t> Record) {
+ // v1: [pointer type, isconst, initid, linkage, alignment, section,
+ // visibility, threadlocal, unnamed_addr, externally_initialized,
+ // dllstorageclass, comdat, attributes, preemption specifier,
+ // partition strtab offset, partition strtab size] (name in VST)
+ // v2: [strtab_offset, strtab_size, v1]
+ StringRef Name;
+ std::tie(Name, Record) = readNameFromStrtab(Record);
+
+ if (Record.size() < 6)
+ return error("Invalid record");
+ Type *Ty = getTypeByID(Record[0]);
+ if (!Ty)
+ return error("Invalid record");
+ // Field 1: bit 0 = isconst; bit 2 set means the record stores the value
+ // type directly and packs the address space into the remaining high bits.
+ // Otherwise field 0 is a pointer type supplying both element type and AS.
+ bool isConstant = Record[1] & 1;
+ bool explicitType = Record[1] & 2;
+ unsigned AddressSpace;
+ if (explicitType) {
+ AddressSpace = Record[1] >> 2;
+ } else {
+ if (!Ty->isPointerTy())
+ return error("Invalid type for value");
+ AddressSpace = cast<PointerType>(Ty)->getAddressSpace();
+ Ty = Ty->getPointerElementType();
+ }
+
+ uint64_t RawLinkage = Record[3];
+ GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage);
+ MaybeAlign Alignment;
+ if (Error Err = parseAlignmentValue(Record[4], Alignment))
+ return Err;
+ std::string Section;
+ if (Record[5]) {
+ // Section index is 1-based; 0 means "no section".
+ if (Record[5] - 1 >= SectionTable.size())
+ return error("Invalid ID");
+ Section = SectionTable[Record[5] - 1];
+ }
+ GlobalValue::VisibilityTypes Visibility = GlobalValue::DefaultVisibility;
+ // Local linkage must have default visibility.
+ // auto-upgrade `hidden` and `protected` for old bitcode.
+ if (Record.size() > 6 && !GlobalValue::isLocalLinkage(Linkage))
+ Visibility = getDecodedVisibility(Record[6]);
+
+ GlobalVariable::ThreadLocalMode TLM = GlobalVariable::NotThreadLocal;
+ if (Record.size() > 7)
+ TLM = getDecodedThreadLocalMode(Record[7]);
+
+ GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None;
+ if (Record.size() > 8)
+ UnnamedAddr = getDecodedUnnamedAddrType(Record[8]);
+
+ bool ExternallyInitialized = false;
+ if (Record.size() > 9)
+ ExternallyInitialized = Record[9];
+
+ GlobalVariable *NewGV =
+ new GlobalVariable(*TheModule, Ty, isConstant, Linkage, nullptr, Name,
+ nullptr, TLM, AddressSpace, ExternallyInitialized);
+ NewGV->setAlignment(Alignment);
+ if (!Section.empty())
+ NewGV->setSection(Section);
+ NewGV->setVisibility(Visibility);
+ NewGV->setUnnamedAddr(UnnamedAddr);
+
+ if (Record.size() > 10)
+ NewGV->setDLLStorageClass(getDecodedDLLStorageClass(Record[10]));
+ else
+ upgradeDLLImportExportLinkage(NewGV, RawLinkage);
+
+ ValueList.push_back(NewGV);
+
+ // Remember which value to use for the global initializer.
+ if (unsigned InitID = Record[2])
+ GlobalInits.push_back(std::make_pair(NewGV, InitID - 1));
+
+ if (Record.size() > 11) {
+ if (unsigned ComdatID = Record[11]) {
+ // Comdat index is 1-based; 0 means "no comdat".
+ if (ComdatID > ComdatList.size())
+ return error("Invalid global variable comdat ID");
+ NewGV->setComdat(ComdatList[ComdatID - 1]);
+ }
+ } else if (hasImplicitComdat(RawLinkage)) {
+ ImplicitComdatObjects.insert(NewGV);
+ }
+
+ if (Record.size() > 12) {
+ auto AS = getAttributes(Record[12]).getFnAttrs();
+ NewGV->setAttributes(AS);
+ }
+
+ if (Record.size() > 13) {
+ NewGV->setDSOLocal(getDecodedDSOLocal(Record[13]));
+ }
+ inferDSOLocal(NewGV);
+
+ // Check whether we have enough values to read a partition name.
+ // NOTE(review): unlike parseFunctionRecord, the partition offset/size are
+ // not validated against Strtab.size() here — confirm this is safe for
+ // untrusted input.
+ if (Record.size() > 15)
+ NewGV->setPartition(StringRef(Strtab.data() + Record[14], Record[15]));
+
+ return Error::success();
+}
+
+// Parse one MODULE_CODE_FUNCTION record: create the Function declaration,
+// apply attributes/upgrades, and (for definitions) register it for lazy body
+// parsing via FunctionsWithBodies/DeferredFunctionInfo.
+Error BitcodeReader::parseFunctionRecord(ArrayRef<uint64_t> Record) {
+ // v1: [type, callingconv, isproto, linkage, paramattr, alignment, section,
+ // visibility, gc, unnamed_addr, prologuedata, dllstorageclass, comdat,
+ // prefixdata, personalityfn, preemption specifier, addrspace] (name in VST)
+ // v2: [strtab_offset, strtab_size, v1]
+ StringRef Name;
+ std::tie(Name, Record) = readNameFromStrtab(Record);
+
+ if (Record.size() < 8)
+ return error("Invalid record");
+ Type *FTy = getTypeByID(Record[0]);
+ if (!FTy)
+ return error("Invalid record");
+ // Older bitcode encodes the function's pointer type; unwrap it.
+ if (auto *PTy = dyn_cast<PointerType>(FTy))
+ FTy = PTy->getPointerElementType();
+
+ if (!isa<FunctionType>(FTy))
+ return error("Invalid type for value");
+ auto CC = static_cast<CallingConv::ID>(Record[1]);
+ if (CC & ~CallingConv::MaxID)
+ return error("Invalid calling convention ID");
+
+ unsigned AddrSpace = TheModule->getDataLayout().getProgramAddressSpace();
+ if (Record.size() > 16)
+ AddrSpace = Record[16];
+
+ Function *Func =
+ Function::Create(cast<FunctionType>(FTy), GlobalValue::ExternalLinkage,
+ AddrSpace, Name, TheModule);
+
+ assert(Func->getFunctionType() == FTy &&
+ "Incorrect fully specified type provided for function");
+ FunctionTypes[Func] = cast<FunctionType>(FTy);
+
+ Func->setCallingConv(CC);
+ bool isProto = Record[2];
+ uint64_t RawLinkage = Record[3];
+ Func->setLinkage(getDecodedLinkage(RawLinkage));
+ Func->setAttributes(getAttributes(Record[4]));
+
+ // Upgrade any old-style byval or sret without a type by propagating the
+ // argument's pointee type. There should be no opaque pointers where the byval
+ // type is implicit.
+ for (unsigned i = 0; i != Func->arg_size(); ++i) {
+ for (Attribute::AttrKind Kind : {Attribute::ByVal, Attribute::StructRet,
+ Attribute::InAlloca}) {
+ if (!Func->hasParamAttribute(i, Kind))
+ continue;
+
+ // Attribute already carries a type; nothing to upgrade.
+ if (Func->getParamAttribute(i, Kind).getValueAsType())
+ continue;
+
+ Func->removeParamAttr(i, Kind);
+
+ Type *PTy = cast<FunctionType>(FTy)->getParamType(i);
+ Type *PtrEltTy = PTy->getPointerElementType();
+ Attribute NewAttr;
+ switch (Kind) {
+ case Attribute::ByVal:
+ NewAttr = Attribute::getWithByValType(Context, PtrEltTy);
+ break;
+ case Attribute::StructRet:
+ NewAttr = Attribute::getWithStructRetType(Context, PtrEltTy);
+ break;
+ case Attribute::InAlloca:
+ NewAttr = Attribute::getWithInAllocaType(Context, PtrEltTy);
+ break;
+ default:
+ llvm_unreachable("not an upgraded type attribute");
+ }
+
+ Func->addParamAttr(i, NewAttr);
+ }
+ }
+
+ MaybeAlign Alignment;
+ if (Error Err = parseAlignmentValue(Record[5], Alignment))
+ return Err;
+ Func->setAlignment(Alignment);
+ if (Record[6]) {
+ // Section index is 1-based; 0 means "no section".
+ if (Record[6] - 1 >= SectionTable.size())
+ return error("Invalid ID");
+ Func->setSection(SectionTable[Record[6] - 1]);
+ }
+ // Local linkage must have default visibility.
+ // auto-upgrade `hidden` and `protected` for old bitcode.
+ if (!Func->hasLocalLinkage())
+ Func->setVisibility(getDecodedVisibility(Record[7]));
+ if (Record.size() > 8 && Record[8]) {
+ if (Record[8] - 1 >= GCTable.size())
+ return error("Invalid ID");
+ Func->setGC(GCTable[Record[8] - 1]);
+ }
+ GlobalValue::UnnamedAddr UnnamedAddr = GlobalValue::UnnamedAddr::None;
+ if (Record.size() > 9)
+ UnnamedAddr = getDecodedUnnamedAddrType(Record[9]);
+ Func->setUnnamedAddr(UnnamedAddr);
+
+ // Prologue/prefix/personality are raw value IDs, recorded for later
+ // resolution through FunctionOperands.
+ FunctionOperandInfo OperandInfo = {Func, 0, 0, 0};
+ if (Record.size() > 10)
+ OperandInfo.Prologue = Record[10];
+
+ if (Record.size() > 11)
+ Func->setDLLStorageClass(getDecodedDLLStorageClass(Record[11]));
+ else
+ upgradeDLLImportExportLinkage(Func, RawLinkage);
+
+ if (Record.size() > 12) {
+ if (unsigned ComdatID = Record[12]) {
+ if (ComdatID > ComdatList.size())
+ return error("Invalid function comdat ID");
+ Func->setComdat(ComdatList[ComdatID - 1]);
+ }
+ } else if (hasImplicitComdat(RawLinkage)) {
+ ImplicitComdatObjects.insert(Func);
+ }
+
+ if (Record.size() > 13)
+ OperandInfo.Prefix = Record[13];
+
+ if (Record.size() > 14)
+ OperandInfo.PersonalityFn = Record[14];
+
+ if (Record.size() > 15) {
+ Func->setDSOLocal(getDecodedDSOLocal(Record[15]));
+ }
+ inferDSOLocal(Func);
+
+ // Record[16] is the address space number.
+
+ // Check whether we have enough values to read a partition name. Also make
+ // sure Strtab has enough values.
+ if (Record.size() > 18 && Strtab.data() &&
+ Record[17] + Record[18] <= Strtab.size()) {
+ Func->setPartition(StringRef(Strtab.data() + Record[17], Record[18]));
+ }
+
+ ValueList.push_back(Func);
+
+ if (OperandInfo.PersonalityFn || OperandInfo.Prefix || OperandInfo.Prologue)
+ FunctionOperands.push_back(OperandInfo);
+
+ // If this is a function with a body, remember the prototype we are
+ // creating now, so that we can match up the body with them later.
+ if (!isProto) {
+ Func->setIsMaterializable(true);
+ FunctionsWithBodies.push_back(Func);
+ DeferredFunctionInfo[Func] = 0;
+ }
+ return Error::success();
+}
+
+// Parse a MODULE_CODE_ALIAS / ALIAS_OLD / IFUNC record. Creates the
+// GlobalAlias or GlobalIFunc and queues its aliasee value ID for later
+// resolution via IndirectSymbolInits.
+Error BitcodeReader::parseGlobalIndirectSymbolRecord(
+ unsigned BitCode, ArrayRef<uint64_t> Record) {
+ // v1 ALIAS_OLD: [alias type, aliasee val#, linkage] (name in VST)
+ // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, visibility,
+ // dllstorageclass, threadlocal, unnamed_addr,
+ // preemption specifier] (name in VST)
+ // v1 IFUNC: [alias type, addrspace, aliasee val#, linkage,
+ // visibility, dllstorageclass, threadlocal, unnamed_addr,
+ // preemption specifier] (name in VST)
+ // v2: [strtab_offset, strtab_size, v1]
+ StringRef Name;
+ std::tie(Name, Record) = readNameFromStrtab(Record);
+
+ bool NewRecord = BitCode != bitc::MODULE_CODE_ALIAS_OLD;
+ if (Record.size() < (3 + (unsigned)NewRecord))
+ return error("Invalid record");
+ unsigned OpNum = 0;
+ Type *Ty = getTypeByID(Record[OpNum++]);
+ if (!Ty)
+ return error("Invalid record");
+
+ unsigned AddrSpace;
+ if (!NewRecord) {
+ // Old records store the alias's pointer type; derive value type and
+ // address space from it.
+ auto *PTy = dyn_cast<PointerType>(Ty);
+ if (!PTy)
+ return error("Invalid type for value");
+ Ty = PTy->getPointerElementType();
+ AddrSpace = PTy->getAddressSpace();
+ } else {
+ AddrSpace = Record[OpNum++];
+ }
+
+ auto Val = Record[OpNum++];
+ auto Linkage = Record[OpNum++];
+ GlobalValue *NewGA;
+ if (BitCode == bitc::MODULE_CODE_ALIAS ||
+ BitCode == bitc::MODULE_CODE_ALIAS_OLD)
+ NewGA = GlobalAlias::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name,
+ TheModule);
+ else
+ NewGA = GlobalIFunc::create(Ty, AddrSpace, getDecodedLinkage(Linkage), Name,
+ nullptr, TheModule);
+
+ // Local linkage must have default visibility.
+ // auto-upgrade `hidden` and `protected` for old bitcode.
+ if (OpNum != Record.size()) {
+ auto VisInd = OpNum++;
+ if (!NewGA->hasLocalLinkage())
+ NewGA->setVisibility(getDecodedVisibility(Record[VisInd]));
+ }
+ // The remaining optional fields exist only on (new-form) alias records.
+ if (BitCode == bitc::MODULE_CODE_ALIAS ||
+ BitCode == bitc::MODULE_CODE_ALIAS_OLD) {
+ if (OpNum != Record.size())
+ NewGA->setDLLStorageClass(getDecodedDLLStorageClass(Record[OpNum++]));
+ else
+ upgradeDLLImportExportLinkage(NewGA, Linkage);
+ if (OpNum != Record.size())
+ NewGA->setThreadLocalMode(getDecodedThreadLocalMode(Record[OpNum++]));
+ if (OpNum != Record.size())
+ NewGA->setUnnamedAddr(getDecodedUnnamedAddrType(Record[OpNum++]));
+ }
+ if (OpNum != Record.size())
+ NewGA->setDSOLocal(getDecodedDSOLocal(Record[OpNum++]));
+ inferDSOLocal(NewGA);
+
+ // Check whether we have enough values to read a partition name.
+ // NOTE(review): offset/size are not validated against Strtab.size() here —
+ // confirm this is safe for untrusted input.
+ if (OpNum + 1 < Record.size()) {
+ NewGA->setPartition(
+ StringRef(Strtab.data() + Record[OpNum], Record[OpNum + 1]));
+ OpNum += 2;
+ }
+
+ ValueList.push_back(NewGA);
+ IndirectSymbolInits.push_back(std::make_pair(NewGA, Val));
+ return Error::success();
+}
+
+Error BitcodeReader::parseModule(uint64_t ResumeBit,
+ bool ShouldLazyLoadMetadata,
+ DataLayoutCallbackTy DataLayoutCallback) {
+ if (ResumeBit) {
+ if (Error JumpFailed = Stream.JumpToBit(ResumeBit))
+ return JumpFailed;
+ } else if (Error Err = Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+ return Err;
+
+ SmallVector<uint64_t, 64> Record;
+
+ // Parts of bitcode parsing depend on the datalayout. Make sure we
+ // finalize the datalayout before we run any of that code.
+ bool ResolvedDataLayout = false;
+ auto ResolveDataLayout = [&] {
+ if (ResolvedDataLayout)
+ return;
+
+ // datalayout and triple can't be parsed after this point.
+ ResolvedDataLayout = true;
+
+ // Upgrade data layout string.
+ std::string DL = llvm::UpgradeDataLayoutString(
+ TheModule->getDataLayoutStr(), TheModule->getTargetTriple());
+ TheModule->setDataLayout(DL);
+
+ if (auto LayoutOverride =
+ DataLayoutCallback(TheModule->getTargetTriple()))
+ TheModule->setDataLayout(*LayoutOverride);
+ };
+
+ // Read all the records for this module.
+ while (true) {
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ ResolveDataLayout();
+ return globalCleanup();
+
+ case BitstreamEntry::SubBlock:
+ switch (Entry.ID) {
+ default: // Skip unknown content.
+ if (Error Err = Stream.SkipBlock())
+ return Err;
+ break;
+ case bitc::BLOCKINFO_BLOCK_ID:
+ if (readBlockInfo())
+ return error("Malformed block");
+ break;
+ case bitc::PARAMATTR_BLOCK_ID:
+ if (Error Err = parseAttributeBlock())
+ return Err;
+ break;
+ case bitc::PARAMATTR_GROUP_BLOCK_ID:
+ if (Error Err = parseAttributeGroupBlock())
+ return Err;
+ break;
+ case bitc::TYPE_BLOCK_ID_NEW:
+ if (Error Err = parseTypeTable())
+ return Err;
+ break;
+ case bitc::VALUE_SYMTAB_BLOCK_ID:
+ if (!SeenValueSymbolTable) {
+ // Either this is an old form VST without function index and an
+ // associated VST forward declaration record (which would have caused
+ // the VST to be jumped to and parsed before it was encountered
+ // normally in the stream), or there were no function blocks to
+ // trigger an earlier parsing of the VST.
+ assert(VSTOffset == 0 || FunctionsWithBodies.empty());
+ if (Error Err = parseValueSymbolTable())
+ return Err;
+ SeenValueSymbolTable = true;
+ } else {
+ // We must have had a VST forward declaration record, which caused
+ // the parser to jump to and parse the VST earlier.
+ assert(VSTOffset > 0);
+ if (Error Err = Stream.SkipBlock())
+ return Err;
+ }
+ break;
+ case bitc::CONSTANTS_BLOCK_ID:
+ if (Error Err = parseConstants())
+ return Err;
+ if (Error Err = resolveGlobalAndIndirectSymbolInits())
+ return Err;
+ break;
+ case bitc::METADATA_BLOCK_ID:
+ if (ShouldLazyLoadMetadata) {
+ if (Error Err = rememberAndSkipMetadata())
+ return Err;
+ break;
+ }
+ assert(DeferredMetadataInfo.empty() && "Unexpected deferred metadata");
+ if (Error Err = MDLoader->parseModuleMetadata())
+ return Err;
+ break;
+ case bitc::METADATA_KIND_BLOCK_ID:
+ if (Error Err = MDLoader->parseMetadataKinds())
+ return Err;
+ break;
+ case bitc::FUNCTION_BLOCK_ID:
+ ResolveDataLayout();
+
+ // If this is the first function body we've seen, reverse the
+ // FunctionsWithBodies list.
+ if (!SeenFirstFunctionBody) {
+ std::reverse(FunctionsWithBodies.begin(), FunctionsWithBodies.end());
+ if (Error Err = globalCleanup())
+ return Err;
+ SeenFirstFunctionBody = true;
+ }
+
+ if (VSTOffset > 0) {
+ // If we have a VST forward declaration record, make sure we
+ // parse the VST now if we haven't already. It is needed to
+ // set up the DeferredFunctionInfo vector for lazy reading.
+ if (!SeenValueSymbolTable) {
+ if (Error Err = BitcodeReader::parseValueSymbolTable(VSTOffset))
+ return Err;
+ SeenValueSymbolTable = true;
+ // Fall through so that we record the NextUnreadBit below.
+ // This is necessary in case we have an anonymous function that
+ // is later materialized. Since it will not have a VST entry we
+ // need to fall back to the lazy parse to find its offset.
+ } else {
+ // If we have a VST forward declaration record, but have already
+ // parsed the VST (just above, when the first function body was
+ // encountered here), then we are resuming the parse after
+ // materializing functions. The ResumeBit points to the
+ // start of the last function block recorded in the
+ // DeferredFunctionInfo map. Skip it.
+ if (Error Err = Stream.SkipBlock())
+ return Err;
+ continue;
+ }
+ }
+
+ // Support older bitcode files that did not have the function
+ // index in the VST, nor a VST forward declaration record, as
+ // well as anonymous functions that do not have VST entries.
+ // Build the DeferredFunctionInfo vector on the fly.
+ if (Error Err = rememberAndSkipFunctionBody())
+ return Err;
+
+ // Suspend parsing when we reach the function bodies. Subsequent
+ // materialization calls will resume it when necessary. If the bitcode
+ // file is old, the symbol table will be at the end instead and will not
+ // have been seen yet. In this case, just finish the parse now.
+ if (SeenValueSymbolTable) {
+ NextUnreadBit = Stream.GetCurrentBitNo();
+ // After the VST has been parsed, we need to make sure intrinsic name
+ // are auto-upgraded.
+ return globalCleanup();
+ }
+ break;
+ case bitc::USELIST_BLOCK_ID:
+ if (Error Err = parseUseLists())
+ return Err;
+ break;
+ case bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID:
+ if (Error Err = parseOperandBundleTags())
+ return Err;
+ break;
+ case bitc::SYNC_SCOPE_NAMES_BLOCK_ID:
+ if (Error Err = parseSyncScopeNames())
+ return Err;
+ break;
+ }
+ continue;
+
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Expected<unsigned> MaybeBitCode = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeBitCode)
+ return MaybeBitCode.takeError();
+ switch (unsigned BitCode = MaybeBitCode.get()) {
+ default: break; // Default behavior, ignore unknown content.
+ case bitc::MODULE_CODE_VERSION: {
+ Expected<unsigned> VersionOrErr = parseVersionRecord(Record);
+ if (!VersionOrErr)
+ return VersionOrErr.takeError();
+ UseRelativeIDs = *VersionOrErr >= 1;
+ break;
+ }
+ case bitc::MODULE_CODE_TRIPLE: { // TRIPLE: [strchr x N]
+ if (ResolvedDataLayout)
+ return error("target triple too late in module");
+ std::string S;
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
+ TheModule->setTargetTriple(S);
+ break;
+ }
+ case bitc::MODULE_CODE_DATALAYOUT: { // DATALAYOUT: [strchr x N]
+ if (ResolvedDataLayout)
+ return error("datalayout too late in module");
+ std::string S;
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
+ TheModule->setDataLayout(S);
+ break;
+ }
+ case bitc::MODULE_CODE_ASM: { // ASM: [strchr x N]
+ std::string S;
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
+ TheModule->setModuleInlineAsm(S);
+ break;
+ }
+ case bitc::MODULE_CODE_DEPLIB: { // DEPLIB: [strchr x N]
+ // Deprecated, but still needed to read old bitcode files.
+ std::string S;
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
+ // Ignore value.
+ break;
+ }
+ case bitc::MODULE_CODE_SECTIONNAME: { // SECTIONNAME: [strchr x N]
+ std::string S;
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
+ SectionTable.push_back(S);
+ break;
+ }
+ case bitc::MODULE_CODE_GCNAME: { // SECTIONNAME: [strchr x N]
+ std::string S;
+ if (convertToString(Record, 0, S))
+ return error("Invalid record");
+ GCTable.push_back(S);
+ break;
+ }
+ case bitc::MODULE_CODE_COMDAT:
+ if (Error Err = parseComdatRecord(Record))
+ return Err;
+ break;
+ // FIXME: BitcodeReader should handle {GLOBALVAR, FUNCTION, ALIAS, IFUNC}
+ // written by ThinLinkBitcodeWriter. See
+ // `ThinLinkBitcodeWriter::writeSimplifiedModuleInfo` for the format of each
+ // record
+ // (https://github.com/llvm/llvm-project/blob/b6a93967d9c11e79802b5e75cec1584d6c8aa472/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp#L4714)
+ case bitc::MODULE_CODE_GLOBALVAR:
+ if (Error Err = parseGlobalVarRecord(Record))
+ return Err;
+ break;
+ case bitc::MODULE_CODE_FUNCTION:
+ ResolveDataLayout();
+ if (Error Err = parseFunctionRecord(Record))
+ return Err;
+ break;
+ case bitc::MODULE_CODE_IFUNC:
+ case bitc::MODULE_CODE_ALIAS:
+ case bitc::MODULE_CODE_ALIAS_OLD:
+ if (Error Err = parseGlobalIndirectSymbolRecord(BitCode, Record))
+ return Err;
+ break;
+ /// MODULE_CODE_VSTOFFSET: [offset]
+ case bitc::MODULE_CODE_VSTOFFSET:
+ if (Record.empty())
+ return error("Invalid record");
+ // Note that we subtract 1 here because the offset is relative to one word
+ // before the start of the identification or module block, which was
+ // historically always the start of the regular bitcode header.
+ VSTOffset = Record[0] - 1;
+ break;
+ /// MODULE_CODE_SOURCE_FILENAME: [namechar x N]
+ case bitc::MODULE_CODE_SOURCE_FILENAME:
+ SmallString<128> ValueName;
+ if (convertToString(Record, 0, ValueName))
+ return error("Invalid record");
+ TheModule->setSourceFileName(ValueName);
+ break;
+ }
+ Record.clear();
+ }
+}
+
+/// Top-level entry point: parse the bitcode stream into module \p M.
+/// Stores the destination module, constructs the MetadataLoader — wiring its
+/// type-resolution callback to this reader's getTypeByID so metadata records
+/// can resolve type IDs lazily — and then parses the module block starting at
+/// bit offset 0. \p ShouldLazyLoadMetadata and \p DataLayoutCallback are
+/// forwarded to parseModule; \p IsImporting is consumed by the MetadataLoader.
+Error BitcodeReader::parseBitcodeInto(Module *M, bool ShouldLazyLoadMetadata,
+                                      bool IsImporting,
+                                      DataLayoutCallbackTy DataLayoutCallback) {
+  TheModule = M;
+  MDLoader = MetadataLoader(Stream, *M, ValueList, IsImporting,
+                            [&](unsigned ID) { return getTypeByID(ID); });
+  return parseModule(0, ShouldLazyLoadMetadata, DataLayoutCallback);
+}
+
+/// Validate a load/store operand pair read from bitcode: \p PtrType must be a
+/// pointer type whose pointee matches \p ValType (trivially true for opaque
+/// pointers, checked via isOpaqueOrPointeeTypeMatches), and \p ValType must be
+/// a type that is legal to load from / store to memory. Returns an error on
+/// any violation, Error::success() otherwise.
+Error BitcodeReader::typeCheckLoadStoreInst(Type *ValType, Type *PtrType) {
+  if (!isa<PointerType>(PtrType))
+    return error("Load/Store operand is not a pointer type");
+
+  if (!cast<PointerType>(PtrType)->isOpaqueOrPointeeTypeMatches(ValType))
+    return error("Explicit load/store type does not match pointee "
+                 "type of pointer operand");
+  if (!PointerType::isLoadableOrStorableType(ValType))
+    return error("Cannot load/store from pointer");
+  return Error::success();
+}
+
+/// Upgrade type-carrying parameter attributes on a call site read from older
+/// bitcode. Pre-typed-attribute bitcode encoded byval/sret/inalloca without an
+/// explicit type; reconstruct the type from the argument's pointer element
+/// type (\p ArgsTys holds the pointer-typed argument types as recorded in the
+/// call record). Also attaches the elementtype attribute where required for
+/// indirect inline-asm constraints and for the preserve_*_access_index
+/// intrinsics.
+void BitcodeReader::propagateAttributeTypes(CallBase *CB,
+                                            ArrayRef<Type *> ArgsTys) {
+  for (unsigned i = 0; i != CB->arg_size(); ++i) {
+    for (Attribute::AttrKind Kind : {Attribute::ByVal, Attribute::StructRet,
+                                     Attribute::InAlloca}) {
+      // Skip params that lack the attribute or already carry a typed form.
+      if (!CB->paramHasAttr(i, Kind) ||
+          CB->getParamAttr(i, Kind).getValueAsType())
+        continue;
+
+      // Replace the untyped attribute with a typed one derived from the
+      // (non-opaque) pointer argument's element type.
+      CB->removeParamAttr(i, Kind);
+
+      Type *PtrEltTy = ArgsTys[i]->getPointerElementType();
+      Attribute NewAttr;
+      switch (Kind) {
+      case Attribute::ByVal:
+        NewAttr = Attribute::getWithByValType(Context, PtrEltTy);
+        break;
+      case Attribute::StructRet:
+        NewAttr = Attribute::getWithStructRetType(Context, PtrEltTy);
+        break;
+      case Attribute::InAlloca:
+        NewAttr = Attribute::getWithInAllocaType(Context, PtrEltTy);
+        break;
+      default:
+        llvm_unreachable("not an upgraded type attribute");
+      }
+
+      CB->addParamAttr(i, NewAttr);
+    }
+  }
+
+  // Inline asm: indirect constraints require an elementtype attribute on the
+  // corresponding pointer argument; synthesize it if the stored attributes
+  // don't already have one.
+  if (CB->isInlineAsm()) {
+    const InlineAsm *IA = cast<InlineAsm>(CB->getCalledOperand());
+    unsigned ArgNo = 0;
+    for (const InlineAsm::ConstraintInfo &CI : IA->ParseConstraints()) {
+      // Only constraints bound to an actual call argument advance ArgNo.
+      if (!CI.hasArg())
+        continue;
+
+      if (CI.isIndirect && !CB->getAttributes().getParamElementType(ArgNo)) {
+        Type *ElemTy = ArgsTys[ArgNo]->getPointerElementType();
+        CB->addParamAttr(
+            ArgNo, Attribute::get(Context, Attribute::ElementType, ElemTy));
+      }
+
+      ArgNo++;
+    }
+  }
+
+  // These intrinsics require elementtype on operand 0; add it when older
+  // bitcode didn't record it.
+  switch (CB->getIntrinsicID()) {
+  case Intrinsic::preserve_array_access_index:
+  case Intrinsic::preserve_struct_access_index:
+    if (!CB->getAttributes().getParamElementType(0)) {
+      Type *ElTy = ArgsTys[0]->getPointerElementType();
+      Attribute NewAttr = Attribute::get(Context, Attribute::ElementType, ElTy);
+      CB->addParamAttr(0, NewAttr);
+    }
+    break;
+  default:
+    break;
+  }
+}
+
+/// Lazily parse the specified function body block.
+Error BitcodeReader::parseFunctionBody(Function *F) {
+ if (Error Err = Stream.EnterSubBlock(bitc::FUNCTION_BLOCK_ID))
+ return Err;
+
+ // Unexpected unresolved metadata when parsing function.
+ if (MDLoader->hasFwdRefs())
+ return error("Invalid function metadata: incoming forward references");
+
+ InstructionList.clear();
+ unsigned ModuleValueListSize = ValueList.size();
+ unsigned ModuleMDLoaderSize = MDLoader->size();
+
+ // Add all the function arguments to the value table.
+#ifndef NDEBUG
+ unsigned ArgNo = 0;
+ FunctionType *FTy = FunctionTypes[F];
+#endif
+ for (Argument &I : F->args()) {
+ assert(I.getType() == FTy->getParamType(ArgNo++) &&
+ "Incorrect fully specified type for Function Argument");
+ ValueList.push_back(&I);
+ }
+ unsigned NextValueNo = ValueList.size();
+ BasicBlock *CurBB = nullptr;
+ unsigned CurBBNo = 0;
+
+ DebugLoc LastLoc;
+ auto getLastInstruction = [&]() -> Instruction * {
+ if (CurBB && !CurBB->empty())
+ return &CurBB->back();
+ else if (CurBBNo && FunctionBBs[CurBBNo - 1] &&
+ !FunctionBBs[CurBBNo - 1]->empty())
+ return &FunctionBBs[CurBBNo - 1]->back();
+ return nullptr;
+ };
+
+ std::vector<OperandBundleDef> OperandBundles;
+
+ // Read all the records.
+ SmallVector<uint64_t, 64> Record;
+
+ while (true) {
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ goto OutOfRecordLoop;
+
+ case BitstreamEntry::SubBlock:
+ switch (Entry.ID) {
+ default: // Skip unknown content.
+ if (Error Err = Stream.SkipBlock())
+ return Err;
+ break;
+ case bitc::CONSTANTS_BLOCK_ID:
+ if (Error Err = parseConstants())
+ return Err;
+ NextValueNo = ValueList.size();
+ break;
+ case bitc::VALUE_SYMTAB_BLOCK_ID:
+ if (Error Err = parseValueSymbolTable())
+ return Err;
+ break;
+ case bitc::METADATA_ATTACHMENT_ID:
+ if (Error Err = MDLoader->parseMetadataAttachment(*F, InstructionList))
+ return Err;
+ break;
+ case bitc::METADATA_BLOCK_ID:
+ assert(DeferredMetadataInfo.empty() &&
+ "Must read all module-level metadata before function-level");
+ if (Error Err = MDLoader->parseFunctionMetadata())
+ return Err;
+ break;
+ case bitc::USELIST_BLOCK_ID:
+ if (Error Err = parseUseLists())
+ return Err;
+ break;
+ }
+ continue;
+
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Read a record.
+ Record.clear();
+ Instruction *I = nullptr;
+ Expected<unsigned> MaybeBitCode = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeBitCode)
+ return MaybeBitCode.takeError();
+ switch (unsigned BitCode = MaybeBitCode.get()) {
+ default: // Default behavior: reject
+ return error("Invalid value");
+ case bitc::FUNC_CODE_DECLAREBLOCKS: { // DECLAREBLOCKS: [nblocks]
+ if (Record.empty() || Record[0] == 0)
+ return error("Invalid record");
+ // Create all the basic blocks for the function.
+ FunctionBBs.resize(Record[0]);
+
+ // See if anything took the address of blocks in this function.
+ auto BBFRI = BasicBlockFwdRefs.find(F);
+ if (BBFRI == BasicBlockFwdRefs.end()) {
+ for (BasicBlock *&BB : FunctionBBs)
+ BB = BasicBlock::Create(Context, "", F);
+ } else {
+ auto &BBRefs = BBFRI->second;
+ // Check for invalid basic block references.
+ if (BBRefs.size() > FunctionBBs.size())
+ return error("Invalid ID");
+ assert(!BBRefs.empty() && "Unexpected empty array");
+ assert(!BBRefs.front() && "Invalid reference to entry block");
+ for (unsigned I = 0, E = FunctionBBs.size(), RE = BBRefs.size(); I != E;
+ ++I)
+ if (I < RE && BBRefs[I]) {
+ BBRefs[I]->insertInto(F);
+ FunctionBBs[I] = BBRefs[I];
+ } else {
+ FunctionBBs[I] = BasicBlock::Create(Context, "", F);
+ }
+
+ // Erase from the table.
+ BasicBlockFwdRefs.erase(BBFRI);
+ }
+
+ CurBB = FunctionBBs[0];
+ continue;
+ }
+
+ case bitc::FUNC_CODE_DEBUG_LOC_AGAIN: // DEBUG_LOC_AGAIN
+ // This record indicates that the last instruction is at the same
+ // location as the previous instruction with a location.
+ I = getLastInstruction();
+
+ if (!I)
+ return error("Invalid record");
+ I->setDebugLoc(LastLoc);
+ I = nullptr;
+ continue;
+
+ case bitc::FUNC_CODE_DEBUG_LOC: { // DEBUG_LOC: [line, col, scope, ia]
+ I = getLastInstruction();
+ if (!I || Record.size() < 4)
+ return error("Invalid record");
+
+ unsigned Line = Record[0], Col = Record[1];
+ unsigned ScopeID = Record[2], IAID = Record[3];
+ bool isImplicitCode = Record.size() == 5 && Record[4];
+
+ MDNode *Scope = nullptr, *IA = nullptr;
+ if (ScopeID) {
+ Scope = dyn_cast_or_null<MDNode>(
+ MDLoader->getMetadataFwdRefOrLoad(ScopeID - 1));
+ if (!Scope)
+ return error("Invalid record");
+ }
+ if (IAID) {
+ IA = dyn_cast_or_null<MDNode>(
+ MDLoader->getMetadataFwdRefOrLoad(IAID - 1));
+ if (!IA)
+ return error("Invalid record");
+ }
+ LastLoc = DILocation::get(Scope->getContext(), Line, Col, Scope, IA,
+ isImplicitCode);
+ I->setDebugLoc(LastLoc);
+ I = nullptr;
+ continue;
+ }
+ case bitc::FUNC_CODE_INST_UNOP: { // UNOP: [opval, ty, opcode]
+ unsigned OpNum = 0;
+ Value *LHS;
+ if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
+ OpNum+1 > Record.size())
+ return error("Invalid record");
+
+ int Opc = getDecodedUnaryOpcode(Record[OpNum++], LHS->getType());
+ if (Opc == -1)
+ return error("Invalid record");
+ I = UnaryOperator::Create((Instruction::UnaryOps)Opc, LHS);
+ InstructionList.push_back(I);
+ if (OpNum < Record.size()) {
+ if (isa<FPMathOperator>(I)) {
+ FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]);
+ if (FMF.any())
+ I->setFastMathFlags(FMF);
+ }
+ }
+ break;
+ }
+ case bitc::FUNC_CODE_INST_BINOP: { // BINOP: [opval, ty, opval, opcode]
+ unsigned OpNum = 0;
+ Value *LHS, *RHS;
+ if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
+ popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS) ||
+ OpNum+1 > Record.size())
+ return error("Invalid record");
+
+ int Opc = getDecodedBinaryOpcode(Record[OpNum++], LHS->getType());
+ if (Opc == -1)
+ return error("Invalid record");
+ I = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
+ InstructionList.push_back(I);
+ if (OpNum < Record.size()) {
+ if (Opc == Instruction::Add ||
+ Opc == Instruction::Sub ||
+ Opc == Instruction::Mul ||
+ Opc == Instruction::Shl) {
+ if (Record[OpNum] & (1 << bitc::OBO_NO_SIGNED_WRAP))
+ cast<BinaryOperator>(I)->setHasNoSignedWrap(true);
+ if (Record[OpNum] & (1 << bitc::OBO_NO_UNSIGNED_WRAP))
+ cast<BinaryOperator>(I)->setHasNoUnsignedWrap(true);
+ } else if (Opc == Instruction::SDiv ||
+ Opc == Instruction::UDiv ||
+ Opc == Instruction::LShr ||
+ Opc == Instruction::AShr) {
+ if (Record[OpNum] & (1 << bitc::PEO_EXACT))
+ cast<BinaryOperator>(I)->setIsExact(true);
+ } else if (isa<FPMathOperator>(I)) {
+ FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]);
+ if (FMF.any())
+ I->setFastMathFlags(FMF);
+ }
+
+ }
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CAST: { // CAST: [opval, opty, destty, castopc]
+ unsigned OpNum = 0;
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
+ OpNum+2 != Record.size())
+ return error("Invalid record");
+
+ Type *ResTy = getTypeByID(Record[OpNum]);
+ int Opc = getDecodedCastOpcode(Record[OpNum + 1]);
+ if (Opc == -1 || !ResTy)
+ return error("Invalid record");
+ Instruction *Temp = nullptr;
+ if ((I = UpgradeBitCastInst(Opc, Op, ResTy, Temp))) {
+ if (Temp) {
+ InstructionList.push_back(Temp);
+ assert(CurBB && "No current BB?");
+ CurBB->getInstList().push_back(Temp);
+ }
+ } else {
+ auto CastOp = (Instruction::CastOps)Opc;
+ if (!CastInst::castIsValid(CastOp, Op, ResTy))
+ return error("Invalid cast");
+ I = CastInst::Create(CastOp, Op, ResTy);
+ }
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD:
+ case bitc::FUNC_CODE_INST_GEP_OLD:
+ case bitc::FUNC_CODE_INST_GEP: { // GEP: type, [n x operands]
+ unsigned OpNum = 0;
+
+ Type *Ty;
+ bool InBounds;
+
+ if (BitCode == bitc::FUNC_CODE_INST_GEP) {
+ InBounds = Record[OpNum++];
+ Ty = getTypeByID(Record[OpNum++]);
+ } else {
+ InBounds = BitCode == bitc::FUNC_CODE_INST_INBOUNDS_GEP_OLD;
+ Ty = nullptr;
+ }
+
+ Value *BasePtr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, BasePtr))
+ return error("Invalid record");
+
+ if (!Ty) {
+ Ty = BasePtr->getType()->getScalarType()->getPointerElementType();
+ } else if (!cast<PointerType>(BasePtr->getType()->getScalarType())
+ ->isOpaqueOrPointeeTypeMatches(Ty)) {
+ return error(
+ "Explicit gep type does not match pointee type of pointer operand");
+ }
+
+ SmallVector<Value*, 16> GEPIdx;
+ while (OpNum != Record.size()) {
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+ return error("Invalid record");
+ GEPIdx.push_back(Op);
+ }
+
+ I = GetElementPtrInst::Create(Ty, BasePtr, GEPIdx);
+
+ InstructionList.push_back(I);
+ if (InBounds)
+ cast<GetElementPtrInst>(I)->setIsInBounds(true);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_EXTRACTVAL: {
+ // EXTRACTVAL: [opty, opval, n x indices]
+ unsigned OpNum = 0;
+ Value *Agg;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Agg))
+ return error("Invalid record");
+ Type *Ty = Agg->getType();
+
+ unsigned RecSize = Record.size();
+ if (OpNum == RecSize)
+ return error("EXTRACTVAL: Invalid instruction with 0 indices");
+
+ SmallVector<unsigned, 4> EXTRACTVALIdx;
+ for (; OpNum != RecSize; ++OpNum) {
+ bool IsArray = Ty->isArrayTy();
+ bool IsStruct = Ty->isStructTy();
+ uint64_t Index = Record[OpNum];
+
+ if (!IsStruct && !IsArray)
+ return error("EXTRACTVAL: Invalid type");
+ if ((unsigned)Index != Index)
+ return error("Invalid value");
+ if (IsStruct && Index >= Ty->getStructNumElements())
+ return error("EXTRACTVAL: Invalid struct index");
+ if (IsArray && Index >= Ty->getArrayNumElements())
+ return error("EXTRACTVAL: Invalid array index");
+ EXTRACTVALIdx.push_back((unsigned)Index);
+
+ if (IsStruct)
+ Ty = Ty->getStructElementType(Index);
+ else
+ Ty = Ty->getArrayElementType();
+ }
+
+ I = ExtractValueInst::Create(Agg, EXTRACTVALIdx);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_INSERTVAL: {
+ // INSERTVAL: [opty, opval, opty, opval, n x indices]
+ unsigned OpNum = 0;
+ Value *Agg;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Agg))
+ return error("Invalid record");
+ Value *Val;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Val))
+ return error("Invalid record");
+
+ unsigned RecSize = Record.size();
+ if (OpNum == RecSize)
+ return error("INSERTVAL: Invalid instruction with 0 indices");
+
+ SmallVector<unsigned, 4> INSERTVALIdx;
+ Type *CurTy = Agg->getType();
+ for (; OpNum != RecSize; ++OpNum) {
+ bool IsArray = CurTy->isArrayTy();
+ bool IsStruct = CurTy->isStructTy();
+ uint64_t Index = Record[OpNum];
+
+ if (!IsStruct && !IsArray)
+ return error("INSERTVAL: Invalid type");
+ if ((unsigned)Index != Index)
+ return error("Invalid value");
+ if (IsStruct && Index >= CurTy->getStructNumElements())
+ return error("INSERTVAL: Invalid struct index");
+ if (IsArray && Index >= CurTy->getArrayNumElements())
+ return error("INSERTVAL: Invalid array index");
+
+ INSERTVALIdx.push_back((unsigned)Index);
+ if (IsStruct)
+ CurTy = CurTy->getStructElementType(Index);
+ else
+ CurTy = CurTy->getArrayElementType();
+ }
+
+ if (CurTy != Val->getType())
+ return error("Inserted value type doesn't match aggregate type");
+
+ I = InsertValueInst::Create(Agg, Val, INSERTVALIdx);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_SELECT: { // SELECT: [opval, ty, opval, opval]
+ // obsolete form of select
+ // handles select i1 ... in old bitcode
+ unsigned OpNum = 0;
+ Value *TrueVal, *FalseVal, *Cond;
+ if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
+ popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
+ popValue(Record, OpNum, NextValueNo, Type::getInt1Ty(Context), Cond))
+ return error("Invalid record");
+
+ I = SelectInst::Create(Cond, TrueVal, FalseVal);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_VSELECT: {// VSELECT: [ty,opval,opval,predty,pred]
+ // new form of select
+ // handles select i1 or select [N x i1]
+ unsigned OpNum = 0;
+ Value *TrueVal, *FalseVal, *Cond;
+ if (getValueTypePair(Record, OpNum, NextValueNo, TrueVal) ||
+ popValue(Record, OpNum, NextValueNo, TrueVal->getType(), FalseVal) ||
+ getValueTypePair(Record, OpNum, NextValueNo, Cond))
+ return error("Invalid record");
+
+ // select condition can be either i1 or [N x i1]
+ if (VectorType* vector_type =
+ dyn_cast<VectorType>(Cond->getType())) {
+ // expect <n x i1>
+ if (vector_type->getElementType() != Type::getInt1Ty(Context))
+ return error("Invalid type for value");
+ } else {
+ // expect i1
+ if (Cond->getType() != Type::getInt1Ty(Context))
+ return error("Invalid type for value");
+ }
+
+ I = SelectInst::Create(Cond, TrueVal, FalseVal);
+ InstructionList.push_back(I);
+ if (OpNum < Record.size() && isa<FPMathOperator>(I)) {
+ FastMathFlags FMF = getDecodedFastMathFlags(Record[OpNum]);
+ if (FMF.any())
+ I->setFastMathFlags(FMF);
+ }
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_EXTRACTELT: { // EXTRACTELT: [opty, opval, opval]
+ unsigned OpNum = 0;
+ Value *Vec, *Idx;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Vec) ||
+ getValueTypePair(Record, OpNum, NextValueNo, Idx))
+ return error("Invalid record");
+ if (!Vec->getType()->isVectorTy())
+ return error("Invalid type for value");
+ I = ExtractElementInst::Create(Vec, Idx);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_INSERTELT: { // INSERTELT: [ty, opval,opval,opval]
+ unsigned OpNum = 0;
+ Value *Vec, *Elt, *Idx;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Vec))
+ return error("Invalid record");
+ if (!Vec->getType()->isVectorTy())
+ return error("Invalid type for value");
+ if (popValue(Record, OpNum, NextValueNo,
+ cast<VectorType>(Vec->getType())->getElementType(), Elt) ||
+ getValueTypePair(Record, OpNum, NextValueNo, Idx))
+ return error("Invalid record");
+ I = InsertElementInst::Create(Vec, Elt, Idx);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_SHUFFLEVEC: {// SHUFFLEVEC: [opval,ty,opval,opval]
+ unsigned OpNum = 0;
+ Value *Vec1, *Vec2, *Mask;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Vec1) ||
+ popValue(Record, OpNum, NextValueNo, Vec1->getType(), Vec2))
+ return error("Invalid record");
+
+ if (getValueTypePair(Record, OpNum, NextValueNo, Mask))
+ return error("Invalid record");
+ if (!Vec1->getType()->isVectorTy() || !Vec2->getType()->isVectorTy())
+ return error("Invalid type for value");
+
+ I = new ShuffleVectorInst(Vec1, Vec2, Mask);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_CMP: // CMP: [opty, opval, opval, pred]
+ // Old form of ICmp/FCmp returning bool
+ // Existed to differentiate between icmp/fcmp and vicmp/vfcmp which were
+ // both legal on vectors but had different behaviour.
+ case bitc::FUNC_CODE_INST_CMP2: { // CMP2: [opty, opval, opval, pred]
+ // FCmp/ICmp returning bool or vector of bool
+
+ unsigned OpNum = 0;
+ Value *LHS, *RHS;
+ if (getValueTypePair(Record, OpNum, NextValueNo, LHS) ||
+ popValue(Record, OpNum, NextValueNo, LHS->getType(), RHS))
+ return error("Invalid record");
+
+ if (OpNum >= Record.size())
+ return error(
+ "Invalid record: operand number exceeded available operands");
+
+ unsigned PredVal = Record[OpNum];
+ bool IsFP = LHS->getType()->isFPOrFPVectorTy();
+ FastMathFlags FMF;
+ if (IsFP && Record.size() > OpNum+1)
+ FMF = getDecodedFastMathFlags(Record[++OpNum]);
+
+ if (OpNum+1 != Record.size())
+ return error("Invalid record");
+
+ if (LHS->getType()->isFPOrFPVectorTy())
+ I = new FCmpInst((FCmpInst::Predicate)PredVal, LHS, RHS);
+ else
+ I = new ICmpInst((ICmpInst::Predicate)PredVal, LHS, RHS);
+
+ if (FMF.any())
+ I->setFastMathFlags(FMF);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_RET: // RET: [opty,opval<optional>]
+ {
+ unsigned Size = Record.size();
+ if (Size == 0) {
+ I = ReturnInst::Create(Context);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ unsigned OpNum = 0;
+ Value *Op = nullptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+ return error("Invalid record");
+ if (OpNum != Record.size())
+ return error("Invalid record");
+
+ I = ReturnInst::Create(Context, Op);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_BR: { // BR: [bb#, bb#, opval] or [bb#]
+ if (Record.size() != 1 && Record.size() != 3)
+ return error("Invalid record");
+ BasicBlock *TrueDest = getBasicBlock(Record[0]);
+ if (!TrueDest)
+ return error("Invalid record");
+
+ if (Record.size() == 1) {
+ I = BranchInst::Create(TrueDest);
+ InstructionList.push_back(I);
+ }
+ else {
+ BasicBlock *FalseDest = getBasicBlock(Record[1]);
+ Value *Cond = getValue(Record, 2, NextValueNo,
+ Type::getInt1Ty(Context));
+ if (!FalseDest || !Cond)
+ return error("Invalid record");
+ I = BranchInst::Create(TrueDest, FalseDest, Cond);
+ InstructionList.push_back(I);
+ }
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CLEANUPRET: { // CLEANUPRET: [val] or [val,bb#]
+ if (Record.size() != 1 && Record.size() != 2)
+ return error("Invalid record");
+ unsigned Idx = 0;
+ Value *CleanupPad =
+ getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context));
+ if (!CleanupPad)
+ return error("Invalid record");
+ BasicBlock *UnwindDest = nullptr;
+ if (Record.size() == 2) {
+ UnwindDest = getBasicBlock(Record[Idx++]);
+ if (!UnwindDest)
+ return error("Invalid record");
+ }
+
+ I = CleanupReturnInst::Create(CleanupPad, UnwindDest);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CATCHRET: { // CATCHRET: [val,bb#]
+ if (Record.size() != 2)
+ return error("Invalid record");
+ unsigned Idx = 0;
+ Value *CatchPad =
+ getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context));
+ if (!CatchPad)
+ return error("Invalid record");
+ BasicBlock *BB = getBasicBlock(Record[Idx++]);
+ if (!BB)
+ return error("Invalid record");
+
+ I = CatchReturnInst::Create(CatchPad, BB);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CATCHSWITCH: { // CATCHSWITCH: [tok,num,(bb)*,bb?]
+ // We must have, at minimum, the outer scope and the number of arguments.
+ if (Record.size() < 2)
+ return error("Invalid record");
+
+ unsigned Idx = 0;
+
+ Value *ParentPad =
+ getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context));
+
+ unsigned NumHandlers = Record[Idx++];
+
+ SmallVector<BasicBlock *, 2> Handlers;
+ for (unsigned Op = 0; Op != NumHandlers; ++Op) {
+ BasicBlock *BB = getBasicBlock(Record[Idx++]);
+ if (!BB)
+ return error("Invalid record");
+ Handlers.push_back(BB);
+ }
+
+ BasicBlock *UnwindDest = nullptr;
+ if (Idx + 1 == Record.size()) {
+ UnwindDest = getBasicBlock(Record[Idx++]);
+ if (!UnwindDest)
+ return error("Invalid record");
+ }
+
+ if (Record.size() != Idx)
+ return error("Invalid record");
+
+ auto *CatchSwitch =
+ CatchSwitchInst::Create(ParentPad, UnwindDest, NumHandlers);
+ for (BasicBlock *Handler : Handlers)
+ CatchSwitch->addHandler(Handler);
+ I = CatchSwitch;
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CATCHPAD:
+ case bitc::FUNC_CODE_INST_CLEANUPPAD: { // [tok,num,(ty,val)*]
+ // We must have, at minimum, the outer scope and the number of arguments.
+ if (Record.size() < 2)
+ return error("Invalid record");
+
+ unsigned Idx = 0;
+
+ Value *ParentPad =
+ getValue(Record, Idx++, NextValueNo, Type::getTokenTy(Context));
+
+ unsigned NumArgOperands = Record[Idx++];
+
+ SmallVector<Value *, 2> Args;
+ for (unsigned Op = 0; Op != NumArgOperands; ++Op) {
+ Value *Val;
+ if (getValueTypePair(Record, Idx, NextValueNo, Val))
+ return error("Invalid record");
+ Args.push_back(Val);
+ }
+
+ if (Record.size() != Idx)
+ return error("Invalid record");
+
+ if (BitCode == bitc::FUNC_CODE_INST_CLEANUPPAD)
+ I = CleanupPadInst::Create(ParentPad, Args);
+ else
+ I = CatchPadInst::Create(ParentPad, Args);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_SWITCH: { // SWITCH: [opty, op0, op1, ...]
+ // Check magic
+ if ((Record[0] >> 16) == SWITCH_INST_MAGIC) {
+ // "New" SwitchInst format with case ranges. The changes to write this
+ // format were reverted but we still recognize bitcode that uses it.
+ // Hopefully someday we will have support for case ranges and can use
+ // this format again.
+
+ Type *OpTy = getTypeByID(Record[1]);
+ unsigned ValueBitWidth = cast<IntegerType>(OpTy)->getBitWidth();
+
+ Value *Cond = getValue(Record, 2, NextValueNo, OpTy);
+ BasicBlock *Default = getBasicBlock(Record[3]);
+ if (!OpTy || !Cond || !Default)
+ return error("Invalid record");
+
+ unsigned NumCases = Record[4];
+
+ SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
+ InstructionList.push_back(SI);
+
+ unsigned CurIdx = 5;
+ for (unsigned i = 0; i != NumCases; ++i) {
+ SmallVector<ConstantInt*, 1> CaseVals;
+ unsigned NumItems = Record[CurIdx++];
+ for (unsigned ci = 0; ci != NumItems; ++ci) {
+ bool isSingleNumber = Record[CurIdx++];
+
+ APInt Low;
+ unsigned ActiveWords = 1;
+ if (ValueBitWidth > 64)
+ ActiveWords = Record[CurIdx++];
+ Low = readWideAPInt(makeArrayRef(&Record[CurIdx], ActiveWords),
+ ValueBitWidth);
+ CurIdx += ActiveWords;
+
+ if (!isSingleNumber) {
+ ActiveWords = 1;
+ if (ValueBitWidth > 64)
+ ActiveWords = Record[CurIdx++];
+ APInt High = readWideAPInt(
+ makeArrayRef(&Record[CurIdx], ActiveWords), ValueBitWidth);
+ CurIdx += ActiveWords;
+
+ // FIXME: It is not clear whether values in the range should be
+ // compared as signed or unsigned values. The partially
+ // implemented changes that used this format in the past used
+ // unsigned comparisons.
+ for ( ; Low.ule(High); ++Low)
+ CaseVals.push_back(ConstantInt::get(Context, Low));
+ } else
+ CaseVals.push_back(ConstantInt::get(Context, Low));
+ }
+ BasicBlock *DestBB = getBasicBlock(Record[CurIdx++]);
+ for (ConstantInt *Cst : CaseVals)
+ SI->addCase(Cst, DestBB);
+ }
+ I = SI;
+ break;
+ }
+
+ // Old SwitchInst format without case ranges.
+
+ if (Record.size() < 3 || (Record.size() & 1) == 0)
+ return error("Invalid record");
+ Type *OpTy = getTypeByID(Record[0]);
+ Value *Cond = getValue(Record, 1, NextValueNo, OpTy);
+ BasicBlock *Default = getBasicBlock(Record[2]);
+ if (!OpTy || !Cond || !Default)
+ return error("Invalid record");
+ unsigned NumCases = (Record.size()-3)/2;
+ SwitchInst *SI = SwitchInst::Create(Cond, Default, NumCases);
+ InstructionList.push_back(SI);
+ for (unsigned i = 0, e = NumCases; i != e; ++i) {
+ ConstantInt *CaseVal =
+ dyn_cast_or_null<ConstantInt>(getFnValueByID(Record[3+i*2], OpTy));
+ BasicBlock *DestBB = getBasicBlock(Record[1+3+i*2]);
+ if (!CaseVal || !DestBB) {
+ delete SI;
+ return error("Invalid record");
+ }
+ SI->addCase(CaseVal, DestBB);
+ }
+ I = SI;
+ break;
+ }
+ case bitc::FUNC_CODE_INST_INDIRECTBR: { // INDIRECTBR: [opty, op0, op1, ...]
+ if (Record.size() < 2)
+ return error("Invalid record");
+ Type *OpTy = getTypeByID(Record[0]);
+ Value *Address = getValue(Record, 1, NextValueNo, OpTy);
+ if (!OpTy || !Address)
+ return error("Invalid record");
+ unsigned NumDests = Record.size()-2;
+ IndirectBrInst *IBI = IndirectBrInst::Create(Address, NumDests);
+ InstructionList.push_back(IBI);
+ for (unsigned i = 0, e = NumDests; i != e; ++i) {
+ if (BasicBlock *DestBB = getBasicBlock(Record[2+i])) {
+ IBI->addDestination(DestBB);
+ } else {
+ delete IBI;
+ return error("Invalid record");
+ }
+ }
+ I = IBI;
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_INVOKE: {
+ // INVOKE: [attrs, cc, normBB, unwindBB, fnty, op0,op1,op2, ...]
+ if (Record.size() < 4)
+ return error("Invalid record");
+ unsigned OpNum = 0;
+ AttributeList PAL = getAttributes(Record[OpNum++]);
+ unsigned CCInfo = Record[OpNum++];
+ BasicBlock *NormalBB = getBasicBlock(Record[OpNum++]);
+ BasicBlock *UnwindBB = getBasicBlock(Record[OpNum++]);
+
+ FunctionType *FTy = nullptr;
+ if ((CCInfo >> 13) & 1) {
+ FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++]));
+ if (!FTy)
+ return error("Explicit invoke type is not a function type");
+ }
+
+ Value *Callee;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
+ return error("Invalid record");
+
+ PointerType *CalleeTy = dyn_cast<PointerType>(Callee->getType());
+ if (!CalleeTy)
+ return error("Callee is not a pointer");
+ if (!FTy) {
+ FTy =
+ dyn_cast<FunctionType>(Callee->getType()->getPointerElementType());
+ if (!FTy)
+ return error("Callee is not of pointer to function type");
+ } else if (!CalleeTy->isOpaqueOrPointeeTypeMatches(FTy))
+ return error("Explicit invoke type does not match pointee type of "
+ "callee operand");
+ if (Record.size() < FTy->getNumParams() + OpNum)
+ return error("Insufficient operands to call");
+
+ SmallVector<Value*, 16> Ops;
+ SmallVector<Type *, 16> ArgsTys;
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
+ Ops.push_back(getValue(Record, OpNum, NextValueNo,
+ FTy->getParamType(i)));
+ ArgsTys.push_back(FTy->getParamType(i));
+ if (!Ops.back())
+ return error("Invalid record");
+ }
+
+ if (!FTy->isVarArg()) {
+ if (Record.size() != OpNum)
+ return error("Invalid record");
+ } else {
+ // Read type/value pairs for varargs params.
+ while (OpNum != Record.size()) {
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+ return error("Invalid record");
+ Ops.push_back(Op);
+ ArgsTys.push_back(Op->getType());
+ }
+ }
+
+ I = InvokeInst::Create(FTy, Callee, NormalBB, UnwindBB, Ops,
+ OperandBundles);
+ OperandBundles.clear();
+ InstructionList.push_back(I);
+ cast<InvokeInst>(I)->setCallingConv(
+ static_cast<CallingConv::ID>(CallingConv::MaxID & CCInfo));
+ cast<InvokeInst>(I)->setAttributes(PAL);
+ propagateAttributeTypes(cast<CallBase>(I), ArgsTys);
+
+ break;
+ }
+ case bitc::FUNC_CODE_INST_RESUME: { // RESUME: [opval]
+ unsigned Idx = 0;
+ Value *Val = nullptr;
+ if (getValueTypePair(Record, Idx, NextValueNo, Val))
+ return error("Invalid record");
+ I = ResumeInst::Create(Val);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CALLBR: {
+ // CALLBR: [attr, cc, norm, transfs, fty, fnid, args]
+ unsigned OpNum = 0;
+ AttributeList PAL = getAttributes(Record[OpNum++]);
+ unsigned CCInfo = Record[OpNum++];
+
+ BasicBlock *DefaultDest = getBasicBlock(Record[OpNum++]);
+ unsigned NumIndirectDests = Record[OpNum++];
+ SmallVector<BasicBlock *, 16> IndirectDests;
+ for (unsigned i = 0, e = NumIndirectDests; i != e; ++i)
+ IndirectDests.push_back(getBasicBlock(Record[OpNum++]));
+
+ FunctionType *FTy = nullptr;
+ if ((CCInfo >> bitc::CALL_EXPLICIT_TYPE) & 1) {
+ FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++]));
+ if (!FTy)
+ return error("Explicit call type is not a function type");
+ }
+
+ Value *Callee;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
+ return error("Invalid record");
+
+ PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
+ if (!OpTy)
+ return error("Callee is not a pointer type");
+ if (!FTy) {
+ FTy =
+ dyn_cast<FunctionType>(Callee->getType()->getPointerElementType());
+ if (!FTy)
+ return error("Callee is not of pointer to function type");
+ } else if (!OpTy->isOpaqueOrPointeeTypeMatches(FTy))
+ return error("Explicit call type does not match pointee type of "
+ "callee operand");
+ if (Record.size() < FTy->getNumParams() + OpNum)
+ return error("Insufficient operands to call");
+
+ SmallVector<Value*, 16> Args;
+ SmallVector<Type *, 16> ArgsTys;
+ // Read the fixed params.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
+ Value *Arg;
+ if (FTy->getParamType(i)->isLabelTy())
+ Arg = getBasicBlock(Record[OpNum]);
+ else
+ Arg = getValue(Record, OpNum, NextValueNo, FTy->getParamType(i));
+ if (!Arg)
+ return error("Invalid record");
+ Args.push_back(Arg);
+ ArgsTys.push_back(Arg->getType());
+ }
+
+ // Read type/value pairs for varargs params.
+ if (!FTy->isVarArg()) {
+ if (OpNum != Record.size())
+ return error("Invalid record");
+ } else {
+ while (OpNum != Record.size()) {
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+ return error("Invalid record");
+ Args.push_back(Op);
+ ArgsTys.push_back(Op->getType());
+ }
+ }
+
+ I = CallBrInst::Create(FTy, Callee, DefaultDest, IndirectDests, Args,
+ OperandBundles);
+ OperandBundles.clear();
+ InstructionList.push_back(I);
+ cast<CallBrInst>(I)->setCallingConv(
+ static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV));
+ cast<CallBrInst>(I)->setAttributes(PAL);
+ propagateAttributeTypes(cast<CallBase>(I), ArgsTys);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_UNREACHABLE: // UNREACHABLE
+ I = new UnreachableInst(Context);
+ InstructionList.push_back(I);
+ break;
+ case bitc::FUNC_CODE_INST_PHI: { // PHI: [ty, val0,bb0, ...]
+ if (Record.empty())
+ return error("Invalid record");
+ // The first record specifies the type.
+ Type *Ty = getTypeByID(Record[0]);
+ if (!Ty)
+ return error("Invalid record");
+
+ // Phi arguments are pairs of records of [value, basic block].
+ // There is an optional final record for fast-math-flags if this phi has a
+ // floating-point type.
+ size_t NumArgs = (Record.size() - 1) / 2;
+ PHINode *PN = PHINode::Create(Ty, NumArgs);
+ if ((Record.size() - 1) % 2 == 1 && !isa<FPMathOperator>(PN))
+ return error("Invalid record");
+ InstructionList.push_back(PN);
+
+ for (unsigned i = 0; i != NumArgs; i++) {
+ Value *V;
+ // With the new function encoding, it is possible that operands have
+ // negative IDs (for forward references). Use a signed VBR
+ // representation to keep the encoding small.
+ if (UseRelativeIDs)
+ V = getValueSigned(Record, i * 2 + 1, NextValueNo, Ty);
+ else
+ V = getValue(Record, i * 2 + 1, NextValueNo, Ty);
+ BasicBlock *BB = getBasicBlock(Record[i * 2 + 2]);
+ if (!V || !BB)
+ return error("Invalid record");
+ PN->addIncoming(V, BB);
+ }
+ I = PN;
+
+ // If there are an even number of records, the final record must be FMF.
+ if (Record.size() % 2 == 0) {
+ assert(isa<FPMathOperator>(I) && "Unexpected phi type");
+ FastMathFlags FMF = getDecodedFastMathFlags(Record[Record.size() - 1]);
+ if (FMF.any())
+ I->setFastMathFlags(FMF);
+ }
+
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_LANDINGPAD:
+ case bitc::FUNC_CODE_INST_LANDINGPAD_OLD: {
+ // LANDINGPAD: [ty, val, val, num, (id0,val0 ...)?]
+ unsigned Idx = 0;
+ if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD) {
+ if (Record.size() < 3)
+ return error("Invalid record");
+ } else {
+ assert(BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD);
+ if (Record.size() < 4)
+ return error("Invalid record");
+ }
+ Type *Ty = getTypeByID(Record[Idx++]);
+ if (!Ty)
+ return error("Invalid record");
+ if (BitCode == bitc::FUNC_CODE_INST_LANDINGPAD_OLD) {
+ Value *PersFn = nullptr;
+ if (getValueTypePair(Record, Idx, NextValueNo, PersFn))
+ return error("Invalid record");
+
+ if (!F->hasPersonalityFn())
+ F->setPersonalityFn(cast<Constant>(PersFn));
+ else if (F->getPersonalityFn() != cast<Constant>(PersFn))
+ return error("Personality function mismatch");
+ }
+
+ bool IsCleanup = !!Record[Idx++];
+ unsigned NumClauses = Record[Idx++];
+ LandingPadInst *LP = LandingPadInst::Create(Ty, NumClauses);
+ LP->setCleanup(IsCleanup);
+ for (unsigned J = 0; J != NumClauses; ++J) {
+ LandingPadInst::ClauseType CT =
+ LandingPadInst::ClauseType(Record[Idx++]); (void)CT;
+ Value *Val;
+
+ if (getValueTypePair(Record, Idx, NextValueNo, Val)) {
+ delete LP;
+ return error("Invalid record");
+ }
+
+ assert((CT != LandingPadInst::Catch ||
+ !isa<ArrayType>(Val->getType())) &&
+ "Catch clause has a invalid type!");
+ assert((CT != LandingPadInst::Filter ||
+ isa<ArrayType>(Val->getType())) &&
+ "Filter clause has invalid type!");
+ LP->addClause(cast<Constant>(Val));
+ }
+
+ I = LP;
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_INST_ALLOCA: { // ALLOCA: [instty, opty, op, align]
+ if (Record.size() != 4)
+ return error("Invalid record");
+ using APV = AllocaPackedValues;
+ const uint64_t Rec = Record[3];
+ const bool InAlloca = Bitfield::get<APV::UsedWithInAlloca>(Rec);
+ const bool SwiftError = Bitfield::get<APV::SwiftError>(Rec);
+ Type *Ty = getTypeByID(Record[0]);
+ if (!Bitfield::get<APV::ExplicitType>(Rec)) {
+ auto *PTy = dyn_cast_or_null<PointerType>(Ty);
+ if (!PTy)
+ return error("Old-style alloca with a non-pointer type");
+ Ty = PTy->getPointerElementType();
+ }
+ Type *OpTy = getTypeByID(Record[1]);
+ Value *Size = getFnValueByID(Record[2], OpTy);
+ MaybeAlign Align;
+ uint64_t AlignExp =
+ Bitfield::get<APV::AlignLower>(Rec) |
+ (Bitfield::get<APV::AlignUpper>(Rec) << APV::AlignLower::Bits);
+ if (Error Err = parseAlignmentValue(AlignExp, Align)) {
+ return Err;
+ }
+ if (!Ty || !Size)
+ return error("Invalid record");
+
+ // FIXME: Make this an optional field.
+ const DataLayout &DL = TheModule->getDataLayout();
+ unsigned AS = DL.getAllocaAddrSpace();
+
+ SmallPtrSet<Type *, 4> Visited;
+ if (!Align && !Ty->isSized(&Visited))
+ return error("alloca of unsized type");
+ if (!Align)
+ Align = DL.getPrefTypeAlign(Ty);
+
+ AllocaInst *AI = new AllocaInst(Ty, AS, Size, *Align);
+ AI->setUsedWithInAlloca(InAlloca);
+ AI->setSwiftError(SwiftError);
+ I = AI;
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_LOAD: { // LOAD: [opty, op, align, vol]
+ unsigned OpNum = 0;
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
+ (OpNum + 2 != Record.size() && OpNum + 3 != Record.size()))
+ return error("Invalid record");
+
+ if (!isa<PointerType>(Op->getType()))
+ return error("Load operand is not a pointer type");
+
+ Type *Ty = nullptr;
+ if (OpNum + 3 == Record.size()) {
+ Ty = getTypeByID(Record[OpNum++]);
+ } else {
+ Ty = Op->getType()->getPointerElementType();
+ }
+
+ if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType()))
+ return Err;
+
+ MaybeAlign Align;
+ if (Error Err = parseAlignmentValue(Record[OpNum], Align))
+ return Err;
+ SmallPtrSet<Type *, 4> Visited;
+ if (!Align && !Ty->isSized(&Visited))
+ return error("load of unsized type");
+ if (!Align)
+ Align = TheModule->getDataLayout().getABITypeAlign(Ty);
+ I = new LoadInst(Ty, Op, "", Record[OpNum + 1], *Align);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_LOADATOMIC: {
+ // LOADATOMIC: [opty, op, align, vol, ordering, ssid]
+ unsigned OpNum = 0;
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op) ||
+ (OpNum + 4 != Record.size() && OpNum + 5 != Record.size()))
+ return error("Invalid record");
+
+ if (!isa<PointerType>(Op->getType()))
+ return error("Load operand is not a pointer type");
+
+ Type *Ty = nullptr;
+ if (OpNum + 5 == Record.size()) {
+ Ty = getTypeByID(Record[OpNum++]);
+ } else {
+ Ty = Op->getType()->getPointerElementType();
+ }
+
+ if (Error Err = typeCheckLoadStoreInst(Ty, Op->getType()))
+ return Err;
+
+ AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
+ if (Ordering == AtomicOrdering::NotAtomic ||
+ Ordering == AtomicOrdering::Release ||
+ Ordering == AtomicOrdering::AcquireRelease)
+ return error("Invalid record");
+ if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
+ return error("Invalid record");
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
+
+ MaybeAlign Align;
+ if (Error Err = parseAlignmentValue(Record[OpNum], Align))
+ return Err;
+ if (!Align)
+ return error("Alignment missing from atomic load");
+ I = new LoadInst(Ty, Op, "", Record[OpNum + 1], *Align, Ordering, SSID);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_STORE:
+ case bitc::FUNC_CODE_INST_STORE_OLD: { // STORE2:[ptrty, ptr, val, align, vol]
+ unsigned OpNum = 0;
+ Value *Val, *Ptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+ (BitCode == bitc::FUNC_CODE_INST_STORE
+ ? getValueTypePair(Record, OpNum, NextValueNo, Val)
+ : popValue(Record, OpNum, NextValueNo,
+ Ptr->getType()->getPointerElementType(), Val)) ||
+ OpNum + 2 != Record.size())
+ return error("Invalid record");
+
+ if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
+ return Err;
+ MaybeAlign Align;
+ if (Error Err = parseAlignmentValue(Record[OpNum], Align))
+ return Err;
+ SmallPtrSet<Type *, 4> Visited;
+ if (!Align && !Val->getType()->isSized(&Visited))
+ return error("store of unsized type");
+ if (!Align)
+ Align = TheModule->getDataLayout().getABITypeAlign(Val->getType());
+ I = new StoreInst(Val, Ptr, Record[OpNum + 1], *Align);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_STOREATOMIC:
+ case bitc::FUNC_CODE_INST_STOREATOMIC_OLD: {
+ // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, ssid]
+ unsigned OpNum = 0;
+ Value *Val, *Ptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Ptr) ||
+ !isa<PointerType>(Ptr->getType()) ||
+ (BitCode == bitc::FUNC_CODE_INST_STOREATOMIC
+ ? getValueTypePair(Record, OpNum, NextValueNo, Val)
+ : popValue(Record, OpNum, NextValueNo,
+ Ptr->getType()->getPointerElementType(), Val)) ||
+ OpNum + 4 != Record.size())
+ return error("Invalid record");
+
+ if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
+ return Err;
+ AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
+ if (Ordering == AtomicOrdering::NotAtomic ||
+ Ordering == AtomicOrdering::Acquire ||
+ Ordering == AtomicOrdering::AcquireRelease)
+ return error("Invalid record");
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
+ if (Ordering != AtomicOrdering::NotAtomic && Record[OpNum] == 0)
+ return error("Invalid record");
+
+ MaybeAlign Align;
+ if (Error Err = parseAlignmentValue(Record[OpNum], Align))
+ return Err;
+ if (!Align)
+ return error("Alignment missing from atomic store");
+ I = new StoreInst(Val, Ptr, Record[OpNum + 1], *Align, Ordering, SSID);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CMPXCHG_OLD: {
+ // CMPXCHG_OLD: [ptrty, ptr, cmp, val, vol, ordering, synchscope,
+ // failure_ordering?, weak?]
+ const size_t NumRecords = Record.size();
+ unsigned OpNum = 0;
+ Value *Ptr = nullptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Ptr))
+ return error("Invalid record");
+
+ if (!isa<PointerType>(Ptr->getType()))
+ return error("Cmpxchg operand is not a pointer type");
+
+ Value *Cmp = nullptr;
+ if (popValue(Record, OpNum, NextValueNo,
+ cast<PointerType>(Ptr->getType())->getPointerElementType(),
+ Cmp))
+ return error("Invalid record");
+
+ Value *New = nullptr;
+ if (popValue(Record, OpNum, NextValueNo, Cmp->getType(), New) ||
+ NumRecords < OpNum + 3 || NumRecords > OpNum + 5)
+ return error("Invalid record");
+
+ const AtomicOrdering SuccessOrdering =
+ getDecodedOrdering(Record[OpNum + 1]);
+ if (SuccessOrdering == AtomicOrdering::NotAtomic ||
+ SuccessOrdering == AtomicOrdering::Unordered)
+ return error("Invalid record");
+
+ const SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 2]);
+
+ if (Error Err = typeCheckLoadStoreInst(Cmp->getType(), Ptr->getType()))
+ return Err;
+
+ const AtomicOrdering FailureOrdering =
+ NumRecords < 7
+ ? AtomicCmpXchgInst::getStrongestFailureOrdering(SuccessOrdering)
+ : getDecodedOrdering(Record[OpNum + 3]);
+
+ if (FailureOrdering == AtomicOrdering::NotAtomic ||
+ FailureOrdering == AtomicOrdering::Unordered)
+ return error("Invalid record");
+
+ const Align Alignment(
+ TheModule->getDataLayout().getTypeStoreSize(Cmp->getType()));
+
+ I = new AtomicCmpXchgInst(Ptr, Cmp, New, Alignment, SuccessOrdering,
+ FailureOrdering, SSID);
+ cast<AtomicCmpXchgInst>(I)->setVolatile(Record[OpNum]);
+
+ if (NumRecords < 8) {
+ // Before weak cmpxchgs existed, the instruction simply returned the
+ // value loaded from memory, so bitcode files from that era will be
+ // expecting the first component of a modern cmpxchg.
+ CurBB->getInstList().push_back(I);
+ I = ExtractValueInst::Create(I, 0);
+ } else {
+ cast<AtomicCmpXchgInst>(I)->setWeak(Record[OpNum + 4]);
+ }
+
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CMPXCHG: {
+ // CMPXCHG: [ptrty, ptr, cmp, val, vol, success_ordering, synchscope,
+ // failure_ordering, weak, align?]
+ const size_t NumRecords = Record.size();
+ unsigned OpNum = 0;
+ Value *Ptr = nullptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Ptr))
+ return error("Invalid record");
+
+ if (!isa<PointerType>(Ptr->getType()))
+ return error("Cmpxchg operand is not a pointer type");
+
+ Value *Cmp = nullptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Cmp))
+ return error("Invalid record");
+
+ Value *Val = nullptr;
+ if (popValue(Record, OpNum, NextValueNo, Cmp->getType(), Val))
+ return error("Invalid record");
+
+ if (NumRecords < OpNum + 3 || NumRecords > OpNum + 6)
+ return error("Invalid record");
+
+ const bool IsVol = Record[OpNum];
+
+ const AtomicOrdering SuccessOrdering =
+ getDecodedOrdering(Record[OpNum + 1]);
+ if (!AtomicCmpXchgInst::isValidSuccessOrdering(SuccessOrdering))
+ return error("Invalid cmpxchg success ordering");
+
+ const SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 2]);
+
+ if (Error Err = typeCheckLoadStoreInst(Cmp->getType(), Ptr->getType()))
+ return Err;
+
+ const AtomicOrdering FailureOrdering =
+ getDecodedOrdering(Record[OpNum + 3]);
+ if (!AtomicCmpXchgInst::isValidFailureOrdering(FailureOrdering))
+ return error("Invalid cmpxchg failure ordering");
+
+ const bool IsWeak = Record[OpNum + 4];
+
+ MaybeAlign Alignment;
+
+ if (NumRecords == (OpNum + 6)) {
+ if (Error Err = parseAlignmentValue(Record[OpNum + 5], Alignment))
+ return Err;
+ }
+ if (!Alignment)
+ Alignment =
+ Align(TheModule->getDataLayout().getTypeStoreSize(Cmp->getType()));
+
+ I = new AtomicCmpXchgInst(Ptr, Cmp, Val, *Alignment, SuccessOrdering,
+ FailureOrdering, SSID);
+ cast<AtomicCmpXchgInst>(I)->setVolatile(IsVol);
+ cast<AtomicCmpXchgInst>(I)->setWeak(IsWeak);
+
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_ATOMICRMW_OLD:
+ case bitc::FUNC_CODE_INST_ATOMICRMW: {
+ // ATOMICRMW_OLD: [ptrty, ptr, val, op, vol, ordering, ssid, align?]
+ // ATOMICRMW: [ptrty, ptr, valty, val, op, vol, ordering, ssid, align?]
+ const size_t NumRecords = Record.size();
+ unsigned OpNum = 0;
+
+ Value *Ptr = nullptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Ptr))
+ return error("Invalid record");
+
+ if (!isa<PointerType>(Ptr->getType()))
+ return error("Invalid record");
+
+ Value *Val = nullptr;
+ if (BitCode == bitc::FUNC_CODE_INST_ATOMICRMW_OLD) {
+ if (popValue(Record, OpNum, NextValueNo,
+ cast<PointerType>(Ptr->getType())->getPointerElementType(),
+ Val))
+ return error("Invalid record");
+ } else {
+ if (getValueTypePair(Record, OpNum, NextValueNo, Val))
+ return error("Invalid record");
+ }
+
+ if (!(NumRecords == (OpNum + 4) || NumRecords == (OpNum + 5)))
+ return error("Invalid record");
+
+ const AtomicRMWInst::BinOp Operation =
+ getDecodedRMWOperation(Record[OpNum]);
+ if (Operation < AtomicRMWInst::FIRST_BINOP ||
+ Operation > AtomicRMWInst::LAST_BINOP)
+ return error("Invalid record");
+
+ const bool IsVol = Record[OpNum + 1];
+
+ const AtomicOrdering Ordering = getDecodedOrdering(Record[OpNum + 2]);
+ if (Ordering == AtomicOrdering::NotAtomic ||
+ Ordering == AtomicOrdering::Unordered)
+ return error("Invalid record");
+
+ const SyncScope::ID SSID = getDecodedSyncScopeID(Record[OpNum + 3]);
+
+ MaybeAlign Alignment;
+
+ if (NumRecords == (OpNum + 5)) {
+ if (Error Err = parseAlignmentValue(Record[OpNum + 4], Alignment))
+ return Err;
+ }
+
+ if (!Alignment)
+ Alignment =
+ Align(TheModule->getDataLayout().getTypeStoreSize(Val->getType()));
+
+ I = new AtomicRMWInst(Operation, Ptr, Val, *Alignment, Ordering, SSID);
+ cast<AtomicRMWInst>(I)->setVolatile(IsVol);
+
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_FENCE: { // FENCE:[ordering, ssid]
+ if (2 != Record.size())
+ return error("Invalid record");
+ AtomicOrdering Ordering = getDecodedOrdering(Record[0]);
+ if (Ordering == AtomicOrdering::NotAtomic ||
+ Ordering == AtomicOrdering::Unordered ||
+ Ordering == AtomicOrdering::Monotonic)
+ return error("Invalid record");
+ SyncScope::ID SSID = getDecodedSyncScopeID(Record[1]);
+ I = new FenceInst(Context, Ordering, SSID);
+ InstructionList.push_back(I);
+ break;
+ }
+ case bitc::FUNC_CODE_INST_CALL: {
+ // CALL: [paramattrs, cc, fmf, fnty, fnid, arg0, arg1...]
+ if (Record.size() < 3)
+ return error("Invalid record");
+
+ unsigned OpNum = 0;
+ AttributeList PAL = getAttributes(Record[OpNum++]);
+ unsigned CCInfo = Record[OpNum++];
+
+ FastMathFlags FMF;
+ if ((CCInfo >> bitc::CALL_FMF) & 1) {
+ FMF = getDecodedFastMathFlags(Record[OpNum++]);
+ if (!FMF.any())
+ return error("Fast math flags indicator set for call with no FMF");
+ }
+
+ FunctionType *FTy = nullptr;
+ if ((CCInfo >> bitc::CALL_EXPLICIT_TYPE) & 1) {
+ FTy = dyn_cast<FunctionType>(getTypeByID(Record[OpNum++]));
+ if (!FTy)
+ return error("Explicit call type is not a function type");
+ }
+
+ Value *Callee;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Callee))
+ return error("Invalid record");
+
+ PointerType *OpTy = dyn_cast<PointerType>(Callee->getType());
+ if (!OpTy)
+ return error("Callee is not a pointer type");
+ if (!FTy) {
+ FTy =
+ dyn_cast<FunctionType>(Callee->getType()->getPointerElementType());
+ if (!FTy)
+ return error("Callee is not of pointer to function type");
+ } else if (!OpTy->isOpaqueOrPointeeTypeMatches(FTy))
+ return error("Explicit call type does not match pointee type of "
+ "callee operand");
+ if (Record.size() < FTy->getNumParams() + OpNum)
+ return error("Insufficient operands to call");
+
+ SmallVector<Value*, 16> Args;
+ SmallVector<Type *, 16> ArgsTys;
+ // Read the fixed params.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i, ++OpNum) {
+ if (FTy->getParamType(i)->isLabelTy())
+ Args.push_back(getBasicBlock(Record[OpNum]));
+ else
+ Args.push_back(getValue(Record, OpNum, NextValueNo,
+ FTy->getParamType(i)));
+ ArgsTys.push_back(FTy->getParamType(i));
+ if (!Args.back())
+ return error("Invalid record");
+ }
+
+ // Read type/value pairs for varargs params.
+ if (!FTy->isVarArg()) {
+ if (OpNum != Record.size())
+ return error("Invalid record");
+ } else {
+ while (OpNum != Record.size()) {
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+ return error("Invalid record");
+ Args.push_back(Op);
+ ArgsTys.push_back(Op->getType());
+ }
+ }
+
+ I = CallInst::Create(FTy, Callee, Args, OperandBundles);
+ OperandBundles.clear();
+ InstructionList.push_back(I);
+ cast<CallInst>(I)->setCallingConv(
+ static_cast<CallingConv::ID>((0x7ff & CCInfo) >> bitc::CALL_CCONV));
+ CallInst::TailCallKind TCK = CallInst::TCK_None;
+ if (CCInfo & 1 << bitc::CALL_TAIL)
+ TCK = CallInst::TCK_Tail;
+ if (CCInfo & (1 << bitc::CALL_MUSTTAIL))
+ TCK = CallInst::TCK_MustTail;
+ if (CCInfo & (1 << bitc::CALL_NOTAIL))
+ TCK = CallInst::TCK_NoTail;
+ cast<CallInst>(I)->setTailCallKind(TCK);
+ cast<CallInst>(I)->setAttributes(PAL);
+ propagateAttributeTypes(cast<CallBase>(I), ArgsTys);
+ if (FMF.any()) {
+ if (!isa<FPMathOperator>(I))
+ return error("Fast-math-flags specified for call without "
+ "floating-point scalar or vector return type");
+ I->setFastMathFlags(FMF);
+ }
+ break;
+ }
+ case bitc::FUNC_CODE_INST_VAARG: { // VAARG: [valistty, valist, instty]
+ if (Record.size() < 3)
+ return error("Invalid record");
+ Type *OpTy = getTypeByID(Record[0]);
+ Value *Op = getValue(Record, 1, NextValueNo, OpTy);
+ Type *ResTy = getTypeByID(Record[2]);
+ if (!OpTy || !Op || !ResTy)
+ return error("Invalid record");
+ I = new VAArgInst(Op, ResTy);
+ InstructionList.push_back(I);
+ break;
+ }
+
+ case bitc::FUNC_CODE_OPERAND_BUNDLE: {
+ // A call or an invoke can be optionally prefixed with some variable
+ // number of operand bundle blocks. These blocks are read into
+ // OperandBundles and consumed at the next call or invoke instruction.
+
+ if (Record.empty() || Record[0] >= BundleTags.size())
+ return error("Invalid record");
+
+ std::vector<Value *> Inputs;
+
+ unsigned OpNum = 1;
+ while (OpNum != Record.size()) {
+ Value *Op;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+ return error("Invalid record");
+ Inputs.push_back(Op);
+ }
+
+ OperandBundles.emplace_back(BundleTags[Record[0]], std::move(Inputs));
+ continue;
+ }
+
+ case bitc::FUNC_CODE_INST_FREEZE: { // FREEZE: [opty,opval]
+ unsigned OpNum = 0;
+ Value *Op = nullptr;
+ if (getValueTypePair(Record, OpNum, NextValueNo, Op))
+ return error("Invalid record");
+ if (OpNum != Record.size())
+ return error("Invalid record");
+
+ I = new FreezeInst(Op);
+ InstructionList.push_back(I);
+ break;
+ }
+ }
+
+ // Add instruction to end of current BB. If there is no current BB, reject
+ // this file.
+ if (!CurBB) {
+ I->deleteValue();
+ return error("Invalid instruction with no BB");
+ }
+ if (!OperandBundles.empty()) {
+ I->deleteValue();
+ return error("Operand bundles found with no consumer");
+ }
+ CurBB->getInstList().push_back(I);
+
+ // If this was a terminator instruction, move to the next block.
+ if (I->isTerminator()) {
+ ++CurBBNo;
+ CurBB = CurBBNo < FunctionBBs.size() ? FunctionBBs[CurBBNo] : nullptr;
+ }
+
+ // Non-void values get registered in the value table for future use.
+ if (!I->getType()->isVoidTy())
+ ValueList.assignValue(I, NextValueNo++);
+ }
+
+OutOfRecordLoop:
+
+ if (!OperandBundles.empty())
+ return error("Operand bundles found with no consumer");
+
+ // Check the function list for unresolved values.
+ if (Argument *A = dyn_cast<Argument>(ValueList.back())) {
+ if (!A->getParent()) {
+ // We found at least one unresolved value. Nuke them all to avoid leaks.
+ for (unsigned i = ModuleValueListSize, e = ValueList.size(); i != e; ++i){
+ if ((A = dyn_cast_or_null<Argument>(ValueList[i])) && !A->getParent()) {
+ A->replaceAllUsesWith(UndefValue::get(A->getType()));
+ delete A;
+ }
+ }
+ return error("Never resolved value found in function");
+ }
+ }
+
+ // Unexpected unresolved metadata about to be dropped.
+ if (MDLoader->hasFwdRefs())
+ return error("Invalid function metadata: outgoing forward refs");
+
+ // Trim the value list down to the size it was before we parsed this function.
+ ValueList.shrinkTo(ModuleValueListSize);
+ MDLoader->shrinkTo(ModuleMDLoaderSize);
+ std::vector<BasicBlock*>().swap(FunctionBBs);
+ return Error::success();
+}
+
+/// Find the function body in the bitcode stream
+///
+/// Skips forward through the stream, recording each function body's bit
+/// position in DeferredFunctionInfo, until the entry referenced by
+/// \p DeferredFunctionInfoIterator has a nonzero (i.e. known) offset.
+///
+/// \param F the function whose body is being located; used only by the
+///        assertion below to validate why the offset was initially unknown.
+/// \param DeferredFunctionInfoIterator iterator into DeferredFunctionInfo
+///        whose mapped value stays 0 until the body's offset is discovered.
+/// \returns Error::success() once the offset is recorded, or any error
+///        produced while scanning function bodies.
+Error BitcodeReader::findFunctionInStream(
+    Function *F,
+    DenseMap<Function *, uint64_t>::iterator DeferredFunctionInfoIterator) {
+  while (DeferredFunctionInfoIterator->second == 0) {
+    // This is the fallback handling for the old format bitcode that
+    // didn't contain the function index in the VST, or when we have
+    // an anonymous function which would not have a VST entry.
+    // Assert that we have one of those two cases.
+    assert(VSTOffset == 0 || !F->hasName());
+    // Parse the next body in the stream and set its position in the
+    // DeferredFunctionInfo map.
+    if (Error Err = rememberAndSkipFunctionBodies())
+      return Err;
+  }
+  return Error::success();
+}
+
+/// Translate a sync-scope index read from bitcode into a SyncScope::ID.
+///
+/// \param Val the raw scope value from the record: either one of the two
+///        builtin IDs (SingleThread / System) or an index into SSIDs, the
+///        table of scope IDs registered while parsing the module's
+///        SYNC_SCOPE_NAMES block.
+SyncScope::ID BitcodeReader::getDecodedSyncScopeID(unsigned Val) {
+  // The builtin scopes have fixed numeric values; pass them through directly.
+  if (Val == SyncScope::SingleThread || Val == SyncScope::System)
+    return SyncScope::ID(Val);
+  if (Val >= SSIDs.size())
+    return SyncScope::System; // Map unknown synchronization scopes to system.
+  return SSIDs[Val];
+}
+
+//===----------------------------------------------------------------------===//
+// GVMaterializer implementation
+//===----------------------------------------------------------------------===//
+
+/// GVMaterializer hook: parse the deferred body of \p GV from the bitcode
+/// stream, then apply post-parse fix-ups (intrinsic call upgrades, remangled
+/// intrinsic redirection, subprogram attachment, TBAA validation, branch
+/// weight and call-attribute cleanup).
+///
+/// \param GV the global to materialize; anything that is not a
+///        still-materializable Function is ignored.
+/// \returns any error hit while locating, seeking to, or parsing the body.
+Error BitcodeReader::materialize(GlobalValue *GV) {
+  Function *F = dyn_cast<Function>(GV);
+  // If it's not a function or is already material, ignore the request.
+  if (!F || !F->isMaterializable())
+    return Error::success();
+
+  DenseMap<Function*, uint64_t>::iterator DFII = DeferredFunctionInfo.find(F);
+  assert(DFII != DeferredFunctionInfo.end() && "Deferred function not found!");
+  // If its position is recorded as 0, its body is somewhere in the stream
+  // but we haven't seen it yet.
+  if (DFII->second == 0)
+    if (Error Err = findFunctionInStream(F, DFII))
+      return Err;
+
+  // Materialize metadata before parsing any function bodies.
+  if (Error Err = materializeMetadata())
+    return Err;
+
+  // Move the bit stream to the saved position of the deferred function body.
+  if (Error JumpFailed = Stream.JumpToBit(DFII->second))
+    return JumpFailed;
+  if (Error Err = parseFunctionBody(F))
+    return Err;
+  F->setIsMaterializable(false);
+
+  if (StripDebugInfo)
+    stripDebugInfo(*F);
+
+  // Upgrade any old intrinsic calls in the function.
+  for (auto &I : UpgradedIntrinsics) {
+    for (User *U : llvm::make_early_inc_range(I.first->materialized_users()))
+      if (CallInst *CI = dyn_cast<CallInst>(U))
+        UpgradeIntrinsicCall(CI, I.second);
+  }
+
+  // Update calls to the remangled intrinsics
+  for (auto &I : RemangledIntrinsics)
+    for (User *U : llvm::make_early_inc_range(I.first->materialized_users()))
+      // Don't expect any other users than call sites
+      cast<CallBase>(U)->setCalledFunction(I.second);
+
+  // Finish fn->subprogram upgrade for materialized functions.
+  if (DISubprogram *SP = MDLoader->lookupSubprogramForFunction(F))
+    F->setSubprogram(SP);
+
+  // Check if the TBAA Metadata are valid, otherwise we will need to strip them.
+  if (!MDLoader->isStrippingTBAA()) {
+    for (auto &I : instructions(F)) {
+      MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa);
+      if (!TBAA || TBAAVerifyHelper.visitTBAAMetadata(I, TBAA))
+        continue;
+      // One invalid TBAA node means we strip TBAA module-wide and stop
+      // verifying further instructions in this function.
+      MDLoader->setStripTBAA(true);
+      stripTBAA(F->getParent());
+    }
+  }
+
+  for (auto &I : instructions(F)) {
+    // "Upgrade" older incorrect branch weights by dropping them.
+    if (auto *MD = I.getMetadata(LLVMContext::MD_prof)) {
+      if (MD->getOperand(0) != nullptr && isa<MDString>(MD->getOperand(0))) {
+        MDString *MDS = cast<MDString>(MD->getOperand(0));
+        StringRef ProfName = MDS->getString();
+        // Check consistency of !prof branch_weights metadata.
+        if (!ProfName.equals("branch_weights"))
+          continue;
+        // Expected operand count depends on the instruction kind: one weight
+        // per successor/destination, 1 for calls, 2 for selects.
+        unsigned ExpectedNumOperands = 0;
+        if (BranchInst *BI = dyn_cast<BranchInst>(&I))
+          ExpectedNumOperands = BI->getNumSuccessors();
+        else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
+          ExpectedNumOperands = SI->getNumSuccessors();
+        else if (isa<CallInst>(&I))
+          ExpectedNumOperands = 1;
+        else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
+          ExpectedNumOperands = IBI->getNumDestinations();
+        else if (isa<SelectInst>(&I))
+          ExpectedNumOperands = 2;
+        else
+          continue; // ignore and continue.
+
+        // If branch weight doesn't match, just strip branch weight.
+        if (MD->getNumOperands() != 1 + ExpectedNumOperands)
+          I.setMetadata(LLVMContext::MD_prof, nullptr);
+      }
+    }
+
+    // Remove incompatible attributes on function calls.
+    if (auto *CI = dyn_cast<CallBase>(&I)) {
+      CI->removeRetAttrs(AttributeFuncs::typeIncompatible(
+          CI->getFunctionType()->getReturnType()));
+
+      for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ++ArgNo)
+        CI->removeParamAttrs(ArgNo, AttributeFuncs::typeIncompatible(
+                                        CI->getArgOperand(ArgNo)->getType()));
+    }
+  }
+
+  // Look for functions that rely on old function attribute behavior.
+  UpgradeFunctionAttributes(*F);
+
+  // Bring in any functions that this function forward-referenced via
+  // blockaddresses.
+  return materializeForwardReferencedFunctions();
+}
+
+// Materialize every function body in the module, then perform the fixups
+// that can only run once all bodies are available: parsing any trailing
+// module-level blocks, resolving blockaddress forward references, replacing
+// upgraded/remangled intrinsics, and running module-wide auto-upgrades
+// (debug info, module flags, ObjC ARC runtime calls).
+Error BitcodeReader::materializeModule() {
+  if (Error Err = materializeMetadata())
+    return Err;
+
+  // Promise to materialize all forward references.
+  WillMaterializeAllForwardRefs = true;
+
+  // Iterate over the module, deserializing any functions that are still on
+  // disk.
+  for (Function &F : *TheModule) {
+    if (Error Err = materialize(&F))
+      return Err;
+  }
+  // At this point, if there are any function bodies, parse the rest of
+  // the bits in the module past the last function block we have recorded
+  // through either lazy scanning or the VST.
+  // Resume at whichever position is further along in the stream.
+  if (LastFunctionBlockBit || NextUnreadBit)
+    if (Error Err = parseModule(LastFunctionBlockBit > NextUnreadBit
+                                    ? LastFunctionBlockBit
+                                    : NextUnreadBit))
+      return Err;
+
+  // Check that all block address forward references got resolved (as we
+  // promised above).
+  if (!BasicBlockFwdRefs.empty())
+    return error("Never resolved function from blockaddress");
+
+  // Upgrade any intrinsic calls that slipped through (should not happen!) and
+  // delete the old functions to clean up. We can't do this unless the entire
+  // module is materialized because there could always be another function body
+  // with calls to the old function.
+  for (auto &I : UpgradedIntrinsics) {
+    for (auto *U : I.first->users()) {
+      if (CallInst *CI = dyn_cast<CallInst>(U))
+        UpgradeIntrinsicCall(CI, I.second);
+    }
+    // Any remaining (non-call) uses are redirected wholesale before the old
+    // declaration is erased.
+    if (!I.first->use_empty())
+      I.first->replaceAllUsesWith(I.second);
+    I.first->eraseFromParent();
+  }
+  UpgradedIntrinsics.clear();
+  // Do the same for remangled intrinsics
+  for (auto &I : RemangledIntrinsics) {
+    I.first->replaceAllUsesWith(I.second);
+    I.first->eraseFromParent();
+  }
+  RemangledIntrinsics.clear();
+
+  UpgradeDebugInfo(*TheModule);
+
+  UpgradeModuleFlags(*TheModule);
+
+  UpgradeARCRuntime(*TheModule);
+
+  return Error::success();
+}
+
+// Return a copy of the struct types identified while parsing this module.
+std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const {
+  std::vector<StructType *> Result(IdentifiedStructTypes.begin(),
+                                   IdentifiedStructTypes.end());
+  return Result;
+}
+
+// Construct a summary-index reader positioned at the given bitstream cursor.
+// Strtab is the bitcode file's string table (empty for legacy formats);
+// ModulePath/ModuleId identify this module within the index being built.
+ModuleSummaryIndexBitcodeReader::ModuleSummaryIndexBitcodeReader(
+    BitstreamCursor Cursor, StringRef Strtab, ModuleSummaryIndex &TheIndex,
+    StringRef ModulePath, unsigned ModuleId)
+    : BitcodeReaderBase(std::move(Cursor), Strtab), TheIndex(TheIndex),
+      ModulePath(ModulePath), ModuleId(ModuleId) {}
+
+// Register this reader's module (ModulePath/ModuleId) in the index.
+void ModuleSummaryIndexBitcodeReader::addThisModule() {
+  TheIndex.addModule(ModulePath, ModuleId);
+}
+
+// Look up this reader's module entry in the index.
+// NOTE(review): presumably addThisModule() must have run first for the
+// lookup to succeed — confirm against callers.
+ModuleSummaryIndex::ModuleInfo *
+ModuleSummaryIndexBitcodeReader::getThisModule() {
+  return TheIndex.getModule(ModulePath);
+}
+
+// Map a bitcode value id to its (ValueInfo, original-name GUID) pair.
+// The entry must have been populated earlier (via setValueGUID or a
+// combined-entry record); the assert catches unresolved ids. Note that
+// operator[] default-inserts a null entry for an unknown id.
+std::pair<ValueInfo, GlobalValue::GUID>
+ModuleSummaryIndexBitcodeReader::getValueInfoFromValueId(unsigned ValueId) {
+  auto VGI = ValueIdToValueInfoMap[ValueId];
+  assert(VGI.first);
+  return VGI;
+}
+
+// Compute the GUID for a value from its name, linkage and source file, and
+// record the ValueID -> (ValueInfo, original-name GUID) mapping used when
+// parsing summary records later.
+void ModuleSummaryIndexBitcodeReader::setValueGUID(
+    uint64_t ValueID, StringRef ValueName, GlobalValue::LinkageTypes Linkage,
+    StringRef SourceFileName) {
+  std::string GlobalId =
+      GlobalValue::getGlobalIdentifier(ValueName, Linkage, SourceFileName);
+  auto ValueGUID = GlobalValue::getGUID(GlobalId);
+  auto OriginalNameID = ValueGUID;
+  // For local-linkage values the global identifier embeds the source file
+  // name; the "original name" GUID is computed from the bare name instead.
+  if (GlobalValue::isLocalLinkage(Linkage))
+    OriginalNameID = GlobalValue::getGUID(ValueName);
+  if (PrintSummaryGUIDs)
+    dbgs() << "GUID " << ValueGUID << "(" << OriginalNameID << ") is "
+           << ValueName << "\n";
+
+  // UseStrtab is false for legacy summary formats and value names are
+  // created on stack. In that case we save the name in a string saver in
+  // the index so that the value name can be recorded.
+  ValueIdToValueInfoMap[ValueID] = std::make_pair(
+      TheIndex.getOrInsertValueInfo(
+          ValueGUID,
+          UseStrtab ? ValueName : TheIndex.saveString(ValueName)),
+      OriginalNameID);
+}
+
+// Specialized value symbol table parser used when reading module index
+// blocks where we don't actually create global values. The parsed information
+// is saved in the bitcode reader for use when later parsing summaries.
+//
+// Offset is the absolute bit offset of the VST block; on success the stream
+// cursor is restored to where it was before the jump. Returns an Error on
+// malformed input.
+Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
+    uint64_t Offset,
+    DenseMap<unsigned, GlobalValue::LinkageTypes> &ValueIdToLinkageMap) {
+  // With a strtab the VST is not required to parse the summary.
+  if (UseStrtab)
+    return Error::success();
+
+  assert(Offset > 0 && "Expected non-zero VST offset");
+  // Remember the current position so we can resume after reading the VST.
+  Expected<uint64_t> MaybeCurrentBit = jumpToValueSymbolTable(Offset, Stream);
+  if (!MaybeCurrentBit)
+    return MaybeCurrentBit.takeError();
+  uint64_t CurrentBit = MaybeCurrentBit.get();
+
+  if (Error Err = Stream.EnterSubBlock(bitc::VALUE_SYMTAB_BLOCK_ID))
+    return Err;
+
+  SmallVector<uint64_t, 64> Record;
+
+  // Read all the records for this value table.
+  SmallString<128> ValueName;
+
+  while (true) {
+    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    BitstreamEntry Entry = MaybeEntry.get();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      // Done parsing VST, jump back to wherever we came from.
+      if (Error JumpFailed = Stream.JumpToBit(CurrentBit))
+        return JumpFailed;
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record.
+    Record.clear();
+    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeRecord)
+      return MaybeRecord.takeError();
+    switch (MaybeRecord.get()) {
+    default: // Default behavior: ignore (e.g. VST_CODE_BBENTRY records).
+      break;
+    case bitc::VST_CODE_ENTRY: { // VST_CODE_ENTRY: [valueid, namechar x N]
+      if (convertToString(Record, 1, ValueName))
+        return error("Invalid record");
+      unsigned ValueID = Record[0];
+      assert(!SourceFileName.empty());
+      // Linkage must have been recorded earlier from the module-level
+      // GLOBALVAR/FUNCTION/ALIAS records.
+      auto VLI = ValueIdToLinkageMap.find(ValueID);
+      assert(VLI != ValueIdToLinkageMap.end() &&
+             "No linkage found for VST entry?");
+      auto Linkage = VLI->second;
+      setValueGUID(ValueID, ValueName, Linkage, SourceFileName);
+      ValueName.clear();
+      break;
+    }
+    case bitc::VST_CODE_FNENTRY: {
+      // VST_CODE_FNENTRY: [valueid, offset, namechar x N]
+      if (convertToString(Record, 2, ValueName))
+        return error("Invalid record");
+      unsigned ValueID = Record[0];
+      assert(!SourceFileName.empty());
+      auto VLI = ValueIdToLinkageMap.find(ValueID);
+      assert(VLI != ValueIdToLinkageMap.end() &&
+             "No linkage found for VST entry?");
+      auto Linkage = VLI->second;
+      setValueGUID(ValueID, ValueName, Linkage, SourceFileName);
+      ValueName.clear();
+      break;
+    }
+    case bitc::VST_CODE_COMBINED_ENTRY: {
+      // VST_CODE_COMBINED_ENTRY: [valueid, refguid]
+      unsigned ValueID = Record[0];
+      GlobalValue::GUID RefGUID = Record[1];
+      // The "original name", which is the second value of the pair will be
+      // overridden later by a FS_COMBINED_ORIGINAL_NAME in the combined index.
+      ValueIdToValueInfoMap[ValueID] =
+          std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID);
+      break;
+    }
+    }
+  }
+}
+
+// Parse just the blocks needed for building the index out of the module.
+// At the end of this routine the module Index is populated with a map
+// from global value id to GlobalValueSummary objects.
+//
+// Only the blocks relevant to summary construction (BLOCKINFO, VST,
+// GLOBALVAL_SUMMARY, MODULE_STRTAB) are parsed; everything else is skipped.
+Error ModuleSummaryIndexBitcodeReader::parseModule() {
+  if (Error Err = Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+    return Err;
+
+  SmallVector<uint64_t, 64> Record;
+  DenseMap<unsigned, GlobalValue::LinkageTypes> ValueIdToLinkageMap;
+  // Value ids for globals are assigned in the order their module-level
+  // records appear.
+  unsigned ValueId = 0;
+
+  // Read the index for this module.
+  while (true) {
+    Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    llvm::BitstreamEntry Entry = MaybeEntry.get();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+
+    case BitstreamEntry::SubBlock:
+      switch (Entry.ID) {
+      default: // Skip unknown content.
+        if (Error Err = Stream.SkipBlock())
+          return Err;
+        break;
+      case bitc::BLOCKINFO_BLOCK_ID:
+        // Need to parse these to get abbrev ids (e.g. for VST)
+        if (readBlockInfo())
+          return error("Malformed block");
+        break;
+      case bitc::VALUE_SYMTAB_BLOCK_ID:
+        // Should have been parsed earlier via VSTOffset, unless there
+        // is no summary section.
+        assert(((SeenValueSymbolTable && VSTOffset > 0) ||
+                !SeenGlobalValSummary) &&
+               "Expected early VST parse via VSTOffset record");
+        if (Error Err = Stream.SkipBlock())
+          return Err;
+        break;
+      case bitc::GLOBALVAL_SUMMARY_BLOCK_ID:
+      case bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID:
+        // Add the module if it is a per-module index (has a source file name).
+        if (!SourceFileName.empty())
+          addThisModule();
+        assert(!SeenValueSymbolTable &&
+               "Already read VST when parsing summary block?");
+        // We might not have a VST if there were no values in the
+        // summary. An empty summary block generated when we are
+        // performing ThinLTO compiles so we don't later invoke
+        // the regular LTO process on them.
+        if (VSTOffset > 0) {
+          if (Error Err = parseValueSymbolTable(VSTOffset, ValueIdToLinkageMap))
+            return Err;
+          SeenValueSymbolTable = true;
+        }
+        SeenGlobalValSummary = true;
+        if (Error Err = parseEntireSummary(Entry.ID))
+          return Err;
+        break;
+      case bitc::MODULE_STRTAB_BLOCK_ID:
+        if (Error Err = parseModuleStringTable())
+          return Err;
+        break;
+      }
+      continue;
+
+    case BitstreamEntry::Record: {
+      Record.clear();
+      Expected<unsigned> MaybeBitCode = Stream.readRecord(Entry.ID, Record);
+      if (!MaybeBitCode)
+        return MaybeBitCode.takeError();
+      switch (MaybeBitCode.get()) {
+      default:
+        break; // Default behavior, ignore unknown content.
+      case bitc::MODULE_CODE_VERSION: {
+        if (Error Err = parseVersionRecord(Record).takeError())
+          return Err;
+        break;
+      }
+      /// MODULE_CODE_SOURCE_FILENAME: [namechar x N]
+      case bitc::MODULE_CODE_SOURCE_FILENAME: {
+        SmallString<128> ValueName;
+        if (convertToString(Record, 0, ValueName))
+          return error("Invalid record");
+        SourceFileName = ValueName.c_str();
+        break;
+      }
+      /// MODULE_CODE_HASH: [5*i32]
+      case bitc::MODULE_CODE_HASH: {
+        if (Record.size() != 5)
+          return error("Invalid hash length " + Twine(Record.size()).str());
+        auto &Hash = getThisModule()->second.second;
+        int Pos = 0;
+        // Each record element carries one 32-bit word of the module hash.
+        for (auto &Val : Record) {
+          assert(!(Val >> 32) && "Unexpected high bits set");
+          Hash[Pos++] = Val;
+        }
+        break;
+      }
+      /// MODULE_CODE_VSTOFFSET: [offset]
+      case bitc::MODULE_CODE_VSTOFFSET:
+        if (Record.empty())
+          return error("Invalid record");
+        // Note that we subtract 1 here because the offset is relative to one
+        // word before the start of the identification or module block, which
+        // was historically always the start of the regular bitcode header.
+        VSTOffset = Record[0] - 1;
+        break;
+      // v1 GLOBALVAR: [pointer type, isconst, initid, linkage, ...]
+      // v1 FUNCTION: [type, callingconv, isproto, linkage, ...]
+      // v1 ALIAS: [alias type, addrspace, aliasee val#, linkage, ...]
+      // v2: [strtab offset, strtab size, v1]
+      case bitc::MODULE_CODE_GLOBALVAR:
+      case bitc::MODULE_CODE_FUNCTION:
+      case bitc::MODULE_CODE_ALIAS: {
+        StringRef Name;
+        ArrayRef<uint64_t> GVRecord;
+        std::tie(Name, GVRecord) = readNameFromStrtab(Record);
+        if (GVRecord.size() <= 3)
+          return error("Invalid record");
+        uint64_t RawLinkage = GVRecord[3];
+        GlobalValue::LinkageTypes Linkage = getDecodedLinkage(RawLinkage);
+        // Without a strtab the GUID can only be computed once the name is
+        // known (from the VST), so just remember the linkage for now.
+        if (!UseStrtab) {
+          ValueIdToLinkageMap[ValueId++] = Linkage;
+          break;
+        }
+
+        setValueGUID(ValueId++, Name, Linkage, SourceFileName);
+        break;
+      }
+      }
+    }
+      continue;
+    }
+  }
+}
+
+// Translate a list of value ids from a summary record into the
+// corresponding ValueInfo references.
+std::vector<ValueInfo>
+ModuleSummaryIndexBitcodeReader::makeRefList(ArrayRef<uint64_t> Record) {
+  std::vector<ValueInfo> Refs;
+  Refs.reserve(Record.size());
+  for (size_t Idx = 0, Count = Record.size(); Idx != Count; ++Idx)
+    Refs.push_back(getValueInfoFromValueId(Record[Idx]).first);
+  return Refs;
+}
+
+// Decode a function summary's call list. Each callee id may be followed by
+// extra fields depending on the format: old-profile records interleave one
+// or two legacy count fields, newer records carry either a hotness value
+// (HasProfile) or a relative block frequency (HasRelBF). The ++I/I += 1
+// increments consume those extra fields.
+std::vector<FunctionSummary::EdgeTy>
+ModuleSummaryIndexBitcodeReader::makeCallList(ArrayRef<uint64_t> Record,
+                                              bool IsOldProfileFormat,
+                                              bool HasProfile, bool HasRelBF) {
+  std::vector<FunctionSummary::EdgeTy> Ret;
+  Ret.reserve(Record.size());
+  for (unsigned I = 0, E = Record.size(); I != E; ++I) {
+    CalleeInfo::HotnessType Hotness = CalleeInfo::HotnessType::Unknown;
+    uint64_t RelBF = 0;
+    ValueInfo Callee = getValueInfoFromValueId(Record[I]).first;
+    if (IsOldProfileFormat) {
+      I += 1; // Skip old callsitecount field
+      if (HasProfile)
+        I += 1; // Skip old profilecount field
+    } else if (HasProfile)
+      Hotness = static_cast<CalleeInfo::HotnessType>(Record[++I]);
+    else if (HasRelBF)
+      RelBF = Record[++I];
+    Ret.push_back(FunctionSummary::EdgeTy{Callee, CalleeInfo(Hotness, RelBF)});
+  }
+  return Ret;
+}
+
+// Decode one ByArg entry of a whole-program devirtualization resolution
+// from Record, starting at Slot. Layout: [argcount, argcount x arg,
+// kind, info, byte, bit]. Slot is advanced past the consumed fields.
+static void
+parseWholeProgramDevirtResolutionByArg(ArrayRef<uint64_t> Record, size_t &Slot,
+                                       WholeProgramDevirtResolution &Wpd) {
+  uint64_t ArgNum = Record[Slot++];
+  // The argument values themselves form the key of the ResByArg map.
+  WholeProgramDevirtResolution::ByArg &B =
+      Wpd.ResByArg[{Record.begin() + Slot, Record.begin() + Slot + ArgNum}];
+  Slot += ArgNum;
+
+  B.TheKind =
+      static_cast<WholeProgramDevirtResolution::ByArg::Kind>(Record[Slot++]);
+  B.Info = Record[Slot++];
+  B.Byte = Record[Slot++];
+  B.Bit = Record[Slot++];
+}
+
+// Decode one whole-program devirtualization resolution from Record starting
+// at Slot. Layout: [id, kind, name-offset, name-size, numByArg,
+// numByArg x ByArg]. The single-impl name is a (offset, size) reference
+// into the string table. Slot is advanced past the consumed fields.
+static void parseWholeProgramDevirtResolution(ArrayRef<uint64_t> Record,
+                                              StringRef Strtab, size_t &Slot,
+                                              TypeIdSummary &TypeId) {
+  uint64_t Id = Record[Slot++];
+  WholeProgramDevirtResolution &Wpd = TypeId.WPDRes[Id];
+
+  Wpd.TheKind = static_cast<WholeProgramDevirtResolution::Kind>(Record[Slot++]);
+  Wpd.SingleImplName = {Strtab.data() + Record[Slot],
+                        static_cast<size_t>(Record[Slot + 1])};
+  Slot += 2;
+
+  uint64_t ResByArgNum = Record[Slot++];
+  for (uint64_t I = 0; I != ResByArgNum; ++I)
+    parseWholeProgramDevirtResolutionByArg(Record, Slot, Wpd);
+}
+
+// Decode a FS_TYPE_ID record into the index. Layout: [name-offset,
+// name-size, 6 x type-test-resolution field, WPD resolutions...]. The
+// type-id name is a (offset, size) reference into the string table.
+static void parseTypeIdSummaryRecord(ArrayRef<uint64_t> Record,
+                                     StringRef Strtab,
+                                     ModuleSummaryIndex &TheIndex) {
+  size_t Slot = 0;
+  TypeIdSummary &TypeId = TheIndex.getOrInsertTypeIdSummary(
+      {Strtab.data() + Record[Slot], static_cast<size_t>(Record[Slot + 1])});
+  Slot += 2;
+
+  TypeId.TTRes.TheKind = static_cast<TypeTestResolution::Kind>(Record[Slot++]);
+  TypeId.TTRes.SizeM1BitWidth = Record[Slot++];
+  TypeId.TTRes.AlignLog2 = Record[Slot++];
+  TypeId.TTRes.SizeM1 = Record[Slot++];
+  TypeId.TTRes.BitMask = Record[Slot++];
+  TypeId.TTRes.InlineBits = Record[Slot++];
+
+  // Any remaining fields are whole-program devirtualization resolutions.
+  while (Slot < Record.size())
+    parseWholeProgramDevirtResolution(Record, Strtab, Slot, TypeId);
+}
+
+// Decode a FS_PARAM_ACCESS record into a list of ParamAccess entries.
+// The record is consumed destructively front-to-back; each range is stored
+// as two sign-rotated VBR values (lower, upper). Layout per entry:
+// [paramno, 2 x range, numcalls, numcalls x (paramno, calleeid, 2 x range)].
+std::vector<FunctionSummary::ParamAccess>
+ModuleSummaryIndexBitcodeReader::parseParamAccesses(ArrayRef<uint64_t> Record) {
+  // Pops two values off the front of Record and decodes them into a
+  // ConstantRange.
+  auto ReadRange = [&]() {
+    APInt Lower(FunctionSummary::ParamAccess::RangeWidth,
+                BitcodeReader::decodeSignRotatedValue(Record.front()));
+    Record = Record.drop_front();
+    APInt Upper(FunctionSummary::ParamAccess::RangeWidth,
+                BitcodeReader::decodeSignRotatedValue(Record.front()));
+    Record = Record.drop_front();
+    ConstantRange Range{Lower, Upper};
+    assert(!Range.isFullSet());
+    assert(!Range.isUpperSignWrapped());
+    return Range;
+  };
+
+  std::vector<FunctionSummary::ParamAccess> PendingParamAccesses;
+  while (!Record.empty()) {
+    PendingParamAccesses.emplace_back();
+    FunctionSummary::ParamAccess &ParamAccess = PendingParamAccesses.back();
+    ParamAccess.ParamNo = Record.front();
+    Record = Record.drop_front();
+    ParamAccess.Use = ReadRange();
+    ParamAccess.Calls.resize(Record.front());
+    Record = Record.drop_front();
+    for (auto &Call : ParamAccess.Calls) {
+      Call.ParamNo = Record.front();
+      Record = Record.drop_front();
+      Call.Callee = getValueInfoFromValueId(Record.front()).first;
+      Record = Record.drop_front();
+      Call.Offsets = ReadRange();
+    }
+  }
+  return PendingParamAccesses;
+}
+
+// Decode one (offset, vtable value id) pair from Record starting at Slot
+// and append it to TypeId. Slot is advanced past the two consumed fields.
+void ModuleSummaryIndexBitcodeReader::parseTypeIdCompatibleVtableInfo(
+    ArrayRef<uint64_t> Record, size_t &Slot,
+    TypeIdCompatibleVtableInfo &TypeId) {
+  uint64_t Offset = Record[Slot++];
+  ValueInfo Callee = getValueInfoFromValueId(Record[Slot++]).first;
+  TypeId.push_back({Offset, Callee});
+}
+
+// Decode a FS_TYPE_ID_METADATA record. Layout: [name-offset, name-size,
+// n x (offset, vtable value id)]. The type-id name is a (offset, size)
+// reference into the string table.
+void ModuleSummaryIndexBitcodeReader::parseTypeIdCompatibleVtableSummaryRecord(
+    ArrayRef<uint64_t> Record) {
+  size_t Slot = 0;
+  TypeIdCompatibleVtableInfo &TypeId =
+      TheIndex.getOrInsertTypeIdCompatibleVtableSummary(
+          {Strtab.data() + Record[Slot],
+           static_cast<size_t>(Record[Slot + 1])});
+  Slot += 2;
+
+  while (Slot < Record.size())
+    parseTypeIdCompatibleVtableInfo(Record, Slot, TypeId);
+}
+
+// Mark the trailing ROCnt entries of Refs (just before the final WOCnt
+// entries) as read-only, and the final WOCnt entries as write-only.
+// Readonly and writeonly refs are serialized at the end of the refs list.
+static void setSpecialRefs(std::vector<ValueInfo> &Refs, unsigned ROCnt,
+                           unsigned WOCnt) {
+  assert(ROCnt + WOCnt <= Refs.size());
+  const unsigned NumRefs = Refs.size();
+  const unsigned FirstRORef = NumRefs - WOCnt - ROCnt;
+  const unsigned FirstWORef = NumRefs - WOCnt;
+  for (unsigned Idx = FirstRORef; Idx != FirstWORef; ++Idx)
+    Refs[Idx].setReadOnly();
+  for (unsigned Idx = FirstWORef; Idx != NumRefs; ++Idx)
+    Refs[Idx].setWriteOnly();
+}
+
+// Eagerly parse the entire summary block. This populates the GlobalValueSummary
+// objects in the index.
+//
+// ID is the summary block id (per-module or full-LTO). The first record
+// must be a version record; all subsequent records are decoded according
+// to that version. Returns an Error on malformed input; the only normal
+// exit is the block's EndBlock entry.
+Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
+  if (Error Err = Stream.EnterSubBlock(ID))
+    return Err;
+  SmallVector<uint64_t, 64> Record;
+
+  // Parse version
+  {
+    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    BitstreamEntry Entry = MaybeEntry.get();
+
+    if (Entry.Kind != BitstreamEntry::Record)
+      return error("Invalid Summary Block: record for version expected");
+    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeRecord)
+      return MaybeRecord.takeError();
+    if (MaybeRecord.get() != bitc::FS_VERSION)
+      return error("Invalid Summary Block: version expected");
+  }
+  const uint64_t Version = Record[0];
+  const bool IsOldProfileFormat = Version == 1;
+  if (Version < 1 || Version > ModuleSummaryIndex::BitcodeSummaryVersion)
+    return error("Invalid summary version " + Twine(Version) +
+                 ". Version should be in the range [1-" +
+                 Twine(ModuleSummaryIndex::BitcodeSummaryVersion) +
+                 "].");
+  Record.clear();
+
+  // Keep around the last seen summary to be used when we see an optional
+  // "OriginalName" attachment.
+  GlobalValueSummary *LastSeenSummary = nullptr;
+  GlobalValue::GUID LastSeenGUID = 0;
+
+  // We can expect to see any number of type ID information records before
+  // each function summary records; these variables store the information
+  // collected so far so that it can be used to create the summary object.
+  // They are std::move'd into the summary when it is created, which also
+  // resets them to empty for the next function.
+  std::vector<GlobalValue::GUID> PendingTypeTests;
+  std::vector<FunctionSummary::VFuncId> PendingTypeTestAssumeVCalls,
+      PendingTypeCheckedLoadVCalls;
+  std::vector<FunctionSummary::ConstVCall> PendingTypeTestAssumeConstVCalls,
+      PendingTypeCheckedLoadConstVCalls;
+  std::vector<FunctionSummary::ParamAccess> PendingParamAccesses;
+
+  while (true) {
+    Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+    if (!MaybeEntry)
+      return MaybeEntry.takeError();
+    BitstreamEntry Entry = MaybeEntry.get();
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a record. The record format depends on whether this
+    // is a per-module index or a combined index file. In the per-module
+    // case the records contain the associated value's ID for correlation
+    // with VST entries. In the combined index the correlation is done
+    // via the bitcode offset of the summary records (which were saved
+    // in the combined index VST entries). The records also contain
+    // information used for ThinLTO renaming and importing.
+    Record.clear();
+    Expected<unsigned> MaybeBitCode = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeBitCode)
+      return MaybeBitCode.takeError();
+    switch (unsigned BitCode = MaybeBitCode.get()) {
+    default: // Default behavior: ignore.
+      break;
+    case bitc::FS_FLAGS: { // [flags]
+      TheIndex.setFlags(Record[0]);
+      break;
+    }
+    case bitc::FS_VALUE_GUID: { // [valueid, refguid]
+      uint64_t ValueID = Record[0];
+      GlobalValue::GUID RefGUID = Record[1];
+      ValueIdToValueInfoMap[ValueID] =
+          std::make_pair(TheIndex.getOrInsertValueInfo(RefGUID), RefGUID);
+      break;
+    }
+    // FS_PERMODULE: [valueid, flags, instcount, fflags, numrefs,
+    //                numrefs x valueid, n x (valueid)]
+    // FS_PERMODULE_PROFILE: [valueid, flags, instcount, fflags, numrefs,
+    //                        numrefs x valueid,
+    //                        n x (valueid, hotness)]
+    // FS_PERMODULE_RELBF: [valueid, flags, instcount, fflags, numrefs,
+    //                      numrefs x valueid,
+    //                      n x (valueid, relblockfreq)]
+    case bitc::FS_PERMODULE:
+    case bitc::FS_PERMODULE_RELBF:
+    case bitc::FS_PERMODULE_PROFILE: {
+      unsigned ValueID = Record[0];
+      uint64_t RawFlags = Record[1];
+      unsigned InstCount = Record[2];
+      uint64_t RawFunFlags = 0;
+      unsigned NumRefs = Record[3];
+      unsigned NumRORefs = 0, NumWORefs = 0;
+      int RefListStartIndex = 4;
+      // Newer summary versions inserted extra fields (fflags, readonly and
+      // writeonly ref counts), shifting where the ref list starts.
+      if (Version >= 4) {
+        RawFunFlags = Record[3];
+        NumRefs = Record[4];
+        RefListStartIndex = 5;
+        if (Version >= 5) {
+          NumRORefs = Record[5];
+          RefListStartIndex = 6;
+          if (Version >= 7) {
+            NumWORefs = Record[6];
+            RefListStartIndex = 7;
+          }
+        }
+      }
+
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      // The module path string ref set in the summary must be owned by the
+      // index's module string table. Since we don't have a module path
+      // string table section in the per-module index, we create a single
+      // module path string table entry with an empty (0) ID to take
+      // ownership.
+      int CallGraphEdgeStartIndex = RefListStartIndex + NumRefs;
+      assert(Record.size() >= RefListStartIndex + NumRefs &&
+             "Record size inconsistent with number of references");
+      std::vector<ValueInfo> Refs = makeRefList(
+          ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs));
+      bool HasProfile = (BitCode == bitc::FS_PERMODULE_PROFILE);
+      bool HasRelBF = (BitCode == bitc::FS_PERMODULE_RELBF);
+      std::vector<FunctionSummary::EdgeTy> Calls = makeCallList(
+          ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex),
+          IsOldProfileFormat, HasProfile, HasRelBF);
+      setSpecialRefs(Refs, NumRORefs, NumWORefs);
+      auto FS = std::make_unique<FunctionSummary>(
+          Flags, InstCount, getDecodedFFlags(RawFunFlags), /*EntryCount=*/0,
+          std::move(Refs), std::move(Calls), std::move(PendingTypeTests),
+          std::move(PendingTypeTestAssumeVCalls),
+          std::move(PendingTypeCheckedLoadVCalls),
+          std::move(PendingTypeTestAssumeConstVCalls),
+          std::move(PendingTypeCheckedLoadConstVCalls),
+          std::move(PendingParamAccesses));
+      auto VIAndOriginalGUID = getValueInfoFromValueId(ValueID);
+      FS->setModulePath(getThisModule()->first());
+      FS->setOriginalName(VIAndOriginalGUID.second);
+      TheIndex.addGlobalValueSummary(VIAndOriginalGUID.first, std::move(FS));
+      break;
+    }
+    // FS_ALIAS: [valueid, flags, valueid]
+    // Aliases must be emitted (and parsed) after all FS_PERMODULE entries, as
+    // they expect all aliasee summaries to be available.
+    case bitc::FS_ALIAS: {
+      unsigned ValueID = Record[0];
+      uint64_t RawFlags = Record[1];
+      unsigned AliaseeID = Record[2];
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      auto AS = std::make_unique<AliasSummary>(Flags);
+      // The module path string ref set in the summary must be owned by the
+      // index's module string table. Since we don't have a module path
+      // string table section in the per-module index, we create a single
+      // module path string table entry with an empty (0) ID to take
+      // ownership.
+      AS->setModulePath(getThisModule()->first());
+
+      auto AliaseeVI = getValueInfoFromValueId(AliaseeID).first;
+      auto AliaseeInModule = TheIndex.findSummaryInModule(AliaseeVI, ModulePath);
+      if (!AliaseeInModule)
+        return error("Alias expects aliasee summary to be parsed");
+      AS->setAliasee(AliaseeVI, AliaseeInModule);
+
+      auto GUID = getValueInfoFromValueId(ValueID);
+      AS->setOriginalName(GUID.second);
+      TheIndex.addGlobalValueSummary(GUID.first, std::move(AS));
+      break;
+    }
+    // FS_PERMODULE_GLOBALVAR_INIT_REFS: [valueid, flags, varflags, n x valueid]
+    case bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS: {
+      unsigned ValueID = Record[0];
+      uint64_t RawFlags = Record[1];
+      unsigned RefArrayStart = 2;
+      GlobalVarSummary::GVarFlags GVF(/* ReadOnly */ false,
+                                      /* WriteOnly */ false,
+                                      /* Constant */ false,
+                                      GlobalObject::VCallVisibilityPublic);
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      // The varflags field was added in version 5.
+      if (Version >= 5) {
+        GVF = getDecodedGVarFlags(Record[2]);
+        RefArrayStart = 3;
+      }
+      std::vector<ValueInfo> Refs =
+          makeRefList(ArrayRef<uint64_t>(Record).slice(RefArrayStart));
+      auto FS =
+          std::make_unique<GlobalVarSummary>(Flags, GVF, std::move(Refs));
+      FS->setModulePath(getThisModule()->first());
+      auto GUID = getValueInfoFromValueId(ValueID);
+      FS->setOriginalName(GUID.second);
+      TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
+      break;
+    }
+    // FS_PERMODULE_VTABLE_GLOBALVAR_INIT_REFS: [valueid, flags, varflags,
+    //                        numrefs, numrefs x valueid,
+    //                        n x (valueid, offset)]
+    case bitc::FS_PERMODULE_VTABLE_GLOBALVAR_INIT_REFS: {
+      unsigned ValueID = Record[0];
+      uint64_t RawFlags = Record[1];
+      GlobalVarSummary::GVarFlags GVF = getDecodedGVarFlags(Record[2]);
+      unsigned NumRefs = Record[3];
+      unsigned RefListStartIndex = 4;
+      unsigned VTableListStartIndex = RefListStartIndex + NumRefs;
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      std::vector<ValueInfo> Refs = makeRefList(
+          ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs));
+      VTableFuncList VTableFuncs;
+      // Each vtable entry is a (function value id, offset) pair; the inner
+      // ++I consumes the offset.
+      for (unsigned I = VTableListStartIndex, E = Record.size(); I != E; ++I) {
+        ValueInfo Callee = getValueInfoFromValueId(Record[I]).first;
+        uint64_t Offset = Record[++I];
+        VTableFuncs.push_back({Callee, Offset});
+      }
+      auto VS =
+          std::make_unique<GlobalVarSummary>(Flags, GVF, std::move(Refs));
+      VS->setModulePath(getThisModule()->first());
+      VS->setVTableFuncs(VTableFuncs);
+      auto GUID = getValueInfoFromValueId(ValueID);
+      VS->setOriginalName(GUID.second);
+      TheIndex.addGlobalValueSummary(GUID.first, std::move(VS));
+      break;
+    }
+    // FS_COMBINED: [valueid, modid, flags, instcount, fflags, numrefs,
+    //               numrefs x valueid, n x (valueid)]
+    // FS_COMBINED_PROFILE: [valueid, modid, flags, instcount, fflags, numrefs,
+    //                       numrefs x valueid, n x (valueid, hotness)]
+    case bitc::FS_COMBINED:
+    case bitc::FS_COMBINED_PROFILE: {
+      unsigned ValueID = Record[0];
+      uint64_t ModuleId = Record[1];
+      uint64_t RawFlags = Record[2];
+      unsigned InstCount = Record[3];
+      uint64_t RawFunFlags = 0;
+      uint64_t EntryCount = 0;
+      unsigned NumRefs = Record[4];
+      unsigned NumRORefs = 0, NumWORefs = 0;
+      int RefListStartIndex = 5;
+
+      // Combined-format layout also evolved across versions: fflags (v4),
+      // readonly ref count (v5), entry count (v6), writeonly ref count (v7).
+      if (Version >= 4) {
+        RawFunFlags = Record[4];
+        RefListStartIndex = 6;
+        size_t NumRefsIndex = 5;
+        if (Version >= 5) {
+          unsigned NumRORefsOffset = 1;
+          RefListStartIndex = 7;
+          if (Version >= 6) {
+            NumRefsIndex = 6;
+            EntryCount = Record[5];
+            RefListStartIndex = 8;
+            if (Version >= 7) {
+              RefListStartIndex = 9;
+              NumWORefs = Record[8];
+              NumRORefsOffset = 2;
+            }
+          }
+          NumRORefs = Record[RefListStartIndex - NumRORefsOffset];
+        }
+        NumRefs = Record[NumRefsIndex];
+      }
+
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      int CallGraphEdgeStartIndex = RefListStartIndex + NumRefs;
+      assert(Record.size() >= RefListStartIndex + NumRefs &&
+             "Record size inconsistent with number of references");
+      std::vector<ValueInfo> Refs = makeRefList(
+          ArrayRef<uint64_t>(Record).slice(RefListStartIndex, NumRefs));
+      bool HasProfile = (BitCode == bitc::FS_COMBINED_PROFILE);
+      std::vector<FunctionSummary::EdgeTy> Edges = makeCallList(
+          ArrayRef<uint64_t>(Record).slice(CallGraphEdgeStartIndex),
+          IsOldProfileFormat, HasProfile, false);
+      ValueInfo VI = getValueInfoFromValueId(ValueID).first;
+      setSpecialRefs(Refs, NumRORefs, NumWORefs);
+      auto FS = std::make_unique<FunctionSummary>(
+          Flags, InstCount, getDecodedFFlags(RawFunFlags), EntryCount,
+          std::move(Refs), std::move(Edges), std::move(PendingTypeTests),
+          std::move(PendingTypeTestAssumeVCalls),
+          std::move(PendingTypeCheckedLoadVCalls),
+          std::move(PendingTypeTestAssumeConstVCalls),
+          std::move(PendingTypeCheckedLoadConstVCalls),
+          std::move(PendingParamAccesses));
+      LastSeenSummary = FS.get();
+      LastSeenGUID = VI.getGUID();
+      FS->setModulePath(ModuleIdMap[ModuleId]);
+      TheIndex.addGlobalValueSummary(VI, std::move(FS));
+      break;
+    }
+    // FS_COMBINED_ALIAS: [valueid, modid, flags, valueid]
+    // Aliases must be emitted (and parsed) after all FS_COMBINED entries, as
+    // they expect all aliasee summaries to be available.
+    case bitc::FS_COMBINED_ALIAS: {
+      unsigned ValueID = Record[0];
+      uint64_t ModuleId = Record[1];
+      uint64_t RawFlags = Record[2];
+      unsigned AliaseeValueId = Record[3];
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      auto AS = std::make_unique<AliasSummary>(Flags);
+      LastSeenSummary = AS.get();
+      AS->setModulePath(ModuleIdMap[ModuleId]);
+
+      auto AliaseeVI = getValueInfoFromValueId(AliaseeValueId).first;
+      auto AliaseeInModule = TheIndex.findSummaryInModule(AliaseeVI, AS->modulePath());
+      AS->setAliasee(AliaseeVI, AliaseeInModule);
+
+      ValueInfo VI = getValueInfoFromValueId(ValueID).first;
+      LastSeenGUID = VI.getGUID();
+      TheIndex.addGlobalValueSummary(VI, std::move(AS));
+      break;
+    }
+    // FS_COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
+    case bitc::FS_COMBINED_GLOBALVAR_INIT_REFS: {
+      unsigned ValueID = Record[0];
+      uint64_t ModuleId = Record[1];
+      uint64_t RawFlags = Record[2];
+      unsigned RefArrayStart = 3;
+      GlobalVarSummary::GVarFlags GVF(/* ReadOnly */ false,
+                                      /* WriteOnly */ false,
+                                      /* Constant */ false,
+                                      GlobalObject::VCallVisibilityPublic);
+      auto Flags = getDecodedGVSummaryFlags(RawFlags, Version);
+      if (Version >= 5) {
+        GVF = getDecodedGVarFlags(Record[3]);
+        RefArrayStart = 4;
+      }
+      std::vector<ValueInfo> Refs =
+          makeRefList(ArrayRef<uint64_t>(Record).slice(RefArrayStart));
+      auto FS =
+          std::make_unique<GlobalVarSummary>(Flags, GVF, std::move(Refs));
+      LastSeenSummary = FS.get();
+      FS->setModulePath(ModuleIdMap[ModuleId]);
+      ValueInfo VI = getValueInfoFromValueId(ValueID).first;
+      LastSeenGUID = VI.getGUID();
+      TheIndex.addGlobalValueSummary(VI, std::move(FS));
+      break;
+    }
+    // FS_COMBINED_ORIGINAL_NAME: [original_name]
+    case bitc::FS_COMBINED_ORIGINAL_NAME: {
+      uint64_t OriginalName = Record[0];
+      if (!LastSeenSummary)
+        return error("Name attachment that does not follow a combined record");
+      LastSeenSummary->setOriginalName(OriginalName);
+      TheIndex.addOriginalName(LastSeenGUID, OriginalName);
+      // Reset the LastSeenSummary
+      LastSeenSummary = nullptr;
+      LastSeenGUID = 0;
+      break;
+    }
+    // The FS_TYPE_* records below accumulate per-function type metadata
+    // into the Pending* vectors, which are consumed (moved) by the next
+    // function summary record.
+    case bitc::FS_TYPE_TESTS:
+      assert(PendingTypeTests.empty());
+      llvm::append_range(PendingTypeTests, Record);
+      break;
+
+    case bitc::FS_TYPE_TEST_ASSUME_VCALLS:
+      assert(PendingTypeTestAssumeVCalls.empty());
+      for (unsigned I = 0; I != Record.size(); I += 2)
+        PendingTypeTestAssumeVCalls.push_back({Record[I], Record[I+1]});
+      break;
+
+    case bitc::FS_TYPE_CHECKED_LOAD_VCALLS:
+      assert(PendingTypeCheckedLoadVCalls.empty());
+      for (unsigned I = 0; I != Record.size(); I += 2)
+        PendingTypeCheckedLoadVCalls.push_back({Record[I], Record[I+1]});
+      break;
+
+    case bitc::FS_TYPE_TEST_ASSUME_CONST_VCALL:
+      PendingTypeTestAssumeConstVCalls.push_back(
+          {{Record[0], Record[1]}, {Record.begin() + 2, Record.end()}});
+      break;
+
+    case bitc::FS_TYPE_CHECKED_LOAD_CONST_VCALL:
+      PendingTypeCheckedLoadConstVCalls.push_back(
+          {{Record[0], Record[1]}, {Record.begin() + 2, Record.end()}});
+      break;
+
+    case bitc::FS_CFI_FUNCTION_DEFS: {
+      // Record is a flat list of (strtab offset, size) pairs.
+      std::set<std::string> &CfiFunctionDefs = TheIndex.cfiFunctionDefs();
+      for (unsigned I = 0; I != Record.size(); I += 2)
+        CfiFunctionDefs.insert(
+            {Strtab.data() + Record[I], static_cast<size_t>(Record[I + 1])});
+      break;
+    }
+
+    case bitc::FS_CFI_FUNCTION_DECLS: {
+      // Record is a flat list of (strtab offset, size) pairs.
+      std::set<std::string> &CfiFunctionDecls = TheIndex.cfiFunctionDecls();
+      for (unsigned I = 0; I != Record.size(); I += 2)
+        CfiFunctionDecls.insert(
+            {Strtab.data() + Record[I], static_cast<size_t>(Record[I + 1])});
+      break;
+    }
+
+    case bitc::FS_TYPE_ID:
+      parseTypeIdSummaryRecord(Record, Strtab, TheIndex);
+      break;
+
+    case bitc::FS_TYPE_ID_METADATA:
+      parseTypeIdCompatibleVtableSummaryRecord(Record);
+      break;
+
+    case bitc::FS_BLOCK_COUNT:
+      TheIndex.addBlockCount(Record[0]);
+      break;
+
+    case bitc::FS_PARAM_ACCESS: {
+      PendingParamAccesses = parseParamAccesses(Record);
+      break;
+    }
+    }
+  }
+  llvm_unreachable("Exit infinite loop");
+}
+
+// Parse the module string table block into the Index.
+// This populates the ModulePathStringTable map in the index.
+Error ModuleSummaryIndexBitcodeReader::parseModuleStringTable() {
+ if (Error Err = Stream.EnterSubBlock(bitc::MODULE_STRTAB_BLOCK_ID))
+ return Err;
+
+ SmallVector<uint64_t, 64> Record;
+
+ SmallString<128> ModulePath;
+ ModuleSummaryIndex::ModuleInfo *LastSeenModule = nullptr;
+
+ while (true) {
+ Expected<BitstreamEntry> MaybeEntry = Stream.advanceSkippingSubblocks();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return Error::success();
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ Record.clear();
+ Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeRecord)
+ return MaybeRecord.takeError();
+ switch (MaybeRecord.get()) {
+ default: // Default behavior: ignore.
+ break;
+ case bitc::MST_CODE_ENTRY: {
+ // MST_ENTRY: [modid, namechar x N]
+ uint64_t ModuleId = Record[0];
+
+ if (convertToString(Record, 1, ModulePath))
+ return error("Invalid record");
+
+ LastSeenModule = TheIndex.addModule(ModulePath, ModuleId);
+ ModuleIdMap[ModuleId] = LastSeenModule->first();
+
+ ModulePath.clear();
+ break;
+ }
+ /// MST_CODE_HASH: [5*i32]
+ case bitc::MST_CODE_HASH: {
+ if (Record.size() != 5)
+ return error("Invalid hash length " + Twine(Record.size()).str());
+ if (!LastSeenModule)
+ return error("Invalid hash that does not follow a module path");
+ int Pos = 0;
+ for (auto &Val : Record) {
+ assert(!(Val >> 32) && "Unexpected high bits set");
+ LastSeenModule->second.second[Pos++] = Val;
+ }
+ // Reset LastSeenModule to avoid overriding the hash unexpectedly.
+ LastSeenModule = nullptr;
+ break;
+ }
+ }
+ }
+ llvm_unreachable("Exit infinite loop");
+}
+
+namespace {
+
+// FIXME: This class is only here to support the transition to llvm::Error. It
+// will be removed once this transition is complete. Clients should prefer to
+// deal with the Error value directly, rather than converting to error_code.
+class BitcodeErrorCategoryType : public std::error_category {
+ const char *name() const noexcept override {
+ return "llvm.bitcode";
+ }
+
+ std::string message(int IE) const override {
+ BitcodeError E = static_cast<BitcodeError>(IE);
+ switch (E) {
+ case BitcodeError::CorruptedBitcode:
+ return "Corrupted bitcode";
+ }
+ llvm_unreachable("Unknown error type!");
+ }
+};
+
+} // end anonymous namespace
+
+static ManagedStatic<BitcodeErrorCategoryType> ErrorCategory;
+
+const std::error_category &llvm::BitcodeErrorCategory() {
+ return *ErrorCategory;
+}
+
+static Expected<StringRef> readBlobInRecord(BitstreamCursor &Stream,
+ unsigned Block, unsigned RecordID) {
+ if (Error Err = Stream.EnterSubBlock(Block))
+ return std::move(Err);
+
+ StringRef Strtab;
+ while (true) {
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::EndBlock:
+ return Strtab;
+
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+
+ case BitstreamEntry::SubBlock:
+ if (Error Err = Stream.SkipBlock())
+ return std::move(Err);
+ break;
+
+ case BitstreamEntry::Record:
+ StringRef Blob;
+ SmallVector<uint64_t, 1> Record;
+ Expected<unsigned> MaybeRecord =
+ Stream.readRecord(Entry.ID, Record, &Blob);
+ if (!MaybeRecord)
+ return MaybeRecord.takeError();
+ if (MaybeRecord.get() == RecordID)
+ Strtab = Blob;
+ break;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// External interface
+//===----------------------------------------------------------------------===//
+
+Expected<std::vector<BitcodeModule>>
+llvm::getBitcodeModuleList(MemoryBufferRef Buffer) {
+ auto FOrErr = getBitcodeFileContents(Buffer);
+ if (!FOrErr)
+ return FOrErr.takeError();
+ return std::move(FOrErr->Mods);
+}
+
+Expected<BitcodeFileContents>
+llvm::getBitcodeFileContents(MemoryBufferRef Buffer) {
+ Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
+ if (!StreamOrErr)
+ return StreamOrErr.takeError();
+ BitstreamCursor &Stream = *StreamOrErr;
+
+ BitcodeFileContents F;
+ while (true) {
+ uint64_t BCBegin = Stream.getCurrentByteNo();
+
+ // We may be consuming bitcode from a client that leaves garbage at the end
+ // of the bitcode stream (e.g. Apple's ar tool). If we are close enough to
+ // the end that there cannot possibly be another module, stop looking.
+ if (BCBegin + 8 >= Stream.getBitcodeBytes().size())
+ return F;
+
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ llvm::BitstreamEntry Entry = MaybeEntry.get();
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::EndBlock:
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+
+ case BitstreamEntry::SubBlock: {
+ uint64_t IdentificationBit = -1ull;
+ if (Entry.ID == bitc::IDENTIFICATION_BLOCK_ID) {
+ IdentificationBit = Stream.GetCurrentBitNo() - BCBegin * 8;
+ if (Error Err = Stream.SkipBlock())
+ return std::move(Err);
+
+ {
+ Expected<llvm::BitstreamEntry> MaybeEntry = Stream.advance();
+ if (!MaybeEntry)
+ return MaybeEntry.takeError();
+ Entry = MaybeEntry.get();
+ }
+
+ if (Entry.Kind != BitstreamEntry::SubBlock ||
+ Entry.ID != bitc::MODULE_BLOCK_ID)
+ return error("Malformed block");
+ }
+
+ if (Entry.ID == bitc::MODULE_BLOCK_ID) {
+ uint64_t ModuleBit = Stream.GetCurrentBitNo() - BCBegin * 8;
+ if (Error Err = Stream.SkipBlock())
+ return std::move(Err);
+
+ F.Mods.push_back({Stream.getBitcodeBytes().slice(
+ BCBegin, Stream.getCurrentByteNo() - BCBegin),
+ Buffer.getBufferIdentifier(), IdentificationBit,
+ ModuleBit});
+ continue;
+ }
+
+ if (Entry.ID == bitc::STRTAB_BLOCK_ID) {
+ Expected<StringRef> Strtab =
+ readBlobInRecord(Stream, bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB);
+ if (!Strtab)
+ return Strtab.takeError();
+ // This string table is used by every preceding bitcode module that does
+ // not have its own string table. A bitcode file may have multiple
+ // string tables if it was created by binary concatenation, for example
+ // with "llvm-cat -b".
+ for (BitcodeModule &I : llvm::reverse(F.Mods)) {
+ if (!I.Strtab.empty())
+ break;
+ I.Strtab = *Strtab;
+ }
+ // Similarly, the string table is used by every preceding symbol table;
+ // normally there will be just one unless the bitcode file was created
+ // by binary concatenation.
+ if (!F.Symtab.empty() && F.StrtabForSymtab.empty())
+ F.StrtabForSymtab = *Strtab;
+ continue;
+ }
+
+ if (Entry.ID == bitc::SYMTAB_BLOCK_ID) {
+ Expected<StringRef> SymtabOrErr =
+ readBlobInRecord(Stream, bitc::SYMTAB_BLOCK_ID, bitc::SYMTAB_BLOB);
+ if (!SymtabOrErr)
+ return SymtabOrErr.takeError();
+
+ // We can expect the bitcode file to have multiple symbol tables if it
+ // was created by binary concatenation. In that case we silently
+ // ignore any subsequent symbol tables, which is fine because this is a
+ // low level function. The client is expected to notice that the number
+ // of modules in the symbol table does not match the number of modules
+ // in the input file and regenerate the symbol table.
+ if (F.Symtab.empty())
+ F.Symtab = *SymtabOrErr;
+ continue;
+ }
+
+ if (Error Err = Stream.SkipBlock())
+ return std::move(Err);
+ continue;
+ }
+ case BitstreamEntry::Record:
+ if (Error E = Stream.skipRecord(Entry.ID).takeError())
+ return std::move(E);
+ continue;
+ }
+ }
+}
+
+/// Get a lazy one-at-time loading module from bitcode.
+///
+/// This isn't always used in a lazy context. In particular, it's also used by
+/// \a parseModule(). If this is truly lazy, then we need to eagerly pull
+/// in forward-referenced functions from block address references.
+///
+/// \param[in] MaterializeAll Set to \c true if we should materialize
+/// everything.
+Expected<std::unique_ptr<Module>>
+BitcodeModule::getModuleImpl(LLVMContext &Context, bool MaterializeAll,
+ bool ShouldLazyLoadMetadata, bool IsImporting,
+ DataLayoutCallbackTy DataLayoutCallback) {
+ BitstreamCursor Stream(Buffer);
+
+ std::string ProducerIdentification;
+ if (IdentificationBit != -1ull) {
+ if (Error JumpFailed = Stream.JumpToBit(IdentificationBit))
+ return std::move(JumpFailed);
+ if (Error E =
+ readIdentificationBlock(Stream).moveInto(ProducerIdentification))
+ return std::move(E);
+ }
+
+ if (Error JumpFailed = Stream.JumpToBit(ModuleBit))
+ return std::move(JumpFailed);
+ auto *R = new BitcodeReader(std::move(Stream), Strtab, ProducerIdentification,
+ Context);
+
+ std::unique_ptr<Module> M =
+ std::make_unique<Module>(ModuleIdentifier, Context);
+ M->setMaterializer(R);
+
+ // Delay parsing Metadata if ShouldLazyLoadMetadata is true.
+ if (Error Err = R->parseBitcodeInto(M.get(), ShouldLazyLoadMetadata,
+ IsImporting, DataLayoutCallback))
+ return std::move(Err);
+
+ if (MaterializeAll) {
+ // Read in the entire module, and destroy the BitcodeReader.
+ if (Error Err = M->materializeAll())
+ return std::move(Err);
+ } else {
+ // Resolve forward references from blockaddresses.
+ if (Error Err = R->materializeForwardReferencedFunctions())
+ return std::move(Err);
+ }
+ return std::move(M);
+}
+
+Expected<std::unique_ptr<Module>>
+BitcodeModule::getLazyModule(LLVMContext &Context, bool ShouldLazyLoadMetadata,
+ bool IsImporting) {
+ return getModuleImpl(Context, false, ShouldLazyLoadMetadata, IsImporting,
+ [](StringRef) { return None; });
+}
+
+// Parse the specified bitcode buffer and merge the index into CombinedIndex.
+// We don't use ModuleIdentifier here because the client may need to control the
+// module path used in the combined summary (e.g. when reading summaries for
+// regular LTO modules).
+Error BitcodeModule::readSummary(ModuleSummaryIndex &CombinedIndex,
+ StringRef ModulePath, uint64_t ModuleId) {
+ BitstreamCursor Stream(Buffer);
+ if (Error JumpFailed = Stream.JumpToBit(ModuleBit))
+ return JumpFailed;
+
+ ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, CombinedIndex,
+ ModulePath, ModuleId);
+ return R.parseModule();
+}
+
+// Parse the specified bitcode buffer, returning the function info index.
+Expected<std::unique_ptr<ModuleSummaryIndex>> BitcodeModule::getSummary() {
+ BitstreamCursor Stream(Buffer);
+ if (Error JumpFailed = Stream.JumpToBit(ModuleBit))
+ return std::move(JumpFailed);
+
+ auto Index = std::make_unique<ModuleSummaryIndex>(/*HaveGVs=*/false);
+ ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, *Index,
+ ModuleIdentifier, 0);
+
+ if (Error Err = R.parseModule())
+ return std::move(Err);
+
+ return std::move(Index);
+}
+
+static Expected<bool> getEnableSplitLTOUnitFlag(BitstreamCursor &Stream,
+ unsigned ID) {
+ if (Error Err = Stream.EnterSubBlock(ID))
+ return std::move(Err);
+ SmallVector<uint64_t, 64> Record;
+
+ while (true) {
+ BitstreamEntry Entry;
+ if (Error E = Stream.advanceSkippingSubblocks().moveInto(Entry))
+ return std::move(E);
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::SubBlock: // Handled for us already.
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ // If no flags record found, conservatively return true to mimic
+ // behavior before this flag was added.
+ return true;
+ case BitstreamEntry::Record:
+ // The interesting case.
+ break;
+ }
+
+ // Look for the FS_FLAGS record.
+ Record.clear();
+ Expected<unsigned> MaybeBitCode = Stream.readRecord(Entry.ID, Record);
+ if (!MaybeBitCode)
+ return MaybeBitCode.takeError();
+ switch (MaybeBitCode.get()) {
+ default: // Default behavior: ignore.
+ break;
+ case bitc::FS_FLAGS: { // [flags]
+ uint64_t Flags = Record[0];
+ // Scan flags.
+ assert(Flags <= 0x7f && "Unexpected bits in flag");
+
+ return Flags & 0x8;
+ }
+ }
+ }
+ llvm_unreachable("Exit infinite loop");
+}
+
+// Check if the given bitcode buffer contains a global value summary block.
+Expected<BitcodeLTOInfo> BitcodeModule::getLTOInfo() {
+ BitstreamCursor Stream(Buffer);
+ if (Error JumpFailed = Stream.JumpToBit(ModuleBit))
+ return std::move(JumpFailed);
+
+ if (Error Err = Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
+ return std::move(Err);
+
+ while (true) {
+ llvm::BitstreamEntry Entry;
+ if (Error E = Stream.advance().moveInto(Entry))
+ return std::move(E);
+
+ switch (Entry.Kind) {
+ case BitstreamEntry::Error:
+ return error("Malformed block");
+ case BitstreamEntry::EndBlock:
+ return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/false,
+ /*EnableSplitLTOUnit=*/false};
+
+ case BitstreamEntry::SubBlock:
+ if (Entry.ID == bitc::GLOBALVAL_SUMMARY_BLOCK_ID) {
+ Expected<bool> EnableSplitLTOUnit =
+ getEnableSplitLTOUnitFlag(Stream, Entry.ID);
+ if (!EnableSplitLTOUnit)
+ return EnableSplitLTOUnit.takeError();
+ return BitcodeLTOInfo{/*IsThinLTO=*/true, /*HasSummary=*/true,
+ *EnableSplitLTOUnit};
+ }
+
+ if (Entry.ID == bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID) {
+ Expected<bool> EnableSplitLTOUnit =
+ getEnableSplitLTOUnitFlag(Stream, Entry.ID);
+ if (!EnableSplitLTOUnit)
+ return EnableSplitLTOUnit.takeError();
+ return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/true,
+ *EnableSplitLTOUnit};
+ }
+
+ // Ignore other sub-blocks.
+ if (Error Err = Stream.SkipBlock())
+ return std::move(Err);
+ continue;
+
+ case BitstreamEntry::Record:
+ if (Expected<unsigned> StreamFailed = Stream.skipRecord(Entry.ID))
+ continue;
+ else
+ return StreamFailed.takeError();
+ }
+ }
+}
+
+static Expected<BitcodeModule> getSingleModule(MemoryBufferRef Buffer) {
+ Expected<std::vector<BitcodeModule>> MsOrErr = getBitcodeModuleList(Buffer);
+ if (!MsOrErr)
+ return MsOrErr.takeError();
+
+ if (MsOrErr->size() != 1)
+ return error("Expected a single module");
+
+ return (*MsOrErr)[0];
+}
+
+Expected<std::unique_ptr<Module>>
+llvm::getLazyBitcodeModule(MemoryBufferRef Buffer, LLVMContext &Context,
+ bool ShouldLazyLoadMetadata, bool IsImporting) {
+ Expected<BitcodeModule> BM = getSingleModule(Buffer);
+ if (!BM)
+ return BM.takeError();
+
+ return BM->getLazyModule(Context, ShouldLazyLoadMetadata, IsImporting);
+}
+
+Expected<std::unique_ptr<Module>> llvm::getOwningLazyBitcodeModule(
+ std::unique_ptr<MemoryBuffer> &&Buffer, LLVMContext &Context,
+ bool ShouldLazyLoadMetadata, bool IsImporting) {
+ auto MOrErr = getLazyBitcodeModule(*Buffer, Context, ShouldLazyLoadMetadata,
+ IsImporting);
+ if (MOrErr)
+ (*MOrErr)->setOwnedMemoryBuffer(std::move(Buffer));
+ return MOrErr;
+}
+
+Expected<std::unique_ptr<Module>>
+BitcodeModule::parseModule(LLVMContext &Context,
+ DataLayoutCallbackTy DataLayoutCallback) {
+ return getModuleImpl(Context, true, false, false, DataLayoutCallback);
+ // TODO: Restore the use-lists to the in-memory state when the bitcode was
+ // written. We must defer until the Module has been fully materialized.
+}
+
+Expected<std::unique_ptr<Module>>
+llvm::parseBitcodeFile(MemoryBufferRef Buffer, LLVMContext &Context,
+ DataLayoutCallbackTy DataLayoutCallback) {
+ Expected<BitcodeModule> BM = getSingleModule(Buffer);
+ if (!BM)
+ return BM.takeError();
+
+ return BM->parseModule(Context, DataLayoutCallback);
+}
+
+Expected<std::string> llvm::getBitcodeTargetTriple(MemoryBufferRef Buffer) {
+ Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
+ if (!StreamOrErr)
+ return StreamOrErr.takeError();
+
+ return readTriple(*StreamOrErr);
+}
+
+Expected<bool> llvm::isBitcodeContainingObjCCategory(MemoryBufferRef Buffer) {
+ Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
+ if (!StreamOrErr)
+ return StreamOrErr.takeError();
+
+ return hasObjCCategory(*StreamOrErr);
+}
+
+Expected<std::string> llvm::getBitcodeProducerString(MemoryBufferRef Buffer) {
+ Expected<BitstreamCursor> StreamOrErr = initStream(Buffer);
+ if (!StreamOrErr)
+ return StreamOrErr.takeError();
+
+ return readIdentificationCode(*StreamOrErr);
+}
+
+Error llvm::readModuleSummaryIndex(MemoryBufferRef Buffer,
+ ModuleSummaryIndex &CombinedIndex,
+ uint64_t ModuleId) {
+ Expected<BitcodeModule> BM = getSingleModule(Buffer);
+ if (!BM)
+ return BM.takeError();
+
+ return BM->readSummary(CombinedIndex, BM->getModuleIdentifier(), ModuleId);
+}
+
+Expected<std::unique_ptr<ModuleSummaryIndex>>
+llvm::getModuleSummaryIndex(MemoryBufferRef Buffer) {
+ Expected<BitcodeModule> BM = getSingleModule(Buffer);
+ if (!BM)
+ return BM.takeError();
+
+ return BM->getSummary();
+}
+
+Expected<BitcodeLTOInfo> llvm::getBitcodeLTOInfo(MemoryBufferRef Buffer) {
+ Expected<BitcodeModule> BM = getSingleModule(Buffer);
+ if (!BM)
+ return BM.takeError();
+
+ return BM->getLTOInfo();
+}
+
+Expected<std::unique_ptr<ModuleSummaryIndex>>
+llvm::getModuleSummaryIndexForFile(StringRef Path,
+ bool IgnoreEmptyThinLTOIndexFile) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFileOrSTDIN(Path);
+ if (!FileOrErr)
+ return errorCodeToError(FileOrErr.getError());
+ if (IgnoreEmptyThinLTOIndexFile && !(*FileOrErr)->getBufferSize())
+ return nullptr;
+ return getModuleSummaryIndex(**FileOrErr);
+}
diff --git a/contrib/libs/llvm14/lib/Bitcode/Reader/MetadataLoader.cpp b/contrib/libs/llvm14/lib/Bitcode/Reader/MetadataLoader.cpp
new file mode 100644
index 0000000000..0f41115140
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Reader/MetadataLoader.cpp
@@ -0,0 +1,2379 @@
+//===- MetadataLoader.cpp - Internal BitcodeReader implementation ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "MetadataLoader.h"
+#include "ValueList.h"
+
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/LLVMBitCodes.h"
+#include "llvm/Bitstream/BitstreamReader.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/AutoUpgrade.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/CallingConv.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfo.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/DiagnosticPrinter.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GVMaterializer.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/OperandTraits.h"
+#include "llvm/IR/TrackingMDRef.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/ValueHandle.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <limits>
+#include <map>
+#include <string>
+#include <system_error>
+#include <tuple>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "bitcode-reader"
+
+STATISTIC(NumMDStringLoaded, "Number of MDStrings loaded");
+STATISTIC(NumMDNodeTemporary, "Number of MDNode::Temporary created");
+STATISTIC(NumMDRecordLoaded, "Number of Metadata records loaded");
+
+/// Flag whether we need to import full type definitions for ThinLTO.
+/// Currently needed for Darwin and LLDB.
+static cl::opt<bool> ImportFullTypeDefinitions(
+ "import-full-type-definitions", cl::init(false), cl::Hidden,
+ cl::desc("Import full type definitions for ThinLTO."));
+
+static cl::opt<bool> DisableLazyLoading(
+ "disable-ondemand-mds-loading", cl::init(false), cl::Hidden,
+ cl::desc("Force disable the lazy-loading on-demand of metadata when "
+ "loading bitcode for importing."));
+
+namespace {
+
+static int64_t unrotateSign(uint64_t U) { return (U & 1) ? ~(U >> 1) : U >> 1; }
+
+class BitcodeReaderMetadataList {
+ /// Array of metadata references.
+ ///
+ /// Don't use std::vector here. Some versions of libc++ copy (instead of
+ /// move) on resize, and TrackingMDRef is very expensive to copy.
+ SmallVector<TrackingMDRef, 1> MetadataPtrs;
+
+ /// The set of indices in MetadataPtrs above of forward references that were
+ /// generated.
+ SmallDenseSet<unsigned, 1> ForwardReference;
+
+ /// The set of indices in MetadataPtrs above of Metadata that need to be
+ /// resolved.
+ SmallDenseSet<unsigned, 1> UnresolvedNodes;
+
+ /// Structures for resolving old type refs.
+ struct {
+ SmallDenseMap<MDString *, TempMDTuple, 1> Unknown;
+ SmallDenseMap<MDString *, DICompositeType *, 1> Final;
+ SmallDenseMap<MDString *, DICompositeType *, 1> FwdDecls;
+ SmallVector<std::pair<TrackingMDRef, TempMDTuple>, 1> Arrays;
+ } OldTypeRefs;
+
+ LLVMContext &Context;
+
+ /// Maximum number of valid references. Forward references exceeding the
+ /// maximum must be invalid.
+ unsigned RefsUpperBound;
+
+public:
+ BitcodeReaderMetadataList(LLVMContext &C, size_t RefsUpperBound)
+ : Context(C),
+ RefsUpperBound(std::min((size_t)std::numeric_limits<unsigned>::max(),
+ RefsUpperBound)) {}
+
+ // vector compatibility methods
+ unsigned size() const { return MetadataPtrs.size(); }
+ void resize(unsigned N) { MetadataPtrs.resize(N); }
+ void push_back(Metadata *MD) { MetadataPtrs.emplace_back(MD); }
+ void clear() { MetadataPtrs.clear(); }
+ Metadata *back() const { return MetadataPtrs.back(); }
+ void pop_back() { MetadataPtrs.pop_back(); }
+ bool empty() const { return MetadataPtrs.empty(); }
+
+ Metadata *operator[](unsigned i) const {
+ assert(i < MetadataPtrs.size());
+ return MetadataPtrs[i];
+ }
+
+ Metadata *lookup(unsigned I) const {
+ if (I < MetadataPtrs.size())
+ return MetadataPtrs[I];
+ return nullptr;
+ }
+
+ void shrinkTo(unsigned N) {
+ assert(N <= size() && "Invalid shrinkTo request!");
+ assert(ForwardReference.empty() && "Unexpected forward refs");
+ assert(UnresolvedNodes.empty() && "Unexpected unresolved node");
+ MetadataPtrs.resize(N);
+ }
+
+ /// Return the given metadata, creating a replaceable forward reference if
+ /// necessary.
+ Metadata *getMetadataFwdRef(unsigned Idx);
+
+ /// Return the given metadata only if it is fully resolved.
+ ///
+ /// Gives the same result as \a lookup(), unless \a MDNode::isResolved()
+ /// would give \c false.
+ Metadata *getMetadataIfResolved(unsigned Idx);
+
+ MDNode *getMDNodeFwdRefOrNull(unsigned Idx);
+ void assignValue(Metadata *MD, unsigned Idx);
+ void tryToResolveCycles();
+ bool hasFwdRefs() const { return !ForwardReference.empty(); }
+ int getNextFwdRef() {
+ assert(hasFwdRefs());
+ return *ForwardReference.begin();
+ }
+
+ /// Upgrade a type that had an MDString reference.
+ void addTypeRef(MDString &UUID, DICompositeType &CT);
+
+ /// Upgrade a type that had an MDString reference.
+ Metadata *upgradeTypeRef(Metadata *MaybeUUID);
+
+ /// Upgrade a type ref array that may have MDString references.
+ Metadata *upgradeTypeRefArray(Metadata *MaybeTuple);
+
+private:
+ Metadata *resolveTypeRefArray(Metadata *MaybeTuple);
+};
+
+void BitcodeReaderMetadataList::assignValue(Metadata *MD, unsigned Idx) {
+ if (auto *MDN = dyn_cast<MDNode>(MD))
+ if (!MDN->isResolved())
+ UnresolvedNodes.insert(Idx);
+
+ if (Idx == size()) {
+ push_back(MD);
+ return;
+ }
+
+ if (Idx >= size())
+ resize(Idx + 1);
+
+ TrackingMDRef &OldMD = MetadataPtrs[Idx];
+ if (!OldMD) {
+ OldMD.reset(MD);
+ return;
+ }
+
+ // If there was a forward reference to this value, replace it.
+ TempMDTuple PrevMD(cast<MDTuple>(OldMD.get()));
+ PrevMD->replaceAllUsesWith(MD);
+ ForwardReference.erase(Idx);
+}
+
+Metadata *BitcodeReaderMetadataList::getMetadataFwdRef(unsigned Idx) {
+ // Bail out for a clearly invalid value.
+ if (Idx >= RefsUpperBound)
+ return nullptr;
+
+ if (Idx >= size())
+ resize(Idx + 1);
+
+ if (Metadata *MD = MetadataPtrs[Idx])
+ return MD;
+
+ // Track forward refs to be resolved later.
+ ForwardReference.insert(Idx);
+
+ // Create and return a placeholder, which will later be RAUW'd.
+ ++NumMDNodeTemporary;
+ Metadata *MD = MDNode::getTemporary(Context, None).release();
+ MetadataPtrs[Idx].reset(MD);
+ return MD;
+}
+
+Metadata *BitcodeReaderMetadataList::getMetadataIfResolved(unsigned Idx) {
+ Metadata *MD = lookup(Idx);
+ if (auto *N = dyn_cast_or_null<MDNode>(MD))
+ if (!N->isResolved())
+ return nullptr;
+ return MD;
+}
+
+MDNode *BitcodeReaderMetadataList::getMDNodeFwdRefOrNull(unsigned Idx) {
+ return dyn_cast_or_null<MDNode>(getMetadataFwdRef(Idx));
+}
+
+void BitcodeReaderMetadataList::tryToResolveCycles() {
+ if (!ForwardReference.empty())
+ // Still forward references... can't resolve cycles.
+ return;
+
+ // Give up on finding a full definition for any forward decls that remain.
+ for (const auto &Ref : OldTypeRefs.FwdDecls)
+ OldTypeRefs.Final.insert(Ref);
+ OldTypeRefs.FwdDecls.clear();
+
+ // Upgrade from old type ref arrays. In strange cases, this could add to
+ // OldTypeRefs.Unknown.
+ for (const auto &Array : OldTypeRefs.Arrays)
+ Array.second->replaceAllUsesWith(resolveTypeRefArray(Array.first.get()));
+ OldTypeRefs.Arrays.clear();
+
+ // Replace old string-based type refs with the resolved node, if possible.
+ // If we haven't seen the node, leave it to the verifier to complain about
+ // the invalid string reference.
+ for (const auto &Ref : OldTypeRefs.Unknown) {
+ if (DICompositeType *CT = OldTypeRefs.Final.lookup(Ref.first))
+ Ref.second->replaceAllUsesWith(CT);
+ else
+ Ref.second->replaceAllUsesWith(Ref.first);
+ }
+ OldTypeRefs.Unknown.clear();
+
+ if (UnresolvedNodes.empty())
+ // Nothing to do.
+ return;
+
+ // Resolve any cycles.
+ for (unsigned I : UnresolvedNodes) {
+ auto &MD = MetadataPtrs[I];
+ auto *N = dyn_cast_or_null<MDNode>(MD);
+ if (!N)
+ continue;
+
+ assert(!N->isTemporary() && "Unexpected forward reference");
+ N->resolveCycles();
+ }
+
+ // Make sure we return early again until there's another unresolved ref.
+ UnresolvedNodes.clear();
+}
+
+void BitcodeReaderMetadataList::addTypeRef(MDString &UUID,
+ DICompositeType &CT) {
+ assert(CT.getRawIdentifier() == &UUID && "Mismatched UUID");
+ if (CT.isForwardDecl())
+ OldTypeRefs.FwdDecls.insert(std::make_pair(&UUID, &CT));
+ else
+ OldTypeRefs.Final.insert(std::make_pair(&UUID, &CT));
+}
+
+Metadata *BitcodeReaderMetadataList::upgradeTypeRef(Metadata *MaybeUUID) {
+ auto *UUID = dyn_cast_or_null<MDString>(MaybeUUID);
+ if (LLVM_LIKELY(!UUID))
+ return MaybeUUID;
+
+ if (auto *CT = OldTypeRefs.Final.lookup(UUID))
+ return CT;
+
+ auto &Ref = OldTypeRefs.Unknown[UUID];
+ if (!Ref)
+ Ref = MDNode::getTemporary(Context, None);
+ return Ref.get();
+}
+
+Metadata *BitcodeReaderMetadataList::upgradeTypeRefArray(Metadata *MaybeTuple) {
+ auto *Tuple = dyn_cast_or_null<MDTuple>(MaybeTuple);
+ if (!Tuple || Tuple->isDistinct())
+ return MaybeTuple;
+
+ // Look through the array immediately if possible.
+ if (!Tuple->isTemporary())
+ return resolveTypeRefArray(Tuple);
+
+ // Create and return a placeholder to use for now. Eventually
+ // resolveTypeRefArrays() will be resolve this forward reference.
+ OldTypeRefs.Arrays.emplace_back(
+ std::piecewise_construct, std::forward_as_tuple(Tuple),
+ std::forward_as_tuple(MDTuple::getTemporary(Context, None)));
+ return OldTypeRefs.Arrays.back().second.get();
+}
+
+Metadata *BitcodeReaderMetadataList::resolveTypeRefArray(Metadata *MaybeTuple) {
+ auto *Tuple = dyn_cast_or_null<MDTuple>(MaybeTuple);
+ if (!Tuple || Tuple->isDistinct())
+ return MaybeTuple;
+
+ // Look through the DITypeRefArray, upgrading each DIType *.
+ SmallVector<Metadata *, 32> Ops;
+ Ops.reserve(Tuple->getNumOperands());
+ for (Metadata *MD : Tuple->operands())
+ Ops.push_back(upgradeTypeRef(MD));
+
+ return MDTuple::get(Context, Ops);
+}
+
+namespace {
+
+class PlaceholderQueue {
+ // Placeholders would thrash around when moved, so store in a std::deque
+ // instead of some sort of vector.
+ std::deque<DistinctMDOperandPlaceholder> PHs;
+
+public:
+ ~PlaceholderQueue() {
+ assert(empty() &&
+ "PlaceholderQueue hasn't been flushed before being destroyed");
+ }
+ bool empty() const { return PHs.empty(); }
+ DistinctMDOperandPlaceholder &getPlaceholderOp(unsigned ID);
+ void flush(BitcodeReaderMetadataList &MetadataList);
+
+ /// Return the list of temporaries nodes in the queue, these need to be
+ /// loaded before we can flush the queue.
+ void getTemporaries(BitcodeReaderMetadataList &MetadataList,
+ DenseSet<unsigned> &Temporaries) {
+ for (auto &PH : PHs) {
+ auto ID = PH.getID();
+ auto *MD = MetadataList.lookup(ID);
+ if (!MD) {
+ Temporaries.insert(ID);
+ continue;
+ }
+ auto *N = dyn_cast_or_null<MDNode>(MD);
+ if (N && N->isTemporary())
+ Temporaries.insert(ID);
+ }
+ }
+};
+
+} // end anonymous namespace
+
+DistinctMDOperandPlaceholder &PlaceholderQueue::getPlaceholderOp(unsigned ID) {
+ PHs.emplace_back(ID);
+ return PHs.back();
+}
+
+void PlaceholderQueue::flush(BitcodeReaderMetadataList &MetadataList) {
+ while (!PHs.empty()) {
+ auto *MD = MetadataList.lookup(PHs.front().getID());
+ assert(MD && "Flushing placeholder on unassigned MD");
+#ifndef NDEBUG
+ if (auto *MDN = dyn_cast<MDNode>(MD))
+ assert(MDN->isResolved() &&
+ "Flushing Placeholder while cycles aren't resolved");
+#endif
+ PHs.front().replaceUseWith(MD);
+ PHs.pop_front();
+ }
+}
+
+} // anonymous namespace
+
+static Error error(const Twine &Message) {
+ return make_error<StringError>(
+ Message, make_error_code(BitcodeError::CorruptedBitcode));
+}
+
/// Implementation of MetadataLoader: parses METADATA_BLOCKs and holds the
/// state required to lazy-load individual metadata records on demand (string
/// and record bit-position indexes, a dedicated cursor, and the bookkeeping
/// for the various debug-info upgrade passes).
class MetadataLoader::MetadataLoaderImpl {
  BitcodeReaderMetadataList MetadataList;
  BitcodeReaderValueList &ValueList;
  BitstreamCursor &Stream;
  LLVMContext &Context;
  Module &TheModule;
  std::function<Type *(unsigned)> getTypeByID;

  /// Cursor associated with the lazy-loading of Metadata. This is the easy way
  /// to keep around the right "context" (Abbrev list) to be able to jump in
  /// the middle of the metadata block and load any record.
  BitstreamCursor IndexCursor;

  /// Index that keeps track of MDString values.
  std::vector<StringRef> MDStringRef;

  /// On-demand loading of a single MDString. Requires the index above to be
  /// populated.
  MDString *lazyLoadOneMDString(unsigned Idx);

  /// Index that keeps track of where to find a metadata record in the stream.
  std::vector<uint64_t> GlobalMetadataBitPosIndex;

  /// Cursor position of the start of the global decl attachments, to enable
  /// loading using the index built for lazy loading, instead of forward
  /// references.
  uint64_t GlobalDeclAttachmentPos = 0;

#ifndef NDEBUG
  /// Basic correctness check that we end up parsing all of the global decl
  /// attachments.
  unsigned NumGlobalDeclAttachSkipped = 0;
  unsigned NumGlobalDeclAttachParsed = 0;
#endif

  /// Load the global decl attachments, using the index built for lazy loading.
  Expected<bool> loadGlobalDeclAttachments();

  /// Populate the index above to enable lazily loading of metadata, and load
  /// the named metadata as well as the transitively referenced global
  /// Metadata.
  Expected<bool> lazyLoadModuleMetadataBlock();

  /// On-demand loading of a single metadata. Requires the index above to be
  /// populated.
  void lazyLoadOneMetadata(unsigned Idx, PlaceholderQueue &Placeholders);

  // Keep mapping of seen pairs of old-style CU <-> SP, and update pointers to
  // point from SP to CU after a block is completely parsed.
  std::vector<std::pair<DICompileUnit *, Metadata *>> CUSubprograms;

  /// Functions that need to be matched with subprograms when upgrading old
  /// metadata.
  SmallDenseMap<Function *, DISubprogram *, 16> FunctionsWithSPs;

  // Map the bitcode's custom MDKind ID to the Module's MDKind ID.
  DenseMap<unsigned, unsigned> MDKindMap;

  bool StripTBAA = false;
  bool HasSeenOldLoopTags = false;
  bool NeedUpgradeToDIGlobalVariableExpression = false;
  bool NeedDeclareExpressionUpgrade = false;

  /// True if metadata is being parsed for a module being ThinLTO imported.
  bool IsImporting = false;

  Error parseOneMetadata(SmallVectorImpl<uint64_t> &Record, unsigned Code,
                         PlaceholderQueue &Placeholders, StringRef Blob,
                         unsigned &NextMetadataNo);
  Error parseMetadataStrings(ArrayRef<uint64_t> Record, StringRef Blob,
                             function_ref<void(StringRef)> CallBack);
  Error parseGlobalObjectAttachment(GlobalObject &GO,
                                    ArrayRef<uint64_t> Record);
  Error parseMetadataKindRecord(SmallVectorImpl<uint64_t> &Record);

  void resolveForwardRefsAndPlaceholders(PlaceholderQueue &Placeholders);

  /// Upgrade old-style CU <-> SP pointers to point from SP to CU.
  void upgradeCUSubprograms() {
    for (auto CU_SP : CUSubprograms)
      if (auto *SPs = dyn_cast_or_null<MDTuple>(CU_SP.second))
        for (auto &Op : SPs->operands())
          if (auto *SP = dyn_cast_or_null<DISubprogram>(Op))
            SP->replaceUnit(CU_SP.first);
    CUSubprograms.clear();
  }

  /// Upgrade old-style bare DIGlobalVariables to DIGlobalVariableExpressions.
  void upgradeCUVariables() {
    if (!NeedUpgradeToDIGlobalVariableExpression)
      return;

    // Upgrade list of variables attached to the CUs.
    if (NamedMDNode *CUNodes = TheModule.getNamedMetadata("llvm.dbg.cu"))
      for (unsigned I = 0, E = CUNodes->getNumOperands(); I != E; ++I) {
        auto *CU = cast<DICompileUnit>(CUNodes->getOperand(I));
        if (auto *GVs = dyn_cast_or_null<MDTuple>(CU->getRawGlobalVariables()))
          for (unsigned I = 0; I < GVs->getNumOperands(); I++)
            if (auto *GV =
                    dyn_cast_or_null<DIGlobalVariable>(GVs->getOperand(I))) {
              // Wrap the bare variable in an expression-less
              // DIGlobalVariableExpression.
              auto *DGVE = DIGlobalVariableExpression::getDistinct(
                  Context, GV, DIExpression::get(Context, {}));
              GVs->replaceOperandWith(I, DGVE);
            }
      }

    // Upgrade variables attached to globals.
    for (auto &GV : TheModule.globals()) {
      SmallVector<MDNode *, 1> MDs;
      GV.getMetadata(LLVMContext::MD_dbg, MDs);
      GV.eraseMetadata(LLVMContext::MD_dbg);
      for (auto *MD : MDs)
        if (auto *DGV = dyn_cast<DIGlobalVariable>(MD)) {
          auto *DGVE = DIGlobalVariableExpression::getDistinct(
              Context, DGV, DIExpression::get(Context, {}));
          GV.addMetadata(LLVMContext::MD_dbg, *DGVE);
        } else
          GV.addMetadata(LLVMContext::MD_dbg, *MD);
    }
  }

  /// Remove a leading DW_OP_deref from DIExpressions in a dbg.declare that
  /// describes a function argument.
  void upgradeDeclareExpressions(Function &F) {
    if (!NeedDeclareExpressionUpgrade)
      return;

    for (auto &BB : F)
      for (auto &I : BB)
        if (auto *DDI = dyn_cast<DbgDeclareInst>(&I))
          if (auto *DIExpr = DDI->getExpression())
            if (DIExpr->startsWithDeref() &&
                isa_and_nonnull<Argument>(DDI->getAddress())) {
              // Rebuild the expression without the leading DW_OP_deref.
              SmallVector<uint64_t, 8> Ops;
              Ops.append(std::next(DIExpr->elements_begin()),
                         DIExpr->elements_end());
              DDI->setExpression(DIExpression::get(Context, Ops));
            }
  }

  /// Upgrade the expression from previous versions.
  Error upgradeDIExpression(uint64_t FromVersion,
                            MutableArrayRef<uint64_t> &Expr,
                            SmallVectorImpl<uint64_t> &Buffer) {
    auto N = Expr.size();
    // Each case falls through to the next so an old expression is upgraded
    // one version step at a time until it reaches the current version (3).
    switch (FromVersion) {
    default:
      return error("Invalid record");
    case 0:
      // Version 0 used DW_OP_bit_piece where later versions use
      // DW_OP_LLVM_fragment.
      if (N >= 3 && Expr[N - 3] == dwarf::DW_OP_bit_piece)
        Expr[N - 3] = dwarf::DW_OP_LLVM_fragment;
      LLVM_FALLTHROUGH;
    case 1:
      // Move DW_OP_deref to the end (but keep it before a trailing
      // DW_OP_LLVM_fragment triple, if any).
      if (N && Expr[0] == dwarf::DW_OP_deref) {
        auto End = Expr.end();
        if (Expr.size() >= 3 &&
            *std::prev(End, 3) == dwarf::DW_OP_LLVM_fragment)
          End = std::prev(End, 3);
        std::move(std::next(Expr.begin()), End, Expr.begin());
        *std::prev(End) = dwarf::DW_OP_deref;
      }
      NeedDeclareExpressionUpgrade = true;
      LLVM_FALLTHROUGH;
    case 2: {
      // Change DW_OP_plus to DW_OP_plus_uconst.
      // Change DW_OP_minus to DW_OP_uconst, DW_OP_minus
      auto SubExpr = ArrayRef<uint64_t>(Expr);
      while (!SubExpr.empty()) {
        // Skip past other operators with their operands
        // for this version of the IR, obtained from
        // historic DIExpression::ExprOperand::getSize().
        size_t HistoricSize;
        switch (SubExpr.front()) {
        default:
          HistoricSize = 1;
          break;
        case dwarf::DW_OP_constu:
        case dwarf::DW_OP_minus:
        case dwarf::DW_OP_plus:
          HistoricSize = 2;
          break;
        case dwarf::DW_OP_LLVM_fragment:
          HistoricSize = 3;
          break;
        }

        // If the expression is malformed, make sure we don't
        // copy more elements than we should.
        HistoricSize = std::min(SubExpr.size(), HistoricSize);
        ArrayRef<uint64_t> Args = SubExpr.slice(1, HistoricSize - 1);

        // Rewrite the operator (keeping its operands) into Buffer.
        switch (SubExpr.front()) {
        case dwarf::DW_OP_plus:
          Buffer.push_back(dwarf::DW_OP_plus_uconst);
          Buffer.append(Args.begin(), Args.end());
          break;
        case dwarf::DW_OP_minus:
          Buffer.push_back(dwarf::DW_OP_constu);
          Buffer.append(Args.begin(), Args.end());
          Buffer.push_back(dwarf::DW_OP_minus);
          break;
        default:
          Buffer.push_back(*SubExpr.begin());
          Buffer.append(Args.begin(), Args.end());
          break;
        }

        // Continue with remaining elements.
        SubExpr = SubExpr.slice(HistoricSize);
      }
      // The upgraded expression now lives in Buffer; point Expr at it.
      Expr = MutableArrayRef<uint64_t>(Buffer);
      LLVM_FALLTHROUGH;
    }
    case 3:
      // Up-to-date!
      break;
    }

    return Error::success();
  }

  /// Run the module-level debug-info upgrades after a block is parsed.
  void upgradeDebugInfo() {
    upgradeCUSubprograms();
    upgradeCUVariables();
  }

public:
  MetadataLoaderImpl(BitstreamCursor &Stream, Module &TheModule,
                     BitcodeReaderValueList &ValueList,
                     std::function<Type *(unsigned)> getTypeByID,
                     bool IsImporting)
      : MetadataList(TheModule.getContext(), Stream.SizeInBytes()),
        ValueList(ValueList), Stream(Stream), Context(TheModule.getContext()),
        TheModule(TheModule), getTypeByID(std::move(getTypeByID)),
        IsImporting(IsImporting) {}

  Error parseMetadata(bool ModuleLevel);

  bool hasFwdRefs() const { return MetadataList.hasFwdRefs(); }

  /// Return the metadata for \p ID, lazily loading it (or the corresponding
  /// MDString) when an index is available, otherwise returning a forward
  /// reference.
  Metadata *getMetadataFwdRefOrLoad(unsigned ID) {
    if (ID < MDStringRef.size())
      return lazyLoadOneMDString(ID);
    if (auto *MD = MetadataList.lookup(ID))
      return MD;
    // If lazy-loading is enabled, we try recursively to load the operand
    // instead of creating a temporary.
    if (ID < (MDStringRef.size() + GlobalMetadataBitPosIndex.size())) {
      PlaceholderQueue Placeholders;
      lazyLoadOneMetadata(ID, Placeholders);
      resolveForwardRefsAndPlaceholders(Placeholders);
      return MetadataList.lookup(ID);
    }
    return MetadataList.getMetadataFwdRef(ID);
  }

  DISubprogram *lookupSubprogramForFunction(Function *F) {
    return FunctionsWithSPs.lookup(F);
  }

  bool hasSeenOldLoopTags() const { return HasSeenOldLoopTags; }

  Error parseMetadataAttachment(
      Function &F, const SmallVectorImpl<Instruction *> &InstructionList);

  Error parseMetadataKinds();

  void setStripTBAA(bool Value) { StripTBAA = Value; }
  bool isStrippingTBAA() const { return StripTBAA; }

  unsigned size() const { return MetadataList.size(); }
  void shrinkTo(unsigned N) { MetadataList.shrinkTo(N); }
  void upgradeDebugIntrinsics(Function &F) { upgradeDeclareExpressions(F); }
};
+
Expected<bool>
MetadataLoader::MetadataLoaderImpl::lazyLoadModuleMetadataBlock() {
  // Scan the whole metadata block once with a dedicated cursor, building the
  // MDString table and the per-record bit-position index so records can be
  // re-read on demand later. Returns true on success, false when a record
  // kind unsupported by lazy-loading is encountered (caller falls back to
  // eager loading), or an Error on malformed bitcode.
  IndexCursor = Stream;
  SmallVector<uint64_t, 64> Record;
  GlobalDeclAttachmentPos = 0;
  // Get the abbrevs, and preload record positions to make them lazy-loadable.
  while (true) {
    uint64_t SavedPos = IndexCursor.GetCurrentBitNo();
    BitstreamEntry Entry;
    if (Error E =
            IndexCursor
                .advanceSkippingSubblocks(BitstreamCursor::AF_DontPopBlockAtEnd)
                .moveInto(Entry))
      return std::move(E);

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock: {
      return true;
    }
    case BitstreamEntry::Record: {
      // The interesting case.
      ++NumMDRecordLoaded;
      uint64_t CurrentPos = IndexCursor.GetCurrentBitNo();
      unsigned Code;
      if (Error E = IndexCursor.skipRecord(Entry.ID).moveInto(Code))
        return std::move(E);
      switch (Code) {
      case bitc::METADATA_STRINGS: {
        // Rewind and parse the strings.
        if (Error Err = IndexCursor.JumpToBit(CurrentPos))
          return std::move(Err);
        StringRef Blob;
        Record.clear();
        // NOTE: the empty then-branch is intentional; readRecord is only
        // checked for failure, its success value is unused here.
        if (Expected<unsigned> MaybeRecord =
                IndexCursor.readRecord(Entry.ID, Record, &Blob))
          ;
        else
          return MaybeRecord.takeError();
        unsigned NumStrings = Record[0];
        MDStringRef.reserve(NumStrings);
        auto IndexNextMDString = [&](StringRef Str) {
          MDStringRef.push_back(Str);
        };
        if (auto Err = parseMetadataStrings(Record, Blob, IndexNextMDString))
          return std::move(Err);
        break;
      }
      case bitc::METADATA_INDEX_OFFSET: {
        // This is the offset to the index, when we see this we skip all the
        // records and load only an index to these.
        if (Error Err = IndexCursor.JumpToBit(CurrentPos))
          return std::move(Err);
        Record.clear();
        if (Expected<unsigned> MaybeRecord =
                IndexCursor.readRecord(Entry.ID, Record))
          ;
        else
          return MaybeRecord.takeError();
        if (Record.size() != 2)
          return error("Invalid record");
        // The 64-bit offset is encoded as two halves: low 32 bits in
        // Record[0], high 32 bits in Record[1].
        auto Offset = Record[0] + (Record[1] << 32);
        auto BeginPos = IndexCursor.GetCurrentBitNo();
        if (Error Err = IndexCursor.JumpToBit(BeginPos + Offset))
          return std::move(Err);
        Expected<BitstreamEntry> MaybeEntry =
            IndexCursor.advanceSkippingSubblocks(
                BitstreamCursor::AF_DontPopBlockAtEnd);
        if (!MaybeEntry)
          return MaybeEntry.takeError();
        Entry = MaybeEntry.get();
        assert(Entry.Kind == BitstreamEntry::Record &&
               "Corrupted bitcode: Expected `Record` when trying to find the "
               "Metadata index");
        Record.clear();
        if (Expected<unsigned> MaybeCode =
                IndexCursor.readRecord(Entry.ID, Record))
          assert(MaybeCode.get() == bitc::METADATA_INDEX &&
                 "Corrupted bitcode: Expected `METADATA_INDEX` when trying to "
                 "find the Metadata index");
        else
          return MaybeCode.takeError();
        // Delta unpack: each element is the bit-distance from the previous
        // record's position.
        auto CurrentValue = BeginPos;
        GlobalMetadataBitPosIndex.reserve(Record.size());
        for (auto &Elt : Record) {
          CurrentValue += Elt;
          GlobalMetadataBitPosIndex.push_back(CurrentValue);
        }
        break;
      }
      case bitc::METADATA_INDEX:
        // We don't expect to get there, the Index is loaded when we encounter
        // the offset.
        return error("Corrupted Metadata block");
      case bitc::METADATA_NAME: {
        // Named metadata need to be materialized now and aren't deferred.
        if (Error Err = IndexCursor.JumpToBit(CurrentPos))
          return std::move(Err);
        Record.clear();

        unsigned Code;
        if (Expected<unsigned> MaybeCode =
                IndexCursor.readRecord(Entry.ID, Record)) {
          Code = MaybeCode.get();
          assert(Code == bitc::METADATA_NAME);
        } else
          return MaybeCode.takeError();

        // Read name of the named metadata.
        SmallString<8> Name(Record.begin(), Record.end());
        if (Expected<unsigned> MaybeCode = IndexCursor.ReadCode())
          Code = MaybeCode.get();
        else
          return MaybeCode.takeError();

        // Named Metadata comes in two parts, we expect the name to be followed
        // by the node
        Record.clear();
        if (Expected<unsigned> MaybeNextBitCode =
                IndexCursor.readRecord(Code, Record))
          assert(MaybeNextBitCode.get() == bitc::METADATA_NAMED_NODE);
        else
          return MaybeNextBitCode.takeError();

        // Read named metadata elements.
        unsigned Size = Record.size();
        NamedMDNode *NMD = TheModule.getOrInsertNamedMetadata(Name);
        for (unsigned i = 0; i != Size; ++i) {
          // FIXME: We could use a placeholder here, however NamedMDNode are
          // taking MDNode as operand and not using the Metadata infrastructure.
          // It is acknowledged by 'TODO: Inherit from Metadata' in the
          // NamedMDNode class definition.
          MDNode *MD = MetadataList.getMDNodeFwdRefOrNull(Record[i]);
          assert(MD && "Invalid metadata: expect fwd ref to MDNode");
          NMD->addOperand(MD);
        }
        break;
      }
      case bitc::METADATA_GLOBAL_DECL_ATTACHMENT: {
        // Only remember where the first one starts; they are all parsed
        // later by loadGlobalDeclAttachments().
        if (!GlobalDeclAttachmentPos)
          GlobalDeclAttachmentPos = SavedPos;
#ifndef NDEBUG
        NumGlobalDeclAttachSkipped++;
#endif
        break;
      }
      case bitc::METADATA_KIND:
      case bitc::METADATA_STRING_OLD:
      case bitc::METADATA_OLD_FN_NODE:
      case bitc::METADATA_OLD_NODE:
      case bitc::METADATA_VALUE:
      case bitc::METADATA_DISTINCT_NODE:
      case bitc::METADATA_NODE:
      case bitc::METADATA_LOCATION:
      case bitc::METADATA_GENERIC_DEBUG:
      case bitc::METADATA_SUBRANGE:
      case bitc::METADATA_ENUMERATOR:
      case bitc::METADATA_BASIC_TYPE:
      case bitc::METADATA_STRING_TYPE:
      case bitc::METADATA_DERIVED_TYPE:
      case bitc::METADATA_COMPOSITE_TYPE:
      case bitc::METADATA_SUBROUTINE_TYPE:
      case bitc::METADATA_MODULE:
      case bitc::METADATA_FILE:
      case bitc::METADATA_COMPILE_UNIT:
      case bitc::METADATA_SUBPROGRAM:
      case bitc::METADATA_LEXICAL_BLOCK:
      case bitc::METADATA_LEXICAL_BLOCK_FILE:
      case bitc::METADATA_NAMESPACE:
      case bitc::METADATA_COMMON_BLOCK:
      case bitc::METADATA_MACRO:
      case bitc::METADATA_MACRO_FILE:
      case bitc::METADATA_TEMPLATE_TYPE:
      case bitc::METADATA_TEMPLATE_VALUE:
      case bitc::METADATA_GLOBAL_VAR:
      case bitc::METADATA_LOCAL_VAR:
      case bitc::METADATA_LABEL:
      case bitc::METADATA_EXPRESSION:
      case bitc::METADATA_OBJC_PROPERTY:
      case bitc::METADATA_IMPORTED_ENTITY:
      case bitc::METADATA_GLOBAL_VAR_EXPR:
      case bitc::METADATA_GENERIC_SUBRANGE:
        // We don't expect to see any of these, if we see one, give up on
        // lazy-loading and fallback.
        MDStringRef.clear();
        GlobalMetadataBitPosIndex.clear();
        return false;
      }
      break;
    }
    }
  }
}
+
// Load the global decl attachments after building the lazy loading index.
// We don't load them "lazily" - all global decl attachments must be
// parsed since they aren't materialized on demand. However, by delaying
// their parsing until after the index is created, we can use the index
// instead of creating temporaries.
Expected<bool> MetadataLoader::MetadataLoaderImpl::loadGlobalDeclAttachments() {
  // Nothing to do if we didn't find any of these metadata records.
  if (!GlobalDeclAttachmentPos)
    return true;
  // Use a temporary cursor so that we don't mess up the main Stream cursor or
  // the lazy loading IndexCursor (which holds the necessary abbrev ids).
  BitstreamCursor TempCursor = Stream;
  SmallVector<uint64_t, 64> Record;
  // Jump to the position before the first global decl attachment, so we can
  // scan for the first BitstreamEntry record.
  if (Error Err = TempCursor.JumpToBit(GlobalDeclAttachmentPos))
    return std::move(Err);
  while (true) {
    BitstreamEntry Entry;
    if (Error E =
            TempCursor
                .advanceSkippingSubblocks(BitstreamCursor::AF_DontPopBlockAtEnd)
                .moveInto(Entry))
      return std::move(E);

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      // Check that we parsed them all.
      assert(NumGlobalDeclAttachSkipped == NumGlobalDeclAttachParsed);
      return true;
    case BitstreamEntry::Record:
      break;
    }
    uint64_t CurrentPos = TempCursor.GetCurrentBitNo();
    Expected<unsigned> MaybeCode = TempCursor.skipRecord(Entry.ID);
    if (!MaybeCode)
      return MaybeCode.takeError();
    if (MaybeCode.get() != bitc::METADATA_GLOBAL_DECL_ATTACHMENT) {
      // Anything other than a global decl attachment signals the end of
      // these records. Check that we parsed them all.
      assert(NumGlobalDeclAttachSkipped == NumGlobalDeclAttachParsed);
      return true;
    }
#ifndef NDEBUG
    NumGlobalDeclAttachParsed++;
#endif
    // FIXME: we need to do this early because we don't materialize global
    // value explicitly.
    if (Error Err = TempCursor.JumpToBit(CurrentPos))
      return std::move(Err);
    Record.clear();
    // Only the error state of readRecord matters here; the record code was
    // already validated via skipRecord above.
    if (Expected<unsigned> MaybeRecord =
            TempCursor.readRecord(Entry.ID, Record))
      ;
    else
      return MaybeRecord.takeError();
    // Record layout: a value ID followed by (kind, metadata) pairs, so the
    // total size must be odd.
    if (Record.size() % 2 == 0)
      return error("Invalid record");
    unsigned ValueID = Record[0];
    if (ValueID >= ValueList.size())
      return error("Invalid record");
    if (auto *GO = dyn_cast<GlobalObject>(ValueList[ValueID])) {
      // Need to save and restore the current position since
      // parseGlobalObjectAttachment will resolve all forward references which
      // would require parsing from locations stored in the index.
      CurrentPos = TempCursor.GetCurrentBitNo();
      if (Error Err = parseGlobalObjectAttachment(
              *GO, ArrayRef<uint64_t>(Record).slice(1)))
        return std::move(Err);
      if (Error Err = TempCursor.JumpToBit(CurrentPos))
        return std::move(Err);
    }
  }
}
+
/// Parse a METADATA_BLOCK. If ModuleLevel is true then we are parsing
/// module level metadata. Tries lazy-loading first (when ThinLTO-importing);
/// otherwise reads every record eagerly via parseOneMetadata.
Error MetadataLoader::MetadataLoaderImpl::parseMetadata(bool ModuleLevel) {
  if (!ModuleLevel && MetadataList.hasFwdRefs())
    return error("Invalid metadata: fwd refs into function blocks");

  // Record the entry position so that we can jump back here and efficiently
  // skip the whole block in case we lazy-load.
  auto EntryPos = Stream.GetCurrentBitNo();

  if (Error Err = Stream.EnterSubBlock(bitc::METADATA_BLOCK_ID))
    return Err;

  SmallVector<uint64_t, 64> Record;
  PlaceholderQueue Placeholders;

  // We lazy-load module-level metadata: we build an index for each record, and
  // then load individual record as needed, starting with the named metadata.
  if (ModuleLevel && IsImporting && MetadataList.empty() &&
      !DisableLazyLoading) {
    auto SuccessOrErr = lazyLoadModuleMetadataBlock();
    if (!SuccessOrErr)
      return SuccessOrErr.takeError();
    if (SuccessOrErr.get()) {
      // An index was successfully created and we will be able to load metadata
      // on-demand.
      MetadataList.resize(MDStringRef.size() +
                          GlobalMetadataBitPosIndex.size());

      // Now that we have built the index, load the global decl attachments
      // that were deferred during that process. This avoids creating
      // temporaries.
      SuccessOrErr = loadGlobalDeclAttachments();
      if (!SuccessOrErr)
        return SuccessOrErr.takeError();
      assert(SuccessOrErr.get());

      // Reading the named metadata created forward references and/or
      // placeholders, that we flush here.
      resolveForwardRefsAndPlaceholders(Placeholders);
      upgradeDebugInfo();
      // Return at the beginning of the block, since it is easy to skip it
      // entirely from there.
      Stream.ReadBlockEnd(); // Pop the abbrev block context.
      if (Error Err = IndexCursor.JumpToBit(EntryPos))
        return Err;
      if (Error Err = Stream.SkipBlock()) {
        // FIXME this drops the error on the floor, which
        // ThinLTO/X86/debuginfo-cu-import.ll relies on.
        consumeError(std::move(Err));
        return Error::success();
      }
      return Error::success();
    }
    // Couldn't load an index, fallback to loading all the block "old-style".
  }

  unsigned NextMetadataNo = MetadataList.size();

  // Read all the records.
  while (true) {
    BitstreamEntry Entry;
    if (Error E = Stream.advanceSkippingSubblocks().moveInto(Entry))
      return E;

    switch (Entry.Kind) {
    case BitstreamEntry::SubBlock: // Handled for us already.
    case BitstreamEntry::Error:
      return error("Malformed block");
    case BitstreamEntry::EndBlock:
      // End of the block: resolve anything outstanding and run the
      // debug-info upgrades before returning.
      resolveForwardRefsAndPlaceholders(Placeholders);
      upgradeDebugInfo();
      return Error::success();
    case BitstreamEntry::Record:
      // The interesting case.
      break;
    }

    // Read a record.
    Record.clear();
    StringRef Blob;
    ++NumMDRecordLoaded;
    if (Expected<unsigned> MaybeCode =
            Stream.readRecord(Entry.ID, Record, &Blob)) {
      if (Error Err = parseOneMetadata(Record, MaybeCode.get(), Placeholders,
                                       Blob, NextMetadataNo))
        return Err;
    } else
      return MaybeCode.takeError();
  }
}
+
+MDString *MetadataLoader::MetadataLoaderImpl::lazyLoadOneMDString(unsigned ID) {
+ ++NumMDStringLoaded;
+ if (Metadata *MD = MetadataList.lookup(ID))
+ return cast<MDString>(MD);
+ auto MDS = MDString::get(Context, MDStringRef[ID]);
+ MetadataList.assignValue(MDS, ID);
+ return MDS;
+}
+
/// Load the single metadata record with index \p ID via the lazy-loading
/// index, unless a non-temporary node is already assigned to that ID.
/// Failures here are fatal: the index was validated when it was built.
void MetadataLoader::MetadataLoaderImpl::lazyLoadOneMetadata(
    unsigned ID, PlaceholderQueue &Placeholders) {
  assert(ID < (MDStringRef.size()) + GlobalMetadataBitPosIndex.size());
  assert(ID >= MDStringRef.size() && "Unexpected lazy-loading of MDString");
  // Lookup first if the metadata hasn't already been loaded.
  if (auto *MD = MetadataList.lookup(ID)) {
    auto *N = cast<MDNode>(MD);
    if (!N->isTemporary())
      return;
  }
  SmallVector<uint64_t, 64> Record;
  StringRef Blob;
  // The bit-position index is keyed by metadata ID minus the number of
  // MDStrings (strings occupy the first IDs in the numbering).
  if (Error Err = IndexCursor.JumpToBit(
          GlobalMetadataBitPosIndex[ID - MDStringRef.size()]))
    report_fatal_error("lazyLoadOneMetadata failed jumping: " +
                       Twine(toString(std::move(Err))));
  BitstreamEntry Entry;
  if (Error E = IndexCursor.advanceSkippingSubblocks().moveInto(Entry))
    // FIXME this drops the error on the floor.
    report_fatal_error("lazyLoadOneMetadata failed advanceSkippingSubblocks: " +
                       Twine(toString(std::move(E))));
  ++NumMDRecordLoaded;
  if (Expected<unsigned> MaybeCode =
          IndexCursor.readRecord(Entry.ID, Record, &Blob)) {
    if (Error Err =
            parseOneMetadata(Record, MaybeCode.get(), Placeholders, Blob, ID))
      report_fatal_error("Can't lazyload MD, parseOneMetadata: " +
                         Twine(toString(std::move(Err))));
  } else
    report_fatal_error("Can't lazyload MD: " +
                       Twine(toString(MaybeCode.takeError())));
}
+
+/// Ensure that all forward-references and placeholders are resolved.
+/// Iteratively lazy-loading metadata on-demand if needed.
+void MetadataLoader::MetadataLoaderImpl::resolveForwardRefsAndPlaceholders(
+ PlaceholderQueue &Placeholders) {
+ DenseSet<unsigned> Temporaries;
+ while (true) {
+ // Populate Temporaries with the placeholders that haven't been loaded yet.
+ Placeholders.getTemporaries(MetadataList, Temporaries);
+
+ // If we don't have any temporary, or FwdReference, we're done!
+ if (Temporaries.empty() && !MetadataList.hasFwdRefs())
+ break;
+
+ // First, load all the temporaries. This can add new placeholders or
+ // forward references.
+ for (auto ID : Temporaries)
+ lazyLoadOneMetadata(ID, Placeholders);
+ Temporaries.clear();
+
+ // Second, load the forward-references. This can also add new placeholders
+ // or forward references.
+ while (MetadataList.hasFwdRefs())
+ lazyLoadOneMetadata(MetadataList.getNextFwdRef(), Placeholders);
+ }
+ // At this point we don't have any forward reference remaining, or temporary
+ // that haven't been loaded. We can safely drop RAUW support and mark cycles
+ // as resolved.
+ MetadataList.tryToResolveCycles();
+
+ // Finally, everything is in place, we can replace the placeholders operands
+ // with the final node they refer to.
+ Placeholders.flush(MetadataList);
+}
+
+Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
+ SmallVectorImpl<uint64_t> &Record, unsigned Code,
+ PlaceholderQueue &Placeholders, StringRef Blob, unsigned &NextMetadataNo) {
+
+ bool IsDistinct = false;
+ auto getMD = [&](unsigned ID) -> Metadata * {
+ if (ID < MDStringRef.size())
+ return lazyLoadOneMDString(ID);
+ if (!IsDistinct) {
+ if (auto *MD = MetadataList.lookup(ID))
+ return MD;
+ // If lazy-loading is enabled, we try recursively to load the operand
+ // instead of creating a temporary.
+ if (ID < (MDStringRef.size() + GlobalMetadataBitPosIndex.size())) {
+ // Create a temporary for the node that is referencing the operand we
+ // will lazy-load. It is needed before recursing in case there are
+ // uniquing cycles.
+ MetadataList.getMetadataFwdRef(NextMetadataNo);
+ lazyLoadOneMetadata(ID, Placeholders);
+ return MetadataList.lookup(ID);
+ }
+ // Return a temporary.
+ return MetadataList.getMetadataFwdRef(ID);
+ }
+ if (auto *MD = MetadataList.getMetadataIfResolved(ID))
+ return MD;
+ return &Placeholders.getPlaceholderOp(ID);
+ };
+ auto getMDOrNull = [&](unsigned ID) -> Metadata * {
+ if (ID)
+ return getMD(ID - 1);
+ return nullptr;
+ };
+ auto getMDOrNullWithoutPlaceholders = [&](unsigned ID) -> Metadata * {
+ if (ID)
+ return MetadataList.getMetadataFwdRef(ID - 1);
+ return nullptr;
+ };
+ auto getMDString = [&](unsigned ID) -> MDString * {
+ // This requires that the ID is not really a forward reference. In
+ // particular, the MDString must already have been resolved.
+ auto MDS = getMDOrNull(ID);
+ return cast_or_null<MDString>(MDS);
+ };
+
+ // Support for old type refs.
+ auto getDITypeRefOrNull = [&](unsigned ID) {
+ return MetadataList.upgradeTypeRef(getMDOrNull(ID));
+ };
+
+#define GET_OR_DISTINCT(CLASS, ARGS) \
+ (IsDistinct ? CLASS::getDistinct ARGS : CLASS::get ARGS)
+
+ switch (Code) {
+ default: // Default behavior: ignore.
+ break;
+ case bitc::METADATA_NAME: {
+ // Read name of the named metadata.
+ SmallString<8> Name(Record.begin(), Record.end());
+ Record.clear();
+ if (Error E = Stream.ReadCode().moveInto(Code))
+ return E;
+
+ ++NumMDRecordLoaded;
+ if (Expected<unsigned> MaybeNextBitCode = Stream.readRecord(Code, Record)) {
+ if (MaybeNextBitCode.get() != bitc::METADATA_NAMED_NODE)
+ return error("METADATA_NAME not followed by METADATA_NAMED_NODE");
+ } else
+ return MaybeNextBitCode.takeError();
+
+ // Read named metadata elements.
+ unsigned Size = Record.size();
+ NamedMDNode *NMD = TheModule.getOrInsertNamedMetadata(Name);
+ for (unsigned i = 0; i != Size; ++i) {
+ MDNode *MD = MetadataList.getMDNodeFwdRefOrNull(Record[i]);
+ if (!MD)
+ return error("Invalid named metadata: expect fwd ref to MDNode");
+ NMD->addOperand(MD);
+ }
+ break;
+ }
+ case bitc::METADATA_OLD_FN_NODE: {
+ // Deprecated, but still needed to read old bitcode files.
+ // This is a LocalAsMetadata record, the only type of function-local
+ // metadata.
+ if (Record.size() % 2 == 1)
+ return error("Invalid record");
+
+ // If this isn't a LocalAsMetadata record, we're dropping it. This used
+ // to be legal, but there's no upgrade path.
+ auto dropRecord = [&] {
+ MetadataList.assignValue(MDNode::get(Context, None), NextMetadataNo);
+ NextMetadataNo++;
+ };
+ if (Record.size() != 2) {
+ dropRecord();
+ break;
+ }
+
+ Type *Ty = getTypeByID(Record[0]);
+ if (Ty->isMetadataTy() || Ty->isVoidTy()) {
+ dropRecord();
+ break;
+ }
+
+ MetadataList.assignValue(
+ LocalAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_OLD_NODE: {
+ // Deprecated, but still needed to read old bitcode files.
+ if (Record.size() % 2 == 1)
+ return error("Invalid record");
+
+ unsigned Size = Record.size();
+ SmallVector<Metadata *, 8> Elts;
+ for (unsigned i = 0; i != Size; i += 2) {
+ Type *Ty = getTypeByID(Record[i]);
+ if (!Ty)
+ return error("Invalid record");
+ if (Ty->isMetadataTy())
+ Elts.push_back(getMD(Record[i + 1]));
+ else if (!Ty->isVoidTy()) {
+ auto *MD =
+ ValueAsMetadata::get(ValueList.getValueFwdRef(Record[i + 1], Ty));
+ assert(isa<ConstantAsMetadata>(MD) &&
+ "Expected non-function-local metadata");
+ Elts.push_back(MD);
+ } else
+ Elts.push_back(nullptr);
+ }
+ MetadataList.assignValue(MDNode::get(Context, Elts), NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_VALUE: {
+ if (Record.size() != 2)
+ return error("Invalid record");
+
+ Type *Ty = getTypeByID(Record[0]);
+ if (Ty->isMetadataTy() || Ty->isVoidTy())
+ return error("Invalid record");
+
+ MetadataList.assignValue(
+ ValueAsMetadata::get(ValueList.getValueFwdRef(Record[1], Ty)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_DISTINCT_NODE:
+ IsDistinct = true;
+ LLVM_FALLTHROUGH;
+ case bitc::METADATA_NODE: {
+ SmallVector<Metadata *, 8> Elts;
+ Elts.reserve(Record.size());
+ for (unsigned ID : Record)
+ Elts.push_back(getMDOrNull(ID));
+ MetadataList.assignValue(IsDistinct ? MDNode::getDistinct(Context, Elts)
+ : MDNode::get(Context, Elts),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_LOCATION: {
+ if (Record.size() != 5 && Record.size() != 6)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ unsigned Line = Record[1];
+ unsigned Column = Record[2];
+ Metadata *Scope = getMD(Record[3]);
+ Metadata *InlinedAt = getMDOrNull(Record[4]);
+ bool ImplicitCode = Record.size() == 6 && Record[5];
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DILocation, (Context, Line, Column, Scope, InlinedAt,
+ ImplicitCode)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_GENERIC_DEBUG: {
+ if (Record.size() < 4)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ unsigned Tag = Record[1];
+ unsigned Version = Record[2];
+
+ if (Tag >= 1u << 16 || Version != 0)
+ return error("Invalid record");
+
+ auto *Header = getMDString(Record[3]);
+ SmallVector<Metadata *, 8> DwarfOps;
+ for (unsigned I = 4, E = Record.size(); I != E; ++I)
+ DwarfOps.push_back(getMDOrNull(Record[I]));
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(GenericDINode, (Context, Tag, Header, DwarfOps)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_SUBRANGE: {
+ Metadata *Val = nullptr;
+ // Operand 'count' is interpreted as:
+ // - Signed integer (version 0)
+ // - Metadata node (version 1)
+ // Operand 'lowerBound' is interpreted as:
+ // - Signed integer (version 0 and 1)
+ // - Metadata node (version 2)
+ // Operands 'upperBound' and 'stride' are interpreted as:
+ // - Metadata node (version 2)
+ switch (Record[0] >> 1) {
+ case 0:
+ Val = GET_OR_DISTINCT(DISubrange,
+ (Context, Record[1], unrotateSign(Record[2])));
+ break;
+ case 1:
+ Val = GET_OR_DISTINCT(DISubrange, (Context, getMDOrNull(Record[1]),
+ unrotateSign(Record[2])));
+ break;
+ case 2:
+ Val = GET_OR_DISTINCT(
+ DISubrange, (Context, getMDOrNull(Record[1]), getMDOrNull(Record[2]),
+ getMDOrNull(Record[3]), getMDOrNull(Record[4])));
+ break;
+ default:
+ return error("Invalid record: Unsupported version of DISubrange");
+ }
+
+ MetadataList.assignValue(Val, NextMetadataNo);
+ IsDistinct = Record[0] & 1;
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_GENERIC_SUBRANGE: {
+ Metadata *Val = nullptr;
+ Val = GET_OR_DISTINCT(DIGenericSubrange,
+ (Context, getMDOrNull(Record[1]),
+ getMDOrNull(Record[2]), getMDOrNull(Record[3]),
+ getMDOrNull(Record[4])));
+
+ MetadataList.assignValue(Val, NextMetadataNo);
+ IsDistinct = Record[0] & 1;
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_ENUMERATOR: {
+ if (Record.size() < 3)
+ return error("Invalid record");
+
+ IsDistinct = Record[0] & 1;
+ bool IsUnsigned = Record[0] & 2;
+ bool IsBigInt = Record[0] & 4;
+ APInt Value;
+
+ if (IsBigInt) {
+ const uint64_t BitWidth = Record[1];
+ const size_t NumWords = Record.size() - 3;
+ Value = readWideAPInt(makeArrayRef(&Record[3], NumWords), BitWidth);
+ } else
+ Value = APInt(64, unrotateSign(Record[1]), !IsUnsigned);
+
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIEnumerator,
+ (Context, Value, IsUnsigned, getMDString(Record[2]))),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_BASIC_TYPE: {
+ if (Record.size() < 6 || Record.size() > 7)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ DINode::DIFlags Flags = (Record.size() > 6)
+ ? static_cast<DINode::DIFlags>(Record[6])
+ : DINode::FlagZero;
+
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIBasicType,
+ (Context, Record[1], getMDString(Record[2]), Record[3],
+ Record[4], Record[5], Flags)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_STRING_TYPE: {
+ if (Record.size() > 9 || Record.size() < 8)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ bool SizeIs8 = Record.size() == 8;
+ // StringLocationExp (i.e. Record[5]) is added at a later time
+ // than the other fields. The code here enables backward compatibility.
+ Metadata *StringLocationExp = SizeIs8 ? nullptr : getMDOrNull(Record[5]);
+ unsigned Offset = SizeIs8 ? 5 : 6;
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIStringType,
+ (Context, Record[1], getMDString(Record[2]),
+ getMDOrNull(Record[3]), getMDOrNull(Record[4]),
+ StringLocationExp, Record[Offset], Record[Offset + 1],
+ Record[Offset + 2])),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_DERIVED_TYPE: {
+ if (Record.size() < 12 || Record.size() > 14)
+ return error("Invalid record");
+
+ // DWARF address space is encoded as N->getDWARFAddressSpace() + 1. 0 means
+ // that there is no DWARF address space associated with DIDerivedType.
+ Optional<unsigned> DWARFAddressSpace;
+ if (Record.size() > 12 && Record[12])
+ DWARFAddressSpace = Record[12] - 1;
+
+ Metadata *Annotations = nullptr;
+ if (Record.size() > 13 && Record[13])
+ Annotations = getMDOrNull(Record[13]);
+
+ IsDistinct = Record[0];
+ DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[10]);
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIDerivedType,
+ (Context, Record[1], getMDString(Record[2]),
+ getMDOrNull(Record[3]), Record[4],
+ getDITypeRefOrNull(Record[5]),
+ getDITypeRefOrNull(Record[6]), Record[7], Record[8],
+ Record[9], DWARFAddressSpace, Flags,
+ getDITypeRefOrNull(Record[11]), Annotations)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_COMPOSITE_TYPE: {
+ if (Record.size() < 16 || Record.size() > 22)
+ return error("Invalid record");
+
+ // If we have a UUID and this is not a forward declaration, lookup the
+ // mapping.
+ IsDistinct = Record[0] & 0x1;
+ bool IsNotUsedInTypeRef = Record[0] >= 2;
+ unsigned Tag = Record[1];
+ MDString *Name = getMDString(Record[2]);
+ Metadata *File = getMDOrNull(Record[3]);
+ unsigned Line = Record[4];
+ Metadata *Scope = getDITypeRefOrNull(Record[5]);
+ Metadata *BaseType = nullptr;
+ uint64_t SizeInBits = Record[7];
+ if (Record[8] > (uint64_t)std::numeric_limits<uint32_t>::max())
+ return error("Alignment value is too large");
+ uint32_t AlignInBits = Record[8];
+ uint64_t OffsetInBits = 0;
+ DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[10]);
+ Metadata *Elements = nullptr;
+ unsigned RuntimeLang = Record[12];
+ Metadata *VTableHolder = nullptr;
+ Metadata *TemplateParams = nullptr;
+ Metadata *Discriminator = nullptr;
+ Metadata *DataLocation = nullptr;
+ Metadata *Associated = nullptr;
+ Metadata *Allocated = nullptr;
+ Metadata *Rank = nullptr;
+ Metadata *Annotations = nullptr;
+ auto *Identifier = getMDString(Record[15]);
+ // If this module is being parsed so that it can be ThinLTO imported
+ // into another module, composite types only need to be imported
+ // as type declarations (unless full type definitions requested).
+ // Create type declarations up front to save memory. Also, buildODRType
+ // handles the case where this is type ODRed with a definition needed
+ // by the importing module, in which case the existing definition is
+ // used.
+ if (IsImporting && !ImportFullTypeDefinitions && Identifier &&
+ (Tag == dwarf::DW_TAG_enumeration_type ||
+ Tag == dwarf::DW_TAG_class_type ||
+ Tag == dwarf::DW_TAG_structure_type ||
+ Tag == dwarf::DW_TAG_union_type)) {
+ Flags = Flags | DINode::FlagFwdDecl;
+ } else {
+ BaseType = getDITypeRefOrNull(Record[6]);
+ OffsetInBits = Record[9];
+ Elements = getMDOrNull(Record[11]);
+ VTableHolder = getDITypeRefOrNull(Record[13]);
+ TemplateParams = getMDOrNull(Record[14]);
+ if (Record.size() > 16)
+ Discriminator = getMDOrNull(Record[16]);
+ if (Record.size() > 17)
+ DataLocation = getMDOrNull(Record[17]);
+ if (Record.size() > 19) {
+ Associated = getMDOrNull(Record[18]);
+ Allocated = getMDOrNull(Record[19]);
+ }
+ if (Record.size() > 20) {
+ Rank = getMDOrNull(Record[20]);
+ }
+ if (Record.size() > 21) {
+ Annotations = getMDOrNull(Record[21]);
+ }
+ }
+ DICompositeType *CT = nullptr;
+ if (Identifier)
+ CT = DICompositeType::buildODRType(
+ Context, *Identifier, Tag, Name, File, Line, Scope, BaseType,
+ SizeInBits, AlignInBits, OffsetInBits, Flags, Elements, RuntimeLang,
+ VTableHolder, TemplateParams, Discriminator, DataLocation, Associated,
+ Allocated, Rank, Annotations);
+
+ // Create a node if we didn't get a lazy ODR type.
+ if (!CT)
+ CT = GET_OR_DISTINCT(DICompositeType,
+ (Context, Tag, Name, File, Line, Scope, BaseType,
+ SizeInBits, AlignInBits, OffsetInBits, Flags,
+ Elements, RuntimeLang, VTableHolder, TemplateParams,
+ Identifier, Discriminator, DataLocation, Associated,
+ Allocated, Rank, Annotations));
+ if (!IsNotUsedInTypeRef && Identifier)
+ MetadataList.addTypeRef(*Identifier, *cast<DICompositeType>(CT));
+
+ MetadataList.assignValue(CT, NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_SUBROUTINE_TYPE: {
+ if (Record.size() < 3 || Record.size() > 4)
+ return error("Invalid record");
+ bool IsOldTypeRefArray = Record[0] < 2;
+ unsigned CC = (Record.size() > 3) ? Record[3] : 0;
+
+ IsDistinct = Record[0] & 0x1;
+ DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[1]);
+ Metadata *Types = getMDOrNull(Record[2]);
+ if (LLVM_UNLIKELY(IsOldTypeRefArray))
+ Types = MetadataList.upgradeTypeRefArray(Types);
+
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DISubroutineType, (Context, Flags, CC, Types)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+
+ case bitc::METADATA_MODULE: {
+ if (Record.size() < 5 || Record.size() > 9)
+ return error("Invalid record");
+
+ unsigned Offset = Record.size() >= 8 ? 2 : 1;
+ IsDistinct = Record[0];
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(
+ DIModule,
+ (Context, Record.size() >= 8 ? getMDOrNull(Record[1]) : nullptr,
+ getMDOrNull(Record[0 + Offset]), getMDString(Record[1 + Offset]),
+ getMDString(Record[2 + Offset]), getMDString(Record[3 + Offset]),
+ getMDString(Record[4 + Offset]),
+ Record.size() <= 7 ? 0 : Record[7],
+ Record.size() <= 8 ? false : Record[8])),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+
+ case bitc::METADATA_FILE: {
+ if (Record.size() != 3 && Record.size() != 5 && Record.size() != 6)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ Optional<DIFile::ChecksumInfo<MDString *>> Checksum;
+ // The BitcodeWriter writes null bytes into Record[3:4] when the Checksum
+ // is not present. This matches up with the old internal representation,
+ // and the old encoding for CSK_None in the ChecksumKind. The new
+ // representation reserves the value 0 in the ChecksumKind to continue to
+ // encode None in a backwards-compatible way.
+ if (Record.size() > 4 && Record[3] && Record[4])
+ Checksum.emplace(static_cast<DIFile::ChecksumKind>(Record[3]),
+ getMDString(Record[4]));
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(
+ DIFile,
+ (Context, getMDString(Record[1]), getMDString(Record[2]), Checksum,
+ Record.size() > 5 ? Optional<MDString *>(getMDString(Record[5]))
+ : None)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_COMPILE_UNIT: {
+ if (Record.size() < 14 || Record.size() > 22)
+ return error("Invalid record");
+
+ // Ignore Record[0], which indicates whether this compile unit is
+ // distinct. It's always distinct.
+ IsDistinct = true;
+ auto *CU = DICompileUnit::getDistinct(
+ Context, Record[1], getMDOrNull(Record[2]), getMDString(Record[3]),
+ Record[4], getMDString(Record[5]), Record[6], getMDString(Record[7]),
+ Record[8], getMDOrNull(Record[9]), getMDOrNull(Record[10]),
+ getMDOrNull(Record[12]), getMDOrNull(Record[13]),
+ Record.size() <= 15 ? nullptr : getMDOrNull(Record[15]),
+ Record.size() <= 14 ? 0 : Record[14],
+ Record.size() <= 16 ? true : Record[16],
+ Record.size() <= 17 ? false : Record[17],
+ Record.size() <= 18 ? 0 : Record[18],
+ Record.size() <= 19 ? false : Record[19],
+ Record.size() <= 20 ? nullptr : getMDString(Record[20]),
+ Record.size() <= 21 ? nullptr : getMDString(Record[21]));
+
+ MetadataList.assignValue(CU, NextMetadataNo);
+ NextMetadataNo++;
+
+    // Upgrade the list of subprograms.
+ if (Metadata *SPs = getMDOrNullWithoutPlaceholders(Record[11]))
+ CUSubprograms.push_back({CU, SPs});
+ break;
+ }
+ case bitc::METADATA_SUBPROGRAM: {
+ if (Record.size() < 18 || Record.size() > 21)
+ return error("Invalid record");
+
+ bool HasSPFlags = Record[0] & 4;
+
+ DINode::DIFlags Flags;
+ DISubprogram::DISPFlags SPFlags;
+ if (!HasSPFlags)
+ Flags = static_cast<DINode::DIFlags>(Record[11 + 2]);
+ else {
+ Flags = static_cast<DINode::DIFlags>(Record[11]);
+ SPFlags = static_cast<DISubprogram::DISPFlags>(Record[9]);
+ }
+
+ // Support for old metadata when
+ // subprogram specific flags are placed in DIFlags.
+ const unsigned DIFlagMainSubprogram = 1 << 21;
+ bool HasOldMainSubprogramFlag = Flags & DIFlagMainSubprogram;
+ if (HasOldMainSubprogramFlag)
+ // Remove old DIFlagMainSubprogram from DIFlags.
+ // Note: This assumes that any future use of bit 21 defaults to it
+ // being 0.
+ Flags &= ~static_cast<DINode::DIFlags>(DIFlagMainSubprogram);
+
+ if (HasOldMainSubprogramFlag && HasSPFlags)
+ SPFlags |= DISubprogram::SPFlagMainSubprogram;
+ else if (!HasSPFlags)
+ SPFlags = DISubprogram::toSPFlags(
+ /*IsLocalToUnit=*/Record[7], /*IsDefinition=*/Record[8],
+ /*IsOptimized=*/Record[14], /*Virtuality=*/Record[11],
+ /*IsMainSubprogram=*/HasOldMainSubprogramFlag);
+
+ // All definitions should be distinct.
+ IsDistinct = (Record[0] & 1) || (SPFlags & DISubprogram::SPFlagDefinition);
+ // Version 1 has a Function as Record[15].
+ // Version 2 has removed Record[15].
+ // Version 3 has the Unit as Record[15].
+ // Version 4 added thisAdjustment.
+ // Version 5 repacked flags into DISPFlags, changing many element numbers.
+ bool HasUnit = Record[0] & 2;
+ if (!HasSPFlags && HasUnit && Record.size() < 19)
+ return error("Invalid record");
+ if (HasSPFlags && !HasUnit)
+ return error("Invalid record");
+ // Accommodate older formats.
+ bool HasFn = false;
+ bool HasThisAdj = true;
+ bool HasThrownTypes = true;
+ bool HasAnnotations = false;
+ unsigned OffsetA = 0;
+ unsigned OffsetB = 0;
+ if (!HasSPFlags) {
+ OffsetA = 2;
+ OffsetB = 2;
+ if (Record.size() >= 19) {
+ HasFn = !HasUnit;
+ OffsetB++;
+ }
+ HasThisAdj = Record.size() >= 20;
+ HasThrownTypes = Record.size() >= 21;
+ } else {
+ HasAnnotations = Record.size() >= 19;
+ }
+ Metadata *CUorFn = getMDOrNull(Record[12 + OffsetB]);
+ DISubprogram *SP = GET_OR_DISTINCT(
+ DISubprogram,
+ (Context,
+ getDITypeRefOrNull(Record[1]), // scope
+ getMDString(Record[2]), // name
+ getMDString(Record[3]), // linkageName
+ getMDOrNull(Record[4]), // file
+ Record[5], // line
+ getMDOrNull(Record[6]), // type
+ Record[7 + OffsetA], // scopeLine
+ getDITypeRefOrNull(Record[8 + OffsetA]), // containingType
+ Record[10 + OffsetA], // virtualIndex
+ HasThisAdj ? Record[16 + OffsetB] : 0, // thisAdjustment
+ Flags, // flags
+ SPFlags, // SPFlags
+ HasUnit ? CUorFn : nullptr, // unit
+ getMDOrNull(Record[13 + OffsetB]), // templateParams
+ getMDOrNull(Record[14 + OffsetB]), // declaration
+ getMDOrNull(Record[15 + OffsetB]), // retainedNodes
+ HasThrownTypes ? getMDOrNull(Record[17 + OffsetB])
+ : nullptr, // thrownTypes
+ HasAnnotations ? getMDOrNull(Record[18 + OffsetB])
+ : nullptr // annotations
+ ));
+ MetadataList.assignValue(SP, NextMetadataNo);
+ NextMetadataNo++;
+
+ // Upgrade sp->function mapping to function->sp mapping.
+ if (HasFn) {
+ if (auto *CMD = dyn_cast_or_null<ConstantAsMetadata>(CUorFn))
+ if (auto *F = dyn_cast<Function>(CMD->getValue())) {
+ if (F->isMaterializable())
+ // Defer until materialized; unmaterialized functions may not have
+ // metadata.
+ FunctionsWithSPs[F] = SP;
+ else if (!F->empty())
+ F->setSubprogram(SP);
+ }
+ }
+ break;
+ }
+ case bitc::METADATA_LEXICAL_BLOCK: {
+ if (Record.size() != 5)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DILexicalBlock,
+ (Context, getMDOrNull(Record[1]),
+ getMDOrNull(Record[2]), Record[3], Record[4])),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_LEXICAL_BLOCK_FILE: {
+ if (Record.size() != 4)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DILexicalBlockFile,
+ (Context, getMDOrNull(Record[1]),
+ getMDOrNull(Record[2]), Record[3])),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_COMMON_BLOCK: {
+ IsDistinct = Record[0] & 1;
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DICommonBlock,
+ (Context, getMDOrNull(Record[1]),
+ getMDOrNull(Record[2]), getMDString(Record[3]),
+ getMDOrNull(Record[4]), Record[5])),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_NAMESPACE: {
+ // Newer versions of DINamespace dropped file and line.
+ MDString *Name;
+ if (Record.size() == 3)
+ Name = getMDString(Record[2]);
+ else if (Record.size() == 5)
+ Name = getMDString(Record[3]);
+ else
+ return error("Invalid record");
+
+ IsDistinct = Record[0] & 1;
+ bool ExportSymbols = Record[0] & 2;
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DINamespace,
+ (Context, getMDOrNull(Record[1]), Name, ExportSymbols)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_MACRO: {
+ if (Record.size() != 5)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIMacro,
+ (Context, Record[1], Record[2], getMDString(Record[3]),
+ getMDString(Record[4]))),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_MACRO_FILE: {
+ if (Record.size() != 5)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIMacroFile,
+ (Context, Record[1], Record[2], getMDOrNull(Record[3]),
+ getMDOrNull(Record[4]))),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_TEMPLATE_TYPE: {
+ if (Record.size() < 3 || Record.size() > 4)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DITemplateTypeParameter,
+ (Context, getMDString(Record[1]),
+ getDITypeRefOrNull(Record[2]),
+ (Record.size() == 4) ? getMDOrNull(Record[3])
+ : getMDOrNull(false))),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_TEMPLATE_VALUE: {
+ if (Record.size() < 5 || Record.size() > 6)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(
+ DITemplateValueParameter,
+ (Context, Record[1], getMDString(Record[2]),
+ getDITypeRefOrNull(Record[3]),
+ (Record.size() == 6) ? getMDOrNull(Record[4]) : getMDOrNull(false),
+ (Record.size() == 6) ? getMDOrNull(Record[5])
+ : getMDOrNull(Record[4]))),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_GLOBAL_VAR: {
+ if (Record.size() < 11 || Record.size() > 13)
+ return error("Invalid record");
+
+ IsDistinct = Record[0] & 1;
+ unsigned Version = Record[0] >> 1;
+
+ if (Version == 2) {
+ Metadata *Annotations = nullptr;
+ if (Record.size() > 12)
+ Annotations = getMDOrNull(Record[12]);
+
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIGlobalVariable,
+ (Context, getMDOrNull(Record[1]),
+ getMDString(Record[2]), getMDString(Record[3]),
+ getMDOrNull(Record[4]), Record[5],
+ getDITypeRefOrNull(Record[6]), Record[7], Record[8],
+ getMDOrNull(Record[9]), getMDOrNull(Record[10]),
+ Record[11], Annotations)),
+ NextMetadataNo);
+
+ NextMetadataNo++;
+ } else if (Version == 1) {
+ // No upgrade necessary. A null field will be introduced to indicate
+ // that no parameter information is available.
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(
+ DIGlobalVariable,
+ (Context, getMDOrNull(Record[1]), getMDString(Record[2]),
+ getMDString(Record[3]), getMDOrNull(Record[4]), Record[5],
+ getDITypeRefOrNull(Record[6]), Record[7], Record[8],
+ getMDOrNull(Record[10]), nullptr, Record[11], nullptr)),
+ NextMetadataNo);
+
+ NextMetadataNo++;
+ } else if (Version == 0) {
+ // Upgrade old metadata, which stored a global variable reference or a
+ // ConstantInt here.
+ NeedUpgradeToDIGlobalVariableExpression = true;
+ Metadata *Expr = getMDOrNull(Record[9]);
+ uint32_t AlignInBits = 0;
+ if (Record.size() > 11) {
+ if (Record[11] > (uint64_t)std::numeric_limits<uint32_t>::max())
+ return error("Alignment value is too large");
+ AlignInBits = Record[11];
+ }
+ GlobalVariable *Attach = nullptr;
+ if (auto *CMD = dyn_cast_or_null<ConstantAsMetadata>(Expr)) {
+ if (auto *GV = dyn_cast<GlobalVariable>(CMD->getValue())) {
+ Attach = GV;
+ Expr = nullptr;
+ } else if (auto *CI = dyn_cast<ConstantInt>(CMD->getValue())) {
+ Expr = DIExpression::get(Context,
+ {dwarf::DW_OP_constu, CI->getZExtValue(),
+ dwarf::DW_OP_stack_value});
+ } else {
+ Expr = nullptr;
+ }
+ }
+ DIGlobalVariable *DGV = GET_OR_DISTINCT(
+ DIGlobalVariable,
+ (Context, getMDOrNull(Record[1]), getMDString(Record[2]),
+ getMDString(Record[3]), getMDOrNull(Record[4]), Record[5],
+ getDITypeRefOrNull(Record[6]), Record[7], Record[8],
+ getMDOrNull(Record[10]), nullptr, AlignInBits, nullptr));
+
+ DIGlobalVariableExpression *DGVE = nullptr;
+ if (Attach || Expr)
+ DGVE = DIGlobalVariableExpression::getDistinct(
+ Context, DGV, Expr ? Expr : DIExpression::get(Context, {}));
+ if (Attach)
+ Attach->addDebugInfo(DGVE);
+
+ auto *MDNode = Expr ? cast<Metadata>(DGVE) : cast<Metadata>(DGV);
+ MetadataList.assignValue(MDNode, NextMetadataNo);
+ NextMetadataNo++;
+ } else
+ return error("Invalid record");
+
+ break;
+ }
+ case bitc::METADATA_LOCAL_VAR: {
+    // 10th field is for the obsoleted 'inlinedAt:' field.
+ if (Record.size() < 8 || Record.size() > 10)
+ return error("Invalid record");
+
+ IsDistinct = Record[0] & 1;
+ bool HasAlignment = Record[0] & 2;
+ // 2nd field used to be an artificial tag, either DW_TAG_auto_variable or
+ // DW_TAG_arg_variable, if we have alignment flag encoded it means, that
+ // this is newer version of record which doesn't have artificial tag.
+ bool HasTag = !HasAlignment && Record.size() > 8;
+ DINode::DIFlags Flags = static_cast<DINode::DIFlags>(Record[7 + HasTag]);
+ uint32_t AlignInBits = 0;
+ Metadata *Annotations = nullptr;
+ if (HasAlignment) {
+ if (Record[8] > (uint64_t)std::numeric_limits<uint32_t>::max())
+ return error("Alignment value is too large");
+ AlignInBits = Record[8];
+ if (Record.size() > 9)
+ Annotations = getMDOrNull(Record[9]);
+ }
+
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DILocalVariable,
+ (Context, getMDOrNull(Record[1 + HasTag]),
+ getMDString(Record[2 + HasTag]),
+ getMDOrNull(Record[3 + HasTag]), Record[4 + HasTag],
+ getDITypeRefOrNull(Record[5 + HasTag]),
+ Record[6 + HasTag], Flags, AlignInBits, Annotations)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_LABEL: {
+ if (Record.size() != 5)
+ return error("Invalid record");
+
+ IsDistinct = Record[0] & 1;
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DILabel, (Context, getMDOrNull(Record[1]),
+ getMDString(Record[2]),
+ getMDOrNull(Record[3]), Record[4])),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_EXPRESSION: {
+ if (Record.size() < 1)
+ return error("Invalid record");
+
+ IsDistinct = Record[0] & 1;
+ uint64_t Version = Record[0] >> 1;
+ auto Elts = MutableArrayRef<uint64_t>(Record).slice(1);
+
+ SmallVector<uint64_t, 6> Buffer;
+ if (Error Err = upgradeDIExpression(Version, Elts, Buffer))
+ return Err;
+
+ MetadataList.assignValue(GET_OR_DISTINCT(DIExpression, (Context, Elts)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_GLOBAL_VAR_EXPR: {
+ if (Record.size() != 3)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ Metadata *Expr = getMDOrNull(Record[2]);
+ if (!Expr)
+ Expr = DIExpression::get(Context, {});
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIGlobalVariableExpression,
+ (Context, getMDOrNull(Record[1]), Expr)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_OBJC_PROPERTY: {
+ if (Record.size() != 8)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIObjCProperty,
+ (Context, getMDString(Record[1]),
+ getMDOrNull(Record[2]), Record[3],
+ getMDString(Record[4]), getMDString(Record[5]),
+ Record[6], getDITypeRefOrNull(Record[7]))),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_IMPORTED_ENTITY: {
+ if (Record.size() < 6 && Record.size() > 8)
+ return error("Invalid record");
+
+ IsDistinct = Record[0];
+ bool HasFile = (Record.size() >= 7);
+ bool HasElements = (Record.size() >= 8);
+ MetadataList.assignValue(
+ GET_OR_DISTINCT(DIImportedEntity,
+ (Context, Record[1], getMDOrNull(Record[2]),
+ getDITypeRefOrNull(Record[3]),
+ HasFile ? getMDOrNull(Record[6]) : nullptr,
+ HasFile ? Record[4] : 0, getMDString(Record[5]),
+ HasElements ? getMDOrNull(Record[7]) : nullptr)),
+ NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_STRING_OLD: {
+ std::string String(Record.begin(), Record.end());
+
+ // Test for upgrading !llvm.loop.
+ HasSeenOldLoopTags |= mayBeOldLoopAttachmentTag(String);
+ ++NumMDStringLoaded;
+ Metadata *MD = MDString::get(Context, String);
+ MetadataList.assignValue(MD, NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ case bitc::METADATA_STRINGS: {
+ auto CreateNextMDString = [&](StringRef Str) {
+ ++NumMDStringLoaded;
+ MetadataList.assignValue(MDString::get(Context, Str), NextMetadataNo);
+ NextMetadataNo++;
+ };
+ if (Error Err = parseMetadataStrings(Record, Blob, CreateNextMDString))
+ return Err;
+ break;
+ }
+ case bitc::METADATA_GLOBAL_DECL_ATTACHMENT: {
+ if (Record.size() % 2 == 0)
+ return error("Invalid record");
+ unsigned ValueID = Record[0];
+ if (ValueID >= ValueList.size())
+ return error("Invalid record");
+ if (auto *GO = dyn_cast<GlobalObject>(ValueList[ValueID]))
+ if (Error Err = parseGlobalObjectAttachment(
+ *GO, ArrayRef<uint64_t>(Record).slice(1)))
+ return Err;
+ break;
+ }
+ case bitc::METADATA_KIND: {
+ // Support older bitcode files that had METADATA_KIND records in a
+ // block with METADATA_BLOCK_ID.
+ if (Error Err = parseMetadataKindRecord(Record))
+ return Err;
+ break;
+ }
+ case bitc::METADATA_ARG_LIST: {
+ SmallVector<ValueAsMetadata *, 4> Elts;
+ Elts.reserve(Record.size());
+ for (uint64_t Elt : Record) {
+ Metadata *MD = getMD(Elt);
+ if (isa<MDNode>(MD) && cast<MDNode>(MD)->isTemporary())
+ return error(
+ "Invalid record: DIArgList should not contain forward refs");
+ if (!isa<ValueAsMetadata>(MD))
+ return error("Invalid record");
+ Elts.push_back(cast<ValueAsMetadata>(MD));
+ }
+
+ MetadataList.assignValue(DIArgList::get(Context, Elts), NextMetadataNo);
+ NextMetadataNo++;
+ break;
+ }
+ }
+ return Error::success();
+#undef GET_OR_DISTINCT
+}
+
+Error MetadataLoader::MetadataLoaderImpl::parseMetadataStrings(
+    ArrayRef<uint64_t> Record, StringRef Blob,
+    function_ref<void(StringRef)> CallBack) {
+  // All the MDStrings in the block are emitted together in a single record:
+  // the blob holds the VBR6-encoded lengths first, followed by all the
+  // string characters concatenated together.
+  if (Record.size() != 2)
+    return error("Invalid record: metadata strings layout");
+
+  unsigned NumStrings = Record[0];
+  unsigned StringsOffset = Record[1];
+  if (!NumStrings)
+    return error("Invalid record: metadata strings with no strings");
+  if (StringsOffset > Blob.size())
+    return error("Invalid record: metadata strings corrupt offset");
+
+  // Split the blob into the length stream and the character data.
+  SimpleBitstreamCursor LengthReader(Blob.slice(0, StringsOffset));
+  StringRef Chars = Blob.drop_front(StringsOffset);
+
+  for (unsigned I = 0; I != NumStrings; ++I) {
+    if (LengthReader.AtEndOfStream())
+      return error("Invalid record: metadata strings bad length");
+
+    uint32_t Len;
+    if (Error E = LengthReader.ReadVBR(6).moveInto(Len))
+      return E;
+    if (Chars.size() < Len)
+      return error("Invalid record: metadata strings truncated chars");
+
+    // Hand the next string to the caller and advance past it.
+    CallBack(Chars.take_front(Len));
+    Chars = Chars.drop_front(Len);
+  }
+
+  return Error::success();
+}
+
+Error MetadataLoader::MetadataLoaderImpl::parseGlobalObjectAttachment(
+    GlobalObject &GO, ArrayRef<uint64_t> Record) {
+  // The record is a flat list of (kind-id, metadata-id) pairs.
+  assert(Record.size() % 2 == 0);
+  for (unsigned Idx = 0, End = Record.size(); Idx != End; Idx += 2) {
+    auto KindIt = MDKindMap.find(Record[Idx]);
+    if (KindIt == MDKindMap.end())
+      return error("Invalid ID");
+    auto *MD =
+        dyn_cast_or_null<MDNode>(getMetadataFwdRefOrLoad(Record[Idx + 1]));
+    if (!MD)
+      return error("Invalid metadata attachment: expect fwd ref to MDNode");
+    GO.addMetadata(KindIt->second, *MD);
+  }
+  return Error::success();
+}
+
+/// Parse metadata attachments for a function body.
+///
+/// Each METADATA_ATTACHMENT record is either a function-level attachment
+/// (even length: kind/metadata-id pairs applied to \p F itself) or an
+/// instruction-level attachment (odd length: an index into
+/// \p InstructionList followed by kind/metadata-id pairs).
+Error MetadataLoader::MetadataLoaderImpl::parseMetadataAttachment(
+    Function &F, const SmallVectorImpl<Instruction *> &InstructionList) {
+  if (Error Err = Stream.EnterSubBlock(bitc::METADATA_ATTACHMENT_ID))
+    return Err;
+
+  SmallVector<uint64_t, 64> Record;
+  PlaceholderQueue Placeholders;
+
+  while (true) {
+    BitstreamEntry Entry;
+    if (Error E = Stream.advanceSkippingSubblocks().moveInto(Entry))
+      return E;
+
+    switch (Entry.Kind) {
+    case BitstreamEntry::SubBlock: // Handled for us already.
+    case BitstreamEntry::Error:
+      return error("Malformed block");
+    case BitstreamEntry::EndBlock:
+      // Resolve anything that was lazy-loaded before returning.
+      resolveForwardRefsAndPlaceholders(Placeholders);
+      return Error::success();
+    case BitstreamEntry::Record:
+      // The interesting case.
+      break;
+    }
+
+    // Read a metadata attachment record.
+    Record.clear();
+    ++NumMDRecordLoaded;
+    Expected<unsigned> MaybeRecord = Stream.readRecord(Entry.ID, Record);
+    if (!MaybeRecord)
+      return MaybeRecord.takeError();
+    switch (MaybeRecord.get()) {
+    default: // Default behavior: ignore.
+      break;
+    case bitc::METADATA_ATTACHMENT: {
+      unsigned RecordLength = Record.size();
+      if (Record.empty())
+        return error("Invalid record");
+      if (RecordLength % 2 == 0) {
+        // A function attachment.
+        if (Error Err = parseGlobalObjectAttachment(F, Record))
+          return Err;
+        continue;
+      }
+
+      // An instruction attachment. The instruction index comes from
+      // untrusted bitcode, so range-check it before indexing.
+      if (Record[0] >= InstructionList.size())
+        return error("Invalid record");
+      Instruction *Inst = InstructionList[Record[0]];
+      for (unsigned i = 1; i != RecordLength; i = i + 2) {
+        unsigned Kind = Record[i];
+        DenseMap<unsigned, unsigned>::iterator I = MDKindMap.find(Kind);
+        if (I == MDKindMap.end())
+          return error("Invalid ID");
+        if (I->second == LLVMContext::MD_tbaa && StripTBAA)
+          continue;
+
+        auto Idx = Record[i + 1];
+        if (Idx < (MDStringRef.size() + GlobalMetadataBitPosIndex.size()) &&
+            !MetadataList.lookup(Idx)) {
+          // Load the attachment if it is in the lazy-loadable range and hasn't
+          // been loaded yet.
+          lazyLoadOneMetadata(Idx, Placeholders);
+          resolveForwardRefsAndPlaceholders(Placeholders);
+        }
+
+        // getMetadataFwdRef returns null for out-of-range references; check
+        // before isa<>, which must not be handed a null pointer.
+        Metadata *Node = MetadataList.getMetadataFwdRef(Idx);
+        if (!Node)
+          return error("Invalid metadata attachment");
+        if (isa<LocalAsMetadata>(Node))
+          // Drop the attachment. This used to be legal, but there's no
+          // upgrade path.
+          break;
+        MDNode *MD = dyn_cast<MDNode>(Node);
+        if (!MD)
+          return error("Invalid metadata attachment");
+
+        if (HasSeenOldLoopTags && I->second == LLVMContext::MD_loop)
+          MD = upgradeInstructionLoopAttachment(*MD);
+
+        if (I->second == LLVMContext::MD_tbaa) {
+          assert(!MD->isTemporary() && "should load MDs before attachments");
+          MD = UpgradeTBAANode(*MD);
+        }
+        Inst->setMetadata(I->second, MD);
+      }
+      break;
+    }
+    }
+  }
+}
+
+/// Parse a single METADATA_KIND record, inserting result in MDKindMap.
+Error MetadataLoader::MetadataLoaderImpl::parseMetadataKindRecord(
+    SmallVectorImpl<uint64_t> &Record) {
+  if (Record.size() < 2)
+    return error("Invalid record");
+
+  // Record layout: [kind-id, name characters...].
+  unsigned BitcodeKind = Record[0];
+  SmallString<8> KindName(Record.begin() + 1, Record.end());
+
+  // Translate the on-disk kind id into this module's id for the same name.
+  // Seeing the same on-disk id twice is malformed input.
+  unsigned ModuleKind = TheModule.getMDKindID(KindName.str());
+  if (!MDKindMap.insert(std::make_pair(BitcodeKind, ModuleKind)).second)
+    return error("Conflicting METADATA_KIND records");
+  return Error::success();
+}
+
+/// Parse the metadata kinds out of the METADATA_KIND_BLOCK.
+Error MetadataLoader::MetadataLoaderImpl::parseMetadataKinds() {
+  if (Error Err = Stream.EnterSubBlock(bitc::METADATA_KIND_BLOCK_ID))
+    return Err;
+
+  SmallVector<uint64_t, 64> Record;
+
+  // Walk every entry in the block; records that are not METADATA_KIND are
+  // silently ignored.
+  while (true) {
+    BitstreamEntry Next;
+    if (Error E = Stream.advanceSkippingSubblocks().moveInto(Next))
+      return E;
+
+    if (Next.Kind == BitstreamEntry::EndBlock)
+      return Error::success();
+    if (Next.Kind == BitstreamEntry::SubBlock ||
+        Next.Kind == BitstreamEntry::Error)
+      return error("Malformed block");
+
+    // The interesting case: an actual record.
+    Record.clear();
+    ++NumMDRecordLoaded;
+    Expected<unsigned> MaybeCode = Stream.readRecord(Next.ID, Record);
+    if (!MaybeCode)
+      return MaybeCode.takeError();
+    if (MaybeCode.get() == bitc::METADATA_KIND)
+      if (Error Err = parseMetadataKindRecord(Record))
+        return Err;
+  }
+}
+
+// Move operations: steal the implementation object. The moved-from loader
+// is left with a null Pimpl and must not be used afterwards.
+MetadataLoader &MetadataLoader::operator=(MetadataLoader &&Other) {
+  Pimpl = std::move(Other.Pimpl);
+  return *this;
+}
+MetadataLoader::MetadataLoader(MetadataLoader &&Other)
+    : Pimpl(std::move(Other.Pimpl)) {}
+
+// Defined out-of-line so the unique_ptr<MetadataLoaderImpl> destructor is
+// instantiated here, where MetadataLoaderImpl is a complete type.
+MetadataLoader::~MetadataLoader() = default;
+// Note: the impl constructor takes getTypeByID before IsImporting, the
+// reverse of this public constructor's parameter order.
+MetadataLoader::MetadataLoader(BitstreamCursor &Stream, Module &TheModule,
+                               BitcodeReaderValueList &ValueList,
+                               bool IsImporting,
+                               std::function<Type *(unsigned)> getTypeByID)
+    : Pimpl(std::make_unique<MetadataLoaderImpl>(
+        Stream, TheModule, ValueList, std::move(getTypeByID), IsImporting)) {}
+
+/// Forward to the implementation. \p ModuleLevel selects between the
+/// module-level metadata block and a function-local one.
+Error MetadataLoader::parseMetadata(bool ModuleLevel) {
+  return Pimpl->parseMetadata(ModuleLevel);
+}
+
+/// Return true if there are remaining unresolved forward references.
+bool MetadataLoader::hasFwdRefs() const { return Pimpl->hasFwdRefs(); }
+
+/// Return the given metadata, creating a replaceable forward reference if
+/// necessary.
+Metadata *MetadataLoader::getMetadataFwdRefOrLoad(unsigned Idx) {
+  return Pimpl->getMetadataFwdRefOrLoad(Idx);
+}
+
+/// Forward to the implementation: look up the DISubprogram for \p F.
+DISubprogram *MetadataLoader::lookupSubprogramForFunction(Function *F) {
+  return Pimpl->lookupSubprogramForFunction(F);
+}
+
+/// Forward to the implementation: parse the METADATA_ATTACHMENT block of \p F.
+Error MetadataLoader::parseMetadataAttachment(
+    Function &F, const SmallVectorImpl<Instruction *> &InstructionList) {
+  return Pimpl->parseMetadataAttachment(F, InstructionList);
+}
+
+/// Forward to the implementation: parse the METADATA_KIND block.
+Error MetadataLoader::parseMetadataKinds() {
+  return Pimpl->parseMetadataKinds();
+}
+
+/// Enable or disable stripping of TBAA metadata on load.
+void MetadataLoader::setStripTBAA(bool StripTBAA) {
+  return Pimpl->setStripTBAA(StripTBAA);
+}
+
+/// Return true if the loader is stripping TBAA metadata.
+bool MetadataLoader::isStrippingTBAA() { return Pimpl->isStrippingTBAA(); }
+
+/// Number of metadata entries currently loaded.
+unsigned MetadataLoader::size() const { return Pimpl->size(); }
+/// Shrink the loaded-metadata list back to \p N entries.
+void MetadataLoader::shrinkTo(unsigned N) { return Pimpl->shrinkTo(N); }
+
+/// Forward to the implementation: upgrade debug intrinsics in \p F.
+void MetadataLoader::upgradeDebugIntrinsics(Function &F) {
+  return Pimpl->upgradeDebugIntrinsics(F);
+}
diff --git a/contrib/libs/llvm14/lib/Bitcode/Reader/MetadataLoader.h b/contrib/libs/llvm14/lib/Bitcode/Reader/MetadataLoader.h
new file mode 100644
index 0000000000..709800850f
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Reader/MetadataLoader.h
@@ -0,0 +1,83 @@
+//===-- Bitcode/Reader/MetadataLoader.h - Load Metadatas -------*- C++ -*-====//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class handles loading Metadatas.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_BITCODE_READER_METADATALOADER_H
+#define LLVM_LIB_BITCODE_READER_METADATALOADER_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Error.h"
+
+#include <functional>
+#include <memory>
+
+namespace llvm {
+class BitcodeReaderValueList;
+class BitstreamCursor;
+class DISubprogram;
+class Function;
+class Instruction;
+class Metadata;
+class Module;
+class Type;
+
+/// Helper class that handles loading Metadatas and keeping them available.
+class MetadataLoader {
+  class MetadataLoaderImpl;
+  // All state and parsing logic live in the implementation object (pimpl).
+  std::unique_ptr<MetadataLoaderImpl> Pimpl;
+  // Shared implementation behind parseModuleMetadata()/parseFunctionMetadata().
+  Error parseMetadata(bool ModuleLevel);
+
+public:
+  ~MetadataLoader();
+  MetadataLoader(BitstreamCursor &Stream, Module &TheModule,
+                 BitcodeReaderValueList &ValueList, bool IsImporting,
+                 std::function<Type *(unsigned)> getTypeByID);
+  // Movable (ownership of the implementation transfers); not copyable.
+  MetadataLoader &operator=(MetadataLoader &&);
+  MetadataLoader(MetadataLoader &&);
+
+  // Parse a module metadata block
+  Error parseModuleMetadata() { return parseMetadata(true); }
+
+  // Parse a function metadata block
+  Error parseFunctionMetadata() { return parseMetadata(false); }
+
+  /// Set the mode to strip TBAA metadata on load.
+  void setStripTBAA(bool StripTBAA = true);
+
+  /// Return true if the Loader is stripping TBAA metadata.
+  bool isStrippingTBAA();
+
+  // Return true if there are remaining unresolved forward references.
+  bool hasFwdRefs() const;
+
+  /// Return the given metadata, creating a replaceable forward reference if
+  /// necessary.
+  Metadata *getMetadataFwdRefOrLoad(unsigned Idx);
+
+  /// Return the DISubprogram metadata for a Function if any, null otherwise.
+  DISubprogram *lookupSubprogramForFunction(Function *F);
+
+  /// Parse a `METADATA_ATTACHMENT` block for a function.
+  Error parseMetadataAttachment(
+      Function &F, const SmallVectorImpl<Instruction *> &InstructionList);
+
+  /// Parse a `METADATA_KIND` block for the current module.
+  Error parseMetadataKinds();
+
+  /// Size bookkeeping for the loaded metadata list (see the matching
+  /// forwarders in MetadataLoader.cpp).
+  unsigned size() const;
+  void shrinkTo(unsigned N);
+
+  /// Perform bitcode upgrades on llvm.dbg.* calls.
+  void upgradeDebugIntrinsics(Function &F);
+};
+} // end namespace llvm
+
+#endif // LLVM_LIB_BITCODE_READER_METADATALOADER_H
diff --git a/contrib/libs/llvm14/lib/Bitcode/Reader/ValueList.cpp b/contrib/libs/llvm14/lib/Bitcode/Reader/ValueList.cpp
new file mode 100644
index 0000000000..86ed664070
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Reader/ValueList.cpp
@@ -0,0 +1,216 @@
+//===- ValueList.cpp - Internal BitcodeReader implementation --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "ValueList.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <cstddef>
+#include <limits>
+
+using namespace llvm;
+
+namespace llvm {
+
+namespace {
+
+/// A class for maintaining the slot number definition
+/// as a placeholder for the actual definition for forward constants defs.
+///
+/// The placeholder is a ConstantExpr with the UserOp1 opcode, which is
+/// reserved for internal use and never produced by real IR, so classof()
+/// below can reliably identify placeholders.
+class ConstantPlaceHolder : public ConstantExpr {
+public:
+  explicit ConstantPlaceHolder(Type *Ty, LLVMContext &Context)
+      : ConstantExpr(Ty, Instruction::UserOp1, &Op<0>(), 1) {
+    // The single operand is a dummy i32 undef; its only purpose is to give
+    // the User machinery one operand slot.
+    Op<0>() = UndefValue::get(Type::getInt32Ty(Context));
+  }
+
+  ConstantPlaceHolder &operator=(const ConstantPlaceHolder &) = delete;
+
+  // allocate space for exactly one operand
+  void *operator new(size_t s) { return User::operator new(s, 1); }
+
+  /// Methods to support type inquiry through isa, cast, and dyn_cast.
+  static bool classof(const Value *V) {
+    return isa<ConstantExpr>(V) &&
+           cast<ConstantExpr>(V)->getOpcode() == Instruction::UserOp1;
+  }
+
+  /// Provide fast operand accessors
+  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
+};
+
+} // end anonymous namespace
+
+// Operand-accessor boilerplate for the placeholder's one fixed operand.
+// FIXME: can we inherit this from ConstantExpr?
+template <>
+struct OperandTraits<ConstantPlaceHolder>
+    : public FixedNumOperandTraits<ConstantPlaceHolder, 1> {};
+DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ConstantPlaceHolder, Value)
+
+} // end namespace llvm
+
+/// Record that slot Idx now holds V, resolving any forward reference that
+/// already occupies the slot.
+void BitcodeReaderValueList::assignValue(Value *V, unsigned Idx) {
+  // Common case: values arrive in order; just append.
+  if (Idx == size()) {
+    push_back(V);
+    return;
+  }
+
+  if (Idx >= size())
+    resize(Idx + 1);
+
+  WeakTrackingVH &OldV = ValuePtrs[Idx];
+  if (!OldV) {
+    // Slot was empty: no forward reference to resolve.
+    OldV = V;
+    return;
+  }
+
+  // Handle constants and non-constants (e.g. instrs) differently for
+  // efficiency.
+  if (Constant *PHC = dyn_cast<Constant>(&*OldV)) {
+    // Constant placeholders are resolved in bulk later by
+    // resolveConstantForwardRefs() to avoid re-uniquing thrash.
+    ResolveConstants.push_back(std::make_pair(PHC, Idx));
+    OldV = V;
+  } else {
+    // If there was a forward reference to this value, replace it.
+    // Capture the raw pointer first: the tracking handle OldV follows the
+    // RAUW and would no longer name the placeholder afterwards.
+    Value *PrevVal = OldV;
+    OldV->replaceAllUsesWith(V);
+    PrevVal->deleteValue();
+  }
+}
+
+/// Return the constant in slot Idx, or create a typed placeholder constant
+/// that will be RAUW'd once the real definition is read. Returns null for an
+/// out-of-range index.
+Constant *BitcodeReaderValueList::getConstantFwdRef(unsigned Idx, Type *Ty) {
+  // Bail out for a clearly invalid value.
+  if (Idx >= RefsUpperBound)
+    return nullptr;
+
+  if (Idx >= size())
+    resize(Idx + 1);
+
+  if (Value *V = ValuePtrs[Idx]) {
+    // A constant's declared type must match what the slot already holds;
+    // malformed bitcode aborts here.
+    if (Ty != V->getType())
+      report_fatal_error("Type mismatch in constant table!");
+    return cast<Constant>(V);
+  }
+
+  // Create and return a placeholder, which will later be RAUW'd.
+  Constant *C = new ConstantPlaceHolder(Ty, Context);
+  ValuePtrs[Idx] = C;
+  return C;
+}
+
+/// Return the value in slot Idx, or create a placeholder of type Ty that will
+/// be RAUW'd once the real definition is read. Returns null for an invalid
+/// reference (out of range, type mismatch, or no type to build a placeholder).
+Value *BitcodeReaderValueList::getValueFwdRef(unsigned Idx, Type *Ty) {
+  // Bail out for a clearly invalid value.
+  if (Idx >= RefsUpperBound)
+    return nullptr;
+
+  if (Idx >= size())
+    resize(Idx + 1);
+
+  if (Value *V = ValuePtrs[Idx]) {
+    // If the types don't match, it's invalid.
+    if (Ty && Ty != V->getType())
+      return nullptr;
+    return V;
+  }
+
+  // No type specified, must be invalid reference.
+  if (!Ty)
+    return nullptr;
+
+  // Create and return a placeholder, which will later be RAUW'd.
+  // A free-standing Argument serves as a generic typed placeholder here.
+  Value *V = new Argument(Ty);
+  ValuePtrs[Idx] = V;
+  return V;
+}
+
+/// Once all constants are read, this method bulk resolves any forward
+/// references. The idea behind this is that we sometimes get constants (such
+/// as large arrays) which reference *many* forward ref constants. Replacing
+/// each of these causes a lot of thrashing when building/reuniquing the
+/// constant. Instead of doing this, we look at all the uses and rewrite all
+/// the place holders at once for any constant that uses a placeholder.
+void BitcodeReaderValueList::resolveConstantForwardRefs() {
+  // Sort the values by-pointer so that they are efficient to look up with a
+  // binary search.
+  llvm::sort(ResolveConstants);
+
+  // Scratch buffer of rewritten operands, reused across user constants.
+  SmallVector<Constant *, 64> NewOps;
+
+  while (!ResolveConstants.empty()) {
+    Value *RealVal = operator[](ResolveConstants.back().second);
+    Constant *Placeholder = ResolveConstants.back().first;
+    ResolveConstants.pop_back();
+
+    // Loop over all users of the placeholder, updating them to reference the
+    // new value. If they reference more than one placeholder, update them all
+    // at once.
+    while (!Placeholder->use_empty()) {
+      auto UI = Placeholder->user_begin();
+      User *U = *UI;
+
+      // If the using object isn't uniqued, just update the operands. This
+      // handles instructions and initializers for global variables.
+      if (!isa<Constant>(U) || isa<GlobalValue>(U)) {
+        UI.getUse().set(RealVal);
+        continue;
+      }
+
+      // Otherwise, we have a constant that uses the placeholder. Replace that
+      // constant with a new constant that has *all* placeholder uses updated.
+      Constant *UserC = cast<Constant>(U);
+      for (User::op_iterator I = UserC->op_begin(), E = UserC->op_end(); I != E;
+           ++I) {
+        Value *NewOp;
+        if (!isa<ConstantPlaceHolder>(*I)) {
+          // Not a placeholder reference.
+          NewOp = *I;
+        } else if (*I == Placeholder) {
+          // Common case is that it just references this one placeholder.
+          NewOp = RealVal;
+        } else {
+          // Otherwise, look up the placeholder in ResolveConstants.
+          // ResolveConstants is sorted by placeholder pointer (see above),
+          // so lower_bound finds the matching (placeholder, slot) pair.
+          ResolveConstantsTy::iterator It = llvm::lower_bound(
+              ResolveConstants,
+              std::pair<Constant *, unsigned>(cast<Constant>(*I), 0));
+          assert(It != ResolveConstants.end() && It->first == *I);
+          NewOp = operator[](It->second);
+        }
+
+        NewOps.push_back(cast<Constant>(NewOp));
+      }
+
+      // Make the new constant.
+      Constant *NewC;
+      if (ConstantArray *UserCA = dyn_cast<ConstantArray>(UserC)) {
+        NewC = ConstantArray::get(UserCA->getType(), NewOps);
+      } else if (ConstantStruct *UserCS = dyn_cast<ConstantStruct>(UserC)) {
+        NewC = ConstantStruct::get(UserCS->getType(), NewOps);
+      } else if (isa<ConstantVector>(UserC)) {
+        NewC = ConstantVector::get(NewOps);
+      } else {
+        assert(isa<ConstantExpr>(UserC) && "Must be a ConstantExpr.");
+        NewC = cast<ConstantExpr>(UserC)->getWithOperands(NewOps);
+      }
+
+      UserC->replaceAllUsesWith(NewC);
+      UserC->destroyConstant();
+      NewOps.clear();
+    }
+
+    // Update all ValueHandles, they should be the only users at this point.
+    Placeholder->replaceAllUsesWith(RealVal);
+    delete cast<ConstantPlaceHolder>(Placeholder);
+  }
+}
diff --git a/contrib/libs/llvm14/lib/Bitcode/Reader/ValueList.h b/contrib/libs/llvm14/lib/Bitcode/Reader/ValueList.h
new file mode 100644
index 0000000000..a39617018f
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Reader/ValueList.h
@@ -0,0 +1,96 @@
+//===-- Bitcode/Reader/ValueList.h - Number values --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class gives values and types Unique ID's.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_BITCODE_READER_VALUELIST_H
+#define LLVM_LIB_BITCODE_READER_VALUELIST_H
+
+#include "llvm/IR/ValueHandle.h"
+#include <cassert>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class Constant;
+class LLVMContext;
+class Type;
+class Value;
+
+class BitcodeReaderValueList {
+  // Slot table: bitcode value index -> Value*. WeakTrackingVH entries follow
+  // replaceAllUsesWith, so slots stay valid while placeholders are resolved.
+  std::vector<WeakTrackingVH> ValuePtrs;
+
+  /// As we resolve forward-referenced constants, we add information about them
+  /// to this vector. This allows us to resolve them in bulk instead of
+  /// resolving each reference at a time. See the code in
+  /// ResolveConstantForwardRefs for more information about this.
+  ///
+  /// The key of this vector is the placeholder constant, the value is the slot
+  /// number that holds the resolved value.
+  using ResolveConstantsTy = std::vector<std::pair<Constant *, unsigned>>;
+  ResolveConstantsTy ResolveConstants;
+  LLVMContext &Context;
+
+  /// Maximum number of valid references. Forward references exceeding the
+  /// maximum must be invalid.
+  unsigned RefsUpperBound;
+
+public:
+  // Clamp the caller-provided bound to what fits in the unsigned member.
+  // NOTE(review): std::min/std::numeric_limits rely on transitive includes
+  // (<algorithm>/<limits> are not included directly here) — confirm.
+  BitcodeReaderValueList(LLVMContext &C, size_t RefsUpperBound)
+      : Context(C),
+        RefsUpperBound(std::min((size_t)std::numeric_limits<unsigned>::max(),
+                                RefsUpperBound)) {}
+
+  ~BitcodeReaderValueList() {
+    assert(ResolveConstants.empty() && "Constants not resolved?");
+  }
+
+  // vector compatibility methods
+  unsigned size() const { return ValuePtrs.size(); }
+  void resize(unsigned N) {
+    ValuePtrs.resize(N);
+  }
+  void push_back(Value *V) { ValuePtrs.emplace_back(V); }
+
+  void clear() {
+    assert(ResolveConstants.empty() && "Constants not resolved?");
+    ValuePtrs.clear();
+  }
+
+  Value *operator[](unsigned i) const {
+    assert(i < ValuePtrs.size());
+    return ValuePtrs[i];
+  }
+
+  Value *back() const { return ValuePtrs.back(); }
+  void pop_back() {
+    ValuePtrs.pop_back();
+  }
+  bool empty() const { return ValuePtrs.empty(); }
+
+  // Truncate the table back to N entries (used when leaving a scope).
+  void shrinkTo(unsigned N) {
+    assert(N <= size() && "Invalid shrinkTo request!");
+    ValuePtrs.resize(N);
+  }
+
+  /// Return slot Idx, creating a typed placeholder if it is not defined yet.
+  Constant *getConstantFwdRef(unsigned Idx, Type *Ty);
+  Value *getValueFwdRef(unsigned Idx, Type *Ty);
+
+  /// Record the real definition for slot Idx, resolving forward references.
+  void assignValue(Value *V, unsigned Idx);
+
+  /// Once all constants are read, this method bulk resolves any forward
+  /// references.
+  void resolveConstantForwardRefs();
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_BITCODE_READER_VALUELIST_H
diff --git a/contrib/libs/llvm14/lib/Bitcode/Reader/ya.make b/contrib/libs/llvm14/lib/Bitcode/Reader/ya.make
new file mode 100644
index 0000000000..ee811679c8
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Reader/ya.make
@@ -0,0 +1,33 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm14
+ contrib/libs/llvm14/include
+ contrib/libs/llvm14/lib/Bitstream/Reader
+ contrib/libs/llvm14/lib/IR
+ contrib/libs/llvm14/lib/Support
+)
+
+ADDINCL(
+ contrib/libs/llvm14/lib/Bitcode/Reader
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ BitReader.cpp
+ BitcodeAnalyzer.cpp
+ BitcodeReader.cpp
+ MetadataLoader.cpp
+ ValueList.cpp
+)
+
+END()
diff --git a/contrib/libs/llvm14/lib/Bitcode/Writer/BitWriter.cpp b/contrib/libs/llvm14/lib/Bitcode/Writer/BitWriter.cpp
new file mode 100644
index 0000000000..be59c1f928
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Writer/BitWriter.cpp
@@ -0,0 +1,49 @@
+//===-- BitWriter.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-c/BitWriter.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+
+/*===-- Operations on modules ---------------------------------------------===*/
+
+// C API: write module M as bitcode to the file at Path.
+// Returns 0 on success, -1 if the file could not be opened.
+int LLVMWriteBitcodeToFile(LLVMModuleRef M, const char *Path) {
+  std::error_code EC;
+  raw_fd_ostream OS(Path, EC, sys::fs::OF_None);
+
+  if (EC)
+    return -1;
+
+  WriteBitcodeToFile(*unwrap(M), OS);
+  return 0;
+}
+
+// C API: write module M as bitcode to an already-open file descriptor.
+// ShouldClose controls whether the stream closes FD on destruction;
+// Unbuffered disables the stream's buffering. Always returns 0.
+int LLVMWriteBitcodeToFD(LLVMModuleRef M, int FD, int ShouldClose,
+                         int Unbuffered) {
+  raw_fd_ostream OS(FD, ShouldClose, Unbuffered);
+
+  WriteBitcodeToFile(*unwrap(M), OS);
+  return 0;
+}
+
+// C API: convenience wrapper — writes to FileHandle, buffered, and closes
+// the handle when done (ShouldClose=true, Unbuffered=false).
+int LLVMWriteBitcodeToFileHandle(LLVMModuleRef M, int FileHandle) {
+  return LLVMWriteBitcodeToFD(M, FileHandle, true, false);
+}
+
+// C API: serialize module M to an in-memory buffer. The returned
+// LLVMMemoryBufferRef owns a copy of the data; the caller is responsible
+// for disposing of it (the unique_ptr is release()d into the C handle).
+LLVMMemoryBufferRef LLVMWriteBitcodeToMemoryBuffer(LLVMModuleRef M) {
+  std::string Data;
+  raw_string_ostream OS(Data);
+
+  WriteBitcodeToFile(*unwrap(M), OS);
+  return wrap(MemoryBuffer::getMemBufferCopy(OS.str()).release());
+}
diff --git a/contrib/libs/llvm14/lib/Bitcode/Writer/BitcodeWriter.cpp b/contrib/libs/llvm14/lib/Bitcode/Writer/BitcodeWriter.cpp
new file mode 100644
index 0000000000..4bba0b3566
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -0,0 +1,4975 @@
+//===- Bitcode/Writer/BitcodeWriter.cpp - Bitcode Writer ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Bitcode writer implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "ValueEnumerator.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Bitcode/BitcodeCommon.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/LLVMBitCodes.h"
+#include "llvm/Bitstream/BitCodes.h"
+#include "llvm/Bitstream/BitstreamWriter.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Comdat.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/Constants.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DebugLoc.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/InlineAsm.h"
+#include "llvm/IR/InstrTypes.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/UseListOrder.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/MC/StringTableBuilder.h"
+#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Object/IRSymtab.h"
+#include "llvm/Support/AtomicOrdering.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/SHA1.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <iterator>
+#include <map>
+#include <memory>
+#include <string>
+#include <utility>
+#include <vector>
+
+using namespace llvm;
+
+// Command-line knobs for the bitcode writer (all hidden developer options).
+static cl::opt<unsigned>
+    IndexThreshold("bitcode-mdindex-threshold", cl::Hidden, cl::init(25),
+                   cl::desc("Number of metadatas above which we emit an index "
+                            "to enable lazy-loading"));
+static cl::opt<uint32_t> FlushThreshold(
+    "bitcode-flush-threshold", cl::Hidden, cl::init(512),
+    cl::desc("The threshold (unit M) for flushing LLVM bitcode."));
+
+static cl::opt<bool> WriteRelBFToSummary(
+    "write-relbf-to-summary", cl::Hidden, cl::init(false),
+    cl::desc("Write relative block frequency to function summary "));
+
+// Defined in another translation unit (summary analysis); declared here so
+// the writer can honor the forced-cold-edges setting.
+extern FunctionSummary::ForceSummaryHotnessType ForceSummaryEdgesCold;
+
+namespace {
+
+/// These are manifest constants used by the bitcode writer. They do not need to
+/// be kept in sync with the reader, but need to be consistent within this file.
+enum {
+  // VALUE_SYMTAB_BLOCK abbrev id's.
+  // Each block's ids start at FIRST_APPLICATION_ABBREV; ids are per-block,
+  // so different blocks may reuse the same numeric values.
+  VST_ENTRY_8_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
+  VST_ENTRY_7_ABBREV,
+  VST_ENTRY_6_ABBREV,
+  VST_BBENTRY_6_ABBREV,
+
+  // CONSTANTS_BLOCK abbrev id's.
+  CONSTANTS_SETTYPE_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
+  CONSTANTS_INTEGER_ABBREV,
+  CONSTANTS_CE_CAST_Abbrev,
+  CONSTANTS_NULL_Abbrev,
+
+  // FUNCTION_BLOCK abbrev id's.
+  FUNCTION_INST_LOAD_ABBREV = bitc::FIRST_APPLICATION_ABBREV,
+  FUNCTION_INST_UNOP_ABBREV,
+  FUNCTION_INST_UNOP_FLAGS_ABBREV,
+  FUNCTION_INST_BINOP_ABBREV,
+  FUNCTION_INST_BINOP_FLAGS_ABBREV,
+  FUNCTION_INST_CAST_ABBREV,
+  FUNCTION_INST_RET_VOID_ABBREV,
+  FUNCTION_INST_RET_VAL_ABBREV,
+  FUNCTION_INST_UNREACHABLE_ABBREV,
+  FUNCTION_INST_GEP_ABBREV,
+};
+
+/// Abstract class to manage the bitcode writing, subclassed for each bitcode
+/// file type.
+class BitcodeWriterBase {
+protected:
+  /// The stream created and owned by the client.
+  BitstreamWriter &Stream;
+
+  /// Collects all strings referenced by records; emitted once as the
+  /// file-level string table (also owned by the client).
+  StringTableBuilder &StrtabBuilder;
+
+public:
+  /// Constructs a BitcodeWriterBase object that writes to the provided
+  /// \p Stream.
+  BitcodeWriterBase(BitstreamWriter &Stream, StringTableBuilder &StrtabBuilder)
+      : Stream(Stream), StrtabBuilder(StrtabBuilder) {}
+
+protected:
+  void writeModuleVersion();
+};
+
+// Emit the MODULE_CODE_VERSION record; this writer always emits module
+// format version 2.
+void BitcodeWriterBase::writeModuleVersion() {
+  // VERSION: [version#]
+  Stream.EmitRecord(bitc::MODULE_CODE_VERSION, ArrayRef<uint64_t>{2});
+}
+
+/// Base class to manage the module bitcode writing, currently subclassed for
+/// ModuleBitcodeWriter and ThinLinkBitcodeWriter.
+class ModuleBitcodeWriterBase : public BitcodeWriterBase {
+protected:
+  /// The Module to write to bitcode.
+  const Module &M;
+
+  /// Enumerates ids for all values in the module.
+  ValueEnumerator VE;
+
+  /// Optional per-module index to write for ThinLTO.
+  const ModuleSummaryIndex *Index;
+
+  /// Map that holds the correspondence between GUIDs in the summary index,
+  /// that came from indirect call profiles, and a value id generated by this
+  /// class to use in the VST and summary block records.
+  std::map<GlobalValue::GUID, unsigned> GUIDToValueIdMap;
+
+  /// Tracks the last value id recorded in the GUIDToValueMap.
+  unsigned GlobalValueId;
+
+  /// Saves the offset of the VSTOffset record that must eventually be
+  /// backpatched with the offset of the actual VST.
+  uint64_t VSTOffsetPlaceholder = 0;
+
+public:
+  /// Constructs a ModuleBitcodeWriterBase object for the given Module,
+  /// writing to the provided \p Buffer.
+  ModuleBitcodeWriterBase(const Module &M, StringTableBuilder &StrtabBuilder,
+                          BitstreamWriter &Stream,
+                          bool ShouldPreserveUseListOrder,
+                          const ModuleSummaryIndex *Index)
+      : BitcodeWriterBase(Stream, StrtabBuilder), M(M),
+        VE(M, ShouldPreserveUseListOrder), Index(Index) {
+    // Assign ValueIds to any callee values in the index that came from
+    // indirect call profiles and were recorded as a GUID not a Value*
+    // (which would have been assigned an ID by the ValueEnumerator).
+    // The starting ValueId is just after the number of values in the
+    // ValueEnumerator, so that they can be emitted in the VST.
+    GlobalValueId = VE.getValues().size();
+    if (!Index)
+      return;
+    for (const auto &GUIDSummaryLists : *Index)
+      // Examine all summaries for this GUID.
+      for (auto &Summary : GUIDSummaryLists.second.SummaryList)
+        if (auto FS = dyn_cast<FunctionSummary>(Summary.get()))
+          // For each call in the function summary, see if the call
+          // is to a GUID (which means it is for an indirect call,
+          // otherwise we would have a Value for it). If so, synthesize
+          // a value id.
+          for (auto &CallEdge : FS->calls())
+            if (!CallEdge.first.haveGVs() || !CallEdge.first.getValue())
+              assignValueId(CallEdge.first.getGUID());
+  }
+
+protected:
+  void writePerModuleGlobalValueSummary();
+
+private:
+  void writePerModuleFunctionSummaryRecord(SmallVector<uint64_t, 64> &NameVals,
+                                           GlobalValueSummary *Summary,
+                                           unsigned ValueID,
+                                           unsigned FSCallsAbbrev,
+                                           unsigned FSCallsProfileAbbrev,
+                                           const Function &F);
+  void writeModuleLevelReferences(const GlobalVariable &V,
+                                  SmallVector<uint64_t, 64> &NameVals,
+                                  unsigned FSModRefsAbbrev,
+                                  unsigned FSModVTableRefsAbbrev);
+
+  // Register a fresh value id for a GUID-only callee (see ctor comment).
+  void assignValueId(GlobalValue::GUID ValGUID) {
+    GUIDToValueIdMap[ValGUID] = ++GlobalValueId;
+  }
+
+  unsigned getValueId(GlobalValue::GUID ValGUID) {
+    const auto &VMI = GUIDToValueIdMap.find(ValGUID);
+    // Expect that any GUID value had a value Id assigned by an
+    // earlier call to assignValueId.
+    assert(VMI != GUIDToValueIdMap.end() &&
+           "GUID does not have assigned value Id");
+    return VMI->second;
+  }
+
+  // Helper to get the valueId for the type of value recorded in VI.
+  unsigned getValueId(ValueInfo VI) {
+    if (!VI.haveGVs() || !VI.getValue())
+      return getValueId(VI.getGUID());
+    return VE.getValueID(VI.getValue());
+  }
+
+  std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; }
+};
+
+/// Class to manage the bitcode writing for a module.
+class ModuleBitcodeWriter : public ModuleBitcodeWriterBase {
+  /// Pointer to the buffer allocated by caller for bitcode writing.
+  const SmallVectorImpl<char> &Buffer;
+
+  /// True if a module hash record should be written.
+  bool GenerateHash;
+
+  /// If non-null, when GenerateHash is true, the resulting hash is written
+  /// into ModHash.
+  ModuleHash *ModHash;
+
+  // Incremental hasher used when GenerateHash is set.
+  SHA1 Hasher;
+
+  /// The start bit of the identification block.
+  uint64_t BitcodeStartBit;
+
+public:
+  /// Constructs a ModuleBitcodeWriter object for the given Module,
+  /// writing to the provided \p Buffer.
+  ModuleBitcodeWriter(const Module &M, SmallVectorImpl<char> &Buffer,
+                      StringTableBuilder &StrtabBuilder,
+                      BitstreamWriter &Stream, bool ShouldPreserveUseListOrder,
+                      const ModuleSummaryIndex *Index, bool GenerateHash,
+                      ModuleHash *ModHash = nullptr)
+      : ModuleBitcodeWriterBase(M, StrtabBuilder, Stream,
+                                ShouldPreserveUseListOrder, Index),
+        Buffer(Buffer), GenerateHash(GenerateHash), ModHash(ModHash),
+        BitcodeStartBit(Stream.GetCurrentBitNo()) {}
+
+  /// Emit the current module to the bitstream.
+  void write();
+
+private:
+  uint64_t bitcodeStartBit() { return BitcodeStartBit; }
+
+  size_t addToStrtab(StringRef Str);
+
+  // --- Module-level tables and global structure -----------------------------
+  void writeAttributeGroupTable();
+  void writeAttributeTable();
+  void writeTypeTable();
+  void writeComdats();
+  void writeValueSymbolTableForwardDecl();
+  void writeModuleInfo();
+
+  // --- Metadata emission: one writer per metadata node kind -----------------
+  void writeValueAsMetadata(const ValueAsMetadata *MD,
+                            SmallVectorImpl<uint64_t> &Record);
+  void writeMDTuple(const MDTuple *N, SmallVectorImpl<uint64_t> &Record,
+                    unsigned Abbrev);
+  unsigned createDILocationAbbrev();
+  void writeDILocation(const DILocation *N, SmallVectorImpl<uint64_t> &Record,
+                       unsigned &Abbrev);
+  unsigned createGenericDINodeAbbrev();
+  void writeGenericDINode(const GenericDINode *N,
+                          SmallVectorImpl<uint64_t> &Record, unsigned &Abbrev);
+  void writeDISubrange(const DISubrange *N, SmallVectorImpl<uint64_t> &Record,
+                       unsigned Abbrev);
+  void writeDIGenericSubrange(const DIGenericSubrange *N,
+                              SmallVectorImpl<uint64_t> &Record,
+                              unsigned Abbrev);
+  void writeDIEnumerator(const DIEnumerator *N,
+                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIBasicType(const DIBasicType *N, SmallVectorImpl<uint64_t> &Record,
+                        unsigned Abbrev);
+  void writeDIStringType(const DIStringType *N,
+                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIDerivedType(const DIDerivedType *N,
+                          SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDICompositeType(const DICompositeType *N,
+                            SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDISubroutineType(const DISubroutineType *N,
+                             SmallVectorImpl<uint64_t> &Record,
+                             unsigned Abbrev);
+  void writeDIFile(const DIFile *N, SmallVectorImpl<uint64_t> &Record,
+                   unsigned Abbrev);
+  void writeDICompileUnit(const DICompileUnit *N,
+                          SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDISubprogram(const DISubprogram *N,
+                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDILexicalBlock(const DILexicalBlock *N,
+                           SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDILexicalBlockFile(const DILexicalBlockFile *N,
+                               SmallVectorImpl<uint64_t> &Record,
+                               unsigned Abbrev);
+  void writeDICommonBlock(const DICommonBlock *N,
+                          SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDINamespace(const DINamespace *N, SmallVectorImpl<uint64_t> &Record,
+                        unsigned Abbrev);
+  void writeDIMacro(const DIMacro *N, SmallVectorImpl<uint64_t> &Record,
+                    unsigned Abbrev);
+  void writeDIMacroFile(const DIMacroFile *N, SmallVectorImpl<uint64_t> &Record,
+                        unsigned Abbrev);
+  void writeDIArgList(const DIArgList *N, SmallVectorImpl<uint64_t> &Record,
+                      unsigned Abbrev);
+  void writeDIModule(const DIModule *N, SmallVectorImpl<uint64_t> &Record,
+                     unsigned Abbrev);
+  void writeDITemplateTypeParameter(const DITemplateTypeParameter *N,
+                                    SmallVectorImpl<uint64_t> &Record,
+                                    unsigned Abbrev);
+  void writeDITemplateValueParameter(const DITemplateValueParameter *N,
+                                     SmallVectorImpl<uint64_t> &Record,
+                                     unsigned Abbrev);
+  void writeDIGlobalVariable(const DIGlobalVariable *N,
+                             SmallVectorImpl<uint64_t> &Record,
+                             unsigned Abbrev);
+  void writeDILocalVariable(const DILocalVariable *N,
+                            SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDILabel(const DILabel *N,
+                    SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIExpression(const DIExpression *N,
+                         SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIGlobalVariableExpression(const DIGlobalVariableExpression *N,
+                                       SmallVectorImpl<uint64_t> &Record,
+                                       unsigned Abbrev);
+  void writeDIObjCProperty(const DIObjCProperty *N,
+                           SmallVectorImpl<uint64_t> &Record, unsigned Abbrev);
+  void writeDIImportedEntity(const DIImportedEntity *N,
+                             SmallVectorImpl<uint64_t> &Record,
+                             unsigned Abbrev);
+  unsigned createNamedMetadataAbbrev();
+  void writeNamedMetadata(SmallVectorImpl<uint64_t> &Record);
+  unsigned createMetadataStringsAbbrev();
+  void writeMetadataStrings(ArrayRef<const Metadata *> Strings,
+                            SmallVectorImpl<uint64_t> &Record);
+  void writeMetadataRecords(ArrayRef<const Metadata *> MDs,
+                            SmallVectorImpl<uint64_t> &Record,
+                            std::vector<unsigned> *MDAbbrevs = nullptr,
+                            std::vector<uint64_t> *IndexPos = nullptr);
+  void writeModuleMetadata();
+  void writeFunctionMetadata(const Function &F);
+  void writeFunctionMetadataAttachment(const Function &F);
+  void pushGlobalMetadataAttachment(SmallVectorImpl<uint64_t> &Record,
+                                    const GlobalObject &GO);
+  void writeModuleMetadataKinds();
+
+  // --- Constants, instructions, and function bodies -------------------------
+  void writeOperandBundleTags();
+  void writeSyncScopeNames();
+  void writeConstants(unsigned FirstVal, unsigned LastVal, bool isGlobal);
+  void writeModuleConstants();
+  bool pushValueAndType(const Value *V, unsigned InstID,
+                        SmallVectorImpl<unsigned> &Vals);
+  void writeOperandBundles(const CallBase &CB, unsigned InstID);
+  void pushValue(const Value *V, unsigned InstID,
+                 SmallVectorImpl<unsigned> &Vals);
+  void pushValueSigned(const Value *V, unsigned InstID,
+                       SmallVectorImpl<uint64_t> &Vals);
+  void writeInstruction(const Instruction &I, unsigned InstID,
+                        SmallVectorImpl<unsigned> &Vals);
+
+  // --- Symbol tables, use lists, and final assembly -------------------------
+  void writeFunctionLevelValueSymbolTable(const ValueSymbolTable &VST);
+  void writeGlobalValueSymbolTable(
+      DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex);
+  void writeUseList(UseListOrder &&Order);
+  void writeUseListBlock(const Function *F);
+  void
+  writeFunction(const Function &F,
+                DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex);
+  void writeBlockInfo();
+  void writeModuleHash(size_t BlockStartPos);
+
+  // Sync-scope IDs are written as their raw integer values.
+  unsigned getEncodedSyncScopeID(SyncScope::ID SSID) {
+    return unsigned(SSID);
+  }
+
+  unsigned getEncodedAlign(MaybeAlign Alignment) { return encode(Alignment); }
+};
+
+/// Class to manage the bitcode writing for a combined index.
+class IndexBitcodeWriter : public BitcodeWriterBase {
+ /// The combined index to write to bitcode.
+ const ModuleSummaryIndex &Index;
+
+ /// When writing a subset of the index for distributed backends, client
+ /// provides a map of modules to the corresponding GUIDs/summaries to write.
+ const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex;
+
+ /// Map that holds the correspondence between the GUID used in the combined
+ /// index and a value id generated by this class to use in references.
+ std::map<GlobalValue::GUID, unsigned> GUIDToValueIdMap;
+
+ /// Tracks the last value id recorded in the GUIDToValueMap.
+ /// Ids are assigned by pre-increment, so the first id handed out is 1.
+ unsigned GlobalValueId = 0;
+
+public:
+ /// Constructs a IndexBitcodeWriter object for the given combined index,
+ /// writing to the provided \p Buffer. When writing a subset of the index
+ /// for a distributed backend, provide a \p ModuleToSummariesForIndex map.
+ IndexBitcodeWriter(BitstreamWriter &Stream, StringTableBuilder &StrtabBuilder,
+ const ModuleSummaryIndex &Index,
+ const std::map<std::string, GVSummaryMapTy>
+ *ModuleToSummariesForIndex = nullptr)
+ : BitcodeWriterBase(Stream, StrtabBuilder), Index(Index),
+ ModuleToSummariesForIndex(ModuleToSummariesForIndex) {
+ // Assign unique value ids to all summaries to be written, for use
+ // in writing out the call graph edges. Save the mapping from GUID
+ // to the new global value id to use when writing those edges, which
+ // are currently saved in the index in terms of GUID.
+ forEachSummary([&](GVInfo I, bool) {
+ GUIDToValueIdMap[I.first] = ++GlobalValueId;
+ });
+ }
+
+ /// The below iterator returns the GUID and associated summary.
+ using GVInfo = std::pair<GlobalValue::GUID, GlobalValueSummary *>;
+
+ /// Calls the callback for each value GUID and summary to be written to
+ /// bitcode. This hides the details of whether they are being pulled from the
+ /// entire index or just those in a provided ModuleToSummariesForIndex map.
+ /// The bool passed to \p Callback is true only for aliasee summaries that
+ /// are visited indirectly (so they receive a value id too).
+ template<typename Functor>
+ void forEachSummary(Functor Callback) {
+ if (ModuleToSummariesForIndex) {
+ for (auto &M : *ModuleToSummariesForIndex)
+ for (auto &Summary : M.second) {
+ Callback(Summary, false);
+ // Ensure aliasee is handled, e.g. for assigning a valueId,
+ // even if we are not importing the aliasee directly (the
+ // imported alias will contain a copy of aliasee).
+ if (auto *AS = dyn_cast<AliasSummary>(Summary.getSecond()))
+ Callback({AS->getAliaseeGUID(), &AS->getAliasee()}, true);
+ }
+ } else {
+ for (auto &Summaries : Index)
+ for (auto &Summary : Summaries.second.SummaryList)
+ Callback({Summaries.first, Summary.get()}, false);
+ }
+ }
+
+ /// Calls the callback for each entry in the modulePaths StringMap that
+ /// should be written to the module path string table. This hides the details
+ /// of whether they are being pulled from the entire index or just those in a
+ /// provided ModuleToSummariesForIndex map.
+ template <typename Functor> void forEachModule(Functor Callback) {
+ if (ModuleToSummariesForIndex) {
+ for (const auto &M : *ModuleToSummariesForIndex) {
+ const auto &MPI = Index.modulePaths().find(M.first);
+ if (MPI == Index.modulePaths().end()) {
+ // This should only happen if the bitcode file was empty, in which
+ // case we shouldn't be importing (the ModuleToSummariesForIndex
+ // would only include the module we are writing and index for).
+ assert(ModuleToSummariesForIndex->size() == 1);
+ continue;
+ }
+ Callback(*MPI);
+ }
+ } else {
+ for (const auto &MPSE : Index.modulePaths())
+ Callback(MPSE);
+ }
+ }
+
+ /// Main entry point for writing a combined index to bitcode.
+ void write();
+
+private:
+ void writeModStrings();
+ void writeCombinedGlobalValueSummary();
+
+ /// Look up the value id assigned to \p ValGUID by the constructor; returns
+ /// None when the GUID was never registered.
+ Optional<unsigned> getValueId(GlobalValue::GUID ValGUID) {
+ auto VMI = GUIDToValueIdMap.find(ValGUID);
+ if (VMI == GUIDToValueIdMap.end())
+ return None;
+ return VMI->second;
+ }
+
+ std::map<GlobalValue::GUID, unsigned> &valueIds() { return GUIDToValueIdMap; }
+};
+
+} // end anonymous namespace
+
+/// Map an IR cast opcode onto its stable bitcode encoding.
+static unsigned getEncodedCastOpcode(unsigned Opcode) {
+  switch (Opcode) {
+  case Instruction::Trunc:
+    return bitc::CAST_TRUNC;
+  case Instruction::ZExt:
+    return bitc::CAST_ZEXT;
+  case Instruction::SExt:
+    return bitc::CAST_SEXT;
+  case Instruction::FPToUI:
+    return bitc::CAST_FPTOUI;
+  case Instruction::FPToSI:
+    return bitc::CAST_FPTOSI;
+  case Instruction::UIToFP:
+    return bitc::CAST_UITOFP;
+  case Instruction::SIToFP:
+    return bitc::CAST_SITOFP;
+  case Instruction::FPTrunc:
+    return bitc::CAST_FPTRUNC;
+  case Instruction::FPExt:
+    return bitc::CAST_FPEXT;
+  case Instruction::PtrToInt:
+    return bitc::CAST_PTRTOINT;
+  case Instruction::IntToPtr:
+    return bitc::CAST_INTTOPTR;
+  case Instruction::BitCast:
+    return bitc::CAST_BITCAST;
+  case Instruction::AddrSpaceCast:
+    return bitc::CAST_ADDRSPACECAST;
+  default:
+    llvm_unreachable("Unknown cast instruction!");
+  }
+}
+
+/// Map an IR unary operator onto its stable bitcode encoding. FNeg is the
+/// only unary operator at present.
+static unsigned getEncodedUnaryOpcode(unsigned Opcode) {
+  switch (Opcode) {
+  // Fixed: the unreachable message previously said "binary" (copy-paste from
+  // getEncodedBinaryOpcode), which made assertion failures misleading.
+  default: llvm_unreachable("Unknown unary instruction!");
+  case Instruction::FNeg: return bitc::UNOP_FNEG;
+  }
+}
+
+/// Map an IR binary operator onto its stable bitcode encoding. The integer
+/// and floating-point forms of an operation share one bitcode opcode.
+static unsigned getEncodedBinaryOpcode(unsigned Opcode) {
+  switch (Opcode) {
+  case Instruction::Add:
+  case Instruction::FAdd:
+    return bitc::BINOP_ADD;
+  case Instruction::Sub:
+  case Instruction::FSub:
+    return bitc::BINOP_SUB;
+  case Instruction::Mul:
+  case Instruction::FMul:
+    return bitc::BINOP_MUL;
+  case Instruction::UDiv:
+    return bitc::BINOP_UDIV;
+  case Instruction::FDiv:
+  case Instruction::SDiv:
+    return bitc::BINOP_SDIV;
+  case Instruction::URem:
+    return bitc::BINOP_UREM;
+  case Instruction::FRem:
+  case Instruction::SRem:
+    return bitc::BINOP_SREM;
+  case Instruction::Shl:
+    return bitc::BINOP_SHL;
+  case Instruction::LShr:
+    return bitc::BINOP_LSHR;
+  case Instruction::AShr:
+    return bitc::BINOP_ASHR;
+  case Instruction::And:
+    return bitc::BINOP_AND;
+  case Instruction::Or:
+    return bitc::BINOP_OR;
+  case Instruction::Xor:
+    return bitc::BINOP_XOR;
+  default:
+    llvm_unreachable("Unknown binary instruction!");
+  }
+}
+
+/// Map an atomicrmw operation onto its stable bitcode encoding.
+static unsigned getEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
+  switch (Op) {
+  case AtomicRMWInst::Xchg:
+    return bitc::RMW_XCHG;
+  case AtomicRMWInst::Add:
+    return bitc::RMW_ADD;
+  case AtomicRMWInst::Sub:
+    return bitc::RMW_SUB;
+  case AtomicRMWInst::And:
+    return bitc::RMW_AND;
+  case AtomicRMWInst::Nand:
+    return bitc::RMW_NAND;
+  case AtomicRMWInst::Or:
+    return bitc::RMW_OR;
+  case AtomicRMWInst::Xor:
+    return bitc::RMW_XOR;
+  case AtomicRMWInst::Max:
+    return bitc::RMW_MAX;
+  case AtomicRMWInst::Min:
+    return bitc::RMW_MIN;
+  case AtomicRMWInst::UMax:
+    return bitc::RMW_UMAX;
+  case AtomicRMWInst::UMin:
+    return bitc::RMW_UMIN;
+  case AtomicRMWInst::FAdd:
+    return bitc::RMW_FADD;
+  case AtomicRMWInst::FSub:
+    return bitc::RMW_FSUB;
+  default:
+    llvm_unreachable("Unknown RMW operation!");
+  }
+}
+
+/// Map an atomic memory ordering onto its stable bitcode encoding.
+static unsigned getEncodedOrdering(AtomicOrdering Ordering) {
+  switch (Ordering) {
+  case AtomicOrdering::NotAtomic:
+    return bitc::ORDERING_NOTATOMIC;
+  case AtomicOrdering::Unordered:
+    return bitc::ORDERING_UNORDERED;
+  case AtomicOrdering::Monotonic:
+    return bitc::ORDERING_MONOTONIC;
+  case AtomicOrdering::Acquire:
+    return bitc::ORDERING_ACQUIRE;
+  case AtomicOrdering::Release:
+    return bitc::ORDERING_RELEASE;
+  case AtomicOrdering::AcquireRelease:
+    return bitc::ORDERING_ACQREL;
+  case AtomicOrdering::SequentiallyConsistent:
+    return bitc::ORDERING_SEQCST;
+  }
+  llvm_unreachable("Invalid ordering");
+}
+
+/// Emit \p Str as a record of one value per character. The caller-supplied
+/// abbreviation is used only while every character fits the char6 alphabet;
+/// otherwise the record falls back to the unabbreviated form.
+static void writeStringRecord(BitstreamWriter &Stream, unsigned Code,
+                              StringRef Str, unsigned AbbrevToUse) {
+  // Code: [strchar x N]
+  SmallVector<unsigned, 64> Vals;
+  Vals.reserve(Str.size());
+  for (char C : Str) {
+    Vals.push_back(C);
+    if (AbbrevToUse && !BitCodeAbbrevOp::isChar6(C))
+      AbbrevToUse = 0;
+  }
+
+  // Emit the finished record.
+  Stream.EmitRecord(Code, Vals, AbbrevToUse);
+}
+
+/// Map an Attribute::AttrKind onto the stable bitc::ATTR_KIND_* value written
+/// into the parameter-attribute records. Sentinel kinds (EndAttrKinds, None,
+/// EmptyKey, TombstoneKey) have no encoding and abort via llvm_unreachable.
+static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
+ switch (Kind) {
+ case Attribute::Alignment:
+ return bitc::ATTR_KIND_ALIGNMENT;
+ case Attribute::AllocSize:
+ return bitc::ATTR_KIND_ALLOC_SIZE;
+ case Attribute::AlwaysInline:
+ return bitc::ATTR_KIND_ALWAYS_INLINE;
+ case Attribute::ArgMemOnly:
+ return bitc::ATTR_KIND_ARGMEMONLY;
+ case Attribute::Builtin:
+ return bitc::ATTR_KIND_BUILTIN;
+ case Attribute::ByVal:
+ return bitc::ATTR_KIND_BY_VAL;
+ case Attribute::Convergent:
+ return bitc::ATTR_KIND_CONVERGENT;
+ case Attribute::InAlloca:
+ return bitc::ATTR_KIND_IN_ALLOCA;
+ case Attribute::Cold:
+ return bitc::ATTR_KIND_COLD;
+ case Attribute::DisableSanitizerInstrumentation:
+ return bitc::ATTR_KIND_DISABLE_SANITIZER_INSTRUMENTATION;
+ case Attribute::Hot:
+ return bitc::ATTR_KIND_HOT;
+ case Attribute::ElementType:
+ return bitc::ATTR_KIND_ELEMENTTYPE;
+ case Attribute::InaccessibleMemOnly:
+ return bitc::ATTR_KIND_INACCESSIBLEMEM_ONLY;
+ case Attribute::InaccessibleMemOrArgMemOnly:
+ return bitc::ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY;
+ case Attribute::InlineHint:
+ return bitc::ATTR_KIND_INLINE_HINT;
+ case Attribute::InReg:
+ return bitc::ATTR_KIND_IN_REG;
+ case Attribute::JumpTable:
+ return bitc::ATTR_KIND_JUMP_TABLE;
+ case Attribute::MinSize:
+ return bitc::ATTR_KIND_MIN_SIZE;
+ case Attribute::Naked:
+ return bitc::ATTR_KIND_NAKED;
+ case Attribute::Nest:
+ return bitc::ATTR_KIND_NEST;
+ case Attribute::NoAlias:
+ return bitc::ATTR_KIND_NO_ALIAS;
+ case Attribute::NoBuiltin:
+ return bitc::ATTR_KIND_NO_BUILTIN;
+ case Attribute::NoCallback:
+ return bitc::ATTR_KIND_NO_CALLBACK;
+ case Attribute::NoCapture:
+ return bitc::ATTR_KIND_NO_CAPTURE;
+ case Attribute::NoDuplicate:
+ return bitc::ATTR_KIND_NO_DUPLICATE;
+ case Attribute::NoFree:
+ return bitc::ATTR_KIND_NOFREE;
+ case Attribute::NoImplicitFloat:
+ return bitc::ATTR_KIND_NO_IMPLICIT_FLOAT;
+ case Attribute::NoInline:
+ return bitc::ATTR_KIND_NO_INLINE;
+ case Attribute::NoRecurse:
+ return bitc::ATTR_KIND_NO_RECURSE;
+ case Attribute::NoMerge:
+ return bitc::ATTR_KIND_NO_MERGE;
+ case Attribute::NonLazyBind:
+ return bitc::ATTR_KIND_NON_LAZY_BIND;
+ case Attribute::NonNull:
+ return bitc::ATTR_KIND_NON_NULL;
+ case Attribute::Dereferenceable:
+ return bitc::ATTR_KIND_DEREFERENCEABLE;
+ case Attribute::DereferenceableOrNull:
+ return bitc::ATTR_KIND_DEREFERENCEABLE_OR_NULL;
+ case Attribute::NoRedZone:
+ return bitc::ATTR_KIND_NO_RED_ZONE;
+ case Attribute::NoReturn:
+ return bitc::ATTR_KIND_NO_RETURN;
+ case Attribute::NoSync:
+ return bitc::ATTR_KIND_NOSYNC;
+ case Attribute::NoCfCheck:
+ return bitc::ATTR_KIND_NOCF_CHECK;
+ case Attribute::NoProfile:
+ return bitc::ATTR_KIND_NO_PROFILE;
+ case Attribute::NoUnwind:
+ return bitc::ATTR_KIND_NO_UNWIND;
+ case Attribute::NoSanitizeCoverage:
+ return bitc::ATTR_KIND_NO_SANITIZE_COVERAGE;
+ case Attribute::NullPointerIsValid:
+ return bitc::ATTR_KIND_NULL_POINTER_IS_VALID;
+ case Attribute::OptForFuzzing:
+ return bitc::ATTR_KIND_OPT_FOR_FUZZING;
+ case Attribute::OptimizeForSize:
+ return bitc::ATTR_KIND_OPTIMIZE_FOR_SIZE;
+ case Attribute::OptimizeNone:
+ return bitc::ATTR_KIND_OPTIMIZE_NONE;
+ case Attribute::ReadNone:
+ return bitc::ATTR_KIND_READ_NONE;
+ case Attribute::ReadOnly:
+ return bitc::ATTR_KIND_READ_ONLY;
+ case Attribute::Returned:
+ return bitc::ATTR_KIND_RETURNED;
+ case Attribute::ReturnsTwice:
+ return bitc::ATTR_KIND_RETURNS_TWICE;
+ case Attribute::SExt:
+ return bitc::ATTR_KIND_S_EXT;
+ case Attribute::Speculatable:
+ return bitc::ATTR_KIND_SPECULATABLE;
+ case Attribute::StackAlignment:
+ return bitc::ATTR_KIND_STACK_ALIGNMENT;
+ case Attribute::StackProtect:
+ return bitc::ATTR_KIND_STACK_PROTECT;
+ case Attribute::StackProtectReq:
+ return bitc::ATTR_KIND_STACK_PROTECT_REQ;
+ case Attribute::StackProtectStrong:
+ return bitc::ATTR_KIND_STACK_PROTECT_STRONG;
+ case Attribute::SafeStack:
+ return bitc::ATTR_KIND_SAFESTACK;
+ case Attribute::ShadowCallStack:
+ return bitc::ATTR_KIND_SHADOWCALLSTACK;
+ case Attribute::StrictFP:
+ return bitc::ATTR_KIND_STRICT_FP;
+ case Attribute::StructRet:
+ return bitc::ATTR_KIND_STRUCT_RET;
+ case Attribute::SanitizeAddress:
+ return bitc::ATTR_KIND_SANITIZE_ADDRESS;
+ case Attribute::SanitizeHWAddress:
+ return bitc::ATTR_KIND_SANITIZE_HWADDRESS;
+ case Attribute::SanitizeThread:
+ return bitc::ATTR_KIND_SANITIZE_THREAD;
+ case Attribute::SanitizeMemory:
+ return bitc::ATTR_KIND_SANITIZE_MEMORY;
+ case Attribute::SpeculativeLoadHardening:
+ return bitc::ATTR_KIND_SPECULATIVE_LOAD_HARDENING;
+ case Attribute::SwiftError:
+ return bitc::ATTR_KIND_SWIFT_ERROR;
+ case Attribute::SwiftSelf:
+ return bitc::ATTR_KIND_SWIFT_SELF;
+ case Attribute::SwiftAsync:
+ return bitc::ATTR_KIND_SWIFT_ASYNC;
+ case Attribute::UWTable:
+ return bitc::ATTR_KIND_UW_TABLE;
+ case Attribute::VScaleRange:
+ return bitc::ATTR_KIND_VSCALE_RANGE;
+ case Attribute::WillReturn:
+ return bitc::ATTR_KIND_WILLRETURN;
+ case Attribute::WriteOnly:
+ return bitc::ATTR_KIND_WRITEONLY;
+ case Attribute::ZExt:
+ return bitc::ATTR_KIND_Z_EXT;
+ case Attribute::ImmArg:
+ return bitc::ATTR_KIND_IMMARG;
+ case Attribute::SanitizeMemTag:
+ return bitc::ATTR_KIND_SANITIZE_MEMTAG;
+ case Attribute::Preallocated:
+ return bitc::ATTR_KIND_PREALLOCATED;
+ case Attribute::NoUndef:
+ return bitc::ATTR_KIND_NOUNDEF;
+ case Attribute::ByRef:
+ return bitc::ATTR_KIND_BYREF;
+ case Attribute::MustProgress:
+ return bitc::ATTR_KIND_MUSTPROGRESS;
+ case Attribute::EndAttrKinds:
+ llvm_unreachable("Can not encode end-attribute kinds marker.");
+ case Attribute::None:
+ llvm_unreachable("Can not encode none-attribute.");
+ case Attribute::EmptyKey:
+ case Attribute::TombstoneKey:
+ llvm_unreachable("Trying to encode EmptyKey/TombstoneKey");
+ }
+
+ llvm_unreachable("Trying to encode unknown attribute");
+}
+
+/// Emit the PARAMATTR_GROUP block: one PARAMATTR_GRP_CODE_ENTRY record per
+/// attribute group. Within a record, each attribute is prefixed by a tag:
+/// 0 = enum, 1 = integer, 3 = valueless string, 4 = string with value,
+/// 5 = type attribute without a type, 6 = type attribute with a type operand.
+void ModuleBitcodeWriter::writeAttributeGroupTable() {
+ const std::vector<ValueEnumerator::IndexAndAttrSet> &AttrGrps =
+ VE.getAttributeGroups();
+ if (AttrGrps.empty()) return;
+
+ Stream.EnterSubblock(bitc::PARAMATTR_GROUP_BLOCK_ID, 3);
+
+ SmallVector<uint64_t, 64> Record;
+ for (ValueEnumerator::IndexAndAttrSet Pair : AttrGrps) {
+ unsigned AttrListIndex = Pair.first;
+ AttributeSet AS = Pair.second;
+ Record.push_back(VE.getAttributeGroupID(Pair));
+ Record.push_back(AttrListIndex);
+
+ for (Attribute Attr : AS) {
+ if (Attr.isEnumAttribute()) {
+ Record.push_back(0); // tag: enum attribute
+ Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
+ } else if (Attr.isIntAttribute()) {
+ Record.push_back(1); // tag: integer attribute, kind then value
+ Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
+ Record.push_back(Attr.getValueAsInt());
+ } else if (Attr.isStringAttribute()) {
+ StringRef Kind = Attr.getKindAsString();
+ StringRef Val = Attr.getValueAsString();
+
+ Record.push_back(Val.empty() ? 3 : 4); // tag: string attribute
+ Record.append(Kind.begin(), Kind.end());
+ Record.push_back(0); // kind string is 0-terminated
+ if (!Val.empty()) {
+ Record.append(Val.begin(), Val.end());
+ Record.push_back(0); // value string is 0-terminated
+ }
+ } else {
+ assert(Attr.isTypeAttribute());
+ Type *Ty = Attr.getValueAsType();
+ Record.push_back(Ty ? 6 : 5); // tag: type attribute (6 carries a type)
+ Record.push_back(getAttrKindEncoding(Attr.getKindAsEnum()));
+ if (Ty)
+ Record.push_back(VE.getTypeID(Attr.getValueAsType()));
+ }
+ }
+
+ Stream.EmitRecord(bitc::PARAMATTR_GRP_CODE_ENTRY, Record);
+ Record.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
+/// Emit the PARAMATTR block: for each AttributeList known to the enumerator,
+/// a PARAMATTR_CODE_ENTRY record listing the group ids (assigned by the
+/// ValueEnumerator) of every non-empty attribute set in that list.
+void ModuleBitcodeWriter::writeAttributeTable() {
+ const std::vector<AttributeList> &Attrs = VE.getAttributeLists();
+ if (Attrs.empty()) return;
+
+ Stream.EnterSubblock(bitc::PARAMATTR_BLOCK_ID, 3);
+
+ SmallVector<uint64_t, 64> Record;
+ for (const AttributeList &AL : Attrs) {
+ for (unsigned i : AL.indexes()) {
+ AttributeSet AS = AL.getAttributes(i);
+ if (AS.hasAttributes())
+ Record.push_back(VE.getAttributeGroupID({i, AS}));
+ }
+
+ Stream.EmitRecord(bitc::PARAMATTR_CODE_ENTRY, Record);
+ Record.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
+/// WriteTypeTable - Write out the type table for a module.
+/// Emits abbreviations sized to the number of enumerated types, a NUMENTRY
+/// record so the reader can reserve space, then one record per type.
+void ModuleBitcodeWriter::writeTypeTable() {
+ const ValueEnumerator::TypeList &TypeList = VE.getTypes();
+
+ Stream.EnterSubblock(bitc::TYPE_BLOCK_ID_NEW, 4 /*count from # abbrevs */);
+ SmallVector<uint64_t, 64> TypeVals;
+
+ uint64_t NumBits = VE.computeBitsRequiredForTypeIndicies(); // fixed width for type-index operands
+
+ // Abbrev for TYPE_CODE_POINTER.
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_POINTER));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+ Abbv->Add(BitCodeAbbrevOp(0)); // Addrspace = 0
+ unsigned PtrAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ // Abbrev for TYPE_CODE_OPAQUE_POINTER.
+ Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_OPAQUE_POINTER));
+ Abbv->Add(BitCodeAbbrevOp(0)); // Addrspace = 0
+ unsigned OpaquePtrAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ // Abbrev for TYPE_CODE_FUNCTION.
+ Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_FUNCTION));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isvararg
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+ unsigned FunctionAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ // Abbrev for TYPE_CODE_STRUCT_ANON.
+ Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_ANON));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+ unsigned StructAnonAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ // Abbrev for TYPE_CODE_STRUCT_NAME.
+ Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAME));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+ unsigned StructNameAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ // Abbrev for TYPE_CODE_STRUCT_NAMED.
+ Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_STRUCT_NAMED));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ispacked
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+ unsigned StructNamedAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ // Abbrev for TYPE_CODE_ARRAY.
+ Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::TYPE_CODE_ARRAY));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // size
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, NumBits));
+ unsigned ArrayAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ // Emit an entry count so the reader can reserve space.
+ TypeVals.push_back(TypeList.size());
+ Stream.EmitRecord(bitc::TYPE_CODE_NUMENTRY, TypeVals);
+ TypeVals.clear();
+
+ // Loop over all of the types, emitting each in turn.
+ for (Type *T : TypeList) {
+ int AbbrevToUse = 0;
+ unsigned Code = 0;
+
+ switch (T->getTypeID()) {
+ case Type::VoidTyID: Code = bitc::TYPE_CODE_VOID; break;
+ case Type::HalfTyID: Code = bitc::TYPE_CODE_HALF; break;
+ case Type::BFloatTyID: Code = bitc::TYPE_CODE_BFLOAT; break;
+ case Type::FloatTyID: Code = bitc::TYPE_CODE_FLOAT; break;
+ case Type::DoubleTyID: Code = bitc::TYPE_CODE_DOUBLE; break;
+ case Type::X86_FP80TyID: Code = bitc::TYPE_CODE_X86_FP80; break;
+ case Type::FP128TyID: Code = bitc::TYPE_CODE_FP128; break;
+ case Type::PPC_FP128TyID: Code = bitc::TYPE_CODE_PPC_FP128; break;
+ case Type::LabelTyID: Code = bitc::TYPE_CODE_LABEL; break;
+ case Type::MetadataTyID: Code = bitc::TYPE_CODE_METADATA; break;
+ case Type::X86_MMXTyID: Code = bitc::TYPE_CODE_X86_MMX; break;
+ case Type::X86_AMXTyID: Code = bitc::TYPE_CODE_X86_AMX; break;
+ case Type::TokenTyID: Code = bitc::TYPE_CODE_TOKEN; break;
+ case Type::IntegerTyID:
+ // INTEGER: [width]
+ Code = bitc::TYPE_CODE_INTEGER;
+ TypeVals.push_back(cast<IntegerType>(T)->getBitWidth());
+ break;
+ case Type::PointerTyID: {
+ PointerType *PTy = cast<PointerType>(T);
+ unsigned AddressSpace = PTy->getAddressSpace();
+ if (PTy->isOpaque()) {
+ // OPAQUE_POINTER: [address space]
+ Code = bitc::TYPE_CODE_OPAQUE_POINTER;
+ TypeVals.push_back(AddressSpace);
+ if (AddressSpace == 0)
+ AbbrevToUse = OpaquePtrAbbrev;
+ } else {
+ // POINTER: [pointee type, address space]
+ Code = bitc::TYPE_CODE_POINTER;
+ TypeVals.push_back(VE.getTypeID(PTy->getNonOpaquePointerElementType()));
+ TypeVals.push_back(AddressSpace);
+ if (AddressSpace == 0)
+ AbbrevToUse = PtrAbbrev;
+ }
+ break;
+ }
+ case Type::FunctionTyID: {
+ FunctionType *FT = cast<FunctionType>(T);
+ // FUNCTION: [isvararg, retty, paramty x N]
+ Code = bitc::TYPE_CODE_FUNCTION;
+ TypeVals.push_back(FT->isVarArg());
+ TypeVals.push_back(VE.getTypeID(FT->getReturnType()));
+ for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i)
+ TypeVals.push_back(VE.getTypeID(FT->getParamType(i)));
+ AbbrevToUse = FunctionAbbrev;
+ break;
+ }
+ case Type::StructTyID: {
+ StructType *ST = cast<StructType>(T);
+ // STRUCT: [ispacked, eltty x N]
+ TypeVals.push_back(ST->isPacked());
+ // Output all of the element types.
+ for (Type *ET : ST->elements())
+ TypeVals.push_back(VE.getTypeID(ET));
+
+ if (ST->isLiteral()) {
+ Code = bitc::TYPE_CODE_STRUCT_ANON;
+ AbbrevToUse = StructAnonAbbrev;
+ } else {
+ if (ST->isOpaque()) {
+ Code = bitc::TYPE_CODE_OPAQUE;
+ } else {
+ Code = bitc::TYPE_CODE_STRUCT_NAMED;
+ AbbrevToUse = StructNamedAbbrev;
+ }
+
+ // Emit the name if it is present.
+ if (!ST->getName().empty())
+ writeStringRecord(Stream, bitc::TYPE_CODE_STRUCT_NAME, ST->getName(),
+ StructNameAbbrev);
+ }
+ break;
+ }
+ case Type::ArrayTyID: {
+ ArrayType *AT = cast<ArrayType>(T);
+ // ARRAY: [numelts, eltty]
+ Code = bitc::TYPE_CODE_ARRAY;
+ TypeVals.push_back(AT->getNumElements());
+ TypeVals.push_back(VE.getTypeID(AT->getElementType()));
+ AbbrevToUse = ArrayAbbrev;
+ break;
+ }
+ case Type::FixedVectorTyID:
+ case Type::ScalableVectorTyID: {
+ VectorType *VT = cast<VectorType>(T);
+ // VECTOR [numelts, eltty] or
+ // [numelts, eltty, scalable]
+ Code = bitc::TYPE_CODE_VECTOR;
+ TypeVals.push_back(VT->getElementCount().getKnownMinValue());
+ TypeVals.push_back(VE.getTypeID(VT->getElementType()));
+ if (isa<ScalableVectorType>(VT))
+ TypeVals.push_back(true);
+ break;
+ }
+ }
+
+ // Emit the finished record.
+ Stream.EmitRecord(Code, TypeVals, AbbrevToUse);
+ TypeVals.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
+// NOTE(review): the returned constants are on-disk bitcode values, not the
+// in-memory enum values — presumably matched by the decode path in
+// BitcodeReader.cpp (see the remapping note in getEncodedGVSummaryFlags
+// below); keep them in sync.
+static unsigned getEncodedLinkage(const GlobalValue::LinkageTypes Linkage) {
+ switch (Linkage) {
+ case GlobalValue::ExternalLinkage:
+ return 0;
+ case GlobalValue::WeakAnyLinkage:
+ return 16;
+ case GlobalValue::AppendingLinkage:
+ return 2;
+ case GlobalValue::InternalLinkage:
+ return 3;
+ case GlobalValue::LinkOnceAnyLinkage:
+ return 18;
+ case GlobalValue::ExternalWeakLinkage:
+ return 7;
+ case GlobalValue::CommonLinkage:
+ return 8;
+ case GlobalValue::PrivateLinkage:
+ return 9;
+ case GlobalValue::WeakODRLinkage:
+ return 17;
+ case GlobalValue::LinkOnceODRLinkage:
+ return 19;
+ case GlobalValue::AvailableExternallyLinkage:
+ return 12;
+ }
+ llvm_unreachable("Invalid linkage");
+}
+
+/// Convenience overload: encode the linkage of \p GV itself.
+static unsigned getEncodedLinkage(const GlobalValue &GV) {
+ return getEncodedLinkage(GV.getLinkage());
+}
+
+/// Pack the FunctionSummary flags into a single word, one bit per flag,
+/// lowest bit first in the order listed below.
+static uint64_t getEncodedFFlags(FunctionSummary::FFlags Flags) {
+  const uint64_t Bits[] = {
+      Flags.ReadNone,     Flags.ReadOnly,       Flags.NoRecurse,
+      Flags.ReturnDoesNotAlias, Flags.NoInline, Flags.AlwaysInline,
+      Flags.NoUnwind,     Flags.MayThrow,       Flags.HasUnknownCall,
+      Flags.MustBeUnreachable};
+  uint64_t RawFlags = 0;
+  for (uint64_t BitPos = 0; BitPos != sizeof(Bits) / sizeof(Bits[0]); ++BitPos)
+    RawFlags |= Bits[BitPos] << BitPos;
+  return RawFlags;
+}
+
+// Encode the flags for GlobalValue in the summary. See getDecodedGVSummaryFlags
+// in BitcodeReader.cpp. (The original comment said "Decode", but this is the
+// encoding side.)
+// Final bit layout: [0-3] Linkage, [4] NotEligibleToImport, [5] Live,
+// [6] DSOLocal, [7] CanAutoHide, [8-9] Visibility.
+static uint64_t getEncodedGVSummaryFlags(GlobalValueSummary::GVFlags Flags) {
+ uint64_t RawFlags = 0;
+
+ RawFlags |= Flags.NotEligibleToImport; // bool
+ RawFlags |= (Flags.Live << 1);
+ RawFlags |= (Flags.DSOLocal << 2);
+ RawFlags |= (Flags.CanAutoHide << 3);
+
+ // Linkage doesn't need to be remapped at that time for the summary. Any
+ // future change to the getEncodedLinkage() function will need to be taken
+ // into account here as well.
+ RawFlags = (RawFlags << 4) | Flags.Linkage; // 4 bits
+
+ RawFlags |= (Flags.Visibility << 8); // 2 bits
+
+ return RawFlags;
+}
+
+/// Pack the GlobalVarSummary flags: bit 0 MaybeReadOnly, bit 1 MaybeWriteOnly,
+/// bit 2 Constant, bits 3+ VCallVisibility.
+static uint64_t getEncodedGVarFlags(GlobalVarSummary::GVarFlags Flags) {
+  uint64_t RawFlags = 0;
+  RawFlags |= Flags.MaybeReadOnly;
+  RawFlags |= (Flags.MaybeWriteOnly << 1);
+  RawFlags |= (Flags.Constant << 2);
+  RawFlags |= (Flags.VCallVisibility << 3);
+  return RawFlags;
+}
+
+/// Translate the IR visibility of \p GV into its fixed bitcode value.
+static unsigned getEncodedVisibility(const GlobalValue &GV) {
+  switch (GV.getVisibility()) {
+  case GlobalValue::DefaultVisibility:
+    return 0;
+  case GlobalValue::HiddenVisibility:
+    return 1;
+  case GlobalValue::ProtectedVisibility:
+    return 2;
+  }
+  llvm_unreachable("Invalid visibility");
+}
+
+/// Translate the DLL storage class of \p GV into its fixed bitcode value.
+static unsigned getEncodedDLLStorageClass(const GlobalValue &GV) {
+  switch (GV.getDLLStorageClass()) {
+  case GlobalValue::DefaultStorageClass:
+    return 0;
+  case GlobalValue::DLLImportStorageClass:
+    return 1;
+  case GlobalValue::DLLExportStorageClass:
+    return 2;
+  }
+  llvm_unreachable("Invalid DLL storage class");
+}
+
+/// Translate the thread-local mode of \p GV into its fixed bitcode value.
+static unsigned getEncodedThreadLocalMode(const GlobalValue &GV) {
+  switch (GV.getThreadLocalMode()) {
+  case GlobalVariable::NotThreadLocal:
+    return 0;
+  case GlobalVariable::GeneralDynamicTLSModel:
+    return 1;
+  case GlobalVariable::LocalDynamicTLSModel:
+    return 2;
+  case GlobalVariable::InitialExecTLSModel:
+    return 3;
+  case GlobalVariable::LocalExecTLSModel:
+    return 4;
+  }
+  llvm_unreachable("Invalid TLS model");
+}
+
+/// Translate a comdat selection kind into its bitcode constant.
+static unsigned getEncodedComdatSelectionKind(const Comdat &C) {
+  switch (C.getSelectionKind()) {
+  case Comdat::Any:           return bitc::COMDAT_SELECTION_KIND_ANY;
+  case Comdat::ExactMatch:    return bitc::COMDAT_SELECTION_KIND_EXACT_MATCH;
+  case Comdat::Largest:       return bitc::COMDAT_SELECTION_KIND_LARGEST;
+  case Comdat::NoDeduplicate: return bitc::COMDAT_SELECTION_KIND_NO_DUPLICATES;
+  case Comdat::SameSize:      return bitc::COMDAT_SELECTION_KIND_SAME_SIZE;
+  }
+  llvm_unreachable("Invalid selection kind");
+}
+
+/// Translate the unnamed_addr kind of \p GV into its fixed bitcode value.
+/// Note the record values do not follow enum order: Local encodes as 2 and
+/// Global as 1.
+static unsigned getEncodedUnnamedAddr(const GlobalValue &GV) {
+  switch (GV.getUnnamedAddr()) {
+  case GlobalValue::UnnamedAddr::None:
+    return 0;
+  case GlobalValue::UnnamedAddr::Local:
+    return 2;
+  case GlobalValue::UnnamedAddr::Global:
+    return 1;
+  }
+  llvm_unreachable("Invalid unnamed_addr");
+}
+
+/// Add \p Str to the string table being built and return its offset. When
+/// module hashing is enabled, the string is also folded into the running
+/// hash so that names contribute to it.
+size_t ModuleBitcodeWriter::addToStrtab(StringRef Str) {
+ if (GenerateHash)
+ Hasher.update(Str);
+ return StrtabBuilder.add(Str);
+}
+
+/// Emit one MODULE_CODE_COMDAT record per comdat known to the enumerator.
+void ModuleBitcodeWriter::writeComdats() {
+  SmallVector<unsigned, 64> Vals;
+  for (const Comdat *C : VE.getComdats()) {
+    // COMDAT: [strtab offset, strtab size, selection_kind]
+    StringRef Name = C->getName();
+    Vals.push_back(addToStrtab(Name));
+    Vals.push_back(Name.size());
+    Vals.push_back(getEncodedComdatSelectionKind(*C));
+    Stream.EmitRecord(bitc::MODULE_CODE_COMDAT, Vals, /*AbbrevToUse=*/0);
+    Vals.clear();
+  }
+}
+
+/// Write a record that will eventually hold the word offset of the
+/// module-level VST. For now the offset is 0, which will be backpatched
+/// after the real VST is written. Saves the bit offset to backpatch.
+void ModuleBitcodeWriter::writeValueSymbolTableForwardDecl() {
+ // Write a placeholder value in for the offset of the real VST,
+ // which is written after the function blocks so that it can include
+ // the offset of each function. The placeholder offset will be
+ // updated when the real VST is written.
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_VSTOFFSET));
+ // Blocks are 32-bit aligned, so we can use a 32-bit word offset to
+ // hold the real VST offset. Must use fixed instead of VBR as we don't
+ // know how many VBR chunks to reserve ahead of time.
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ unsigned VSTOffsetAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ // Emit the placeholder
+ uint64_t Vals[] = {bitc::MODULE_CODE_VSTOFFSET, 0}; // 0 = dummy offset
+ Stream.EmitRecordWithAbbrev(VSTOffsetAbbrev, Vals);
+
+ // Compute and save the bit offset to the placeholder, which will be
+ // patched when the real VST is written. We can simply subtract the 32-bit
+ // fixed size from the current bit number to get the location to backpatch.
+ VSTOffsetPlaceholder = Stream.GetCurrentBitNo() - 32;
+}
+
+enum StringEncoding { SE_Char6, SE_Fixed7, SE_Fixed8 };
+
+/// Determine the narrowest character encoding that can represent \p Str:
+/// char6 if every byte is in the char6 alphabet, fixed-8 as soon as a byte
+/// with the high bit set is seen, fixed-7 otherwise.
+static StringEncoding getStringEncoding(StringRef Str) {
+  bool AllChar6 = true;
+  for (char C : Str) {
+    // A high-bit byte forces the full 8-bit form; don't bother scanning on.
+    if ((unsigned char)C & 128)
+      return SE_Fixed8;
+    AllChar6 = AllChar6 && BitCodeAbbrevOp::isChar6(C);
+  }
+  return AllChar6 ? SE_Char6 : SE_Fixed7;
+}
+
+/// Emit top-level description of module, including target triple, inline asm,
+/// descriptors for global variables, and function prototype info.
+/// Returns the bit offset to backpatch with the location of the real VST.
+void ModuleBitcodeWriter::writeModuleInfo() {
+ // Emit various pieces of data attached to a module.
+ if (!M.getTargetTriple().empty())
+ writeStringRecord(Stream, bitc::MODULE_CODE_TRIPLE, M.getTargetTriple(),
+ 0 /*TODO*/);
+ const std::string &DL = M.getDataLayoutStr();
+ if (!DL.empty())
+ writeStringRecord(Stream, bitc::MODULE_CODE_DATALAYOUT, DL, 0 /*TODO*/);
+ if (!M.getModuleInlineAsm().empty())
+ writeStringRecord(Stream, bitc::MODULE_CODE_ASM, M.getModuleInlineAsm(),
+ 0 /*TODO*/);
+
+ // Emit information about sections and GC, computing how many there are. Also
+ // compute the maximum alignment value.
+ std::map<std::string, unsigned> SectionMap;
+ std::map<std::string, unsigned> GCMap;
+ MaybeAlign MaxAlignment;
+ unsigned MaxGlobalType = 0;
+ const auto UpdateMaxAlignment = [&MaxAlignment](const MaybeAlign A) {
+ if (A)
+ MaxAlignment = !MaxAlignment ? *A : std::max(*MaxAlignment, *A);
+ };
+ for (const GlobalVariable &GV : M.globals()) {
+ UpdateMaxAlignment(GV.getAlign());
+ MaxGlobalType = std::max(MaxGlobalType, VE.getTypeID(GV.getValueType()));
+ if (GV.hasSection()) {
+ // Give section names unique ID's.
+ unsigned &Entry = SectionMap[std::string(GV.getSection())];
+ if (!Entry) {
+ writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, GV.getSection(),
+ 0 /*TODO*/);
+ Entry = SectionMap.size();
+ }
+ }
+ }
+ for (const Function &F : M) {
+ UpdateMaxAlignment(F.getAlign());
+ if (F.hasSection()) {
+ // Give section names unique ID's.
+ unsigned &Entry = SectionMap[std::string(F.getSection())];
+ if (!Entry) {
+ writeStringRecord(Stream, bitc::MODULE_CODE_SECTIONNAME, F.getSection(),
+ 0 /*TODO*/);
+ Entry = SectionMap.size();
+ }
+ }
+ if (F.hasGC()) {
+ // Same for GC names.
+ unsigned &Entry = GCMap[F.getGC()];
+ if (!Entry) {
+ writeStringRecord(Stream, bitc::MODULE_CODE_GCNAME, F.getGC(),
+ 0 /*TODO*/);
+ Entry = GCMap.size();
+ }
+ }
+ }
+
+ // Emit abbrev for globals, now that we know # sections and max alignment.
+ unsigned SimpleGVarAbbrev = 0;
+ if (!M.global_empty()) {
+ // Add an abbrev for common globals with no visibility or thread localness.
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_GLOBALVAR));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ Log2_32_Ceil(MaxGlobalType+1)));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddrSpace << 2
+ //| explicitType << 1
+ //| constant
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Initializer.
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 5)); // Linkage.
+ if (!MaxAlignment) // Alignment.
+ Abbv->Add(BitCodeAbbrevOp(0));
+ else {
+ unsigned MaxEncAlignment = getEncodedAlign(MaxAlignment);
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ Log2_32_Ceil(MaxEncAlignment+1)));
+ }
+ if (SectionMap.empty()) // Section.
+ Abbv->Add(BitCodeAbbrevOp(0));
+ else
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+ Log2_32_Ceil(SectionMap.size()+1)));
+ // Don't bother emitting vis + thread local.
+ SimpleGVarAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+ }
+
+ SmallVector<unsigned, 64> Vals;
+ // Emit the module's source file name.
+ {
+ StringEncoding Bits = getStringEncoding(M.getSourceFileName());
+ BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8);
+ if (Bits == SE_Char6)
+ AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6);
+ else if (Bits == SE_Fixed7)
+ AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7);
+
+ // MODULE_CODE_SOURCE_FILENAME: [namechar x N]
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(AbbrevOpToUse);
+ unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ for (const auto P : M.getSourceFileName())
+ Vals.push_back((unsigned char)P);
+
+ // Emit the finished record.
+ Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev);
+ Vals.clear();
+ }
+
+ // Emit the global variable information.
+ for (const GlobalVariable &GV : M.globals()) {
+ unsigned AbbrevToUse = 0;
+
+ // GLOBALVAR: [strtab offset, strtab size, type, isconst, initid,
+ // linkage, alignment, section, visibility, threadlocal,
+ // unnamed_addr, externally_initialized, dllstorageclass,
+ // comdat, attributes, DSO_Local]
+ Vals.push_back(addToStrtab(GV.getName()));
+ Vals.push_back(GV.getName().size());
+ Vals.push_back(VE.getTypeID(GV.getValueType()));
+ Vals.push_back(GV.getType()->getAddressSpace() << 2 | 2 | GV.isConstant());
+ Vals.push_back(GV.isDeclaration() ? 0 :
+ (VE.getValueID(GV.getInitializer()) + 1));
+ Vals.push_back(getEncodedLinkage(GV));
+ Vals.push_back(getEncodedAlign(GV.getAlign()));
+ Vals.push_back(GV.hasSection() ? SectionMap[std::string(GV.getSection())]
+ : 0);
+ if (GV.isThreadLocal() ||
+ GV.getVisibility() != GlobalValue::DefaultVisibility ||
+ GV.getUnnamedAddr() != GlobalValue::UnnamedAddr::None ||
+ GV.isExternallyInitialized() ||
+ GV.getDLLStorageClass() != GlobalValue::DefaultStorageClass ||
+ GV.hasComdat() ||
+ GV.hasAttributes() ||
+ GV.isDSOLocal() ||
+ GV.hasPartition()) {
+ Vals.push_back(getEncodedVisibility(GV));
+ Vals.push_back(getEncodedThreadLocalMode(GV));
+ Vals.push_back(getEncodedUnnamedAddr(GV));
+ Vals.push_back(GV.isExternallyInitialized());
+ Vals.push_back(getEncodedDLLStorageClass(GV));
+ Vals.push_back(GV.hasComdat() ? VE.getComdatID(GV.getComdat()) : 0);
+
+ auto AL = GV.getAttributesAsList(AttributeList::FunctionIndex);
+ Vals.push_back(VE.getAttributeListID(AL));
+
+ Vals.push_back(GV.isDSOLocal());
+ Vals.push_back(addToStrtab(GV.getPartition()));
+ Vals.push_back(GV.getPartition().size());
+ } else {
+ AbbrevToUse = SimpleGVarAbbrev;
+ }
+
+ Stream.EmitRecord(bitc::MODULE_CODE_GLOBALVAR, Vals, AbbrevToUse);
+ Vals.clear();
+ }
+
+ // Emit the function proto information.
+ for (const Function &F : M) {
+ // FUNCTION: [strtab offset, strtab size, type, callingconv, isproto,
+ // linkage, paramattrs, alignment, section, visibility, gc,
+ // unnamed_addr, prologuedata, dllstorageclass, comdat,
+ // prefixdata, personalityfn, DSO_Local, addrspace]
+ Vals.push_back(addToStrtab(F.getName()));
+ Vals.push_back(F.getName().size());
+ Vals.push_back(VE.getTypeID(F.getFunctionType()));
+ Vals.push_back(F.getCallingConv());
+ Vals.push_back(F.isDeclaration());
+ Vals.push_back(getEncodedLinkage(F));
+ Vals.push_back(VE.getAttributeListID(F.getAttributes()));
+ Vals.push_back(getEncodedAlign(F.getAlign()));
+ Vals.push_back(F.hasSection() ? SectionMap[std::string(F.getSection())]
+ : 0);
+ Vals.push_back(getEncodedVisibility(F));
+ Vals.push_back(F.hasGC() ? GCMap[F.getGC()] : 0);
+ Vals.push_back(getEncodedUnnamedAddr(F));
+ Vals.push_back(F.hasPrologueData() ? (VE.getValueID(F.getPrologueData()) + 1)
+ : 0);
+ Vals.push_back(getEncodedDLLStorageClass(F));
+ Vals.push_back(F.hasComdat() ? VE.getComdatID(F.getComdat()) : 0);
+ Vals.push_back(F.hasPrefixData() ? (VE.getValueID(F.getPrefixData()) + 1)
+ : 0);
+ Vals.push_back(
+ F.hasPersonalityFn() ? (VE.getValueID(F.getPersonalityFn()) + 1) : 0);
+
+ Vals.push_back(F.isDSOLocal());
+ Vals.push_back(F.getAddressSpace());
+ Vals.push_back(addToStrtab(F.getPartition()));
+ Vals.push_back(F.getPartition().size());
+
+ unsigned AbbrevToUse = 0;
+ Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals, AbbrevToUse);
+ Vals.clear();
+ }
+
+ // Emit the alias information.
+ for (const GlobalAlias &A : M.aliases()) {
+ // ALIAS: [strtab offset, strtab size, alias type, aliasee val#, linkage,
+ // visibility, dllstorageclass, threadlocal, unnamed_addr,
+ // DSO_Local]
+ Vals.push_back(addToStrtab(A.getName()));
+ Vals.push_back(A.getName().size());
+ Vals.push_back(VE.getTypeID(A.getValueType()));
+ Vals.push_back(A.getType()->getAddressSpace());
+ Vals.push_back(VE.getValueID(A.getAliasee()));
+ Vals.push_back(getEncodedLinkage(A));
+ Vals.push_back(getEncodedVisibility(A));
+ Vals.push_back(getEncodedDLLStorageClass(A));
+ Vals.push_back(getEncodedThreadLocalMode(A));
+ Vals.push_back(getEncodedUnnamedAddr(A));
+ Vals.push_back(A.isDSOLocal());
+ Vals.push_back(addToStrtab(A.getPartition()));
+ Vals.push_back(A.getPartition().size());
+
+ unsigned AbbrevToUse = 0;
+ Stream.EmitRecord(bitc::MODULE_CODE_ALIAS, Vals, AbbrevToUse);
+ Vals.clear();
+ }
+
+ // Emit the ifunc information.
+ for (const GlobalIFunc &I : M.ifuncs()) {
+ // IFUNC: [strtab offset, strtab size, ifunc type, address space, resolver
+ // val#, linkage, visibility, DSO_Local]
+ Vals.push_back(addToStrtab(I.getName()));
+ Vals.push_back(I.getName().size());
+ Vals.push_back(VE.getTypeID(I.getValueType()));
+ Vals.push_back(I.getType()->getAddressSpace());
+ Vals.push_back(VE.getValueID(I.getResolver()));
+ Vals.push_back(getEncodedLinkage(I));
+ Vals.push_back(getEncodedVisibility(I));
+ Vals.push_back(I.isDSOLocal());
+ Vals.push_back(addToStrtab(I.getPartition()));
+ Vals.push_back(I.getPartition().size());
+ Stream.EmitRecord(bitc::MODULE_CODE_IFUNC, Vals);
+ Vals.clear();
+ }
+
+ writeValueSymbolTableForwardDecl();
+}
+
+/// Pack the wrap / exactness / fast-math flags carried by \p V into the bit
+/// layout used by bitcode instruction records (bitc::OBO_*, bitc::PEO_EXACT,
+/// and the fast-math flag bits). Returns 0 when \p V carries no such flags.
+/// The three operator categories are mutually exclusive, hence the else-if
+/// chain.
+static uint64_t getOptimizationFlags(const Value *V) {
+  uint64_t Flags = 0;
+
+  if (const auto *OBO = dyn_cast<OverflowingBinaryOperator>(V)) {
+    if (OBO->hasNoSignedWrap())
+      Flags |= 1 << bitc::OBO_NO_SIGNED_WRAP;
+    if (OBO->hasNoUnsignedWrap())
+      Flags |= 1 << bitc::OBO_NO_UNSIGNED_WRAP;
+  } else if (const auto *PEO = dyn_cast<PossiblyExactOperator>(V)) {
+    if (PEO->isExact())
+      Flags |= 1 << bitc::PEO_EXACT;
+  } else if (const auto *FPMO = dyn_cast<FPMathOperator>(V)) {
+    // Fast-math constants are already shifted bit masks, so no `1 <<` here.
+    if (FPMO->hasAllowReassoc())
+      Flags |= bitc::AllowReassoc;
+    if (FPMO->hasNoNaNs())
+      Flags |= bitc::NoNaNs;
+    if (FPMO->hasNoInfs())
+      Flags |= bitc::NoInfs;
+    if (FPMO->hasNoSignedZeros())
+      Flags |= bitc::NoSignedZeros;
+    if (FPMO->hasAllowReciprocal())
+      Flags |= bitc::AllowReciprocal;
+    if (FPMO->hasAllowContract())
+      Flags |= bitc::AllowContract;
+    if (FPMO->hasApproxFunc())
+      Flags |= bitc::ApproxFunc;
+  }
+
+  return Flags;
+}
+
+/// Emit a METADATA_VALUE record for \p MD: [type id, value id].
+/// \p Record is scratch storage; it is cleared before returning.
+void ModuleBitcodeWriter::writeValueAsMetadata(
+    const ValueAsMetadata *MD, SmallVectorImpl<uint64_t> &Record) {
+  // Mimic an MDNode with a value as one operand.
+  Value *V = MD->getValue();
+  Record.push_back(VE.getTypeID(V->getType()));
+  Record.push_back(VE.getValueID(V));
+  Stream.EmitRecord(bitc::METADATA_VALUE, Record, 0);
+  Record.clear();
+}
+
+/// Emit \p N as a METADATA_NODE (or METADATA_DISTINCT_NODE when the tuple is
+/// distinct) record containing one metadata ID per operand; null operands are
+/// encoded via getMetadataOrNullID. \p Record is cleared before returning.
+void ModuleBitcodeWriter::writeMDTuple(const MDTuple *N,
+                                       SmallVectorImpl<uint64_t> &Record,
+                                       unsigned Abbrev) {
+  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+    Metadata *MD = N->getOperand(i);
+    // Function-local metadata is only valid inside function blocks, never in
+    // a module-level tuple.
+    assert(!(MD && isa<LocalAsMetadata>(MD)) &&
+           "Unexpected function-local metadata");
+    Record.push_back(VE.getMetadataOrNullID(MD));
+  }
+  Stream.EmitRecord(N->isDistinct() ? bitc::METADATA_DISTINCT_NODE
+                                    : bitc::METADATA_NODE,
+                    Record, Abbrev);
+  Record.clear();
+}
+
+/// Define and register the abbreviation used for METADATA_LOCATION records:
+/// [distinct(1), line(vbr6), column(vbr8), scope(vbr6), inlined-at(vbr6),
+/// implicit-code(1)]. Returns the abbrev ID assigned by the stream.
+unsigned ModuleBitcodeWriter::createDILocationAbbrev() {
+  // Assume the column is usually under 128, and always output the inlined-at
+  // location (it's never more expensive than building an array size 1).
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_LOCATION));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+  return Stream.EmitAbbrev(std::move(Abbv));
+}
+
+/// Emit a METADATA_LOCATION record for \p N. Field order must match
+/// createDILocationAbbrev() and the reader: [distinct, line, column, scope,
+/// inlined-at (or 0), implicit-code]. Lazily creates the abbreviation on
+/// first use and caches it through the by-reference \p Abbrev.
+void ModuleBitcodeWriter::writeDILocation(const DILocation *N,
+                                          SmallVectorImpl<uint64_t> &Record,
+                                          unsigned &Abbrev) {
+  if (!Abbrev)
+    Abbrev = createDILocationAbbrev();
+
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getLine());
+  Record.push_back(N->getColumn());
+  Record.push_back(VE.getMetadataID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getInlinedAt()));
+  Record.push_back(N->isImplicitCode());
+
+  Stream.EmitRecord(bitc::METADATA_LOCATION, Record, Abbrev);
+  Record.clear();
+}
+
+/// Define and register the abbreviation used for METADATA_GENERIC_DEBUG
+/// records: [distinct(1), tag(vbr6), version(1), header(vbr6),
+/// array of operand IDs (vbr6)]. Returns the abbrev ID assigned by the
+/// stream.
+unsigned ModuleBitcodeWriter::createGenericDINodeAbbrev() {
+  // NOTE(review): the upstream comment here was copy-pasted from
+  // createDILocationAbbrev (it mentioned columns and inlined-at locations,
+  // which do not apply to GenericDINode); it has been replaced.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_GENERIC_DEBUG));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+  return Stream.EmitAbbrev(std::move(Abbv));
+}
+
+/// Emit a METADATA_GENERIC_DEBUG record for \p N: [distinct, tag, version,
+/// operand IDs...]. Lazily creates and caches the abbreviation through the
+/// by-reference \p Abbrev, mirroring writeDILocation.
+void ModuleBitcodeWriter::writeGenericDINode(const GenericDINode *N,
+                                             SmallVectorImpl<uint64_t> &Record,
+                                             unsigned &Abbrev) {
+  if (!Abbrev)
+    Abbrev = createGenericDINodeAbbrev();
+
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(0); // Per-tag version field; unused for now.
+
+  for (auto &I : N->operands())
+    Record.push_back(VE.getMetadataOrNullID(I));
+
+  Stream.EmitRecord(bitc::METADATA_GENERIC_DEBUG, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_SUBRANGE record for \p N. The record version (2) is
+/// stored in the bits above the IsDistinct flag so old readers can detect
+/// the newer count/bound/stride layout.
+void ModuleBitcodeWriter::writeDISubrange(const DISubrange *N,
+                                          SmallVectorImpl<uint64_t> &Record,
+                                          unsigned Abbrev) {
+  const uint64_t Version = 2 << 1; // Version 2, shifted past IsDistinct bit 0.
+  Record.push_back((uint64_t)N->isDistinct() | Version);
+  Record.push_back(VE.getMetadataOrNullID(N->getRawCountNode()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawLowerBound()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawUpperBound()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawStride()));
+
+  Stream.EmitRecord(bitc::METADATA_SUBRANGE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_GENERIC_SUBRANGE record for \p N: [distinct, count,
+/// lower bound, upper bound, stride], each operand as a metadata-or-null ID.
+void ModuleBitcodeWriter::writeDIGenericSubrange(
+    const DIGenericSubrange *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back((uint64_t)N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawCountNode()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawLowerBound()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawUpperBound()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawStride()));
+
+  Stream.EmitRecord(bitc::METADATA_GENERIC_SUBRANGE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Append \p V to \p Vals using the sign-in-LSB encoding: non-negative x is
+/// stored as x<<1, negative x as (-x<<1)|1. This keeps small-magnitude
+/// negatives small so the subsequent VBR encoding stays compact.
+static void emitSignedInt64(SmallVectorImpl<uint64_t> &Vals, uint64_t V) {
+  if ((int64_t)V >= 0)
+    Vals.push_back(V << 1);
+  else
+    Vals.push_back((-V << 1) | 1);
+}
+
+/// Append a >64-bit APInt \p A to \p Vals, one 64-bit word at a time.
+/// Each word is passed through emitSignedInt64 so the encoding matches what
+/// the bitcode reader expects for wide-integer records.
+static void emitWideAPInt(SmallVectorImpl<uint64_t> &Vals, const APInt &A) {
+  // We have an arbitrary precision integer value to write whose
+  // bit width is > 64. However, in canonical unsigned integer
+  // format it is likely that the high bits are going to be zero.
+  // So, we only write the number of active words.
+  unsigned NumWords = A.getActiveWords();
+  const uint64_t *RawData = A.getRawData();
+  for (unsigned i = 0; i < NumWords; i++)
+    emitSignedInt64(Vals, RawData[i]);
+}
+
+/// Emit a METADATA_ENUMERATOR record for \p N. The first field packs
+/// IsBigInt (bit 2), IsUnsigned (bit 1) and IsDistinct (bit 0); the value is
+/// always written in the wide-APInt format regardless of bit width.
+void ModuleBitcodeWriter::writeDIEnumerator(const DIEnumerator *N,
+                                            SmallVectorImpl<uint64_t> &Record,
+                                            unsigned Abbrev) {
+  const uint64_t IsBigInt = 1 << 2;
+  Record.push_back(IsBigInt | (N->isUnsigned() << 1) | N->isDistinct());
+  Record.push_back(N->getValue().getBitWidth());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  emitWideAPInt(Record, N->getValue());
+
+  Stream.EmitRecord(bitc::METADATA_ENUMERATOR, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_BASIC_TYPE record for \p N:
+/// [distinct, tag, name, size-in-bits, align-in-bits, encoding, flags].
+void ModuleBitcodeWriter::writeDIBasicType(const DIBasicType *N,
+                                           SmallVectorImpl<uint64_t> &Record,
+                                           unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(N->getSizeInBits());
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(N->getEncoding());
+  Record.push_back(N->getFlags());
+
+  Stream.EmitRecord(bitc::METADATA_BASIC_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_STRING_TYPE record for \p N (Fortran-style string types):
+/// [distinct, tag, name, string length, length expr, location expr,
+/// size-in-bits, align-in-bits, encoding].
+void ModuleBitcodeWriter::writeDIStringType(const DIStringType *N,
+                                            SmallVectorImpl<uint64_t> &Record,
+                                            unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getStringLength()));
+  Record.push_back(VE.getMetadataOrNullID(N->getStringLengthExp()));
+  Record.push_back(VE.getMetadataOrNullID(N->getStringLocationExp()));
+  Record.push_back(N->getSizeInBits());
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(N->getEncoding());
+
+  Stream.EmitRecord(bitc::METADATA_STRING_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_DERIVED_TYPE record for \p N. Field order is fixed by the
+/// reader; the optional DWARF address space is biased by +1 so 0 can mean
+/// "absent".
+void ModuleBitcodeWriter::writeDIDerivedType(const DIDerivedType *N,
+                                             SmallVectorImpl<uint64_t> &Record,
+                                             unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getBaseType()));
+  Record.push_back(N->getSizeInBits());
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(N->getOffsetInBits());
+  Record.push_back(N->getFlags());
+  Record.push_back(VE.getMetadataOrNullID(N->getExtraData()));
+
+  // DWARF address space is encoded as N->getDWARFAddressSpace() + 1. 0 means
+  // that there is no DWARF address space associated with DIDerivedType.
+  if (const auto &DWARFAddressSpace = N->getDWARFAddressSpace())
+    Record.push_back(*DWARFAddressSpace + 1);
+  else
+    Record.push_back(0);
+
+  Record.push_back(VE.getMetadataOrNullID(N->getAnnotations().get()));
+
+  Stream.EmitRecord(bitc::METADATA_DERIVED_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_COMPOSITE_TYPE record for \p N. Bit 1 of the first field
+/// flags that the record does not use the legacy type-ref scheme, so the
+/// reader takes the modern decoding path.
+void ModuleBitcodeWriter::writeDICompositeType(
+    const DICompositeType *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  const unsigned IsNotUsedInOldTypeRef = 0x2;
+  Record.push_back(IsNotUsedInOldTypeRef | (unsigned)N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getBaseType()));
+  Record.push_back(N->getSizeInBits());
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(N->getOffsetInBits());
+  Record.push_back(N->getFlags());
+  Record.push_back(VE.getMetadataOrNullID(N->getElements().get()));
+  Record.push_back(N->getRuntimeLang());
+  Record.push_back(VE.getMetadataOrNullID(N->getVTableHolder()));
+  Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawIdentifier()));
+  Record.push_back(VE.getMetadataOrNullID(N->getDiscriminator()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawDataLocation()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawAssociated()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawAllocated()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawRank()));
+  Record.push_back(VE.getMetadataOrNullID(N->getAnnotations().get()));
+
+  Stream.EmitRecord(bitc::METADATA_COMPOSITE_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_SUBROUTINE_TYPE record for \p N: [flags-word, flags,
+/// type array, calling convention]. Bit 1 of the first field marks that no
+/// legacy type refs are used.
+void ModuleBitcodeWriter::writeDISubroutineType(
+    const DISubroutineType *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  const unsigned HasNoOldTypeRefs = 0x2;
+  Record.push_back(HasNoOldTypeRefs | (unsigned)N->isDistinct());
+  Record.push_back(N->getFlags());
+  Record.push_back(VE.getMetadataOrNullID(N->getTypeArray().get()));
+  Record.push_back(N->getCC());
+
+  Stream.EmitRecord(bitc::METADATA_SUBROUTINE_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_FILE record for \p N: [distinct, filename, directory,
+/// checksum kind, checksum value, (optional) source]. A missing checksum is
+/// written as a 0 kind plus a null value for backward compatibility; the
+/// trailing source field is only appended when present.
+void ModuleBitcodeWriter::writeDIFile(const DIFile *N,
+                                      SmallVectorImpl<uint64_t> &Record,
+                                      unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawFilename()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawDirectory()));
+  if (N->getRawChecksum()) {
+    Record.push_back(N->getRawChecksum()->Kind);
+    Record.push_back(VE.getMetadataOrNullID(N->getRawChecksum()->Value));
+  } else {
+    // Maintain backwards compatibility with the old internal representation of
+    // CSK_None in ChecksumKind by writing nulls here when Checksum is None.
+    Record.push_back(0);
+    Record.push_back(VE.getMetadataOrNullID(nullptr));
+  }
+  auto Source = N->getRawSource();
+  if (Source)
+    Record.push_back(VE.getMetadataOrNullID(*Source));
+
+  Stream.EmitRecord(bitc::METADATA_FILE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_COMPILE_UNIT record for \p N. Compile units are always
+/// distinct (asserted); the obsolete "subprograms" slot is kept as a 0
+/// placeholder so the reader's field indices stay stable.
+void ModuleBitcodeWriter::writeDICompileUnit(const DICompileUnit *N,
+                                             SmallVectorImpl<uint64_t> &Record,
+                                             unsigned Abbrev) {
+  assert(N->isDistinct() && "Expected distinct compile units");
+  Record.push_back(/* IsDistinct */ true);
+  Record.push_back(N->getSourceLanguage());
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawProducer()));
+  Record.push_back(N->isOptimized());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawFlags()));
+  Record.push_back(N->getRuntimeVersion());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawSplitDebugFilename()));
+  Record.push_back(N->getEmissionKind());
+  Record.push_back(VE.getMetadataOrNullID(N->getEnumTypes().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRetainedTypes().get()));
+  Record.push_back(/* subprograms */ 0);
+  Record.push_back(VE.getMetadataOrNullID(N->getGlobalVariables().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getImportedEntities().get()));
+  Record.push_back(N->getDWOId());
+  Record.push_back(VE.getMetadataOrNullID(N->getMacros().get()));
+  Record.push_back(N->getSplitDebugInlining());
+  Record.push_back(N->getDebugInfoForProfiling());
+  Record.push_back((unsigned)N->getNameTableKind());
+  Record.push_back(N->getRangesBaseAddress());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawSysRoot()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawSDK()));
+
+  Stream.EmitRecord(bitc::METADATA_COMPILE_UNIT, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_SUBPROGRAM record for \p N. The first field packs
+/// IsDistinct (bit 0) with HasUnit (bit 1) and HasSPFlags (bit 2) so the
+/// reader can distinguish this layout from older subprogram records.
+void ModuleBitcodeWriter::writeDISubprogram(const DISubprogram *N,
+                                            SmallVectorImpl<uint64_t> &Record,
+                                            unsigned Abbrev) {
+  const uint64_t HasUnitFlag = 1 << 1;
+  const uint64_t HasSPFlagsFlag = 1 << 2;
+  Record.push_back(uint64_t(N->isDistinct()) | HasUnitFlag | HasSPFlagsFlag);
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(N->getScopeLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getContainingType()));
+  Record.push_back(N->getSPFlags());
+  Record.push_back(N->getVirtualIndex());
+  Record.push_back(N->getFlags());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawUnit()));
+  Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getDeclaration()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRetainedNodes().get()));
+  Record.push_back(N->getThisAdjustment());
+  Record.push_back(VE.getMetadataOrNullID(N->getThrownTypes().get()));
+  Record.push_back(VE.getMetadataOrNullID(N->getAnnotations().get()));
+
+  Stream.EmitRecord(bitc::METADATA_SUBPROGRAM, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_LEXICAL_BLOCK record for \p N:
+/// [distinct, scope, file, line, column].
+void ModuleBitcodeWriter::writeDILexicalBlock(const DILexicalBlock *N,
+                                              SmallVectorImpl<uint64_t> &Record,
+                                              unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(N->getColumn());
+
+  Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_LEXICAL_BLOCK_FILE record for \p N:
+/// [distinct, scope, file, discriminator].
+void ModuleBitcodeWriter::writeDILexicalBlockFile(
+    const DILexicalBlockFile *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getDiscriminator());
+
+  Stream.EmitRecord(bitc::METADATA_LEXICAL_BLOCK_FILE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_COMMON_BLOCK record for \p N (Fortran COMMON blocks):
+/// [distinct, scope, decl, name, file, line].
+void ModuleBitcodeWriter::writeDICommonBlock(const DICommonBlock *N,
+                                             SmallVectorImpl<uint64_t> &Record,
+                                             unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getDecl()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLineNo());
+
+  Stream.EmitRecord(bitc::METADATA_COMMON_BLOCK, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_NAMESPACE record for \p N. The first field packs
+/// IsDistinct (bit 0) with ExportSymbols (bit 1).
+void ModuleBitcodeWriter::writeDINamespace(const DINamespace *N,
+                                           SmallVectorImpl<uint64_t> &Record,
+                                           unsigned Abbrev) {
+  Record.push_back(N->isDistinct() | N->getExportSymbols() << 1);
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+
+  Stream.EmitRecord(bitc::METADATA_NAMESPACE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_MACRO record for \p N:
+/// [distinct, macinfo type, line, name, value].
+void ModuleBitcodeWriter::writeDIMacro(const DIMacro *N,
+                                       SmallVectorImpl<uint64_t> &Record,
+                                       unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getMacinfoType());
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawValue()));
+
+  Stream.EmitRecord(bitc::METADATA_MACRO, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_MACRO_FILE record for \p N:
+/// [distinct, macinfo type, line, file, nested macro elements].
+void ModuleBitcodeWriter::writeDIMacroFile(const DIMacroFile *N,
+                                           SmallVectorImpl<uint64_t> &Record,
+                                           unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getMacinfoType());
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(VE.getMetadataOrNullID(N->getElements().get()));
+
+  Stream.EmitRecord(bitc::METADATA_MACRO_FILE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_ARG_LIST record for \p N: one metadata ID per argument.
+/// Unlike most nodes here there is no distinct/flags word; the record is
+/// just the argument list.
+void ModuleBitcodeWriter::writeDIArgList(const DIArgList *N,
+                                         SmallVectorImpl<uint64_t> &Record,
+                                         unsigned Abbrev) {
+  Record.reserve(N->getArgs().size());
+  for (ValueAsMetadata *MD : N->getArgs())
+    Record.push_back(VE.getMetadataID(MD));
+
+  Stream.EmitRecord(bitc::METADATA_ARG_LIST, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_MODULE record for \p N: [distinct, all metadata operands
+/// in order, line number, is-declaration].
+void ModuleBitcodeWriter::writeDIModule(const DIModule *N,
+                                        SmallVectorImpl<uint64_t> &Record,
+                                        unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  for (auto &I : N->operands())
+    Record.push_back(VE.getMetadataOrNullID(I));
+  Record.push_back(N->getLineNo());
+  Record.push_back(N->getIsDecl());
+
+  Stream.EmitRecord(bitc::METADATA_MODULE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_TEMPLATE_TYPE record for \p N:
+/// [distinct, name, type, is-default].
+void ModuleBitcodeWriter::writeDITemplateTypeParameter(
+    const DITemplateTypeParameter *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(N->isDefault());
+
+  Stream.EmitRecord(bitc::METADATA_TEMPLATE_TYPE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_TEMPLATE_VALUE record for \p N:
+/// [distinct, tag, name, type, is-default, value].
+void ModuleBitcodeWriter::writeDITemplateValueParameter(
+    const DITemplateValueParameter *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(N->isDefault());
+  Record.push_back(VE.getMetadataOrNullID(N->getValue()));
+
+  Stream.EmitRecord(bitc::METADATA_TEMPLATE_VALUE, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_GLOBAL_VAR record for \p N. The record version (2) is
+/// packed above the IsDistinct bit so readers can select the current field
+/// layout.
+void ModuleBitcodeWriter::writeDIGlobalVariable(
+    const DIGlobalVariable *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  const uint64_t Version = 2 << 1; // Version 2, shifted past IsDistinct bit 0.
+  Record.push_back((uint64_t)N->isDistinct() | Version);
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawLinkageName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(N->isLocalToUnit());
+  Record.push_back(N->isDefinition());
+  Record.push_back(VE.getMetadataOrNullID(N->getStaticDataMemberDeclaration()));
+  Record.push_back(VE.getMetadataOrNullID(N->getTemplateParams()));
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(VE.getMetadataOrNullID(N->getAnnotations().get()));
+
+  Stream.EmitRecord(bitc::METADATA_GLOBAL_VAR, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_LOCAL_VAR record for \p N. The HasAlignment flag (bit 1
+/// of the first field) tells the reader which of the historical layouts
+/// documented below this record uses.
+void ModuleBitcodeWriter::writeDILocalVariable(
+    const DILocalVariable *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  // In order to support all possible bitcode formats in BitcodeReader we need
+  // to distinguish the following cases:
+  // 1) Record has no artificial tag (Record[1]),
+  //    has no obsolete inlinedAt field (Record[9]).
+  //    In this case Record size will be 8, HasAlignment flag is false.
+  // 2) Record has artificial tag (Record[1]),
+  //    has no obsolete inlignedAt field (Record[9]).
+  //    In this case Record size will be 9, HasAlignment flag is false.
+  // 3) Record has both artificial tag (Record[1]) and
+  //    obsolete inlignedAt field (Record[9]).
+  //    In this case Record size will be 10, HasAlignment flag is false.
+  // 4) Record has neither artificial tag, nor inlignedAt field, but
+  //    HasAlignment flag is true and Record[8] contains alignment value.
+  const uint64_t HasAlignmentFlag = 1 << 1;
+  Record.push_back((uint64_t)N->isDistinct() | HasAlignmentFlag);
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+  Record.push_back(N->getArg());
+  Record.push_back(N->getFlags());
+  Record.push_back(N->getAlignInBits());
+  Record.push_back(VE.getMetadataOrNullID(N->getAnnotations().get()));
+
+  Stream.EmitRecord(bitc::METADATA_LOCAL_VAR, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_LABEL record for \p N:
+/// [distinct, scope, name, file, line].
+void ModuleBitcodeWriter::writeDILabel(
+    const DILabel *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back((uint64_t)N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+
+  Stream.EmitRecord(bitc::METADATA_LABEL, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_EXPRESSION record for \p N: a version-3 flags word
+/// followed by the raw DWARF expression elements, appended verbatim.
+void ModuleBitcodeWriter::writeDIExpression(const DIExpression *N,
+                                            SmallVectorImpl<uint64_t> &Record,
+                                            unsigned Abbrev) {
+  Record.reserve(N->getElements().size() + 1);
+  const uint64_t Version = 3 << 1; // Version 3, shifted past IsDistinct bit 0.
+  Record.push_back((uint64_t)N->isDistinct() | Version);
+  Record.append(N->elements_begin(), N->elements_end());
+
+  Stream.EmitRecord(bitc::METADATA_EXPRESSION, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_GLOBAL_VAR_EXPR record for \p N:
+/// [distinct, variable, expression].
+void ModuleBitcodeWriter::writeDIGlobalVariableExpression(
+    const DIGlobalVariableExpression *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getVariable()));
+  Record.push_back(VE.getMetadataOrNullID(N->getExpression()));
+
+  Stream.EmitRecord(bitc::METADATA_GLOBAL_VAR_EXPR, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_OBJC_PROPERTY record for \p N:
+/// [distinct, name, file, line, setter, getter, attributes, type].
+void ModuleBitcodeWriter::writeDIObjCProperty(const DIObjCProperty *N,
+                                              SmallVectorImpl<uint64_t> &Record,
+                                              unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getFile()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawSetterName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawGetterName()));
+  Record.push_back(N->getAttributes());
+  Record.push_back(VE.getMetadataOrNullID(N->getType()));
+
+  Stream.EmitRecord(bitc::METADATA_OBJC_PROPERTY, Record, Abbrev);
+  Record.clear();
+}
+
+/// Emit a METADATA_IMPORTED_ENTITY record for \p N:
+/// [distinct, tag, scope, entity, line, name, file, elements].
+void ModuleBitcodeWriter::writeDIImportedEntity(
+    const DIImportedEntity *N, SmallVectorImpl<uint64_t> &Record,
+    unsigned Abbrev) {
+  Record.push_back(N->isDistinct());
+  Record.push_back(N->getTag());
+  Record.push_back(VE.getMetadataOrNullID(N->getScope()));
+  Record.push_back(VE.getMetadataOrNullID(N->getEntity()));
+  Record.push_back(N->getLine());
+  Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
+  Record.push_back(VE.getMetadataOrNullID(N->getRawFile()));
+  Record.push_back(VE.getMetadataOrNullID(N->getElements().get()));
+
+  Stream.EmitRecord(bitc::METADATA_IMPORTED_ENTITY, Record, Abbrev);
+  Record.clear();
+}
+
+/// Define and register the abbreviation for METADATA_NAME records: an array
+/// of 8-bit name characters. Returns the abbrev ID assigned by the stream.
+unsigned ModuleBitcodeWriter::createNamedMetadataAbbrev() {
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_NAME));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+  return Stream.EmitAbbrev(std::move(Abbv));
+}
+
+/// Emit every named metadata node in the module as a METADATA_NAME record
+/// (the name's bytes) immediately followed by a METADATA_NAMED_NODE record
+/// (the operand metadata IDs). No-op when the module has none.
+void ModuleBitcodeWriter::writeNamedMetadata(
+    SmallVectorImpl<uint64_t> &Record) {
+  if (M.named_metadata_empty())
+    return;
+
+  unsigned Abbrev = createNamedMetadataAbbrev();
+  for (const NamedMDNode &NMD : M.named_metadata()) {
+    // Write name.
+    StringRef Str = NMD.getName();
+    Record.append(Str.bytes_begin(), Str.bytes_end());
+    Stream.EmitRecord(bitc::METADATA_NAME, Record, Abbrev);
+    Record.clear();
+
+    // Write named metadata operands.
+    for (const MDNode *N : NMD.operands())
+      Record.push_back(VE.getMetadataID(N));
+    Stream.EmitRecord(bitc::METADATA_NAMED_NODE, Record, 0);
+    Record.clear();
+  }
+}
+
+/// Define and register the abbreviation for the combined METADATA_STRINGS
+/// record: [string count (vbr6), offset to the character data (vbr6), blob].
+/// Returns the abbrev ID assigned by the stream.
+unsigned ModuleBitcodeWriter::createMetadataStringsAbbrev() {
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_STRINGS));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of strings
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // offset to chars
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+  return Stream.EmitAbbrev(std::move(Abbv));
+}
+
+/// Write out a record for MDString.
+///
+/// All the metadata strings in a metadata block are emitted in a single
+/// record. The sizes and strings themselves are shoved into a blob.
+/// The blob layout is: VBR6-encoded lengths (padded to a word boundary),
+/// then the concatenated string bytes; the record stores the byte offset
+/// where the lengths end and the characters begin.
+void ModuleBitcodeWriter::writeMetadataStrings(
+    ArrayRef<const Metadata *> Strings, SmallVectorImpl<uint64_t> &Record) {
+  if (Strings.empty())
+    return;
+
+  // Start the record with the number of strings.
+  Record.push_back(bitc::METADATA_STRINGS);
+  Record.push_back(Strings.size());
+
+  // Emit the sizes of the strings in the blob.
+  SmallString<256> Blob;
+  {
+    // A nested BitstreamWriter VBR-encodes the lengths directly into the
+    // blob buffer; FlushToWord pads so the char data starts word-aligned.
+    BitstreamWriter W(Blob);
+    for (const Metadata *MD : Strings)
+      W.EmitVBR(cast<MDString>(MD)->getLength(), 6);
+    W.FlushToWord();
+  }
+
+  // Add the offset to the strings to the record.
+  Record.push_back(Blob.size());
+
+  // Add the strings to the blob.
+  for (const Metadata *MD : Strings)
+    Blob.append(cast<MDString>(MD)->getString());
+
+  // Emit the final record.
+  Stream.EmitRecordWithBlob(createMetadataStringsAbbrev(), Record, Blob);
+  Record.clear();
+}
+
+// Generates an enum to use as an index in the Abbrev array of Metadata
+// record: one <Class>AbbrevID entry per MDNode leaf class, expanded from
+// Metadata.def, with LastPlusOne giving the array size.
+enum MetadataAbbrev : unsigned {
+#define HANDLE_MDNODE_LEAF(CLASS) CLASS##AbbrevID,
+#include "llvm/IR/Metadata.def"
+  LastPlusOne
+};
+
+/// Emit one record per metadata node in \p MDs, dispatching to the
+/// write<Class> method for each MDNode leaf class (via Metadata.def) and to
+/// writeValueAsMetadata for ValueAsMetadata. When \p MDAbbrevs is non-null
+/// its pre-created abbrevs are used; otherwise per-class abbrevs are created
+/// lazily in the local <Class>Abbrev variables. When \p IndexPos is non-null
+/// the bit offset of every record is appended to it (for METADATA_INDEX).
+void ModuleBitcodeWriter::writeMetadataRecords(
+    ArrayRef<const Metadata *> MDs, SmallVectorImpl<uint64_t> &Record,
+    std::vector<unsigned> *MDAbbrevs, std::vector<uint64_t> *IndexPos) {
+  if (MDs.empty())
+    return;
+
+  // Initialize MDNode abbreviations.
+#define HANDLE_MDNODE_LEAF(CLASS) unsigned CLASS##Abbrev = 0;
+#include "llvm/IR/Metadata.def"
+
+  for (const Metadata *MD : MDs) {
+    if (IndexPos)
+      IndexPos->push_back(Stream.GetCurrentBitNo());
+    if (const MDNode *N = dyn_cast<MDNode>(MD)) {
+      assert(N->isResolved() && "Expected forward references to be resolved");
+
+      switch (N->getMetadataID()) {
+      default:
+        llvm_unreachable("Invalid MDNode subclass");
+#define HANDLE_MDNODE_LEAF(CLASS)                                              \
+  case Metadata::CLASS##Kind:                                                  \
+    if (MDAbbrevs)                                                             \
+      write##CLASS(cast<CLASS>(N), Record,                                     \
+                   (*MDAbbrevs)[MetadataAbbrev::CLASS##AbbrevID]);             \
+    else                                                                       \
+      write##CLASS(cast<CLASS>(N), Record, CLASS##Abbrev);                     \
+    continue;
+#include "llvm/IR/Metadata.def"
+      }
+    }
+    // Not an MDNode: must be a (non-function-local) ValueAsMetadata.
+    writeValueAsMetadata(cast<ValueAsMetadata>(MD), Record);
+  }
+}
+
/// Emit the module-level METADATA_BLOCK: abbrev definitions, all MDStrings,
/// all other metadata records (optionally followed by a lazy-loading index),
/// named metadata, and declaration-attached metadata records.
void ModuleBitcodeWriter::writeModuleMetadata() {
  if (!VE.hasMDs() && M.named_metadata_empty())
    return;

  Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 4);
  SmallVector<uint64_t, 64> Record;

  // Emit all abbrevs upfront, so that the reader can jump in the middle of the
  // block and load any metadata.
  std::vector<unsigned> MDAbbrevs;

  MDAbbrevs.resize(MetadataAbbrev::LastPlusOne);
  MDAbbrevs[MetadataAbbrev::DILocationAbbrevID] = createDILocationAbbrev();
  MDAbbrevs[MetadataAbbrev::GenericDINodeAbbrevID] =
      createGenericDINodeAbbrev();

  // Abbrev for METADATA_INDEX_OFFSET: two fixed-32 halves of a 64-bit
  // forward offset to the index record.
  auto Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_INDEX_OFFSET));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
  unsigned OffsetAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for METADATA_INDEX: an array of VBR6 record-position deltas.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::METADATA_INDEX));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
  unsigned IndexAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Emit MDStrings together upfront.
  writeMetadataStrings(VE.getMDStrings(), Record);

  // We only emit an index for the metadata record if we have more than a given
  // (naive) threshold of metadatas, otherwise it is not worth it.
  if (VE.getNonMDStrings().size() > IndexThreshold) {
    // Write a placeholder value in for the offset of the metadata index,
    // which is written after the records, so that it can include
    // the offset of each entry. The placeholder offset will be
    // updated after all records are emitted.
    uint64_t Vals[] = {0, 0};
    Stream.EmitRecord(bitc::METADATA_INDEX_OFFSET, Vals, OffsetAbbrev);
  }

  // Compute and save the bit offset to the current position, which will be
  // patched when we emit the index later. We can simply subtract the 64-bit
  // fixed size from the current bit number to get the location to backpatch.
  uint64_t IndexOffsetRecordBitPos = Stream.GetCurrentBitNo();

  // This index will contain the bitpos for each individual record.
  std::vector<uint64_t> IndexPos;
  IndexPos.reserve(VE.getNonMDStrings().size());

  // Write all the records
  writeMetadataRecords(VE.getNonMDStrings(), Record, &MDAbbrevs, &IndexPos);

  if (VE.getNonMDStrings().size() > IndexThreshold) {
    // Now that we have emitted all the records we will emit the index. But
    // first
    // backpatch the forward reference so that the reader can skip the records
    // efficiently.
    Stream.BackpatchWord64(IndexOffsetRecordBitPos - 64,
                           Stream.GetCurrentBitNo() - IndexOffsetRecordBitPos);

    // Delta encode the index.
    uint64_t PreviousValue = IndexOffsetRecordBitPos;
    for (auto &Elt : IndexPos) {
      auto EltDelta = Elt - PreviousValue;
      PreviousValue = Elt;
      Elt = EltDelta;
    }
    // Emit the index record.
    Stream.EmitRecord(bitc::METADATA_INDEX, IndexPos, IndexAbbrev);
    IndexPos.clear();
  }

  // Write the named metadata now.
  writeNamedMetadata(Record);

  // Emit METADATA_GLOBAL_DECL_ATTACHMENT records: [valueid, n x [id, mdnode]].
  auto AddDeclAttachedMetadata = [&](const GlobalObject &GO) {
    SmallVector<uint64_t, 4> Record;
    Record.push_back(VE.getValueID(&GO));
    pushGlobalMetadataAttachment(Record, GO);
    Stream.EmitRecord(bitc::METADATA_GLOBAL_DECL_ATTACHMENT, Record);
  };
  for (const Function &F : M)
    if (F.isDeclaration() && F.hasMetadata())
      AddDeclAttachedMetadata(F);
  // FIXME: Only store metadata for declarations here, and move data for global
  // variable definitions to a separate block (PR28134).
  for (const GlobalVariable &GV : M.globals())
    if (GV.hasMetadata())
      AddDeclAttachedMetadata(GV);

  Stream.ExitBlock();
}
+
/// Emit the per-function METADATA_BLOCK: strings first, then the remaining
/// metadata records (without the index machinery used at module level).
void ModuleBitcodeWriter::writeFunctionMetadata(const Function &F) {
  if (!VE.hasMDs())
    return;

  // Note: uses abbrev-ID width 3 here, vs 4 for the module-level block.
  Stream.EnterSubblock(bitc::METADATA_BLOCK_ID, 3);
  SmallVector<uint64_t, 64> Record;
  writeMetadataStrings(VE.getMDStrings(), Record);
  writeMetadataRecords(VE.getNonMDStrings(), Record);
  Stream.ExitBlock();
}
+
+void ModuleBitcodeWriter::pushGlobalMetadataAttachment(
+ SmallVectorImpl<uint64_t> &Record, const GlobalObject &GO) {
+ // [n x [id, mdnode]]
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ GO.getAllMetadata(MDs);
+ for (const auto &I : MDs) {
+ Record.push_back(I.first);
+ Record.push_back(VE.getMetadataID(I.second));
+ }
+}
+
+void ModuleBitcodeWriter::writeFunctionMetadataAttachment(const Function &F) {
+ Stream.EnterSubblock(bitc::METADATA_ATTACHMENT_ID, 3);
+
+ SmallVector<uint64_t, 64> Record;
+
+ if (F.hasMetadata()) {
+ pushGlobalMetadataAttachment(Record, F);
+ Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0);
+ Record.clear();
+ }
+
+ // Write metadata attachments
+ // METADATA_ATTACHMENT - [m x [value, [n x [id, mdnode]]]
+ SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
+ for (const BasicBlock &BB : F)
+ for (const Instruction &I : BB) {
+ MDs.clear();
+ I.getAllMetadataOtherThanDebugLoc(MDs);
+
+ // If no metadata, ignore instruction.
+ if (MDs.empty()) continue;
+
+ Record.push_back(VE.getInstructionID(&I));
+
+ for (unsigned i = 0, e = MDs.size(); i != e; ++i) {
+ Record.push_back(MDs[i].first);
+ Record.push_back(VE.getMetadataID(MDs[i].second));
+ }
+ Stream.EmitRecord(bitc::METADATA_ATTACHMENT, Record, 0);
+ Record.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeModuleMetadataKinds() {
+ SmallVector<uint64_t, 64> Record;
+
+ // Write metadata kinds
+ // METADATA_KIND - [n x [id, name]]
+ SmallVector<StringRef, 8> Names;
+ M.getMDKindNames(Names);
+
+ if (Names.empty()) return;
+
+ Stream.EnterSubblock(bitc::METADATA_KIND_BLOCK_ID, 3);
+
+ for (unsigned MDKindID = 0, e = Names.size(); MDKindID != e; ++MDKindID) {
+ Record.push_back(MDKindID);
+ StringRef KName = Names[MDKindID];
+ Record.append(KName.begin(), KName.end());
+
+ Stream.EmitRecord(bitc::METADATA_KIND, Record, 0);
+ Record.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeOperandBundleTags() {
+ // Write metadata kinds
+ //
+ // OPERAND_BUNDLE_TAGS_BLOCK_ID : N x OPERAND_BUNDLE_TAG
+ //
+ // OPERAND_BUNDLE_TAG - [strchr x N]
+
+ SmallVector<StringRef, 8> Tags;
+ M.getOperandBundleTags(Tags);
+
+ if (Tags.empty())
+ return;
+
+ Stream.EnterSubblock(bitc::OPERAND_BUNDLE_TAGS_BLOCK_ID, 3);
+
+ SmallVector<uint64_t, 64> Record;
+
+ for (auto Tag : Tags) {
+ Record.append(Tag.begin(), Tag.end());
+
+ Stream.EmitRecord(bitc::OPERAND_BUNDLE_TAG, Record, 0);
+ Record.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
+void ModuleBitcodeWriter::writeSyncScopeNames() {
+ SmallVector<StringRef, 8> SSNs;
+ M.getContext().getSyncScopeNames(SSNs);
+ if (SSNs.empty())
+ return;
+
+ Stream.EnterSubblock(bitc::SYNC_SCOPE_NAMES_BLOCK_ID, 2);
+
+ SmallVector<uint64_t, 64> Record;
+ for (auto SSN : SSNs) {
+ Record.append(SSN.begin(), SSN.end());
+ Stream.EmitRecord(bitc::SYNC_SCOPE_NAME, Record, 0);
+ Record.clear();
+ }
+
+ Stream.ExitBlock();
+}
+
/// Emit a CONSTANTS_BLOCK for the enumerated values in [FirstVal, LastVal).
///
/// Constants are grouped by type: a CST_CODE_SETTYPE record is emitted
/// whenever the type changes, and subsequent constant records omit the type.
/// When \p isGlobal is true (the module-level constant pool), extra abbrevs
/// are defined for aggregates and strings since those appear frequently.
void ModuleBitcodeWriter::writeConstants(unsigned FirstVal, unsigned LastVal,
                                         bool isGlobal) {
  if (FirstVal == LastVal) return;

  Stream.EnterSubblock(bitc::CONSTANTS_BLOCK_ID, 4);

  unsigned AggregateAbbrev = 0;
  unsigned String8Abbrev = 0;
  unsigned CString7Abbrev = 0;
  unsigned CString6Abbrev = 0;
  // If this is a constant pool for the module, emit module-specific abbrevs.
  if (isGlobal) {
    // Abbrev for CST_CODE_AGGREGATE: element IDs fit in a fixed width
    // sized to the largest possible value ID in this pool.
    auto Abbv = std::make_shared<BitCodeAbbrev>();
    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_AGGREGATE));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, Log2_32_Ceil(LastVal+1)));
    AggregateAbbrev = Stream.EmitAbbrev(std::move(Abbv));

    // Abbrev for CST_CODE_STRING.
    Abbv = std::make_shared<BitCodeAbbrev>();
    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_STRING));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
    String8Abbrev = Stream.EmitAbbrev(std::move(Abbv));
    // Abbrev for CST_CODE_CSTRING (7-bit chars).
    Abbv = std::make_shared<BitCodeAbbrev>();
    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
    CString7Abbrev = Stream.EmitAbbrev(std::move(Abbv));
    // Abbrev for CST_CODE_CSTRING (denser 6-bit char set).
    Abbv = std::make_shared<BitCodeAbbrev>();
    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CSTRING));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
    CString6Abbrev = Stream.EmitAbbrev(std::move(Abbv));
  }

  SmallVector<uint64_t, 64> Record;

  const ValueEnumerator::ValueList &Vals = VE.getValues();
  Type *LastTy = nullptr;
  for (unsigned i = FirstVal; i != LastVal; ++i) {
    const Value *V = Vals[i].first;
    // If we need to switch types, do so now.
    if (V->getType() != LastTy) {
      LastTy = V->getType();
      Record.push_back(VE.getTypeID(LastTy));
      Stream.EmitRecord(bitc::CST_CODE_SETTYPE, Record,
                        CONSTANTS_SETTYPE_ABBREV);
      Record.clear();
    }

    if (const InlineAsm *IA = dyn_cast<InlineAsm>(V)) {
      // Pack the inline-asm flag bits: sideeffect | alignstack<<1 |
      // dialect<<2 | canthrow<<3.
      Record.push_back(VE.getTypeID(IA->getFunctionType()));
      Record.push_back(
          unsigned(IA->hasSideEffects()) | unsigned(IA->isAlignStack()) << 1 |
          unsigned(IA->getDialect() & 1) << 2 | unsigned(IA->canThrow()) << 3);

      // Add the asm string.
      const std::string &AsmStr = IA->getAsmString();
      Record.push_back(AsmStr.size());
      Record.append(AsmStr.begin(), AsmStr.end());

      // Add the constraint string.
      const std::string &ConstraintStr = IA->getConstraintString();
      Record.push_back(ConstraintStr.size());
      Record.append(ConstraintStr.begin(), ConstraintStr.end());
      Stream.EmitRecord(bitc::CST_CODE_INLINEASM, Record);
      Record.clear();
      continue;
    }
    const Constant *C = cast<Constant>(V);
    unsigned Code = -1U;
    unsigned AbbrevToUse = 0;
    if (C->isNullValue()) {
      Code = bitc::CST_CODE_NULL;
    } else if (isa<PoisonValue>(C)) {
      Code = bitc::CST_CODE_POISON;
    } else if (isa<UndefValue>(C)) {
      Code = bitc::CST_CODE_UNDEF;
    } else if (const ConstantInt *IV = dyn_cast<ConstantInt>(C)) {
      if (IV->getBitWidth() <= 64) {
        uint64_t V = IV->getSExtValue();
        emitSignedInt64(Record, V);
        Code = bitc::CST_CODE_INTEGER;
        AbbrevToUse = CONSTANTS_INTEGER_ABBREV;
      } else {                             // Wide integers, > 64 bits in size.
        emitWideAPInt(Record, IV->getValue());
        Code = bitc::CST_CODE_WIDE_INTEGER;
      }
    } else if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
      Code = bitc::CST_CODE_FLOAT;
      Type *Ty = CFP->getType();
      if (Ty->isHalfTy() || Ty->isBFloatTy() || Ty->isFloatTy() ||
          Ty->isDoubleTy()) {
        // These all fit in a single 64-bit record operand.
        Record.push_back(CFP->getValueAPF().bitcastToAPInt().getZExtValue());
      } else if (Ty->isX86_FP80Ty()) {
        // api needed to prevent premature destruction
        // bits are not in the same order as a normal i80 APInt, compensate.
        APInt api = CFP->getValueAPF().bitcastToAPInt();
        const uint64_t *p = api.getRawData();
        Record.push_back((p[1] << 48) | (p[0] >> 16));
        Record.push_back(p[0] & 0xffffLL);
      } else if (Ty->isFP128Ty() || Ty->isPPC_FP128Ty()) {
        APInt api = CFP->getValueAPF().bitcastToAPInt();
        const uint64_t *p = api.getRawData();
        Record.push_back(p[0]);
        Record.push_back(p[1]);
      } else {
        assert(0 && "Unknown FP type!");
      }
    } else if (isa<ConstantDataSequential>(C) &&
               cast<ConstantDataSequential>(C)->isString()) {
      const ConstantDataSequential *Str = cast<ConstantDataSequential>(C);
      // Emit constant strings specially.
      unsigned NumElts = Str->getNumElements();
      // If this is a null-terminated string, use the denser CSTRING encoding.
      if (Str->isCString()) {
        Code = bitc::CST_CODE_CSTRING;
        --NumElts;  // Don't encode the null, which isn't allowed by char6.
      } else {
        Code = bitc::CST_CODE_STRING;
        AbbrevToUse = String8Abbrev;
      }
      // Track whether all chars fit the 7-bit / char6 encodings so the
      // densest applicable abbrev can be picked afterwards.
      bool isCStr7 = Code == bitc::CST_CODE_CSTRING;
      bool isCStrChar6 = Code == bitc::CST_CODE_CSTRING;
      for (unsigned i = 0; i != NumElts; ++i) {
        unsigned char V = Str->getElementAsInteger(i);
        Record.push_back(V);
        isCStr7 &= (V & 128) == 0;
        if (isCStrChar6)
          isCStrChar6 = BitCodeAbbrevOp::isChar6(V);
      }

      if (isCStrChar6)
        AbbrevToUse = CString6Abbrev;
      else if (isCStr7)
        AbbrevToUse = CString7Abbrev;
    } else if (const ConstantDataSequential *CDS =
                   dyn_cast<ConstantDataSequential>(C)) {
      // Non-string constant data arrays/vectors: raw element payloads.
      Code = bitc::CST_CODE_DATA;
      Type *EltTy = CDS->getElementType();
      if (isa<IntegerType>(EltTy)) {
        for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
          Record.push_back(CDS->getElementAsInteger(i));
      } else {
        for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i)
          Record.push_back(
              CDS->getElementAsAPFloat(i).bitcastToAPInt().getLimitedValue());
      }
    } else if (isa<ConstantAggregate>(C)) {
      Code = bitc::CST_CODE_AGGREGATE;
      for (const Value *Op : C->operands())
        Record.push_back(VE.getValueID(Op));
      AbbrevToUse = AggregateAbbrev;
    } else if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
      switch (CE->getOpcode()) {
      default:
        if (Instruction::isCast(CE->getOpcode())) {
          Code = bitc::CST_CODE_CE_CAST;
          Record.push_back(getEncodedCastOpcode(CE->getOpcode()));
          Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
          Record.push_back(VE.getValueID(C->getOperand(0)));
          AbbrevToUse = CONSTANTS_CE_CAST_Abbrev;
        } else {
          assert(CE->getNumOperands() == 2 && "Unknown constant expr!");
          Code = bitc::CST_CODE_CE_BINOP;
          Record.push_back(getEncodedBinaryOpcode(CE->getOpcode()));
          Record.push_back(VE.getValueID(C->getOperand(0)));
          Record.push_back(VE.getValueID(C->getOperand(1)));
          uint64_t Flags = getOptimizationFlags(CE);
          if (Flags != 0)
            Record.push_back(Flags);
        }
        break;
      case Instruction::FNeg: {
        assert(CE->getNumOperands() == 1 && "Unknown constant expr!");
        Code = bitc::CST_CODE_CE_UNOP;
        Record.push_back(getEncodedUnaryOpcode(CE->getOpcode()));
        Record.push_back(VE.getValueID(C->getOperand(0)));
        uint64_t Flags = getOptimizationFlags(CE);
        if (Flags != 0)
          Record.push_back(Flags);
        break;
      }
      case Instruction::GetElementPtr: {
        Code = bitc::CST_CODE_CE_GEP;
        const auto *GO = cast<GEPOperator>(C);
        Record.push_back(VE.getTypeID(GO->getSourceElementType()));
        // The inrange index and the inbounds flag share one operand:
        // (index << 1) | inbounds.
        if (Optional<unsigned> Idx = GO->getInRangeIndex()) {
          Code = bitc::CST_CODE_CE_GEP_WITH_INRANGE_INDEX;
          Record.push_back((*Idx << 1) | GO->isInBounds());
        } else if (GO->isInBounds())
          Code = bitc::CST_CODE_CE_INBOUNDS_GEP;
        for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
          Record.push_back(VE.getTypeID(C->getOperand(i)->getType()));
          Record.push_back(VE.getValueID(C->getOperand(i)));
        }
        break;
      }
      case Instruction::Select:
        Code = bitc::CST_CODE_CE_SELECT;
        Record.push_back(VE.getValueID(C->getOperand(0)));
        Record.push_back(VE.getValueID(C->getOperand(1)));
        Record.push_back(VE.getValueID(C->getOperand(2)));
        break;
      case Instruction::ExtractElement:
        Code = bitc::CST_CODE_CE_EXTRACTELT;
        Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
        Record.push_back(VE.getValueID(C->getOperand(0)));
        Record.push_back(VE.getTypeID(C->getOperand(1)->getType()));
        Record.push_back(VE.getValueID(C->getOperand(1)));
        break;
      case Instruction::InsertElement:
        Code = bitc::CST_CODE_CE_INSERTELT;
        Record.push_back(VE.getValueID(C->getOperand(0)));
        Record.push_back(VE.getValueID(C->getOperand(1)));
        Record.push_back(VE.getTypeID(C->getOperand(2)->getType()));
        Record.push_back(VE.getValueID(C->getOperand(2)));
        break;
      case Instruction::ShuffleVector:
        // If the return type and argument types are the same, this is a
        // standard shufflevector instruction. If the types are different,
        // then the shuffle is widening or truncating the input vectors, and
        // the argument type must also be encoded.
        if (C->getType() == C->getOperand(0)->getType()) {
          Code = bitc::CST_CODE_CE_SHUFFLEVEC;
        } else {
          Code = bitc::CST_CODE_CE_SHUFVEC_EX;
          Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
        }
        Record.push_back(VE.getValueID(C->getOperand(0)));
        Record.push_back(VE.getValueID(C->getOperand(1)));
        Record.push_back(VE.getValueID(CE->getShuffleMaskForBitcode()));
        break;
      case Instruction::ICmp:
      case Instruction::FCmp:
        Code = bitc::CST_CODE_CE_CMP;
        Record.push_back(VE.getTypeID(C->getOperand(0)->getType()));
        Record.push_back(VE.getValueID(C->getOperand(0)));
        Record.push_back(VE.getValueID(C->getOperand(1)));
        Record.push_back(CE->getPredicate());
        break;
      }
    } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(C)) {
      Code = bitc::CST_CODE_BLOCKADDRESS;
      Record.push_back(VE.getTypeID(BA->getFunction()->getType()));
      Record.push_back(VE.getValueID(BA->getFunction()));
      Record.push_back(VE.getGlobalBasicBlockID(BA->getBasicBlock()));
    } else if (const auto *Equiv = dyn_cast<DSOLocalEquivalent>(C)) {
      Code = bitc::CST_CODE_DSO_LOCAL_EQUIVALENT;
      Record.push_back(VE.getTypeID(Equiv->getGlobalValue()->getType()));
      Record.push_back(VE.getValueID(Equiv->getGlobalValue()));
    } else if (const auto *NC = dyn_cast<NoCFIValue>(C)) {
      Code = bitc::CST_CODE_NO_CFI_VALUE;
      Record.push_back(VE.getTypeID(NC->getGlobalValue()->getType()));
      Record.push_back(VE.getValueID(NC->getGlobalValue()));
    } else {
#ifndef NDEBUG
      C->dump();
#endif
      llvm_unreachable("Unknown constant!");
    }
    Stream.EmitRecord(Code, Record, AbbrevToUse);
    Record.clear();
  }

  Stream.ExitBlock();
}
+
+void ModuleBitcodeWriter::writeModuleConstants() {
+ const ValueEnumerator::ValueList &Vals = VE.getValues();
+
+ // Find the first constant to emit, which is the first non-globalvalue value.
+ // We know globalvalues have been emitted by WriteModuleInfo.
+ for (unsigned i = 0, e = Vals.size(); i != e; ++i) {
+ if (!isa<GlobalValue>(Vals[i].first)) {
+ writeConstants(i, Vals.size(), true);
+ return;
+ }
+ }
+}
+
+/// pushValueAndType - The file has to encode both the value and type id for
+/// many values, because we need to know what type to create for forward
+/// references. However, most operands are not forward references, so this type
+/// field is not needed.
+///
+/// This function adds V's value ID to Vals. If the value ID is higher than the
+/// instruction ID, then it is a forward reference, and it also includes the
+/// type ID. The value ID that is written is encoded relative to the InstID.
+bool ModuleBitcodeWriter::pushValueAndType(const Value *V, unsigned InstID,
+ SmallVectorImpl<unsigned> &Vals) {
+ unsigned ValID = VE.getValueID(V);
+ // Make encoding relative to the InstID.
+ Vals.push_back(InstID - ValID);
+ if (ValID >= InstID) {
+ Vals.push_back(VE.getTypeID(V->getType()));
+ return true;
+ }
+ return false;
+}
+
+void ModuleBitcodeWriter::writeOperandBundles(const CallBase &CS,
+ unsigned InstID) {
+ SmallVector<unsigned, 64> Record;
+ LLVMContext &C = CS.getContext();
+
+ for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i) {
+ const auto &Bundle = CS.getOperandBundleAt(i);
+ Record.push_back(C.getOperandBundleTagID(Bundle.getTagName()));
+
+ for (auto &Input : Bundle.Inputs)
+ pushValueAndType(Input, InstID, Record);
+
+ Stream.EmitRecord(bitc::FUNC_CODE_OPERAND_BUNDLE, Record);
+ Record.clear();
+ }
+}
+
+/// pushValue - Like pushValueAndType, but where the type of the value is
+/// omitted (perhaps it was already encoded in an earlier operand).
+void ModuleBitcodeWriter::pushValue(const Value *V, unsigned InstID,
+ SmallVectorImpl<unsigned> &Vals) {
+ unsigned ValID = VE.getValueID(V);
+ Vals.push_back(InstID - ValID);
+}
+
+void ModuleBitcodeWriter::pushValueSigned(const Value *V, unsigned InstID,
+ SmallVectorImpl<uint64_t> &Vals) {
+ unsigned ValID = VE.getValueID(V);
+ int64_t diff = ((int32_t)InstID - (int32_t)ValID);
+ emitSignedInt64(Vals, diff);
+}
+
+/// WriteInstruction - Emit an instruction to the specified stream.
+void ModuleBitcodeWriter::writeInstruction(const Instruction &I,
+ unsigned InstID,
+ SmallVectorImpl<unsigned> &Vals) {
+ unsigned Code = 0;
+ unsigned AbbrevToUse = 0;
+ VE.setInstructionID(&I);
+ switch (I.getOpcode()) {
+ default:
+ if (Instruction::isCast(I.getOpcode())) {
+ Code = bitc::FUNC_CODE_INST_CAST;
+ if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+ AbbrevToUse = FUNCTION_INST_CAST_ABBREV;
+ Vals.push_back(VE.getTypeID(I.getType()));
+ Vals.push_back(getEncodedCastOpcode(I.getOpcode()));
+ } else {
+ assert(isa<BinaryOperator>(I) && "Unknown instruction!");
+ Code = bitc::FUNC_CODE_INST_BINOP;
+ if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+ AbbrevToUse = FUNCTION_INST_BINOP_ABBREV;
+ pushValue(I.getOperand(1), InstID, Vals);
+ Vals.push_back(getEncodedBinaryOpcode(I.getOpcode()));
+ uint64_t Flags = getOptimizationFlags(&I);
+ if (Flags != 0) {
+ if (AbbrevToUse == FUNCTION_INST_BINOP_ABBREV)
+ AbbrevToUse = FUNCTION_INST_BINOP_FLAGS_ABBREV;
+ Vals.push_back(Flags);
+ }
+ }
+ break;
+ case Instruction::FNeg: {
+ Code = bitc::FUNC_CODE_INST_UNOP;
+ if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+ AbbrevToUse = FUNCTION_INST_UNOP_ABBREV;
+ Vals.push_back(getEncodedUnaryOpcode(I.getOpcode()));
+ uint64_t Flags = getOptimizationFlags(&I);
+ if (Flags != 0) {
+ if (AbbrevToUse == FUNCTION_INST_UNOP_ABBREV)
+ AbbrevToUse = FUNCTION_INST_UNOP_FLAGS_ABBREV;
+ Vals.push_back(Flags);
+ }
+ break;
+ }
+ case Instruction::GetElementPtr: {
+ Code = bitc::FUNC_CODE_INST_GEP;
+ AbbrevToUse = FUNCTION_INST_GEP_ABBREV;
+ auto &GEPInst = cast<GetElementPtrInst>(I);
+ Vals.push_back(GEPInst.isInBounds());
+ Vals.push_back(VE.getTypeID(GEPInst.getSourceElementType()));
+ for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
+ pushValueAndType(I.getOperand(i), InstID, Vals);
+ break;
+ }
+ case Instruction::ExtractValue: {
+ Code = bitc::FUNC_CODE_INST_EXTRACTVAL;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ const ExtractValueInst *EVI = cast<ExtractValueInst>(&I);
+ Vals.append(EVI->idx_begin(), EVI->idx_end());
+ break;
+ }
+ case Instruction::InsertValue: {
+ Code = bitc::FUNC_CODE_INST_INSERTVAL;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ pushValueAndType(I.getOperand(1), InstID, Vals);
+ const InsertValueInst *IVI = cast<InsertValueInst>(&I);
+ Vals.append(IVI->idx_begin(), IVI->idx_end());
+ break;
+ }
+ case Instruction::Select: {
+ Code = bitc::FUNC_CODE_INST_VSELECT;
+ pushValueAndType(I.getOperand(1), InstID, Vals);
+ pushValue(I.getOperand(2), InstID, Vals);
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ uint64_t Flags = getOptimizationFlags(&I);
+ if (Flags != 0)
+ Vals.push_back(Flags);
+ break;
+ }
+ case Instruction::ExtractElement:
+ Code = bitc::FUNC_CODE_INST_EXTRACTELT;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ pushValueAndType(I.getOperand(1), InstID, Vals);
+ break;
+ case Instruction::InsertElement:
+ Code = bitc::FUNC_CODE_INST_INSERTELT;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ pushValue(I.getOperand(1), InstID, Vals);
+ pushValueAndType(I.getOperand(2), InstID, Vals);
+ break;
+ case Instruction::ShuffleVector:
+ Code = bitc::FUNC_CODE_INST_SHUFFLEVEC;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ pushValue(I.getOperand(1), InstID, Vals);
+ pushValue(cast<ShuffleVectorInst>(I).getShuffleMaskForBitcode(), InstID,
+ Vals);
+ break;
+ case Instruction::ICmp:
+ case Instruction::FCmp: {
+ // compare returning Int1Ty or vector of Int1Ty
+ Code = bitc::FUNC_CODE_INST_CMP2;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ pushValue(I.getOperand(1), InstID, Vals);
+ Vals.push_back(cast<CmpInst>(I).getPredicate());
+ uint64_t Flags = getOptimizationFlags(&I);
+ if (Flags != 0)
+ Vals.push_back(Flags);
+ break;
+ }
+
+ case Instruction::Ret:
+ {
+ Code = bitc::FUNC_CODE_INST_RET;
+ unsigned NumOperands = I.getNumOperands();
+ if (NumOperands == 0)
+ AbbrevToUse = FUNCTION_INST_RET_VOID_ABBREV;
+ else if (NumOperands == 1) {
+ if (!pushValueAndType(I.getOperand(0), InstID, Vals))
+ AbbrevToUse = FUNCTION_INST_RET_VAL_ABBREV;
+ } else {
+ for (unsigned i = 0, e = NumOperands; i != e; ++i)
+ pushValueAndType(I.getOperand(i), InstID, Vals);
+ }
+ }
+ break;
+ case Instruction::Br:
+ {
+ Code = bitc::FUNC_CODE_INST_BR;
+ const BranchInst &II = cast<BranchInst>(I);
+ Vals.push_back(VE.getValueID(II.getSuccessor(0)));
+ if (II.isConditional()) {
+ Vals.push_back(VE.getValueID(II.getSuccessor(1)));
+ pushValue(II.getCondition(), InstID, Vals);
+ }
+ }
+ break;
+ case Instruction::Switch:
+ {
+ Code = bitc::FUNC_CODE_INST_SWITCH;
+ const SwitchInst &SI = cast<SwitchInst>(I);
+ Vals.push_back(VE.getTypeID(SI.getCondition()->getType()));
+ pushValue(SI.getCondition(), InstID, Vals);
+ Vals.push_back(VE.getValueID(SI.getDefaultDest()));
+ for (auto Case : SI.cases()) {
+ Vals.push_back(VE.getValueID(Case.getCaseValue()));
+ Vals.push_back(VE.getValueID(Case.getCaseSuccessor()));
+ }
+ }
+ break;
+ case Instruction::IndirectBr:
+ Code = bitc::FUNC_CODE_INST_INDIRECTBR;
+ Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
+ // Encode the address operand as relative, but not the basic blocks.
+ pushValue(I.getOperand(0), InstID, Vals);
+ for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i)
+ Vals.push_back(VE.getValueID(I.getOperand(i)));
+ break;
+
+ case Instruction::Invoke: {
+ const InvokeInst *II = cast<InvokeInst>(&I);
+ const Value *Callee = II->getCalledOperand();
+ FunctionType *FTy = II->getFunctionType();
+
+ if (II->hasOperandBundles())
+ writeOperandBundles(*II, InstID);
+
+ Code = bitc::FUNC_CODE_INST_INVOKE;
+
+ Vals.push_back(VE.getAttributeListID(II->getAttributes()));
+ Vals.push_back(II->getCallingConv() | 1 << 13);
+ Vals.push_back(VE.getValueID(II->getNormalDest()));
+ Vals.push_back(VE.getValueID(II->getUnwindDest()));
+ Vals.push_back(VE.getTypeID(FTy));
+ pushValueAndType(Callee, InstID, Vals);
+
+ // Emit value #'s for the fixed parameters.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+ pushValue(I.getOperand(i), InstID, Vals); // fixed param.
+
+ // Emit type/value pairs for varargs params.
+ if (FTy->isVarArg()) {
+ for (unsigned i = FTy->getNumParams(), e = II->arg_size(); i != e; ++i)
+ pushValueAndType(I.getOperand(i), InstID, Vals); // vararg
+ }
+ break;
+ }
+ case Instruction::Resume:
+ Code = bitc::FUNC_CODE_INST_RESUME;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ break;
+ case Instruction::CleanupRet: {
+ Code = bitc::FUNC_CODE_INST_CLEANUPRET;
+ const auto &CRI = cast<CleanupReturnInst>(I);
+ pushValue(CRI.getCleanupPad(), InstID, Vals);
+ if (CRI.hasUnwindDest())
+ Vals.push_back(VE.getValueID(CRI.getUnwindDest()));
+ break;
+ }
+ case Instruction::CatchRet: {
+ Code = bitc::FUNC_CODE_INST_CATCHRET;
+ const auto &CRI = cast<CatchReturnInst>(I);
+ pushValue(CRI.getCatchPad(), InstID, Vals);
+ Vals.push_back(VE.getValueID(CRI.getSuccessor()));
+ break;
+ }
+ case Instruction::CleanupPad:
+ case Instruction::CatchPad: {
+ const auto &FuncletPad = cast<FuncletPadInst>(I);
+ Code = isa<CatchPadInst>(FuncletPad) ? bitc::FUNC_CODE_INST_CATCHPAD
+ : bitc::FUNC_CODE_INST_CLEANUPPAD;
+ pushValue(FuncletPad.getParentPad(), InstID, Vals);
+
+ unsigned NumArgOperands = FuncletPad.getNumArgOperands();
+ Vals.push_back(NumArgOperands);
+ for (unsigned Op = 0; Op != NumArgOperands; ++Op)
+ pushValueAndType(FuncletPad.getArgOperand(Op), InstID, Vals);
+ break;
+ }
+ case Instruction::CatchSwitch: {
+ Code = bitc::FUNC_CODE_INST_CATCHSWITCH;
+ const auto &CatchSwitch = cast<CatchSwitchInst>(I);
+
+ pushValue(CatchSwitch.getParentPad(), InstID, Vals);
+
+ unsigned NumHandlers = CatchSwitch.getNumHandlers();
+ Vals.push_back(NumHandlers);
+ for (const BasicBlock *CatchPadBB : CatchSwitch.handlers())
+ Vals.push_back(VE.getValueID(CatchPadBB));
+
+ if (CatchSwitch.hasUnwindDest())
+ Vals.push_back(VE.getValueID(CatchSwitch.getUnwindDest()));
+ break;
+ }
+ case Instruction::CallBr: {
+ const CallBrInst *CBI = cast<CallBrInst>(&I);
+ const Value *Callee = CBI->getCalledOperand();
+ FunctionType *FTy = CBI->getFunctionType();
+
+ if (CBI->hasOperandBundles())
+ writeOperandBundles(*CBI, InstID);
+
+ Code = bitc::FUNC_CODE_INST_CALLBR;
+
+ Vals.push_back(VE.getAttributeListID(CBI->getAttributes()));
+
+ Vals.push_back(CBI->getCallingConv() << bitc::CALL_CCONV |
+ 1 << bitc::CALL_EXPLICIT_TYPE);
+
+ Vals.push_back(VE.getValueID(CBI->getDefaultDest()));
+ Vals.push_back(CBI->getNumIndirectDests());
+ for (unsigned i = 0, e = CBI->getNumIndirectDests(); i != e; ++i)
+ Vals.push_back(VE.getValueID(CBI->getIndirectDest(i)));
+
+ Vals.push_back(VE.getTypeID(FTy));
+ pushValueAndType(Callee, InstID, Vals);
+
+ // Emit value #'s for the fixed parameters.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
+ pushValue(I.getOperand(i), InstID, Vals); // fixed param.
+
+ // Emit type/value pairs for varargs params.
+ if (FTy->isVarArg()) {
+ for (unsigned i = FTy->getNumParams(), e = CBI->arg_size(); i != e; ++i)
+ pushValueAndType(I.getOperand(i), InstID, Vals); // vararg
+ }
+ break;
+ }
+ case Instruction::Unreachable:
+ Code = bitc::FUNC_CODE_INST_UNREACHABLE;
+ AbbrevToUse = FUNCTION_INST_UNREACHABLE_ABBREV;
+ break;
+
+ case Instruction::PHI: {
+ const PHINode &PN = cast<PHINode>(I);
+ Code = bitc::FUNC_CODE_INST_PHI;
+ // With the newer instruction encoding, forward references could give
+ // negative valued IDs. This is most common for PHIs, so we use
+ // signed VBRs.
+ SmallVector<uint64_t, 128> Vals64;
+ Vals64.push_back(VE.getTypeID(PN.getType()));
+ for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) {
+ pushValueSigned(PN.getIncomingValue(i), InstID, Vals64);
+ Vals64.push_back(VE.getValueID(PN.getIncomingBlock(i)));
+ }
+
+ uint64_t Flags = getOptimizationFlags(&I);
+ if (Flags != 0)
+ Vals64.push_back(Flags);
+
+ // Emit a Vals64 vector and exit.
+ Stream.EmitRecord(Code, Vals64, AbbrevToUse);
+ Vals64.clear();
+ return;
+ }
+
+ case Instruction::LandingPad: {
+ const LandingPadInst &LP = cast<LandingPadInst>(I);
+ Code = bitc::FUNC_CODE_INST_LANDINGPAD;
+ Vals.push_back(VE.getTypeID(LP.getType()));
+ Vals.push_back(LP.isCleanup());
+ Vals.push_back(LP.getNumClauses());
+ for (unsigned I = 0, E = LP.getNumClauses(); I != E; ++I) {
+ if (LP.isCatch(I))
+ Vals.push_back(LandingPadInst::Catch);
+ else
+ Vals.push_back(LandingPadInst::Filter);
+ pushValueAndType(LP.getClause(I), InstID, Vals);
+ }
+ break;
+ }
+
+ case Instruction::Alloca: {
+ Code = bitc::FUNC_CODE_INST_ALLOCA;
+ const AllocaInst &AI = cast<AllocaInst>(I);
+ Vals.push_back(VE.getTypeID(AI.getAllocatedType()));
+ Vals.push_back(VE.getTypeID(I.getOperand(0)->getType()));
+ Vals.push_back(VE.getValueID(I.getOperand(0))); // size.
+ using APV = AllocaPackedValues;
+ unsigned Record = 0;
+ unsigned EncodedAlign = getEncodedAlign(AI.getAlign());
+ Bitfield::set<APV::AlignLower>(
+ Record, EncodedAlign & ((1 << APV::AlignLower::Bits) - 1));
+ Bitfield::set<APV::AlignUpper>(Record,
+ EncodedAlign >> APV::AlignLower::Bits);
+ Bitfield::set<APV::UsedWithInAlloca>(Record, AI.isUsedWithInAlloca());
+ Bitfield::set<APV::ExplicitType>(Record, true);
+ Bitfield::set<APV::SwiftError>(Record, AI.isSwiftError());
+ Vals.push_back(Record);
+ break;
+ }
+
+ case Instruction::Load:
+ if (cast<LoadInst>(I).isAtomic()) {
+ Code = bitc::FUNC_CODE_INST_LOADATOMIC;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ } else {
+ Code = bitc::FUNC_CODE_INST_LOAD;
+ if (!pushValueAndType(I.getOperand(0), InstID, Vals)) // ptr
+ AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
+ }
+ Vals.push_back(VE.getTypeID(I.getType()));
+ Vals.push_back(getEncodedAlign(cast<LoadInst>(I).getAlign()));
+ Vals.push_back(cast<LoadInst>(I).isVolatile());
+ if (cast<LoadInst>(I).isAtomic()) {
+ Vals.push_back(getEncodedOrdering(cast<LoadInst>(I).getOrdering()));
+ Vals.push_back(getEncodedSyncScopeID(cast<LoadInst>(I).getSyncScopeID()));
+ }
+ break;
+ case Instruction::Store:
+ if (cast<StoreInst>(I).isAtomic())
+ Code = bitc::FUNC_CODE_INST_STOREATOMIC;
+ else
+ Code = bitc::FUNC_CODE_INST_STORE;
+ pushValueAndType(I.getOperand(1), InstID, Vals); // ptrty + ptr
+ pushValueAndType(I.getOperand(0), InstID, Vals); // valty + val
+ Vals.push_back(getEncodedAlign(cast<StoreInst>(I).getAlign()));
+ Vals.push_back(cast<StoreInst>(I).isVolatile());
+ if (cast<StoreInst>(I).isAtomic()) {
+ Vals.push_back(getEncodedOrdering(cast<StoreInst>(I).getOrdering()));
+ Vals.push_back(
+ getEncodedSyncScopeID(cast<StoreInst>(I).getSyncScopeID()));
+ }
+ break;
+ case Instruction::AtomicCmpXchg:
+ Code = bitc::FUNC_CODE_INST_CMPXCHG;
+ pushValueAndType(I.getOperand(0), InstID, Vals); // ptrty + ptr
+ pushValueAndType(I.getOperand(1), InstID, Vals); // cmp.
+ pushValue(I.getOperand(2), InstID, Vals); // newval.
+ Vals.push_back(cast<AtomicCmpXchgInst>(I).isVolatile());
+ Vals.push_back(
+ getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getSuccessOrdering()));
+ Vals.push_back(
+ getEncodedSyncScopeID(cast<AtomicCmpXchgInst>(I).getSyncScopeID()));
+ Vals.push_back(
+ getEncodedOrdering(cast<AtomicCmpXchgInst>(I).getFailureOrdering()));
+ Vals.push_back(cast<AtomicCmpXchgInst>(I).isWeak());
+ Vals.push_back(getEncodedAlign(cast<AtomicCmpXchgInst>(I).getAlign()));
+ break;
+ case Instruction::AtomicRMW:
+ Code = bitc::FUNC_CODE_INST_ATOMICRMW;
+ pushValueAndType(I.getOperand(0), InstID, Vals); // ptrty + ptr
+ pushValueAndType(I.getOperand(1), InstID, Vals); // valty + val
+ Vals.push_back(
+ getEncodedRMWOperation(cast<AtomicRMWInst>(I).getOperation()));
+ Vals.push_back(cast<AtomicRMWInst>(I).isVolatile());
+ Vals.push_back(getEncodedOrdering(cast<AtomicRMWInst>(I).getOrdering()));
+ Vals.push_back(
+ getEncodedSyncScopeID(cast<AtomicRMWInst>(I).getSyncScopeID()));
+ Vals.push_back(getEncodedAlign(cast<AtomicRMWInst>(I).getAlign()));
+ break;
+ case Instruction::Fence:
+ Code = bitc::FUNC_CODE_INST_FENCE;
+ Vals.push_back(getEncodedOrdering(cast<FenceInst>(I).getOrdering()));
+ Vals.push_back(getEncodedSyncScopeID(cast<FenceInst>(I).getSyncScopeID()));
+ break;
+ case Instruction::Call: {
+ const CallInst &CI = cast<CallInst>(I);
+ FunctionType *FTy = CI.getFunctionType();
+
+ if (CI.hasOperandBundles())
+ writeOperandBundles(CI, InstID);
+
+ Code = bitc::FUNC_CODE_INST_CALL;
+
+ Vals.push_back(VE.getAttributeListID(CI.getAttributes()));
+
+ unsigned Flags = getOptimizationFlags(&I);
+ Vals.push_back(CI.getCallingConv() << bitc::CALL_CCONV |
+ unsigned(CI.isTailCall()) << bitc::CALL_TAIL |
+ unsigned(CI.isMustTailCall()) << bitc::CALL_MUSTTAIL |
+ 1 << bitc::CALL_EXPLICIT_TYPE |
+ unsigned(CI.isNoTailCall()) << bitc::CALL_NOTAIL |
+ unsigned(Flags != 0) << bitc::CALL_FMF);
+ if (Flags != 0)
+ Vals.push_back(Flags);
+
+ Vals.push_back(VE.getTypeID(FTy));
+ pushValueAndType(CI.getCalledOperand(), InstID, Vals); // Callee
+
+ // Emit value #'s for the fixed parameters.
+ for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
+ // Check for labels (can happen with asm labels).
+ if (FTy->getParamType(i)->isLabelTy())
+ Vals.push_back(VE.getValueID(CI.getArgOperand(i)));
+ else
+ pushValue(CI.getArgOperand(i), InstID, Vals); // fixed param.
+ }
+
+ // Emit type/value pairs for varargs params.
+ if (FTy->isVarArg()) {
+ for (unsigned i = FTy->getNumParams(), e = CI.arg_size(); i != e; ++i)
+ pushValueAndType(CI.getArgOperand(i), InstID, Vals); // varargs
+ }
+ break;
+ }
+ case Instruction::VAArg:
+ Code = bitc::FUNC_CODE_INST_VAARG;
+ Vals.push_back(VE.getTypeID(I.getOperand(0)->getType())); // valistty
+ pushValue(I.getOperand(0), InstID, Vals); // valist.
+ Vals.push_back(VE.getTypeID(I.getType())); // restype.
+ break;
+ case Instruction::Freeze:
+ Code = bitc::FUNC_CODE_INST_FREEZE;
+ pushValueAndType(I.getOperand(0), InstID, Vals);
+ break;
+ }
+
+ Stream.EmitRecord(Code, Vals, AbbrevToUse);
+ Vals.clear();
+}
+
+/// Write a GlobalValue VST to the module. The purpose of this data structure is
+/// to allow clients to efficiently find the function body.
+///
+/// \p FunctionToBitcodeIndex maps each defined function to the bit offset of
+/// its function block, recorded when the body was written to the stream.
+void ModuleBitcodeWriter::writeGlobalValueSymbolTable(
+    DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex) {
+  // Get the offset of the VST we are writing, and backpatch it into
+  // the VST forward declaration record.
+  uint64_t VSTOffset = Stream.GetCurrentBitNo();
+  // The BitcodeStartBit was the stream offset of the identification block.
+  VSTOffset -= bitcodeStartBit();
+  assert((VSTOffset & 31) == 0 && "VST block not 32-bit aligned");
+  // Note that we add 1 here because the offset is relative to one word
+  // before the start of the identification block, which was historically
+  // always the start of the regular bitcode header.
+  Stream.BackpatchWord(VSTOffsetPlaceholder, VSTOffset / 32 + 1);
+
+  Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
+
+  // Abbreviation for VST_CODE_FNENTRY: [valueid (VBR-8), funcoffset (VBR-8)].
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_FNENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // funcoffset
+  unsigned FnEntryAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+  for (const Function &F : M) {
+    uint64_t Record[2];
+
+    // Declarations have no body block, so there is no offset to record.
+    if (F.isDeclaration())
+      continue;
+
+    Record[0] = VE.getValueID(&F);
+
+    // Save the word offset of the function (from the start of the
+    // actual bitcode written to the stream).
+    uint64_t BitcodeIndex = FunctionToBitcodeIndex[&F] - bitcodeStartBit();
+    assert((BitcodeIndex & 31) == 0 && "function block not 32-bit aligned");
+    // Note that we add 1 here because the offset is relative to one word
+    // before the start of the identification block, which was historically
+    // always the start of the regular bitcode header.
+    Record[1] = BitcodeIndex / 32 + 1;
+
+    Stream.EmitRecord(bitc::VST_CODE_FNENTRY, Record, FnEntryAbbrev);
+  }
+
+  Stream.ExitBlock();
+}
+
+/// Emit names for arguments, instructions and basic blocks in a function.
+/// One VST_CODE_ENTRY / VST_CODE_BBENTRY record is emitted per named value;
+/// nothing is written at all when the symbol table is empty.
+void ModuleBitcodeWriter::writeFunctionLevelValueSymbolTable(
+    const ValueSymbolTable &VST) {
+  if (VST.empty())
+    return;
+
+  Stream.EnterSubblock(bitc::VALUE_SYMTAB_BLOCK_ID, 4);
+
+  // FIXME: Set up the abbrev, we know how many values there are!
+  // FIXME: We know if the type names can use 7-bit ascii.
+  SmallVector<uint64_t, 64> NameVals;
+
+  for (const ValueName &Name : VST) {
+    // Figure out the encoding to use for the name.
+    StringEncoding Bits = getStringEncoding(Name.getKey());
+
+    // Default to the 8-bit abbrev; narrow to 7-bit or char6 when the name's
+    // characters allow it (see the abbrevs registered in writeBlockInfo).
+    unsigned AbbrevToUse = VST_ENTRY_8_ABBREV;
+    NameVals.push_back(VE.getValueID(Name.getValue()));
+
+    // VST_CODE_ENTRY: [valueid, namechar x N]
+    // VST_CODE_BBENTRY: [bbid, namechar x N]
+    unsigned Code;
+    if (isa<BasicBlock>(Name.getValue())) {
+      Code = bitc::VST_CODE_BBENTRY;
+      if (Bits == SE_Char6)
+        AbbrevToUse = VST_BBENTRY_6_ABBREV;
+    } else {
+      Code = bitc::VST_CODE_ENTRY;
+      if (Bits == SE_Char6)
+        AbbrevToUse = VST_ENTRY_6_ABBREV;
+      else if (Bits == SE_Fixed7)
+        AbbrevToUse = VST_ENTRY_7_ABBREV;
+    }
+
+    for (const auto P : Name.getKey())
+      NameVals.push_back((unsigned char)P);
+
+    // Emit the finished record.
+    Stream.EmitRecord(Code, NameVals, AbbrevToUse);
+    NameVals.clear();
+  }
+
+  Stream.ExitBlock();
+}
+
+/// Emit a single use-list order record: the shuffle permutation followed by
+/// the ID of the value whose use list it reorders. Basic blocks get the
+/// dedicated USELIST_CODE_BB record code; everything else uses the default.
+void ModuleBitcodeWriter::writeUseList(UseListOrder &&Order) {
+  assert(Order.Shuffle.size() >= 2 && "Shuffle too small");
+  unsigned Code;
+  if (isa<BasicBlock>(Order.V))
+    Code = bitc::USELIST_CODE_BB;
+  else
+    Code = bitc::USELIST_CODE_DEFAULT;
+
+  SmallVector<uint64_t, 64> Record(Order.Shuffle.begin(), Order.Shuffle.end());
+  Record.push_back(VE.getValueID(Order.V));
+  Stream.EmitRecord(Code, Record);
+}
+
+/// Emit the USELIST_BLOCK for function \p F (or, with a null F, for
+/// module-level values). Consumes the matching entries from the back of
+/// VE.UseListOrders; emits nothing when no entries belong to F.
+void ModuleBitcodeWriter::writeUseListBlock(const Function *F) {
+  assert(VE.shouldPreserveUseListOrder() &&
+         "Expected to be preserving use-list order");
+
+  // Orders for this function sit contiguously at the back of the list.
+  auto hasMore = [&]() {
+    return !VE.UseListOrders.empty() && VE.UseListOrders.back().F == F;
+  };
+  if (!hasMore())
+    // Nothing to do.
+    return;
+
+  Stream.EnterSubblock(bitc::USELIST_BLOCK_ID, 3);
+  while (hasMore()) {
+    writeUseList(std::move(VE.UseListOrders.back()));
+    VE.UseListOrders.pop_back();
+  }
+  Stream.ExitBlock();
+}
+
+/// Emit a function body to the module stream.
+///
+/// Records the function block's starting bit offset in
+/// \p FunctionToBitcodeIndex so writeGlobalValueSymbolTable can emit it later.
+void ModuleBitcodeWriter::writeFunction(
+    const Function &F,
+    DenseMap<const Function *, uint64_t> &FunctionToBitcodeIndex) {
+  // Save the bitcode index of the start of this function block for recording
+  // in the VST.
+  FunctionToBitcodeIndex[&F] = Stream.GetCurrentBitNo();
+
+  Stream.EnterSubblock(bitc::FUNCTION_BLOCK_ID, 4);
+  VE.incorporateFunction(F);
+
+  SmallVector<unsigned, 64> Vals;
+
+  // Emit the number of basic blocks, so the reader can create them ahead of
+  // time.
+  Vals.push_back(VE.getBasicBlocks().size());
+  Stream.EmitRecord(bitc::FUNC_CODE_DECLAREBLOCKS, Vals);
+  Vals.clear();
+
+  // If there are function-local constants, emit them now.
+  unsigned CstStart, CstEnd;
+  VE.getFunctionConstantRange(CstStart, CstEnd);
+  writeConstants(CstStart, CstEnd, false);
+
+  // If there is function-local metadata, emit it now.
+  writeFunctionMetadata(F);
+
+  // Keep a running idea of what the instruction ID is.
+  unsigned InstID = CstEnd;
+
+  bool NeedsMetadataAttachment = F.hasMetadata();
+
+  DILocation *LastDL = nullptr;
+  // Finally, emit all the instructions, in order.
+  for (const BasicBlock &BB : F)
+    for (const Instruction &I : BB) {
+      writeInstruction(I, InstID, Vals);
+
+      // Only instructions that produce a value get an ID.
+      if (!I.getType()->isVoidTy())
+        ++InstID;
+
+      // If the instruction has metadata, write a metadata attachment later.
+      NeedsMetadataAttachment |= I.hasMetadataOtherThanDebugLoc();
+
+      // If the instruction has a debug location, emit it.
+      DILocation *DL = I.getDebugLoc();
+      if (!DL)
+        continue;
+
+      if (DL == LastDL) {
+        // Just repeat the same debug loc as last time.
+        Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC_AGAIN, Vals);
+        continue;
+      }
+
+      // DEBUG_LOC: [line, col, scope, inlined-at, isImplicitCode]
+      Vals.push_back(DL->getLine());
+      Vals.push_back(DL->getColumn());
+      Vals.push_back(VE.getMetadataOrNullID(DL->getScope()));
+      Vals.push_back(VE.getMetadataOrNullID(DL->getInlinedAt()));
+      Vals.push_back(DL->isImplicitCode());
+      Stream.EmitRecord(bitc::FUNC_CODE_DEBUG_LOC, Vals);
+      Vals.clear();
+
+      LastDL = DL;
+    }
+
+  // Emit names for all the instructions etc.
+  if (auto *Symtab = F.getValueSymbolTable())
+    writeFunctionLevelValueSymbolTable(*Symtab);
+
+  if (NeedsMetadataAttachment)
+    writeFunctionMetadataAttachment(F);
+  if (VE.shouldPreserveUseListOrder())
+    writeUseListBlock(&F);
+  VE.purgeFunction();
+  Stream.ExitBlock();
+}
+
+// Emit blockinfo, which defines the standard abbreviations etc.
+//
+// NOTE: The abbreviations below must be registered in exactly this order:
+// every EmitBlockInfoAbbrev result is checked against the corresponding
+// *_ABBREV constant that the record emitters elsewhere in this writer pass
+// as AbbrevToUse, and any mismatch is a fatal inconsistency.
+void ModuleBitcodeWriter::writeBlockInfo() {
+  // We only want to emit block info records for blocks that have multiple
+  // instances: CONSTANTS_BLOCK, FUNCTION_BLOCK and VALUE_SYMTAB_BLOCK.
+  // Other blocks can define their abbrevs inline.
+  Stream.EnterBlockInfoBlock();
+
+  { // 8-bit fixed-width VST_CODE_ENTRY/VST_CODE_BBENTRY strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) !=
+        VST_ENTRY_8_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // 7-bit fixed width VST_CODE_ENTRY strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
+    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) !=
+        VST_ENTRY_7_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // 6-bit char6 VST_CODE_ENTRY strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_ENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) !=
+        VST_ENTRY_6_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // 6-bit char6 VST_CODE_BBENTRY strings.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::VST_CODE_BBENTRY));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+    if (Stream.EmitBlockInfoAbbrev(bitc::VALUE_SYMTAB_BLOCK_ID, Abbv) !=
+        VST_BBENTRY_6_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // SETTYPE abbrev for CONSTANTS_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_SETTYPE));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed,
+                              VE.computeBitsRequiredForTypeIndicies()));
+    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
+        CONSTANTS_SETTYPE_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // INTEGER abbrev for CONSTANTS_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_INTEGER));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
+        CONSTANTS_INTEGER_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // CE_CAST abbrev for CONSTANTS_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_CE_CAST));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // cast opc
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // typeid
+                              VE.computeBitsRequiredForTypeIndicies()));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // value id
+
+    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
+        CONSTANTS_CE_CAST_Abbrev)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // NULL abbrev for CONSTANTS_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::CST_CODE_NULL));
+    if (Stream.EmitBlockInfoAbbrev(bitc::CONSTANTS_BLOCK_ID, Abbv) !=
+        CONSTANTS_NULL_Abbrev)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  // FIXME: This should only use space for first class types!
+
+  { // INST_LOAD abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_LOAD));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Ptr
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
+                              VE.computeBitsRequiredForTypeIndicies()));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // Align
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // volatile
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_LOAD_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_UNOP abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_UNOP));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_UNOP_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_UNOP_FLAGS abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_UNOP));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); // flags
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_UNOP_FLAGS_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_BINOP abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_BINOP_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_BINOP_FLAGS abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_BINOP));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // RHS
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); // flags
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_BINOP_FLAGS_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_CAST abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_CAST));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // OpVal
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
+                              VE.computeBitsRequiredForTypeIndicies()));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // opc
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_CAST_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  { // INST_RET abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET));
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_RET_VOID_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_RET abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_RET));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ValID
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_RET_VAL_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_UNREACHABLE abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_UNREACHABLE));
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_UNREACHABLE_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+  { // INST_GEP abbrev for FUNCTION_BLOCK.
+    auto Abbv = std::make_shared<BitCodeAbbrev>();
+    Abbv->Add(BitCodeAbbrevOp(bitc::FUNC_CODE_INST_GEP));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, // dest ty
+                              Log2_32_Ceil(VE.getTypes().size() + 1)));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+    Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+    if (Stream.EmitBlockInfoAbbrev(bitc::FUNCTION_BLOCK_ID, Abbv) !=
+        FUNCTION_INST_GEP_ABBREV)
+      llvm_unreachable("Unexpected abbrev ordering!");
+  }
+
+  Stream.ExitBlock();
+}
+
+/// Write the module path strings, currently only used when generating
+/// a combined index file.
+///
+/// For each module an MST_CODE_ENTRY record is emitted as [module id,
+/// path chars...], optionally followed by an MST_CODE_HASH record carrying
+/// the module's hash when it is non-zero.
+void IndexBitcodeWriter::writeModStrings() {
+  Stream.EnterSubblock(bitc::MODULE_STRTAB_BLOCK_ID, 3);
+
+  // TODO: See which abbrev sizes we actually need to emit
+
+  // 8-bit fixed-width MST_ENTRY strings.
+  auto Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8));
+  unsigned Abbrev8Bit = Stream.EmitAbbrev(std::move(Abbv));
+
+  // 7-bit fixed width MST_ENTRY strings.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7));
+  unsigned Abbrev7Bit = Stream.EmitAbbrev(std::move(Abbv));
+
+  // 6-bit char6 MST_ENTRY strings.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_ENTRY));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+  unsigned Abbrev6Bit = Stream.EmitAbbrev(std::move(Abbv));
+
+  // Module Hash, 160 bits SHA1. Optionally, emitted after each MST_CODE_ENTRY.
+  Abbv = std::make_shared<BitCodeAbbrev>();
+  Abbv->Add(BitCodeAbbrevOp(bitc::MST_CODE_HASH));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+  unsigned AbbrevHash = Stream.EmitAbbrev(std::move(Abbv));
+
+  SmallVector<unsigned, 64> Vals;
+  forEachModule(
+      [&](const StringMapEntry<std::pair<uint64_t, ModuleHash>> &MPSE) {
+        StringRef Key = MPSE.getKey();
+        const auto &Value = MPSE.getValue();
+        // Pick the narrowest abbrev the path's characters allow.
+        StringEncoding Bits = getStringEncoding(Key);
+        unsigned AbbrevToUse = Abbrev8Bit;
+        if (Bits == SE_Char6)
+          AbbrevToUse = Abbrev6Bit;
+        else if (Bits == SE_Fixed7)
+          AbbrevToUse = Abbrev7Bit;
+
+        Vals.push_back(Value.first);
+        Vals.append(Key.begin(), Key.end());
+
+        // Emit the finished record.
+        Stream.EmitRecord(bitc::MST_CODE_ENTRY, Vals, AbbrevToUse);
+
+        // Emit an optional hash for the module now
+        const auto &Hash = Value.second;
+        if (llvm::any_of(Hash, [](uint32_t H) { return H; })) {
+          Vals.assign(Hash.begin(), Hash.end());
+          // Emit the hash record.
+          Stream.EmitRecord(bitc::MST_CODE_HASH, Vals, AbbrevHash);
+        }
+
+        Vals.clear();
+      });
+  Stream.ExitBlock();
+}
+
+/// Write the function type metadata related records that need to appear before
+/// a function summary entry (whether per-module or combined).
+///
+/// \p GetValueID maps a ValueInfo to its value ID, or returns None when the
+/// ID is unknown; in that case the enclosing param-access entry is dropped.
+template <typename Fn>
+static void writeFunctionTypeMetadataRecords(BitstreamWriter &Stream,
+                                             FunctionSummary *FS,
+                                             Fn GetValueID) {
+  if (!FS->type_tests().empty())
+    Stream.EmitRecord(bitc::FS_TYPE_TESTS, FS->type_tests());
+
+  // Scratch buffer reused (and cleared) for each record emitted below.
+  SmallVector<uint64_t, 64> Record;
+
+  // One record per vector: flattened [guid, offset] pairs.
+  auto WriteVFuncIdVec = [&](uint64_t Ty,
+                             ArrayRef<FunctionSummary::VFuncId> VFs) {
+    if (VFs.empty())
+      return;
+    Record.clear();
+    for (auto &VF : VFs) {
+      Record.push_back(VF.GUID);
+      Record.push_back(VF.Offset);
+    }
+    Stream.EmitRecord(Ty, Record);
+  };
+
+  WriteVFuncIdVec(bitc::FS_TYPE_TEST_ASSUME_VCALLS,
+                  FS->type_test_assume_vcalls());
+  WriteVFuncIdVec(bitc::FS_TYPE_CHECKED_LOAD_VCALLS,
+                  FS->type_checked_load_vcalls());
+
+  // One record per const-vcall: [guid, offset, args...].
+  auto WriteConstVCallVec = [&](uint64_t Ty,
+                                ArrayRef<FunctionSummary::ConstVCall> VCs) {
+    for (auto &VC : VCs) {
+      Record.clear();
+      Record.push_back(VC.VFunc.GUID);
+      Record.push_back(VC.VFunc.Offset);
+      llvm::append_range(Record, VC.Args);
+      Stream.EmitRecord(Ty, Record);
+    }
+  };
+
+  WriteConstVCallVec(bitc::FS_TYPE_TEST_ASSUME_CONST_VCALL,
+                     FS->type_test_assume_const_vcalls());
+  WriteConstVCallVec(bitc::FS_TYPE_CHECKED_LOAD_CONST_VCALL,
+                     FS->type_checked_load_const_vcalls());
+
+  // Ranges are emitted as a signed [lower, upper] pair after truncation
+  // to the summary's fixed range width.
+  auto WriteRange = [&](ConstantRange Range) {
+    Range = Range.sextOrTrunc(FunctionSummary::ParamAccess::RangeWidth);
+    assert(Range.getLower().getNumWords() == 1);
+    assert(Range.getUpper().getNumWords() == 1);
+    emitSignedInt64(Record, *Range.getLower().getRawData());
+    emitSignedInt64(Record, *Range.getUpper().getRawData());
+  };
+
+  if (!FS->paramAccesses().empty()) {
+    Record.clear();
+    for (auto &Arg : FS->paramAccesses()) {
+      size_t UndoSize = Record.size();
+      Record.push_back(Arg.ParamNo);
+      WriteRange(Arg.Use);
+      Record.push_back(Arg.Calls.size());
+      for (auto &Call : Arg.Calls) {
+        Record.push_back(Call.ParamNo);
+        Optional<unsigned> ValueID = GetValueID(Call.Callee);
+        if (!ValueID) {
+          // If ValueID is unknown we can't drop just this call, we must drop
+          // entire parameter.
+          Record.resize(UndoSize);
+          break;
+        }
+        Record.push_back(*ValueID);
+        WriteRange(Call.Offsets);
+      }
+    }
+    if (!Record.empty())
+      Stream.EmitRecord(bitc::FS_PARAM_ACCESS, Record);
+  }
+}
+
+/// Collect type IDs from type tests used by function.
+/// Gathers GUIDs from type tests, vcall lists, and const-vcall lists of
+/// \p FS into \p ReferencedTypeIds.
+static void
+getReferencedTypeIds(FunctionSummary *FS,
+                     std::set<GlobalValue::GUID> &ReferencedTypeIds) {
+  // Type tests reference a type id directly by GUID.
+  if (!FS->type_tests().empty())
+    for (auto &TT : FS->type_tests())
+      ReferencedTypeIds.insert(TT);
+
+  auto GetReferencedTypesFromVFuncIdVec =
+      [&](ArrayRef<FunctionSummary::VFuncId> VFs) {
+        for (auto &VF : VFs)
+          ReferencedTypeIds.insert(VF.GUID);
+      };
+
+  GetReferencedTypesFromVFuncIdVec(FS->type_test_assume_vcalls());
+  GetReferencedTypesFromVFuncIdVec(FS->type_checked_load_vcalls());
+
+  auto GetReferencedTypesFromConstVCallVec =
+      [&](ArrayRef<FunctionSummary::ConstVCall> VCs) {
+        for (auto &VC : VCs)
+          ReferencedTypeIds.insert(VC.VFunc.GUID);
+      };
+
+  GetReferencedTypesFromConstVCallVec(FS->type_test_assume_const_vcalls());
+  GetReferencedTypesFromConstVCallVec(FS->type_checked_load_const_vcalls());
+}
+
+/// Append one by-arg devirtualization resolution to \p NameVals as
+/// [numargs, args..., kind, info, byte, bit].
+static void writeWholeProgramDevirtResolutionByArg(
+    SmallVector<uint64_t, 64> &NameVals, const std::vector<uint64_t> &args,
+    const WholeProgramDevirtResolution::ByArg &ByArg) {
+  NameVals.push_back(args.size());
+  llvm::append_range(NameVals, args);
+
+  NameVals.push_back(ByArg.TheKind);
+  NameVals.push_back(ByArg.Info);
+  NameVals.push_back(ByArg.Byte);
+  NameVals.push_back(ByArg.Bit);
+}
+
+/// Append one whole-program devirtualization resolution to \p NameVals as
+/// [id, kind, single-impl-name offset, single-impl-name size, numargs,
+/// by-arg resolutions...]. The name itself is interned in \p StrtabBuilder.
+static void writeWholeProgramDevirtResolution(
+    SmallVector<uint64_t, 64> &NameVals, StringTableBuilder &StrtabBuilder,
+    uint64_t Id, const WholeProgramDevirtResolution &Wpd) {
+  NameVals.push_back(Id);
+
+  NameVals.push_back(Wpd.TheKind);
+  NameVals.push_back(StrtabBuilder.add(Wpd.SingleImplName));
+  NameVals.push_back(Wpd.SingleImplName.size());
+
+  NameVals.push_back(Wpd.ResByArg.size());
+  for (auto &A : Wpd.ResByArg)
+    writeWholeProgramDevirtResolutionByArg(NameVals, A.first, A.second);
+}
+
+/// Append a type-id summary to \p NameVals: the string-table reference to
+/// \p Id, the type-test resolution fields, and every WPD resolution.
+static void writeTypeIdSummaryRecord(SmallVector<uint64_t, 64> &NameVals,
+                                     StringTableBuilder &StrtabBuilder,
+                                     const std::string &Id,
+                                     const TypeIdSummary &Summary) {
+  NameVals.push_back(StrtabBuilder.add(Id));
+  NameVals.push_back(Id.size());
+
+  // Type test resolution: [kind, size-1 bit width, align log2, size-1,
+  // bit mask, inline bits].
+  NameVals.push_back(Summary.TTRes.TheKind);
+  NameVals.push_back(Summary.TTRes.SizeM1BitWidth);
+  NameVals.push_back(Summary.TTRes.AlignLog2);
+  NameVals.push_back(Summary.TTRes.SizeM1);
+  NameVals.push_back(Summary.TTRes.BitMask);
+  NameVals.push_back(Summary.TTRes.InlineBits);
+
+  for (auto &W : Summary.WPDRes)
+    writeWholeProgramDevirtResolution(NameVals, StrtabBuilder, W.first,
+                                      W.second);
+}
+
+/// Append a type-id-compatible-vtable summary to \p NameVals: the
+/// string-table reference to \p Id followed by (offset, vtable value ID)
+/// pairs for each compatible vtable.
+static void writeTypeIdCompatibleVtableSummaryRecord(
+    SmallVector<uint64_t, 64> &NameVals, StringTableBuilder &StrtabBuilder,
+    const std::string &Id, const TypeIdCompatibleVtableInfo &Summary,
+    ValueEnumerator &VE) {
+  NameVals.push_back(StrtabBuilder.add(Id));
+  NameVals.push_back(Id.size());
+
+  for (auto &P : Summary) {
+    NameVals.push_back(P.AddressPointOffset);
+    NameVals.push_back(VE.getValueID(P.VTableVI.getValue()));
+  }
+}
+
+// Helper to emit a single function summary record.
+//
+// Emits the type metadata records for the summary first, then one
+// FS_PERMODULE / FS_PERMODULE_RELBF / FS_PERMODULE_PROFILE record whose
+// code and abbrev depend on whether profile data is present.
+void ModuleBitcodeWriterBase::writePerModuleFunctionSummaryRecord(
+    SmallVector<uint64_t, 64> &NameVals, GlobalValueSummary *Summary,
+    unsigned ValueID, unsigned FSCallsAbbrev, unsigned FSCallsProfileAbbrev,
+    const Function &F) {
+  NameVals.push_back(ValueID);
+
+  FunctionSummary *FS = cast<FunctionSummary>(Summary);
+
+  // In the per-module summary every callee has a known value ID, so the
+  // callback never returns None.
+  writeFunctionTypeMetadataRecords(
+      Stream, FS, [&](const ValueInfo &VI) -> Optional<unsigned> {
+        return {VE.getValueID(VI.getValue())};
+      });
+
+  // Fixed prefix: [flags, instcount, fflags, numrefs, rorefcnt, worefcnt].
+  auto SpecialRefCnts = FS->specialRefCounts();
+  NameVals.push_back(getEncodedGVSummaryFlags(FS->flags()));
+  NameVals.push_back(FS->instCount());
+  NameVals.push_back(getEncodedFFlags(FS->fflags()));
+  NameVals.push_back(FS->refs().size());
+  NameVals.push_back(SpecialRefCnts.first); // rorefcnt
+  NameVals.push_back(SpecialRefCnts.second); // worefcnt
+
+  for (auto &RI : FS->refs())
+    NameVals.push_back(VE.getValueID(RI.getValue()));
+
+  // Call edges: valueid, optionally followed by hotness (with profile data)
+  // or relative block frequency (when WriteRelBFToSummary is set).
+  bool HasProfileData =
+      F.hasProfileData() || ForceSummaryEdgesCold != FunctionSummary::FSHT_None;
+  for (auto &ECI : FS->calls()) {
+    NameVals.push_back(getValueId(ECI.first));
+    if (HasProfileData)
+      NameVals.push_back(static_cast<uint8_t>(ECI.second.Hotness));
+    else if (WriteRelBFToSummary)
+      NameVals.push_back(ECI.second.RelBlockFreq);
+  }
+
+  unsigned FSAbbrev = (HasProfileData ? FSCallsProfileAbbrev : FSCallsAbbrev);
+  unsigned Code =
+      (HasProfileData ? bitc::FS_PERMODULE_PROFILE
+                      : (WriteRelBFToSummary ? bitc::FS_PERMODULE_RELBF
+                                             : bitc::FS_PERMODULE));
+
+  // Emit the finished record.
+  Stream.EmitRecord(Code, NameVals, FSAbbrev);
+  NameVals.clear();
+}
+
+// Collect the global value references in the given variable's initializer,
+// and emit them in a summary record.
+void ModuleBitcodeWriterBase::writeModuleLevelReferences(
+    const GlobalVariable &V, SmallVector<uint64_t, 64> &NameVals,
+    unsigned FSModRefsAbbrev, unsigned FSModVTableRefsAbbrev) {
+  auto VI = Index->getValueInfo(V.getGUID());
+  if (!VI || VI.getSummaryList().empty()) {
+    // Only declarations should not have a summary (a declaration might however
+    // have a summary if the def was in module level asm).
+    assert(V.isDeclaration());
+    return;
+  }
+  auto *Summary = VI.getSummaryList()[0].get();
+  NameVals.push_back(VE.getValueID(&V));
+  GlobalVarSummary *VS = cast<GlobalVarSummary>(Summary);
+  NameVals.push_back(getEncodedGVSummaryFlags(VS->flags()));
+  NameVals.push_back(getEncodedGVarFlags(VS->varflags()));
+
+  // The vtable record variant carries an explicit numrefs field so the
+  // reader can tell where the refs end and the vtable pairs begin.
+  auto VTableFuncs = VS->vTableFuncs();
+  if (!VTableFuncs.empty())
+    NameVals.push_back(VS->refs().size());
+
+  unsigned SizeBeforeRefs = NameVals.size();
+  for (auto &RI : VS->refs())
+    NameVals.push_back(VE.getValueID(RI.getValue()));
+  // Sort the refs for deterministic output; the vector returned by VS->refs()
+  // has been initialized from a DenseSet.
+  llvm::sort(drop_begin(NameVals, SizeBeforeRefs));
+
+  if (VTableFuncs.empty())
+    Stream.EmitRecord(bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS, NameVals,
+                      FSModRefsAbbrev);
+  else {
+    // VTableFuncs pairs should already be sorted by offset.
+    for (auto &P : VTableFuncs) {
+      NameVals.push_back(VE.getValueID(P.FuncVI.getValue()));
+      NameVals.push_back(P.VTableOffset);
+    }
+
+    Stream.EmitRecord(bitc::FS_PERMODULE_VTABLE_GLOBALVAR_INIT_REFS, NameVals,
+                      FSModVTableRefsAbbrev);
+  }
+  NameVals.clear();
+}
+
/// Emit the per-module summary section alongside the rest of
/// the module's bitcode.
///
/// The block contains, in order: a format version record, index flags, the
/// value-id/GUID mapping table, the abbreviation definitions used by the
/// summary records, then one summary record per function, global variable,
/// alias and type-id-compatible vtable, and finally the basic block count.
void ModuleBitcodeWriterBase::writePerModuleGlobalValueSummary() {
  // By default we compile with ThinLTO if the module has a summary, but the
  // client can request full LTO with a module flag.
  bool IsThinLTO = true;
  if (auto *MD =
          mdconst::extract_or_null<ConstantInt>(M.getModuleFlag("ThinLTO")))
    IsThinLTO = MD->getZExtValue();
  Stream.EnterSubblock(IsThinLTO ? bitc::GLOBALVAL_SUMMARY_BLOCK_ID
                                 : bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID,
                       4);

  // Record the summary format version first so readers can reject or adapt
  // to incompatible encodings before decoding anything else.
  Stream.EmitRecord(
      bitc::FS_VERSION,
      ArrayRef<uint64_t>{ModuleSummaryIndex::BitcodeSummaryVersion});

  // Write the index flags.
  uint64_t Flags = 0;
  // Bits 1-3 are set only in the combined index, skip them.
  if (Index->enableSplitLTOUnit())
    Flags |= 0x8;
  Stream.EmitRecord(bitc::FS_FLAGS, ArrayRef<uint64_t>{Flags});

  // An empty index still gets a block (version + flags only); there is
  // nothing further to emit.
  if (Index->begin() == Index->end()) {
    Stream.ExitBlock();
    return;
  }

  // Emit the table mapping the compact value ids used by the records below
  // back to global value GUIDs (FS_VALUE_GUID: [valueid, guid]).
  for (const auto &GVI : valueIds()) {
    Stream.EmitRecord(bitc::FS_VALUE_GUID,
                      ArrayRef<uint64_t>{GVI.second, GVI.first});
  }

  // Abbrev for FS_PERMODULE_PROFILE.
  auto Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_PROFILE));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // instcount
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // fflags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // numrefs
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // rorefcnt
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // worefcnt
  // numrefs x valueid, n x (valueid, hotness)
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  unsigned FSCallsProfileAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for FS_PERMODULE or FS_PERMODULE_RELBF.
  Abbv = std::make_shared<BitCodeAbbrev>();
  if (WriteRelBFToSummary)
    Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_RELBF));
  else
    Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // instcount
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // fflags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // numrefs
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // rorefcnt
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // worefcnt
  // numrefs x valueid, n x (valueid [, rel_block_freq])
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  unsigned FSCallsAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for FS_PERMODULE_GLOBALVAR_INIT_REFS.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_GLOBALVAR_INIT_REFS));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); // valueids
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  unsigned FSModRefsAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for FS_PERMODULE_VTABLE_GLOBALVAR_INIT_REFS.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_PERMODULE_VTABLE_GLOBALVAR_INIT_REFS));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // numrefs
  // numrefs x valueid, n x (valueid , offset)
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  unsigned FSModVTableRefsAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for FS_ALIAS.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_ALIAS));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  unsigned FSAliasAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for FS_TYPE_ID_METADATA
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_TYPE_ID_METADATA));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // typeid strtab index
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // typeid length
  // n x (valueid , offset)
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  unsigned TypeIdCompatibleVtableAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Scratch record buffer, reused (and cleared) by the emission helpers.
  SmallVector<uint64_t, 64> NameVals;
  // Iterate over the list of functions instead of the Index to
  // ensure the ordering is stable.
  for (const Function &F : M) {
    // Summary emission does not support anonymous functions, they have to
    // renamed using the anonymous function renaming pass.
    if (!F.hasName())
      report_fatal_error("Unexpected anonymous function when writing summary");

    ValueInfo VI = Index->getValueInfo(F.getGUID());
    if (!VI || VI.getSummaryList().empty()) {
      // Only declarations should not have a summary (a declaration might
      // however have a summary if the def was in module level asm).
      assert(F.isDeclaration());
      continue;
    }
    auto *Summary = VI.getSummaryList()[0].get();
    writePerModuleFunctionSummaryRecord(NameVals, Summary, VE.getValueID(&F),
                                        FSCallsAbbrev, FSCallsProfileAbbrev, F);
  }

  // Capture references from GlobalVariable initializers, which are outside
  // of a function scope.
  for (const GlobalVariable &G : M.globals())
    writeModuleLevelReferences(G, NameVals, FSModRefsAbbrev,
                               FSModVTableRefsAbbrev);

  // FS_ALIAS: [valueid, flags, aliasee valueid].
  for (const GlobalAlias &A : M.aliases()) {
    auto *Aliasee = A.getAliaseeObject();
    if (!Aliasee->hasName())
      // Nameless function don't have an entry in the summary, skip it.
      continue;
    auto AliasId = VE.getValueID(&A);
    auto AliaseeId = VE.getValueID(Aliasee);
    NameVals.push_back(AliasId);
    auto *Summary = Index->getGlobalValueSummary(A);
    AliasSummary *AS = cast<AliasSummary>(Summary);
    NameVals.push_back(getEncodedGVSummaryFlags(AS->flags()));
    NameVals.push_back(AliaseeId);
    Stream.EmitRecord(bitc::FS_ALIAS, NameVals, FSAliasAbbrev);
    NameVals.clear();
  }

  // One FS_TYPE_ID_METADATA record per type id with a compatible vtable.
  for (auto &S : Index->typeIdCompatibleVtableMap()) {
    writeTypeIdCompatibleVtableSummaryRecord(NameVals, StrtabBuilder, S.first,
                                             S.second, VE);
    Stream.EmitRecord(bitc::FS_TYPE_ID_METADATA, NameVals,
                      TypeIdCompatibleVtableAbbrev);
    NameVals.clear();
  }

  Stream.EmitRecord(bitc::FS_BLOCK_COUNT,
                    ArrayRef<uint64_t>{Index->getBlockCount()});

  Stream.ExitBlock();
}
+
/// Emit the combined summary section into the combined index file.
///
/// Layout mirrors the per-module summary block but uses the FS_COMBINED_*
/// record forms, which additionally carry a module id per entry. Aliases are
/// emitted in a post-pass so the reader sees all aliasees first, and the
/// type-id records referenced by any emitted summary are written at the end.
void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
  Stream.EnterSubblock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID, 3);
  // Format version first so readers can bail out on incompatible encodings.
  Stream.EmitRecord(
      bitc::FS_VERSION,
      ArrayRef<uint64_t>{ModuleSummaryIndex::BitcodeSummaryVersion});

  // Write the index flags.
  Stream.EmitRecord(bitc::FS_FLAGS, ArrayRef<uint64_t>{Index.getFlags()});

  // Mapping table from compact value ids to GUIDs
  // (FS_VALUE_GUID: [valueid, guid]).
  for (const auto &GVI : valueIds()) {
    Stream.EmitRecord(bitc::FS_VALUE_GUID,
                      ArrayRef<uint64_t>{GVI.second, GVI.first});
  }

  // Abbrev for FS_COMBINED.
  auto Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // modid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // instcount
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // fflags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // entrycount
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // numrefs
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // rorefcnt
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // worefcnt
  // numrefs x valueid, n x (valueid)
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  unsigned FSCallsAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for FS_COMBINED_PROFILE.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_PROFILE));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // modid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // instcount
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // fflags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // entrycount
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // numrefs
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // rorefcnt
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 4)); // worefcnt
  // numrefs x valueid, n x (valueid, hotness)
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  unsigned FSCallsProfileAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for FS_COMBINED_GLOBALVAR_INIT_REFS.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_GLOBALVAR_INIT_REFS));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // modid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array)); // valueids
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8));
  unsigned FSModRefsAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // Abbrev for FS_COMBINED_ALIAS.
  Abbv = std::make_shared<BitCodeAbbrev>();
  Abbv->Add(BitCodeAbbrevOp(bitc::FS_COMBINED_ALIAS));
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // modid
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // flags
  Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // valueid
  unsigned FSAliasAbbrev = Stream.EmitAbbrev(std::move(Abbv));

  // The aliases are emitted as a post-pass, and will point to the value
  // id of the aliasee. Save them in a vector for post-processing.
  SmallVector<AliasSummary *, 64> Aliases;

  // Save the value id for each summary for alias emission.
  DenseMap<const GlobalValueSummary *, unsigned> SummaryToValueIdMap;

  // Scratch record buffer, reused (and cleared) between records.
  SmallVector<uint64_t, 64> NameVals;

  // Set that will be populated during call to writeFunctionTypeMetadataRecords
  // with the type ids referenced by this index file.
  std::set<GlobalValue::GUID> ReferencedTypeIds;

  // For local linkage, we also emit the original name separately
  // immediately after the record.
  auto MaybeEmitOriginalName = [&](GlobalValueSummary &S) {
    // We don't need to emit the original name if we are writing the index for
    // distributed backends (in which case ModuleToSummariesForIndex is
    // non-null). The original name is only needed during the thin link, since
    // for SamplePGO the indirect call targets for local functions have
    // have the original name annotated in profile.
    // Continue to emit it when writing out the entire combined index, which is
    // used in testing the thin link via llvm-lto.
    if (ModuleToSummariesForIndex || !GlobalValue::isLocalLinkage(S.linkage()))
      return;
    NameVals.push_back(S.getOriginalName());
    Stream.EmitRecord(bitc::FS_COMBINED_ORIGINAL_NAME, NameVals);
    NameVals.clear();
  };

  // GUIDs of everything defined or referenced here; used below to filter the
  // CFI function def/decl name lists to relevant entries.
  std::set<GlobalValue::GUID> DefOrUseGUIDs;
  forEachSummary([&](GVInfo I, bool IsAliasee) {
    GlobalValueSummary *S = I.second;
    assert(S);
    DefOrUseGUIDs.insert(I.first);
    for (const ValueInfo &VI : S->refs())
      DefOrUseGUIDs.insert(VI.getGUID());

    auto ValueId = getValueId(I.first);
    assert(ValueId);
    SummaryToValueIdMap[S] = *ValueId;

    // If this is invoked for an aliasee, we want to record the above
    // mapping, but then not emit a summary entry (if the aliasee is
    // to be imported, we will invoke this separately with IsAliasee=false).
    if (IsAliasee)
      return;

    if (auto *AS = dyn_cast<AliasSummary>(S)) {
      // Will process aliases as a post-pass because the reader wants all
      // global to be loaded first.
      Aliases.push_back(AS);
      return;
    }

    if (auto *VS = dyn_cast<GlobalVarSummary>(S)) {
      NameVals.push_back(*ValueId);
      NameVals.push_back(Index.getModuleId(VS->modulePath()));
      NameVals.push_back(getEncodedGVSummaryFlags(VS->flags()));
      NameVals.push_back(getEncodedGVarFlags(VS->varflags()));
      for (auto &RI : VS->refs()) {
        // Referenced values without an id have no summary in this index
        // subset; drop them from the record.
        auto RefValueId = getValueId(RI.getGUID());
        if (!RefValueId)
          continue;
        NameVals.push_back(*RefValueId);
      }

      // Emit the finished record.
      Stream.EmitRecord(bitc::FS_COMBINED_GLOBALVAR_INIT_REFS, NameVals,
                        FSModRefsAbbrev);
      NameVals.clear();
      MaybeEmitOriginalName(*S);
      return;
    }

    auto GetValueId = [&](const ValueInfo &VI) -> Optional<unsigned> {
      return getValueId(VI.getGUID());
    };

    auto *FS = cast<FunctionSummary>(S);
    writeFunctionTypeMetadataRecords(Stream, FS, GetValueId);
    getReferencedTypeIds(FS, ReferencedTypeIds);

    NameVals.push_back(*ValueId);
    NameVals.push_back(Index.getModuleId(FS->modulePath()));
    NameVals.push_back(getEncodedGVSummaryFlags(FS->flags()));
    NameVals.push_back(FS->instCount());
    NameVals.push_back(getEncodedFFlags(FS->fflags()));
    NameVals.push_back(FS->entryCount());

    // Fill in below
    NameVals.push_back(0); // numrefs
    NameVals.push_back(0); // rorefcnt
    NameVals.push_back(0); // worefcnt

    unsigned Count = 0, RORefCnt = 0, WORefCnt = 0;
    for (auto &RI : FS->refs()) {
      auto RefValueId = getValueId(RI.getGUID());
      if (!RefValueId)
        continue;
      NameVals.push_back(*RefValueId);
      if (RI.isReadOnly())
        RORefCnt++;
      else if (RI.isWriteOnly())
        WORefCnt++;
      Count++;
    }
    // Backpatch the three placeholder operands pushed above (indices 6-8:
    // numrefs, rorefcnt, worefcnt) now that the refs have been counted.
    NameVals[6] = Count;
    NameVals[7] = RORefCnt;
    NameVals[8] = WORefCnt;

    // Use the profile-carrying record form if any call edge has known
    // hotness; scan stops at the first such edge.
    bool HasProfileData = false;
    for (auto &EI : FS->calls()) {
      HasProfileData |=
          EI.second.getHotness() != CalleeInfo::HotnessType::Unknown;
      if (HasProfileData)
        break;
    }

    for (auto &EI : FS->calls()) {
      // If this GUID doesn't have a value id, it doesn't have a function
      // summary and we don't need to record any calls to it.
      Optional<unsigned> CallValueId = GetValueId(EI.first);
      if (!CallValueId)
        continue;
      NameVals.push_back(*CallValueId);
      if (HasProfileData)
        NameVals.push_back(static_cast<uint8_t>(EI.second.Hotness));
    }

    unsigned FSAbbrev = (HasProfileData ? FSCallsProfileAbbrev : FSCallsAbbrev);
    unsigned Code =
        (HasProfileData ? bitc::FS_COMBINED_PROFILE : bitc::FS_COMBINED);

    // Emit the finished record.
    Stream.EmitRecord(Code, NameVals, FSAbbrev);
    NameVals.clear();
    MaybeEmitOriginalName(*S);
  });

  // Post-pass: emit the deferred aliases, now that every aliasee has a
  // value id recorded in SummaryToValueIdMap.
  for (auto *AS : Aliases) {
    auto AliasValueId = SummaryToValueIdMap[AS];
    assert(AliasValueId);
    NameVals.push_back(AliasValueId);
    NameVals.push_back(Index.getModuleId(AS->modulePath()));
    NameVals.push_back(getEncodedGVSummaryFlags(AS->flags()));
    auto AliaseeValueId = SummaryToValueIdMap[&AS->getAliasee()];
    assert(AliaseeValueId);
    NameVals.push_back(AliaseeValueId);

    // Emit the finished record.
    Stream.EmitRecord(bitc::FS_COMBINED_ALIAS, NameVals, FSAliasAbbrev);
    NameVals.clear();
    MaybeEmitOriginalName(*AS);

    // An alias to a function pulls in that function's type ids too.
    if (auto *FS = dyn_cast<FunctionSummary>(&AS->getAliasee()))
      getReferencedTypeIds(FS, ReferencedTypeIds);
  }

  // Emit CFI function-definition names, restricted to those defined or
  // referenced by this index (record is pairs of strtab offset/size).
  if (!Index.cfiFunctionDefs().empty()) {
    for (auto &S : Index.cfiFunctionDefs()) {
      if (DefOrUseGUIDs.count(
              GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(S)))) {
        NameVals.push_back(StrtabBuilder.add(S));
        NameVals.push_back(S.size());
      }
    }
    if (!NameVals.empty()) {
      Stream.EmitRecord(bitc::FS_CFI_FUNCTION_DEFS, NameVals);
      NameVals.clear();
    }
  }

  // Same filtering for CFI function declarations.
  if (!Index.cfiFunctionDecls().empty()) {
    for (auto &S : Index.cfiFunctionDecls()) {
      if (DefOrUseGUIDs.count(
              GlobalValue::getGUID(GlobalValue::dropLLVMManglingEscape(S)))) {
        NameVals.push_back(StrtabBuilder.add(S));
        NameVals.push_back(S.size());
      }
    }
    if (!NameVals.empty()) {
      Stream.EmitRecord(bitc::FS_CFI_FUNCTION_DECLS, NameVals);
      NameVals.clear();
    }
  }

  // Walk the GUIDs that were referenced, and write the
  // corresponding type id records.
  for (auto &T : ReferencedTypeIds) {
    auto TidIter = Index.typeIds().equal_range(T);
    for (auto It = TidIter.first; It != TidIter.second; ++It) {
      writeTypeIdSummaryRecord(NameVals, StrtabBuilder, It->second.first,
                               It->second.second);
      Stream.EmitRecord(bitc::FS_TYPE_ID, NameVals);
      NameVals.clear();
    }
  }

  Stream.EmitRecord(bitc::FS_BLOCK_COUNT,
                    ArrayRef<uint64_t>{Index.getBlockCount()});

  Stream.ExitBlock();
}
+
+/// Create the "IDENTIFICATION_BLOCK_ID" containing a single string with the
+/// current llvm version, and a record for the epoch number.
+static void writeIdentificationBlock(BitstreamWriter &Stream) {
+ Stream.EnterSubblock(bitc::IDENTIFICATION_BLOCK_ID, 5);
+
+ // Write the "user readable" string identifying the bitcode producer
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::IDENTIFICATION_CODE_STRING));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Char6));
+ auto StringAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+ writeStringRecord(Stream, bitc::IDENTIFICATION_CODE_STRING,
+ "LLVM" LLVM_VERSION_STRING, StringAbbrev);
+
+ // Write the epoch version
+ Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::IDENTIFICATION_CODE_EPOCH));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ auto EpochAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+ constexpr std::array<unsigned, 1> Vals = {{bitc::BITCODE_CURRENT_EPOCH}};
+ Stream.EmitRecord(bitc::IDENTIFICATION_CODE_EPOCH, Vals, EpochAbbrev);
+ Stream.ExitBlock();
+}
+
// Emit the MODULE_CODE_HASH record for the just-written module block.
// The hash covers the buffer bytes from \p BlockStartPos (where the module
// block began) to the current end of the buffer. No-op unless hashing was
// requested at construction time.
void ModuleBitcodeWriter::writeModuleHash(size_t BlockStartPos) {
  // Emit the module's hash.
  // MODULE_CODE_HASH: [5*i32]
  if (GenerateHash) {
    uint32_t Vals[5];
    Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&(Buffer)[BlockStartPos],
                                    Buffer.size() - BlockStartPos));
    StringRef Hash = Hasher.result();
    // Split the 20-byte digest into five big-endian 32-bit words.
    for (int Pos = 0; Pos < 20; Pos += 4) {
      Vals[Pos / 4] = support::endian::read32be(Hash.data() + Pos);
    }

    // Emit the finished record.
    Stream.EmitRecord(bitc::MODULE_CODE_HASH, Vals);

    if (ModHash)
      // Save the written hash value.
      llvm::copy(Vals, std::begin(*ModHash));
  }
}
+
// Top-level driver: emit the identification block followed by the MODULE
// block containing every sub-section of the module's bitcode. The order of
// the sub-writers below is significant — later sections reference entities
// (types, attributes, constants, metadata) defined by earlier ones.
void ModuleBitcodeWriter::write() {
  writeIdentificationBlock(Stream);

  Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);
  // Remember where the module block starts so writeModuleHash() below can
  // hash exactly the bytes of this block.
  size_t BlockStartPos = Buffer.size();

  writeModuleVersion();

  // Emit blockinfo, which defines the standard abbreviations etc.
  writeBlockInfo();

  // Emit information describing all of the types in the module.
  writeTypeTable();

  // Emit information about attribute groups.
  writeAttributeGroupTable();

  // Emit information about parameter attributes.
  writeAttributeTable();

  writeComdats();

  // Emit top-level description of module, including target triple, inline asm,
  // descriptors for global variables, and function prototype info.
  writeModuleInfo();

  // Emit constants.
  writeModuleConstants();

  // Emit metadata kind names.
  writeModuleMetadataKinds();

  // Emit metadata.
  writeModuleMetadata();

  // Emit module-level use-lists.
  if (VE.shouldPreserveUseListOrder())
    writeUseListBlock(nullptr);

  writeOperandBundleTags();
  writeSyncScopeNames();

  // Emit function bodies.
  DenseMap<const Function *, uint64_t> FunctionToBitcodeIndex;
  for (const Function &F : M)
    if (!F.isDeclaration())
      writeFunction(F, FunctionToBitcodeIndex);

  // Need to write after the above call to WriteFunction which populates
  // the summary information in the index.
  if (Index)
    writePerModuleGlobalValueSummary();

  writeGlobalValueSymbolTable(FunctionToBitcodeIndex);

  writeModuleHash(BlockStartPos);

  Stream.ExitBlock();
}
+
+static void writeInt32ToBuffer(uint32_t Value, SmallVectorImpl<char> &Buffer,
+ uint32_t &Position) {
+ support::endian::write32le(&Buffer[Position], Value);
+ Position += 4;
+}
+
+/// If generating a bc file on darwin, we have to emit a
+/// header and trailer to make it compatible with the system archiver. To do
+/// this we emit the following header, and then emit a trailer that pads the
+/// file out to be a multiple of 16 bytes.
+///
+/// struct bc_header {
+/// uint32_t Magic; // 0x0B17C0DE
+/// uint32_t Version; // Version, currently always 0.
+/// uint32_t BitcodeOffset; // Offset to traditional bitcode file.
+/// uint32_t BitcodeSize; // Size of traditional bitcode file.
+/// uint32_t CPUType; // CPU specifier.
+/// ... potentially more later ...
+/// };
+static void emitDarwinBCHeaderAndTrailer(SmallVectorImpl<char> &Buffer,
+ const Triple &TT) {
+ unsigned CPUType = ~0U;
+
+ // Match x86_64-*, i[3-9]86-*, powerpc-*, powerpc64-*, arm-*, thumb-*,
+ // armv[0-9]-*, thumbv[0-9]-*, armv5te-*, or armv6t2-*. The CPUType is a magic
+ // number from /usr/include/mach/machine.h. It is ok to reproduce the
+ // specific constants here because they are implicitly part of the Darwin ABI.
+ enum {
+ DARWIN_CPU_ARCH_ABI64 = 0x01000000,
+ DARWIN_CPU_TYPE_X86 = 7,
+ DARWIN_CPU_TYPE_ARM = 12,
+ DARWIN_CPU_TYPE_POWERPC = 18
+ };
+
+ Triple::ArchType Arch = TT.getArch();
+ if (Arch == Triple::x86_64)
+ CPUType = DARWIN_CPU_TYPE_X86 | DARWIN_CPU_ARCH_ABI64;
+ else if (Arch == Triple::x86)
+ CPUType = DARWIN_CPU_TYPE_X86;
+ else if (Arch == Triple::ppc)
+ CPUType = DARWIN_CPU_TYPE_POWERPC;
+ else if (Arch == Triple::ppc64)
+ CPUType = DARWIN_CPU_TYPE_POWERPC | DARWIN_CPU_ARCH_ABI64;
+ else if (Arch == Triple::arm || Arch == Triple::thumb)
+ CPUType = DARWIN_CPU_TYPE_ARM;
+
+ // Traditional Bitcode starts after header.
+ assert(Buffer.size() >= BWH_HeaderSize &&
+ "Expected header size to be reserved");
+ unsigned BCOffset = BWH_HeaderSize;
+ unsigned BCSize = Buffer.size() - BWH_HeaderSize;
+
+ // Write the magic and version.
+ unsigned Position = 0;
+ writeInt32ToBuffer(0x0B17C0DE, Buffer, Position);
+ writeInt32ToBuffer(0, Buffer, Position); // Version.
+ writeInt32ToBuffer(BCOffset, Buffer, Position);
+ writeInt32ToBuffer(BCSize, Buffer, Position);
+ writeInt32ToBuffer(CPUType, Buffer, Position);
+
+ // If the file is not a multiple of 16 bytes, insert dummy padding.
+ while (Buffer.size() & 15)
+ Buffer.push_back(0);
+}
+
+/// Helper to write the header common to all bitcode files.
+static void writeBitcodeHeader(BitstreamWriter &Stream) {
+ // Emit the file header.
+ Stream.Emit((unsigned)'B', 8);
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit(0x0, 4);
+ Stream.Emit(0xC, 4);
+ Stream.Emit(0xE, 4);
+ Stream.Emit(0xD, 4);
+}
+
// Construct a writer over \p Buffer and immediately emit the bitcode magic
// header into it. \p FS (possibly null) and FlushThreshold are forwarded to
// the BitstreamWriter, which controls any incremental flushing to the stream.
BitcodeWriter::BitcodeWriter(SmallVectorImpl<char> &Buffer, raw_fd_stream *FS)
    : Buffer(Buffer), Stream(new BitstreamWriter(Buffer, FS, FlushThreshold)) {
  writeBitcodeHeader(*Stream);
}
+
// Every writer is expected to have emitted its string table (writeStrtab or
// copyStrtab) before destruction; the assert enforces that protocol.
BitcodeWriter::~BitcodeWriter() { assert(WroteStrtab); }
+
+void BitcodeWriter::writeBlob(unsigned Block, unsigned Record, StringRef Blob) {
+ Stream->EnterSubblock(Block, 3);
+
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(Record));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ auto AbbrevNo = Stream->EmitAbbrev(std::move(Abbv));
+
+ Stream->EmitRecordWithBlob(AbbrevNo, ArrayRef<uint64_t>{Record}, Blob);
+
+ Stream->ExitBlock();
+}
+
// Build and emit the SYMTAB blob for all modules written so far. The symbol
// table is an optimization, not a correctness requirement, so this bails out
// silently whenever an accurate table cannot be produced.
void BitcodeWriter::writeSymtab() {
  assert(!WroteStrtab && !WroteSymtab);

  // If any module has module-level inline asm, we will require a registered asm
  // parser for the target so that we can create an accurate symbol table for
  // the module.
  for (Module *M : Mods) {
    if (M->getModuleInlineAsm().empty())
      continue;

    std::string Err;
    const Triple TT(M->getTargetTriple());
    const Target *T = TargetRegistry::lookupTarget(TT.str(), Err);
    if (!T || !T->hasMCAsmParser())
      // No parser for this target: skip the symtab entirely.
      return;
  }

  WroteSymtab = true;
  SmallVector<char, 0> Symtab;
  // The irsymtab::build function may be unable to create a symbol table if the
  // module is malformed (e.g. it contains an invalid alias). Writing a symbol
  // table is not required for correctness, but we still want to be able to
  // write malformed modules to bitcode files, so swallow the error.
  if (Error E = irsymtab::build(Mods, Symtab, StrtabBuilder, Alloc)) {
    consumeError(std::move(E));
    return;
  }

  writeBlob(bitc::SYMTAB_BLOCK_ID, bitc::SYMTAB_BLOB,
            {Symtab.data(), Symtab.size()});
}
+
+void BitcodeWriter::writeStrtab() {
+ assert(!WroteStrtab);
+
+ std::vector<char> Strtab;
+ StrtabBuilder.finalizeInOrder();
+ Strtab.resize(StrtabBuilder.getSize());
+ StrtabBuilder.write((uint8_t *)Strtab.data());
+
+ writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB,
+ {Strtab.data(), Strtab.size()});
+
+ WroteStrtab = true;
+}
+
// Emit a pre-built string table blob verbatim instead of finalizing
// StrtabBuilder; used when the caller already has a serialized strtab.
void BitcodeWriter::copyStrtab(StringRef Strtab) {
  writeBlob(bitc::STRTAB_BLOCK_ID, bitc::STRTAB_BLOB, Strtab);
  WroteStrtab = true;
}
+
// Serialize \p M as a module block into this writer's stream. \p Index, if
// non-null, adds a per-module summary; \p GenerateHash/\p ModHash control
// emission and capture of the module hash. Must precede writeStrtab().
void BitcodeWriter::writeModule(const Module &M,
                                bool ShouldPreserveUseListOrder,
                                const ModuleSummaryIndex *Index,
                                bool GenerateHash, ModuleHash *ModHash) {
  assert(!WroteStrtab);

  // The Mods vector is used by irsymtab::build, which requires non-const
  // Modules in case it needs to materialize metadata. But the bitcode writer
  // requires that the module is materialized, so we can cast to non-const here,
  // after checking that it is in fact materialized.
  assert(M.isMaterialized());
  Mods.push_back(const_cast<Module *>(&M));

  ModuleBitcodeWriter ModuleWriter(M, Buffer, StrtabBuilder, *Stream,
                                   ShouldPreserveUseListOrder, Index,
                                   GenerateHash, ModHash);
  ModuleWriter.write();
}
+
// Serialize a (combined) module summary index into this writer's stream.
// \p ModuleToSummariesForIndex, when non-null, restricts emission to the
// subset needed by a distributed ThinLTO backend.
void BitcodeWriter::writeIndex(
    const ModuleSummaryIndex *Index,
    const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex) {
  IndexBitcodeWriter IndexWriter(*Stream, StrtabBuilder, *Index,
                                 ModuleToSummariesForIndex);
  IndexWriter.write();
}
+
/// Write the specified module to the specified output stream.
///
/// Serializes into an in-memory buffer first, optionally wrapping the result
/// in the Darwin archive-compatible header/trailer, then writes the buffer
/// to \p Out in one shot.
void llvm::WriteBitcodeToFile(const Module &M, raw_ostream &Out,
                              bool ShouldPreserveUseListOrder,
                              const ModuleSummaryIndex *Index,
                              bool GenerateHash, ModuleHash *ModHash) {
  SmallVector<char, 0> Buffer;
  Buffer.reserve(256*1024);

  // If this is darwin or another generic macho target, reserve space for the
  // header.
  Triple TT(M.getTargetTriple());
  if (TT.isOSDarwin() || TT.isOSBinFormatMachO())
    Buffer.insert(Buffer.begin(), BWH_HeaderSize, 0);

  BitcodeWriter Writer(Buffer, dyn_cast<raw_fd_stream>(&Out));
  Writer.writeModule(M, ShouldPreserveUseListOrder, Index, GenerateHash,
                     ModHash);
  Writer.writeSymtab();
  Writer.writeStrtab();

  // Backpatch the reserved header bytes now that the total size is known.
  if (TT.isOSDarwin() || TT.isOSBinFormatMachO())
    emitDarwinBCHeaderAndTrailer(Buffer, TT);

  // Write the generated bitstream to "Out". The buffer may be empty here if
  // the BitstreamWriter already flushed everything to a raw_fd_stream.
  if (!Buffer.empty())
    Out.write((char *)&Buffer.front(), Buffer.size());
}
+
// Emit the combined-index module block: version record, the module path
// string table, then the combined global value summary section.
void IndexBitcodeWriter::write() {
  Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);

  writeModuleVersion();

  // Write the module paths in the combined index.
  writeModStrings();

  // Write the summary combined index records.
  writeCombinedGlobalValueSummary();

  Stream.ExitBlock();
}
+
// Write the specified module summary index to the given raw output stream,
// where it will be written in a new bitcode block. This is used when
// writing the combined index file for ThinLTO. When writing a subset of the
// index for a distributed backend, provide a \p ModuleToSummariesForIndex map.
void llvm::writeIndexToFile(
    const ModuleSummaryIndex &Index, raw_ostream &Out,
    const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex) {
  SmallVector<char, 0> Buffer;
  Buffer.reserve(256 * 1024);

  BitcodeWriter Writer(Buffer);
  Writer.writeIndex(&Index, ModuleToSummariesForIndex);
  Writer.writeStrtab();

  // The writer's constructor already emitted the bitcode header into Buffer,
  // so it is non-empty here.
  Out.write((char *)&Buffer.front(), Buffer.size());
}
+
namespace {

/// Class to manage the bitcode writing for a thin link bitcode file.
/// A thin link file carries only what the ThinLTO thin link needs: the
/// simplified module info, the per-module summary, and the module hash
/// (see ThinLinkBitcodeWriter::write below).
class ThinLinkBitcodeWriter : public ModuleBitcodeWriterBase {
  /// ModHash is for use in ThinLTO incremental build, generated while writing
  /// the module bitcode file.
  const ModuleHash *ModHash;

public:
  ThinLinkBitcodeWriter(const Module &M, StringTableBuilder &StrtabBuilder,
                        BitstreamWriter &Stream,
                        const ModuleSummaryIndex &Index,
                        const ModuleHash &ModHash)
      : ModuleBitcodeWriterBase(M, StrtabBuilder, Stream,
                                /*ShouldPreserveUseListOrder=*/false, &Index),
        ModHash(&ModHash) {}

  /// Emit the thin link file contents into the stream.
  void write();

private:
  /// Emit the source file name plus name/linkage records for all globals.
  void writeSimplifiedModuleInfo();
};

} // end anonymous namespace
+
+// This function writes a simpilified module info for thin link bitcode file.
+// It only contains the source file name along with the name(the offset and
+// size in strtab) and linkage for global values. For the global value info
+// entry, in order to keep linkage at offset 5, there are three zeros used
+// as padding.
+void ThinLinkBitcodeWriter::writeSimplifiedModuleInfo() {
+ SmallVector<unsigned, 64> Vals;
+ // Emit the module's source file name.
+ {
+ StringEncoding Bits = getStringEncoding(M.getSourceFileName());
+ BitCodeAbbrevOp AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8);
+ if (Bits == SE_Char6)
+ AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Char6);
+ else if (Bits == SE_Fixed7)
+ AbbrevOpToUse = BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 7);
+
+ // MODULE_CODE_SOURCE_FILENAME: [namechar x N]
+ auto Abbv = std::make_shared<BitCodeAbbrev>();
+ Abbv->Add(BitCodeAbbrevOp(bitc::MODULE_CODE_SOURCE_FILENAME));
+ Abbv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abbv->Add(AbbrevOpToUse);
+ unsigned FilenameAbbrev = Stream.EmitAbbrev(std::move(Abbv));
+
+ for (const auto P : M.getSourceFileName())
+ Vals.push_back((unsigned char)P);
+
+ Stream.EmitRecord(bitc::MODULE_CODE_SOURCE_FILENAME, Vals, FilenameAbbrev);
+ Vals.clear();
+ }
+
+ // Emit the global variable information.
+ for (const GlobalVariable &GV : M.globals()) {
+ // GLOBALVAR: [strtab offset, strtab size, 0, 0, 0, linkage]
+ Vals.push_back(StrtabBuilder.add(GV.getName()));
+ Vals.push_back(GV.getName().size());
+ Vals.push_back(0);
+ Vals.push_back(0);
+ Vals.push_back(0);
+ Vals.push_back(getEncodedLinkage(GV));
+
+ Stream.EmitRecord(bitc::MODULE_CODE_GLOBALVAR, Vals);
+ Vals.clear();
+ }
+
+ // Emit the function proto information.
+ for (const Function &F : M) {
+ // FUNCTION: [strtab offset, strtab size, 0, 0, 0, linkage]
+ Vals.push_back(StrtabBuilder.add(F.getName()));
+ Vals.push_back(F.getName().size());
+ Vals.push_back(0);
+ Vals.push_back(0);
+ Vals.push_back(0);
+ Vals.push_back(getEncodedLinkage(F));
+
+ Stream.EmitRecord(bitc::MODULE_CODE_FUNCTION, Vals);
+ Vals.clear();
+ }
+
+ // Emit the alias information.
+ for (const GlobalAlias &A : M.aliases()) {
+ // ALIAS: [strtab offset, strtab size, 0, 0, 0, linkage]
+ Vals.push_back(StrtabBuilder.add(A.getName()));
+ Vals.push_back(A.getName().size());
+ Vals.push_back(0);
+ Vals.push_back(0);
+ Vals.push_back(0);
+ Vals.push_back(getEncodedLinkage(A));
+
+ Stream.EmitRecord(bitc::MODULE_CODE_ALIAS, Vals);
+ Vals.clear();
+ }
+
+ // Emit the ifunc information.
+ for (const GlobalIFunc &I : M.ifuncs()) {
+ // IFUNC: [strtab offset, strtab size, 0, 0, 0, linkage]
+ Vals.push_back(StrtabBuilder.add(I.getName()));
+ Vals.push_back(I.getName().size());
+ Vals.push_back(0);
+ Vals.push_back(0);
+ Vals.push_back(0);
+ Vals.push_back(getEncodedLinkage(I));
+
+ Stream.EmitRecord(bitc::MODULE_CODE_IFUNC, Vals);
+ Vals.clear();
+ }
+}
+
// Emit the thin link file: a module block holding the version, the
// simplified module info, the per-module summary, and the precomputed
// module hash passed in at construction time.
void ThinLinkBitcodeWriter::write() {
  Stream.EnterSubblock(bitc::MODULE_BLOCK_ID, 3);

  writeModuleVersion();

  writeSimplifiedModuleInfo();

  writePerModuleGlobalValueSummary();

  // Write module hash.
  Stream.EmitRecord(bitc::MODULE_CODE_HASH, ArrayRef<uint32_t>(*ModHash));

  Stream.ExitBlock();
}
+
// Serialize the thin link (minimized) form of \p M into this writer's
// stream, using the given summary \p Index and precomputed \p ModHash.
void BitcodeWriter::writeThinLinkBitcode(const Module &M,
                                         const ModuleSummaryIndex &Index,
                                         const ModuleHash &ModHash) {
  assert(!WroteStrtab);

  // The Mods vector is used by irsymtab::build, which requires non-const
  // Modules in case it needs to materialize metadata. But the bitcode writer
  // requires that the module is materialized, so we can cast to non-const here,
  // after checking that it is in fact materialized.
  assert(M.isMaterialized());
  Mods.push_back(const_cast<Module *>(&M));

  ThinLinkBitcodeWriter ThinLinkWriter(M, StrtabBuilder, *Stream, Index,
                                       ModHash);
  ThinLinkWriter.write();
}
+
// Write the specified thin link bitcode file to the given raw output stream,
// where it will be written in a new bitcode block. This is used when
// writing the per-module index file for ThinLTO.
void llvm::writeThinLinkBitcodeToFile(const Module &M, raw_ostream &Out,
                                      const ModuleSummaryIndex &Index,
                                      const ModuleHash &ModHash) {
  SmallVector<char, 0> Buffer;
  Buffer.reserve(256 * 1024);

  BitcodeWriter Writer(Buffer);
  Writer.writeThinLinkBitcode(M, Index, ModHash);
  Writer.writeSymtab();
  Writer.writeStrtab();

  // Buffer is non-empty: the writer's constructor emitted the bitcode header.
  Out.write((char *)&Buffer.front(), Buffer.size());
}
+
+// Name of the object-file section that carries an embedded bitcode module,
+// chosen by the target's object format.
+static const char *getSectionNameForBitcode(const Triple &T) {
+  switch (T.getObjectFormat()) {
+  case Triple::GOFF:
+    llvm_unreachable("GOFF is not yet implemented");
+  case Triple::XCOFF:
+    llvm_unreachable("XCOFF is not yet implemented");
+  case Triple::MachO:
+    // Mach-O sections are "segment,section" pairs.
+    return "__LLVM,__bitcode";
+  case Triple::COFF:
+  case Triple::ELF:
+  case Triple::Wasm:
+  case Triple::UnknownObjectFormat:
+    return ".llvmbc";
+  }
+  llvm_unreachable("Unimplemented ObjectFormatType");
+}
+
+// Name of the object-file section that carries the embedded command line,
+// chosen by the target's object format.
+static const char *getSectionNameForCommandline(const Triple &T) {
+  switch (T.getObjectFormat()) {
+  case Triple::GOFF:
+    llvm_unreachable("GOFF is not yet implemented");
+  case Triple::XCOFF:
+    llvm_unreachable("XCOFF is not yet implemented");
+  case Triple::MachO:
+    // Mach-O sections are "segment,section" pairs.
+    return "__LLVM,__cmdline";
+  case Triple::COFF:
+  case Triple::ELF:
+  case Triple::Wasm:
+  case Triple::UnknownObjectFormat:
+    return ".llvmcmd";
+  }
+  llvm_unreachable("Unimplemented ObjectFormatType");
+}
+
+// Embed the module's own bitcode (and optionally the command line) into \p M
+// as private global constants placed in format-specific sections, and rebuild
+// llvm.compiler.used so the linker keeps them alive. \p Buf may hold a
+// pre-serialized bitcode image; if it is empty or not bitcode, the module is
+// serialized here instead.
+void llvm::embedBitcodeInModule(llvm::Module &M, llvm::MemoryBufferRef Buf,
+                                bool EmbedBitcode, bool EmbedCmdline,
+                                const std::vector<uint8_t> &CmdArgs) {
+  // Save llvm.compiler.used and remove it.
+  SmallVector<Constant *, 2> UsedArray;
+  SmallVector<GlobalValue *, 4> UsedGlobals;
+  Type *UsedElementType = Type::getInt8Ty(M.getContext())->getPointerTo(0);
+  GlobalVariable *Used = collectUsedGlobalVariables(M, UsedGlobals, true);
+  // Keep every previous entry except stale copies of the globals we are about
+  // to (re)create below.
+  for (auto *GV : UsedGlobals) {
+    if (GV->getName() != "llvm.embedded.module" &&
+        GV->getName() != "llvm.cmdline")
+      UsedArray.push_back(
+          ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, UsedElementType));
+  }
+  if (Used)
+    Used->eraseFromParent();
+
+  // Embed the bitcode for the llvm module.
+  std::string Data;
+  ArrayRef<uint8_t> ModuleData;
+  Triple T(M.getTargetTriple());
+
+  // When EmbedBitcode is false, ModuleData stays empty and an empty array
+  // constant is embedded below.
+  if (EmbedBitcode) {
+    if (Buf.getBufferSize() == 0 ||
+        !isBitcode((const unsigned char *)Buf.getBufferStart(),
+                   (const unsigned char *)Buf.getBufferEnd())) {
+      // If the input is LLVM Assembly, bitcode is produced by serializing
+      // the module. Use-lists order need to be preserved in this case.
+      llvm::raw_string_ostream OS(Data);
+      llvm::WriteBitcodeToFile(M, OS, /* ShouldPreserveUseListOrder */ true);
+      ModuleData =
+          ArrayRef<uint8_t>((const uint8_t *)OS.str().data(), OS.str().size());
+    } else
+      // If the input is LLVM bitcode, write the input byte stream directly.
+      ModuleData = ArrayRef<uint8_t>((const uint8_t *)Buf.getBufferStart(),
+                                     Buf.getBufferSize());
+  }
+  llvm::Constant *ModuleConstant =
+      llvm::ConstantDataArray::get(M.getContext(), ModuleData);
+  llvm::GlobalVariable *GV = new llvm::GlobalVariable(
+      M, ModuleConstant->getType(), true, llvm::GlobalValue::PrivateLinkage,
+      ModuleConstant);
+  GV->setSection(getSectionNameForBitcode(T));
+  // Set alignment to 1 to prevent padding between two contributions from input
+  // sections after linking.
+  GV->setAlignment(Align(1));
+  UsedArray.push_back(
+      ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, UsedElementType));
+  // Reuse the canonical name if a previous embedding pass already created it.
+  if (llvm::GlobalVariable *Old =
+          M.getGlobalVariable("llvm.embedded.module", true)) {
+    assert(Old->hasOneUse() &&
+           "llvm.embedded.module can only be used once in llvm.compiler.used");
+    GV->takeName(Old);
+    Old->eraseFromParent();
+  } else {
+    GV->setName("llvm.embedded.module");
+  }
+
+  // Skip if only bitcode needs to be embedded.
+  if (EmbedCmdline) {
+    // Embed command-line options.
+    ArrayRef<uint8_t> CmdData(const_cast<uint8_t *>(CmdArgs.data()),
+                              CmdArgs.size());
+    llvm::Constant *CmdConstant =
+        llvm::ConstantDataArray::get(M.getContext(), CmdData);
+    GV = new llvm::GlobalVariable(M, CmdConstant->getType(), true,
+                                  llvm::GlobalValue::PrivateLinkage,
+                                  CmdConstant);
+    GV->setSection(getSectionNameForCommandline(T));
+    GV->setAlignment(Align(1));
+    UsedArray.push_back(
+        ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, UsedElementType));
+    if (llvm::GlobalVariable *Old = M.getGlobalVariable("llvm.cmdline", true)) {
+      assert(Old->hasOneUse() &&
+             "llvm.cmdline can only be used once in llvm.compiler.used");
+      GV->takeName(Old);
+      Old->eraseFromParent();
+    } else {
+      GV->setName("llvm.cmdline");
+    }
+  }
+
+  if (UsedArray.empty())
+    return;
+
+  // Recreate llvm.compiler.used.
+  ArrayType *ATy = ArrayType::get(UsedElementType, UsedArray.size());
+  auto *NewUsed = new GlobalVariable(
+      M, ATy, false, llvm::GlobalValue::AppendingLinkage,
+      llvm::ConstantArray::get(ATy, UsedArray), "llvm.compiler.used");
+  NewUsed->setSection("llvm.metadata");
+}
diff --git a/contrib/libs/llvm14/lib/Bitcode/Writer/BitcodeWriterPass.cpp b/contrib/libs/llvm14/lib/Bitcode/Writer/BitcodeWriterPass.cpp
new file mode 100644
index 0000000000..d884415aaf
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Writer/BitcodeWriterPass.cpp
@@ -0,0 +1,86 @@
+//===- BitcodeWriterPass.cpp - Bitcode writing pass -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// BitcodeWriterPass implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Bitcode/BitcodeWriterPass.h"
+#include "llvm/Analysis/ModuleSummaryAnalysis.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+using namespace llvm;
+
+// New-PM entry point: serialize \p M as bitcode to the configured stream.
+// The summary index is only computed (via the analysis manager) when the
+// pass was configured to emit it.
+PreservedAnalyses BitcodeWriterPass::run(Module &M, ModuleAnalysisManager &AM) {
+  const ModuleSummaryIndex *Index = nullptr;
+  if (EmitSummaryIndex)
+    Index = &AM.getResult<ModuleSummaryIndexAnalysis>(M);
+  WriteBitcodeToFile(M, OS, ShouldPreserveUseListOrder, Index, EmitModuleHash);
+  // Writing is a pure output operation; no IR or analyses are invalidated.
+  return PreservedAnalyses::all();
+}
+
+namespace {
+  // Legacy pass-manager wrapper around WriteBitcodeToFile.
+  class WriteBitcodePass : public ModulePass {
+    raw_ostream &OS; // raw_ostream to print on
+    // Default member initializers: the registry default constructor below
+    // previously left these flags uninitialized, so a default-constructed
+    // pass read indeterminate values in runOnModule()/getAnalysisUsage().
+    bool ShouldPreserveUseListOrder = false;
+    bool EmitSummaryIndex = false;
+    bool EmitModuleHash = false;
+
+  public:
+    static char ID; // Pass identification, replacement for typeid
+    WriteBitcodePass() : ModulePass(ID), OS(dbgs()) {
+      initializeWriteBitcodePassPass(*PassRegistry::getPassRegistry());
+    }
+
+    explicit WriteBitcodePass(raw_ostream &o, bool ShouldPreserveUseListOrder,
+                              bool EmitSummaryIndex, bool EmitModuleHash)
+        : ModulePass(ID), OS(o),
+          ShouldPreserveUseListOrder(ShouldPreserveUseListOrder),
+          EmitSummaryIndex(EmitSummaryIndex), EmitModuleHash(EmitModuleHash) {
+      initializeWriteBitcodePassPass(*PassRegistry::getPassRegistry());
+    }
+
+    StringRef getPassName() const override { return "Bitcode Writer"; }
+
+    // Serialize the module; never mutates the IR, hence returns false.
+    bool runOnModule(Module &M) override {
+      const ModuleSummaryIndex *Index =
+          EmitSummaryIndex
+              ? &(getAnalysis<ModuleSummaryIndexWrapperPass>().getIndex())
+              : nullptr;
+      WriteBitcodeToFile(M, OS, ShouldPreserveUseListOrder, Index,
+                         EmitModuleHash);
+      return false;
+    }
+    void getAnalysisUsage(AnalysisUsage &AU) const override {
+      AU.setPreservesAll();
+      // Only request the summary analysis when it will actually be emitted.
+      if (EmitSummaryIndex)
+        AU.addRequired<ModuleSummaryIndexWrapperPass>();
+    }
+  };
+}
+
+// Unique address used as the legacy pass identifier.
+char WriteBitcodePass::ID = 0;
+// Register the pass with the legacy pass registry, declaring the summary
+// index analysis as a dependency (it is only required conditionally, see
+// getAnalysisUsage above).
+INITIALIZE_PASS_BEGIN(WriteBitcodePass, "write-bitcode", "Write Bitcode", false,
+                      true)
+INITIALIZE_PASS_DEPENDENCY(ModuleSummaryIndexWrapperPass)
+INITIALIZE_PASS_END(WriteBitcodePass, "write-bitcode", "Write Bitcode", false,
+                    true)
+
+// Factory for the legacy-PM bitcode writer pass; the caller owns the result.
+ModulePass *llvm::createBitcodeWriterPass(raw_ostream &Str,
+                                          bool ShouldPreserveUseListOrder,
+                                          bool EmitSummaryIndex, bool EmitModuleHash) {
+  auto *Pass = new WriteBitcodePass(Str, ShouldPreserveUseListOrder,
+                                    EmitSummaryIndex, EmitModuleHash);
+  return Pass;
+}
+
+// True iff \p P is an instance of the (file-local) WriteBitcodePass,
+// identified by comparing legacy pass IDs.
+bool llvm::isBitcodeWriterPass(Pass *P) {
+  const auto WriterID = (llvm::AnalysisID)&WriteBitcodePass::ID;
+  return P->getPassID() == WriterID;
+}
diff --git a/contrib/libs/llvm14/lib/Bitcode/Writer/ValueEnumerator.cpp b/contrib/libs/llvm14/lib/Bitcode/Writer/ValueEnumerator.cpp
new file mode 100644
index 0000000000..01f7e85bd6
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Writer/ValueEnumerator.cpp
@@ -0,0 +1,1181 @@
+//===- ValueEnumerator.cpp - Number values and types for bitcode writer ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ValueEnumerator class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ValueEnumerator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/IR/Argument.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Constant.h"
+#include "llvm/IR/DebugInfoMetadata.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/GlobalAlias.h"
+#include "llvm/IR/GlobalIFunc.h"
+#include "llvm/IR/GlobalObject.h"
+#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/GlobalVariable.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+#include "llvm/IR/Operator.h"
+#include "llvm/IR/Type.h"
+#include "llvm/IR/Use.h"
+#include "llvm/IR/User.h"
+#include "llvm/IR/Value.h"
+#include "llvm/IR/ValueSymbolTable.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstddef>
+#include <iterator>
+#include <tuple>
+
+using namespace llvm;
+
+namespace {
+
+// Assigns each Value a 1-based serialization ID plus a "already predicted"
+// flag, and remembers the ID boundaries between global constants, global
+// values, and everything else.
+struct OrderMap {
+  // Value -> (ID, use-list-order-already-predicted).
+  DenseMap<const Value *, std::pair<unsigned, bool>> IDs;
+  unsigned LastGlobalConstantID = 0;
+  unsigned LastGlobalValueID = 0;
+
+  OrderMap() = default;
+
+  // IDs in [1, LastGlobalConstantID] are module-level constants.
+  bool isGlobalConstant(unsigned ID) const {
+    return ID <= LastGlobalConstantID;
+  }
+
+  // IDs in (LastGlobalConstantID, LastGlobalValueID] are GlobalValues.
+  bool isGlobalValue(unsigned ID) const {
+    return ID <= LastGlobalValueID && !isGlobalConstant(ID);
+  }
+
+  unsigned size() const { return IDs.size(); }
+  std::pair<unsigned, bool> &operator[](const Value *V) { return IDs[V]; }
+
+  // Returns (0, false) for values that have not been indexed yet.
+  std::pair<unsigned, bool> lookup(const Value *V) const {
+    return IDs.lookup(V);
+  }
+
+  void index(const Value *V) {
+    // Explicitly sequence get-size and insert-value operations to avoid UB.
+    unsigned ID = IDs.size() + 1;
+    IDs[V].first = ID;
+  }
+};
+
+} // end anonymous namespace
+
+// Assign an ID to \p V (if it doesn't have one yet), first recursing into the
+// operands of non-global constants so that operands receive smaller IDs than
+// their users. The resulting ID order must match the writer's emission order.
+static void orderValue(const Value *V, OrderMap &OM) {
+  if (OM.lookup(V).first)
+    return;
+
+  if (const Constant *C = dyn_cast<Constant>(V)) {
+    if (C->getNumOperands() && !isa<GlobalValue>(C)) {
+      for (const Value *Op : C->operands())
+        if (!isa<BasicBlock>(Op) && !isa<GlobalValue>(Op))
+          orderValue(Op, OM);
+      // Shufflevector masks are serialized as separate constants; order them
+      // like any other operand.
+      if (auto *CE = dyn_cast<ConstantExpr>(C))
+        if (CE->getOpcode() == Instruction::ShuffleVector)
+          orderValue(CE->getShuffleMaskForBitcode(), OM);
+    }
+  }
+
+  // Note: we cannot cache this lookup above, since inserting into the map
+  // changes the map's size, and thus affects the other IDs.
+  OM.index(V);
+}
+
+// Build the OrderMap for the whole module: IDs are assigned in exactly the
+// order the bitcode reader will materialize values, so that use-list order
+// prediction (below) can compare IDs meaningfully.
+static OrderMap orderModule(const Module &M) {
+  // This needs to match the order used by ValueEnumerator::ValueEnumerator()
+  // and ValueEnumerator::incorporateFunction().
+  OrderMap OM;
+
+  // In the reader, initializers of GlobalValues are set *after* all the
+  // globals have been read. Rather than awkwardly modeling this behaviour
+  // directly in predictValueUseListOrderImpl(), just assign IDs to
+  // initializers of GlobalValues before GlobalValues themselves to model this
+  // implicitly.
+  for (const GlobalVariable &G : M.globals())
+    if (G.hasInitializer())
+      if (!isa<GlobalValue>(G.getInitializer()))
+        orderValue(G.getInitializer(), OM);
+  for (const GlobalAlias &A : M.aliases())
+    if (!isa<GlobalValue>(A.getAliasee()))
+      orderValue(A.getAliasee(), OM);
+  for (const GlobalIFunc &I : M.ifuncs())
+    if (!isa<GlobalValue>(I.getResolver()))
+      orderValue(I.getResolver(), OM);
+  for (const Function &F : M) {
+    for (const Use &U : F.operands())
+      if (!isa<GlobalValue>(U.get()))
+        orderValue(U.get(), OM);
+  }
+
+  // As constants used in metadata operands are emitted as module-level
+  // constants, we must order them before other operands. Also, we must order
+  // these before global values, as these will be read before setting the
+  // global values' initializers. The latter matters for constants which have
+  // uses towards other constants that are used as initializers.
+  auto orderConstantValue = [&OM](const Value *V) {
+    if ((isa<Constant>(V) && !isa<GlobalValue>(V)) || isa<InlineAsm>(V))
+      orderValue(V, OM);
+  };
+  for (const Function &F : M) {
+    if (F.isDeclaration())
+      continue;
+    for (const BasicBlock &BB : F)
+      for (const Instruction &I : BB)
+        for (const Value *V : I.operands()) {
+          // Constants can be reached through metadata operands either
+          // directly (ValueAsMetadata) or via a DIArgList's argument list.
+          if (const auto *MAV = dyn_cast<MetadataAsValue>(V)) {
+            if (const auto *VAM =
+                    dyn_cast<ValueAsMetadata>(MAV->getMetadata())) {
+              orderConstantValue(VAM->getValue());
+            } else if (const auto *AL =
+                           dyn_cast<DIArgList>(MAV->getMetadata())) {
+              for (const auto *VAM : AL->getArgs())
+                orderConstantValue(VAM->getValue());
+            }
+          }
+        }
+  }
+  OM.LastGlobalConstantID = OM.size();
+
+  // Initializers of GlobalValues are processed in
+  // BitcodeReader::ResolveGlobalAndAliasInits(). Match the order there rather
+  // than ValueEnumerator, and match the code in predictValueUseListOrderImpl()
+  // by giving IDs in reverse order.
+  //
+  // Since GlobalValues never reference each other directly (just through
+  // initializers), their relative IDs only matter for determining order of
+  // uses in their initializers.
+  for (const Function &F : M)
+    orderValue(&F, OM);
+  for (const GlobalAlias &A : M.aliases())
+    orderValue(&A, OM);
+  for (const GlobalIFunc &I : M.ifuncs())
+    orderValue(&I, OM);
+  for (const GlobalVariable &G : M.globals())
+    orderValue(&G, OM);
+  OM.LastGlobalValueID = OM.size();
+
+  for (const Function &F : M) {
+    if (F.isDeclaration())
+      continue;
+    // Here we need to match the union of ValueEnumerator::incorporateFunction()
+    // and WriteFunction(). Basic blocks are implicitly declared before
+    // anything else (by declaring their size).
+    for (const BasicBlock &BB : F)
+      orderValue(&BB, OM);
+    for (const Argument &A : F.args())
+      orderValue(&A, OM);
+    for (const BasicBlock &BB : F)
+      for (const Instruction &I : BB) {
+        for (const Value *Op : I.operands())
+          if ((isa<Constant>(*Op) && !isa<GlobalValue>(*Op)) ||
+              isa<InlineAsm>(*Op))
+            orderValue(Op, OM);
+        if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+          orderValue(SVI->getShuffleMaskForBitcode(), OM);
+      }
+    for (const BasicBlock &BB : F)
+      for (const Instruction &I : BB)
+        orderValue(&I, OM);
+  }
+  return OM;
+}
+
+// Predict the order in which the reader will reconstruct \p V's use-list and,
+// if that differs from the in-memory order, record a shuffle on \p Stack so
+// the writer can emit an explicit use-list-order record.
+// \p ID is V's own serialization ID from \p OM.
+static void predictValueUseListOrderImpl(const Value *V, const Function *F,
+                                         unsigned ID, const OrderMap &OM,
+                                         UseListOrderStack &Stack) {
+  // Predict use-list order for this one.
+  using Entry = std::pair<const Use *, unsigned>;
+  SmallVector<Entry, 64> List;
+  for (const Use &U : V->uses())
+    // Check if this user will be serialized.
+    if (OM.lookup(U.getUser()).first)
+      List.push_back(std::make_pair(&U, List.size()));
+
+  if (List.size() < 2)
+    // We may have lost some users.
+    return;
+
+  bool IsGlobalValue = OM.isGlobalValue(ID);
+  // Sort the uses into the order the reader will add them. The comparator
+  // encodes the reader's behaviour: users created before V (smaller IDs)
+  // re-add their uses in reverse once V materializes; GlobalValue uses are
+  // the exception and keep forward order.
+  llvm::sort(List, [&](const Entry &L, const Entry &R) {
+    const Use *LU = L.first;
+    const Use *RU = R.first;
+    if (LU == RU)
+      return false;
+
+    auto LID = OM.lookup(LU->getUser()).first;
+    auto RID = OM.lookup(RU->getUser()).first;
+
+    // Global values are processed in reverse order.
+    //
+    // Moreover, initializers of GlobalValues are set *after* all the globals
+    // have been read (despite having earlier IDs). Rather than awkwardly
+    // modeling this behaviour here, orderModule() has assigned IDs to
+    // initializers of GlobalValues before GlobalValues themselves.
+    if (OM.isGlobalValue(LID) && OM.isGlobalValue(RID)) {
+      if (LID == RID)
+        return LU->getOperandNo() > RU->getOperandNo();
+      return LID < RID;
+    }
+
+    // If ID is 4, then expect: 7 6 5 1 2 3.
+    if (LID < RID) {
+      if (RID <= ID)
+        if (!IsGlobalValue) // GlobalValue uses don't get reversed.
+          return true;
+      return false;
+    }
+    if (RID < LID) {
+      if (LID <= ID)
+        if (!IsGlobalValue) // GlobalValue uses don't get reversed.
+          return false;
+      return true;
+    }
+
+    // LID and RID are equal, so we have different operands of the same user.
+    // Assume operands are added in order for all instructions.
+    if (LID <= ID)
+      if (!IsGlobalValue) // GlobalValue uses don't get reversed.
+        return LU->getOperandNo() < RU->getOperandNo();
+    return LU->getOperandNo() > RU->getOperandNo();
+  });
+
+  if (llvm::is_sorted(List, [](const Entry &L, const Entry &R) {
+        return L.second < R.second;
+      }))
+    // Order is already correct.
+    return;
+
+  // Store the shuffle.
+  Stack.emplace_back(V, F, List.size());
+  assert(List.size() == Stack.back().Shuffle.size() && "Wrong size");
+  for (size_t I = 0, E = List.size(); I != E; ++I)
+    Stack.back().Shuffle[I] = List[I].second;
+}
+
+// Predict the use-list order for \p V once (the OrderMap's bool marks values
+// already handled), then recurse into constant operands so every reachable
+// constant is covered exactly once.
+static void predictValueUseListOrder(const Value *V, const Function *F,
+                                     OrderMap &OM, UseListOrderStack &Stack) {
+  auto &IDPair = OM[V];
+  assert(IDPair.first && "Unmapped value");
+  if (IDPair.second)
+    // Already predicted.
+    return;
+
+  // Do the actual prediction.
+  IDPair.second = true;
+  // Only values with at least two uses can have a non-trivial order.
+  if (!V->use_empty() && std::next(V->use_begin()) != V->use_end())
+    predictValueUseListOrderImpl(V, F, IDPair.first, OM, Stack);
+
+  // Recursive descent into constants.
+  if (const Constant *C = dyn_cast<Constant>(V)) {
+    if (C->getNumOperands()) { // Visit GlobalValues.
+      for (const Value *Op : C->operands())
+        if (isa<Constant>(Op)) // Visit GlobalValues.
+          predictValueUseListOrder(Op, F, OM, Stack);
+      if (auto *CE = dyn_cast<ConstantExpr>(C))
+        if (CE->getOpcode() == Instruction::ShuffleVector)
+          predictValueUseListOrder(CE->getShuffleMaskForBitcode(), F, OM,
+                                   Stack);
+    }
+  }
+}
+
+// Compute the full stack of use-list shuffles for the module: first order all
+// values like the reader would, then predict per-value use-list order for
+// function-local values (functions visited in reverse) and finally for
+// module-level values.
+static UseListOrderStack predictUseListOrder(const Module &M) {
+  OrderMap OM = orderModule(M);
+
+  // Use-list orders need to be serialized after all the users have been added
+  // to a value, or else the shuffles will be incomplete. Store them per
+  // function in a stack.
+  //
+  // Aside from function order, the order of values doesn't matter much here.
+  UseListOrderStack Stack;
+
+  // We want to visit the functions backward now so we can list function-local
+  // constants in the last Function they're used in. Module-level constants
+  // have already been visited above.
+  for (const Function &F : llvm::reverse(M)) {
+    if (F.isDeclaration())
+      continue;
+    for (const BasicBlock &BB : F)
+      predictValueUseListOrder(&BB, &F, OM, Stack);
+    for (const Argument &A : F.args())
+      predictValueUseListOrder(&A, &F, OM, Stack);
+    for (const BasicBlock &BB : F)
+      for (const Instruction &I : BB) {
+        for (const Value *Op : I.operands())
+          if (isa<Constant>(*Op) || isa<InlineAsm>(*Op)) // Visit GlobalValues.
+            predictValueUseListOrder(Op, &F, OM, Stack);
+        if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+          predictValueUseListOrder(SVI->getShuffleMaskForBitcode(), &F, OM,
+                                   Stack);
+      }
+    for (const BasicBlock &BB : F)
+      for (const Instruction &I : BB)
+        predictValueUseListOrder(&I, &F, OM, Stack);
+  }
+
+  // Visit globals last, since the module-level use-list block will be seen
+  // before the function bodies are processed.
+  for (const GlobalVariable &G : M.globals())
+    predictValueUseListOrder(&G, nullptr, OM, Stack);
+  for (const Function &F : M)
+    predictValueUseListOrder(&F, nullptr, OM, Stack);
+  for (const GlobalAlias &A : M.aliases())
+    predictValueUseListOrder(&A, nullptr, OM, Stack);
+  for (const GlobalIFunc &I : M.ifuncs())
+    predictValueUseListOrder(&I, nullptr, OM, Stack);
+  for (const GlobalVariable &G : M.globals())
+    if (G.hasInitializer())
+      predictValueUseListOrder(G.getInitializer(), nullptr, OM, Stack);
+  for (const GlobalAlias &A : M.aliases())
+    predictValueUseListOrder(A.getAliasee(), nullptr, OM, Stack);
+  for (const GlobalIFunc &I : M.ifuncs())
+    predictValueUseListOrder(I.getResolver(), nullptr, OM, Stack);
+  for (const Function &F : M) {
+    for (const Use &U : F.operands())
+      predictValueUseListOrder(U.get(), nullptr, OM, Stack);
+  }
+
+  return Stack;
+}
+
+// Predicate used when partitioning the constant pool: true when the
+// (value, frequency) pair holds an integer or vector-of-integer value.
+static bool isIntOrIntVectorValue(const std::pair<const Value*, unsigned> &V) {
+  const Type *Ty = V.first->getType();
+  return Ty->isIntOrIntVectorTy();
+}
+
+// Enumerate everything in \p M that the bitcode writer will emit: global
+// values first (so they get the lowest IDs), then initializers and other
+// module-level constants, types, attributes and metadata. The enumeration
+// order here defines the IDs written to disk and must stay in sync with the
+// reader and with orderModule() above.
+ValueEnumerator::ValueEnumerator(const Module &M,
+                                 bool ShouldPreserveUseListOrder)
+    : ShouldPreserveUseListOrder(ShouldPreserveUseListOrder) {
+  if (ShouldPreserveUseListOrder)
+    UseListOrders = predictUseListOrder(M);
+
+  // Enumerate the global variables.
+  for (const GlobalVariable &GV : M.globals()) {
+    EnumerateValue(&GV);
+    EnumerateType(GV.getValueType());
+  }
+
+  // Enumerate the functions.
+  for (const Function & F : M) {
+    EnumerateValue(&F);
+    EnumerateType(F.getValueType());
+    EnumerateAttributes(F.getAttributes());
+  }
+
+  // Enumerate the aliases.
+  for (const GlobalAlias &GA : M.aliases()) {
+    EnumerateValue(&GA);
+    EnumerateType(GA.getValueType());
+  }
+
+  // Enumerate the ifuncs.
+  for (const GlobalIFunc &GIF : M.ifuncs()) {
+    EnumerateValue(&GIF);
+    EnumerateType(GIF.getValueType());
+  }
+
+  // Remember what is the cutoff between globalvalue's and other constants.
+  unsigned FirstConstant = Values.size();
+
+  // Enumerate the global variable initializers and attributes.
+  for (const GlobalVariable &GV : M.globals()) {
+    if (GV.hasInitializer())
+      EnumerateValue(GV.getInitializer());
+    if (GV.hasAttributes())
+      EnumerateAttributes(GV.getAttributesAsList(AttributeList::FunctionIndex));
+  }
+
+  // Enumerate the aliasees.
+  for (const GlobalAlias &GA : M.aliases())
+    EnumerateValue(GA.getAliasee());
+
+  // Enumerate the ifunc resolvers.
+  for (const GlobalIFunc &GIF : M.ifuncs())
+    EnumerateValue(GIF.getResolver());
+
+  // Enumerate any optional Function data.
+  for (const Function &F : M)
+    for (const Use &U : F.operands())
+      EnumerateValue(U.get());
+
+  // Enumerate the metadata type.
+  //
+  // TODO: Move this to ValueEnumerator::EnumerateOperandType() once bitcode
+  // only encodes the metadata type when it's used as a value.
+  EnumerateType(Type::getMetadataTy(M.getContext()));
+
+  // Insert constants and metadata that are named at module level into the slot
+  // pool so that the module symbol table can refer to them...
+  EnumerateValueSymbolTable(M.getValueSymbolTable());
+  EnumerateNamedMetadata(M);
+
+  SmallVector<std::pair<unsigned, MDNode *>, 8> MDs;
+  for (const GlobalVariable &GV : M.globals()) {
+    MDs.clear();
+    GV.getAllMetadata(MDs);
+    for (const auto &I : MDs)
+      // FIXME: Pass GV to EnumerateMetadata and arrange for the bitcode writer
+      // to write metadata to the global variable's own metadata block
+      // (PR28134).
+      EnumerateMetadata(nullptr, I.second);
+  }
+
+  // Enumerate types used by function bodies and argument lists.
+  for (const Function &F : M) {
+    for (const Argument &A : F.args())
+      EnumerateType(A.getType());
+
+    // Enumerate metadata attached to this function.
+    MDs.clear();
+    F.getAllMetadata(MDs);
+    for (const auto &I : MDs)
+      EnumerateMetadata(F.isDeclaration() ? nullptr : &F, I.second);
+
+    for (const BasicBlock &BB : F)
+      for (const Instruction &I : BB) {
+        for (const Use &Op : I.operands()) {
+          auto *MD = dyn_cast<MetadataAsValue>(&Op);
+          if (!MD) {
+            EnumerateOperandType(Op);
+            continue;
+          }
+
+          // Local metadata is enumerated during function-incorporation, but
+          // any ConstantAsMetadata arguments in a DIArgList should be examined
+          // now.
+          if (isa<LocalAsMetadata>(MD->getMetadata()))
+            continue;
+          if (auto *AL = dyn_cast<DIArgList>(MD->getMetadata())) {
+            for (auto *VAM : AL->getArgs())
+              if (isa<ConstantAsMetadata>(VAM))
+                EnumerateMetadata(&F, VAM);
+            continue;
+          }
+
+          EnumerateMetadata(&F, MD->getMetadata());
+        }
+        // Some instructions carry extra types not visible through operands.
+        if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+          EnumerateType(SVI->getShuffleMaskForBitcode()->getType());
+        if (auto *GEP = dyn_cast<GetElementPtrInst>(&I))
+          EnumerateType(GEP->getSourceElementType());
+        if (auto *AI = dyn_cast<AllocaInst>(&I))
+          EnumerateType(AI->getAllocatedType());
+        EnumerateType(I.getType());
+        if (const auto *Call = dyn_cast<CallBase>(&I)) {
+          EnumerateAttributes(Call->getAttributes());
+          EnumerateType(Call->getFunctionType());
+        }
+
+        // Enumerate metadata attached with this instruction.
+        MDs.clear();
+        I.getAllMetadataOtherThanDebugLoc(MDs);
+        for (unsigned i = 0, e = MDs.size(); i != e; ++i)
+          EnumerateMetadata(&F, MDs[i].second);
+
+        // Don't enumerate the location directly -- it has a special record
+        // type -- but enumerate its operands.
+        if (DILocation *L = I.getDebugLoc())
+          for (const Metadata *Op : L->operands())
+            EnumerateMetadata(&F, Op);
+      }
+  }
+
+  // Optimize constant ordering.
+  OptimizeConstants(FirstConstant, Values.size());
+
+  // Organize metadata ordering.
+  organizeMetadata();
+}
+
+// Look up the ID previously assigned to \p Inst by setInstructionID().
+// Asserts if the instruction was never registered.
+unsigned ValueEnumerator::getInstructionID(const Instruction *Inst) const {
+  const auto It = InstructionMap.find(Inst);
+  assert(It != InstructionMap.end() && "Instruction is not mapped!");
+  return It->second;
+}
+
+// 1-based ID of \p C in the comdat set; asserts if it was never enumerated
+// (idFor returns 0 for unknown entries).
+unsigned ValueEnumerator::getComdatID(const Comdat *C) const {
+  const unsigned ID = Comdats.idFor(C);
+  assert(ID && "Comdat not found!");
+  return ID;
+}
+
+// Register \p I with the next sequential instruction ID.
+void ValueEnumerator::setInstructionID(const Instruction *I) {
+  InstructionMap[I] = InstructionCount++;
+}
+
+// Return the serialization ID for \p V. Metadata-as-value is forwarded to the
+// metadata numbering; everything else comes from ValueMap.
+unsigned ValueEnumerator::getValueID(const Value *V) const {
+  if (auto *MD = dyn_cast<MetadataAsValue>(V))
+    return getMetadataID(MD->getMetadata());
+
+  ValueMapType::const_iterator I = ValueMap.find(V);
+  assert(I != ValueMap.end() && "Value not in slotcalculator!");
+  // ValueMap stores IDs offset by one so that 0 can mean "absent".
+  return I->second-1;
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+// Debug helper: dump both the value and metadata maps to dbgs().
+LLVM_DUMP_METHOD void ValueEnumerator::dump() const {
+  print(dbgs(), ValueMap, "Default");
+  dbgs() << '\n';
+  print(dbgs(), MetadataMap, "MetaData");
+  dbgs() << '\n';
+}
+#endif
+
+/// Debug dump of a value map: each entry's value, its IR, and its users.
+/// All output now goes to \p OS: previously the value's IR was printed to
+/// errs() even when a different stream was passed, and named values were
+/// missing the trailing newline the unnamed branch emitted.
+void ValueEnumerator::print(raw_ostream &OS, const ValueMapType &Map,
+                            const char *Name) const {
+  OS << "Map Name: " << Name << "\n";
+  OS << "Size: " << Map.size() << "\n";
+  for (const auto &I : Map) {
+    const Value *V = I.first;
+    if (V->hasName())
+      OS << "Value: " << V->getName() << "\n";
+    else
+      OS << "Value: [null]\n";
+    V->print(OS);
+    OS << '\n';
+
+    OS << " Uses(" << V->getNumUses() << "):";
+    for (const Use &U : V->uses()) {
+      if (&U != &*V->use_begin())
+        OS << ",";
+      if (U->hasName())
+        OS << " " << U->getName();
+      else
+        OS << " [null]";
+    }
+    OS << "\n\n";
+  }
+}
+
+// Debug dump of the metadata map: each entry's slot, owning-function tag,
+// and printed form.
+void ValueEnumerator::print(raw_ostream &OS, const MetadataMapType &Map,
+                            const char *Name) const {
+  OS << "Map Name: " << Name << "\n";
+  OS << "Size: " << Map.size() << "\n";
+  for (const auto &I : Map) {
+    const Metadata *MD = I.first;
+    OS << "Metadata: slot = " << I.second.ID << "\n";
+    OS << "Metadata: function = " << I.second.F << "\n";
+    MD->print(OS);
+    OS << "\n";
+  }
+}
+
+/// OptimizeConstants - Reorder constant pool for denser encoding.
+/// Constants in [CstStart, CstEnd) are sorted by type plane, then by use
+/// frequency, with integer(-vector) constants partitioned to the front.
+void ValueEnumerator::OptimizeConstants(unsigned CstStart, unsigned CstEnd) {
+  if (CstStart == CstEnd || CstStart+1 == CstEnd) return;
+
+  if (ShouldPreserveUseListOrder)
+    // Optimizing constants makes the use-list order difficult to predict.
+    // Disable it for now when trying to preserve the order.
+    return;
+
+  std::stable_sort(Values.begin() + CstStart, Values.begin() + CstEnd,
+                   [this](const std::pair<const Value *, unsigned> &LHS,
+                          const std::pair<const Value *, unsigned> &RHS) {
+    // Sort by plane.
+    if (LHS.first->getType() != RHS.first->getType())
+      return getTypeID(LHS.first->getType()) < getTypeID(RHS.first->getType());
+    // Then by frequency.
+    return LHS.second > RHS.second;
+  });
+
+  // Ensure that integer and vector of integer constants are at the start of the
+  // constant pool. This is important so that GEP structure indices come before
+  // gep constant exprs.
+  std::stable_partition(Values.begin() + CstStart, Values.begin() + CstEnd,
+                        isIntOrIntVectorValue);
+
+  // Rebuild the modified portion of ValueMap.
+  // Stored IDs remain offset by one (see getValueID).
+  for (; CstStart != CstEnd; ++CstStart)
+    ValueMap[Values[CstStart].first] = CstStart+1;
+}
+
+/// EnumerateValueSymbolTable - Insert all of the values in the specified symbol
+/// table into the values table, so the module symbol table can refer to them.
+void ValueEnumerator::EnumerateValueSymbolTable(const ValueSymbolTable &VST) {
+  for (const auto &Entry : VST)
+    EnumerateValue(Entry.getValue());
+}
+
+/// Insert all of the values referenced by named metadata in the specified
+/// module.
+void ValueEnumerator::EnumerateNamedMetadata(const Module &M) {
+  for (const auto &I : M.named_metadata())
+    EnumerateNamedMDNode(&I);
+}
+
+// Enumerate each operand of a named metadata node at module scope
+// (no owning function).
+void ValueEnumerator::EnumerateNamedMDNode(const NamedMDNode *MD) {
+  for (const MDNode *Op : MD->operands())
+    EnumerateMetadata(nullptr, Op);
+}
+
+// Function tag for metadata ownership: 0 means module-level; otherwise the
+// function's value ID shifted up by one so it can never collide with 0.
+unsigned ValueEnumerator::getMetadataFunctionID(const Function *F) const {
+  if (!F)
+    return 0;
+  return getValueID(F) + 1;
+}
+
+// Convenience overload: translate the (possibly null) function into its
+// numeric tag and forward to the worker.
+void ValueEnumerator::EnumerateMetadata(const Function *F, const Metadata *MD) {
+  EnumerateMetadata(getMetadataFunctionID(F), MD);
+}
+
+// Convenience overload: tag function-local metadata with F's numeric ID.
+void ValueEnumerator::EnumerateFunctionLocalMetadata(
+    const Function &F, const LocalAsMetadata *Local) {
+  EnumerateFunctionLocalMetadata(getMetadataFunctionID(&F), Local);
+}
+
+// Convenience overload: tag a function-local DIArgList with F's numeric ID.
+void ValueEnumerator::EnumerateFunctionLocalListMetadata(
+    const Function &F, const DIArgList *ArgList) {
+  EnumerateFunctionLocalListMetadata(getMetadataFunctionID(&F), ArgList);
+}
+
+// Clear the owning-function tag on \p FirstMD and, transitively, on every
+// already-mapped MDNode operand: metadata reachable from more than one
+// function must be emitted at module level.
+void ValueEnumerator::dropFunctionFromMetadata(
+    MetadataMapType::value_type &FirstMD) {
+  SmallVector<const MDNode *, 64> Worklist;
+  auto push = [&Worklist](MetadataMapType::value_type &MD) {
+    auto &Entry = MD.second;
+
+    // Nothing to do if this metadata isn't tagged.
+    if (!Entry.F)
+      return;
+
+    // Drop the function tag.
+    Entry.F = 0;
+
+    // If this is has an ID and is an MDNode, then its operands have entries as
+    // well. We need to drop the function from them too.
+    if (Entry.ID)
+      if (auto *N = dyn_cast<MDNode>(MD.first))
+        Worklist.push_back(N);
+  };
+  push(FirstMD);
+  // Iterative worklist traversal; already-untagged entries stop the walk.
+  while (!Worklist.empty())
+    for (const Metadata *Op : Worklist.pop_back_val()->operands()) {
+      if (!Op)
+        continue;
+      auto MD = MetadataMap.find(Op);
+      if (MD != MetadataMap.end())
+        push(*MD);
+    }
+}
+
+// Enumerate \p MD and all metadata transitively reachable from it, assigning
+// IDs to MDNodes in post-order (operands before users), with distinct nodes
+// referenced from uniqued subgraphs delayed until the uniqued part is done.
+// \p F is the owning-function tag (0 = module level).
+void ValueEnumerator::EnumerateMetadata(unsigned F, const Metadata *MD) {
+  // It's vital for reader efficiency that uniqued subgraphs are done in
+  // post-order; it's expensive when their operands have forward references.
+  // If a distinct node is referenced from a uniqued node, it'll be delayed
+  // until the uniqued subgraph has been completely traversed.
+  SmallVector<const MDNode *, 32> DelayedDistinctNodes;
+
+  // Start by enumerating MD, and then work through its transitive operands in
+  // post-order. This requires a depth-first search.
+  SmallVector<std::pair<const MDNode *, MDNode::op_iterator>, 32> Worklist;
+  if (const MDNode *N = enumerateMetadataImpl(F, MD))
+    Worklist.push_back(std::make_pair(N, N->op_begin()));
+
+  while (!Worklist.empty()) {
+    const MDNode *N = Worklist.back().first;
+
+    // Enumerate operands until we hit a new node. We need to traverse these
+    // nodes' operands before visiting the rest of N's operands.
+    MDNode::op_iterator I = std::find_if(
+        Worklist.back().second, N->op_end(),
+        [&](const Metadata *MD) { return enumerateMetadataImpl(F, MD); });
+    if (I != N->op_end()) {
+      auto *Op = cast<MDNode>(*I);
+      // Remember where to resume scanning N's operands.
+      Worklist.back().second = ++I;
+
+      // Delay traversing Op if it's a distinct node and N is uniqued.
+      if (Op->isDistinct() && !N->isDistinct())
+        DelayedDistinctNodes.push_back(Op);
+      else
+        Worklist.push_back(std::make_pair(Op, Op->op_begin()));
+      continue;
+    }
+
+    // All the operands have been visited. Now assign an ID.
+    Worklist.pop_back();
+    MDs.push_back(N);
+    MetadataMap[N].ID = MDs.size();
+
+    // Flush out any delayed distinct nodes; these are all the distinct nodes
+    // that are leaves in last uniqued subgraph.
+    if (Worklist.empty() || Worklist.back().first->isDistinct()) {
+      for (const MDNode *N : DelayedDistinctNodes)
+        Worklist.push_back(std::make_pair(N, N->op_begin()));
+      DelayedDistinctNodes.clear();
+    }
+  }
+}
+
+const MDNode *ValueEnumerator::enumerateMetadataImpl(unsigned F, const Metadata *MD) {
+  if (!MD)
+    return nullptr;
+
+  assert(
+      (isa<MDNode>(MD) || isa<MDString>(MD) || isa<ConstantAsMetadata>(MD)) &&
+      "Invalid metadata kind");
+
+  // Try to insert MD, tagged with function F but with no ID yet.
+  auto Insertion = MetadataMap.insert(std::make_pair(MD, MDIndex(F)));
+  MDIndex &Entry = Insertion.first->second;
+  if (!Insertion.second) {
+    // Already mapped. If F doesn't match the function tag, drop it.
+    if (Entry.hasDifferentFunction(F))
+      dropFunctionFromMetadata(*Insertion.first);
+    return nullptr;
+  }
+
+  // Don't assign IDs to metadata nodes.
+  // Returning the node (rather than nullptr) signals the caller that it still
+  // needs a post-order traversal and an ID from EnumerateMetadata().
+  if (auto *N = dyn_cast<MDNode>(MD))
+    return N;
+
+  // Save the metadata.  Non-node metadata gets its 1-based ID immediately.
+  MDs.push_back(MD);
+  Entry.ID = MDs.size();
+
+  // Enumerate the constant, if any.
+  if (auto *C = dyn_cast<ConstantAsMetadata>(MD))
+    EnumerateValue(C->getValue());
+
+  return nullptr;
+}
+
+/// EnumerateFunctionLocalMetadata - Incorporate function-local metadata
+/// information reachable from the metadata.
+void ValueEnumerator::EnumerateFunctionLocalMetadata(
+    unsigned F, const LocalAsMetadata *Local) {
+  assert(F && "Expected a function");
+
+  // Check to see if it's already in!
+  MDIndex &Index = MetadataMap[Local];
+  if (Index.ID) {
+    assert(Index.F == F && "Expected the same function");
+    return;
+  }
+
+  // First sighting: record it with a 1-based ID and tag it with its function.
+  MDs.push_back(Local);
+  Index.F = F;
+  Index.ID = MDs.size();
+
+  // The value wrapped by this metadata must be enumerated as well.
+  EnumerateValue(Local->getValue());
+}
+
+/// EnumerateFunctionLocalListMetadata - Incorporate function-local metadata
+/// information reachable from the metadata.
+void ValueEnumerator::EnumerateFunctionLocalListMetadata(
+    unsigned F, const DIArgList *ArgList) {
+  assert(F && "Expected a function");
+
+  // Check to see if it's already in!
+  MDIndex &Index = MetadataMap[ArgList];
+  if (Index.ID) {
+    assert(Index.F == F && "Expected the same function");
+    return;
+  }
+
+  // Validate the arguments: a DIArgList cannot be forward-referenced, so any
+  // LocalAsMetadata entries (and the constants behind ConstantAsMetadata)
+  // must already have been enumerated by the time we get here.
+  for (ValueAsMetadata *VAM : ArgList->getArgs()) {
+    if (isa<LocalAsMetadata>(VAM)) {
+      assert(MetadataMap.count(VAM) &&
+             "LocalAsMetadata should be enumerated before DIArgList");
+      assert(MetadataMap[VAM].F == F &&
+             "Expected LocalAsMetadata in the same function");
+    } else {
+      assert(isa<ConstantAsMetadata>(VAM) &&
+             "Expected LocalAsMetadata or ConstantAsMetadata");
+      assert(ValueMap.count(VAM->getValue()) &&
+             "Constant should be enumerated before DIArgList");
+      EnumerateMetadata(F, VAM);
+    }
+  }
+
+  // Record the list itself with a 1-based ID, tagged with its function.
+  MDs.push_back(ArgList);
+  Index.F = F;
+  Index.ID = MDs.size();
+}
+
+// Emission-order bucket for a piece of metadata: strings first (they are
+// bulk-encoded), then ConstantAsMetadata, then distinct nodes, then uniqued
+// nodes.
+static unsigned getMetadataTypeOrder(const Metadata *MD) {
+  // MDStrings are written in one bulk block and therefore sort first.
+  if (isa<MDString>(MD))
+    return 0;
+
+  // Anything that isn't a node (i.e. ConstantAsMetadata) references no other
+  // metadata, so it can safely be shuffled toward the front.
+  const auto *Node = dyn_cast<MDNode>(MD);
+  if (!Node)
+    return 1;
+
+  // Forward references to distinct nodes are cheap for the reader, while
+  // unresolved operands of uniqued nodes are expensive — emit distinct nodes
+  // ahead of uniqued ones.
+  if (Node->isDistinct())
+    return 2;
+  return 3;
+}
+
+void ValueEnumerator::organizeMetadata() {
+  assert(MetadataMap.size() == MDs.size() &&
+         "Metadata map and vector out of sync");
+
+  if (MDs.empty())
+    return;
+
+  // Copy out the index information from MetadataMap in order to choose a new
+  // order.
+  SmallVector<MDIndex, 64> Order;
+  Order.reserve(MetadataMap.size());
+  for (const Metadata *MD : MDs)
+    Order.push_back(MetadataMap.lookup(MD));
+
+  // Partition:
+  //   - by function (module-level metadata has function tag 0), then
+  //   - by isa<MDString>
+  // and then sort by the original/current ID. Since the IDs are guaranteed to
+  // be unique, the result of std::sort will be deterministic. There's no need
+  // for std::stable_sort.
+  llvm::sort(Order, [this](MDIndex LHS, MDIndex RHS) {
+    return std::make_tuple(LHS.F, getMetadataTypeOrder(LHS.get(MDs)), LHS.ID) <
+           std::make_tuple(RHS.F, getMetadataTypeOrder(RHS.get(MDs)), RHS.ID);
+  });
+
+  // Rebuild MDs, index the metadata ranges for each function in FunctionMDs,
+  // and fix up MetadataMap.
+  std::vector<const Metadata *> OldMDs;
+  MDs.swap(OldMDs);
+  MDs.reserve(OldMDs.size());
+  // Module-level metadata (F == 0) sorts to the front; re-emit it into MDs
+  // with fresh 1-based IDs, counting the MDString prefix as we go.
+  for (unsigned I = 0, E = Order.size(); I != E && !Order[I].F; ++I) {
+    auto *MD = Order[I].get(OldMDs);
+    MDs.push_back(MD);
+    MetadataMap[MD].ID = I + 1;
+    if (isa<MDString>(MD))
+      ++NumMDStrings;
+  }
+
+  // Return early if there's nothing for the functions.
+  if (MDs.size() == Order.size())
+    return;
+
+  // Build the function metadata ranges.
+  // Function-local metadata goes into FunctionMDs; FunctionMDInfo records the
+  // half-open [First, Last) slice for each function tag.  The implicit IDs
+  // restart just past the module-level block for every function.
+  MDRange R;
+  FunctionMDs.reserve(OldMDs.size());
+  unsigned PrevF = 0;
+  for (unsigned I = MDs.size(), E = Order.size(), ID = MDs.size(); I != E;
+       ++I) {
+    unsigned F = Order[I].F;
+    if (!PrevF) {
+      PrevF = F;
+    } else if (PrevF != F) {
+      // Crossing a function boundary: close out the previous function's
+      // range and start a new one.
+      R.Last = FunctionMDs.size();
+      std::swap(R, FunctionMDInfo[PrevF]);
+      R.First = FunctionMDs.size();
+
+      ID = MDs.size();
+      PrevF = F;
+    }
+
+    auto *MD = Order[I].get(OldMDs);
+    FunctionMDs.push_back(MD);
+    MetadataMap[MD].ID = ++ID;
+    if (isa<MDString>(MD))
+      ++R.NumStrings;
+  }
+  // Close out the last function's range.
+  R.Last = FunctionMDs.size();
+  FunctionMDInfo[PrevF] = R;
+}
+
+void ValueEnumerator::incorporateFunctionMetadata(const Function &F) {
+  // Remember where module-level metadata ends so purgeFunction() can truncate
+  // back to it.
+  NumModuleMDs = MDs.size();
+
+  // Function tags are the function's value ID plus one (0 is reserved for
+  // module-level metadata); splice that function's slice of FunctionMDs in.
+  auto R = FunctionMDInfo.lookup(getValueID(&F) + 1);
+  NumMDStrings = R.NumStrings;
+  MDs.insert(MDs.end(), FunctionMDs.begin() + R.First,
+             FunctionMDs.begin() + R.Last);
+}
+
+void ValueEnumerator::EnumerateValue(const Value *V) {
+  assert(!V->getType()->isVoidTy() && "Can't insert void values!");
+  assert(!isa<MetadataAsValue>(V) && "EnumerateValue doesn't handle Metadata!");
+
+  // Check to see if it's already in!
+  unsigned &ValueID = ValueMap[V];
+  if (ValueID) {
+    // Increment use count.
+    Values[ValueID-1].second++;
+    return;
+  }
+
+  // Track any comdat this object belongs to so it can be emitted too.
+  if (auto *GO = dyn_cast<GlobalObject>(V))
+    if (const Comdat *C = GO->getComdat())
+      Comdats.insert(C);
+
+  // Enumerate the type of this value.
+  EnumerateType(V->getType());
+
+  if (const Constant *C = dyn_cast<Constant>(V)) {
+    if (isa<GlobalValue>(C)) {
+      // Initializers for globals are handled explicitly elsewhere.
+    } else if (C->getNumOperands()) {
+      // If a constant has operands, enumerate them. This makes sure that if a
+      // constant has uses (for example an array of const ints), that they are
+      // inserted also.
+
+      // We prefer to enumerate them with values before we enumerate the user
+      // itself. This makes it more likely that we can avoid forward references
+      // in the reader. We know that there can be no cycles in the constants
+      // graph that don't go through a global variable.
+      for (User::const_op_iterator I = C->op_begin(), E = C->op_end();
+           I != E; ++I)
+        if (!isa<BasicBlock>(*I)) // Don't enumerate BB operand to BlockAddress.
+          EnumerateValue(*I);
+      if (auto *CE = dyn_cast<ConstantExpr>(C))
+        if (CE->getOpcode() == Instruction::ShuffleVector)
+          EnumerateValue(CE->getShuffleMaskForBitcode());
+
+      // Finally, add the value. Doing this could make the ValueID reference be
+      // dangling, don't reuse it.
+      // (The recursive EnumerateValue calls above may have grown ValueMap and
+      // invalidated the ValueID reference, so write through the map afresh.)
+      Values.push_back(std::make_pair(V, 1U));
+      ValueMap[V] = Values.size();
+      return;
+    }
+  }
+
+  // Add the value.  IDs are 1-based (index into Values plus one).
+  Values.push_back(std::make_pair(V, 1U));
+  ValueID = Values.size();
+}
+
+
+void ValueEnumerator::EnumerateType(Type *Ty) {
+  unsigned *TypeID = &TypeMap[Ty];
+
+  // We've already seen this type.
+  if (*TypeID)
+    return;
+
+  // If it is a non-anonymous struct, mark the type as being visited so that we
+  // don't recursively visit it. This is safe because we allow forward
+  // references of these in the bitcode reader.
+  // ~0U serves as the "currently being visited" sentinel.
+  if (StructType *STy = dyn_cast<StructType>(Ty))
+    if (!STy->isLiteral())
+      *TypeID = ~0U;
+
+  // Enumerate all of the subtypes before we enumerate this type. This ensures
+  // that the type will be enumerated in an order that can be directly built.
+  for (Type *SubTy : Ty->subtypes())
+    EnumerateType(SubTy);
+
+  // Refresh the TypeID pointer in case the table rehashed.
+  TypeID = &TypeMap[Ty];
+
+  // Check to see if we got the pointer another way. This can happen when
+  // enumerating recursive types that hit the base case deeper than they start.
+  //
+  // If this is actually a struct that we are treating as forward ref'able,
+  // then emit the definition now that all of its contents are available.
+  if (*TypeID && *TypeID != ~0U)
+    return;
+
+  // Add this type now that its contents are all happily enumerated.
+  // Type IDs are 1-based (index into Types plus one).
+  Types.push_back(Ty);
+
+  *TypeID = Types.size();
+}
+
+// Enumerate the types for the specified value. If the value is a constant,
+// walk through it, enumerating the types of the constant.
+void ValueEnumerator::EnumerateOperandType(const Value *V) {
+ EnumerateType(V->getType());
+
+ assert(!isa<MetadataAsValue>(V) && "Unexpected metadata operand");
+
+ const Constant *C = dyn_cast<Constant>(V);
+ if (!C)
+ return;
+
+ // If this constant is already enumerated, ignore it, we know its type must
+ // be enumerated.
+ if (ValueMap.count(C))
+ return;
+
+ // This constant may have operands, make sure to enumerate the types in
+ // them.
+ for (const Value *Op : C->operands()) {
+ // Don't enumerate basic blocks here, this happens as operands to
+ // blockaddress.
+ if (isa<BasicBlock>(Op))
+ continue;
+
+ EnumerateOperandType(Op);
+ }
+ if (auto *CE = dyn_cast<ConstantExpr>(C)) {
+ if (CE->getOpcode() == Instruction::ShuffleVector)
+ EnumerateOperandType(CE->getShuffleMaskForBitcode());
+ if (CE->getOpcode() == Instruction::GetElementPtr)
+ EnumerateType(cast<GEPOperator>(CE)->getSourceElementType());
+ }
+}
+
+// Register an attribute list and each of its non-empty attribute groups,
+// assigning 1-based IDs on first sight. Types carried by type attributes are
+// enumerated so the writer can reference them.
+void ValueEnumerator::EnumerateAttributes(AttributeList PAL) {
+  // The empty list always maps to ID 0; nothing to record.
+  if (PAL.isEmpty())
+    return;
+
+  // Assign the list itself an ID the first time we see it.
+  unsigned &ListID = AttributeListMap[PAL];
+  if (!ListID) {
+    AttributeLists.push_back(PAL);
+    ListID = AttributeLists.size();
+  }
+
+  // Now do the same for every non-empty (index, AttributeSet) group.
+  for (unsigned Index : PAL.indexes()) {
+    AttributeSet Set = PAL.getAttributes(Index);
+    if (!Set.hasAttributes())
+      continue;
+
+    IndexAndAttrSet Group = {Index, Set};
+    unsigned &GroupID = AttributeGroupMap[Group];
+    if (GroupID)
+      continue;
+
+    AttributeGroups.push_back(Group);
+    GroupID = AttributeGroups.size();
+
+    // A type attribute carries a Type that must be enumerated as well.
+    for (Attribute A : Set)
+      if (A.isTypeAttribute())
+        EnumerateType(A.getValueAsType());
+  }
+}
+
+// Extend the enumerator's tables with everything local to function F:
+// arguments, constants, basic blocks, instructions, attributes, and
+// function-local metadata.  purgeFunction() undoes this.
+void ValueEnumerator::incorporateFunction(const Function &F) {
+  InstructionCount = 0;
+  NumModuleValues = Values.size();
+
+  // Add global metadata to the function block. This doesn't include
+  // LocalAsMetadata.
+  incorporateFunctionMetadata(F);
+
+  // Adding function arguments to the value table.
+  for (const auto &I : F.args()) {
+    EnumerateValue(&I);
+    // These attributes carry an extra Type that must be enumerated too.
+    if (I.hasAttribute(Attribute::ByVal))
+      EnumerateType(I.getParamByValType());
+    else if (I.hasAttribute(Attribute::StructRet))
+      EnumerateType(I.getParamStructRetType());
+    else if (I.hasAttribute(Attribute::ByRef))
+      EnumerateType(I.getParamByRefType());
+  }
+  FirstFuncConstantID = Values.size();
+
+  // Add all function-level constants to the value table.
+  for (const BasicBlock &BB : F) {
+    for (const Instruction &I : BB) {
+      for (const Use &OI : I.operands()) {
+        if ((isa<Constant>(OI) && !isa<GlobalValue>(OI)) || isa<InlineAsm>(OI))
+          EnumerateValue(OI);
+      }
+      if (auto *SVI = dyn_cast<ShuffleVectorInst>(&I))
+        EnumerateValue(SVI->getShuffleMaskForBitcode());
+    }
+    // Basic blocks get 1-based IDs in ValueMap, separate from Values.
+    BasicBlocks.push_back(&BB);
+    ValueMap[&BB] = BasicBlocks.size();
+  }
+
+  // Optimize the constant layout.
+  OptimizeConstants(FirstFuncConstantID, Values.size());
+
+  // Add the function's parameter attributes so they are available for use in
+  // the function's instruction.
+  EnumerateAttributes(F.getAttributes());
+
+  FirstInstID = Values.size();
+
+  SmallVector<LocalAsMetadata *, 8> FnLocalMDVector;
+  SmallVector<DIArgList *, 8> ArgListMDVector;
+  // Add all of the instructions.
+  for (const BasicBlock &BB : F) {
+    for (const Instruction &I : BB) {
+      for (const Use &OI : I.operands()) {
+        if (auto *MD = dyn_cast<MetadataAsValue>(&OI)) {
+          if (auto *Local = dyn_cast<LocalAsMetadata>(MD->getMetadata())) {
+            // Enumerate metadata after the instructions they might refer to.
+            FnLocalMDVector.push_back(Local);
+          } else if (auto *ArgList = dyn_cast<DIArgList>(MD->getMetadata())) {
+            ArgListMDVector.push_back(ArgList);
+            for (ValueAsMetadata *VMD : ArgList->getArgs()) {
+              if (auto *Local = dyn_cast<LocalAsMetadata>(VMD)) {
+                // Enumerate metadata after the instructions they might refer
+                // to.
+                FnLocalMDVector.push_back(Local);
+              }
+            }
+          }
+        }
+      }
+
+      if (!I.getType()->isVoidTy())
+        EnumerateValue(&I);
+    }
+  }
+
+  // Add all of the function-local metadata.
+  for (unsigned i = 0, e = FnLocalMDVector.size(); i != e; ++i) {
+    // At this point, all local values have been incorporated, so we shouldn't
+    // have a metadata operand that references a value that hasn't been seen.
+    assert(ValueMap.count(FnLocalMDVector[i]->getValue()) &&
+           "Missing value for metadata operand");
+    EnumerateFunctionLocalMetadata(F, FnLocalMDVector[i]);
+  }
+  // DIArgList entries must come after function-local metadata, as it is not
+  // possible to forward-reference them.
+  for (const DIArgList *ArgList : ArgListMDVector)
+    EnumerateFunctionLocalListMetadata(F, ArgList);
+}
+
+// Drop everything added for the most recently incorporated function,
+// restoring the enumerator to its module-level state.
+void ValueEnumerator::purgeFunction() {
+  // Erase the reverse mappings for function-local values and metadata.
+  for (size_t Idx = Values.size(); Idx > NumModuleValues; --Idx)
+    ValueMap.erase(Values[Idx - 1].first);
+  for (size_t Idx = MDs.size(); Idx > NumModuleMDs; --Idx)
+    MetadataMap.erase(MDs[Idx - 1]);
+  for (const BasicBlock *BB : BasicBlocks)
+    ValueMap.erase(BB);
+
+  // Truncate the forward tables back to module scope.
+  Values.resize(NumModuleValues);
+  MDs.resize(NumModuleMDs);
+  BasicBlocks.clear();
+  NumMDStrings = 0;
+}
+
+/// Assign 1-based, function-relative IDs to every basic block of F, storing
+/// them in IDMap.
+static void IncorporateFunctionInfoGlobalBBIDs(const Function *F,
+    DenseMap<const BasicBlock*, unsigned> &IDMap) {
+  unsigned NextID = 1;
+  for (const BasicBlock &BB : *F)
+    IDMap[&BB] = NextID++;
+}
+
+/// getGlobalBasicBlockID - This returns the function-specific ID for the
+/// specified basic block. This is relatively expensive information, so it
+/// should only be used by rare constructs such as address-of-label.
+unsigned ValueEnumerator::getGlobalBasicBlockID(const BasicBlock *BB) const {
+  // Fast path: already memoized. Stored IDs are 1-based so that 0 can mean
+  // "not yet computed"; callers see the 0-based value.
+  if (unsigned ID = GlobalBasicBlockIDs.lookup(BB))
+    return ID - 1;
+
+  // Slow path: number every block of BB's function in one pass, then re-query
+  // (a fresh lookup avoids holding a reference across the map mutation).
+  IncorporateFunctionInfoGlobalBBIDs(BB->getParent(), GlobalBasicBlockIDs);
+  return getGlobalBasicBlockID(BB);
+}
+
+// Number of bits needed to encode any valid type index (the type count plus
+// one, since index 0 is usable).
+uint64_t ValueEnumerator::computeBitsRequiredForTypeIndicies() const {
+  const size_t NumTypes = getTypes().size();
+  return Log2_32_Ceil(NumTypes + 1);
+}
diff --git a/contrib/libs/llvm14/lib/Bitcode/Writer/ValueEnumerator.h b/contrib/libs/llvm14/lib/Bitcode/Writer/ValueEnumerator.h
new file mode 100644
index 0000000000..6c3f6d4ff6
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Writer/ValueEnumerator.h
@@ -0,0 +1,305 @@
+//===- Bitcode/Writer/ValueEnumerator.h - Number values ---------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This class gives values and types Unique ID's.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H
+#define LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/UniqueVector.h"
+#include "llvm/IR/Attributes.h"
+#include "llvm/IR/UseListOrder.h"
+#include <cassert>
+#include <cstdint>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+
+class BasicBlock;
+class Comdat;
+class DIArgList;
+class Function;
+class Instruction;
+class LocalAsMetadata;
+class MDNode;
+class Metadata;
+class Module;
+class NamedMDNode;
+class raw_ostream;
+class Type;
+class Value;
+class ValueSymbolTable;
+
+class ValueEnumerator {
+public:
+  using TypeList = std::vector<Type *>;
+
+  // For each value, we remember its Value* and occurrence frequency.
+  using ValueList = std::vector<std::pair<const Value *, unsigned>>;
+
+  /// Attribute groups as encoded in bitcode are almost AttributeSets, but they
+  /// include the AttributeList index, so we have to track that in our map.
+  using IndexAndAttrSet = std::pair<unsigned, AttributeSet>;
+
+  /// Use-list orders to be written out.
+  /// NOTE(review): presumably populated during module enumeration when
+  /// ShouldPreserveUseListOrder is set — confirm in ValueEnumerator.cpp.
+  UseListOrderStack UseListOrders;
+
+private:
+  // Types in emission order; TypeMap gives each type its 1-based ID
+  // (index into Types plus one).
+  using TypeMapType = DenseMap<Type *, unsigned>;
+  TypeMapType TypeMap;
+  TypeList Types;
+
+  // Values in emission order; ValueMap gives each value its 1-based ID.
+  using ValueMapType = DenseMap<const Value *, unsigned>;
+  ValueMapType ValueMap;
+  ValueList Values;
+
+  using ComdatSetType = UniqueVector<const Comdat *>;
+  ComdatSetType Comdats;
+
+  // Module-level metadata in emission order, extended with the current
+  // function's metadata while one is incorporated.
+  std::vector<const Metadata *> MDs;
+  // Function-local metadata for all functions, laid out as the contiguous
+  // slices described by FunctionMDInfo.
+  std::vector<const Metadata *> FunctionMDs;
+
+  /// Index of information about a piece of metadata.
+  struct MDIndex {
+    unsigned F = 0;  ///< The ID of the function for this metadata, if any.
+    unsigned ID = 0; ///< The implicit ID of this metadata in bitcode.
+
+    MDIndex() = default;
+    explicit MDIndex(unsigned F) : F(F) {}
+
+    /// Check if this has a function tag, and it's different from NewF.
+    bool hasDifferentFunction(unsigned NewF) const { return F && F != NewF; }
+
+    /// Fetch the MD this references out of the given metadata array.
+    const Metadata *get(ArrayRef<const Metadata *> MDs) const {
+      assert(ID && "Expected non-zero ID");
+      assert(ID <= MDs.size() && "Expected valid ID");
+      return MDs[ID - 1];
+    }
+  };
+
+  using MetadataMapType = DenseMap<const Metadata *, MDIndex>;
+  MetadataMapType MetadataMap;
+
+  /// Range of metadata IDs, as a half-open range.
+  struct MDRange {
+    unsigned First = 0;
+    unsigned Last = 0;
+
+    /// Number of strings in the prefix of the metadata range.
+    unsigned NumStrings = 0;
+
+    MDRange() = default;
+    explicit MDRange(unsigned First) : First(First) {}
+  };
+  // Per-function slice of FunctionMDs, keyed by the function's value ID + 1
+  // (tag 0 is reserved for module-level metadata).
+  SmallDenseMap<unsigned, MDRange, 1> FunctionMDInfo;
+
+  bool ShouldPreserveUseListOrder;
+
+  using AttributeGroupMapType = DenseMap<IndexAndAttrSet, unsigned>;
+  AttributeGroupMapType AttributeGroupMap;
+  std::vector<IndexAndAttrSet> AttributeGroups;
+
+  using AttributeListMapType = DenseMap<AttributeList, unsigned>;
+  AttributeListMapType AttributeListMap;
+  std::vector<AttributeList> AttributeLists;
+
+  /// GlobalBasicBlockIDs - This map memoizes the basic block ID's referenced by
+  /// the "getGlobalBasicBlockID" method.
+  mutable DenseMap<const BasicBlock*, unsigned> GlobalBasicBlockIDs;
+
+  using InstructionMapType = DenseMap<const Instruction *, unsigned>;
+  InstructionMapType InstructionMap;
+  unsigned InstructionCount;
+
+  /// BasicBlocks - This contains all the basic blocks for the currently
+  /// incorporated function. Their reverse mapping is stored in ValueMap.
+  std::vector<const BasicBlock*> BasicBlocks;
+
+  /// When a function is incorporated, this is the size of the Values list
+  /// before incorporation.
+  unsigned NumModuleValues;
+
+  /// When a function is incorporated, this is the size of the Metadatas list
+  /// before incorporation.
+  unsigned NumModuleMDs = 0;
+  // Number of MDStrings at the start of the current metadata block.
+  unsigned NumMDStrings = 0;
+
+  // IDs delimiting the current function's constant range; see
+  // getFunctionConstantRange().
+  unsigned FirstFuncConstantID;
+  unsigned FirstInstID;
+
+public:
+  ValueEnumerator(const Module &M, bool ShouldPreserveUseListOrder);
+  ValueEnumerator(const ValueEnumerator &) = delete;
+  ValueEnumerator &operator=(const ValueEnumerator &) = delete;
+
+  void dump() const;
+  void print(raw_ostream &OS, const ValueMapType &Map, const char *Name) const;
+  void print(raw_ostream &OS, const MetadataMapType &Map,
+             const char *Name) const;
+
+  unsigned getValueID(const Value *V) const;
+
+  /// Return the 0-based bitcode ID for MD; asserts MD has been enumerated.
+  unsigned getMetadataID(const Metadata *MD) const {
+    auto ID = getMetadataOrNullID(MD);
+    assert(ID != 0 && "Metadata not in slotcalculator!");
+    return ID - 1;
+  }
+
+  /// Return MD's 1-based ID, or 0 when MD is null or unknown.
+  unsigned getMetadataOrNullID(const Metadata *MD) const {
+    return MetadataMap.lookup(MD).ID;
+  }
+
+  unsigned numMDs() const { return MDs.size(); }
+
+  bool shouldPreserveUseListOrder() const { return ShouldPreserveUseListOrder; }
+
+  unsigned getTypeID(Type *T) const {
+    TypeMapType::const_iterator I = TypeMap.find(T);
+    assert(I != TypeMap.end() && "Type not in ValueEnumerator!");
+    return I->second-1;
+  }
+
+  unsigned getInstructionID(const Instruction *I) const;
+  void setInstructionID(const Instruction *I);
+
+  unsigned getAttributeListID(AttributeList PAL) const {
+    if (PAL.isEmpty()) return 0; // Null maps to zero.
+    AttributeListMapType::const_iterator I = AttributeListMap.find(PAL);
+    assert(I != AttributeListMap.end() && "Attribute not in ValueEnumerator!");
+    return I->second;
+  }
+
+  unsigned getAttributeGroupID(IndexAndAttrSet Group) const {
+    if (!Group.second.hasAttributes())
+      return 0; // Null maps to zero.
+    AttributeGroupMapType::const_iterator I = AttributeGroupMap.find(Group);
+    assert(I != AttributeGroupMap.end() && "Attribute not in ValueEnumerator!");
+    return I->second;
+  }
+
+  /// getFunctionConstantRange - Return the range of values that corresponds to
+  /// function-local constants.
+  void getFunctionConstantRange(unsigned &Start, unsigned &End) const {
+    Start = FirstFuncConstantID;
+    End = FirstInstID;
+  }
+
+  const ValueList &getValues() const { return Values; }
+
+  /// Check whether the current block has any metadata to emit.
+  bool hasMDs() const { return NumModuleMDs < MDs.size(); }
+
+  /// Get the MDString metadata for this block.
+  ArrayRef<const Metadata *> getMDStrings() const {
+    return makeArrayRef(MDs).slice(NumModuleMDs, NumMDStrings);
+  }
+
+  /// Get the non-MDString metadata for this block.
+  ArrayRef<const Metadata *> getNonMDStrings() const {
+    return makeArrayRef(MDs).slice(NumModuleMDs).slice(NumMDStrings);
+  }
+
+  const TypeList &getTypes() const { return Types; }
+
+  const std::vector<const BasicBlock*> &getBasicBlocks() const {
+    return BasicBlocks;
+  }
+
+  const std::vector<AttributeList> &getAttributeLists() const { return AttributeLists; }
+
+  const std::vector<IndexAndAttrSet> &getAttributeGroups() const {
+    return AttributeGroups;
+  }
+
+  const ComdatSetType &getComdats() const { return Comdats; }
+  unsigned getComdatID(const Comdat *C) const;
+
+  /// getGlobalBasicBlockID - This returns the function-specific ID for the
+  /// specified basic block. This is relatively expensive information, so it
+  /// should only be used by rare constructs such as address-of-label.
+  unsigned getGlobalBasicBlockID(const BasicBlock *BB) const;
+
+  /// incorporateFunction/purgeFunction - If you'd like to deal with a function,
+  /// use these two methods to get its data into the ValueEnumerator!
+  void incorporateFunction(const Function &F);
+
+  void purgeFunction();
+  uint64_t computeBitsRequiredForTypeIndicies() const;
+
+private:
+  void OptimizeConstants(unsigned CstStart, unsigned CstEnd);
+
+  /// Reorder the reachable metadata.
+  ///
+  /// This is not just an optimization, but is mandatory for emitting MDString
+  /// correctly.
+  void organizeMetadata();
+
+  /// Drop the function tag from the transitive operands of the given node.
+  void dropFunctionFromMetadata(MetadataMapType::value_type &FirstMD);
+
+  /// Incorporate the function metadata.
+  ///
+  /// This should be called before enumerating LocalAsMetadata for the
+  /// function.
+  void incorporateFunctionMetadata(const Function &F);
+
+  /// Enumerate a single instance of metadata with the given function tag.
+  ///
+  /// If \c MD has already been enumerated, check that \c F matches its
+  /// function tag. If not, call \a dropFunctionFromMetadata().
+  ///
+  /// Otherwise, mark \c MD as visited. Assign it an ID, or just return it if
+  /// it's an \a MDNode.
+  const MDNode *enumerateMetadataImpl(unsigned F, const Metadata *MD);
+
+  unsigned getMetadataFunctionID(const Function *F) const;
+
+  /// Enumerate reachable metadata in (almost) post-order.
+  ///
+  /// Enumerate all the metadata reachable from MD. We want to minimize the
+  /// cost of reading bitcode records, and so the primary consideration is that
+  /// operands of uniqued nodes are resolved before the nodes are read. This
+  /// avoids re-uniquing them on the context and factors away RAUW support.
+  ///
+  /// This algorithm guarantees that subgraphs of uniqued nodes are in
+  /// post-order. Distinct subgraphs reachable only from a single uniqued node
+  /// will be in post-order.
+  ///
+  /// \note The relative order of a distinct and uniqued node is irrelevant.
+  /// \a organizeMetadata() will later partition distinct nodes ahead of
+  /// uniqued ones.
+  ///{
+  void EnumerateMetadata(const Function *F, const Metadata *MD);
+  void EnumerateMetadata(unsigned F, const Metadata *MD);
+  ///}
+
+  void EnumerateFunctionLocalMetadata(const Function &F,
+                                      const LocalAsMetadata *Local);
+  void EnumerateFunctionLocalMetadata(unsigned F, const LocalAsMetadata *Local);
+  void EnumerateFunctionLocalListMetadata(const Function &F,
+                                          const DIArgList *ArgList);
+  void EnumerateFunctionLocalListMetadata(unsigned F, const DIArgList *Arglist);
+  void EnumerateNamedMDNode(const NamedMDNode *NMD);
+  void EnumerateValue(const Value *V);
+  void EnumerateType(Type *T);
+  void EnumerateOperandType(const Value *V);
+  void EnumerateAttributes(AttributeList PAL);
+
+  void EnumerateValueSymbolTable(const ValueSymbolTable &ST);
+  void EnumerateNamedMetadata(const Module &M);
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_BITCODE_WRITER_VALUEENUMERATOR_H
diff --git a/contrib/libs/llvm14/lib/Bitcode/Writer/ya.make b/contrib/libs/llvm14/lib/Bitcode/Writer/ya.make
new file mode 100644
index 0000000000..e1217b83c0
--- /dev/null
+++ b/contrib/libs/llvm14/lib/Bitcode/Writer/ya.make
@@ -0,0 +1,34 @@
+# Generated by devtools/yamaker.
+
+LIBRARY()
+
+LICENSE(Apache-2.0 WITH LLVM-exception)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+PEERDIR(
+ contrib/libs/llvm14
+ contrib/libs/llvm14/include
+ contrib/libs/llvm14/lib/Analysis
+ contrib/libs/llvm14/lib/IR
+ contrib/libs/llvm14/lib/MC
+ contrib/libs/llvm14/lib/Object
+ contrib/libs/llvm14/lib/Support
+)
+
+ADDINCL(
+ contrib/libs/llvm14/lib/Bitcode/Writer
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+SRCS(
+ BitWriter.cpp
+ BitcodeWriter.cpp
+ BitcodeWriterPass.cpp
+ ValueEnumerator.cpp
+)
+
+END()