author | shadchin <shadchin@yandex-team.ru> | 2022-02-10 16:44:39 +0300
---|---|---
committer | Daniil Cherednik <dcherednik@yandex-team.ru> | 2022-02-10 16:44:39 +0300
commit | e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0 (patch) |
tree | 64175d5cadab313b3e7039ebaa06c5bc3295e274 /contrib/libs/llvm12/lib/Transforms/Instrumentation/ThreadSanitizer.cpp |
parent | 2598ef1d0aee359b4b6d5fdd1758916d5907d04f (diff) |
download | ydb-e9656aae26e0358d5378e5b63dcac5c8dbe0e4d0.tar.gz |
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 2 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/Transforms/Instrumentation/ThreadSanitizer.cpp')
-rw-r--r-- | contrib/libs/llvm12/lib/Transforms/Instrumentation/ThreadSanitizer.cpp | 250 |
1 file changed, 125 insertions, 125 deletions
```diff
diff --git a/contrib/libs/llvm12/lib/Transforms/Instrumentation/ThreadSanitizer.cpp b/contrib/libs/llvm12/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
index 85d36a0d4f..783878cf1e 100644
--- a/contrib/libs/llvm12/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
+++ b/contrib/libs/llvm12/lib/Transforms/Instrumentation/ThreadSanitizer.cpp
@@ -19,8 +19,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
 #include "llvm/ADT/SmallString.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
@@ -53,36 +53,36 @@ using namespace llvm;
 
 #define DEBUG_TYPE "tsan"
 
-static cl::opt<bool> ClInstrumentMemoryAccesses(
+static cl::opt<bool> ClInstrumentMemoryAccesses(
     "tsan-instrument-memory-accesses", cl::init(true),
     cl::desc("Instrument memory accesses"), cl::Hidden);
-static cl::opt<bool>
-    ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true),
-                              cl::desc("Instrument function entry and exit"),
-                              cl::Hidden);
-static cl::opt<bool> ClHandleCxxExceptions(
+static cl::opt<bool>
+    ClInstrumentFuncEntryExit("tsan-instrument-func-entry-exit", cl::init(true),
+                              cl::desc("Instrument function entry and exit"),
+                              cl::Hidden);
+static cl::opt<bool> ClHandleCxxExceptions(
     "tsan-handle-cxx-exceptions", cl::init(true),
     cl::desc("Handle C++ exceptions (insert cleanup blocks for unwinding)"),
     cl::Hidden);
-static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
-                                         cl::init(true),
-                                         cl::desc("Instrument atomics"),
-                                         cl::Hidden);
-static cl::opt<bool> ClInstrumentMemIntrinsics(
+static cl::opt<bool> ClInstrumentAtomics("tsan-instrument-atomics",
+                                         cl::init(true),
+                                         cl::desc("Instrument atomics"),
+                                         cl::Hidden);
+static cl::opt<bool> ClInstrumentMemIntrinsics(
     "tsan-instrument-memintrinsics", cl::init(true),
     cl::desc("Instrument memintrinsics (memset/memcpy/memmove)"), cl::Hidden);
-static cl::opt<bool> ClDistinguishVolatile(
+static cl::opt<bool> ClDistinguishVolatile(
     "tsan-distinguish-volatile", cl::init(false),
     cl::desc("Emit special instrumentation for accesses to volatiles"),
     cl::Hidden);
-static cl::opt<bool> ClInstrumentReadBeforeWrite(
+static cl::opt<bool> ClInstrumentReadBeforeWrite(
    "tsan-instrument-read-before-write", cl::init(false),
    cl::desc("Do not eliminate read instrumentation for read-before-writes"),
    cl::Hidden);
-static cl::opt<bool> ClCompoundReadBeforeWrite(
-    "tsan-compound-read-before-write", cl::init(false),
-    cl::desc("Emit special compound instrumentation for reads-before-writes"),
-    cl::Hidden);
+static cl::opt<bool> ClCompoundReadBeforeWrite(
+    "tsan-compound-read-before-write", cl::init(false),
+    cl::desc("Emit special compound instrumentation for reads-before-writes"),
+    cl::Hidden);
 
 STATISTIC(NumInstrumentedReads, "Number of instrumented reads");
 STATISTIC(NumInstrumentedWrites, "Number of instrumented writes");
@@ -96,8 +96,8 @@ STATISTIC(NumOmittedReadsFromConstantGlobals,
 STATISTIC(NumOmittedReadsFromVtable, "Number of vtable reads");
 STATISTIC(NumOmittedNonCaptured, "Number of accesses ignored due to capturing");
 
-const char kTsanModuleCtorName[] = "tsan.module_ctor";
-const char kTsanInitName[] = "__tsan_init";
+const char kTsanModuleCtorName[] = "tsan.module_ctor";
+const char kTsanInitName[] = "__tsan_init";
 
 namespace {
 
@@ -108,37 +108,37 @@ namespace {
 /// ensures the __tsan_init function is in the list of global constructors for
 /// the module.
 struct ThreadSanitizer {
-  ThreadSanitizer() {
-    // Sanity check options and warn user.
-    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite) {
-      errs()
-          << "warning: Option -tsan-compound-read-before-write has no effect "
-             "when -tsan-instrument-read-before-write is set.\n";
-    }
-  }
-
+  ThreadSanitizer() {
+    // Sanity check options and warn user.
+    if (ClInstrumentReadBeforeWrite && ClCompoundReadBeforeWrite) {
+      errs()
+          << "warning: Option -tsan-compound-read-before-write has no effect "
+             "when -tsan-instrument-read-before-write is set.\n";
+    }
+  }
+
   bool sanitizeFunction(Function &F, const TargetLibraryInfo &TLI);
 
 private:
-  // Internal Instruction wrapper that contains more information about the
-  // Instruction from prior analysis.
-  struct InstructionInfo {
-    // Instrumentation emitted for this instruction is for a compounded set of
-    // read and write operations in the same basic block.
-    static constexpr unsigned kCompoundRW = (1U << 0);
-
-    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}
-
-    Instruction *Inst;
-    unsigned Flags = 0;
-  };
-
+  // Internal Instruction wrapper that contains more information about the
+  // Instruction from prior analysis.
+  struct InstructionInfo {
+    // Instrumentation emitted for this instruction is for a compounded set of
+    // read and write operations in the same basic block.
+    static constexpr unsigned kCompoundRW = (1U << 0);
+
+    explicit InstructionInfo(Instruction *Inst) : Inst(Inst) {}
+
+    Instruction *Inst;
+    unsigned Flags = 0;
+  };
+
   void initialize(Module &M);
-  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
+  bool instrumentLoadOrStore(const InstructionInfo &II, const DataLayout &DL);
   bool instrumentAtomic(Instruction *I, const DataLayout &DL);
   bool instrumentMemIntrinsic(Instruction *I);
   void chooseInstructionsToInstrument(SmallVectorImpl<Instruction *> &Local,
-                                      SmallVectorImpl<InstructionInfo> &All,
+                                      SmallVectorImpl<InstructionInfo> &All,
                                       const DataLayout &DL);
   bool addrPointsToConstantData(Value *Addr);
   int getMemoryAccessFuncIndex(Value *Addr, const DataLayout &DL);
@@ -159,8 +159,8 @@ private:
   FunctionCallee TsanVolatileWrite[kNumberOfAccessSizes];
   FunctionCallee TsanUnalignedVolatileRead[kNumberOfAccessSizes];
   FunctionCallee TsanUnalignedVolatileWrite[kNumberOfAccessSizes];
-  FunctionCallee TsanCompoundRW[kNumberOfAccessSizes];
-  FunctionCallee TsanUnalignedCompoundRW[kNumberOfAccessSizes];
+  FunctionCallee TsanCompoundRW[kNumberOfAccessSizes];
+  FunctionCallee TsanUnalignedCompoundRW[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicLoad[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicStore[kNumberOfAccessSizes];
   FunctionCallee TsanAtomicRMW[AtomicRMWInst::LAST_BINOP + 1]
@@ -299,15 +299,15 @@ void ThreadSanitizer::initialize(Module &M) {
     TsanUnalignedVolatileWrite[i] = M.getOrInsertFunction(
         UnalignedVolatileWriteName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
 
-    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
-    TsanCompoundRW[i] = M.getOrInsertFunction(
-        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
-
-    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
-                                            ByteSizeStr);
-    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
-        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
-
+    SmallString<64> CompoundRWName("__tsan_read_write" + ByteSizeStr);
+    TsanCompoundRW[i] = M.getOrInsertFunction(
+        CompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
+    SmallString<64> UnalignedCompoundRWName("__tsan_unaligned_read_write" +
+                                            ByteSizeStr);
+    TsanUnalignedCompoundRW[i] = M.getOrInsertFunction(
+        UnalignedCompoundRWName, Attr, IRB.getVoidTy(), IRB.getInt8PtrTy());
+
     Type *Ty = Type::getIntNTy(M.getContext(), BitSize);
     Type *PtrTy = Ty->getPointerTo();
     SmallString<32> AtomicLoadName("__tsan_atomic" + BitSizeStr + "_load");
@@ -442,43 +442,43 @@ bool ThreadSanitizer::addrPointsToConstantData(Value *Addr) {
 // 'Local' is a vector of insns within the same BB (no calls between).
 // 'All' is a vector of insns that will be instrumented.
 void ThreadSanitizer::chooseInstructionsToInstrument(
-    SmallVectorImpl<Instruction *> &Local,
-    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
-  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
+    SmallVectorImpl<Instruction *> &Local,
+    SmallVectorImpl<InstructionInfo> &All, const DataLayout &DL) {
+  DenseMap<Value *, size_t> WriteTargets; // Map of addresses to index in All
   // Iterate from the end.
   for (Instruction *I : reverse(Local)) {
-    const bool IsWrite = isa<StoreInst>(*I);
-    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
-                          : cast<LoadInst>(I)->getPointerOperand();
-
-    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
-      continue;
-
-    if (!IsWrite) {
-      const auto WriteEntry = WriteTargets.find(Addr);
-      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
-        auto &WI = All[WriteEntry->second];
-        // If we distinguish volatile accesses and if either the read or write
-        // is volatile, do not omit any instrumentation.
-        const bool AnyVolatile =
-            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
-                                      cast<StoreInst>(WI.Inst)->isVolatile());
-        if (!AnyVolatile) {
-          // We will write to this temp, so no reason to analyze the read.
-          // Mark the write instruction as compound.
-          WI.Flags |= InstructionInfo::kCompoundRW;
-          NumOmittedReadsBeforeWrite++;
-          continue;
-        }
+    const bool IsWrite = isa<StoreInst>(*I);
+    Value *Addr = IsWrite ? cast<StoreInst>(I)->getPointerOperand()
+                          : cast<LoadInst>(I)->getPointerOperand();
+
+    if (!shouldInstrumentReadWriteFromAddress(I->getModule(), Addr))
+      continue;
+
+    if (!IsWrite) {
+      const auto WriteEntry = WriteTargets.find(Addr);
+      if (!ClInstrumentReadBeforeWrite && WriteEntry != WriteTargets.end()) {
+        auto &WI = All[WriteEntry->second];
+        // If we distinguish volatile accesses and if either the read or write
+        // is volatile, do not omit any instrumentation.
+        const bool AnyVolatile =
+            ClDistinguishVolatile && (cast<LoadInst>(I)->isVolatile() ||
+                                      cast<StoreInst>(WI.Inst)->isVolatile());
+        if (!AnyVolatile) {
+          // We will write to this temp, so no reason to analyze the read.
+          // Mark the write instruction as compound.
+          WI.Flags |= InstructionInfo::kCompoundRW;
+          NumOmittedReadsBeforeWrite++;
+          continue;
+        }
       }
-
+
       if (addrPointsToConstantData(Addr)) {
         // Addr points to some constant data -- it can not race with any writes.
         continue;
       }
     }
-
-    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
+
+    if (isa<AllocaInst>(getUnderlyingObject(Addr)) &&
         !PointerMayBeCaptured(Addr, true, true)) {
       // The variable is addressable but not captured, so it cannot be
       // referenced from a different thread and participate in a data race
@@ -486,14 +486,14 @@ void ThreadSanitizer::chooseInstructionsToInstrument(
       NumOmittedNonCaptured++;
       continue;
     }
-
-    // Instrument this instruction.
-    All.emplace_back(I);
-    if (IsWrite) {
-      // For read-before-write and compound instrumentation we only need one
-      // write target, and we can override any previous entry if it exists.
-      WriteTargets[Addr] = All.size() - 1;
-    }
+
+    // Instrument this instruction.
+    All.emplace_back(I);
+    if (IsWrite) {
+      // For read-before-write and compound instrumentation we only need one
+      // write target, and we can override any previous entry if it exists.
+      WriteTargets[Addr] = All.size() - 1;
+    }
   }
   Local.clear();
 }
@@ -534,7 +534,7 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
   if (F.hasFnAttribute(Attribute::Naked))
     return false;
   initialize(*F.getParent());
-  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
+  SmallVector<InstructionInfo, 8> AllLoadsAndStores;
   SmallVector<Instruction*, 8> LocalLoadsAndStores;
   SmallVector<Instruction*, 8> AtomicAccesses;
   SmallVector<Instruction*, 8> MemIntrinCalls;
@@ -569,8 +569,8 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
 
   // Instrument memory accesses only if we want to report bugs in the function.
   if (ClInstrumentMemoryAccesses && SanitizeFunction)
-    for (const auto &II : AllLoadsAndStores) {
-      Res |= instrumentLoadOrStore(II, DL);
+    for (const auto &II : AllLoadsAndStores) {
+      Res |= instrumentLoadOrStore(II, DL);
     }
 
   // Instrument atomic memory accesses in any case (they can be used to
@@ -608,12 +608,12 @@ bool ThreadSanitizer::sanitizeFunction(Function &F,
   return Res;
 }
 
-bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
+bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
                                             const DataLayout &DL) {
-  IRBuilder<> IRB(II.Inst);
-  const bool IsWrite = isa<StoreInst>(*II.Inst);
-  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
-                        : cast<LoadInst>(II.Inst)->getPointerOperand();
+  IRBuilder<> IRB(II.Inst);
+  const bool IsWrite = isa<StoreInst>(*II.Inst);
+  Value *Addr = IsWrite ? cast<StoreInst>(II.Inst)->getPointerOperand()
+                        : cast<LoadInst>(II.Inst)->getPointerOperand();
 
   // swifterror memory addresses are mem2reg promoted by instruction selection.
   // As such they cannot have regular uses like an instrumentation function and
@@ -624,9 +624,9 @@ bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
   int Idx = getMemoryAccessFuncIndex(Addr, DL);
   if (Idx < 0)
     return false;
-  if (IsWrite && isVtableAccess(II.Inst)) {
-    LLVM_DEBUG(dbgs() << " VPTR : " << *II.Inst << "\n");
-    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
+  if (IsWrite && isVtableAccess(II.Inst)) {
+    LLVM_DEBUG(dbgs() << " VPTR : " << *II.Inst << "\n");
+    Value *StoredValue = cast<StoreInst>(II.Inst)->getValueOperand();
     // StoredValue may be a vector type if we are storing several vptrs at once.
     // In this case, just take the first element of the vector since this is
     // enough to find vptr races.
@@ -642,46 +642,46 @@ bool ThreadSanitizer::instrumentLoadOrStore(const InstructionInfo &II,
     NumInstrumentedVtableWrites++;
     return true;
   }
-  if (!IsWrite && isVtableAccess(II.Inst)) {
+  if (!IsWrite && isVtableAccess(II.Inst)) {
     IRB.CreateCall(TsanVptrLoad,
                    IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
     NumInstrumentedVtableReads++;
    return true;
   }
-
-  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
-                                     : cast<LoadInst>(II.Inst)->getAlignment();
-  const bool IsCompoundRW =
-      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
-  const bool IsVolatile = ClDistinguishVolatile &&
-                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
-                                   : cast<LoadInst>(II.Inst)->isVolatile());
-  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");
-
+
+  const unsigned Alignment = IsWrite ? cast<StoreInst>(II.Inst)->getAlignment()
+                                     : cast<LoadInst>(II.Inst)->getAlignment();
+  const bool IsCompoundRW =
+      ClCompoundReadBeforeWrite && (II.Flags & InstructionInfo::kCompoundRW);
+  const bool IsVolatile = ClDistinguishVolatile &&
+                          (IsWrite ? cast<StoreInst>(II.Inst)->isVolatile()
+                                   : cast<LoadInst>(II.Inst)->isVolatile());
+  assert((!IsVolatile || !IsCompoundRW) && "Compound volatile invalid!");
+
   Type *OrigTy = cast<PointerType>(Addr->getType())->getElementType();
   const uint32_t TypeSize = DL.getTypeStoreSizeInBits(OrigTy);
   FunctionCallee OnAccessFunc = nullptr;
   if (Alignment == 0 || Alignment >= 8 || (Alignment % (TypeSize / 8)) == 0) {
-    if (IsCompoundRW)
-      OnAccessFunc = TsanCompoundRW[Idx];
-    else if (IsVolatile)
+    if (IsCompoundRW)
+      OnAccessFunc = TsanCompoundRW[Idx];
+    else if (IsVolatile)
       OnAccessFunc = IsWrite ? TsanVolatileWrite[Idx] : TsanVolatileRead[Idx];
     else
      OnAccessFunc = IsWrite ? TsanWrite[Idx] : TsanRead[Idx];
   } else {
-    if (IsCompoundRW)
-      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
-    else if (IsVolatile)
+    if (IsCompoundRW)
+      OnAccessFunc = TsanUnalignedCompoundRW[Idx];
+    else if (IsVolatile)
      OnAccessFunc = IsWrite ? TsanUnalignedVolatileWrite[Idx]
                              : TsanUnalignedVolatileRead[Idx];
     else
      OnAccessFunc =
          IsWrite ? TsanUnalignedWrite[Idx] : TsanUnalignedRead[Idx];
   }
   IRB.CreateCall(OnAccessFunc, IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()));
-  if (IsCompoundRW || IsWrite)
-    NumInstrumentedWrites++;
-  if (IsCompoundRW || !IsWrite)
-    NumInstrumentedReads++;
+  if (IsCompoundRW || IsWrite)
+    NumInstrumentedWrites++;
+  if (IsCompoundRW || !IsWrite)
+    NumInstrumentedReads++;
   return true;
 }
```