author    shadchin <shadchin@yandex-team.ru>  2022-02-10 16:44:30 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:30 +0300
commit    2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree      012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/include/llvm/Analysis/Utils
parent    6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/include/llvm/Analysis/Utils')
-rw-r--r--  contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h | 246
-rw-r--r--  contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h                               |  78
-rw-r--r--  contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h                             | 356
3 files changed, 340 insertions(+), 340 deletions(-)
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h b/contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
index 3fc8df0c75..bf20189de3 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h
@@ -1,123 +1,123 @@
-#pragma once
-
-#ifdef __GNUC__
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#endif
-
-//===-- ImportedFunctionsInliningStatistics.h -------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-// Generating inliner statistics for imported functions, mostly useful for
-// ThinLTO.
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
-#define LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringRef.h"
-#include <string>
-#include <vector>
-
-namespace llvm {
-class Module;
-class Function;
-/// Calculate and dump ThinLTO-specific inliner stats.
-/// The main statistics are:
-/// (1) number of inlined imported functions,
-/// (2) number of imported functions inlined into the importing module (indirect),
-/// (3) number of non-imported functions inlined into the importing module
-/// (indirect).
-/// The difference between (1) and (2) is that (1) counts all performed
-/// inlines of imported functions, while (2) counts only the functions that
-/// have eventually been inlined into a function in the importing module (by
-/// a chain of inlines). Because LLVM uses a bottom-up inliner, it is possible
-/// to e.g. import functions `A` and `B` and then inline `B` into `A`, after
-/// which `A` might be too big to be inlined into some other function that
-/// calls it. This statistic is calculated by building a graph whose nodes
-/// are functions and whose edges are performed inlines, then marking the
-/// edges reachable from non-imported functions.
-///
-/// If `Verbose` is set to true, statistics are also dumped for each inlined
-/// function, sorted by descending inline count:
-/// - number of performed inlines
-/// - number of performed inlines into the importing module
-class ImportedFunctionsInliningStatistics {
-private:
-  /// InlineGraphNode represents a node in the graph of inlined functions.
- struct InlineGraphNode {
- // Default-constructible and movable.
- InlineGraphNode() = default;
- InlineGraphNode(InlineGraphNode &&) = default;
- InlineGraphNode &operator=(InlineGraphNode &&) = default;
-
- llvm::SmallVector<InlineGraphNode *, 8> InlinedCallees;
-    /// Incremented on every direct inline.
-    int32_t NumberOfInlines = 0;
-    /// Number of inlines into non-imported functions (possibly indirect via
-    /// intermediate inlines). Computed by graph search.
-    int32_t NumberOfRealInlines = 0;
- bool Imported = false;
- bool Visited = false;
- };
-
-public:
- ImportedFunctionsInliningStatistics() = default;
- ImportedFunctionsInliningStatistics(
- const ImportedFunctionsInliningStatistics &) = delete;
-
- /// Set information like AllFunctions, ImportedFunctions, ModuleName.
- void setModuleInfo(const Module &M);
-  /// Record the inlining of @param Callee into @param Caller for statistics.
-  void recordInline(const Function &Caller, const Function &Callee);
-  /// Dump stats computed with the InlinerStatistics class.
-  /// If @param Verbose is true, separate statistics for every inlined
-  /// function will be printed.
- void dump(bool Verbose);
-
-private:
-  /// Creates a new node in NodesMap and sets attributes, or returns the existing one.
- InlineGraphNode &createInlineGraphNode(const Function &);
- void calculateRealInlines();
- void dfs(InlineGraphNode &GraphNode);
-
- using NodesMapTy =
- llvm::StringMap<std::unique_ptr<InlineGraphNode>>;
- using SortedNodesTy =
- std::vector<const NodesMapTy::MapEntryTy*>;
-  /// Returns a vector of elements sorted by
- /// (-NumberOfInlines, -NumberOfRealInlines, FunctionName).
- SortedNodesTy getSortedNodes();
-
-private:
-  /// This map manages the lifetime of all InlineGraphNodes. A unique pointer
-  /// to InlineGraphNode is used since node pointers are also saved in the
-  /// InlinedCallees vectors. If the map stored InlineGraphNode by value, the
-  /// address of a node would not be stable.
- NodesMapTy NodesMap;
-  /// Non-external functions that have some other function inlined into them.
- std::vector<StringRef> NonImportedCallers;
- int AllFunctions = 0;
- int ImportedFunctions = 0;
- StringRef ModuleName;
-};
-
-enum class InlinerFunctionImportStatsOpts {
- No = 0,
- Basic = 1,
- Verbose = 2,
-};
-
-} // llvm
-
-#endif // LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
-
-#ifdef __GNUC__
-#pragma GCC diagnostic pop
-#endif
+#pragma once
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+#endif
+
+//===-- ImportedFunctionsInliningStatistics.h -------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Generating inliner statistics for imported functions, mostly useful for
+// ThinLTO.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+#define LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class Module;
+class Function;
+/// Calculate and dump ThinLTO-specific inliner stats.
+/// The main statistics are:
+/// (1) number of inlined imported functions,
+/// (2) number of imported functions inlined into the importing module (indirect),
+/// (3) number of non-imported functions inlined into the importing module
+/// (indirect).
+/// The difference between (1) and (2) is that (1) counts all performed
+/// inlines of imported functions, while (2) counts only the functions that
+/// have eventually been inlined into a function in the importing module (by
+/// a chain of inlines). Because LLVM uses a bottom-up inliner, it is possible
+/// to e.g. import functions `A` and `B` and then inline `B` into `A`, after
+/// which `A` might be too big to be inlined into some other function that
+/// calls it. This statistic is calculated by building a graph whose nodes
+/// are functions and whose edges are performed inlines, then marking the
+/// edges reachable from non-imported functions.
+///
+/// If `Verbose` is set to true, statistics are also dumped for each inlined
+/// function, sorted by descending inline count:
+/// - number of performed inlines
+/// - number of performed inlines into the importing module
+class ImportedFunctionsInliningStatistics {
+private:
+  /// InlineGraphNode represents a node in the graph of inlined functions.
+ struct InlineGraphNode {
+ // Default-constructible and movable.
+ InlineGraphNode() = default;
+ InlineGraphNode(InlineGraphNode &&) = default;
+ InlineGraphNode &operator=(InlineGraphNode &&) = default;
+
+ llvm::SmallVector<InlineGraphNode *, 8> InlinedCallees;
+    /// Incremented on every direct inline.
+    int32_t NumberOfInlines = 0;
+    /// Number of inlines into non-imported functions (possibly indirect via
+    /// intermediate inlines). Computed by graph search.
+    int32_t NumberOfRealInlines = 0;
+ bool Imported = false;
+ bool Visited = false;
+ };
+
+public:
+ ImportedFunctionsInliningStatistics() = default;
+ ImportedFunctionsInliningStatistics(
+ const ImportedFunctionsInliningStatistics &) = delete;
+
+ /// Set information like AllFunctions, ImportedFunctions, ModuleName.
+ void setModuleInfo(const Module &M);
+  /// Record the inlining of @param Callee into @param Caller for statistics.
+  void recordInline(const Function &Caller, const Function &Callee);
+  /// Dump stats computed with the InlinerStatistics class.
+  /// If @param Verbose is true, separate statistics for every inlined
+  /// function will be printed.
+ void dump(bool Verbose);
+
+private:
+  /// Creates a new node in NodesMap and sets attributes, or returns the existing one.
+ InlineGraphNode &createInlineGraphNode(const Function &);
+ void calculateRealInlines();
+ void dfs(InlineGraphNode &GraphNode);
+
+ using NodesMapTy =
+ llvm::StringMap<std::unique_ptr<InlineGraphNode>>;
+ using SortedNodesTy =
+ std::vector<const NodesMapTy::MapEntryTy*>;
+  /// Returns a vector of elements sorted by
+ /// (-NumberOfInlines, -NumberOfRealInlines, FunctionName).
+ SortedNodesTy getSortedNodes();
+
+private:
+  /// This map manages the lifetime of all InlineGraphNodes. A unique pointer
+  /// to InlineGraphNode is used since node pointers are also saved in the
+  /// InlinedCallees vectors. If the map stored InlineGraphNode by value, the
+  /// address of a node would not be stable.
+ NodesMapTy NodesMap;
+  /// Non-external functions that have some other function inlined into them.
+ std::vector<StringRef> NonImportedCallers;
+ int AllFunctions = 0;
+ int ImportedFunctions = 0;
+ StringRef ModuleName;
+};
+
+enum class InlinerFunctionImportStatsOpts {
+ No = 0,
+ Basic = 1,
+ Verbose = 2,
+};
+
+} // llvm
+
+#endif // LLVM_TRANSFORMS_UTILS_IMPORTEDFUNCTIONSINLININGSTATISTICS_H
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop
+#endif
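Usage of the interface above, as a minimal sketch. The driver function below is illustrative, not part of the header; in LLVM the statistics object lives for the whole inliner pass and recordInline is called once per performed inline:

    #include "llvm/Analysis/Utils/ImportedFunctionsInliningStatistics.h"
    using namespace llvm;

    void reportThinLTOInlines(const Module &M, const Function &Caller,
                              const Function &Callee) {
      ImportedFunctionsInliningStatistics Stats;
      Stats.setModuleInfo(M);             // record AllFunctions/ImportedFunctions/ModuleName
      Stats.recordInline(Caller, Callee); // one call per performed inline
      Stats.dump(/*Verbose=*/true);       // per-function stats, most-inlined first
    }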
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h b/contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h
index f6b4cf83b2..32801f7c14 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Utils/Local.h
@@ -37,7 +37,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
bool NoAssumptions = false) {
GEPOperator *GEPOp = cast<GEPOperator>(GEP);
Type *IntIdxTy = DL.getIndexType(GEP->getType());
- Value *Result = nullptr;
+ Value *Result = nullptr;
// If the GEP is inbounds, we know that none of the addressing operations will
// overflow in a signed sense.
@@ -53,7 +53,7 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
++i, ++GTI) {
Value *Op = *i;
uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType()) & PtrSizeMask;
- Value *Offset;
+ Value *Offset;
if (Constant *OpC = dyn_cast<Constant>(Op)) {
if (OpC->isZeroValue())
continue;
@@ -62,47 +62,47 @@ Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &DL, User *GEP,
if (StructType *STy = GTI.getStructTypeOrNull()) {
uint64_t OpValue = OpC->getUniqueInteger().getZExtValue();
Size = DL.getStructLayout(STy)->getElementOffset(OpValue);
- if (!Size)
- continue;
-
- Offset = ConstantInt::get(IntIdxTy, Size);
- } else {
- // Splat the constant if needed.
- if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
- OpC = ConstantVector::getSplat(
- cast<VectorType>(IntIdxTy)->getElementCount(), OpC);
-
- Constant *Scale = ConstantInt::get(IntIdxTy, Size);
- Constant *OC =
- ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
- Offset =
- ConstantExpr::getMul(OC, Scale, false /*NUW*/, isInBounds /*NSW*/);
+ if (!Size)
+ continue;
+
+ Offset = ConstantInt::get(IntIdxTy, Size);
+ } else {
+ // Splat the constant if needed.
+ if (IntIdxTy->isVectorTy() && !OpC->getType()->isVectorTy())
+ OpC = ConstantVector::getSplat(
+ cast<VectorType>(IntIdxTy)->getElementCount(), OpC);
+
+ Constant *Scale = ConstantInt::get(IntIdxTy, Size);
+ Constant *OC =
+ ConstantExpr::getIntegerCast(OpC, IntIdxTy, true /*SExt*/);
+ Offset =
+ ConstantExpr::getMul(OC, Scale, false /*NUW*/, isInBounds /*NSW*/);
}
- } else {
- // Splat the index if needed.
- if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
- Op = Builder->CreateVectorSplat(
- cast<FixedVectorType>(IntIdxTy)->getNumElements(), Op);
-
- // Convert to correct type.
- if (Op->getType() != IntIdxTy)
- Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName().str()+".c");
- if (Size != 1) {
- // We'll let instcombine(mul) convert this to a shl if possible.
- Op = Builder->CreateMul(Op, ConstantInt::get(IntIdxTy, Size),
- GEP->getName().str() + ".idx", false /*NUW*/,
- isInBounds /*NSW*/);
- }
- Offset = Op;
+ } else {
+ // Splat the index if needed.
+ if (IntIdxTy->isVectorTy() && !Op->getType()->isVectorTy())
+ Op = Builder->CreateVectorSplat(
+ cast<FixedVectorType>(IntIdxTy)->getNumElements(), Op);
+
+ // Convert to correct type.
+ if (Op->getType() != IntIdxTy)
+ Op = Builder->CreateIntCast(Op, IntIdxTy, true, Op->getName().str()+".c");
+ if (Size != 1) {
+ // We'll let instcombine(mul) convert this to a shl if possible.
+ Op = Builder->CreateMul(Op, ConstantInt::get(IntIdxTy, Size),
+ GEP->getName().str() + ".idx", false /*NUW*/,
+ isInBounds /*NSW*/);
+ }
+ Offset = Op;
}
- if (Result)
- Result = Builder->CreateAdd(Result, Offset, GEP->getName().str()+".offs",
- false /*NUW*/, isInBounds /*NSW*/);
- else
- Result = Offset;
+ if (Result)
+ Result = Builder->CreateAdd(Result, Offset, GEP->getName().str()+".offs",
+ false /*NUW*/, isInBounds /*NSW*/);
+ else
+ Result = Offset;
}
- return Result ? Result : Constant::getNullValue(IntIdxTy);
+ return Result ? Result : Constant::getNullValue(IntIdxTy);
}
}
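To make the offset arithmetic above concrete: for `getelementptr inbounds {i32, i64}, {i32, i64}* %p, i64 %i, i32 1` (struct size 16, field 1 at byte offset 8 under a typical data layout), EmitGEPOffset emits `add nsw (mul nsw %i, 16), 8` in the pointer's index type: array-style indices are scaled by the allocation size of the indexed type, while struct indices contribute their constant field offset. A minimal calling sketch; the wrapper name is illustrative:

    #include "llvm/Analysis/Utils/Local.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    // Materialize a GEP's byte offset as explicit mul/add instructions at the
    // builder's current insertion point.
    Value *gepByteOffset(IRBuilder<> &B, const Module &M, User *GEP) {
      return EmitGEPOffset(&B, M.getDataLayout(), GEP, /*NoAssumptions=*/false);
    }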
diff --git a/contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h b/contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h
index 2248ebf6da..ce5f3088d7 100644
--- a/contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h
+++ b/contrib/libs/llvm12/include/llvm/Analysis/Utils/TFUtils.h
@@ -16,11 +16,11 @@
#ifndef LLVM_ANALYSIS_UTILS_TFUTILS_H
#define LLVM_ANALYSIS_UTILS_TFUTILS_H
-#include "llvm/Config/llvm-config.h"
+#include "llvm/Config/llvm-config.h"
#ifdef LLVM_HAVE_TF_API
#include "llvm/IR/LLVMContext.h"
-#include "llvm/Support/JSON.h"
+#include "llvm/Support/JSON.h"
#include <memory>
#include <vector>
@@ -44,141 +44,141 @@ namespace llvm {
class TFModelEvaluatorImpl;
class EvaluationResultImpl;
-/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
-/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
-/// for supported types), its name and port (see "TensorFlow: Large-Scale
-/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
-/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
-///
-/// TensorSpec is used to set up a TFModelEvaluator by describing the expected
-/// inputs and outputs.
-class TensorSpec final {
-public:
- template <typename T>
- static TensorSpec createSpec(const std::string &Name,
- const std::vector<int64_t> &Shape,
- int Port = 0) {
- return TensorSpec(Name, Port, getDataType<T>(), Shape);
- }
-
- const std::string &name() const { return Name; }
- int port() const { return Port; }
- int typeIndex() const { return TypeIndex; }
- const std::vector<int64_t> &shape() const { return Shape; }
-
- bool operator==(const TensorSpec &Other) const {
- return Name == Other.Name && Port == Other.Port &&
- TypeIndex == Other.TypeIndex && Shape == Other.Shape;
- }
-
- bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }
-
- /// Get the number of elements in a tensor with this shape.
- size_t getElementCount() const { return ElementCount; }
- /// Get the size, in bytes, of one element.
- size_t getElementByteSize() const;
-
- template <typename T> bool isElementType() const {
- return getDataType<T>() == TypeIndex;
- }
-
-private:
- TensorSpec(const std::string &Name, int Port, int TypeIndex,
- const std::vector<int64_t> &Shape);
-
- template <typename T> static int getDataType() {
- llvm_unreachable("Undefined tensor type");
- }
-
- std::string Name;
- int Port = 0;
- int TypeIndex = 0;
- std::vector<int64_t> Shape;
- size_t ElementCount = 0;
-};
-
-/// Construct a TensorSpec from a JSON dictionary of the form:
-/// { "name": <string>,
-/// "port": <int>,
-/// "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
-/// "shape": <array of ints> }
-/// For the "type" field, see the C++ primitive types used in
-/// TFUTILS_SUPPORTED_TYPES.
-Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
- const json::Value &Value);
-
-struct LoggedFeatureSpec {
- TensorSpec Spec;
- Optional<std::string> LoggingName;
-};
-
-/// Load the output specs. If SpecFileOverride is not empty, that path is used.
-/// Otherwise, the file is assumed to be called 'output_spec.json' and to be
-/// found under ModelPath (the model directory).
-/// The first output tensor name must match ExpectedDecisionName.
-/// On error, None is returned and the error is logged.
-Optional<std::vector<LoggedFeatureSpec>>
-loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
- StringRef ModelPath, StringRef SpecFileOverride = StringRef());
-
-/// Logging utility: given an ordered specification of features, and assuming
-/// a scalar reward, allows logging feature values and rewards, which can then
-/// be printed as a tf.train.SequenceExample text protobuf.
-/// The assumption is that, for an event to be logged (i.e. a set of feature
-/// values and a reward), the user calls the log* API for each feature exactly
-/// once, providing the index matching the position in the feature spec list
-/// provided at construction:
-/// event 0:
-/// logTensorValue(0, ...)
-/// logTensorValue(1, ...)
-/// ...
-/// logReward(...)
-/// event 1:
-/// logTensorValue(0, ...)
-/// logTensorValue(1, ...)
-/// ...
-/// logReward(...)
-///
-/// At the end, call print to generate the protobuf.
-class Logger final {
-public:
- /// Construct a Logger. If IncludeReward is false, then logReward shouldn't
- /// be called, and the reward feature won't be printed out.
- Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
- const TensorSpec &RewardSpec, bool IncludeReward)
- : FeatureSpecs(FeatureSpecs), RewardSpec(RewardSpec),
- RawLogData(FeatureSpecs.size() + IncludeReward),
- IncludeReward(IncludeReward) {}
-
- template <typename T> void logReward(T Value) {
- assert(IncludeReward);
- logTensorValue(RawLogData.size() - 1, &Value);
- }
-
- template <typename T> void logFinalReward(T Value) {
- assert(RawLogData.back().empty());
- logReward(Value);
- }
-
- template <typename T>
- void logTensorValue(size_t FeatureID, const T *Value, size_t Size = 1) {
- const char *Start = reinterpret_cast<const char *>(Value);
- const char *End = Start + sizeof(T) * Size;
- RawLogData[FeatureID].insert(RawLogData[FeatureID].end(), Start, End);
- }
-
- void print(raw_ostream &OS);
-
-private:
- std::vector<LoggedFeatureSpec> FeatureSpecs;
- TensorSpec RewardSpec;
-  /// RawLogData has one entry per feature, plus one more for the reward.
-  /// Each feature's values are stored in a vector, in succession.
-  /// This means the ith event is stored at [*][i].
- std::vector<std::vector<char>> RawLogData;
- const bool IncludeReward;
-};
-
+/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
+/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
+/// for supported types), its name and port (see "TensorFlow: Large-Scale
+/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
+/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
+///
+/// TensorSpec is used to set up a TFModelEvaluator by describing the expected
+/// inputs and outputs.
+class TensorSpec final {
+public:
+ template <typename T>
+ static TensorSpec createSpec(const std::string &Name,
+ const std::vector<int64_t> &Shape,
+ int Port = 0) {
+ return TensorSpec(Name, Port, getDataType<T>(), Shape);
+ }
+
+ const std::string &name() const { return Name; }
+ int port() const { return Port; }
+ int typeIndex() const { return TypeIndex; }
+ const std::vector<int64_t> &shape() const { return Shape; }
+
+ bool operator==(const TensorSpec &Other) const {
+ return Name == Other.Name && Port == Other.Port &&
+ TypeIndex == Other.TypeIndex && Shape == Other.Shape;
+ }
+
+ bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }
+
+ /// Get the number of elements in a tensor with this shape.
+ size_t getElementCount() const { return ElementCount; }
+ /// Get the size, in bytes, of one element.
+ size_t getElementByteSize() const;
+
+ template <typename T> bool isElementType() const {
+ return getDataType<T>() == TypeIndex;
+ }
+
+private:
+ TensorSpec(const std::string &Name, int Port, int TypeIndex,
+ const std::vector<int64_t> &Shape);
+
+ template <typename T> static int getDataType() {
+ llvm_unreachable("Undefined tensor type");
+ }
+
+ std::string Name;
+ int Port = 0;
+ int TypeIndex = 0;
+ std::vector<int64_t> Shape;
+ size_t ElementCount = 0;
+};
+
+/// Construct a TensorSpec from a JSON dictionary of the form:
+/// { "name": <string>,
+/// "port": <int>,
+/// "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
+/// "shape": <array of ints> }
+/// For the "type" field, see the C++ primitive types used in
+/// TFUTILS_SUPPORTED_TYPES.
+Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
+ const json::Value &Value);
+
+struct LoggedFeatureSpec {
+ TensorSpec Spec;
+ Optional<std::string> LoggingName;
+};
+
+/// Load the output specs. If SpecFileOverride is not empty, that path is used.
+/// Otherwise, the file is assumed to be called 'output_spec.json' and to be
+/// found under ModelPath (the model directory).
+/// The first output tensor name must match ExpectedDecisionName.
+/// On error, None is returned and the error is logged.
+Optional<std::vector<LoggedFeatureSpec>>
+loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
+ StringRef ModelPath, StringRef SpecFileOverride = StringRef());
+
+/// Logging utility: given an ordered specification of features, and assuming
+/// a scalar reward, allows logging feature values and rewards, which can then
+/// be printed as a tf.train.SequenceExample text protobuf.
+/// The assumption is that, for an event to be logged (i.e. a set of feature
+/// values and a reward), the user calls the log* API for each feature exactly
+/// once, providing the index matching the position in the feature spec list
+/// provided at construction:
+/// event 0:
+/// logTensorValue(0, ...)
+/// logTensorValue(1, ...)
+/// ...
+/// logReward(...)
+/// event 1:
+/// logTensorValue(0, ...)
+/// logTensorValue(1, ...)
+/// ...
+/// logReward(...)
+///
+/// At the end, call print to generate the protobuf.
+class Logger final {
+public:
+ /// Construct a Logger. If IncludeReward is false, then logReward shouldn't
+ /// be called, and the reward feature won't be printed out.
+ Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
+ const TensorSpec &RewardSpec, bool IncludeReward)
+ : FeatureSpecs(FeatureSpecs), RewardSpec(RewardSpec),
+ RawLogData(FeatureSpecs.size() + IncludeReward),
+ IncludeReward(IncludeReward) {}
+
+ template <typename T> void logReward(T Value) {
+ assert(IncludeReward);
+ logTensorValue(RawLogData.size() - 1, &Value);
+ }
+
+ template <typename T> void logFinalReward(T Value) {
+ assert(RawLogData.back().empty());
+ logReward(Value);
+ }
+
+ template <typename T>
+ void logTensorValue(size_t FeatureID, const T *Value, size_t Size = 1) {
+ const char *Start = reinterpret_cast<const char *>(Value);
+ const char *End = Start + sizeof(T) * Size;
+ RawLogData[FeatureID].insert(RawLogData[FeatureID].end(), Start, End);
+ }
+
+ void print(raw_ostream &OS);
+
+private:
+ std::vector<LoggedFeatureSpec> FeatureSpecs;
+ TensorSpec RewardSpec;
+  /// RawLogData has one entry per feature, plus one more for the reward.
+  /// Each feature's values are stored in a vector, in succession.
+  /// This means the ith event is stored at [*][i].
+ std::vector<std::vector<char>> RawLogData;
+ const bool IncludeReward;
+};
+
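The event protocol described above, as a minimal sketch (requires LLVM_HAVE_TF_API; the "feature"/"reward" specs and the two-event loop are illustrative):

    #include "llvm/Analysis/Utils/TFUtils.h"
    using namespace llvm;

    void logTwoEvents(raw_ostream &OS) {
      std::vector<LoggedFeatureSpec> Features{
          {TensorSpec::createSpec<int64_t>("feature", {1}), None}};
      TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
      Logger Log(Features, Reward, /*IncludeReward=*/true);
      for (int64_t Event = 0; Event < 2; ++Event) {
        Log.logTensorValue(0, &Event); // one log* call per feature, per event
        float R = float(Event);
        Log.logReward(R);              // then the scalar reward for the event
      }
      Log.print(OS);                   // tf.train.SequenceExample text protobuf
    }

The same "feature" spec could equally be parsed by getTensorSpecFromJSON from {"name": "feature", "port": 0, "type": "int64_t", "shape": [1]}.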
class TFModelEvaluator final {
public:
/// The result of a model evaluation. Handles the lifetime of the output
@@ -187,26 +187,26 @@ public:
class EvaluationResult {
public:
EvaluationResult(const EvaluationResult &) = delete;
- EvaluationResult &operator=(const EvaluationResult &Other) = delete;
-
+ EvaluationResult &operator=(const EvaluationResult &Other) = delete;
+
EvaluationResult(EvaluationResult &&Other);
- EvaluationResult &operator=(EvaluationResult &&Other);
-
+ EvaluationResult &operator=(EvaluationResult &&Other);
+
~EvaluationResult();
- /// Get a (const) pointer to the first element of the tensor at Index.
+ /// Get a (const) pointer to the first element of the tensor at Index.
template <typename T> T *getTensorValue(size_t Index) {
return static_cast<T *>(getUntypedTensorValue(Index));
}
- template <typename T> const T *getTensorValue(size_t Index) const {
- return static_cast<T *>(getUntypedTensorValue(Index));
- }
-
- /// Get a (const) pointer to the untyped data of the tensor.
- void *getUntypedTensorValue(size_t Index);
- const void *getUntypedTensorValue(size_t Index) const;
-
+ template <typename T> const T *getTensorValue(size_t Index) const {
+ return static_cast<T *>(getUntypedTensorValue(Index));
+ }
+
+ /// Get a (const) pointer to the untyped data of the tensor.
+ void *getUntypedTensorValue(size_t Index);
+ const void *getUntypedTensorValue(size_t Index) const;
+
private:
friend class TFModelEvaluator;
EvaluationResult(std::unique_ptr<EvaluationResultImpl> Impl);
@@ -214,14 +214,14 @@ public:
};
TFModelEvaluator(StringRef SavedModelPath,
- const std::vector<TensorSpec> &InputSpecs,
- const std::vector<TensorSpec> &OutputSpecs,
+ const std::vector<TensorSpec> &InputSpecs,
+ const std::vector<TensorSpec> &OutputSpecs,
const char *Tags = "serve");
- TFModelEvaluator(StringRef SavedModelPath,
- const std::vector<TensorSpec> &InputSpecs,
- function_ref<TensorSpec(size_t)> GetOutputSpecs,
- size_t OutputSpecsSize, const char *Tags = "serve");
-
+ TFModelEvaluator(StringRef SavedModelPath,
+ const std::vector<TensorSpec> &InputSpecs,
+ function_ref<TensorSpec(size_t)> GetOutputSpecs,
+ size_t OutputSpecsSize, const char *Tags = "serve");
+
~TFModelEvaluator();
TFModelEvaluator(const TFModelEvaluator &) = delete;
TFModelEvaluator(TFModelEvaluator &&) = delete;
@@ -246,27 +246,27 @@ private:
std::unique_ptr<TFModelEvaluatorImpl> Impl;
};
-/// List of supported types, as a pair:
-/// - C++ type
-/// - enum name (implementation-specific)
-#define TFUTILS_SUPPORTED_TYPES(M) \
- M(float, TF_FLOAT) \
- M(double, TF_DOUBLE) \
- M(int8_t, TF_INT8) \
- M(uint8_t, TF_UINT8) \
- M(int16_t, TF_INT16) \
- M(uint16_t, TF_UINT16) \
- M(int32_t, TF_INT32) \
- M(uint32_t, TF_UINT32) \
- M(int64_t, TF_INT64) \
- M(uint64_t, TF_UINT64)
-
-#define TFUTILS_GETDATATYPE_DEF(T, E) \
- template <> int TensorSpec::getDataType<T>();
-
-TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_DEF)
-
-#undef TFUTILS_GETDATATYPE_DEF
+/// List of supported types, as a pair:
+/// - C++ type
+/// - enum name (implementation-specific)
+#define TFUTILS_SUPPORTED_TYPES(M) \
+ M(float, TF_FLOAT) \
+ M(double, TF_DOUBLE) \
+ M(int8_t, TF_INT8) \
+ M(uint8_t, TF_UINT8) \
+ M(int16_t, TF_INT16) \
+ M(uint16_t, TF_UINT16) \
+ M(int32_t, TF_INT32) \
+ M(uint32_t, TF_UINT32) \
+ M(int64_t, TF_INT64) \
+ M(uint64_t, TF_UINT64)
+
+#define TFUTILS_GETDATATYPE_DEF(T, E) \
+ template <> int TensorSpec::getDataType<T>();
+
+TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_DEF)
+
+#undef TFUTILS_GETDATATYPE_DEF
} // namespace llvm
#endif // LLVM_HAVE_TF_API
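End to end, the evaluator is used roughly as follows. A minimal sketch: the model directory, tensor names, and shapes are illustrative, and it relies on the isValid()/getInput()/evaluate() members of TFModelEvaluator, which lie outside the hunks shown above:

    #include "llvm/Analysis/Utils/TFUtils.h"
    using namespace llvm;

    float evalOnce(StringRef SavedModelDir, int64_t X) {
      std::vector<TensorSpec> Inputs{
          TensorSpec::createSpec<int64_t>("input", {1})};
      std::vector<TensorSpec> Outputs{
          TensorSpec::createSpec<float>("output", {1})};
      TFModelEvaluator Evaluator(SavedModelDir, Inputs, Outputs);
      if (!Evaluator.isValid())
        return 0.0f;                            // model failed to load
      *Evaluator.getInput<int64_t>(0) = X;      // fill the input buffer
      auto Result = Evaluator.evaluate();       // Optional<EvaluationResult>
      if (!Result)
        return 0.0f;                            // evaluation failed
      return *Result->getTensorValue<float>(0); // first element of output 0
    }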