author    shadchin <shadchin@yandex-team.ru>  2022-02-10 16:44:30 +0300
committer Daniil Cherednik <dcherednik@yandex-team.ru>  2022-02-10 16:44:30 +0300
commit    2598ef1d0aee359b4b6d5fdd1758916d5907d04f (patch)
tree      012bb94d777798f1f56ac1cec429509766d05181 /contrib/libs/llvm12/lib/Analysis/TFUtils.cpp
parent    6751af0b0c1b952fede40b19b71da8025b5d8bcf (diff)
download  ydb-2598ef1d0aee359b4b6d5fdd1758916d5907d04f.tar.gz
Restoring authorship annotation for <shadchin@yandex-team.ru>. Commit 1 of 2.
Diffstat (limited to 'contrib/libs/llvm12/lib/Analysis/TFUtils.cpp')
-rw-r--r--  contrib/libs/llvm12/lib/Analysis/TFUtils.cpp  572
1 file changed, 286 insertions(+), 286 deletions(-)
diff --git a/contrib/libs/llvm12/lib/Analysis/TFUtils.cpp b/contrib/libs/llvm12/lib/Analysis/TFUtils.cpp
index 3f26bdfdc0..9d4859ab85 100644
--- a/contrib/libs/llvm12/lib/Analysis/TFUtils.cpp
+++ b/contrib/libs/llvm12/lib/Analysis/TFUtils.cpp
@@ -10,23 +10,23 @@
// This file implements utilities for interfacing with tensorflow C APIs.
//
//===----------------------------------------------------------------------===//
-#include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_API)
+#include "llvm/Config/config.h"
+#if defined(LLVM_HAVE_TF_API)
-#include "llvm/ADT/Twine.h"
+#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/Support/Debug.h"
-#include "llvm/Support/JSON.h"
+#include "llvm/Support/JSON.h"
#include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include "llvm/Support/Path.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"
-#error #include "tensorflow/c/c_api.h"
-#error #include "tensorflow/c/c_api_experimental.h"
+#error #include "tensorflow/c/c_api.h"
+#error #include "tensorflow/c/c_api_experimental.h"
#include <cassert>
-#include <numeric>
+#include <numeric>
using namespace llvm;
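
For reference, the LLVM_HAVE_TF_API guard above is the pattern consumers of these utilities follow as well: TensorFlow-backed code is compiled only when the C API was detected at configure time. A minimal consumer sketch using the same guard:

#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_API)
#include "llvm/Analysis/Utils/TFUtils.h"
// TensorFlow-dependent code goes here; it is compiled out otherwise.
#endif // defined(LLVM_HAVE_TF_API)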
@@ -64,89 +64,89 @@ TFStatusPtr createTFStatus() {
TFSessionOptionsPtr createTFSessionOptions() {
return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
}
-
-/// Write the values of one tensor as a list.
-template <typename T>
-void writeTensorValues(raw_ostream &OutFile, const char *TensorData,
- size_t ElemCount) {
- OutFile << "[";
- const T *TypedData = reinterpret_cast<const T *>(TensorData);
- for (size_t I = 0; I < ElemCount; ++I) {
- if (I > 0)
- OutFile << ", ";
- OutFile << TypedData[I];
- }
- OutFile << "]";
-}
-
-/// Write a list of tensors as a sequence of TensorFlow FeatureList protobufs.
-/// The tensors are assumed to be stored contiguously, in row-major format,
-/// in the TensorData buffer. Each tensor has the shape given by Spec. The
-/// feature name in the output is either the provided LoggingName, if
-/// specified, otherwise it's the name of the tensor (as given by Spec).
-void writeRawTensorsAsFeatureLists(raw_ostream &OutFile,
- const LoggedFeatureSpec &LoggedSpec,
- const char *TensorData, size_t TensorCount,
- bool FinalReward = false) {
- const char *FieldName = "<invalid>";
- std::function<void(const char *)> ValueWriter;
- const auto &Spec = LoggedSpec.Spec;
- // The 'Feature' protobuf only has 3 possible fields: float_list,
- // int64_list, or bytes_list, so we capture int32 values as int64. We don't
- // support any other types.
- if (Spec.isElementType<int64_t>()) {
- FieldName = "int64_list";
- ValueWriter = [&](const char *Data) {
- writeTensorValues<int64_t>(OutFile, Data, Spec.getElementCount());
- };
- } else if (Spec.isElementType<int32_t>()) {
- FieldName = "int64_list";
- ValueWriter = [&](const char *Data) {
- writeTensorValues<int32_t>(OutFile, Data, Spec.getElementCount());
- };
-
- } else if (Spec.isElementType<float>()) {
- FieldName = "float_list";
- ValueWriter = [&](const char *Data) {
- writeTensorValues<float>(OutFile, Data, Spec.getElementCount());
- };
-
- } else {
- llvm_unreachable("Unsupported tensor type.");
- }
-
- OutFile << " feature_list: {\n";
- OutFile << " key: "
- << "\""
- << (LoggedSpec.LoggingName ? *LoggedSpec.LoggingName : Spec.name())
- << "\" ";
- OutFile << "value: {\n";
- size_t TensorByteSize = Spec.getElementCount() * Spec.getElementByteSize();
-
- auto WriteFeatureProto = [&](const char *P) {
- OutFile << " feature: { " << FieldName << ": { value: ";
- ValueWriter(P);
- OutFile << " } }\n";
- };
-
- const char *CurrentTensor = TensorData;
- static int64_t Zero = 0;
- // Write all but the last value. If this is the final reward, don't increment
- // the CurrentTensor, and just write 0.
- for (size_t I = 0; I < TensorCount - 1; ++I) {
- if (FinalReward)
- WriteFeatureProto(reinterpret_cast<const char *>(&Zero));
- else {
- WriteFeatureProto(CurrentTensor);
- CurrentTensor += TensorByteSize;
- }
- }
-
- WriteFeatureProto(CurrentTensor);
-
- OutFile << " }\n";
- OutFile << " }\n";
-}
+
+/// Write the values of one tensor as a list.
+template <typename T>
+void writeTensorValues(raw_ostream &OutFile, const char *TensorData,
+ size_t ElemCount) {
+ OutFile << "[";
+ const T *TypedData = reinterpret_cast<const T *>(TensorData);
+ for (size_t I = 0; I < ElemCount; ++I) {
+ if (I > 0)
+ OutFile << ", ";
+ OutFile << TypedData[I];
+ }
+ OutFile << "]";
+}
+
+/// Write a list of tensors as a sequence of TensorFlow FeatureList protobufs.
+/// The tensors are assumed to be stored contiguously, in row-major format,
+/// in the TensorData buffer. Each tensor has the shape given by Spec. The
+/// feature name in the output is either the provided LoggingName, if
+/// specified, otherwise it's the name of the tensor (as given by Spec).
+void writeRawTensorsAsFeatureLists(raw_ostream &OutFile,
+ const LoggedFeatureSpec &LoggedSpec,
+ const char *TensorData, size_t TensorCount,
+ bool FinalReward = false) {
+ const char *FieldName = "<invalid>";
+ std::function<void(const char *)> ValueWriter;
+ const auto &Spec = LoggedSpec.Spec;
+ // The 'Feature' protobuf only has 3 possible fields: float_list,
+ // int64_list, or bytes_list, so we capture int32 values as int64. We don't
+ // support any other types.
+ if (Spec.isElementType<int64_t>()) {
+ FieldName = "int64_list";
+ ValueWriter = [&](const char *Data) {
+ writeTensorValues<int64_t>(OutFile, Data, Spec.getElementCount());
+ };
+ } else if (Spec.isElementType<int32_t>()) {
+ FieldName = "int64_list";
+ ValueWriter = [&](const char *Data) {
+ writeTensorValues<int32_t>(OutFile, Data, Spec.getElementCount());
+ };
+
+ } else if (Spec.isElementType<float>()) {
+ FieldName = "float_list";
+ ValueWriter = [&](const char *Data) {
+ writeTensorValues<float>(OutFile, Data, Spec.getElementCount());
+ };
+
+ } else {
+ llvm_unreachable("Unsupported tensor type.");
+ }
+
+ OutFile << " feature_list: {\n";
+ OutFile << " key: "
+ << "\""
+ << (LoggedSpec.LoggingName ? *LoggedSpec.LoggingName : Spec.name())
+ << "\" ";
+ OutFile << "value: {\n";
+ size_t TensorByteSize = Spec.getElementCount() * Spec.getElementByteSize();
+
+ auto WriteFeatureProto = [&](const char *P) {
+ OutFile << " feature: { " << FieldName << ": { value: ";
+ ValueWriter(P);
+ OutFile << " } }\n";
+ };
+
+ const char *CurrentTensor = TensorData;
+ static int64_t Zero = 0;
+ // Write all but the last value. If this is the final reward, don't increment
+ // the CurrentTensor, and just write 0.
+ for (size_t I = 0; I < TensorCount - 1; ++I) {
+ if (FinalReward)
+ WriteFeatureProto(reinterpret_cast<const char *>(&Zero));
+ else {
+ WriteFeatureProto(CurrentTensor);
+ CurrentTensor += TensorByteSize;
+ }
+ }
+
+ WriteFeatureProto(CurrentTensor);
+
+ OutFile << " }\n";
+ OutFile << " }\n";
+}
} // namespace
namespace llvm {
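
For reference, the text-proto that writeRawTensorsAsFeatureLists above emits has the following shape; the feature name "inlining_decision" and the int64 values are illustrative placeholders:

  feature_list: {
    key: "inlining_decision" value: {
      feature: { int64_list: { value: [1] } }
      feature: { int64_list: { value: [0] } }
    }
  }

Each "feature" entry corresponds to one tensor in the contiguous TensorData buffer; when FinalReward is set, every entry but the last is written as 0 and the last carries the reward value.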
@@ -170,122 +170,122 @@ private:
std::vector<TF_Tensor *> Output;
};
-size_t TensorSpec::getElementByteSize() const {
- return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
-}
-
-TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
- const std::vector<int64_t> &Shape)
- : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
- ElementCount(std::accumulate(Shape.begin(), Shape.end(), 1,
- std::multiplies<int64_t>())) {}
-
-Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
- const json::Value &Value) {
- auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
- std::string S;
- llvm::raw_string_ostream OS(S);
- OS << Value;
- Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
- return None;
- };
- // FIXME: accept a Path as a parameter, and use it for error reporting.
- json::Path::Root Root("tensor_spec");
- json::ObjectMapper Mapper(Value, Root);
- if (!Mapper)
- return EmitError("Value is not a dict");
-
- std::string TensorName;
- int TensorPort = -1;
- std::string TensorType;
- std::vector<int64_t> TensorShape;
-
- if (!Mapper.map<std::string>("name", TensorName))
- return EmitError("'name' property not present or not a string");
- if (!Mapper.map<std::string>("type", TensorType))
- return EmitError("'type' property not present or not a string");
- if (!Mapper.map<int>("port", TensorPort))
- return EmitError("'port' property not present or not an int");
- if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
- return EmitError("'shape' property not present or not an int array");
-
-#define PARSE_TYPE(T, E) \
- if (TensorType == #T) \
- return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
- TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
-#undef PARSE_TYPE
- return None;
-}
-
-Optional<std::vector<LoggedFeatureSpec>>
-loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
- StringRef ModelPath, StringRef SpecFileOverride) {
- SmallVector<char, 128> OutputSpecsPath;
- StringRef FileName = SpecFileOverride;
- if (FileName.empty()) {
- llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
- FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
- }
-
- auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
- if (!BufferOrError) {
- Ctx.emitError("Error opening output specs file: " + FileName + " : " +
- BufferOrError.getError().message());
- return None;
- }
- auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
- if (!ParsedJSONValues) {
- Ctx.emitError("Could not parse specs file: " + FileName);
- return None;
- }
- auto ValuesArray = ParsedJSONValues->getAsArray();
- if (!ValuesArray) {
- Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
- "logging_name:<name>} dictionaries");
- return None;
- }
- std::vector<LoggedFeatureSpec> Ret;
- for (const auto &Value : *ValuesArray)
- if (const auto *Obj = Value.getAsObject())
- if (const auto *SpecPart = Obj->get("tensor_spec"))
- if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
- if (auto LoggingName = Obj->getString("logging_name")) {
- if (!TensorSpec->isElementType<int64_t>() &&
- !TensorSpec->isElementType<int32_t>() &&
- !TensorSpec->isElementType<float>()) {
- Ctx.emitError(
- "Only int64, int32, and float tensors are supported. "
- "Found unsupported type for tensor named " +
- TensorSpec->name());
- return None;
- }
- Ret.push_back({*TensorSpec, LoggingName->str()});
- }
-
- if (ValuesArray->size() != Ret.size()) {
- Ctx.emitError(
- "Unable to parse output spec. It should be a json file containing an "
- "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
- "with a json object describing a TensorSpec; and a 'logging_name' key, "
- "which is a string to use as name when logging this tensor in the "
- "training log.");
- return None;
- }
- if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
- Ctx.emitError("The first output spec must describe the decision tensor, "
- "and must have the logging_name " +
- StringRef(ExpectedDecisionName));
- return None;
- }
- return Ret;
-}
-
+size_t TensorSpec::getElementByteSize() const {
+ return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
+}
+
+TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
+ const std::vector<int64_t> &Shape)
+ : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
+ ElementCount(std::accumulate(Shape.begin(), Shape.end(), 1,
+ std::multiplies<int64_t>())) {}
+
+Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
+ const json::Value &Value) {
+ auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
+ std::string S;
+ llvm::raw_string_ostream OS(S);
+ OS << Value;
+ Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
+ return None;
+ };
+ // FIXME: accept a Path as a parameter, and use it for error reporting.
+ json::Path::Root Root("tensor_spec");
+ json::ObjectMapper Mapper(Value, Root);
+ if (!Mapper)
+ return EmitError("Value is not a dict");
+
+ std::string TensorName;
+ int TensorPort = -1;
+ std::string TensorType;
+ std::vector<int64_t> TensorShape;
+
+ if (!Mapper.map<std::string>("name", TensorName))
+ return EmitError("'name' property not present or not a string");
+ if (!Mapper.map<std::string>("type", TensorType))
+ return EmitError("'type' property not present or not a string");
+ if (!Mapper.map<int>("port", TensorPort))
+ return EmitError("'port' property not present or not an int");
+ if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
+ return EmitError("'shape' property not present or not an int array");
+
+#define PARSE_TYPE(T, E) \
+ if (TensorType == #T) \
+ return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
+ TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
+#undef PARSE_TYPE
+ return None;
+}
+
+Optional<std::vector<LoggedFeatureSpec>>
+loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
+ StringRef ModelPath, StringRef SpecFileOverride) {
+ SmallVector<char, 128> OutputSpecsPath;
+ StringRef FileName = SpecFileOverride;
+ if (FileName.empty()) {
+ llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
+ FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
+ }
+
+ auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
+ if (!BufferOrError) {
+ Ctx.emitError("Error opening output specs file: " + FileName + " : " +
+ BufferOrError.getError().message());
+ return None;
+ }
+ auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
+ if (!ParsedJSONValues) {
+ Ctx.emitError("Could not parse specs file: " + FileName);
+ return None;
+ }
+ auto ValuesArray = ParsedJSONValues->getAsArray();
+ if (!ValuesArray) {
+ Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
+ "logging_name:<name>} dictionaries");
+ return None;
+ }
+ std::vector<LoggedFeatureSpec> Ret;
+ for (const auto &Value : *ValuesArray)
+ if (const auto *Obj = Value.getAsObject())
+ if (const auto *SpecPart = Obj->get("tensor_spec"))
+ if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
+ if (auto LoggingName = Obj->getString("logging_name")) {
+ if (!TensorSpec->isElementType<int64_t>() &&
+ !TensorSpec->isElementType<int32_t>() &&
+ !TensorSpec->isElementType<float>()) {
+ Ctx.emitError(
+ "Only int64, int32, and float tensors are supported. "
+ "Found unsupported type for tensor named " +
+ TensorSpec->name());
+ return None;
+ }
+ Ret.push_back({*TensorSpec, LoggingName->str()});
+ }
+
+ if (ValuesArray->size() != Ret.size()) {
+ Ctx.emitError(
+ "Unable to parse output spec. It should be a json file containing an "
+ "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
+ "with a json object describing a TensorSpec; and a 'logging_name' key, "
+ "which is a string to use as name when logging this tensor in the "
+ "training log.");
+ return None;
+ }
+ if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
+ Ctx.emitError("The first output spec must describe the decision tensor, "
+ "and must have the logging_name " +
+ StringRef(ExpectedDecisionName));
+ return None;
+ }
+ return Ret;
+}
+
class TFModelEvaluatorImpl {
public:
TFModelEvaluatorImpl(StringRef SavedModelPath,
- const std::vector<TensorSpec> &InputSpecs,
- function_ref<TensorSpec(size_t)> GetOutputSpecs,
- size_t OutputSpecsSize, const char *Tags);
+ const std::vector<TensorSpec> &InputSpecs,
+ function_ref<TensorSpec(size_t)> GetOutputSpecs,
+ size_t OutputSpecsSize, const char *Tags);
bool isValid() const { return IsValid; }
size_t OutputSize() const { return OutputFeed.size(); }
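
For reference, loadOutputSpecs above expects output_spec.json to be an array of dictionaries like the following; the graph node name "StatefulPartitionedCall" is an illustrative placeholder, the "type" string must be one of the C++ type names expanded from TFUTILS_SUPPORTED_TYPES (e.g. int64_t, int32_t, float), and the first entry's "logging_name" must equal ExpectedDecisionName:

[
  {
    "logging_name": "inlining_decision",
    "tensor_spec": {
      "name": "StatefulPartitionedCall",
      "port": 0,
      "type": "int64_t",
      "shape": [1]
    }
  }
]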
@@ -329,18 +329,18 @@ private:
/// Reusable utility for ensuring we can bind the requested Name to a node in
/// the SavedModel Graph.
- bool checkReportAndInvalidate(const TF_Output &Output,
- const TensorSpec &OutputSpec);
+ bool checkReportAndInvalidate(const TF_Output &Output,
+ const TensorSpec &OutputSpec);
};
} // namespace llvm
TFModelEvaluatorImpl::TFModelEvaluatorImpl(
- StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
- function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
- const char *Tags = "serve")
+ StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
+ function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
+ const char *Tags = "serve")
: Graph(createTFGraph()), Options(createTFSessionOptions()),
- InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
- OutputFeed(OutputSpecsSize) {
+ InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
+ OutputFeed(OutputSpecsSize) {
if (!ensureInitTF()) {
errs() << "Tensorflow should have been initialized";
return;
@@ -354,44 +354,44 @@ TFModelEvaluatorImpl::TFModelEvaluatorImpl(
errs() << TF_Message(Status.get());
invalidate();
}
- for (size_t I = 0; I < InputSpecs.size(); ++I) {
- auto &InputSpec = InputSpecs[I];
+ for (size_t I = 0; I < InputSpecs.size(); ++I) {
+ auto &InputSpec = InputSpecs[I];
InputFeed[I] = {
- TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
- InputSpec.port()};
- if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
+ TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
+ InputSpec.port()};
+ if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
return;
- initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
- InputSpec.shape());
+ initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
+ InputSpec.shape());
}
- for (size_t I = 0; I < OutputSpecsSize; ++I) {
- auto OutputSpec = GetOutputSpecs(I);
+ for (size_t I = 0; I < OutputSpecsSize; ++I) {
+ auto OutputSpec = GetOutputSpecs(I);
OutputFeed[I] = {
- TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
- OutputSpec.port()};
- if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
+ TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
+ OutputSpec.port()};
+ if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
return;
}
}
-TFModelEvaluator::TFModelEvaluator(
- StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
- function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
- const char *Tags)
- : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, GetOutputSpecs,
- OutputSpecsSize, Tags)) {
+TFModelEvaluator::TFModelEvaluator(
+ StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
+ function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
+ const char *Tags)
+ : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, GetOutputSpecs,
+ OutputSpecsSize, Tags)) {
if (!Impl->isValid())
Impl.reset();
}
-TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
- const std::vector<TensorSpec> &InputSpecs,
- const std::vector<TensorSpec> &OutputSpecs,
- const char *Tags)
- : TFModelEvaluator(
- SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
- OutputSpecs.size(), Tags) {}
-
+TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
+ const std::vector<TensorSpec> &InputSpecs,
+ const std::vector<TensorSpec> &OutputSpecs,
+ const char *Tags)
+ : TFModelEvaluator(
+ SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
+ OutputSpecs.size(), Tags) {}
+
TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
for (auto *T : Input) {
TF_DeleteTensor(T);
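
For reference, a minimal sketch of driving the evaluator constructed above, assuming the public TFModelEvaluator API from llvm/Analysis/Utils/TFUtils.h (getInput, evaluate, getTensorValue); the model path, tensor names, and shapes are placeholders:

#include "llvm/Analysis/Utils/TFUtils.h"
using namespace llvm;

void runModel() {
  std::vector<TensorSpec> InputSpecs{
      TensorSpec::createSpec<int64_t>("callee_basic_block_count", {1})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<int64_t>("StatefulPartitionedCall", {1})};
  // Tags defaults to "serve", matching the saved-model tag set above.
  TFModelEvaluator Evaluator("path/to/saved_model", InputSpecs, OutputSpecs);
  if (!Evaluator.isValid())
    return; // construction failures are reported via errs() and invalidate()
  *Evaluator.getInput<int64_t>(0) = 42; // fill the single-element input
  if (auto Result = Evaluator.evaluate()) {
    int64_t Decision = *Result->getTensorValue<int64_t>(0);
    (void)Decision; // act on the model's decision here
  }
}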
@@ -405,11 +405,11 @@ TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
errs() << "Could not delete TF session";
}
-bool TFModelEvaluatorImpl::checkReportAndInvalidate(
- const TF_Output &Output, const TensorSpec &OutputSpec) {
+bool TFModelEvaluatorImpl::checkReportAndInvalidate(
+ const TF_Output &Output, const TensorSpec &OutputSpec) {
if (Output.oper)
return true;
- errs() << "Could not find TF_Output named: " + OutputSpec.name();
+ errs() << "Could not find TF_Output named: " + OutputSpec.name();
IsValid = false;
return IsValid;
}
@@ -451,55 +451,55 @@ TFModelEvaluator::EvaluationResult::EvaluationResult(
TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
: Impl(std::move(Other.Impl)) {}
-TFModelEvaluator::EvaluationResult &
-TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
- Impl = std::move(Other.Impl);
- return *this;
-}
-
+TFModelEvaluator::EvaluationResult &
+TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
+ Impl = std::move(Other.Impl);
+ return *this;
+}
+
void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
return TF_TensorData(Impl->getOutput()[Index]);
}
-const void *
-TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
- return TF_TensorData(Impl->getOutput()[Index]);
+const void *
+TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
+ return TF_TensorData(Impl->getOutput()[Index]);
}
-#define TFUTILS_GETDATATYPE_IMPL(T, E) \
- template <> int TensorSpec::getDataType<T>() { return E; }
-
-TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)
-
-#undef TFUTILS_GETDATATYPE_IMPL
-
-TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
-TFModelEvaluator::~TFModelEvaluator() {}
-
-void Logger::print(raw_ostream &OS) {
- if (RawLogData.empty())
- return;
- if (RawLogData[0].empty())
- return;
- size_t Tensor0Size = FeatureSpecs[0].Spec.getElementCount() *
- FeatureSpecs[0].Spec.getElementByteSize();
- size_t NumberOfRecords = RawLogData[0].size() / Tensor0Size;
- if (NumberOfRecords == 0)
- return;
- size_t RewardSize =
- RewardSpec.getElementCount() * RewardSpec.getElementByteSize();
- size_t NumberOfRewards = RawLogData.back().size() / RewardSize;
-
- OS << "feature_lists: {\n";
- for (size_t I = 0; I < FeatureSpecs.size(); ++I)
- writeRawTensorsAsFeatureLists(OS, FeatureSpecs[I], RawLogData[I].data(),
- NumberOfRecords);
-
- if (IncludeReward)
- writeRawTensorsAsFeatureLists(OS, {RewardSpec, None},
- RawLogData.back().data(), NumberOfRecords,
- NumberOfRewards == 1);
-
- OS << "}\n";
+#define TFUTILS_GETDATATYPE_IMPL(T, E) \
+ template <> int TensorSpec::getDataType<T>() { return E; }
+
+TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)
+
+#undef TFUTILS_GETDATATYPE_IMPL
+
+TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
+TFModelEvaluator::~TFModelEvaluator() {}
+
+void Logger::print(raw_ostream &OS) {
+ if (RawLogData.empty())
+ return;
+ if (RawLogData[0].empty())
+ return;
+ size_t Tensor0Size = FeatureSpecs[0].Spec.getElementCount() *
+ FeatureSpecs[0].Spec.getElementByteSize();
+ size_t NumberOfRecords = RawLogData[0].size() / Tensor0Size;
+ if (NumberOfRecords == 0)
+ return;
+ size_t RewardSize =
+ RewardSpec.getElementCount() * RewardSpec.getElementByteSize();
+ size_t NumberOfRewards = RawLogData.back().size() / RewardSize;
+
+ OS << "feature_lists: {\n";
+ for (size_t I = 0; I < FeatureSpecs.size(); ++I)
+ writeRawTensorsAsFeatureLists(OS, FeatureSpecs[I], RawLogData[I].data(),
+ NumberOfRecords);
+
+ if (IncludeReward)
+ writeRawTensorsAsFeatureLists(OS, {RewardSpec, None},
+ RawLogData.back().data(), NumberOfRecords,
+ NumberOfRewards == 1);
+
+ OS << "}\n";
}
-#endif // defined(LLVM_HAVE_TF_API)
+#endif // defined(LLVM_HAVE_TF_API)
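
For reference, a minimal sketch of producing a training log with the Logger whose print method appears above, assuming Logger's constructor and logTensorValue helper from llvm/Analysis/Utils/TFUtils.h; the spec names and values are placeholders:

#include "llvm/Analysis/Utils/TFUtils.h"
using namespace llvm;

void writeTrainingLog(raw_ostream &OS) {
  std::vector<LoggedFeatureSpec> Features{
      {TensorSpec::createSpec<int64_t>("inlining_decision", {1}), None}};
  TensorSpec RewardSpec = TensorSpec::createSpec<float>("delta_size", {1});
  Logger Log(Features, RewardSpec, /*IncludeReward=*/true);
  // Log one record: the decision tensor, then its reward. Rewards occupy
  // the last raw-data buffer (RawLogData.back()), as print() above implies.
  int64_t Decision = 1;
  float Reward = -8.0f;
  Log.logTensorValue(0, &Decision);
  Log.logTensorValue(Features.size(), &Reward);
  Log.print(OS); // emits the feature_lists: { ... } text-proto shown above
}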