path: root/contrib/libs/llvm16/lib/Analysis/TFLiteUtils.cpp
//===- TFLiteUtils.cpp - tensorflow evaluation utilities ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for interfacing with the TFLite C++ APIs.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TFLITE)

#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/Support/Base64.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"

#error #include "tensorflow/lite/interpreter.h"
#error #include "tensorflow/lite/kernels/register.h"
#error #include "tensorflow/lite/model.h"
#error #include "tensorflow/lite/model_builder.h"
#error #include "tensorflow/lite/op_resolver.h"
#error #include "tensorflow/lite/logger.h"

#include <cassert>
#include <numeric>
#include <optional>

using namespace llvm;

namespace llvm {
class EvaluationResultImpl {
public:
  EvaluationResultImpl(const std::vector<const TfLiteTensor *> &Outputs)
      : Outputs(Outputs) {}

  const TfLiteTensor *getOutput(size_t I) { return Outputs[I]; }

  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;

private:
  const std::vector<const TfLiteTensor *> Outputs;
};

class TFModelEvaluatorImpl {
public:
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       const std::vector<TensorSpec> &OutputSpecs,
                       const char *Tags);

  bool isValid() const { return IsValid; }
  size_t outputSize() const { return Output.size(); }

  std::unique_ptr<EvaluationResultImpl> evaluate() {
    Interpreter->Invoke();
    return std::make_unique<EvaluationResultImpl>(Output);
  }

  const std::vector<TfLiteTensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  std::unique_ptr<tflite::FlatBufferModel> Model;

  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  std::unique_ptr<tflite::Interpreter> Interpreter;

  /// The input tensors. We set up the tensors once and just mutate their
  /// scalars before each evaluation. The input tensors keep their value after
  /// an evaluation.
  std::vector<TfLiteTensor *> Input;

  /// The output nodes.
  std::vector<const TfLiteTensor *> Output;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for checking that a requested tensor was found in the
  /// loaded model and that its buffer size matches the given TensorSpec;
  /// reports the problem and invalidates the evaluator otherwise.
  bool checkReportAndInvalidate(const TfLiteTensor *Tensor,
                                const TensorSpec &Spec);
};

} // namespace llvm

TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    const std::vector<TensorSpec> &OutputSpecs, const char *Tags = "serve")
    : Input(InputSpecs.size()), Output(OutputSpecs.size()) {
  // INFO and DEBUG messages could be numerous and not particularly interesting
  tflite::LoggerOptions::SetMinimumLogSeverity(tflite::TFLITE_LOG_WARNING);
  // FIXME: make ErrorReporter a member (may also need subclassing
  // StatefulErrorReporter) to easily get the latest error status, for
  // debugging.
  tflite::StderrReporter ErrorReporter;
  SmallVector<char, 128> TFLitePathBuff;
  llvm::sys::path::append(TFLitePathBuff, SavedModelPath, "model.tflite");
  StringRef TFLitePath(TFLitePathBuff.data(), TFLitePathBuff.size());
  Model = tflite::FlatBufferModel::BuildFromFile(TFLitePath.str().c_str(),
                                                 &ErrorReporter);
  if (!Model) {
    invalidate();
    return;
  }

  tflite::ops::builtin::BuiltinOpResolver Resolver;
  tflite::InterpreterBuilder Builder(*Model, Resolver);
  Builder(&Interpreter);

  if (!Interpreter) {
    invalidate();
    return;
  }

  // We assume the input buffers are valid for the lifetime of the interpreter.
  // By default, tflite allocates memory in an arena and will periodically take
  // away memory and reallocate it in a different location after evaluations in
  // order to improve utilization of the buffers owned in the arena. So, we
  // explicitly mark our input buffers as persistent to avoid this behavior.
  for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
    Interpreter->tensor(I)->allocation_type =
        TfLiteAllocationType::kTfLiteArenaRwPersistent;

  if (Interpreter->AllocateTensors() != TfLiteStatus::kTfLiteOk) {
    invalidate();
    return;
  }
  // Known inputs and outputs
  StringMap<int> InputsMap;
  StringMap<int> OutputsMap;
  for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
    InputsMap[Interpreter->GetInputName(I)] = I;
  for (size_t I = 0; I < Interpreter->outputs().size(); ++I)
    OutputsMap[Interpreter->GetOutputName(I)] = I;

  size_t NumberFeaturesPassed = 0;
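  // Bind each input spec to the model tensor named "<name>:<port>"; specs
  // without a matching tensor are left null and skipped.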
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    auto MapI = InputsMap.find(InputSpec.name() + ":" +
                               std::to_string(InputSpec.port()));
    if (MapI == InputsMap.end()) {
      Input[I] = nullptr;
      continue;
    }
    Input[I] = Interpreter->tensor(MapI->second);
    if (!checkReportAndInvalidate(Input[I], InputSpec))
      return;
    std::memset(Input[I]->data.data, 0,
                InputSpecs[I].getTotalTensorBufferSize());
    ++NumberFeaturesPassed;
  }

  if (NumberFeaturesPassed < Interpreter->inputs().size()) {
    // Not every model input was bound to a provided spec; report the error and
    // invalidate.
    errs() << "Required feature(s) have not been passed to the ML model";
    invalidate();
    return;
  }

  for (size_t I = 0; I < OutputSpecs.size(); ++I) {
    const auto &OutputSpec = OutputSpecs[I];
    Output[I] = Interpreter->output_tensor(
        OutputsMap[OutputSpec.name() + ":" +
                   std::to_string(OutputSpec.port())]);
    if (!checkReportAndInvalidate(Output[I], OutputSpec))
      return;
  }
}

TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                   const std::vector<TensorSpec> &InputSpecs,
                                   const std::vector<TensorSpec> &OutputSpecs,
                                   const char *Tags)
    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
                                    Tags)) {
  if (!Impl->isValid())
    Impl.reset();
}

TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {}

bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TfLiteTensor *Tensor,
                                                    const TensorSpec &Spec) {
  if (!Tensor) {
    errs() << "Could not find TF_Output named: " + Spec.name();
    IsValid = false;
    // Return early: the size check below would dereference a null Tensor.
    return IsValid;
  }
  if (Spec.getTotalTensorBufferSize() != Tensor->bytes)
    IsValid = false;

  // If the total sizes match, there could still be a mismatch in the shape.
  // We ignore that for now.

  return IsValid;
}

std::optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
  if (!isValid())
    return std::nullopt;
  return EvaluationResult(Impl->evaluate());
}

void *TFModelEvaluator::getUntypedInput(size_t Index) {
  TfLiteTensor *T = Impl->getInput()[Index];
  if (!T)
    return nullptr;
  return T->data.data;
}

TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}

TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}

TFModelEvaluator::EvaluationResult &
TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
  Impl = std::move(Other.Impl);
  return *this;
}

void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
  return Impl->getOutput(Index)->data.data;
}

const void *
TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
  return Impl->getOutput(Index)->data.data;
}

TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
TFModelEvaluator::~TFModelEvaluator() {}

#endif // defined(LLVM_HAVE_TFLITE)
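
// Example usage (a minimal sketch, not part of this file): the model
// directory, tensor names, shapes, and element types below are illustrative
// assumptions. TensorSpec::createSpec<> comes from llvm/Analysis/TensorSpec.h;
// getTensor<> and getTensorValue<> are the typed wrappers, declared in
// llvm/Analysis/Utils/TFUtils.h, around the untyped accessors defined above.
//
// \code
//   std::vector<TensorSpec> InputSpecs{
//       TensorSpec::createSpec<float>("input", {1})};
//   std::vector<TensorSpec> OutputSpecs{
//       TensorSpec::createSpec<float>("output", {1})};
//   // The directory is expected to contain a model.tflite file.
//   TFModelEvaluator Evaluator("/path/to/model_dir", InputSpecs, OutputSpecs);
//   if (Evaluator.isValid()) {
//     *Evaluator.getTensor<float>(0) = 42.0f;           // fill the input
//     if (auto Result = Evaluator.evaluate())
//       float Out = *Result->getTensorValue<float>(0);  // read the output
//   }
// \endcode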