blob: 808d81d378da7ff3866e09243f800d0df92ae24f (
plain) (
blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
|
#pragma once
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
//===- NoInferenceModelRunner.h ---- noop ML model runner ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#include "llvm/Config/llvm-config.h"
/// While not strictly necessary to conditionally compile this, it really
/// has no usecase outside the 'development' mode.
#ifdef LLVM_HAVE_TF_API
#include "llvm/Analysis/MLModelRunner.h"
#include "llvm/Analysis/Utils/TFUtils.h"
namespace llvm {
/// A pseudo model runner. We use it to store feature values when collecting
/// logs for the default policy, in 'development' mode, but never ask it to
/// 'run'.
/// A no-op model runner: it only stores feature tensors so they can be
/// logged (e.g. when capturing the default policy's decisions in
/// 'development' mode). Calling evaluate on it is a programming error.
class NoInferenceModelRunner : public MLModelRunner {
public:
  /// Allocates one buffer per input spec; never loads or runs a model.
  NoInferenceModelRunner(LLVMContext &Ctx,
                         const std::vector<TensorSpec> &Inputs);

  /// LLVM-style RTTI support: identifies this runner as the NoOp kind.
  static bool classof(const MLModelRunner *R) {
    const auto K = R->getKind();
    return K == MLModelRunner::Kind::NoOp;
  }

private:
  /// Raw per-tensor storage backing getTensorUntyped.
  std::vector<std::unique_ptr<char[]>> ValuesBuffer;

  /// Returns a pointer into ValuesBuffer for the tensor at Index.
  void *getTensorUntyped(size_t Index) override;

  /// Deliberately unreachable: this runner exists only to hold feature
  /// values, never to produce an inference result.
  void *evaluateUntyped() override {
    llvm_unreachable("We shouldn't call run on this model runner.");
  }
};
} // namespace llvm
#endif // defined(LLVM_HAVE_TF_API)
#endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
|