LLVM 22.0.0git
TFLiteUtils.cpp
Go to the documentation of this file.
1//===- TFUtils.cpp - TFLite-based evaluation utilities --------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements utilities for interfacing with TFLite.
10//
11//===----------------------------------------------------------------------===//
12#include "llvm/Config/config.h"
13#if defined(LLVM_HAVE_TFLITE)
14
15#include "llvm/ADT/Twine.h"
17#include "llvm/Support/Base64.h"
19#include "llvm/Support/Debug.h"
20#include "llvm/Support/JSON.h"
22#include "llvm/Support/Path.h"
24
25#include "tensorflow/lite/interpreter.h"
26#include "tensorflow/lite/kernels/register.h"
27#include "tensorflow/lite/model.h"
28#include "tensorflow/lite/model_builder.h"
29#include "tensorflow/lite/op_resolver.h"
30#include "tensorflow/lite/logger.h"
31
32#include <cassert>
33#include <optional>
34
35using namespace llvm;
36
37namespace llvm {
38class EvaluationResultImpl {
39public:
40 EvaluationResultImpl(const std::vector<const TfLiteTensor *> &Outputs)
41 : Outputs(Outputs){};
42
43 const TfLiteTensor *getOutput(size_t I) { return Outputs[I]; }
44
45 EvaluationResultImpl(const EvaluationResultImpl &) = delete;
46 EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
47
48private:
49 const std::vector<const TfLiteTensor *> Outputs;
50};
51
/// Pimpl for TFModelEvaluator: owns the TFLite model, interpreter, and the
/// input/output tensor bindings for repeated evaluations.
class TFModelEvaluatorImpl {
public:
  /// Load <SavedModelPath>/model.tflite, build an interpreter and bind the
  /// given specs to its tensors; on any failure isValid() becomes false.
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       const std::vector<TensorSpec> &OutputSpecs,
                       const char *Tags);

  bool isValid() const { return IsValid; }
  size_t outputSize() const { return Output.size(); }

  /// Run one inference and snapshot the output tensor pointers.
  /// NOTE(review): Invoke()'s TfLiteStatus is ignored here, so a failed run
  /// silently yields stale/garbage outputs — consider checking it.
  std::unique_ptr<EvaluationResultImpl> evaluate() {
    Interpreter->Invoke();
    return std::make_unique<EvaluationResultImpl>(Output);
  }

  const std::vector<TfLiteTensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  std::unique_ptr<tflite::FlatBufferModel> Model;

  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  std::unique_ptr<tflite::Interpreter> Interpreter;

  /// The input tensors. We set up the tensors once and just mutate their
  /// scalars before each evaluation. The input tensors keep their value after
  /// an evaluation. Entries may be null for features the model does not use.
  std::vector<TfLiteTensor *> Input;

  /// The output nodes.
  std::vector<const TfLiteTensor *> Output;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TfLiteTensor *Tensor,
                                const TensorSpec &Spec);
};
96
97} // namespace llvm
98
// Loads <SavedModelPath>/model.tflite, builds a TFLite interpreter over it,
// pins the input buffers, and binds InputSpecs/OutputSpecs (keyed "name:port")
// to the interpreter's tensors. Any failure calls invalidate() and returns.
TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    const std::vector<TensorSpec> &OutputSpecs, const char *Tags = "serve")
    : Input(InputSpecs.size()), Output(OutputSpecs.size()) {
  // INFO and DEBUG messages could be numerous and not particularly interesting
  tflite::LoggerOptions::SetMinimumLogSeverity(tflite::TFLITE_LOG_WARNING);
  // FIXME: make ErrorReporter a member (may also need subclassing
  // StatefulErrorReporter) to easily get the latest error status, for
  // debugging.
  // NOTE(review): ErrorReporter is stack-local; this assumes the built
  // FlatBufferModel does not retain the pointer past this ctor — confirm.
  tflite::StderrReporter ErrorReporter;
  SmallVector<char, 128> TFLitePathBuff;
  llvm::sys::path::append(TFLitePathBuff, SavedModelPath, "model.tflite");
  StringRef TFLitePath(TFLitePathBuff.data(), TFLitePathBuff.size());
  Model = tflite::FlatBufferModel::BuildFromFile(TFLitePath.str().c_str(),
                                                 &ErrorReporter);
  if (!Model) {
    invalidate();
    return;
  }

  tflite::ops::builtin::BuiltinOpResolver Resolver;
  tflite::InterpreterBuilder Builder(*Model, Resolver);
  Builder(&Interpreter);

  if (!Interpreter) {
    invalidate();
    return;
  }

  // We assume the input buffers are valid for the lifetime of the interpreter.
  // By default, tflite allocates memory in an arena and will periodically take
  // away memory and reallocate it in a different location after evaluations in
  // order to improve utilization of the buffers owned in the arena. So, we
  // explicitly mark our input buffers as persistent to avoid this behavior.
  // NOTE(review): this indexes the flat tensor table with the input *position*
  // I, which assumes inputs occupy tensor slots 0..N-1; Interpreter->inputs()[I]
  // is the usual position-to-tensor-index translation — confirm against TFLite.
  for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
    Interpreter->tensor(I)->allocation_type =
        TfLiteAllocationType::kTfLiteArenaRwPersistent;

  if (Interpreter->AllocateTensors() != TfLiteStatus::kTfLiteOk) {
    invalidate();
    return;
  }
  // Known inputs and outputs, keyed by "name:port" as reported by TFLite.
  StringMap<int> InputsMap;
  StringMap<int> OutputsMap;
  for (size_t I = 0; I < Interpreter->inputs().size(); ++I)
    InputsMap[Interpreter->GetInputName(I)] = I;
  for (size_t I = 0; I < Interpreter->outputs().size(); ++I)
    OutputsMap[Interpreter->GetOutputName(I)] = I;

  size_t NumberFeaturesPassed = 0;
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    auto MapI = InputsMap.find(InputSpec.name() + ":" +
                               std::to_string(InputSpec.port()));
    if (MapI == InputsMap.end()) {
      // Feature not consumed by this model: leave a null slot so
      // getUntypedInput can report it as unused.
      Input[I] = nullptr;
      continue;
    }
    Input[I] = Interpreter->tensor(MapI->second);
    if (!checkReportAndInvalidate(Input[I], InputSpec))
      return;
    // Zero the buffer so any scalar the caller never sets reads as 0.
    std::memset(Input[I]->data.data, 0,
                InputSpecs[I].getTotalTensorBufferSize());
    ++NumberFeaturesPassed;
  }

  if (NumberFeaturesPassed < Interpreter->inputs().size()) {
    // we haven't passed all the required features to the model, throw an error.
    errs() << "Required feature(s) have not been passed to the ML model";
    invalidate();
    return;
  }

  for (size_t I = 0; I < OutputSpecs.size(); ++I) {
    const auto &OutputSpec = OutputSpecs[I];
    Output[I] = Interpreter->output_tensor(
        OutputsMap[OutputSpec.name() + ":" +
                   std::to_string(OutputSpec.port())]);
    if (!checkReportAndInvalidate(Output[I], OutputSpec))
      return;
  }
}
182
183TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
184 const std::vector<TensorSpec> &InputSpecs,
185 const std::vector<TensorSpec> &OutputSpecs,
186 const char *Tags)
187 : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, OutputSpecs,
188 Tags)) {
189 if (!Impl->isValid())
190 Impl.reset();
191}
192
193TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {}
194
195bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TfLiteTensor *Tensor,
196 const TensorSpec &Spec) {
197 if (!Tensor) {
198 errs() << "Could not find TF_Output named: " + Spec.name();
199 IsValid = false;
200 }
201 if (Spec.getTotalTensorBufferSize() != Tensor->bytes)
202 IsValid = false;
203
204 // If the total sizes match, there could still be a mismatch in the shape.
205 // We ignore that for now.
206
207 return IsValid;
208}
209
210std::optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
211 if (!isValid())
212 return std::nullopt;
213 return EvaluationResult(Impl->evaluate());
214}
215
216void *TFModelEvaluator::getUntypedInput(size_t Index) {
217 TfLiteTensor *T = Impl->getInput()[Index];
218 if (!T)
219 return nullptr;
220 return T->data.data;
221}
222
223TFModelEvaluator::EvaluationResult::EvaluationResult(
224 std::unique_ptr<EvaluationResultImpl> Impl)
225 : Impl(std::move(Impl)) {}
226
227TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
228 : Impl(std::move(Other.Impl)) {}
229
230TFModelEvaluator::EvaluationResult &
231TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
232 Impl = std::move(Other.Impl);
233 return *this;
234}
235
236void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
237 return Impl->getOutput(Index)->data.data;
238}
239
240const void *
241TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
242 return Impl->getOutput(Index)->data.data;
243}
244
245TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
246TFModelEvaluator::~TFModelEvaluator() {}
247
248#endif // defined(LLVM_HAVE_TFLITE)
static bool evaluate(const MCSpecifierExpr &Expr, MCValue &Res, const MCAssembler *Asm)
This file supports working with JSON data.
#define I(x, y, z)
Definition MD5.cpp:57
#define T
static bool isValid(const char C)
Returns true if C is a valid mangled character: <0-9a-zA-Z_>.
static Split data
The Input class is used to parse a yaml document into in-memory structs and vectors.
Interface for looking up the initializer for a variable name, used by Init::resolveReferences.
Definition Record.h:2199
pointer data()
Return a pointer to the vector's buffer, even if empty().
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringMap - This is an unconventional map that is specialized for handling keys that are "strings",...
Definition StringMap.h:133
iterator end()
Definition StringMap.h:224
iterator find(StringRef Key)
Definition StringMap.h:237
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
LLVM_ABI void append(SmallVectorImpl< char > &path, const Twine &a, const Twine &b="", const Twine &c="", const Twine &d="")
Append to path.
Definition Path.cpp:456
This is an optimization pass for GlobalISel generic memory operations.
auto size(R &&Range, std::enable_if_t< std::is_base_of< std::random_access_iterator_tag, typename std::iterator_traits< decltype(Range.begin())>::iterator_category >::value, void > *=nullptr)
Get the size of a range.
Definition STLExtras.h:1655
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
@ Other
Any other memory.
Definition ModRef.h:68
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1867
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:867