TFUtils.cpp (LLVM 14.0.0git)
//===- TFUtils.cpp - tensorflow evaluation utilities ---------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements utilities for interfacing with tensorflow C APIs.
//
//===----------------------------------------------------------------------===//
#include "llvm/Config/config.h"
#if defined(LLVM_HAVE_TF_API)

#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/raw_ostream.h"

#include "google/protobuf/text_format.h"
#include "tensorflow/c/c_api.h"
#include "tensorflow/c/c_api_experimental.h"
#include "tensorflow/core/example/example.pb.h"
#include <cassert>
#include <numeric>

using namespace llvm;

using google::protobuf::Message;
using google::protobuf::TextFormat;

static cl::opt<bool>
    ProtobufTextMode("tfutils-text-log", cl::init(false), cl::Hidden,
                     cl::desc("Output textual (human-readable) protobuf."));

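// Note: -tfutils-text-log only affects LoggerDataImpl::flush() below, which
// then serializes the tensorflow::SequenceExample as human-readable protobuf
// text instead of the binary wire format; handy when inspecting training logs.
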
namespace {

using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
using TFSessionOptionsPtr =
    std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;

struct TFInitializer {
  TFInitializer() {
    assert(!IsInitialized && "TFInitializer should be constructed only once");
    int Argc = 1;
    const char *Name = "";
    const char **NamePtr = &Name;
    TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));
    IsInitialized = true;
  }
  bool IsInitialized = false;
};

llvm::ManagedStatic<TFInitializer> TFLibInitializer;

bool ensureInitTF() { return TFLibInitializer->IsInitialized; }

TFGraphPtr createTFGraph() {
  return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
}

TFStatusPtr createTFStatus() {
  return TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
}

TFSessionOptionsPtr createTFSessionOptions() {
  return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
}
} // namespace

namespace llvm {
class EvaluationResultImpl {
public:
  EvaluationResultImpl(size_t OutputSize)
      : OutputSize(OutputSize), Output(OutputSize) {}

  ~EvaluationResultImpl() {
    for (auto *P : Output)
      if (P)
        TF_DeleteTensor(P);
  }

  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
  std::vector<TF_Tensor *> &getOutput() { return Output; }

private:
  const size_t OutputSize;
  std::vector<TF_Tensor *> Output;
};

size_t TensorSpec::getElementByteSize() const {
  return TF_DataTypeSize(static_cast<TF_DataType>(TypeIndex));
}

TensorSpec::TensorSpec(const std::string &Name, int Port, int TypeIndex,
                       const std::vector<int64_t> &Shape)
    : Name(Name), Port(Port), TypeIndex(TypeIndex), Shape(Shape),
      ElementCount(std::accumulate(Shape.begin(), Shape.end(), int64_t{1},
                                   std::multiplies<int64_t>())) {}

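// For example, a TensorSpec with Shape = {2, 3} has ElementCount = 2 * 3 = 6;
// a scalar spec conventionally uses Shape = {1}, giving ElementCount = 1.
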
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value) {
  auto EmitError = [&](const llvm::Twine &Message) -> Optional<TensorSpec> {
    std::string S;
    llvm::raw_string_ostream OS(S);
    OS << Value;
    Ctx.emitError("Unable to parse JSON Value as spec (" + Message + "): " + S);
    return None;
  };
  // FIXME: accept a Path as a parameter, and use it for error reporting.
  json::Path::Root Root("tensor_spec");
  json::ObjectMapper Mapper(Value, Root);
  if (!Mapper)
    return EmitError("Value is not a dict");

  std::string TensorName;
  int TensorPort = -1;
  std::string TensorType;
  std::vector<int64_t> TensorShape;

  if (!Mapper.map<std::string>("name", TensorName))
    return EmitError("'name' property not present or not a string");
  if (!Mapper.map<std::string>("type", TensorType))
    return EmitError("'type' property not present or not a string");
  if (!Mapper.map<int>("port", TensorPort))
    return EmitError("'port' property not present or not an int");
  if (!Mapper.map<std::vector<int64_t>>("shape", TensorShape))
    return EmitError("'shape' property not present or not an int array");

#define PARSE_TYPE(T, E)                                                       \
  if (TensorType == #T)                                                        \
    return TensorSpec::createSpec<T>(TensorName, TensorShape, TensorPort);
  TFUTILS_SUPPORTED_TYPES(PARSE_TYPE)
#undef PARSE_TYPE
  return None;
}

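// Illustrative input for getTensorSpecFromJSON (the tensor name here is
// hypothetical). The "type" string must match one of the stringified C++
// element types listed in TFUTILS_SUPPORTED_TYPES, e.g. "int64_t":
//   {"name": "input_0", "port": 0, "type": "int64_t", "shape": [1, 4]}
// would parse to TensorSpec::createSpec<int64_t>("input_0", {1, 4}, 0).
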
Optional<std::vector<LoggedFeatureSpec>>
loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
                StringRef ModelPath, StringRef SpecFileOverride) {
  SmallVector<char, 128> OutputSpecsPath;
  StringRef FileName = SpecFileOverride;
  if (FileName.empty()) {
    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
    FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
  }

  auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
  if (!BufferOrError) {
    Ctx.emitError("Error opening output specs file: " + FileName + " : " +
                  BufferOrError.getError().message());
    return None;
  }
  auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
  if (!ParsedJSONValues) {
    Ctx.emitError("Could not parse specs file: " + FileName);
    return None;
  }
  auto ValuesArray = ParsedJSONValues->getAsArray();
  if (!ValuesArray) {
    Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
                  "logging_name:<name>} dictionaries");
    return None;
  }
  std::vector<LoggedFeatureSpec> Ret;
  for (const auto &Value : *ValuesArray)
    if (const auto *Obj = Value.getAsObject())
      if (const auto *SpecPart = Obj->get("tensor_spec"))
        if (auto TensorSpec = getTensorSpecFromJSON(Ctx, *SpecPart))
          if (auto LoggingName = Obj->getString("logging_name")) {
            if (!TensorSpec->isElementType<int64_t>() &&
                !TensorSpec->isElementType<int32_t>() &&
                !TensorSpec->isElementType<float>()) {
              Ctx.emitError(
                  "Only int64, int32, and float tensors are supported. "
                  "Found unsupported type for tensor named " +
                  TensorSpec->name());
              return None;
            }
            Ret.push_back({*TensorSpec, LoggingName->str()});
          }

  if (ValuesArray->size() != Ret.size()) {
    Ctx.emitError(
        "Unable to parse output spec. It should be a json file containing an "
        "array of dictionaries. Each dictionary must have a 'tensor_spec' key, "
        "with a json object describing a TensorSpec; and a 'logging_name' key, "
        "which is a string to use as name when logging this tensor in the "
        "training log.");
    return None;
  }
  if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
    Ctx.emitError("The first output spec must describe the decision tensor, "
                  "and must have the logging_name " +
                  StringRef(ExpectedDecisionName));
    return None;
  }
  return Ret;
}

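// Sketch of an output_spec.json that loadOutputSpecs would accept (names are
// hypothetical); the first entry's logging_name must equal
// ExpectedDecisionName:
//   [
//     {"logging_name": "my_decision",
//      "tensor_spec": {"name": "output_0", "port": 0, "type": "int64_t",
//                      "shape": [1]}}
//   ]
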
class TFModelEvaluatorImpl {
public:
  TFModelEvaluatorImpl(StringRef SavedModelPath,
                       const std::vector<TensorSpec> &InputSpecs,
                       function_ref<TensorSpec(size_t)> GetOutputSpecs,
                       size_t OutputSpecsSize, const char *Tags);

  bool isValid() const { return IsValid; }
  size_t OutputSize() const { return OutputFeed.size(); }

  void evaluate(TF_Tensor **Output, TF_Status *Status) {
    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
                  nullptr, 0, nullptr, Status);
  }

  void initInput(size_t Index, TF_DataType Type,
                 const std::vector<int64_t> &Dimensions);
  const std::vector<TF_Tensor *> &getInput() const { return Input; }

  ~TFModelEvaluatorImpl();

private:
  /// The objects necessary for carrying out an evaluation of the SavedModel.
  /// They are expensive to set up, and we maintain them across all the
  /// evaluations of the model.
  TF_Session *Session = nullptr;
  TFGraphPtr Graph;
  TFSessionOptionsPtr Options;

  /// The specification of the input nodes.
  std::vector<TF_Output> InputFeed;

  /// The input tensors. They must match the corresponding InputFeed value by
  /// index. We set up the tensors once and just mutate their scalars before
  /// each evaluation. The input tensors keep their value after an evaluation.
  std::vector<TF_Tensor *> Input;

  /// The specification of the output nodes. When evaluating, the tensors in
  /// the output tensor vector must match the corresponding element in
  /// OutputFeed by index.
  std::vector<TF_Output> OutputFeed;

  void invalidate() { IsValid = false; }

  bool IsValid = true;

  /// Reusable utility for ensuring we can bind the requested Name to a node in
  /// the SavedModel Graph.
  bool checkReportAndInvalidate(const TF_Output &Output,
                                const TensorSpec &OutputSpec);
};

class LoggerDataImpl {
  const std::vector<LoggedFeatureSpec> LoggedFeatureSpecs;
  const TensorSpec RewardSpec;
  const bool IncludeReward;

  std::vector<tensorflow::FeatureList> FeatureLists;
  tensorflow::FeatureList Reward;

  bool isSelfConsistent(const tensorflow::SequenceExample &SE,
                        size_t NrRecords) const {
    bool Ret = true;
    for (const auto &TSpecs : LoggedFeatureSpecs) {
      const auto &Name = TSpecs.getLoggingName();
      const auto &FL = SE.feature_lists().feature_list().at(Name).feature();
      if (NrRecords != static_cast<size_t>(FL.size())) {
        dbgs() << "[TF-UTILS]: " << Name << " has missing records. Expected "
               << NrRecords << " got " << FL.size() << "\n";
        Ret = false;
      }
    }
    if (IncludeReward && static_cast<size_t>(SE.feature_lists()
                                                 .feature_list()
                                                 .at(RewardSpec.name())
                                                 .feature()
                                                 .size()) != NrRecords) {
      dbgs() << "[TF-UTILS]: reward is missing records.\n";
      Ret = false;
    }
    return Ret;
  }

  void transferLog(tensorflow::SequenceExample &SE) {
    auto *FL = SE.mutable_feature_lists()->mutable_feature_list();
    if (IncludeReward)
      (*FL)[RewardSpec.name()] = std::move(Reward);
    assert(FeatureLists.size() == LoggedFeatureSpecs.size());
    for (size_t I = 0; I < FeatureLists.size(); ++I) {
      const auto &LFS = LoggedFeatureSpecs[I];
      (*FL)[LFS.getLoggingName()] = std::move(FeatureLists[I]);
    }
  }

public:
  LoggerDataImpl(const std::vector<LoggedFeatureSpec> &LoggedSpecs,
                 const TensorSpec &RewardSpec, bool IncludeReward)
      : LoggedFeatureSpecs(LoggedSpecs), RewardSpec(RewardSpec),
        IncludeReward(IncludeReward), FeatureLists(LoggedFeatureSpecs.size()) {}

  // Flush the logged info to a stream and clear the log contents.
  void flush(raw_ostream &OS) {
    size_t NrRecords = getNrRecords();
    (void)NrRecords;
    tensorflow::SequenceExample SE;
    transferLog(SE);
    assert(isSelfConsistent(SE, NrRecords));
    std::string OutStr;
    if (ProtobufTextMode)
      google::protobuf::TextFormat::PrintToString(SE, &OutStr);
    else
      OutStr = SE.SerializeAsString();

    OS << OutStr;
  }

  char *addNewTensor(size_t FeatureID) {
    const auto &Spec = LoggedFeatureSpecs[FeatureID].Spec;
    if (Spec.isElementType<float>()) {
      auto *RF = FeatureLists[FeatureID]
                     .add_feature()
                     ->mutable_float_list()
                     ->mutable_value();
      RF->Resize(Spec.getElementCount(), 0.0);
      return reinterpret_cast<char *>(RF->mutable_data());
    } else if (Spec.isElementType<int32_t>() || Spec.isElementType<int64_t>()) {
      auto *RF = FeatureLists[FeatureID]
                     .add_feature()
                     ->mutable_int64_list()
                     ->mutable_value();
      RF->Resize(Spec.getElementCount(), 0);
      return reinterpret_cast<char *>(RF->mutable_data());
    }
    llvm_unreachable("Unsupported tensor type.");
  }

  template <typename T> void logReward(T Value) {
    assert(IncludeReward);
    if (RewardSpec.isElementType<float>())
      Reward.add_feature()->mutable_float_list()->add_value(Value);
    else if (RewardSpec.isElementType<int32_t>() ||
             RewardSpec.isElementType<int64_t>())
      Reward.add_feature()->mutable_int64_list()->add_value(Value);
    else
      llvm_unreachable("Unsupported tensor type.");
  }

  size_t getNrRecords() const {
    return FeatureLists.empty() ? 0 : FeatureLists[0].feature().size();
  }
};
} // namespace llvm

TFModelEvaluatorImpl::TFModelEvaluatorImpl(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
    const char *Tags = "serve")
    : Graph(createTFGraph()), Options(createTFSessionOptions()),
      InputFeed(InputSpecs.size()), Input(InputSpecs.size()),
      OutputFeed(OutputSpecsSize) {
  if (!ensureInitTF()) {
    errs() << "Tensorflow should have been initialized";
    return;
  }
  auto Status = createTFStatus();

  Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,
                                         SavedModelPath.str().c_str(), &Tags, 1,
                                         Graph.get(), nullptr, Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    invalidate();
  }
  for (size_t I = 0; I < InputSpecs.size(); ++I) {
    auto &InputSpec = InputSpecs[I];
    InputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
        InputSpec.port()};
    if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
      return;
    initInput(I, static_cast<TF_DataType>(InputSpec.typeIndex()),
              InputSpec.shape());
  }
  for (size_t I = 0; I < OutputSpecsSize; ++I) {
    auto OutputSpec = GetOutputSpecs(I);
    OutputFeed[I] = {
        TF_GraphOperationByName(Graph.get(), (OutputSpec.name()).c_str()),
        OutputSpec.port()};
    if (!checkReportAndInvalidate(OutputFeed[I], OutputSpec))
      return;
  }
}

TFModelEvaluator::TFModelEvaluator(
    StringRef SavedModelPath, const std::vector<TensorSpec> &InputSpecs,
    function_ref<TensorSpec(size_t)> GetOutputSpecs, size_t OutputSpecsSize,
    const char *Tags)
    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputSpecs, GetOutputSpecs,
                                    OutputSpecsSize, Tags)) {
  if (!Impl->isValid())
    Impl.reset();
}

TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                   const std::vector<TensorSpec> &InputSpecs,
                                   const std::vector<TensorSpec> &OutputSpecs,
                                   const char *Tags)
    : TFModelEvaluator(
          SavedModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I]; },
          OutputSpecs.size(), Tags) {}

TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
  for (auto *T : Input) {
    TF_DeleteTensor(T);
  }
  if (Session == nullptr)
    return;
  auto Status = createTFStatus();
  TF_DeleteSession(Session, Status.get());
  Session = nullptr;
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK)
    errs() << "Could not delete TF session";
}

bool TFModelEvaluatorImpl::checkReportAndInvalidate(
    const TF_Output &Output, const TensorSpec &OutputSpec) {
  if (Output.oper)
    return true;
  errs() << "Could not find TF_Output named: " + OutputSpec.name();
  IsValid = false;
  return IsValid;
}

Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
  if (!isValid())
    return None;
  std::unique_ptr<EvaluationResultImpl> Ret =
      std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
  auto Status = createTFStatus();
  Impl->evaluate(Ret->getOutput().data(), Status.get());
  if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
    errs() << TF_Message(Status.get());
    Impl.reset();
    return None;
  }
  return EvaluationResult(std::move(Ret));
}

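// Minimal usage sketch for the evaluator, assuming a SavedModel with one
// int64 input and one int64 output (path and tensor names hypothetical):
//   std::vector<TensorSpec> Inputs{
//       TensorSpec::createSpec<int64_t>("input_0", {1})};
//   std::vector<TensorSpec> Outputs{
//       TensorSpec::createSpec<int64_t>("output_0", {1})};
//   TFModelEvaluator Eval("/path/to/saved_model", Inputs, Outputs);
//   if (Eval.isValid()) {
//     *reinterpret_cast<int64_t *>(Eval.getUntypedInput(0)) = 42;
//     if (auto Result = Eval.evaluate()) {
//       int64_t Out =
//           *reinterpret_cast<int64_t *>(Result->getUntypedTensorValue(0));
//       (void)Out;
//     }
//   }
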
void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
                                     const std::vector<int64_t> &Dimensions) {
  int64_t TotalSize = TF_DataTypeSize(Type);
  for (auto &D : Dimensions)
    TotalSize *= D;

  Input[Index] =
      TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
  std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
}

void *TFModelEvaluator::getUntypedInput(size_t Index) {
  return TF_TensorData(Impl->getInput()[Index]);
}

TFModelEvaluator::EvaluationResult::EvaluationResult(
    std::unique_ptr<EvaluationResultImpl> Impl)
    : Impl(std::move(Impl)) {}

TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
    : Impl(std::move(Other.Impl)) {}

TFModelEvaluator::EvaluationResult &
TFModelEvaluator::EvaluationResult::operator=(EvaluationResult &&Other) {
  Impl = std::move(Other.Impl);
  return *this;
}

void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
  return TF_TensorData(Impl->getOutput()[Index]);
}

const void *
TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) const {
  return TF_TensorData(Impl->getOutput()[Index]);
}

#define TFUTILS_GETDATATYPE_IMPL(T, E)                                         \
  template <> int TensorSpec::getDataType<T>() { return E; }

TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_IMPL)

#undef TFUTILS_GETDATATYPE_IMPL

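// Each expansion specializes TensorSpec::getDataType for one supported element
// type. Assuming TFUTILS_SUPPORTED_TYPES (in TFUtils.h) pairs float with
// TF_FLOAT, the expansion for float reads:
//   template <> int TensorSpec::getDataType<float>() { return TF_FLOAT; }
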
TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
TFModelEvaluator::~TFModelEvaluator() {}

Logger::Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
               const TensorSpec &RewardSpec, bool IncludeReward)
    : FeatureSpecs(FeatureSpecs), RewardSpec(RewardSpec),
      IncludeReward(IncludeReward),
      LoggerData(std::make_unique<LoggerDataImpl>(FeatureSpecs, RewardSpec,
                                                  IncludeReward)) {}

Logger::~Logger() {}

#define LOG_REWARD(NAME, TYPE)                                                 \
  void Logger::log##NAME##Reward(TYPE Value) {                                 \
    assert(IncludeReward);                                                     \
    LoggerData->logReward(Value);                                              \
  }

LOG_REWARD(Float, float)
LOG_REWARD(Int32, int32_t)
LOG_REWARD(Int64, int64_t)
#undef LOG_REWARD

#define LOG_FINAL_REWARD(NAME, TYPE)                                           \
  void Logger::log##NAME##FinalReward(TYPE Value) {                            \
    assert(RewardSpec.isElementType<TYPE>());                                  \
    for (size_t I = 1; I < LoggerData->getNrRecords(); ++I)                    \
      log##NAME##Reward(0);                                                    \
    log##NAME##Reward(Value);                                                  \
  }

LOG_FINAL_REWARD(Float, float)
LOG_FINAL_REWARD(Int32, int32_t)
LOG_FINAL_REWARD(Int64, int64_t)
#undef LOG_FINAL_REWARD

void Logger::logFloatValue(size_t FeatureID, const float *Value) {
  assert(FeatureSpecs[FeatureID].Spec.isElementType<float>());
  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
}

void Logger::logInt64Value(size_t FeatureID, const int64_t *Value) {
  assert(FeatureSpecs[FeatureID].Spec.isElementType<int64_t>());
  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
}

void Logger::logInt32Value(size_t FeatureID, const int32_t *Value) {
  assert(FeatureSpecs[FeatureID].Spec.isElementType<int32_t>());
  logSpecifiedTensorValue(FeatureID, reinterpret_cast<const char *>(Value));
}

void Logger::logSpecifiedTensorValue(size_t FeatureID, const char *RawData) {
  const auto &Spec = FeatureSpecs[FeatureID].Spec;
  char *Buff = addEntryAndGetFloatOrInt64Buffer(FeatureID);
  if (Spec.isElementType<int32_t>())
    for (size_t I = 0; I < Spec.getElementCount(); ++I)
      (reinterpret_cast<int64_t *>(Buff))[I] =
          static_cast<int64_t>((reinterpret_cast<const int32_t *>(RawData))[I]);
  else if (Spec.isElementType<int64_t>() || Spec.isElementType<float>())
    std::memcpy(Buff, RawData,
                Spec.getElementCount() * Spec.getElementByteSize());
  else
    llvm_unreachable("Unsupported tensor type");
}

char *Logger::addEntryAndGetFloatOrInt64Buffer(size_t FeatureID) {
  return reinterpret_cast<char *>(LoggerData->addNewTensor(FeatureID));
}

void Logger::flush(raw_ostream &OS) { LoggerData->flush(OS); }

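// Minimal logging sketch, assuming one int64 feature and a float reward (the
// logging_name "my_decision" and the other names are hypothetical):
//   std::vector<LoggedFeatureSpec> Specs{
//       {TensorSpec::createSpec<int64_t>("output_0", {1}),
//        std::string("my_decision")}};
//   Logger Log(Specs, TensorSpec::createSpec<float>("reward", {1}),
//              /*IncludeReward=*/true);
//   int64_t Decision = 1;
//   Log.logInt64Value(0, &Decision);   // one record
//   Log.logFloatFinalReward(1.0f);     // pads earlier records with 0
//   Log.flush(errs());
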
#endif // defined(LLVM_HAVE_TF_API)