//===- TFUtils.h - utilities for tensorflow C API ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_UTILS_TFUTILS_H
#define LLVM_ANALYSIS_UTILS_TFUTILS_H

#include "llvm/Config/llvm-config.h"

#ifdef LLVM_HAVE_TF_API
#include "llvm/ADT/StringMap.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/JSON.h"

#include <memory>
#include <vector>

namespace llvm {

/// Load a SavedModel, find the given inputs and outputs, and set up storage
/// for input tensors. The user is responsible for correctly dimensioning the
/// input tensors and setting their values before calling evaluate().
/// To initialize:
/// - construct the object
/// - initialize the input tensors using initInput. Indices must correspond to
///   indices in the InputNames used at construction.
/// To use:
/// - set input values by using getInput to get each input tensor, and then
///   setting internal scalars, for all dimensions (tensors are row-major:
///   https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/c/c_api.h#L205)
/// - call evaluate. The input tensors' values are not consumed after this, and
///   may still be read.
/// - use the outputs in the output vector
/// See the usage sketch following the TFModelEvaluator class definition below.
class TFModelEvaluatorImpl;
class EvaluationResultImpl;

/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
/// for supported types), its name and port (see "TensorFlow: Large-Scale
/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
///
/// TensorSpec is used to set up a TFModelEvaluator by describing the expected
/// inputs and outputs.
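///
/// For instance (an illustrative sketch; the tensor name "some_feature" is a
/// placeholder, not an API requirement), a 2x3 float tensor could be described
/// as:
/// \code
///   TensorSpec Spec = TensorSpec::createSpec<float>("some_feature", {2, 3});
///   assert(Spec.getElementCount() == 6);
///   assert(Spec.isElementType<float>());
/// \endcode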
class TensorSpec final {
public:
  template <typename T>
  static TensorSpec createSpec(const std::string &Name,
                               const std::vector<int64_t> &Shape,
                               int Port = 0) {
    return TensorSpec(Name, Port, getDataType<T>(), Shape);
  }

  const std::string &name() const { return Name; }
  int port() const { return Port; }
  int typeIndex() const { return TypeIndex; }
  const std::vector<int64_t> &shape() const { return Shape; }

  bool operator==(const TensorSpec &Other) const {
    return Name == Other.Name && Port == Other.Port &&
           TypeIndex == Other.TypeIndex && Shape == Other.Shape;
  }

  bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }

  /// Get the number of elements in a tensor with this shape.
  size_t getElementCount() const { return ElementCount; }
  /// Get the size, in bytes, of one element.
  size_t getElementByteSize() const;

  template <typename T> bool isElementType() const {
    return getDataType<T>() == TypeIndex;
  }

private:
  TensorSpec(const std::string &Name, int Port, int TypeIndex,
             const std::vector<int64_t> &Shape);

  template <typename T> static int getDataType() {
    llvm_unreachable("Undefined tensor type");
  }

  std::string Name;
  int Port = 0;
  int TypeIndex = 0;
  std::vector<int64_t> Shape;
  size_t ElementCount = 0;
};

/// Construct a TensorSpec from a JSON dictionary of the form:
/// { "name": <string>,
///   "port": <int>,
///   "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
///   "shape": <array of ints> }
/// For the "type" field, see the C++ primitive types used in
/// TFUTILS_SUPPORTED_TYPES.
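/// As an illustrative example (the name is hypothetical), a 2x3 float tensor
/// on port 0 would be described by:
/// \code
///   { "name": "some_feature", "port": 0, "type": "float", "shape": [2, 3] }
/// \endcode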
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value);

struct LoggedFeatureSpec {
  TensorSpec Spec;
  Optional<std::string> LoggingName;
};

/// Load the output specs. If SpecFileOverride is not empty, that path is used.
/// Otherwise, the file is assumed to be called 'output_spec.json' and be found
/// under ModelPath (the model directory).
/// The first output tensor name must match ExpectedDecisionName.
/// In case of error, the return is None and the error is logged.
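///
/// A minimal usage sketch (the decision name and model directory here are
/// hypothetical):
/// \code
///   if (auto Specs = loadOutputSpecs(Ctx, "decision", "path/to/model/dir"))
///     for (const LoggedFeatureSpec &S : *Specs)
///       outs() << (S.LoggingName ? *S.LoggingName : S.Spec.name()) << "\n";
/// \endcode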
Optional<std::vector<LoggedFeatureSpec>>
loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
                StringRef ModelPath, StringRef SpecFileOverride = StringRef());

/// Logging utility - given an ordered specification of features and assuming a
/// scalar reward, allows logging feature values and rewards, and then printing
/// them as a tf.train.SequenceExample text protobuf.
/// The assumption is that, for an event to be logged (i.e. a set of feature
/// values and a reward), the user calls the log* API for each feature exactly
/// once, providing the index matching the position in the feature spec list
/// provided at construction. The example assumes the first feature's element
/// type is float, the second is int64, and the reward is float:
///
/// event 0:
///   logFloatValue(0, ...)
///   logInt64Value(1, ...)
///   ...
///   logFloatReward(...)
/// event 1:
///   logFloatValue(0, ...)
///   logInt64Value(1, ...)
///   ...
///   logFloatReward(...)
///
/// At the end, call print to generate the protobuf.
/// Alternatively, don't call logReward at the end of each event; just call
/// log{Float|Int32|Int64}FinalReward at the end.
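///
/// A concrete sketch of the two-feature example above (the feature names,
/// shapes, and values are placeholders):
/// \code
///   std::vector<LoggedFeatureSpec> Features{
///       {TensorSpec::createSpec<float>("f0", {1}), None},
///       {TensorSpec::createSpec<int64_t>("f1", {1}), None}};
///   Logger Log(Features, TensorSpec::createSpec<float>("reward", {1}),
///              /*IncludeReward=*/true);
///   float F0 = 0.5f;
///   int64_t F1 = 42;
///   Log.logFloatValue(0, &F0);
///   Log.logInt64Value(1, &F1);
///   Log.logFloatReward(1.0f);
///   Log.print(outs());
/// \endcode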
class LoggerDataImpl;
class Logger final {
public:
  /// Construct a Logger. If IncludeReward is false, then logReward or
  /// logFinalReward shouldn't be called, and the reward feature won't be
  /// printed out.
  Logger(const std::vector<LoggedFeatureSpec> &FeatureSpecs,
         const TensorSpec &RewardSpec, bool IncludeReward);

  ~Logger();

  void logFloatReward(float Value);
  void logInt32Reward(int32_t Value);
  void logInt64Reward(int64_t Value);

  void logFloatFinalReward(float Value);
  void logInt32FinalReward(int32_t Value);
  void logInt64FinalReward(int64_t Value);

  void logFloatValue(size_t FeatureID, const float *Value);
  void logInt32Value(size_t FeatureID, const int32_t *Value);
  void logInt64Value(size_t FeatureID, const int64_t *Value);

  void logSpecifiedTensorValue(size_t FeatureID, const char *RawData);

  // Warning! For int32_t, the returned buffer is set up for int64_t, so the
  // caller needs to cast their int32_t values piecemeal.
  // FIXME: let's drop int32_t support. While it's supported by the evaluator,
  // it's not supported by the tensorflow::SequenceExample proto. For small
  // values, we can consider using bytes.
  char *addEntryAndGetFloatOrInt64Buffer(size_t FeatureID);

  void print(raw_ostream &OS);

private:
  std::vector<LoggedFeatureSpec> FeatureSpecs;
  TensorSpec RewardSpec;
  const bool IncludeReward;
  std::unique_ptr<LoggerDataImpl> LoggerData;
};

class TFModelEvaluator final {
public:
  /// The result of a model evaluation. Handles the lifetime of the output
  /// tensors, which means that their values need to be used before
  /// the EvaluationResult's dtor is called.
  class EvaluationResult {
  public:
    EvaluationResult(const EvaluationResult &) = delete;
    EvaluationResult &operator=(const EvaluationResult &Other) = delete;

    EvaluationResult(EvaluationResult &&Other);
    EvaluationResult &operator=(EvaluationResult &&Other);

    ~EvaluationResult();

    /// Get a (const) pointer to the first element of the tensor at Index.
    template <typename T> T *getTensorValue(size_t Index) {
      return static_cast<T *>(getUntypedTensorValue(Index));
    }

    template <typename T> const T *getTensorValue(size_t Index) const {
      return static_cast<const T *>(getUntypedTensorValue(Index));
    }

    /// Get a (const) pointer to the untyped data of the tensor.
    void *getUntypedTensorValue(size_t Index);
    const void *getUntypedTensorValue(size_t Index) const;

  private:
    friend class TFModelEvaluator;
    EvaluationResult(std::unique_ptr<EvaluationResultImpl> Impl);
    std::unique_ptr<EvaluationResultImpl> Impl;
  };

  TFModelEvaluator(StringRef SavedModelPath,
                   const std::vector<TensorSpec> &InputSpecs,
                   const std::vector<TensorSpec> &OutputSpecs,
                   const char *Tags = "serve");
  TFModelEvaluator(StringRef SavedModelPath,
                   const std::vector<TensorSpec> &InputSpecs,
                   function_ref<TensorSpec(size_t)> GetOutputSpecs,
                   size_t OutputSpecsSize, const char *Tags = "serve");

  ~TFModelEvaluator();
  TFModelEvaluator(const TFModelEvaluator &) = delete;
  TFModelEvaluator(TFModelEvaluator &&) = delete;

  /// Evaluate the model, assuming it is valid. Returns None if the evaluation
  /// fails or the model is invalid, or an EvaluationResult otherwise. The
  /// inputs are assumed to have been already provided via getInput(). When
  /// returning None, it also invalidates this object.
  Optional<EvaluationResult> evaluate();

  /// Provides access to the input vector.
  template <typename T> T *getInput(size_t Index) {
    return static_cast<T *>(getUntypedInput(Index));
  }

  /// Returns true if the tensorflow model was loaded successfully, false
  /// otherwise.
  bool isValid() const { return !!Impl; }

private:
  void *getUntypedInput(size_t Index);
  std::unique_ptr<TFModelEvaluatorImpl> Impl;
};
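
// An end-to-end usage sketch (the model path, tensor names, and shapes below
// are hypothetical, and error handling is elided):
//
//   std::vector<TensorSpec> Inputs{TensorSpec::createSpec<float>("in", {2})};
//   std::vector<TensorSpec> Outputs{TensorSpec::createSpec<float>("out", {1})};
//   TFModelEvaluator Evaluator("path/to/saved_model", Inputs, Outputs);
//   if (Evaluator.isValid()) {
//     float *In = Evaluator.getInput<float>(0);
//     In[0] = 0.5f;
//     In[1] = 1.5f;
//     if (auto Result = Evaluator.evaluate()) {
//       float Out = *Result->getTensorValue<float>(0);
//       (void)Out;
//     }
//   }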

/// List of supported types, as a pair:
/// - C++ type
/// - enum name (implementation-specific)
#define TFUTILS_SUPPORTED_TYPES(M)                                             \
  M(float, TF_FLOAT)                                                           \
  M(double, TF_DOUBLE)                                                         \
  M(int8_t, TF_INT8)                                                           \
  M(uint8_t, TF_UINT8)                                                         \
  M(int16_t, TF_INT16)                                                         \
  M(uint16_t, TF_UINT16)                                                       \
  M(int32_t, TF_INT32)                                                         \
  M(uint32_t, TF_UINT32)                                                       \
  M(int64_t, TF_INT64)                                                         \
  M(uint64_t, TF_UINT64)

#define TFUTILS_GETDATATYPE_DEF(T, E)                                          \
  template <> int TensorSpec::getDataType<T>();

TFUTILS_SUPPORTED_TYPES(TFUTILS_GETDATATYPE_DEF)
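
// For instance, the expansion above declares, for the (float, TF_FLOAT) entry,
// the specialization:
//
//   template <> int TensorSpec::getDataType<float>();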

#undef TFUTILS_GETDATATYPE_DEF
} // namespace llvm

#endif // LLVM_HAVE_TF_API
#endif // LLVM_ANALYSIS_UTILS_TFUTILS_H