//===- TensorSpec.h - type descriptor for a tensor --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
#ifndef LLVM_ANALYSIS_TENSORSPEC_H
#define LLVM_ANALYSIS_TENSORSPEC_H

#include "llvm/Config/llvm-config.h"

#include "llvm/ADT/StringMap.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/JSON.h"

#include <memory>
#include <vector>

namespace llvm {
/// TensorSpec encapsulates the specification of a tensor: its dimensions, or
/// "shape" (row-major), its type (see TensorSpec::getDataType specializations
/// for supported types), its name and port (see "TensorFlow: Large-Scale
/// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
/// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
///
/// Known tensor types. The left part is the C type, the right is a name we
/// can use to identify the type (to implement TensorSpec equality checks) and,
/// if needed, to map to an underlying evaluator's type system. The main
/// requirement is that the C type we use has the same size and encoding
/// (e.g. endianness) as the one used by the evaluator.
#define SUPPORTED_TENSOR_TYPES(M)                                              \
  M(float, Float)                                                              \
  M(double, Double)                                                            \
  M(int8_t, Int8)                                                              \
  M(uint8_t, UInt8)                                                            \
  M(int16_t, Int16)                                                            \
  M(uint16_t, UInt16)                                                          \
  M(int32_t, Int32)                                                            \
  M(uint32_t, UInt32)                                                          \
  M(int64_t, Int64)                                                            \
  M(uint64_t, UInt64)

enum class TensorType {
  Invalid,
#define _TENSOR_TYPE_ENUM_MEMBERS(_, Name) Name,
  SUPPORTED_TENSOR_TYPES(_TENSOR_TYPE_ENUM_MEMBERS)
#undef _TENSOR_TYPE_ENUM_MEMBERS
};

class TensorSpec final {
public:
  template <typename T>
  static TensorSpec createSpec(const std::string &Name,
                               const std::vector<int64_t> &Shape,
                               int Port = 0) {
    return TensorSpec(Name, Port, getDataType<T>(), sizeof(T), Shape);
  }

  const std::string &name() const { return Name; }
  int port() const { return Port; }
  TensorType type() const { return Type; }
  const std::vector<int64_t> &shape() const { return Shape; }

  bool operator==(const TensorSpec &Other) const {
    return Name == Other.Name && Port == Other.Port && Type == Other.Type &&
           Shape == Other.Shape;
  }

  bool operator!=(const TensorSpec &Other) const { return !(*this == Other); }

  /// Get the number of elements in a tensor with this shape.
  size_t getElementCount() const { return ElementCount; }
  /// Get the size, in bytes, of one element.
  size_t getElementByteSize() const { return ElementSize; }
  /// Get the total size of a memory buffer needed to store the whole tensor.
  size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }

  template <typename T> bool isElementType() const {
    return getDataType<T>() == Type;
  }

private:
  TensorSpec(const std::string &Name, int Port, TensorType Type,
             size_t ElementSize, const std::vector<int64_t> &Shape);

  template <typename T> static TensorType getDataType();

  std::string Name;
  int Port = 0;
  TensorType Type = TensorType::Invalid;
  std::vector<int64_t> Shape;
  size_t ElementCount = 0;
  size_t ElementSize = 0;
};
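
// Illustrative usage (a sketch, not part of this header's API surface): the
// spec below describes a 2x3 int64_t tensor; the name "input" and the shape
// are arbitrary examples. ElementCount is derived from the shape by the
// private constructor.
//
//   TensorSpec Spec = TensorSpec::createSpec<int64_t>("input", {2, 3});
//   assert(Spec.isElementType<int64_t>());     // matches the T given above
//   assert(Spec.getElementCount() == 6);       // 2 * 3 elements
//   assert(Spec.getTotalTensorBufferSize() ==
//          6 * sizeof(int64_t));               // element count * element size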

/// Construct a TensorSpec from a JSON dictionary of the form:
/// { "name": <string>,
///   "port": <int>,
///   "type": <string. Use LLVM's types, e.g. float, double, int64_t>,
///   "shape": <array of ints> }
/// For the "type" field, see the C++ primitive types used in
/// SUPPORTED_TENSOR_TYPES.
Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
                                           const json::Value &Value);

struct LoggedFeatureSpec {
  TensorSpec Spec;
  Optional<std::string> LoggingName;
  const std::string &getLoggingName() const {
    return LoggingName ? *LoggingName : Spec.name();
  }
};

/// Load the output specs. If SpecFileOverride is not empty, that path is used.
/// Otherwise, the file is assumed to be called 'output_spec.json' and to be
/// found under ModelPath (the model directory).
/// The first output tensor name must match ExpectedDecisionName.
/// On error, None is returned and the error is logged.
Optional<std::vector<LoggedFeatureSpec>>
loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
                StringRef ModelPath, StringRef SpecFileOverride = StringRef());

#define TFUTILS_GETDATATYPE_DEF(T, Name)                                       \
  template <> TensorType TensorSpec::getDataType<T>();
SUPPORTED_TENSOR_TYPES(TFUTILS_GETDATATYPE_DEF)

#undef TFUTILS_GETDATATYPE_DEF
} // namespace llvm

#endif // LLVM_ANALYSIS_TENSORSPEC_H
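
// Illustrative usage of getTensorSpecFromJSON (a sketch, not part of the
// header; assumes code inside namespace llvm and a valid LLVMContext `Ctx`).
// The JSON literal is an arbitrary example matching the schema documented
// above. Spec-level problems are logged through Ctx and yield None; a JSON
// parse failure comes back as an Error that must be consumed.
//
//   Expected<json::Value> Parsed = json::parse(
//       R"({"name": "input", "port": 0, "type": "int64_t", "shape": [2, 3]})");
//   if (Parsed) {
//     if (Optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Parsed))
//       assert(Spec->type() == TensorType::Int64);
//   } else
//     consumeError(Parsed.takeError());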