//===- MemRefUtils.h - Memref helpers to invoke MLIR JIT code ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Utils for MLIR ABI interfacing with frameworks.
//
// The templated free functions below make it possible to allocate dense
// contiguous buffers with shapes that interoperate properly with the MLIR
// codegen ABI.
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_EXECUTIONENGINE_MEMREFUTILS_H_
#define MLIR_EXECUTIONENGINE_MEMREFUTILS_H_

#include "mlir/ExecutionEngine/CRunnerUtils.h"
#include "mlir/Support/LLVM.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"

#include <algorithm>
#include <array>
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <functional>
#include <initializer_list>
#include <memory>

namespace mlir {
using AllocFunType = llvm::function_ref<void *(size_t)>;

namespace detail {

/// Given a shape with sizes greater than 0 along all dimensions, returns the
/// distance, in number of elements, between a slice in a dimension and the next
/// slice in the same dimension.
///    e.g. shape[3, 4, 5] -> strides[20, 5, 1]
template <size_t N>
inline std::array<int64_t, N> makeStrides(ArrayRef<int64_t> shape) {
  assert(shape.size() == N && "expect shape specification to match rank");
  std::array<int64_t, N> res;
  int64_t running = 1;
  for (int64_t idx = N - 1; idx >= 0; --idx) {
    assert(shape[idx] > 0 &&
           "size must be greater than 0 for all shape dimensions");
    res[idx] = running;
    running *= shape[idx];
  }
  return res;
}
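
// Illustrative sketch: for a contiguous row-major 3x4x5 shape, each stride is
// the product of the sizes of all trailing dimensions.
//
//   std::array<int64_t, 3> strides = detail::makeStrides<3>({3, 4, 5});
//   // strides == {4 * 5, 5, 1} == {20, 5, 1}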

/// Build a `StridedMemRefType<T, N>` descriptor that matches the MLIR ABI.
/// This is an implementation detail that is kept in sync with MLIR codegen
/// conventions.  Additionally takes a `shapeAlloc` array which
/// is used instead of `shape` to allocate "more aligned" data and compute the
/// corresponding strides.
template <int N, typename T>
typename std::enable_if<(N >= 1), StridedMemRefType<T, N>>::type
makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef<int64_t> shape,
                            ArrayRef<int64_t> shapeAlloc) {
  assert(shape.size() == N);
  assert(shapeAlloc.size() == N);
  StridedMemRefType<T, N> descriptor;
  descriptor.basePtr = static_cast<T *>(ptr);
  descriptor.data = static_cast<T *>(alignedPtr);
  descriptor.offset = 0;
  std::copy(shape.begin(), shape.end(), descriptor.sizes);
  auto strides = makeStrides<N>(shapeAlloc);
  std::copy(strides.begin(), strides.end(), descriptor.strides);
  return descriptor;
}
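
// Illustrative sketch: wrapping an existing, already-aligned 2x3 buffer (the
// names below are local to the example):
//
//   float buffer[6];
//   StridedMemRefType<float, 2> desc = detail::makeStridedMemRefDescriptor<2>(
//       buffer, buffer, /*shape=*/{2, 3}, /*shapeAlloc=*/{2, 3});
//   // desc.sizes == {2, 3}, desc.strides == {3, 1}, desc.offset == 0.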

/// Build a `StridedMemRefType<T, 0>` descriptor that matches the MLIR ABI.
/// This is an implementation detail that is kept in sync with MLIR codegen
/// conventions. A rank-0 memref carries no sizes or strides, so `shape` and
/// `shapeAlloc` must both be empty.
template <int N, typename T>
typename std::enable_if<(N == 0), StridedMemRefType<T, 0>>::type
makeStridedMemRefDescriptor(T *ptr, T *alignedPtr, ArrayRef<int64_t> shape = {},
                            ArrayRef<int64_t> shapeAlloc = {}) {
  assert(shape.size() == N);
  assert(shapeAlloc.size() == N);
  StridedMemRefType<T, 0> descriptor;
  descriptor.basePtr = static_cast<T *>(ptr);
  descriptor.data = static_cast<T *>(alignedPtr);
  descriptor.offset = 0;
  return descriptor;
}
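
// Illustrative sketch: a rank-0 ("scalar") descriptor simply wraps a pointer.
//
//   float scalar = 42.0f;
//   auto desc0 = detail::makeStridedMemRefDescriptor<0>(&scalar, &scalar);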

/// Allocate `nElements` of type T with an optional `alignment`.
/// This replaces a portable `posix_memalign`.
/// `alignment` must be a power of 2 and greater than or equal to the size of
/// T. By default the alignment is the next power of 2 greater than or equal to
/// sizeof(T).
template <typename T>
std::pair<T *, T *>
allocAligned(size_t nElements, AllocFunType allocFun = &::malloc,
             llvm::Optional<uint64_t> alignment = llvm::Optional<uint64_t>()) {
  assert(sizeof(T) < (1ull << 32) && "Elemental type overflows");
  auto size = nElements * sizeof(T);
  auto desiredAlignment = alignment.getValueOr(nextPowerOf2(sizeof(T)));
  assert((desiredAlignment & (desiredAlignment - 1)) == 0);
  assert(desiredAlignment >= sizeof(T));
  T *data = reinterpret_cast<T *>(allocFun(size + desiredAlignment));
  uintptr_t addr = reinterpret_cast<uintptr_t>(data);
  uintptr_t rem = addr % desiredAlignment;
  T *alignedData = (rem == 0)
                       ? data
                       : reinterpret_cast<T *>(addr + (desiredAlignment - rem));
  assert(reinterpret_cast<uintptr_t>(alignedData) % desiredAlignment == 0);
  return std::make_pair(data, alignedData);
}
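
// Illustrative sketch (assumes `float` elements; the base pointer is what must
// eventually be freed, while element data lives at the aligned pointer):
//
//   float *base, *aligned;
//   std::tie(base, aligned) =
//       detail::allocAligned<float>(/*nElements=*/16, &::malloc,
//                                   /*alignment=*/64);
//   // ... use `aligned` as storage for 16 floats ...
//   ::free(base);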

} // namespace detail

//===----------------------------------------------------------------------===//
// Public API
//===----------------------------------------------------------------------===//

/// Convenient callback to "visit" a memref element by element.
/// This takes a reference to an individual element as well as the coordinates.
/// It can be used in conjunction with a StridedMemrefIterator.
template <typename T>
using ElementWiseVisitor = llvm::function_ref<void(T &ptr, ArrayRef<int64_t>)>;
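
// Illustrative sketch: a visitor that derives each element's value from its
// coordinates. A callable like this converts implicitly to
// ElementWiseVisitor<float> at a call site (e.g. the OwningMemRef constructor
// below).
//
//   auto init = [](float &elt, ArrayRef<int64_t> indices) {
//     elt = static_cast<float>(indices[0] + indices[1]);
//   };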

/// Owning MemRef type that abstracts over the runtime type for ranked strided
/// memref.
template <typename T, unsigned Rank>
class OwningMemRef {
public:
  using DescriptorType = StridedMemRefType<T, Rank>;
  using FreeFunType = std::function<void(DescriptorType)>;

  /// Allocate a new dense StridedMemRef with a given `shape`. An optional
  /// `shapeAlloc` array can be supplied to "pad" every dimension individually.
  /// If an ElementWiseVisitor is provided, it will be used to initialize the
  /// data, else the memory will be zero-initialized. The alloc and free method
  /// used to manage the data allocation can be optionally provided, and default
  /// to malloc/free.
  OwningMemRef(
      ArrayRef<int64_t> shape, ArrayRef<int64_t> shapeAlloc = {},
      ElementWiseVisitor<T> init = {},
      llvm::Optional<uint64_t> alignment = llvm::Optional<uint64_t>(),
      AllocFunType allocFun = &::malloc,
      std::function<void(StridedMemRefType<T, Rank>)> freeFun =
          [](StridedMemRefType<T, Rank> descriptor) {
            // Release the base pointer returned by the allocator; `data` may
            // point past it when alignment padding was inserted.
            ::free(descriptor.basePtr);
          })
      : freeFunc(freeFun) {
    if (shapeAlloc.empty())
      shapeAlloc = shape;
    assert(shape.size() == Rank);
    assert(shapeAlloc.size() == Rank);
    for (unsigned i = 0; i < Rank; ++i)
      assert(shape[i] <= shapeAlloc[i] &&
             "shapeAlloc must be greater than or equal to shape");
    int64_t nElements = 1;
    for (int64_t s : shapeAlloc)
      nElements *= s;
    T *data, *alignedData;
    std::tie(data, alignedData) =
        detail::allocAligned<T>(nElements, allocFun, alignment);
    descriptor = detail::makeStridedMemRefDescriptor<Rank>(data, alignedData,
                                                           shape, shapeAlloc);
    if (init) {
      for (StridedMemrefIterator<T, Rank> it = descriptor.begin(),
                                          end = descriptor.end();
           it != end; ++it)
        init(*it, it.getIndices());
    } else {
      // Zero exactly the element storage; zeroing extra alignment bytes would
      // write past the end of the allocation.
      memset(descriptor.data, 0, nElements * sizeof(T));
    }
  }
  /// Take ownership of an existing descriptor with a custom deleter.
  OwningMemRef(DescriptorType descriptor, FreeFunType freeFunc)
      : freeFunc(freeFunc), descriptor(descriptor) {}
  ~OwningMemRef() {
    if (freeFunc)
      freeFunc(descriptor);
  }
  OwningMemRef(const OwningMemRef &) = delete;
  OwningMemRef &operator=(const OwningMemRef &) = delete;
  OwningMemRef &operator=(OwningMemRef &&other) {
    // Release any resource currently held before taking over `other`'s.
    if (freeFunc)
      freeFunc(descriptor);
    freeFunc = other.freeFunc;
    descriptor = other.descriptor;
    other.freeFunc = nullptr;
    memset(&other.descriptor, 0, sizeof(other.descriptor));
    return *this;
  }
  OwningMemRef(OwningMemRef &&other) { *this = std::move(other); }

  DescriptorType &operator*() { return descriptor; }
  DescriptorType *operator->() { return &descriptor; }
  T &operator[](std::initializer_list<int64_t> indices) {
    return descriptor[std::move(indices)];
  }

private:
  /// Custom deleter used to release the data buffer managed by the descriptor
  /// below.
  FreeFunType freeFunc;
  /// The descriptor is an instance of StridedMemRefType<T, Rank>.
  DescriptorType descriptor;
};
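
// Illustrative usage sketch (names below are local to the example, not part
// of this header):
//
//   // 4x8 buffer of floats, zero-initialized by default.
//   OwningMemRef<float, 2> A({4, 8});
//   A[{1, 2}] = 3.0f;
//
//   // Element-wise initialization through a visitor.
//   OwningMemRef<float, 2> B(
//       {4, 8}, /*shapeAlloc=*/{},
//       [](float &elt, ArrayRef<int64_t> indices) {
//         elt = static_cast<float>(indices[0] * 8 + indices[1]);
//       });
//
//   // The underlying descriptor can be passed to JIT-compiled MLIR code
//   // expecting a memref<4x8xf32> argument.
//   StridedMemRefType<float, 2> &desc = *B;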

} // namespace mlir

#endif // MLIR_EXECUTIONENGINE_MEMREFUTILS_H_