1 //
2 //  CPUBackend.hpp
3 //  MNN
4 //
5 //  Created by MNN on 2018/07/06.
6 //  Copyright © 2018, Alibaba Group Holding Limited
7 //
8 
9 #ifndef CPUBackend_hpp
10 #define CPUBackend_hpp
11 
12 #include <map>
13 #include <memory>
14 #include "core/Backend.hpp"
15 #include "core/Execution.hpp"
16 #include "MNN_generated.h"
17 
18 namespace MNN {
19 class BufferAllocator;
20 class CPURuntime : public Runtime {
21 public:
22     friend class CPUBackend;
23     CPURuntime(const Backend::Info& info);
24     virtual ~ CPURuntime();
25     virtual Backend* onCreate(const BackendConfig* config) const override;
26     virtual void onGabageCollect(int level) override;
27     virtual float onGetMemoryInMB() override;
onGetCompilerType() const28     virtual CompilerType onGetCompilerType() const override {
29         return Compiler_Loop;
30     }
31 private:
32     std::shared_ptr<BufferAllocator> mStaticAllocator;
33     int mThreadNumber;
34     int mTaskIndex;
35     BackendConfig::MemoryMode mMemory;
36     BackendConfig::PowerMode mPower;
37     BackendConfig::PrecisionMode mPrecision;
38 
39     // Backend features
40     // CPU features
41     float mFlops = 0.0f;
42     static Backend*(*gExtraCreate)(const Runtime* runtime);
43     size_t mFlags = 0;
44 };
45 struct CoreFunctions;
46 struct CoreInt8Functions;
47 
/**
 * CPU execution backend.
 *
 * Created by CPURuntime::onCreate. Manages per-session dynamic buffers,
 * creates Execution objects for ops via registered Creators, and exposes
 * the core (float / int8) function tables selected for this CPU.
 * Configuration (thread count, memory/power modes) is read from the
 * owning CPURuntime, which declares this class a friend.
 */
class CPUBackend : public Backend {
public:
    CPUBackend(const CPURuntime* runtime, BackendConfig::PrecisionMode precision, MNNForwardType type = MNN_FORWARD_CPU, size_t flags = 0);
    virtual ~CPUBackend();

    // Return sizeDivide, scheduleNumber aligned memory
    std::pair<int, int> multiThreadDivide(int size) const;
public:
    // --- Backend interface: buffer lifecycle and op creation ---
    virtual bool onAcquireBuffer(const Tensor* nativeTensor, StorageType storageType) override;
    virtual bool onReleaseBuffer(const Tensor* nativeTensor, StorageType storageType) override;
    virtual bool onClearBuffer() override;
    virtual void onCopyBuffer(const Tensor* srcTensor, const Tensor* dstTensor) const override;
    // Returns (cost estimate, supported) for scheduling — semantics defined
    // by the base Backend API.
    virtual std::pair<float, bool> onMeasure(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                            const MNN::Op* op) override;

    // Looks up the registered Creator for `op` and builds its Execution.
    virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                const MNN::Op* op) override;
    virtual void onExecuteBegin() const override;
    virtual void onExecuteEnd() const override;

    /// Core float function table selected for this CPU (non-owning).
    const CoreFunctions* functions() const {
        return mCoreFunctions;
    }

    // Return element size for Tensor, considering pack
    int getTensorSize(const Tensor* tensor) const;
    /// Core int8 function table selected for this CPU (non-owning).
    const CoreInt8Functions* int8Functions() const {
        return mInt8CoreFunctions;
    }
    // NOTE(review): "Exectuion" (sic) — fixing the spelling would break the
    // definition in the .cpp and all callers; kept as-is.
    Execution* makePostWrapExectuion(Execution* execution) const;
public:
    /// Factory interface registered per OpType; see REGISTER_CPU_OP_CREATOR.
    class Creator {
    public:
        virtual Execution* onCreate(const std::vector<Tensor*>& inputs, const std::vector<Tensor*>& outputs,
                                    const MNN::Op* op, Backend* backend) const = 0;
    };

    /// Registers `c` as the creator for op type `t`. Returns false on
    /// failure — presumably a duplicate registration; confirm in the .cpp.
    static bool addCreator(OpType t, Creator* c);

    int threadNumber() const {
        return mRuntime->mThreadNumber;
    }

    /// Allocator for DYNAMIC (per-inference) buffers (non-owning view).
    BufferAllocator* getBufferAllocator() const {
        return mDynamicAllocator.get();
    }

    BackendConfig::MemoryMode memoryMode() const {
        return mRuntime->mMemory;
    }
    BackendConfig::PrecisionMode precisionMode() const {
        return mPrecisionMode;
    }
    // Cache of dtype-cast shadow tensors, keyed by the source tensor.
    std::map<const Tensor*, std::unique_ptr<const Tensor>>& getCachedCastTensor() {
        return mCachedCastTensor;
    }
#ifdef MNN_USE_THREAD_POOL
    inline int taskIndex() const {return mRuntime->mTaskIndex;}
#endif
    /// One-time setup of the OpType -> Creator registry.
    static void initCreatorMap();
    halide_type_t getRunType(const Op* op, halide_type_t qtype, halide_type_t rtype) override;
private:
    // Maps an op type given the runtime data type (e.g. quantized variants).
    OpType getRealOpType(OpType opType, halide_type_t dataType);
protected:
    bool allocBuffer(int size, Tensor* dest,  StorageType storageType);
    const CoreFunctions* mCoreFunctions;
    const CoreInt8Functions* mInt8CoreFunctions;
private:
    std::shared_ptr<BufferAllocator> mStaticAllocator;
    std::shared_ptr<BufferAllocator> mDynamicAllocator;
    bool mCheckNAN = false;
    const CPURuntime* mRuntime; // non-owning; runtime outlives the backend
    BackendConfig::PrecisionMode mPrecisionMode;
    static std::map<OpType, CPUBackend::Creator*>* gCreator;
    std::map<const Tensor*, std::unique_ptr<const Tensor>> mCachedCastTensor;
};
124 
// Registers `name` (a CPUBackend::Creator subclass) for op type `opType`.
// Expands to a function ___<name>__<opType>__ that the op registry is
// expected to call once at startup — TODO confirm against the generated
// registration .cpp. The Creator has static storage duration, so the
// pointer passed to addCreator remains valid for the process lifetime.
// Do not change the generated function name: it is referenced by its
// mangled spelling from the registration code.
#define REGISTER_CPU_OP_CREATOR(name, opType)     \
    void ___##name##__##opType##__() {            \
        static name _temp;\
        CPUBackend::addCreator(opType, &_temp); \
    }
130 
131 } // namespace MNN
132 
133 #endif /* CPUBackend_hpp */
134