1 // Copyright (C) 2020 by Yuri Victorovich. All rights reserved.
2 
3 #include "model-functions.h"
4 #include "plugin-interface.h"
5 #include "nn-types.h"
6 #include "tensor.h"
7 #include "misc.h"
8 #include "util.h"
9 
10 #include <assert.h>
11 
12 namespace ModelFunctions {
13 
isTensorComputed(const PluginInterface::Model * model,PluginInterface::TensorId tensorId)14 bool isTensorComputed(const PluginInterface::Model *model, PluginInterface::TensorId tensorId) {
15 	return !model->getTensorHasData(tensorId) && !model->getTensorIsVariableFlag(tensorId);
16 }
17 
tensorKind(const PluginInterface::Model * model,PluginInterface::TensorId tensorId)18 std::string tensorKind(const PluginInterface::Model *model, PluginInterface::TensorId tensorId) { // TODO translations, tr() doesn't work outside of Q_OBJECT scope
19 	return Util::isValueIn(model->getInputs(), tensorId) ? "input"
20 	       : Util::isValueIn(model->getOutputs(), tensorId) ? "output"
21 	       : model->getTensorHasData(tensorId) ? "static tensor"
22 	       : model->getTensorIsVariableFlag(tensorId) ? "variable"
23 	       : "computed";
24 }
25 
computeModelFlops(const PluginInterface::Model * model)26 size_t computeModelFlops(const PluginInterface::Model *model) {
27 	size_t flops = 0;
28 	for (PluginInterface::OperatorId oid = 0, oide = model->numOperators(); oid < oide; oid++)
29 		flops += computeOperatorFlops(model, oid);
30 	return flops;
31 }
32 
computeOperatorFlops(const PluginInterface::Model * model,PluginInterface::OperatorId operatorId)33 size_t computeOperatorFlops(const PluginInterface::Model *model, PluginInterface::OperatorId operatorId) {
34 	std::vector<PluginInterface::TensorId> inputs, outputs;
35 	model->getOperatorIo(operatorId, inputs, outputs);
36 	switch (model->getOperatorKind(operatorId)) {
37 	case PluginInterface::KindConv2D: {
38 		auto shapeImage = model->getTensorShape(inputs[0]);
39 		auto shapeWeights = model->getTensorShape(inputs[1]);
40 		assert(shapeImage.size() == 4 && shapeImage[0] == 1);
41 		assert(shapeWeights.size() == 4);
42 		return Tensor::flatSize(shapeWeights)*(shapeImage[1]*shapeImage[2]); // TODO add summations, strides, pads
43 	} case PluginInterface::KindFullyConnected: {
44 		auto shapeWeights = model->getTensorShape(inputs[1]);
45 		assert(shapeWeights.size() == 2);
46 		return Tensor::flatSize(shapeWeights); // add summations, strides, pads
47 	} case PluginInterface::KindLocalResponseNormalization: {
48 		return 0; // TODO
49 	} case PluginInterface::KindAdd:
50 	  case PluginInterface::KindRelu:
51 	  case PluginInterface::KindRelu6:
52 	  case PluginInterface::KindSub:
53 	  case PluginInterface::KindMul:
54 	  case PluginInterface::KindDiv:
55 	  case PluginInterface::KindMaximum:
56 	  case PluginInterface::KindMinimum:
57 		return Tensor::flatSize(model->getTensorShape(inputs[0])); // input size
58 	  case PluginInterface::KindTanh:
59 		return 10*Tensor::flatSize(model->getTensorShape(inputs[0])); //  tanh is expensive, maybe 10X at least
60 	  case PluginInterface::KindLogistic:
61 		return 10*Tensor::flatSize(model->getTensorShape(inputs[0])); //  logistic function is expensive, maybe 10X at least
62 	  case PluginInterface::KindLeakyRelu:
63 		return 2*Tensor::flatSize(model->getTensorShape(inputs[0])); // compare and multiply
64 	  case PluginInterface::KindHardSwish:
65 		return 5*Tensor::flatSize(model->getTensorShape(inputs[0])); // variable number of operations, 1..7, depending on value
66 	  case PluginInterface::KindRSqrt:
67 		return 25*Tensor::flatSize(model->getTensorShape(inputs[0])); // it's very expensive to compute RSqrt
68 	  case PluginInterface::KindConcatenation:
69 		return Tensor::flatSize(model->getTensorShape(inputs[0])); // unclear how to count flops for concatenation
70 	  case PluginInterface::KindArgMax:
71 	  case PluginInterface::KindArgMin:
72 		return Tensor::flatSize(model->getTensorShape(inputs[0]));
73 	  case PluginInterface::KindSquaredDifference:
74 		return Tensor::flatSize(model->getTensorShape(inputs[0]))*2;
75 	  default:
76 		return 0; // TODO
77 	}
78 }
79 
sizeOfModelStaticData(const PluginInterface::Model * model,unsigned & outObjectCount,size_t & outMaxStaticDataPerOperator)80 size_t sizeOfModelStaticData(const PluginInterface::Model *model, unsigned &outObjectCount, size_t &outMaxStaticDataPerOperator) {
81 	size_t size = 0;
82 	outObjectCount = 0;
83 	outMaxStaticDataPerOperator = 0;
84 	for (PluginInterface::OperatorId oid = 0, oide = model->numOperators(); oid < oide; oid++) {
85 		auto sizeForOperator = sizeOfOperatorStaticData(model, oid, outObjectCount);
86 		size += sizeForOperator;
87 		if (sizeForOperator > outMaxStaticDataPerOperator)
88 			outMaxStaticDataPerOperator = sizeForOperator;
89 	}
90 	return size;
91 }
92 
sizeOfOperatorStaticData(const PluginInterface::Model * model,PluginInterface::OperatorId operatorId,unsigned & outObjectCount)93 size_t sizeOfOperatorStaticData(const PluginInterface::Model *model, PluginInterface::OperatorId operatorId, unsigned &outObjectCount) {
94 	size_t size = 0;
95 	std::vector<PluginInterface::TensorId> inputs, outputs;
96 	model->getOperatorIo(operatorId, inputs, outputs);
97 	for (PluginInterface::TensorId tensorId : inputs)
98 		if (model->getTensorHasData(tensorId)) {
99 			size += Tensor::flatSize(model->getTensorShape(tensorId))*sizeof(float); // TODO handle other types
100 			outObjectCount++;
101 		}
102 	return size;
103 }
104 
dataRatioOfOperator(const PluginInterface::Model * model,PluginInterface::OperatorId operatorId)105 float dataRatioOfOperator(const PluginInterface::Model *model, PluginInterface::OperatorId operatorId) {
106 	std::vector<PluginInterface::TensorId> inputs, outputs;
107 	model->getOperatorIo(operatorId, inputs, outputs);
108 
109 	unsigned sizeOfInputs = 0;
110 	unsigned sizeOfOutputs = 0;
111 	for (auto i : inputs)
112 		if (isTensorComputed(model, i))
113 			sizeOfInputs += Tensor::flatSize(model->getTensorShape(i));
114 	for (auto o : outputs)
115 		sizeOfOutputs += Tensor::flatSize(model->getTensorShape(o));
116 
117 	return float(sizeOfOutputs)/float(sizeOfInputs);
118 }
119 
dataRatioOfOperatorModelInputToIns(const PluginInterface::Model * model,PluginInterface::OperatorId operatorId)120 float dataRatioOfOperatorModelInputToIns(const PluginInterface::Model *model, PluginInterface::OperatorId operatorId) {
121 	std::vector<PluginInterface::TensorId> inputs, outputs;
122 	model->getOperatorIo(operatorId, inputs, outputs);
123 
124 	unsigned sizeOfInputs = 0, cntInputs = 0;
125 	for (auto i : inputs)
126 		if (isTensorComputed(model, i)) {
127 			sizeOfInputs += Tensor::flatSize(model->getTensorShape(i));
128 			cntInputs++;
129 		}
130 
131 	// XXX the below is incorrect for unbalanced operators (data use isn't equal between branches)
132 	return float(sizeOfInputs)/float(Tensor::flatSize(model->getTensorShape(model->getInputs()[0])));
133 }
134 
dataRatioOfOperatorModelInputToOuts(const PluginInterface::Model * model,PluginInterface::OperatorId operatorId)135 float dataRatioOfOperatorModelInputToOuts(const PluginInterface::Model *model, PluginInterface::OperatorId operatorId) {
136 	std::vector<PluginInterface::TensorId> inputs, outputs;
137 	model->getOperatorIo(operatorId, inputs, outputs);
138 
139 	unsigned sizeOfOutputs = 0, cntOutputs = 0;
140 	for (auto o : outputs) {
141 		sizeOfOutputs += Tensor::flatSize(model->getTensorShape(o));
142 		cntOutputs++;
143 	}
144 
145 	//assert(model->numInputs()==1);
146 	return float(sizeOfOutputs)/cntOutputs/float(Tensor::flatSize(model->getTensorShape(model->getInputs()[0])));
147 }
148 
computeTensors(const PluginInterface::Model * model,std::vector<std::unique_ptr<float>> * tensorData)149 void computeTensors(const PluginInterface::Model *model, std::vector<std::unique_ptr<float>> *tensorData) {
150 }
151 
guessOutputInterpretationKind(const PluginInterface::Model * model)152 OutputInterpretationKind guessOutputInterpretationKind(const PluginInterface::Model *model) {
153 	// classify based on the first output tensor shape
154 	auto outputTensorId = model->getOutputs()[0];
155 	auto outputShape = Tensor::stripLeadingOnes(model->getTensorShape(outputTensorId));
156 
157 	switch (outputShape.size()) {
158 	case 1: // 1-dimensional vector with numbers, must be object classification
159 		switch (outputShape[0]) {
160 		case 1000:
161 			return OutputInterpretationKind_ImageNet1000; // might be wrong, but the number is the same and there aren't too many networks around
162 		case 1001:
163 			return OutputInterpretationKind_ImageNet1001;
164 		case 2:
165 			return OutputInterpretationKind_NoYes; // it could also be OutputInterpretationKind_YesNo but we can't know this
166 		default:
167 			return OutputInterpretationKind_Undefined; // we don't know from the information that we have
168 		}
169 	case 3: { // see if the shape matches the input shape
170 		auto inputTensorId = model->getInputs()[0];
171 		auto inputShape = Tensor::stripLeadingOnes(model->getTensorShape(inputTensorId));
172 		if (inputShape.size()==3 && inputShape[0]==outputShape[0] && inputShape[1]==outputShape[1])
173 			return OutputInterpretationKind_PixelClassification;
174 		return OutputInterpretationKind_Undefined; // some large shape but it doesn't match the input so we don't know
175 	} default:
176 		return OutputInterpretationKind_Undefined; // we don't know from the information that we have
177 	}
178 }
179 
getOperatorExtraInfoString(const PluginInterface::Model * model,PluginInterface::OperatorId operatorId)180 std::string getOperatorExtraInfoString(const PluginInterface::Model *model, PluginInterface::OperatorId operatorId) {
181 	typedef PluginInterface PI;
182 	switch (model->getOperatorKind(operatorId)) {
183 	  case PI::KindConv2D:
184 	  case PI::KindDepthwiseConv2D: {
185 		std::vector<PI::TensorId> inputs, outputs;
186 		model->getOperatorIo(operatorId, inputs, outputs);
187 		assert(inputs.size()==3);
188 		auto filterShape = model->getTensorShape(inputs[1]);
189 		assert(filterShape.size()==4);
190 		return Util::stringToSubscript(STR(filterShape[1] << "x" << filterShape[2]));
191 	} case PI::KindFullyConnected: {
192 		std::vector<PI::TensorId> inputs, outputs;
193 		model->getOperatorIo(operatorId, inputs, outputs);
194 		auto filterShape = model->getTensorShape(inputs[1]);
195 		assert(filterShape.size()==2);
196 		return Util::stringToSubscript(STR(filterShape[0] << "x" << filterShape[1]));
197 	} case PI::KindMaxPool:
198 	  case PI::KindAveragePool: {
199 		int filterWidth=0, filterHeight=0;
200 		std::unique_ptr<PI::OperatorOptionsList> opts(model->getOperatorOptions(operatorId));
201 		for (auto &o : *opts)
202 			if (o.name == PI::OperatorOption_FILTER_WIDTH)
203 				filterWidth = o.value.as<int>();
204 			else if (o.name == PI::OperatorOption_FILTER_HEIGHT)
205 				filterHeight = o.value.as<int>();
206 		return Util::stringToSubscript(STR(filterWidth << "x" << filterHeight));
207 	} default:
208 		return ""; // no extra info
209 	}
210 }
211 
/// string-returning aggregate versions
213 
dataRatioOfOperatorStr(const PluginInterface::Model * model,PluginInterface::OperatorId operatorId,float & outIncreaseAboveInput,float & outModelInputToOut)214 std::string dataRatioOfOperatorStr(const PluginInterface::Model *model, PluginInterface::OperatorId operatorId,
215                                    float &outIncreaseAboveInput, float &outModelInputToOut)
216 {
217 	auto modelInputToIns = ModelFunctions::dataRatioOfOperatorModelInputToIns(model, operatorId);
218 	auto modelInputToOuts = ModelFunctions::dataRatioOfOperatorModelInputToOuts(model, operatorId);
219 
220 	outIncreaseAboveInput = modelInputToOuts/modelInputToIns;
221 	outModelInputToOut = modelInputToOuts;
222 
223 	return STR(ModelFunctions::dataRatioOfOperator(model, operatorId) <<
224 	           ", model-input-to-ins: " << modelInputToIns <<
225 	           ", model-input-to-outs: " << modelInputToOuts);
226 }
227 
228 }
229