/**
 * Copyright (c) Glow Contributors. See CONTRIBUTORS file.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include "ImporterTestUtils.h"
17 #include "glow/ExecutionEngine/ExecutionEngine.h"
18 #include "glow/Graph/Graph.h"
19 #include "glow/Importer/Caffe2ModelLoader.h"
20 #include "gtest/gtest.h"
21
22 #ifndef GLOW_DATA_PATH
23 #define GLOW_DATA_PATH
24 #endif
25
/// Common gtest fixture for the Caffe2 importer tests in this file.
class Caffe2ImporterTest : public ::testing::Test {
protected:
  // By default constant folding at load time is enabled in general, but we do
  // many tests here loading Constants, so keep it false during these tests by
  // default.
  void SetUp() override { glow::setConstantFoldLoaderOpsFlag(false); }
  // Restore the global flag so tests outside this fixture see the default.
  void TearDown() override { glow::setConstantFoldLoaderOpsFlag(true); }
};
34
35 using namespace glow;
36 /// Test loading of Elementwise Unary Ops floating point.
testEltwiseUnaryOpFloat(std::string fileName,llvm::ArrayRef<dim_t> inputShape,std::string input_name,float delta,const std::function<float (float)> & op)37 static void testEltwiseUnaryOpFloat(std::string fileName,
38 llvm::ArrayRef<dim_t> inputShape,
39 std::string input_name, float delta,
40 const std::function<float(float)> &op) {
41 ExecutionEngine EE{};
42 auto &mod = EE.getModule();
43 Function *F = mod.createFunction("main");
44 std::string NetDescFilename =
45 std::string(GLOW_DATA_PATH "tests/models/caffe2Models/") + fileName;
46 std::string NetWeightFilename(
47 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
48
49 PlaceholderBindings bindings;
50 Placeholder *graphOutputVar;
51 Type input_type(ElemKind::FloatTy, inputShape);
52 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
53 {input_name.c_str()}, {&input_type}, *F);
54 graphOutputVar = EXIT_ON_ERR(caffe2LD.getSingleOutput());
55 auto PH = mod.getPlaceholderByNameSlow(input_name);
56 auto *inTensor = bindings.allocate(PH);
57 inTensor->getHandle().randomize(-10.0, 10.0, mod.getPRNG());
58 EE.compile(CompilationMode::Infer);
59 bindings.allocate(mod.getPlaceholders());
60 EE.run(bindings);
61 auto result = bindings.get(graphOutputVar)->getHandle();
62 auto inHandle = inTensor->getHandle();
63 ASSERT_TRUE(result.dims() == inputShape);
64 for (size_t i = 0; i < result.getType().size(); i++) {
65 EXPECT_NEAR(result.raw(i), op(inHandle.raw(i)), delta);
66 }
67 }
68
/// Verify the Exp operator is imported and evaluates e^x elementwise.
TEST_F(Caffe2ImporterTest, importExp) {
  const auto refExp = [](float in) { return std::exp(in); };
  testEltwiseUnaryOpFloat("exp_op_net.pbtxt", {1, 2, 4, 3}, "data", 0.002,
                          refExp);
}
73
74 /// Test loading conv op from a Caffe2 model.
75 /// The input is N*C*H*W (1*1*3*3), the kernel is 2,
76 /// stride is 1, pad is 1, group is 1.
TEST_F(Caffe2ImporterTest,importConv)77 TEST_F(Caffe2ImporterTest, importConv) {
78 ExecutionEngine EE{};
79 auto &mod = EE.getModule();
80 Function *F = mod.createFunction("main");
81
82 std::string NetDescFilename(GLOW_DATA_PATH
83 "tests/models/caffe2Models/predict_net.pbtxt");
84 std::string NetWeightFilename(GLOW_DATA_PATH
85 "tests/models/caffe2Models/init_net.pbtxt");
86
87 Placeholder *output;
88 PlaceholderBindings bindings;
89
90 // Destroy the loader after the graph is loaded since the following execution
91 // will not depend on anything from the loader.
92 {
93 Tensor data;
94 getNCHWData(&data, 1, 1, 3, 3);
95 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
96 {"gpu_0/data_0"}, {&data.getType()}, *F);
97 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
98
99 bindings.allocate(mod.getPlaceholders());
100 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
101 }
102
103 auto res = bindings.get(output);
104 EE.compile(CompilationMode::Infer);
105
106 EE.run(bindings);
107 auto result = res->getHandle();
108 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
109 std::vector<float> expectedValues = {2, 3, 5, 4, 5, 10, 14, 9,
110 11, 22, 26, 15, 8, 15, 17, 10};
111 EXPECT_TRUE(result.dims().vec() == expectedDims);
112 for (size_t i = 0; i < 4 * 4; i++)
113 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
114 }
115
116 /// Test loading ConvRelu op from a Caffe2 model.
117 /// The input is N*C*H*W (1*1*3*3), the kernel is 2,
118 /// stride is 1, pad is 1, group is 1.
TEST_F(Caffe2ImporterTest,importConvRelu)119 TEST_F(Caffe2ImporterTest, importConvRelu) {
120 ExecutionEngine EE{};
121 auto &mod = EE.getModule();
122 Function *F = mod.createFunction("main");
123
124 std::string NetDescFilename(
125 GLOW_DATA_PATH "tests/models/caffe2Models/convrelu_pred_net.pbtxt");
126 std::string NetWeightFilename(
127 GLOW_DATA_PATH "tests/models/caffe2Models/convrelu_init_net.pbtxt");
128
129 Placeholder *output;
130 PlaceholderBindings bindings;
131
132 // Destroy the loader after the graph is loaded since the following execution
133 // will not depend on anything from the loader.
134 {
135 Tensor data;
136 getNCHWData(&data, 1, 1, 3, 3);
137 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
138 {"gpu_0/data_0"}, {&data.getType()}, *F);
139 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
140
141 bindings.allocate(mod.getPlaceholders());
142 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
143 }
144
145 // High level check on the content of the graph. We should have
146 // transpose => conv => relu => transpose => save
147 EXPECT_EQ(F->getNodes().size(), 5);
148 auto *saveNode = getSaveNodeFromDest(output);
149
150 auto *transNode1 =
151 llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
152 ASSERT_TRUE(transNode1);
153 auto *reluNode = llvm::dyn_cast<ReluNode>(transNode1->getInput().getNode());
154 ASSERT_TRUE(reluNode);
155 auto *convNode =
156 llvm::dyn_cast<ConvolutionNode>(reluNode->getInput().getNode());
157 ASSERT_TRUE(convNode);
158 auto *transNode2 =
159 llvm::dyn_cast<TransposeNode>(convNode->getInput().getNode());
160 ASSERT_TRUE(transNode2);
161
162 auto res = bindings.get(output);
163 EE.compile(CompilationMode::Infer);
164
165 EE.run(bindings);
166 auto result = res->getHandle();
167 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
168 std::vector<float> expectedValues = {2, 3, 5, 4, 5, 10, 14, 9,
169 11, 22, 26, 15, 8, 15, 17, 10};
170 EXPECT_TRUE(result.dims().vec() == expectedDims);
171 for (size_t i = 0; i < 4 * 4; i++)
172 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
173 }
174
175 /// Test loading conv op from a Caffe2 model.
176 /// The input is N*H*W*C (1*3*3*1), the kernel is 2,
177 /// stride is 1, pad is 1, group is 1.
TEST_F(Caffe2ImporterTest,convNHWC)178 TEST_F(Caffe2ImporterTest, convNHWC) {
179 ExecutionEngine EE{};
180 auto &mod = EE.getModule();
181 Function *F = mod.createFunction("main");
182
183 std::string NetDescFilename(
184 GLOW_DATA_PATH "tests/models/caffe2Models/conv_nhwc_predict_net.pbtxt");
185 std::string NetWeightFilename(
186 GLOW_DATA_PATH "tests/models/caffe2Models/conv_nhwc_init_net.pbtxt");
187
188 Placeholder *output;
189 PlaceholderBindings bindings;
190
191 Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});
192
193 // Destroy the loader after the graph is loaded since the following execution
194 // will not depend on anything from the loader.
195 {
196 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
197 {&inputs.getType()}, *F);
198 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
199 }
200
201 // High level check on the content of the graph. We have 1 conv and 1 save.
202 EXPECT_EQ(F->getNodes().size(), 2);
203 auto *saveNode = getSaveNodeFromDest(output);
204 auto *convNode =
205 llvm::dyn_cast<ConvolutionNode>(saveNode->getInput().getNode());
206 ASSERT_TRUE(convNode);
207
208 // We have 2 placeholders: 1 input and 1 output.
209 EXPECT_EQ(mod.getPlaceholders().size(), 2);
210 // We have 2 constants: Weights and bias.
211 EXPECT_EQ(mod.getConstants().size(), 2);
212 }
213
/// Test loading ChannelwiseQuantizedConvolutionNode op from a Caffe2 model.
/// The input is N*H*W*C (1*1*1*4), the kernel is 1, stride is 1, pad is 1,
/// group is 2.
TEST_F(Caffe2ImporterTest, convGroupQuantized) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/conv_group_quantized_pred_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/conv_group_quantized_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // Int8 quantized input with scale 1.0 and offset 0.
  Tensor input(ElemKind::Int8QTy, {1, 1, 1, 4}, 1.0, 0);

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
                               {&input.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // High level check on the content of the graph. We have 1
  // ChannelwiseQuantizedConvolutionNode and 1 save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *groupwiseConv = llvm::dyn_cast<ChannelwiseQuantizedConvolutionNode>(
      saveNode->getInput().getNode());
  ASSERT_TRUE(groupwiseConv);

  // Check params.
  std::vector<unsigned> expectedKernelsAndStrides = {1, 1};
  std::vector<unsigned> expectedPads = {1, 1, 1, 1};
  EXPECT_EQ(groupwiseConv->getKernels(),
            llvm::makeArrayRef(expectedKernelsAndStrides));
  EXPECT_EQ(groupwiseConv->getStrides(),
            llvm::makeArrayRef(expectedKernelsAndStrides));
  EXPECT_EQ(groupwiseConv->getPads(), llvm::makeArrayRef(expectedPads));
  EXPECT_EQ(groupwiseConv->getGroup(), 2);
  EXPECT_EQ(groupwiseConv->getDilation(), 1);

  // Check constant inputs. All six conv inputs must have been materialized
  // as Constants by the loader.
  Constant *filterConstant =
      llvm::dyn_cast<Constant>(groupwiseConv->getFilter().getNode());
  Constant *biasConstant =
      llvm::dyn_cast<Constant>(groupwiseConv->getBias().getNode());
  Constant *filterScalesConstant =
      llvm::dyn_cast<Constant>(groupwiseConv->getFilterScales().getNode());
  Constant *filterOffsetsConstant =
      llvm::dyn_cast<Constant>(groupwiseConv->getFilterOffsets().getNode());
  Constant *biasScalesConstant =
      llvm::dyn_cast<Constant>(groupwiseConv->getBiasScales().getNode());
  Constant *biasOffsetsConstant =
      llvm::dyn_cast<Constant>(groupwiseConv->getBiasOffsets().getNode());

  ASSERT_TRUE(filterConstant);
  ASSERT_TRUE(biasConstant);
  ASSERT_TRUE(filterScalesConstant);
  ASSERT_TRUE(filterOffsetsConstant);
  ASSERT_TRUE(biasScalesConstant);
  ASSERT_TRUE(biasOffsetsConstant);

  const auto filterH = filterConstant->getPayload().getHandle<int8_t>();
  const auto biasH = biasConstant->getPayload().getHandle<float>();
  const auto filterScalesH =
      filterScalesConstant->getPayload().getHandle<float>();
  const auto filterOffsetsH =
      filterOffsetsConstant->getPayload().getHandle<int32_t>();
  const auto biasScalesH = biasScalesConstant->getPayload().getHandle<float>();
  const auto biasOffsetsH =
      biasOffsetsConstant->getPayload().getHandle<int32_t>();

  // The init net fills the filter with an alternating 0/1 pattern.
  for (size_t i = 0; i < filterH.size(); ++i) {
    EXPECT_EQ(filterH.raw(i), i % 2);
  }

  for (size_t i = 0; i < biasH.size(); ++i) {
    EXPECT_EQ(biasH.raw(i), 7.0);
  }

  for (size_t i = 0; i < filterScalesH.size(); ++i) {
    EXPECT_EQ(filterScalesH.raw(i), 6.0f);
  }

  for (size_t i = 0; i < filterOffsetsH.size(); ++i) {
    EXPECT_EQ(filterOffsetsH.raw(i), 5);
  }

  // Bias scales must equal inputScale * filterScale per channel.
  for (size_t i = 0; i < biasScalesH.size(); ++i) {
    float matmulScale = filterScalesH.raw(i) * input.getType().getScale();
    EXPECT_EQ(biasScalesH.raw(i), matmulScale);
  }

  for (size_t i = 0; i < biasOffsetsH.size(); ++i) {
    EXPECT_EQ(biasOffsetsH.raw(i), 0);
  }

  // We have 2 placeholders: 1 input and 1 output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
  // We have 6 constants: Bias, Filter, FilterScales, FilterOffsets, BiasScales
  // and BiasOffsets.
  EXPECT_EQ(mod.getConstants().size(), 6);
}
323
324 /// Helper method to run the ConvTranspose operator test cases.
325 /// \p filename contains the model .onnxtxt.
326 /// \p expectedDims: output Tensor dimensions.
327 /// \p expectedValues : output Tensor values expected.
328 /// The input is N*C*H*W (1*1*2*2), the kernels is {3, 3},
329 /// strides is {1, 1}, group is 1. Pads can vary.
convTransposeTestHelper(std::string & netname,std::string & initname,llvm::ArrayRef<dim_t> expectedDims,llvm::ArrayRef<float> expectedValues)330 static void convTransposeTestHelper(std::string &netname, std::string &initname,
331 llvm::ArrayRef<dim_t> expectedDims,
332 llvm::ArrayRef<float> expectedValues) {
333 ExecutionEngine EE{};
334 auto &mod = EE.getModule();
335 Function *F = mod.createFunction("main");
336
337 std::string NetDescFilename =
338 std::string(GLOW_DATA_PATH "tests/models/caffe2Models/") + netname;
339
340 std::string NetWeightFilename =
341 std::string(GLOW_DATA_PATH "tests/models/caffe2Models/") + initname;
342
343 Placeholder *output;
344 PlaceholderBindings bindings;
345
346 // Destroy the loader after the graph is loaded since the following execution
347 // will not depend on anything from the loader.
348 {
349 Tensor data;
350 getNCHWData(&data, 1, 1, 2, 2);
351 data.getHandle() = {2., 3., 4., 5.};
352
353 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
354 {"gpu_0/data_0"}, {&data.getType()}, *F);
355 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
356
357 bindings.allocate(mod.getPlaceholders());
358 updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
359 }
360
361 auto res = bindings.get(output);
362 EE.compile(CompilationMode::Infer);
363
364 EE.run(bindings);
365 auto result = res->getHandle();
366
367 EXPECT_TRUE(result.dims() == expectedDims);
368 for (dim_t i = 0, e = expectedValues.size(); i < e; i++) {
369 EXPECT_FLOAT_EQ(result.raw(i), expectedValues[i]);
370 }
371 }
372
373 /// Test loading ConvTranspose op from a ONNX model.
374 /// The input is N*C*H*W (1*1*2*2), the kernels is {3, 3},
375 /// strides is {1, 1}, pads is {0, 0, 0, 0}, group is 1.
TEST(caffe2,importConvTranspose)376 TEST(caffe2, importConvTranspose) {
377 std::string netname("convtranspose.pbtxt");
378 std::string initname("convtranspose_init.pbtxt");
379 std::vector<dim_t> expectedDims = {1, 1, 4, 4};
380 std::vector<float> expectedValues = {5, 13, 18, 13, 19, 50, 64, 42,
381 37, 92, 106, 66, 33, 77, 86, 51};
382 convTransposeTestHelper(netname, initname, expectedDims, expectedValues);
383 }
384
385 /// Test loading ConvTranspose op from a ONNX model.
386 /// The input is N*C*H*W (1*1*2*2), the kernels is {3, 3},
387 /// strides is {1, 1}, pads is {1, 1, 1, 1}, group is 1.
TEST(onnx,importConvTransposePads)388 TEST(onnx, importConvTransposePads) {
389 std::string netname("convtranspose_pads.pbtxt");
390 std::string initname("convtranspose_init.pbtxt");
391 std::vector<dim_t> expectedDims = {1, 1, 2, 2};
392 std::vector<float> expectedValues = {50, 64, 92, 106};
393 convTransposeTestHelper(netname, initname, expectedDims, expectedValues);
394 }
395
396 /// Test loading conv op from a Caffe2 model.
397 /// The input is N*H*W*C (1*3*3*1), the kernel is 2,
398 /// stride is 1, pad is 1, group is 1.
TEST(caffe2,convTransposeNHWC)399 TEST(caffe2, convTransposeNHWC) {
400 ExecutionEngine EE{};
401 auto &mod = EE.getModule();
402 Function *F = mod.createFunction("main");
403
404 std::string NetDescFilename(
405 GLOW_DATA_PATH "tests/models/caffe2Models/convtranspose_nhwc.pbtxt");
406 std::string NetWeightFilename(
407 GLOW_DATA_PATH "tests/models/caffe2Models/convtranspose_nhwc_init.pbtxt");
408
409 Placeholder *output;
410 PlaceholderBindings bindings;
411
412 Tensor inputs(ElemKind::FloatTy, {1, 2, 2, 1});
413 inputs.getHandle() = {2., 3., 4., 5.};
414
415 // Destroy the loader after the graph is loaded since the following execution
416 // will not depend on anything from the loader.
417 {
418 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
419 {&inputs.getType()}, *F);
420 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
421 }
422
423 // High level check on the content of the graph. We have 1 conv and 1 save.
424 EXPECT_EQ(F->getNodes().size(), 2);
425 auto *saveNode = getSaveNodeFromDest(output);
426 auto *convTransposeNode =
427 llvm::dyn_cast<ConvTransposeNode>(saveNode->getInput().getNode());
428 ASSERT_TRUE(convTransposeNode);
429
430 // We have 2 placeholders: 1 input and 1 output.
431 EXPECT_EQ(mod.getPlaceholders().size(), 2);
432 // We have 2 constants: Weights and bias.
433 EXPECT_EQ(mod.getConstants().size(), 2);
434 }
435
436 /// Test loading MaxPool with NHWC order input.
TEST_F(Caffe2ImporterTest,maxPoolNHWC)437 TEST_F(Caffe2ImporterTest, maxPoolNHWC) {
438 ExecutionEngine EE{};
439 auto &mod = EE.getModule();
440 Function *F = mod.createFunction("main");
441
442 std::string NetDescFilename(
443 GLOW_DATA_PATH
444 "tests/models/caffe2Models/maxpool_nhwc_predict_net.pbtxt");
445 std::string NetWeightFilename(
446 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
447
448 Placeholder *output;
449 PlaceholderBindings bindings;
450
451 Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});
452
453 // Destroy the loader after the graph is loaded since the following execution
454 // will not depend on anything from the loader.
455 {
456 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
457 {&inputs.getType()}, *F);
458 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
459 }
460
461 // High level check on the content of the graph. We have 1 maxpool and 1 save.
462 EXPECT_EQ(F->getNodes().size(), 2);
463 auto *saveNode = getSaveNodeFromDest(output);
464 auto *maxPoolNode =
465 llvm::dyn_cast<MaxPoolNode>(saveNode->getInput().getNode());
466 ASSERT_TRUE(maxPoolNode);
467
468 // We have 2 placeholders: 1 input and 1 output.
469 EXPECT_EQ(mod.getPlaceholders().size(), 2);
470 // We have 0 constants.
471 EXPECT_EQ(mod.getConstants().size(), 0);
472 }
473
474 /// Test that loading MaxPool with legacy padding terminates early.
TEST_F(Caffe2ImporterTest,maxPoolLegacyPadding)475 TEST_F(Caffe2ImporterTest, maxPoolLegacyPadding) {
476 ExecutionEngine EE{};
477 auto &mod = EE.getModule();
478 Function *F = mod.createFunction("main");
479
480 std::string NetDescFilename(
481 GLOW_DATA_PATH
482 "tests/models/caffe2Models/maxpool_legacy_padding_predict_net.pbtxt");
483 std::string NetWeightFilename(
484 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
485
486 Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});
487
488 Error err(Error::success());
489 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
490 {&inputs.getType()}, *F, &err);
491
492 // Test that the error is the expected one.
493 auto msg = ERR_TO_STRING(std::move(err));
494 ASSERT_NE(msg.find("MaxPool nodes with legacy caffe padding are "
495 "deprecated and not supported."),
496 std::string::npos);
497 }
498
499 /// Test loading MaxPool with default NCHW order input.
TEST_F(Caffe2ImporterTest,maxPool)500 TEST_F(Caffe2ImporterTest, maxPool) {
501 ExecutionEngine EE{};
502 auto &mod = EE.getModule();
503 Function *F = mod.createFunction("main");
504
505 std::string NetDescFilename(
506 GLOW_DATA_PATH "tests/models/caffe2Models/maxpool_predict_net.pbtxt");
507 std::string NetWeightFilename(
508 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
509
510 Placeholder *output;
511 PlaceholderBindings bindings;
512
513 Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});
514
515 // Destroy the loader after the graph is loaded since the following execution
516 // will not depend on anything from the loader.
517 {
518 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
519 {&inputs.getType()}, *F);
520 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
521 }
522
523 // High level check on the content of the graph. We have 1 maxpool, 1 save
524 // and 2 transpose.
525 EXPECT_EQ(F->getNodes().size(), 4);
526 auto *saveNode = getSaveNodeFromDest(output);
527 auto *transNode1 =
528 llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
529 ASSERT_TRUE(transNode1);
530 auto *maxPoolNode =
531 llvm::dyn_cast<MaxPoolNode>(transNode1->getInput().getNode());
532 ASSERT_TRUE(maxPoolNode);
533 auto *transNode2 =
534 llvm::dyn_cast<TransposeNode>(maxPoolNode->getInput().getNode());
535 ASSERT_TRUE(transNode2);
536
537 // We have 2 placeholders: 1 input and 1 output.
538 EXPECT_EQ(mod.getPlaceholders().size(), 2);
539 // We have 0 constants.
540 EXPECT_EQ(mod.getConstants().size(), 0);
541 }
542
/// Test loading AvgPool with NHWC order input.
TEST_F(Caffe2ImporterTest, avgPoolNHWC) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/avgpool_nhwc_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
                               {&inputs.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // High level check on the content of the graph. We have 1 avgpool and 1
  // save; NHWC input needs no layout transposes.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *avgPoolNode =
      llvm::dyn_cast<AvgPoolNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(avgPoolNode);

  // We have 2 placeholders: 1 input and 1 output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
  // We have 0 constants.
  EXPECT_EQ(mod.getConstants().size(), 0);
}
580
/// Test loading AveragePool with default NCHW order input.
TEST_F(Caffe2ImporterTest, avgPool) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/avgpool_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  Tensor inputs(ElemKind::FloatTy, {1, 3, 3, 1});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
                               {&inputs.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // High level check on the content of the graph. We have 1 avgpool, 1 save
  // and 2 transpose (layout conversion for the NCHW input).
  EXPECT_EQ(F->getNodes().size(), 4);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *transNode1 =
      llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(transNode1);
  auto *avgPoolNode =
      llvm::dyn_cast<AvgPoolNode>(transNode1->getInput().getNode());
  ASSERT_TRUE(avgPoolNode);
  auto *transNode2 =
      llvm::dyn_cast<TransposeNode>(avgPoolNode->getInput().getNode());
  ASSERT_TRUE(transNode2);

  // We have 2 placeholders: 1 input and 1 output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
  // We have 0 constants.
  EXPECT_EQ(mod.getConstants().size(), 0);
}
624
/// Test loading a concat node with add_axis.
/// Concat nodes with add_axis have a different semantic
/// than the plain glow concat.
/// concat A(dim0, dim1), B(dim0, dim1), ... 1, add_axis = 1
/// res = A, B...
/// C2 shape: dim0, #input, dim1, i.e., three dimensions.
/// Glow shape: dim0, #input x dim1, i.e., two dimensions.
///
/// To fill the gap between the two, glow issues a reshape
/// right after its concat.
TEST_F(Caffe2ImporterTest, concatAddAxis) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/concat_add_axis_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;

  Placeholder *output;
  // Three identically-shaped inputs so add_axis concat is well-defined.
  Tensor inputs_0(ElemKind::FloatTy, {10, 7});
  Tensor inputs_1(ElemKind::FloatTy, {10, 7});
  Tensor inputs_2(ElemKind::FloatTy, {10, 7});
  inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  inputs_2.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename,
        {"inputs_0", "inputs_1", "inputs_2"},
        {&inputs_0.getType(), &inputs_1.getType(), &inputs_2.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());

    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod,
                                  {"inputs_0", "inputs_1", "inputs_2"},
                                  {&inputs_0, &inputs_1, &inputs_2});
  }

  // Check that the shape of the output matches what Caffe2 expects.
  std::vector<dim_t> expectedDims = {10, 3, 7};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  auto res = bindings.get(output);
  EE.compile(CompilationMode::Infer);

  EE.run(bindings);
  // High level check on the content of the graph.
  // We have 1 reshape, 1 concat, and 1 save.
  EXPECT_EQ(F->getNodes().size(), 3);
  // We have three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);

  // Check that the graph has the expected shape,
  // starting from the output.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *reshape = llvm::dyn_cast<ReshapeNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(reshape);
  auto *concat = llvm::dyn_cast<ConcatNode>(reshape->getInput());
  ASSERT_TRUE(concat);
  // We will check that the inputs are correct within
  // the next loop.

  auto result = res->getHandle();

  // Check that the output matches the concatenation of
  // all the inputs.
  Tensor *inputs[] = {&inputs_0, &inputs_1, &inputs_2};
  for (dim_t i = 0; i < 3; ++i) {
    const auto inputsHandle = inputs[i]->getHandle();
    ASSERT_TRUE(llvm::isa<Placeholder>(concat->getInputs()[i]));

    for (dim_t row = 0; row < 10; ++row) {
      for (dim_t column = 0; column < 7; ++column) {
        EXPECT_FLOAT_EQ(result.at({row, i, column}),
                        inputsHandle.at({row, column}));
      }
    }
  }
}
711
712 /// Test loading a regular concat node.
TEST_F(Caffe2ImporterTest,concat)713 TEST_F(Caffe2ImporterTest, concat) {
714 ExecutionEngine EE{};
715 auto &mod = EE.getModule();
716 Function *F = mod.createFunction("main");
717
718 std::string NetDescFilename(
719 GLOW_DATA_PATH "tests/models/caffe2Models/concat_predict_net.pbtxt");
720 std::string NetWeightFilename(
721 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
722
723 PlaceholderBindings bindings;
724 Placeholder *output;
725 Tensor inputs_0(ElemKind::FloatTy, {10, 7});
726 Tensor inputs_1(ElemKind::FloatTy, {10, 12});
727 Tensor inputs_2(ElemKind::FloatTy, {10, 5});
728 inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
729 inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
730 inputs_2.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
731 // Destroy the loader after the graph is loaded since the following execution
732 // will not depend on anything from the loader.
733 {
734 Caffe2ModelLoader caffe2LD(
735 NetDescFilename, NetWeightFilename,
736 {"inputs_0", "inputs_1", "inputs_2"},
737 {&inputs_0.getType(), &inputs_1.getType(), &inputs_2.getType()}, *F);
738 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
739
740 bindings.allocate(mod.getPlaceholders());
741 updateInputPlaceholdersByName(bindings, &mod,
742 {"inputs_0", "inputs_1", "inputs_2"},
743 {&inputs_0, &inputs_1, &inputs_2});
744 }
745
746 // Check that the shape of the output matches what Caffe2 expects.
747 std::vector<dim_t> expectedDims = {10, 24};
748 EXPECT_TRUE(output->dims().vec() == expectedDims);
749
750 bindings.allocate(mod.getPlaceholders());
751 auto res = bindings.get(output);
752 EE.compile(CompilationMode::Infer);
753
754 EE.run(bindings);
755 // High level check on the content of the graph.
756 // We have 1 concat, and 1 save.
757 EXPECT_EQ(F->getNodes().size(), 2);
758 // With have three inputs and one outputs.
759 EXPECT_EQ(mod.getPlaceholders().size(), 4);
760
761 auto result = res->getHandle();
762
763 // Check that the graph has the expected shape,
764 // starting from the output.
765 auto *saveNode = getSaveNodeFromDest(output);
766 auto *concat = llvm::dyn_cast<ConcatNode>(saveNode->getInput());
767 ASSERT_TRUE(concat);
768 // We will check that the inputs are correct within
769 // the next loop.
770
771 // Check that the output matches the concatenation of
772 // all the inputs.
773 Tensor *inputs[] = {&inputs_0, &inputs_1, &inputs_2};
774 dim_t columnsChecked = 0;
775 for (size_t i = 0; i < 3; ++i) {
776 const auto inputsHandle = inputs[i]->getHandle();
777 ASSERT_TRUE(llvm::isa<Placeholder>(concat->getInputs()[i]));
778
779 dim_t currentColumnWidth = inputs[i]->dims()[1];
780 for (dim_t row = 0; row < 10; ++row) {
781 for (dim_t column = 0; column < currentColumnWidth; ++column) {
782 EXPECT_FLOAT_EQ(result.at({row, columnsChecked + column}),
783 inputsHandle.at({row, column}));
784 }
785 }
786 columnsChecked += currentColumnWidth;
787 }
788 }
789
/// Test loading a batched matmul with transpose on RHS.
TEST_F(Caffe2ImporterTest, batchedMatmulRHS) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");
  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/matmul_trans_RHS_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  // LHS is batched (3 x 10 x 7); RHS is a single 10 x 7 matrix that gets
  // transposed and broadcast across the batch.
  Tensor inputs_0(ElemKind::FloatTy, {3, 10, 7});
  Tensor inputs_1(ElemKind::FloatTy, {10, 7});
  inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"inputs_0", "inputs_1"},
                               {&inputs_0.getType(), &inputs_1.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches what Caffe2 expects.
  std::vector<dim_t> expectedDims = {3, 10, 10};
  EXPECT_TRUE(output->dims().vec() == expectedDims);
  // High level check on the content of the graph.
  // We have 1 transpose, 1 matmul, 1 save, and 2 reshapes.
  EXPECT_EQ(F->getNodes().size(), 5);
  // We have 2 inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
  // Check that the graph has the expected shape,
  // starting from the output.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *BMMN = llvm::dyn_cast<BatchMatMulNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(BMMN);
  const dim_t batchMatmulDims[] = {3, 10, 10};
  EXPECT_EQ(BMMN->getResult().dims(), llvm::makeArrayRef(batchMatmulDims));
  EXPECT_TRUE(llvm::isa<Placeholder>(BMMN->getLHS()));
  // RHS chain: placeholder -> transpose -> reshape -> tile (broadcast over
  // the batch dimension).
  auto *tileRHS = llvm::dyn_cast<TileNode>(BMMN->getRHS());
  ASSERT_TRUE(tileRHS);
  auto *reshapeRHS = llvm::dyn_cast<ReshapeNode>(tileRHS->getInput());
  ASSERT_TRUE(reshapeRHS);
  auto *transposeRHS = llvm::dyn_cast<TransposeNode>(reshapeRHS->getInput());
  ASSERT_TRUE(transposeRHS);
  EXPECT_TRUE(llvm::isa<Placeholder>(transposeRHS->getInput()));
  // Check that the last two dimensions are swapped.
  const unsigned_t shuffle[] = {1, 0};
  EXPECT_EQ(transposeRHS->getShuffle(), llvm::makeArrayRef(shuffle));
  // We don't actually check that the output is correct, because this
  // should be covered in the OperatorTest for MatMul already.
}
844
845 /// Test loading a parallel batched matmul.
TEST_F(Caffe2ImporterTest, parallelBatchedMatmulRHS) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");
  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/parallel_matmul_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  // Both operands carry the batch dimension (3), so no broadcast is needed.
  Tensor inputs_0(ElemKind::FloatTy, {3, 10, 7});
  Tensor inputs_1(ElemKind::FloatTy, {3, 7, 10});
  inputs_0.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  inputs_1.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"inputs_0", "inputs_1"},
                               {&inputs_0.getType(), &inputs_1.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // High level check on the content of the graph.
  // We have a BatchMatMul and a Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  // We have 2 inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
  // Check that the graph has the expected shape, starting from the output:
  // the parallel matmuls are loaded as a single BatchMatMul feeding the Save.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *BMMN = llvm::dyn_cast<BatchMatMulNode>(saveNode->getInput());
  ASSERT_TRUE(BMMN);

  const dim_t lhsDims[] = {3, 10, 7};
  EXPECT_EQ(BMMN->getLHS().dims(), llvm::makeArrayRef(lhsDims));
  const dim_t rhsDims[] = {3, 7, 10};
  EXPECT_EQ(BMMN->getRHS().dims(), llvm::makeArrayRef(rhsDims));
  const dim_t resultDims[] = {3, 10, 10};
  EXPECT_EQ(BMMN->getResult().dims(), llvm::makeArrayRef(resultDims));

  // We don't actually check that the output is correct, because this
  // should be covered in the OperatorTest for MatMul already.
}
893
894 /// Test loading a FC node : I * transpose(W) + B.
TEST_F(Caffe2ImporterTest, FC) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/fc_predict_net.pbtxt");
  std::string NetWeightFilename(GLOW_DATA_PATH
                                "tests/models/caffe2Models/fc_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Tensor inputs(ElemKind::FloatTy, {2, 3});
    inputs.getHandle() = {1, 2, 3, 4, 5, 6};

    // Weights and bias are read from NetWeightFilename. And the values are:
    // weights : {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
    // bias : {0.1f, 0.2f, 0.3f, 0.4f};
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
                               {&inputs.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputs});
  }

  // High level check on the content of the graph. We have 1 FC node and 1 save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *fcNode =
      llvm::dyn_cast<FullyConnectedNode>(saveNode->getInput().getNode());
  // ASSERT (not EXPECT) so the test stops if the FC node is missing; this is
  // also consistent with the FCWithFlatten/FCTransposed tests below.
  ASSERT_TRUE(fcNode);

  // Check the numerical values of the weights and biases.
  {
    // NOTE: this is weights1 because the weights constant was transposed
    const Constant *constant = mod.getConstantByName("weights__1");
    ASSERT_TRUE(constant);
    const Tensor &weights = constant->getPayload();
    const std::vector<dim_t> expectedDimensions = {3, 4};
    const std::vector<float> expectedValues = {1.0f, 4.0f, 7.0f, 10.0f, //
                                               2.0f, 5.0f, 8.0f, 11.0f, //
                                               3.0f, 6.0f, 9.0f, 12.0f};
    EXPECT_EQ(expectedDimensions, weights.dims().vec());
    ASSERT_EQ(expectedValues.size(), weights.size());
    const auto elements = weights.getHandle();
    for (size_t i = 0; i < expectedValues.size(); ++i) {
      EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
          << "Where i = " << i;
    }
  }
  {
    const Constant *constant = mod.getConstantByName("bias");
    ASSERT_TRUE(constant);
    const Tensor &bias = constant->getPayload();
    const std::vector<dim_t> expectedDimensions = {4};
    const std::vector<float> expectedValues = {0.1f, 0.2f, 0.3f, 0.4f};
    EXPECT_EQ(expectedDimensions, bias.dims().vec());
    ASSERT_EQ(expectedValues.size(), bias.size());
    const auto elements = bias.getHandle();
    for (size_t i = 0; i < expectedValues.size(); ++i) {
      EXPECT_FLOAT_EQ(expectedValues.at(i), elements.raw(i))
          << "Where i = " << i;
    }
  }

  // We don't actually check that the output is correct, because this is
  // already covered in the Operator.FC/* tests.
}
966
/// Test loading a FC node : I * transpose(W) + B, where I needs to be
/// flattened.
TEST_F(Caffe2ImporterTest, FCWithFlatten) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fc_4d_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fc_4d_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  {
    Tensor inputTensor(ElemKind::FloatTy, {1, 1, 1, 2048});

    // Weights and bias come from NetWeightFilename.
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
                               {&inputTensor.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputTensor});
  }

  // Expected graph: Reshape -> FullyConnected -> Reshape -> Save.
  EXPECT_EQ(F->getNodes().size(), 4);

  // The 4-D shape must be restored on the output.
  const std::vector<dim_t> wantDims{1, 1, 1, 9190};
  EXPECT_EQ(output->getType()->dims(), llvm::makeArrayRef(wantDims));

  auto *save = getSaveNodeFromDest(output);
  auto *outReshape = llvm::dyn_cast<ReshapeNode>(save->getInput().getNode());
  ASSERT_TRUE(outReshape);
  auto *fc =
      llvm::dyn_cast<FullyConnectedNode>(outReshape->getInput().getNode());
  ASSERT_TRUE(fc);
  auto *inReshape = llvm::dyn_cast<ReshapeNode>(fc->getInput());
  ASSERT_TRUE(inReshape);

  // The numerical output is not checked here; the Operator.FCWithFlatten/*
  // tests already cover it.
}
1014
1015 /// Test loading a FCTransposed node: I * W + B
TEST_F(Caffe2ImporterTest, FCTransposed) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/fcTransposed_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fcTransposed_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // The loader is scoped so it is destroyed as soon as the graph has been
  // built; nothing below depends on it.
  {
    Tensor inputTensor(ElemKind::FloatTy, {2, 3});
    inputTensor.getHandle() = {1, 2, 3, 4, 5, 6};

    // Weights and bias come from NetWeightFilename. Their values are:
    // weights : {1, 4, 7, 10, 2, 5, 8, 11, 3, 6, 9, 12};
    // bias : {0.1f, 0.2f, 0.3f, 0.4f};
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
                               {&inputTensor.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputTensor});
  }

  // The graph should consist of exactly one FullyConnected and one Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(output);
  auto *fc = llvm::dyn_cast<FullyConnectedNode>(save->getInput().getNode());
  ASSERT_TRUE(fc);

  // Verify the numerical contents of the weights constant.
  {
    const Constant *weightsC = mod.getConstantByName("weights");
    ASSERT_TRUE(weightsC);
    const Tensor &weightsT = weightsC->getPayload();
    const std::vector<dim_t> wantDims = {3, 4};
    const std::vector<float> wantVals = {1.0f, 4.0f, 7.0f, 10.0f, //
                                         2.0f, 5.0f, 8.0f, 11.0f, //
                                         3.0f, 6.0f, 9.0f, 12.0f};
    EXPECT_EQ(wantDims, weightsT.dims().vec());
    ASSERT_EQ(wantVals.size(), weightsT.size());
    const auto H = weightsT.getHandle();
    for (size_t idx = 0; idx < wantVals.size(); ++idx) {
      EXPECT_FLOAT_EQ(wantVals.at(idx), H.raw(idx)) << "Where i = " << idx;
    }
  }
  // Verify the numerical contents of the bias constant.
  {
    const Constant *biasC = mod.getConstantByName("bias");
    ASSERT_TRUE(biasC);
    const Tensor &biasT = biasC->getPayload();
    const std::vector<dim_t> wantDims = {4};
    const std::vector<float> wantVals = {0.1f, 0.2f, 0.3f, 0.4f};
    EXPECT_EQ(wantDims, biasT.dims().vec());
    ASSERT_EQ(wantVals.size(), biasT.size());
    const auto H = biasT.getHandle();
    for (size_t idx = 0; idx < wantVals.size(); ++idx) {
      EXPECT_FLOAT_EQ(wantVals.at(idx), H.raw(idx)) << "Where i = " << idx;
    }
  }

  // The numerical output is not checked here; Operator tests already cover it.
}
1088
/// Test loading a FCTransposed node: I * W + B, where I needs to be flattened.
TEST_F(Caffe2ImporterTest, FCTransposedWithFlatten) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/fcTransposed_4d_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/fcTransposed_4d_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  {
    Tensor inputTensor(ElemKind::FloatTy, {1, 1, 1, 2048});

    // Weights and bias come from NetWeightFilename.
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs"},
                               {&inputTensor.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"inputs"}, {&inputTensor});
  }

  // Expected graph: Reshape -> FullyConnected -> Reshape -> Save.
  EXPECT_EQ(F->getNodes().size(), 4);

  // The 4-D shape must be restored on the output.
  const std::vector<dim_t> wantDims{1, 1, 1, 9190};
  EXPECT_EQ(output->getType()->dims(), llvm::makeArrayRef(wantDims));

  auto *save = getSaveNodeFromDest(output);
  auto *outReshape = llvm::dyn_cast<ReshapeNode>(save->getInput().getNode());
  ASSERT_TRUE(outReshape);
  auto *fc =
      llvm::dyn_cast<FullyConnectedNode>(outReshape->getInput().getNode());
  ASSERT_TRUE(fc);
  auto *inReshape = llvm::dyn_cast<ReshapeNode>(fc->getInput());
  ASSERT_TRUE(inReshape);

  // The numerical output is not checked here; the Operator.FCWithFlatten/*
  // tests already cover it.
}
1137
1138 /// Test loading bucketize op from a Caffe2 model.
1139 /// Test with arg boundaries = [0.1, 2.5]
TEST_F(Caffe2ImporterTest, importBucketize) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/bucketize_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  Tensor in0(ElemKind::FloatTy, {3, 2});
  // Scope the loader so it is destroyed once the graph has been built.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input_0"},
                               {&in0.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input_0"}, {&in0});
  }

  // Expected graph: Bucketize -> Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(output);
  auto *bucketize = llvm::dyn_cast<BucketizeNode>(save->getInput().getNode());
  ASSERT_TRUE(bucketize);
  // The boundaries arg from the proto must be carried over.
  const auto bounds = bucketize->getBoundaries();
  ASSERT_EQ(bounds.size(), 2);
  EXPECT_NEAR(bounds[0], 0.1, 0.00001);
  EXPECT_NEAR(bounds[1], 2.5, 0.00001);
  // One input placeholder plus the output placeholder.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
}
1175
1176 /// Test loading ResizeNearest op from a Caffe2 model.
1177 /// Test with NHWC order, 2.0 height scale and 1.5 width scale
TEST_F(Caffe2ImporterTest, importResizeNearest) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/resize_nearest_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  {
    Tensor in(ElemKind::FloatTy, {1, 2, 2, 1});

    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input_0"},
                               {&in.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input_0"}, {&in});
  }

  // Expected graph: ResizeNearest -> Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(output);
  auto *resize = llvm::dyn_cast<ResizeNearestNode>(save->getInput().getNode());
  ASSERT_TRUE(resize);
  // One input, one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
  // Scales are NHWC: N and C stay 1, H is scaled by 2.0, W by 1.5.
  const auto scales = resize->getScale();
  EXPECT_EQ(scales[0], 1);
  EXPECT_NEAR(scales[1], 2.0, 0.00001);
  EXPECT_NEAR(scales[2], 1.5, 0.00001);
  EXPECT_EQ(scales[3], 1);
}
1216
1217 /// Test loading clip op from a Caffe2 model.
1218 /// Test with arg min = 20.0 max = 60.0
TEST_F(Caffe2ImporterTest, importClip) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/clip_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  Tensor in0(ElemKind::FloatTy, {5, 5});
  // Scope the loader so it is destroyed once the graph has been built.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
                               {&in0.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&in0});
  }

  // Expected graph: Clip -> Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(output);
  auto *clip = llvm::dyn_cast<ClipNode>(save->getInput().getNode());
  ASSERT_TRUE(clip);
  // The min/max args from the proto must be carried over.
  EXPECT_EQ(clip->getMin(), 20.0);
  EXPECT_EQ(clip->getMax(), 60.0);
  // The clip reads directly from the input placeholder.
  auto *clipIn = llvm::dyn_cast<Placeholder>(clip->getInput());
  ASSERT_EQ(clipIn, mod.getPlaceholderByNameSlow("inputs_0"));
  // One input, one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
}
1253
1254 /// Test loading clip op from a Caffe2 model with default arg values:
1255 /// min = std::numeric_limits<float>::lowest()
1256 /// max = std::numeric_limits<float>::max()
TEST_F(Caffe2ImporterTest, importClipDefault) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/clip_op_default_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  Tensor inputs_0(ElemKind::FloatTy, {5, 5});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
                               {&inputs_0.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"inputs_0"}, {&inputs_0});
  }
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *clipNode = llvm::dyn_cast<ClipNode>(saveNode->getInput().getNode());
  // BUGFIX: dyn_cast returns null on a type mismatch; assert before
  // dereferencing (consistent with the importClip test above).
  ASSERT_TRUE(clipNode);
  EXPECT_EQ(clipNode->getMax(), std::numeric_limits<float>::max());
  EXPECT_EQ(clipNode->getMin(), std::numeric_limits<float>::lowest());
  auto *inputNode = llvm::dyn_cast<Placeholder>(clipNode->getInput().getNode());
  ASSERT_EQ(inputNode, mod.getPlaceholderByNameSlow("inputs_0"));
  // We have one input and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
}
1290
1291 /// Test loading a ReplaceNaN operator.
TEST_F(Caffe2ImporterTest, replaceNaN) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/replace_nan_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  Tensor input(ElemKind::FloatTy, {10, 10});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
                               {&input.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&input});
  }

  // Check that the shape of the output matches the input.
  std::vector<dim_t> expectedDims = {10, 10};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  // High level checks on the content of the graph.
  // We have 1 ReplaceNaN and 1 Output.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *saveNode = getSaveNodeFromDest(output);
  auto *replaceNaNNode =
      llvm::dyn_cast<ReplaceNaNNode>(saveNode->getInput().getNode());
  // BUGFIX: dyn_cast returns null on a type mismatch; assert before
  // dereferencing below.
  ASSERT_TRUE(replaceNaNNode);
  EXPECT_EQ(replaceNaNNode->getValue(), 1.0f);
  auto *inputNode =
      llvm::dyn_cast<Placeholder>(replaceNaNNode->getInput().getNode());
  ASSERT_EQ(inputNode, mod.getPlaceholderByNameSlow("input"));

  // We have one input and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
}
1334
1335 /// Test loading a DotProduct operator with 1D inputs.
TEST_F(Caffe2ImporterTest, dotProduct1D) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/dot_product_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;

  // Both operands share a single 1-D type.
  constexpr dim_t numElems = 10;
  auto ty = mod.uniqueType(ElemKind::FloatTy, {numElems});

  // Scope the loader so that F cannot depend on anything stored in it.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"X", "Y"},
                               {ty, ty}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // A 1-D dot product keeps the input shape.
  EXPECT_TRUE(output->dims().equals({numElems}));

  // Expected graph: Mul -> Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(output);
  auto *mul = llvm::dyn_cast<MulNode>(save->getInput());
  ASSERT_TRUE(mul);

  // Two inputs plus the output placeholder.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
}
1376
1377 // Test loading a DotProduct operator with 2D inputs.
TEST_F(Caffe2ImporterTest, dotProduct2D) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/dot_product_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;

  // Both operands share a single 2-D type.
  constexpr dim_t numRows = 10;
  constexpr dim_t numCols = 20;
  auto ty = mod.uniqueType(ElemKind::FloatTy, {numRows, numCols});

  // Scope the loader so that F cannot depend on anything stored in it.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"X", "Y"},
                               {ty, ty}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // A 2-D dot product yields one value per row.
  EXPECT_TRUE(output->dims().equals({numRows}));

  // Expected graph: Mul -> BatchedReduceAdd -> Save.
  EXPECT_EQ(F->getNodes().size(), 3);
  auto *save = getSaveNodeFromDest(output);
  auto *reduce = llvm::dyn_cast<BatchedReduceAddNode>(save->getInput());
  ASSERT_TRUE(reduce);
  ASSERT_EQ(reduce->getNumInputs(), 1);
  auto *mul = llvm::dyn_cast<MulNode>(reduce->getBatch());
  ASSERT_TRUE(mul);

  // Two inputs plus the output placeholder.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
}
1423
1424 // Test loading a BatchBoxCox operator.
TEST_F(Caffe2ImporterTest, batchBoxCox) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/batch_box_cox_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;

  // Input tensors: data is (rows x cols); lambda1/lambda2 carry one value
  // per column.
  const dim_t kRows = 10;
  const dim_t kCols = 5;
  Tensor data(ElemKind::FloatTy, {kRows, kCols});
  Tensor lambda1(ElemKind::FloatTy, {kCols});
  Tensor lambda2(ElemKind::FloatTy, {kCols});
  // NOTE: the unused output tensor `O` that used to be declared here was
  // removed; the test never reads or binds it.
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"data", "lambda1", "lambda2"},
        {&data.getType(), &lambda1.getType(), &lambda2.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod,
                                  {"data", "lambda1", "lambda2"},
                                  {&data, &lambda1, &lambda2});
  }

  // We have 1 BatchBoxCox and 1 Save.
  EXPECT_EQ(F->getNodes().size(), 2);

  // Output.
  auto *saveNode = getSaveNodeFromDest(output);
  ASSERT_TRUE(saveNode);

  // BatchBoxCox node feeding the save.
  auto *BBCN = llvm::dyn_cast<BatchBoxCoxNode>(saveNode->getInput());
  ASSERT_TRUE(BBCN);

  // There are three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
}
1472
1473 // Test loading a EQ operator with 1D inputs.
TEST_F(Caffe2ImporterTest, EQ1D) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/eq_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // Two 1-D inputs of equal size.
  const dim_t numElems = 10;
  Tensor lhs(ElemKind::FloatTy, {numElems});
  Tensor rhs(ElemKind::FloatTy, {numElems});

  // Scope the loader so it is destroyed once the graph has been built.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"X", "Y"},
                               {&lhs.getType(), &rhs.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Expected graph: CmpEQ -> Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(output);
  auto *cmpEQ = llvm::dyn_cast<CmpEQNode>(save->getInput());
  ASSERT_TRUE(cmpEQ);

  // Two inputs plus the output placeholder.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
}
1512
1513 // Test loading a LengthsToRanges operator.
TEST_F(Caffe2ImporterTest, LengthsToRanges) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/lengths_to_ranges.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/lengths_to_ranges_init_net.pbtxt");

  Placeholder *output;

  // All inputs come from the init net, so no external inputs are passed.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {}, {}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Expected graph: LengthsToRanges -> Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(output);
  auto *ltr = llvm::dyn_cast<LengthsToRangesNode>(save->getInput());
  ASSERT_TRUE(ltr);

  // Only the output placeholder exists.
  EXPECT_EQ(mod.getPlaceholders().size(), 1);
}
1546
1547 // Test loading Logit operator from a Caffe2 model.
TEST_F(Caffe2ImporterTest, Logit) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/logit_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;

  // Single 1-D input.
  const dim_t numElems = 10;
  Tensor in(ElemKind::FloatTy, {numElems});

  // Scope the loader so it is destroyed once the graph has been built.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"inputs_0"},
                               {&in.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // The output keeps the input shape.
  const std::vector<dim_t> wantDims = {numElems};
  EXPECT_EQ(output->dims().vec(), wantDims);

  // Expected graph: Logit -> Save.
  EXPECT_EQ(F->getNodes().size(), 2);

  // One input, one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
}
1582
// Test loading Swish operator from a Caffe2 model.
TEST_F(Caffe2ImporterTest, Swish) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/swish_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;

  // Input tensors.
  Tensor X(ElemKind::FloatTy, {10});

  // Destroy the loader after the graph is loaded
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
                               {&X.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&X});
  }

  // Check that the type of the output matches the input.
  EXPECT_TRUE(output->getType()->isEqual(X.getType()));

  // High level checks on the content of the graph.
  EXPECT_EQ(F->getNodes().size(), 2); // Save and Swish
  auto *saveNode = getSaveNodeFromDest(output);
  auto *swish = llvm::dyn_cast<SwishNode>(saveNode->getInput());
  ASSERT_TRUE(swish);

  // Graph has one input and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);

  // Unlike most loader tests, this one also compiles and runs the function.
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);
}
1624
1625 // Test loading a SparseToDense operator.
TEST_F(Caffe2ImporterTest, sparseToDense) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  const std::string netDescPath(
      GLOW_DATA_PATH "tests/models/caffe2Models/sparse_to_dense.pbtxt");
  const std::string netWeightPath(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *outputPH = nullptr;
  PlaceholderBindings bindings;

  // Shapes for the three inputs.
  constexpr dim_t kNumIndices = 5;
  constexpr dim_t kMaxIndex = 20;
  constexpr dim_t kRows = 10;
  constexpr dim_t kCols = 5;
  Tensor indices(ElemKind::Int64ITy, {kNumIndices});
  Tensor values(ElemKind::FloatTy, {kNumIndices, kRows, kCols});
  Tensor dataToInferDim(ElemKind::FloatTy, {kMaxIndex, kRows, kCols});

  // Keep the loader in its own scope; the checks and execution below do not
  // depend on it.
  {
    Caffe2ModelLoader loader(
        netDescPath, netWeightPath, {"indices", "values", "dataToInferDim"},
        {&indices.getType(), &values.getType(), &dataToInferDim.getType()}, *F);
    outputPH = EXIT_ON_ERR(loader.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"indices", "values"},
                                  {&indices, &values});
  }

  // The output shape is inferred from dataToInferDim.
  EXPECT_TRUE(outputPH->dims().vec() == dataToInferDim.dims().vec());

  // Graph structure: one SparseToDense feeding one Save, two nodes in total.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(outputPH);
  auto *sparseToDenseNode = llvm::dyn_cast<SparseToDenseNode>(save->getInput());
  ASSERT_TRUE(sparseToDenseNode);

  // Three input placeholders plus one output placeholder.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
}
1677
// Test loading a SparseToDenseMask operator from a Caffe2 model.
TEST_F(Caffe2ImporterTest, SparseToDenseMask) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/sparse_to_dense_mask_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // Three inputs: the sparse indices, one value row per index, and the
  // default value used for mask positions that receive no index.
  Tensor indices(ElemKind::Int64ITy, {4});
  Tensor values(ElemKind::FloatTy, {4, 10, 20, 30});
  Tensor defaultValue(ElemKind::FloatTy, {10, 20, 30});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename,
        {"indices", "values", "defaultValue"},
        {&indices.getType(), &values.getType(), &defaultValue.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(output);

  // Graph has 2 nodes: Save and SparseToDenseMask
  EXPECT_EQ(F->getNodes().size(), 2);

  // One constant was created for implicit Lengths input
  EXPECT_EQ(mod.getConstants().size(), 1);

  // Net has 3 inputs; with the save output placeholder that makes 4.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);

  auto *saveNode = getSaveNodeFromDest(output);
  auto *N = llvm::dyn_cast<SparseToDenseMaskNode>(saveNode->getInput());
  ASSERT_TRUE(N);

  // Check that no batch dimension was added because Lengths was not given.
  // The leading 6 comes from the mask length in the proto.
  EXPECT_TRUE(N->getResult().dims().equals({6, 10, 20, 30}));
  // Check that mask was read correctly.
  EXPECT_TRUE(N->getMask().equals({42, 100, 300, 1, 0, 312}));
}
1729
1730 /// Test loading NCHW2NHWC op.
TEST_F(Caffe2ImporterTest, testNCHW2NHWC) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  const std::string netDescPath(
      GLOW_DATA_PATH "tests/models/caffe2Models/NCHW2NHWC_predict_net.pbtxt");
  const std::string netWeightPath(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *outputPH = nullptr;
  PlaceholderBindings bindings;

  // NCHW input: N=1, C=2, H=3, W=4.
  Tensor inputTensor(ElemKind::FloatTy, {1, 2, 3, 4});

  // Keep the loader in its own scope; the checks below do not depend on it.
  {
    Caffe2ModelLoader loader(netDescPath, netWeightPath, {"inputs"},
                             {&inputTensor.getType()}, *F);
    outputPH = EXIT_ON_ERR(loader.getSingleOutput());
    bindings.allocate(mod.getPlaceholders());
  }

  // The output must carry the NHWC permutation of the input shape.
  auto *resultTensor = bindings.get(outputPH);
  const std::vector<dim_t> expectedDims = {1, 3, 4, 2};
  EXPECT_TRUE(resultTensor->getHandle<float>().dims().vec() == expectedDims);

  // Graph structure: one Transpose feeding one Save.
  EXPECT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(outputPH);
  auto *transpose = llvm::dyn_cast<TransposeNode>(save->getInput().getNode());
  ASSERT_TRUE(transpose);

  // One input and one output placeholder; no constants are expected.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);
  EXPECT_EQ(mod.getConstants().size(), 0);
}
1773
1774 /// Test loading a LengthsSum operator.
// Test loading a LengthsSum operator: sums slices of `data` according to the
// segment sizes in `lengths`.
TEST_F(Caffe2ImporterTest, lengthsSum) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/lengths_sum.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // Create inputs.
  // NOTE(review): lengths is declared FloatTy even though segment lengths are
  // conceptually integral — confirm this is what the proto expects.
  Tensor data(ElemKind::Int64ITy, {10, 2, 3});
  Tensor lengths(ElemKind::FloatTy, {5});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"data", "lengths"},
                               {&data.getType(), &lengths.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches that of the expected output:
  // one summed slice per segment, so the leading dim equals lengths' size.
  std::vector<dim_t> expectedShape{5, 2, 3};
  EXPECT_TRUE(output->dims().vec() == expectedShape);

  // High level checks on the content of the graph.
  // We should have 1 LengthsSum and 1 Output node = 2 nodes in total.
  EXPECT_EQ(F->getNodes().size(), 2);

  // Check that the graph has the expected shape (LengthsSum -> Save),
  // starting from the output.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *LSN = llvm::dyn_cast<LengthsSumNode>(saveNode->getInput());
  ASSERT_TRUE(LSN);

  // Graph has two inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);
}
1818
1819 /// Test loading a GatherRanges op.
TEST_F(Caffe2ImporterTest, gatherRanges) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");

  const std::string netDescPath(
      GLOW_DATA_PATH "tests/models/caffe2Models/gather_ranges.pbtxt");
  const std::string netWeightPath(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *outputPH = nullptr;
  // Flat data plus a {examples, ranges-per-example, (start, length)} tensor.
  Tensor data(ElemKind::FloatTy, {6});
  Tensor ranges(ElemKind::Int32ITy, {2, 2, 2});

  // Keep the loader in its own scope; only the graph it builds is used below.
  {
    Caffe2ModelLoader loader(netDescPath, netWeightPath, {"data", "ranges"},
                             {&data.getType(), &ranges.getType()}, *F);
    outputPH = EXIT_ON_ERR(loader.getOutputByName("output"));
  }

  // Verify structure: PH/PH -> GatherRanges -> Save -> PH/PH.
  ASSERT_EQ(mod.getPlaceholders().size(), 4);
  ASSERT_EQ(F->getNodes().size(), 3);
  auto *save = getSaveNodeFromDest(outputPH);
  auto *gatherRangesNode =
      llvm::dyn_cast<GatherRangesNode>(save->getInput().getNode());
  ASSERT_TRUE(gatherRangesNode);
  EXPECT_TRUE(gatherRangesNode->getOutput().dims().equals({5}));
  EXPECT_TRUE(gatherRangesNode->getLengths().dims().equals({2}));
}
1851
1852 /// Test loading Gather ops with constant folding from an Caffe2 model.
TEST_F(Caffe2ImporterTest, gatherConstantFoldingAndReshape) {
  // This test verifies that Gather gets constant-folded, so that the argument
  // of the reshape becomes constant.
  ExecutionEngine EE;
  auto &mod = EE.getModule();

  std::string netDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/gather_const_fold.pbtxt");
  std::string netWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/gather_const_fold_init.pbtxt");
  PlaceholderBindings bindings;
  auto *F = mod.createFunction("main");
  Placeholder *output;
  Tensor data(ElemKind::FloatTy, {1, 2, 4, 3});
  // This test is testing constant folding during loading, so enable it
  // explicitly (the fixture's SetUp disabled it; TearDown restores the
  // default afterwards).
  setConstantFoldLoaderOpsFlag(true);
  {
    Caffe2ModelLoader caffe2LD(netDescFilename, netWeightFilename, {"data"},
                               {&data.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getOutputByName("result"));
    bindings.allocate(mod.getPlaceholders());
  }
  EE.compile(CompilationMode::Infer);
  EE.run(bindings);

  // The reshape's target shape {1, 4, 3, 2} must have come through the
  // constant-folded Gather output.
  auto result = bindings.get(output)->getHandle();
  std::vector<dim_t> expectedDims = {1, 4, 3, 2};
  EXPECT_TRUE(result.dims().vec() == expectedDims);
}
1883 /// Test loading a LengthsRangeFill op.
TEST_F(Caffe2ImporterTest, LengthsRangeFill) {
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");

  const std::string netDescPath(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/lengths_range_fill_predict_net.pbtxt");
  const std::string netWeightPath(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *outputPH = nullptr;
  Tensor lengths(ElemKind::Int32ITy, {3});

  // Keep the loader in its own scope; only the graph it builds is used below.
  {
    Caffe2ModelLoader loader(netDescPath, netWeightPath, {"lengths"},
                             {&lengths.getType()}, *F);
    outputPH = EXIT_ON_ERR(loader.getOutputByName("result"));
  }

  // Verify structure: PH -> LengthsRangeFill -> Save -> PH.
  ASSERT_EQ(mod.getPlaceholders().size(), 2);
  ASSERT_EQ(F->getNodes().size(), 2);
  auto *save = getSaveNodeFromDest(outputPH);
  auto *fillNode =
      llvm::dyn_cast<LengthsRangeFillNode>(save->getInput().getNode());
  ASSERT_TRUE(fillNode);
  EXPECT_TRUE(fillNode->getLengths().dims().equals({3}));
  // The result is 1-D, sized by the max output size (8) from the proto.
  EXPECT_EQ(fillNode->getResult().dims().size(), 1);
  EXPECT_TRUE(fillNode->getResult().dims().equals({8}));
}
1915
1916 /// Verify that different fill types are loaded with the correct types.
TEST_F(Caffe2ImporterTest, tensorFillsTest) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fill_test_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fill_test_init_net.pbtxt");

  // One Constant per fill operator in the init net.
  Constant *tensorFillFloat, *tensorIntFill, *tensorInt64Fill,
      *tensorStringToUInt8Fill;

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    // Loaded protos must have at least one external output, so load an unused
    // output and type to satisfy it. It is named unused_output in
    // empty_predict_net.pbtxt.
    Type unusedTy = Type(ElemKind::FloatTy, {4});
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename,
        {"tensor_fill_float_eq", "tensor_int_fill_eq", "tensor_int64_fill_eq",
         "tensor_string_to_uint8_fill_eq"},
        {&unusedTy, &unusedTy, &unusedTy, &unusedTy}, *F);
    // Each fill must have been loaded as a Constant (dyn_cast yields null
    // otherwise, which the ASSERTs below catch).
    tensorFillFloat = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_fill_float")));
    tensorIntFill = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_int_fill")));
    tensorInt64Fill = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_int64_fill")));
    tensorStringToUInt8Fill = llvm::dyn_cast<Constant>(EXIT_ON_ERR(
        caffe2LD.getNodeValueByName("tensor_string_to_uint8_fill")));
  }

  ASSERT_TRUE(tensorFillFloat);
  ASSERT_TRUE(tensorIntFill);
  ASSERT_TRUE(tensorInt64Fill);
  ASSERT_TRUE(tensorStringToUInt8Fill);

  // All fills in fill_test_init_net.pbtxt use shape {2, 2}.
  const std::vector<dim_t> expectedDims = {2, 2};
  ASSERT_TRUE(tensorFillFloat->dims().equals(expectedDims));
  ASSERT_TRUE(tensorIntFill->dims().equals(expectedDims));
  ASSERT_TRUE(tensorInt64Fill->dims().equals(expectedDims));
  ASSERT_TRUE(tensorStringToUInt8Fill->dims().equals(expectedDims));

  auto tensorFillFloatH = tensorFillFloat->getPayload().getHandle<float>();
  auto tensorIntFillH = tensorIntFill->getPayload().getHandle<int32_t>();
  auto tensorInt64FillH = tensorInt64Fill->getPayload().getHandle<int64_t>();
  // We load GivenTensorByteStringToUInt8Fill as UInt8QTy with dummy
  // scale/offset for now, because it's only used for rowwise-quantized tensors.
  auto tensorStringToUInt8FillH =
      tensorStringToUInt8Fill->getPayload().getHandle<uint8_t>();

  // All fills in fill_test_init_net.pbtxt are set to 0 through 3.
  // The uint8 variant is offset by 128 relative to the others.
  for (size_t i = 0; i < 4; i++) {
    EXPECT_FLOAT_EQ(tensorFillFloatH.raw(i), (float)i);
    EXPECT_EQ(tensorIntFillH.raw(i), (int32_t)i);
    EXPECT_EQ(tensorInt64FillH.raw(i), (int64_t)i);
    EXPECT_EQ(tensorStringToUInt8FillH.raw(i), (uint8_t)(i + 128));
  }
}
1980
// Test loading a HalfToFloat operator: it must load as a ConvertToNode that
// produces a float32 result from a float16 input.
TEST_F(Caffe2ImporterTest, HalfToFloat) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  llvm::StringRef NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/halftofloat_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // 4-D float16 input to be converted.
  Tensor input(ElemKind::Float16Ty, {1, 2, 3, 4});

  // Destroy the loader after the graph is loaded since the following checks
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"X"},
                               {&input.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(output);

  // Graph has 2 nodes: Save and ConvertTo
  EXPECT_EQ(F->getNodes().size(), 2);

  // Input to save node is ConvertToNode.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *N = llvm::dyn_cast<ConvertToNode>(saveNode->getInput());
  // ASSERT (not EXPECT): N is dereferenced on the next line, so continuing
  // past a failed cast would crash the test binary instead of failing cleanly.
  ASSERT_TRUE(N);
  EXPECT_EQ(N->getResult().getElementType(), ElemKind::FloatTy);
}
2018
// Test loading an Alias operator: it should introduce no computation, leaving
// the save reading straight from the input placeholder.
TEST_F(Caffe2ImporterTest, Alias) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  llvm::StringRef NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/alias_op_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  Tensor input(ElemKind::FloatTy, {1, 2, 3, 4});

  // Destroy the loader after the graph is loaded since the following checks
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"X"},
                               {&input.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(output);

  // The only node is Save: Alias adds no node of its own.
  EXPECT_EQ(F->getNodes().size(), 1);

  // The save's input is the input placeholder itself.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *N = llvm::dyn_cast<Placeholder>(saveNode->getInput());
  EXPECT_TRUE(N);
}
2054
// Test loading a Modulo operator from a Caffe2 model.
TEST_F(Caffe2ImporterTest, Modulo) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/modulo_op_net.pbtxt");
  // NOTE(review): this reuses fill_test_init_net.pbtxt as the weight file;
  // its contents appear unused by this net — confirm against the proto.
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fill_test_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // 1-D int64 input.
  Tensor data(ElemKind::Int64ITy, {7});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"data"},
                               {&data.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(output);

  // Graph has 2 nodes: Save and Modulo.
  EXPECT_EQ(F->getNodes().size(), 2);

  // Net has 1 input; with the save output placeholder that makes 2.
  EXPECT_EQ(mod.getPlaceholders().size(), 2);

  // Input to save node is ModuloNode.
  auto *saveNode = getSaveNodeFromDest(output);
  auto *N = llvm::dyn_cast<ModuloNode>(saveNode->getInput());
  ASSERT_TRUE(N);
}
2094
2095 /// Test loading an ElementwiseLinear operator.
TEST_F(Caffe2ImporterTest, elementwiseLinear) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/elementwise_linear_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;
  // w and b are 1-D and sized to match X's 0th dimension; they get reshaped
  // and tiled (along axis 1, checked below) to X's full shape.
  Tensor X(ElemKind::FloatTy, {10, 5});
  Tensor w(ElemKind::FloatTy, {10}), b(ElemKind::FloatTy, {10});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"X", "w", "b"},
                               {&X.getType(), &w.getType(), &b.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches that of the input.
  std::vector<dim_t> expectedDims = {10, 5};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  // High level checks on the content of the graph.
  // It should look like this:
  //
  //            X           w            b
  //            |           |            |
  //            |           v            v
  //            |        Reshape      Reshape
  //            |           |            |
  //            |           v            v
  //            |         Tile         Tile
  //            |         /             /
  //            v  v------             /
  //            Mul                   /
  //             |   /---------------
  //             v  v
  //             Add
  //              |
  //              v
  //             Save
  //
  // (computes X * broadcast(w) + broadcast(b)).

  EXPECT_EQ(F->getNodes().size(), 7);
  // Walk the graph top-down from the save node, checking each layer.
  auto *save = getSaveNodeFromDest(output);
  auto *add = llvm::dyn_cast<AddNode>(save->getInput().getNode());
  ASSERT_TRUE(add);
  auto *mul = llvm::dyn_cast<MulNode>(add->getLHS().getNode());
  ASSERT_TRUE(mul);
  auto *bTile = llvm::dyn_cast<TileNode>(add->getRHS().getNode());
  ASSERT_TRUE(bTile);
  EXPECT_EQ(bTile->getAxis(), 1);
  auto *XPH = llvm::dyn_cast<Placeholder>(mul->getRHS().getNode());
  EXPECT_EQ(XPH, mod.getPlaceholderByNameSlow("X"));
  auto *wTile = llvm::dyn_cast<TileNode>(mul->getLHS().getNode());
  ASSERT_TRUE(wTile);
  EXPECT_EQ(wTile->getAxis(), 1);
  auto *bReshape = llvm::dyn_cast<ReshapeNode>(bTile->getInput().getNode());
  ASSERT_TRUE(bReshape);
  auto *wReshape = llvm::dyn_cast<ReshapeNode>(wTile->getInput().getNode());
  ASSERT_TRUE(wReshape);
  auto *wPH = llvm::dyn_cast<Placeholder>(wReshape->getInput().getNode());
  EXPECT_EQ(wPH, mod.getPlaceholderByNameSlow("w"));
  auto *bPH = llvm::dyn_cast<Placeholder>(bReshape->getInput().getNode());
  EXPECT_EQ(bPH, mod.getPlaceholderByNameSlow("b"));

  // We have three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
}
2170
2171 /// Test loading an ElementwiseLinear operator with no axis specified.
TEST_F(Caffe2ImporterTest, elementwiseLinearUnspecifiedAxis) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/elementwise_linear_default_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;

  // Since the loader will assume that axis = 1, the 0th dim of the shapes of w
  // and b must match the 1st dim of X.
  Tensor X(ElemKind::FloatTy, {5, 10});
  Tensor w(ElemKind::FloatTy, {10}), b(ElemKind::FloatTy, {10});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"X", "w", "b"},
                               {&X.getType(), &w.getType(), &b.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches that of the input.
  std::vector<dim_t> expectedDims = {5, 10};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  // High level checks on the content of the graph.
  // It should look like this:
  //
  //            X           w            b
  //            |           |            |
  //            |           v            v
  //            |        Reshape      Reshape
  //            |           |            |
  //            |           v            v
  //            |         Tile         Tile
  //            |         /             /
  //            v  v------             /
  //            Mul                   /
  //             |   /---------------
  //             v  v
  //             Add
  //              |
  //              v
  //             Save
  //
  // (same structure as the explicit-axis test, but tiling along axis 0).

  EXPECT_EQ(F->getNodes().size(), 7);
  // Walk the graph top-down from the save node, checking each layer.
  auto *save = getSaveNodeFromDest(output);
  auto *add = llvm::dyn_cast<AddNode>(save->getInput().getNode());
  ASSERT_TRUE(add);
  auto *mul = llvm::dyn_cast<MulNode>(add->getLHS().getNode());
  ASSERT_TRUE(mul);
  auto *bTile = llvm::dyn_cast<TileNode>(add->getRHS().getNode());
  ASSERT_TRUE(bTile);
  EXPECT_EQ(bTile->getAxis(), 0);
  auto *XPH = llvm::dyn_cast<Placeholder>(mul->getRHS().getNode());
  EXPECT_EQ(XPH, mod.getPlaceholderByNameSlow("X"));
  auto *wTile = llvm::dyn_cast<TileNode>(mul->getLHS().getNode());
  ASSERT_TRUE(wTile);
  EXPECT_EQ(wTile->getAxis(), 0);
  auto *bReshape = llvm::dyn_cast<ReshapeNode>(bTile->getInput().getNode());
  ASSERT_TRUE(bReshape);
  auto *wReshape = llvm::dyn_cast<ReshapeNode>(wTile->getInput().getNode());
  ASSERT_TRUE(wReshape);
  auto *wPH = llvm::dyn_cast<Placeholder>(wReshape->getInput().getNode());
  EXPECT_EQ(wPH, mod.getPlaceholderByNameSlow("w"));
  auto *bPH = llvm::dyn_cast<Placeholder>(bReshape->getInput().getNode());
  EXPECT_EQ(bPH, mod.getPlaceholderByNameSlow("b"));

  // We have three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
}
2250
2251 /// Test loading an ElementwiseLinear operator with implicit broadcast
TEST_F(Caffe2ImporterTest, elementwiseImplicitBroadcast) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/elementwise_linear_broadcast_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;
  Placeholder *output;

  // Since the loader will assume that axis = 1, the 0th dim of the shapes of w
  // and b must match the 1st dim of X.
  Tensor X(ElemKind::FloatTy, {5, 10});
  Tensor w(ElemKind::FloatTy, {10}), b(ElemKind::FloatTy, {10});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"X", "w", "b"},
                               {&X.getType(), &w.getType(), &b.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  // Check that the shape of the output matches that of the input.
  std::vector<dim_t> expectedDims = {5, 10};
  EXPECT_TRUE(output->dims().vec() == expectedDims);

  // High level checks on the content of the graph.
  // It should look like this:
  //
  //            X           w            b
  //            |           |            |
  //            |           v            v
  //            |        Reshape      Reshape
  //            |           |            |
  //            |           v            v
  //            |         Tile         Tile
  //            |         /             /
  //            v  v------             /
  //            Mul                   /
  //             |   /---------------
  //             v  v
  //             Add
  //              |
  //              v
  //             Save
  //
  // Note: unlike the explicit-axis tests, X is the Mul's LHS and the tiled w
  // is its RHS (see the operand checks below).

  EXPECT_EQ(F->getNodes().size(), 7);
  // Walk the graph top-down from the save node, checking each layer.
  auto *save = getSaveNodeFromDest(output);
  auto *add = llvm::dyn_cast<AddNode>(save->getInput().getNode());
  ASSERT_TRUE(add);
  auto *mul = llvm::dyn_cast<MulNode>(add->getLHS().getNode());
  ASSERT_TRUE(mul);
  auto *bTile = llvm::dyn_cast<TileNode>(add->getRHS().getNode());
  ASSERT_TRUE(bTile);
  EXPECT_EQ(bTile->getAxis(), 0);
  auto *XPH = llvm::dyn_cast<Placeholder>(mul->getLHS().getNode());
  EXPECT_EQ(XPH, mod.getPlaceholderByNameSlow("X"));
  auto *wTile = llvm::dyn_cast<TileNode>(mul->getRHS().getNode());
  ASSERT_TRUE(wTile);
  EXPECT_EQ(wTile->getAxis(), 0);
  auto *bReshape = llvm::dyn_cast<ReshapeNode>(bTile->getInput().getNode());
  ASSERT_TRUE(bReshape);
  auto *wReshape = llvm::dyn_cast<ReshapeNode>(wTile->getInput().getNode());
  ASSERT_TRUE(wReshape);
  auto *wPH = llvm::dyn_cast<Placeholder>(wReshape->getInput().getNode());
  EXPECT_EQ(wPH, mod.getPlaceholderByNameSlow("w"));
  auto *bPH = llvm::dyn_cast<Placeholder>(bReshape->getInput().getNode());
  EXPECT_EQ(bPH, mod.getPlaceholderByNameSlow("b"));

  // We have three inputs and one output.
  EXPECT_EQ(mod.getPlaceholders().size(), 4);
}
2330
2331 /// Test loading SparseLengthsWeightedSum8BitsRowwise. This is created as a
2332 /// RowwiseQuantizedSparseLengthsWeightedSumNode. The following inputs/outputs
2333 /// are used/expected for this test. Note that the DATA input is
2334 /// rowwise-quantized in the init_net proto. Scales/offsets are loaded in a
2335 /// separate tensor scales_bias. The C2 loader will copy the scales/offsets into
2336 /// separate Constants for use by RowwiseQuantizedSparseLengthsWeightedSumNode.
2337 /// DATA = [[2.0, -0.5, 13]]
2338 /// WEIGHTS = [3, 1, 0, 0, 0, 0, 2, -0.5]
2339 /// INDICES = [1, 0, 2, 0, 1, 2, 2, 0]
2340 /// LENGTHS = [3, 0, 3, 2]
2341 /// OUTPUT = [[0.5, 0, 0, 25]]
TEST_F(Caffe2ImporterTest, SparseLengthsWeightedSum8BitsRowwise) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "rowwise_quantized_sparse_lengths_weighted_sum_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "rowwise_quantized_sparse_lengths_weighted_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  // indices picks 8 rows; lengths partitions them into 4 segments.
  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {4});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  // Values match the INDICES/LENGTHS rows of the comment above this test.
  bindings.allocate(indices)->getHandle<int64_t>() = {
      1, 0, 2, 0, 1, 2, 2, 0,
  };
  bindings.allocate(lengths)->getHandle<int32_t>() = {
      3,
      0,
      3,
      2,
  };

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLWS and 1 save.
  EXPECT_EQ(F->getNodes().size(), 2);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  RowwiseQuantizedSparseLengthsWeightedSumNode *RWQSLWS =
      llvm::dyn_cast<RowwiseQuantizedSparseLengthsWeightedSumNode>(
          saveNode->getInput().getNode());
  ASSERT_TRUE(RWQSLWS);
  // Check that the weights input is a Constant node.
  Constant *weights = llvm::dyn_cast<Constant>(RWQSLWS->getWeights().getNode());
  ASSERT_TRUE(weights);

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 4 constants: data, scales, offsets, and weights. Originally fused
  // data is no longer used and is removed by loader.
  EXPECT_EQ(mod.getConstants().size(), 4);

  EE.compile(CompilationMode::Infer);
  // Allocate any remaining placeholders (e.g. the save output) before running.
  bindings.allocate(mod.getPlaceholders());

  // The constant count is unchanged post-compile: the fused data Constant was
  // already dropped at load time, so DCE has nothing further to remove here.
  EXPECT_EQ(mod.getConstants().size(), 4);

  EE.run(bindings);

  // Compare against the OUTPUT row of the comment above this test, with a
  // loose tolerance to absorb the 8-bit rowwise quantization error.
  Tensor &result = *bindings.get(output);
  Tensor expected(ElemKind::FloatTy, {4, 1});
  expected.getHandle() = {
      0.5,
      0,
      0,
      25,
  };

  EXPECT_TRUE(expected.isEqual(result, 0.03f));
}
2428
2429 /// Test loading SparseLengthsSum8BitsRowwise. This is created as a
2430 /// RowwiseQuantizedSparseLengthsWeightedSumNode. The following inputs/outputs
2431 /// are used/expected for this test. Note that the DATA input is
2432 /// rowwise-quantized in the init_net proto. Scales/offsets are loaded in a
2433 /// separate tensor scales_bias. The C2 loader will copy the scales/offsets into
2434 /// separate Constants for use by RowwiseQuantizedSparseLengthsSumNode.
2435 /// DATA = [
2436 /// [1.0, 1.2],
2437 /// [2.3, 3.4],
2438 /// [4.5, 5.7],
2439 /// ]
2440 /// INDICES = [2, 0, 1, 2, 0, 0, 0, 0]
2441 /// LENGTHS = [2, 0, 2, 1, 3]
2442 /// OUTPUT = [
2443 /// [5.5, 6.9],
2444 /// [0.0, 0.0],
2445 /// [6.8, 9.1],
2446 /// [1.0, 1.2],
2447 /// [3.0, 3.6],
2448 /// ]
TEST_F(Caffe2ImporterTest, SparseLengthsSum8BitsRowwise) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  // Predict net references DATA that is rowwise-quantized in the init net;
  // scales/offsets live in a separate scales_bias tensor (see doc above).
  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/"
      "rowwise_quantized_sparse_lengths_sum_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/"
      "rowwise_quantized_sparse_lengths_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  // Only indices/lengths are runtime inputs; DATA comes from the init net.
  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {5});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  bindings.allocate(indices)->getHandle<int64_t>() = {
      2, 0, 1, 2, 0, 0, 0, 0,
  };
  bindings.allocate(lengths)->getHandle<int32_t>() = {
      2, 0, 2, 1, 3,
  };

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLWS (which implements SLS), 1 Splat for the weights, and 1 save.
  EXPECT_EQ(F->getNodes().size(), 3);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  RowwiseQuantizedSparseLengthsWeightedSumNode *RWQSLS =
      llvm::dyn_cast<RowwiseQuantizedSparseLengthsWeightedSumNode>(
          saveNode->getInput().getNode());
  ASSERT_TRUE(RWQSLS);
  // The plain-sum form is expressed as a weighted sum with all-ones weights.
  SplatNode *splatNode =
      llvm::dyn_cast<SplatNode>(RWQSLS->getWeights().getNode());
  ASSERT_TRUE(splatNode);
  EXPECT_EQ(splatNode->getValue(), 1.0f);

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 3 constants: data, scales, and offsets. Originally fused data is
  // no longer used and is removed by loader.
  EXPECT_EQ(mod.getConstants().size(), 3);

  EE.compile(CompilationMode::Infer);
  bindings.allocate(mod.getPlaceholders());

  // Post compile, DCE should have gotten rid of the originally fused data
  // Constant, as it is no longer used.
  EXPECT_EQ(mod.getConstants().size(), 3);

  EE.run(bindings);

  // Compare against the hand-computed OUTPUT from the doc comment, with a
  // loose tolerance to absorb rowwise-quantization error.
  Tensor &result = *bindings.get(output);
  Tensor expected(ElemKind::FloatTy, {5, 2});
  expected.getHandle() = {
      5.5f, 6.9f, 0.0f, 0.0f, 6.8f, 9.1f, 1.0f, 1.2f, 3.0f, 3.6f,
  };

  EXPECT_TRUE(expected.isEqual(result, 0.02f));
}
2528
2529 /// Test loading SparseLengthsWeightedSumFused8BitRowwise. This is created as a
2530 /// RowwiseQuantizedSparseLengthsWeightedSumNode. The following inputs/outputs
2531 /// are used/expected for this test. Note that the DATA input is
2532 /// rowwise-quantized in the init_net proto.
2533 /// DATA = [[2.0, -0.5, 13]]
2534 /// WEIGHTS = [3, 1, 0, 0, 0, 0, 2, -0.5]
2535 /// INDICES = [1, 0, 2, 0, 1, 2, 2, 0]
2536 /// LENGTHS = [3, 0, 3, 2]
2537 /// OUTPUT = [[0.5, 0, 0, 25]]
/// Helper for the SparseLengthsWeightedSumFused8BitRowwise tests: loads the
/// fused rowwise-quantized weighted-sum proto, verifies graph structure, runs
/// it, and checks the numeric result. \p avgLength selects the proto variant:
/// NaN loads the net without the avg_length attribute; otherwise the
/// avg_length variant is loaded and the loaded value must equal \p avgLength.
static void testFRWQSLWS(float avgLength) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  // Select predict net based on whether an avg_length attribute is expected.
  std::string NetDescFilename(
      std::isnan(avgLength) ? GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "fused_rowwise_quantized_sparse_lengths_weighted_sum_predict_net."
      "pbtxt"
          : GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "fused_rowwise_quantized_sparse_lengths_weighted_sum_avg_length_"
      "predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "fused_rowwise_quantized_sparse_lengths_weighted_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  // Runtime inputs; DATA and WEIGHTS come from the init net.
  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {4});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  bindings.allocate(indices)->getHandle<int64_t>() = {
      1, 0, 2, 0, 1, 2, 2, 0,
  };
  bindings.allocate(lengths)->getHandle<int32_t>() = {
      3,
      0,
      3,
      2,
  };

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLWS and 1 save.
  EXPECT_EQ(F->getNodes().size(), 2);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  FusedRowwiseQuantizedSparseLengthsWeightedSumNode *FRWQSLWS =
      llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsWeightedSumNode>(
          saveNode->getInput().getNode());
  ASSERT_TRUE(FRWQSLWS);
  // avg_length should round-trip through the loader (NaN when absent).
  if (std::isnan(avgLength)) {
    EXPECT_TRUE(std::isnan(FRWQSLWS->getAvgLength()));
  } else {
    EXPECT_EQ(FRWQSLWS->getAvgLength(), avgLength);
  }
  // Check that the weights input is a Constant node.
  Constant *weights =
      llvm::dyn_cast<Constant>(FRWQSLWS->getWeights().getNode());
  ASSERT_TRUE(weights);
  // Check that the data input is a Constant node with expected ElemKind.
  Constant *data = llvm::dyn_cast<Constant>(FRWQSLWS->getData().getNode());
  ASSERT_TRUE(data);
  EXPECT_TRUE(data->getElementType() == ElemKind::UInt8FusedQTy);

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 2 constants: data and weights.
  EXPECT_EQ(mod.getConstants().size(), 2);

  EE.compile(CompilationMode::Infer);
  bindings.allocate(mod.getPlaceholders());

  EE.run(bindings);

  // Compare against the hand-computed OUTPUT in the doc comment; tolerance
  // absorbs fused rowwise-quantization error.
  Tensor &result = *bindings.get(output);
  Tensor expected(ElemKind::FloatTy, {4, 1});
  expected.getHandle() = {
      0.5,
      0,
      0,
      25,
  };

  EXPECT_TRUE(expected.isEqual(result, 0.02f));
}
2634
TEST_F(Caffe2ImporterTest, SparseLengthsWeightedSumFused8BitRowwise) {
  // NAN selects the proto variant without an avg_length attribute.
  testFRWQSLWS(NAN);
}
2638
TEST_F(Caffe2ImporterTest, SparseLengthsWeightedSumFused8BitRowwiseAvgLength) {
  // Loads the avg_length proto variant and expects avg_length == 5.0.
  testFRWQSLWS(5.0f);
}
2642
2643 /// Test loading SparseLengthsSumFused8BitRowwise. This is created as a
2644 /// RowwiseQuantizedSparseLengthsWeightedSumNode. The following inputs/outputs
2645 /// are used/expected for this test. Note that the DATA input is
2646 /// rowwise-quantized in the init_net proto.
2647 /// DATA = [
2648 /// [1.0, 1.2],
2649 /// [2.3, 3.4],
2650 /// [4.5, 5.7],
2651 /// ]
2652 /// INDICES = [2, 0, 1, 2, 0, 0, 0, 0]
2653 /// LENGTHS = [2, 0, 2, 1, 3]
2654 /// OUTPUT = [
2655 /// [5.5, 6.9],
2656 /// [0.0, 0.0],
2657 /// [6.8, 9.1],
2658 /// [1.0, 1.2],
2659 /// [3.0, 3.6],
2660 /// ]
TEST_F(Caffe2ImporterTest, SparseLengthsSumFused8BitRowwise) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  // Predict net references DATA that is fused rowwise-quantized (scales and
  // offsets packed into each row) in the init net.
  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "fused_rowwise_quantized_sparse_lengths_sum_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "fused_rowwise_quantized_sparse_lengths_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  // Runtime inputs; DATA comes from the init net.
  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {5});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  bindings.allocate(indices)->getHandle<int64_t>() = {
      2, 0, 1, 2, 0, 0, 0, 0,
  };
  bindings.allocate(lengths)->getHandle<int32_t>() = {
      2, 0, 2, 1, 3,
  };

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLS and 1 save.
  EXPECT_EQ(F->getNodes().size(), 2);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  FusedRowwiseQuantizedSparseLengthsSumNode *FRWQSLS =
      llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsSumNode>(
          saveNode->getInput().getNode());
  ASSERT_TRUE(FRWQSLS);
  // Check that the data input is a Constant node with expected ElemKind.
  Constant *data = llvm::dyn_cast<Constant>(FRWQSLS->getData().getNode());
  ASSERT_TRUE(data);
  EXPECT_TRUE(data->getElementType() == ElemKind::UInt8FusedQTy);

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 1 constant: data.
  EXPECT_EQ(mod.getConstants().size(), 1);

  EE.compile(CompilationMode::Infer);
  bindings.allocate(mod.getPlaceholders());

  EE.run(bindings);

  // Compare against the hand-computed OUTPUT in the doc comment; tolerance
  // absorbs quantization error.
  Tensor &result = *bindings.get(output);
  Tensor expected(ElemKind::FloatTy, {5, 2});
  expected.getHandle() = {
      5.5f, 6.9f, 0.0f, 0.0f, 6.8f, 9.1f, 1.0f, 1.2f, 3.0f, 3.6f,
  };

  EXPECT_TRUE(expected.isEqual(result, 0.02f));
}
2737
2738 /// Test loading SparseLengthsSumFused8BitRowwise with all lookup lengths equal
2739 /// to one. This is created as a RowwiseQuantizedSparseLengthsWeightedSumNode
2740 /// with `AllLengthsOne=true`. The following inputs/outputs are used/expected
2741 /// for this test. Note that the DATA input is rowwise-quantized in the init_net
2742 /// proto.
2743 /// DATA = [
2744 /// [1.0, 1.2],
2745 /// [2.3, 3.4],
2746 /// [4.5, 5.7],
2747 /// ]
2748 /// INDICES = [2, 0, 1, 2, 0]
2749 /// LENGTHS = [1, 1, 1, 1, 1]
2750 /// OUTPUT = [
2751 /// [4.5, 5.7],
2752 /// [1.0, 1.2],
2753 /// [2.3, 3.4],
2754 /// [4.5, 5.7],
2755 /// [1.0, 1.2],
2756 /// ]
TEST_F(Caffe2ImporterTest, SparseLengthsSumFused8BitRowwiseAllLengthsOne) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  // The "_length1" predict net advertises that every lookup length is one,
  // which the loader should surface as LengthsMode::AllOne.
  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "fused_rowwise_quantized_sparse_lengths_sum_predict_net_length1.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "fused_rowwise_quantized_sparse_lengths_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  // Five lookups, one index each (all lengths are 1).
  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {5});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {5});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  bindings.allocate(indices)->getHandle<int64_t>() = {
      2, 0, 1, 2, 0,
  };
  bindings.allocate(lengths)->getHandle<int32_t>() = {
      1, 1, 1, 1, 1,
  };

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLS and 1 save.
  EXPECT_EQ(F->getNodes().size(), 2);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  FusedRowwiseQuantizedSparseLengthsSumNode *FRWQSLS =
      llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsSumNode>(
          saveNode->getInput().getNode());
  ASSERT_TRUE(FRWQSLS);
  // The loader must propagate the all-lengths-one hint onto the node.
  EXPECT_EQ(FRWQSLS->getLengthsMode(), LengthsMode::AllOne);
  // Check that the data input is a Constant node with expected ElemKind.
  Constant *data = llvm::dyn_cast<Constant>(FRWQSLS->getData().getNode());
  ASSERT_TRUE(data);
  EXPECT_TRUE(data->getElementType() == ElemKind::UInt8FusedQTy);

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 1 constant: data.
  EXPECT_EQ(mod.getConstants().size(), 1);

  EE.compile(CompilationMode::Infer);
  bindings.allocate(mod.getPlaceholders());

  EE.run(bindings);

  // With all lengths one, the output is just the gathered DATA rows.
  Tensor &result = *bindings.get(output);
  Tensor expected(ElemKind::FloatTy, {5, 2});
  expected.getHandle() = {
      4.5f, 5.7f, 1.0f, 1.2f, 2.3f, 3.4f, 4.5f, 5.7f, 1.0f, 1.2f,
  };

  EXPECT_TRUE(expected.isEqual(result, 0.02f));
}
2834
2835 /// Test loading SparseLengthsSumFused4BitRowwise.
TEST_F(Caffe2ImporterTest, SparseLengthsSumFused4BitRowwise) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  // DATA is 4-bit fused rowwise-quantized (with fp16 scale/offset per row) in
  // the init net.
  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "4bit_fused_rowwise_quantized_sparse_lengths_sum_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/"
      "4bit_fused_rowwise_quantized_sparse_lengths_sum_init_net.pbtxt");

  Placeholder *output, *indices, *lengths;
  PlaceholderBindings bindings;

  TypeRef indicesType = F->getParent()->uniqueType(ElemKind::Int64ITy, {8});
  TypeRef lengthsType = F->getParent()->uniqueType(ElemKind::Int32ITy, {5});

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"indices", "lengths"},
                               {indicesType, lengthsType}, *F);

    indices = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("indices")));
    lengths = llvm::dyn_cast<Placeholder>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("lengths")));
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
  }

  ASSERT_TRUE(indices);
  ASSERT_TRUE(lengths);

  // High level check on the content of the graph. We have 1 rowwise-quantized
  // SLS, 1 convertTo and 1 save. (This test only checks structure; it does
  // not compile or run the graph.)
  EXPECT_EQ(F->getNodes().size(), 3);
  SaveNode *saveNode = getSaveNodeFromDest(output);
  // The 4-bit SLS result is converted (e.g. from fp16) before being saved.
  ConvertToNode *C =
      llvm::dyn_cast<ConvertToNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(C);
  FusedRowwiseQuantizedSparseLengthsSumNode *FRWQSLS =
      llvm::dyn_cast<FusedRowwiseQuantizedSparseLengthsSumNode>(
          C->getInput().getNode());
  ASSERT_TRUE(FRWQSLS);
  // Check that the data input is a Constant node with expected ElemKind.
  Constant *data = llvm::dyn_cast<Constant>(FRWQSLS->getData().getNode());
  ASSERT_TRUE(data);
  EXPECT_TRUE(data->getElementType() == ElemKind::UInt4FusedFP16QTy);

  // Check the output dim: float result of shape {5, 10}.
  const auto out_node = saveNode->getOutput();
  EXPECT_EQ(out_node.getElementType(), ElemKind::FloatTy);
  const auto dims = out_node.dims();
  EXPECT_EQ(dims.size(), 2);
  EXPECT_EQ(dims[0], 5);
  EXPECT_EQ(dims[1], 10);

  // We have 3 placeholders: 1 for save, and then indices and lengths.
  EXPECT_EQ(mod.getPlaceholders().size(), 3);

  // We have 1 constant: data.
  EXPECT_EQ(mod.getConstants().size(), 1);
}
2903
2904 /// Load big enough model and validate node order.
TEST_F(Caffe2ImporterTest, validateNodeOrder) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");
  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/batch_box_cox_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  PlaceholderBindings bindings;

  // Input tensors.
  const dim_t kRows = 10;
  const dim_t kCols = 5;
  Tensor data(ElemKind::FloatTy, {kRows, kCols});
  Tensor lambda1(ElemKind::FloatTy, {kCols});
  Tensor lambda2(ElemKind::FloatTy, {kCols});
  Tensor O(ElemKind::FloatTy, {kRows, kCols});
  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"data", "lambda1", "lambda2"},
        {&data.getType(), &lambda1.getType(), &lambda2.getType()}, *F);
    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod,
                                  {"data", "lambda1", "lambda2"},
                                  {&data, &lambda1, &lambda2});
  }

  EXPECT_EQ(F->getNodes().size(), 2);
  // Make sure that nodes are sorted by name, so graph construction is
  // deterministic across loads.
  EXPECT_TRUE(std::is_sorted(
      F->getNodes().begin(), F->getNodes().end(),
      [](const Node &a, const Node &b) { return a.getName() < b.getName(); }));
}
2942
/// Test that an Int8ConvRelu C2 op is imported as conv + relu, wrapped in the
/// NCHW<->NHWC transposes the importer inserts.
TEST_F(Caffe2ImporterTest, importInt8ConvRelu) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/int8convrelu_pred_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/int8convrelu_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    // Quantized int8 input with dummy scale 1 / offset 0.
    Tensor data(ElemKind::Int8QTy, {1, 1, 3, 3}, 1, 0);
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"gpu_0/data_0"}, {&data.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());

    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
  }

  // High level check on the content of the graph. We should have
  // transpose => conv => relu => transpose => save
  EXPECT_EQ(F->getNodes().size(), 5);
  auto *saveNode = getSaveNodeFromDest(output);

  // Walk backwards from the save node and verify each operator in turn.
  auto *transNode1 =
      llvm::dyn_cast<TransposeNode>(saveNode->getInput().getNode());
  ASSERT_TRUE(transNode1);
  auto *reluNode = llvm::dyn_cast<ReluNode>(transNode1->getInput().getNode());
  ASSERT_TRUE(reluNode);
  auto *convNode =
      llvm::dyn_cast<ConvolutionNode>(reluNode->getInput().getNode());
  ASSERT_TRUE(convNode);
  auto *transNode2 =
      llvm::dyn_cast<TransposeNode>(convNode->getInput().getNode());
  ASSERT_TRUE(transNode2);

  // Only check that compilation succeeds; the output is not executed here.
  EE.compile(CompilationMode::Infer);
}
2987
/// Test that an Int8SumRelu C2 op is imported as add + relu, with the second
/// operand loaded as a Constant from the init net.
TEST_F(Caffe2ImporterTest, importInt8SumRelu) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/int8sumrelu_pred_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/int8sumrelu_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    // Quantized int8 input with dummy scale 1 / offset 0.
    Tensor data(ElemKind::Int8QTy, {4, 2}, 1, 0);
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                               {"gpu_0/data_0"}, {&data.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());

    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"gpu_0/data_0"}, {&data});
  }

  // High level check on the content of the graph. We should have
  // input-=> add => relu => save
  // const/
  EXPECT_EQ(F->getNodes().size(), 3);
  auto *save = getSaveNodeFromDest(output);

  // Walk backwards from the save node and verify each operator and operand.
  auto *relu = llvm::dyn_cast<ReluNode>(save->getInput().getNode());
  ASSERT_TRUE(relu);
  auto *add = llvm::dyn_cast<AddNode>(relu->getInput().getNode());
  ASSERT_TRUE(add);
  auto *input = llvm::dyn_cast<Placeholder>(add->getLHS().getNode());
  ASSERT_TRUE(input);
  auto *val = llvm::dyn_cast<Constant>(add->getRHS().getNode());
  ASSERT_TRUE(val);

  // Only check that compilation succeeds; the output is not executed here.
  EE.compile(CompilationMode::Infer);
}
3030
/// Test that the importer preserves C2 blob names: the output placeholder
/// keeps the proto name, and the node gets a uniqued "__1" suffix.
TEST_F(Caffe2ImporterTest, importNames) {
  std::string NetDescFilename(GLOW_DATA_PATH
                              "tests/models/caffe2Models/sigmoid.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
  ExecutionEngine EE;
  auto &mod = EE.getModule();
  auto *F = mod.createFunction("main");
  Tensor input(ElemKind::FloatTy, {6});
  Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename,
                             {"sigmoid_test_input"}, {&input.getType()}, *F);
  EXPECT_TRUE(mod.getPlaceholderByNameSlow("sigmoid_test_output"));
  EXPECT_TRUE(F->getNodeByName("sigmoid_test_output__1"));
}
3045
/// Test that a C2 Sqr op is imported as pow(input, splat(2)).
TEST_F(Caffe2ImporterTest, importSqr) {
  ExecutionEngine EE{};
  auto &mod = EE.getModule();
  Function *F = mod.createFunction("main");

  std::string NetDescFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/sqr_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *output;
  PlaceholderBindings bindings;

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  {
    Tensor data(ElemKind::FloatTy, {4, 2});
    Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
                               {&data.getType()}, *F);
    output = EXIT_ON_ERR(caffe2LD.getSingleOutput());

    bindings.allocate(mod.getPlaceholders());
    updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&data});
  }

  // High level check on the content of the graph. We should have
  // save(pow(input, splat(2)))
  EXPECT_EQ(F->getNodes().size(), 3);
  auto *save = getSaveNodeFromDest(output);
  ASSERT_TRUE(save);
  auto *pow = llvm::dyn_cast<PowNode>(save->getInput().getNode());
  ASSERT_TRUE(pow);
  auto *input = llvm::dyn_cast<Placeholder>(pow->getLHS().getNode());
  ASSERT_TRUE(input);
  // The exponent is materialized as a Splat of 2.
  auto *splat = llvm::dyn_cast<SplatNode>(pow->getRHS().getNode());
  ASSERT_TRUE(splat);
  EXPECT_EQ(splat->getValue(), 2);

  // Only check that compilation succeeds; the output is not executed here.
  EE.compile(CompilationMode::Infer);
}
3086
3087 /// \returns whether \p val is found in \p vec.
vecContainsVal(const std::vector<runtime::DeviceIDTy> & vec,runtime::DeviceIDTy val)3088 static bool vecContainsVal(const std::vector<runtime::DeviceIDTy> &vec,
3089 runtime::DeviceIDTy val) {
3090 return std::find(vec.begin(), vec.end(), val) != vec.end();
3091 }
3092
3093 /// Verify that different fill types are loaded with the correct types into
3094 /// their respective partitions specified in the C2 proto.
TEST_F(Caffe2ImporterTest, PrePartitionedTensorFillsTest) {
  // Three virtual devices so the three pre-partitioned functions can be
  // assigned distinct logical device IDs.
  ExecutionEngine EE("Interpreter", /* deviceMemory (16GB) */ 0x400000000,
                     /* ignoreUserDeviceConfig */ false, /* numDevices */ 3);
  auto &mod = EE.getModule();

  std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/pre_partitioned_fill_test_predict_net.pbtxt");
  std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/fill_test_init_net.pbtxt");

  // One Constant per fill op kind exercised by the init net.
  Constant *tensorFillFloat, *tensorIntFill, *tensorInt64Fill,
      *tensorStringToUInt8Fill;

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  runtime::PrePartitionedConfig PPC;
  {
    // Loaded protos must have at least one external output, so load an unused
    // output and type to satisfy it. It is named unused_output in
    // empty_predict_net.pbtxt.
    Type unusedTy = Type(ElemKind::FloatTy, {4});
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename,
        {"tensor_fill_float_eq", "tensor_int_fill_eq", "tensor_int64_fill_eq",
         "tensor_string_to_uint8_fill_eq"},
        {&unusedTy, &unusedTy, &unusedTy, &unusedTy}, mod, "main", &PPC);
    tensorFillFloat = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_fill_float")));
    tensorIntFill = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_int_fill")));
    tensorInt64Fill = llvm::dyn_cast<Constant>(
        EXIT_ON_ERR(caffe2LD.getNodeValueByName("tensor_int64_fill")));
    tensorStringToUInt8Fill = llvm::dyn_cast<Constant>(EXIT_ON_ERR(
        caffe2LD.getNodeValueByName("tensor_string_to_uint8_fill")));
  }

  ASSERT_EQ(mod.getFunctions().size(), 3);
  Function *P0 = nullptr, *P1 = nullptr, *P2 = nullptr;
  for (size_t i = 0, e = PPC.funcs.size(); i < e; i++) {
    // Find the expected Function, and check that the logical device IDs were
    // correctly loaded.
    Function *F = PPC.funcs[i];
    if (F->getName() == "main_p0") {
      P0 = F;
      ASSERT_EQ(PPC.logicalIDs[i].size(), 2);
      EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 0));
      EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
    } else if (F->getName() == "main_p1") {
      P1 = F;
      ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
      EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 1));
    } else if (F->getName() == "main_p2") {
      P2 = F;
      // Bug fix: these checks used to live after FAIL() in the else branch
      // below, where they were unreachable (FAIL() returns from the test).
      // They verify the logical device ID of the main_p2 partition.
      ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
      EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
    } else {
      FAIL() << "Unknown Function found.";
    }

    // Check that the function was also found in the module.
    auto &modFuns = mod.getFunctions();
    ASSERT_NE(std::find(modFuns.begin(), modFuns.end(), F), modFuns.end());
  }
  ASSERT_TRUE(P0);
  ASSERT_TRUE(P1);
  ASSERT_TRUE(P2);

  ASSERT_TRUE(tensorFillFloat);
  ASSERT_TRUE(tensorIntFill);
  ASSERT_TRUE(tensorInt64Fill);
  ASSERT_TRUE(tensorStringToUInt8Fill);

  // Note: Only user is a no-op Reshape, which is fed into a Save.
  ASSERT_EQ(tensorFillFloat->getNumUsers(), 1);
  ASSERT_EQ(tensorIntFill->getNumUsers(), 1);
  ASSERT_EQ(tensorInt64Fill->getNumUsers(), 1);
  ASSERT_EQ(tensorStringToUInt8Fill->getNumUsers(), 1);

  // Check that the parent Functions of the Reshapes match expected partitions.
  EXPECT_EQ(tensorFillFloat->getUsers().front().getUser()->getParent(), P0);
  EXPECT_EQ(tensorIntFill->getUsers().front().getUser()->getParent(), P1);
  EXPECT_EQ(tensorInt64Fill->getUsers().front().getUser()->getParent(), P2);
  EXPECT_EQ(tensorStringToUInt8Fill->getUsers().front().getUser()->getParent(),
            P0);

  // All fills in fill_test_init_net.pbtxt use shape {2, 2}.
  const std::vector<dim_t> expectedDims = {2, 2};
  ASSERT_TRUE(tensorFillFloat->dims().equals(expectedDims));
  ASSERT_TRUE(tensorIntFill->dims().equals(expectedDims));
  ASSERT_TRUE(tensorInt64Fill->dims().equals(expectedDims));
  ASSERT_TRUE(tensorStringToUInt8Fill->dims().equals(expectedDims));

  auto tensorFillFloatH = tensorFillFloat->getPayload().getHandle<float>();
  auto tensorIntFillH = tensorIntFill->getPayload().getHandle<int32_t>();
  auto tensorInt64FillH = tensorInt64Fill->getPayload().getHandle<int64_t>();
  // We load GivenTensorByteStringToUInt8Fill as UInt8QTy with dummy
  // scale/offset for now, because it's only used for rowwise-quantized tensors.
  auto tensorStringToUInt8FillH =
      tensorStringToUInt8Fill->getPayload().getHandle<uint8_t>();

  // All fills in fill_test_init_net.pbtxt are set to 0 through 3.
  for (size_t i = 0; i < 4; i++) {
    EXPECT_FLOAT_EQ(tensorFillFloatH.raw(i), (float)i);
    EXPECT_EQ(tensorIntFillH.raw(i), (int32_t)i);
    EXPECT_EQ(tensorInt64FillH.raw(i), (int64_t)i);
    EXPECT_EQ(tensorStringToUInt8FillH.raw(i), (uint8_t)(i + 128));
  }

  // Compile and run the pre-partitioned config end-to-end to make sure it is
  // executable as loaded.
  CompilationContext cctx;
  cctx.prepartitionedConfig = &PPC;
  EE.compile(cctx);
  PlaceholderBindings bindings;
  bindings.allocate(mod.getPlaceholders());
  EE.run(bindings);
}
3211
/// Verify that multiple ops loaded into different pre-partitioned Functions
/// with a non-trivial dependence between them works correctly.
/// Note: DAG of the partitions looks like: F0 -> F1
///                                           \   |
///                                            v  v
///                                             F2
TEST_F(Caffe2ImporterTest, PrePartitionedMultiOpTest) {
  // Three devices so every logical device ID referenced by the
  // pre-partitioned proto (0, 1, 2) can map to a distinct device.
  ExecutionEngine EE("Interpreter", /* deviceMemory (16GB) */ 0x400000000,
                     /* ignoreUserDeviceConfig */ false, /* numDevices */ 3);
  auto &mod = EE.getModule();

  const std::string NetDescFilename(
      GLOW_DATA_PATH
      "tests/models/caffe2Models/pre_partitioned_multi_op_predict_net.pbtxt");
  const std::string NetWeightFilename(
      GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");

  Placeholder *outputPH;
  Tensor *resultPartitionedT;
  // Separate bindings for the unpartitioned (U) and partitioned (P) runs.
  PlaceholderBindings bindingsU;
  PlaceholderBindings bindingsP;

  // Destroy the loader after the graph is loaded since the following execution
  // will not depend on anything from the loader.
  runtime::PrePartitionedConfig PPC;
  Tensor mmIn0T(ElemKind::FloatTy, {10, 10});
  Tensor mmIn1T(ElemKind::FloatTy, {10, 10});
  Tensor addInT(ElemKind::FloatTy, {10, 10});
  mmIn0T.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  mmIn1T.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  addInT.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
  Placeholder *mmIn0P = nullptr, *mmIn1P = nullptr, *addInP = nullptr;
  {
    // Loading with a PrePartitionedConfig pointer makes the loader create one
    // Function per partition in the proto instead of a single "main".
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"mm0_in", "mm1_in", "add_in"},
        {&mmIn0T.getType(), &mmIn1T.getType(), &addInT.getType()}, mod, "main",
        &PPC);
    outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    // Look up the input Placeholders by their proto names so we can bind the
    // same random tensors in both runs below.
    NodeValue mmIn0NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn0NV, caffe2LD.getNodeValueByName("mm0_in"));
    mmIn0P = llvm::dyn_cast<Placeholder>(mmIn0NV);
    NodeValue mmIn1NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn1NV, caffe2LD.getNodeValueByName("mm1_in"));
    mmIn1P = llvm::dyn_cast<Placeholder>(mmIn1NV);
    NodeValue addInNV;
    ASSIGN_VALUE_OR_FAIL_TEST(addInNV, caffe2LD.getNodeValueByName("add_in"));
    addInP = llvm::dyn_cast<Placeholder>(addInNV);
  }

  // First we are going to make sure the structure of the pre-partitioned Module
  // is set up as expected, and run it with random inputs to get some results.
  {
    ASSERT_TRUE(mmIn0P);
    ASSERT_TRUE(mmIn1P);
    ASSERT_TRUE(addInP);

    // One Function per partition: main_p0, main_p1, main_p2.
    ASSERT_EQ(mod.getFunctions().size(), 3);
    Function *P0 = nullptr, *P1 = nullptr, *P2 = nullptr;
    for (size_t i = 0, e = PPC.funcs.size(); i < e; i++) {
      // Find the expected Function, and check that the logical device IDs were
      // correctly loaded.
      Function *F = PPC.funcs[i];
      if (F->getName() == "main_p0") {
        P0 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 0);
      } else if (F->getName() == "main_p1") {
        // main_p1 is replicated onto two logical devices (0 and 1).
        P1 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 2);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 0));
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 1));
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 0);
      } else if (F->getName() == "main_p2") {
        // main_p2 carries backend-specific options from the proto; verify
        // they all made it through the loader intact.
        P2 = F;
        ASSERT_EQ(PPC.logicalIDs[i].size(), 1);
        EXPECT_TRUE(vecContainsVal(PPC.logicalIDs[i], 2));
        EXPECT_EQ(PPC.backendSpecificOpts[i].size(), 3);
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendA_opt1"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendA_opt1"), "val1");
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendA_opt2"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendA_opt2"), "val2");
        ASSERT_TRUE(PPC.backendSpecificOpts[i].count("BackendB_opt3"));
        EXPECT_EQ(PPC.backendSpecificOpts[i].at("BackendB_opt3"), "val3");
      } else {
        FAIL() << "Unknown Function found.";
      }

      // Check that the function was also found in the module.
      auto &modFuns = mod.getFunctions();
      ASSERT_NE(std::find(modFuns.begin(), modFuns.end(), F), modFuns.end());
    }
    ASSERT_TRUE(P0);
    ASSERT_TRUE(P1);
    ASSERT_TRUE(P2);

    // Verify P0: final output is Sub(Mul(mm0_in, <P1's MatMul result>),
    // <P2's Add result>), with the cross-partition values arriving through
    // intermediate Placeholders.
    auto *finalSave = getSaveNodeFromDest(outputPH);
    ASSERT_TRUE(finalSave);
    EXPECT_EQ(finalSave->getParent(), P0);
    SubNode *sub = llvm::dyn_cast<SubNode>(finalSave->getInput());
    ASSERT_TRUE(sub);
    Placeholder *intermedAddOut = llvm::dyn_cast<Placeholder>(sub->getRHS());
    ASSERT_TRUE(intermedAddOut);
    MulNode *mul = llvm::dyn_cast<MulNode>(sub->getLHS());
    ASSERT_TRUE(mul);
    Placeholder *intermedMMOut = llvm::dyn_cast<Placeholder>(mul->getRHS());
    ASSERT_TRUE(intermedMMOut);
    Placeholder *mmIn0 = llvm::dyn_cast<Placeholder>(mul->getLHS());
    ASSERT_TRUE(mmIn0);

    // Verify P2: it consumes P1's MatMul output Placeholder via an Add with
    // add_in, and saves the result into the intermediate read by P0. There
    // must be exactly one user of the intermediate inside P2.
    Node *userFromP2 = nullptr;
    for (auto &U : intermedAddOut->getUsers()) {
      if (U.getUser()->getParent() == P2) {
        ASSERT_FALSE(userFromP2);
        userFromP2 = U.getUser();
      }
    }
    ASSERT_TRUE(userFromP2);
    SaveNode *saveIntermedP2Out = llvm::dyn_cast<SaveNode>(userFromP2);
    ASSERT_TRUE(saveIntermedP2Out);
    AddNode *add = llvm::dyn_cast<AddNode>(saveIntermedP2Out->getInput());
    ASSERT_TRUE(add);
    Placeholder *addIn = llvm::dyn_cast<Placeholder>(add->getRHS());
    ASSERT_TRUE(addIn);
    EXPECT_EQ(add->getLHS().getNode(), intermedMMOut);

    // Verify P1: MatMul(mm0_in, mm1_in) saved into the intermediate
    // Placeholder consumed by both P0 and P2.
    Node *userFromP1 = nullptr;
    for (auto &U : intermedMMOut->getUsers()) {
      if (U.getUser()->getParent() == P1) {
        ASSERT_FALSE(userFromP1);
        userFromP1 = U.getUser();
      }
    }
    ASSERT_TRUE(userFromP1);
    SaveNode *saveIntermedP1Out = llvm::dyn_cast<SaveNode>(userFromP1);
    ASSERT_TRUE(saveIntermedP1Out);
    MatMulNode *matMul =
        llvm::dyn_cast<MatMulNode>(saveIntermedP1Out->getInput());
    ASSERT_TRUE(matMul);
    EXPECT_EQ(matMul->getLHS().getNode(), mmIn0);
    Placeholder *matMulIn = llvm::dyn_cast<Placeholder>(matMul->getRHS());
    ASSERT_TRUE(matMulIn);

    // Now that we've verifed the shape of the Module, run it and keep around
    // the pointer to the result.
    CompilationContext cctx;
    cctx.prepartitionedConfig = &PPC;
    EE.compile(cctx);
    bindingsP.insert(mmIn0P, mmIn0T.getUnowned());
    bindingsP.insert(mmIn1P, mmIn1T.getUnowned());
    bindingsP.insert(addInP, addInT.getUnowned());
    bindingsP.allocate(mod.getPlaceholders());
    EE.run(bindingsP);

    resultPartitionedT = bindingsP.get(outputPH);
  }

  // Now that we have the model result from pre-partitioned execution, execute
  // the model ignoring the pre-partitioning and bitwise compare results.
  // Re-setting the backend name clears the Module so we can reload fresh.
  EE.setBackendName(EE.getBackendName());

  Module &modU = EE.getModule();
  {
    // Load into a single Function this time (no PrePartitionedConfig).
    Function *F = modU.createFunction("main");
    Caffe2ModelLoader caffe2LD(
        NetDescFilename, NetWeightFilename, {"mm0_in", "mm1_in", "add_in"},
        {&mmIn0T.getType(), &mmIn1T.getType(), &addInT.getType()}, *F);
    outputPH = EXIT_ON_ERR(caffe2LD.getSingleOutput());
    NodeValue mmIn0NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn0NV, caffe2LD.getNodeValueByName("mm0_in"));
    mmIn0P = llvm::dyn_cast<Placeholder>(mmIn0NV);
    NodeValue mmIn1NV;
    ASSIGN_VALUE_OR_FAIL_TEST(mmIn1NV, caffe2LD.getNodeValueByName("mm1_in"));
    mmIn1P = llvm::dyn_cast<Placeholder>(mmIn1NV);
    NodeValue addInNV;
    ASSIGN_VALUE_OR_FAIL_TEST(addInNV, caffe2LD.getNodeValueByName("add_in"));
    addInP = llvm::dyn_cast<Placeholder>(addInNV);
  }

  Tensor *resultUnpartitonedT;

  {
    ASSERT_TRUE(mmIn0P);
    ASSERT_TRUE(mmIn1P);
    ASSERT_TRUE(addInP);
    ASSERT_EQ(modU.getFunctions().size(), 1);

    // Run the unpartitioned model with the same inputs as before.
    EE.compile(CompilationMode::Infer);
    bindingsU.insert(mmIn0P, mmIn0T.getUnowned());
    bindingsU.insert(mmIn1P, mmIn1T.getUnowned());
    bindingsU.insert(addInP, addInT.getUnowned());
    bindingsU.allocate(modU.getPlaceholders());
    EE.run(bindingsU);

    resultUnpartitonedT = bindingsU.get(outputPH);
  }

  // Partitioned and unpartitioned execution must agree bit-for-bit.
  EXPECT_TRUE(resultPartitionedT->isBitwiseEqual(*resultUnpartitonedT,
                                                 /* verbose */ true));
}
3415
3416 /// Test importing a Caffe2 LayerNorm without weights and bias provided but with
3417 /// epsilon or axis.
TEST_F(Caffe2ImporterTest,importLayerNormNoWeightBias)3418 TEST_F(Caffe2ImporterTest, importLayerNormNoWeightBias) {
3419 ExecutionEngine EE{};
3420 auto &mod = EE.getModule();
3421 Function *F = mod.createFunction("main");
3422
3423 std::string NetDescFilename(
3424 GLOW_DATA_PATH "tests/models/caffe2Models/layernorm_pred_net.pbtxt");
3425 std::string NetWeightFilename(
3426 GLOW_DATA_PATH "tests/models/caffe2Models/empty_init_net.pbtxt");
3427
3428 Placeholder *output;
3429 PlaceholderBindings bindings;
3430
3431 const ShapeVector inShape({4, 2, 5, 5});
3432
3433 // Destroy the loader after the graph is loaded since the following execution
3434 // will not depend on anything from the loader.
3435 {
3436 Tensor data(ElemKind::FloatTy, inShape);
3437 data.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
3438 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
3439 {&data.getType()}, *F);
3440 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
3441
3442 bindings.allocate(mod.getPlaceholders());
3443 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&data});
3444 }
3445
3446 // High level check on the content of the graph. We should have
3447 // {Placeholder, Splat, Splat} => LayerNorm => Save
3448 EXPECT_EQ(F->getNodes().size(), 4);
3449 SaveNode *save = getSaveNodeFromDest(output);
3450
3451 auto *LN = llvm::dyn_cast<LayerNormalizationNode>(save->getInput().getNode());
3452 ASSERT_TRUE(LN);
3453 EXPECT_EQ(LN->getEpsilon(), 0.05f);
3454 EXPECT_TRUE(LN->getInput().dims().equals(inShape));
3455 EXPECT_TRUE(LN->getResult().dims().equals(inShape));
3456
3457 auto *scale = llvm::dyn_cast<SplatNode>(LN->getScale().getNode());
3458 ASSERT_TRUE(scale);
3459 EXPECT_EQ(scale->getValue(), 1.0f);
3460
3461 auto *bias = llvm::dyn_cast<SplatNode>(LN->getBias().getNode());
3462 ASSERT_TRUE(bias);
3463 EXPECT_EQ(bias->getValue(), 0.0f);
3464
3465 // Axis is 2, so check shape with second and third dims of inShape.
3466 EXPECT_TRUE(scale->getResult().dims().equals({inShape[2], inShape[3]}));
3467 EXPECT_TRUE(bias->getResult().dims().equals({inShape[2], inShape[3]}));
3468
3469 EE.compile(CompilationMode::Infer);
3470 EE.run(bindings);
3471 }
3472
3473 /// Test importing a Caffe2 LayerNorm with weights and bias provided but no
3474 /// epsilon or axis.
TEST_F(Caffe2ImporterTest,importLayerNormWithWeightBias)3475 TEST_F(Caffe2ImporterTest, importLayerNormWithWeightBias) {
3476 ExecutionEngine EE{};
3477 auto &mod = EE.getModule();
3478 Function *F = mod.createFunction("main");
3479
3480 std::string NetDescFilename(
3481 GLOW_DATA_PATH
3482 "tests/models/caffe2Models/layernorm_weight_bias_pred_net.pbtxt");
3483 std::string NetWeightFilename(
3484 GLOW_DATA_PATH
3485 "tests/models/caffe2Models/layernorm_weight_bias_init_net.pbtxt");
3486
3487 Placeholder *output;
3488 PlaceholderBindings bindings;
3489
3490 const ShapeVector inShape({5, 4, 3});
3491
3492 // Destroy the loader after the graph is loaded since the following execution
3493 // will not depend on anything from the loader.
3494 {
3495 Tensor data(ElemKind::FloatTy, inShape);
3496 data.getHandle().randomize(-3.0, 3.0, mod.getPRNG());
3497 Caffe2ModelLoader caffe2LD(NetDescFilename, NetWeightFilename, {"input"},
3498 {&data.getType()}, *F);
3499 output = EXIT_ON_ERR(caffe2LD.getSingleOutput());
3500
3501 bindings.allocate(mod.getPlaceholders());
3502 updateInputPlaceholdersByName(bindings, &mod, {"input"}, {&data});
3503 }
3504
3505 // High level check on the content of the graph. We should have
3506 // {Placeholder, Constant, Constant} => LayerNorm => Save
3507 EXPECT_EQ(F->getNodes().size(), 2);
3508 SaveNode *save = getSaveNodeFromDest(output);
3509
3510 auto *LN = llvm::dyn_cast<LayerNormalizationNode>(save->getInput().getNode());
3511 ASSERT_TRUE(LN);
3512 EXPECT_EQ(LN->getEpsilon(), 0.001f); // Caffe2 default.
3513 EXPECT_TRUE(LN->getInput().dims().equals(inShape));
3514 EXPECT_TRUE(LN->getResult().dims().equals(inShape));
3515
3516 auto *scale = llvm::dyn_cast<Constant>(LN->getScale().getNode());
3517 ASSERT_TRUE(scale);
3518
3519 auto *bias = llvm::dyn_cast<Constant>(LN->getBias().getNode());
3520 ASSERT_TRUE(bias);
3521
3522 // Default axis is 1 and it was unspecified in the input proto, so check shape
3523 // with first and second dims of inShape.
3524 EXPECT_TRUE(scale->getOutput().dims().equals({inShape[1], inShape[2]}));
3525 EXPECT_TRUE(bias->getOutput().dims().equals({inShape[1], inShape[2]}));
3526
3527 EE.compile(CompilationMode::Infer);
3528 EE.run(bindings);
3529 }
3530