#include <numeric>
#include <vector>
#include <mkldnn_test_common.hpp>
#include <gtest/gtest.h>
#include <ideep.hpp>
#include <test_convolution_backward_data_common.hpp>

#include "test_ideep_common.hpp"

using namespace ideep;

template <typename data_t>
class convolution_backward_weights_test
  : public ::testing::TestWithParam<test_convolution_params_t> {
protected:
  virtual void SetUp() {}
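  // Tensor setup lives in TestCommon(); the test body invokes it inside its
  // expected-failure guard rather than relying on SetUp().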
  void TestCommon() {
    auto p = ::testing::TestWithParam<test_convolution_params_t>::GetParam();
    auto cd = p.sizes;

    auto data_type = data_traits<data_t>::data_type;

    tensor::descriptor src_desc({cd.mb, cd.ic, cd.ih, cd.iw}, data_type,
        static_cast<format>(p.formats.src_format));

    tensor::descriptor grady_desc({cd.mb, cd.oc, cd.oh, cd.ow}, data_type,
        static_cast<format>(p.formats.dst_format));

    src_.init(src_desc);
    grady_.init(grady_desc);

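    // Populate the source and output-gradient tensors with test data.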
    fill_data<data_t>(
        src_.get_size() / sizeof(data_t),
        reinterpret_cast<data_t *>(src_.get_data_handle()));

    fill_data<data_t>(
        grady_.get_size() / sizeof(data_t),
        reinterpret_cast<data_t *>(grady_.get_data_handle()));

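    // Adjust the right/bottom padding until the computed output size matches
    // the requested cd.oh x cd.ow.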
    padR_ = {cd.padh, cd.padw};
    for (int i = 0; i < 2; ++i) {
      if ((cd.ih - ((cd.kh - 1) * (cd.dilh + 1) + 1) + cd.padh + padR_[0])
          / cd.strh + 1 != cd.oh)
          ++padR_[0];
      if ((cd.iw - ((cd.kw - 1) * (cd.dilw + 1) + 1) + cd.padw + padR_[1])
          / cd.strw + 1 != cd.ow)
          ++padR_[1];
    }

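    // Weight-gradient dims use the grouped {g, oc/g, ic/g, kh, kw} layout
    // when the group count is greater than one.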
    gradw_dims_ = cd.ng > 1 ?
      tensor::dims {cd.ng, cd.oc / cd.ng, cd.ic / cd.ng, cd.kh, cd.kw } :
      tensor::dims {cd.oc, cd.ic, cd.kh, cd.kw};

    gradb_dims_ = {cd.oc};
  }

  tensor src_, grady_;
  tensor::dims gradw_dims_;
  tensor::dims gradb_dims_;
  tensor::dims padR_;
};

using convolution_test =
  convolution_backward_weights_test<float>;

TEST_P(convolution_test, TestCompute) {
  test_convolution_params_t p =
    ::testing::TestWithParam<test_convolution_params_t>::GetParam();
  test_convolution_sizes_t cd = p.sizes;

  auto gradw = make_output();
  auto gradb = make_output();
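  // Run backward-by-weights inside a lambda so catch_ideep_expected_failures
  // can handle parameter sets that are expected to fail.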
  auto test = [&]() {
    TestCommon();
    convolution_backward_weights::compute(src_, grady_,
        gradw_dims_, gradw, gradb,
        tensor::dims {cd.strh, cd.strw}, tensor::dims {cd.dilh, cd.dilw},
        tensor::dims {cd.padh, cd.padw}, padR_);
  };

  if (catch_ideep_expected_failures(test, p.expect_to_fail, p.expected_status))
    return;

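  // Check the primitive results against a naive reference implementation of
  // the weight and bias gradients.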
  tensor ref_gradw(gradw.get_descriptor());
  //tensor::descriptor gradb_desc ({grady_.get_dim(1)}, grady_.get_data_type());
  tensor ref_gradb(gradb.get_descriptor());
  compute_ref_conv_bwd_weights<float>(cd, src_, grady_, ref_gradw);
  compare_tensor<float>(ref_gradw, gradw);
  compute_ref_conv_bwd_bias<float>(cd, grady_, ref_gradb);
  compare_tensor<float>(ref_gradb, gradb);
}

#define FP32
#define DIRECTION_BACKWARD_WEIGHTS
#include "convolution_common.h"
// #include "dilated_convolution.h"