/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

/// @example pooling.cpp
/// > Annotated version: @ref pooling_example_cpp
///
/// @page pooling_example_cpp_short
///
/// This C++ API example demonstrates how to create and execute a
/// [Pooling](@ref dev_guide_pooling) primitive in forward training propagation
/// mode.
///
/// @page pooling_example_cpp Pooling Primitive Example
/// @copydetails pooling_example_cpp_short
///
/// @include pooling.cpp

31 #include <algorithm>
32 #include <cmath>
33 #include <iostream>
34 #include <string>
35 #include <vector>
36 
37 #include "example_utils.hpp"
38 #include "oneapi/dnnl/dnnl.hpp"
39 
40 using namespace dnnl;
41 
42 using tag = memory::format_tag;
43 using dt = memory::data_type;
44 
pooling_example(dnnl::engine::kind engine_kind)45 void pooling_example(dnnl::engine::kind engine_kind) {
46 
47     // Create execution dnnl::engine.
48     dnnl::engine engine(engine_kind, 0);
49 
50     // Create dnnl::stream.
51     dnnl::stream engine_stream(engine);
52 
53     // Tensor dimensions.
54     const memory::dim N = 3, // batch size
55             IC = 3, // input channels
56             IH = 27, // input tensor height
57             IW = 27, // input tensor width
58             KH = 11, // kernel height
59             KW = 11, // kernel width
60             PH_L = 0, // height padding: left
61             PH_R = 0, // height padding: right
62             PW_L = 0, // width padding: left
63             PW_R = 0, // width padding: right
64             SH = 4, // height-wise stride
65             SW = 4, // width-wise stride
66             DH = 1, // height-wise dilation
67             DW = 1; // width-wise dilation
68 
69     const memory::dim OH = (IH - ((KH - 1) * DH + KH) + PH_L + PH_R) / SH + 1;
70     const memory::dim OW = (IW - ((KW - 1) * DW + KW) + PW_L + PW_R) / SW + 1;
71 
72     // Source (src) and destination (dst) tensors dimensions.
73     memory::dims src_dims = {N, IC, IH, IW};
74     memory::dims dst_dims = {N, IC, OH, OW};
75 
76     // Kernel dimensions.
77     memory::dims kernel_dims = {KH, KW};
78 
79     // Strides, padding dimensions.
80     memory::dims strides_dims = {SH, SW};
81     memory::dims padding_dims_l = {PH_L, PW_L};
82     memory::dims padding_dims_r = {PH_R, PW_R};
83     memory::dims dilation = {DH, DW};
84 
85     // Allocate buffers.
86     std::vector<float> src_data(product(src_dims));
87     std::vector<float> dst_data(product(dst_dims));
88 
89     std::generate(src_data.begin(), src_data.end(), []() {
90         static int i = 0;
91         return std::cos(i++ / 10.f);
92     });
93 
94     // Create memory descriptors and memory objects for src and dst.
95     auto src_md = memory::desc(src_dims, dt::f32, tag::nchw);
96     auto src_mem = memory(src_md, engine);
97 
98     auto dst_md = memory::desc(dst_dims, dt::f32, tag::nchw);
99     auto dst_mem = memory(dst_md, engine);
100 
101     // Write data to memory object's handle.
102     write_to_dnnl_memory(src_data.data(), src_mem);
103 
104     // Create operation descriptor.
105     auto pooling_d = pooling_v2_forward::desc(prop_kind::forward_training,
106             algorithm::pooling_max, src_md, dst_md, strides_dims, kernel_dims,
107             dilation, padding_dims_l, padding_dims_r);
108 
109     // Create primitive descriptor.
110     auto pooling_pd = pooling_v2_forward::primitive_desc(pooling_d, engine);
111 
112     // Create workspace memory objects using memory descriptor created by the
113     // primitive descriptor.
114     // NOTE: Here, the workspace is required to save the indices where maximum
115     // was found, and is used in backward pooling to perform upsampling.
116     auto workspace_mem = memory(pooling_pd.workspace_desc(), engine);
117 
118     // Create the primitive.
119     auto pooling_prim = pooling_v2_forward(pooling_pd);
120 
121     // Primitive arguments. Set up in-place execution by assigning src as DST.
122     std::unordered_map<int, memory> pooling_args;
123     pooling_args.insert({DNNL_ARG_SRC, src_mem});
124     pooling_args.insert({DNNL_ARG_DST, dst_mem});
125     pooling_args.insert({DNNL_ARG_WORKSPACE, workspace_mem});
126 
127     // Primitive execution: pooling.
128     pooling_prim.execute(engine_stream, pooling_args);
129 
130     // Wait for the computation to finalize.
131     engine_stream.wait();
132 
133     // Read data from memory object's handle.
134     read_from_dnnl_memory(dst_data.data(), dst_mem);
135 }
136 
main(int argc,char ** argv)137 int main(int argc, char **argv) {
138     return handle_example_errors(
139             pooling_example, parse_engine_kind(argc, argv));
140 }
141