/*******************************************************************************
* Copyright 2020 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

/// @example reduction.cpp
/// > Annotated version: @ref reduction_example_cpp
///
/// @page reduction_example_cpp_short
///
/// This C++ API example demonstrates how to create and execute a
/// [Reduction](@ref dev_guide_reduction) primitive.
///
/// @page reduction_example_cpp Reduction Primitive Example
/// @copydetails reduction_example_cpp_short
///
/// @include reduction.cpp

#include <algorithm>
#include <cmath>
#include <stdexcept>
#include <unordered_map>
#include <vector>

#include "example_utils.hpp"
#include "oneapi/dnnl/dnnl.hpp"

using namespace dnnl;

using tag = memory::format_tag;
using dt = memory::data_type;
39
void reduction_example(dnnl::engine::kind engine_kind) {

    // Create execution dnnl::engine.
    dnnl::engine engine(engine_kind, 0);

    // Create dnnl::stream.
    dnnl::stream engine_stream(engine);

    // Tensor dimensions.
    const memory::dim N = 3, // batch size
            IC = 3, // channels
            IH = 227, // tensor height
            IW = 227; // tensor width

    // Source (src) and destination (dst) tensor dimensions.
    memory::dims src_dims = {N, IC, IH, IW};
    memory::dims dst_dims = {1, IC, 1, 1};
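    // For the reduction primitive, each dst dimension must either match the
    // corresponding src dimension or be equal to 1; the dimensions set to 1
    // are the ones being reduced. Here batch, height, and width are reduced,
    // leaving one value per channel.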

    // Allocate buffers.
    std::vector<float> src_data(product(src_dims));
    std::vector<float> dst_data(product(dst_dims));

    // Initialize src tensor.
    std::generate(src_data.begin(), src_data.end(), []() {
        static int i = 0;
        return std::cos(i++ / 10.f);
    });

    // Create src and dst memory descriptors and memory objects.
    auto src_md = memory::desc(src_dims, dt::f32, tag::nchw);
    auto dst_md = memory::desc(dst_dims, dt::f32, tag::nchw);
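    // tag::nchw describes a plain (batch, channels, height, width) layout
    // with the width dimension innermost (contiguous) in memory.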

    auto src_mem = memory(src_md, engine);
    auto dst_mem = memory(dst_md, engine);

    // Write data to memory object's handle.
    write_to_dnnl_memory(src_data.data(), src_mem);

    // Create operation descriptor.
    auto reduction_d = reduction::desc(
            algorithm::reduction_sum, src_md, dst_md, 0.f, 0.f);
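    // The two trailing arguments are the p and eps parameters; they are used
    // only by the norm-based reduction algorithms and are ignored by
    // reduction_sum.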

    // Create primitive descriptor.
    auto reduction_pd = reduction::primitive_desc(reduction_d, engine);

    // Create the primitive.
    auto reduction_prim = reduction(reduction_pd);

    // Primitive arguments.
    std::unordered_map<int, memory> reduction_args;
    reduction_args.insert({DNNL_ARG_SRC, src_mem});
    reduction_args.insert({DNNL_ARG_DST, dst_mem});

    // Primitive execution: Reduction (Sum).
    reduction_prim.execute(engine_stream, reduction_args);

    // Wait for the computation to finish.
    engine_stream.wait();

    // Read data from memory object's handle.
    read_from_dnnl_memory(dst_data.data(), dst_mem);
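
    // Optional sanity check, as a minimal sketch: with reduction_sum and dst
    // dims of {1, IC, 1, 1}, dst_data[c] should hold the sum of src over the
    // batch, height, and width dimensions for channel c. The loose absolute
    // tolerance is an assumed value meant to absorb f32 accumulation error.
    for (memory::dim c = 0; c < IC; ++c) {
        double ref = 0.0;
        for (memory::dim n = 0; n < N; ++n)
            for (memory::dim h = 0; h < IH; ++h)
                for (memory::dim w = 0; w < IW; ++w)
                    ref += src_data[((n * IC + c) * IH + h) * IW + w];
        if (std::fabs(dst_data[c] - static_cast<float>(ref)) > 0.5f)
            throw std::logic_error("Unexpected reduction result.");
    }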
}

int main(int argc, char **argv) {
    return handle_example_errors(
            reduction_example, parse_engine_kind(argc, argv));
}