1 /*
2 * Licensed to the Apache Software Foundation (ASF) under one
3 * or more contributor license agreements. See the NOTICE file
4 * distributed with this work for additional information
5 * regarding copyright ownership. The ASF licenses this file
6 * to you under the Apache License, Version 2.0 (the
7 * "License"); you may not use this file except in compliance
8 * with the License. You may obtain a copy of the License at
9 *
10 * http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing,
13 * software distributed under the License is distributed on an
14 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
15 * KIND, either express or implied. See the License for the
16 * specific language governing permissions and limitations
17 * under the License.
18 */
19
20 /*!
21 * \file calibrate.cc
22 * \brief
23 */
24
25 #include <numeric>
26 #include "./calibrate-inl.h"
27
namespace mxnet {
namespace op {

// Register the operator's parameter struct (see calibrate-inl.h) with DMLC so
// num_quantized_bins can be parsed from the front-end attribute dictionary.
DMLC_REGISTER_PARAMETER(CalibrateEntropyParam);
32
33 // Given a discrete distribution (may have not been normalized to 1),
34 // smooth it by replacing zeros with eps multiplied by a scaling factor and taking the
35 // corresponding amount off the non-zero values.
SmoothDistribution(const std::vector<float> & p,const float eps=0.0001)36 std::vector<float> SmoothDistribution(const std::vector<float>& p, const float eps = 0.0001) {
37 std::vector<size_t> is_zeros(p.size());
38 std::vector<size_t> is_nonzeros(p.size());
39 {
40 auto it = p.begin();
41 std::generate(is_zeros.begin(), is_zeros.end(),
42 [&it]() { return static_cast<size_t>(*(it++) == 0.f); });
43 }
44 {
45 auto it = p.begin();
46 std::generate(is_nonzeros.begin(), is_nonzeros.end(),
47 [&it]() { return static_cast<size_t>(*(it++) != 0.f); });
48 }
49
50 size_t n_zeros = std::accumulate(is_zeros.begin(), is_zeros.end(), 0);
51 size_t n_nonzeros = p.size() - n_zeros;
52 if (!n_nonzeros) {
53 // The discrete probability distribution is malformed. All entries are 0.
54 return std::vector<float>();
55 }
56 float eps1 = eps * static_cast<float>(n_zeros) / static_cast<float>(n_nonzeros);
57 if (eps1 >= 1.0) return std::vector<float>();
58 auto ret = p;
59 for (size_t i = 0; i < p.size(); i++) {
60 ret[i] += eps * is_zeros[i] - eps1 * is_nonzeros[i];
61 }
62 return ret;
63 }
64
ComputeEntropy(std::vector<float> * p_ptr,std::vector<float> * q_ptr)65 static float ComputeEntropy(std::vector<float>* p_ptr, std::vector<float>* q_ptr) {
66 std::vector<float>& p = *p_ptr;
67 std::vector<float>& q = *q_ptr;
68 CHECK_EQ(p.size(), q.size());
69 float p_sum = std::accumulate(p.begin(), p.end(), 0.f);
70 float q_sum = std::accumulate(q.begin(), q.end(), 0.f);
71 for (auto& it : p) {
72 it = it / p_sum;
73 }
74
75 for (auto& it : q) {
76 it = it / q_sum;
77 }
78 float ret = 0;
79 for (size_t i = 0; i < p.size(); i++) {
80 CHECK(p[i] > 0 && q[i] > 0);
81 if (p[i] && q[i]) ret += p[i] * std::log(p[i] / q[i]);
82 }
83 return ret;
84 }
85
// Entropy calibration kernel (CPU): given an activation histogram and its bin
// edges, searches over symmetric clipping windows centered on the middle bin for
// the threshold that minimizes the KL divergence between the clipped
// distribution p and its num_quantized_bins-bin quantized approximation q.
// inputs[0]  = histogram counts, float32, size num_bins
// inputs[1]  = histogram bin edges, float32, size num_bins + 1
// outputs[0] = best threshold (scalar), outputs[1] = minimal divergence (scalar)
// NOTE(review): the edge lookup hist_edges_ptr[p_bin_idx_stop] reaches index
// num_bins + 1 when num_bins is even — this appears to assume the front end
// always supplies an odd num_bins; confirm against the caller.
void CalibrateComputeCPU(const nnvm::NodeAttrs& attrs, const OpContext& ctx,
                         const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req,
                         const std::vector<TBlob>& outputs) {
  const auto& param = nnvm::get<CalibrateEntropyParam>(attrs.parsed);
  const auto& hist = inputs[0];
  const auto& hist_ptr = hist.dptr<float>();
  const auto& hist_edges = inputs[1];
  const auto& hist_edges_ptr = hist_edges.dptr<float>();
  float* const out_threshold = outputs[0].dptr<float>();
  float* const out_divergence = outputs[1].dptr<float>();
  const auto num_bins = hist.Size();
  CHECK_EQ(num_bins + 1, hist_edges.Size());
  int num_quantized_bins = param.num_quantized_bins;

  const int zero_bin_idx = num_bins / 2;
  const int num_half_quantized_bins = num_quantized_bins / 2;
  // One candidate threshold (and divergence) per window half-width i in
  // [num_quantized_bins / 2, zero_bin_idx].
  std::vector<float> thresholds(num_bins / 2 + 1 - num_quantized_bins / 2, 0.f);
  std::vector<float> divergence(thresholds.size(), 0.f);
  // Each iteration writes only its own slot of thresholds/divergence and reads
  // shared data read-only, so the loop parallelizes without synchronization.
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
  for (index_t i = num_quantized_bins / 2; i <= zero_bin_idx; i++) {
    // Symmetric window of bins [p_bin_idx_start, p_bin_idx_stop) around the center.
    const size_t p_bin_idx_start = zero_bin_idx - i;
    const size_t p_bin_idx_stop = zero_bin_idx + i + 1;
    thresholds[i - num_half_quantized_bins] = hist_edges_ptr[p_bin_idx_stop];

    std::vector<size_t> sliced_nd_hist(p_bin_idx_stop - p_bin_idx_start);
    std::vector<float> p(p_bin_idx_stop - p_bin_idx_start);
    p[0] = 0;
    p.back() = 0;
    // Build p: the window's histogram with all out-of-window mass folded into
    // the first/last entries (outliers are clipped to the threshold).
    for (size_t j = 0; j < num_bins; j++) {
      if (j <= p_bin_idx_start) {
        p[0] += hist_ptr[j];
      } else if (j >= p_bin_idx_stop) {
        p.back() += hist_ptr[j];
      } else {
        sliced_nd_hist[j - p_bin_idx_start] = hist_ptr[j];
        p[j - p_bin_idx_start] = hist_ptr[j];
      }
    }
    // calculate how many bins should be merged to generate quantized distribution q
    const auto num_merged_bins = sliced_nd_hist.size() / num_quantized_bins;
    // merge hist into num_quantized_bins bins
    std::vector<float> quantized_bins(num_quantized_bins, 0);
    for (index_t j = 0; j < num_quantized_bins; j++) {
      const int start = j * num_merged_bins;
      const int stop = (j + 1) * num_merged_bins;
      quantized_bins[j] =
          std::accumulate(sliced_nd_hist.begin() + start, sliced_nd_hist.begin() + stop, 0);
    }
    // Remainder bins that did not divide evenly go into the last quantized bin.
    quantized_bins.back() += std::accumulate(
        sliced_nd_hist.begin() + static_cast<int>(num_quantized_bins * num_merged_bins),
        sliced_nd_hist.end(), 0);
    // expand quantized_bins into p.size bins
    std::vector<float> q(sliced_nd_hist.size(), 0);
    for (index_t j = 0; j < num_quantized_bins; j++) {
      const int start = j * num_merged_bins;
      const int stop = (j == num_quantized_bins - 1) ? q.size() : ((j + 1) * num_merged_bins);
      // Spread each merged bin's mass uniformly over its originally non-zero slots.
      int norm = std::count_if(sliced_nd_hist.begin() + start, sliced_nd_hist.begin() + stop,
                               [](size_t i) { return i != 0; });
      if (norm) {
        for (index_t k = start; k < stop; k++) {
          if (p[k]) q[k] = quantized_bins[j] / norm;
        }
      }
    }
    p = SmoothDistribution(p);
    q = SmoothDistribution(q);

    if (!q.size()) {
      // q could not be smoothed (degenerate); rule this window out of the search.
      divergence[i - num_half_quantized_bins] = std::numeric_limits<float>::infinity();
    } else {
      divergence[i - num_half_quantized_bins] = ComputeEntropy(&p, &q);
    }
  }

  // Pick the window with the smallest KL divergence.
  size_t min_divergence_idx = 0;
  float min_divergence = mshadow::red::limits::MaxValue<float>();
  for (size_t i = 0; i < divergence.size(); i++) {
    if (divergence[i] < min_divergence) {
      min_divergence = divergence[i];
      min_divergence_idx = i;
    }
  }
  *out_divergence = min_divergence;
  *out_threshold = thresholds[min_divergence_idx];
}
171
CalibrateShape(const nnvm::NodeAttrs & attrs,std::vector<TShape> * in_attrs,std::vector<TShape> * out_attrs)172 static inline bool CalibrateShape(const nnvm::NodeAttrs& attrs, std::vector<TShape>* in_attrs,
173 std::vector<TShape>* out_attrs) {
174 CHECK_EQ(in_attrs->size(), 2U);
175 CHECK_EQ(out_attrs->size(), 2U);
176 SHAPE_ASSIGN_CHECK(*out_attrs, 0, TShape(1, 1));
177 SHAPE_ASSIGN_CHECK(*out_attrs, 1, TShape(1, 1));
178 return (!shape_is_none(in_attrs->at(0))) && (!shape_is_none(in_attrs->at(1)));
179 }
180
CalibrateType(const nnvm::NodeAttrs & attrs,std::vector<int> * in_attrs,std::vector<int> * out_attrs)181 static inline bool CalibrateType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs,
182 std::vector<int>* out_attrs) {
183 CHECK_EQ(in_attrs->size(), 2U);
184 CHECK_EQ(out_attrs->size(), 2U);
185 CHECK(in_attrs->at(0) == mshadow::kFloat32);
186 TYPE_ASSIGN_CHECK(*in_attrs, 1, mshadow::kFloat32);
187 TYPE_ASSIGN_CHECK(*out_attrs, 0, mshadow::kFloat32);
188 TYPE_ASSIGN_CHECK(*out_attrs, 1, mshadow::kFloat32);
189 return true;
190 }
191
// Operator registration: wires the entropy-calibration kernel, its shape/type
// inference, and its named inputs/outputs into the NNVM op registry.
NNVM_REGISTER_OP(_contrib_calibrate_entropy)
.describe(R"code(Provide calibrated min/max for input histogram.

.. Note::
    This operator only supports forward propagation. DO NOT use it in training.)code" ADD_FILELINE)
.set_attr_parser(ParamParser<CalibrateEntropyParam>)
.set_num_inputs(2)
.set_num_outputs(2)
.set_attr<nnvm::FListInputNames>("FListInputNames", [](const NodeAttrs& attrs) {
  return std::vector<std::string>{"hist", "hist_edges"};
})
.set_attr<nnvm::FListOutputNames>("FListOutputNames", [](const NodeAttrs& attrs) {
  return std::vector<std::string>{"threshold", "divergence"};
})
.set_attr<mxnet::FInferShape>("FInferShape", CalibrateShape)
.set_attr<nnvm::FInferType>("FInferType", CalibrateType)
.set_attr<FCompute>("FCompute<cpu>", CalibrateComputeCPU)
.add_argument("hist", "NDArray-or-Symbol", "A ndarray/symbol of type `float32`")
.add_argument("hist_edges", "NDArray-or-Symbol", "A ndarray/symbol of type `float32`")
.add_arguments(CalibrateEntropyParam::__FIELDS__());
212
213 } // namespace op
214 } // namespace mxnet
215