// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_

#include <math.h>

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <limits>
#include <vector>

#include "draco/attributes/point_attribute.h"
#include "draco/core/math_utils.h"
#include "draco/core/vector_d.h"
#include "draco/mesh/corner_table.h"

namespace draco {

// Predictor functionality used for portable UV prediction by both encoder and
// decoder.
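//
// A rough usage sketch for the encoding direction (the variable names below
// are illustrative only, not part of this header):
//
//   MeshPredictionSchemeTexCoordsPortablePredictor<DataTypeT, MeshDataT>
//       predictor(mesh_data);
//   predictor.SetPositionAttribute(*position_attribute);
//   predictor.SetEntryToPointIdMap(entry_to_point_id_map);
//   // For each corner, in the attribute encoding order:
//   predictor.template ComputePredictedValue<true>(corner, data, data_id);
//   // predictor.predicted_value() now holds the two predicted components and
//   // the chosen orientation bit has been appended to the orientation array.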
template <typename DataTypeT, class MeshDataT>
class MeshPredictionSchemeTexCoordsPortablePredictor {
 public:
  static constexpr int kNumComponents = 2;

  explicit MeshPredictionSchemeTexCoordsPortablePredictor(const MeshDataT &md)
      : pos_attribute_(nullptr),
        entry_to_point_id_map_(nullptr),
        mesh_data_(md) {}
  void SetPositionAttribute(const PointAttribute &position_attribute) {
    pos_attribute_ = &position_attribute;
  }
  void SetEntryToPointIdMap(const PointIndex *map) {
    entry_to_point_id_map_ = map;
  }
  bool IsInitialized() const { return pos_attribute_ != nullptr; }

  VectorD<int64_t, 3> GetPositionForEntryId(int entry_id) const {
    const PointIndex point_id = entry_to_point_id_map_[entry_id];
    VectorD<int64_t, 3> pos;
    pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
                                 &pos[0]);
    return pos;
  }

  VectorD<int64_t, 2> GetTexCoordForEntryId(int entry_id,
                                            const DataTypeT *data) const {
    const int data_offset = entry_id * kNumComponents;
    return VectorD<int64_t, 2>(data[data_offset], data[data_offset + 1]);
  }

  // Computes the predicted UV coordinates for a given corner. The result is
  // stored in the |predicted_value_| member.
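  // The |is_encoder_t| template argument selects the direction: the encoder
  // appends one orientation bit per predicted corner to |orientations_|,
  // while the decoder consumes those bits from the back of the same array.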
  template <bool is_encoder_t>
  bool ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                             int data_id);

  const DataTypeT *predicted_value() const { return predicted_value_; }
  bool orientation(int i) const { return orientations_[i]; }
  void set_orientation(int i, bool v) { orientations_[i] = v; }
  size_t num_orientations() const { return orientations_.size(); }
  void ResizeOrientations(int num_orientations) {
    orientations_.resize(num_orientations);
  }

 private:
  const PointAttribute *pos_attribute_;
  const PointIndex *entry_to_point_id_map_;
  DataTypeT predicted_value_[kNumComponents];
  // Encoded / decoded array of UV flips.
  // TODO(ostava): We should remove this and replace it with in-place encoding
  // and decoding to avoid the unnecessary copy.
  std::vector<bool> orientations_;
  MeshDataT mesh_data_;
};

template <typename DataTypeT, class MeshDataT>
template <bool is_encoder_t>
bool MeshPredictionSchemeTexCoordsPortablePredictor<
    DataTypeT, MeshDataT>::ComputePredictedValue(CornerIndex corner_id,
                                                 const DataTypeT *data,
                                                 int data_id) {
  // Compute the predicted UV coordinate from the positions on all corners
  // of the processed triangle. For the best prediction, the UV coordinates
  // on the next/previous corners need to be already encoded/decoded.
  const CornerIndex next_corner_id = mesh_data_.corner_table()->Next(corner_id);
  const CornerIndex prev_corner_id =
      mesh_data_.corner_table()->Previous(corner_id);
  // Get the encoded data ids from the next and previous corners.
  // The data id is the encoding order of the UV coordinates.
  int next_data_id, prev_data_id;

  int next_vert_id, prev_vert_id;
  next_vert_id = mesh_data_.corner_table()->Vertex(next_corner_id).value();
  prev_vert_id = mesh_data_.corner_table()->Vertex(prev_corner_id).value();

  next_data_id = mesh_data_.vertex_to_data_map()->at(next_vert_id);
  prev_data_id = mesh_data_.vertex_to_data_map()->at(prev_vert_id);

  if (prev_data_id < data_id && next_data_id < data_id) {
    // Both other corners have available UV coordinates for prediction.
    const VectorD<int64_t, 2> n_uv = GetTexCoordForEntryId(next_data_id, data);
    const VectorD<int64_t, 2> p_uv = GetTexCoordForEntryId(prev_data_id, data);
    if (p_uv == n_uv) {
      // We cannot do a reliable prediction on degenerate UV triangles.
      predicted_value_[0] = p_uv[0];
      predicted_value_[1] = p_uv[1];
      return true;
    }

    // Get positions at all corners.
    const VectorD<int64_t, 3> tip_pos = GetPositionForEntryId(data_id);
    const VectorD<int64_t, 3> next_pos = GetPositionForEntryId(next_data_id);
    const VectorD<int64_t, 3> prev_pos = GetPositionForEntryId(prev_data_id);
    // We use the positions of the above triangle to predict the texture
    // coordinate on the tip corner C.
    // To convert the triangle into the UV coordinate system we first compute
    // position X on the vector |prev_pos - next_pos| that is the projection of
    // point C onto vector |prev_pos - next_pos|:
    //
    //              C
    //             /.  \
    //            / .     \
    //           /  .        \
    //          N---X----------P
    //
    // Where next_pos is point (N), prev_pos is point (P) and tip_pos is the
    // position of predicted coordinate (C).
    //
    const VectorD<int64_t, 3> pn = prev_pos - next_pos;
    const uint64_t pn_norm2_squared = pn.SquaredNorm();
    if (pn_norm2_squared != 0) {
      // Compute the projection of C onto PN by computing dot product of CN with
      // PN and normalizing it by length of PN. This gives us a factor |s| where
      // |s = PN.Dot(CN) / PN.SquaredNorm2()|. This factor can be used to
      // compute X in UV space |X_UV| as |X_UV = N_UV + s * PN_UV|.
      const VectorD<int64_t, 3> cn = tip_pos - next_pos;
      const int64_t cn_dot_pn = pn.Dot(cn);

      const VectorD<int64_t, 2> pn_uv = p_uv - n_uv;
      // Because we perform all computations with integers, we don't explicitly
      // compute the normalized factor |s|, but rather we perform all operations
      // over UV vectors in a non-normalized coordinate system scaled with a
      // scaling factor |pn_norm2_squared|:
      //
      //      x_uv = X_UV * PN.Norm2Squared()
      //
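      // Substituting |X_UV = N_UV + s * PN_UV| and |s = PN.Dot(CN) /
      // PN.Norm2Squared()| shows what the next statement computes:
      //
      //      x_uv = n_uv * pn_norm2_squared + cn_dot_pn * pn_uv
      //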
      const VectorD<int64_t, 2> x_uv =
          n_uv * pn_norm2_squared + (cn_dot_pn * pn_uv);

      const int64_t pn_absmax_element =
          std::max(std::max(std::abs(pn[0]), std::abs(pn[1])), std::abs(pn[2]));
      if (cn_dot_pn > std::numeric_limits<int64_t>::max() / pn_absmax_element) {
        // Return false if the squared length computation below would overflow.
        return false;
      }

      // Compute squared length of vector CX in position coordinate system:
      const VectorD<int64_t, 3> x_pos =
          next_pos + (cn_dot_pn * pn) / pn_norm2_squared;
      const uint64_t cx_norm2_squared = (tip_pos - x_pos).SquaredNorm();
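      // Note that the integer division above truncates, so |x_pos| and
      // therefore |cx_norm2_squared| only approximate the exact projection.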

      // Compute vector CX_UV in the uv space by rotating vector PN_UV by 90
      // degrees and scaling it with factor CX.Norm2() / PN.Norm2():
      //
      //     CX_UV = (CX.Norm2() / PN.Norm2()) * Rot(PN_UV)
      //
      // To preserve precision, we perform all operations in scaled space as
      // explained above, so we want the final vector to be:
      //
      //     cx_uv = CX_UV * PN.Norm2Squared()
      //
      // We can then rewrite the formula as:
      //
      //     cx_uv = CX.Norm2() * PN.Norm2() * Rot(PN_UV)
      //
      VectorD<int64_t, 2> cx_uv(pn_uv[1], -pn_uv[0]);  // Rotated PN_UV.
      // Compute CX.Norm2() * PN.Norm2()
      const uint64_t norm_squared =
          IntSqrt(cx_norm2_squared * pn_norm2_squared);
      // Final cx_uv in the scaled coordinate space.
      cx_uv = cx_uv * norm_squared;

      // Predicted uv coordinate is then computed by either adding or
      // subtracting CX_UV to/from X_UV.
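      //
      // As a small worked example (values chosen purely for illustration):
      // with next_pos = (0, 0, 0), prev_pos = (2, 0, 0), tip_pos = (1, 1, 0),
      // n_uv = (0, 0) and p_uv = (2, 0) we get pn_norm2_squared = 4,
      // cn_dot_pn = 2, x_uv = (4, 0), cx_norm2_squared = 1, norm_squared = 2
      // and cx_uv = (0, -4), so the two possible predictions below are
      // ((4, 0) +- (0, -4)) / 4, i.e. (1, -1) and (1, 1).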
      VectorD<int64_t, 2> predicted_uv;
      if (is_encoder_t) {
        // When encoding, compute both possible vectors and determine which one
        // results in a better prediction.
        // Both vectors need to be transformed back from the scaled space to
        // the real UV coordinate space.
        const VectorD<int64_t, 2> predicted_uv_0((x_uv + cx_uv) /
                                                 pn_norm2_squared);
        const VectorD<int64_t, 2> predicted_uv_1((x_uv - cx_uv) /
                                                 pn_norm2_squared);
        const VectorD<int64_t, 2> c_uv = GetTexCoordForEntryId(data_id, data);
        if ((c_uv - predicted_uv_0).SquaredNorm() <
            (c_uv - predicted_uv_1).SquaredNorm()) {
          predicted_uv = predicted_uv_0;
          orientations_.push_back(true);
        } else {
          predicted_uv = predicted_uv_1;
          orientations_.push_back(false);
        }
      } else {
        // When decoding the data, we already know which orientation to use.
        if (orientations_.empty()) {
          return false;
        }
        const bool orientation = orientations_.back();
        orientations_.pop_back();
        if (orientation) {
          predicted_uv = (x_uv + cx_uv) / pn_norm2_squared;
        } else {
          predicted_uv = (x_uv - cx_uv) / pn_norm2_squared;
        }
      }
      predicted_value_[0] = static_cast<int>(predicted_uv[0]);
      predicted_value_[1] = static_cast<int>(predicted_uv[1]);
      return true;
    }
  }
  // Otherwise we don't have UV coordinates available on both neighboring
  // corners, or the position data is degenerate. In such cases we can't use
  // positions to predict the UV value and we fall back to delta coding.
  int data_offset = 0;
  if (prev_data_id < data_id) {
    // Use the value on the previous corner as the prediction.
    data_offset = prev_data_id * kNumComponents;
  }
  if (next_data_id < data_id) {
    // Use the value on the next corner as the prediction.
    data_offset = next_data_id * kNumComponents;
  } else {
    // None of the other corners have a valid value. Use the last encoded value
    // as the prediction if possible.
    if (data_id > 0) {
      data_offset = (data_id - 1) * kNumComponents;
    } else {
      // We are encoding the first value. Predict 0.
      for (int i = 0; i < kNumComponents; ++i) {
        predicted_value_[i] = 0;
      }
      return true;
    }
  }
  for (int i = 0; i < kNumComponents; ++i) {
    predicted_value_[i] = data[data_offset + i];
  }
  return true;
}

}  // namespace draco

#endif  // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_