// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_

#include <math.h>

#include <cstdint>
#include <memory>
#include <type_traits>
#include <vector>

#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/bit_coders/rans_bit_encoder.h"
#include "draco/core/varint_encoding.h"
#include "draco/core/vector_d.h"
#include "draco/mesh/corner_table.h"

namespace draco {

// Prediction scheme designed for predicting texture coordinates from the
// known spatial positions of vertices. For a good parametrization, the ratios
// between triangle edge lengths should be about the same in both the spatial
// and UV coordinate spaces, which makes the positions a good predictor for
// the UV coordinates.
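//
// A rough usage sketch (hypothetical caller code; within the library the
// scheme is created and driven by Draco's sequential attribute encoders, and
// the attribute, transform and mesh data shown here are assumed to be set up
// elsewhere):
//
//   MeshPredictionSchemeTexCoordsEncoder<int, Transform, MeshData> scheme(
//       uv_attribute, transform, mesh_data);
//   scheme.SetParentAttribute(position_attribute);  // 3-component POSITION.
//   scheme.ComputeCorrectionValues(uv_values, corrections, size,
//                                  /*num_components=*/2, entry_to_point_map);
//   scheme.EncodePredictionData(encoder_buffer);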
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeTexCoordsEncoder(const PointAttribute *attribute,
                                       const TransformT &transform,
                                       const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        pos_attribute_(nullptr),
        entry_to_point_id_map_(nullptr),
        num_components_(0) {}

  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;

  bool EncodePredictionData(EncoderBuffer *buffer) override;

  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_DEPRECATED;
  }

  bool IsInitialized() const override {
    if (pos_attribute_ == nullptr) {
      return false;
    }
    if (!this->mesh_data().IsInitialized()) {
      return false;
    }
    return true;
  }

  int GetNumParentAttributes() const override { return 1; }

  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }

  bool SetParentAttribute(const PointAttribute *att) override {
    if (att->attribute_type() != GeometryAttribute::POSITION) {
      return false;  // Invalid attribute type.
    }
    if (att->num_components() != 3) {
      return false;  // Currently works only for 3-component positions.
    }
    pos_attribute_ = att;
    return true;
  }

 protected:
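  // Returns the spatial position for the given entry id, read from the parent
  // position attribute and converted to floats.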
  Vector3f GetPositionForEntryId(int entry_id) const {
    const PointIndex point_id = entry_to_point_id_map_[entry_id];
    Vector3f pos;
    pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
                                 &pos[0]);
    return pos;
  }

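  // Returns the first two components of the UV data stored for the given
  // entry id in the |data| array.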
  Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const {
    const int data_offset = entry_id * num_components_;
    return Vector2f(static_cast<float>(data[data_offset]),
                    static_cast<float>(data[data_offset + 1]));
  }

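  // Computes the prediction for the UV coordinates attached to |corner_id|
  // and stores it in |predicted_value_|. |data_id| is the encoding order of
  // the processed entry; only entries with smaller data ids can be used for
  // the prediction.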
  void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                             int data_id);

 private:
  const PointAttribute *pos_attribute_;
  const PointIndex *entry_to_point_id_map_;
  std::unique_ptr<DataTypeT[]> predicted_value_;
  int num_components_;
  // Encoded / decoded array of UV flips.
  std::vector<bool> orientations_;
};

template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
    ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
                            int size, int num_components,
                            const PointIndex *entry_to_point_id_map) {
  num_components_ = num_components;
  entry_to_point_id_map_ = entry_to_point_id_map;
  predicted_value_ =
      std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]);
  this->transform().Init(in_data, size, num_components);
  // We start processing from the end because this prediction uses data from
  // previous entries that could be overwritten when an entry is processed.
  for (int p =
           static_cast<int>(this->mesh_data().data_to_corner_map()->size()) - 1;
       p >= 0; --p) {
    const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
    ComputePredictedValue(corner_id, in_data, p);

    const int dst_offset = p * num_components;
    this->transform().ComputeCorrection(
        in_data + dst_offset, predicted_value_.get(), out_corr + dst_offset);
  }
  return true;
}

template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
    EncodePredictionData(EncoderBuffer *buffer) {
  // Encode the delta-coded orientations using arithmetic coding.
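  // For example, with the initial |last_orientation| == true, the orientation
  // sequence {true, true, false, false, true} is delta-coded below as the bit
  // sequence 1, 1, 0, 1, 0 (a bit is 1 whenever an orientation matches the
  // previous one), which the rANS coder then compresses.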
  const uint32_t num_orientations = static_cast<uint32_t>(orientations_.size());
  EncodeVarint(num_orientations, buffer);
  bool last_orientation = true;
  RAnsBitEncoder encoder;
  encoder.StartEncoding();
  for (bool orientation : orientations_) {
    encoder.EncodeBit(orientation == last_orientation);
    last_orientation = orientation;
  }
  encoder.EndEncoding(buffer);
  return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                     MeshDataT>::EncodePredictionData(buffer);
}

template <typename DataTypeT, class TransformT, class MeshDataT>
void MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
    ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                          int data_id) {
  // Compute the predicted UV coordinate from the positions on all corners
  // of the processed triangle. For the best prediction, the UV coordinates
  // on the next/previous corners need to be already encoded/decoded.
  const CornerIndex next_corner_id =
      this->mesh_data().corner_table()->Next(corner_id);
  const CornerIndex prev_corner_id =
      this->mesh_data().corner_table()->Previous(corner_id);
  // Get the encoded data ids from the next and previous corners.
  // The data id is the encoding order of the UV coordinates.
  int next_data_id, prev_data_id;

  int next_vert_id, prev_vert_id;
  next_vert_id =
      this->mesh_data().corner_table()->Vertex(next_corner_id).value();
  prev_vert_id =
      this->mesh_data().corner_table()->Vertex(prev_corner_id).value();

  next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id);
  prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id);

  if (prev_data_id < data_id && next_data_id < data_id) {
    // Both other corners have available UV coordinates for prediction.
    const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data);
    const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data);
    if (p_uv == n_uv) {
      // We cannot do a reliable prediction on degenerate UV triangles.
      predicted_value_[0] = static_cast<int>(p_uv[0]);
      predicted_value_[1] = static_cast<int>(p_uv[1]);
      return;
    }

    // Get positions at all corners.
    const Vector3f tip_pos = GetPositionForEntryId(data_id);
    const Vector3f next_pos = GetPositionForEntryId(next_data_id);
    const Vector3f prev_pos = GetPositionForEntryId(prev_data_id);
    // Use the positions of the above triangle to predict the texture
    // coordinate on the tip corner C.
    // Convert the triangle into a new coordinate system defined by orthogonal
    // basis vectors S, T, where S is the vector prev_pos - next_pos and T is
    // a vector perpendicular to S in the same plane as the vector
    // tip_pos - next_pos.
    // The transformed triangle in the new coordinate system is then going to
    // be represented as:
    //
    //        1 ^
    //          |
    //          |
    //          |   C
    //          |  /  \
    //          | /      \
    //          |/          \
    //          N--------------P
    //          0              1
    //
    // where the next_pos point (N) is at position (0, 0) and the prev_pos
    // point (P) is at (1, 0). Our goal is to compute the position of the
    // tip_pos point (C) in this new coordinate space (s, t).
    //
    const Vector3f pn = prev_pos - next_pos;
    const Vector3f cn = tip_pos - next_pos;
    const float pn_norm2_squared = pn.SquaredNorm();
    // Coordinate s of the tip corner C is the dot product of the vectors |pn|
    // and |cn|, each scaled by the inverse length of |pn|. Since both vectors
    // are scaled by the same factor, we don't need to perform the
    // normalization explicitly; instead we can divide the dot product of the
    // non-normalized vectors by the squared norm of |pn|.
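    // As a hypothetical example: for next_pos N = (0, 0, 0),
    // prev_pos P = (2, 0, 0) and tip_pos C = (1, 1, 0), we get pn = (2, 0, 0),
    // cn = (1, 1, 0) and |pn|^2 = 4, so s = pn.Dot(cn) / 4 = 0.5 and
    // t = sqrt(|cn - pn * s|^2 / 4) = sqrt(1 / 4) = 0.5, i.e., C maps to
    // (s, t) = (0.5, 0.5) in the normalized triangle above.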
    float s, t;
    // |pn_norm2_squared| can be exactly 0 when next_pos and prev_pos are the
    // same position (e.g., because they were quantized to the same location).
    if (pn_norm2_squared > 0) {
      s = pn.Dot(cn) / pn_norm2_squared;
      // To get the coordinate t, we can use the formula:
      //      t = |C-N - (P-N) * s| / |P-N|
      // Do not use std::sqrt to avoid changes in the bitstream.
      t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
    } else {
      s = 0;
      t = 0;
    }

    // Now we need to transform the point (s, t) to the texture coordinate
    // space UV. We know the UV coordinates on points N and P (N_UV and P_UV).
    // Let's denote P_UV - N_UV = PN_UV. PN_UV is then a 2-dimensional vector
    // that can be used to define a transformation from the normalized
    // coordinate system to the texture coordinate system using a 3x3 affine
    // matrix M:
    //
    //  M = | PN_UV[0]  -PN_UV[1]  N_UV[0] |
    //      | PN_UV[1]   PN_UV[0]  N_UV[1] |
    //      | 0          0         1       |
    //
    // The predicted point C_UV in the texture space is then equal to
    // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped
    // around the PN_UV axis, we also need to consider the point
    // C_UV' = M * (s, -t, 1) as the prediction.
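    // Expanded, the two candidate predictions are:
    //   C_UV  = (PN_UV[0] * s - PN_UV[1] * t + N_UV[0],
    //            PN_UV[1] * s + PN_UV[0] * t + N_UV[1])
    //   C_UV' = (PN_UV[0] * s + PN_UV[1] * t + N_UV[0],
    //            PN_UV[1] * s - PN_UV[0] * t + N_UV[1])
    // which correspond to |predicted_uv_0| and |predicted_uv_1| computed below.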
    const Vector2f pn_uv = p_uv - n_uv;
    const float pnus = pn_uv[0] * s + n_uv[0];
    const float pnut = pn_uv[0] * t;
    const float pnvs = pn_uv[1] * s + n_uv[1];
    const float pnvt = pn_uv[1] * t;
    Vector2f predicted_uv;

    // When encoding, compute both possible vectors and determine which one
    // results in a better prediction.
    const Vector2f predicted_uv_0(pnus - pnvt, pnvs + pnut);
    const Vector2f predicted_uv_1(pnus + pnvt, pnvs - pnut);
    const Vector2f c_uv = GetTexCoordForEntryId(data_id, data);
    if ((c_uv - predicted_uv_0).SquaredNorm() <
        (c_uv - predicted_uv_1).SquaredNorm()) {
      predicted_uv = predicted_uv_0;
      orientations_.push_back(true);
    } else {
      predicted_uv = predicted_uv_1;
      orientations_.push_back(false);
    }
    if (std::is_integral<DataTypeT>::value) {
      // Round the predicted value for integer types.
      predicted_value_[0] = static_cast<int>(floor(predicted_uv[0] + 0.5));
      predicted_value_[1] = static_cast<int>(floor(predicted_uv[1] + 0.5));
    } else {
      predicted_value_[0] = static_cast<int>(predicted_uv[0]);
      predicted_value_[1] = static_cast<int>(predicted_uv[1]);
    }
    return;
  }
  // Otherwise we don't have valid UV coordinates on both opposite corners. In
  // that case we can't use the positions to predict the UV value, so we
  // resort to delta coding.
  int data_offset = 0;
  if (prev_data_id < data_id) {
    // Use the value on the previous corner as the prediction.
    data_offset = prev_data_id * num_components_;
  }
  if (next_data_id < data_id) {
    // Use the value on the next corner as the prediction.
    data_offset = next_data_id * num_components_;
  } else {
    // None of the other corners have a valid value. Use the last encoded value
    // as the prediction if possible.
    if (data_id > 0) {
      data_offset = (data_id - 1) * num_components_;
    } else {
      // We are encoding the first value. Predict 0.
      for (int i = 0; i < num_components_; ++i) {
        predicted_value_[i] = 0;
      }
      return;
    }
  }
  for (int i = 0; i < num_components_; ++i) {
    predicted_value_[i] = data[data_offset + i];
  }
}

}  // namespace draco

#endif  // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_