1 // Copyright 2016 The Draco Authors. 2 // 3 // Licensed under the Apache License, Version 2.0 (the "License"); 4 // you may not use this file except in compliance with the License. 5 // You may obtain a copy of the License at 6 // 7 // http://www.apache.org/licenses/LICENSE-2.0 8 // 9 // Unless required by applicable law or agreed to in writing, software 10 // distributed under the License is distributed on an "AS IS" BASIS, 11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 // See the License for the specific language governing permissions and 13 // limitations under the License. 14 // 15 #ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED 16 #ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ 17 #define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_ 18 19 #include <math.h> 20 21 #include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h" 22 #include "draco/compression/bit_coders/rans_bit_decoder.h" 23 #include "draco/core/varint_decoding.h" 24 #include "draco/core/vector_d.h" 25 #include "draco/draco_features.h" 26 #include "draco/mesh/corner_table.h" 27 28 namespace draco { 29 30 // Decoder for predictions of UV coordinates encoded by our specialized texture 31 // coordinate predictor. See the corresponding encoder for more details. Note 32 // that this predictor is not portable and should not be used anymore. See 33 // MeshPredictionSchemeTexCoordsPortableEncoder/Decoder for a portable version 34 // of this prediction scheme. 
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  // |version| is the bitstream version of the encoded data; it selects
  // version-specific decoding behavior (see ComputePredictedValue).
  MeshPredictionSchemeTexCoordsDecoder(const PointAttribute *attribute,
                                       const TransformT &transform,
                                       const MeshDataT &mesh_data, int version)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        pos_attribute_(nullptr),
        entry_to_point_id_map_(nullptr),
        num_components_(0),
        version_(version) {}

  // Reconstructs the original UV values from the decoded corrections
  // |in_corr|, writing them into |out_data| (num_components values per
  // entry). |entry_to_point_id_map| maps attribute entries to point ids.
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;

  // Decodes the per-corner UV orientation flags and the parent scheme's
  // prediction data from |buffer|.
  bool DecodePredictionData(DecoderBuffer *buffer) override;

  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_DEPRECATED;
  }

  bool IsInitialized() const override {
    if (pos_attribute_ == nullptr) {
      return false;
    }
    if (!this->mesh_data().IsInitialized()) {
      return false;
    }
    return true;
  }

  // This scheme predicts UVs from vertex positions, so it has exactly one
  // parent attribute: POSITION.
  int GetNumParentAttributes() const override { return 1; }

  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }

  bool SetParentAttribute(const PointAttribute *att) override {
    if (att == nullptr) {
      return false;
    }
    if (att->attribute_type() != GeometryAttribute::POSITION) {
      return false;  // Invalid attribute type.
    }
    if (att->num_components() != 3) {
      return false;  // Currently works only for 3 component positions.
    }
    pos_attribute_ = att;
    return true;
  }

 protected:
  // Returns the 3D position associated with attribute entry |entry_id|,
  // converted to float.
  Vector3f GetPositionForEntryId(int entry_id) const {
    const PointIndex point_id = entry_to_point_id_map_[entry_id];
    Vector3f pos;
    pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
                                 &pos[0]);
    return pos;
  }

  // Returns the (already decoded) UV coordinate stored at |entry_id| in
  // |data|, converted to float.
  Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const {
    const int data_offset = entry_id * num_components_;
    return Vector2f(static_cast<float>(data[data_offset]),
                    static_cast<float>(data[data_offset + 1]));
  }

  // Computes the predicted UV value for the corner |corner_id| into
  // |predicted_value_|. |data| holds the previously decoded UVs and
  // |data_id| is the encoding-order index of the entry being decoded.
  void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                             int data_id);

 private:
  // Parent position attribute (not owned). Set via SetParentAttribute().
  const PointAttribute *pos_attribute_;
  // Maps attribute entries to point ids (not owned; provided by the caller
  // of ComputeOriginalValues()).
  const PointIndex *entry_to_point_id_map_;
  // Scratch buffer holding the prediction for the entry currently being
  // decoded (num_components_ values).
  std::unique_ptr<DataTypeT[]> predicted_value_;
  int num_components_;
  // Encoded / decoded array of UV flips. Consumed back-to-front in
  // ComputePredictedValue().
  std::vector<bool> orientations_;
  // Bitstream version of the encoded data.
  int version_;
};

template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
    ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                          int /* size */, int num_components,
                          const PointIndex *entry_to_point_id_map) {
  num_components_ = num_components;
  entry_to_point_id_map_ = entry_to_point_id_map;
  predicted_value_ =
      std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]);
  this->transform().Init(num_components);

  // Decode entries in their encoding order so that the UVs referenced by the
  // predictor are always available before they are needed.
  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  for (int p = 0; p < corner_map_size; ++p) {
    const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
    ComputePredictedValue(corner_id, out_data, p);

    const int dst_offset = p * num_components;
    this->transform().ComputeOriginalValue(
        predicted_value_.get(), in_corr + dst_offset, out_data + dst_offset);
  }
  return true;
}

template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
    DecodePredictionData(DecoderBuffer *buffer) {
  // Decode the delta coded orientations.
  // Older bitstreams store the count as a raw uint32; newer ones as a varint.
  uint32_t num_orientations = 0;
  if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
    if (!buffer->Decode(&num_orientations)) {
      return false;
    }
  } else {
    if (!DecodeVarint(&num_orientations, buffer)) {
      return false;
    }
  }
  if (num_orientations == 0) {
    return false;
  }
  orientations_.resize(num_orientations);
  // Orientations are delta coded: a decoded 0 bit means "same as previous",
  // a 1 bit means the orientation flipped. The initial orientation is true.
  bool last_orientation = true;
  RAnsBitDecoder decoder;
  if (!decoder.StartDecoding(buffer)) {
    return false;
  }
  for (uint32_t i = 0; i < num_orientations; ++i) {
    if (!decoder.DecodeNextBit()) {
      last_orientation = !last_orientation;
    }
    orientations_[i] = last_orientation;
  }
  decoder.EndDecoding();
  return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                     MeshDataT>::DecodePredictionData(buffer);
}

template <typename DataTypeT, class TransformT, class MeshDataT>
void MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
    ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                          int data_id) {
  // Compute the predicted UV coordinate from the positions on all corners
  // of the processed triangle. For the best prediction, the UV coordinates
  // on the next/previous corners need to be already encoded/decoded.
  const CornerIndex next_corner_id =
      this->mesh_data().corner_table()->Next(corner_id);
  const CornerIndex prev_corner_id =
      this->mesh_data().corner_table()->Previous(corner_id);
  // Get the encoded data ids from the next and previous corners.
  // The data id is the encoding order of the UV coordinates.
  int next_data_id, prev_data_id;

  int next_vert_id, prev_vert_id;
  next_vert_id =
      this->mesh_data().corner_table()->Vertex(next_corner_id).value();
  prev_vert_id =
      this->mesh_data().corner_table()->Vertex(prev_corner_id).value();

  next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id);
  prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id);

  if (prev_data_id < data_id && next_data_id < data_id) {
    // Both other corners have available UV coordinates for prediction.
    const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data);
    const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data);
    if (p_uv == n_uv) {
      // We cannot do a reliable prediction on degenerated UV triangles.
      predicted_value_[0] = static_cast<int>(p_uv[0]);
      predicted_value_[1] = static_cast<int>(p_uv[1]);
      return;
    }

    // Get positions at all corners.
    const Vector3f tip_pos = GetPositionForEntryId(data_id);
    const Vector3f next_pos = GetPositionForEntryId(next_data_id);
    const Vector3f prev_pos = GetPositionForEntryId(prev_data_id);
    // Use the positions of the above triangle to predict the texture coordinate
    // on the tip corner C.
    // Convert the triangle into a new coordinate system defined by orthogonal
    // bases vectors S, T, where S is vector prev_pos - next_pos and T is a
    // perpendicular vector to S in the same plane as vector the
    // tip_pos - next_pos.
    // The transformed triangle in the new coordinate system is then going to
    // be represented as:
    //
    //        1 ^
    //          |
    //          |
    //          |   C
    //          |  /  \
    //          | /    \
    //          |/      \
    //          N--------------P
    //          0              1
    //
    // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is
    // at (1, 0). Our goal is to compute the position of the tip_pos point (C)
    // in this new coordinate space (s, t).
    //
    const Vector3f pn = prev_pos - next_pos;
    const Vector3f cn = tip_pos - next_pos;
    const float pn_norm2_squared = pn.SquaredNorm();
    // Coordinate s of the tip corner C is simply the dot product of the
    // normalized vectors |pn| and |cn| (normalized by the length of |pn|).
    // Since both of these vectors are normalized, we don't need to perform the
    // normalization explicitly and instead we can just use the squared norm
    // of |pn| as a denominator of the resulting dot product of non normalized
    // vectors.
    float s, t;
    // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are
    // the same positions (e.g. because they were quantized to the same
    // location).
    if (version_ < DRACO_BITSTREAM_VERSION(1, 2) || pn_norm2_squared > 0) {
      s = pn.Dot(cn) / pn_norm2_squared;
      // To get the coordinate t, we can use formula:
      //      t = |C-N - (P-N) * s| / |P-N|
      // Do not use std::sqrt to avoid changes in the bitstream.
      t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
    } else {
      s = 0;
      t = 0;
    }

    // Now we need to transform the point (s, t) to the texture coordinate space
    // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets
    // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can
    // be used to define transformation from the normalized coordinate system
    // to the texture coordinate system using a 3x3 affine matrix M:
    //
    //  M = | PN_UV[0]  -PN_UV[1]  N_UV[0] |
    //      | PN_UV[1]   PN_UV[0]  N_UV[1] |
    //      | 0          0         1       |
    //
    // The predicted point C_UV in the texture space is then equal to
    // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped
    // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t)
    // as the prediction.
    const Vector2f pn_uv = p_uv - n_uv;
    const float pnus = pn_uv[0] * s + n_uv[0];
    const float pnut = pn_uv[0] * t;
    const float pnvs = pn_uv[1] * s + n_uv[1];
    const float pnvt = pn_uv[1] * t;
    Vector2f predicted_uv;

    // When decoding the data, we already know which orientation to use.
    // Orientations are consumed from the back of the array (they were
    // encoded in reverse order).
    const bool orientation = orientations_.back();
    orientations_.pop_back();
    if (orientation)
      predicted_uv = Vector2f(pnus - pnvt, pnvs + pnut);
    else
      predicted_uv = Vector2f(pnus + pnvt, pnvs - pnut);

    if (std::is_integral<DataTypeT>::value) {
      // Round the predicted value for integer types.
      // NaN can occur for malformed input; map it to INT_MIN rather than
      // invoking undefined float-to-int conversion.
      if (std::isnan(predicted_uv[0])) {
        predicted_value_[0] = INT_MIN;
      } else {
        predicted_value_[0] = static_cast<int>(floor(predicted_uv[0] + 0.5));
      }
      if (std::isnan(predicted_uv[1])) {
        predicted_value_[1] = INT_MIN;
      } else {
        predicted_value_[1] = static_cast<int>(floor(predicted_uv[1] + 0.5));
      }
    } else {
      predicted_value_[0] = static_cast<int>(predicted_uv[0]);
      predicted_value_[1] = static_cast<int>(predicted_uv[1]);
    }
    return;
  }
  // Else we don't have available textures on both corners. For such case we
  // can't use positions for predicting the uv value and we resort to delta
  // coding.
  // NOTE(review): the two ifs below are not chained with an else, so when
  // only |prev_data_id| is valid the first assignment is overwritten by the
  // else branch of the second if. This matches the (deprecated) encoder and
  // must be preserved for bitstream compatibility — do not "fix".
  int data_offset = 0;
  if (prev_data_id < data_id) {
    // Use the value on the previous corner as the prediction.
    data_offset = prev_data_id * num_components_;
  }
  if (next_data_id < data_id) {
    // Use the value on the next corner as the prediction.
    data_offset = next_data_id * num_components_;
  } else {
    // None of the other corners have a valid value. Use the last encoded value
    // as the prediction if possible.
    if (data_id > 0) {
      data_offset = (data_id - 1) * num_components_;
    } else {
      // We are encoding the first value. Predict 0.
      for (int i = 0; i < num_components_; ++i) {
        predicted_value_[i] = 0;
      }
      return;
    }
  }
  for (int i = 0; i < num_components_; ++i) {
    predicted_value_[i] = data[data_offset + i];
  }
}

}  // namespace draco

#endif  // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_
#endif