1 /* 2 * VMMLib - Tensor Classes 3 * 4 * @author Susanne Suter 5 * @author Jonas Boesch 6 * 7 * The Tucker3 tensor class is consists of the same components (core tensor, basis matrices u1-u3) as the tucker3 model described in: 8 * - Tucker, 1966: Some mathematical notes on three-mode factor analysis, Psychometrika. 9 * - De Lathauwer, De Moor, Vandewalle, 2000a: A multilinear singular value decomposition, SIAM J. Matrix Anal. Appl. 10 * - De Lathauwer, De Moor, Vandewalle, 2000b: On the Best rank-1 and Rank-(R_1, R_2, ..., R_N) Approximation and Applications of Higher-Order Tensors, SIAM J. Matrix Anal. Appl. 11 * - Kolda & Bader, 2009: Tensor Decompositions and Applications, SIAM Review. 12 * 13 * see also quantized Tucker3 tensor (qtucker3_tensor.hpp) 14 */ 15 16 #ifndef __VMML__TUCKER3_TENSOR__HPP__ 17 #define __VMML__TUCKER3_TENSOR__HPP__ 18 19 #include <vmmlib/t3_hooi.hpp> 20 #include <vmmlib/t3_ihooi.hpp> 21 22 23 namespace vmml { 24 25 template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value = float, typename T_coeff = double > 26 class tucker3_tensor { 27 public: 28 typedef float T_internal; 29 30 typedef tucker3_tensor< R1, R2, R3, I1, I2, I3, T_value, T_coeff > tucker3_type; 31 typedef t3_hooi< R1, R2, R3, I1, I2, I3, T_coeff > t3_hooi_type; 32 33 typedef tensor3< I1, I2, I3, T_value > t3_type; 34 typedef tensor3< R1, R2, R3, T_coeff > t3_core_type; 35 typedef matrix< I1, R1, T_coeff > u1_type; 36 typedef matrix< I2, R2, T_coeff > u2_type; 37 typedef matrix< I3, R3, T_coeff > u3_type; 38 39 typedef tensor3< I1, I2, I3, T_internal > t3_comp_type; 40 typedef tensor3< R1, R2, R3, T_internal > t3_core_comp_type; 41 typedef matrix< I1, R1, T_internal > u1_comp_type; 42 typedef matrix< I2, R2, T_internal > u2_comp_type; 43 typedef matrix< I3, R3, T_internal > u3_comp_type; 44 45 static const size_t SIZE = R1*R2*R3 + I1*R1 + I2*R2 + I3*R3; 46 47 tucker3_tensor(); 48 tucker3_tensor(t3_core_type& core); 49 
tucker3_tensor(t3_core_type& core, u1_type& U1, u2_type& U2, u3_type& U3); 50 tucker3_tensor(const t3_type& data_, u1_type& U1, u2_type& U2, u3_type& U3); 51 tucker3_tensor(const tucker3_type& other); 52 ~tucker3_tensor(); 53 set_core(t3_core_type & core)54 void set_core(t3_core_type& core) { 55 _core = t3_core_type(core); 56 _core_comp.cast_from(core); 57 }; 58 set_u1(u1_type & U1)59 void set_u1(u1_type& U1) { 60 *_u1 = U1; 61 _u1_comp->cast_from(U1); 62 }; 63 set_u2(u2_type & U2)64 void set_u2(u2_type& U2) { 65 *_u2 = U2; 66 _u2_comp->cast_from(U2); 67 }; 68 set_u3(u3_type & U3)69 void set_u3(u3_type& U3) { 70 *_u3 = U3; 71 _u3_comp->cast_from(U3); 72 }; 73 get_core() const74 t3_core_type get_core() const { 75 return _core; 76 }; //be careful: creates copy! 77 get_core(t3_core_type & data_) const78 void get_core(t3_core_type& data_) const { 79 data_ = _core; 80 }; 81 get_u1(u1_type & U1) const82 void get_u1(u1_type& U1) const { 83 U1 = *_u1; 84 }; 85 get_u2(u2_type & U2) const86 void get_u2(u2_type& U2) const { 87 U2 = *_u2; 88 }; 89 get_u3(u3_type & U3) const90 void get_u3(u3_type& U3) const { 91 U3 = *_u3; 92 }; 93 set_core_comp(t3_core_comp_type & core)94 void set_core_comp(t3_core_comp_type& core) { 95 _core_comp = t3_core_comp_type(core); 96 _core.cast_from(_core_comp); 97 }; 98 set_u1_comp(u1_comp_type & U1)99 void set_u1_comp(u1_comp_type& U1) { 100 *_u1_comp = U1; 101 _u1->cast_from(U1); 102 }; 103 set_u2_comp(u2_comp_type & U2)104 void set_u2_comp(u2_comp_type& U2) { 105 *_u2_comp = U2; 106 _u2->cast_from(U2); 107 }; 108 set_u3_comp(u3_comp_type & U3)109 void set_u3_comp(u3_comp_type& U3) { 110 *_u3_comp = U3; 111 _u3->cast_from(U3); 112 }; 113 get_core_comp(t3_core_comp_type & data_) const114 void get_core_comp(t3_core_comp_type& data_) const { 115 data_ = _core_comp; 116 }; 117 get_u1_comp(u1_comp_type & U1) const118 void get_u1_comp(u1_comp_type& U1) const { 119 U1 = *_u1_comp; 120 }; 121 get_u2_comp(u2_comp_type & U2) const122 void 
get_u2_comp(u2_comp_type& U2) const { 123 U2 = *_u2_comp; 124 }; 125 get_u3_comp(u3_comp_type & U3) const126 void get_u3_comp(u3_comp_type& U3) const { 127 U3 = *_u3_comp; 128 }; 129 130 //get number of nonzeros for tensor decomposition 131 size_t nnz() const; 132 size_t nnz(const T_value& threshold) const; 133 size_t nnz_core() const; 134 size_t size_core() const; 135 size() const136 size_t size() const { 137 return SIZE; 138 }; 139 140 void threshold_core(const size_t& nnz_core_, size_t& nnz_core_is_); 141 void threshold_core(const T_coeff& threshold_value_, size_t& nnz_core_); 142 143 template< size_t J1, size_t J2, size_t J3 > 144 typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type* 145 set_sub_core(const tensor3<J1, J2, J3, T_coeff >& sub_data_, 146 size_t row_offset, size_t col_offset, size_t slice_offset); 147 148 void reconstruct(t3_type& data_); 149 double error(t3_type& original) const; 150 151 template< typename T_init> 152 tensor_stats decompose(const t3_type& data_, T_init init, const size_t max_iterations = 10, const float tolerance = 10); 153 154 template< typename T_init> 155 tensor_stats tucker_als(const t3_type& data_, T_init init, const size_t max_iterations = 10, const float tolerance = 1e-04); 156 157 template< size_t NBLOCKS, typename T_init> 158 tensor_stats i_tucker_als(const t3_type& data_, T_init init, const size_t max_iterations = 10, const float tolerance = 1e-04); 159 160 template< size_t R, size_t NBLOCKS, typename T_init> 161 tensor_stats i_cp_tucker_als(const t3_type& data_, T_init init, const size_t max_iterations = 10, const float tolerance = 1e-04); 162 163 void als_rand(const t3_type& data_); 164 //template< typename T_init> 165 // void tucker_als(const t3_type& data_, T_init init); 166 // template< size_t NBLOCKS, typename T_init > 167 // void incr_block_diag_als(const t3_type& data_, T_init init); 168 // void incr_block_diag_als( T_init init ); 169 170 171 template< size_t K1, size_t K2, size_t K3> 172 void 
reduce_ranks(const tucker3_tensor< K1, K2, K3, I1, I2, I3, T_value, T_coeff >& other); //call TuckerJI.reduce_ranks(TuckerKI) K1 -> R1, K2 -> R2, K3 -> R3 173 174 template< size_t K1, size_t K2, size_t K3> 175 void subsampling(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor); 176 177 template< size_t K1, size_t K2, size_t K3> 178 void subsampling_on_average(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor); 179 180 template< size_t K1, size_t K2, size_t K3> 181 void region_of_interest(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, 182 const size_t& start_index1, const size_t& end_index1, 183 const size_t& start_index2, const size_t& end_index2, 184 const size_t& start_index3, const size_t& end_index3); 185 operator <<(std::ostream & os,const tucker3_type & t3)186 friend std::ostream& operator <<(std::ostream& os, const tucker3_type& t3) { 187 t3_core_type core; 188 t3.get_core(core); 189 u1_type* u1 = new u1_type; 190 t3.get_u1(*u1); 191 u2_type* u2 = new u2_type; 192 t3.get_u2(*u2); 193 u3_type* u3 = new u3_type; 194 t3.get_u3(*u3); 195 196 os << "U1: " << std::endl << *u1 << std::endl 197 << "U2: " << std::endl << *u2 << std::endl 198 << "U3: " << std::endl << *u3 << std::endl 199 << "core: " << std::endl << core << std::endl; 200 201 delete u1; 202 delete u2; 203 delete u3; 204 return os; 205 } 206 207 void cast_members(); 208 void cast_comp_members(); 209 210 protected: 211 operator =(const tucker3_type & other)212 tucker3_type operator=(const tucker3_type& other) { 213 return (*this); 214 }; 215 216 private: 217 //t3_core_type* _core ; 218 u1_type* _u1; 219 u2_type* _u2; 220 u3_type* _u3; 221 t3_core_type _core; 222 223 //used only internally for computations to have a higher precision 224 t3_core_comp_type _core_comp; 225 u1_comp_type* _u1_comp; 226 u2_comp_type* _u2_comp; 227 u3_comp_type* _u3_comp; 228 229 }; // class tucker3_tensor 230 231 
#define VMML_TEMPLATE_STRING template< size_t R1, size_t R2, size_t R3, size_t I1, size_t I2, size_t I3, typename T_value, typename T_coeff >
#define VMML_TEMPLATE_CLASSNAME tucker3_tensor< R1, R2, R3, I1, I2, I3, T_value, T_coeff >

// default constructor: allocates all basis matrices (external and internal
// precision) and zero-initializes every component
VMML_TEMPLATE_STRING
VMML_TEMPLATE_CLASSNAME::tucker3_tensor() {
    _core.zero();
    _u1 = new u1_type();
    _u1->zero();
    _u2 = new u2_type();
    _u2->zero();
    _u3 = new u3_type();
    _u3->zero();
    _core_comp.zero();
    _u1_comp = new u1_comp_type();
    _u1_comp->zero();
    _u2_comp = new u2_comp_type();
    _u2_comp->zero();
    _u3_comp = new u3_comp_type();
    _u3_comp->zero();
}

// construct from a core tensor only; basis matrices start out zeroed
VMML_TEMPLATE_STRING
VMML_TEMPLATE_CLASSNAME::tucker3_tensor(t3_core_type& core) {
    _core = core;
    _u1 = new u1_type();
    _u1->zero();
    _u2 = new u2_type();
    _u2->zero();
    _u3 = new u3_type();
    _u3->zero();
    _u1_comp = new u1_comp_type();
    _u1_comp->zero();
    _u2_comp = new u2_comp_type();
    _u2_comp->zero();
    _u3_comp = new u3_comp_type();
    _u3_comp->zero();
    // keep the internal-precision core in sync with the given core
    _core_comp.cast_from(core);
}

// construct from a core tensor plus the three basis matrices
VMML_TEMPLATE_STRING
VMML_TEMPLATE_CLASSNAME::tucker3_tensor(t3_core_type& core, u1_type& U1, u2_type& U2, u3_type& U3) {
    _core = core;
    _u1 = new u1_type(U1);
    _u2 = new u2_type(U2);
    _u3 = new u3_type(U3);
    _u1_comp = new u1_comp_type();
    _u2_comp = new u2_comp_type();
    _u3_comp = new u3_comp_type();
    // derive the internal-precision copies from the members just set
    cast_comp_members();
}

// construct from raw data and given orthogonal bases: the core is derived by
// projecting data_ onto the bases (HOSVD-style core derivation)
VMML_TEMPLATE_STRING
VMML_TEMPLATE_CLASSNAME::tucker3_tensor(const t3_type& data_, u1_type& U1, u2_type& U2, u3_type& U3) {
    _u1 = new u1_type(U1);
    _u2 = new u2_type(U2);
    _u3 = new u3_type(U3);
    _u1_comp = new u1_comp_type();
    _u2_comp = new u2_comp_type();
    _u3_comp = new u3_comp_type();

    t3_hooi_type::derive_core_orthogonal_bases(data_, *_u1, *_u2, *_u3, _core);

    cast_comp_members();
}

// copy constructor: deep-copies core and bases through the public getters
VMML_TEMPLATE_STRING
VMML_TEMPLATE_CLASSNAME::tucker3_tensor(const tucker3_type& other) {
    _u1 = new u1_type();
    _u2 = new u2_type();
    _u3 = new u3_type();
    _u1_comp = new u1_comp_type();
    _u2_comp = new u2_comp_type();
    _u3_comp = new u3_comp_type();

    other.get_core(_core);
    other.get_u1(*_u1);
    other.get_u2(*_u2);
    other.get_u3(*_u3);

    cast_comp_members();
}

// refresh the external (T_coeff) members from the internal-precision copies
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::cast_members() {
    _u1->cast_from(*_u1_comp);
    _u2->cast_from(*_u2_comp);
    _u3->cast_from(*_u3_comp);
    _core.cast_from(_core_comp);
}

// refresh the internal-precision copies from the external (T_coeff) members
VMML_TEMPLATE_STRING
void
VMML_TEMPLATE_CLASSNAME::cast_comp_members() {
    _u1_comp->cast_from(*_u1);
    _u2_comp->cast_from(*_u2);
    _u3_comp->cast_from(*_u3);
    _core_comp.cast_from(_core);
}

// number of nonzero coefficients in the (internal-precision) core
VMML_TEMPLATE_STRING
size_t
VMML_TEMPLATE_CLASSNAME::nnz_core() const {
    return _core_comp.nnz();
}

// total number of core coefficients (R1*R2*R3)
VMML_TEMPLATE_STRING
size_t
VMML_TEMPLATE_CLASSNAME::size_core() const {
    return _core_comp.size();
}

// releases the heap-allocated basis matrices (external and internal precision)
VMML_TEMPLATE_STRING
VMML_TEMPLATE_CLASSNAME::~tucker3_tensor() {
    delete _u1;
    delete _u2;
    delete _u3;
    delete _u1_comp;
    delete _u2_comp;
    delete _u3_comp;
}
It's mangling the error computation (after the cast, frobenius_norm() changes substantially) 363 //convert reconstructed data, which is in type T_internal (double, float) to T_value (uint8 or uint16) 364 if ((sizeof (T_value) == 1) || (sizeof (T_value) == 2)) { 365 data_.float_t_to_uint_t(data); 366 } else { 367 data_.cast_from(data); 368 } 369 370 } 371 372 VMML_TEMPLATE_STRING 373 double error(t3_type & original) const374 VMML_TEMPLATE_CLASSNAME::error(t3_type& original) const { 375 376 t3_comp_type data; 377 t3_ttm::full_tensor3_matrix_multiplication(_core_comp, *_u1_comp, *_u2_comp, *_u3_comp, data); 378 379 double err = data.frobenius_norm(original) / original.frobenius_norm() * 100; 380 return err; 381 } 382 383 VMML_TEMPLATE_STRING 384 void threshold_core(const size_t & nnz_core_,size_t & nnz_core_is_)385 VMML_TEMPLATE_CLASSNAME::threshold_core(const size_t& nnz_core_, size_t& nnz_core_is_) { 386 nnz_core_is_ = _core_comp.nnz(); 387 T_coeff threshold_value = 0.00001; 388 while (nnz_core_is_ > nnz_core_) { 389 _core_comp.threshold(threshold_value); 390 nnz_core_is_ = _core_comp.nnz(); 391 392 //threshold value scheme 393 if (threshold_value < 0.01) { 394 threshold_value *= 10; 395 } else if (threshold_value < 0.2) { 396 threshold_value += 0.05; 397 } else if (threshold_value < 1) { 398 threshold_value += 0.25; 399 } else if (threshold_value < 10) { 400 threshold_value += 1; 401 } else if (threshold_value < 50) { 402 threshold_value += 10; 403 } else if (threshold_value < 200) { 404 threshold_value += 50; 405 } else if (threshold_value < 500) { 406 threshold_value += 100; 407 } else if (threshold_value < 2000) { 408 threshold_value += 500; 409 } else if (threshold_value < 5000) { 410 threshold_value += 3000; 411 } else if (threshold_value >= 5000) { 412 threshold_value += 5000; 413 } 414 } 415 _core.cast_from(_core_comp); 416 } 417 418 VMML_TEMPLATE_STRING 419 void threshold_core(const T_coeff & threshold_value_,size_t & nnz_core_)420 
VMML_TEMPLATE_CLASSNAME::threshold_core(const T_coeff& threshold_value_, size_t& nnz_core_) { 421 _core_comp.threshold(threshold_value_); 422 nnz_core_ = _core_comp.nnz(); 423 _core.cast_from(_core_comp); 424 } 425 426 VMML_TEMPLATE_STRING 427 template< size_t J1, size_t J2, size_t J3 > 428 typename enable_if< J1 <= I1 && J2 <= I2 && J3 <= I3 >::type* 429 VMML_TEMPLATE_CLASSNAME:: set_sub_core(const tensor3<J1,J2,J3,T_coeff> & sub_data_,size_t row_offset,size_t col_offset,size_t slice_offset)430 set_sub_core(const tensor3<J1, J2, J3, T_coeff >& sub_data_, 431 size_t row_offset, size_t col_offset, size_t slice_offset) { 432 _core_comp.set_sub_tensor3(sub_data_, row_offset, col_offset, slice_offset); 433 _core.cast_from(_core_comp); 434 } 435 436 VMML_TEMPLATE_STRING 437 void als_rand(const t3_type & data_)438 VMML_TEMPLATE_CLASSNAME::als_rand(const t3_type& data_) { 439 typedef t3_hooi< R1, R2, R3, I1, I2, I3, T_internal > hooi_type; 440 tucker_als(data_, typename hooi_type::init_random()); 441 } 442 443 444 VMML_TEMPLATE_STRING 445 template< typename T_init> 446 tensor_stats decompose(const t3_type & data_,T_init init,const size_t max_iterations,const float tolerance)447 VMML_TEMPLATE_CLASSNAME::decompose(const t3_type& data_, T_init init, const size_t max_iterations, const float tolerance) { 448 return tucker_als(data_, init, max_iterations, tolerance); 449 } 450 451 VMML_TEMPLATE_STRING 452 template< typename T_init> 453 tensor_stats tucker_als(const t3_type & data_,T_init init,const size_t max_iterations,const float tolerance)454 VMML_TEMPLATE_CLASSNAME::tucker_als(const t3_type& data_, T_init init, const size_t max_iterations, const float tolerance) { 455 tensor_stats result; 456 457 t3_comp_type data; 458 data.cast_from(data_); 459 460 typedef t3_hooi< R1, R2, R3, I1, I2, I3, T_internal > hooi_type; 461 result += hooi_type::als(data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance); 462 463 cast_members(); 464 465 return result; 
466 } 467 468 VMML_TEMPLATE_STRING 469 template< size_t NBLOCKS, typename T_init> 470 tensor_stats i_tucker_als(const t3_type & data_,T_init init,const size_t max_iterations,const float tolerance)471 VMML_TEMPLATE_CLASSNAME::i_tucker_als(const t3_type& data_, T_init init, const size_t max_iterations, const float tolerance) { 472 tensor_stats result; 473 474 t3_comp_type data; 475 data.cast_from(data_); 476 477 typedef t3_ihooi< R1, R2, R3, NBLOCKS, I1, I2, I3, T_internal > ihooi_type; 478 result += ihooi_type::i_als(data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance); 479 480 cast_members(); 481 482 return result; 483 } 484 485 VMML_TEMPLATE_STRING 486 template< size_t R, size_t NBLOCKS, typename T_init> 487 tensor_stats i_cp_tucker_als(const t3_type & data_,T_init init,const size_t max_iterations,const float tolerance)488 VMML_TEMPLATE_CLASSNAME::i_cp_tucker_als(const t3_type& data_, T_init init, const size_t max_iterations, const float tolerance) { 489 tensor_stats result; 490 491 t3_comp_type data; 492 data.cast_from(data_); 493 494 typedef t3_ihooi< R1, R2, R3, NBLOCKS, I1, I2, I3, T_internal > ihooi_type; 495 result += ihooi_type::template i_cp_als < R > (data, *_u1_comp, *_u2_comp, *_u3_comp, _core_comp, init, 0, max_iterations, tolerance); 496 497 cast_members(); 498 499 return result; 500 } 501 502 VMML_TEMPLATE_STRING 503 template< size_t K1, size_t K2, size_t K3> 504 void reduce_ranks(const tucker3_tensor<K1,K2,K3,I1,I2,I3,T_value,T_coeff> & other)505 VMML_TEMPLATE_CLASSNAME::reduce_ranks(const tucker3_tensor< K1, K2, K3, I1, I2, I3, T_value, T_coeff >& other) 506 //TuckerJI.rank_reduction(TuckerKI) K1 -> R1, K2 -> R2, K3 -> R3; I1, I2, I3 stay the same 507 { 508 assert(R1 <= K1); 509 assert(R2 <= K2); 510 assert(R3 <= K3); 511 512 //reduce basis matrices 513 matrix< I1, K1, T_coeff >* u1 = new matrix< I1, K1, T_coeff > (); 514 other.get_u1(*u1); 515 for (size_t r1 = 0; r1 < R1; ++r1) { 516 _u1->set_column(r1, 
u1->get_column(r1)); 517 } 518 519 matrix< I2, K2, T_coeff >* u2 = new matrix< I2, K2, T_coeff > (); 520 other.get_u2(*u2); 521 for (size_t r2 = 0; r2 < R2; ++r2) { 522 _u2->set_column(r2, u2->get_column(r2)); 523 } 524 525 matrix< I3, K3, T_coeff >* u3 = new matrix< I3, K3, T_coeff > (); 526 other.get_u3(*u3); 527 for (size_t r3 = 0; r3 < R3; ++r3) { 528 _u3->set_column(r3, u3->get_column(r3)); 529 } 530 531 //reduce core 532 tensor3<K1, K2, K3, T_coeff > other_core; 533 other.get_core(other_core); 534 535 for (size_t r3 = 0; r3 < R3; ++r3) { 536 for (size_t r1 = 0; r1 < R1; ++r1) { 537 for (size_t r2 = 0; r2 < R2; ++r2) { 538 _core.at(r1, r2, r3) = other_core.at(r1, r2, r3); 539 } 540 } 541 } 542 543 544 cast_comp_members(); 545 546 delete u1; 547 delete u2; 548 delete u3; 549 } 550 551 VMML_TEMPLATE_STRING 552 template< size_t K1, size_t K2, size_t K3> 553 void subsampling(const tucker3_tensor<R1,R2,R3,K1,K2,K3,T_value,T_coeff> & other,const size_t & factor)554 VMML_TEMPLATE_CLASSNAME::subsampling(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor) { 555 assert(I1 <= K1); 556 assert(I1 <= K2); 557 assert(I1 <= K3); 558 559 //subsample basis matrices 560 matrix< K1, R1, T_coeff >* u1 = new matrix< K1, R1, T_coeff > (); 561 other.get_u1(*u1); 562 for (size_t i1 = 0, i = 0; i1 < K1; i1 += factor, ++i) { 563 _u1->set_row(i, u1->get_row(i1)); 564 } 565 566 matrix< K2, R2, T_coeff >* u2 = new matrix< K2, R2, T_coeff > (); 567 other.get_u2(*u2); 568 for (size_t i2 = 0, i = 0; i2 < K2; i2 += factor, ++i) { 569 _u2->set_row(i, u2->get_row(i2)); 570 } 571 572 matrix< K3, R3, T_coeff >* u3 = new matrix< K3, R3, T_coeff > (); 573 other.get_u3(*u3); 574 for (size_t i3 = 0, i = 0; i3 < K3; i3 += factor, ++i) { 575 _u3->set_row(i, u3->get_row(i3)); 576 } 577 578 other.get_core(_core); 579 580 cast_comp_members(); 581 delete u1; 582 delete u2; 583 delete u3; 584 } 585 586 VMML_TEMPLATE_STRING 587 template< size_t K1, size_t K2, size_t 
K3> 588 void subsampling_on_average(const tucker3_tensor<R1,R2,R3,K1,K2,K3,T_value,T_coeff> & other,const size_t & factor)589 VMML_TEMPLATE_CLASSNAME::subsampling_on_average(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, const size_t& factor) { 590 assert(I1 <= K1); 591 assert(I1 <= K2); 592 assert(I1 <= K3); 593 594 595 //subsample basis matrices 596 matrix< K1, R1, T_coeff >* u1 = new matrix< K1, R1, T_coeff > (); 597 other.get_u1(*u1); 598 for (size_t i1 = 0, i = 0; i1 < K1; i1 += factor, ++i) { 599 vector< R1, T_internal > tmp_row = u1->get_row(i1); 600 T_internal num_items_averaged = 1; 601 for (size_t j = i1 + 1; (j < (factor + i1)) & (j < K1); ++j, ++num_items_averaged) 602 tmp_row += u1->get_row(j); 603 604 tmp_row /= num_items_averaged; 605 _u1->set_row(i, tmp_row); 606 } 607 608 matrix< K2, R2, T_coeff >* u2 = new matrix< K2, R2, T_coeff > (); 609 other.get_u2(*u2); 610 for (size_t i2 = 0, i = 0; i2 < K2; i2 += factor, ++i) { 611 vector< R2, T_internal > tmp_row = u2->get_row(i2); 612 T_internal num_items_averaged = 1; 613 for (size_t j = i2 + 1; (j < (factor + i2)) & (j < K2); ++j, ++num_items_averaged) 614 tmp_row += u2->get_row(j); 615 616 tmp_row /= num_items_averaged; 617 _u2->set_row(i, u2->get_row(i2)); 618 } 619 620 matrix< K3, R3, T_coeff >* u3 = new matrix< K3, R3, T_coeff > (); 621 other.get_u3(*u3); 622 for (size_t i3 = 0, i = 0; i3 < K3; i3 += factor, ++i) { 623 vector< R3, T_internal > tmp_row = u3->get_row(i3); 624 T_internal num_items_averaged = 1; 625 for (size_t j = i3 + 1; (j < (factor + i3)) & (j < K3); ++j, ++num_items_averaged) 626 tmp_row += u3->get_row(j); 627 628 tmp_row /= num_items_averaged; 629 _u3->set_row(i, u3->get_row(i3)); 630 } 631 632 other.get_core(_core); 633 cast_comp_members(); 634 delete u1; 635 delete u2; 636 delete u3; 637 } 638 639 VMML_TEMPLATE_STRING 640 template< size_t K1, size_t K2, size_t K3> 641 void region_of_interest(const tucker3_tensor<R1,R2,R3,K1,K2,K3,T_value,T_coeff> & 
other,const size_t & start_index1,const size_t & end_index1,const size_t & start_index2,const size_t & end_index2,const size_t & start_index3,const size_t & end_index3)642 VMML_TEMPLATE_CLASSNAME::region_of_interest(const tucker3_tensor< R1, R2, R3, K1, K2, K3, T_value, T_coeff >& other, 643 const size_t& start_index1, const size_t& end_index1, 644 const size_t& start_index2, const size_t& end_index2, 645 const size_t& start_index3, const size_t& end_index3) { 646 assert(I1 <= K1); 647 assert(I1 <= K2); 648 assert(I1 <= K3); 649 assert(start_index1 < end_index1); 650 assert(start_index2 < end_index2); 651 assert(start_index3 < end_index3); 652 assert(end_index1 < K1); 653 assert(end_index2 < K2); 654 assert(end_index3 < K3); 655 656 //region_of_interes of basis matrices 657 matrix< K1, R1, T_internal >* u1 = new matrix< K1, R1, T_internal > (); 658 other.get_u1_comp(*u1); 659 for (size_t i1 = start_index1, i = 0; i1 < end_index1; ++i1, ++i) { 660 _u1_comp->set_row(i, u1->get_row(i1)); 661 } 662 663 matrix< K2, R2, T_internal>* u2 = new matrix< K2, R2, T_internal > (); 664 other.get_u2_comp(*u2); 665 for (size_t i2 = start_index2, i = 0; i2 < end_index2; ++i2, ++i) { 666 _u2_comp->set_row(i, u2->get_row(i2)); 667 } 668 669 matrix< K3, R3, T_internal >* u3 = new matrix< K3, R3, T_internal > (); 670 other.get_u3_comp(*u3); 671 for (size_t i3 = start_index3, i = 0; i3 < end_index3; ++i3, ++i) { 672 _u3_comp->set_row(i, u3->get_row(i3)); 673 } 674 675 other.get_core_comp(_core_comp); 676 677 //cast_comp_members(); 678 delete u1; 679 delete u2; 680 delete u3; 681 } 682 683 VMML_TEMPLATE_STRING 684 size_t nnz() const685 VMML_TEMPLATE_CLASSNAME::nnz() const { 686 size_t counter = 0; 687 688 counter += _u1_comp->nnz(); 689 counter += _u2_comp->nnz(); 690 counter += _u3_comp->nnz(); 691 counter += _core_comp.nnz(); 692 693 return counter; 694 } 695 696 VMML_TEMPLATE_STRING 697 size_t nnz(const T_value & threshold) const698 VMML_TEMPLATE_CLASSNAME::nnz(const T_value& 
threshold) const { 699 size_t counter = 0; 700 701 counter += _u1_comp->nnz(threshold); 702 counter += _u2_comp->nnz(threshold); 703 counter += _u3_comp->nnz(threshold); 704 counter += _core_comp.nnz(threshold); 705 706 return counter; 707 } 708 709 710 #undef VMML_TEMPLATE_STRING 711 #undef VMML_TEMPLATE_CLASSNAME 712 713 } // namespace vmml 714 715 #endif 716 717