/*
  This file is part of MADNESS.

  Copyright (C) 2007,2010 Oak Ridge National Laboratory

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

  For more information please contact:

  Robert J. Harrison
  Oak Ridge National Laboratory
  One Bethel Valley Road
  P.O. Box 2008, MS-6367

  email: harrisonrj@ornl.gov
  tel:   865-241-3937
  fax:   865-572-0680

  $Id$
*/

#ifndef MADNESS_TENSOR_LOWRANKTENSOR_H_
#define MADNESS_TENSOR_LOWRANKTENSOR_H_

#include <memory>
#include <vector>

#include "../world/madness_exception.h"
#include "../world/print.h"
#include "gentensor.h"
#include "slice.h"
#include "srconf.h"
#include "tensor.h"
#include "tensortrain.h"
#include "type_data.h"

namespace madness {

template<typename T>
class SVDTensor : public SRConf<T> {
public:

    using SRConf<T>::SRConf;

    explicit SVDTensor() : SRConf<T>() { }

    explicit SVDTensor(const Tensor<T>& rhs) : SRConf<T>(rhs) { }

    explicit SVDTensor(const SVDTensor<T>& rhs) : SRConf<T>(static_cast<SRConf<T> >(rhs)) { }

    explicit SVDTensor(const SRConf<T>& rhs) : SRConf<T>(SRConf<T>(rhs)) { }

    explicit SVDTensor(const std::vector<long>& dims) : SRConf<T>(dims.size(),dims[0],TT_2D) { }

    explicit SVDTensor(const Tensor<double>& weights, const Tensor<T>& vector1,
            const Tensor<T>& vector2, const unsigned int& dim,
            const unsigned int maxk) : SRConf<T>(weights, vector1, vector2, dim, maxk) {}

    SVDTensor& operator=(const T& number) {
        SRConf<T>& base=*this;
        base=number;
        return *this;
    }
};

// forward declaration
template <class T> class SliceLowRankTensor;


template<typename T>
class LowRankTensor {

public:

    /// C++ typename of the real type associated with a complex type.
    typedef typename TensorTypeData<T>::scalar_type scalar_type;

    /// C++ typename of the floating point type associated with scalar real type
    typedef typename TensorTypeData<T>::float_scalar_type float_scalar_type;

    struct implT {
        std::shared_ptr<Tensor<T> > full;
        std::shared_ptr<TensorTrain<T> > tt;
        std::shared_ptr<SVDTensor<T> > svd;

        /// ctor resets all shared_ptr
        implT() : full(), tt(), svd() {}

        /// make sure only a single pointer is set
        void check_unique() const {
            MADNESS_ASSERT(full.get() xor tt.get() xor svd.get());
        }
    };

    TensorType type;
    implT impl;

    /// empty ctor
    LowRankTensor() : type(TT_NONE) {}

    /// copy ctor, shallow
    LowRankTensor(const LowRankTensor<T>& other) : type(other.type),
            impl(other.impl) {
    }

    /// ctor with dimensions; constructs tensor filled with zeros
    LowRankTensor(const std::vector<long>& dim, const TensorType& tt)
        : type(tt) {

        if (type==TT_FULL) impl.full=std::shared_ptr<Tensor<T> >(new Tensor<T>(dim));
        if (type==TT_2D) impl.svd=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(dim));
        if (type==TT_TENSORTRAIN) impl.tt=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(dim));
    }

    /// ctor with dimensions; constructs tensor filled with zeros
    LowRankTensor(const std::vector<long>& dim, const TensorArgs& targs) :
        LowRankTensor(dim, targs.tt) {
    }

    /// ctor with dimensions; all dims have the same value k
    LowRankTensor(const TensorType& tt, const long k, const long ndim) :
        LowRankTensor(std::vector<long>(ndim,k), tt) {
    }

    /// ctor with a regular Tensor and arguments, deep
    LowRankTensor(const Tensor<T>& rhs, const double& thresh, const TensorType& tt) :
        LowRankTensor(rhs,TensorArgs(thresh,tt)) {
    }

    /// ctor with a regular Tensor and arguments, deep
    LowRankTensor(const Tensor<T>& rhs, const TensorArgs& targs)
        : type(targs.tt) {

        if (not rhs.has_data()) {
            type=TT_NONE;

        } else {

            // deep copy, either explicitly (full) or implicitly (SVD, TensorTrain)
            if (type==TT_FULL) {
                impl.full=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(rhs)));

            } else if ((type==TT_2D) or (type==TT_TENSORTRAIN)) {

                // construct tensortrain first, convert into SVD format afterwards
                std::shared_ptr<TensorTrain<T> > tt(new TensorTrain<T>(rhs,targs.thresh*facReduce()));

                if (type==TT_2D) {
                    Tensor<T> U,VT;
                    Tensor< typename Tensor<T>::scalar_type > s;
                    tt->two_mode_representation(U,VT,s);
                    const long r=VT.dim(0);
                    const long nd=VT.ndim();
                    MADNESS_ASSERT(U.dim(nd-1)==r);

                    // empty SVDTensor due to zero rank
                    if (r==0) {
                        impl.svd.reset(new SVDTensor<T>(tt->dims()));

                    } else {
                        Tensor<T> UU=U.reshape(U.size()/r,r);
                        SVDTensor<T> sr(s, copy(transpose(UU)), VT.reshape(r,VT.size()/r),
                                rhs.ndim(), rhs.dim(0));
                        sr.normalize();
                        impl.svd.reset(new SVDTensor<T>(sr));
                    }
                } else if (type==TT_TENSORTRAIN) {
                    impl.tt=tt;
                }
            }
        }
    }

    /// ctor with a regular Tensor, deep
    LowRankTensor(const Tensor<T>& other) : type(TT_FULL), impl() {
        impl.full=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(other)));
    }

    /// ctor with a TensorTrain as argument, shallow
    LowRankTensor(const TensorTrain<T>& tt) : type(TT_TENSORTRAIN), impl() {
        impl.tt=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(tt));
    }

    /// ctor with a SVDTensor as argument, shallow
    LowRankTensor(const SVDTensor<T>& t) : type(TT_2D), impl() {
        impl.svd=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(t));
    }

    /// ctor with a SliceLowRankTensor as argument, deep
    LowRankTensor(const SliceLowRankTensor<T>& other) : type(other.lrt.type), impl() {
        *this=other;
    }
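
    // Example usage of the constructors above (an illustrative sketch, not part of
    // the original documentation; it assumes the usual MADNESS Tensor API such as
    // Tensor<double>::fillrandom()):
    //
    //   Tensor<double> t(5,5,5,5);
    //   t.fillrandom();
    //   LowRankTensor<double> full(t);                           // full representation, deep copy
    //   LowRankTensor<double> svd(t, 1.e-5, TT_2D);              // compressed SVD (two-mode) format
    //   LowRankTensor<double> train(t, TensorArgs(1.e-5, TT_TENSORTRAIN));  // tensor-train format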

    /// shallow assignment operator
    LowRankTensor& operator=(const LowRankTensor<T>& other) {
        if (this!=&other) {
            type=other.type;
            impl=other.impl;
        }
        return *this;
    }

    /// deep assignment with slices: g0 = g1(s)
    LowRankTensor& operator=(const SliceLowRankTensor<T>& other) {
        type=other.lrt.type;
        const std::vector<Slice>& s=other.thisslice;
        if (this->type==TT_FULL)
            impl.full.reset(new Tensor<T>(copy((*other.lrt.impl.full)(s))));
        else if (this->type==TT_2D)
            impl.svd.reset(new SVDTensor<T>(other.lrt.impl.svd->copy_slice(s)));
        else if (this->type==TT_TENSORTRAIN)
            impl.tt.reset(new TensorTrain<T>(copy(*(other.lrt.impl.tt),s)));
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return *this;
    }

    /// Type conversion makes a deep copy
    template <class Q> operator LowRankTensor<Q>() const { // type conv => deep copy

        LowRankTensor<Q> result;
        if (this->type==TT_FULL) {
            result.impl.full.reset(new Tensor<Q>(*impl.full));
        } else if (this->type==TT_2D) {
            MADNESS_EXCEPTION("no type conversion for TT_2D yet",1);
        } else if (this->type==TT_TENSORTRAIN) {
            result.impl.tt.reset(new TensorTrain<Q>(*impl.tt));
        } else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return result;
    }


    /// general slicing, shallow; for temporary use only!
    SliceLowRankTensor<T> operator()(const std::vector<Slice>& s) {
        return SliceLowRankTensor<T>(*this,s);
    }

    /// general slicing, shallow; for temporary use only!
    const SliceLowRankTensor<T> operator()(const std::vector<Slice>& s) const {
        return SliceLowRankTensor<T>(*this,s);
    }


    // /// shallow assignment operator
    // LowRankTensor& operator=(const double& fac) {
    //     MADNESS_EXCEPTION("no assignment of numbers in LowRankTensor",1);
    //     return *this;
    // }

    /// deep copy
    friend LowRankTensor copy(const LowRankTensor& other) {
        LowRankTensor result;
        result.type=other.type;
        if (other.type==TT_FULL)
            result.impl.full=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(*other.impl.full)));
        if (other.type==TT_2D)
            result.impl.svd=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(copy(*other.impl.svd)));
        if (other.type==TT_TENSORTRAIN)
            result.impl.tt=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(copy(*other.impl.tt)));
        return result;
    }

    /// return the tensor type
    TensorType tensor_type() const {return type;}

    /// convert this to a new LowRankTensor of given tensor type
    LowRankTensor convert(const TensorArgs& targs) const {

        // fast return if old and new type are identical
        if (this->tensor_type()==targs.tt) return copy(*this);

        LowRankTensor result;

        if (tensor_type()==TT_FULL) {
            // TT_FULL -> TT_SVD
            // TT_FULL -> TT_TENSORTRAIN
            result=LowRankTensor(*impl.full,targs);

        } else if (tensor_type()==TT_2D) {

            // TT_SVD -> TT_FULL
            if (targs.tt==TT_FULL) result=LowRankTensor(this->reconstruct_tensor(),targs);

            // TT_SVD -> TT_TENSORTRAIN
            else if (targs.tt==TT_TENSORTRAIN) {

                // extract core tensors from SVD representation
                Tensor< typename Tensor<T>::scalar_type >& s=impl.svd->weights_;
                const Tensor<T>& U=impl.svd->ref_vector(0);     // (r,k,k,..,k)
                const Tensor<T>& VT=impl.svd->ref_vector(1);    // (r,k,k,..,k)

                const long rank=s.size();

                if (rank==0) {
                    std::vector<long> dims(this->ndim());
                    for (int i=0; i<dims.size(); ++i) dims[i]=this->dim(i);
                    result=LowRankTensor(dims,targs);
                } else {

                    // convolve singular values into U/ core[0]
                    std::vector<Tensor<T> > core(2);
                    core[0]=transpose(U.reshape(rank,U.size()/rank));   // (k^n,r)
                    for (int j=0; j<core[0].dim(0); ++j) core[0](j,_).emul(s);

                    core[1]=VT.reshape(rank,VT.size()/rank);            // (r,k^m)

                    // construct TensorTrain with 2 dimensions only
                    result=LowRankTensor(core);

                    // set correct dimensions
                    const long k=impl.svd->get_k();
                    for (long d=0; d<impl.svd->dim(); ++d) {
                        if (result.dim(d)==k) continue;

                        const long k1=k;
                        const long k2=result.dim(d)/k1;
                        result=result.impl.tt->splitdim(d,k1,k2,targs.thresh*facReduce());
                    }
                }
            } else {
                MADNESS_EXCEPTION("confused tensor types in convert TT_SVD -> ?",1);
            }

        } else if (tensor_type()==TT_TENSORTRAIN) {

            if (targs.tt==TT_FULL) {

                // TT_TENSORTRAIN -> TT_FULL
                result=LowRankTensor(this->reconstruct_tensor(),targs);

            } else {

                // TT_TENSORTRAIN -> TT_SVD
                Tensor<T> U,VT;
                Tensor< typename Tensor<T>::scalar_type > s;
                impl.tt->two_mode_representation(U, VT, s);
                long rank=s.size();
                if (rank>0) {
                    long n=1,m=1;
                    for (int i=0; i<U.ndim()-1; ++i) n*=U.dim(i);
                    for (int i=1; i<VT.ndim(); ++i) m*=VT.dim(i);
                    MADNESS_ASSERT(rank*n==U.size());
                    MADNESS_ASSERT(rank*m==VT.size());
                    U=copy(transpose(U.reshape(n,rank)));   // make it contiguous
                    VT=VT.reshape(rank,m);
                    SVDTensor<T> svdtensor(s, U, VT, ndim(), dim(0));
                    result=LowRankTensor<T>(svdtensor);
                } else {
                    SVDTensor<T> svdtensor(ndim(), dim(0), TT_2D);
                    result=LowRankTensor<T>(svdtensor);
                }
            }

        } else {
            MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::convert",1);
        }

        return result;
    }
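
    // Example of switching representations via convert() (an illustrative sketch,
    // not part of the original documentation; full_t is a hypothetical full
    // madness::Tensor<double>):
    //
    //   LowRankTensor<double> full(full_t);                                   // TT_FULL
    //   LowRankTensor<double> compressed=full.convert(TensorArgs(1.e-5, TT_2D));
    //   LowRankTensor<double> back=compressed.convert(TensorArgs(1.e-5, TT_FULL));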

    long ndim() const {
        if (type==TT_NONE) return -1;
        else if (type==TT_FULL) return impl.full->ndim();
        else if (type==TT_2D) return impl.svd->dim();
        else if (type==TT_TENSORTRAIN) return impl.tt->ndim();
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return -1;
    }

    /// return the number of entries in dimension i
    long dim(const int i) const {
        if (type==TT_NONE) return -1;
        else if (type==TT_FULL) return impl.full->dim(i);
        else if (type==TT_2D) return impl.svd->get_k();
        else if (type==TT_TENSORTRAIN) return impl.tt->dim(i);
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return -1;
    }


    void normalize() {
        if (type==TT_2D) impl.svd->normalize();
    }

    float_scalar_type normf() const {
        if (type==TT_NONE) return 0.0;
        else if (type==TT_FULL) return impl.full->normf();
        else if (type==TT_2D) return impl.svd->normf();
        else if (type==TT_TENSORTRAIN) return impl.tt->normf();
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return 0;
    }

    float_scalar_type svd_normf() const {
        if (type==TT_FULL) return impl.full->normf();
        else if (type==TT_2D) return impl.svd->svd_normf();
        else if (type==TT_TENSORTRAIN) return impl.tt->normf();
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return 0;
    }


    /// Inplace multiplication by scalar of supported type (legacy name)

    /// @param[in] fac Scalar value
    /// @return %Reference to this tensor
    template <typename Q>
    typename IsSupported<TensorTypeData<Q>,LowRankTensor<T>&>::type
    scale(Q fac) {
        if (type==TT_NONE) return *this;
        else if (type==TT_FULL) impl.full->scale(T(fac));
        else if (type==TT_2D) impl.svd->scale(T(fac));
        else if (type==TT_TENSORTRAIN) impl.tt->scale(T(fac));
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return *this;
    }

    Tensor<T> full_tensor_copy() const {
        if (type==TT_NONE) return Tensor<T>();
        else if (type==TT_FULL) return copy(*impl.full);
        else if (type==TT_2D) return impl.svd->reconstruct();
        else if (type==TT_TENSORTRAIN) return impl.tt->reconstruct();
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return Tensor<T>();
    }

    const Tensor<T>& full_tensor() const {
        MADNESS_ASSERT(type==TT_FULL);
        return *impl.full;
    }

    Tensor<T>& full_tensor() {
        MADNESS_ASSERT(type==TT_FULL);
        return *impl.full;
    }


    /// reconstruct this to return a full tensor
    Tensor<T> reconstruct_tensor() const {

        if (type==TT_FULL) return full_tensor();
        else if (type==TT_2D or type==TT_TENSORTRAIN) return full_tensor_copy();
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return Tensor<T>();
    }


    static double facReduce() {return 1.e-3;}
    static double fac_reduce() {return 1.e-3;}

    long rank() const {
        if (type==TT_NONE) return 0;
        else if (type==TT_FULL) return -1;
        else if (type==TT_2D) return impl.svd->rank();
        else if (type==TT_TENSORTRAIN) {
            std::vector<long> r=impl.tt->ranks();
            return *(std::max_element(r.begin(), r.end()));
        }
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return 0l;
    }

    bool has_data() const {return (type != TT_NONE);}

    bool has_no_data() const {return (not has_data());}

    long size() const {
        if (type==TT_NONE) return 0l;
        else if (type==TT_FULL) return impl.full->size();
        else if (type==TT_2D) return impl.svd->nCoeff();
        else if (type==TT_TENSORTRAIN) return impl.tt->size();
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return 0l;
    }

    long real_size() const {
        if (type==TT_NONE) return 0l;
        else if (type==TT_FULL) return impl.full->size();
        else if (type==TT_2D) return impl.svd->real_size();
        else if (type==TT_TENSORTRAIN) return impl.tt->real_size();
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return 0l;
    }

    /// returns the trace of <this|rhs>
    template<typename Q>
    TENSOR_RESULT_TYPE(T,Q) trace_conj(const LowRankTensor<Q>& rhs) const {

        if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry",1);
        if (TensorTypeData<Q>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry",1);

        typedef TENSOR_RESULT_TYPE(T,Q) resultT;
        // fast return if possible
        if ((this->rank()==0) or (rhs.rank()==0)) return resultT(0.0);

        MADNESS_ASSERT(this->type==rhs.type);

        if (type==TT_NONE) return resultT(0.0);
        else if (type==TT_FULL) return impl.full->trace_conj(*(rhs.impl.full));
        else if (type==TT_2D) return overlap(*impl.svd,*(rhs.impl.svd));
        else if (type==TT_TENSORTRAIN) return impl.tt->trace(*rhs.impl.tt);
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return TENSOR_RESULT_TYPE(T,Q)(0);
    }

    /// multiply with a number
    template<typename Q>
    LowRankTensor<TENSOR_RESULT_TYPE(T,Q)> operator*(const Q& x) const {
        LowRankTensor<TENSOR_RESULT_TYPE(T,Q)> result(copy(*this));
        result.scale(x);
        return result;
    }

    LowRankTensor& operator+=(const LowRankTensor& other) {
        gaxpy(1.0,other,1.0);
        return *this;
    }

    LowRankTensor& operator-=(const LowRankTensor& other) {
        gaxpy(1.0,other,-1.0);
        return *this;
    }

    LowRankTensor& gaxpy(const T alpha, const LowRankTensor& other, const T beta) {

        if (this->type != TT_NONE) MADNESS_ASSERT(this->type==other.type);

        // fast return if possible
        if (this->type==TT_NONE) {
            *this=other*beta;
        } else if (type==TT_FULL) {
            impl.full->gaxpy(alpha,*other.impl.full,beta);
        } else if (type==TT_2D) {
            if (not (alpha==1.0)) this->scale(alpha);
            impl.svd->append(*other.impl.svd,beta);
        } else if (type==TT_TENSORTRAIN) {
            impl.tt->gaxpy(alpha,*other.impl.tt,beta);
        } else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return *this;
    }
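
    // Accumulation sketch (illustrative only, not part of the original documentation):
    // in the TT_2D case gaxpy() appends to the SVD representation, so the rank grows
    // and should be reduced afterwards:
    //
    //   a.gaxpy(1.0, b, -0.5);      // a <- a - 0.5*b
    //   a.reduce_rank(1.e-5);       // re-compress to the requested threshold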

    /// assign a number to this tensor
    LowRankTensor& operator=(const T& number) {
        if (type==TT_FULL) {
            *impl.full=number;
        } else if (type==TT_2D) {
            *impl.svd=number;
        } else if (type==TT_TENSORTRAIN) {
            *impl.tt=number;
        } else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return *this;
    }

    void add_SVD(const LowRankTensor& other, const double& thresh) {
        if (type==TT_FULL) impl.full->operator+=(*other.impl.full);
        else if (type==TT_2D) impl.svd->add_SVD((*other.impl.svd),thresh*facReduce());
        else if (type==TT_TENSORTRAIN) impl.tt->operator+=(*other.impl.tt);
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
    }

    /// Inplace multiply by corresponding elements of argument Tensor
    LowRankTensor<T>& emul(const LowRankTensor<T>& other) {

        // fast return if possible
        if (this->type==TT_NONE or other.type==TT_NONE) {
            MADNESS_EXCEPTION("no TT_NONE in LowRankTensor::emul",1);
        }

        MADNESS_ASSERT(this->type==other.type);

        if (type==TT_FULL) {
            impl.full->emul(*other.impl.full);
        } else if (type==TT_2D) {
            impl.svd->emul(*other.impl.svd);
        } else if (type==TT_TENSORTRAIN) {
            impl.tt->emul(*other.impl.tt);
        } else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return *this;
    }


    void reduce_rank(const double& thresh) {
        if ((type==TT_FULL) or (type==TT_NONE)) return;
        else if (type==TT_2D) impl.svd->divide_and_conquer_reduce(thresh*facReduce());
        else if (type==TT_TENSORTRAIN) impl.tt->truncate(thresh*facReduce());
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
    }

    /// Returns a pointer to the internal data

    /// @param[in] ivec index of the core vector to which the returned value points
    T* ptr(const int ivec=0) {
        if (type==TT_NONE) return 0;
        else if (type==TT_FULL) return impl.full->ptr();
        else if (type==TT_2D) return impl.svd->ref_vector(ivec).ptr();
        else if (type==TT_TENSORTRAIN) return impl.tt->ptr(ivec);
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return 0;
    }

    /// Returns a pointer to the internal data

    /// @param[in] ivec index of the core vector to which the returned value points
    const T* ptr(const int ivec=0) const {
        if (type==TT_NONE) return 0;
        else if (type==TT_FULL) return impl.full->ptr();
        else if (type==TT_2D) return impl.svd->ref_vector(ivec).ptr();
        else if (type==TT_TENSORTRAIN) return impl.tt->ptr(ivec);
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return 0;
    }


    SRConf<T>& config() {
        MADNESS_EXCEPTION("implement config in LowRankTensor",1);
    }

    void method1() {
        if (type == TT_FULL) impl.full->method1();
        if (type == TT_2D) impl.svd->method1();
        if (type == TT_TENSORTRAIN) impl.tt->method1();
        // etc.
    }


    /// Transform all dimensions of the tensor t by the matrix c

    /// \ingroup tensor
    /// Often used to transform all dimensions from one basis to another
    /// \code
    /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c(i',i) c(j',j) c(k',k) ...
    /// \endcode
    /// The input dimensions of \c t must all be the same and agree with
    /// the first dimension of \c c . The dimensions of \c c may differ in
    /// size.
    template <typename Q>
    LowRankTensor< TENSOR_RESULT_TYPE(T,Q) > transform(const Tensor<Q>& c) const {
        typedef TENSOR_RESULT_TYPE(T,Q) resultT;

        if (type==TT_NONE) return LowRankTensor<resultT>();
        else if (type==TT_FULL) {
            Tensor<resultT> result=madness::transform((*impl.full),c);
            return LowRankTensor<resultT>(result,0.0,TT_FULL);
        } else if (type==TT_2D) {
            SVDTensor<resultT> result(impl.svd->transform(c));
            return LowRankTensor<resultT>(result);
        } else if (type==TT_TENSORTRAIN) {
            TensorTrain<resultT> tt=madness::transform(*impl.tt,c);
            return LowRankTensor<resultT>(tt);
        } else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        if (has_no_data()) return LowRankTensor<resultT>();
        return LowRankTensor<resultT>();
    }


    /// Transform all dimensions of the tensor t by distinct matrices c

    /// \ingroup tensor
    /// Similar to transform but each dimension is transformed with a
    /// distinct matrix.
    /// \code
    /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c[0](i',i) c[1](j',j) c[2](k',k) ...
    /// \endcode
    /// The first dimension of the matrices c must match the corresponding
    /// dimension of t.
    template <typename Q>
    LowRankTensor<TENSOR_RESULT_TYPE(T,Q)> general_transform(const Tensor<Q> c[]) const {
        typedef TENSOR_RESULT_TYPE(T,Q) resultT;

        if (type==TT_NONE) return LowRankTensor<resultT>();
        else if (type==TT_FULL) {
            Tensor<resultT> result=madness::general_transform((*impl.full),c);
            return LowRankTensor<resultT>(result,0.0,TT_FULL);
        } else if (type==TT_2D) {
            SVDTensor<resultT> result(impl.svd->general_transform(c));
            return LowRankTensor<resultT>(result);
        } else if (type==TT_TENSORTRAIN) {
            TensorTrain<resultT> tt=madness::general_transform((*impl.tt),c);
            return LowRankTensor<resultT>(tt);
        } else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        if (has_no_data()) return LowRankTensor<resultT>();
        return LowRankTensor<resultT>();
    }


    /// Transforms one dimension of the tensor t by the matrix c, returns new contiguous tensor

    /// \ingroup tensor
    /// \code
    /// transform_dir(t,c,1) = r(i,j,k,...) = sum(j') t(i,j',k,...) * c(j',j)
    /// \endcode
    /// @param[in] t Tensor to transform (size of dimension to be transformed must match size of first dimension of \c c )
    /// @param[in] c Matrix used for the transformation
    /// @param[in] axis Dimension (or axis) to be transformed
    /// @result Returns a new, contiguous tensor
    template <typename Q>
    LowRankTensor<TENSOR_RESULT_TYPE(T,Q)> transform_dir(const Tensor<Q>& c, int axis) const {
        typedef TENSOR_RESULT_TYPE(T,Q) resultT;

        if (type==TT_NONE) return LowRankTensor<resultT>();
        else if (type==TT_FULL) {
            Tensor<resultT> result=madness::transform_dir((*impl.full),c,axis);
            return LowRankTensor<resultT>(result,0.0,TT_FULL);
        } else if (type==TT_2D) {
            SVDTensor<resultT> result(impl.svd->transform_dir(c,axis));
            return LowRankTensor<resultT>(result);
        } else if (type==TT_TENSORTRAIN) {
            TensorTrain<resultT> tt=madness::transform_dir((*impl.tt),c,axis);
            return LowRankTensor<resultT>(tt);
        } else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        if (has_no_data()) return LowRankTensor<resultT>();
        return LowRankTensor<resultT>();
    }

};


/// type conversion implies a deep copy

/// @result Returns a new tensor that is a deep copy of the input
template <class Q, class T>
LowRankTensor<Q> convert(const LowRankTensor<T>& other) {

    // simple return
    if (std::is_same<Q, T>::value) return copy(other);

    LowRankTensor<Q> result;
    result.type=other.type;
    if (other.type==TT_FULL)
        result.impl.full=std::shared_ptr<Tensor<Q> >(new Tensor<Q>(convert<Q,T>(*other.impl.full)));
    if (other.type==TT_2D)
        MADNESS_EXCEPTION("no type conversion for SVDTensors",1);
    if (other.type==TT_TENSORTRAIN)
        MADNESS_EXCEPTION("no type conversion for TensorTrain",1);
    return result;
}


template <class T, class Q>
LowRankTensor< TENSOR_RESULT_TYPE(T,Q) > transform(const LowRankTensor<Q>& t, const Tensor<T>& c) {
    return t.transform(c);
}

template <class T, class Q>
LowRankTensor<TENSOR_RESULT_TYPE(T,Q)> general_transform(const LowRankTensor<T>& t, const Tensor<Q> c[]) {
    return t.general_transform(c);
}

/// Transforms one dimension of the tensor t by the matrix c, returns new contiguous tensor

/// \ingroup tensor
/// \code
/// transform_dir(t,c,1) = r(i,j,k,...) = sum(j') t(i,j',k,...) * c(j',j)
/// \endcode
/// @param[in] t Tensor to transform (size of dim to be transformed must match size of first dim of \c c )
/// @param[in] c Matrix used for the transformation
/// @param[in] axis Dimension (or axis) to be transformed
/// @result Returns a new, contiguous tensor
template <class T, class Q>
LowRankTensor<TENSOR_RESULT_TYPE(T,Q)> transform_dir(const LowRankTensor<Q>& t,
        const Tensor<T>& c, int axis) {
    return t.transform_dir(c,axis);
}

/// outer product of two Tensors, yielding a low rank tensor

/// do the outer product of two tensors; distinguish these tensortype cases by
/// the use of final_tensor_type
///  - full x full -> full
///  - full x full -> SVD ( default )
///  - TensorTrain x TensorTrain -> TensorTrain
/// all other combinations are currently invalid.
template <class T, class Q>
LowRankTensor<TENSOR_RESULT_TYPE(T,Q)> outer(const LowRankTensor<T>& t1,
        const LowRankTensor<Q>& t2, const TensorArgs final_tensor_args) {

    typedef TENSOR_RESULT_TYPE(T,Q) resultT;


    MADNESS_ASSERT(t1.tensor_type()==t2.tensor_type());

    if (final_tensor_args.tt==TT_FULL) {
        MADNESS_ASSERT(t1.tensor_type()==TT_FULL);
        Tensor<resultT> t(outer(*t1.impl.full,*t2.impl.full));
        return LowRankTensor<resultT>(t);

    } else if (final_tensor_args.tt==TT_2D) {
        MADNESS_ASSERT(t1.tensor_type()==TT_FULL);

        // srconf is shallow, do deep copy here
        const Tensor<T> lhs=t1.full_tensor_copy();
        const Tensor<Q> rhs=t2.full_tensor_copy();

        const long k=lhs.dim(0);
        const long dim=lhs.ndim()+rhs.ndim();
        long size=1;
        for (int i=0; i<lhs.ndim(); ++i) size*=k;
        MADNESS_ASSERT(size==lhs.size());
        MADNESS_ASSERT(size==rhs.size());
        MADNESS_ASSERT(lhs.size()==rhs.size());

        Tensor<double> weights(1);
        weights=1.0;

        SRConf<resultT> srconf(weights,lhs.reshape(1,lhs.size()),rhs.reshape(1,rhs.size()),dim,k);
        // srconf.normalize();
        return LowRankTensor<resultT>(SVDTensor<resultT>(srconf));

    } else if (final_tensor_args.tt==TT_TENSORTRAIN) {
        MADNESS_ASSERT(t1.tensor_type()==TT_TENSORTRAIN);
        MADNESS_ASSERT(t2.tensor_type()==TT_TENSORTRAIN);
        return outer(*t1.impl.tt,*t2.impl.tt);
    } else {
        MADNESS_EXCEPTION("you should not be here",1);
    }
    return LowRankTensor<TENSOR_RESULT_TYPE(T,Q)>();

}
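
// Usage sketch for outer() (illustrative only, not part of the original documentation);
// two full inputs yield a rank-1 SVD representation whose dimension is the sum of the
// input dimensions:
//
//   LowRankTensor<double> a(ta), b(tb);                 // ta, tb: hypothetical full Tensors
//   LowRankTensor<double> ab=outer(a, b, TensorArgs(1.e-5, TT_2D));   // full x full -> SVD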


namespace archive {
    /// Serialize a tensor
    template <class Archive, typename T>
    struct ArchiveStoreImpl< Archive, LowRankTensor<T> > {

        friend class LowRankTensor<T>;
        /// Stores the GenTensor to an archive
        static void store(const Archive& ar, const LowRankTensor<T>& t) {
            bool exist=t.has_data();
            int i=int(t.type);
            ar & exist & i;
            if (exist) {
                if (t.impl.svd) ar & *t.impl.svd.get();
                if (t.impl.full) ar & *t.impl.full.get();
                if (t.impl.tt) ar & *t.impl.tt.get();
            }
        }
    };


    /// Deserialize a tensor ... existing tensor is replaced
    template <class Archive, typename T>
    struct ArchiveLoadImpl< Archive, LowRankTensor<T> > {

        friend class GenTensor<T>;
        /// Replaces this GenTensor with one loaded from an archive
        static void load(const Archive& ar, LowRankTensor<T>& t) {
            // check for pointer existence
            bool exist=false;
            int i=-1;
            ar & exist & i;
            t.type=TensorType(i);

            if (exist) {
                if (t.type==TT_2D) {
                    SVDTensor<T> svd;
                    ar & svd;
                    t.impl.svd.reset(new SVDTensor<T>(svd));
                } else if (t.type==TT_FULL) {
                    Tensor<T> full;
                    ar & full;
                    t.impl.full.reset(new Tensor<T>(full));
                } else if (t.type==TT_TENSORTRAIN) {
                    TensorTrain<T> tt;
                    ar & tt;
                    t.impl.tt.reset(new TensorTrain<T>(tt));
                }
            }
        }
    };
} // namespace archive


/// The class defines tensor op scalar ... here define scalar op tensor.
template <typename T, typename Q>
typename IsSupported < TensorTypeData<Q>, LowRankTensor<T> >::type
operator*(const Q& x, const LowRankTensor<T>& t) {
    return t*x;
}

/// add all the GenTensors of a given list

/// If there are many tensors to add, a sorted addition that starts with the
/// tensors of lowest rank is beneficial.
/// @param[in] addends a list with gentensors of same dimensions; will be destroyed upon return
/// @param[in] eps the accuracy threshold
/// @param[in] are_optimal flag if the GenTensors in the list are already in SVD format (if TT_2D)
/// @return the sum GenTensor of the input GenTensors
template<typename T>
LowRankTensor<T> reduce(std::list<LowRankTensor<T> >& addends, double eps, bool are_optimal=false) {
    typedef typename std::list<LowRankTensor<T> >::iterator iterT;
    LowRankTensor<T> result=copy(addends.front());
    for (iterT it=++addends.begin(); it!=addends.end(); ++it) {
        result+=*it;
    }
    result.reduce_rank(eps);
    return result;
}
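
// Usage sketch for reduce() (illustrative only, not part of the original documentation);
// a, b, c are hypothetical LowRankTensors of identical dimensions:
//
//   std::list<LowRankTensor<double> > terms;
//   terms.push_back(a);
//   terms.push_back(b);
//   terms.push_back(c);
//   LowRankTensor<double> sum=reduce(terms, 1.e-5);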


/// implements a temporary(!) slice of a LowRankTensor
template<typename T>
class SliceLowRankTensor {
public:

    LowRankTensor<T> lrt;
    std::vector<Slice> thisslice;

    // all ctors are private, only accessible by GenTensor

    /// default ctor
    SliceLowRankTensor<T> () {}

    /// ctor with a GenTensor; shallow
    SliceLowRankTensor<T> (const LowRankTensor<T>& gt, const std::vector<Slice>& s)
        : lrt(gt), thisslice(s) {}

public:

    /// assignment as in g(s) = g1;
    SliceLowRankTensor<T>& operator=(const LowRankTensor<T>& rhs) {
        print("You don't want to assign to a SliceLowRankTensor; use operator+= instead");
        MADNESS_ASSERT(0);
        return *this;
    };

    /// assignment as in g(s) = g1(s);
    SliceLowRankTensor<T>& operator=(const SliceLowRankTensor<T>& rhs) {
        print("You don't want to assign to a SliceLowRankTensor; use operator+= instead");
        MADNESS_ASSERT(0);
        return *this;
    };

    /// inplace addition as in g(s)+=g1
    SliceLowRankTensor<T>& operator+=(const LowRankTensor<T>& rhs) {

        // fast return if possible
        if (rhs.has_no_data() or rhs.rank()==0) return *this;

        if (lrt.has_data()) MADNESS_ASSERT(lrt.tensor_type()==rhs.tensor_type());

        // no fast return possible!!!
        // if (this->rank()==0) {
        //     // this is a deep copy
        //     *this=rhs(rhs_s);
        //     this->scale(beta);
        //     return;
        // }

        std::vector<Slice> rhs_slice(rhs.ndim(),Slice(_));

        if (lrt.type==TT_FULL) {
            (*lrt.impl.full)(thisslice).gaxpy(1.0,(*rhs.impl.full)(rhs_slice),1.0);

        } else if (lrt.type==TT_2D) {
            MADNESS_ASSERT(lrt.impl.svd->has_structure());
            MADNESS_ASSERT(rhs.impl.svd->has_structure());
            lrt.impl.svd->inplace_add(*rhs.impl.svd,thisslice,rhs_slice, 1.0, 1.0);

        } else if (lrt.type==TT_TENSORTRAIN) {
            lrt.impl.tt->gaxpy(thisslice,*rhs.impl.tt,1.0,rhs_slice);
        }
        return *this;
    }

    /// inplace subtraction as in g(s)-=g1
    SliceLowRankTensor<T>& operator-=(const LowRankTensor<T>& rhs) {

        // fast return if possible
        if (rhs.has_no_data() or rhs.rank()==0) return *this;

        if (lrt.has_data()) MADNESS_ASSERT(lrt.tensor_type()==rhs.tensor_type());

        // no fast return possible!!!
        // if (lrt.rank()==0) {
        //     // this is a deep copy
        //     *this=rhs(rhs_s);
        //     lrt.scale(beta);
        //     return;
        // }

        std::vector<Slice> rhs_slice(rhs.ndim(),Slice(_));

        if (lrt.type==TT_FULL) {
            (*lrt.impl.full)(thisslice).gaxpy(1.0,(*rhs.impl.full)(rhs_slice),-1.0);

        } else if (lrt.type==TT_2D) {
            MADNESS_ASSERT(lrt.impl.svd->has_structure());
            MADNESS_ASSERT(rhs.impl.svd->has_structure());
            lrt.impl.svd->inplace_add(*rhs.impl.svd,thisslice,rhs_slice, 1.0, -1.0);

        } else if (lrt.type==TT_TENSORTRAIN) {
            lrt.impl.tt->gaxpy(thisslice,*rhs.impl.tt,-1.0,rhs_slice);
        }
        return *this;
    }

    /// inplace addition as in g(s)+=g1(s)
    SliceLowRankTensor<T>& operator+=(const SliceLowRankTensor<T>& rhs) {
        // fast return if possible
        if (rhs.lrt.has_no_data() or rhs.lrt.rank()==0) return *this;

        if (lrt.has_data()) MADNESS_ASSERT(lrt.tensor_type()==rhs.lrt.tensor_type());

        // no fast return possible!!!
        // if (lrt.rank()==0) {
        //     // this is a deep copy
        //     *this=rhs(rhs_s);
        //     lrt.scale(beta);
        //     return;
        // }

        std::vector<Slice> rhs_slice=rhs.thisslice;

        if (lrt.type==TT_FULL) {
            (*lrt.impl.full)(thisslice).gaxpy(1.0,(*rhs.lrt.impl.full)(rhs_slice),1.0);

        } else if (lrt.type==TT_2D) {
            MADNESS_ASSERT(lrt.impl.svd->has_structure());
            MADNESS_ASSERT(rhs.lrt.impl.svd->has_structure());
            lrt.impl.svd->inplace_add(*rhs.lrt.impl.svd,thisslice,rhs_slice, 1.0, 1.0);

        } else if (lrt.type==TT_TENSORTRAIN) {
            lrt.impl.tt->gaxpy(thisslice,*rhs.lrt.impl.tt,1.0,rhs_slice);
        }
        return *this;
    }

    /// inplace zero-ing as in g(s)=0.0
    SliceLowRankTensor<T>& operator=(const T& number) {
        MADNESS_ASSERT(number==T(0.0));

        if (lrt.type==TT_FULL) {
            (*lrt.impl.full)(thisslice)=0.0;

        } else if (lrt.type==TT_2D) {
            MADNESS_ASSERT(lrt.impl.svd->has_structure());
            LowRankTensor<T> tmp(lrt);
            lrt.impl.svd->inplace_add(*tmp.impl.svd,thisslice,thisslice, 1.0, -1.0);

        } else if (lrt.type==TT_TENSORTRAIN) {
            lrt.impl.tt->gaxpy(thisslice,*lrt.impl.tt,-1.0,thisslice);
        }
        return *this;
    }

    friend LowRankTensor<T> copy(const SliceLowRankTensor<T>& other) {
        LowRankTensor<T> result;
        const std::vector<Slice> s=other.thisslice;
        result.type=other.lrt.type;
        if (result.type==TT_FULL)
            result.impl.full.reset(new Tensor<T>(copy((*other.lrt.impl.full)(s))));
        else if (result.type==TT_2D)
            result.impl.svd.reset(new SVDTensor<T>(other.lrt.impl.svd->copy_slice(s)));
        else if (result.type==TT_TENSORTRAIN)
            result.impl.tt.reset(new TensorTrain<T>(copy(*(other.lrt.impl.tt),s)));
        else {
            MADNESS_EXCEPTION("you should not be here",1);
        }
        return result;
    }

};
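
// Usage sketch for slicing (illustrative only, not part of the original documentation);
// slices are shallow, temporary views meant for in-place accumulation. Here a and b are
// hypothetical LowRankTensors, with b matching the dimensions of the slice:
//
//   std::vector<Slice> s(a.ndim(), Slice(0,4));   // the first 5 entries of every dimension
//   a(s)+=b;                                      // accumulate b into the slice of a
//   a(s)=0.0;                                     // zero out the slice
//   LowRankTensor<double> part=copy(a(s));        // deep copy of the slice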


} // namespace madness

#endif /* MADNESS_TENSOR_LOWRANKTENSOR_H_ */