1 /*
2   This file is part of MADNESS.
3 
4   Copyright (C) 2007,2010 Oak Ridge National Laboratory
5 
6   This program is free software; you can redistribute it and/or modify
7   it under the terms of the GNU General Public License as published by
8   the Free Software Foundation; either version 2 of the License, or
9   (at your option) any later version.
10 
11   This program is distributed in the hope that it will be useful,
12   but WITHOUT ANY WARRANTY; without even the implied warranty of
13   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14   GNU General Public License for more details.
15 
16   You should have received a copy of the GNU General Public License
17   along with this program; if not, write to the Free Software
18   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 
20   For more information please contact:
21 
22   Robert J. Harrison
23   Oak Ridge National Laboratory
24   One Bethel Valley Road
25   P.O. Box 2008, MS-6367
26 
27   email: harrisonrj@ornl.gov
28   tel:   865-241-3937
29   fax:   865-572-0680
30 
31   $Id$
32 */
33 
34 #ifndef GENTENSOR_H_
35 #define GENTENSOR_H_
36 
37 
38 /*!
39  *
40  * \file GenTensor.h
41  * \brief Provides a tensor with taking advantage of possibly low rank
42  *
43  ****************************************************************
44  * MAIN DIFFERENCES (Tensor t; GenTensor g)
45  *  t=t1(s) is shallow
46  *  g=g1(s) is deep
47  ****************************************************************
48  *
49  * a GenTensor is a generalized form of a Tensor
50  * for now only little functionality shall be implemented; feel free to extend
51  * a consequence is that we (usually) can't directly access individual
52  * matrix elements
53  * note that a GenTensor might have zero rank, but is still a valid
54  * tensor, and should therefore return a FullTensor with zeros in it.
55  *
56  * \par Slicing in GenTensors:
57  *
58  * A GenTensor differs from a Tensor in that we can't directly access
59  * individual matrix elements, and thus can't directly assign or manipulate slices
 * as lvalues. For rvalues we simply provide slices of the constituent vectors
61  * in SRConf, which is a valid GenTensor by itself
62  * \code
63  *  lhs = rhs(s)
64  * \endcode
65  *
66  * Manipulations of slices of a GenTensor are heavily restricted, but should
67  * cover the most important cases:
68  * - assignment to zero; done by inplace subtraction of the slice
69  *    \code
70  *    GenTensor lrt1(t);
71  *    lrt1(s) = 0.0;
72  *    \endcode
73  * - in-place addition
74  *    \code
75  *    GenTensor lrt1(3,3,3,3), lrt2(t);
76  *    lrt1(s) += lrt2;
77  *    \endcode
78  *
79  * Note that *all* of these operation increase the rank of lhs
80  *
81  * \par Addition in GenTensors
82  *
83  * Addition in a GenTensor is a fairly complicated issue, and there are several
84  * algorithms you can use, each with certain pros and cons
85  *
86  * - append()
87  *   plain concatenating of the configurations will increase the rank and will
88  *   introduce linear dependencies and redundancies. Also, you might have to
89  *   reallocate memory and copy a lot of data around. However, no accuracy is lost
90  *   and it is fairly fast. Final rank reduction might be expensive, since you
91  *   might have accumulated a huge amount of terms.
92  *
93  * - low_rank_add_sequential()
94  *   only for SVD (2-way decomposition)
95  *   take the 2nd vector as a basis of a vector space, project the overlap of the
96  *   old and new terms on the lhs basis, perform modified Gram-Schmidt
97  *   orthogonalization on the rhs basis and increase the basis if necessary.
98  *   This will require the left hand side to be right-orthonormal, and the caller is
99  *   responsible for doing that. If a new GenTensor is created and only additions
100  *   using this method are performed the tensor will automatically be orthonormal.
101  *   Cost depends on the number of terms of the rhs and the lhs
102  *
103  * - addition of slices
104  *   as of now we can add slices only using append()
105  *
106  * - addition in full rank form
107  *   at times it might be sensible to accumulate your result in a full rank tensor,
108  *   and after collecting all the contribution you can transform it into a low
 * rank tensor. This will also maintain "all" digits, and the cost does not
 * depend on the number of terms on the lhs.
111  */
112 
113 #include <madness/tensor/tensor.h>
114 #include <madness/tensor/srconf.h>
115 #include <madness/tensor/tensortrain.h>
116 #include <stdexcept>
117 
118 
119 // you can use low-rank tensors only when you use gentensor
120 #if HAVE_GENTENSOR
121 #define USE_LRT
122 #else
123 #undef USE_LRT
124 #endif
125 
126 namespace madness {
127 
128 	// a forward declaration
129 	template <class T> class SliceGenTensor;
130 	template <class T> class GenTensor;
131 
132 
133 	/// TensorArgs holds the arguments for creating a LowRankTensor
134 	struct TensorArgs {
135 		double thresh;
136 		TensorType tt;
TensorArgsTensorArgs137         TensorArgs() : thresh(-1.0), tt(TT_NONE) {}
TensorArgsTensorArgs138 		TensorArgs(const double& thresh1, const TensorType& tt1)
139 			: thresh(thresh1)
140 			, tt(tt1) {
141 		}
what_am_iTensorArgs142 		static std::string what_am_i(const TensorType& tt) {
143 			if (tt==TT_2D) return "TT_2D";
144 			if (tt==TT_FULL) return "TT_FULL";
145             if (tt==TT_TENSORTRAIN) return "TT_TENSORTRAIN";
146 			return "unknown tensor type";
147 		}
148 		template <typename Archive>
serializeTensorArgs149 		void serialize(const Archive& ar) {
150 		    int i=int(tt);
151 		    ar & thresh & i;
152 		    tt=TensorType(i);
153 		}
154 	};
155 
156 
157     /// true if GenTensor has zero rank or no data
158     template<typename T>
has_zero_rank(const GenTensor<T> & g)159     bool has_zero_rank(const GenTensor<T>& g) {
160     	return ((g.rank()==0) or (g.tensor_type()==TT_NONE));
161     }
162 
163 
164 #if !HAVE_GENTENSOR
165 
	template <typename T>
	class GenTensor : public Tensor<T> {

	public:

		/// empty ctor
		GenTensor<T>() : Tensor<T>() {}

		/// all further ctors forward to Tensor<T>; the low-rank arguments
		/// (TensorArgs, eps, TensorType) are accepted for interface
		/// compatibility but ignored
		GenTensor<T>(const Tensor<T>& t1) : Tensor<T>(t1) {}
		GenTensor<T>(const Tensor<T>& t1, const TensorArgs& targs) : Tensor<T>(t1) {}
		GenTensor<T>(const Tensor<T>& t1, double eps, const TensorType tt) : Tensor<T>(t1) {}
		GenTensor<T>(const TensorType tt): Tensor<T>() {}
		GenTensor<T>(std::vector<long> v, const TensorType& tt) : Tensor<T>(v) {}
		GenTensor<T>(std::vector<long> v, const TensorArgs& targs) : Tensor<T>(v) {}
		GenTensor<T>(const SRConf<T>& sr1) : Tensor<T>() {MADNESS_EXCEPTION("no ctor with SRConf: use HAVE_GENTENSOR",1);}

        /// Type conversion makes a deep copy
        template <class Q> operator GenTensor<Q>() const { // type conv => deep copy
            Tensor<Q> result = Tensor<Q>(this->_ndim,this->_dim,false);
            BINARY_OPTIMIZED_ITERATOR(Q, result, const T, (*this), *_p0 = (Q)(*_p1));
            return result;
        }

        /// representation "conversion"; targs is ignored, result is a deep copy
        GenTensor convert(const TensorArgs& targs) const {return copy(*this);}

		/// reconstruction/full-tensor access is the identity for a full tensor
		GenTensor<T> reconstruct_tensor() const {return *this;}
		GenTensor<T> full_tensor() const {return *this;}
		GenTensor<T>& full_tensor() {return *this;}
        GenTensor<T> full_tensor_copy() const {return *this;}
        GenTensor<T> full_tensor_copy() {return *this;}

        /// true if this holds any coefficients
        bool has_data() const {return this->size()>0;};
        bool has_no_data() const {return not has_data();};
		/// separation rank is meaningless for a full tensor; -1 flags "not low-rank"
		long rank() const {return -1;}
		double svd_normf() const {return this->normf();}
		size_t real_size() const {return this->size();}

		/// rank reduction and normalization are no-ops for full tensors
        void reduce_rank(const double& eps) {return;};
        void normalize() {return;}

        std::string what_am_i() const {return "GenTensor, aliased to Tensor";};
		TensorType tensor_type() const {return TT_FULL;}

		/// "SVD addition" degenerates to plain inplace addition
		void add_SVD(const GenTensor<T>& rhs, const double& eps) {*this+=rhs;}

		/// no SRConf exists in this variant; always throws
		SRConf<T> config() const {MADNESS_EXCEPTION("no SRConf in complex GenTensor",1);}
        SRConf<T> get_configs(const int& start, const int& end) const {MADNESS_EXCEPTION("no SRConf in complex GenTensor",1);}

        /// transform each dimension with the corresponding tensor in c
        template<typename Q>
        GenTensor<T> general_transform(const Tensor<Q> c[]) const {

            return madness::general_transform(*static_cast<const Tensor<T>*>(this),c);
        }


		/// return the additional safety for rank reduction
		static double fac_reduce() {return -1.0;};

	};
224 
225     template <class T>
226 	GenTensor<T> reduce(std::list<GenTensor<T> >& addends, double eps, bool are_optimal=false) {
227     	typedef typename std::list<GenTensor<T> >::iterator iterT;
228     	GenTensor<T> result=copy(addends.front());
229     	for (iterT it=++addends.begin(); it!=addends.end(); ++it) {
230     		result+=*it;
231     	}
232 	    return result;
233     }
234 
235     /// Outer product ... result(i,j,...,p,q,...) = left(i,k,...)*right(p,q,...)
236 
237     /// \ingroup tensor
238     template <class T>
outer(const GenTensor<T> & left,const GenTensor<T> & right,const TensorArgs final_tensor_args)239     GenTensor<T> outer(const GenTensor<T>& left, const GenTensor<T>& right,
240             const TensorArgs final_tensor_args) {
241         return madness::outer(static_cast<Tensor<T> >(left),static_cast<Tensor<T> >(right));
242     }
243 
244     /// Outer product ... result(i,j,...,p,q,...) = left(i,k,...)*right(p,q,...)
245 
246      /// \ingroup tensor
247      template <class T>
outer(const Tensor<T> & left,const Tensor<T> & right,const TensorArgs final_tensor_args)248      GenTensor<T> outer(const Tensor<T>& left, const Tensor<T>& right,
249              const TensorArgs final_tensor_args) {
250          return madness::outer(left,right);
251      }
252 
253 
254 	namespace archive {
	/// Serialize a tensor
	template <class Archive, typename T>
	struct ArchiveStoreImpl< Archive, GenTensor<T> > {
		static void store(const Archive& s, const GenTensor<T>& t) {
			// wire format: size, runtime type id, then (if non-empty)
			// ndim, the dims array, and the flat coefficient data
			if (t.iscontiguous()) {
				s & t.size() & t.id();
				if (t.size()) s & t.ndim() & wrap(t.dims(),TENSOR_MAXDIM) & wrap(t.ptr(),t.size());
			}
			else {
				// non-contiguous data cannot be streamed flat:
				// serialize a contiguous deep copy instead
				s & copy(t);
			}
		};
	};
268 
269 
	/// Deserialize a tensor ... existing tensor is replaced
	template <class Archive, typename T>
	struct ArchiveLoadImpl< Archive, GenTensor<T> > {
		static void load(const Archive& s, GenTensor<T>& t) {
			long sz = 0l, id =0l;
			s & sz & id;
			// the stored runtime type id must match the target's element type
			if (id != t.id()) throw "type mismatch deserializing a tensor";
			if (sz) {
				long _ndim=0l, _dim[TENSOR_MAXDIM];
				s & _ndim & wrap(_dim,TENSOR_MAXDIM);
				// allocate uninitialized storage of the stored shape,
				// then read the flat coefficient data into it
				t = Tensor<T>(_ndim, _dim, false);
				if (sz != t.size()) throw "size mismatch deserializing a tensor";
				s & wrap(t.ptr(), t.size());
			}
			else {
				// an empty tensor was stored
				t = Tensor<T>();
			}
		};
	};
289 
290 	}
291 
292 #else
293 
294 #ifndef USE_LRT
295 
296 	/// A GenTensor is a generalized tensor, possibly in a low rank representation
297 
298 	/// Since not all operations are possible (or sensible) for low rank tensors,
299 	/// only those that are are provided. There are no assignment for slices,
300 	/// neither for numbers nor for other GenTensors, use inplace-addition instead.
301 	///
302 	/// Behavior (in particular shallow and deep construction/assignment) should
303 	/// resemble the one of Tensor as far as possible:
304 	/// assignments/construction to/from other GenTensors are shallow
305 	/// assignments/construction from Tensor is deep
306 	/// assignments/construction to/from Slices are deep
307 	template<typename T>
308 	class GenTensor {
309 
310 	private:
311 
312 		friend class SliceGenTensor<T>;
313 
314         /// C++ typename of the real type associated with a complex type.
315         typedef typename TensorTypeData<T>::scalar_type scalar_type;
316 
317         /// C++ typename of the floating point type associated with scalar real type
318         typedef typename TensorTypeData<T>::float_scalar_type float_scalar_type;
319 
320 
321 		typedef SRConf<T> configT;
322 		typedef Tensor<T> tensorT;
323 		typedef GenTensor<T> gentensorT;
324 		typedef std::shared_ptr<configT> sr_ptr;
325 
326 		/// pointer to the low rank tensor
327 		sr_ptr _ptr;
328 
329 		/// the machine precision
330 		static double machinePrecision() {return 1.e-14;}
331 
332 		/// safety for rank reduction
333 		static double facReduce() {return 1.e-3;}
334 
335 	public:
336 
337 		/// empty ctor
338 		GenTensor() : _ptr() {
339 		}
340 
341 		/// copy ctor, shallow
342 //		GenTensor(const GenTensor<T>& rhs) : _ptr(rhs._ptr) { // DON'T DO THIS: USE_COUNT BLOWS UP
343 		GenTensor(const GenTensor<T>& rhs) : _ptr() {
344 			if (rhs.has_data()) _ptr=rhs._ptr;
345 		};
346 
347 		/// ctor with dimensions
348 		GenTensor(const std::vector<long>& dim, const TensorType tt) : _ptr() {
349 
350     		// check consistency
351     		const long ndim=dim.size();
352     		const long maxk=dim[0];
353     		for (long idim=0; idim<ndim; idim++) {
354     			MADNESS_ASSERT(maxk==dim[0]);
355     		}
356 
357    			_ptr=sr_ptr(new configT(dim.size(),dim[0],tt));
358 
359 		}
360 
361 		/// ctor with dimensions
362 		GenTensor(const std::vector<long>& dim, const TensorArgs& targs) : _ptr() {
363 
364 			// check consistency
365     		const long ndim=dim.size();
366     		const long maxk=dim[0];
367     		for (long idim=0; idim<ndim; idim++) {
368     			MADNESS_ASSERT(maxk==dim[0]);
369     		}
370 
371    			_ptr=sr_ptr(new configT(dim.size(),dim[0],targs.tt));
372 
373 		}
374 
375 		/// ctor with dimensions
376 		GenTensor(const TensorType& tt, const unsigned int& k, const unsigned int& dim) {
377    			_ptr=sr_ptr(new configT(dim,k,tt));
378 		}
379 
380 		/// ctor with a regular Tensor and arguments, deep
381 		GenTensor(const Tensor<T>& rhs, const TensorArgs& targs) {
382 
383 			// fast return if possible
384 			if (not rhs.has_data()) {
385 				_ptr.reset();
386 				return;
387 			}
388 
389 			MADNESS_ASSERT(rhs.ndim()>0);
390 			for (long idim=0; idim<rhs.ndim(); idim++) {
391 			    MADNESS_ASSERT(rhs.dim(0)==rhs.dim(idim));
392 			}
393 
394 			_ptr=sr_ptr(new configT(rhs.ndim(),rhs.dim(0),targs.tt));
395 
396 			// direct reduction on the polynomial values on the Tensor
397 			TensorType ttype=tensor_type();
398 			if (ttype==TT_2D) {
399 
400 #if 1
401 				Tensor<T> U,VT;
402 				Tensor< typename Tensor<T>::scalar_type > s;
403 
404 				// add some extra dimensions if this is supposedly NS form:
405 				// separate sum and wavelet coeffs
406 				if (rhs.dim(0)%2==0) {
407 					std::vector<long> dims(rhs.ndim()*2);
408 					for (int i=0; i<rhs.ndim(); ++i) {
409 						int k=rhs.dim(i);
410 						dims[2*i]=k/2;
411 						dims[2*i+1]=2;
412 					}
413 					TensorTrain<T> tt(rhs,targs.thresh*facReduce(),dims);
414 					// fuse sum and wavelet coeffs back together
415 					for (int i=0; i<rhs.ndim(); ++i) tt.fusedim(i);
416 					// fuse dimensions into particles 1 and 2
417 					tt.two_mode_representation(U,VT,s);
418 
419 				} else {
420 					TensorTrain<T> tt(rhs,targs.thresh*facReduce());
421 					tt.two_mode_representation(U,VT,s);
422 				}
423 
424 				const long r=VT.dim(0);
425 				const long nd=VT.ndim();
426 				if (r==0) {
427                     _ptr=sr_ptr(new configT(dim(),get_k(),ttype));
428 				} else {
429 					MADNESS_ASSERT(U.dim(nd-1)==r);
430 					Tensor<T> UU=U.reshape(U.size()/r,r);
431 					_ptr=sr_ptr(new configT(s, copy(transpose(UU)), VT.reshape(r,VT.size()/r),
432 				                          dim(), get_k()));
433 					this->normalize();
434 				}
435 #else
436 				// adapt form of values
437 				MADNESS_ASSERT(rhs.iscontiguous());
438 				std::vector<long> d(_ptr->dim_eff(),_ptr->kVec());
439 				Tensor<T> values_eff=rhs.reshape(d);
440 
441 				this->computeSVD(targs.thresh,values_eff);
442 #endif
443 			} else if (ttype==TT_FULL){
444 				_ptr.reset(new configT(copy(rhs)));
445 			} else {
446 				MADNESS_EXCEPTION("unknown tensor type in GenTensor(tensorT,targs)",0);
447 			}
448 		}
449 
450 		/// ctor with a regular Tensor and arguments, deep
451 		GenTensor(const Tensor<T>& rhs, const double& thresh, const TensorType& tt) {
452 			*this=gentensorT(rhs,TensorArgs(thresh,tt));
453 		}
454 
455 		/// ctor with a SliceGenTensor, deep
456 		GenTensor(const SliceGenTensor<T>& rhs) : _ptr() {
457 			*this=rhs;
458 		}
459 
460 		/// dtor
461 		~GenTensor() {
462 			this->clear();
463 		}
464 
465 		/// shallow assignment operator: g0 = g1
466 		gentensorT& operator=(const gentensorT& rhs) {
467 			if (this != &rhs) _ptr=rhs._ptr;
468 			return *this;
469 		}
470 
471 		/// deep assignment with slices: g0 = g1(s)
472 		GenTensor& operator=(const SliceGenTensor<T>& rhs) {
473 			*this=rhs._refGT.copy_slice(rhs._s);
474 			return *this;
475 		}
476 
477 		/// deep copy of rhs by deep copying rhs.configs
478 		friend gentensorT copy(const gentensorT& rhs) {
479 			if (rhs._ptr) return gentensorT(copy(*rhs._ptr));
480 			return gentensorT();
481 		}
482 
483 
484         /// Type conversion makes a deep copy
485         template <class Q> operator GenTensor<Q>() const { // type conv => deep copy
486             GenTensor<Q> result;
487             MADNESS_EXCEPTION("no type conversion in GenTensor -- use NO_GENTENSOR",1);
488             return result;
489         }
490 
491 		/// return some of the terms of the SRConf (start,..,end), inclusively
492 		/// shallow copy
493 		const GenTensor get_configs(const int& start, const int& end) const {
494 			return gentensorT(config().get_configs(start,end));
495 		}
496 
497 		/// general slicing, shallow; for temporary use only!
498 		SliceGenTensor<T> operator()(const std::vector<Slice>& s) {
499 			return SliceGenTensor<T>(*this,s);
500 		}
501 
502 		/// general slicing, for g0 = g1(s), shallow, for temporary use only!
503 		const SliceGenTensor<T> operator()(const std::vector<Slice>& s) const {
504 			return SliceGenTensor<T>(*this,s);
505 		}
506 
507 		/// assign a number
508 		GenTensor& operator=(const T& fac) {
509             if (this->tensor_type()==TT_FULL) full_tensor()=fac;
510             else config()=fac;
511             return *this;
512 		}
513 
514 	private:
515 		/// disable direct construction with a tensor since it's not shallow
516 		GenTensor(const Tensor<T>& t) {
517 			print("in wrong constructor");
518 		}
519 	public:
520 
521 		/// ctor w/ configs, shallow (indirectly, via vector_)
522 		explicit GenTensor(const SRConf<T>& config) : _ptr(new configT(config)) {
523 		}
524 
525 	private:
526 		/// return a slice of this (deep copy)
527 		gentensorT copy_slice(const std::vector<Slice>& s) const {
528 
529             // fast return if possible
530             if (this->has_no_data()) {
531                 int k_new=s[0].end-s[0].start+1;
532                 return gentensorT (this->tensor_type(),k_new,this->dim());
533             }
534 
535 			// consistency check
536 			MADNESS_ASSERT(s.size()==this->dim());
537 			MADNESS_ASSERT(s[0].step==1);
538 
539 			// fast return for full rank tensors
540 			if (tensor_type()==TT_FULL) {
541 				tensorT a=copy(full_tensor()(s));
542 				return gentensorT(configT(a));
543 			} else if (tensor_type()==TT_2D) {
544 			    return gentensorT(config().copy_slice(s));
545 			} else {
546 			    MADNESS_EXCEPTION("fault in GenTensor::copy_slice",1);
547 			    return gentensorT();
548 			}
549 		}
550 
551 	public:
552 
553 		/// access the tensor values, iff this is full rank representation
554 		const tensorT& full_tensor() const {
555 			MADNESS_ASSERT(tensor_type()==TT_FULL);
556 			return _ptr->ref_vector(0);
557 		}
558 
559 		/// access the tensor values, iff this is full rank representation
560 		tensorT& full_tensor() {
561 			MADNESS_ASSERT(tensor_type()==TT_FULL);
562 			return _ptr->ref_vector(0);
563 		}
564 
565 		/// add another SepRep to this one
566 		gentensorT& operator+=(const gentensorT& rhs) {
567 
568 		    if (rhs.has_no_data()) return *this;
569 		    if (this->has_no_data()) {
570 		    	*this=copy(rhs);
571 		    	return *this;
572 		    }
573 		    this->gaxpy(1.0,rhs,1.0);
574 			return *this;
575 		}
576 
577 		/// inplace subtraction
578 		gentensorT& operator-=(const gentensorT& rhs) {
579 
580 		    if (rhs.has_no_data()) return *this;
581 		    if (this->has_no_data()) {
582 		    	*this=copy(rhs);
583 		    	return *this;
584 		    }
585 		    this->gaxpy(1.0,rhs,-1.0);
586 			return *this;
587 		}
588 
589 		/// inplace addition
590 		gentensorT& operator+=(const SliceGenTensor<T>& rhs) {
591 			const std::vector<Slice> s(this->ndim(),Slice(0,get_k()-1,1));
592 			this->_ptr->inplace_add(*rhs._refGT._ptr,s,rhs._s,1.0,1.0);
593 			return *this;
594 		}
595 
596 		/// inplace subtraction
597 		gentensorT& operator-=(const SliceGenTensor<T>& rhs) {
598 			const std::vector<Slice> s(this->ndim(),Slice(0,get_k()-1,1));
599 			this->_ptr->inplace_add(*rhs._refGT._ptr,s,rhs._s,1.0,-1.0);
600 			return *this;
601 		}
602 
603 		/// multiply with a number
604 		template<typename Q>
605 		GenTensor<TENSOR_RESULT_TYPE(T,Q)> operator*(const Q& x) const {
606 			GenTensor<TENSOR_RESULT_TYPE(T,Q)> result(copy(*this));
607         	result.scale(x);
608         	return result;
609 		}
610 
611 	    /// Inplace generalized saxpy ... this = this*alpha + other*beta
612 	    gentensorT& gaxpy(const T alpha, const gentensorT& rhs, const T beta) {
613 			MADNESS_ASSERT(this->tensor_type()==rhs.tensor_type());
614 
615 	    	if (tensor_type()==TT_FULL) {
616                 full_tensor().gaxpy(alpha,rhs.full_tensor(),beta);
617                 return *this;
618             }
619 	    	if (not (alpha==1.0)) this->scale(alpha);
620 	    	rhs.append(*this,beta);
621 	    	return *this;
622 	    }
623 
624 		/// multiply with a scalar
625 	    template<typename Q>
626 	    GenTensor<TENSOR_RESULT_TYPE(T,Q)>& scale(const Q& dfac) {
627 			if (!_ptr) return *this;
628 			if (tensor_type()==TT_FULL) {
629 				full_tensor().scale(dfac);
630 			} else {
631 				_ptr->scale(dfac);
632 			}
633 			return *this;
634 		};
635 
636 		/// normalize
637 		void normalize() {
638 			if (tensor_type()==TT_2D) _ptr->normalize();
639 		};
640 
641 		void fillrandom(const int r=1) {
642 			if (tensor_type()==TT_FULL) full_tensor().fillrandom();
643 			else _ptr->fillWithRandom(r);
644 		}
645 
646 		/// do we have data? note difference to SRConf::has_data() !
647 		bool has_data() const {
648 		    if (_ptr) return true;
649 		    return false;
650 		}
651 
652 		/// do we have data?
653 		bool has_no_data() const {return (!has_data());}
654 
655 		/// return the separation rank
656 		long rank() const {
657 			if (_ptr) return _ptr->rank();
658 			else return 0;
659 		};
660 
661 		/// return the dimension
662 		unsigned int dim() const {return _ptr->dim();};
663 
664 		/// returns the dimensions
665 		long dim(const int& i) const {return _ptr->get_k();};
666 
667 		/// returns the number of dimensions
668 		long ndim() const {
669 			if (_ptr) return _ptr->dim();
670 			return -1;
671 		};
672 
673 		/// return the polynomial order
674 		unsigned int get_k() const {return _ptr->get_k();};
675 
676 		/// returns the TensorType of this
677 		TensorType tensor_type() const {
678 			if (_ptr) return _ptr->type();
679 			return TT_NONE;
680 		};
681 
682         /// return the type of the derived class for me
683         std::string what_am_i() const {return TensorArgs::what_am_i(_ptr->type());};
684 
685 		/// returns the number of coefficients (might return zero, although tensor exists)
686 		size_t size() const {
687 			if (_ptr) return _ptr->nCoeff();
688 			return 0;
689 		};
690 
691 		/// returns the number of coefficients (might return zero, although tensor exists)
692 		size_t real_size() const {
693 			if (_ptr) return _ptr->real_size()+sizeof(*this);
694 			return 0;
695 		};
696 
697 		/// returns the Frobenius norm
698 		float_scalar_type normf() const {
699 			if (has_no_data()) return 0.0;
700 			return config().normf();
701 		};
702 
703         /// returns the Frobenius norm; expects full rank or SVD!
704         double svd_normf() const {
705             if (has_no_data()) return 0.0;
706             if (tensor_type()==TT_2D) return config().svd_normf();
707             return config().normf();
708         };
709 
710         /// returns the trace of <this|rhs>
711 		T trace(const GenTensor<T>& rhs) const {
712 			return this->trace_conj(rhs);
713 		}
714 
715         /// returns the trace of <this|rhs>
716 		template<typename Q>
717 		TENSOR_RESULT_TYPE(T,Q) trace_conj(const GenTensor<Q>& rhs) const {
718             if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in GenTensor, sorry",1);
719             if (TensorTypeData<Q>::iscomplex) MADNESS_EXCEPTION("no complex trace in GenTensor, sorry",1);
720 
721             typedef TENSOR_RESULT_TYPE(T,Q) resultT;
722 			// fast return if possible
723 			if ((this->rank()==0) or (rhs.rank()==0)) return resultT(0.0);
724 
725 			MADNESS_ASSERT(compatible(*this,rhs));
726 			MADNESS_ASSERT(this->tensor_type()==rhs.tensor_type());
727 
728 			return overlap(*(this->_ptr),*rhs._ptr);
729 		}
730 
731         /// returns the trace of <this|rhs>
732 		template<typename Q>
733 		TENSOR_RESULT_TYPE(T,Q) trace_conj(const Tensor<Q>& rhs) const {
734             if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in GenTensor, sorry",1);
735             if (TensorTypeData<Q>::iscomplex) MADNESS_EXCEPTION("no complex trace in GenTensor, sorry",1);
736 
737             typedef TENSOR_RESULT_TYPE(T,Q) resultT;
738 			// fast return if possible
739 			if ((this->rank()==0)) return resultT(0.0);
740 
741 			// fast return if this is a full tensor
742 			// otherwise reconstruct this to a full tensor, since it's presumably
743 			// faster than turning the full tensor rhs into a low-rank approximation
744 			if (this->tensor_type()==TT_FULL) return full_tensor().trace_conj(rhs);
745 			else return full_tensor_copy().trace_conj(rhs);
746 
747 		}
748 
749         /// Inplace multiply by corresponding elements of argument Tensor
750         GenTensor<T>& emul(const GenTensor<T>& t) {
751             MADNESS_ASSERT(t.tensor_type()==this->tensor_type());
752 
753             if (this->tensor_type()==TT_FULL) full_tensor().emul(t.full_tensor());
754             else config().emul(t.config());
755             return *this;
756         }
757 
758         GenTensor convert(const TensorArgs& targs) const {
759             GenTensor<T> result=copy(*this);
760             change_tensor_type(result,targs);
761             return result;
762         }
763 
764 
765         /// return a Tensor, no matter what
766 		Tensor<T> full_tensor_copy() const {
767 			const TensorType tt=tensor_type();
768 			if (tt==TT_NONE) return Tensor<T>();
769 			else if (tt==TT_2D) return this->reconstruct_tensor();
770 			else if (tt==TT_FULL) {
771 				return copy(full_tensor());
772 			} else {
773 				print(TensorArgs::what_am_i(tt),"unknown tensor type");
774 				MADNESS_ASSERT(0);
775 			}
776 		}
777 
778 		/// reduce the rank of this
779 		void reduce_rank(const double& eps) {
780 
781 			if (rank()==0) return;
782 
783 			// direct reduction on the polynomial values on the Tensor
784 			if (tensor_type()==TT_FULL or tensor_type()==TT_NONE) {
785 				return;
786 			} else if (this->tensor_type()==TT_2D) {
787 				config().divide_and_conquer_reduce(eps*facReduce());
788 			} else {
789 				MADNESS_EXCEPTION("unknown tensor type in GenTensor::reduceRank()",0);
790 			}
791 			MADNESS_ASSERT(this->_ptr->has_structure() or this->rank()==0);
792 		}
793 
794 		/// print this' coefficients
795 		void printCoeff(const std::string title) const {
796 			print("printing SepRep",title);
797 			print(_ptr->weights_);
798 			for (unsigned int idim=0; idim<this->_ptr->dim_eff(); idim++) {
799 				print("coefficients for dimension",idim);
800 				print(_ptr->vector_[idim]);
801 			}
802 		}
803 
804 		/// reconstruct this to return a full tensor
805 		Tensor<T> reconstruct_tensor() const {
806 
807 			if (tensor_type()==TT_FULL) return full_tensor();
808 			else if (tensor_type()==TT_2D) return config().reconstruct();
809 			else {
810 			    MADNESS_EXCEPTION("you should not be here",1);
811 			}
812             return Tensor<T>();
813 		}
814 
815 		/// append this to rhs, shape must conform
816 		void append(gentensorT& rhs, const T fac=1.0) const {
817 			rhs.config().append(*this->_ptr,fac);
818 		}
819 
820 		/// add SVD
821 		void add_SVD(const gentensorT& rhs, const double& thresh) {
822 			if (rhs.has_no_data()) return;
823             if (has_no_data()) {
824                 *this=rhs;
825                 return;
826             }
827 			if (tensor_type()==TT_FULL or tensor_type()==TT_NONE) {
828 				this->full_tensor()+=rhs.full_tensor();
829 				return;
830 			}
831 			config().add_SVD(rhs.config(),thresh*facReduce());
832 		}
833 
834 	    /// check compatibility
835 		friend bool compatible(const gentensorT& rhs, const gentensorT& lhs) {
836 			return ((rhs.tensor_type()==lhs.tensor_type()) and (rhs.get_k()==lhs.get_k())
837 					and (rhs.dim()==lhs.dim()));
838 		};
839 
840 		/// transform the Legendre coefficients with the tensor
841 		gentensorT transform(const Tensor<T> c) const {
842 //			_ptr->make_structure();
843 		    if (has_no_data()) return gentensorT();
844 			MADNESS_ASSERT(_ptr->has_structure());
845 			return gentensorT (this->_ptr->transform(c));
846 		}
847 
848 		/// transform the Legendre coefficients with the tensor
849 		template<typename Q>
850 		gentensorT general_transform(const Tensor<Q> c[]) const {
851 //		    this->_ptr->make_structure();
852 		    if (has_no_data()) return gentensorT();
853             MADNESS_ASSERT(_ptr->has_structure());
854 			return gentensorT (this->config().general_transform(c));
855 		}
856 
857 		/// inner product
858 		gentensorT transform_dir(const Tensor<T>& c, const int& axis) const {
859 //            this->_ptr->make_structure();
860             MADNESS_ASSERT(_ptr->has_structure());
861             return GenTensor<T>(this->_ptr->transform_dir(c,axis));
862 		}
863 
864 		/// return a reference to the SRConf
865 		const SRConf<T>& config() const {return *_ptr;}
866 
867 		/// return a reference to the SRConf
868 		SRConf<T>& config() {return *_ptr;}
869 
870 		/// return the additional safety for rank reduction
871 		static double fac_reduce() {return facReduce();};
872 
873 	private:
874 
875 		/// release memory
876 		void clear() {_ptr.reset();};
877 
		/// same as operator+=, but handles non-conforming vectors (i.e. slices)

		/// computes this(lhs_s) = alpha*this(lhs_s) + beta*rhs(rhs_s)
		/// @param[in]	rhs		tensor to be added to this
		/// @param[in]	lhs_s	slices into this (the target)
		/// @param[in]	rhs_s	slices into rhs (the source)
		/// @param[in]	alpha	scaling factor for this
		/// @param[in]	beta	scaling factor for rhs
		void inplace_add(const gentensorT& rhs, const std::vector<Slice>& lhs_s,
				const std::vector<Slice>& rhs_s, const double alpha, const double beta) {

			// fast return if possible: a missing or rank-0 rhs adds nothing
			if (rhs.has_no_data() or rhs.rank()==0) return;

			// adding tensors of different representations is not supported
			if (this->has_data()) MADNESS_ASSERT(this->tensor_type()==rhs.tensor_type());

			// no fast return possible!!!
			//			if (this->rank()==0) {
			//				// this is a deep copy
			//				*this=rhs(rhs_s);
			//				this->scale(beta);
			//				return;
			//			}

			if (tensor_type()==TT_FULL) {
				// full representation: sliced gaxpy directly on the dense tensors
				full_tensor()(lhs_s).gaxpy(alpha,rhs.full_tensor()(rhs_s),beta);
			} else {
//				rhs._ptr->make_structure();
//				_ptr->make_structure();
				MADNESS_ASSERT(_ptr->has_structure());
                MADNESS_ASSERT(rhs._ptr->has_structure());
				// low-rank representation: delegate the sliced addition to SRConf
				this->_ptr->inplace_add(*rhs._ptr,lhs_s,rhs_s, alpha, beta);
			}
		}
905 
		/// reduce the rank using SVD

		/// decomposes the matrix-shaped coefficients values_eff and keeps only
		/// the singular vectors whose singular values contribute more than the
		/// scaled threshold eps*facReduce()
		/// @param[in]	eps			the accuracy threshold
		/// @param[in]	values_eff	the coefficients reshaped to a matrix (2D)
		void computeSVD(const double& eps,const Tensor<T>& values_eff) {

			// SVD works only with matrices (2D)
			MADNESS_ASSERT(values_eff.ndim()==2);
			MADNESS_ASSERT(this->tensor_type()==TT_2D);

			// fast return if possible: the tensor is numerically zero, so
			// replace the configuration with an empty (rank-0) one
			if (values_eff.normf()<eps*facReduce()) {
				_ptr=sr_ptr(new configT(_ptr->dim(),_ptr->get_k(),tensor_type()));
				return;
			}

			// output from svd
			Tensor<T> U;
			Tensor<T> VT;
			Tensor< typename Tensor<T>::scalar_type > s;

			svd(values_eff,U,s,VT);

			// find the maximal singular value that's supposed to contribute
			// singular values are ordered (largest first)
			const double thresh=eps*facReduce();
			long i=SRConf<T>::max_sigma(thresh,s.dim(0),s);

			// convert SVD output to our convention
			if (i>=0) {
			    // copy to have contiguous and tailored singular vectors
				_ptr=sr_ptr(new configT(copy(s(Slice(0,i))), copy(transpose(U(_,Slice(0,i)))),
				        copy(VT(Slice(0,i),_)), dim(), get_k()));
                MADNESS_ASSERT(this->_ptr->get_k()==this->_ptr->vector_[0].dim(1));
                MADNESS_ASSERT(this->_ptr->rank()==this->_ptr->vector_[0].dim(0));
                MADNESS_ASSERT(this->_ptr->rank()==this->_ptr->weights_.dim(0));

			} else {
				// all singular values are below the threshold: rank-0 result
				_ptr=sr_ptr(new configT(dim(),get_k(),tensor_type()));
			}
		}
944 	};
945 
946 
947 
	/// implements a slice of a GenTensor

	/// holds a non-owning reference to a GenTensor plus the slices; all
	/// mutating operations act on the referenced GenTensor. Intended for
	/// temporary use only (see GenTensor::operator()).
	template <typename T>
	class SliceGenTensor {

	private:
		friend class GenTensor<T>;

		// the GenTensor this slice refers to (non-owning; const-ness is cast
		// away in the ctor, so slicing a const GenTensor can still mutate it)
		GenTensor<T>& _refGT;
		// one Slice per dimension of the referenced GenTensor
		std::vector<Slice> _s;

		// all ctors are private, only accessible by GenTensor

		/// default ctor
		// NOTE(review): leaves the reference member uninitialized; presumably
		// never instantiated -- confirm before use
		SliceGenTensor<T> () {}

		/// ctor with a GenTensor;
		SliceGenTensor<T> (const GenTensor<T>& gt, const std::vector<Slice>& s)
				: _refGT(const_cast<GenTensor<T>& >(gt))
				, _s(s) {}

	public:

		/// assignment as in g(s) = g1;
		// deliberately disabled: always prints a hint and aborts
		SliceGenTensor<T>& operator=(const GenTensor<T>& rhs) {
			print("You don't want to assign to a SliceGenTensor; use operator+= instead");
			MADNESS_ASSERT(0);
			return *this;
		};

		/// assignment as in g(s) = g1(s);
		// deliberately disabled: always prints a hint and aborts
		SliceGenTensor<T>& operator=(const SliceGenTensor<T>& rhs) {
			print("You don't want to assign to a SliceGenTensor; use operator+= instead");
			MADNESS_ASSERT(0);
			return *this;
		};

		/// inplace addition: this(s) += rhs (rhs taken in full)
		SliceGenTensor<T>& operator+=(const GenTensor<T>& rhs) {
			std::vector<Slice> s(this->_refGT.ndim(),Slice(_));
			_refGT.inplace_add(rhs,this->_s,s,1.0,1.0);
			return *this;
		}

		/// inplace subtraction: this(s) -= rhs (rhs taken in full)
		SliceGenTensor<T>& operator-=(const GenTensor<T>& rhs) {
			std::vector<Slice> s(this->_refGT.ndim(),Slice(_));
			_refGT.inplace_add(rhs,this->_s,s,1.0,-1.0);
			return *this;
		}

		/// inplace addition: this(s) += rhs(s')
		SliceGenTensor<T>& operator+=(const SliceGenTensor<T>& rhs) {
			_refGT.inplace_add(GenTensor<T>(*rhs._refGT._ptr),this->_s,rhs._s,1.0,1.0);
			return *this;
		}

		/// inplace zero-ing; only assignment of 0 is supported
		SliceGenTensor<T>& operator=(const T& number) {
			MADNESS_ASSERT(number==T(0.0));
			if (this->_refGT.tensor_type()==TT_FULL) {
				_refGT.full_tensor()(_s)=0.0;
			} else {
				// zero out the sliced part by subtracting it from itself;
				// NOTE(review): rhs aliases the target GenTensor here --
				// relies on inplace_add coping with self-addition
				const GenTensor<T>& tmp=(this->_refGT);
				_refGT.inplace_add(tmp,_s,_s,1.0,-1.0);
			}
			return *this;
		}

		/// for compatibility with tensor: deep copy of the sliced part
		friend GenTensor<T> copy(const SliceGenTensor<T>& rhs) {
		    if (rhs._refGT.has_data()) return GenTensor<T>(rhs);
		    return GenTensor<T>();
		}

	};
1023 
1024 
1025     /// Often used to transform all dimensions from one basis to another
1026     /// \code
1027     /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c(i',i) c(j',j) c(k',k) ...
1028     /// \endcode
1029 	/// cf tensor/tensor.h
1030     template <class T, class Q>
1031     GenTensor< TENSOR_RESULT_TYPE(T,Q) > transform(const GenTensor<Q>& t, const Tensor<T>& c) {
1032     	return t.transform(c);
1033     }
1034 
1035     /// helper struct for reduce
1036     struct RankReduceWrapper {
1037     	double eps;
1038     	RankReduceWrapper(const double& e) : eps(e) {}
1039 
1040     	template<typename T>
1041     	void operator()(GenTensor<T>& g) const {
1042     		g.reduce_rank(eps);
1043     	}
1044     };
1045 
1046     /// compare the rank of two GenTensors for sorting
1047     template<typename T>
1048     bool compare_rank(const GenTensor<T>& g1, const GenTensor<T>& g2) {
1049     	return g1.rank()<g2.rank();
1050     }
1051 
1052 
1053     /// add all the GenTensors of a given list
1054 
1055     /// If there are many tensors to add it's beneficial to do a sorted addition and start with
1056     /// those tensors with low ranks
1057     /// @param[in]	addends		a list with gentensors of same dimensions; will be destroyed upon return
1058     /// @param[in]	eps			the accuracy threshold
1059     /// @param[in]	are_optimal	flag if the GenTensors in the list are already in SVD format (if TT_2D)
1060     ///	@return		the sum GenTensor of the input GenTensors
1061     template<typename T>
1062 	GenTensor<T> reduce(std::list<GenTensor<T> >& addends, double eps, bool are_optimal=false) {
1063 
1064     	// fast return
1065     	if (addends.size()==0) return GenTensor<T>();
1066 
1067     	typedef typename std::list<GenTensor<T> >::iterator iterT;
1068 
1069     	// make error relative
1070     	eps=eps/addends.size();
1071 
1072     	// if the addends are not in SVD format do that now so that we can call add_svd later
1073     	if (not are_optimal) {
1074     		RankReduceWrapper f(eps);
1075     		std::for_each(addends.begin(),addends.end(),f);
1076     	}
1077 
1078     	// remove zero ranks and sort the list according to the gentensor's ranks
1079 		addends.remove_if(has_zero_rank<T>);
1080     	if (addends.size()==0) return GenTensor<T>();
1081 		addends.sort(compare_rank<T>);
1082 
1083     	// do the additions
1084     	GenTensor<T> result=copy(addends.front());
1085     	for (iterT it=++addends.begin(); it!=addends.end(); ++it) {
1086     		GenTensor<T>& rhs=*it;
1087     		MADNESS_ASSERT(&rhs!=&result);
1088     		result.add_SVD(rhs,eps);
1089     	}
1090     	addends.clear();
1091 
1092     	return result;
1093     }
1094 
1095     /// Transforms one dimension of the tensor t by the matrix c, returns new contiguous tensor
1096 
1097     /// \ingroup tensor
1098     /// \code
1099     /// transform_dir(t,c,1) = r(i,j,k,...) = sum(j') t(i,j',k,...) * c(j',j)
1100     /// \endcode
1101     /// @param[in] t Tensor to transform (size of dim to be transformed must match size of first dim of \c c )
1102     /// @param[in] c Matrix used for the transformation
1103     /// @param[in] axis Dimension (or axis) to be transformed
1104     /// @result Returns a new, contiguous tensor
1105     template <class T, class Q>
1106     GenTensor<TENSOR_RESULT_TYPE(T,Q)> transform_dir(const GenTensor<Q>& t, const Tensor<T>& c,
1107                                           int axis) {
1108 
1109     	return t.transform_dir(c,axis);
1110     }
1111 
1112     template<typename T>
1113     static inline
1114     std::ostream& operator<<(std::ostream& s, const GenTensor<T>& g) {
1115         std::string str="GenTensor has no data";
1116         if (g.has_no_data()) {
1117             std::string str="GenTensor has no data";
1118             s << str.c_str() ;
1119         } else {
1120             str="GenTensor has data";
1121             s << str.c_str() << g.config();
1122         }
1123         return s;
1124     }
1125 
1126     namespace archive {
        /// Serialize a tensor
        template <class Archive, typename T>
		struct ArchiveStoreImpl< Archive, GenTensor<T> > {

			friend class GenTensor<T>;
			/// Stores the GenTensor to an archive

			/// writes an existence flag first, followed by the SRConf if
			/// there is data; must stay in sync with ArchiveLoadImpl::load
			static void store(const Archive& ar, const GenTensor<T>& t) {
				bool exist=t.has_data();
				ar & exist;
				if (exist) ar & t.config();
			};
		};
1139 
1140 
		/// Deserialize a tensor ... existing tensor is replaced
		template <class Archive, typename T>
		struct ArchiveLoadImpl< Archive, GenTensor<T> > {

			friend class GenTensor<T>;
			/// Replaces this GenTensor with one loaded from an archive

			/// reads the existence flag written by ArchiveStoreImpl::store;
			/// if no data was stored, t is left unchanged
			static void load(const Archive& ar, GenTensor<T>& t) {
				// check for pointer existence
				bool exist=false;
				ar & exist;
				if (exist) {
					// rebuild the GenTensor from the stored SRConf
					SRConf<T> conf;
					ar & conf;
					//t.config()=conf;
					t=GenTensor<T>(conf);
				}
			};
		};
1159     };
1160 
1161     /// outer product of two Tensors, yielding a low rank tensor
1162      template <class T, class Q>
1163      GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer(const GenTensor<T>& lhs2,
1164              const GenTensor<Q>& rhs2, const TensorArgs final_tensor_args) {
1165      	return outer(lhs2.full_tensor(),rhs2.full_tensor(), final_tensor_args);
1166      }
1167 
     /// outer product of two Tensors, yielding a low rank tensor

     /// the result is represented as a rank-1 SRConf with unit weight, where
     /// lhs and rhs each provide one "half" of the dimensions
     /// @param[in]	lhs2	first factor
     /// @param[in]	rhs2	second factor
     /// @param[in]	final_tensor_args	requested representation; only TT_2D is supported
     template <class T, class Q>
     GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer(const Tensor<T>& lhs2,
             const Tensor<Q>& rhs2, const TensorArgs final_tensor_args) {

         MADNESS_ASSERT(final_tensor_args.tt==TT_2D);
     	typedef TENSOR_RESULT_TYPE(T,Q) resultT;

     	// srconf is shallow, do deep copy here
     	const Tensor<T> lhs=copy(lhs2);
     	const Tensor<Q> rhs=copy(rhs2);

     	// all dimensions are assumed to have the same extent k, checked by
     	// comparing k^ndim against the actual sizes below
     	const long k=lhs.dim(0);
     	const long dim=lhs.ndim()+rhs.ndim();
     	long size=1;
     	for (int i=0; i<lhs.ndim(); ++i) size*=k;
     	MADNESS_ASSERT(size==lhs.size());
     	MADNESS_ASSERT(size==rhs.size());
     	MADNESS_ASSERT(lhs.size()==rhs.size());

 		// a single configuration with unit weight
 		Tensor<double> weights(1);
 		weights=1.0;

 		// each factor becomes one flattened rank-1 vector of the SRConf
 		SRConf<resultT> srconf(weights,lhs.reshape(1,lhs.size()),rhs.reshape(1,rhs.size()),dim,k);
 		GenTensor<resultT> coeff(srconf);
 		coeff.normalize();
 		return coeff;
     }
1196 
1197 #endif /* not USE_LRT */
1198 
1199     #endif /* HAVE_GENTENSOR */
1200 
1201     /// change representation to targ.tt
1202     template<typename T>
1203     void change_tensor_type(GenTensor<T>& t, const TensorArgs& targs) {
1204 
1205         // fast return if possible
1206         const TensorType current_type=t.tensor_type();
1207         if (current_type==targs.tt) return;
1208         if (t.has_no_data()) return;
1209 
1210         // for now
1211         MADNESS_ASSERT(targs.tt==TT_FULL or targs.tt==TT_2D);
1212         MADNESS_ASSERT(current_type==TT_FULL or current_type==TT_2D);
1213 
1214         GenTensor<T> result;
1215         if (targs.tt==TT_FULL) {
1216             result=GenTensor<T>(t.full_tensor_copy(),targs);
1217         } else if (targs.tt==TT_2D) {
1218             MADNESS_ASSERT(current_type==TT_FULL);
1219             result=GenTensor<T>(t.full_tensor(),targs);
1220         }
1221 
1222         t=result;
1223 
1224     }
1225 
1226     /// Transform all dimensions of the tensor t by distinct matrices c
1227 
1228     /// Similar to transform but each dimension is transformed with a
1229     /// distinct matrix.
1230     /// \code
1231     /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c[0](i',i) c[1](j',j) c[2](k',k) ...
1232     /// \endcode
1233     /// The first dimension of the matrices c must match the corresponding
1234     /// dimension of t.
1235     template <class T, class Q>
1236     GenTensor<TENSOR_RESULT_TYPE(T,Q)> general_transform(const GenTensor<T>& t, const Tensor<Q> c[]) {
1237         return t.general_transform(c);
1238     }
1239 
1240     template <class T>
1241     GenTensor<T> general_transform(const GenTensor<T>& t, const Tensor<T> c[]) {
1242         return t.general_transform(c);
1243     }
1244 
1245     /// The class defines tensor op scalar ... here define scalar op tensor.
1246     template <typename T, typename Q>
1247     typename IsSupported < TensorTypeData<Q>, GenTensor<T> >::type
1248     operator*(const Q& x, const GenTensor<T>& t) {
1249         return t*x;
1250     }
1251 
1252 
1253 }   // namespace madness
1254 
1255 #if HAVE_GENTENSOR
1256 #ifdef USE_LRT
1257 #include <madness/tensor/lowranktensor.h>
1258 
1259 namespace madness {
1260 
/// GenTensor as a thin wrapper around LowRankTensor (USE_LRT variant)

/// forwards all functionality to the LowRankTensor base class and adds
/// slicing, SRConf access, and deep copy for API compatibility with the
/// non-LRT GenTensor above
template<typename T>
class GenTensor : public LowRankTensor<T> {

public:

    // inherit all LowRankTensor constructors
    using LowRankTensor<T>::LowRankTensor;

    GenTensor<T>() : LowRankTensor<T>() {}
    GenTensor<T>(const GenTensor<T>& g) : LowRankTensor<T>(g) {}
    GenTensor<T>(const LowRankTensor<T>& g) : LowRankTensor<T>(g) {}
    /// construct from an SRConf by wrapping it into an SVDTensor
    GenTensor<T>(const SRConf<T>& sr1) : LowRankTensor<T>(SVDTensor<T>(sr1)) {
    }

    // conversions to the base class; note these return by value (a copy of
    // the base part), not a reference
    operator LowRankTensor<T>() const {return *this;}
    operator LowRankTensor<T>() {return *this;}

    /// general slicing, shallow; for temporary use only!
    SliceGenTensor<T> operator()(const std::vector<Slice>& s) {
        return SliceGenTensor<T>(*this,s);
    }

    /// general slicing, shallow; for temporary use only!
    const SliceGenTensor<T> operator()(const std::vector<Slice>& s) const {
        return SliceGenTensor<T>(*this,s);
    }

    /// assign a number to this tensor
    GenTensor<T>& operator=(const T& number) {
        // delegate to the base-class assignment
        LowRankTensor<T>& base=*this;
        base=(number);
        return *this;
    }


    /// return a string describing the current representation
    std::string what_am_i() const {return TensorArgs::what_am_i(this->tensor_type());};

    /// return the SRConf of the SVD representation; requires TT_2D
    SRConf<T>& config() const {
        MADNESS_ASSERT(this->type==TT_2D and (this->impl.svd));
        return *this->impl.svd.get();
    }
    /// return a GenTensor holding only the configs selected by [start,end]; requires TT_2D
    GenTensor<T> get_configs(const int& start, const int& end) const {
        MADNESS_ASSERT(this->type==TT_2D and (this->impl.svd));
        return GenTensor<T>(config().get_configs(start,end));
    }

    /// deep copy of rhs by deep copying rhs.configs
    friend GenTensor<T> copy(const GenTensor<T>& rhs) {
        return GenTensor<T>(copy(LowRankTensor<T>(rhs)));
    }

};
1312 
/// implements a slice of a GenTensor (USE_LRT variant)

/// thin wrapper around SliceLowRankTensor, kept for API compatibility with
/// the non-LRT SliceGenTensor above
template <typename T>
class SliceGenTensor : public SliceLowRankTensor<T> {
public:
    // inherit all SliceLowRankTensor constructors
    using SliceLowRankTensor<T>::SliceLowRankTensor;

    SliceGenTensor<T>(const SliceGenTensor<T>& g) : SliceLowRankTensor<T>(g) {}
    SliceGenTensor<T>(const SliceLowRankTensor<T>& g) : SliceLowRankTensor<T>(g) {}

    // conversions to the base class; note these return by value
    operator SliceLowRankTensor<T>() const {return *this;}
    operator SliceLowRankTensor<T>() {return *this;}

    /// inplace zero-ing as in g(s)=0.0
    SliceGenTensor<T>& operator=(const T& number) {
        // delegate to the base-class assignment
        SliceLowRankTensor<T>& base=*this;
        base=number;
        return *this;
    }

};
1333 
1334 /// add all the GenTensors of a given list
1335 
1336  /// If there are many tensors to add it's beneficial to do a sorted addition and start with
1337  /// those tensors with low ranks
1338  /// @param[in]  addends     a list with gentensors of same dimensions; will be destroyed upon return
1339 /// @param[in]  eps         the accuracy threshold
1340 /// @param[in]  are_optimal flag if the GenTensors in the list are already in SVD format (if TT_2D)
1341 /// @return     the sum GenTensor of the input GenTensors
1342 template<typename T>
1343 GenTensor<T> reduce(std::list<GenTensor<T> >& addends, double eps, bool are_optimal=false) {
1344     typedef typename std::list<GenTensor<T> >::iterator iterT;
1345     GenTensor<T> result=copy(addends.front());
1346     for (iterT it=++addends.begin(); it!=addends.end(); ++it) {
1347         result+=*it;
1348     }
1349     result.reduce_rank(eps);
1350     return result;
1351 
1352 }
1353 
1354 /// outer product of two Tensors, yielding a low rank tensor
1355  template <class T, class Q>
1356  GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer(const GenTensor<T>& lhs2,
1357          const GenTensor<Q>& rhs2, const TensorArgs final_tensor_args) {
1358 //    return outer_low_rank(lhs2.full_tensor(),rhs2.full_tensor(), final_tensor_args);
1359      typedef TENSOR_RESULT_TYPE(T,Q) resultT;
1360 
1361      // prepare lo-dim tensors for the outer product
1362      TensorArgs targs;
1363      targs.thresh=final_tensor_args.thresh;
1364      if (final_tensor_args.tt==TT_FULL) targs.tt=TT_FULL;
1365      else if (final_tensor_args.tt==TT_2D) targs.tt=TT_FULL;
1366      else if (final_tensor_args.tt==TT_TENSORTRAIN) targs.tt=TT_TENSORTRAIN;
1367      else {
1368          MADNESS_EXCEPTION("confused tensor args in outer_low_rank",1);
1369      }
1370 
1371      LowRankTensor<T> lhs=lhs2.convert(targs);
1372      LowRankTensor<Q> rhs=rhs2.convert(targs);
1373 
1374      LowRankTensor<resultT> result=outer(lhs,rhs,final_tensor_args);
1375      return result;
1376 
1377 
1378  }
1379 
1380  /// outer product of two Tensors, yielding a low rank tensor
1381  template <class T, class Q>
1382  GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer(const Tensor<T>& lhs2,
1383          const Tensor<Q>& rhs2, const TensorArgs final_tensor_args) {
1384 
1385      typedef TENSOR_RESULT_TYPE(T,Q) resultT;
1386 
1387      // prepare lo-dim tensors for the outer product
1388      TensorArgs targs;
1389      targs.thresh=final_tensor_args.thresh;
1390      if (final_tensor_args.tt==TT_FULL) targs.tt=TT_FULL;
1391      else if (final_tensor_args.tt==TT_2D) targs.tt=TT_FULL;
1392      else if (final_tensor_args.tt==TT_TENSORTRAIN) targs.tt=TT_TENSORTRAIN;
1393      else {
1394          MADNESS_EXCEPTION("confused tensor args in outer_low_rank",1);
1395      }
1396 
1397      LowRankTensor<T> lhs(lhs2,targs);
1398      LowRankTensor<Q> rhs(rhs2,targs);
1399      LowRankTensor<resultT> result=outer(lhs,rhs,final_tensor_args);
1400      return result;
1401  }
1402 
1403 
1404 
1405 namespace archive {
/// Serialize a tensor (USE_LRT variant)
template <class Archive, typename T>
struct ArchiveStoreImpl< Archive, GenTensor<T> > {

    friend class GenTensor<T>;
    /// Stores the GenTensor to an archive

    /// stores via the LowRankTensor base representation; must stay in sync
    /// with ArchiveLoadImpl::load
    static void store(const Archive& ar, const GenTensor<T>& t) {
        LowRankTensor<T> tt(t);
        ar & tt;
    };
};
1417 
1418 
/// Deserialize a tensor ... existing tensor is replaced (USE_LRT variant)
template <class Archive, typename T>
struct ArchiveLoadImpl< Archive, GenTensor<T> > {

    friend class GenTensor<T>;
    /// Replaces this GenTensor with one loaded from an archive

    /// loads the LowRankTensor base representation written by
    /// ArchiveStoreImpl::store and assigns it to t
    static void load(const Archive& ar, GenTensor<T>& t) {
        LowRankTensor<T> tt;
        ar & tt;
        t=tt;
    };
};
1431 };
1432 
1433 }
1434 #endif /* USE_LRT */
1435 #endif /* HAVE_GENTENSOR */
1436 #endif /* GENTENSOR_H_ */
1437