MADNESS  0.10.1
lowranktensor.h
1 /*
2  This file is part of MADNESS.
3 
4  Copyright (C) 2007,2010 Oak Ridge National Laboratory
5 
6  This program is free software; you can redistribute it and/or modify
7  it under the terms of the GNU General Public License as published by
8  the Free Software Foundation; either version 2 of the License, or
9  (at your option) any later version.
10 
11  This program is distributed in the hope that it will be useful,
12  but WITHOUT ANY WARRANTY; without even the implied warranty of
13  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14  GNU General Public License for more details.
15 
16  You should have received a copy of the GNU General Public License
17  along with this program; if not, write to the Free Software
18  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 
20  For more information please contact:
21 
22  Robert J. Harrison
23  Oak Ridge National Laboratory
24  One Bethel Valley Road
25  P.O. Box 2008, MS-6367
26 
27  email: harrisonrj@ornl.gov
28  tel: 865-241-3937
29  fax: 865-572-0680
30 
31  $Id$
32 */
33 
34 #ifndef MADNESS_TENSOR_LOWRANKTENSOR_H_
35 #define MADNESS_TENSOR_LOWRANKTENSOR_H_
36 
37 #include <memory>
38 #include <vector>
39 #include <variant>
40 
41 #include <madness/world/madness_exception.h>
42 #include <madness/world/print.h>
43 #include <madness/tensor/slice.h>
44 #include <madness/tensor/SVDTensor.h>
45 #include <madness/tensor/tensor.h>
46 #include <madness/tensor/tensortrain.h>
47 #include "type_data.h"
49 
50 
51 namespace madness {
52 
53 
54 // forward declaration
55 template <class T> class SliceLowRankTensor;
56 
57 
58 template<typename T>
59 class GenTensor {
60 
61 public:
62 
63  friend class SliceLowRankTensor<T>;
64 
65  /// C++ typename of the real type associated with a complex type.
66  typedef typename TensorTypeData<T>::scalar_type scalar_type;
67 
68  /// C++ typename of the floating point type associated with the scalar real type.
69  typedef typename TensorTypeData<T>::float_scalar_type float_scalar_type;
70 
71  /// empty ctor
72  GenTensor() = default;
73 
74  /// copy ctor, shallow
75  GenTensor(const GenTensor<T>& other) = default;
76 
77  GenTensor(const long ndim, const long* dims, const TensorType& tt) {
78  if (tt==TT_FULL) tensor=std::shared_ptr<Tensor<T> >(new Tensor<T>(ndim, dims));
79  if (tt==TT_2D) tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(ndim, dims));
80  if (tt==TT_TENSORTRAIN) tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(ndim, dims));
81  }
82 
83  /// ctor with dimensions; constructs tensor filled with zeros
84  GenTensor(const std::vector<long>& dim, const TensorType& tt) :
85  GenTensor(long(dim.size()),&dim.front(),tt) {
86  }
87 
88  /// ctor with dimensions; constructs tensor filled with zeros
89  GenTensor(const std::vector<long>& dim, const TensorArgs& targs) :
90  GenTensor(dim, targs.tt) {
91  }
92 
93  /// ctor with dimensions; all dims have the same value k
94  GenTensor(const TensorType& tt, const long k, const long ndim) :
95  GenTensor(std::vector<long>(ndim,k), tt) {
96  }
97 
98  /// ctor with a regular Tensor and arguments, deep
99  GenTensor(const Tensor<T>& rhs, const double& thresh, const TensorType& tt) :
100  GenTensor(rhs,TensorArgs(thresh,tt)) {
101  }
102 
103  /// ctor with a regular Tensor and arguments, deep
104  GenTensor(const Tensor<T>& rhs, const TensorArgs& targs) {
105  if (targs.tt==TT_FULL) *this=copy(rhs);
106  else if (targs.tt==TT_2D) {
107  if (rhs.size()==0) {
108  tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(rhs,targs.thresh*facReduce()));
109  } else {
110  TensorTrain<T> tt(rhs,targs.thresh*facReduce());
111  GenTensor<T> tmp=tt;
112  *this=tmp.convert(targs);
113  }
114 // } else if (targs.tt==TT_DYNAMIC) {
115 // if (rhs.size()==0) {
116 // tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(rhs,targs.thresh*facReduce()));
117 // } else {
118 //
119 // long maxrank=std::max(50.0,floor(0.3*sqrt(rhs.size())));
120 // RandomizedMatrixDecomposition<T> rmd=RMDFactory().maxrank(maxrank);
121 // Tensor<T> Q=rmd.compute_range(rhs,targs.thresh*facReduce()*0.1,{0,0});
122 // if (Q.size()==0) {
123 // *this=SVDTensor<T>(rhs.ndim(),rhs.dims());
124 // } else if (not rmd.exceeds_maxrank()) {
125 // SVDTensor<T> result(rhs.ndim(),rhs.dims());
126 // result=SVDTensor<T>::compute_svd_from_range(Q,rhs);
127 // *this=result;
128 // } else {
129 // *this=copy(rhs);
130 //// TensorTrain<T> tt(rhs,targs.thresh*facReduce());
131 //// GenTensor<T> tmp=tt;
132 //// *this=tmp.convert(targs);
133 // }
134 //// tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(rhs,targs.thresh*facReduce()));
135 // }
136  } else if (targs.tt==TT_TENSORTRAIN) {
137  tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(rhs,targs.thresh*facReduce()));
138  } else {
139  MADNESS_EXCEPTION("unknown tensor type in LowRankTensor constructor",1);
140  }
141  }
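 /// Usage sketch (illustrative, not from the original source): compress a dense
 /// tensor into the SVD (TT_2D) representation and reconstruct it; the names
 /// t, k and eps are assumptions made for this example only.
 /// \code
 /// const long k=10;
 /// const double eps=1.e-5;
 /// Tensor<double> t(k,k,k,k);                       // dense 4-d tensor
 /// t=1.0;                                           // fill with some data
 /// GenTensor<double> g(t,TensorArgs(eps,TT_2D));    // deep, rank-reduced copy
 /// Tensor<double> t2=g.full_tensor_copy();          // reconstruct the dense tensor
 /// double err=(t-t2).normf();                       // expected to be of order eps
 /// \endcode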
142 
143  /// ctor with a regular Tensor, deep
144  GenTensor(const Tensor<T>& other) {
145  tensor=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(other)));
146  }
147 
148  /// ctor with a TensorTrain as argument, deep
149  GenTensor(const TensorTrain<T>& other) {
150  tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(copy(other))) ;
151  }
152 
153  /// ctor with a SVDTensor as argument, deep
154  GenTensor(const SVDTensor<T>& other) {
155  tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(copy(other)));
156  }
157 
158  /// ctor with a SliceLowRankTensor as argument, deep
159  GenTensor(const SliceLowRankTensor<T>& other) {
160  *this=other;
161  }
162 
163  /// shallow assignment operator
164  GenTensor& operator=(const GenTensor<T>& other) {
165  if (this!=&other) tensor=other.tensor;
166  return *this;
167  }
168 
169  /// deep assignment operator
170  GenTensor& operator=(const Tensor<T>& other) {
171  tensor=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(other)));
172  return *this;
173  }
174 
175  /// deep assignment operator
176  GenTensor& operator=(const SVDTensor<T>& other) {
177  tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(copy(other)));
178  return *this;
179  }
180 
181  /// deep assignment operator
182  GenTensor& operator=(const TensorTrain<T>& other) {
183  tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(copy(other)));
184  return *this;
185  }
186 
187  /// deep assignment with slices: g0 = g1(s)
188  GenTensor& operator=(const SliceLowRankTensor<T>& other) {
189  const std::array<Slice,TENSOR_MAXDIM>& s=other.thisslice;
190  MADNESS_ASSERT(other.is_assigned());
191  if (other.is_full_tensor())
192  tensor=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(other.get_tensor()(s))));
193  else if (other.is_svd_tensor())
194  tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(other.get_svdtensor().copy_slice(s)));
195  else if (other.is_tensortrain())
196  tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(copy(other.get_tensortrain(),s)));
197  else {
198  MADNESS_EXCEPTION("you should not be here",1);
199  }
200  return *this;
201  }
202 
203  /// Type conversion makes a deep copy
204  template <class Q> operator GenTensor<Q>() const { // type conv => deep copy
205 
206  GenTensor<Q> result;
207  if (is_full_tensor()) {
208  result=Tensor<Q>(get_tensor());
209  } else if (is_svd_tensor()) {
210  MADNESS_EXCEPTION("no type conversion for TT_2D yet",1);
211  } else if (is_tensortrain()) {
212  MADNESS_EXCEPTION("no type conversion for TT_TENSORTRAIN yet",1);
213  }
214  return result;
215  }
216 
219  try {
220  return *(std::get<1>(tensor).get());
221  } catch (...) {
222  MADNESS_EXCEPTION("failure to return SVDTensor from LowRankTensor",1);
223  }
224  }
225 
226  const SVDTensor<T>& get_svdtensor() const {
228  try {
229  return *(std::get<1>(tensor).get());
230  } catch (...) {
231  MADNESS_EXCEPTION("failure to return SVDTensor from LowRankTensor",1);
232  }
233  }
234 
235  Tensor<T>& get_tensor() {
237  try {
238  return *(std::get<0>(tensor).get());
239  } catch (...) {
240  MADNESS_EXCEPTION("failure to return Tensor from LowRankTensor",1);
241  }
242  }
243 
244  const Tensor<T>& get_tensor() const {
246  try {
247  return *(std::get<0>(tensor).get());
248  } catch (...) {
249  MADNESS_EXCEPTION("failure to return Tensor from LowRankTensor",1);
250  }
251  }
252 
253  TensorTrain<T>& get_tensortrain() {
255  try {
256  return *(std::get<2>(tensor).get());
257  } catch (...) {
258  MADNESS_EXCEPTION("failure to return TensorTrain from LowRankTensor",1);
259  }
260  }
261 
262  const TensorTrain<T>& get_tensortrain() const {
264  try {
265  return *(std::get<2>(tensor).get());
266  } catch (...) {
267  MADNESS_EXCEPTION("failure to return TensorTrain from LowRankTensor",1);
268  }
269  }
270 
271  /// general slicing, shallow; for temporary use only!
272  SliceLowRankTensor<T> operator()(const std::vector<Slice>& s) {
273  return SliceLowRankTensor<T>(*this,s);
274  }
275 
276  /// general slicing, shallow; for temporary use only!
277  const SliceLowRankTensor<T> operator()(const std::vector<Slice>& s) const {
278  return SliceLowRankTensor<T>(*this,s);
279  }
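 /// Usage sketch (illustrative): slicing returns a temporary, shallow
 /// SliceLowRankTensor meant to be consumed immediately, e.g. to accumulate
 /// into a sub-block; the names g, g1, q and s are assumptions for this example.
 /// \code
 /// GenTensor<double> g(TT_2D,10,4);                 // 4-d, all dims of size 10, zero
 /// Tensor<double> q(5,5,5,5); q=1.0;
 /// GenTensor<double> g1(q,TensorArgs(1.e-5,TT_2D));
 /// std::vector<Slice> s(4,Slice(0,4));              // first half of every dimension
 /// g(s)+=g1;                                        // accumulate into the sub-block
 /// g(s)=0.0;                                        // zero the sub-block again
 /// \endcode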
280 
281 
282  /// deep copy
283  friend GenTensor copy(const GenTensor& other) {
284  GenTensor<T> result;
285  if (other.is_assigned()) std::visit([&result](auto& obj) {result=copy(*obj);}, other.tensor);
286  return result;
287  }
288 
289  /// return the tensor type
290  TensorType tensor_type() const {
291  if (index()==0) return TT_FULL;
292  if (index()==1) return TT_2D;
293  if (index()==2) return TT_TENSORTRAIN;
294  MADNESS_EXCEPTION("confused tensor types ",1);
295  }
296 
297  constexpr bool is_full_tensor() const {
298  return (index()==0);
299  }
300 
301  constexpr bool is_svd_tensor() const {
302  return (index()==1);
303  }
304 
305  constexpr bool is_tensortrain() const {
306  return (index()==2);
307  }
308 
309  bool is_of_tensortype(const TensorType& tt) const {
310  if ((index()==0) and (tt==TT_FULL)) return true;
311  if ((index()==1) and (tt==TT_2D)) return true;
312  if ((index()==2) and (tt==TT_TENSORTRAIN)) return true;
313  return false;
314  }
315 
316  template<typename Q, typename R>
317  friend bool is_same_tensor_type(const GenTensor<R>& rhs, const GenTensor<Q>& lhs);
318 
319  int index() const {
320  return is_assigned() ? tensor.index() : -1;
321  }
322 
323  GenTensor& convert_inplace(const TensorArgs& targs) {
324 
325  // fast return
326  if (not is_assigned()) return *this;
327  if (is_of_tensortype(targs.tt)) return *this;
328 // if (targs.tt==TT_DYNAMIC) if (is_svd_tensor()) return *this;
329 
330  // target is full tensor
331  if (targs.tt == TT_FULL) {
332  *this = this->full_tensor_copy();
333  }
334 
335  // source is full tensor: construct the corresponding representation
336  else if (is_full_tensor()) {
337  *this = GenTensor<T>(get_tensor(), targs);
338  }
339 
340  // TT_TENSORTRAIN TO TT_2D
341  else if ((is_tensortrain()) and (targs.tt == TT_2D)) {
342  Tensor<T> U, VT;
343  Tensor< typename Tensor<T>::scalar_type > s;
344  get_tensortrain().two_mode_representation(U, VT, s);
345  long rank = s.size();
346  if (rank == 0) {
347  *this = SVDTensor<T>(get_tensortrain().ndim(), get_tensortrain().dims(), ndim() / 2);
348  return *this;
349  }
350 
351  long n = 1, m = 1;
352  for (int i = 0; i < U.ndim() - 1; ++i) n *= U.dim(i);
353  for (int i = 1; i < VT.ndim(); ++i) m *= VT.dim(i);
354  MADNESS_ASSERT(rank * n == U.size());
355  MADNESS_ASSERT(rank * m == VT.size());
356  U = copy(transpose(U.reshape(n, rank))); // make it contiguous
357  VT = VT.reshape(rank, m);
358  SVDTensor<T> svdtensor(s, U, VT, ndim(), dims());
359  svdtensor.normalize();
360  *this = svdtensor;
361  }
362  else if ((is_svd_tensor()) and (targs.tt == TT_TENSORTRAIN)) {
363  TensorTrain<T> tt(this->full_tensor_copy(),targs.thresh);
364  *this=tt;
365  } else {
366  print("conversion from type ", index(), "to type", targs.tt, "not supported");
367  MADNESS_EXCEPTION("type conversion not supported in LowRankTensor::convert ", 1);
368  }
369  return *this;
370  }
371 
372  /// convert this to a new LowRankTensor of given tensor type
373  GenTensor convert(const TensorArgs& targs) const {
374 
375  // deep copy for same type
376  if (is_of_tensortype(targs.tt)) return copy(*this);
377 
378  // new LRT will be newly constructed anyways
379  if (is_full_tensor()) return GenTensor<T>(get_tensor(),targs);
380 
381  GenTensor<T> result(*this); // shallow
382  result.convert_inplace(targs);
383  return result;
384  }
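 /// Usage sketch (illustrative): switch between representations; convert() returns
 /// a new GenTensor, convert_inplace() changes this one. Names are assumptions.
 /// \code
 /// TensorArgs svd_args(1.e-5,TT_2D);
 /// TensorArgs tt_args(1.e-5,TT_TENSORTRAIN);
 /// GenTensor<double> g(std::vector<long>(4,10),tt_args);   // zero tensor train
 /// GenTensor<double> g_svd=g.convert(svd_args);            // deep copy in SVD form
 /// g.convert_inplace(svd_args);                             // g itself is now TT_2D
 /// \endcode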
385 
386  long ndim() const {
387  return (is_assigned()) ? ptr()->ndim() : -1;
388  }
389 
390  /// return the number of entries in dimension i
391  long dim(const int i) const {
393  return ptr()->dim(i);
394  }
395 
396  /// return the array of tensor dimensions
397  const long* dims() const {
399  return ptr()->dims();
400  }
401 
402  void normalize() {
403  if (is_svd_tensor()) get_svdtensor().normalize();
404  }
405 
406  float_scalar_type normf() const {
407  float_scalar_type norm;
408  std::visit([&norm](auto& obj) {norm=obj->normf();}, tensor);
409  return norm;
410  }
411 
412  float_scalar_type svd_normf() const {
413  float_scalar_type norm;
414  if (is_svd_tensor()) return get_svdtensor().svd_normf();
415  std::visit([&norm](auto& obj) {norm=obj->normf();}, tensor);
416  return norm;
417  }
418 
419 
420  /// Inplace multiplication by scalar of supported type (legacy name)
421 
422  /// @param[in] x Scalar value
423  /// @return %Reference to this tensor
424  template <typename Q>
425  typename IsSupported<TensorTypeData<Q>,GenTensor<T>&>::type
426  scale(Q fac) {
427  if (not is_assigned()) return *this;
428  std::visit([&fac](auto& obj) {obj->scale(T(fac));}, tensor);
429  return *this;
430  }
431 
432  Tensor<T> full_tensor_copy() const {
433  if (not is_assigned()) return Tensor<T>();
434  else if (is_full_tensor()) return copy(get_tensor());
435  else if (is_svd_tensor()) return get_svdtensor().reconstruct();
436  else if (is_tensortrain()) return get_tensortrain().reconstruct();
437  else {
438  MADNESS_EXCEPTION("you should not be here",1);
439  }
440  return Tensor<T>();
441  }
442 
443  const Tensor<T>& full_tensor() const {
444  return get_tensor();
445  }
446 
447  Tensor<T>& full_tensor() {
448  return get_tensor();
449  }
450 
451 
452  /// reconstruct this to return a full tensor
453  Tensor<T> reconstruct_tensor() const {
454 
455  if (is_full_tensor()) return full_tensor();
456  if (is_svd_tensor() or is_tensortrain()) return full_tensor_copy();
457  return Tensor<T>();
458  }
459 
460 
461  static double facReduce() {return 1.e-3;}
462  static double fac_reduce() {return 1.e-3;}
463 
464  long rank() const {
465  if (is_full_tensor()) return -1;
466  else if (is_svd_tensor()) return get_svdtensor().rank();
467  else if (is_tensortrain()) {
468  std::vector<long> r=get_tensortrain().ranks();
469  return *(std::max_element(r.begin(), r.end()));
470  }
471  return 0l;
472  }
473 
474  bool is_assigned() const {
475  return ptr() ? true : false;
476  }
477 
478  bool has_data() const {return size()>0;}
479 
480  bool has_no_data() const {return (not has_data());}
481 
482  long size() const {
483  return (is_assigned()) ? ptr()->size() : 0;
484  }
485 
486  long nCoeff() const {
487  if (is_full_tensor()) return get_tensor().size();
488  else if (is_svd_tensor()) return get_svdtensor().nCoeff();
489  else if (is_tensortrain()) return get_tensortrain().real_size();
490  else {
491  MADNESS_EXCEPTION("you should not be here",1);
492  }
493  return 0l;
494  }
495 
496  long real_size() const {
497  if (is_full_tensor()) return get_tensor().size();
498  else if (is_svd_tensor()) return get_svdtensor().real_size();
499  else if (is_tensortrain()) return get_tensortrain().real_size();
500  else {
501  MADNESS_EXCEPTION("you should not be here",1);
502  }
503  return 0l;
504  }
505 
506  /// returns the trace of <this|rhs>
507  template<typename Q>
508  TENSOR_RESULT_TYPE(T,Q) trace_conj(const GenTensor<Q>& rhs) const {
509 
510  if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry",1);
511  if (TensorTypeData<Q>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry",1);
512 
513  typedef TENSOR_RESULT_TYPE(T,Q) resultT;
514  // fast return if possible
515  if ((this->rank()==0) or (rhs.rank()==0)) return resultT(0.0);
516 
518 
519  if (is_full_tensor()) return get_tensor().trace_conj(rhs.get_tensor());
520  else if (is_svd_tensor()) return trace(get_svdtensor(),rhs.get_svdtensor());
521  else if (is_tensortrain()) return get_tensortrain().trace(rhs.get_tensortrain());
522  else {
523  MADNESS_EXCEPTION("you should not be here",1);
524  }
525  return TENSOR_RESULT_TYPE(T,Q)(0);
526  }
527 
528  /// multiply with a number
529  template<typename Q>
530  GenTensor<TENSOR_RESULT_TYPE(T,Q)> operator*(const Q& x) const {
531  GenTensor<TENSOR_RESULT_TYPE(T,Q)> result(copy(*this));
532  result.scale(x);
533  return result;
534  }
535 
536  GenTensor operator+(const GenTensor& other) {
537  GenTensor<T> result=copy(*this);
538  result.gaxpy(1.0,other,1.0);
539  return result;
540  }
541 
542  GenTensor operator+(const SliceLowRankTensor<T>& other) {
543  GenTensor<T> result=copy(*this);
544  std::array<Slice,TENSOR_MAXDIM> s0;
545  s0.fill(_);
546  result.gaxpy(1.0,s0,other,1.0,other.thisslice);
547  return result;
548  }
549 
550  GenTensor& operator+=(const GenTensor& other) {
551  gaxpy(1.0,other,1.0);
552  return *this;
553  }
554 
555  GenTensor& operator+=(const SliceLowRankTensor<T>& other) {
556  std::array<Slice,TENSOR_MAXDIM> s0;
557  s0.fill(_);
558  this->gaxpy(1.0,s0,other,1.0,other.thisslice);
559  return *this;
560  }
561 
562  GenTensor operator-(const GenTensor& other) {
563  GenTensor<T> result=copy(*this);
564  result.gaxpy(1.0,other,-1.0);
565  return result;
566  }
567 
568  GenTensor& operator-=(const GenTensor& other) {
569  gaxpy(1.0,other,-1.0);
570  return *this;
571  }
572 
573  GenTensor& operator-=(const SliceLowRankTensor<T>& other) {
574  std::array<Slice,TENSOR_MAXDIM> s0;
575  s0.fill(_);
576  this->gaxpy(1.0,s0,other,-1.0,other.thisslice);
577  return *this;
578  }
579 
580  GenTensor& gaxpy(const T alpha, const GenTensor& other, const T beta) {
581 
582  // deliberately excluding gaxpys for different tensors due to efficiency considerations!
583  MADNESS_ASSERT(is_same_tensor_type(*this,other));
585  if (is_full_tensor()) get_tensor().gaxpy(alpha,other.get_tensor(),beta);
586  else if (is_svd_tensor()) get_svdtensor().gaxpy(alpha,other.get_svdtensor(),beta);
587  else if (is_tensortrain()) get_tensortrain().gaxpy(alpha,other.get_tensortrain(),beta);
588  else {
589  MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::gaxpy",1);
590  }
591  return *this;
592  }
593 
594  GenTensor& gaxpy(const T alpha, std::array<Slice,TENSOR_MAXDIM> s0,
595  const GenTensor& other, const T beta, std::array<Slice,TENSOR_MAXDIM> s1) {
596 
597  // deliberately excluding gaxpys for different tensors due to efficiency considerations!
598  MADNESS_ASSERT(is_same_tensor_type(*this,other));
600 
601  if (is_full_tensor()) {
602  get_tensor()(s0).gaxpy(alpha,other.get_tensor()(s1),beta);
603  } else if (is_svd_tensor()) {
604  get_svdtensor().inplace_add(other.get_svdtensor(),s0,s1,alpha,beta);
605  } else if (is_tensortrain()) {
606  MADNESS_ASSERT(alpha==1.0);
607  get_tensortrain().gaxpy(s0, other.get_tensortrain(), beta, s1);
608  } else {
609  MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::gaxpy",1);
610  }
611  return *this;
612  }
613 
614  /// assign a number to this tensor
615  GenTensor& operator=(const T& number) {
616  std::visit([&number](auto& obj) {*obj=number;}, tensor);
617  return *this;
618 
619  }
620 
621  void add_SVD(const GenTensor& other, const double& thresh) {
622  if (is_full_tensor()) get_tensor()+=other.get_tensor();
623  else if (is_svd_tensor()) get_svdtensor().add_SVD(other.get_svdtensor(),thresh*facReduce());
624  else if (is_tensortrain()) get_tensortrain()+=(other.get_tensortrain());
625  else {
626  MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::add_SVD",1);
627  }
628  }
629 
630  /// Inplace multiply by corresponding elements of argument Tensor
631  GenTensor<T>& emul(const GenTensor<T>& other) {
632 
633  // deliberately excluding emuls for different tensors due to efficiency considerations!
634  MADNESS_ASSERT(is_same_tensor_type(*this,other));
635 
636  // binary operation with the visitor pattern
637  // std::visit([&other](auto& obj) {obj.emul(other.tensor);}, tensor);
638  if (is_full_tensor()) get_tensor().emul(other.get_tensor());
639  else if (is_svd_tensor()) get_svdtensor().emul(other.get_svdtensor());
640  else if (is_tensortrain()) get_tensortrain().emul(other.get_tensortrain());
641  else {
642  MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::emul",1);
643  }
644  return *this;
645 
646  }
647 
648  void reduce_rank(const double& thresh) {
649  if (is_svd_tensor()) get_svdtensor().divide_and_conquer_reduce(thresh*facReduce());
650  if (is_tensortrain()) get_tensortrain().truncate(thresh*facReduce());
651  }
652 
653 
654 public:
655 
656  /// Transform all dimensions of the tensor t by the matrix c
657 
658  /// \ingroup tensor
659  /// Often used to transform all dimensions from one basis to another
660  /// \code
661  /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c(i',i) c(j',j) c(k',k) ...
662  /// \endcode
663  /// The input dimensions of \c t must all be the same and agree with
664  /// the first dimension of \c c . The dimensions of \c c may differ in
665  /// size.
666  template <typename R, typename Q>
667  friend GenTensor<TENSOR_RESULT_TYPE(R,Q)> transform(
668  const GenTensor<R>& t, const Tensor<Q>& c);
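 /// Usage sketch (illustrative): rotate all dimensions with one k-by-k matrix;
 /// the names g, c and k are assumptions for this example.
 /// \code
 /// const long k=10;
 /// Tensor<double> c(k,k); c=1.0;                    // transformation matrix c(i',i)
 /// GenTensor<double> g(TT_2D,k,3);                  // 3-d tensor, all dims of size k
 /// GenTensor<double> g2=transform(g,c);             // every dimension transformed by c
 /// \endcode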
669 
670  /// Transform all dimensions of the tensor t by distinct matrices c
671 
672  /// \ingroup tensor
673  /// Similar to transform but each dimension is transformed with a
674  /// distinct matrix.
675  /// \code
676  /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c[0](i',i) c[1](j',j) c[2](k',k) ...
677  /// \endcode
678  /// The first dimension of the matrices c must match the corresponding
679  /// dimension of t.
680  template <typename R, typename Q>
681  friend GenTensor<TENSOR_RESULT_TYPE(R,Q)> general_transform(
682  const GenTensor<R>& t, const Tensor<Q> c[]);
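 /// Usage sketch (illustrative): transform each dimension with its own matrix;
 /// c must hold one matrix per dimension of t. Names are assumptions.
 /// \code
 /// const long k=10;
 /// GenTensor<double> g(TT_2D,k,3);
 /// Tensor<double> c[3];                             // one k-by-k matrix per dimension
 /// for (int i=0; i<3; ++i) {c[i]=Tensor<double>(k,k); c[i]=1.0;}
 /// GenTensor<double> g2=general_transform(g,c);
 /// \endcode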
683 
684  /// Transforms one dimension of the tensor t by the matrix c, returns new contiguous tensor
685 
686  /// \ingroup tensor
687  /// \code
688  /// transform_dir(t,c,1) = r(i,j,k,...) = sum(j') t(i,j',k,...) * c(j',j)
689  /// \endcode
690  /// @param[in] t Tensor to transform (size of dim to be transformed must match size of first dim of \c c )
691  /// @param[in] c Matrix used for the transformation
692  /// @param[in] axis Dimension (or axis) to be transformed
693  /// @return Returns a new, contiguous tensor
694  template <typename R, typename Q>
695  friend GenTensor<TENSOR_RESULT_TYPE(R,Q)> transform_dir(
696  const GenTensor<R>& t, const Tensor<Q>& c, const int axis);
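 /// Usage sketch (illustrative): transform only one dimension; here dimension 1
 /// of a 3-d tensor is contracted with c. Names are assumptions.
 /// \code
 /// const long k=10;
 /// GenTensor<double> g(TT_2D,k,3);
 /// Tensor<double> c(k,k); c=1.0;
 /// GenTensor<double> g2=transform_dir(g,c,1);       // r(i,j,k)=sum_j' g(i,j',k) c(j',j)
 /// \endcode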
697 
698 
699  std::string what_am_i() const {
700  TensorType tt;
701  if (this->is_full_tensor()) tt=TT_FULL;
702  if (this->is_svd_tensor()) tt=TT_2D;
703  if (this->is_tensortrain()) tt=TT_TENSORTRAIN;
704  return TensorArgs::what_am_i(tt);
705  };
706 
707 
708  /// might return a NULL pointer!
709  const BaseTensor* ptr() const {
710  const BaseTensor* p;
711  std::visit([&p](auto& obj) {p=dynamic_cast<const BaseTensor*>(obj.get());}, tensor);
712  return p;
713  }
714 
715 private:
716 
717  /// holding the implementation of the low rank tensor representations
718  // std::variant<Tensor<T>, SVDTensor<T>, TensorTrain<T> > tensor;
719  std::variant<std::shared_ptr<Tensor<T> >,
720  std::shared_ptr<SVDTensor<T> >,
721  std::shared_ptr<TensorTrain<T> > > tensor;
722 
723 };
724 
725 
726 
727 namespace archive {
728 /// Serialize a tensor
729 template <class Archive, typename T>
730 struct ArchiveStoreImpl< Archive, GenTensor<T> > {
731 
732  friend class GenTensor<T>;
733  /// Stores the GenTensor to an archive
734  static void store(const Archive& ar, const GenTensor<T>& t) {
735  int index1=t.index();
736  ar & index1;
737  if (index1==0) {
738  const Tensor<T>& tt=t.get_tensor();
739  ar & tt;
740  } else if (index1==1) {
741  const SVDTensor<T>& tt=t.get_svdtensor();
742  ar & tt;
743  } else if (index1==2) {
744  const TensorTrain<T>& tt=t.get_tensortrain();
745  ar & tt;
746  }
747  };
748 };
749 
750 
751 /// Deserialize a tensor ... existing tensor is replaced
752 template <class Archive, typename T>
753 struct ArchiveLoadImpl< Archive, GenTensor<T> > {
754 
755  friend class GenTensor<T>;
756  /// Replaces this GenTensor with one loaded from an archive
757  static void load(const Archive& ar, GenTensor<T>& tensor) {
758  int index=-2;
759  ar & index;
760  if (index==0) {
761  Tensor<T> tt;
762  ar & tt;
763  tensor=tt;
764  } else if (index==1) {
765  SVDTensor<T> tt;
766  ar & tt;
767  tensor=tt;
768  } else if (index==2) {
769  TensorTrain<T> tt;
770  ar & tt;
771  tensor=tt;
772  } else if (index==-1) { // defined value: empty tensor
773  ;
774  } else {
775  MADNESS_EXCEPTION("unknown tensor type",1);
776  }
777 
778 
779  };
780 };
781 };
782 
783 /// type conversion implies a deep copy
784 
785 /// @result Returns a new tensor that is a deep copy of the input
786 template <class Q, class T>
787 GenTensor<Q> convert(const GenTensor<T>& other) {
788 
789  // simple return
790  if (std::is_same<Q, T>::value) return copy(other);
791 
792  GenTensor<Q> result;
793  if (other.is_full_tensor())
794  result=Tensor<Q>(convert<Q,T>(other.get_tensor()));
795  if (other.is_svd_tensor())
796  MADNESS_EXCEPTION("no type conversion for SVDTensors",1);
797  if (other.is_tensortrain())
798  MADNESS_EXCEPTION("no type conversion for TensorTrain",1);
799  return result;
800 }
801 
802 
803 /// change representation to targ.tt
804 template<typename T>
805 void change_tensor_type(GenTensor<T>& t, const TensorArgs& targs) {
806  t.convert_inplace(targs);
807 }
808 
809 /// outer product of two Tensors, yielding a low rank tensor
810 
811 /// do the outer product of two tensors; distinguish these tensortype cases by
812 /// the use of final_tensor_args
813 /// - full x full -> full
814 /// - full x full -> SVD ( default )
815 /// - TensorTrain x TensorTrain -> TensorTrain
816 /// all other combinations are currently invalid.
817 template <class T, class Q>
818 GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer(const GenTensor<T>& t1,
819  const GenTensor<Q>& t2, const TensorArgs final_tensor_args=TensorArgs(-1.0,TT_2D)) {
820 
821  typedef TENSOR_RESULT_TYPE(T,Q) resultT;
822 
823 
825 
826  if (final_tensor_args.tt==TT_FULL) {
827  MADNESS_ASSERT(t1.is_full_tensor());
828  Tensor<resultT> t(outer(t1.get_tensor(),t2.get_tensor()));
829  return GenTensor<resultT>(t);
830 
831  } else if (final_tensor_args.tt==TT_2D) {
833 
834  // srconf is shallow, do deep copy here
835  const Tensor<T> lhs=t1.full_tensor_copy();
836  const Tensor<Q> rhs=t2.full_tensor_copy();
837 
838  const long k=lhs.dim(0);
839  const long ndim=lhs.ndim()+rhs.ndim();
840  long size=1;
841  for (int i=0; i<lhs.ndim(); ++i) size*=k;
842  MADNESS_ASSERT(size==lhs.size());
843  MADNESS_ASSERT(size==rhs.size());
844  MADNESS_ASSERT(lhs.size()==rhs.size());
845 
846  Tensor< typename Tensor<resultT>::scalar_type > weights(long(1));
847  weights=1.0;
848 
849  std::array<long,TENSOR_MAXDIM> dims;
850  for (int i=0; i<t1.ndim(); ++i) dims[i]=t1.dim(i);
851  for (int i=0; i<t2.ndim(); ++i) dims[i+t1.ndim()]=t2.dim(i);
852 
853  SRConf<resultT> srconf(weights,lhs.reshape(1,lhs.size()),rhs.reshape(1,rhs.size()),ndim,dims.data(),t1.ndim());
854 // srconf.normalize();
855  return GenTensor<resultT>(SVDTensor<resultT>(srconf));
856 
857  } else if (final_tensor_args.tt==TT_TENSORTRAIN) {
860  return outer(t1.get_tensortrain(),t2.get_tensortrain());
861  } else {
862  MADNESS_EXCEPTION("you should not be here",1);
863  }
865 
866  }
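/// Usage sketch (illustrative): the outer product of two dense 3-d tensors yields
/// a 6-d GenTensor, by default in a rank-1 SVD (TT_2D) representation. The names
/// a, b and k are assumptions for this example.
/// \code
/// const long k=10;
/// Tensor<double> a(k,k,k), b(k,k,k);
/// a=1.0; b=2.0;
/// GenTensor<double> ga(a), gb(b);                   // full-tensor representations
/// GenTensor<double> ab=outer(ga,gb);                // 6-d, TT_2D with default TensorArgs
/// \endcode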
867 
868 
869 /// outer product of two Tensors, yielding a low rank tensor
870 template <class T, class Q>
871 GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer_low_rank(const Tensor<T>& lhs2,
872  const Tensor<Q>& rhs2, const TensorArgs final_tensor_args) {
873 
874  typedef TENSOR_RESULT_TYPE(T,Q) resultT;
875 
876  // prepare lo-dim tensors for the outer product
877  TensorArgs targs;
878  targs.thresh=final_tensor_args.thresh;
879  if (final_tensor_args.tt==TT_FULL) targs.tt=TT_FULL;
880  else if (final_tensor_args.tt==TT_2D) targs.tt=TT_FULL;
881  else if (final_tensor_args.tt==TT_TENSORTRAIN) targs.tt=TT_TENSORTRAIN;
882  else {
883  MADNESS_EXCEPTION("confused tensor args in outer_low_rank",1);
884  }
885 
886  GenTensor<T> lhs(lhs2,targs);
887  GenTensor<Q> rhs(rhs2,targs);
888  GenTensor<resultT> result=outer(lhs,rhs,final_tensor_args);
889  return result;
890  }
891 
892 
893 /// The class defines tensor op scalar ... here define scalar op tensor.
894 template <typename T, typename Q>
895 typename IsSupported < TensorTypeData<Q>, GenTensor<T> >::type
896 operator*(const Q& x, const GenTensor<T>& t) {
897  return t*x;
898 }
899 
900 /// add all the GenTensors of a given list
901 
902  /// If there are many tensors to add it's beneficial to do a sorted addition and start with
903  /// those tensors with low ranks
904  /// @param[in] addends a list with gentensors of same dimensions; will be destroyed upon return
905  /// @param[in] eps the accuracy threshold
906  /// @param[in] are_optimal flag if the GenTensors in the list are already in SVD format (if TT_2D)
907  /// @return the sum GenTensor of the input GenTensors
908  template<typename T>
909 GenTensor<T> reduce(std::list<GenTensor<T> >& addends, double eps, bool are_optimal=false) {
910 
911  // fast return
912  addends.remove_if([](auto element) {return not element.is_assigned();});
913  addends.remove_if([](auto element) {return element.rank()==0;});
914  if (addends.size()==0) return GenTensor<T>();
915 
916 
917  if (addends.front().is_svd_tensor()) {
918  std::list<SVDTensor<T> > addends1;
919  for (auto a : addends) addends1.push_back(a.get_svdtensor());
920  return reduce(addends1,eps*GenTensor<T>::facReduce());
921  }
922  // make error relative
923  eps=eps/addends.size();
924 
925  // if the addends are not in SVD format do that now so that we can call add_svd later
926  if (not are_optimal) {
927  for (auto element : addends) element.reduce_rank(eps);
928  }
929 
930  // remove zero ranks and sort the list according to the gentensor's ranks
931  addends.remove_if([](auto element) {return element.rank()==0;});
932  if (addends.size()==0) return GenTensor<T>();
933  addends.sort([](auto element1, auto element2) {return element1.rank()<element2.rank();});
934 
935  // do the additions
936  GenTensor<T> result=copy(addends.front());
937  addends.pop_front();
938  for (auto element : addends) result.add_SVD(element,eps);
939  addends.clear();
940 
941  return result;
942 }
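/// Usage sketch (illustrative): sum several SVD tensors with rank reduction; the
/// list is consumed by the call. The names terms, t and eps are assumptions.
/// \code
/// const double eps=1.e-5;
/// Tensor<double> t(10,10,10,10); t=1.0;
/// std::list<GenTensor<double> > terms;
/// for (int i=0; i<5; ++i) terms.push_back(GenTensor<double>(t,TensorArgs(eps,TT_2D)));
/// GenTensor<double> sum=reduce(terms,eps,true);     // terms is emptied on return
/// \endcode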
943 
944 
945 
946 
947 /// implements a temporary(!) slice of a LowRankTensor
948 template<typename T>
949 class SliceLowRankTensor : public GenTensor<T> {
950  //class SliceLowRankTensor {
951 public:
952 
953  std::array<Slice,TENSOR_MAXDIM> thisslice;
954  // GenTensor<T>* lrt;
955 
956  // all ctors are private, only accessible by GenTensor
957 
958  /// default ctor
959  SliceLowRankTensor<T>() {}
960 
961  /// ctor with a GenTensor; shallow
962  SliceLowRankTensor<T> (const GenTensor<T>& gt, const std::vector<Slice>& s)
963  : GenTensor<T>(const_cast<GenTensor<T>& > (gt)) {
964  // : Tensor<T>(const_cast<Tensor<T>&>(t)) //!!!!!!!!!!!
965  for (int i=0; i<s.size(); ++i) thisslice[i]=s[i];
966  }
967 
968  /// ctor with a GenTensor; shallow
969  SliceLowRankTensor<T> (const GenTensor<T>& gt, const std::array<Slice,TENSOR_MAXDIM>& s)
970  : GenTensor<T>(gt), thisslice(s) {}
971 
972 public:
973 
974  /// assignment as in g(s) = g1;
975  SliceLowRankTensor<T>& operator=(const GenTensor<T>& rhs) {
976  print("You don't want to assign to a SliceLowRankTensor; use operator+= instead");
977  MADNESS_ASSERT(0);
978  return *this;
979  };
980 
981  /// assignment as in g(s) = g1(s);
982  SliceLowRankTensor<T>& operator=(const SliceLowRankTensor<T>& rhs) {
983  print("You don't want to assign to a SliceLowRankTensor; use operator+= instead");
984  MADNESS_ASSERT(0);
985  return *this;
986  };
987 
988  /// inplace addition as in g(s)+=g1
989  SliceLowRankTensor<T>& operator+=(const GenTensor<T>& rhs) {
990  std::array<Slice,TENSOR_MAXDIM> rhs_slice;
991  rhs_slice.fill(_);
992  gaxpy(thisslice,rhs,rhs_slice,1.0);
993  return *this;
994  }
995 
996  /// inplace subtraction as in g(s)-=g1
997  SliceLowRankTensor<T>& operator-=(const GenTensor<T>& rhs) {
998  std::array<Slice,TENSOR_MAXDIM> rhs_slice;
999  rhs_slice.fill(_);
1000  gaxpy(thisslice,rhs,rhs_slice,-1.0);
1001  return *this;
1002  }
1003 
1004  /// inplace addition as in g(s)+=g1(s)
1005  SliceLowRankTensor<T>& operator+=(const SliceLowRankTensor<T>& rhs) {
1006  gaxpy(thisslice,rhs,rhs.thisslice,1.0);
1007  return *this;
1008  }
1009 
1010  /// inplace subtraction as in g(s)-=g1(s)
1011  SliceLowRankTensor<T>& operator-=(const SliceLowRankTensor<T>& rhs) {
1012  gaxpy(thisslice,rhs,rhs.thisslice,-1.0);
1013  return *this;
1014  }
1015 
1016  /// *this = *this(s) + beta * rhs
1017  void gaxpy(const std::array<Slice,TENSOR_MAXDIM>& lslice, const GenTensor<T>& rhs,
1018  const std::array<Slice,TENSOR_MAXDIM>& rslice, const double& beta) {
1019 
1020  // fast return if possible
1021  if (rhs.has_no_data() or rhs.rank()==0) return;
1022 
1023  if (this->has_data()) MADNESS_ASSERT(is_same_tensor_type(*this,rhs));
1024 
1025  if (this->is_full_tensor()) {
1026  this->get_tensor()(thisslice).gaxpy(1.0,rhs.get_tensor()(rslice),beta);
1027 
1028  } else if (this->is_svd_tensor()) {
1029  this->get_svdtensor().inplace_add(rhs.get_svdtensor(),thisslice,rslice, 1.0, beta);
1030 
1031  } else if (this->is_tensortrain()) {
1032  this->get_tensortrain().gaxpy(thisslice,rhs.get_tensortrain(),beta,rslice);
1033  }
1034  return ;
1035  }
1036 
1037  /// inplace zero-ing as in g(s)=0.0
1038  SliceLowRankTensor<T>& operator=(const T& number) {
1039  MADNESS_ASSERT(number==T(0.0));
1040 
1041  if (this->is_full_tensor()) {
1042  this->get_tensor()(thisslice)=0.0;
1043 
1044  } else if (this->is_svd_tensor()) {
1045  MADNESS_ASSERT(this->get_svdtensor().has_structure());
1046  SliceLowRankTensor<T> tmp(*this);
1047  this->get_svdtensor().inplace_add(tmp.get_svdtensor(),thisslice,thisslice, 1.0, -1.0);
1048 
1049  } else if (this->is_tensortrain()) {
1050  this->get_tensortrain().gaxpy(thisslice,this->get_tensortrain(),-1.0,thisslice);
1051  } else {
1052  MADNESS_EXCEPTION("you should not be here",1);
1053  }
1054  return *this;
1055  }
1056 
1057  friend GenTensor<T> copy(const SliceLowRankTensor<T>& other) {
1058  GenTensor<T> result;
1059  const std::array<Slice,TENSOR_MAXDIM> s=other.thisslice;
1060  if (other.is_full_tensor())
1061  result=Tensor<T>(copy(other.get_tensor()(s)));
1062  else if (other.is_svd_tensor())
1063  result=SVDTensor<T>(other.get_svdtensor().copy_slice(s));
1064  else if (other.is_tensortrain())
1065  result=TensorTrain<T>(copy(other.get_tensortrain(),s));
1066  else {
1067  }
1068  return result;
1069  }
1070 
1071 
1072 };
1073 
1074 
1075 template<typename Q, typename R>
1076 bool is_same_tensor_type(const GenTensor<R>& rhs, const GenTensor<Q>& lhs) {
1077  return (rhs.tensor.index()==lhs.tensor.index());
1078 }
1079 
1080 template <typename R, typename Q>
1081 GenTensor<TENSOR_RESULT_TYPE(R,Q)> transform(
1082  const GenTensor<R>& t, const Tensor<Q>& c) {
1083  typedef TENSOR_RESULT_TYPE(R,Q) resultT;
1084  GenTensor<resultT> result;
1085  std::visit([&result, &c](auto& obj) {result=transform(*obj,c);}, t.tensor);
1086  return result;
1087  }
1088 
1089 template <typename R, typename Q>
1090 GenTensor<TENSOR_RESULT_TYPE(R,Q)> general_transform(
1091  const GenTensor<R>& t, const Tensor<Q> c[]) {
1092  typedef TENSOR_RESULT_TYPE(R,Q) resultT;
1093  GenTensor<resultT> result;
1094  std::visit([&result, &c](auto& obj) {result=general_transform(*obj,c);}, t.tensor);
1095  return result;
1096 }
1097 
1098 template <typename R, typename Q>
1099 GenTensor<TENSOR_RESULT_TYPE(R,Q)> transform_dir(
1100  const GenTensor<R>& t, const Tensor<Q>& c, const int axis) {
1101  GenTensor<TENSOR_RESULT_TYPE(R,Q)> result;
1102  std::visit([&result, &c, &axis](auto& obj) {result=transform_dir(*obj,c,axis);}, t.tensor);
1103  return result;
1104 
1105  }
1106 
1107 
1108 
1109 } // namespace madness
1110 
1111 #endif /* MADNESS_TENSOR_LOWRANKTENSOR_H_ */