MADNESS 0.10.1
lowranktensor.h
1/*
2 This file is part of MADNESS.
3
4 Copyright (C) 2007,2010 Oak Ridge National Laboratory
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
20 For more information please contact:
21
22 Robert J. Harrison
23 Oak Ridge National Laboratory
24 One Bethel Valley Road
25 P.O. Box 2008, MS-6367
26
27 email: harrisonrj@ornl.gov
28 tel: 865-241-3937
29 fax: 865-572-0680
30
31 $Id$
32*/
33
34#ifndef MADNESS_TENSOR_LOWRANKTENSOR_H_
35#define MADNESS_TENSOR_LOWRANKTENSOR_H_
36
37#include <memory>
38#include <vector>
39#include <variant>
40
41#include <madness/world/madness_exception.h>
42#include <madness/world/print.h>
43#include "tensor.h"
44#include "gentensor.h"
45#include "slice.h"
46#include "SVDTensor.h"
47#include "type_data.h"
48#include "tensortrain.h"
49
50
51namespace madness {
52
53
54// forward declaration
55template <class T> class SliceLowRankTensor;
56
57
58template<typename T>
59class GenTensor {
60
61public:
62
63 friend class SliceLowRankTensor<T>;
64
65 /// C++ typename of the real type associated with a complex type.
66 typedef typename TensorTypeData<T>::scalar_type scalar_type;
67
68 /// C++ typename of the floating point type associated with scalar real type
69 typedef typename TensorTypeData<T>::float_scalar_type float_scalar_type;
70
71 /// empty ctor
72 GenTensor() = default;
73
74 /// copy ctor, shallow
75 GenTensor(const GenTensor<T>& other) = default;
76
77 GenTensor(const long ndim, const long* dims, const TensorType& tt) {
78 if (tt==TT_FULL) tensor=std::shared_ptr<Tensor<T> >(new Tensor<T>(ndim, dims));
79 if (tt==TT_2D) tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(ndim, dims));
80 if (tt==TT_TENSORTRAIN) tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(ndim, dims));
81 }
82
83 /// ctor with dimensions; constructs tensor filled with zeros
84 GenTensor(const std::vector<long>& dim, const TensorType& tt) :
85 GenTensor(long(dim.size()),&dim.front(),tt) {
86 }
87
88 /// ctor with dimensions; constructs tensor filled with zeros
89 GenTensor(const std::vector<long>& dim, const TensorArgs& targs) :
90 GenTensor(dim, targs.tt) {
91 }
92
93 /// ctor with dimensions; all dims have the same value k
94 GenTensor(const TensorType& tt, const long k, const long ndim) :
95 GenTensor(std::vector<long>(ndim,k), tt) {
96 }
97
98 /// ctor with a regular Tensor and arguments, deep
99 GenTensor(const Tensor<T>& rhs, const double& thresh, const TensorType& tt) :
100 GenTensor(rhs,TensorArgs(thresh,tt)) {
101 }
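 // A minimal usage sketch for these constructors (dimensions, data and threshold are
 // illustrative only): compress a dense 4-d tensor into the SVD (TT_2D) representation
 // and query the result.
 //
 //     Tensor<double> a(5,5,5,5);
 //     a.fill(1.0);                                  // any dense data
 //     GenTensor<double> g(a, 1.e-4, TT_2D);         // deep, compressed copy of a
 //     long r = g.rank();                            // rank of the SVD representation
 //     Tensor<double> b = g.full_tensor_copy();      // reconstruct a dense copy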
102
103 /// ctor with a regular Tensor and arguments, deep
104 GenTensor(const Tensor<T>& rhs, const TensorArgs& targs) {
105 if (targs.tt==TT_FULL) *this=copy(rhs);
106 else if (targs.tt==TT_2D) {
107 if (rhs.size()==0) {
108 tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(rhs,targs.thresh*facReduce()));
109 } else {
110 TensorTrain<T> tt(rhs,targs.thresh*facReduce());
111 GenTensor<T> tmp=tt;
112 *this=tmp.convert(targs);
113 }
114// } else if (targs.tt==TT_DYNAMIC) {
115// if (rhs.size()==0) {
116// tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(rhs,targs.thresh*facReduce()));
117// } else {
118//
119// long maxrank=std::max(50.0,floor(0.3*sqrt(rhs.size())));
120// RandomizedMatrixDecomposition<T> rmd=RMDFactory().maxrank(maxrank);
121// Tensor<T> Q=rmd.compute_range(rhs,targs.thresh*facReduce()*0.1,{0,0});
122// if (Q.size()==0) {
123// *this=SVDTensor<T>(rhs.ndim(),rhs.dims());
124// } else if (not rmd.exceeds_maxrank()) {
125// SVDTensor<T> result(rhs.ndim(),rhs.dims());
126// result=SVDTensor<T>::compute_svd_from_range(Q,rhs);
127// *this=result;
128// } else {
129// *this=copy(rhs);
130//// TensorTrain<T> tt(rhs,targs.thresh*facReduce());
131//// GenTensor<T> tmp=tt;
132//// *this=tmp.convert(targs);
133// }
134//// tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(rhs,targs.thresh*facReduce()));
135// }
136 } else if (targs.tt==TT_TENSORTRAIN) {
137 tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(rhs,targs.thresh*facReduce()));
138 } else {
139 MADNESS_EXCEPTION("unknown tensor type in LowRankTensor constructor",1);
140 }
141 }
142
143 /// ctor with a regular Tensor, deep
144 GenTensor(const Tensor<T>& other) {
145 tensor=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(other)));
146 }
147
148 /// ctor with a TensorTrain as argument, shallow
149 GenTensor(const TensorTrain<T>& other) {
150 tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(copy(other))) ;
151 }
152
153 /// ctor with a SVDTensor as argument, shallow
154 GenTensor(const SVDTensor<T>& other) {
155 tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(copy(other)));
156 }
157
158 /// ctor with a SliceLowRankTensor as argument, deep
159 GenTensor(const SliceLowRankTensor<T>& other) {
160 *this=other;
161 }
162
163 /// shallow assignment operator
164 GenTensor& operator=(const GenTensor<T>& other) {
165 if (this!=&other) tensor=other.tensor;
166 return *this;
167 }
168
169 /// deep assignment operator
170 GenTensor& operator=(const Tensor<T>& other) {
171 tensor=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(other)));
172 return *this;
173 }
174
175 /// deep assignment operator
176 GenTensor& operator=(const SVDTensor<T>& other) {
177 tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(copy(other)));
178 return *this;
179 }
180
181 /// deep assignment operator
182 GenTensor& operator=(const TensorTrain<T>& other) {
183 tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(copy(other)));
184 return *this;
185 }
186
187 /// deep assignment with slices: g0 = g1(s)
188 GenTensor& operator=(const SliceLowRankTensor<T>& other) {
189 const std::array<Slice,TENSOR_MAXDIM>& s=other.thisslice;
191 if (other.is_full_tensor())
192 tensor=std::shared_ptr<Tensor<T> >(new Tensor<T>(copy(other.get_tensor()(s))));
193 else if (other.is_svd_tensor())
194 tensor=std::shared_ptr<SVDTensor<T> >(new SVDTensor<T>(other.get_svdtensor().copy_slice(s)));
195 else if (other.is_tensortrain())
196 tensor=std::shared_ptr<TensorTrain<T> >(new TensorTrain<T>(copy(other.get_tensortrain(),s)));
197 else {
198 MADNESS_EXCEPTION("you should not be here",1);
199 }
200 return *this;
201 }
202
203 /// Type conversion makes a deep copy
204 template <class Q> operator GenTensor<Q>() const { // type conv => deep copy
205
206 GenTensor<Q> result;
207 if (is_full_tensor()) {
208 result=Tensor<Q>(get_tensor());
209 } else if (is_svd_tensor()) {
210 MADNESS_EXCEPTION("no type conversion for TT_2D yet",1);
211 } else if (is_tensortrain()) {
212 MADNESS_EXCEPTION("no type conversion for TT_TENSORTRAIN yet",1);
213 }
214 return result;
215 }
216
218 SVDTensor<T>& get_svdtensor() {
219 try {
220 return *(std::get<1>(tensor).get());
221 } catch (...) {
222 MADNESS_EXCEPTION("failure to return SVDTensor from LowRankTensor",1);
223 }
224 }
225
226 const SVDTensor<T>& get_svdtensor() const {
228 try {
229 return *(std::get<1>(tensor).get());
230 } catch (...) {
231 MADNESS_EXCEPTION("failure to return SVDTensor from LowRankTensor",1);
232 }
233 }
234
235 Tensor<T>& get_tensor() {
237 try {
238 return *(std::get<0>(tensor).get());
239 } catch (...) {
240 MADNESS_EXCEPTION("failure to return Tensor from LowRankTensor",1);
241 }
242 }
243
244 const Tensor<T>& get_tensor() const {
246 try {
247 return *(std::get<0>(tensor).get());
248 } catch (...) {
249 MADNESS_EXCEPTION("failure to return Tensor from LowRankTensor",1);
250 }
251 }
252
253 TensorTrain<T>& get_tensortrain() {
255 try {
256 return *(std::get<2>(tensor).get());
257 } catch (...) {
258 MADNESS_EXCEPTION("failure to return TensorTrain from LowRankTensor",1);
259 }
260 }
261
262 const TensorTrain<T>& get_tensortrain() const {
264 try {
265 return *(std::get<2>(tensor).get());
266 } catch (...) {
267 MADNESS_EXCEPTION("failure to return TensorTrain from LowRankTensor",1);
268 }
269 }
270
271 /// general slicing, shallow; for temporary use only!
272 SliceLowRankTensor<T> operator()(const std::vector<Slice>& s) {
273 return SliceLowRankTensor<T>(*this,s);
274 }
275
276 /// general slicing, shallow; for temporary use only!
277 const SliceLowRankTensor<T> operator()(const std::vector<Slice>& s) const {
278 return SliceLowRankTensor<T>(*this,s);
279 }
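 // Sketch of the slice interface (dimensions, data and threshold are illustrative):
 // a SliceLowRankTensor is a shallow, temporary view used to accumulate into, or
 // zero out, a part of a GenTensor.
 //
 //     Tensor<double> gt0(5,5,5,5); gt0.fill(0.5);
 //     GenTensor<double> g(gt0, 1.e-4, TT_2D);       // 5x5x5x5 in SVD form
 //     Tensor<double> ht(2,2,2,2); ht.fill(1.0);
 //     GenTensor<double> h(ht, 1.e-4, TT_2D);        // 2x2x2x2 in SVD form
 //     std::vector<Slice> s(4, Slice(0,1));          // first two entries in every dimension
 //     g(s) += h;                                    // accumulate h into that part of g
 //     g(s) = 0.0;                                   // and zero it out again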
280
281
282 /// deep copy
283 friend GenTensor copy(const GenTensor& other) {
284 GenTensor<T> result;
285 if (other.is_assigned()) std::visit([&result](auto& obj) {result=copy(*obj);}, other.tensor);
286 return result;
287 }
288
289 /// return the tensor type
290 TensorType tensor_type() const {
291 if (index()==0) return TT_FULL;
292 if (index()==1) return TT_2D;
293 if (index()==2) return TT_TENSORTRAIN;
294 MADNESS_EXCEPTION("confused tensor types ",1);
295 }
296
297 constexpr bool is_full_tensor() const {
298 return (index()==0);
299 }
300
301 constexpr bool is_svd_tensor() const {
302 return (index()==1);
303 }
304
305 constexpr bool is_tensortrain() const {
306 return (index()==2);
307 }
308
309 bool is_of_tensortype(const TensorType& tt) const {
310 if ((index()==0) and (tt==TT_FULL)) return true;
311 if ((index()==1) and (tt==TT_2D)) return true;
312 if ((index()==2) and (tt==TT_TENSORTRAIN)) return true;
313 return false;
314 }
315
316 template<typename Q, typename R>
317 friend bool is_same_tensor_type(const GenTensor<R>& rhs, const GenTensor<Q>& lhs);
318
319 int index() const {
320 return is_assigned() ? tensor.index() : -1;
321 }
322
323 GenTensor& convert_inplace(const TensorArgs& targs) {
324
325 // fast return
326 if (not is_assigned()) return *this;
327 if (is_of_tensortype(targs.tt)) return *this;
328// if (targs.tt==TT_DYNAMIC) if (is_svd_tensor()) return *this;
329
330 // target is full tensor
331 if (targs.tt == TT_FULL) {
332 *this = this->full_tensor_copy();
333 }
334
335 // source is full tensor: construct the corresponding representation
336 else if (is_full_tensor()) {
337 *this = GenTensor<T>(get_tensor(), targs);
338 }
339
340 // TT_TENSORTRAIN TO TT_2D
341 else if ((is_tensortrain()) and (targs.tt == TT_2D)) {
342 Tensor<T> U, VT;
343 Tensor< typename Tensor<T>::scalar_type > s;
344 get_tensortrain().two_mode_representation(U, VT, s);
345 long rank = s.size();
346 if (rank == 0) {
347 *this = SVDTensor<T>(get_tensortrain().ndim(), get_tensortrain().dims(), ndim() / 2);
348 return *this;
349 }
350
351 long n = 1, m = 1;
352 for (int i = 0; i < U.ndim() - 1; ++i) n *= U.dim(i);
353 for (int i = 1; i < VT.ndim(); ++i) m *= VT.dim(i);
354 MADNESS_ASSERT(rank * n == U.size());
355 MADNESS_ASSERT(rank * m == VT.size());
356 U = copy(transpose(U.reshape(n, rank))); // make it contiguous
357 VT = VT.reshape(rank, m);
358 SVDTensor<T> svdtensor(s, U, VT, ndim(), dims());
359 svdtensor.normalize();
360 *this = svdtensor;
361 }
362 else if ((is_svd_tensor()) and (targs.tt == TT_TENSORTRAIN)) {
363 TensorTrain<T> tt(this->full_tensor_copy(),targs.thresh);
364 *this=tt;
365 } else {
366 print("conversion from type ", index(), "to type", targs.tt, "not supported");
367 MADNESS_EXCEPTION("type conversion not supported in LowRankTensor::convert ", 1);
368 }
369 return *this;
370 }
371
372 /// convert this to a new LowRankTensor of given tensor type
373 GenTensor convert(const TensorArgs& targs) const {
374
375 // deep copy for same type
376 if (is_of_tensortype(targs.tt)) return copy(*this);
377
378 // new LRT will be newly constructed anyways
379 if (is_full_tensor()) return GenTensor<T>(get_tensor(),targs);
380
381 GenTensor<T> result(*this); // shallow
382 result.convert_inplace(targs);
383 return result;
384 }
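 // Representation changes in a nutshell (data and thresholds are illustrative):
 // convert() returns a new GenTensor in the requested format, convert_inplace()
 // changes this one.
 //
 //     Tensor<double> a(5,5,5,5); a.fill(1.0);
 //     GenTensor<double> full(a);                                    // TT_FULL representation
 //     GenTensor<double> svd = full.convert(TensorArgs(1.e-4, TT_2D));
 //     full.convert_inplace(TensorArgs(1.e-4, TT_TENSORTRAIN));      // now a tensor train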
385
386 long ndim() const {
387 return (is_assigned()) ? ptr()->ndim() : -1;
388 }
389
390 /// return the number of entries in dimension i
391 long dim(const int i) const {
393 return ptr()->dim(i);
394 }
395
396 /// return the dimensions of this tensor
397 const long* dims() const {
399 return ptr()->dims();
400 }
401
402 void normalize() {
403 if (is_svd_tensor()) get_svdtensor().normalize();
404 }
405
406 float_scalar_type normf() const {
407 float_scalar_type norm;
408 std::visit([&norm](auto& obj) {norm=obj->normf();}, tensor);
409 return norm;
410 }
411
412 float_scalar_type svd_normf() const {
413 float_scalar_type norm;
414 if (is_svd_tensor()) return get_svdtensor().svd_normf();
415 std::visit([&norm](auto& obj) {norm=obj->normf();}, tensor);
416 return norm;
417 }
418
419
420 /// Inplace multiplication by scalar of supported type (legacy name)
421
422 /// @param[in] x Scalar value
423 /// @return %Reference to this tensor
424 template <typename Q>
425 typename IsSupported<TensorTypeData<Q>,GenTensor<T>&>::type
426 scale(Q fac) {
427 if (not is_assigned()) return *this;
428 std::visit([&fac](auto& obj) {obj->scale(T(fac));}, tensor);
429 return *this;
430 }
431
432 Tensor<T> full_tensor_copy() const {
433 if (not is_assigned()) return Tensor<T>();
434 else if (is_full_tensor()) return copy(get_tensor());
435 else if (is_svd_tensor()) return get_svdtensor().reconstruct();
436 else if (is_tensortrain()) return get_tensortrain().reconstruct();
437 else {
438 MADNESS_EXCEPTION("you should not be here",1);
439 }
440 return Tensor<T>();
441 }
442
443 /// return a view (shallow copy) of the full tensor, if possible,
444 /// otherwise reconstruct to full tensor and return a deep copy
445 Tensor<T> full_tensor() const {
446 if (is_full_tensor()) return get_tensor();
447 return full_tensor_copy();
448 }
449
450 /// return a reference of the full tensor, presumably for in-place modification
451 /// WARNING: works only if this is indeed a full tensor
452 Tensor<T>& full_tensor() {
453 return get_tensor();
454 }
456
457
458 /// reconstruct this to return a full tensor
459 Tensor<T> reconstruct_tensor() const {
460
461 if (is_full_tensor()) return full_tensor();
462 if (is_svd_tensor() or is_tensortrain()) return full_tensor_copy();
463 return Tensor<T>();
464 }
465
466
467 static double facReduce() {return 1.e-3;}
468 static double fac_reduce() {return 1.e-3;}
469
470 long rank() const {
471 if (is_full_tensor()) return -1;
472 else if (is_svd_tensor()) return get_svdtensor().rank();
473 else if (is_tensortrain()) {
474 std::vector<long> r=get_tensortrain().ranks();
475 return *(std::max_element(r.begin(), r.end()));
476 }
477 return 0l;
478 }
479
480 bool is_assigned() const {
481 return ptr() ? true : false;
482 }
483
484 bool has_data() const {return size()>0;}
485
486 bool has_no_data() const {return (not has_data());}
487
488 long size() const {
489 return (is_assigned()) ? ptr()->size() : 0;
490 }
491
492 long nCoeff() const {
493 if (is_full_tensor()) return get_tensor().size();
494 else if (is_svd_tensor()) return get_svdtensor().nCoeff();
495 else if (is_tensortrain()) return get_tensortrain().real_size();
496 else {
497 MADNESS_EXCEPTION("you should not be here",1);
498 }
499 return false;
500 }
501
502 long real_size() const {
503 if (is_full_tensor()) return get_tensor().size();
504 else if (is_svd_tensor()) return get_svdtensor().real_size();
505 else if (is_tensortrain()) return get_tensortrain().real_size();
506 else {
507 MADNESS_EXCEPTION("you should not be here",1);
508 }
509 return false;
510 }
511
512 /// returns the trace of <this|rhs>
513 template<typename Q>
514 TENSOR_RESULT_TYPE(T,Q) trace_conj(const GenTensor<Q>& rhs) const {
515
516 if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry",1);
517 if (TensorTypeData<Q>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry",1);
518
519 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
520 // fast return if possible
521 if ((this->rank()==0) or (rhs.rank()==0)) return resultT(0.0);
522
524
525 if (is_full_tensor()) return get_tensor().trace_conj(rhs.get_tensor());
526 else if (is_svd_tensor()) return trace(get_svdtensor(),rhs.get_svdtensor());
527 else if (is_tensortrain()) return get_tensortrain().trace(rhs.get_tensortrain());
528 else {
529 MADNESS_EXCEPTION("you should not be here",1);
530 }
531 return TENSOR_RESULT_TYPE(T,Q)(0);
532 }
533
534 /// multiply with a number
535 template<typename Q>
536 GenTensor<TENSOR_RESULT_TYPE(T,Q)> operator*(const Q& x) const {
537 GenTensor<TENSOR_RESULT_TYPE(T,Q)> result(copy(*this));
538 result.scale(x);
539 return result;
540 }
541
542 GenTensor operator+(const GenTensor& other) {
543 GenTensor<T> result=copy(*this);
544 result.gaxpy(1.0,other,1.0);
545 return result;
546 }
547
548 GenTensor operator+(const SliceLowRankTensor<T>& other) {
549 GenTensor<T> result=copy(*this);
550 std::array<Slice,TENSOR_MAXDIM> s0;
551 s0.fill(_);
552 result.gaxpy(1.0,s0,other,1.0,other.thisslice);
553 return result;
554 }
555
556 GenTensor& operator+=(const GenTensor& other) {
557 gaxpy(1.0,other,1.0);
558 return *this;
559 }
560
561 GenTensor& operator+=(const SliceLowRankTensor<T>& other) {
562 std::array<Slice,TENSOR_MAXDIM> s0;
563 s0.fill(_);
564 this->gaxpy(1.0,s0,other,1.0,other.thisslice);
565 return *this;
566 }
567
568 GenTensor operator-(const GenTensor& other) {
569 GenTensor<T> result=copy(*this);
570 result.gaxpy(1.0,other,-1.0);
571 return result;
572 }
573
574 GenTensor& operator-=(const GenTensor& other) {
575 gaxpy(1.0,other,-1.0);
576 return *this;
577 }
578
579 GenTensor& operator-=(const SliceLowRankTensor<T>& other) {
580 std::array<Slice,TENSOR_MAXDIM> s0;
581 s0.fill(_);
582 this->gaxpy(1.0,s0,other,-1.0,other.thisslice);
583 return *this;
584 }
585
586 GenTensor& gaxpy(const T alpha, const GenTensor& other, const T beta) {
587
588 // deliberately excluding gaxpys for different tensors due to efficiency considerations!
591 if (is_full_tensor()) get_tensor().gaxpy(alpha,other.get_tensor(),beta);
592 else if (is_svd_tensor()) get_svdtensor().gaxpy(alpha,other.get_svdtensor(),beta);
593 else if (is_tensortrain()) get_tensortrain().gaxpy(alpha,other.get_tensortrain(),beta);
594 else {
595 MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::gaxpy",1);
596 }
597 return *this;
598 }
599
600 GenTensor& gaxpy(const T alpha, std::array<Slice,TENSOR_MAXDIM> s0,
601 const GenTensor& other, const T beta, std::array<Slice,TENSOR_MAXDIM> s1) {
602
603 // deliberately excluding gaxpys for different tensors due to efficiency considerations!
606
607 if (is_full_tensor()) {
608 get_tensor()(s0).gaxpy(alpha,other.get_tensor()(s1),beta);
609 } else if (is_svd_tensor()) {
610 get_svdtensor().inplace_add(other.get_svdtensor(),s0,s1,alpha,beta);
611 } else if (is_tensortrain()) {
612 MADNESS_ASSERT(alpha==1.0);
613 get_tensortrain().gaxpy(s0, other.get_tensortrain(), beta, s1);
614 } else {
615 MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::gaxpy",1);
616 }
617 return *this;
618 }
619
620 /// assign a number to this tensor
621 GenTensor& operator=(const T& number) {
622 std::visit([&number](auto& obj) {*obj=number;}, tensor);
623 return *this;
624
625 }
626
627 void add_SVD(const GenTensor& other, const double& thresh) {
628 if (is_full_tensor()) get_tensor()+=other.get_tensor();
629 else if (is_svd_tensor()) get_svdtensor().add_SVD(other.get_svdtensor(),thresh*facReduce());
630 else if (is_tensortrain()) get_tensortrain()+=(other.get_tensortrain());
631 else {
632 MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::add_SVD",1);
633 }
634 }
635
636 /// Inplace multiply by corresponding elements of argument Tensor
637 GenTensor<T>& emul(const GenTensor<T>& other) {
638
639 // deliberately excluding emuls for different tensors due to efficiency considerations!
641
642 // binary operation with the visitor pattern
643 // std::visit([&other](auto& obj) {obj.emul(other.tensor);}, tensor);
644 if (is_full_tensor()) get_tensor().emul(other.get_tensor());
645 else if (is_svd_tensor()) get_svdtensor().emul(other.get_svdtensor());
646 else if (is_tensortrain()) get_tensortrain().emul(other.get_tensortrain());
647 else {
648 MADNESS_EXCEPTION("unknown tensor type in LowRankTensor::gaxpy",1);
649 }
650 return *this;
651
652 }
653
654 void reduce_rank(const double& thresh) {
655 if (is_svd_tensor()) get_svdtensor().divide_and_conquer_reduce(thresh*facReduce());
656 if (is_tensortrain()) get_tensortrain().truncate(thresh*facReduce());
657 }
658
659
660public:
661
662 /// Transform all dimensions of the tensor t by the matrix c
663
664 /// \ingroup tensor
665 /// Often used to transform all dimensions from one basis to another
666 /// \code
667 /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c(i',i) c(j',j) c(k',k) ...
668 /// \endcode
669 /// The input dimensions of \c t must all be the same and agree with
670 /// the first dimension of \c c . The dimensions of \c c may differ in
671 /// size.
672 template <typename R, typename Q>
673 friend GenTensor<TENSOR_RESULT_TYPE(R,Q)> transform(
674 const GenTensor<R>& t, const Tensor<Q>& c);
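 // Illustrative use of the all-dimension transform (dimensions, data and matrix are
 // assumed for the sketch): every index of g is contracted with the same matrix c.
 //
 //     Tensor<double> t0(5,5,5,5); t0.fill(0.5);
 //     GenTensor<double> g(t0, 1.e-4, TT_2D);
 //     Tensor<double> c(5,5); c.fill(0.1);
 //     GenTensor<double> gc = transform(g, c);   // gc(i,j,..) = sum g(i',j',..) c(i',i) c(j',j) ...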
675
676 /// Transform all dimensions of the tensor t by distinct matrices c
677
678 /// \ingroup tensor
679 /// Similar to transform but each dimension is transformed with a
680 /// distinct matrix.
681 /// \code
682 /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c[0](i',i) c[1](j',j) c[2](k',k) ...
683 /// \endcode
684 /// The first dimension of the matrices c must match the corresponding
685 /// dimension of t.
686 template <typename R, typename Q>
687 friend GenTensor<TENSOR_RESULT_TYPE(R,Q)> general_transform(
688 const GenTensor<R>& t, const Tensor<Q> c[]);
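 // Same idea with one matrix per dimension (a sketch, assuming the 4-d GenTensor<double> g
 // with all dimensions equal to 5 from the transform sketch above):
 //
 //     Tensor<double> cs[4];
 //     for (int i=0; i<4; ++i) { cs[i] = Tensor<double>(5,5); cs[i].fill(0.1); }
 //     GenTensor<double> gc2 = general_transform(g, cs);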
689
690 /// Transforms one dimension of the tensor t by the matrix c, returns new contiguous tensor
691
692 /// \ingroup tensor
693 /// \code
694 /// transform_dir(t,c,1) = r(i,j,k,...) = sum(j') t(i,j',k,...) * c(j',j)
695 /// \endcode
696 /// @param[in] t Tensor to transform (size of dim to be transformed must match size of first dim of \c c )
697 /// @param[in] c Matrix used for the transformation
698 /// @param[in] axis Dimension (or axis) to be transformed
699 /// @result Returns a new, contiguous tensor
700 template <typename R, typename Q>
701 friend GenTensor<TENSOR_RESULT_TYPE(R,Q)> transform_dir(
702 const GenTensor<R>& t, const Tensor<Q>& c, const int axis);
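 // And for a single dimension only (a sketch, assuming g and c as in the transform
 // sketch above):
 //
 //     GenTensor<double> gd = transform_dir(g, c, 1);   // contract only dimension 1 with c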
703
704
705 std::string what_am_i() const {
706 TensorType tt;
707 if (this->is_full_tensor()) tt=TT_FULL;
708 if (this->is_svd_tensor()) tt=TT_2D;
709 if (this->is_tensortrain()) tt=TT_TENSORTRAIN;
710 return TensorArgs::what_am_i(tt);
711 };
712
713
714 /// might return a NULL pointer!
715 const BaseTensor* ptr() const {
716 const BaseTensor* p;
717 std::visit([&p](auto& obj) {p=dynamic_cast<const BaseTensor*>(obj.get());}, tensor);
718 return p;
719 }
720
721private:
722
723 /// holding the implementation of the low rank tensor representations
724 // std::variant<Tensor<T>, SVDTensor<T>, TensorTrain<T> > tensor;
725 std::variant<std::shared_ptr<Tensor<T> >,
726 std::shared_ptr<SVDTensor<T> >,
727 std::shared_ptr<TensorTrain<T> > > tensor;
728
729};
730
731
732
733namespace archive {
734/// Serialize a tensor
735template <class Archive, typename T>
736struct ArchiveStoreImpl< Archive, GenTensor<T> > {
737
738 friend class GenTensor<T>;
739 /// Stores the GenTensor to an archive
740 static void store(const Archive& ar, const GenTensor<T>& t) {
741 int index1=t.index();
742 ar & index1;
743 if (index1==0) {
744 const Tensor<T>& tt=t.get_tensor();
745 ar & tt;
746 } else if (index1==1) {
747 const SVDTensor<T>& tt=t.get_svdtensor();
748 ar & tt;
749 } else if (index1==2) {
750 const TensorTrain<T>& tt=t.get_tensortrain();
751 ar & tt;
752 }
753 };
754};
755
756
757/// Deserialize a tensor ... existing tensor is replaced
758template <class Archive, typename T>
759struct ArchiveLoadImpl< Archive, GenTensor<T> > {
760
761 friend class GenTensor<T>;
762 /// Replaces this GenTensor with one loaded from an archive
763 static void load(const Archive& ar, GenTensor<T>& tensor) {
764 int index=-2;
765 ar & index;
766 if (index==0) {
767 Tensor<T> tt;
768 ar & tt;
769 tensor=tt;
770 } else if (index==1) {
771 SVDTensor<T> tt;
772 ar & tt;
773 tensor=tt;
774 } else if (index==2) {
775 TensorTrain<T> tt;
776 ar & tt;
777 tensor=tt;
778 } else if (index==-1) { // defined value: empty tensor
779 ;
780 } else {
781 MADNESS_EXCEPTION("unknown tensor type",1);
782 }
783
784
785 };
786};
787};
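// Store/load sketch: any MADNESS archive can hold a GenTensor through the traits above.
// The binary file archive named here is an assumption about the surrounding library, and
// the file name and tensor g are illustrative.
//
//     madness::archive::BinaryFstreamOutputArchive oar("gentensor.dat");
//     oar & g;                                     // writes index() plus the active representation
//     madness::archive::BinaryFstreamInputArchive iar("gentensor.dat");
//     GenTensor<double> g2;
//     iar & g2;                                    // restores the same representation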
788
789/// type conversion implies a deep copy
790
791/// @result Returns a new tensor that is a deep copy of the input
792template <class Q, class T>
793GenTensor<Q> convert(const GenTensor<T>& other) {
794
795 // simple return
796 if (std::is_same<Q, T>::value) return copy(other);
797
798 GenTensor<Q> result;
799 if (other.is_full_tensor())
800 result=Tensor<Q>(convert<Q,T>(other.get_tensor()));
801 if (other.is_svd_tensor())
802 MADNESS_EXCEPTION("no type conversion for SVDTensors",1);
803 if (other.is_tensortrain())
804 MADNESS_EXCEPTION("no type conversion for TensorTrain",1);
805 return result;
806}
807
808
809/// change representation to targ.tt
810template<typename T>
811void change_tensor_type(GenTensor<T>& t, const TensorArgs& targs) {
812 t.convert_inplace(targs);
813}
814
815/// outer product of two Tensors, yielding a low rank tensor
816
817/// do the outer product of two tensors; distinguish these tensortype cases by
818/// the use of final_tensor_type
819/// - full x full -> full
820/// - full x full -> SVD ( default )
821/// - TensorTrain x TensorTrain -> TensorTrain
822/// all other combinations are currently invalid.
823template <class T, class Q>
824GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer(const GenTensor<T>& t1,
825 const GenTensor<Q>& t2, const TensorArgs final_tensor_args=TensorArgs(-1.0,TT_2D)) {
826
827 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
828
829
831
832 if (final_tensor_args.tt==TT_FULL) {
834 Tensor<resultT> t=outer(t1.full_tensor_copy(),t2.full_tensor_copy());
835 return GenTensor<resultT>(t);
836
837 } else if (final_tensor_args.tt==TT_2D) {
839
840 // srconf is shallow, do deep copy here
841 const Tensor<T> lhs=t1.full_tensor_copy();
842 const Tensor<Q> rhs=t2.full_tensor_copy();
843
844 const long k=lhs.dim(0);
845 const long ndim=lhs.ndim()+rhs.ndim();
846 long size=1;
847 for (int i=0; i<lhs.ndim(); ++i) size*=k;
848 MADNESS_ASSERT(size==lhs.size());
849 MADNESS_ASSERT(size==rhs.size());
850 MADNESS_ASSERT(lhs.size()==rhs.size());
851
852 Tensor<typename Tensor<resultT>::scalar_type> weights(1);
853 weights=1.0;
854
855 std::array<long,TENSOR_MAXDIM> dims;
856 for (int i=0; i<t1.ndim(); ++i) dims[i]=t1.dim(i);
857 for (int i=0; i<t2.ndim(); ++i) dims[i+t1.ndim()]=t2.dim(i);
858
859 SRConf<resultT> srconf(weights,lhs.reshape(1,lhs.size()),rhs.reshape(1,rhs.size()),ndim,dims.data(),t1.ndim());
860// srconf.normalize();
861 return GenTensor<resultT>(SVDTensor<resultT>(srconf));
862
863 } else if (final_tensor_args.tt==TT_TENSORTRAIN) {
866 return outer(t1.get_tensortrain(),t2.get_tensortrain());
867 } else {
868 MADNESS_EXCEPTION("you should not be here",1);
869 }
871
872 }
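// Outer-product sketch (dimensions, data and threshold are illustrative): two dense 2-d
// tensors combined into a 4-d GenTensor in SVD form, which by construction has rank 1.
//
//     Tensor<double> a(5,5), b(5,5);
//     a.fill(1.0); b.fill(2.0);
//     GenTensor<double> ga(a), gb(b);                                // full representation
//     GenTensor<double> ab = outer(ga, gb, TensorArgs(1.e-5, TT_2D));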
873
874
875/// outer product of two Tensors, yielding a low rank tensor
876template <class T, class Q>
877GenTensor<TENSOR_RESULT_TYPE(T,Q)> outer_low_rank(const Tensor<T>& lhs2,
878 const Tensor<Q>& rhs2, const TensorArgs final_tensor_args) {
879
880 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
881
882 // prepare lo-dim tensors for the outer product
883 TensorArgs targs;
884 targs.thresh=final_tensor_args.thresh;
885 if (final_tensor_args.tt==TT_FULL) targs.tt=TT_FULL;
886 else if (final_tensor_args.tt==TT_2D) targs.tt=TT_FULL;
887 else if (final_tensor_args.tt==TT_TENSORTRAIN) targs.tt=TT_TENSORTRAIN;
888 else {
889 MADNESS_EXCEPTION("confused tensor args in outer_low_rank",1);
890 }
891
892 GenTensor<T> lhs(lhs2,targs);
893 GenTensor<Q> rhs(rhs2,targs);
894 GenTensor<resultT> result=outer(lhs,rhs,final_tensor_args);
895 return result;
896 }
897
898
899/// The class defines tensor op scalar ... here define scalar op tensor.
900template <typename T, typename Q>
901typename IsSupported < TensorTypeData<Q>, GenTensor<T> >::type
902operator*(const Q& x, const GenTensor<T>& t) {
903 return t*x;
904}
905
906/// add all the GenTensors of a given list
907
908 /// If there are many tensors to add it's beneficial to do a sorted addition and start with
909 /// those tensors with low ranks
910 /// @param[in] addends a list with gentensors of same dimensions; will be destroyed upon return
911 /// @param[in] eps the accuracy threshold
912 /// @param[in] are_optimal flag if the GenTensors in the list are already in SVD format (if TT_2D)
913 /// @return the sum GenTensor of the input GenTensors
914 template<typename T>
915GenTensor<T> reduce(std::list<GenTensor<T> >& addends, double eps, bool are_optimal=false) {
916
917 // fast return
918 addends.remove_if([](auto element) {return not element.is_assigned();});
919 addends.remove_if([](auto element) {return element.rank()==0;});
920 if (addends.size()==0) return GenTensor<T>();
921
922
923 if (addends.front().is_svd_tensor()) {
924 std::list<SVDTensor<T> > addends1;
925 for (auto a : addends) addends1.push_back(a.get_svdtensor());
926 return reduce(addends1,eps*GenTensor<T>::facReduce());
927 }
928 // make error relative
929 eps=eps/addends.size();
930
931 // if the addends are not in SVD format do that now so that we can call add_svd later
932 if (not are_optimal) {
933 for (auto& element : addends) element.reduce_rank(eps);
934 }
935
936 // remove zero ranks and sort the list according to the gentensor's ranks
937 addends.remove_if([](auto element) {return element.rank()==0;});
938 if (addends.size()==0) return GenTensor<T>();
939 addends.sort([](auto element1, auto element2) {return element1.rank()<element2.rank();});
940
941 // do the additions
942 GenTensor<T> result=copy(addends.front());
943 addends.pop_front();
944 for (auto element : addends) result.add_SVD(element,eps);
945 addends.clear();
946
947 return result;
948}
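// Usage sketch for reduce (threshold illustrative): sum a list of SVD-form GenTensors of
// identical dimensions; the list is consumed in the process. The tensor ab is assumed to
// be a TT_2D GenTensor, e.g. from the outer-product sketch above.
//
//     std::list<GenTensor<double> > terms;
//     terms.push_back(ab);
//     terms.push_back(copy(ab));
//     GenTensor<double> sum = reduce(terms, 1.e-6, true);   // are_optimal=true: already in SVD form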
949
950
951
952
953/// implements a temporary(!) slice of a LowRankTensor
954template<typename T>
955class SliceLowRankTensor : public GenTensor<T> {
956 //class SliceLowRankTensor {
957public:
958
959 std::array<Slice,TENSOR_MAXDIM> thisslice;
960 // GenTensor<T>* lrt;
961
962 // all ctors are private, only accessible by GenTensor
963
964 /// default ctor
965 SliceLowRankTensor<T> () {}
966
967 /// ctor with a GenTensor; shallow
968 SliceLowRankTensor<T> (const GenTensor<T>& gt, const std::vector<Slice>& s)
969 : GenTensor<T>(const_cast<GenTensor<T>& > (gt)) {
970 // : Tensor<T>(const_cast<Tensor<T>&>(t)) //!!!!!!!!!!!
971 for (int i=0; i<s.size(); ++i) thisslice[i]=s[i];
972 }
973
974 /// ctor with a GenTensor; shallow
975 SliceLowRankTensor<T> (const GenTensor<T>& gt, const std::array<Slice,TENSOR_MAXDIM>& s)
976 : GenTensor<T>(gt), thisslice(s) {}
977
978public:
979
980 /// assignment as in g(s) = g1;
981 SliceLowRankTensor<T>& operator=(const GenTensor<T>& rhs) {
982 print("You don't want to assign to a SliceLowRankTensor; use operator+= instead");
984 return *this;
985 };
986
987 /// assignment as in g(s) = g1(s);
988 SliceLowRankTensor<T>& operator=(const SliceLowRankTensor<T>& rhs) {
989 print("You don't want to assign to a SliceLowRankTensor; use operator+= instead");
991 return *this;
992 };
993
994 /// inplace addition as in g(s)+=g1
995 SliceLowRankTensor<T>& operator+=(const GenTensor<T>& rhs) {
996 std::array<Slice,TENSOR_MAXDIM> rhs_slice;
997 rhs_slice.fill(_);
998 gaxpy(thisslice,rhs,rhs_slice,1.0);
999 return *this;
1000 }
1001
1002 /// inplace subtraction as in g(s)-=g1
1003 SliceLowRankTensor<T>& operator-=(const GenTensor<T>& rhs) {
1004 std::array<Slice,TENSOR_MAXDIM> rhs_slice;
1005 rhs_slice.fill(_);
1006 gaxpy(thisslice,rhs,rhs_slice,-1.0);
1007 return *this;
1008 }
1009
1010 /// inplace addition as in g(s)+=g1(s)
1011 SliceLowRankTensor<T>& operator+=(const SliceLowRankTensor<T>& rhs) {
1012 gaxpy(thisslice,rhs,rhs.thisslice,1.0);
1013 return *this;
1014 }
1015
1016 /// inplace subtraction as in g(s)-=g1(s)
1017 SliceLowRankTensor<T>& operator-=(const SliceLowRankTensor<T>& rhs) {
1018 gaxpy(thisslice,rhs,rhs.thisslice,-1.0);
1019 return *this;
1020 }
1021
1022 /// *this = *this(s) + beta * rhs
1023 void gaxpy(const std::array<Slice,TENSOR_MAXDIM>& lslice, const GenTensor<T>& rhs,
1024 const std::array<Slice,TENSOR_MAXDIM>& rslice, const double& beta) {
1025
1026 // fast return if possible
1027 if (rhs.has_no_data() or rhs.rank()==0) return;
1028
1029 if (this->has_data()) MADNESS_ASSERT(is_same_tensor_type(*this,rhs));
1030
1031 if (this->is_full_tensor()) {
1032 this->get_tensor()(thisslice).gaxpy(1.0,rhs.get_tensor()(rslice),beta);
1033
1034 } else if (this->is_svd_tensor()) {
1035 this->get_svdtensor().inplace_add(rhs.get_svdtensor(),thisslice,rslice, 1.0, beta);
1036
1037 } else if (this->is_tensortrain()) {
1038 this->get_tensortrain().gaxpy(thisslice,rhs.get_tensortrain(),beta,rslice);
1039 }
1040 return ;
1041 }
1042
1043 /// inplace zero-ing as in g(s)=0.0
1044 SliceLowRankTensor<T>& operator=(const T& number) {
1045 MADNESS_ASSERT(number==T(0.0));
1046
1047 if (this->is_full_tensor()) {
1048 this->get_tensor()(thisslice)=0.0;
1049
1050 } else if (this->is_svd_tensor()) {
1051 MADNESS_ASSERT(this->get_svdtensor().has_structure());
1052 SliceLowRankTensor<T> tmp(*this);
1053 this->get_svdtensor().inplace_add(tmp.get_svdtensor(),thisslice,thisslice, 1.0, -1.0);
1054
1055 } else if (this->is_tensortrain()) {
1056 this->get_tensortrain().gaxpy(thisslice,this->get_tensortrain(),-1.0,thisslice);
1057 } else {
1058 MADNESS_EXCEPTION("you should not be here",1);
1059 }
1060 return *this;
1061 }
1062
1063 friend GenTensor<T> copy(const SliceLowRankTensor<T>& other) {
1064 GenTensor<T> result;
1065 const std::array<Slice,TENSOR_MAXDIM> s=other.thisslice;
1066 if (other.is_full_tensor())
1067 result=Tensor<T>(copy(other.get_tensor()(s)));
1068 else if (other.is_svd_tensor())
1069 result=SVDTensor<T>(other.get_svdtensor().copy_slice(s));
1070 else if (other.is_tensortrain())
1071 result=TensorTrain<T>(copy(other.get_tensortrain(),s));
1072 else {
1073 }
1074 return result;
1075 }
1076
1077
1078};
1079
1080
1081template<typename Q, typename R>
1082bool is_same_tensor_type(const GenTensor<R>& rhs, const GenTensor<Q>& lhs) {
1083 return (rhs.tensor.index()==lhs.tensor.index());
1084}
1085
1086template <typename R, typename Q>
1087GenTensor<TENSOR_RESULT_TYPE(R,Q)> transform(
1088 const GenTensor<R>& t, const Tensor<Q>& c) {
1089 typedef TENSOR_RESULT_TYPE(R,Q) resultT;
1090 GenTensor<resultT> result;
1091 std::visit([&result, &c](auto& obj) {result=transform(*obj,c);}, t.tensor);
1092 return result;
1093 }
1094
1095template <typename R, typename Q>
1096GenTensor<TENSOR_RESULT_TYPE(R,Q)> general_transform(
1097 const GenTensor<R>& t, const Tensor<Q> c[]) {
1098 typedef TENSOR_RESULT_TYPE(R,Q) resultT;
1099 GenTensor<resultT> result;
1100 std::visit([&result, &c](auto& obj) {result=general_transform(*obj,c);}, t.tensor);
1101 return result;
1102}
1103
1104template <typename R, typename Q>
1105GenTensor<TENSOR_RESULT_TYPE(R,Q)> transform_dir(
1106 const GenTensor<R>& t, const Tensor<Q>& c, const int axis) {
1107 GenTensor<TENSOR_RESULT_TYPE(R,Q)> result;
1108 std::visit([&result, &c, &axis](auto& obj) {result=transform_dir(*obj,c,axis);}, t.tensor);
1109 return result;
1110
1111 }
1112
1113
1114
1115} // namespace madness
1116
1117#endif /* MADNESS_TENSOR_LOWRANKTENSOR_H_ */