MADNESS 0.10.1
tensortrain.h
Go to the documentation of this file.
1/*
2 This file is part of MADNESS.
3
4 Copyright (C) 2007,2010 Oak Ridge National Laboratory
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
20 For more information please contact:
21
22 Robert J. Harrison
23 Oak Ridge National Laboratory
24 One Bethel Valley Road
25 P.O. Box 2008, MS-6367
26
27 email: harrisonrj@ornl.gov
28 tel: 865-241-3937
29 fax: 865-572-0680
30
31 $Id: srconf.h 2816 2012-03-23 14:59:52Z 3ru6ruWu $
32*/
33
34#ifndef TENSORTRAIN_H_
35#define TENSORTRAIN_H_
36
43
44/**
45 \file tensortrain.h
46 \brief Defines and implements the tensor train decomposition as described in
47 I.V. Oseledets, Siam J. Sci. Comput. 33, 2295 (2011).
48 \ingroup tensor
49 \addtogroup tensor
50
51*/
52
53namespace madness {
54
55 /// decompose the input tensor A into U and V, skipping singular vectors of
56 /// small singular values. One deep copy/allocation for U is performed
57
58 /// @param[inout] A the input matrix, on exit the matrix VT (right sing. vectors)
59 /// @param[out] U contiguous new tensor holding the left sing. vectors
60 /// @param[in] thresh threshold for truncation of the singular vectors
61 /// @param[in] s scratch tensor for the singular values, dimension min(n,m)
62 /// @param[in] scr scratch tensor
63 /// the dimension of the scratch tensor may be computed as
64 /// long lwork=std::max(3*std::min(m,n)+std::max(m,n),5*std::min(m,n)) + n*m;
65 template<typename T>
// NOTE(review): the declaration line (orig. line 66) with the function name and
// the first parameter (Tensor<T>& A) is elided in this view -- per the doc block
// above, A is the [inout] matrix and is overwritten with VT on exit.
67 const double thresh, Tensor< typename Tensor<T>::scalar_type > & s,
68 Tensor<T>& scr) {
69
70 MADNESS_ASSERT(A.ndim()==2); // must be a matrix
71 const long n=A.dim(0);
72 const long m=A.dim(1);
73 const long rmax=std::min(n,m);
// dgesvd workspace requirement: max(3*min(m,n)+max(m,n), 5*min(m,n))
74 long lwork=std::max(3*std::min(m,n)+std::max(m,n),5*std::min(m,n));
75
76 // set up contiguous scratch arrays
77 MADNESS_ASSERT(scr.size()>lwork+n*m);
78 scr=scr.flat();
// partition the scratch buffer: [0,lwork) is lapack work, [lwork,lwork+n*m) holds U
79 Tensor<T> work=scr(Slice(0,lwork-1));
80 Tensor<T> utmp=scr(Slice(lwork,lwork+n*m-1));
81 Tensor<T> dummy; // real dummy
82
// on return: utmp holds the left singular vectors, A is overwritten with VT
83 svd_result(A,utmp,s,dummy,work);
84
85 // this is rank_right
86 const long R1=SRConf<T>::max_sigma(thresh,rmax,s)+1;
87
88 // skip if rank=0
89 if (R1>0) {
90
// slice the significant columns out of utmp and deep-copy to make U contiguous
91 U=madness::copy((utmp(Slice(0,n*rmax-1)))
92 .reshape(n,rmax)(_,Slice(0,R1-1)));
93
// keep only the significant rows of VT (stored in A)
94 A=A(Slice(0,R1-1),_);
95
96 // continue with the next dimension
// convolve the singular values into the rows of VT
97 for (int j=0; j<m; ++j) {
98 for (int i=0; i<R1; ++i) {
99 A(i,j)*=s(i);
100 }
101 }
102 } else {
// rank zero: return an (n,0) placeholder for U
103 U=Tensor<T>(n,0l);
104 }
105 return R1;
106 }
107
108
109
110 /**
111 * A tensor train is a multi-modal representation of a tensor t
112 * \code
113 * t(i,j,k,l) = \sum G^1_{a1,i,a2} G^2_{a2,j,a3} G^3_{a3,k,a4} G^4_{a4,l,a5}
114 * \endcode
115 * The "core" tensors G are connected via a linear index network, where the
116 * first index a1 and the last index a5 are boundary indices and are set to 1.
117 *
118 * The tensor train representation is suited for any number of dimensions and
119 * in general at least as fast as the 2-way decomposition SVD. If the tensor
120 * has full rank it will need about twice the storage space of the full tensor
121 */
122 template<typename T>
123 class TensorTrain : public BaseTensor {
124
125 // make all types of TensorTrain friends of each other (for type conversion)
126 template<typename Q>
127 friend class TensorTrain;
128
129 /// C++ typename of this tensor.
130 typedef T type;
131
132 /// C++ typename of the real type associated with a complex type.
134
135 /// C++ typename of the floating point type associated with scalar real type
137
138 /// holding the core tensors of a tensor train
139 /// the tensors have the shape (k,r0) (r0,k,r1) (r1,k,r2) .. (rn-1,k)
140 std::vector<Tensor<T> > core;
141 /// true if rank is zero
143
144 public:
145
146 /// empty constructor
149 }
150
151 /// ctor for a TensorTrain, with the tolerance eps
152
153 /// The tensor train will represent the input tensor with
154 /// accuracy || t - this ||_2 < eps
155 ///
156 /// Note that we rely on specific layout of the memory in the tensors, e.g.
157 /// we pass SliceTensors on to lapack. This will only work if the slices are
158 /// contiguous.
159 ///
160 /// @param[in] t full representation of a tensor
161 /// @param[in] eps the accuracy threshold
162 TensorTrain(const Tensor<T>& t, double eps)
163 : core(), zero_rank(false) {
164
// fast return for an empty input tensor
// NOTE(review): this leaves zero_rank==false with an empty core vector,
// which differs from the zero_rank state other methods check -- confirm intended
165 if (t.size()==0) return;
166
// these asserts are unreachable-false after the fast return above (size()!=0 is trivially true)
167 MADNESS_ASSERT(t.size() != 0);
168 MADNESS_ASSERT(t.ndim() != 0);
169
// use the input tensor's own dimensions as the tt structure
170 std::vector<long> dims(t.ndim());
171 for (int d=0; d<t.ndim(); ++d) dims[d]=t.dim(d);
172 decompose(t.flat(),eps,dims);
173
174 }
175
176 /// ctor for a TensorTrain, with the tolerance eps
177
178 /// The tensor train will represent the input tensor with
179 /// accuracy || t - this ||_2 < eps
180 ///
181 /// Note that we rely on specific layout of the memory in the tensors, e.g.
182 /// we pass SliceTensors on to lapack. This will only work if the slices are
183 /// contiguous.
184 ///
185 /// @param[in] t full representation of a tensor
186 /// @param[in] eps the accuracy threshold
187 /// @param[in] dims the tt structure
// note: dims is taken by value (a const copy); a const& would avoid the copy
188 TensorTrain(const Tensor<T>& t, double eps, const std::vector<long> dims)
189 : core(), zero_rank(false) {
190
191 MADNESS_ASSERT(t.size() != 0);
192 MADNESS_ASSERT(t.ndim() != 0);
193 decompose(t,eps,dims);
194 }
196
197 /// ctor for a TensorTrain, set up only the dimensions, no data
198 TensorTrain(const long& ndims, const long* dims) {
199 zero_rank = true;
200 set_dims_and_size(ndims,dims);
201 }
202
203 /// ctor for a TensorTrain, set up only the dimensions, no data
204 TensorTrain(const std::vector<long>& dims) {
205 zero_rank = true;
// NOTE(review): one line (orig. line 206) is elided in this view -- presumably the
// set_dims_and_size(...) call that registers dims in the BaseTensor part; confirm
206
207
// dead code kept for reference: would pre-shape the cores with zero ranks
208// core.resize(dims.size());
209// // first and last core tensor
210// core[0] = Tensor<T>(dims[0],long(0));
211// core[dims.size()-1] = Tensor<T>(long(0),dims[dims.size()-1]);
212//
213// // iterate through the rest -- fast forward
214// for (int d=1; d<dims.size()-1; ++d) {
215// core[d] = Tensor<T>(long(0),dims[d],long(0));
216// }
217
218 }
219
220
221 /// ctor for a TensorTrain, with core tensors explicitly given
222
223 /// the core tensors must have the shape (k1,r1) (r1,k2,r2) .. (r2,k3)
224 /// @param[in] core vector of core tensors, properly shaped
// note: the cores are shallow-copied into this (Tensor copy shares data)
225 TensorTrain(const std::vector<Tensor<T> >& c) : core(c) {
226 zero_rank=false;
// collect the k-dimensions: dim(0) of the first core, dim(1) of the interior cores
227 std::vector<long> d(c.size());
228 d[0]=core[0].dim(0); // dim,rank
229 for (long i=1; i<d.size(); ++i) d[i]=core[i].dim(1); // rank,dim,rank
// NOTE(review): one line (orig. line 230) is elided in this view -- presumably
// set_dims_and_size(d.size(),&d.front()); confirm against the full source
231
232 // check for zero ranks
// note: the loop index d shadows the dimension vector d declared above
233 for (int d=1; d<core.size(); ++d) if (core[d].dim(0)==0) zero_me();
234 }
235
236
237 /// copy constructor, shallow
// shallow: the core vector is copied, but each Tensor copy shares its data
238 TensorTrain(const TensorTrain& other) : core(other.core),
239 zero_rank(other.zero_rank) {
240 set_dims_and_size(other.ndim(),other.dims());
241
242 }
243
244 /// assignment operator (shallow, like the copy constructor)
// NOTE(review): the signature line (orig. line 245) is elided in this view --
// presumably TensorTrain& operator=(const TensorTrain& other) {
246 if (this!=&other) {
247 set_dims_and_size(other.ndim(),other.dims());
248 zero_rank=other.zero_rank;
// shallow copy: the Tensors in core share data with other's cores
249 core=other.core;
250 }
251 return *this;
252 }
253
254 /// Type conversion makes a deep copy
255 template <class Q> operator TensorTrain<Q>() const { // type conv => deep copy
256
257 TensorTrain<Q> result(this->dims());
258 result.zero_rank=zero_rank;
259 for (const Tensor<T>& c : core) {
260 result.core.push_back(Tensor<Q>(c));
261 }
262 return result;
263 }
264
// NOTE(review): the body (orig. line 266) is elided in this view -- presumably a
// forward to BaseTensor's set_dims_and_size(dims.size(),&dims.front()); confirm
265 void set_size_and_dim(std::vector<long> dims) {
267 }
268
269 /// serialize this
270 template <typename Archive>
271 void serialize(Archive& ar) {
272
273 // serialize basetensor
// NOTE(review): the lines serializing the BaseTensor part (orig. lines 274-275)
// are elided in this view
276
// archive rank flag and number of cores before the cores themselves
277 long core_size=core.size();
278 ar & zero_rank & core_size;
279
280 // no empty tensor
281 if (core_size>0) ar & core;
282 }
283
284
285 /// deep copy of the whole tensor
286
287 /// if argument has zero rank return a zero-rank tensor of the same dimensions
288 friend TensorTrain copy(const TensorTrain& other) {
289
290 // fast return
291 if (other.zero_rank) return TensorTrain(other.ndim(),other.dims());
292
293 TensorTrain result(other.ndim(),other.dims());
294 for (const Tensor<T>& t: other.core) {
295 if (t.size()==0) return TensorTrain(other.ndim(),other.dims()); // also zero
296 result.core.push_back(madness::copy(t));
297 }
298 result.zero_rank=other.zero_rank;
299 return result;
300 }
301
302 /// deep copy of a slice of the tensor
303
304 /// this operation does not change the ranks, i.e. the resulting
305 /// tensor is most likely not in an optimal compression state
306 /// @param[in] other tensor to be sliced
307 /// @param[in] s vector of slices
friend TensorTrain copy(const TensorTrain& other, const std::array<Slice,TENSOR_MAXDIM>& s) {
309
// sliced extent per dimension; note s.size()==TENSOR_MAXDIM, only the first
// other.ndim() entries are consumed by the ctor below
// assumes s[i].end >= s[i].start >= 0 -- TODO confirm open-ended slices are excluded
310 std::vector<long> dims(s.size());
311 for (int i=0; i<dims.size(); ++i) dims[i]=s[i].end-s[i].start+1;
312
313 TensorTrain result(other.ndim(),&dims.front());
314 if (other.zero_rank) return result;
315 const long nd=other.ndim();
316 result.zero_rank=other.zero_rank;
317 result.core.resize(nd);
318
319 // special treatment for first and last core tensor
320 // slice dim only, keep ranks
321 result.core[0]=copy(other.core[0](s[0],_));
322 for (long i=1; i<nd-1; ++i) {
323 result.core[i]=copy(other.core[i](_,s[i],_));
324 }
325
326 if (other.core.size()>1) result.core[nd-1]=copy(other.core[nd-1](_,s[nd-1]));
327 return result;
328 }
329
330 /// decompose the input tensor into a TT representation
331
332 /// @param[in] t tensor in full rank
333 /// @param[in] eps the precision threshold
334 /// @param[in] dims the tt structure
// algorithm: successive SVD sweep (Oseledets' TT-SVD); eps is distributed over
// the dims.size()-1 SVD truncations
335 void decompose(const Tensor<T>& t, double eps,
336 const std::vector<long>& dims) {
337
338 core.resize(dims.size());
// eps is a by-value parameter; this rescaling is local to the call
339 eps=eps/sqrt(dims.size()-1); // error is relative
340
341 // the maximum rank of the SVD decompositions
342 long Udim0=1;
343 long rmax=1;
344 for (long i=0; i<dims.size(); ++i) {
345 Udim0*=dims[i];
346 rmax=std::max(rmax,std::min(Udim0,t.size()/Udim0));
347 }
348
// rmax1/rmax2 bound the matrix sizes from the left/right end of the train;
// lwork is the largest dgesvd workspace needed over all sweep steps
349 long rmax1=1;
350 long rmax2=1;
351 long n1=1, n2=1;
352 long lwork=0;
353 for (int i=0, j=dims.size()-1; i<=j; ++i, --j) {
354 rmax1*=dims[i];
355 rmax2*=dims[j];
356
357 // work array for dgesvd
358 n1*=dims[i];
359 long m1=t.size()/dims[i];
360 n2*=dims[j];
361 long m2=t.size()/dims[j];
362 long max1=std::max(n1,m1);
363 long max2=std::max(n2,m2);
364 long min1=std::min(n1,m1);
365 long min2=std::min(n2,m2);
366
367 lwork=std::max(lwork,std::max(3*min1+max1,5*min1));
368 lwork=std::max(lwork,std::max(3*min2+max2,5*min2));
369 }
370
371 // these are max dimensions, so we can avoid frequent reallocation
372 Tensor<T> u(rmax1*rmax2);
373 Tensor<T> dummy;
// NOTE(review): the declarations of the singular-value vector s (orig. line 374)
// and of the deep working copy c of t (orig. line 380) are elided in this view
375
376 // the dimension of the remainder tensor; will be cut down in each iteration
377 long vtdim=t.size();
378
379 // c will be destroyed, and assignment is only shallow, so need to deep copy
381
382 // work array for dgesvd
383 Tensor<T> work(lwork);
384
385
386 // this keeps track of the ranks
387 std::vector<long> r(dims.size()+1,0l);
// boundary ranks of the train are 1 by definition
388 r[0] = r[dims.size()] = 1;
389
390 for (std::size_t d=1; d<dims.size(); ++d) {
391
392 // the core tensors will have dimensions c(rank_left,k,rank_right)
393 // or for lapack's purposes c(d1,rank_right)
394 const long k=dims[d-1];
395 const long d1=r[d-1]*k;
396 c=c.reshape(d1,c.size()/d1);
397 const long rmax=std::min(c.dim(0),c.dim(1));
398 vtdim=vtdim/k;
399
400 // testing
401#if 0
402 // c will be destroyed upon return
404#endif
405 // The svd routine assumes lda=a etc. Pass in a flat tensor and reshape
406 // and slice it after processing.
407 u=u.flat();
// on return: u holds the left singular vectors, c is overwritten with VT
408 svd_result(c,u,s,dummy,work);
409
410 // this is rank_right
411 r[d]=SRConf<T>::max_sigma(eps,rmax,s)+1;
412 const long rank=r[d];
413
414 // this is for testing
415#if 0
416 if (0) {
417 Tensor<T> uu=(u(Slice(0,c.dim(0)*rmax-1))).reshape(c.dim(0),rmax);
418 Tensor<T> vtt=copy(c(Slice(0,rmax-1),_));
419 Tensor<T> b(d1,vtdim);
420 for (long i=0; i<c.dim(0); ++i)
421 for (long j=0; j<c.dim(1); ++j)
422 for (long k=0; k<rmax; ++k)
423 b(i,j) += uu(i,k) * T(s(k)) * vtt(k,j);
424 b -= aa;
425 print("b.conforms c",b.conforms(c));
426 print("early error:",b.absmax());
427 }
428#endif
429
430 // U = Tensor<T>(m,rmax);
431 // VT = Tensor<T>(rmax,n);
432
433 // handle rank=0 explicitly
434 if (r[d]) {
435
436 // done with this dimension -- slice and deep-copy
437 core[d-1]=madness::copy((u(Slice(0,c.dim(0)*rmax-1)))
438 .reshape(c.dim(0),rmax)(_,Slice(0,rank-1)));
439 core[d-1]=core[d-1].reshape(r[d-1],k,r[d]);
440
441 // continue with the next dimension
442 c=c(Slice(0,rank-1),_);
443
// convolve the singular values into the remainder (the VT rows)
444 for (int i=0; i<rank; ++i) {
445 for (int j=0; j<c.dim(1); ++j) {
446 c(i,j)*=s(i);
447 }
448 }
449
// the last core is what is left of the remainder
450 if (d == dims.size()-1) core[d]=c;
451 }
452 else {
// rank hit zero: the whole tensor is numerically zero; shape the remaining
// cores with zero ranks and bail out of the sweep
453 zero_rank = true;
454 core[d-1] = Tensor<T>(r[d-1],k,long(0));
455 // iterate through the rest -- fast forward
456 for(++d; d<dims.size(); ++d) {
457 const long k=dims[d-1];
458 core[d-1] = Tensor<T>(long(0),k,long(0));
459 }
460 core[dims.size()-1] = Tensor<T>(long(0),dims[dims.size()-1]);
461 }
462 }
// the first core drops its dummy left boundary rank: (1,k,r) -> (k,r)
463 core[0]=core[0].fusedim(0);
464
465
466 }
467
468
469 /// turn this into an empty tensor with all cores properly shaped
// reuses the shape-only ctor, keeping the current dimensions
470 void zero_me() {
471 *this=TensorTrain<T>(this->ndim(),this->dims());
472 }
473
474 /// turn this into an empty tensor with all cores properly shaped
// overload that also replaces the dimensions by the given ones
475 void zero_me(const std::vector<long>& dim) {
476 *this=TensorTrain<T>(dim);
477 }
478
479 /// assign a number to this tensor
// result: a constant tensor of value `number` in rank-1 TT form
// assumes core is already sized to ndim() entries -- TODO confirm for zero_rank input
480 TensorTrain<T>& operator=(const T& number) {
481
482 // tensor will have ranks = 1 all over
483 core[0]=Tensor<T>(dim(0),1);
484 for (int i=1; i<ndim()-1; ++i) core[i]=Tensor<T>(1,dim(i),1);
// note: for ndim()==1 the single core keeps shape (k,1) from the line above
485 if (ndim()>1) core[ndim()-1]=Tensor<T>(1,dim(ndim()-1));
486
// the value goes into the first core only; all other cores are filled with 1
487 core[0]=number;
488 for (int i=1; i<ndim(); ++i) core[i]=1.0;
489
490 zero_rank=false;
491 return *this;
492 }
493
494 /// return this multiplied by a scalar
495
496 /// @return new tensor
497 TensorTrain<T> operator*(const T& factor) const {
498 TensorTrain result=copy(*this);
499 result.scale(factor);
500 return result;
501 }
502
503 /// inplace addition of two Tensortrains; will increase ranks of this
504
505 /// inefficient if many additions are performed, since it requires
506 /// many calls of new.
507 /// @param[in] rhs a TensorTrain to be added
// NOTE(review): the signature line (orig. line 508) is elided in this view --
// presumably TensorTrain<T>& operator+=(const TensorTrain<T>& rhs) {
509 gaxpy(1.0,rhs,1.0);
510 return *this;
511 }
512
513 /// inplace subtraction of two Tensortrains; will increase ranks of this
514
515 /// inefficient if many subtractions are performed, since it requires
516 /// many calls of new.
517 /// @param[in] rhs a TensorTrain to be added
// NOTE(review): the signature line (orig. line 518) is elided in this view --
// presumably TensorTrain<T>& operator-=(const TensorTrain<T>& rhs) {
519 gaxpy(1.0,rhs,-1.0);
520 return *this;
521 }
522
523 /// Inplace generalized saxpy ... this = this*alpha + other*beta
// the TT sum concatenates the ranks: each connecting rank becomes r_this+r_rhs,
// with the two summands occupying block-diagonal slices of the new cores.
// NOTE(review): the signature line (orig. line 524) is elided in this view --
// presumably TensorTrain<T>& gaxpy(T alpha, const TensorTrain<T>& rhs, T beta) {
525
526 // make sure dimensions conform
527 MADNESS_ASSERT(this->ndim()==rhs.ndim());
528
529 if (this->zero_rank or (alpha==0.0)) {
530 *this=rhs*beta;
531 } else if (rhs.zero_rank or (beta==0.0)) {
532 scale(alpha);
533 } else if (ndim()==1) { // simple algorithm for ndim=1
534 core[0].gaxpy(alpha,rhs.core[0],beta);
535 } else {
536
537 // special treatment for first border cores (k,r1)
538
539 // alpha and beta are only included in the first core(!)
540 {
541 long k=core[0].dim(0);
542 long r1_this=core[0].dim(1);
543 long r1_rhs=rhs.core[0].dim(1);
544 Tensor<T> core_new(k,r1_this+r1_rhs);
545 core_new(_,Slice(0,r1_this-1))=alpha*core[0];
546 core_new(_,Slice(r1_this,r1_this+r1_rhs-1))=beta*rhs.core[0];
547 core[0]=core_new;
548 }
549
550 // interior cores (r0,k,r1)
551 for (std::size_t i=1; i<core.size()-1; ++i) {
552 MADNESS_ASSERT(core[i].ndim()==3 or i==core.size()-1);
553 long r0_this=core[i].dim(0);
554 long r0_rhs=rhs.core[i].dim(0);
555 long k=core[i].dim(1);
556 long r1_this=core[i].dim(2);
557 long r1_rhs=rhs.core[i].dim(2);
// block-diagonal embedding: this in the top-left block, rhs in the bottom-right
558 Tensor<T> core_new(r0_this+r0_rhs,k,r1_this+r1_rhs);
559 core_new(Slice(0,r0_this-1),_,Slice(0,r1_this-1))=core[i];
560 core_new(Slice(r0_this,r0_this+r0_rhs-1),_,Slice(r1_this,r1_this+r1_rhs-1))=rhs.core[i];
561 core[i]=core_new;
562 }
563
564 // special treatment for last border core (r0,k)
565 {
566 std::size_t d=core.size()-1;
567 long r0_this=core[d].dim(0);
568 long r0_rhs=rhs.core[d].dim(0);
569 long k=core[d].dim(1);
570 Tensor<T> core_new(r0_this+r0_rhs,k);
571 core_new(Slice(0,r0_this-1),_)=core[d];
572 core_new(Slice(r0_this,r0_this+r0_rhs-1),_)=rhs.core[d];
573 core[d]=core_new;
574 }
575 }
576 if (not verify()) MADNESS_EXCEPTION("ranks in TensorTrain inconsistent",1);
577 return *this;
578 }
579
580 /// Inplace generalized saxpy with slices and without alpha
581
582 /// return this = this(s1) + other(s2) * beta
// same rank-concatenation scheme as plain gaxpy, but the rhs contribution is
// written only into the s1-slice of each new core, taken from the s2-slice of rhs
583 TensorTrain<T>& gaxpy(const std::array<Slice,TENSOR_MAXDIM>& s1,
584 const TensorTrain<T>& rhs, T beta, const std::array<Slice,TENSOR_MAXDIM>& s2) {
585
586 // make sure dimensions conform
587 MADNESS_ASSERT(this->ndim()==rhs.ndim());
588
589 if (rhs.zero_rank or (beta==0.0)) return *this;
590 if (this->zero_rank) {
// zero_rank cores may be absent; create empty placeholders so indexing is safe
591 core.resize(ndim(),Tensor<T>());
592 }
593 // special treatment for first border cores (k,r1)
594
595 // alpha and beta are only included in the first core(!)
596 {
597 long k=dim(0); // this is max dimension: k>=slice1; k>=slice2
598 long r1_this= zero_rank ? 0 : core[0].dim(1);
599 long r1_rhs=rhs.core[0].dim(1);
600
601 Tensor<T> core_new(k,r1_this+r1_rhs);
602 if (r1_this>0) core_new(_,Slice(0,r1_this-1))=core[0];
603 if (r1_rhs>0) core_new(s1[0],Slice(r1_this,r1_this+r1_rhs-1))=beta*rhs.core[0](s2[0],_);
604 core[0]=core_new;
605 }
606
607 // interior cores (r0,k,r1)
608 for (std::size_t i=1; i<core.size()-1; ++i) {
609 MADNESS_ASSERT(zero_rank or core[i].ndim()==3 or i==core.size()-1);
610 long r0_this= zero_rank ? 0 : core[i].dim(0);
611 long r0_rhs=rhs.core[i].dim(0);
612 long k=dim(i);
613 long r1_this=zero_rank ? 0 : core[i].dim(2);
614 long r1_rhs=rhs.core[i].dim(2);
615 Tensor<T> core_new(r0_this+r0_rhs,k,r1_this+r1_rhs);
// note: both branches below are guarded by r1_* only; the r0_* extents are
// implied consistent by verify() at the end
616 if (r1_this>0) core_new(Slice(0,r0_this-1),_,Slice(0,r1_this-1))=core[i];
617 if (r1_rhs>0) core_new(Slice(r0_this,r0_this+r0_rhs-1),s1[i],Slice(r1_this,r1_this+r1_rhs-1))=rhs.core[i](_,s2[i],_);
618 core[i]=core_new;
619 }
620
621 // special treatment for last border core (r0,k)
622 {
623 std::size_t d=core.size()-1;
624 long r0_this= zero_rank ? 0 : core[d].dim(0);
625 long r0_rhs=rhs.core[d].dim(0);
626 long k=dim(d);
627 Tensor<T> core_new(r0_this+r0_rhs,k);
628 if (r0_this>0) core_new(Slice(0,r0_this-1),_)=core[d];
629 if (r0_rhs>0) core_new(Slice(r0_this,r0_this+r0_rhs-1),s1[d])=rhs.core[d](_,s2[d]);
630 core[d]=core_new;
631 }
// a nonzero rhs contribution makes this nonzero
632 if (not rhs.zero_rank) zero_rank=false;
633 if (not verify()) MADNESS_EXCEPTION("ranks in TensorTrain inconsistent",1);
634 return *this;
635 }
636
637 /// compute the Hadamard product of two TensorTrains
638
639 /// see Sec. 4.2 of the TT paper
640 /// @return *this with the ranks of the input tensors multiplied!
// NOTE(review): the signature line (orig. line 641) is elided in this view --
// presumably TensorTrain<T>& emul(const TensorTrain<T>& other) {
642
643 // consistency checks
644 MADNESS_ASSERT(ndim()==other.ndim());
645 for (int i=0; i<ndim(); ++i) MADNESS_ASSERT(dim(i)==other.dim(i));
646
647 // set up the result tensor
// Hadamard ranks are products of the operand ranks
648 std::vector<long> cranks(ndim()-1); // ranks of result C
649 for (int i=0; i<ndim()-1; ++i) cranks[i]=ranks(i)*other.ranks(i);
650
651 // check for large sizes
652 for (int i=0; i<ndim()-2; ++i) {
653 long size=cranks[i]*cranks[i+1]*dim(i);
654 MADNESS_CHECK_THROW(size<10000000,"emul in TensorTrain too large -- use full rank tensor");
655 }
656 TensorTrain result(ndim(),dims());
657 result.core.resize(ndim());
658
659 // fast return for zero ranks
// result stays zero-rank (as constructed) if either operand is zero
660 if (zero_rank or other.zero_rank) {
661 ;
662 } else if (ndim()==1) {
663 // fast return for one core only (all ranks equal 1)
664 result.core[0]=copy(core[0]);
665 result.core[0].emul(other.core[0]);
666
667 } else {
668 result.zero_rank=false;
669 // compute outer product for each core for each k
670
671 // special treatment for first core
672 result.core[0]=Tensor<T>(dim(0),cranks[0]);
673 for (int k=0; k<dim(0); ++k) {
674 Tensor<T> a1=core[0](Slice(k,k),_);
675 Tensor<T> b1=other.core[0](Slice(k,k),_);
676 result.core[0](Slice(k,k),_)=outer(a1,b1).reshape(1,cranks[0]);
677 // before reshape rhs has dimensions (1, r1, 1, r1')
678 // after reshape rhs has dimensions (1, r1*r1')
679 }
680
681 for (int i=1; i<ndim()-1; ++i) {
682 result.core[i]=Tensor<T>(cranks[i-1],dim(i),cranks[i]);
683 for (int k=0; k<dim(i); ++k) {
684 Tensor<T> a1=core[i](_,Slice(k,k),_).fusedim(1); // (r1,r2)
685 Tensor<T> b1=other.core[i](_,Slice(k,k),_).fusedim(1); // (r1',r2')
686 Tensor<T> tmp=copy(outer(a1,b1).swapdim(1,2)); // make contiguous
687 result.core[i](_,Slice(k,k),_)=tmp.reshape(cranks[i-1],1,cranks[i]);
688 // before swap/fuse/splitdim rhs has dimensions (r1, r2, r1', r2')
689 // after fusedim/splitdim rhs has dimensions (r1*r1', 1, r2*r2')
690 }
691 }
692
693 // special treatment for last core
694 const long n=ndim()-1;
695 result.core[n]=Tensor<T>(cranks[n-1],dim(n));
696 for (int k=0; k<dim(n); ++k) {
697 Tensor<T> a1=core[n](_,Slice(k,k));
698 Tensor<T> b1=other.core[n](_,Slice(k,k));
699 result.core[n](_,Slice(k,k))=outer(a1,b1).reshape(cranks[n-1],1);
700 // before reshape rhs has dimensions (r1,1, r1',1)
701 // after reshape rhs has dimensions (r1*r1', 1)
702
703 }
704 }
705 *this=result;
706 return *this;
707 }
708
709
710
711 /// merge two dimensions into one
712
713 /// merge dimension i and i+1 into new dimension i
714 /// @param[in] i the first dimension
715 void fusedim(const long i) {
716 // core_new = left * right
717 // (r1, k1*k2, r3) = sum_r2 (r1, k1, r2) * (r2, k2, r3)
718
// NOTE(review): one line (orig. line 719) is elided in this view
720 // determine index
721 const int index=core[i].ndim()-2; // (r-1, k, k, .. , k, r1)
722
723 if (not zero_rank) {
// contract the shared rank r2, then fuse the two k-indices into one
724 core[i]=inner(core[i],core[i+1]);
725 core[i]=core[i].fusedim(index);
726 } else {
// zero rank: just build an empty core of the fused shape
727 if (i==0) { // (k1*k2, r2=0)
728 core[i]=Tensor<T>(core[i].dim(0)*core[i+1].dim(1),0l);
729 } else { /// (r1=0, k1*k2, r2=0)
730 core[i]=Tensor<T>(0l,core[i].dim(0)*core[i+1].dim(1),0l);
731 }
732 }
733
734 // shift all subsequent cores and remove the last one
735 for (std::size_t d=i+1; d<core.size()-1; ++d) core[d]=core[d+1];
736 core.pop_back();
737
738 }
739
740 /// Returns new view/tensor splitting dimension \c i as \c dimi0*dimi1
741 /// to produce conforming d+1 dimension tensor
742
743 /// @param[in] idim the dimension to be split
744 /// @param[in] k1 new first dimension of idim
745 /// @param[in] k2 new second dimension of idim
746 /// @param[in] eps threshold for SVD (choose negative to keep all terms)
747 /// @return new deep copy of this with split dimensions
748 TensorTrain<T> splitdim(long idim, long k1, long k2, const double eps) const {
749 // core_new = left * right
750 // (r1, k1*k2, r3) = sum_r2 (r1, k1, r2) * (r2, k2, r3)
751
752 // check for consistency
753 MADNESS_ASSERT(k1*k2==dim(idim));
754
// zero rank: only the dimension bookkeeping changes
755 if (zero_rank) {
756 std::vector<long> newdims(this->ndim()+1);
757 for (long i=0; i<idim; ++i) newdims[i]=this->dim(i);
758 newdims[idim]=k1;
759 newdims[idim+1]=k2;
760 for (long i=idim+1; i<ndim(); ++i) newdims[i+1]=dim(i);
761 return TensorTrain(newdims);
762 }
763
764 TensorTrain<T> result(ndim(),dims());
765 result.splitdim_inplace(idim,k1,k2);
766
767 long r1= (idim==0) ? 1 : ranks(idim-1); // left-side rank
768 long r2= (idim==ndim()-1) ? 1 : ranks(idim); // right-side rank
769 long k12=dim(idim);
770
// reshape the split core into a matrix and SVD it to get the new interior rank
771 Tensor<T> A=core[idim].reshape(r1*k1,r2*k2);
772 Tensor<T> U,VT;
// NOTE(review): the declaration of the singular-value vector s (orig. line 773)
// is elided in this view
774 svd(A,U,s,VT);
775
776 // this is the new interior rank
777 long r=SRConf<T>::max_sigma(eps,std::min(A.dim(0),A.dim(1)),s)+1;
778
779 // handle rank=0 explicitly
780 if (r==0) {
781 std::vector<long> newdims(this->ndim()+1);
782 for (long i=0; i<idim; ++i) newdims[i]=this->dim(i);
783 newdims[idim]=k1;
784 newdims[idim+1]=k2;
785 for (long i=idim+1; i<ndim(); ++i) newdims[i+1]=dim(i);
786 return TensorTrain(newdims);
787 } else {
788
789 // convolve the singular values into the right singular vectors
790 for (int ii=0; ii<r; ++ii) {
791 for (int j=0; j<VT.dim(1); ++j) {
792 VT(ii,j)*=s(ii);
793 }
794 }
795
// rebuild the core list: untouched left cores, the two new split cores, right cores
796 for (long ii=0; ii<idim; ++ii) result.core.push_back(copy(core[ii]));
797 result.core.push_back(copy(U(_,Slice(0,r-1))).reshape(r1,k1,r));
798 result.core.push_back(copy(VT(Slice(0,r-1),_)).reshape(r,k2,r2));
799 for (long ii=idim+1; ii<ndim(); ++ii) result.core.push_back(core[ii]);
800
801 // post-processing
// border cores must drop their dummy boundary ranks
802 if (result.core.front().ndim()==3) result.core.front()=result.core.front().fusedim(0);
803 if (result.core.back().ndim()==3) result.core.back()=result.core.back().fusedim(1);
804 result.zero_rank=false;
805 }
806 return result;
807
808 }
809
810 /// reconstruct this to a full representation
811
812 /// @param[in] flat return this in flat representation
813 /// @return this in full rank representation
814 Tensor<T> reconstruct(const bool flat=false) const {
815
816 if (zero_rank) {
817 if (flat) {
// total number of elements: product of the k-dimensions (dim(1) of each
// interior core; the first core's k is accounted for by starting at i=1
// with the shape conventions of the zero-rank cores)
818 long size=1;
819 for (int i=1; i<this->ndim(); ++i) size*=core[i].dim(1);
820 return Tensor<T>(size);
821 } else {
// NOTE(review): one line (orig. line 822) is elided in this view
823 return Tensor<T>(ndim(),dims());
824 }
825 }
826
// chain-contract the cores left to right over the connecting ranks
827 Tensor<T> result=core.front();
828 typename std::vector<Tensor<T> >::const_iterator it;
829
830 for (it=++core.begin(); it!=core.end(); ++it) {
831 result=inner(result,*it);
832 if (flat) result=result.fusedim(0);
833 }
834 return result;
835 }
836
837 /// construct a two-mode representation (aka unnormalized SVD)
838
839 /// @param[out] U The left singular vectors, ({i},rank)
840 /// @param[out] VT The right singular vectors, (rank,{i})
841 /// @param[out] s Vector holding 1's, (rank)
// NOTE(review): the signature line (orig. line 842) is elided in this view --
// presumably void two_mode_representation(Tensor<T>& U, Tensor<T>& VT,
843 Tensor< typename Tensor<T>::scalar_type >& s) const {
844
845 // number of dimensions needs to be even
846 MADNESS_ASSERT(ndim()%2==0);
847
848 if (not zero_rank) {
// contract the first half of the cores into U and the second half into VT,
// walking inward from both ends simultaneously
849 typename std::vector<Tensor<T> >::const_iterator it1, it2;
850 U=core.front();
851 VT=core.back();
852 for (it1=++core.begin(), it2=--(--core.end()); it1<it2; ++it1, --it2) {
853 U=inner(U,*it1);
854 VT=inner(*it2,VT);
855 }
// NOTE(review): one line (orig. line 856, presumably sizing s to the rank)
// is elided in this view
857 s=1.0;
858 }
859 else {
// zero rank: produce empty U/VT of the correct shapes for the supported ndims
860 if (ndim()==2) {
861 U = Tensor<T>(dim(0),long(0));
862 VT = Tensor<T>(long(0),dim(1));
863 } else if (ndim()==4) {
864 U = Tensor<T>(dim(0),dim(1),long(0));
865 VT = Tensor<T>(long(0),dim(2),dim(3));
866 } else if (ndim()==6) {
867 U = Tensor<T>(dim(0),dim(1),dim(2),long(0));
868 VT = Tensor<T>(long(0),dim(3),dim(4),dim(5));
869 } else {
870 MADNESS_EXCEPTION("ndim>6 in tensortrain::two_mode_representation",1);
871 }
// NOTE(review): one line (orig. line 872) is elided in this view
873 }
874 }
875
876 /// recompress and truncate this TT representation
877
878 /// this in recompressed TT form with optimal rank
879 /// @param[in] eps the truncation threshold
// SFINAE: this overload is selected for non-arithmetic (i.e. complex) T and
// always throws -- truncation is only implemented for real scalar types below
880 template<typename R=T>
881 typename std::enable_if<!std::is_arithmetic<R>::value, void>::type
882 truncate(double eps) {
883 MADNESS_EXCEPTION("no complex truncate in TensorTrain",1);
884 }
885
886
887 /// recompress and truncate this TT representation
888
889 /// this in recompressed TT form with optimal rank
890 /// @param[in] eps the truncation threshold
891 template<typename R=T>
892 typename std::enable_if<std::is_arithmetic<R>::value, void>::type
893 truncate(double eps) {
894
895 // fast return
896 if (zero_rank) return;
897
898 for (long i=0; i<core.size()-1; ++i) if (ranks(i)==0) zero_rank=true;
899
900 if (zero_rank) {
901 zero_me();
902 return;
903 }
904
905 eps=eps/sqrt(this->ndim());
906 if (not verify()) MADNESS_EXCEPTION("ranks in TensorTrain inconsistent",1);
907
908 // right-to-left orthogonalization (line 4)
909 // get maximum rank and maximum k-value
910 // cores are (k0,r0), (r0,k1,r1), (r1,k2,r2), ... (rd-1,kd)
911 long rmax = core[0].dim(1);
912 long kmax = core[0].dim(0);
913 for(size_t i=1;i<core.size();i++){
914 rmax = std::max(rmax,core[i].dim(0));
915 kmax = std::max(kmax,core[i].dim(1));
916 }
917
918 Tensor<T> L;//L_buffer(rmax,rmax*kmax);
919 Tensor<T> lq_tau(rmax);
920 long max_rk = std::max(rmax,kmax);
921 long lq_work_dim = 2*max_rk+(max_rk+1)*64;
922 Tensor<T> lq_work(lq_work_dim);
923 Tensor<T> L_buffer(max_rk,max_rk);
924 long dimensions[TENSOR_MAXDIM];
925 // last tensor differs in dimension (first tensor also, but we dont reach it in the loop)
926 if(core.size()>1){
927 const long n_dim = core.back().ndim();
928 for (int i=0; i<n_dim; ++i) dimensions[i]=core.back().dim(i);
929
930 const long r0 = core.back().dim(0);
931 const long r1 = core.back().size()/r0;
932 core.back()=core.back().reshape(r0,r1);
933
934 // assignement of L with the L_buffer tensor
935 // works only if the bool for lq_result is set to false
936 {
937 long r_rows= (core.back().dim(1)>=core.back().dim(0)) ? core.back().dim(0) : core.back().dim(1);
938 long r_cols=core.back().dim(0);
939 L = L_buffer(Slice(0,r_cols-1),Slice(0,r_rows-1));
940 L = 0.0;
941 }
942 lq_result(core.back(),L,lq_tau,lq_work,false);
943 //Tensor<T> L = L_buffer(Slice(0,r0-1),Slice(0,r1-1));
944
945 dimensions[0]=std::min(dimensions[0],core.back().dim(0));
946 core.back()=core.back().reshape(n_dim,dimensions);
947 // multiply to the left (line 6)
948 core[core.size()-2]=inner(core[core.size()-2],L);
949
950 }
951
952
953 for (std::size_t d=core.size()-2; d>0; --d) {
954
955 // save tensor structure
956 const long ndim=core[d].ndim();
957 for (int i=0; i<ndim; ++i) dimensions[i]=core[d].dim(i);
958
959 // G(r0, k*r1)
960 const long r0=core[d].dim(0);
961 const long r1=core[d].size()/r0;
962 core[d]=core[d].reshape(r0,r1);
963
964 // decompose the core tensor (line 5)
965 //lq(core[d],L_buffer); // might shrink the core
966
967 // assignement of L with the inner_buffer tensor
968 // works only if the bool for lq_result is set to false
969 {
970 long r_rows= (core[d].dim(1)>=core[d].dim(0)) ? core[d].dim(0) : core[d].dim(1);
971 long r_cols=core[d].dim(0);
972 L = L_buffer(Slice(0,r_cols-1),Slice(0,r_rows-1));
973 L = 0.0;
974 }
975
976 // workaround for LQ decomposition to avoid reallocations
977 //L_buffer = 0.0;
978 lq_tau = 0.0;
979 lq_work = 0.0;
980 lq_result(core[d],L,lq_tau,lq_work,false);
981 // slice L to the right size
982 //Tensor<T> L = L_buffer(Slice(0,r0-1),Slice(0,r1-1));
983
984 dimensions[0]=std::min(r0,core[d].dim(0));
985 core[d]=core[d].reshape(ndim,dimensions);
986
987 // multiply to the left (line 6)
988 core[d-1]=inner(core[d-1],L);
989 }
990
991 // svd buffer tensor (see svd_results in lapack.cc)
992 long m =rmax*kmax;
993 long n =rmax;
994 long k =std::min<long>(m,n);
995 long svd_buffer_size = std::max<long>(3*std::min<long>(m,n)+std::max<long>(m,n),5*std::min<long>(m,n)-4)*32;
996 Tensor<T> svd_buffer(svd_buffer_size);
997 Tensor<T> U_buffer(m,k);
998 Tensor<T> dummy;
1000
1001 // left-to-right SVD (line 9)
1002 for (std::size_t d=0; d<core.size()-1; ++d) {
1003
1004 // save tensor structure
1005 const long ndim=core[d].ndim();
1006 for (int i=0; i<ndim; ++i) dimensions[i]=core[d].dim(i);
1007
1008 // reshape the core tensor (r0*k, r1)
1009 long r1=core[d].dim(core[d].ndim()-1);
1010 // long r1=core[d].dim(1);
1011 core[d]=core[d].reshape(core[d].size()/r1,r1);
1012
1013 Tensor<T> U,VT;
1014 long ds=std::min(core[d].dim(0),core[d].dim(1));
1015 s_buffer = 0.0;
1016 //Tensor< typename Tensor<T>::scalar_type > s(ds)
1017 Tensor< typename Tensor<T>::scalar_type > s = s_buffer(Slice(0,ds-1));
1018
1019 // decompose (line 10)
1020 //svd(core[d],U,s,VT);
1021 // get the dimensions of U and V
1022 long du = core[d].dim(0);
1023 long dv = core[d].dim(1);
1024 U_buffer = 0.0;
1025 svd_buffer = 0.0;
1026 U_buffer = U_buffer.flat();
1027 // VT is written on core[d] input
1028 svd_result(core[d],U_buffer,s,dummy,svd_buffer);
1029
1030 // truncate the SVD
1031 int r_truncate=SRConf<T>::max_sigma(eps,ds,s)+1;
1032 if (r_truncate==0) {
1033 zero_me();
1034 return;
1035 }
1036
1037 // make tensors contiguous
1038 U=madness::copy(U_buffer(Slice(0,(du*ds)-1)).reshape(du,ds)(_,Slice(0,r_truncate-1)));
1039 VT=madness::copy(core[d](Slice(0,r_truncate-1),Slice(0,dv-1)));
1040
1041 dimensions[ndim-1]=r_truncate;
1042 core[d]=U.reshape(ndim,dimensions);
1043
1044 for (int i=0; i<VT.dim(0); ++i) {
1045 for (int j=0; j<VT.dim(1); ++j) {
1046 VT(i,j)*=s(i);
1047 }
1048 }
1049
1050 // multiply to the right (line 11)
1051 core[d+1]=inner(VT,core[d+1]);
1052
1053 }
1054
1055 if (not verify()) MADNESS_EXCEPTION("ranks in TensorTrain inconsistent",1);
1056 }
1057
1058// /// return the number of dimensions
1059// long ndim() const {return core.size();}
1060
1061 /// return the number of coefficients in all core tensors
1062 long size() const {
1063 if (zero_rank) return 0;
1064 long n=0;
1065 typename std::vector<Tensor<T> >::const_iterator it;
1066 for (it=core.begin(); it!=core.end(); ++it) n+=it->size();
1067 return n;
1068 }
1069
1070 /// return the size of this instance, including static memory for vectors and such
1071 long real_size() const {
1072 long n=this->size()*sizeof(T);
1073 n+=core.size() * sizeof(Tensor<T>);
1074 n+=sizeof(*this);
1075 return n;
1076 }
1077
1078// /// return the number of entries in dimension i
1079// long dim(const int i) const {
1080// if (i==0) return core[0].dim(0);
1081// return core[i].dim(1);
1082// }
1083
1084 /// return the dimensions of this tensor
1085// std::vector<long> dims() const {
1086// std::vector<long> d(ndim());
1087// d[0]=core[0].dim(0); // dim,rank
1088// for (long i=1; i<ndim(); ++i) d[i]=core[i].dim(1); // rank,dim,rank
1089// return d;
1090// }
1091
1092 /// check that the ranks of all core tensors are consistent
1093 bool verify() const {
1094 if (core.size()<2) return true; // no ranks
1095 if (core[0].dim(1)!=core[1].dim(0)) return false;
1096 for (int d=2; d<ndim(); ++d) {
1097 if (core[d-1].dim(2)!=core[d].dim(0)) return false;
1098 }
1099 for (const Tensor<T>& c : core) {
1100 int size=1;
1101 for (int i=0; i<c.ndim(); ++i) size*=c.dim(i);
1102 if (size!=c.size()) return false;
1103 if (not c.iscontiguous()) return false;
1104 }
1105 return true;
1106 }
1107
1108 /// if rank is zero
1109 bool is_zero_rank() const {return zero_rank;}
1110
1111 /// return the TT ranks
1112 std::vector<long> ranks() const {
1113 if (ndim()==0) return std::vector<long>(1,0);
1114 if (zero_rank) return std::vector<long>(ndim()-1,0);
1115// MADNESS_ASSERT(is_tensor());
1116 std::vector<long> r(core.size()-1);
1117 for (std::size_t i=0; i<r.size(); ++i) r[i]=core[i+1].dim(0);
1118 return r;
1119 }
1120
1121 /// return the TT ranks for dimension i (to i+1)
1122 long ranks(const int i) const {
1123 if (zero_rank) return 0;
1124 if (i<core.size()-1) {
1125 return core[i].dim(core[i].ndim()-1);
1126 } else {
1127 print("ndim ",ndim());
1128 print("i ",i);
1129 MADNESS_EXCEPTION("requested invalid rank in TensorTrain",1);
1130 return 0;
1131 }
1132 }
1133
1134 /// returns the Frobenius norm
1136 return sqrt(float_scalar_type(std::abs(trace(*this))));
1137 };
1138
1139 /// scale this by a number
1140
1141 /// @param[in] fac the factor to multiply
1142 /// @return *this * fac
1143 void scale(T fac) {
1144 if (not zero_rank and (core.size()>0)) core.front().scale(fac);
1145 }
1146
1147 /// Returns a pointer to the internal data
1148
1149 /// @param[in] ivec index of core vector to which the return values points
1150 T* ptr(const int ivec=0) {
1151 if (core.size()) return core[ivec].ptr();
1152 return 0;
1153 }
1154
1155 /// Returns a pointer to the internal data
1156
1157 /// @param[in] ivec index of core vector to which the return values points
1158 const T* ptr(const int ivec=0) const {
1159 if (core.size()) return core[ivec].ptr();
1160 return 0;
1161 }
1162
1163 /// check if this is a tensor (r,k,r)
1164 bool is_tensor() const {
1165 if (ndim()>0) return (core[0].ndim()==2);
1166 return false;
1167 }
1168
1169 /// check if this is an operator (r,k',k,r)
1170 bool is_operator() const {
1171 return (!is_tensor());
1172 }
1173
1174 /// convert this into a tensor representation (r,k,r)
1176 if (is_tensor()) return *this;
1177
1178 long nd=this->ndim();
1179 core[0]=core[0].fusedim(0); // (k,k,r) -> (k,r)
1180 core[nd-1]=core[nd-1].fusedim(1); // (r,k,k) -> (r,k)
1181 for (int i=1; i<nd-1; ++i) core[i]=core[i].fusedim(1); // (r,k',k,r) -> (r,k,r)
1182 return *this;
1183
1184 }
1185
1186 /// convert this into an operator representation (r,k',k,r)
1188 if (is_operator()) return *this;
1189
1190 long nd=this->ndim();
1191
1192 long k2=core[0].dim(0);
1193 long k0=sqrt(k2);
1195 MADNESS_ASSERT(core[0].ndim()==2);
1196 core[0]=core[0].splitdim(0,k0,k0); // (k*k,r) -> (k,k,r)
1197
1198 k2=core[nd-1].dim(1);
1199 k0=sqrt(k2);
1201 MADNESS_ASSERT(core[nd-1].ndim()==2);
1202 core[nd-1]=core[nd-1].splitdim(1,k0,k0); // (r,k*k) -> (r,k,k)
1203
1204 for (int i=1; i<nd-1; ++i) {
1205 k2=core[i].dim(1);
1206 k0=sqrt(k2);
1208 MADNESS_ASSERT(core[i].ndim()==3);
1209 core[i]=core[i].splitdim(1,k0,k0); // (r,k*k,r) -> (r,k',k,r)
1210 }
1211 return *this;
1212 }
1213
1214
1215 /// reference to the internal core
1216 Tensor<T>& get_core(const int i) {
1217 MADNESS_ASSERT(i<ndim());
1218 return core[i];
1219 }
1220
1221 /// const reference to the internal core
1222 const Tensor<T>& get_core(const int i) const {
1223 MADNESS_ASSERT(i<ndim());
1224 return core[i];
1225 }
1226
1227 template <class Q>
1230 trace(const TensorTrain<Q>& B) const {
1231 MADNESS_EXCEPTION("no complex trace in tensortrain",1);
1232 }
1233 /// Return the trace of two tensors, no complex conjugate involved
1234
1235 /// @return <this | B>
1236 template <class Q>
1237 typename std::enable_if<!(TensorTypeData<T>::iscomplex or TensorTypeData<Q>::iscomplex),
1239 trace(const TensorTrain<Q>& B) const {
1240 if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in TensorTrain, sorry",1);
1241 if (TensorTypeData<Q>::iscomplex) MADNESS_EXCEPTION("no complex trace in TensorTrain, sorry",1);
1242
1243 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
1244
1245 // alias
1246 const TensorTrain<T>& A=*this;
1247
1248 MADNESS_ASSERT(A.ndim()==B.ndim()); // number of dimensions
1249
1250 // fast return
1251 if (A.zero_rank or B.zero_rank) return resultT(0.0);
1252 if (A.ndim()==1) {
1253 return A.core[0].trace(B.core[0]);
1254 }
1255
1256 // set up temporary tensors for intermediates
1257 long size1=A.ranks(0)*B.ranks(0); // first contraction
1258 long size2=B.ranks(ndim()-2)*A.dim(ndim()-1); // last contraction
1259
1260 for (int d=1; d<A.ndim()-1; ++d) {
1261 size1=std::max(size1,A.ranks(d)*B.ranks(d));
1262 size2=std::max(size2,A.ranks(d)*B.ranks(d-1)*A.dim(d));
1263 }
1264 Tensor<resultT> tmp1(size1), tmp2(size2); // scratch
1265 Tensor<resultT> Aprime, AB; // for flat views of tmp
1266
1267 // loop over all dimensions but the last one
1268 for (int d=0; d<A.ndim()-1; ++d) {
1269
1270 // contract cores to matrix AB(rank(A), rank(B))
1271 // AB(ra1,rb1) = sum_(r0,i0) A(ra0,i0,ra1) B(rb0,i0,rb1)
1272 // index dimension is the first one: core((r * i), r)
1273
1274 // calculate dimensions
1275 long rA= (d==0) ? A.core[d].dim(1) : Aprime.dim(2); // r_d (A)
1276 long rB= (d==0) ? B.core[d].dim(1) : B.core[d].dim(2); // r_d (B)
1277 MADNESS_ASSERT(rA*rB<=size1);
1278 if (d>0) tmp1(Slice(0,rA*rB-1))=0.0; // zero out old stuff
1279
1280// Tensor<resultT> AB;
1281 if (d==0) {
1282// AB=inner(A.core[d],B.core[d],0,0);
1283 inner_result(A.core[d],B.core[d],0,0,tmp1);
1284 } else {
1285// AB=inner(Aprime.fusedim(0),B.core[d].fusedim(0),0,0);
1286 inner_result(Aprime.fusedim(0),B.core[d].fusedim(0),0,0,tmp1);
1287 }
1288 AB=tmp1(Slice(0,rA*rB-1)).reshape(rA,rB);
1289
1290 // contract temp matrix AB into core of A of dimension i+1
1291 // into temp core A_i+1
1292 // Atmp(rank(B1), i(2)) = sum_ rank(A1) ABi(rank(A1),rank(B1)) A.core[1](rank(A1),i(2),rank(A2))
1293
1294 // calculate dimensions
1295 long d1=AB.dim(1); // r_d
1296 long d2=A.core[d+1].dim(1); // i_d
1297 long d3=A.core[d+1].dim(2); // r_{d+1}
1298 MADNESS_ASSERT(d1*d2*d3<=size2);
1299
1300 // repeated zero-ing probably much faster than reallocation
1301 // Aprime=inner(AB,A.core[d+1],0,0);
1302 if (d>0) tmp2(Slice(0,d1*d2*d3-1))=0.0;
1303 inner_result(AB,A.core[d+1],0,0,tmp2);
1304 Aprime=tmp2(Slice(0,d1*d2*d3-1)).reshape(d1,d2,d3);
1305
1306 }
1307
1308 // special treatment for the last dimension
1309 resultT result=Aprime.trace(B.core[ndim()-1]);
1310 return result;
1311 }
1312
1313public:
1314 template <typename R, typename Q>
1316 const TensorTrain<R>& t, const Tensor<Q>& c);
1317
1318 template <typename R, typename Q>
1320 const TensorTrain<R>& t, const Tensor<Q> c[]);
1321
1322 template <typename R, typename Q>
1324 const TensorTrain<R>& t, const Tensor<Q>& c, const int axis);
1325
1326 template <typename R, typename Q>
1328 const TensorTrain<R>& t1, const TensorTrain<Q>& t2);
1329
1330 };
1331
1332
1333 /// transform each dimension with the same operator matrix
1334
1335 /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c(i',i) c(j',j) c(k',k) ...
1336 /// TODO: merge this with general_transform
1337 template <class T, class Q>
1339 const Tensor<Q>& c) {
1340
1341 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
1342
1343 // fast return if possible
1344 if (t.zero_rank or (t.ndim()==0)) return TensorTrain<resultT>(t.ndim(),t.dims());
1345
1346 const long ndim=t.ndim();
1347
1348 TensorTrain<resultT> result;
1349 result.zero_rank=false;
1350 result.core.resize(ndim);
1351 // special treatment for first core(i1,r1) and last core (rd-1, id)
1352 result.core[0]=inner(c,t.core[0],0,0);
1353 if (ndim>1) result.core[ndim-1]=inner(t.core[ndim-1],c,1,0);
1354
1355 // other cores have dimensions core(r1,i2,r2);
1356
1357 // set up scratch tensor
1358 long size=0;
1359 for (int d=1; d<ndim-1; ++d) size=std::max(size,t.core[d].size());
1360 Tensor<resultT> tmp(size);
1361
1362 for (int d=1; d<ndim-1; ++d) {
1363 long r1=t.core[d].dim(0);
1364 long i2=t.core[d].dim(1);
1365 long r2=t.core[d].dim(2);
1366
1367 // zero out old stuff from the scratch tensor
1368 if (d>1) tmp(Slice(0,r1*i2*r2-1))=0.0;
1369 inner_result(t.core[d],c,1,0,tmp);
1370 result.core[d]=copy(tmp(Slice(0,r1*i2*r2-1)).reshape(r1,r2,i2).swapdim(1,2));
1371 }
1372 return result;
1373 }
1374
1375
1376 /// Transform all dimensions of the tensor t by distinct matrices c
1377
1378 /// \ingroup tensor
1379 /// Similar to transform but each dimension is transformed with a
1380 /// distinct matrix.
1381 /// \code
1382 /// result(i,j,k...) <-- sum(i',j', k',...) t(i',j',k',...) c[0](i',i) c[1](j',j) c[2](k',k) ...
1383 /// \endcode
1384 /// The first dimension of the matrices c must match the corresponding
1385 /// dimension of t.
1386 template <class T, class Q>
1388 const Tensor<Q> c[]) {
1389
1390 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
1391
1392 // fast return if possible
1393 if (t.zero_rank or (t.ndim()==0)) return TensorTrain<resultT>(t.ndim(),t.dims());
1394
1395 const long ndim=t.ndim();
1396
1397 TensorTrain<resultT> result;
1398 result.zero_rank=false;
1399 result.core.resize(ndim);
1400 // special treatment for first core(i1,r1) and last core (rd-1, id)
1401 result.core[0]=inner(c[0],t.core[0],0,0);
1402 if (ndim>1) result.core[ndim-1]=inner(t.core[ndim-1],c[ndim-1],1,0);
1403
1404 // other cores have dimensions core(r1,i2,r2);
1405
1406 // set up scratch tensor
1407 long size=0;
1408 for (int d=1; d<ndim-1; ++d) size=std::max(size,t.core[d].size());
1409 Tensor<resultT> tmp(size);
1410
1411 for (int d=1; d<ndim-1; ++d) {
1412 long r1=t.core[d].dim(0);
1413 long i2=t.core[d].dim(1);
1414 long r2=t.core[d].dim(2);
1415
1416 // zero out old stuff from the scratch tensor
1417 if (d>1) tmp(Slice(0,r1*i2*r2-1))=0.0;
1418 inner_result(t.core[d],c[d],1,0,tmp);
1419 result.core[d]=copy(tmp(Slice(0,r1*i2*r2-1)).reshape(r1,r2,i2).swapdim(1,2));
1420 }
1421 return result;
1422 }
1423
1424 /// Transforms one dimension of the tensor t by the matrix c, returns new contiguous tensor
1425
1426 /// \ingroup tensor
1427 /// \code
1428 /// transform_dir(t,c,1) = r(i,j,k,...) = sum(j') t(i,j',k,...) * c(j',j)
1429 /// \endcode
1430 /// @param[in] t Tensor to transform (size of dimension to be transformed must match size of first dimension of \c c )
1431 /// @param[in] c Matrix used for the transformation
1432 /// @param[in] axis Dimension (or axis) to be transformed
1433 /// @result Returns a new tensor train
1434 template <class T, class Q>
1436 const Tensor<Q>& c, const int axis) {
1437
1438 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
1439
1440 // fast return if possible
1441 if (t.zero_rank or (t.ndim()==0)) return TensorTrain<resultT>(t.ndim(),t.dims());
1442
1443 const long ndim=t.ndim();
1444 MADNESS_ASSERT(axis<ndim and axis>=0);
1445 MADNESS_ASSERT(c.ndim()==2);
1446
1447 TensorTrain<resultT> result=copy(t);
1448
1449 if (axis==0) {
1450 result.core[0]=inner(c,t.core[0],0,0);
1451 } else if (axis==ndim-1) {
1452 result.core[ndim-1]=inner(t.core[ndim-1],c,1,0);
1453 } else {
1454 Tensor<resultT> tmp=inner(t.core[axis],c,1,0); // G~(r1,r2,i')
1455 result.core[axis]=copy(tmp.swapdim(1,2));
1456 }
1457 return result;
1458
1459 }
1460
1461
1462 /// apply an operator in TT format on a tensor in TT format
1463
1464 /// @param[in] op operator in TT format ..(r_1,k',k,r_2)..
1465 /// @param[in] t tensor in TT format ..(r_1,k',r_2)..
1466 /// the result tensor will be
1467 /// .. (r_1,k,r_2) = \sum_k' ..(r_1,k',k,r_2).. ..(r_1,k',r_2)..
1468 /// during the apply a rank reduction will be performed
1469 /// 2*ndim allocates are needed
1470 template <class T, class Q>
1472 const TensorTrain<Q>& t, const double thresh) {
1473
1474 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
1475 MADNESS_ASSERT(op.ndim()==t.ndim());
1476
1477 const long nd=t.ndim();
1478
1479 // need to add special cases for low dimensions
1480 MADNESS_ASSERT(nd>2);
1481
1482 std::vector<Tensor<resultT> > B(t.ndim()); // will be the result cores
1483
1484 // set up scratch tensors
1485 long maxk=0; // max dimension of the tensor t
1486 long maxr_t=1; // max rank of the input tensor t
1487 long maxr_op=1; // max rank of the operator op
1488 for (int i=0; i<nd-1; ++i) {
1489 maxk=std::max(maxk,t.dim(i));
1490 maxr_t=std::max(t.ranks(i),maxr_t);
1491 maxr_op=std::max(op.ranks(i),maxr_op);
1492 }
1493
1494 long maxr_r=1; // max rank of the result tensor
1495 for (int i=0, j=nd-1; i<j; ++i, --j) {
1496 maxr_r*=t.dim(i);
1497 }
1498
1499 long maxn=0, maxm=0;
1500// {
1501 long R11=t.dim(0); // max final ranks
1502 long rR=R11*t.ranks(1); // R1 r2
1503 long maxR=R11, maxr=t.ranks(1); //
1504 for (int i=1; i<nd-2; ++i) {
1505 long k=t.dim(i);
1506 R11*=k; // max final ranks
1507 R11=std::min(R11,maxr_r);
1508 long r2=t.ranks(i+1); // initial ranks
1509 if (rR<R11*r2) {
1510 maxR=std::max(maxR,R11);
1511 maxr=std::max(maxr,r2);
1512 rR=R11*r2;
1513 }
1514 }
1515 // max matrix dimensions to be svd'ed
1516 maxn=maxR*maxk;
1517 maxm=maxr_op*maxr;
1518// }
1519
1520
1521 if (maxm*maxn>5e7) {
1522 print("huge scratch spaces!! ",maxn*maxm/1024/1024,"MByte");
1523 }
1524 long lscr=std::max(3*std::min(maxm,maxn)+std::max(maxm,maxn),
1525 5*std::min(maxm,maxn)) + maxn*maxm;
1526 Tensor<resultT> scr(lscr); // scratch
1527 Tensor< typename Tensor<T>::scalar_type > s(std::min(maxn,maxm));
1528
1529 // scratch space for contractions
1530 Tensor<resultT> scr3(2*maxn*maxm);
1531 Tensor<resultT> scr1=scr3(Slice(0,maxn*maxm-1));
1532 Tensor<resultT> scr2=scr3(Slice(maxn*maxm,-1));
1533
1534
1535 // contract first core
1536 const long r0=t.ranks(0l);
1537 const long q0=op.get_core(0).dim(2);
1538 const long k0=t.dim(0);
1539 inner_result(op.get_core(0),t.get_core(0),0,0,scr2);
1540 Tensor<resultT> AC=scr2(Slice(0,r0*q0*k0-1)).reshape(k0,r0*q0);
1541
1542 // SVD on first core, skip small singular values
1543 long R=rank_revealing_decompose(AC,B[0],thresh,s,scr1);
1544 if (R==0) return TensorTrain<resultT>(t.ndim(),t.dims()); // fast return for zero ranks
1545 B[0]=B[0].reshape(k0,R);
1546
1547 // AC has dimensions R1,(q1,r1)
1548 Tensor<resultT> VT=AC.reshape(R,q0,r0);
1549
1550 // loop over all dimensions 1,..,nd-1
1551 for (int d=1; d<nd; ++d) {
1552
1553 // alias
1554 Tensor<T> C=t.get_core(d);
1555 Tensor<T> A=op.get_core(d);
1556
1557 // alias dimensions
1558 long R1=VT.dim(0); // left rank of the result tensor
1559 long q1=A.dim(0); // left rank of the operator
1560 long k=C.dim(1); // true for d>0
1561 long r2=1; // right rank of the input tensor, true for d=nd-1
1562 long q2=1; // right rank of the operator, true for d=nd-1
1563 if (d<nd-1) {
1564 r2=C.dim(2); // right rank of the input tensor, true for d<nd-1
1565 q2=A.dim(3); // right rank of the operator
1566 }
1567
1568 // contract VT into the next core
1569 Tensor<resultT> VC=scr1(Slice(0,R1*q1*k*r2-1));
1570 VC(Slice(0,R1*q1*k*r2-1))=resultT(0.0); // zero out result tensor
1571 inner_result(VT,C,-1,0,VC); // VT(R1,q1,r1) * C(r1,k',r2)
1572
1573 // contract A into VC (R,q,k,r2)
1574 VC=VC.reshape(R1,q1*k,r2); // VC(R,(q,k),r2)
1575 A=A.fusedim(0); // A((q1,k),k,q2);
1576 Tensor<resultT> AVC=scr2(Slice(0,R1*q2*k*r2-1));
1577 AVC(Slice(0,R1*q2*k*r2-1))=resultT(0.0); // zero out result tensor
1578 inner_result(VC,A,1,0,AVC); // AVC(R1,r2,k,q2)
1579 AVC=AVC.reshape(R1,r2,k,q2);
1580
1581 // AVC is the final core if we have reached the end of the tensor train
1582 if (d==nd-1) {
1583 B[d]=copy(AVC.reshape(R1,k));
1584 break;
1585 }
1586
1587 // SVD on current core
1588 AVC=copy(AVC.cycledim(2,1,3)); // AVC(R,k,q2,r2); deep copy necessary
1589
1590 MADNESS_ASSERT(AVC.dim(0)==R1);
1591 MADNESS_ASSERT(AVC.dim(1)==k);
1592 MADNESS_ASSERT(AVC.dim(2)==q2);
1593
1594 AVC=AVC.reshape(R1*k,q2*r2);
1595 long R2=rank_revealing_decompose(AVC,B[d],thresh,s,scr1);
1596 if (R2==0) return TensorTrain<resultT>(t.ndim(),t.dims()); // fast return for zero ranks
1597 B[d]=B[d].reshape(R1,k,R2);
1598 VT=AVC.reshape(R2,q2,r2);
1599 }
1600
1601 TensorTrain<T> result(B);
1602 return result;
1603
1604 }
1605
1606
1607 /// compute the n-D identity operator with k elements per dimension
1608 template<typename T>
1609 TensorTrain<T> tt_identity(const long ndim, const long k) {
1610 Tensor<T> id(k,k);
1611 for (int i=0; i<k; ++i) id(i,i)=1.0;
1612 id=id.reshape(1,k,k,1);
1613 std::vector<Tensor<T> > cores(ndim,id);
1614 TensorTrain<T> result(cores);
1615 if (ndim>1) {
1616 result.get_core(0)=result.get_core(0).reshape(k,k,1);
1617 result.get_core(ndim-1)=result.get_core(ndim-1).reshape(1,k,k);
1618 } else {
1619 result.get_core(0)=result.get_core(0).reshape(k,k);
1620 }
1621 return result;
1622 }
1623
1624
1625 /// computes the outer product of two tensors
1626
1627 /// result(i,j,...,p,q,...) = left(i,k,...)*right(p,q,...)
1628 /// @result Returns a new tensor train
1629 template <class T, class Q>
1631 const TensorTrain<Q>& t2) {
1632
1633 typedef TENSOR_RESULT_TYPE(T,Q) resultT;
1634
1635
1636 // fast return if possible
1637 if (t1.zero_rank or t2.zero_rank) {
1638 // compute new dimensions
1639 std::vector<long> dims(t1.ndim()+t2.ndim());
1640 for (int i=0; i<t1.ndim(); ++i) dims[i]=t1.dim(i);
1641 for (int i=0; i<t2.ndim(); ++i) dims[t1.ndim()+i]=t2.dim(i);
1642
1643 return TensorTrain<resultT>(dims);
1644 }
1645
1646 TensorTrain<resultT> result;
1647 for (int i=0; i<t1.ndim(); ++i) result.core.push_back(copy(t1.core[i]));
1648 for (int i=0; i<t2.ndim(); ++i) result.core.push_back(copy(t2.core[i]));
1649
1650 // reshape the new interior cores
1651 long core_dim=t1.core.back().ndim(); // 2 for tensors, 3 for operators
1652 long k1=t1.core.back().dim(core_dim-1); // (r,k) for tensors, (r,k',k) for operators
1653 long k2=t2.core.front().dim(0);
1654 result.core[t1.ndim()-1]=result.core[t1.ndim()-1].splitdim(core_dim-1,k1,1);
1655 result.core[t1.ndim()]=result.core[t1.ndim()].splitdim(0,1,k2);
1656 result.zero_rank=false;
1657
1658 return result;
1659
1660 }
1661
1662}
1663
1664#endif /* TENSORTRAIN_H_ */
Interface templates for the archives (serialization).
C++ interface to LAPACK, either directly via Fortran API (see clapack_fortran.h) or via LAPACKE (see ...
Definition test_ar.cc:118
Definition test_ar.cc:141
Definition test_ar.cc:170
Definition AC.h:427
The base class for tensors defines generic capabilities.
Definition basetensor.h:85
long dim(int i) const
Returns the size of dimension i.
Definition basetensor.h:147
const long * dims() const
Returns the array of tensor dimensions.
Definition basetensor.h:153
long _stride[TENSOR_MAXDIM]
Increment between elements in each dimension.
Definition basetensor.h:97
long _size
Number of elements in the tensor.
Definition basetensor.h:93
void set_dims_and_size(long nd, const long d[])
Definition basetensor.h:99
long _id
Id from TensorTypeData<T> in type_data.h.
Definition basetensor.h:95
void splitdim_inplace(long i, long dimi0, long dimi1)
Splits dimension i.
Definition basetensor.cc:88
void fusedim_inplace(long i)
Fuses dimensions i and i+1.
Definition basetensor.cc:107
long _dim[TENSOR_MAXDIM]
Size of each dimension.
Definition basetensor.h:96
long ndim() const
Returns the number of dimensions in the tensor.
Definition basetensor.h:144
long size() const
Returns the number of elements in the tensor.
Definition basetensor.h:138
long _ndim
Number of dimensions (-1=invalid; 0=no supported; >0=tensor)
Definition basetensor.h:94
static int max_sigma(const double &thresh, const long &rank, const Tensor< double > &w)
Definition srconf.h:109
A slice defines a sub-range or patch of a dimension.
Definition slice.h:103
Definition tensortrain.h:123
void zero_me()
turn this into an empty tensor with all cores properly shaped
Definition tensortrain.h:470
TensorTrain(const TensorTrain &other)
copy constructor, shallow
Definition tensortrain.h:238
const Tensor< T > & get_core(const int i) const
const reference to the internal core
Definition tensortrain.h:1222
bool verify() const
return the dimensions of this tensor
Definition tensortrain.h:1093
TensorTrain(const long &ndims, const long *dims)
ctor for a TensorTrain, set up only the dimensions, no data
Definition tensortrain.h:198
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> outer(const TensorTrain< R > &t1, const TensorTrain< Q > &t2)
std::enable_if< std::is_arithmetic< R >::value, void >::type truncate(double eps)
recompress and truncate this TT representation
Definition tensortrain.h:893
TensorTrain< T > operator*(const T &factor) const
return this multiplied by a scalar
Definition tensortrain.h:497
friend class TensorTrain
Definition tensortrain.h:127
void set_size_and_dim(std::vector< long > dims)
Definition tensortrain.h:265
TensorTrain< T > splitdim(long idim, long k1, long k2, const double eps) const
Definition tensortrain.h:748
void scale(T fac)
scale this by a number
Definition tensortrain.h:1143
void two_mode_representation(Tensor< T > &U, Tensor< T > &VT, Tensor< typename Tensor< T >::scalar_type > &s) const
construct a two-mode representation (aka unnormalized SVD)
Definition tensortrain.h:842
std::vector< Tensor< T > > core
Definition tensortrain.h:140
TensorTypeData< T >::scalar_type scalar_type
C++ typename of the real type associated with a complex type.
Definition tensortrain.h:133
long real_size() const
return the size of this instance, including static memory for vectors and such
Definition tensortrain.h:1071
std::enable_if<!std::is_arithmetic< R >::value, void >::type truncate(double eps)
recompress and truncate this TT representation
Definition tensortrain.h:882
void zero_me(const std::vector< long > &dim)
turn this into an empty tensor with all cores properly shaped
Definition tensortrain.h:475
float_scalar_type normf() const
returns the Frobenius norm
Definition tensortrain.h:1135
TensorTrain< T > & operator-=(const TensorTrain< T > &rhs)
inplace subtraction of two Tensortrains; will increase ranks of this
Definition tensortrain.h:518
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const TensorTrain< R > &t, const Tensor< Q > &c, const int axis)
Tensor< T > reconstruct(const bool flat=false) const
reconstruct this to a full representation
Definition tensortrain.h:814
Tensor< T > & get_core(const int i)
reference to the internal core
Definition tensortrain.h:1216
long size() const
return the number of coefficients in all core tensors
Definition tensortrain.h:1062
void fusedim(const long i)
merge two dimensions into one
Definition tensortrain.h:715
TensorTrain(const Tensor< T > &t, double eps, const std::vector< long > dims)
ctor for a TensorTrain, with the tolerance eps
Definition tensortrain.h:188
TensorTrain(const std::vector< Tensor< T > > &c)
ctor for a TensorTrain, with core tensors explicitly given
Definition tensortrain.h:225
T type
C++ typename of this tensor.
Definition tensortrain.h:130
TensorTrain< T > & operator=(const T &number)
assign a number to this tensor
Definition tensortrain.h:480
TensorTrain(const std::vector< long > &dims)
ctor for a TensorTrain, set up only the dimensions, no data
Definition tensortrain.h:204
std::vector< long > ranks() const
return the TT ranks
Definition tensortrain.h:1112
TensorTrain< T > & make_operator()
convert this into an operator representation (r,k',k,r)
Definition tensortrain.h:1187
TensorTrain< T > & gaxpy(const std::array< Slice, TENSOR_MAXDIM > &s1, const TensorTrain< T > &rhs, T beta, const std::array< Slice, TENSOR_MAXDIM > &s2)
Inplace generalized saxpy with slices and without alpha.
Definition tensortrain.h:583
const T * ptr(const int ivec=0) const
Returns a pointer to the internal data.
Definition tensortrain.h:1158
void decompose(const Tensor< T > &t, double eps, const std::vector< long > &dims)
decompose the input tensor into a TT representation
Definition tensortrain.h:335
TensorTrain< T > & operator+=(const TensorTrain< T > &rhs)
inplace addition of two Tensortrains; will increase ranks of this
Definition tensortrain.h:508
friend TensorTrain copy(const TensorTrain &other)
deep copy of the whole tensor
Definition tensortrain.h:288
bool is_operator() const
check if this is an operator (r,k',k,r)
Definition tensortrain.h:1170
friend TensorTrain copy(const TensorTrain &other, const std::array< Slice, TENSOR_MAXDIM > &s)
deep copy of a slice of the tensor
Definition tensortrain.h:308
bool zero_rank
true if rank is zero
Definition tensortrain.h:142
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> general_transform(const TensorTrain< R > &t, const Tensor< Q > c[])
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> transform(const TensorTrain< R > &t, const Tensor< Q > &c)
bool is_tensor() const
check if this is a tensor (r,k,r)
Definition tensortrain.h:1164
TensorTrain< T > & gaxpy(T alpha, const TensorTrain< T > &rhs, T beta)
Inplace generalized saxpy ... this = this*alpha + other*beta.
Definition tensortrain.h:524
bool is_zero_rank() const
if rank is zero
Definition tensortrain.h:1109
TensorTrain< T > & emul(const TensorTrain< T > &other)
compute the Hadamard product of two TensorTrains
Definition tensortrain.h:641
std::enable_if<!(TensorTypeData< T >::iscomplex or TensorTypeData< Q >::iscomplex), TENSOR_RESULT_TYPE(T, Q)>::type trace(const TensorTrain< Q > &B) const
Return the trace of two tensors, no complex conjugate involved.
Definition tensortrain.h:1239
long ranks(const int i) const
return the TT ranks for dimension i (to i+1)
Definition tensortrain.h:1122
TensorTrain()
empty constructor
Definition tensortrain.h:147
std::enable_if<(TensorTypeData< T >::iscomplex or TensorTypeData< Q >::iscomplex), TENSOR_RESULT_TYPE(T, Q)>::type trace(const TensorTrain< Q > &B) const
Definition tensortrain.h:1230
T * ptr(const int ivec=0)
Returns a pointer to the internal data.
Definition tensortrain.h:1150
void serialize(Archive &ar)
serialize this
Definition tensortrain.h:271
TensorTypeData< T >::float_scalar_type float_scalar_type
C++ typename of the floating point type associated with scalar real type.
Definition tensortrain.h:136
TensorTrain< T > & make_tensor()
convert this into a tensor representation (r,k,r)
Definition tensortrain.h:1175
TensorTrain(const Tensor< T > &t, double eps)
ctor for a TensorTrain, with the tolerance eps
Definition tensortrain.h:162
TensorTrain & operator=(const TensorTrain &other)
assigment operator
Definition tensortrain.h:245
Traits class to specify support of numeric types.
Definition type_data.h:56
A tensor is a multidimensional array.
Definition tensor.h:317
Tensor< T > swapdim(long idim, long jdim)
Returns new view/tensor swaping dimensions i and j.
Definition tensor.h:1605
Tensor< T > reshape(int ndimnew, const long *d)
Returns new view/tensor reshaping size/number of dimensions to conforming tensor.
Definition tensor.h:1384
TensorTypeData< T >::scalar_type scalar_type
C++ typename of the real type associated with a complex type.
Definition tensor.h:409
Tensor< T > cycledim(long nshift, long start, long end)
Returns new view/tensor cycling the sub-dimensions (start,...,end) with shift steps.
Definition tensor.h:1641
Tensor< T > fusedim(long i)
Returns new view/tensor fusing contiguous dimensions i and i+1.
Definition tensor.h:1587
Tensor< T > flat()
Returns new view/tensor rehshaping to flat (1-d) tensor.
Definition tensor.h:1555
static const double R
Definition csqrt.cc:46
Correspondence between C++ and Fortran types.
auto T(World &world, response_space &f) -> response_space
Definition global_functions.cc:28
archive_array< T > wrap(const T *, unsigned int)
Factory function to wrap a dynamically allocated pointer as a typed archive_array.
Definition archive.h:914
void inner_result(const Tensor< T > &left, const Tensor< Q > &right, long k0, long k1, Tensor< TENSOR_RESULT_TYPE(T, Q) > &result)
Accumulate inner product into user provided, contiguous, correctly sized result tensor.
Definition tensor.h:2311
const double beta
Definition gygi_soltion.cc:62
static double u(double r, double c)
Definition he.cc:20
Tensor< double > op(const Tensor< double > &x)
Definition kain.cc:508
#define MADNESS_EXCEPTION(msg, value)
Macro for throwing a MADNESS exception.
Definition madness_exception.h:119
#define MADNESS_ASSERT(condition)
Assert a condition that should be free of side-effects since in release builds this might be a no-op.
Definition madness_exception.h:134
#define MADNESS_CHECK_THROW(condition, msg)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:207
Namespace for all elements and tools of MADNESS.
Definition DFParameters.h:10
void svd_result(Tensor< T > &a, Tensor< T > &U, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &VT, Tensor< T > &work)
same as svd, but it optimizes away the tensor construction: a = U * diag(s) * VT
Definition lapack.cc:773
GenTensor< TENSOR_RESULT_TYPE(R, Q)> general_transform(const GenTensor< R > &t, const Tensor< Q > c[])
Definition gentensor.h:274
void lq_result(Tensor< T > &A, Tensor< T > &R, Tensor< T > &tau, Tensor< T > &work, bool do_qr)
compute the LQ decomposition of the matrix A = L Q
Definition lapack.cc:1320
TensorTrain< T > tt_identity(const long ndim, const long k)
compute the n-D identity operator with k elements per dimension
Definition tensortrain.h:1609
long rank_revealing_decompose(Tensor< T > &A, Tensor< T > &U, const double thresh, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &scr)
Definition tensortrain.h:66
std::vector< Function< TENSOR_RESULT_TYPE(T, R), NDIM > > transform(World &world, const std::vector< Function< T, NDIM > > &v, const Tensor< R > &c, bool fence=true)
Transforms a vector of functions according to new[i] = sum[j] old[j]*c[j,i].
Definition vmra.h:731
static double r2(const coord_3d &x)
Definition smooth.h:45
static const Slice _(0,-1, 1)
std::enable_if< std::is_base_of< ProjectorBase, projT >::value, OuterProjector< projT, projQ > >::type outer(const projT &p0, const projQ &p1)
Definition projector.h:457
void svd(const Tensor< T > &a, Tensor< T > &U, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &VT)
Compute the singluar value decomposition of an n-by-m matrix using *gesvd.
Definition lapack.cc:739
void print(const T &t, const Ts &... ts)
Print items to std::cout (items separated by spaces) and terminate with a new line.
Definition print.h:226
response_space apply(World &world, std::vector< std::vector< std::shared_ptr< real_convolution_3d > > > &op, response_space &f)
Definition basic_operators.cc:43
static const int kmax
Definition twoscale.cc:52
double inner(response_space &a, response_space &b)
Definition response_functions.h:639
GenTensor< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const GenTensor< R > &t, const Tensor< Q > &c, const int axis)
Definition lowranktensor.h:1106
Function< T, NDIM > copy(const Function< T, NDIM > &f, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool fence=true)
Create a new copy of the function with different distribution and optional fence.
Definition mra.h:2096
static long abs(long a)
Definition tensor.h:218
static const double b
Definition nonlinschro.cc:119
static const double d
Definition nonlinschro.cc:121
double Q(double a)
Definition relops.cc:20
static const double c
Definition relops.cc:10
static const double m
Definition relops.cc:9
static const double L
Definition rk.cc:46
static const double thresh
Definition rk.cc:45
static const long k
Definition rk.cc:44
Defines and implements most of Tensor.
Prototypes for a partial interface from Tensor to LAPACK.
#define TENSOR_MAXDIM
Definition tensor_macros.h:194
double aa
Definition testbsh.cc:68
static const double alpha
Definition testcosine.cc:10
std::size_t axis
Definition testpdiff.cc:59
double k0
Definition testperiodic.cc:66
double k2
Definition testperiodic.cc:68
double k1
Definition testperiodic.cc:67
static const int maxR
Definition testperiodicdft.cc:34
#define TENSOR_RESULT_TYPE(L, R)
This macro simplifies access to TensorResultType.
Definition type_data.h:205
const double R1
Definition vnucso.cc:83
const double R2
Definition vnucso.cc:84
const double a1
Definition vnucso.cc:85