71 const long n=
A.dim(0);
72 const long m=
A.dim(1);
73 const long rmax=std::min(n,
m);
74 long lwork=std::max(3*std::min(
m,n)+std::max(
m,n),5*std::min(
m,n));
97 for (
int j=0; j<
m; ++j) {
98 for (
int i=0; i<
R1; ++i) {
165 if (t.
size()==0)
return;
227 std::vector<long>
d(
c.size());
229 for (
long i=1; i<
d.size(); ++i)
d[i]=
core[i].
dim(1);
270 template <
typename Archive>
277 long core_size=
core.size();
281 if (core_size>0) ar &
core;
310 std::vector<long>
dims(s.size());
311 for (
int i=0; i<
dims.size(); ++i)
dims[i]=s[i].end-s[i].start+1;
315 const long nd=other.
ndim();
317 result.
core.resize(nd);
322 for (
long i=1; i<nd-1; ++i) {
336 const std::vector<long>&
dims) {
339 eps=eps/sqrt(
dims.size()-1);
344 for (
long i=0; i<
dims.size(); ++i) {
346 rmax=std::max(rmax,std::min(Udim0,t.
size()/Udim0));
353 for (
int i=0, j=
dims.size()-1; i<=j; ++i, --j) {
362 long max1=std::max(n1,m1);
363 long max2=std::max(n2,m2);
364 long min1=std::min(n1,m1);
365 long min2=std::min(n2,m2);
367 lwork=std::max(lwork,std::max(3*min1+max1,5*min1));
368 lwork=std::max(lwork,std::max(3*min2+max2,5*min2));
387 std::vector<long> r(
dims.size()+1,0l);
388 r[0] = r[
dims.size()] = 1;
390 for (std::size_t
d=1;
d<
dims.size(); ++
d) {
395 const long d1=r[
d-1]*
k;
396 c=
c.reshape(d1,
c.size()/d1);
397 const long rmax=std::min(
c.dim(0),
c.dim(1));
412 const long rank=r[
d];
420 for (
long i=0; i<
c.dim(0); ++i)
421 for (
long j=0; j<
c.dim(1); ++j)
422 for (
long k=0;
k<rmax; ++
k)
423 b(i,j) += uu(i,
k) *
T(s(
k)) * vtt(
k,j);
425 print(
"b.conforms c",
b.conforms(
c));
426 print(
"early error:",
b.absmax());
438 .reshape(
c.dim(0),rmax)(
_,
Slice(0,rank-1)));
444 for (
int i=0; i<rank; ++i) {
445 for (
int j=0; j<
c.dim(1); ++j) {
488 for (
int i=1; i<
ndim(); ++i)
core[i]=1.0;
499 result.
scale(factor);
529 if (this->zero_rank or (
alpha==0.0)) {
533 }
else if (
ndim()==1) {
541 long k=
core[0].dim(0);
542 long r1_this=
core[0].dim(1);
543 long r1_rhs=rhs.
core[0].dim(1);
551 for (std::size_t i=1; i<
core.size()-1; ++i) {
553 long r0_this=
core[i].dim(0);
554 long r0_rhs=rhs.
core[i].dim(0);
555 long k=
core[i].dim(1);
556 long r1_this=
core[i].dim(2);
557 long r1_rhs=rhs.
core[i].dim(2);
558 Tensor<T> core_new(r0_this+r0_rhs,
k,r1_this+r1_rhs);
560 core_new(
Slice(r0_this,r0_this+r0_rhs-1),
_,
Slice(r1_this,r1_this+r1_rhs-1))=rhs.
core[i];
567 long r0_this=
core[
d].dim(0);
568 long r0_rhs=rhs.
core[
d].dim(0);
572 core_new(
Slice(r0_this,r0_this+r0_rhs-1),
_)=rhs.
core[
d];
590 if (this->zero_rank) {
599 long r1_rhs=rhs.
core[0].dim(1);
602 if (r1_this>0) core_new(
_,
Slice(0,r1_this-1))=
core[0];
603 if (r1_rhs>0) core_new(s1[0],
Slice(r1_this,r1_this+r1_rhs-1))=
beta*rhs.
core[0](s2[0],
_);
608 for (std::size_t i=1; i<
core.size()-1; ++i) {
611 long r0_rhs=rhs.
core[i].dim(0);
614 long r1_rhs=rhs.
core[i].dim(2);
615 Tensor<T> core_new(r0_this+r0_rhs,
k,r1_this+r1_rhs);
616 if (r1_this>0) core_new(
Slice(0,r0_this-1),
_,
Slice(0,r1_this-1))=
core[i];
617 if (r1_rhs>0) core_new(
Slice(r0_this,r0_this+r0_rhs-1),s1[i],
Slice(r1_this,r1_this+r1_rhs-1))=rhs.
core[i](
_,s2[i],
_);
625 long r0_rhs=rhs.
core[
d].dim(0);
628 if (r0_this>0) core_new(
Slice(0,r0_this-1),
_)=
core[
d];
629 if (r0_rhs>0) core_new(
Slice(r0_this,r0_this+r0_rhs-1),s1[
d])=rhs.
core[
d](
_,s2[
d]);
648 std::vector<long> cranks(
ndim()-1);
652 for (
int i=0; i<
ndim()-2; ++i) {
653 long size=cranks[i]*cranks[i+1]*
dim(i);
662 }
else if (
ndim()==1) {
673 for (
int k=0;
k<
dim(0); ++
k) {
681 for (
int i=1; i<
ndim()-1; ++i) {
683 for (
int k=0;
k<
dim(i); ++
k) {
694 const long n=
ndim()-1;
696 for (
int k=0;
k<
dim(n); ++
k) {
721 const int index=
core[i].ndim()-2;
756 std::vector<long> newdims(this->
ndim()+1);
757 for (
long i=0; i<idim; ++i) newdims[i]=this->
dim(i);
760 for (
long i=idim+1; i<
ndim(); ++i) newdims[i+1]=
dim(i);
767 long r1= (idim==0) ? 1 :
ranks(idim-1);
781 std::vector<long> newdims(this->
ndim()+1);
782 for (
long i=0; i<idim; ++i) newdims[i]=this->
dim(i);
785 for (
long i=idim+1; i<
ndim(); ++i) newdims[i+1]=
dim(i);
790 for (
int ii=0; ii<r; ++ii) {
791 for (
int j=0; j<VT.
dim(1); ++j) {
796 for (
long ii=0; ii<idim; ++ii) result.
core.push_back(
copy(
core[ii]));
799 for (
long ii=idim+1; ii<
ndim(); ++ii) result.
core.push_back(
core[ii]);
802 if (result.
core.front().ndim()==3) result.
core.front()=result.
core.front().fusedim(0);
803 if (result.
core.back().ndim()==3) result.
core.back()=result.
core.back().fusedim(1);
828 typename std::vector<Tensor<T> >::const_iterator it;
830 for (it=++
core.begin(); it!=
core.end(); ++it) {
831 result=
inner(result,*it);
832 if (flat) result=result.
fusedim(0);
849 typename std::vector<Tensor<T> >::const_iterator it1, it2;
852 for (it1=++
core.begin(), it2=--(--
core.end()); it1<it2; ++it1, --it2) {
863 }
else if (
ndim()==4) {
866 }
else if (
ndim()==6) {
880 template<
typename R=T>
881 typename std::enable_if<!std::is_arithmetic<R>::value,
void>
::type
891 template<
typename R=T>
892 typename std::enable_if<std::is_arithmetic<R>::value,
void>
::type
905 eps=eps/sqrt(this->
ndim());
911 long rmax =
core[0].dim(1);
913 for(
size_t i=1;i<
core.size();i++){
914 rmax = std::max(rmax,
core[i].
dim(0));
920 long max_rk = std::max(rmax,
kmax);
921 long lq_work_dim = 2*max_rk+(max_rk+1)*64;
927 const long n_dim =
core.back().ndim();
928 for (
int i=0; i<n_dim; ++i) dimensions[i]=
core.back().dim(i);
930 const long r0 =
core.back().dim(0);
931 const long r1 =
core.back().size()/r0;
932 core.back()=
core.back().reshape(r0,r1);
937 long r_rows= (
core.back().dim(1)>=
core.back().dim(0)) ?
core.back().dim(0) :
core.back().dim(1);
938 long r_cols=
core.back().dim(0);
945 dimensions[0]=std::min(dimensions[0],
core.back().dim(0));
946 core.back()=
core.back().reshape(n_dim,dimensions);
953 for (std::size_t
d=
core.size()-2;
d>0; --
d) {
957 for (
int i=0; i<
ndim; ++i) dimensions[i]=
core[
d].
dim(i);
960 const long r0=
core[
d].dim(0);
961 const long r1=
core[
d].size()/r0;
971 long r_cols=
core[
d].dim(0);
984 dimensions[0]=std::min(r0,
core[
d].
dim(0));
994 long k =std::min<long>(
m,n);
995 long svd_buffer_size = std::max<long>(3*std::min<long>(
m,n)+std::max<long>(
m,n),5*std::min<long>(
m,n)-4)*32;
1002 for (std::size_t
d=0;
d<
core.size()-1; ++
d) {
1006 for (
int i=0; i<
ndim; ++i) dimensions[i]=
core[
d].
dim(i);
1022 long du =
core[
d].dim(0);
1023 long dv =
core[
d].dim(1);
1026 U_buffer = U_buffer.
flat();
1032 if (r_truncate==0) {
1041 dimensions[
ndim-1]=r_truncate;
1044 for (
int i=0; i<VT.
dim(0); ++i) {
1045 for (
int j=0; j<VT.
dim(1); ++j) {
1065 typename std::vector<Tensor<T> >::const_iterator it;
1066 for (it=
core.begin(); it!=
core.end(); ++it) n+=it->size();
1072 long n=this->
size()*
sizeof(
T);
1094 if (
core.size()<2)
return true;
1096 for (
int d=2;
d<
ndim(); ++
d) {
1101 for (
int i=0; i<
c.ndim(); ++i)
size*=
c.dim(i);
1102 if (
size!=
c.size())
return false;
1103 if (not
c.iscontiguous())
return false;
1113 if (
ndim()==0)
return std::vector<long>(1,0);
1116 std::vector<long> r(
core.size()-1);
1117 for (std::size_t i=0; i<r.size(); ++i) r[i]=
core[i+1].
dim(0);
1124 if (i<
core.size()-1) {
1151 if (
core.size())
return core[ivec].ptr();
1158 const T*
ptr(
const int ivec=0)
const {
1159 if (
core.size())
return core[ivec].ptr();
1178 long nd=this->
ndim();
1179 core[0]=
core[0].fusedim(0);
1190 long nd=this->
ndim();
1192 long k2=core[0].dim(0);
1204 for (
int i=1; i<nd-1; ++i) {
1251 if (
A.zero_rank or
B.zero_rank)
return resultT(0.0);
1253 return A.core[0].trace(
B.core[0]);
1257 long size1=
A.ranks(0)*
B.ranks(0);
1258 long size2=
B.ranks(
ndim()-2)*
A.dim(
ndim()-1);
1260 for (
int d=1;
d<
A.ndim()-1; ++
d) {
1261 size1=std::max(size1,
A.ranks(
d)*
B.ranks(
d));
1262 size2=std::max(size2,
A.ranks(
d)*
B.ranks(
d-1)*
A.dim(
d));
1268 for (
int d=0;
d<
A.ndim()-1; ++
d) {
1275 long rA= (
d==0) ?
A.core[
d].dim(1) : Aprime.
dim(2);
1276 long rB= (
d==0) ?
B.core[
d].dim(1) :
B.core[
d].dim(2);
1278 if (
d>0) tmp1(
Slice(0,rA*rB-1))=0.0;
1296 long d2=
A.core[
d+1].dim(1);
1297 long d3=
A.core[
d+1].dim(2);
1302 if (
d>0) tmp2(
Slice(0,d1*d2*d3-1))=0.0;
1309 resultT result=Aprime.trace(
B.core[
ndim()-1]);
1314 template <
typename R,
typename Q>
1318 template <
typename R,
typename Q>
1322 template <
typename R,
typename Q>
1326 template <
typename R,
typename Q>
1337 template <
class T,
class Q>
1346 const long ndim=t.
ndim();
1350 result.
core.resize(ndim);
1359 for (
int d=1;
d<ndim-1; ++
d) size=std::max(size,t.
core[
d].size());
1362 for (
int d=1;
d<ndim-1; ++
d) {
1363 long r1=t.
core[
d].dim(0);
1364 long i2=t.
core[
d].dim(1);
1368 if (
d>1) tmp(
Slice(0,r1*i2*
r2-1))=0.0;
1386 template <
class T,
class Q>
1395 const long ndim=t.
ndim();
1399 result.
core.resize(ndim);
1402 if (ndim>1) result.
core[ndim-1]=
inner(t.
core[ndim-1],
c[ndim-1],1,0);
1408 for (
int d=1;
d<ndim-1; ++
d) size=std::max(size,t.
core[
d].size());
1411 for (
int d=1;
d<ndim-1; ++
d) {
1412 long r1=t.
core[
d].dim(0);
1413 long i2=t.
core[
d].dim(1);
1417 if (
d>1) tmp(
Slice(0,r1*i2*
r2-1))=0.0;
1434 template <
class T,
class Q>
1443 const long ndim=t.
ndim();
1451 }
else if (
axis==ndim-1) {
1470 template <
class T,
class Q>
1477 const long nd=t.
ndim();
1482 std::vector<Tensor<resultT> >
B(t.
ndim());
1488 for (
int i=0; i<nd-1; ++i) {
1489 maxk=std::max(maxk,t.
dim(i));
1490 maxr_t=std::max(t.
ranks(i),maxr_t);
1491 maxr_op=std::max(
op.ranks(i),maxr_op);
1495 for (
int i=0, j=nd-1; i<j; ++i, --j) {
1499 long maxn=0, maxm=0;
1502 long rR=R11*t.
ranks(1);
1504 for (
int i=1; i<nd-2; ++i) {
1507 R11=std::min(R11,maxr_r);
1511 maxr=std::max(maxr,
r2);
1521 if (maxm*maxn>5e7) {
1522 print(
"huge scratch spaces!! ",maxn*maxm/1024/1024,
"MByte");
1524 long lscr=std::max(3*std::min(maxm,maxn)+std::max(maxm,maxn),
1525 5*std::min(maxm,maxn)) + maxn*maxm;
1536 const long r0=t.
ranks(0l);
1537 const long q0=
op.get_core(0).
dim(2);
1538 const long k0=t.
dim(0);
1545 B[0]=
B[0].reshape(
k0,
R);
1551 for (
int d=1;
d<nd; ++
d) {
1608 template<
typename T>
1611 for (
int i=0; i<
k; ++i)
id(i,i)=1.0;
1612 id=
id.reshape(1,
k,
k,1);
1613 std::vector<Tensor<T> > cores(ndim,
id);
1629 template <
class T,
class Q>
1639 std::vector<long> dims(t1.
ndim()+t2.
ndim());
1640 for (
int i=0; i<t1.
ndim(); ++i) dims[i]=t1.
dim(i);
1641 for (
int i=0; i<t2.
ndim(); ++i) dims[t1.
ndim()+i]=t2.
dim(i);
1651 long core_dim=t1.
core.back().ndim();
1652 long k1=t1.
core.back().dim(core_dim-1);
1653 long k2=t2.
core.front().dim(0);
Interface templates for the archives (serialization).
C++ interface to LAPACK, either directly via Fortran API (see clapack_fortran.h) or via LAPACKE (see ...
Definition test_ar.cc:118
Definition test_ar.cc:141
Definition test_ar.cc:170
The base class for tensors defines generic capabilities.
Definition basetensor.h:85
long dim(int i) const
Returns the size of dimension i.
Definition basetensor.h:147
const long * dims() const
Returns the array of tensor dimensions.
Definition basetensor.h:153
long _stride[TENSOR_MAXDIM]
Increment between elements in each dimension.
Definition basetensor.h:97
long _size
Number of elements in the tensor.
Definition basetensor.h:93
void set_dims_and_size(long nd, const long d[])
Definition basetensor.h:99
long _id
Id from TensorTypeData<T> in type_data.h.
Definition basetensor.h:95
void splitdim_inplace(long i, long dimi0, long dimi1)
Splits dimension i.
Definition basetensor.cc:88
void fusedim_inplace(long i)
Fuses dimensions i and i+1.
Definition basetensor.cc:107
long _dim[TENSOR_MAXDIM]
Size of each dimension.
Definition basetensor.h:96
long ndim() const
Returns the number of dimensions in the tensor.
Definition basetensor.h:144
long size() const
Returns the number of elements in the tensor.
Definition basetensor.h:138
long _ndim
Number of dimensions (-1=invalid; 0=not supported; >0=tensor)
Definition basetensor.h:94
static int max_sigma(const double &thresh, const long &rank, const Tensor< double > &w)
Definition srconf.h:109
A slice defines a sub-range or patch of a dimension.
Definition slice.h:103
Definition tensortrain.h:123
void zero_me()
turn this into an empty tensor with all cores properly shaped
Definition tensortrain.h:470
TensorTrain(const TensorTrain &other)
copy constructor, shallow
Definition tensortrain.h:238
const Tensor< T > & get_core(const int i) const
const reference to the internal core
Definition tensortrain.h:1222
bool verify() const
return the dimensions of this tensor
Definition tensortrain.h:1093
TensorTrain(const long &ndims, const long *dims)
ctor for a TensorTrain, set up only the dimensions, no data
Definition tensortrain.h:198
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> outer(const TensorTrain< R > &t1, const TensorTrain< Q > &t2)
std::enable_if< std::is_arithmetic< R >::value, void >::type truncate(double eps)
recompress and truncate this TT representation
Definition tensortrain.h:893
TensorTrain< T > operator*(const T &factor) const
return this multiplied by a scalar
Definition tensortrain.h:497
friend class TensorTrain
Definition tensortrain.h:127
void set_size_and_dim(std::vector< long > dims)
Definition tensortrain.h:265
TensorTrain< T > splitdim(long idim, long k1, long k2, const double eps) const
Definition tensortrain.h:748
void scale(T fac)
scale this by a number
Definition tensortrain.h:1143
void two_mode_representation(Tensor< T > &U, Tensor< T > &VT, Tensor< typename Tensor< T >::scalar_type > &s) const
construct a two-mode representation (aka unnormalized SVD)
Definition tensortrain.h:842
std::vector< Tensor< T > > core
Definition tensortrain.h:140
TensorTypeData< T >::scalar_type scalar_type
C++ typename of the real type associated with a complex type.
Definition tensortrain.h:133
long real_size() const
return the size of this instance, including static memory for vectors and such
Definition tensortrain.h:1071
std::enable_if<!std::is_arithmetic< R >::value, void >::type truncate(double eps)
recompress and truncate this TT representation
Definition tensortrain.h:882
void zero_me(const std::vector< long > &dim)
turn this into an empty tensor with all cores properly shaped
Definition tensortrain.h:475
float_scalar_type normf() const
returns the Frobenius norm
Definition tensortrain.h:1135
TensorTrain< T > & operator-=(const TensorTrain< T > &rhs)
inplace subtraction of two Tensortrains; will increase ranks of this
Definition tensortrain.h:518
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const TensorTrain< R > &t, const Tensor< Q > &c, const int axis)
Tensor< T > reconstruct(const bool flat=false) const
reconstruct this to a full representation
Definition tensortrain.h:814
Tensor< T > & get_core(const int i)
reference to the internal core
Definition tensortrain.h:1216
long size() const
return the number of coefficients in all core tensors
Definition tensortrain.h:1062
void fusedim(const long i)
merge two dimensions into one
Definition tensortrain.h:715
TensorTrain(const Tensor< T > &t, double eps, const std::vector< long > dims)
ctor for a TensorTrain, with the tolerance eps
Definition tensortrain.h:188
TensorTrain(const std::vector< Tensor< T > > &c)
ctor for a TensorTrain, with core tensors explicitly given
Definition tensortrain.h:225
T type
C++ typename of this tensor.
Definition tensortrain.h:130
TensorTrain< T > & operator=(const T &number)
assign a number to this tensor
Definition tensortrain.h:480
TensorTrain(const std::vector< long > &dims)
ctor for a TensorTrain, set up only the dimensions, no data
Definition tensortrain.h:204
std::vector< long > ranks() const
return the TT ranks
Definition tensortrain.h:1112
TensorTrain< T > & make_operator()
convert this into an operator representation (r,k',k,r)
Definition tensortrain.h:1187
TensorTrain< T > & gaxpy(const std::array< Slice, TENSOR_MAXDIM > &s1, const TensorTrain< T > &rhs, T beta, const std::array< Slice, TENSOR_MAXDIM > &s2)
Inplace generalized saxpy with slices and without alpha.
Definition tensortrain.h:583
const T * ptr(const int ivec=0) const
Returns a pointer to the internal data.
Definition tensortrain.h:1158
void decompose(const Tensor< T > &t, double eps, const std::vector< long > &dims)
decompose the input tensor into a TT representation
Definition tensortrain.h:335
TensorTrain< T > & operator+=(const TensorTrain< T > &rhs)
inplace addition of two Tensortrains; will increase ranks of this
Definition tensortrain.h:508
friend TensorTrain copy(const TensorTrain &other)
deep copy of the whole tensor
Definition tensortrain.h:288
bool is_operator() const
check if this is an operator (r,k',k,r)
Definition tensortrain.h:1170
friend TensorTrain copy(const TensorTrain &other, const std::array< Slice, TENSOR_MAXDIM > &s)
deep copy of a slice of the tensor
Definition tensortrain.h:308
bool zero_rank
true if rank is zero
Definition tensortrain.h:142
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> general_transform(const TensorTrain< R > &t, const Tensor< Q > c[])
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> transform(const TensorTrain< R > &t, const Tensor< Q > &c)
bool is_tensor() const
check if this is a tensor (r,k,r)
Definition tensortrain.h:1164
TensorTrain< T > & gaxpy(T alpha, const TensorTrain< T > &rhs, T beta)
Inplace generalized saxpy ... this = this*alpha + other*beta.
Definition tensortrain.h:524
bool is_zero_rank() const
if rank is zero
Definition tensortrain.h:1109
TensorTrain< T > & emul(const TensorTrain< T > &other)
compute the Hadamard product of two TensorTrains
Definition tensortrain.h:641
std::enable_if<!(TensorTypeData< T >::iscomplex or TensorTypeData< Q >::iscomplex), TENSOR_RESULT_TYPE(T, Q)>::type trace(const TensorTrain< Q > &B) const
Return the trace of two tensors, no complex conjugate involved.
Definition tensortrain.h:1239
long ranks(const int i) const
return the TT ranks for dimension i (to i+1)
Definition tensortrain.h:1122
TensorTrain()
empty constructor
Definition tensortrain.h:147
std::enable_if<(TensorTypeData< T >::iscomplex or TensorTypeData< Q >::iscomplex), TENSOR_RESULT_TYPE(T, Q)>::type trace(const TensorTrain< Q > &B) const
Definition tensortrain.h:1230
T * ptr(const int ivec=0)
Returns a pointer to the internal data.
Definition tensortrain.h:1150
void serialize(Archive &ar)
serialize this
Definition tensortrain.h:271
TensorTypeData< T >::float_scalar_type float_scalar_type
C++ typename of the floating point type associated with scalar real type.
Definition tensortrain.h:136
TensorTrain< T > & make_tensor()
convert this into a tensor representation (r,k,r)
Definition tensortrain.h:1175
TensorTrain(const Tensor< T > &t, double eps)
ctor for a TensorTrain, with the tolerance eps
Definition tensortrain.h:162
TensorTrain & operator=(const TensorTrain &other)
assigment operator
Definition tensortrain.h:245
Traits class to specify support of numeric types.
Definition type_data.h:56
A tensor is a multidimensional array.
Definition tensor.h:317
Tensor< T > swapdim(long idim, long jdim)
Returns new view/tensor swaping dimensions i and j.
Definition tensor.h:1605
Tensor< T > reshape(int ndimnew, const long *d)
Returns new view/tensor reshaping size/number of dimensions to conforming tensor.
Definition tensor.h:1384
TensorTypeData< T >::scalar_type scalar_type
C++ typename of the real type associated with a complex type.
Definition tensor.h:409
Tensor< T > cycledim(long nshift, long start, long end)
Returns new view/tensor cycling the sub-dimensions (start,...,end) with shift steps.
Definition tensor.h:1641
Tensor< T > fusedim(long i)
Returns new view/tensor fusing contiguous dimensions i and i+1.
Definition tensor.h:1587
Tensor< T > flat()
Returns new view/tensor rehshaping to flat (1-d) tensor.
Definition tensor.h:1555
static const double R
Definition csqrt.cc:46
Correspondence between C++ and Fortran types.
auto T(World &world, response_space &f) -> response_space
Definition global_functions.cc:28
archive_array< T > wrap(const T *, unsigned int)
Factory function to wrap a dynamically allocated pointer as a typed archive_array.
Definition archive.h:914
void inner_result(const Tensor< T > &left, const Tensor< Q > &right, long k0, long k1, Tensor< TENSOR_RESULT_TYPE(T, Q) > &result)
Accumulate inner product into user provided, contiguous, correctly sized result tensor.
Definition tensor.h:2311
const double beta
Definition gygi_soltion.cc:62
static double u(double r, double c)
Definition he.cc:20
Tensor< double > op(const Tensor< double > &x)
Definition kain.cc:508
#define MADNESS_EXCEPTION(msg, value)
Macro for throwing a MADNESS exception.
Definition madness_exception.h:119
#define MADNESS_ASSERT(condition)
Assert a condition that should be free of side-effects since in release builds this might be a no-op.
Definition madness_exception.h:134
#define MADNESS_CHECK_THROW(condition, msg)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:207
Namespace for all elements and tools of MADNESS.
Definition DFParameters.h:10
void svd_result(Tensor< T > &a, Tensor< T > &U, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &VT, Tensor< T > &work)
same as svd, but it optimizes away the tensor construction: a = U * diag(s) * VT
Definition lapack.cc:773
GenTensor< TENSOR_RESULT_TYPE(R, Q)> general_transform(const GenTensor< R > &t, const Tensor< Q > c[])
Definition gentensor.h:274
void lq_result(Tensor< T > &A, Tensor< T > &R, Tensor< T > &tau, Tensor< T > &work, bool do_qr)
compute the LQ decomposition of the matrix A = L Q
Definition lapack.cc:1320
TensorTrain< T > tt_identity(const long ndim, const long k)
compute the n-D identity operator with k elements per dimension
Definition tensortrain.h:1609
long rank_revealing_decompose(Tensor< T > &A, Tensor< T > &U, const double thresh, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &scr)
Definition tensortrain.h:66
std::vector< Function< TENSOR_RESULT_TYPE(T, R), NDIM > > transform(World &world, const std::vector< Function< T, NDIM > > &v, const Tensor< R > &c, bool fence=true)
Transforms a vector of functions according to new[i] = sum[j] old[j]*c[j,i].
Definition vmra.h:731
static double r2(const coord_3d &x)
Definition smooth.h:45
static const Slice _(0,-1, 1)
std::enable_if< std::is_base_of< ProjectorBase, projT >::value, OuterProjector< projT, projQ > >::type outer(const projT &p0, const projQ &p1)
Definition projector.h:457
void svd(const Tensor< T > &a, Tensor< T > &U, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &VT)
Compute the singluar value decomposition of an n-by-m matrix using *gesvd.
Definition lapack.cc:739
void print(const T &t, const Ts &... ts)
Print items to std::cout (items separated by spaces) and terminate with a new line.
Definition print.h:226
response_space apply(World &world, std::vector< std::vector< std::shared_ptr< real_convolution_3d > > > &op, response_space &f)
Definition basic_operators.cc:43
static const int kmax
Definition twoscale.cc:52
double inner(response_space &a, response_space &b)
Definition response_functions.h:639
GenTensor< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const GenTensor< R > &t, const Tensor< Q > &c, const int axis)
Definition lowranktensor.h:1106
Function< T, NDIM > copy(const Function< T, NDIM > &f, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool fence=true)
Create a new copy of the function with different distribution and optional fence.
Definition mra.h:2096
static long abs(long a)
Definition tensor.h:218
static const double b
Definition nonlinschro.cc:119
static const double d
Definition nonlinschro.cc:121
double Q(double a)
Definition relops.cc:20
static const double c
Definition relops.cc:10
static const double m
Definition relops.cc:9
static const double L
Definition rk.cc:46
static const double thresh
Definition rk.cc:45
static const long k
Definition rk.cc:44
Defines and implements most of Tensor.
Prototypes for a partial interface from Tensor to LAPACK.
#define TENSOR_MAXDIM
Definition tensor_macros.h:194
double aa
Definition testbsh.cc:68
static const double alpha
Definition testcosine.cc:10
std::size_t axis
Definition testpdiff.cc:59
double k0
Definition testperiodic.cc:66
double k2
Definition testperiodic.cc:68
double k1
Definition testperiodic.cc:67
static const int maxR
Definition testperiodicdft.cc:34
#define TENSOR_RESULT_TYPE(L, R)
This macro simplifies access to TensorResultType.
Definition type_data.h:205
const double R1
Definition vnucso.cc:83
const double R2
Definition vnucso.cc:84
const double a1
Definition vnucso.cc:85