34 #ifndef TENSORTRAIN_H_
35 #define TENSORTRAIN_H_
71 const long n=A.dim(0);
72 const long m=A.dim(1);
73 const long rmax=std::min(n,m);
97 for (int j=0; j<m; ++j) {
98 for (int i=0; i<R1; ++i) {
165 if (t.size()==0) return;
227 std::vector<long> d(c.size());
229 for (long i=1; i<d.size(); ++i) d[i]=core[i].dim(1);
270 template <typename Archive>
277 long core_size=core.size();
281 if (core_size>0) ar & core;
310 std::vector<long> dims(s.size());
311 for (int i=0; i<dims.size(); ++i) dims[i]=s[i].end-s[i].start+1;
315 const long nd=other.ndim();
317 result.core.resize(nd);
322 for (long i=1; i<nd-1; ++i) {
336 const std::vector<long>& dims) {
339 eps=eps/sqrt(dims.size()-1);
344 for (long i=0; i<dims.size(); ++i) {
353 for (int i=0, j=dims.size()-1; i<=j; ++i, --j) {
364 long min1=std::min(n1,m1);
365 long min2=std::min(n2,m2);
387 std::vector<long> r(dims.size()+1,0l);
388 r[0] = r[dims.size()] = 1;
390 for (std::size_t d=1; d<dims.size(); ++d) {
395 const long d1=r[d-1]*k;
396 c=c.reshape(d1,c.size()/d1);
397 const long rmax=std::min(c.dim(0),c.dim(1));
412 const long rank=r[d];
420 for (long i=0; i<c.dim(0); ++i)
421 for (long j=0; j<c.dim(1); ++j)
422 for (long k=0; k<rmax; ++k)
423 b(i,j) += uu(i,k) * T(s(k)) * vtt(k,j);
425 print("b.conforms c",b.conforms(c));
426 print("early error:",b.absmax());
438 .reshape(c.dim(0),rmax)(_,Slice(0,rank-1)));
444 for (int i=0; i<rank; ++i) {
445 for (int j=0; j<c.dim(1); ++j) {
488 for (int i=1; i<ndim(); ++i) core[i]=1.0;
499 result.scale(factor);
529 if (this->zero_rank or (alpha==0.0)) {
533 } else if (ndim()==1) {
541 long k=core[0].dim(0);
542 long r1_this=core[0].dim(1);
543 long r1_rhs=rhs.core[0].dim(1);
551 for (std::size_t i=1; i<core.size()-1; ++i) {
553 long r0_this=core[i].dim(0);
554 long r0_rhs=rhs.core[i].dim(0);
555 long k=core[i].dim(1);
556 long r1_this=core[i].dim(2);
557 long r1_rhs=rhs.core[i].dim(2);
558 Tensor<T> core_new(r0_this+r0_rhs,k,r1_this+r1_rhs);
560 core_new(Slice(r0_this,r0_this+r0_rhs-1),_,Slice(r1_this,r1_this+r1_rhs-1))=rhs.core[i];
566 std::size_t d=core.size()-1;
567 long r0_this=core[d].dim(0);
568 long r0_rhs=rhs.core[d].dim(0);
572 core_new(Slice(r0_this,r0_this+r0_rhs-1),_)=rhs.core[d];
590 if (this->zero_rank) {
599 long r1_rhs=rhs.core[0].dim(1);
602 if (r1_this>0) core_new(_,Slice(0,r1_this-1))=core[0];
603 if (r1_rhs>0) core_new(s1[0],Slice(r1_this,r1_this+r1_rhs-1))=beta*rhs.core[0](s2[0],_);
608 for (std::size_t i=1; i<core.size()-1; ++i) {
611 long r0_rhs=rhs.core[i].dim(0);
614 long r1_rhs=rhs.core[i].dim(2);
615 Tensor<T> core_new(r0_this+r0_rhs,k,r1_this+r1_rhs);
616 if (r1_this>0) core_new(Slice(0,r0_this-1),_,Slice(0,r1_this-1))=core[i];
617 if (r1_rhs>0) core_new(Slice(r0_this,r0_this+r0_rhs-1),s1[i],Slice(r1_this,r1_this+r1_rhs-1))=rhs.core[i](_,s2[i],_);
623 std::size_t d=core.size()-1;
625 long r0_rhs=rhs.core[d].dim(0);
628 if (r0_this>0) core_new(Slice(0,r0_this-1),_)=core[d];
629 if (r0_rhs>0) core_new(Slice(r0_this,r0_this+r0_rhs-1),s1[d])=rhs.core[d](_,s2[d]);
648 std::vector<long> cranks(ndim()-1);
652 for (int i=0; i<ndim()-2; ++i) {
653 long size=cranks[i]*cranks[i+1]*dim(i);
663 } else if (ndim()==1) {
674 for (int k=0; k<dim(0); ++k) {
682 for (int i=1; i<ndim()-1; ++i) {
684 for (int k=0; k<dim(i); ++k) {
695 const long n=ndim()-1;
697 for (int k=0; k<dim(n); ++k) {
722 const int index=core[i].ndim()-2;
757 std::vector<long> newdims(this->ndim()+1);
758 for (long i=0; i<idim; ++i) newdims[i]=this->dim(i);
761 for (long i=idim+1; i<ndim(); ++i) newdims[i+1]=dim(i);
768 long r1= (idim==0) ? 1 : ranks(idim-1);
782 std::vector<long> newdims(this->ndim()+1);
783 for (long i=0; i<idim; ++i) newdims[i]=this->dim(i);
786 for (long i=idim+1; i<ndim(); ++i) newdims[i+1]=dim(i);
791 for (int ii=0; ii<r; ++ii) {
792 for (int j=0; j<VT.dim(1); ++j) {
797 for (long ii=0; ii<idim; ++ii) result.core.push_back(copy(core[ii]));
800 for (long ii=idim+1; ii<ndim(); ++ii) result.core.push_back(core[ii]);
803 if (result.core.front().ndim()==3) result.core.front()=result.core.front().fusedim(0);
804 if (result.core.back().ndim()==3) result.core.back()=result.core.back().fusedim(1);
829 typename std::vector<Tensor<T> >::const_iterator it;
831 for (it=++core.begin(); it!=core.end(); ++it) {
832 result=inner(result,*it);
833 if (flat) result=result.fusedim(0);
850 typename std::vector<Tensor<T> >::const_iterator it1, it2;
853 for (it1=++core.begin(), it2=--(--core.end()); it1<it2; ++it1, --it2) {
864 } else if (ndim()==4) {
867 } else if (ndim()==6) {
881 template<typename R=T>
882 typename std::enable_if<!std::is_arithmetic<R>::value, void>::type
892 template<typename R=T>
893 typename std::enable_if<std::is_arithmetic<R>::value, void>::type
906 eps=eps/sqrt(this->ndim());
912 long rmax = core[0].dim(1);
914 for (size_t i=1; i<core.size(); i++) {
922 long lq_work_dim = 2*max_rk+(max_rk+1)*64;
928 const long n_dim = core.back().ndim();
929 for (int i=0; i<n_dim; ++i) dimensions[i]=core.back().dim(i);
931 const long r0 = core.back().dim(0);
932 const long r1 = core.back().size()/r0;
933 core.back()=core.back().reshape(r0,r1);
938 long r_rows= (core.back().dim(1)>=core.back().dim(0)) ? core.back().dim(0) : core.back().dim(1);
939 long r_cols=core.back().dim(0);
946 dimensions[0]=std::min(dimensions[0],core.back().dim(0));
947 core.back()=core.back().reshape(n_dim,dimensions);
954 for (std::size_t d=core.size()-2; d>0; --d) {
958 for (int i=0; i<ndim; ++i) dimensions[i]=core[d].dim(i);
961 const long r0=core[d].dim(0);
962 const long r1=core[d].size()/r0;
972 long r_cols=core[d].dim(0);
985 dimensions[0]=std::min(r0,core[d].dim(0));
995 long k =std::min<long>(m,n);
996 long svd_buffer_size = std::max<long>(3*std::min<long>(m,n)+std::max<long>(m,n),5*std::min<long>(m,n)-4)*32;
1003 for (std::size_t d=0; d<core.size()-1; ++d) {
1007 for (int i=0; i<ndim; ++i) dimensions[i]=core[d].dim(i);
1023 long du = core[d].dim(0);
1024 long dv = core[d].dim(1);
1027 U_buffer = U_buffer.flat();
1033 if (r_truncate==0) {
1042 dimensions[ndim-1]=r_truncate;
1045 for (int i=0; i<VT.dim(0); ++i) {
1046 for (int j=0; j<VT.dim(1); ++j) {
1066 typename std::vector<Tensor<T> >::const_iterator it;
1067 for (it=core.begin(); it!=core.end(); ++it) n+=it->size();
1073 long n=this->size()*sizeof(T);
1095 if (core.size()<2) return true;
1097 for (int d=2; d<ndim(); ++d) {
1102 for (int i=0; i<c.ndim(); ++i) size*=c.dim(i);
1103 if (size!=c.size()) return false;
1104 if (not c.iscontiguous()) return false;
1114 if (ndim()==0) return std::vector<long>(1,0);
1117 std::vector<long> r(core.size()-1);
1118 for (std::size_t i=0; i<r.size(); ++i) r[i]=core[i+1].dim(0);
1125 if (i<core.size()-1) {
1152 if (core.size()) return core[ivec].ptr();
1159 const T* ptr(const int ivec=0) const {
1160 if (core.size()) return core[ivec].ptr();
1179 long nd=this->ndim();
1180 core[0]=core[0].fusedim(0);
1191 long nd=this->ndim();
1193 long k2=core[0].dim(0);
1205 for (int i=1; i<nd-1; ++i) {
1252 if (A.zero_rank or B.zero_rank) return resultT(0.0);
1254 return A.core[0].trace(B.core[0]);
1258 long size1=A.ranks(0)*B.ranks(0);
1259 long size2=B.ranks(ndim()-2)*A.dim(ndim()-1);
1261 for (int d=1; d<A.ndim()-1; ++d) {
1269 for (int d=0; d<A.ndim()-1; ++d) {
1276 long rA= (d==0) ? A.core[d].dim(1) : Aprime.dim(2);
1277 long rB= (d==0) ? B.core[d].dim(1) : B.core[d].dim(2);
1279 if (d>0) tmp1(Slice(0,rA*rB-1))=0.0;
1297 long d2=A.core[d+1].dim(1);
1298 long d3=A.core[d+1].dim(2);
1303 if (d>0) tmp2(Slice(0,d1*d2*d3-1))=0.0;
1310 resultT result=Aprime.trace(B.core[ndim()-1]);
1315 template <typename R, typename Q>
1319 template <typename R, typename Q>
1323 template <typename R, typename Q>
1327 template <typename R, typename Q>
1338 template <class T, class Q>
1347 const long ndim=t.ndim();
1351 result.core.resize(ndim);
1363 for (int d=1; d<ndim-1; ++d) {
1364 long r1=t.core[d].dim(0);
1365 long i2=t.core[d].dim(1);
1369 if (d>1) tmp(Slice(0,r1*i2*r2-1))=0.0;
1387 template <class T, class Q>
1396 const long ndim=t.ndim();
1400 result.core.resize(ndim);
1403 if (ndim>1) result.core[ndim-1]=inner(t.core[ndim-1],c[ndim-1],1,0);
1412 for (int d=1; d<ndim-1; ++d) {
1413 long r1=t.core[d].dim(0);
1414 long i2=t.core[d].dim(1);
1418 if (d>1) tmp(Slice(0,r1*i2*r2-1))=0.0;
1435 template <class T, class Q>
1444 const long ndim=t.ndim();
1452 } else if (axis==ndim-1) {
1471 template <class T, class Q>
1478 const long nd=t.ndim();
1483 std::vector<Tensor<resultT> > B(t.ndim());
1489 for (int i=0; i<nd-1; ++i) {
1496 for (int i=0, j=nd-1; i<j; ++i, --j) {
1500 long maxn=0, maxm=0;
1503 long rR=R11*t.ranks(1);
1505 for (int i=1; i<nd-2; ++i) {
1508 R11=std::min(R11,maxr_r);
1522 if (maxm*maxn>5e7) {
1523 print("huge scratch spaces!! ",maxn*maxm/1024/1024,"MByte");
1526 5*std::min(maxm,maxn)) + maxn*maxm;
1537 const long r0=t.ranks(0l);
1538 const long q0=op.get_core(0).dim(2);
1539 const long k0=t.dim(0);
1546 B[0]=B[0].reshape(k0,R);
1552 for (int d=1; d<nd; ++d) {
1609 template<typename T>
1612 for (int i=0; i<k; ++i) id(i,i)=1.0;
1613 id=id.reshape(1,k,k,1);
1614 std::vector<Tensor<T> > cores(ndim,id);
1630 template <class T, class Q>
1640 std::vector<long> dims(t1.ndim()+t2.ndim());
1641 for (int i=0; i<t1.ndim(); ++i) dims[i]=t1.dim(i);
1642 for (int i=0; i<t2.ndim(); ++i) dims[t1.ndim()+i]=t2.dim(i);
1652 long core_dim=t1.core.back().ndim();
1653 long k1=t1.core.back().dim(core_dim-1);
1654 long k2=t2.core.front().dim(0);
Interface templates for the archives (serialization).
C++ interface to LAPACK, either directly via Fortran API (see clapack_fortran.h) or via LAPACKE (see ...
The base class for tensors defines generic capabilities.
Definition: basetensor.h:85
long dim(int i) const
Returns the size of dimension i.
Definition: basetensor.h:147
long _stride[TENSOR_MAXDIM]
Increment between elements in each dimension.
Definition: basetensor.h:97
long _size
Number of elements in the tensor.
Definition: basetensor.h:93
void set_dims_and_size(long nd, const long d[])
Definition: basetensor.h:99
long _id
Id from TensorTypeData<T> in type_data.h.
Definition: basetensor.h:95
const long * dims() const
Returns the array of tensor dimensions.
Definition: basetensor.h:153
void splitdim_inplace(long i, long dimi0, long dimi1)
Splits dimension i.
Definition: basetensor.cc:88
void fusedim_inplace(long i)
Fuses dimensions i and i+1.
Definition: basetensor.cc:107
long _dim[TENSOR_MAXDIM]
Size of each dimension.
Definition: basetensor.h:96
long ndim() const
Returns the number of dimensions in the tensor.
Definition: basetensor.h:144
long size() const
Returns the number of elements in the tensor.
Definition: basetensor.h:138
long _ndim
Number of dimensions (-1=invalid; 0=not supported; >0=tensor)
Definition: basetensor.h:94
static int max_sigma(const double &thresh, const long &rank, const Tensor< double > &w)
Definition: srconf.h:109
A slice defines a sub-range or patch of a dimension.
Definition: slice.h:103
TensorTrain
Definition: tensortrain.h:123
std::vector< long > ranks() const
return the TT ranks
Definition: tensortrain.h:1113
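A minimal usage sketch (tt is a hypothetical, already existing madness::TensorTrain<double>; print is the madness helper documented further below):
  std::vector<long> r=tt.ranks();                 // one entry per bond, i.e. ndim()-1 values
  for (std::size_t i=0; i<r.size(); ++i) print("rank",i,r[i]);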
void zero_me()
turn this into an empty tensor with all cores properly shaped
Definition: tensortrain.h:470
TensorTrain(const TensorTrain &other)
copy constructor, shallow
Definition: tensortrain.h:238
bool verify() const
verify the consistency of the core tensors (dimensions, sizes, contiguity)
Definition: tensortrain.h:1094
TensorTrain(const long &ndims, const long *dims)
ctor for a TensorTrain, set up only the dimensions, no data
Definition: tensortrain.h:198
T * ptr(const int ivec=0)
Returns a pointer to the internal data.
Definition: tensortrain.h:1151
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> outer(const TensorTrain< R > &t1, const TensorTrain< Q > &t2)
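A sketch of the outer (direct) product; the tensors a and b and the accuracy 1.e-12 are hypothetical placeholders, and fillrandom() is the usual Tensor filler:
  Tensor<double> a(5,5), b(7,7);
  a.fillrandom(); b.fillrandom();
  TensorTrain<double> ta(a,1.e-12), tb(b,1.e-12);
  TensorTrain<double> tab=outer(ta,tb);           // 4-dimensional result with dims 5,5,7,7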
TensorTrain< T > & operator=(const T &number)
assign a number to this tensor
Definition: tensortrain.h:480
friend class TensorTrain
Definition: tensortrain.h:127
void set_size_and_dim(std::vector< long > dims)
Definition: tensortrain.h:265
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const TensorTrain< R > &t, const Tensor< Q > &c, const int axis)
TensorTrain< T > & make_operator()
convert this into an operator representation (r,k',k,r)
Definition: tensortrain.h:1188
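A sketch of switching between the two core layouts, assuming the mode sizes of the hypothetical TensorTrain<double> tt admit the (k',k) split:
  tt.make_operator();                             // cores reshaped to (r,k',k,r)
  MADNESS_ASSERT(tt.is_operator());
  tt.make_tensor();                               // back to the standard (r,k,r) layout
  MADNESS_ASSERT(tt.is_tensor());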
void scale(T fac)
scale this by a number
Definition: tensortrain.h:1144
void two_mode_representation(Tensor< T > &U, Tensor< T > &VT, Tensor< typename Tensor< T >::scalar_type > &s) const
construct a two-mode representation (aka unnormalized SVD)
Definition: tensortrain.h:843
std::vector< Tensor< T > > core
Definition: tensortrain.h:140
TensorTypeData< T >::scalar_type scalar_type
C++ typename of the real type associated with a complex type.
Definition: tensortrain.h:133
long real_size() const
return the size of this instance, including static memory for vectors and such
Definition: tensortrain.h:1072
void zero_me(const std::vector< long > &dim)
turn this into an empty tensor with all cores properly shaped
Definition: tensortrain.h:475
float_scalar_type normf() const
returns the Frobenius norm
Definition: tensortrain.h:1136
TensorTrain< T > operator*(const T &factor) const
return this multiplied by a scalar
Definition: tensortrain.h:497
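A short sketch combining the scalar multiply with the in-place scale() documented above (tt is a hypothetical TensorTrain<double>):
  TensorTrain<double> tt2=tt*2.0;                 // scaled copy returned by value
  tt.scale(0.5);                                  // in-place scaling of tt itself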
TensorTrain & operator=(const TensorTrain &other)
assignment operator
Definition: tensortrain.h:245
long size() const
return the number of coefficients in all core tensors
Definition: tensortrain.h:1063
const T * ptr(const int ivec=0) const
Returns a pointer to the internal data.
Definition: tensortrain.h:1159
void fusedim(const long i)
merge two dimensions into one
Definition: tensortrain.h:716
TensorTrain(const Tensor< T > &t, double eps, const std::vector< long > dims)
ctor for a TensorTrain, with the tolerance eps
Definition: tensortrain.h:188
TensorTrain(const std::vector< Tensor< T > > &c)
ctor for a TensorTrain, with core tensors explicitly given
Definition: tensortrain.h:225
T type
C++ typename of this tensor.
Definition: tensortrain.h:130
TensorTrain< T > & make_tensor()
convert this into a tensor representation (r,k,r)
Definition: tensortrain.h:1176
TensorTrain(const std::vector< long > &dims)
ctor for a TensorTrain, set up only the dimensions, no data
Definition: tensortrain.h:204
TensorTrain< T > & gaxpy(const std::array< Slice, TENSOR_MAXDIM > &s1, const TensorTrain< T > &rhs, T beta, const std::array< Slice, TENSOR_MAXDIM > &s2)
Inplace generalized saxpy with slices and without alpha.
Definition: tensortrain.h:583
void decompose(const Tensor< T > &t, double eps, const std::vector< long > &dims)
decompose the input tensor into a TT representation
Definition: tensortrain.h:335
std::enable_if<!std::is_arithmetic< R >::value, void >::type truncate(double eps)
recompress and truncate this TT representation
Definition: tensortrain.h:883
friend TensorTrain copy(const TensorTrain &other)
deep copy of the whole tensor
Definition: tensortrain.h:288
TensorTrain< T > & gaxpy(T alpha, const TensorTrain< T > &rhs, T beta)
Inplace generalized saxpy ... this = this*alpha + other*beta.
Definition: tensortrain.h:524
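A sketch for two conforming TensorTrain<double> objects a and b (hypothetical names, identical dims); the ranks of a grow and may be reduced again with truncate():
  a.gaxpy(1.0,b,-0.5);                            // a <- 1.0*a - 0.5*b
  a.truncate(1.e-10);                             // optional recompression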
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> transform(const TensorTrain< R > &t, const Tensor< Q > &c)
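A sketch of the friend transform, which applies one transformation matrix c to every dimension; it assumes all modes of the hypothetical tt have the same size k:
  long k=tt.dim(0);
  Tensor<double> c(k,k);
  c.fillrandom();
  TensorTrain<double> tc=transform(tt,c);         // apply the k-by-k matrix c along every mode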
TensorTrain< T > & emul(const TensorTrain< T > &other)
compute the Hadamard product of two TensorTrains
Definition: tensortrain.h:641
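A sketch of the Hadamard product for two conforming TensorTrain<double> objects a and b (hypothetical); the ranks multiply, so a truncation afterwards is usually advisable:
  a.emul(b);                                      // element-wise product, stored in a
  a.truncate(1.e-10);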
bool is_operator() const
check if this is an operator (r,k',k,r)
Definition: tensortrain.h:1171
friend TensorTrain copy(const TensorTrain &other, const std::array< Slice, TENSOR_MAXDIM > &s)
deep copy of a slice of the tensor
Definition: tensortrain.h:308
bool zero_rank
true if rank is zero
Definition: tensortrain.h:142
friend TensorTrain< TENSOR_RESULT_TYPE(R, Q)> general_transform(const TensorTrain< R > &t, const Tensor< Q > c[])
std::enable_if< std::is_arithmetic< R >::value, void >::type truncate(double eps)
recompress and truncate this TT representation
Definition: tensortrain.h:894
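A minimal truncation sketch for a hypothetical TensorTrain<double> tt (arithmetic T, so this overload applies):
  tt.truncate(1.e-10);                            // SVD-based recompression to the requested accuracy
  print("ranks after truncation",tt.ranks());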
bool is_tensor() const
check if this is a tensor (r,k,r)
Definition: tensortrain.h:1165
std::enable_if<!(TensorTypeData< T >::iscomplex or TensorTypeData< Q >::iscomplex), TENSOR_RESULT_TYPE(T, Q)>::type trace(const TensorTrain< Q > &B) const
Return the trace of two tensors, no complex conjugate involved.
Definition: tensortrain.h:1240
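A sketch of the real-valued trace for two TensorTrain<double> objects a and b with identical dims (hypothetical names; no complex conjugation involved, as stated above):
  double ovlp=a.trace(b);                         // sum over all multi-indices of a*b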
std::enable_if<(TensorTypeData< T >::iscomplex or TensorTypeData< Q >::iscomplex), TENSOR_RESULT_TYPE(T, Q)>::type trace(const TensorTrain< Q > &B) const
Definition: tensortrain.h:1231
bool is_zero_rank() const
return true if the rank is zero
Definition: tensortrain.h:1110
long ranks(const int i) const
return the TT ranks for dimension i (to i+1)
Definition: tensortrain.h:1123
Tensor< T > & get_core(const int i)
reference to the internal core
Definition: tensortrain.h:1217
TensorTrain()
empty constructor
Definition: tensortrain.h:147
Tensor< T > reconstruct(const bool flat=false) const
reconstruct this to a full representation
Definition: tensortrain.h:815
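A sketch that rebuilds the dense tensor and, assuming the original madness::Tensor<double> t is still available, measures the representation error:
  Tensor<double> full=tt.reconstruct();           // dense tensor with the original dims
  double err=(full-t).normf();                    // Frobenius norm of the difference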
void serialize(Archive &ar)
serialize this
Definition: tensortrain.h:271
TensorTrain< T > & operator-=(const TensorTrain< T > &rhs)
inplace subtraction of two TensorTrains; will increase the ranks of this
Definition: tensortrain.h:518
TensorTrain< T > splitdim(long idim, long k1, long k2, const double eps) const
Definition: tensortrain.h:749
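A sketch pairing splitdim with its counterpart fusedim; k1 and k2 are hypothetical factor sizes with k1*k2==tt.dim(1):
  long k1=2, k2=tt.dim(1)/2;                      // hypothetical split of mode 1
  TensorTrain<double> split=tt.splitdim(1,k1,k2,1.e-12);
  split.fusedim(1);                               // merges modes 1 and 2 back into one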
TensorTypeData< T >::float_scalar_type float_scalar_type
C++ typename of the floating point type associated with scalar real type.
Definition: tensortrain.h:136
TensorTrain< T > & operator+=(const TensorTrain< T > &rhs)
inplace addition of two TensorTrains; will increase the ranks of this
Definition: tensortrain.h:508
TensorTrain(const Tensor< T > &t, double eps)
ctor for a TensorTrain, with the tolerance eps
Definition: tensortrain.h:162
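A minimal construction sketch, assuming the usual madness/tensor/tensortrain.h include and using namespace madness; the dense tensor t and the accuracy 1.e-12 are placeholders:
  Tensor<double> t(10,10,10,10);
  t.fillrandom();
  TensorTrain<double> tt(t,1.e-12);               // TT decomposition to accuracy eps
  print("TT ranks",tt.ranks());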
const Tensor< T > & get_core(const int i) const
const reference to the internal core
Definition: tensortrain.h:1223
Traits class to specify support of numeric types.
Definition: type_data.h:56
A tensor is a multidimension array.
Definition: tensor.h:317
Tensor< T > fusedim(long i)
Returns new view/tensor fusing contiguous dimensions i and i+1.
Definition: tensor.h:1587
TensorTypeData< T >::scalar_type scalar_type
C++ typename of the real type associated with a complex type.
Definition: tensor.h:409
Tensor< T > reshape(int ndimnew, const long *d)
Returns new view/tensor reshaping size/number of dimensions to conforming tensor.
Definition: tensor.h:1384
Tensor< T > flat()
Returns new view/tensor rehshaping to flat (1-d) tensor.
Definition: tensor.h:1555
T trace(const Tensor< T > &t) const
Return the trace of two tensors (no complex conjugate invoked)
Definition: tensor.h:1776
Tensor< T > cycledim(long nshift, long start, long end)
Returns new view/tensor cycling the sub-dimensions (start,...,end) with shift steps.
Definition: tensor.h:1641
Tensor< T > swapdim(long idim, long jdim)
Returns new view/tensor swapping dimensions i and j.
Definition: tensor.h:1605
Correspondence between C++ and Fortran types.
archive_array< T > wrap(const T *, unsigned int)
Factory function to wrap a dynamically allocated pointer as a typed archive_array.
Definition: archive.h:913
void inner_result(const Tensor< T > &left, const Tensor< Q > &right, long k0, long k1, Tensor< TENSOR_RESULT_TYPE(T, Q) > &result)
Accumulate inner product into user provided, contiguous, correctly sized result tensor.
Definition: tensor.h:2295
#define MADNESS_EXCEPTION(msg, value)
Macro for throwing a MADNESS exception.
Definition: madness_exception.h:119
#define MADNESS_ASSERT(condition)
Assert a condition that should be free of side-effects since in release builds this might be a no-op.
Definition: madness_exception.h:134
void svd_result(Tensor< T > &a, Tensor< T > &U, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &VT, Tensor< T > &work)
same as svd, but it optimizes away the tensor construction: a = U * diag(s) * VT
Definition: lapack.cc:773
GenTensor< TENSOR_RESULT_TYPE(R, Q)> general_transform(const GenTensor< R > &t, const Tensor< Q > c[])
Definition: gentensor.h:274
void lq_result(Tensor< T > &A, Tensor< T > &R, Tensor< T > &tau, Tensor< T > &work, bool do_qr)
compute the LQ decomposition of the matrix A = L Q
Definition: lapack.cc:1320
TensorTrain< T > tt_identity(const long ndim, const long k)
compute the n-D identity operator with k elements per dimension
Definition: tensortrain.h:1610
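A sketch of building the identity in TT form; the template parameter must be given explicitly since it cannot be deduced from the arguments:
  TensorTrain<double> id=tt_identity<double>(3,5);  // 3-D identity, 5 elements per dimension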
long rank_revealing_decompose(Tensor< T > &A, Tensor< T > &U, const double thresh, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &scr)
Definition: tensortrain.h:66
GenTensor< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const GenTensor< R > &t, const Tensor< Q > &c, const int axis)
Definition: lowranktensor.h:1099
static const Slice _(0,-1, 1)
void svd(const Tensor< T > &a, Tensor< T > &U, Tensor< typename Tensor< T >::scalar_type > &s, Tensor< T > &VT)
Compute the singular value decomposition of an n-by-m matrix using *gesvd.
Definition: lapack.cc:739
void print(const T &t, const Ts &... ts)
Print items to std::cout (items separated by spaces) and terminate with a new line.
Definition: print.h:225
static long abs(long a)
Definition: tensor.h:218
Defines and implements most of Tensor.
Prototypes for a partial interface from Tensor to LAPACK.
#define TENSOR_MAXDIM
Definition: tensor_macros.h:194