MADNESS 0.10.1
gentensor.h
/*
  This file is part of MADNESS.

  Copyright (C) 2007,2010 Oak Ridge National Laboratory

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

  For more information please contact:

  Robert J. Harrison
  Oak Ridge National Laboratory
  One Bethel Valley Road
  P.O. Box 2008, MS-6367

  email: harrisonrj@ornl.gov
  tel:   865-241-3937
  fax:   865-572-0680

  $Id$
*/

#ifndef GENTENSOR_H_
#define GENTENSOR_H_


/*!
 *
 * \file gentensor.h
 * \brief Provides a tensor that can take advantage of a possibly low rank
 *
 ****************************************************************
 * MAIN DIFFERENCES (Tensor t; GenTensor g)
 * t=t1(s) is shallow
 * g=g1(s) is deep
 ****************************************************************
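 *
 * A minimal sketch of this difference (here s is some slice, e.g. Slice(0,1);
 * the variable names are purely illustrative):
 * \code
 * Tensor<double> t1(3,3), t2;
 * t2 = t1(s);                  // t2 is a view: writing into t2 modifies t1
 *
 * GenTensor<double> g1(t1), g2;
 * g2 = g1(s);                  // g2 is a deep copy: g1 remains untouched
 * \endcode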
 *
 * A GenTensor is a generalized form of a Tensor.
 * For now only limited functionality is implemented; feel free to extend it.
 * A consequence is that we (usually) can't directly access individual
 * matrix elements.
 * Note that a GenTensor might have zero rank, but it is still a valid
 * tensor, and should therefore return a FullTensor with zeros in it.
 *
 * \par Slicing in GenTensors:
 *
 * A GenTensor differs from a Tensor in that we can't directly access
 * individual matrix elements, and thus can't directly assign or manipulate slices
 * as lvalues. For rvalues we simply provide a slice of the constituent vectors
 * in SRConf, which is a valid GenTensor by itself
 * \code
 * lhs = rhs(s)
 * \endcode
 *
 * Manipulations of slices of a GenTensor are heavily restricted, but should
 * cover the most important cases:
 * - assignment to zero; done by in-place subtraction of the slice
 * \code
 * GenTensor lrt1(t);
 * lrt1(s) = 0.0;
 * \endcode
 * - in-place addition
 * \code
 * GenTensor lrt1(3,3,3,3), lrt2(t);
 * lrt1(s) += lrt2;
 * \endcode
 *
 * Note that *all* of these operations increase the rank of the lhs
 *
 * \par Addition in GenTensors
 *
 * Addition in a GenTensor is a fairly complicated issue, and there are several
 * algorithms you can use, each with certain pros and cons
 * (see the sketch after this list):
 *
 * - append()
 *   Plain concatenation of the configurations will increase the rank and will
 *   introduce linear dependencies and redundancies. Also, you might have to
 *   reallocate memory and copy a lot of data around. However, no accuracy is lost
 *   and it is fairly fast. The final rank reduction might be expensive, since you
 *   might have accumulated a huge number of terms.
 *
 * - low_rank_add_sequential()
 *   Only for SVD (2-way decomposition).
 *   Take the 2nd vector as the basis of a vector space, project the overlap of the
 *   old and new terms onto the lhs basis, perform modified Gram-Schmidt
 *   orthogonalization on the rhs basis, and enlarge the basis if necessary.
 *   This requires the left hand side to be right-orthonormal, and the caller is
 *   responsible for ensuring that. If a new GenTensor is created and only additions
 *   using this method are performed, the tensor will automatically be orthonormal.
 *   The cost depends on the number of terms of the rhs and the lhs.
 *
 * - addition of slices
 *   As of now we can add slices only using append().
 *
 * - addition in full rank form
 *   At times it might be sensible to accumulate your result in a full rank tensor,
 *   and after collecting all the contributions you can transform it into a low
 *   rank tensor. This also maintains "all" digits, and the cost does not depend on
 *   the number of terms on the lhs.
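 *
 * A minimal sketch of the last two strategies (variable names and thresholds are
 * illustrative; TensorArgs, the Tensor-based constructor, add_SVD() and
 * reduce_rank() are declared further down in this file):
 * \code
 * double thresh=1.e-4;
 * Tensor<double> t1(3,3,3,3), t2(3,3,3,3);            // full-rank contributions
 *
 * // accumulate in full rank, compress only once at the end
 * Tensor<double> acc=copy(t1);
 * acc+=t2;
 * GenTensor<double> result(acc,TensorArgs(thresh,TT_2D));
 *
 * // or: stay low-rank and reduce the rank after each addition
 * GenTensor<double> g1(t1,TensorArgs(thresh,TT_2D));
 * GenTensor<double> g2(t2,TensorArgs(thresh,TT_2D));
 * g1.add_SVD(g2,thresh);                              // sequential low-rank addition
 * g1.reduce_rank(thresh);
 * \endcode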
 */



namespace madness {


/// low rank representations of tensors (see gentensor.h)
enum TensorType {TT_FULL, TT_2D, TT_TENSORTRAIN};

static
inline
std::ostream& operator << (std::ostream& s, const TensorType& tt) {
    std::string str="confused tensor type";
    if (tt==TT_FULL) str="full rank tensor";
    if (tt==TT_2D) str="low rank tensor 2-way";
    if (tt==TT_TENSORTRAIN) str="tensor train";
//  if (tt==TT_DYNAMIC) str="dynamic";
    s << str.c_str();
    return s;
}
/// TensorArgs holds the arguments for creating a LowRankTensor (see the usage sketch below)
struct TensorArgs {
    double thresh;
    TensorType tt;

    TensorArgs(const double& thresh1, const TensorType& tt1)
        : thresh(thresh1)
        , tt(tt1) {
    }
    TensorArgs(const TensorType& tt1, const double& thresh1)
        : thresh(thresh1)
        , tt(tt1) {
    }

    static std::string what_am_i(const TensorType& tt) {
        if (tt==TT_2D) return "TT_2D";
        if (tt==TT_FULL) return "TT_FULL";
        if (tt==TT_TENSORTRAIN) return "TT_TENSORTRAIN";
        return "unknown tensor type";
    }

    template <typename Archive>
    void serialize(const Archive& ar) {
        int i=int(tt);
        ar & thresh & i;
        tt=TensorType(i);
    }
};
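
/// A minimal usage sketch for TensorArgs (the threshold value is illustrative):
/// \code
/// TensorArgs targs(1.e-5,TT_2D);                        // threshold and representation
/// std::cout << TensorArgs::what_am_i(targs.tt) << " is a "
///           << targs.tt << std::endl;                   // "TT_2D is a low rank tensor 2-way"
/// \endcode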
}

// low-rank tensors are available only when gentensor support (HAVE_GENTENSOR) is enabled
#if HAVE_GENTENSOR

#else


namespace madness {

    template<typename T> class SRConf;

    template <typename T>
    class GenTensor : public Tensor<T> {

    public:

        GenTensor() : Tensor<T>() {}

        GenTensor(const Tensor<T>& t1) : Tensor<T>(t1) {}
        GenTensor(const Tensor<T>& t1, const TensorArgs& targs) : Tensor<T>(t1) {}
        GenTensor(const Tensor<T>& t1, double eps, const TensorType tt) : Tensor<T>(t1) {}
        GenTensor(const TensorType tt): Tensor<T>() {}
        GenTensor(std::vector<long> v, const TensorType& tt) : Tensor<T>(v) {}
        GenTensor(std::vector<long> v, const TensorArgs& targs) : Tensor<T>(v) {}
        GenTensor(const SRConf<T>& sr1) : Tensor<T>() {MADNESS_EXCEPTION("no ctor with SRConf: use HAVE_GENTENSOR",1);}
        GenTensor(long nd, const long d[], const TensorType& tt) : Tensor<T>(nd,d) {}

        /// Type conversion makes a deep copy
        template <class Q> operator GenTensor<Q>() const { // type conv => deep copy
            Tensor<Q> result = Tensor<Q>(this->_ndim,this->_dim,false);
            BINARY_OPTIMIZED_ITERATOR(Q, result, const T, (*this), *_p0 = (Q)(*_p1));
            return result;
        }

        GenTensor convert(const TensorArgs& targs) const {return copy(*this);}
        GenTensor reconstruct_tensor() const {return *this;}
        GenTensor full_tensor() const {return *this;}
        GenTensor& full_tensor() {return *this;}

        GenTensor get_tensor() const {return *this;}
        GenTensor& get_tensor() {return *this;}

        Tensor<T> full_tensor_copy() const {return copy(*this);}
        Tensor<T> full_tensor_copy() {return copy(*this);}

        bool is_assigned() const {return this->size()>0;}
        bool has_data() const {return this->size()>0;}
        bool has_no_data() const {return not has_data();}
        long rank() const {return -1;}
        double svd_normf() const {return this->normf();}
        size_t real_size() const {return this->size();}
        size_t nCoeff() const {return this->size();}

        void reduce_rank(const double& eps) {return;}
        void normalize() {return;}

        std::string what_am_i() const {return "GenTensor, aliased to Tensor";}
        TensorType tensor_type() const {return TT_FULL;}
        constexpr bool is_svd_tensor() const {return false;}
        constexpr bool is_tensortrain() const {return false;}
        constexpr bool is_full_tensor() const {return true;}
        bool is_of_tensortype(const TensorType& tt) const {return (tt==TT_FULL);}

        SVDTensor<T>& get_svdtensor() {MADNESS_EXCEPTION("no SVDTensor in aliased GenTensor",1);}
        SVDTensor<T>& get_tensortrain() {MADNESS_EXCEPTION("no SVDTensor in aliased GenTensor",1);}
        const SVDTensor<T>& get_svdtensor() const {MADNESS_EXCEPTION("no SVDTensor in aliased GenTensor",1);}
        const SVDTensor<T>& get_tensortrain() const {MADNESS_EXCEPTION("no SVDTensor in aliased GenTensor",1);}

        void add_SVD(const GenTensor<T>& rhs, const double& eps) {*this+=rhs;}

        SRConf<T> config() const {MADNESS_EXCEPTION("no SRConf in complex GenTensor",1);}
        SRConf<T> get_configs(const int& start, const int& end) const {MADNESS_EXCEPTION("no SRConf in complex GenTensor",1);}

        /// return the additional safety for rank reduction
        static double fac_reduce() {return -1.0;}

    };
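
    /// A minimal usage sketch for the aliased GenTensor above (sizes and thresholds
    /// are illustrative). Without HAVE_GENTENSOR no compression takes place, so the
    /// "low rank" copy reproduces the full tensor exactly:
    /// \code
    /// Tensor<double> t(3,3,3,3);
    /// t.fillrandom();
    /// GenTensor<double> g(t,TensorArgs(1.e-5,TT_2D));
    /// g.reduce_rank(1.e-5);                         // no-op in the aliased implementation
    /// double err=(g.full_tensor_copy()-t).normf();  // exactly zero
    /// \endcode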

    /// add all the GenTensors of a given list
    template <class T>
    GenTensor<T> reduce(std::list<GenTensor<T> >& addends, double eps, bool are_optimal=false) {
        typedef typename std::list<GenTensor<T> >::iterator iterT;
        GenTensor<T> result=copy(addends.front());
        for (iterT it=++addends.begin(); it!=addends.end(); ++it) {
            result+=*it;
        }
        return result;
    }
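
    /// A minimal usage sketch for reduce() (sizes and threshold are illustrative):
    /// \code
    /// std::list<GenTensor<double> > addends;
    /// addends.push_back(GenTensor<double>(Tensor<double>(3,3)));
    /// addends.push_back(GenTensor<double>(Tensor<double>(3,3)));
    /// GenTensor<double> sum=reduce(addends,1.e-5);
    /// \endcode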

    /// Outer product ... result(i,j,...,p,q,...) = left(i,j,...)*right(p,q,...)

    /// \ingroup tensor
    template <class T>
    GenTensor<T> outer(const GenTensor<T>& left, const GenTensor<T>& right,
            const TensorArgs final_tensor_args) {
        return madness::outer(static_cast<Tensor<T> >(left),static_cast<Tensor<T> >(right));
    }

    /// Outer product ... result(i,j,...,p,q,...) = left(i,j,...)*right(p,q,...)

    /// \ingroup tensor
    template <class T>
    GenTensor<T> outer(const Tensor<T>& left, const Tensor<T>& right,
            const TensorArgs final_tensor_args) {
        return madness::outer(left,right);
    }

    template <typename R, typename Q>
    GenTensor<TENSOR_RESULT_TYPE(R,Q)> general_transform(
            const GenTensor<R>& t, const Tensor<Q> c[]) {
        const Tensor<R>& tensor=static_cast<const Tensor<R>& >(t);
        return general_transform(tensor,c);
    }



    /// change representation to targs.tt
    template<typename T>
    void change_tensor_type(GenTensor<T>& t, const TensorArgs& targs) {
        MADNESS_ASSERT(targs.tt==TT_FULL);
    }

    namespace archive {

        /// Serialize a tensor
        template <class Archive, typename T>
        struct ArchiveStoreImpl< Archive, GenTensor<T> > {
            static void store(const Archive& s, const GenTensor<T>& t) {
                if (t.iscontiguous()) {
                    s & t.size() & t.id();
                    if (t.size()) s & t.ndim() & wrap(t.dims(),TENSOR_MAXDIM) & wrap(t.ptr(),t.size());
                }
                else {
                    s & copy(t);
                }
            }
        };


        /// Deserialize a tensor ... existing tensor is replaced
        template <class Archive, typename T>
        struct ArchiveLoadImpl< Archive, GenTensor<T> > {
            static void load(const Archive& s, GenTensor<T>& t) {
                long sz = 0l, id = 0l;
                s & sz & id;
                if (id != t.id()) throw "type mismatch deserializing a tensor";
                if (sz) {
                    long _ndim=0l, _dim[TENSOR_MAXDIM];
                    s & _ndim & wrap(_dim,TENSOR_MAXDIM);
                    t = Tensor<T>(_ndim, _dim, false);
                    if (sz != t.size()) throw "size mismatch deserializing a tensor";
                    s & wrap(t.ptr(), t.size());
                }
                else {
                    t = Tensor<T>();
                }
            }
        };
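
        /// A minimal usage sketch for (de)serializing a GenTensor through these
        /// implementations (the archive type and file name are illustrative):
        /// \code
        /// GenTensor<double> g(Tensor<double>(3,3));
        /// {
        ///     archive::BinaryFstreamOutputArchive oar("gentensor.dat");
        ///     oar & g;
        /// }
        /// GenTensor<double> g2;
        /// archive::BinaryFstreamInputArchive iar("gentensor.dat");
        /// iar & g2;
        /// \endcode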

    } // namespace archive

} // namespace madness

#endif /* HAVE_GENTENSOR */
#endif /* GENTENSOR_H_ */