funcimpl.h
1/*
2 This file is part of MADNESS.
3
4 Copyright (C) 2007,2010 Oak Ridge National Laboratory
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
20 For more information please contact:
21
22 Robert J. Harrison
23 Oak Ridge National Laboratory
24 One Bethel Valley Road
25 P.O. Box 2008, MS-6367
26
27 email: harrisonrj@ornl.gov
28 tel: 865-241-3937
29 fax: 865-572-0680
30*/
31
32#ifndef MADNESS_MRA_FUNCIMPL_H__INCLUDED
33#define MADNESS_MRA_FUNCIMPL_H__INCLUDED
34
35/// \file funcimpl.h
36/// \brief Provides FunctionCommonData, FunctionImpl and FunctionFactory
37
39#include <madness/world/print.h>
40#include <madness/misc/misc.h>
43
45#include <madness/mra/indexit.h>
46#include <madness/mra/key.h>
50
51#include <madness/mra/leafop.h>
52
53#include <array>
54#include <iostream>
55#include <type_traits>
56
57namespace madness {
58 template <typename T, std::size_t NDIM>
59 class DerivativeBase;
60
61 template<typename T, std::size_t NDIM>
62 class FunctionImpl;
63
64 template<typename T, std::size_t NDIM>
65 class FunctionNode;
66
67 template<typename T, std::size_t NDIM>
68 class Function;
69
70 template<typename T, std::size_t NDIM>
71 class FunctionFactory;
72
73 template<typename T, std::size_t NDIM, std::size_t MDIM>
74 class CompositeFunctorInterface;
75
76 template<int D>
78
79}
80
81namespace madness {
82
83
84 /// A simple process map
85 template<typename keyT>
86 class SimplePmap : public WorldDCPmapInterface<keyT> {
87 private:
88 const int nproc;
 89 const ProcessID me;
 90
91 public:
92 SimplePmap(World& world) : nproc(world.nproc()), me(world.rank())
93 { }
94
95 ProcessID owner(const keyT& key) const {
96 if (key.level() == 0)
97 return 0;
98 else
99 return key.hash() % nproc;
100 }
101 };
102
 103 /// A pmap that places boxes on even levels (above level 3) with their odd-level parents
104 template <typename keyT>
105 class LevelPmap : public WorldDCPmapInterface<keyT> {
106 private:
107 const int nproc;
108 public:
109 LevelPmap() : nproc(0) {};
110
111 LevelPmap(World& world) : nproc(world.nproc()) {}
112
113 /// Find the owner of a given key
114 ProcessID owner(const keyT& key) const {
115 Level n = key.level();
116 if (n == 0) return 0;
117 hashT hash;
118 if (n <= 3 || (n&0x1)) hash = key.hash();
119 else hash = key.parent().hash();
120 return hash%nproc;
121 }
122 };
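// A minimal usage sketch (not from the original source): it assumes an already-initialized
// madness::World named world and queries ownership of one box directly.
//
// LevelPmap< Key<3> > pmap(world);
// Key<3> child(6, Vector<Translation,3>{3,1,4}); // an even level above 3 ...
// ProcessID p = pmap.owner(child);               // ... is placed with its level-5 parent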
123
124
125 /// FunctionNode holds the coefficients, etc., at each node of the 2^NDIM-tree
126 template<typename T, std::size_t NDIM>
 127 class FunctionNode {
 128 public:
 129 typedef GenTensor<T> coeffT; ///< Type of tensor used to hold the coefficients
 130 typedef Tensor<T> tensorT; ///< Type of the full-rank tensor
 131 private:
132 // Should compile OK with these volatile but there should
133 // be no need to set as volatile since the container internally
134 // stores the entire entry as volatile
135
136 coeffT _coeffs; ///< The coefficients, if any
137 double _norm_tree; ///< After norm_tree will contain norm of coefficients summed up tree
138 bool _has_children; ///< True if there are children
139 coeffT buffer; ///< The coefficients, if any
140 double dnorm=-1.0; ///< norm of the d coefficients, also defined if there are no d coefficients
141 double snorm=-1.0; ///< norm of the s coefficients
142
143 public:
144 typedef WorldContainer<Key<NDIM> , FunctionNode<T, NDIM> > dcT; ///< Type of container holding the nodes
145 /// Default constructor makes node without coeff or children
 146 FunctionNode() :
 147 _coeffs(), _norm_tree(1e300), _has_children(false) {
148 }
149
150 /// Constructor from given coefficients with optional children
151
152 /// Note that only a shallow copy of the coeff are taken so
153 /// you should pass in a deep copy if you want the node to
154 /// take ownership.
155 explicit
159
160 explicit
164
165 explicit
169
172 dnorm(other.dnorm), snorm(other.snorm) {
173 }
174
 176 FunctionNode<T, NDIM>& operator=(const FunctionNode<T, NDIM>& other) {
 177 if (this != &other) {
178 coeff() = copy(other.coeff());
179 _norm_tree = other._norm_tree;
 180 _has_children = other._has_children;
 181 dnorm=other.dnorm;
182 snorm=other.snorm;
184 }
185 return *this;
186 }
187
188 /// Copy with possible type conversion of coefficients, copying all other state
189
190 /// Choose to not overload copy and type conversion operators
191 /// so there are no automatic type conversions.
192 template<typename Q>
 193 FunctionNode<Q, NDIM>
 194 convert() const {
195 return FunctionNode<Q, NDIM> (madness::convert<Q,T>(coeff()), _norm_tree, snorm, dnorm, _has_children);
196 }
197
198 /// Returns true if there are coefficients in this node
199 bool
200 has_coeff() const {
201 return _coeffs.has_data();
202 }
203
204
205 /// Returns true if this node has children
206 bool
207 has_children() const {
208 return _has_children;
209 }
210
211 /// Returns true if this does not have children
212 bool
213 is_leaf() const {
214 return !_has_children;
215 }
216
217 /// Returns true if this node is invalid (no coeffs and no children)
218 bool
219 is_invalid() const {
220 return !(has_coeff() || has_children());
221 }
222
223 /// Returns a non-const reference to the tensor containing the coeffs
224
225 /// Returns an empty tensor if there are no coefficients.
226 coeffT&
 227 coeff() {
 228 MADNESS_ASSERT(_coeffs.ndim() == -1 || (_coeffs.dim(0) <= 2
229 * MAXK && _coeffs.dim(0) >= 0));
230 return const_cast<coeffT&>(_coeffs);
231 }
232
233 /// Returns a const reference to the tensor containing the coeffs
234
 235 /// Returns an empty tensor if there are no coefficients.
236 const coeffT&
237 coeff() const {
238 return const_cast<const coeffT&>(_coeffs);
239 }
240
241 /// Returns the number of coefficients in this node
242 size_t size() const {
243 return _coeffs.size();
244 }
245
246 public:
247
248 /// reduces the rank of the coefficients (if applicable)
249 void reduceRank(const double& eps) {
250 _coeffs.reduce_rank(eps);
251 }
252
253 /// Sets \c has_children attribute to value of \c flag.
254 void set_has_children(bool flag) {
255 _has_children = flag;
256 }
257
 258 /// Sets \c has_children attribute to true, recurring up the tree to ensure the node is connected to its parent
 259 void set_has_children_recursive(const typename FunctionNode<T,NDIM>::dcT& c, const Key<NDIM>& key) {
 260 //madness::print(" set_chi_recu: ", key, *this);
261 //PROFILE_MEMBER_FUNC(FunctionNode); // Too fine grain for routine profiling
262 if (!(has_children() || has_coeff() || key.level()==0)) {
263 // If node already knows it has children or it has
264 // coefficients then it must already be connected to
265 // its parent. If not, the node was probably just
266 // created for this operation and must be connected to
267 // its parent.
268 Key<NDIM> parent = key.parent();
269 // Task on next line used to be TaskAttributes::hipri()) ... but deferring execution of this
270 // makes sense since it is not urgent and lazy connection will likely mean that less forwarding
271 // will happen since the upper level task will have already made the connection.
272 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
273 //const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
274 //madness::print(" set_chi_recu: forwarding",key,parent);
275 }
276 _has_children = true;
277 }
278
279 /// Sets \c has_children attribute to value of \c !flag
280 void set_is_leaf(bool flag) {
281 _has_children = !flag;
282 }
283
284 /// Takes a \em shallow copy of the coeff --- same as \c this->coeff()=coeff
285 void set_coeff(const coeffT& coeffs) {
286 coeff() = coeffs;
287 if ((_coeffs.has_data()) and ((_coeffs.dim(0) < 0) || (_coeffs.dim(0)>2*MAXK))) {
288 print("set_coeff: may have a problem");
289 print("set_coeff: coeff.dim[0] =", coeffs.dim(0), ", 2* MAXK =", 2*MAXK);
290 }
291 MADNESS_ASSERT(coeffs.dim(0)<=2*MAXK && coeffs.dim(0)>=0);
292 }
293
294 /// Clears the coefficients (has_coeff() will subsequently return false)
295 void clear_coeff() {
296 coeff()=coeffT();
297 }
298
299 /// Scale the coefficients of this node
300 template <typename Q>
301 void scale(Q a) {
302 _coeffs.scale(a);
303 }
304
305 /// Sets the value of norm_tree
 306 void set_norm_tree(double norm_tree) {
 307 _norm_tree = norm_tree;
 308 }
309
310 /// Gets the value of norm_tree
311 double get_norm_tree() const {
312 return _norm_tree;
313 }
314
315 /// return the precomputed norm of the (virtual) d coefficients
316 double get_dnorm() const {
317 return dnorm;
318 }
319
320 /// set the precomputed norm of the (virtual) s coefficients
321 void set_snorm(const double sn) {
322 snorm=sn;
323 }
324
325 /// set the precomputed norm of the (virtual) d coefficients
326 void set_dnorm(const double dn) {
327 dnorm=dn;
328 }
329
330 /// get the precomputed norm of the (virtual) s coefficients
331 double get_snorm() const {
332 return snorm;
333 }
334
336 snorm = 0.0;
337 dnorm = 0.0;
338 if (coeff().size() == 0) { ;
339 } else if (coeff().dim(0) == cdata.vk[0]) {
340 snorm = coeff().normf();
341
342 } else if (coeff().is_full_tensor()) {
343 Tensor<T> c = copy(coeff().get_tensor());
344 snorm = c(cdata.s0).normf();
345 c(cdata.s0) = 0.0;
346 dnorm = c.normf();
347
348 } else if (coeff().is_svd_tensor()) {
349 coeffT c= coeff()(cdata.s0);
350 snorm = c.normf();
351 double norm = coeff().normf();
352 dnorm = sqrt(norm * norm - snorm * snorm);
353
354 } else {
355 MADNESS_EXCEPTION("cannot use compute_dnorm", 1);
356 }
357 }
358
359
360 /// General bi-linear operation --- this = this*alpha + other*beta
361
362 /// This/other may not have coefficients. Has_children will be
363 /// true in the result if either this/other have children.
364 template <typename Q, typename R>
365 void gaxpy_inplace(const T& alpha, const FunctionNode<Q,NDIM>& other, const R& beta) {
366 //PROFILE_MEMBER_FUNC(FuncNode); // Too fine grain for routine profiling
367 if (other.has_children())
368 _has_children = true;
369 if (has_coeff()) {
370 if (other.has_coeff()) {
371 coeff().gaxpy(alpha,other.coeff(),beta);
372 }
373 else {
374 coeff().scale(alpha);
375 }
376 }
377 else if (other.has_coeff()) {
378 coeff() = other.coeff()*beta; //? Is this the correct type conversion?
379 }
380 }
381
382 /// Accumulate inplace and if necessary connect node to parent
383 void accumulate2(const tensorT& t, const typename FunctionNode<T,NDIM>::dcT& c,
384 const Key<NDIM>& key) {
385 // double cpu0=cpu_time();
386 if (has_coeff()) {
387 MADNESS_ASSERT(coeff().is_full_tensor());
388 // if (coeff().type==TT_FULL) {
389 coeff() += coeffT(t,-1.0,TT_FULL);
390 // } else {
391 // tensorT cc=coeff().full_tensor_copy();;
392 // cc += t;
393 // coeff()=coeffT(cc,args);
394 // }
395 }
396 else {
397 // No coeff and no children means the node is newly
398 // created for this operation and therefore we must
399 // tell its parent that it exists.
400 coeff() = coeffT(t,-1.0,TT_FULL);
401 // coeff() = copy(t);
402 // coeff() = coeffT(t,args);
403 if ((!_has_children) && key.level()> 0) {
404 Key<NDIM> parent = key.parent();
405 if (c.is_local(parent))
406 const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
407 else
408 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
409 }
410 }
411 //double cpu1=cpu_time();
412 }
413
414
415 /// Accumulate inplace and if necessary connect node to parent
416 void accumulate(const coeffT& t, const typename FunctionNode<T,NDIM>::dcT& c,
417 const Key<NDIM>& key, const TensorArgs& args) {
418 if (has_coeff()) {
419 coeff().add_SVD(t,args.thresh);
420 if (buffer.rank()<coeff().rank()) {
421 if (buffer.has_data()) {
422 buffer.add_SVD(coeff(),args.thresh);
423 } else {
424 buffer=copy(coeff());
425 }
426 coeff()=coeffT();
427 }
428
429 } else {
430 // No coeff and no children means the node is newly
431 // created for this operation and therefore we must
432 // tell its parent that it exists.
433 coeff() = copy(t);
434 if ((!_has_children) && key.level()> 0) {
435 Key<NDIM> parent = key.parent();
436 if (c.is_local(parent))
437 const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
438 else
439 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
440 }
441 }
442 }
443
444 void consolidate_buffer(const TensorArgs& args) {
445 if ((coeff().has_data()) and (buffer.has_data())) {
446 coeff().add_SVD(buffer,args.thresh);
447 } else if (buffer.has_data()) {
448 coeff()=buffer;
449 }
450 buffer=coeffT();
451 }
452
 453 T trace_conj(const FunctionNode<T,NDIM>& rhs) const {
 454 return this->_coeffs.trace_conj((rhs._coeffs));
455 }
456
457 template <typename Archive>
458 void serialize(Archive& ar) {
460 }
461
462 /// like operator<<(ostream&, const FunctionNode<T,NDIM>&) but
 463 /// produces a sequence of JSON-formatted key-value pairs
464 /// @warning enclose the output in curly braces to make
465 /// a valid JSON object
466 void print_json(std::ostream& s) const {
467 s << "\"has_coeff\":" << this->has_coeff()
468 << ",\"has_children\":" << this->has_children() << ",\"norm\":";
469 double norm = this->has_coeff() ? this->coeff().normf() : 0.0;
470 if (norm < 1e-12)
471 norm = 0.0;
472 double nt = this->get_norm_tree();
473 if (nt == 1e300)
474 nt = 0.0;
475 s << norm << ",\"norm_tree\":" << nt << ",\"snorm\":"
476 << this->get_snorm() << ",\"dnorm\":" << this->get_dnorm()
477 << ",\"rank\":" << this->coeff().rank();
478 if (this->coeff().is_assigned())
479 s << ",\"dim\":" << this->coeff().dim(0);
480 }
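// A small sketch of how print_json() is meant to be used, following the warning above
// that the output must be wrapped in braces to form a valid JSON object (the stream
// and node names are illustrative):
//
// std::ostringstream ss;
// ss << '{';
// node.print_json(ss);
// ss << '}'; // ss.str() now holds one JSON object describing the node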
481
482 };
483
484 template <typename T, std::size_t NDIM>
485 std::ostream& operator<<(std::ostream& s, const FunctionNode<T,NDIM>& node) {
486 s << "(has_coeff=" << node.has_coeff() << ", has_children=" << node.has_children() << ", norm=";
487 double norm = node.has_coeff() ? node.coeff().normf() : 0.0;
488 if (norm < 1e-12)
489 norm = 0.0;
490 double nt = node.get_norm_tree();
491 if (nt == 1e300) nt = 0.0;
492 s << norm << ", norm_tree, s/dnorm =" << nt << ", " << node.get_snorm() << " " << node.get_dnorm() << "), rank="<< node.coeff().rank()<<")";
493 if (node.coeff().is_assigned()) s << " dim " << node.coeff().dim(0) << " ";
494 return s;
495 }
496
497
498 /// returns true if the result of a hartree_product is a leaf node (compute norm & error)
499 template<typename T, size_t NDIM>
 500 struct hartree_leaf_op {
 501
 502 typedef FunctionImpl<T,NDIM> implT;
 503 const implT* f;
 504 long k;
505 bool do_error_leaf_op() const {return false;}
506
507 hartree_leaf_op() = default;
508 hartree_leaf_op(const implT* f, const long& k) : f(f), k(k) {}
509
510 /// no pre-determination
511 bool operator()(const Key<NDIM>& key) const {return false;}
512
513 /// no post-determination
514 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
515 MADNESS_EXCEPTION("no post-determination in hartree_leaf_op",1);
516 return true;
517 }
518
519 /// post-determination: true if f is a leaf and the result is well-represented
520
521 /// @param[in] key the hi-dimensional key (breaks into keys for f and g)
522 /// @param[in] fcoeff coefficients of f of its appropriate key in NS form
523 /// @param[in] gcoeff coefficients of g of its appropriate key in NS form
524 bool operator()(const Key<NDIM>& key, const Tensor<T>& fcoeff, const Tensor<T>& gcoeff) const {
525
526 if (key.level()<2) return false;
527 Slice s = Slice(0,k-1);
528 std::vector<Slice> s0(NDIM/2,s);
529
530 const double tol=f->get_thresh();
531 const double thresh=f->truncate_tol(tol, key)*0.3; // custom factor to "ensure" accuracy
532 // include the wavelets in the norm, makes it much more accurate
533 const double fnorm=fcoeff.normf();
534 const double gnorm=gcoeff.normf();
535
536 // if the final norm is small, perform the hartree product and return
537 const double norm=fnorm*gnorm; // computing the outer product
538 if (norm < thresh) return true;
539
540 // norm of the scaling function coefficients
541 const double sfnorm=fcoeff(s0).normf();
542 const double sgnorm=gcoeff(s0).normf();
543
544 // get the error of both functions and of the pair function;
545 // need the abs for numerics: sfnorm might be equal fnorm.
546 const double ferror=sqrt(std::abs(fnorm*fnorm-sfnorm*sfnorm));
547 const double gerror=sqrt(std::abs(gnorm*gnorm-sgnorm*sgnorm));
548
549 // if the expected error is small, perform the hartree product and return
550 const double error=fnorm*gerror + ferror*gnorm + ferror*gerror;
551 // const double error=sqrt(fnorm*fnorm*gnorm*gnorm - sfnorm*sfnorm*sgnorm*sgnorm);
552
553 if (error < thresh) return true;
554 return false;
555 }
556 template <typename Archive> void serialize (Archive& ar) {
557 ar & f & k;
558 }
559 };
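// The post-determination test in hartree_leaf_op::operator() above uses a simple bound
// on what is lost by keeping only scaling-function (s) coefficients. Splitting each
// factor into scaling and wavelet parts, f = s_f + d_f and g = s_g + d_g, the discarded
// part of the outer product is
//
//   f (x) g  -  s_f (x) s_g  =  s_f (x) d_g  +  d_f (x) s_g  +  d_f (x) d_g ,
//
// so its norm is bounded by |f|*|d_g| + |d_f|*|g| + |d_f|*|d_g|, i.e. exactly the
// quantity fnorm*gerror + ferror*gnorm + ferror*gerror computed above
// (using |s_f| <= |f| and |s_g| <= |g|).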
560
561 /// returns true if the result of the convolution operator op with some provided
562 /// coefficients will be small
563 template<typename T, size_t NDIM, typename opT>
564 struct op_leaf_op {
 565 typedef FunctionImpl<T,NDIM> implT;
 566
567 const opT* op; ///< the convolution operator
568 const implT* f; ///< the source or result function, needed for truncate_tol
569 bool do_error_leaf_op() const {return true;}
570
571 op_leaf_op() = default;
572 op_leaf_op(const opT* op, const implT* f) : op(op), f(f) {}
573
574 /// pre-determination: we can't know if this will be a leaf node before we got the final coeffs
575 bool operator()(const Key<NDIM>& key) const {return true;}
576
577 /// post-determination: return true if operator and coefficient norms are small
578 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
579 if (key.level()<2) return false;
580 const double cnorm=coeff.normf();
581 return this->operator()(key,cnorm);
582 }
583
584 /// post-determination: return true if operator and coefficient norms are small
585 bool operator()(const Key<NDIM>& key, const double& cnorm) const {
586 if (key.level()<2) return false;
587
588 typedef Key<opT::opdim> opkeyT;
589 const opkeyT source=op->get_source_key(key);
590
591 const double thresh=f->truncate_tol(f->get_thresh(),key);
592 const std::vector<opkeyT>& disp = op->get_disp(key.level());
593 const opkeyT& d = *disp.begin(); // use the zero-displacement for screening
594 const double opnorm = op->norm(key.level(), d, source);
595 const double norm=opnorm*cnorm;
596 return norm<thresh;
597
598 }
599
600 template <typename Archive> void serialize (Archive& ar) {
601 ar & op & f;
602 }
603
604 };
605
606
607 /// returns true if the result of a hartree_product is a leaf node
608 /// criteria are error, norm and its effect on a convolution operator
609 template<typename T, size_t NDIM, size_t LDIM, typename opT>
 610 struct hartree_convolute_leaf_op {
 611
 612 typedef FunctionImpl<T,NDIM> implT;
 613 typedef FunctionImpl<T,LDIM> implL;
 614
 615 const implT* f;
 616 const implL* g; // for use of its cdata only
617 const opT* op;
618 bool do_error_leaf_op() const {return false;}
619
621 hartree_convolute_leaf_op(const implT* f, const implL* g, const opT* op)
622 : f(f), g(g), op(op) {}
623
624 /// no pre-determination
625 bool operator()(const Key<NDIM>& key) const {return true;}
626
627 /// no post-determination
628 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
629 MADNESS_EXCEPTION("no post-determination in hartree_convolute_leaf_op",1);
630 return true;
631 }
632
633 /// post-determination: true if f is a leaf and the result is well-represented
634
635 /// @param[in] key the hi-dimensional key (breaks into keys for f and g)
636 /// @param[in] fcoeff coefficients of f of its appropriate key in NS form
637 /// @param[in] gcoeff coefficients of g of its appropriate key in NS form
638 bool operator()(const Key<NDIM>& key, const Tensor<T>& fcoeff, const Tensor<T>& gcoeff) const {
639 // bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
640
641 if (key.level()<2) return false;
642
643 const double tol=f->get_thresh();
644 const double thresh=f->truncate_tol(tol, key);
645 // include the wavelets in the norm, makes it much more accurate
646 const double fnorm=fcoeff.normf();
647 const double gnorm=gcoeff.normf();
648
649 // norm of the scaling function coefficients
650 const double sfnorm=fcoeff(g->get_cdata().s0).normf();
651 const double sgnorm=gcoeff(g->get_cdata().s0).normf();
652
653 // if the final norm is small, perform the hartree product and return
654 const double norm=fnorm*gnorm; // computing the outer product
655 if (norm < thresh) return true;
656
657 // get the error of both functions and of the pair function
658 const double ferror=sqrt(fnorm*fnorm-sfnorm*sfnorm);
659 const double gerror=sqrt(gnorm*gnorm-sgnorm*sgnorm);
660
661 // if the expected error is small, perform the hartree product and return
662 const double error=fnorm*gerror + ferror*gnorm + ferror*gerror;
663 if (error < thresh) return true;
664
665 // now check if the norm of this and the norm of the operator are significant
666 const std::vector<Key<NDIM> >& disp = op->get_disp(key.level());
667 const Key<NDIM>& d = *disp.begin(); // use the zero-displacement for screening
668 const double opnorm = op->norm(key.level(), d, key);
669 const double final_norm=opnorm*sfnorm*sgnorm;
670 if (final_norm < thresh) return true;
671
672 return false;
673 }
674 template <typename Archive> void serialize (Archive& ar) {
675 ar & f & op;
676 }
677 };
678
679 template<typename T, size_t NDIM>
680 struct noop {
681 void operator()(const Key<NDIM>& key, const GenTensor<T>& coeff, const bool& is_leaf) const {}
682 bool operator()(const Key<NDIM>& key, const GenTensor<T>& fcoeff, const GenTensor<T>& gcoeff) const {
683 MADNESS_EXCEPTION("in noop::operator()",1);
684 return true;
685 }
686 template <typename Archive> void serialize (Archive& ar) {}
687
688 };
689
 690 /// inserts/replaces the coefficients into the function
691 template<typename T, std::size_t NDIM>
692 struct insert_op {
 693 typedef Key<NDIM> keyT;
 694 typedef GenTensor<T> coeffT;
 695 typedef FunctionImpl<T,NDIM> implT;
 696 typedef FunctionNode<T,NDIM> nodeT;
 697 implT* impl;
701 insert_op(const insert_op& other) : impl(other.impl) {}
702 void operator()(const keyT& key, const coeffT& coeff, const bool& is_leaf) const {
704 impl->get_coeffs().replace(key,nodeT(coeff,not is_leaf));
705 }
706 template <typename Archive> void serialize (Archive& ar) {
707 ar & impl;
708 }
709
710 };
711
712 /// inserts/accumulates coefficients into impl's tree
713
714 /// NOTE: will use buffer and will need consolidation after operation ended !! NOTE !!
715 template<typename T, std::size_t NDIM>
 716 struct accumulate_op {
 717 typedef GenTensor<T> coeffT;
 718 typedef FunctionNode<T,NDIM> nodeT;
 719 FunctionImpl<T,NDIM>* impl;
 720
 721 accumulate_op() = default;
 722 accumulate_op(FunctionImpl<T,NDIM>* f) : impl(f) {}
723 accumulate_op(const accumulate_op& other) = default;
724 void operator()(const Key<NDIM>& key, const coeffT& coeff, const bool& is_leaf) const {
725 if (coeff.has_data())
726 impl->get_coeffs().task(key, &nodeT::accumulate, coeff, impl->get_coeffs(), key, impl->get_tensor_args());
727 }
728 template <typename Archive> void serialize (Archive& ar) {
729 ar & impl;
730 }
731
732 };
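// Sketch of the consolidation step required after driving accumulate_op (see the NOTE
// above): once all accumulate tasks have completed, each local node's SVD buffer has to
// be folded back into its coefficients. The loop below is illustrative only; it assumes
// a FunctionImpl<double,3> named impl and mirrors the iterator idiom used elsewhere in
// this header. A global fence is still needed afterwards so that all ranks have finished.
//
// typename FunctionImpl<double,3>::dcT::iterator end = impl.get_coeffs().end();
// for (auto it = impl.get_coeffs().begin(); it != end; ++it)
//     it->second.consolidate_buffer(impl.get_tensor_args());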
733
734
 735 template<size_t NDIM>
736 struct true_op {
737
738 template<typename T>
739 bool operator()(const Key<NDIM>& key, const T& t) const {return true;}
740
741 template<typename T, typename R>
742 bool operator()(const Key<NDIM>& key, const T& t, const R& r) const {return true;}
743 template <typename Archive> void serialize (Archive& ar) {}
744
745 };
746
747 /// shallow-copy, pared-down version of FunctionNode, for special purpose only
748 template<typename T, std::size_t NDIM>
749 struct ShallowNode {
 750 typedef GenTensor<T> coeffT;
 751 coeffT _coeffs;
 752 bool _has_children;
 753 double dnorm=-1.0;
 755 ShallowNode(const FunctionNode<T,NDIM>& node)
 756 : _coeffs(node.coeff()), _has_children(node.has_children()),
757 dnorm(node.get_dnorm()) {}
 758 ShallowNode(const ShallowNode<T,NDIM>& node)
 759 : _coeffs(node.coeff()), _has_children(node._has_children),
760 dnorm(node.dnorm) {}
761
762 const coeffT& coeff() const {return _coeffs;}
763 coeffT& coeff() {return _coeffs;}
764 bool has_children() const {return _has_children;}
765 bool is_leaf() const {return not _has_children;}
766 template <typename Archive>
767 void serialize(Archive& ar) {
768 ar & coeff() & _has_children & dnorm;
769 }
770 };
771
772
773 /// a class to track where relevant (parent) coeffs are
774
 775 /// E.g. if a 6D function is composed of two 3D functions, their coefficients must be tracked.
 776 /// We might need coeffs from a box that does not exist, so to avoid repeatedly searching for
 777 /// parents we keep track of which boxes actually hold the required (parent) coefficients.
778 /// - CoeffTracker will refer either to a requested key, if it exists, or to its
779 /// outermost parent.
780 /// - Children must be made in sequential order to be able to track correctly.
781 ///
782 /// Usage: 1. make the child of a given CoeffTracker.
783 /// If the parent CoeffTracker refers to a leaf node (flag is_leaf)
784 /// the child will refer to the same node. Otherwise it will refer
785 /// to the child node.
 786 /// 2. retrieve its coefficients (possible communication; returns a Future) -- see the sketch after this class.
787 /// Member variable key always refers to an existing node,
788 /// so we can fetch it. Once we have the node we can determine
789 /// if it has children which allows us to make a child (see 1. )
790 template<typename T, size_t NDIM>
 791 class CoeffTracker {
 792
 793 typedef FunctionImpl<T,NDIM> implT;
 794 typedef Key<NDIM> keyT;
 795 typedef GenTensor<T> coeffT;
 796 typedef std::pair<Key<NDIM>,ShallowNode<T,NDIM> > datumT;
 797 enum LeafStatus { no, yes, unknown };
 798
799 /// the funcimpl that has the coeffs
800 const implT* impl;
 801 /// the current key, which must exist in impl
 802 keyT key_;
 803 /// flag if key is a leaf node
 804 LeafStatus is_leaf_;
 805 /// the coefficients belonging to key
 806 coeffT coeff_;
 807 /// norm of d coefficients corresponding to key
808 double dnorm_=-1.0;
809
810 public:
811
812 /// default ctor
814
815 /// the initial ctor making the root key
817 if (impl) key_=impl->get_cdata().key0;
818 }
819
820 /// ctor with a pair<keyT,nodeT>
821 explicit CoeffTracker(const CoeffTracker& other, const datumT& datum)
822 : impl(other.impl), key_(other.key_), coeff_(datum.second.coeff()),
823 dnorm_(datum.second.dnorm) {
824 if (datum.second.is_leaf()) is_leaf_=yes;
825 else is_leaf_=no;
826 }
827
828 /// copy ctor
829 CoeffTracker(const CoeffTracker& other) : impl(other.impl), key_(other.key_),
830 is_leaf_(other.is_leaf_), coeff_(other.coeff_), dnorm_(other.dnorm_) {};
831
832 /// const reference to impl
833 const implT* get_impl() const {return impl;}
834
835 /// const reference to the coeffs
836 const coeffT& coeff() const {return coeff_;}
837
838 /// const reference to the key
839 const keyT& key() const {return key_;}
840
841 /// return the coefficients belonging to the passed-in key
842
843 /// if key equals tracked key just return the coeffs, otherwise
844 /// make the child coefficients.
845 /// @param[in] key return coeffs corresponding to this key
846 /// @return coefficients belonging to key
854
855 /// return the s and dnorm belonging to the passed-in key
856 double dnorm(const keyT& key) const {
857 if (key==key_) return dnorm_;
858 MADNESS_ASSERT(key.is_child_of(key_));
859 return 0.0;
860 }
861
862 /// const reference to is_leaf flag
863 const LeafStatus& is_leaf() const {return is_leaf_;}
864
865 /// make a child of this, ignoring the coeffs
866 CoeffTracker make_child(const keyT& child) const {
867
868 // fast return
869 if ((not impl) or impl->is_on_demand()) return CoeffTracker(*this);
870
871 // can't make a child without knowing if this is a leaf -- activate first
873
874 CoeffTracker result;
875 if (impl) {
876 result.impl=impl;
877 if (is_leaf_==yes) result.key_=key_;
878 if (is_leaf_==no) {
879 result.key_=child;
880 // check if child is direct descendent of this, but root node is special case
881 if (child.level()>0) MADNESS_ASSERT(result.key().level()==key().level()+1);
882 }
883 result.is_leaf_=unknown;
884 }
885 return result;
886 }
887
888 /// find the coefficients
889
890 /// this involves communication to a remote node
891 /// @return a Future<CoeffTracker> with the coefficients that key refers to
 892 Future<CoeffTracker> activate() const {
 893
894 // fast return
895 if (not impl) return Future<CoeffTracker>(CoeffTracker());
897
898 // this will return a <keyT,nodeT> from a remote node
901
902 // construct a new CoeffTracker locally
903 return impl->world.taskq.add(*const_cast<CoeffTracker*> (this),
904 &CoeffTracker::forward_ctor,*this,datum1);
905 }
906
907 private:
908 /// taskq-compatible forwarding to the ctor
909 CoeffTracker forward_ctor(const CoeffTracker& other, const datumT& datum) const {
910 return CoeffTracker(other,datum);
911 }
912
913 public:
914 /// serialization
915 template <typename Archive> void serialize(const Archive& ar) {
916 int il=int(is_leaf_);
917 ar & impl & key_ & il & coeff_ & dnorm_;
 918 is_leaf_=LeafStatus(il);
 919 }
920 };
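// A usage sketch matching the two steps documented above the class. The 6D impl f6d
// and the key childkey are placeholders, and activate() is taken as the fetch step
// described in the doc comment; none of these names are prescribed beyond this header.
//
// CoeffTracker<double,6> root(&f6d);                           // refers to the root key of f6d
// CoeffTracker<double,6> child = root.make_child(childkey);    // step 1
// Future<CoeffTracker<double,6> > fetched = child.activate();  // step 2: may communicate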
921
922 template<typename T, std::size_t NDIM>
923 std::ostream&
924 operator<<(std::ostream& s, const CoeffTracker<T,NDIM>& ct) {
925 s << ct.key() << ct.is_leaf() << " " << ct.get_impl();
926 return s;
927 }
928
929 /// FunctionImpl holds all Function state to facilitate shallow copy semantics
930
931 /// Since Function assignment and copy constructors are shallow it
932 /// greatly simplifies maintaining consistent state to have all
933 /// (permanent) state encapsulated in a single class. The state
934 /// is shared between instances using a shared_ptr<FunctionImpl>.
935 ///
936 /// The FunctionImpl inherits all of the functionality of WorldContainer
937 /// (to store the coefficients) and WorldObject<WorldContainer> (used
 938 /// for RMI and for its unique id).
939 ///
940 /// The class methods are public to avoid painful multiple friend template
941 /// declarations for Function and FunctionImpl ... but this trust should not be
942 /// abused ... NOTHING except FunctionImpl methods should mess with FunctionImplData.
943 /// The LB stuff might have to be an exception.
944 template <typename T, std::size_t NDIM>
945 class FunctionImpl : public WorldObject< FunctionImpl<T,NDIM> > {
946 private:
947 typedef WorldObject< FunctionImpl<T,NDIM> > woT; ///< Base class world object type
948 public:
949 typedef T typeT;
950 typedef FunctionImpl<T,NDIM> implT; ///< Type of this class (implementation)
951 typedef std::shared_ptr< FunctionImpl<T,NDIM> > pimplT; ///< pointer to this class
952 typedef Tensor<T> tensorT; ///< Type of tensor for anything but to hold coeffs
953 typedef Vector<Translation,NDIM> tranT; ///< Type of array holding translation
954 typedef Key<NDIM> keyT; ///< Type of key
955 typedef FunctionNode<T,NDIM> nodeT; ///< Type of node
956 typedef GenTensor<T> coeffT; ///< Type of tensor used to hold coeffs
957 typedef WorldContainer<keyT,nodeT> dcT; ///< Type of container holding the coefficients
958 typedef std::pair<const keyT,nodeT> datumT; ///< Type of entry in container
959 typedef Vector<double,NDIM> coordT; ///< Type of vector holding coordinates
960
961 //template <typename Q, int D> friend class Function;
962 template <typename Q, std::size_t D> friend class FunctionImpl;
963
 964 World& world;
 965
966 /// getter
969 const std::vector<Vector<double,NDIM> >& get_special_points()const{return special_points;}
970
971 private:
972 int k; ///< Wavelet order
973 double thresh; ///< Screening threshold
974 int initial_level; ///< Initial level for refinement
 975 int special_level; ///< Minimum level for refinement on special points
976 std::vector<Vector<double,NDIM> > special_points; ///< special points for further refinement (needed for composite functions or multiplication)
977 int max_refine_level; ///< Do not refine below this level
978 int truncate_mode; ///< 0=default=(|d|<thresh), 1=(|d|<thresh/2^n), 2=(|d|<thresh/4^n);
979 bool autorefine; ///< If true, autorefine where appropriate
980 bool truncate_on_project; ///< If true projection inserts at level n-1 not n
981 TensorArgs targs; ///< type of tensor to be used in the FunctionNodes
982
 983 const FunctionCommonData<T,NDIM>& cdata;
 984
985 std::shared_ptr< FunctionFunctorInterface<T,NDIM> > functor;
 986 TreeState tree_state;
 987
988 dcT coeffs; ///< The coefficients
989
990 // Disable the default copy constructor
 991 FunctionImpl(const FunctionImpl<T,NDIM>& p);
 992
993 public:
1002
1003 /// Initialize function impl from data in factory
 1004 FunctionImpl(const FunctionFactory<T,NDIM>& factory)
 1005 : WorldObject<implT>(factory._world)
1006 , world(factory._world)
1007 , k(factory._k)
1008 , thresh(factory._thresh)
1009 , initial_level(factory._initial_level)
1010 , special_level(factory._special_level)
1011 , special_points(factory._special_points)
1012 , max_refine_level(factory._max_refine_level)
1013 , truncate_mode(factory._truncate_mode)
1014 , autorefine(factory._autorefine)
1015 , truncate_on_project(factory._truncate_on_project)
1016// , nonstandard(false)
1017 , targs(factory._thresh,FunctionDefaults<NDIM>::get_tensor_type())
1018 , cdata(FunctionCommonData<T,NDIM>::get(k))
1019 , functor(factory.get_functor())
1020// , on_demand(factory._is_on_demand)
1021// , compressed(factory._compressed)
1022// , redundant(false)
1023 , tree_state(factory._tree_state)
1024 , coeffs(world,factory._pmap,false)
1025 //, bc(factory._bc)
1026 {
1027 // PROFILE_MEMBER_FUNC(FunctionImpl); // No need to profile this
1028 // !!! Ensure that all local state is correctly formed
1029 // before invoking process_pending for the coeffs and
1030 // for this. Otherwise, there is a race condition.
1031 MADNESS_ASSERT(k>0 && k<=MAXK);
1032
1033 bool empty = (factory._empty or is_on_demand());
1034 bool do_refine = factory._refine;
1035
1036 if (do_refine)
1037 initial_level = std::max(0,initial_level - 1);
1038
1039 if (empty) { // Do not set any coefficients at all
1040 // additional functors are only evaluated on-demand
1041 } else if (functor) { // Project function and optionally refine
 1042 insert_zero_down_to_initial_level(cdata.key0);
 1043 // set the union of the special points of functor and the ones explicitly given to FunctionFactory
1044 std::vector<coordT> functor_special_points=functor->special_points();
1045 if (!functor_special_points.empty()) special_points.insert(special_points.end(), functor_special_points.begin(), functor_special_points.end());
1046 // near special points refine as deeply as requested by the factory AND the functor
1047 special_level = std::max(special_level, functor->special_level());
1048
1049 typename dcT::const_iterator end = coeffs.end();
1050 for (typename dcT::const_iterator it=coeffs.begin(); it!=end; ++it) {
1051 if (it->second.is_leaf())
1052 woT::task(coeffs.owner(it->first), &implT::project_refine_op, it->first, do_refine,
 1053 functor->special_points());
 1054 }
1055 }
1056 else { // Set as if a zero function
1057 initial_level = 1;
 1058 insert_zero_down_to_initial_level(keyT(0));
 1059 }
1060
 1061 coeffs.process_pending();
 1062 this->process_pending();
1063 if (factory._fence && (functor || !empty)) world.gop.fence();
1064 }
1065
1066 /// Copy constructor
1067
1068 /// Allocates a \em new function in preparation for a deep copy
1069 ///
1070 /// By default takes pmap from other but can also specify a different pmap.
1071 /// Does \em not copy the coefficients ... creates an empty container.
1072 template <typename Q>
 1073 FunctionImpl(const FunctionImpl<Q,NDIM>& other,
 1074 const std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > >& pmap,
1075 bool dozero)
1076 : WorldObject<implT>(other.world)
1077 , world(other.world)
1078 , k(other.k)
1079 , thresh(other.thresh)
 1080 , initial_level(other.initial_level)
 1081 , special_level(other.special_level)
 1082 , special_points(other.special_points)
 1083 , max_refine_level(other.max_refine_level)
 1084 , truncate_mode(other.truncate_mode)
 1085 , autorefine(other.autorefine)
 1086 , truncate_on_project(other.truncate_on_project)
 1087 , targs(other.targs)
1088 , cdata(FunctionCommonData<T,NDIM>::get(k))
1089 , functor()
1090 , tree_state(other.tree_state)
1091 , coeffs(world, pmap ? pmap : other.coeffs.get_pmap())
1092 {
1093 if (dozero) {
1094 initial_level = 1;
 1095 insert_zero_down_to_initial_level(cdata.key0);
 1096 //world.gop.fence(); <<<<<<<<<<<<<<<<<<<<<< needs a fence argument
1097 }
 1098 coeffs.process_pending();
 1099 this->process_pending();
1100 }
1101
1102 virtual ~FunctionImpl() { }
1103
1104 const std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > >& get_pmap() const;
1105
1106 void replicate(bool fence=true) {
1107 coeffs.replicate(fence);
1108 }
1109
1110 void distribute(std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > > newmap) const {
1111 auto currentmap=coeffs.get_pmap();
1112 currentmap->redistribute(world,newmap);
1113 }
1114
1115
1116 /// Copy coeffs from other into self
1117 template <typename Q>
1118 void copy_coeffs(const FunctionImpl<Q,NDIM>& other, bool fence) {
 1119 typename FunctionImpl<Q,NDIM>::dcT::const_iterator end = other.coeffs.end();
 1120 for (typename FunctionImpl<Q,NDIM>::dcT::const_iterator it=other.coeffs.begin();
 1121 it!=end; ++it) {
1122 const keyT& key = it->first;
1123 const typename FunctionImpl<Q,NDIM>::nodeT& node = it->second;
1124 coeffs.replace(key,node. template convert<T>());
1125 }
1126 if (fence)
1127 world.gop.fence();
1128 }
1129
1130 /// perform inplace gaxpy: this = alpha*this + beta*other
1131 /// @param[in] alpha prefactor for this
1132 /// @param[in] beta prefactor for other
1133 /// @param[in] g the other function, reconstructed
1134 /// @return *this = alpha*this + beta*other, in either reconstructed or redundant_after_merge state
1135 template<typename Q, typename R>
1136 void gaxpy_inplace_reconstructed(const T& alpha, const FunctionImpl<Q,NDIM>& g, const R& beta, const bool fence) {
1137 // merge g's tree into this' tree
1138 gaxpy_inplace(alpha,g,beta,fence);
1140 // this->merge_trees(beta,g,alpha,fence);
1141 // tree is now redundant_after_merge
 1142 // sum down the sum coeffs into the leaves if possible to keep the state as clean as possible
1143 if (fence) sum_down(fence);
1144 }
1145
1146 /// merge the trees of this and other, while multiplying them with the alpha or beta, resp
1147
1148 /// first step in an inplace gaxpy operation for reconstructed functions; assuming the same
1149 /// distribution for this and other
1150
1151 /// on output, *this = alpha* *this + beta * other
1152 /// @param[in] alpha prefactor for this
1153 /// @param[in] beta prefactor for other
1154 /// @param[in] other the other function, reconstructed
1155 template<typename Q, typename R>
1156 void merge_trees(const T alpha, const FunctionImpl<Q,NDIM>& other, const R beta, const bool fence=true) {
1157 MADNESS_ASSERT(get_pmap() == other.get_pmap());
1160 }
1161
1162 /// merge the trees of this and other, while multiplying them with the alpha or beta, resp
1163
1164 /// result and rhs do not have to have the same distribution or live in the same world
1165 /// result+=alpha* this
1166 /// @param[in] alpha prefactor for this
1167 template<typename Q, typename R>
1168 void accumulate_trees(FunctionImpl<Q,NDIM>& result, const R alpha, const bool fence=true) const {
1170 }
1171
1172 /// perform: this= alpha*f + beta*g, invoked by result
1173
1174 /// f and g are reconstructed, so we can save on the compress operation,
1175 /// walk down the joint tree, and add leaf coefficients; effectively refines
1176 /// to common finest level.
1177
1178 /// nothing returned, but leaves this's tree reconstructed and as sum of f and g
1179 /// @param[in] alpha prefactor for f
1180 /// @param[in] f first addend
1181 /// @param[in] beta prefactor for g
1182 /// @param[in] g second addend
1183 void gaxpy_oop_reconstructed(const double alpha, const implT& f,
1184 const double beta, const implT& g, const bool fence);
1185
1186 /// functor for the gaxpy_inplace method
1187 template <typename Q, typename R>
 1188 struct do_gaxpy_inplace {
 1189 typedef Range<typename FunctionImpl<Q,NDIM>::dcT::const_iterator> rangeT;
 1190 FunctionImpl<T,NDIM>* f; ///< the current function impl
 1191 T alpha; ///< prefactor for the current function impl
 1192 R beta; ///< prefactor for the other function impl
1193 do_gaxpy_inplace() = default;
 1194 do_gaxpy_inplace(FunctionImpl<T,NDIM>* f, T alpha, R beta) : f(f), alpha(alpha), beta(beta) {}
 1195 bool operator()(typename rangeT::iterator& it) const {
1196 const keyT& key = it->first;
1197 const FunctionNode<Q,NDIM>& other_node = it->second;
1198 // Use send to get write accessor and automated construction if missing
1199 f->coeffs.send(key, &nodeT:: template gaxpy_inplace<Q,R>, alpha, other_node, beta);
1200 return true;
1201 }
1202 template <typename Archive>
1203 void serialize(Archive& ar) {
1204 ar & f & alpha & beta;
1205 }
1206 };
1207
1208 /// Inplace general bilinear operation
1209
1210 /// this's world can differ from other's world
1211 /// this = alpha * this + beta * other
1212 /// @param[in] alpha prefactor for the current function impl
1213 /// @param[in] other the other function impl
1214 /// @param[in] beta prefactor for other
1215 template <typename Q, typename R>
1216 void gaxpy_inplace(const T& alpha,const FunctionImpl<Q,NDIM>& other, const R& beta, bool fence) {
1217// MADNESS_ASSERT(get_pmap() == other.get_pmap());
1218 if (alpha != T(1.0)) scale_inplace(alpha,false);
 1219 typedef Range<typename FunctionImpl<Q,NDIM>::dcT::const_iterator> rangeT;
 1220 typedef do_gaxpy_inplace<Q,R> opT;
1221 other.world.taskq. template for_each<rangeT,opT>(rangeT(other.coeffs.begin(), other.coeffs.end()), opT(this, T(1.0), beta));
1222 if (fence)
1223 other.world.gop.fence();
1224 }
1225
1226 // loads a function impl from persistence
1227 // @param[in] ar the archive where the function impl is stored
1228 template <typename Archive>
1229 void load(Archive& ar) {
1230 // WE RELY ON K BEING STORED FIRST
1231 int kk = 0;
1232 ar & kk;
1233
1234 MADNESS_ASSERT(kk==k);
1235
1236 // note that functor should not be (re)stored
1238 & autorefine & truncate_on_project & tree_state;//nonstandard & compressed ; //& bc;
1239
1240 ar & coeffs;
1241 world.gop.fence();
1242 }
1243
1244 // saves a function impl to persistence
1245 // @param[in] ar the archive where the function impl is to be stored
1246 template <typename Archive>
1247 void store(Archive& ar) {
1248 // WE RELY ON K BEING STORED FIRST
1249
1250 // note that functor should not be (re)stored
1252 & autorefine & truncate_on_project & tree_state;//nonstandard & compressed ; //& bc;
1253
1254 ar & coeffs;
1255 world.gop.fence();
1256 }
1257
1258 /// Returns true if the function is compressed.
1259 bool is_compressed() const;
1260
 1261 /// Returns true if the function is reconstructed.
1262 bool is_reconstructed() const;
1263
1264 /// Returns true if the function is redundant.
1265 bool is_redundant() const;
1266
1267 /// Returns true if the function is redundant_after_merge.
1268 bool is_redundant_after_merge() const;
1269
1270 bool is_nonstandard() const;
1271
1272 bool is_nonstandard_with_leaves() const;
1273
1274 bool is_on_demand() const;
1275
1276 bool has_leaves() const;
1277
1278 void set_tree_state(const TreeState& state) {
1279 tree_state=state;
1280 }
1281
1283
1284 void set_functor(const std::shared_ptr<FunctionFunctorInterface<T,NDIM> > functor1);
1285
1286 std::shared_ptr<FunctionFunctorInterface<T,NDIM> > get_functor();
1287
1288 std::shared_ptr<FunctionFunctorInterface<T,NDIM> > get_functor() const;
1289
1290 void unset_functor();
1291
1292
 1293 TensorArgs get_tensor_args() const;
 1294
1296 void set_tensor_args(const TensorArgs& t);
1297
1298 double get_thresh() const;
1299
1300 void set_thresh(double value);
1301
1302 bool get_autorefine() const;
1303
1304 void set_autorefine(bool value);
1305
1306 int get_k() const;
1307
1308 const dcT& get_coeffs() const;
1309
1310 dcT& get_coeffs();
1311
 1312 const FunctionCommonData<T,NDIM>& get_cdata() const;
 1313
1314 void accumulate_timer(const double time) const; // !!!!!!!!!!!! REDUNDANT !!!!!!!!!!!!!!!
1315
1316 void print_timer() const;
1317
1318 void reset_timer();
1319
1320 /// Adds a constant to the function. Local operation, optional fence
1321
1322 /// In scaling function basis must add value to first polyn in
1323 /// each box with appropriate scaling for level. In wavelet basis
1324 /// need only add at level zero.
1325 /// @param[in] t the scalar to be added
1326 void add_scalar_inplace(T t, bool fence);
1327
1328 /// Initialize nodes to zero function at initial_level of refinement.
1329
1330 /// Works for either basis. No communication.
1331 void insert_zero_down_to_initial_level(const keyT& key);
1332
1333 /// Truncate according to the threshold with optional global fence
1334
1335 /// If thresh<=0 the default value of this->thresh is used
1336 /// @param[in] tol the truncation tolerance
1337 void truncate(double tol, bool fence);
1338
1339 /// Returns true if after truncation this node has coefficients
1340
1341 /// Assumed to be invoked on process owning key. Possible non-blocking
1342 /// communication.
1343 /// @param[in] key the key of the current function node
1344 Future<bool> truncate_spawn(const keyT& key, double tol);
1345
1346 /// Actually do the truncate operation
1347 /// @param[in] key the key to the current function node being evaluated for truncation
1348 /// @param[in] tol the tolerance for thresholding
1349 /// @param[in] v vector of Future<bool>'s that specify whether the current nodes children have coeffs
1350 bool truncate_op(const keyT& key, double tol, const std::vector< Future<bool> >& v);
1351
1352 /// Evaluate function at quadrature points in the specified box
1353
1354 /// @param[in] key the key indicating where the quadrature points are located
1355 /// @param[in] f the interface to the elementary function
1356 /// @param[in] qx quadrature points on a level=0 box
1357 /// @param[out] fval values
1358 void fcube(const keyT& key, const FunctionFunctorInterface<T,NDIM>& f, const Tensor<double>& qx, tensorT& fval) const;
1359
1360 /// Evaluate function at quadrature points in the specified box
1361
1362 /// @param[in] key the key indicating where the quadrature points are located
1363 /// @param[in] f the interface to the elementary function
1364 /// @param[in] qx quadrature points on a level=0 box
1365 /// @param[out] fval values
1366 void fcube(const keyT& key, T (*f)(const coordT&), const Tensor<double>& qx, tensorT& fval) const;
1367
1368 /// Returns cdata.key0
1369 const keyT& key0() const;
1370
1371 /// Prints the coeffs tree of the current function impl
1372 /// @param[in] maxlevel the maximum level of the tree for printing
1373 /// @param[out] os the ostream to where the output is sent
1374 void print_tree(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1375
1376 /// Functor for the do_print_tree method
1377 void do_print_tree(const keyT& key, std::ostream& os, Level maxlevel) const;
1378
1379 /// Prints the coeffs tree of the current function impl (using GraphViz)
1380 /// @param[in] maxlevel the maximum level of the tree for printing
1381 /// @param[out] os the ostream to where the output is sent
1382 void print_tree_graphviz(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1383
1384 /// Functor for the do_print_tree method (using GraphViz)
1385 void do_print_tree_graphviz(const keyT& key, std::ostream& os, Level maxlevel) const;
1386
1387 /// Same as print_tree() but in JSON format
1388 /// @param[out] os the ostream to where the output is sent
1389 /// @param[in] maxlevel the maximum level of the tree for printing
1390 void print_tree_json(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1391
1392 /// Functor for the do_print_tree_json method
1393 void do_print_tree_json(const keyT& key, std::multimap<Level, std::tuple<tranT, std::string>>& data, Level maxlevel) const;
1394
1395 /// convert a number [0,limit] to a hue color code [blue,red],
1396 /// or, if log is set, a number [1.e-10,limit]
1398 double limit;
1399 bool log;
1400 static double lower() {return 1.e-10;};
1402 do_convert_to_color(const double limit, const bool log) : limit(limit), log(log) {}
1403 double operator()(double val) const {
1404 double color=0.0;
1405
1406 if (log) {
1407 double val2=log10(val) - log10(lower()); // will yield >0.0
1408 double upper=log10(limit) -log10(lower());
1409 val2=0.7-(0.7/upper)*val2;
1410 color= std::max(0.0,val2);
1411 color= std::min(0.7,color);
1412 } else {
1413 double hue=0.7-(0.7/limit)*(val);
1414 color= std::max(0.0,hue);
1415 }
1416 return color;
1417 }
1418 };
1419
1420
1421 /// Print a plane ("xy", "xz", or "yz") containing the point x to file
1422
1423 /// works for all dimensions; we walk through the tree, and if a leaf node
1424 /// inside the sub-cell touches the plane we print it in pstricks format
1425 void print_plane(const std::string filename, const int xaxis, const int yaxis, const coordT& el2);
1426
1427 /// collect the data for a plot of the MRA structure locally on each node
1428
1429 /// @param[in] xaxis the x-axis in the plot (can be any axis of the MRA box)
1430 /// @param[in] yaxis the y-axis in the plot (can be any axis of the MRA box)
1431 /// @param[in] el2 needs a description
1432 /// \todo Provide a description for el2
1433 Tensor<double> print_plane_local(const int xaxis, const int yaxis, const coordT& el2);
1434
1435 /// Functor for the print_plane method
1436 /// @param[in] filename the filename for the output
1437 /// @param[in] plotinfo plotting parameters
1438 /// @param[in] xaxis the x-axis in the plot (can be any axis of the MRA box)
1439 /// @param[in] yaxis the y-axis in the plot (can be any axis of the MRA box)
1440 void do_print_plane(const std::string filename, std::vector<Tensor<double> > plotinfo,
1441 const int xaxis, const int yaxis, const coordT el2);
1442
1443 /// print the grid (the roots of the quadrature of each leaf box)
1444 /// of this function in user xyz coordinates
1445 /// @param[in] filename the filename for the output
1446 void print_grid(const std::string filename) const;
1447
1448 /// return the keys of the local leaf boxes
1449 std::vector<keyT> local_leaf_keys() const;
1450
1451 /// print the grid in xyz format
1452
1453 /// the quadrature points and the key information will be written to file,
1454 /// @param[in] filename where the quadrature points will be written to
1455 /// @param[in] keys all leaf keys
1456 void do_print_grid(const std::string filename, const std::vector<keyT>& keys) const;
1457
1458 /// read data from a grid
1459
1460 /// @param[in] keyfile file with keys and grid points for each key
1461 /// @param[in] gridfile file with grid points, w/o key, but with same ordering
1462 /// @param[in] vnuc_functor subtract the values of this functor if regularization is needed
1463 template<size_t FDIM>
1464 typename std::enable_if<NDIM==FDIM>::type
1465 read_grid(const std::string keyfile, const std::string gridfile,
1466 std::shared_ptr< FunctionFunctorInterface<double,NDIM> > vnuc_functor) {
1467
1468 std::ifstream kfile(keyfile.c_str());
1469 std::ifstream gfile(gridfile.c_str());
1470 std::string line;
1471
1472 long ndata,ndata1;
1473 if (not (std::getline(kfile,line))) MADNESS_EXCEPTION("failed reading 1st line of key data",0);
1474 if (not (std::istringstream(line) >> ndata)) MADNESS_EXCEPTION("failed reading k",0);
1475 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 1st line of grid data",0);
1476 if (not (std::istringstream(line) >> ndata1)) MADNESS_EXCEPTION("failed reading k",0);
1477 MADNESS_CHECK(ndata==ndata1);
1478 if (not (std::getline(kfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of key data",0);
1479 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of grid data",0);
1480
1481 // the quadrature points in simulation coordinates of the root node
1482 const Tensor<double> qx=cdata.quad_x;
1483 const size_t npt = qx.dim(0);
1484
1485 // the number of coordinates (grid point tuples) per box ({x1},{x2},{x3},..,{xNDIM})
1486 long npoints=power<NDIM>(npt);
1487 // the number of boxes
1488 long nboxes=ndata/npoints;
1489 MADNESS_ASSERT(nboxes*npoints==ndata);
1490 print("reading ",nboxes,"boxes from file",gridfile,keyfile);
1491
1492 // these will be the data
1493 Tensor<T> values(cdata.vk,false);
1494
1495 int ii=0;
1496 std::string gline,kline;
1497 // while (1) {
1498 while (std::getline(kfile,kline)) {
1499
1500 double x,y,z,x1,y1,z1,val;
1501
1502 // get the key
1503 long nn;
1504 Translation l1,l2,l3;
1505 // line looks like: # key: n l1 l2 l3
1506 kline.erase(0,7);
1507 std::stringstream(kline) >> nn >> l1 >> l2 >> l3;
1508 // kfile >> s >> nn >> l1 >> l2 >> l3;
1509 const Vector<Translation,3> ll{ l1,l2,l3 };
1510 Key<3> key(nn,ll);
1511
1512 // this is borrowed from fcube
1513 const Vector<Translation,3>& l = key.translation();
1514 const Level n = key.level();
1515 const double h = std::pow(0.5,double(n));
1516 coordT c; // will hold the point in user coordinates
 1517 const Tensor<double>& cell_width=FunctionDefaults<NDIM>::get_cell_width();
 1518 const Tensor<double>& cell=FunctionDefaults<NDIM>::get_cell();
 1519
1520
1521 if (NDIM == 3) {
1522 for (size_t i=0; i<npt; ++i) {
1523 c[0] = cell(0,0) + h*cell_width[0]*(l[0] + qx(i)); // x
1524 for (size_t j=0; j<npt; ++j) {
1525 c[1] = cell(1,0) + h*cell_width[1]*(l[1] + qx(j)); // y
1526 for (size_t k=0; k<npt; ++k) {
1527 c[2] = cell(2,0) + h*cell_width[2]*(l[2] + qx(k)); // z
1528 // fprintf(pFile,"%18.12f %18.12f %18.12f\n",c[0],c[1],c[2]);
1529 auto& success1 = std::getline(gfile,gline); MADNESS_CHECK(success1);
1530 auto& success2 = std::getline(kfile,kline); MADNESS_CHECK(success2);
1531 std::istringstream(gline) >> x >> y >> z >> val;
1532 std::istringstream(kline) >> x1 >> y1 >> z1;
1533 MADNESS_CHECK(std::fabs(x-c[0])<1.e-4);
1534 MADNESS_CHECK(std::fabs(x1-c[0])<1.e-4);
1535 MADNESS_CHECK(std::fabs(y-c[1])<1.e-4);
1536 MADNESS_CHECK(std::fabs(y1-c[1])<1.e-4);
1537 MADNESS_CHECK(std::fabs(z-c[2])<1.e-4);
1538 MADNESS_CHECK(std::fabs(z1-c[2])<1.e-4);
1539
1540 // regularize if a functor is given
1541 if (vnuc_functor) val-=(*vnuc_functor)(c);
1542 values(i,j,k)=val;
1543 }
1544 }
1545 }
1546 } else {
1547 MADNESS_EXCEPTION("only NDIM=3 in print_grid",0);
1548 }
1549
1550 // insert the new leaf node
1551 const bool has_children=false;
1552 coeffT coeff=coeffT(this->values2coeffs(key,values),targs);
1553 nodeT node(coeff,has_children);
1554 coeffs.replace(key,node);
 1555 const_cast<dcT&>(coeffs).send(key.parent(), &FunctionNode<T,NDIM>::set_has_children_recursive, coeffs, key.parent());
 1556 ii++;
1557 }
1558
1559 kfile.close();
1560 gfile.close();
1561 MADNESS_CHECK(ii==nboxes);
1562
1563 }
1564
1565
1566 /// read data from a grid
1567
1568 /// @param[in] gridfile file with keys and grid points and values for each key
1569 /// @param[in] vnuc_functor subtract the values of this functor if regularization is needed
1570 template<size_t FDIM>
1571 typename std::enable_if<NDIM==FDIM>::type
1572 read_grid2(const std::string gridfile,
1573 std::shared_ptr< FunctionFunctorInterface<double,NDIM> > vnuc_functor) {
1574
1575 std::ifstream gfile(gridfile.c_str());
1576 std::string line;
1577
1578 long ndata;
1579 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 1st line of grid data",0);
1580 if (not (std::istringstream(line) >> ndata)) MADNESS_EXCEPTION("failed reading k",0);
1581 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of grid data",0);
1582
1583 // the quadrature points in simulation coordinates of the root node
1584 const Tensor<double> qx=cdata.quad_x;
1585 const size_t npt = qx.dim(0);
1586
1587 // the number of coordinates (grid point tuples) per box ({x1},{x2},{x3},..,{xNDIM})
1588 long npoints=power<NDIM>(npt);
1589 // the number of boxes
1590 long nboxes=ndata/npoints;
1591 MADNESS_CHECK(nboxes*npoints==ndata);
1592 print("reading ",nboxes,"boxes from file",gridfile);
1593
1594 // these will be the data
1595 Tensor<T> values(cdata.vk,false);
1596
1597 int ii=0;
1598 std::string gline;
1599 // while (1) {
1600 while (std::getline(gfile,gline)) {
1601
1602 double x1,y1,z1,val;
1603
1604 // get the key
1605 long nn;
1606 Translation l1,l2,l3;
1607 // line looks like: # key: n l1 l2 l3
1608 gline.erase(0,7);
1609 std::stringstream(gline) >> nn >> l1 >> l2 >> l3;
1610 const Vector<Translation,3> ll{ l1,l2,l3 };
1611 Key<3> key(nn,ll);
1612
1613 // this is borrowed from fcube
1614 const Vector<Translation,3>& l = key.translation();
1615 const Level n = key.level();
1616 const double h = std::pow(0.5,double(n));
1617 coordT c; // will hold the point in user coordinates
 1618 const Tensor<double>& cell_width=FunctionDefaults<NDIM>::get_cell_width();
 1619 const Tensor<double>& cell=FunctionDefaults<NDIM>::get_cell();
 1620
1621
1622 if (NDIM == 3) {
1623 for (int i=0; i<npt; ++i) {
1624 c[0] = cell(0,0) + h*cell_width[0]*(l[0] + qx(i)); // x
1625 for (int j=0; j<npt; ++j) {
1626 c[1] = cell(1,0) + h*cell_width[1]*(l[1] + qx(j)); // y
1627 for (int k=0; k<npt; ++k) {
1628 c[2] = cell(2,0) + h*cell_width[2]*(l[2] + qx(k)); // z
1629
1630 auto& success = std::getline(gfile,gline);
1631 MADNESS_CHECK(success);
1632 std::istringstream(gline) >> x1 >> y1 >> z1 >> val;
1633 MADNESS_CHECK(std::fabs(x1-c[0])<1.e-4);
1634 MADNESS_CHECK(std::fabs(y1-c[1])<1.e-4);
1635 MADNESS_CHECK(std::fabs(z1-c[2])<1.e-4);
1636
1637 // regularize if a functor is given
1638 if (vnuc_functor) val-=(*vnuc_functor)(c);
1639 values(i,j,k)=val;
1640 }
1641 }
1642 }
1643 } else {
1644 MADNESS_EXCEPTION("only NDIM=3 in print_grid",0);
1645 }
1646
1647 // insert the new leaf node
1648 const bool has_children=false;
1649 coeffT coeff=coeffT(this->values2coeffs(key,values),targs);
1650 nodeT node(coeff,has_children);
1651 coeffs.replace(key,node);
1652 const_cast<dcT&>(coeffs).send(key.parent(),
 1653 &FunctionNode<T,NDIM>::set_has_children_recursive,
 1654 coeffs, key.parent());
1655 ii++;
1656 }
1657
1658 gfile.close();
1659 MADNESS_CHECK(ii==nboxes);
1660
1661 }
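// For orientation, the file layout expected by read_grid2() (and, split across the key
// and grid files, by read_grid()) as implied by the parsing above is:
//
// <ndata>                 total number of data lines
// <header line>           skipped
// # key: n l1 l2 l3       one line per box
// x y z value             npt^3 lines per box, in i,j,k order
// # key: ...
// ...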
1662
1663
1664 /// Compute by projection the scaling function coeffs in specified box
1665 /// @param[in] key the key to the current function node (box)
1666 tensorT project(const keyT& key) const;
1667
1668 /// Returns the truncation threshold according to truncate_method
1669
1670 /// here is our handwaving argument:
1671 /// this threshold will give each FunctionNode an error of less than tol. The
1672 /// total error can then be as high as sqrt(#nodes) * tol. Therefore in order
 1673 /// to account for higher dimensions: divide tol by roughly the square root of the number
 1674 /// of siblings (2^NDIM) that acquire a large error when we refine along a deep
 1675 /// branch of the tree.
1676 double truncate_tol(double tol, const keyT& key) const;
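// Schematically, the level dependence selected by truncate_mode (see the member
// documentation above) for a box at level n is:
//
// truncate_mode 0 : tol_n = tol
// truncate_mode 1 : tol_n = tol / 2^n
// truncate_mode 2 : tol_n = tol / 4^n
//
// The authoritative formula is in the out-of-line definition of truncate_tol.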
1677
1678
1679 /// Returns patch referring to coeffs of child in parent box
1680 /// @param[in] child the key to the child function node (box)
1681 std::vector<Slice> child_patch(const keyT& child) const;
1682
1683 /// Projection with optional refinement w/ special points
1684 /// @param[in] key the key to the current function node (box)
1685 /// @param[in] do_refine should we continue refinement?
1686 /// @param[in] specialpts vector of special points in the function where we need
1687 /// to refine at a much finer level
1688 void project_refine_op(const keyT& key, bool do_refine,
1689 const std::vector<Vector<double,NDIM> >& specialpts);
1690
1691 /// Compute the Legendre scaling functions for multiplication
1692
1693 /// Evaluate parent polyn at quadrature points of a child. The prefactor of
1694 /// 2^n/2 is included. The tensor must be preallocated as phi(k,npt).
1695 /// Refer to the implementation notes for more info.
1696 /// @todo Robert please verify this comment. I don't understand this method.
1697 /// @param[in] np level of the parent function node (box)
1698 /// @param[in] nc level of the child function node (box)
1699 /// @param[in] lp translation of the parent function node (box)
1700 /// @param[in] lc translation of the child function node (box)
1701        /// @param[out] phi tensor of the Legendre scaling functions
1702 void phi_for_mul(Level np, Translation lp, Level nc, Translation lc, Tensor<double>& phi) const;
1703
1704 /// Directly project parent coeffs to child coeffs
1705
1706 /// Currently used by diff, but other uses can be anticipated
1707
1708 /// @todo is this documentation correct?
1709 /// @param[in] child the key whose coeffs we are requesting
1710 /// @param[in] parent the (leaf) key of our function
1711 /// @param[in] s the (leaf) coeffs belonging to parent
1712 /// @return coeffs
1713 const coeffT parent_to_child(const coeffT& s, const keyT& parent, const keyT& child) const;
1714
1715 /// Directly project parent NS coeffs to child NS coeffs
1716
1717 /// return the NS coefficients if parent and child are the same,
1718 /// or construct sum coeffs from the parents and "add" zero wavelet coeffs
1719 /// @param[in] child the key whose coeffs we are requesting
1720 /// @param[in] parent the (leaf) key of our function
1721 /// @param[in] coeff the (leaf) coeffs belonging to parent
1722 /// @return coeffs in NS form
1723 coeffT parent_to_child_NS(const keyT& child, const keyT& parent,
1724 const coeffT& coeff) const;
1725
1726 /// Return the values when given the coeffs in scaling function basis
1727 /// @param[in] key the key of the function node (box)
1728 /// @param[in] coeff the tensor of scaling function coefficients for function node (box)
1729 /// @return function values for function node (box)
1730 template <typename Q>
1731 GenTensor<Q> coeffs2values(const keyT& key, const GenTensor<Q>& coeff) const {
1732 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1733 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1734 return transform(coeff,cdata.quad_phit).scale(scale);
1735 }
1736
1737 /// convert S or NS coeffs to values on a 2k grid of the children
1738
1739 /// equivalent to unfiltering the NS coeffs and then converting all child S-coeffs
1740 /// to values in their respective boxes. If only S coeffs are provided d coeffs are
1741 /// assumed to be zero. Reverse operation to values2NScoeffs().
1742 /// @param[in] key the key of the current S or NS coeffs, level n
1743 /// @param[in] coeff coeffs in S or NS form; if S then d coeffs are assumed zero
1744 /// @param[in] s_only sanity check to avoid unintended discard of d coeffs
1745 /// @return function values on the quadrature points of the children of child (!)
1746 template <typename Q>
1747        GenTensor<Q> NScoeffs2values(const keyT& key, const GenTensor<Q>& coeff,
1748                const bool s_only) const {
1749 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1750
1751 // sanity checks
1752 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) == s_only);
1753 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) or (coeff.dim(0)==2*this->get_k()));
1754
1755 // this is a block-diagonal matrix with the quadrature points on the diagonal
1756 Tensor<double> quad_phit_2k(2*cdata.k,2*cdata.npt);
1757 quad_phit_2k(cdata.s[0],cdata.s[0])=cdata.quad_phit;
1758 quad_phit_2k(cdata.s[1],cdata.s[1])=cdata.quad_phit;
1759
1760 // the transformation matrix unfilters (cdata.hg) and transforms to values in one step
1761 const Tensor<double> transf = (s_only)
1762 ? inner(cdata.hg(Slice(0,k-1),_),quad_phit_2k) // S coeffs
1763 : inner(cdata.hg,quad_phit_2k); // NS coeffs
1764
1765 // increment the level since the coeffs2values part happens on level n+1
1766 const double scale = pow(2.0,0.5*NDIM*(key.level()+1))/
1767                    sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1768
1769 return transform(coeff,transf).scale(scale);
1770 }
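        /// Usage sketch (hypothetical, for illustration only): NScoeffs2values() and
        /// values2NScoeffs() are documented as mutual inverses on the 2k grid of the
        /// children, so a roundtrip should reproduce the NS coeffs up to round-off:
        /// \code
        /// GenTensor<double> vals = this->NScoeffs2values(key, nscoeff, false);
        /// GenTensor<double> back = this->values2NScoeffs(key, vals);
        /// double err = (back.full_tensor_copy() - nscoeff.full_tensor_copy()).normf();
        /// \endcode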
1771
1772 /// Compute the function values for multiplication
1773
1774 /// Given S or NS coefficients from a parent cell, compute the value of
1775 /// the functions at the quadrature points of a child
1776 /// currently restricted to special cases
1777 /// @param[in] child key of the box in which we compute values
1778 /// @param[in] parent key of the parent box holding the coeffs
1779 /// @param[in] coeff coeffs of the parent box
1780 /// @param[in] s_only sanity check to avoid unintended discard of d coeffs
1781 /// @return function values on the quadrature points of the children of child (!)
1782 template <typename Q>
1783 GenTensor<Q> NS_fcube_for_mul(const keyT& child, const keyT& parent,
1784 const GenTensor<Q>& coeff, const bool s_only) const {
1785 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1786
1787 // sanity checks
1788 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) == s_only);
1789 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) or (coeff.dim(0)==2*this->get_k()));
1790
1791 // fast return if possible
1792 // if (child.level()==parent.level()) return NScoeffs2values(child,coeff,s_only);
1793
1794 if (s_only) {
1795
1796 Tensor<double> quad_phi[NDIM];
1797 // tmp tensor
1798 Tensor<double> phi1(cdata.k,cdata.npt);
1799
1800 for (std::size_t d=0; d<NDIM; ++d) {
1801
1802 // input is S coeffs (dimension k), output is values on 2*npt grid points
1803 quad_phi[d]=Tensor<double>(cdata.k,2*cdata.npt);
1804
1805 // for both children of "child" evaluate the Legendre polynomials
1806 // first the left child on level n+1 and translations 2l
1807 phi_for_mul(parent.level(),parent.translation()[d],
1808 child.level()+1, 2*child.translation()[d], phi1);
1809 quad_phi[d](_,Slice(0,k-1))=phi1;
1810
1811 // next the right child on level n+1 and translations 2l+1
1812 phi_for_mul(parent.level(),parent.translation()[d],
1813 child.level()+1, 2*child.translation()[d]+1, phi1);
1814 quad_phi[d](_,Slice(k,2*k-1))=phi1;
1815 }
1816
1817 const double scale = 1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1818 return general_transform(coeff,quad_phi).scale(scale);
1819 }
1820 MADNESS_EXCEPTION("you should not be here in NS_fcube_for_mul",1);
1821 return GenTensor<Q>();
1822 }
1823
1824        /// convert function values of a child generation directly to NS coeffs
1825
1826 /// equivalent to converting the function values to 2^NDIM S coeffs and then
1827 /// filtering them to NS coeffs. Reverse operation to NScoeffs2values().
1828 /// @param[in] key key of the parent of the generation
1829 /// @param[in] values tensor holding function values of the 2^NDIM children of key
1830 /// @return NS coeffs belonging to key
1831 template <typename Q>
1832 GenTensor<Q> values2NScoeffs(const keyT& key, const GenTensor<Q>& values) const {
1833 //PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1834
1835 // sanity checks
1836 MADNESS_ASSERT(values.dim(0)==2*this->get_k());
1837
1838 // this is a block-diagonal matrix with the quadrature points on the diagonal
1839 Tensor<double> quad_phit_2k(2*cdata.npt,2*cdata.k);
1840 quad_phit_2k(cdata.s[0],cdata.s[0])=cdata.quad_phiw;
1841 quad_phit_2k(cdata.s[1],cdata.s[1])=cdata.quad_phiw;
1842
1843            // the transformation matrix converts the values to child S coeffs and filters (cdata.hgT) to NS coeffs in one step
1844 const Tensor<double> transf=inner(quad_phit_2k,cdata.hgT);
1845
1846 // increment the level since the values2coeffs part happens on level n+1
1847 const double scale = pow(0.5,0.5*NDIM*(key.level()+1))
1848                    *sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1849
1850 return transform(values,transf).scale(scale);
1851 }
1852
1853        /// Return the function values at the quadrature points when given the scaling function coeffs
1854 /// @param[in] key the key of the function node (box)
1855 /// @return function values for function node (box)
1856 template <typename Q>
1857 Tensor<Q> coeffs2values(const keyT& key, const Tensor<Q>& coeff) const {
1858 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1859 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1860 return transform(coeff,cdata.quad_phit).scale(scale);
1861 }
1862
1863 template <typename Q>
1864 GenTensor<Q> values2coeffs(const keyT& key, const GenTensor<Q>& values) const {
1865 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1866 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1867 return transform(values,cdata.quad_phiw).scale(scale);
1868 }
1869
1870 template <typename Q>
1871 Tensor<Q> values2coeffs(const keyT& key, const Tensor<Q>& values) const {
1872 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1873 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1874 return transform(values,cdata.quad_phiw).scale(scale);
1875 }
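        /// Note: coeffs2values() and values2coeffs() are mutual inverses; the scale
        /// factors 2^(n*NDIM/2)/sqrt(V) and 2^(-n*NDIM/2)*sqrt(V) cancel, as do the
        /// transforms with quad_phit and quad_phiw. Illustrative sanity sketch:
        /// \code
        /// Tensor<double> c2 = values2coeffs(key, coeffs2values(key, c));
        /// // c2 should equal c up to round-off
        /// \endcode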
1876
1877 /// Compute the function values for multiplication
1878
1879 /// Given coefficients from a parent cell, compute the value of
1880 /// the functions at the quadrature points of a child
1881 /// @param[in] child the key for the child function node (box)
1882 /// @param[in] parent the key for the parent function node (box)
1883 /// @param[in] coeff the coefficients of scaling function basis of the parent box
1884 template <typename Q>
1885 Tensor<Q> fcube_for_mul(const keyT& child, const keyT& parent, const Tensor<Q>& coeff) const {
1886 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1887 if (child.level() == parent.level()) {
1888 return coeffs2values(parent, coeff);
1889 }
1890 else if (child.level() < parent.level()) {
1891 MADNESS_EXCEPTION("FunctionImpl: fcube_for_mul: child-parent relationship bad?",0);
1892 }
1893 else {
1894 Tensor<double> phi[NDIM];
1895 for (std::size_t d=0; d<NDIM; ++d) {
1896 phi[d] = Tensor<double>(cdata.k,cdata.npt);
1897 phi_for_mul(parent.level(),parent.translation()[d],
1898 child.level(), child.translation()[d], phi[d]);
1899 }
1900                return general_transform(coeff,phi).scale(1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume()));
1901 }
1902 }
1903
1904
1905 /// Compute the function values for multiplication
1906
1907 /// Given coefficients from a parent cell, compute the value of
1908 /// the functions at the quadrature points of a child
1909 /// @param[in] child the key for the child function node (box)
1910 /// @param[in] parent the key for the parent function node (box)
1911 /// @param[in] coeff the coefficients of scaling function basis of the parent box
1912 template <typename Q>
1913 GenTensor<Q> fcube_for_mul(const keyT& child, const keyT& parent, const GenTensor<Q>& coeff) const {
1914 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1915 if (child.level() == parent.level()) {
1916 return coeffs2values(parent, coeff);
1917 }
1918 else if (child.level() < parent.level()) {
1919 MADNESS_EXCEPTION("FunctionImpl: fcube_for_mul: child-parent relationship bad?",0);
1920 }
1921 else {
1922 Tensor<double> phi[NDIM];
1923 for (size_t d=0; d<NDIM; d++) {
1924 phi[d] = Tensor<double>(cdata.k,cdata.npt);
1925 phi_for_mul(parent.level(),parent.translation()[d],
1926 child.level(), child.translation()[d], phi[d]);
1927 }
1928 return general_transform(coeff,phi).scale(1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume()));
1929 }
1930 }
1931
1932
1933 /// Functor for the mul method
1934 template <typename L, typename R>
1935 void do_mul(const keyT& key, const Tensor<L>& left, const std::pair< keyT, Tensor<R> >& arg) {
1936 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1937 const keyT& rkey = arg.first;
1938 const Tensor<R>& rcoeff = arg.second;
1939 //madness::print("do_mul: r", rkey, rcoeff.size());
1940 Tensor<R> rcube = fcube_for_mul(key, rkey, rcoeff);
1941 //madness::print("do_mul: l", key, left.size());
1942 Tensor<L> lcube = fcube_for_mul(key, key, left);
1943
1944 Tensor<T> tcube(cdata.vk,false);
1945 TERNARY_OPTIMIZED_ITERATOR(T, tcube, L, lcube, R, rcube, *_p0 = *_p1 * *_p2;);
1946 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1947 tcube = transform(tcube,cdata.quad_phiw).scale(scale);
1948 coeffs.replace(key, nodeT(coeffT(tcube,targs),false));
1949 }
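        /// The flow of do_mul() above, spelled out (a description of the existing
        /// code, no additional functionality): both operands are mapped to function
        /// values on the quadrature grid of box "key", multiplied pointwise, and the
        /// product is transformed back to scaling function coefficients, i.e.
        /// \code
        /// // lcube = values of left  in box key (fcube_for_mul)
        /// // rcube = values of right in box key (fcube_for_mul)
        /// // tcube(i,j,...) = lcube(i,j,...) * rcube(i,j,...)
        /// // coeffs = transform(tcube, quad_phiw) * 2^(-n*NDIM/2) * sqrt(cell_volume)
        /// \endcode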
1950
1951
1952 /// multiply the values of two coefficient tensors using a custom number of grid points
1953
1954 /// note both coefficient tensors have to refer to the same key!
1955 /// @param[in] c1 a tensor holding coefficients
1956 /// @param[in] c2 another tensor holding coeffs
1957 /// @param[in] npt number of grid points (optional, default is cdata.npt)
1958 /// @return coefficient tensor holding the product of the values of c1 and c2
1959 template<typename R>
1960        Tensor<TENSOR_RESULT_TYPE(T,R)> mul(const Tensor<T>& c1, const Tensor<R>& c2,
1961                const int npt, const keyT& key) const {
1962 typedef TENSOR_RESULT_TYPE(T,R) resultT;
1963
1964            const FunctionCommonData<T,NDIM>& cdata2=FunctionCommonData<T,NDIM>::get(npt);
1965
1966 // construct a tensor with the npt coeffs
1967 Tensor<T> c11(cdata2.vk), c22(cdata2.vk);
1968 c11(this->cdata.s0)=c1;
1969 c22(this->cdata.s0)=c2;
1970
1971 // it's sufficient to scale once
1972 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1973 Tensor<T> c1value=transform(c11,cdata2.quad_phit).scale(scale);
1974 Tensor<R> c2value=transform(c22,cdata2.quad_phit);
1975 Tensor<resultT> resultvalue(cdata2.vk,false);
1976 TERNARY_OPTIMIZED_ITERATOR(resultT, resultvalue, T, c1value, R, c2value, *_p0 = *_p1 * *_p2;);
1977
1978 Tensor<resultT> result=transform(resultvalue,cdata2.quad_phiw);
1979
1980 // return a copy of the slice to have the tensor contiguous
1981 return copy(result(this->cdata.s0));
1982 }
1983
1984
1985 /// Functor for the binary_op method
1986 template <typename L, typename R, typename opT>
1987 void do_binary_op(const keyT& key, const Tensor<L>& left,
1988 const std::pair< keyT, Tensor<R> >& arg,
1989 const opT& op) {
1990 //PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1991 const keyT& rkey = arg.first;
1992 const Tensor<R>& rcoeff = arg.second;
1993 Tensor<R> rcube = fcube_for_mul(key, rkey, rcoeff);
1994 Tensor<L> lcube = fcube_for_mul(key, key, left);
1995
1996 Tensor<T> tcube(cdata.vk,false);
1997 op(key, tcube, lcube, rcube);
1998 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1999 tcube = transform(tcube,cdata.quad_phiw).scale(scale);
2000 coeffs.replace(key, nodeT(coeffT(tcube,targs),false));
2001 }
2002
2003 /// Invoked by result to perform result += alpha*left+beta*right in wavelet basis
2004
2005 /// Does not assume that any of result, left, right have the same distribution.
2006        /// For most purposes result will start out empty, so we are actually implementing an
2007        /// out-of-place gaxpy. If all functions have the same distribution there is
2008 /// no communication except for the optional fence.
2009 template <typename L, typename R>
2010        void gaxpy(T alpha, const FunctionImpl<L,NDIM>& left,
2011                   T beta, const FunctionImpl<R,NDIM>& right, bool fence) {
2012 // Loop over local nodes in both functions. Add in left and subtract right.
2013 // Not that efficient in terms of memory bandwidth but ensures we do
2014 // not miss any nodes.
2015 typename FunctionImpl<L,NDIM>::dcT::const_iterator left_end = left.coeffs.end();
2016            for (typename FunctionImpl<L,NDIM>::dcT::const_iterator it=left.coeffs.begin();
2017                 it!=left_end;
2018 ++it) {
2019 const keyT& key = it->first;
2020 const typename FunctionImpl<L,NDIM>::nodeT& other_node = it->second;
2021 coeffs.send(key, &nodeT:: template gaxpy_inplace<T,L>, 1.0, other_node, alpha);
2022 }
2023 typename FunctionImpl<R,NDIM>::dcT::const_iterator right_end = right.coeffs.end();
2024            for (typename FunctionImpl<R,NDIM>::dcT::const_iterator it=right.coeffs.begin();
2025                 it!=right_end;
2026 ++it) {
2027 const keyT& key = it->first;
2028                const typename FunctionImpl<R,NDIM>::nodeT& other_node = it->second;
2029 coeffs.send(key, &nodeT:: template gaxpy_inplace<T,R>, 1.0, other_node, beta);
2030 }
2031 if (fence)
2032 world.gop.fence();
2033 }
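        /// Usage sketch for gaxpy() (hypothetical variable names, illustration only):
        /// with result starting out empty this realizes an out-of-place
        /// result = alpha*left + beta*right in the wavelet basis,
        /// \code
        /// result_impl.gaxpy(alpha, left_impl, beta, right_impl, true);
        /// \endcode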
2034
2035 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2036 /// @param[in] op the unary operator for the coefficients
2037 template <typename opT>
2038 void unary_op_coeff_inplace(const opT& op, bool fence) {
2039 typename dcT::iterator end = coeffs.end();
2040 for (typename dcT::iterator it=coeffs.begin(); it!=end; ++it) {
2041 const keyT& parent = it->first;
2042 nodeT& node = it->second;
2043 if (node.has_coeff()) {
2044 // op(parent, node.coeff());
2045 TensorArgs full(-1.0,TT_FULL);
2046 change_tensor_type(node.coeff(),full);
2047 op(parent, node.coeff().full_tensor());
2048                    change_tensor_type(node.coeff(),targs);
2049                    // op(parent,node);
2050 }
2051 }
2052 if (fence)
2053 world.gop.fence();
2054 }
2055
2056 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2057 /// @param[in] op the unary operator for the coefficients
2058 template <typename opT>
2059 void unary_op_node_inplace(const opT& op, bool fence) {
2060 typename dcT::iterator end = coeffs.end();
2061 for (typename dcT::iterator it=coeffs.begin(); it!=end; ++it) {
2062 const keyT& parent = it->first;
2063 nodeT& node = it->second;
2064 op(parent, node);
2065 }
2066 if (fence)
2067 world.gop.fence();
2068 }
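        /// Minimal sketch of an operator usable with unary_op_node_inplace()
        /// (hypothetical example; the struct name is not part of the interface):
        /// \code
        /// struct scale_node {
        ///     double fac;
        ///     void operator()(const keyT& key, nodeT& node) const {
        ///         if (node.has_coeff()) node.coeff().scale(fac);
        ///     }
        /// };
        /// // impl.unary_op_node_inplace(scale_node{2.0}, true);
        /// \endcode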
2069
2070 /// Integrate over one particle of a two particle function and get a one particle function
2071        /// e.g. \int g(1,2) \delta(2-1) d2 = f(1)
2072 /// The overall dimension of g should be even
2073
2074 /// The operator
2075 template<std::size_t LDIM>
2076 void dirac_convolution_op(const keyT &key, const nodeT &node, FunctionImpl<T,LDIM>* f) const {
2077 // fast return if the node has children (not a leaf node)
2078 if(node.has_children()) return;
2079
2080 const implT* g=this;
2081
2082            // break the 6D key into two 3D keys (may also work for any even dimension)
2083 Key<LDIM> key1, key2;
2084 key.break_apart(key1,key2);
2085
2086 // get the coefficients of the 6D function g
2087 const coeffT& g_coeff = node.coeff();
2088
2089 // get the values of the 6D function g
2090 coeffT g_values = g->coeffs2values(key,g_coeff);
2091
2092 // Determine rank and k
2093 const long rank=g_values.rank();
2094 const long maxk=f->get_k();
2095 MADNESS_ASSERT(maxk==g_coeff.dim(0));
2096
2097 // get tensors for particle 1 and 2 (U and V in SVD)
2098 tensorT vec1=copy(g_values.get_svdtensor().ref_vector(0).reshape(rank,maxk,maxk,maxk));
2099 tensorT vec2=g_values.get_svdtensor().ref_vector(1).reshape(rank,maxk,maxk,maxk);
2100 tensorT result(maxk,maxk,maxk); // should give zero tensor
2101 // Multiply the values of each U and V vector
2102 for (long i=0; i<rank; ++i) {
2103 tensorT c1=vec1(Slice(i,i),_,_,_); // shallow copy (!)
2104 tensorT c2=vec2(Slice(i,i),_,_,_);
2105 c1.emul(c2); // this changes vec1 because of shallow copy, but not the g function because of the deep copy made above
2106 double singular_value_i = g_values.get_svdtensor().weights(i);
2107 result += (singular_value_i*c1);
2108 }
2109
2110            // accumulate coefficients (since only diagonal boxes are used the coefficients get just replaced, but accumulate is needed to create the right tree structure)
2111 tensorT f_coeff = f->values2coeffs(key1,result);
2112 f->coeffs.task(key1, &FunctionNode<T,LDIM>::accumulate2, f_coeff, f->coeffs, key1, TaskAttributes::hipri());
2113// coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
2114
2115
2116 return;
2117 }
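        /// What the loop above computes, written out (same math as the code): with
        /// the SVD form g(1,2) = sum_i w_i u_i(1) v_i(2), setting 2=1 picks out the
        /// diagonal f(1) = sum_i w_i u_i(1) v_i(1), i.e. a weighted pointwise product
        /// of the U and V vectors evaluated on the quadrature grid of key1.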
2118
2119
2120 template<std::size_t LDIM>
2122 typename dcT::const_iterator end = this->coeffs.end();
2123 for (typename dcT::const_iterator it=this->coeffs.begin(); it!=end; ++it) {
2124 // looping through all the leaf(!) coefficients in the NDIM function ("this")
2125 const keyT& key = it->first;
2126 const FunctionNode<T,NDIM>& node = it->second;
2127 if (node.is_leaf()) {
2128 // only process the diagonal boxes
2129 Key<LDIM> key1, key2;
2130 key.break_apart(key1,key2);
2131 if(key1 == key2){
2132 ProcessID p = coeffs.owner(key);
2133 woT::task(p, &implT:: template dirac_convolution_op<LDIM>, key, node, f);
2134 }
2135 }
2136 }
2137 world.gop.fence(); // fence is necessary if trickle down is used afterwards
2138            // trickle down and undo_redundant shouldn't change anything if only the diagonal elements are considered above -> check this
2139 f->trickle_down(true); // fence must be true otherwise undo_redundant will have trouble
2140// f->undo_redundant(true);
2141 f->verify_tree();
2142 //if (fence) world.gop.fence(); // unnecessary, fence is activated in undo_redundant
2143
2144 }
2145
2146
2147 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2148 /// @param[in] op the unary operator for the coefficients
2149 template <typename opT>
2150 void flo_unary_op_node_inplace(const opT& op, bool fence) {
2151            typedef Range<typename dcT::iterator> rangeT;
2152//            typedef do_unary_op_value_inplace<opT> xopT;
2153            world.taskq.for_each<rangeT,opT>(rangeT(coeffs.begin(), coeffs.end()), op);
2154 if (fence) world.gop.fence();
2155 }
2156
2157 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2158 /// @param[in] op the unary operator for the coefficients
2159 template <typename opT>
2160 void flo_unary_op_node_inplace(const opT& op, bool fence) const {
2161            typedef Range<typename dcT::const_iterator> rangeT;
2162//            typedef do_unary_op_value_inplace<opT> xopT;
2163            world.taskq.for_each<rangeT,opT>(rangeT(coeffs.begin(), coeffs.end()), op);
2164 if (fence)
2165 world.gop.fence();
2166 }
2167
2168 /// truncate tree at a certain level
2169 /// @param[in] max_level truncate tree below this level
2170 void erase(const Level& max_level);
2171
2172 /// Returns some asymmetry measure ... no comms
2173 double check_symmetry_local() const;
2174
2175 /// given an NS tree resulting from a convolution, truncate leafs if appropriate
2176        struct do_truncate_NS_leafs {
2177            typedef Range<typename dcT::iterator> rangeT;
2178            const implT* f; // for calling its member functions
2179
2181
2182 bool operator()(typename rangeT::iterator& it) const {
2183
2184 const keyT& key = it->first;
2185 nodeT& node = it->second;
2186
2187 if (node.is_leaf() and node.coeff().has_data()) {
2188 coeffT d = copy(node.coeff());
2189 d(f->cdata.s0)=0.0;
2190 const double error=d.normf();
2191 const double tol=f->truncate_tol(f->get_thresh(),key);
2192 if (error<tol) node.coeff()=copy(node.coeff()(f->cdata.s0));
2193 }
2194 return true;
2195 }
2196 template <typename Archive> void serialize(const Archive& ar) {}
2197
2198 };
2199
2200 /// remove all coefficients of internal nodes
2201            struct remove_internal_coeffs {
2202                typedef Range<typename dcT::iterator> rangeT;
2203
2204                /// constructor needs impl for cdata
2206
2207 bool operator()(typename rangeT::iterator& it) const {
2208
2209 nodeT& node = it->second;
2210 if (node.has_children()) node.clear_coeff();
2211 return true;
2212 }
2213 template <typename Archive> void serialize(const Archive& ar) {}
2214
2215 };
2216
2217 /// remove all coefficients of leaf nodes
2218            struct remove_leaf_coeffs {
2219                typedef Range<typename dcT::iterator> rangeT;
2220
2221                /// constructor needs impl for cdata
2223
2224 bool operator()(typename rangeT::iterator& it) const {
2225 nodeT& node = it->second;
2226 if (not node.has_children()) node.clear_coeff();
2227 return true;
2228 }
2229 template <typename Archive> void serialize(const Archive& ar) {}
2230
2231 };
2232
2233
2234 /// keep only the sum coefficients in each node
2236            struct do_keep_sum_coeffs {
2237                typedef Range<typename dcT::iterator> rangeT;
2238                implT* impl;
2239                /// constructor needs impl for cdata
2240                do_keep_sum_coeffs(implT* impl) : impl(impl) {}
2241
2242 bool operator()(typename rangeT::iterator& it) const {
2243
2244 nodeT& node = it->second;
2245 coeffT s=copy(node.coeff()(impl->cdata.s0));
2246 node.coeff()=s;
2247 return true;
2248 }
2249 template <typename Archive> void serialize(const Archive& ar) {}
2250
2251 };
2252
2253
2254 /// reduce the rank of the nodes, optional fence
2255        struct do_reduce_rank {
2256            typedef Range<typename dcT::iterator> rangeT;
2257
2258 // threshold for rank reduction / SVD truncation
2259            TensorArgs args;
2260
2261 // constructor takes target precision
2262 do_reduce_rank() = default;
2264            do_reduce_rank(const double& thresh) {
2265                args.thresh=thresh;
2266            }
2267
2268 //
2269 bool operator()(typename rangeT::iterator& it) const {
2270
2271 nodeT& node = it->second;
2272 node.reduceRank(args.thresh);
2273 return true;
2274 }
2275 template <typename Archive> void serialize(const Archive& ar) {}
2276 };
2277
2278
2279
2280 /// check symmetry wrt particle exchange
2283 const implT* f;
2286
2287 /// return the norm of the difference of this node and its "mirror" node
2288 double operator()(typename rangeT::iterator& it) const {
2289
2290 // Temporary fix to GCC whining about out of range access for NDIM!=6
2291 if constexpr(NDIM==6) {
2292 const keyT& key = it->first;
2293 const nodeT& fnode = it->second;
2294
2295 // skip internal nodes
2296 if (fnode.has_children()) return 0.0;
2297
2298 if (f->world.size()>1) return 0.0;
2299
2300 // exchange particles
2301 std::vector<long> map(NDIM);
2302 map[0]=3; map[1]=4; map[2]=5;
2303 map[3]=0; map[4]=1; map[5]=2;
2304
2305 // make mapped key
2306                Vector<Translation,NDIM> l;
2307                for (std::size_t i=0; i<NDIM; ++i) l[map[i]] = key.translation()[i];
2308 const keyT mapkey(key.level(),l);
2309
2310 double norm=0.0;
2311
2312
2313 // hope it's local
2314 if (f->get_coeffs().probe(mapkey)) {
2315 MADNESS_ASSERT(f->get_coeffs().probe(mapkey));
2316 const nodeT& mapnode=f->get_coeffs().find(mapkey).get()->second;
2317
2318// bool have_c1=fnode.coeff().has_data() and fnode.coeff().config().has_data();
2319// bool have_c2=mapnode.coeff().has_data() and mapnode.coeff().config().has_data();
2320 bool have_c1=fnode.coeff().has_data();
2321 bool have_c2=mapnode.coeff().has_data();
2322
2323 if (have_c1 and have_c2) {
2324 tensorT c1=fnode.coeff().full_tensor_copy();
2325 tensorT c2=mapnode.coeff().full_tensor_copy();
2326 c2 = copy(c2.mapdim(map));
2327 norm=(c1-c2).normf();
2328 } else if (have_c1) {
2329 tensorT c1=fnode.coeff().full_tensor_copy();
2330 norm=c1.normf();
2331 } else if (have_c2) {
2332 tensorT c2=mapnode.coeff().full_tensor_copy();
2333 norm=c2.normf();
2334 } else {
2335 norm=0.0;
2336 }
2337 } else {
2338 norm=fnode.coeff().normf();
2339 }
2340 return norm*norm;
2341 }
2342 else {
2343 MADNESS_EXCEPTION("ONLY FOR DIM 6!", 1);
2344 }
2345 }
2346
2347 double operator()(double a, double b) const {
2348 return (a+b);
2349 }
2350
2351 template <typename Archive> void serialize(const Archive& ar) {
2352 MADNESS_EXCEPTION("no serialization of do_check_symmetry yet",1);
2353 }
2354
2355
2356 };
2357
2358        /// merge the coefficient boxes of this into result's tree
2359
2360 /// result+= alpha*this
2361 /// this and result don't have to have the same distribution or live in the same world
2362        /// no comm, and the tree should be in a consistent state by virtue of FunctionNode::accumulate
2363 template<typename Q, typename R>
2364            struct do_accumulate_trees{
2365                typedef Range<typename dcT::const_iterator> rangeT;
2366                FunctionImpl<Q,NDIM>* result=0;
2367                T alpha=T(1.0);
2371
2372            /// accumulate the coefficients of this node into the result tree
2373 bool operator()(typename rangeT::iterator& it) const {
2374
2375 const keyT& key = it->first;
2376 const nodeT& node = it->second;
2377 if (node.has_coeff()) result->get_coeffs().task(key, &nodeT::accumulate,
2378 alpha*node.coeff(), result->get_coeffs(), key, result->targs);
2379 return true;
2380 }
2381
2382 template <typename Archive> void serialize(const Archive& ar) {
2383 MADNESS_EXCEPTION("no serialization of do_accumulate_trees",1);
2384 }
2385 };
2386
2387
2388 /// merge the coefficient boxes of this into other's tree
2389
2390        /// no comm, and the tree should be in a consistent state by virtue
2391 /// of FunctionNode::gaxpy_inplace
2392 template<typename Q, typename R>
2393            struct do_merge_trees {
2394                typedef Range<typename dcT::const_iterator> rangeT;
2395                FunctionImpl<Q,NDIM>* other;
2396                T alpha, beta;
2401
2402            /// merge the coefficients of this node into the node of the other tree
2403 bool operator()(typename rangeT::iterator& it) const {
2404
2405 const keyT& key = it->first;
2406 const nodeT& fnode = it->second;
2407
2408 // if other's node exists: add this' coeffs to it
2409 // otherwise insert this' node into other's tree
2410 typename dcT::accessor acc;
2411 if (other->get_coeffs().find(acc,key)) {
2412 nodeT& gnode=acc->second;
2413 gnode.gaxpy_inplace(beta,fnode,alpha);
2414 } else {
2415 nodeT gnode=fnode;
2416 gnode.scale(alpha);
2417 other->get_coeffs().replace(key,gnode);
2418 }
2419 return true;
2420 }
2421
2422 template <typename Archive> void serialize(const Archive& ar) {
2423 MADNESS_EXCEPTION("no serialization of do_merge_trees",1);
2424 }
2425 };
2426
2427
2428 /// map this on f
2429 struct do_mapdim {
2430            typedef Range<typename dcT::iterator> rangeT;
2431
2432            std::vector<long> map;
2433            implT* f;
2434
2435 do_mapdim() : f(0) {};
2436 do_mapdim(const std::vector<long> map, implT& f) : map(map), f(&f) {}
2437
2438 bool operator()(typename rangeT::iterator& it) const {
2439
2440 const keyT& key = it->first;
2441 const nodeT& node = it->second;
2442
2443                Vector<Translation,NDIM> l;
2444                for (std::size_t i=0; i<NDIM; ++i) l[map[i]] = key.translation()[i];
2445 tensorT c = node.coeff().reconstruct_tensor();
2446 if (c.size()) c = copy(c.mapdim(map));
2447                coeffT cc(c,f->get_tensor_args());
2448                f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2449
2450 return true;
2451 }
2452 template <typename Archive> void serialize(const Archive& ar) {
2453 MADNESS_EXCEPTION("no serialization of do_mapdim",1);
2454 }
2455
2456 };
2457
2458 /// mirror dimensions of this, write result on f
2459 struct do_mirror {
2460            typedef Range<typename dcT::iterator> rangeT;
2461
2462            std::vector<long> mirror;
2463            implT* f;
2464
2465 do_mirror() : f(0) {};
2466 do_mirror(const std::vector<long> mirror, implT& f) : mirror(mirror), f(&f) {}
2467
2468 bool operator()(typename rangeT::iterator& it) const {
2469
2470 const keyT& key = it->first;
2471 const nodeT& node = it->second;
2472
2473 // mirror translation index: l_new + l_old = l_max
2474                Vector<Translation,NDIM> l=key.translation();
2475                Translation lmax = (Translation(1)<<key.level()) - 1;
2476 for (std::size_t i=0; i<NDIM; ++i) {
2477 if (mirror[i]==-1) l[i]= lmax - key.translation()[i];
2478 }
2479
2480 // mirror coefficients: multiply all odd-k slices with -1
2481 tensorT c = node.coeff().full_tensor_copy();
2482 if (c.size()) {
2483 std::vector<Slice> s(___);
2484
2485 // loop over dimensions and over k
2486 for (size_t i=0; i<NDIM; ++i) {
2487 std::size_t kmax=c.dim(i);
2488 if (mirror[i]==-1) {
2489 for (size_t k=1; k<kmax; k+=2) {
2490 s[i]=Slice(k,k,1);
2491 c(s)*=(-1.0);
2492 }
2493 s[i]=_;
2494 }
2495 }
2496 }
2497                coeffT cc(c,f->get_tensor_args());
2498                f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2499
2500 return true;
2501 }
2502 template <typename Archive> void serialize(const Archive& ar) {
2503 MADNESS_EXCEPTION("no serialization of do_mirror",1);
2504 }
2505
2506 };
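        /// Why do_mirror flips the sign of the odd-k slices (reasoning only, same
        /// convention as the code above): under the reflection x -> 1-x within a box
        /// the Legendre scaling functions of odd polynomial order are antisymmetric,
        /// phi_k(1-x) = (-1)^k phi_k(x), so mirroring a dimension negates every
        /// odd-order coefficient while the translation index maps to l_max - l.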
2507
2508        /// map and mirror the dimensions of this, write result on f
2509        struct do_map_and_mirror {
2510            typedef Range<typename dcT::iterator> rangeT;
2511
2512 std::vector<long> map,mirror;
2513            implT* f;
2514
2516 do_map_and_mirror(const std::vector<long> map, const std::vector<long> mirror, implT& f)
2517 : map(map), mirror(mirror), f(&f) {}
2518
2519 bool operator()(typename rangeT::iterator& it) const {
2520
2521 const keyT& key = it->first;
2522 const nodeT& node = it->second;
2523
2524 tensorT c = node.coeff().full_tensor_copy();
2525                Vector<Translation,NDIM> l = key.translation();
2526
2527 // do the mapping first (if present)
2528 if (map.size()>0) {
2529                    Vector<Translation,NDIM> l1;
2530                    for (std::size_t i=0; i<NDIM; ++i) l1[map[i]] = l[i];
2531 std::swap(l,l1);
2532 if (c.size()) c = copy(c.mapdim(map));
2533 }
2534
2535 if (mirror.size()>0) {
2536 // mirror translation index: l_new + l_old = l_max
2537                    Vector<Translation,NDIM> l1=l;
2538                    Translation lmax = (Translation(1)<<key.level()) - 1;
2539 for (std::size_t i=0; i<NDIM; ++i) {
2540 if (mirror[i]==-1) l1[i]= lmax - l[i];
2541 }
2542 std::swap(l,l1);
2543
2544 // mirror coefficients: multiply all odd-k slices with -1
2545 if (c.size()) {
2546 std::vector<Slice> s(___);
2547
2548 // loop over dimensions and over k
2549 for (size_t i=0; i<NDIM; ++i) {
2550 std::size_t kmax=c.dim(i);
2551 if (mirror[i]==-1) {
2552 for (size_t k=1; k<kmax; k+=2) {
2553 s[i]=Slice(k,k,1);
2554 c(s)*=(-1.0);
2555 }
2556 s[i]=_;
2557 }
2558 }
2559 }
2560 }
2561
2562                coeffT cc(c,f->get_tensor_args());
2563                f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2564 return true;
2565 }
2566 template <typename Archive> void serialize(const Archive& ar) {
2567                MADNESS_EXCEPTION("no serialization of do_map_and_mirror",1);
2568 }
2569
2570 };
2571
2572
2573
2574 /// "put" this on g
2575 struct do_average {
2576            typedef Range<typename dcT::const_iterator> rangeT;
2577
2578            implT* g;
2579
2580            do_average() : g(0) {}
2581            do_average(implT& g) : g(&g) {}
2582
2583 /// iterator it points to this
2584 bool operator()(typename rangeT::iterator& it) const {
2585
2586 const keyT& key = it->first;
2587 const nodeT& fnode = it->second;
2588
2589 // fast return if rhs has no coeff here
2590 if (fnode.has_coeff()) {
2591
2592 // check if there is a node already existing
2593 typename dcT::accessor acc;
2594 if (g->get_coeffs().find(acc,key)) {
2595 nodeT& gnode=acc->second;
2596 if (gnode.has_coeff()) gnode.coeff()+=fnode.coeff();
2597 } else {
2598 g->get_coeffs().replace(key,fnode);
2599 }
2600 }
2601
2602 return true;
2603 }
2604 template <typename Archive> void serialize(const Archive& ar) {}
2605 };
2606
2607 /// change representation of nodes' coeffs to low rank, optional fence
2608        struct do_change_tensor_type {
2609            typedef Range<typename dcT::iterator> rangeT;
2610
2611 // threshold for rank reduction / SVD truncation
2612            TensorArgs targs;
2614
2615 // constructor takes target precision
2617 // do_change_tensor_type(const TensorArgs& targs) : targs(targs) {}
2619
2620 //
2621 bool operator()(typename rangeT::iterator& it) const {
2622
2623 double cpu0=cpu_time();
2624 nodeT& node = it->second;
2625                change_tensor_type(node.coeff(),targs);
2626                double cpu1=cpu_time();
2628
2629 return true;
2630
2631 }
2632 template <typename Archive> void serialize(const Archive& ar) {}
2633 };
2634
2635        struct do_consolidate_buffer {
2636            typedef Range<typename dcT::iterator> rangeT;
2637
2638 // threshold for rank reduction / SVD truncation
2639            TensorArgs targs;
2640
2641 // constructor takes target precision
2644 bool operator()(typename rangeT::iterator& it) const {
2645 it->second.consolidate_buffer(targs);
2646 return true;
2647 }
2648 template <typename Archive> void serialize(const Archive& ar) {}
2649 };
2650
2651
2652
2653 template <typename opT>
2654        struct do_unary_op_value_inplace {
2655            typedef Range<typename dcT::iterator> rangeT;
2656            implT* impl;
2657            opT op;
2658            do_unary_op_value_inplace(implT* impl, const opT& op) : impl(impl), op(op) {}
2659 bool operator()(typename rangeT::iterator& it) const {
2660 const keyT& key = it->first;
2661 nodeT& node = it->second;
2662 if (node.has_coeff()) {
2663 const TensorArgs full_args(-1.0,TT_FULL);
2664 change_tensor_type(node.coeff(),full_args);
2665 tensorT& t= node.coeff().full_tensor();
2666 //double before = t.normf();
2667 tensorT values = impl->fcube_for_mul(key, key, t);
2668 op(key, values);
2669 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
2670 t = transform(values,impl->cdata.quad_phiw).scale(scale);
2671 node.coeff()=coeffT(t,impl->get_tensor_args());
2672 //double after = t.normf();
2673 //madness::print("XOP:", key, before, after);
2674 }
2675 return true;
2676 }
2677 template <typename Archive> void serialize(const Archive& ar) {}
2678 };
2679
2680 template <typename Q, typename R>
2681        /// @todo I don't know what this does other than a transform
2682 void vtransform_doit(const std::shared_ptr< FunctionImpl<R,NDIM> >& right,
2683 const Tensor<Q>& c,
2684 const std::vector< std::shared_ptr< FunctionImpl<T,NDIM> > >& vleft,
2685 double tol) {
2686 // To reduce crunch on vectors being transformed each task
2687 // does them in a random order
2688 std::vector<unsigned int> ind(vleft.size());
2689 for (unsigned int i=0; i<vleft.size(); ++i) {
2690 ind[i] = i;
2691 }
2692 for (unsigned int i=0; i<vleft.size(); ++i) {
2693 unsigned int j = RandomValue<int>()%vleft.size();
2694 std::swap(ind[i],ind[j]);
2695 }
2696
2697 typename FunctionImpl<R,NDIM>::dcT::const_iterator end = right->coeffs.end();
2698 for (typename FunctionImpl<R,NDIM>::dcT::const_iterator it=right->coeffs.begin(); it != end; ++it) {
2699 if (it->second.has_coeff()) {
2700 const Key<NDIM>& key = it->first;
2701 const GenTensor<R>& r = it->second.coeff();
2702 double norm = r.normf();
2703 double keytol = truncate_tol(tol,key);
2704
2705 for (unsigned int j=0; j<vleft.size(); ++j) {
2706 unsigned int i = ind[j]; // Random permutation
2707 if (std::abs(norm*c(i)) > keytol) {
2708 implT* left = vleft[i].get();
2709 typename dcT::accessor acc;
2710 bool newnode = left->coeffs.insert(acc,key);
2711 if (newnode && key.level()>0) {
2712 Key<NDIM> parent = key.parent();
2713 if (left->coeffs.is_local(parent))
2714 left->coeffs.send(parent, &nodeT::set_has_children_recursive, left->coeffs, parent);
2715 else
2716 left->coeffs.task(parent, &nodeT::set_has_children_recursive, left->coeffs, parent);
2717
2718 }
2719 nodeT& node = acc->second;
2720 if (!node.has_coeff())
2721 node.set_coeff(coeffT(cdata.v2k,targs));
2722 coeffT& t = node.coeff();
2723 t.gaxpy(1.0, r, c(i));
2724 }
2725 }
2726 }
2727 }
2728 }
2729
2730 /// Refine multiple functions down to the same finest level
2731
2732 /// @param v the vector of functions we are refining.
2733 /// @param key the current node.
2734 /// @param c the vector of coefficients passed from above.
2735 void refine_to_common_level(const std::vector<FunctionImpl<T,NDIM>*>& v,
2736 const std::vector<tensorT>& c,
2737 const keyT key);
2738
2739 /// Inplace operate on many functions (impl's) with an operator within a certain box
2740 /// @param[in] key the key of the current function node (box)
2741 /// @param[in] op the operator
2742 /// @param[in] v the vector of function impl's on which to be operated
2743 template <typename opT>
2744 void multiop_values_doit(const keyT& key, const opT& op, const std::vector<implT*>& v) {
2745 std::vector<tensorT> c(v.size());
2746 for (unsigned int i=0; i<v.size(); i++) {
2747 if (v[i]) {
2748 coeffT cc = coeffs2values(key, v[i]->coeffs.find(key).get()->second.coeff());
2749 c[i]=cc.full_tensor();
2750 }
2751 }
2752 tensorT r = op(key, c);
2753 coeffs.replace(key, nodeT(coeffT(values2coeffs(key, r),targs),false));
2754 }
2755
2756 /// Inplace operate on many functions (impl's) with an operator within a certain box
2757 /// Assumes all functions have been refined down to the same level
2758 /// @param[in] op the operator
2759 /// @param[in] v the vector of function impl's on which to be operated
2760 template <typename opT>
2761 void multiop_values(const opT& op, const std::vector<implT*>& v) {
2762            // rough check on refinement level (ignore non-initialized functions)
2763 for (std::size_t i=1; i<v.size(); ++i) {
2764 if (v[i] and v[i-1]) {
2765 MADNESS_ASSERT(v[i]->coeffs.size()==v[i-1]->coeffs.size());
2766 }
2767 }
2768 typename dcT::iterator end = v[0]->coeffs.end();
2769 for (typename dcT::iterator it=v[0]->coeffs.begin(); it!=end; ++it) {
2770 const keyT& key = it->first;
2771 if (it->second.has_coeff())
2772 world.taskq.add(*this, &implT:: template multiop_values_doit<opT>, key, op, v);
2773 else
2774 coeffs.replace(key, nodeT(coeffT(),true));
2775 }
2776 world.gop.fence();
2777 }
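        /// Minimal sketch of an operator usable with multiop_values() (hypothetical
        /// example; names are illustrative only): the functor receives the key and the
        /// values of all input functions in that box and returns the values of the result,
        /// \code
        /// struct pointwise_sum {
        ///     Tensor<double> operator()(const Key<NDIM>& key,
        ///                               const std::vector< Tensor<double> >& v) const {
        ///         Tensor<double> r = copy(v[0]);
        ///         for (std::size_t i=1; i<v.size(); ++i) r += v[i];
        ///         return r;
        ///     }
        /// };
        /// \endcode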
2778
2779 /// Inplace operate on many functions (impl's) with an operator within a certain box
2780
2781 /// @param[in] key the key of the current function node (box)
2782 /// @param[in] op the operator
2783 /// @param[in] vin the vector of function impl's on which to be operated
2784 /// @param[out] vout the resulting vector of function impl's
2785 template <typename opT>
2786 void multi_to_multi_op_values_doit(const keyT& key, const opT& op,
2787 const std::vector<implT*>& vin, std::vector<implT*>& vout) {
2788 std::vector<tensorT> c(vin.size());
2789 for (unsigned int i=0; i<vin.size(); i++) {
2790 if (vin[i]) {
2791 coeffT cc = coeffs2values(key, vin[i]->coeffs.find(key).get()->second.coeff());
2792 c[i]=cc.full_tensor();
2793 }
2794 }
2795 std::vector<tensorT> r = op(key, c);
2796 MADNESS_ASSERT(r.size()==vout.size());
2797 for (std::size_t i=0; i<vout.size(); ++i) {
2798 vout[i]->coeffs.replace(key, nodeT(coeffT(values2coeffs(key, r[i]),targs),false));
2799 }
2800 }
2801
2802 /// Inplace operate on many functions (impl's) with an operator within a certain box
2803
2804 /// Assumes all functions have been refined down to the same level
2805 /// @param[in] op the operator
2806 /// @param[in] vin the vector of function impl's on which to be operated
2807 /// @param[out] vout the resulting vector of function impl's
2808 template <typename opT>
2809 void multi_to_multi_op_values(const opT& op, const std::vector<implT*>& vin,
2810 std::vector<implT*>& vout, const bool fence=true) {
2811            // rough check on refinement level (ignore non-initialized functions)
2812 for (std::size_t i=1; i<vin.size(); ++i) {
2813 if (vin[i] and vin[i-1]) {
2814 MADNESS_ASSERT(vin[i]->coeffs.size()==vin[i-1]->coeffs.size());
2815 }
2816 }
2817 typename dcT::iterator end = vin[0]->coeffs.end();
2818 for (typename dcT::iterator it=vin[0]->coeffs.begin(); it!=end; ++it) {
2819 const keyT& key = it->first;
2820 if (it->second.has_coeff())
2821 world.taskq.add(*this, &implT:: template multi_to_multi_op_values_doit<opT>,
2822 key, op, vin, vout);
2823 else {
2824 // fill result functions with empty box in this key
2825 for (implT* it2 : vout) {
2826 it2->coeffs.replace(key, nodeT(coeffT(),true));
2827 }
2828 }
2829 }
2830 if (fence) world.gop.fence();
2831 }
2832
2833 /// Transforms a vector of functions left[i] = sum[j] right[j]*c[j,i] using sparsity
2834 /// @param[in] vright vector of functions (impl's) on which to be transformed
2835 /// @param[in] c the tensor (matrix) transformer
2836        /// @param[in] vleft vector of the *newly* transformed functions (impl's)
2837 template <typename Q, typename R>
2838 void vtransform(const std::vector< std::shared_ptr< FunctionImpl<R,NDIM> > >& vright,
2839 const Tensor<Q>& c,
2840 const std::vector< std::shared_ptr< FunctionImpl<T,NDIM> > >& vleft,
2841 double tol,
2842 bool fence) {
2843 for (unsigned int j=0; j<vright.size(); ++j) {
2844 world.taskq.add(*this, &implT:: template vtransform_doit<Q,R>, vright[j], copy(c(j,_)), vleft, tol);
2845 }
2846 if (fence)
2847 world.gop.fence();
2848 }
2849
2850 /// Unary operation applied inplace to the values with optional refinement and fence
2851 /// @param[in] op the unary operator for the values
2852 template <typename opT>
2853 void unary_op_value_inplace(const opT& op, bool fence) {
2855 typedef do_unary_op_value_inplace<opT> xopT;
2856 world.taskq.for_each<rangeT,xopT>(rangeT(coeffs.begin(), coeffs.end()), xopT(this,op));
2857 if (fence)
2858 world.gop.fence();
2859 }
2860
2861 // Multiplication assuming same distribution and recursive descent
2862 /// Both left and right functions are in the scaling function basis
2863 /// @param[in] key the key to the current function node (box)
2864 /// @param[in] left the function impl associated with the left function
2865 /// @param[in] lcin the scaling function coefficients associated with the
2866 /// current box in the left function
2867 /// @param[in] vrightin the vector of function impl's associated with
2868 /// the vector of right functions
2869 /// @param[in] vrcin the vector scaling function coefficients associated with the
2870 /// current box in the right functions
2871 /// @param[out] vresultin the vector of resulting functions (impl's)
2872 template <typename L, typename R>
2873 void mulXXveca(const keyT& key,
2874 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
2875 const std::vector<const FunctionImpl<R,NDIM>*> vrightin,
2876 const std::vector< Tensor<R> >& vrcin,
2877 const std::vector<FunctionImpl<T,NDIM>*> vresultin,
2878 double tol) {
2879 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
2880 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
2881
2882 double lnorm = 1e99;
2883 Tensor<L> lc = lcin;
2884 if (lc.size() == 0) {
2885 literT it = left->coeffs.find(key).get();
2886 MADNESS_ASSERT(it != left->coeffs.end());
2887 lnorm = it->second.get_norm_tree();
2888 if (it->second.has_coeff())
2889 lc = it->second.coeff().full_tensor_copy();
2890 }
2891
2892 // Loop thru RHS functions seeing if anything can be multiplied
2893 std::vector<FunctionImpl<T,NDIM>*> vresult;
2894 std::vector<const FunctionImpl<R,NDIM>*> vright;
2895 std::vector< Tensor<R> > vrc;
2896 vresult.reserve(vrightin.size());
2897 vright.reserve(vrightin.size());
2898 vrc.reserve(vrightin.size());
2899
2900 for (unsigned int i=0; i<vrightin.size(); ++i) {
2901 FunctionImpl<T,NDIM>* result = vresultin[i];
2902 const FunctionImpl<R,NDIM>* right = vrightin[i];
2903 Tensor<R> rc = vrcin[i];
2904 double rnorm;
2905 if (rc.size() == 0) {
2906 riterT it = right->coeffs.find(key).get();
2907 MADNESS_ASSERT(it != right->coeffs.end());
2908 rnorm = it->second.get_norm_tree();
2909 if (it->second.has_coeff())
2910 rc = it->second.coeff().full_tensor_copy();
2911 }
2912 else {
2913 rnorm = rc.normf();
2914 }
2915
2916 if (rc.size() && lc.size()) { // Yipee!
2917 result->task(world.rank(), &implT:: template do_mul<L,R>, key, lc, std::make_pair(key,rc));
2918 }
2919 else if (tol && lnorm*rnorm < truncate_tol(tol, key)) {
2920 result->coeffs.replace(key, nodeT(coeffT(cdata.vk,targs),false)); // Zero leaf
2921 }
2922 else { // Interior node
2923 result->coeffs.replace(key, nodeT(coeffT(),true));
2924 vresult.push_back(result);
2925 vright.push_back(right);
2926 vrc.push_back(rc);
2927 }
2928 }
2929
2930 if (vresult.size()) {
2931 Tensor<L> lss;
2932 if (lc.size()) {
2933 Tensor<L> ld(cdata.v2k);
2934 ld(cdata.s0) = lc(___);
2935 lss = left->unfilter(ld);
2936 }
2937
2938 std::vector< Tensor<R> > vrss(vresult.size());
2939 for (unsigned int i=0; i<vresult.size(); ++i) {
2940 if (vrc[i].size()) {
2941 Tensor<R> rd(cdata.v2k);
2942 rd(cdata.s0) = vrc[i](___);
2943 vrss[i] = vright[i]->unfilter(rd);
2944 }
2945 }
2946
2947 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
2948 const keyT& child = kit.key();
2949 Tensor<L> ll;
2950
2951 std::vector<Slice> cp = child_patch(child);
2952
2953 if (lc.size())
2954 ll = copy(lss(cp));
2955
2956 std::vector< Tensor<R> > vv(vresult.size());
2957 for (unsigned int i=0; i<vresult.size(); ++i) {
2958 if (vrc[i].size())
2959 vv[i] = copy(vrss[i](cp));
2960 }
2961
2962 woT::task(coeffs.owner(child), &implT:: template mulXXveca<L,R>, child, left, ll, vright, vv, vresult, tol);
2963 }
2964 }
2965 }
2966
2967 /// Multiplication using recursive descent and assuming same distribution
2968 /// Both left and right functions are in the scaling function basis
2969 /// @param[in] key the key to the current function node (box)
2970 /// @param[in] left the function impl associated with the left function
2971 /// @param[in] lcin the scaling function coefficients associated with the
2972 /// current box in the left function
2973 /// @param[in] right the function impl associated with the right function
2974 /// @param[in] rcin the scaling function coefficients associated with the
2975 /// current box in the right function
2976 template <typename L, typename R>
2977 void mulXXa(const keyT& key,
2978 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
2979 const FunctionImpl<R,NDIM>* right,const Tensor<R>& rcin,
2980 double tol) {
2981 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
2982 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
2983
2984 double lnorm=1e99, rnorm=1e99;
2985
2986 Tensor<L> lc = lcin;
2987 if (lc.size() == 0) {
2988 literT it = left->coeffs.find(key).get();
2989 MADNESS_ASSERT(it != left->coeffs.end());
2990 lnorm = it->second.get_norm_tree();
2991 if (it->second.has_coeff())
2992 lc = it->second.coeff().reconstruct_tensor();
2993 }
2994
2995 Tensor<R> rc = rcin;
2996 if (rc.size() == 0) {
2997 riterT it = right->coeffs.find(key).get();
2998 MADNESS_ASSERT(it != right->coeffs.end());
2999 rnorm = it->second.get_norm_tree();
3000 if (it->second.has_coeff())
3001 rc = it->second.coeff().reconstruct_tensor();
3002 }
3003
3004 // both nodes are leaf nodes: multiply and return
3005 if (rc.size() && lc.size()) { // Yipee!
3006 do_mul<L,R>(key, lc, std::make_pair(key,rc));
3007 return;
3008 }
3009
3010 if (tol) {
3011 if (lc.size())
3012 lnorm = lc.normf(); // Otherwise got from norm tree above
3013 if (rc.size())
3014 rnorm = rc.normf();
3015 if (lnorm*rnorm < truncate_tol(tol, key)) {
3016 coeffs.replace(key, nodeT(coeffT(cdata.vk,targs),false)); // Zero leaf node
3017 return;
3018 }
3019 }
3020
3021 // Recur down
3022 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3023
3024 Tensor<L> lss;
3025 if (lc.size()) {
3026 Tensor<L> ld(cdata.v2k);
3027 ld(cdata.s0) = lc(___);
3028 lss = left->unfilter(ld);
3029 }
3030
3031 Tensor<R> rss;
3032 if (rc.size()) {
3033 Tensor<R> rd(cdata.v2k);
3034 rd(cdata.s0) = rc(___);
3035 rss = right->unfilter(rd);
3036 }
3037
3038 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3039 const keyT& child = kit.key();
3040 Tensor<L> ll;
3041 Tensor<R> rr;
3042 if (lc.size())
3043 ll = copy(lss(child_patch(child)));
3044 if (rc.size())
3045 rr = copy(rss(child_patch(child)));
3046
3047 woT::task(coeffs.owner(child), &implT:: template mulXXa<L,R>, child, left, ll, right, rr, tol);
3048 }
3049 }
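        /// Screening in mulXXa() above, in one line: the size of the product in a box
        /// is estimated heuristically by lnorm*rnorm (taken from the norm tree or the
        /// local coefficients); if the estimate falls below truncate_tol(tol,key) a
        /// zero leaf is inserted, otherwise the recursion continues on the children
        /// with the unfiltered parent coefficients,
        /// \code
        /// // if (lnorm*rnorm < truncate_tol(tol, key)) insert zero leaf at key
        /// \endcode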
3050
3051
3052 // Binary operation on values using recursive descent and assuming same distribution
3053 /// Both left and right functions are in the scaling function basis
3054 /// @param[in] key the key to the current function node (box)
3055 /// @param[in] left the function impl associated with the left function
3056 /// @param[in] lcin the scaling function coefficients associated with the
3057 /// current box in the left function
3058 /// @param[in] right the function impl associated with the right function
3059 /// @param[in] rcin the scaling function coefficients associated with the
3060 /// current box in the right function
3061 /// @param[in] op the binary operator
3062 template <typename L, typename R, typename opT>
3063 void binaryXXa(const keyT& key,
3064 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
3065 const FunctionImpl<R,NDIM>* right,const Tensor<R>& rcin,
3066 const opT& op) {
3067 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
3068 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
3069
3070 Tensor<L> lc = lcin;
3071 if (lc.size() == 0) {
3072 literT it = left->coeffs.find(key).get();
3073 MADNESS_ASSERT(it != left->coeffs.end());
3074 if (it->second.has_coeff())
3075 lc = it->second.coeff().reconstruct_tensor();
3076 }
3077
3078 Tensor<R> rc = rcin;
3079 if (rc.size() == 0) {
3080 riterT it = right->coeffs.find(key).get();
3081 MADNESS_ASSERT(it != right->coeffs.end());
3082 if (it->second.has_coeff())
3083 rc = it->second.coeff().reconstruct_tensor();
3084 }
3085
3086 if (rc.size() && lc.size()) { // Yipee!
3087 do_binary_op<L,R>(key, lc, std::make_pair(key,rc), op);
3088 return;
3089 }
3090
3091 // Recur down
3092 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3093
3094 Tensor<L> lss;
3095 if (lc.size()) {
3096 Tensor<L> ld(cdata.v2k);
3097 ld(cdata.s0) = lc(___);
3098 lss = left->unfilter(ld);
3099 }
3100
3101 Tensor<R> rss;
3102 if (rc.size()) {
3103 Tensor<R> rd(cdata.v2k);
3104 rd(cdata.s0) = rc(___);
3105 rss = right->unfilter(rd);
3106 }
3107
3108 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3109 const keyT& child = kit.key();
3110 Tensor<L> ll;
3111 Tensor<R> rr;
3112 if (lc.size())
3113 ll = copy(lss(child_patch(child)));
3114 if (rc.size())
3115 rr = copy(rss(child_patch(child)));
3116
3117 woT::task(coeffs.owner(child), &implT:: template binaryXXa<L,R,opT>, child, left, ll, right, rr, op);
3118 }
3119 }
3120
3121 template <typename Q, typename opT>
3122        struct coeff_value_adaptor {
3123            typedef typename opT::resultT resultT;
3124            const FunctionImpl<Q,NDIM>* impl_func;
3125            opT op;
3126
3131
3132 Tensor<resultT> operator()(const Key<NDIM>& key, const Tensor<Q>& t) const {
3133 Tensor<Q> invalues = impl_func->coeffs2values(key, t);
3134
3135 Tensor<resultT> outvalues = op(key, invalues);
3136
3137 return impl_func->values2coeffs(key, outvalues);
3138 }
3139
3140 template <typename Archive>
3141 void serialize(Archive& ar) {
3142 ar & impl_func & op;
3143 }
3144 };
3145
3146 /// Out of place unary operation on function impl
3147 /// The skeleton algorithm should resemble something like
3148 ///
3149 /// *this = op(*func)
3150 ///
3151 /// @param[in] key the key of the current function node (box)
3152 /// @param[in] func the function impl on which to be operated
3153 /// @param[in] op the unary operator
3154 template <typename Q, typename opT>
3155 void unaryXXa(const keyT& key,
3156 const FunctionImpl<Q,NDIM>* func, const opT& op) {
3157
3158 // const Tensor<Q>& fc = func->coeffs.find(key).get()->second.full_tensor_copy();
3159 const Tensor<Q> fc = func->coeffs.find(key).get()->second.coeff().reconstruct_tensor();
3160
3161 if (fc.size() == 0) {
3162 // Recur down
3163 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3164 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3165 const keyT& child = kit.key();
3166 woT::task(coeffs.owner(child), &implT:: template unaryXXa<Q,opT>, child, func, op);
3167 }
3168 }
3169 else {
3170 tensorT t=op(key,fc);
3171 coeffs.replace(key, nodeT(coeffT(t,targs),false)); // Leaf node
3172 }
3173 }
3174
3175 /// Multiplies two functions (impl's) together. Delegates to the mulXXa() method
3176 /// @param[in] left pointer to the left function impl
3177 /// @param[in] right pointer to the right function impl
3178 /// @param[in] tol numerical tolerance
3179 template <typename L, typename R>
3180 void mulXX(const FunctionImpl<L,NDIM>* left, const FunctionImpl<R,NDIM>* right, double tol, bool fence) {
3181 if (world.rank() == coeffs.owner(cdata.key0))
3182 mulXXa(cdata.key0, left, Tensor<L>(), right, Tensor<R>(), tol);
3183 if (fence)
3184 world.gop.fence();
3185
3186 //verify_tree();
3187 }
3188
3189 /// Performs binary operation on two functions (impl's). Delegates to the binaryXXa() method
3190 /// @param[in] left pointer to the left function impl
3191 /// @param[in] right pointer to the right function impl
3192 /// @param[in] op the binary operator
3193 template <typename L, typename R, typename opT>
3195 const opT& op, bool fence) {
3196 if (world.rank() == coeffs.owner(cdata.key0))
3197 binaryXXa(cdata.key0, left, Tensor<L>(), right, Tensor<R>(), op);
3198 if (fence)
3199 world.gop.fence();
3200
3201 //verify_tree();
3202 }
3203
3204 /// Performs unary operation on function impl. Delegates to the unaryXXa() method
3205 /// @param[in] func function impl of the operand
3206 /// @param[in] op the unary operator
3207 template <typename Q, typename opT>
3208 void unaryXX(const FunctionImpl<Q,NDIM>* func, const opT& op, bool fence) {
3209 if (world.rank() == coeffs.owner(cdata.key0))
3210 unaryXXa(cdata.key0, func, op);
3211 if (fence)
3212 world.gop.fence();
3213
3214 //verify_tree();
3215 }
3216
3217 /// Performs unary operation on function impl. Delegates to the unaryXXa() method
3218 /// @param[in] func function impl of the operand
3219 /// @param[in] op the unary operator
3220 template <typename Q, typename opT>
3221 void unaryXXvalues(const FunctionImpl<Q,NDIM>* func, const opT& op, bool fence) {
3222 if (world.rank() == coeffs.owner(cdata.key0))
3223                unaryXXa(cdata.key0, func, coeff_value_adaptor<Q,opT>(func,op));
3224            if (fence)
3225 world.gop.fence();
3226
3227 //verify_tree();
3228 }
3229
3230 /// Multiplies a function (impl) with a vector of functions (impl's). Delegates to the
3231 /// mulXXveca() method.
3232 /// @param[in] left pointer to the left function impl
3233 /// @param[in] vright vector of pointers to the right function impl's
3234 /// @param[in] tol numerical tolerance
3235 /// @param[out] vresult vector of pointers to the resulting function impl's
3236 template <typename L, typename R>
3237        void mulXXvec(const FunctionImpl<L,NDIM>* left,
3238                      const std::vector<const FunctionImpl<R,NDIM>*>& vright,
3239 const std::vector<FunctionImpl<T,NDIM>*>& vresult,
3240 double tol,
3241 bool fence) {
3242 std::vector< Tensor<R> > vr(vright.size());
3243 if (world.rank() == coeffs.owner(cdata.key0))
3244 mulXXveca(cdata.key0, left, Tensor<L>(), vright, vr, vresult, tol);
3245 if (fence)
3246 world.gop.fence();
3247 }
3248
3250
3251 mutable long box_leaf[1000];
3252 mutable long box_interior[1000];
3253
3254 // horrifically non-scalable
3255 void put_in_box(ProcessID from, long nl, long ni) const;
3256
3257 /// Prints summary of data distribution
3258 void print_info() const;
3259
3260 /// Verify tree is properly constructed ... global synchronization involved
3261
3262 /// If an inconsistency is detected, prints a message describing the error and
3263 /// then throws a madness exception.
3264 ///
3265 /// This is a reasonably quick and scalable operation that is
3266 /// useful for debugging and paranoia.
3267 void verify_tree() const;
3268
3269 /// check that parents and children are consistent
3270
3271 /// will not check proper size of coefficients
3272 /// global communication
3273 bool verify_parents_and_children() const;
3274
3275 /// check that the tree state and the coeffs are consistent
3276
3277 /// will not check existence of children and/or parents
3278 /// no communication
3279 bool verify_tree_state_local() const;
3280
3281 /// Walk up the tree returning pair(key,node) for first node with coefficients
3282
3283 /// Three possibilities.
3284 ///
3285 /// 1) The coeffs are present and returned with the key of the containing node.
3286 ///
3287 /// 2) The coeffs are further up the tree ... the request is forwarded up.
3288 ///
3289        /// 3) The coeffs are further down the tree ... an empty tensor is returned.
3290 ///
3291 /// !! This routine is crying out for an optimization to
3292 /// manage the number of messages being sent ... presently
3293 /// each parent is fetched 2^(n*d) times where n is the no. of
3294 /// levels between the level of evaluation and the parent.
3295 /// Alternatively, reimplement multiply as a downward tree
3296 /// walk and just pass the parent down. Slightly less
3297 /// parallelism but much less communication.
3298 /// @todo Robert .... help!
3299 void sock_it_to_me(const keyT& key,
3300 const RemoteReference< FutureImpl< std::pair<keyT,coeffT> > >& ref) const;
3301 /// As above, except
3302 /// 3) The coeffs are constructed from the avg of nodes further down the tree
3303 /// @todo Robert .... help!
3304 void sock_it_to_me_too(const keyT& key,
3305 const RemoteReference< FutureImpl< std::pair<keyT,coeffT> > >& ref) const;
3306
3307 /// @todo help!
3309 const keyT& key,
3310 const coordT& plotlo, const coordT& plothi, const std::vector<long>& npt,
3311 bool eval_refine) const;
3312
3313
3314 /// Evaluate a cube/slice of points ... plotlo and plothi are already in simulation coordinates
3315 /// No communications
3316 /// @param[in] plotlo the coordinate of the starting point
3317 /// @param[in] plothi the coordinate of the ending point
3318 /// @param[in] npt the number of points in each dimension
3319 Tensor<T> eval_plot_cube(const coordT& plotlo,
3320 const coordT& plothi,
3321 const std::vector<long>& npt,
3322 const bool eval_refine = false) const;
3323
3324
3325 /// Evaluate function only if point is local returning (true,value); otherwise return (false,0.0)
3326
3327 /// maxlevel is the maximum depth to search down to --- the max local depth can be
3328 /// computed with max_local_depth();
3329 std::pair<bool,T> eval_local_only(const Vector<double,NDIM>& xin, Level maxlevel) ;
3330
3331
3332 /// Evaluate the function at a point in \em simulation coordinates
3333
3334 /// Only the invoking process will get the result via the
3335 /// remote reference to a future. Active messages may be sent
3336 /// to other nodes.
3337 void eval(const Vector<double,NDIM>& xin,
3338 const keyT& keyin,
3339 const typename Future<T>::remote_refT& ref);
3340
3341 /// Get the depth of the tree at a point in \em simulation coordinates
3342
3343 /// Only the invoking process will get the result via the
3344 /// remote reference to a future. Active messages may be sent
3345 /// to other nodes.
3346 ///
3347 /// This function is a minimally-modified version of eval()
3348 void evaldepthpt(const Vector<double,NDIM>& xin,
3349 const keyT& keyin,
3350 const typename Future<Level>::remote_refT& ref);
3351
3352 /// Get the rank of leaf box of the tree at a point in \em simulation coordinates
3353
3354 /// Only the invoking process will get the result via the
3355 /// remote reference to a future. Active messages may be sent
3356 /// to other nodes.
3357 ///
3358 /// This function is a minimally-modified version of eval()
3359 void evalR(const Vector<double,NDIM>& xin,
3360 const keyT& keyin,
3361 const typename Future<long>::remote_refT& ref);
3362
3363
3364 /// Computes norm of low/high-order polyn. coeffs for autorefinement test
3365
3366 /// t is a k^d tensor. In order to screen the autorefinement
3367 /// during multiplication compute the norms of
3368 /// ... lo ... the block of t for all polynomials of order < k/2
3369 /// ... hi ... the block of t for all polynomials of order >= k/2
3370 ///
3371 /// k=5 0,1,2,3,4 --> 0,1,2 ... 3,4
3372 /// k=6 0,1,2,3,4,5 --> 0,1,2 ... 3,4,5
3373 ///
3374 /// k=number of wavelets, so k=5 means max order is 4, so max exactly
3375 /// representable squarable polynomial is of order 2.
3376 void static tnorm(const tensorT& t, double* lo, double* hi);
3377
3378 void static tnorm(const GenTensor<T>& t, double* lo, double* hi);
3379
3380 void static tnorm(const SVDTensor<T>& t, double* lo, double* hi, const int particle);
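        // Example (illustration only, not the actual NDIM implementation): the lo/hi
        // split that tnorm() computes, written out for a 2-d k x k coefficient block
        // stored row-major in a std::vector. "lo" collects entries whose polynomial
        // order is < k/2 in every dimension; "hi" is everything else.
        //
        //   #include <cmath>
        //   #include <vector>
        //
        //   void tnorm2d_sketch(const std::vector<double>& t, long k, double* lo, double* hi) {
        //       double losq = 0.0, hisq = 0.0;
        //       const long khalf = k/2;
        //       for (long i = 0; i < k; ++i) {
        //           for (long j = 0; j < k; ++j) {
        //               const double v = t[i*k + j];
        //               if (i < khalf && j < khalf) losq += v*v;   // low-order block
        //               else                        hisq += v*v;   // high-order remainder
        //           }
        //       }
        //       *lo = std::sqrt(losq);
        //       *hi = std::sqrt(hisq);
        //   }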
3381
3382 // This is invoked if the node has not been autorefined
3383 void do_square_inplace(const keyT& key);
3384
3385 // This is invoked if the node has been autorefined
3386 void do_square_inplace2(const keyT& parent, const keyT& child, const tensorT& parent_coeff);
3387
3388 /// Always returns false (for when autorefine is not wanted)
3389 bool noautorefine(const keyT& key, const tensorT& t) const;
3390
3391 /// Returns true if this block of coeffs needs autorefining
3392 bool autorefine_square_test(const keyT& key, const nodeT& t) const;
3393
3394 /// Pointwise squaring of function with optional global fence
3395
3396 /// If not autorefining, local computation only if not fencing.
3397 /// If autorefining, may result in asynchronous communication.
3398 void square_inplace(bool fence);
3399 void abs_inplace(bool fence);
3400 void abs_square_inplace(bool fence);
3401
3402 /// is this the same as trickle_down() ?
3403 void sum_down_spawn(const keyT& key, const coeffT& s);
3404
3405 /// After a 1d realspace push of an operator the coefficients must be summed down the tree to restore the correct scaling function coefficients
3406 void sum_down(bool fence);
3407
3408 /// perform this multiplication: h(1,2) = f(1,2) * g(1)
3409 template<size_t LDIM>
3411
3412 static bool randomize() {return false;}
3416
3417 implT* h; ///< the result function h(1,2) = f(1,2) * g(1)
3420 int particle; ///< if g is g(1) or g(2)
3421
3422 multiply_op() : h(), f(), g(), particle(1) {}
3423
3424 multiply_op(implT* h1, const ctT& f1, const ctL& g1, const int particle1)
3425 : h(h1), f(f1), g(g1), particle(particle1) {};
3426
3427 /// return true if this will be a leaf node
3428
3429 /// use generalization of tnorm for a GenTensor
3430 bool screen(const coeffT& fcoeff, const coeffT& gcoeff, const keyT& key) const {
3432 MADNESS_ASSERT(fcoeff.is_svd_tensor());
3435
3436 double glo=0.0, ghi=0.0, flo=0.0, fhi=0.0;
3437 g.get_impl()->tnorm(gcoeff.get_tensor(), &glo, &ghi);
3438 g.get_impl()->tnorm(fcoeff.get_svdtensor(),&flo,&fhi,particle);
3439
3440 double total_hi=glo*fhi + ghi*flo + fhi*ghi;
3441 return (total_hi<h->truncate_tol(h->get_thresh(),key));
3442
3443 }
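            // Rationale (worked out from the tnorm split): writing f ~ f_lo + f_hi and
            // g ~ g_lo + g_hi in terms of their low/high-order polynomial blocks, the
            // product is
            //   f*g = f_lo*g_lo + f_lo*g_hi + f_hi*g_lo + f_hi*g_hi,
            // and only the first term is of polynomial order below k and hence exactly
            // representable on this level. The norm of the remaining terms is bounded by
            // glo*fhi + ghi*flo + fhi*ghi, which is the quantity total_hi that screen()
            // above compares against truncate_tol.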
3444
3445 /// apply this on a FunctionNode of f and g of Key key
3446
3447 /// @param[in] key key for FunctionNode in f and g, (g: broken into particles)
3448 /// @return <this node is a leaf, coefficients of this node>
3449 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
3450
3451 // bool is_leaf=(not fdatum.second.has_children());
3452 // if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3453
3454 // break key into particles (these are the child keys; the parent keys come with f/gdatum)
3455 Key<LDIM> key1,key2;
3456 key.break_apart(key1,key2);
3457 const Key<LDIM> gkey= (particle==1) ? key1 : key2;
3458
3459 // get coefficients of the actual FunctionNode
3460 coeffT coeff1=f.get_impl()->parent_to_child(f.coeff(),f.key(),key);
3461 coeff1.normalize();
3462 const coeffT coeff2=g.get_impl()->parent_to_child(g.coeff(),g.key(),gkey);
3463
3464 // multiplication is done in TT_2D
3465 coeffT coeff1_2D=coeff1.convert(TensorArgs(h->get_thresh(),TT_2D));
3466 coeff1_2D.normalize();
3467
3468 bool is_leaf=screen(coeff1_2D,coeff2,key);
3469 if (key.level()<2) is_leaf=false;
3470
3471 coeffT hcoeff;
3472 if (is_leaf) {
3473
3474 // convert coefficients to values
3475 coeffT hvalues=f.get_impl()->coeffs2values(key,coeff1_2D);
3476 coeffT gvalues=g.get_impl()->coeffs2values(gkey,coeff2);
3477
3478 // perform multiplication
3479 coeffT result_val=h->multiply(hvalues,gvalues,particle-1);
3480
3481 hcoeff=h->values2coeffs(key,result_val);
3482
3483 // conversion on coeffs, not on values, because it implies truncation!
3484 if (not hcoeff.is_of_tensortype(h->get_tensor_type()))
3485 hcoeff=hcoeff.convert(h->get_tensor_args());
3486 }
3487
3488 return std::pair<bool,coeffT> (is_leaf,hcoeff);
3489 }
3490
3491 this_type make_child(const keyT& child) const {
3492
3493 // break key into particles
3494 Key<LDIM> key1, key2;
3495 child.break_apart(key1,key2);
3496 const Key<LDIM> gkey= (particle==1) ? key1 : key2;
3497
3498 return this_type(h,f.make_child(child),g.make_child(gkey),particle);
3499 }
3500
3502 Future<ctT> f1=f.activate();
3504 return h->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3505 &this_type::forward_ctor),h,f1,g1,particle);
3506 }
3507
3508 this_type forward_ctor(implT* h1, const ctT& f1, const ctL& g1, const int particle) {
3509 return this_type(h1,f1,g1,particle);
3510 }
3511
3512 template <typename Archive> void serialize(const Archive& ar) {
3513 ar & h & f & g & particle;
3514 }
3515 };
3516
3517
3518 /// add two functions f and g: result=alpha * f + beta * g
3519 struct add_op {
3520
3523
3524 bool randomize() const {return false;}
3525
3526 /// tracking coeffs of first and second addend
3528 /// prefactor for f, g
3529 double alpha, beta;
3530
3531 add_op() = default;
3532 add_op(const ctT& f, const ctT& g, const double alpha, const double beta)
3533 : f(f), g(g), alpha(alpha), beta(beta){}
3534
3535 /// if we are at the bottom of the trees, return the sum of the coeffs
3536 std::pair<bool,coeffT> operator()(const keyT& key) const {
3537
3538 bool is_leaf=(f.is_leaf() and g.is_leaf());
3539 if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3540
3541 coeffT fcoeff=f.get_impl()->parent_to_child(f.coeff(),f.key(),key);
3542 coeffT gcoeff=g.get_impl()->parent_to_child(g.coeff(),g.key(),key);
3543 coeffT hcoeff=copy(fcoeff);
3544 hcoeff.gaxpy(alpha,gcoeff,beta);
3545 hcoeff.reduce_rank(f.get_impl()->get_tensor_args().thresh);
3546 return std::pair<bool,coeffT> (is_leaf,hcoeff);
3547 }
3548
3549 this_type make_child(const keyT& child) const {
3550 return this_type(f.make_child(child),g.make_child(child),alpha,beta);
3551 }
3552
3553 /// retrieve the coefficients (parent coeffs might be remote)
3555 Future<ctT> f1=f.activate();
3556 Future<ctT> g1=g.activate();
3557 return f.get_impl()->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3559 }
3560
3561 /// taskq-compatible ctor
3562 this_type forward_ctor(const ctT& f1, const ctT& g1, const double alpha, const double beta) {
3563 return this_type(f1,g1,alpha,beta);
3564 }
3565
3566 template <typename Archive> void serialize(const Archive& ar) {
3567 ar & f & g & alpha & beta;
3568 }
3569
3570 };
3571
3572 /// multiply f (a pair function of NDIM) with an orbital g (LDIM=NDIM/2)
3573
3574 /// as in (with h(1,2)=*this) : h(1,2) = g(1) * f(1,2)
3575 /// use tnorm as a measure to determine if the result h (=*this) must be refined
3576 /// @param[in] f the NDIM function f=f(1,2)
3577 /// @param[in] g the LDIM function g(1) (or g(2))
3578 /// @param[in] particle 1 or 2, as in g(1) or g(2)
3579 template<size_t LDIM>
3580 void multiply(const implT* f, const FunctionImpl<T,LDIM>* g, const int particle) {
3581
3584
3585 typedef multiply_op<LDIM> coeff_opT;
3586 coeff_opT coeff_op(this,ff,gg,particle);
3587
3588 typedef insert_op<T,NDIM> apply_opT;
3589 apply_opT apply_op(this);
3590
3591 keyT key0=f->cdata.key0;
3592 if (world.rank() == coeffs.owner(key0)) {
3594 woT::task(p, &implT:: template forward_traverse<coeff_opT,apply_opT>, coeff_op, apply_op, key0);
3595 }
3596
3598 }
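        // Usage sketch (hypothetical names; assumes NDIM==6 and LDIM==3): accumulate
        // h(1,2) = f(1,2)*g(1) into this impl via the recursive traversal above.
        //
        //   FunctionImpl<double,6>* h = ...;        // this, the (initially empty) result
        //   const FunctionImpl<double,6>* f = ...;  // the pair function f(1,2)
        //   const FunctionImpl<double,3>* g = ...;  // the orbital g(1)
        //   h->multiply(f, g, /*particle=*/1);      // spawns the traversal; synchronize afterwards as needed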
3599
3600 /// Hartree product of two LDIM functions to yield a NDIM = 2*LDIM function
3601 template<size_t LDIM, typename leaf_opT>
3602 struct hartree_op {
3603 bool randomize() const {return false;}
3604
3607
3608 implT* result; ///< where to construct the pair function
3609 ctL p1, p2; ///< tracking coeffs of the two lo-dim functions
3610 leaf_opT leaf_op; ///< determine if a given node will be a leaf node
3611
3612 // ctor
3614 hartree_op(implT* result, const ctL& p11, const ctL& p22, const leaf_opT& leaf_op)
3615 : result(result), p1(p11), p2(p22), leaf_op(leaf_op) {
3616 MADNESS_ASSERT(LDIM+LDIM==NDIM);
3617 }
3618
3619 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
3620
3621 // break key into particles (these are the child keys; the parent keys come with datum1/2)
3622 Key<LDIM> key1,key2;
3623 key.break_apart(key1,key2);
3624
3625 // this returns the appropriate NS coeffs for key1 and key2 resp.
3626 const coeffT fcoeff=p1.coeff(key1);
3627 const coeffT gcoeff=p2.coeff(key2);
3628 bool is_leaf=leaf_op(key,fcoeff.full_tensor(),gcoeff.full_tensor());
3629 if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3630
3631 // extract the sum coeffs from the NS coeffs
3632 const coeffT s1=fcoeff(p1.get_impl()->cdata.s0);
3633 const coeffT s2=gcoeff(p2.get_impl()->cdata.s0);
3634
3635 // new coeffs are simply the hartree/kronecker/outer product --
3636 coeffT coeff=outer(s1,s2,result->get_tensor_args());
3637 // no post-determination
3638 // is_leaf=leaf_op(key,coeff);
3639 return std::pair<bool,coeffT>(is_leaf,coeff);
3640 }
3641
3642 this_type make_child(const keyT& child) const {
3643
3644 // break key into particles
3645 Key<LDIM> key1, key2;
3646 child.break_apart(key1,key2);
3647
3648 return this_type(result,p1.make_child(key1),p2.make_child(key2),leaf_op);
3649 }
3650
3652 Future<ctL> p11=p1.activate();
3653 Future<ctL> p22=p2.activate();
3654 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3655 &this_type::forward_ctor),result,p11,p22,leaf_op);
3656 }
3657
3658 this_type forward_ctor(implT* result1, const ctL& p11, const ctL& p22, const leaf_opT& leaf_op) {
3659 return this_type(result1,p11,p22,leaf_op);
3660 }
3661
3662 template <typename Archive> void serialize(const Archive& ar) {
3663 ar & result & p1 & p2 & leaf_op;
3664 }
3665 };
3666
3667 /// traverse a non-existing tree
3668
3669 /// part II: activate coeff_op, i.e. retrieve all the necessary remote boxes (communication)
3670 /// @param[in] coeff_op operator making the coefficients that needs activation
3671 /// @param[in] apply_op just passing thru
3672 /// @param[in] key the key we are working on
3673 template<typename coeff_opT, typename apply_opT>
3674 void forward_traverse(const coeff_opT& coeff_op, const apply_opT& apply_op, const keyT& key) const {
3676 Future<coeff_opT> active_coeff=coeff_op.activate();
3677 woT::task(world.rank(), &implT:: template traverse_tree<coeff_opT,apply_opT>, active_coeff, apply_op, key);
3678 }
3679
3680
3681 /// traverse a non-existing tree
3682
3683 /// part I: make the coefficients, process them and continue the recursion if necessary
3684 /// @param[in] coeff_op operator making the coefficients and determining whether they are leaves
3685 /// @param[in] apply_op operator processing the coefficients
3686 /// @param[in] key the key we are currently working on
3687 template<typename coeff_opT, typename apply_opT>
3688 void traverse_tree(const coeff_opT& coeff_op, const apply_opT& apply_op, const keyT& key) const {
3690
3691 typedef typename std::pair<bool,coeffT> argT;
3692 const argT arg=coeff_op(key);
3693 apply_op.operator()(key,arg.second,arg.first);
3694
3695 const bool has_children=(not arg.first);
3696 if (has_children) {
3697 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3698 const keyT& child=kit.key();
3699 coeff_opT child_op=coeff_op.make_child(child);
3700 // spawn activation where child is local
3701 ProcessID p=coeffs.owner(child);
3702
3703 void (implT::*ft)(const coeff_opT&, const apply_opT&, const keyT&) const = &implT::forward_traverse<coeff_opT,apply_opT>;
3704
3705 woT::task(p, ft, child_op, apply_op, child);
3706 }
3707 }
3708 }
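        // Serial sketch of the coeff_op/apply_op contract used by forward_traverse()
        // and traverse_tree() (no task spawning or remote activation; children_of()
        // is a hypothetical helper standing in for KeyChildIterator):
        //
        //   template <typename keyLikeT, typename coeffOpT, typename applyOpT>
        //   void traverse_serial_sketch(const coeffOpT& coeff_op, const applyOpT& apply_op,
        //                               const keyLikeT& key) {
        //       const auto arg = coeff_op(key);         // pair<bool is_leaf, coeffT coeff>
        //       apply_op(key, arg.second, arg.first);   // process/insert the coefficients
        //       if (!arg.first) {                       // not a leaf -> recurse into children
        //           for (const auto& child : children_of(key))
        //               traverse_serial_sketch(coeff_op.make_child(child), apply_op, child);
        //       }
        //   }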
3709
3710
3711 /// given two functions of LDIM, perform the Hartree/Kronecker/outer product
3712
3713 /// |Phi(1,2)> = |phi(1)> x |phi(2)>
3714 /// @param[in] p1 FunctionImpl of particle 1
3715 /// @param[in] p2 FunctionImpl of particle 2
3716 /// @param[in] leaf_op operator determining whether a given box will be a leaf
3717 template<std::size_t LDIM, typename leaf_opT>
3718 void hartree_product(const std::vector<std::shared_ptr<FunctionImpl<T,LDIM>>> p1,
3719 const std::vector<std::shared_ptr<FunctionImpl<T,LDIM>>> p2,
3720 const leaf_opT& leaf_op, bool fence) {
3721 MADNESS_CHECK_THROW(p1.size()==p2.size(),"hartree_product: p1 and p2 must have the same size");
3722 for (auto& p : p1) MADNESS_CHECK(p->is_nonstandard() or p->is_nonstandard_with_leaves());
3723 for (auto& p : p2) MADNESS_CHECK(p->is_nonstandard() or p->is_nonstandard_with_leaves());
3724
3725 const keyT key0=cdata.key0;
3726
3727 for (std::size_t i=0; i<p1.size(); ++i) {
3728 if (world.rank() == this->get_coeffs().owner(key0)) {
3729
3730 // prepare the CoeffTracker
3731 CoeffTracker<T,LDIM> iap1(p1[i].get());
3732 CoeffTracker<T,LDIM> iap2(p2[i].get());
3733
3734 // the operator making the coefficients
3735 typedef hartree_op<LDIM,leaf_opT> coeff_opT;
3736 coeff_opT coeff_op(this,iap1,iap2,leaf_op);
3737
3738 // this operator simply inserts the coeffs into this' tree
3739// typedef insert_op<T,NDIM> apply_opT;
3740 typedef accumulate_op<T,NDIM> apply_opT;
3741 apply_opT apply_op(this);
3742
3743 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
3744 coeff_op, apply_op, cdata.key0);
3745
3746 }
3747 }
3748
3750 if (fence) world.gop.fence();
3751 }
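        // Usage sketch (hypothetical names; assumes LDIM==3, NDIM==6): build pair
        // functions |Phi_i(1,2)> = |p1_i(1)> x |p2_i(2)> from two vectors of
        // nonstandard-form LDIM impls; some_leaf_op stands in for a leaf criterion.
        //
        //   std::vector<std::shared_ptr<FunctionImpl<double,3>>> p1 = ...;  // nonstandard (with leaves)
        //   std::vector<std::shared_ptr<FunctionImpl<double,3>>> p2 = ...;  // nonstandard (with leaves)
        //   FunctionImpl<double,6>* result = ...;                           // this, the target impl
        //   result->hartree_product(p1, p2, some_leaf_op, /*fence=*/true);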
3752
3753
3754 template <typename opT, typename R>
3755 void
3757 const opT* op = pop.ptr;
3758 const Level n = key.level();
3759 const double cnorm = c.normf();
3760 const double tol = truncate_tol(thresh, key)*0.1; // ??? why this value????
3761
3763 const Translation lold = lnew[axis];
3764 const Translation maxs = Translation(1)<<n;
3765
3766 int nsmall = 0; // Counts neglected blocks to terminate s loop
3767 for (Translation s=0; s<maxs; ++s) {
3768 int maxdir = s ? 1 : -1;
3769 for (int direction=-1; direction<=maxdir; direction+=2) {
3770 lnew[axis] = lold + direction*s;
3771 if (lnew[axis] >= 0 && lnew[axis] < maxs) { // NON-ZERO BOUNDARY CONDITIONS IGNORED HERE !!!!!!!!!!!!!!!!!!!!
3772 const Tensor<typename opT::opT>& r = op->rnlij(n, s*direction, true);
3773 double Rnorm = r.normf();
3774
3775 if (Rnorm == 0.0) {
3776 return; // Hard zero means finished!
3777 }
3778
3779 if (s <= 1 || r.normf()*cnorm > tol) { // Always do kernel and neighbor
3780 nsmall = 0;
3781 tensorT result = transform_dir(c,r,axis);
3782
3783 if (result.normf() > tol*0.3) {
3784 Key<NDIM> dest(n,lnew);
3785 coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
3786 }
3787 }
3788 else {
3789 ++nsmall;
3790 }
3791 }
3792 else {
3793 ++nsmall;
3794 }
3795 }
3796 if (nsmall >= 4) {
3797 // If we have two negligible blocks in
3798 // succession in each direction, interpret
3799 // this as the operator being zero beyond this point
3800 break;
3801 }
3802 }
3803 }
3804
3805 template <typename opT, typename R>
3806 void
3807 apply_1d_realspace_push(const opT& op, const FunctionImpl<R,NDIM>* f, int axis, bool fence) {
3808 MADNESS_ASSERT(!f->is_compressed());
3809
3810 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator fiterT;
3811 typedef FunctionNode<R,NDIM> fnodeT;
3812 fiterT end = f->coeffs.end();
3813 ProcessID me = world.rank();
3814 for (fiterT it=f->coeffs.begin(); it!=end; ++it) {
3815 const fnodeT& node = it->second;
3816 if (node.has_coeff()) {
3817 const keyT& key = it->first;
3818 const Tensor<R>& c = node.coeff().full_tensor_copy();
3819 woT::task(me, &implT:: template apply_1d_realspace_push_op<opT,R>,
3821 }
3822 }
3823 if (fence) world.gop.fence();
3824 }
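        // Usage sketch (hypothetical names): push a 1-d real-space operator along one
        // axis of a reconstructed source function f; op must provide rnlij(n,l,...) as
        // used above, and sum_down() restores proper scaling coefficients afterwards.
        //
        //   result_impl->apply_1d_realspace_push(op, f_impl, /*axis=*/0, /*fence=*/true);
        //   result_impl->sum_down(/*fence=*/true);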
3825
3827 const implT* f,
3828 const keyT& key,
3829 const std::pair<keyT,coeffT>& left,
3830 const std::pair<keyT,coeffT>& center,
3831 const std::pair<keyT,coeffT>& right);
3832
3833 void do_diff1(const DerivativeBase<T,NDIM>* D,
3834 const implT* f,
3835 const keyT& key,
3836 const std::pair<keyT,coeffT>& left,
3837 const std::pair<keyT,coeffT>& center,
3838 const std::pair<keyT,coeffT>& right);
3839
3840 // Called by result function to differentiate f
3841 void diff(const DerivativeBase<T,NDIM>* D, const implT* f, bool fence);
3842
3843 /// Returns key of general neighbor enforcing BC
3844
3845 /// Out of volume keys are mapped to enforce the BC as follows.
3846 /// * Periodic BC map back into the volume and return the correct key
3847 /// * non-periodic BC - returns invalid() to indicate out of volume
3848 keyT neighbor(const keyT& key, const keyT& disp, const array_of_bools<NDIM>& is_periodic) const;
3849
3850 /// Returns key of general neighbor that resides in-volume
3851
3852 /// Out of volume keys are mapped to invalid()
3853 keyT neighbor_in_volume(const keyT& key, const keyT& disp) const;
3854
3855 /// find_me. Called by diff_bdry to get coefficients of boundary function
3856 Future< std::pair<keyT,coeffT> > find_me(const keyT& key) const;
3857
3858 /// return a std::pair<key, node>, which MUST exist
3859 std::pair<Key<NDIM>,ShallowNode<T,NDIM> > find_datum(keyT key) const;
3860
3861 /// multiply the ket with a one-electron potential rr(1,2)= f(1,2)*g(1)
3862
3863 /// @param[in] val_ket function values of f(1,2)
3864 /// @param[in] val_pot function values of g(1)
3865 /// @param[in] particle if 0 then g(1), if 1 then g(2)
3866 /// @return the resulting function values
3867 coeffT multiply(const coeffT& val_ket, const coeffT& val_pot, int particle) const;
3868
3869
3870 /// given several coefficient tensors, assemble a result tensor
3871
3872 /// the result looks like: (v(1,2) + v(1) + v(2)) |ket(1,2)>
3873 /// or (v(1,2) + v(1) + v(2)) |p(1) p(2)>
3874 /// i.e. coefficients for the ket and coefficients for the two particles are
3875 /// mutually exclusive. All potential terms are optional, just pass in empty coeffs.
3876 /// @param[in] key the key of the FunctionNode to which these coeffs belong
3877 /// @param[in] coeff_ket coefficients of the ket
3878 /// @param[in] vpotential1 function values of the potential for particle 1
3879 /// @param[in] vpotential2 function values of the potential for particle 2
3880 /// @param[in] veri function values for the 2-particle potential
3881 coeffT assemble_coefficients(const keyT& key, const coeffT& coeff_ket,
3882 const coeffT& vpotential1, const coeffT& vpotential2,
3883 const tensorT& veri) const;
3884
3885
3886
3887 template<std::size_t LDIM>
3891 double error=0.0;
3892 double lo=0.0, hi=0.0, lo1=0.0, hi1=0.0, lo2=0.0, hi2=0.0;
3893
3895 pointwise_multiplier(const Key<NDIM> key, const coeffT& clhs) : coeff_lhs(clhs) {
3897 val_lhs=fcf.coeffs2values(key,coeff_lhs);
3898 error=0.0;
3900 if (coeff_lhs.is_svd_tensor()) {
3903 }
3904 }
3905
3906 /// multiply values of rhs and lhs, result on rhs, rhs and lhs are of the same dimensions
3907 tensorT operator()(const Key<NDIM> key, const tensorT& coeff_rhs) {
3908
3909 MADNESS_ASSERT(coeff_rhs.dim(0)==coeff_lhs.dim(0));
3911
3912 // the tnorm estimate is not tight enough to be efficient, better use oversampling
3913 bool use_tnorm=false;
3914 if (use_tnorm) {
3915 double rlo, rhi;
3916 implT::tnorm(coeff_rhs,&rlo,&rhi);
3917 error = hi*rlo + rhi*lo + rhi*hi;
3918 tensorT val_rhs=fcf.coeffs2values(key, coeff_rhs);
3919 val_rhs.emul(val_lhs.full_tensor_copy());
3920 return fcf.values2coeffs(key,val_rhs);
3921 } else { // use quadrature of order k+1
3922
3923 auto& cdata=FunctionCommonData<T,NDIM>::get(coeff_rhs.dim(0)); // npt=k+1
3924 auto& cdata_npt=FunctionCommonData<T,NDIM>::get(coeff_rhs.dim(0)+oversampling); // npt=k+1
3925 FunctionCommonFunctionality<T,NDIM> fcf_hi_npt(cdata_npt);
3926
3927 // coeffs2values for rhs: k -> npt=k+1
3928 tensorT coeff1(cdata_npt.vk);
3929 coeff1(cdata.s0)=coeff_rhs; // s0 is smaller than vk!
3930 tensorT val_rhs_k1=fcf_hi_npt.coeffs2values(key,coeff1);
3931
3932 // coeffs2values for lhs: k -> npt=k+1
3933 tensorT coeff_lhs_k1(cdata_npt.vk);
3934 coeff_lhs_k1(cdata.s0)=coeff_lhs.full_tensor_copy();
3935 tensorT val_lhs_k1=fcf_hi_npt.coeffs2values(key,coeff_lhs_k1);
3936
3937 // multiply
3938 val_lhs_k1.emul(val_rhs_k1);
3939
3940 // values2coeffs: npt = k+1-> k
3941 tensorT result1=fcf_hi_npt.values2coeffs(key,val_lhs_k1);
3942
3943 // extract coeffs up to k
3944 tensorT result=copy(result1(cdata.s0));
3945 result1(cdata.s0)=0.0;
3946 error=result1.normf();
3947 return result;
3948 }
3949 }
3950
3951 /// multiply values of rhs and lhs, result on rhs, rhs and lhs are of different dimensions
3952 coeffT operator()(const Key<NDIM> key, const tensorT& coeff_rhs, const int particle) {
3953 Key<LDIM> key1, key2;
3954 key.break_apart(key1,key2);
3955 const long k=coeff_rhs.dim(0);
3957 auto& cdata_lowdim=FunctionCommonData<T,LDIM>::get(k);
3958 FunctionCommonFunctionality<T,LDIM> fcf_lo(cdata_lowdim);
3962
3963
3964 // make hi-dim values from lo-dim coeff_rhs on npt grid points
3965 tensorT ones=tensorT(fcf_lo_npt.cdata.vk);
3966 ones=1.0;
3967
3968 tensorT coeff_rhs_npt1(fcf_lo_npt.cdata.vk);
3969 coeff_rhs_npt1(fcf_lo.cdata.s0)=coeff_rhs;
3970 tensorT val_rhs_npt1=fcf_lo_npt.coeffs2values(key1,coeff_rhs_npt1);
3971
3972 TensorArgs targs(-1.0,TT_2D);
3973 coeffT val_rhs;
3974 if (particle==1) val_rhs=outer(val_rhs_npt1,ones,targs);
3975 if (particle==2) val_rhs=outer(ones,val_rhs_npt1,targs);
3976
3977 // make values from hi-dim coeff_lhs on npt grid points
3978 coeffT coeff_lhs_k1(fcf_hi_npt.cdata.vk,coeff_lhs.tensor_type());
3979 coeff_lhs_k1(fcf_hi.cdata.s0)+=coeff_lhs;
3980 coeffT val_lhs_npt=fcf_hi_npt.coeffs2values(key,coeff_lhs_k1);
3981
3982 // multiply
3983 val_lhs_npt.emul(val_rhs);
3984
3985 // values2coeffs: npt = k+1-> k
3986 coeffT result1=fcf_hi_npt.values2coeffs(key,val_lhs_npt);
3987
3988 // extract coeffs up to k
3989 coeffT result=copy(result1(cdata.s0));
3990 result1(cdata.s0)=0.0;
3991 error=result1.normf();
3992 return result;
3993 }
3994
3995 template <typename Archive> void serialize(const Archive& ar) {
3996 ar & error & lo & lo1 & lo2 & hi & hi1& hi2 & val_lhs & coeff_lhs;
3997 }
3998
3999
4000 };
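        // Usage sketch (hypothetical coefficient names): multiply a ket's coefficients
        // with potential coefficients at the same key; pm.error holds the estimated
        // error of the most recent multiplication.
        //
        //   pointwise_multiplier<3> pm(key, coeff_ket);          // lhs = ket coefficients (6-d)
        //   tensorT prod12 = pm(key, coeff_v12);                 // same-dimension multiply
        //   coeffT  prod1  = pm(key, coeff_v1, /*particle=*/1);  // multiply with a 3-d factor g(1)
        //   double  err    = pm.error;                           // error estimate of the last call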
4001
4002 /// given a ket and the 1- and 2-electron potentials, construct the function V phi
4003
4004 /// small memory footprint version of Vphi_op: use the NS form to have information
4005 /// about parent and children to determine if a box is a leaf. This will require
4006 /// compression of the constituent functions, which will lead to more memory usage
4007 /// there, but will avoid oversampling of the result function.
4008 template<typename opT, size_t LDIM>
4009 struct Vphi_op_NS {
4010
4011 bool randomize() const {return true;}
4012
4016
4017 implT* result; ///< where to construct Vphi, no need to track parents
4018 opT leaf_op; ///< deciding if a given FunctionNode will be a leaf node
4019 ctT iaket; ///< the ket of a pair function (exclusive with p1, p2)
4020 ctL iap1, iap2; ///< the particles 1 and 2 (exclusive with ket)
4021 ctL iav1, iav2; ///< potentials for particles 1 and 2
4022 const implT* eri; ///< 2-particle potential, must be on-demand
4023
4024 bool have_ket() const {return iaket.get_impl();}
4025 bool have_v1() const {return iav1.get_impl();}
4026 bool have_v2() const {return iav2.get_impl();}
4027 bool have_eri() const {return eri;}
4028
4029 void accumulate_into_result(const Key<NDIM>& key, const coeffT& coeff) const {
4031 }
4032
4033 // ctor
4035 Vphi_op_NS(implT* result, const opT& leaf_op, const ctT& iaket,
4036 const ctL& iap1, const ctL& iap2, const ctL& iav1, const ctL& iav2,
4037 const implT* eri)
4039 , iav1(iav1), iav2(iav2), eri(eri) {
4040
4041 // 2-particle potential must be on-demand
4043 }
4044
4045 /// make and insert the coefficients into result's tree
4046 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
4047
4049 if(leaf_op.do_pre_screening()){
4050 // this means that we only construct the boxes which are leaf boxes from the other function in the leaf_op
4051 if(leaf_op.pre_screening(key)){
4052 // construct sum_coefficients, insert them and leave
4053 auto [sum_coeff, error]=make_sum_coeffs(key);
4054 accumulate_into_result(key,sum_coeff);
4055 return std::pair<bool,coeffT> (true,coeffT());
4056 }else{
4057 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4058 }
4059 }
4060
4061 // this means that the function has to be completely constructed and not mirrored by another function
4062
4063 // if the initial level is not reached then this must not be a leaf box
4064 size_t il = result->get_initial_level();
4066 if(key.level()<int(il)){
4067 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4068 }
4069 // if further refinement is needed (because we are at a special box, special point)
4070 // and the special_level is not reached then this must not be a leaf box
4071 if(key.level()<result->get_special_level() and leaf_op.special_refinement_needed(key)){
4072 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4073 }
4074
4075 auto [sum_coeff,error]=make_sum_coeffs(key);
4076
4077 // coeffs are leaf (for whatever reason), insert into tree and stop recursion
4078 if(leaf_op.post_screening(key,sum_coeff)){
4079 accumulate_into_result(key,sum_coeff);
4080 return std::pair<bool,coeffT> (true,coeffT());
4081 }
4082
4083 // coeffs are accurate, insert into tree and stop recursion
4084 if(error<result->truncate_tol(result->get_thresh(),key)){
4085 accumulate_into_result(key,sum_coeff);
4086 return std::pair<bool,coeffT> (true,coeffT());
4087 }
4088
4089 // coeffs are inaccurate, continue recursion
4090 std::vector<bool> child_is_leaf(1<<NDIM,false);
4091 return continue_recursion(child_is_leaf,tensorT(),key);
4092 }
4093
4094
4095 /// loop over all children and either insert their sum coeffs or continue the recursion
4096
4097 /// @param[in] child_is_leaf for each child: is it a leaf?
4098 /// @param[in] coeffs coefficient tensor with 2^N sum coeffs (=unfiltered NS coeffs)
4099 /// @param[in] key the key for the NS coeffs (=parent key of the children)
4100 /// @return to avoid recursion outside this return: std::pair<is_leaf,coeff> = true,coeffT()
4101 std::pair<bool,coeffT> continue_recursion(const std::vector<bool> child_is_leaf,
4102 const tensorT& coeffs, const keyT& key) const {
4103 std::size_t i=0;
4104 for (KeyChildIterator<NDIM> kit(key); kit; ++kit, ++i) {
4105 keyT child=kit.key();
4106 bool is_leaf=child_is_leaf[i];
4107
4108 if (is_leaf) {
4109 // insert the sum coeffs
4111 iop(child,coeffT(copy(coeffs(result->child_patch(child))),result->get_tensor_args()),is_leaf);
4112 } else {
4113 this_type child_op=this->make_child(child);
4114 noop<T,NDIM> no;
4115 // spawn activation where child is local
4116 ProcessID p=result->get_coeffs().owner(child);
4117
4118 void (implT::*ft)(const Vphi_op_NS<opT,LDIM>&, const noop<T,NDIM>&, const keyT&) const = &implT:: template forward_traverse< Vphi_op_NS<opT,LDIM>, noop<T,NDIM> >;
4119 result->task(p, ft, child_op, no, child);
4120 }
4121 }
4122 // return empty sum coeffs; also always return is_leaf=true:
4123 // the recursion is continued within this struct, not outside in traverse_tree!
4124 return std::pair<bool,coeffT> (true,coeffT());
4125 }
4126
4127 tensorT eri_coeffs(const keyT& key) const {
4130 if (eri->get_functor()->provides_coeff()) {
4131 return eri->get_functor()->coeff(key).full_tensor();
4132 } else {
4133 tensorT val_eri(eri->cdata.vk);
4134 eri->fcube(key,*(eri->get_functor()),eri->cdata.quad_x,val_eri);
4135 return eri->values2coeffs(key,val_eri);
4136 }
4137 }
4138
4139 /// the error is computed from the d coefficients of the constituent functions
4140
4141 /// the result is h_n = P_n(f g), computed as h_n \approx Pn(f_n g_n)
4142 /// its error is therefore
4143 /// h_n = (f g)_n = ((Pn(f) + Qn(f)) (Pn(g) + Qn(g)))_n
4144 /// = Pn(fn gn) + Qn(fn gn) + Pn(f) Qn(g) + Qn(f) Pn(g) + Qn(f) Qn(g)
4145 /// the first term is what we compute, the second term is estimated by tnorm (in another function),
4146 /// the third to last terms are estimated in this function by e.g.: Qn(f)Pn(g) < ||Qn(f)|| ||Pn(g)||
4148 const tensorT& ceri) const {
4149 double error = 0.0;
4150 Key<LDIM> key1, key2;
4151 key.break_apart(key1,key2);
4152
4153 PROFILE_BLOCK(compute_error);
4154 double dnorm_ket, snorm_ket;
4155 if (have_ket()) {
4156 snorm_ket=iaket.coeff(key).normf();
4157 dnorm_ket=iaket.dnorm(key);
4158 } else {
4159 double s1=iap1.coeff(key1).normf();
4160 double s2=iap2.coeff(key2).normf();
4161 double d1=iap1.dnorm(key1);
4162 double d2=iap2.dnorm(key2);
4163 snorm_ket=s1*s2;
4164 dnorm_ket=s1*d2 + s2*d1 + d1*d2;
4165 }
4166
4167 if (have_v1()) {
4168 double snorm=iav1.coeff(key1).normf();
4169 double dnorm=iav1.dnorm(key1);
4170 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4171 }
4172 if (have_v2()) {
4173 double snorm=iav2.coeff(key2).normf();
4174 double dnorm=iav2.dnorm(key2);
4175 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4176 }
4177 if (have_eri()) {
4178 tensorT s_coeffs=ceri(result->cdata.s0);
4179 double snorm=s_coeffs.normf();
4180 tensorT d=copy(ceri);
4181 d(result->cdata.s0)=0.0;
4182 double dnorm=d.normf();
4183 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4184 }
4185
4186 bool no_potential=not ((have_v1() or have_v2() or have_eri()));
4187 if (no_potential) {
4188 error=dnorm_ket;
4189 }
4190 return error;
4191 }
4192
4193 /// make the sum coeffs for key
4194 std::pair<coeffT,double> make_sum_coeffs(const keyT& key) const {
4196 // break key into particles
4197 Key<LDIM> key1, key2;
4198 key.break_apart(key1,key2);
4199
4200 // bool printme=(int(key.translation()[0])==int(std::pow(key.level(),2)/2)) and
4201 // (int(key.translation()[1])==int(std::pow(key.level(),2)/2)) and
4202 // (int(key.translation()[2])==int(std::pow(key.level(),2)/2));
4203
4204// printme=false;
4205
4206 // get/make all coefficients
4207 const coeffT coeff_ket = (iaket.get_impl()) ? iaket.coeff(key)
4208 : outer(iap1.coeff(key1),iap2.coeff(key2),result->get_tensor_args());
4209 const coeffT cpot1 = (have_v1()) ? iav1.coeff(key1) : coeffT();
4210 const coeffT cpot2 = (have_v2()) ? iav2.coeff(key2) : coeffT();
4211 const tensorT ceri = (have_eri()) ? eri_coeffs(key) : tensorT();
4212
4213 // compute first part of the total error
4214 double refine_error=compute_error_from_inaccurate_refinement(key,ceri);
4215 double error=refine_error;
4216
4217 // prepare the multiplication
4218 pointwise_multiplier<LDIM> pm(key,coeff_ket);
4219
4220 // perform the multiplication, compute tnorm part of the total error
4221 coeffT cresult(result->cdata.vk,result->get_tensor_args());
4222 if (have_v1()) {
4223 cresult+=pm(key,cpot1.get_tensor(),1);
4224 error+=pm.error;
4225 }
4226 if (have_v2()) {
4227 cresult+=pm(key,cpot2.get_tensor(),2);
4228 error+=pm.error;
4229 }
4230
4231 if (have_eri()) {
4232 tensorT result1=cresult.full_tensor_copy();
4233 result1+=pm(key,copy(ceri(result->cdata.s0)));
4234 cresult=coeffT(result1,result->get_tensor_args());
4235 error+=pm.error;
4236 } else {
4238 }
4239 if ((not have_v1()) and (not have_v2()) and (not have_eri())) {
4240 cresult=coeff_ket;
4241 }
4242
4243 return std::make_pair(cresult,error);
4244 }
4245
4246 this_type make_child(const keyT& child) const {
4247
4248 // break key into particles
4249 Key<LDIM> key1, key2;
4250 child.break_apart(key1,key2);
4251
4252 return this_type(result,leaf_op,iaket.make_child(child),
4253 iap1.make_child(key1),iap2.make_child(key2),
4254 iav1.make_child(key1),iav2.make_child(key2),eri);
4255 }
4256
4258 Future<ctT> iaket1=iaket.activate();
4259 Future<ctL> iap11=iap1.activate();
4260 Future<ctL> iap21=iap2.activate();
4261 Future<ctL> iav11=iav1.activate();
4262 Future<ctL> iav21=iav2.activate();
4263 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
4264 &this_type::forward_ctor),result,leaf_op,
4265 iaket1,iap11,iap21,iav11,iav21,eri);
4266 }
4267
4268 this_type forward_ctor(implT* result1, const opT& leaf_op, const ctT& iaket1,
4269 const ctL& iap11, const ctL& iap21, const ctL& iav11, const ctL& iav21,
4270 const implT* eri1) {
4271 return this_type(result1,leaf_op,iaket1,iap11,iap21,iav11,iav21,eri1);
4272 }
4273
4274 /// serialize this (needed for use in recursive_op)
4275 template <typename Archive> void serialize(const Archive& ar) {
4276 ar & iaket & eri & result & leaf_op & iap1 & iap2 & iav1 & iav2;
4277 }
4278 };
4279
4280 /// assemble the function V*phi using V and phi given from the functor
4281
4282 /// this function must have been constructed using the CompositeFunctorInterface.
4283 /// The interface provides one- and two-electron potentials, and the ket, which are
4284 /// assembled to give V*phi.
4285 /// @param[in] leaf_op operator to decide if a given node is a leaf node
4286 /// @param[in] fence global fence
4287 template<typename opT>
4288 void make_Vphi(const opT& leaf_op, const bool fence=true) {
4289
4290 constexpr size_t LDIM=NDIM/2;
4291 MADNESS_CHECK_THROW(NDIM==LDIM*2,"make_Vphi only works for even dimensions");
4292
4293
4294 // keep the functor available, but remove it from the result
4295 // result will return false upon is_on_demand(), which is necessary for the
4296 // CoeffTracker to track the parent coeffs correctly for error_leaf_op
4297 std::shared_ptr< FunctionFunctorInterface<T,NDIM> > func2(this->get_functor());
4298 this->unset_functor();
4299
4301 dynamic_cast<CompositeFunctorInterface<T,NDIM,LDIM>* >(&(*func2));
4303
4304 // make sure everything is in place if no fence is requested
4305 if (fence) func->make_redundant(true); // no-op if already redundant
4306 MADNESS_CHECK_THROW(func->check_redundant(),"make_Vphi requires redundant functions");
4307
4308 // loop over all functions in the functor (either ket or particles)
4309 for (auto& ket : func->impl_ket_vector) {
4310 FunctionImpl<T,NDIM>* eri=func->impl_eri.get();
4311 FunctionImpl<T,LDIM>* v1=func->impl_m1.get();
4312 FunctionImpl<T,LDIM>* v2=func->impl_m2.get();
4313 FunctionImpl<T,LDIM>* p1=nullptr;
4314 FunctionImpl<T,LDIM>* p2=nullptr;
4315 make_Vphi_only(leaf_op,ket.get(),v1,v2,p1,p2,eri,false);
4316 }
4317
4318 for (std::size_t i=0; i<func->impl_p1_vector.size(); ++i) {
4319 FunctionImpl<T,NDIM>* ket=nullptr;
4320 FunctionImpl<T,NDIM>* eri=func->impl_eri.get();
4321 FunctionImpl<T,LDIM>* v1=func->impl_m1.get();
4322 FunctionImpl<T,LDIM>* v2=func->impl_m2.get();
4323 FunctionImpl<T,LDIM>* p1=func->impl_p1_vector[i].get();
4324 FunctionImpl<T,LDIM>* p2=func->impl_p2_vector[i].get();
4325 make_Vphi_only(leaf_op,ket,v1,v2,p1,p2,eri,false);
4326 }
4327
4328 // some post-processing:
4329 // - FunctionNode::accumulate() uses buffer -> add the buffer contents to the actual coefficients
4330 // - the operation constructs sum coefficients on all scales -> sum down to get a well-defined tree-state
4331 if (fence) {
4332 world.gop.fence();
4334 sum_down(true);
4336 }
4337
4338
4339 }
4340
4341 /// assemble the function V*phi using V and phi given from the functor
4342
4343 /// this function must have been constructed using the CompositeFunctorInterface.
4344 /// The interface provides one- and two-electron potentials, and the ket, which are
4345 /// assembled to give V*phi.
4346 /// @param[in] leaf_op operator to decide if a given node is a leaf node
4347 /// @param[in] fence global fence
4348 template<typename opT, std::size_t LDIM>
4353 const bool fence=true) {
4354
4355 // prepare the CoeffTracker
4356 CoeffTracker<T,NDIM> iaket(ket);
4357 CoeffTracker<T,LDIM> iap1(p1);
4358 CoeffTracker<T,LDIM> iap2(p2);
4359 CoeffTracker<T,LDIM> iav1(v1);
4360 CoeffTracker<T,LDIM> iav2(v2);
4361
4362 // the operator making the coefficients
4363 typedef Vphi_op_NS<opT,LDIM> coeff_opT;
4364 coeff_opT coeff_op(this,leaf_op,iaket,iap1,iap2,iav1,iav2,eri);
4365
4366 // this operator simply inserts the coeffs into this' tree
4367 typedef noop<T,NDIM> apply_opT;
4368 apply_opT apply_op;
4369
4370 if (world.rank() == coeffs.owner(cdata.key0)) {
4371 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
4372 coeff_op, apply_op, cdata.key0);
4373 }
4374
4376 if (fence) world.gop.fence();
4377
4378 }
4379
4380 /// Permute the dimensions of f according to map, result on this
4381 void mapdim(const implT& f, const std::vector<long>& map, bool fence);
4382
4383 /// mirror the dimensions of f according to map, result on this
4384 void mirror(const implT& f, const std::vector<long>& mirror, bool fence);
4385
4386 /// map and mirror the translation index and the coefficients, result on this
4387
4388 /// first map the dimensions, then mirror!
4389 /// this = mirror(map(f))
4390 void map_and_mirror(const implT& f, const std::vector<long>& map,
4391 const std::vector<long>& mirror, bool fence);
4392
4393 /// take the average of two functions, similar to: this=0.5*(this+rhs)
4394
4395 /// works in either basis and also in nonstandard form
4396 void average(const implT& rhs);
4397
4398 /// change the tensor type of the coefficients in the FunctionNode
4399
4400 /// @param[in] targs target tensor arguments (threshold and full/low rank)
4401 void change_tensor_type1(const TensorArgs& targs, bool fence);
4402
4403 /// reduce the rank of the coefficients tensors
4404
4405 /// @param[in] targs target tensor arguments (threshold and full/low rank)
4406 void reduce_rank(const double thresh, bool fence);
4407
4408
4409 /// remove all nodes with level higher than n
4410 void chop_at_level(const int n, const bool fence=true);
4411
4412 /// compute norm of s and d coefficients for all nodes
4413 void compute_snorm_and_dnorm(bool fence=true);
4414
4415 /// compute the norm of the wavelet coefficients
4418
4422
4423 bool operator()(typename rangeT::iterator& it) const {
4424 auto& node=it->second;
4425 node.recompute_snorm_and_dnorm(cdata);
4426 return true;
4427 }
4428 };
4429
4430
4431 T eval_cube(Level n, coordT& x, const tensorT& c) const;
4432
4433 /// Transform sum coefficients at level n to sums+differences at level n-1
4434
4435 /// Given scaling function coefficients s[n][l][i] and s[n][l+1][i]
4436 /// return the scaling function and wavelet coefficients at the
4437 /// coarser level. I.e., decompose Vn using Vn = Vn-1 + Wn-1.
4438 /// \code
4439 /// s_i = sum(j) h0_ij*s0_j + h1_ij*s1_j
4440 /// d_i = sum(j) g0_ij*s0_j + g1_ij*s1_j
4441 /// \endcode
4442 /// Returns a new tensor and has no side effects. Works for any
4443 /// number of dimensions.
4444 ///
4445 /// No communication involved.
4446 tensorT filter(const tensorT& s) const;
4447
4448 coeffT filter(const coeffT& s) const;
4449
4450 /// Transform sums+differences at level n to sum coefficients at level n+1
4451
4452 /// Given scaling function and wavelet coefficients (s and d)
4453 /// returns the scaling function coefficients at the next finer
4454 /// level. I.e., reconstruct Vn using Vn = Vn-1 + Wn-1.
4455 /// \code
4456 /// s0 = sum(j) h0_ji*s_j + g0_ji*d_j
4457 /// s1 = sum(j) h1_ji*s_j + g1_ji*d_j
4458 /// \endcode
4459 /// Returns a new tensor and has no side effects
4460 ///
4461 /// If (sonly) ... then ss is only the scaling function coeff (and
4462 /// assume the d are zero). Works for any number of dimensions.
4463 ///
4464 /// No communication involved.
4465 tensorT unfilter(const tensorT& s) const;
4466
4467 coeffT unfilter(const coeffT& s) const;
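        // Round-trip sketch: gather the 2^NDIM children's sum coefficients into a
        // (2k)^NDIM tensor d (each child occupying its child_patch), then
        //
        //   tensorT sd = filter(d);     // coarse sum coeffs land in sd(cdata.s0), wavelet coeffs elsewhere
        //   tensorT d2 = unfilter(sd);  // recovers the children's sum coefficients (up to round-off)
        //
        // i.e. filter() and unfilter() are mutually inverse two-scale transforms.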
4468
4469 /// downsample the sum coefficients of level n+1 to sum coeffs on level n
4470
4471 /// specialization of the filter method, will yield only the sum coefficients
4472 /// @param[in] key key of level n
4473 /// @param[in] v vector of sum coefficients of level n+1
4474 /// @return sum coefficients on level n in full tensor format
4475 tensorT downsample(const keyT& key, const std::vector< Future<coeffT > >& v) const;
4476
4477 /// upsample the sum coefficients of level n to sum coeffs on level n+1
4478
4479 /// specialization of the unfilter method, will transform only the sum coefficients
4480 /// @param[in] key key of level n+1
4481 /// @param[in] coeff sum coefficients of level n (does NOT belong to key!!)
4482 /// @return sum coefficients on level n+1
4483 coeffT upsample(const keyT& key, const coeffT& coeff) const;
4484
4485 /// Projects old function into new basis (only in reconstructed form)
4486 void project(const implT& old, bool fence);
4487
4489 bool operator()(const implT* f, const keyT& key, const nodeT& t) const {
4490 return true;
4491 }
4492 template <typename Archive> void serialize(Archive& ar) {}
4493 };
4494
4495 template <typename opT>
4496 void refine_op(const opT& op, const keyT& key) {
4497 // Must allow for someone already having autorefined the coeffs
4498 // and we get a write accessor just in case they are already executing
4499 typename dcT::accessor acc;
4500 const auto found = coeffs.find(acc,key);
4501 MADNESS_CHECK(found);
4502 nodeT& node = acc->second;
4503 if (node.has_coeff() && key.level() < max_refine_level && op(this, key, node)) {
4504 coeffT d(cdata.v2k,targs);
4505 d(cdata.s0) += copy(node.coeff());
4506 d = unfilter(d);
4507 node.clear_coeff();
4508 node.set_has_children(true);
4509 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
4510 const keyT& child = kit.key();
4511 coeffT ss = copy(d(child_patch(child)));
4513 // coeffs.replace(child,nodeT(ss,-1.0,false).node_to_low_rank());
4514 coeffs.replace(child,nodeT(ss,-1.0,false));
4515 // Note value -1.0 for norm tree to indicate result of refinement
4516 }
4517 }
4518 }
4519
4520 template <typename opT>
4521 void refine_spawn(const opT& op, const keyT& key) {
4522 nodeT& node = coeffs.find(key).get()->second;
4523 if (node.has_children()) {
4524 for (KeyChildIterator<NDIM> kit(key); kit; ++kit)
4525 woT::task(coeffs.owner(kit.key()), &implT:: template refine_spawn<opT>, op, kit.key(), TaskAttributes::hipri());
4526 }
4527 else {
4528 woT::task(coeffs.owner(key), &implT:: template refine_op<opT>, op, key);
4529 }
4530 }
4531
4532 // Refine in real space according to local user-defined criterion
4533 template <typename opT>
4534 void refine(const opT& op, bool fence) {
4535 if (world.rank() == coeffs.owner(cdata.key0))
4536 woT::task(coeffs.owner(cdata.key0), &implT:: template refine_spawn<opT>, op, cdata.key0, TaskAttributes::hipri());
4537 if (fence)
4538 world.gop.fence();
4539 }
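        // Usage sketch (hypothetical predicate): refine every box whose level is below
        // a chosen maximum. The predicate signature matches the one used by refine_op()
        // above, and impl is assumed to point to a FunctionImpl in reconstructed form.
        //
        //   struct refine_below_level {
        //       int nmax;
        //       bool operator()(const implT* f, const keyT& key, const nodeT& t) const {
        //           return key.level() < nmax;                 // true -> split this box further
        //       }
        //       template <typename Archive> void serialize(Archive& ar) {ar & nmax;}
        //   };
        //   refine_below_level op{6};
        //   impl->refine(op, /*fence=*/true);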
4540
4541 bool exists_and_has_children(const keyT& key) const;
4542
4543 bool exists_and_is_leaf(const keyT& key) const;
4544
4545
4546 void broaden_op(const keyT& key, const std::vector< Future <bool> >& v);
4547
4548 // For each local node sets value of norm tree, snorm and dnorm to 0.0
4549 void zero_norm_tree();
4550
4551 // Broaden tree
4552 void broaden(const array_of_bools<NDIM>& is_periodic, bool fence);
4553
4554 /// sum all the contributions from all scales after applying an operator in mod-NS form
4555 void trickle_down(bool fence);
4556
4557 /// sum all the contributions from all scales after applying an operator in mod-NS form
4558
4559 /// cf reconstruct_op
4560 void trickle_down_op(const keyT& key, const coeffT& s);
4561
4562 /// reconstruct this tree -- respects fence
4563 void reconstruct(bool fence);
4564
4565 void change_tree_state(const TreeState finalstate, bool fence=true);
4566
4567 // Invoked on node where key is local
4568 // void reconstruct_op(const keyT& key, const tensorT& s);
4569 void reconstruct_op(const keyT& key, const coeffT& s, const bool accumulate_NS=true);
4570
4571 /// compress the wave function
4572
4573 /// after application there will be sum coefficients at the root level,
4574 /// and difference coefficients at all other levels; furthermore:
4575 /// @param[in] nonstandard keep sum coeffs at all other levels, except leaves
4576 /// @param[in] keepleaves keep sum coeffs (but no diff coeffs) at leaves
4577 /// @param[in] redundant keep only sum coeffs at all levels, discard difference coeffs
4578// void compress(bool nonstandard, bool keepleaves, bool redundant, bool fence);
4579 void compress(const TreeState newstate, bool fence);
4580
4581 /// Invoked on node where key is local
4582 Future<std::pair<coeffT,double> > compress_spawn(const keyT& key, bool nonstandard, bool keepleaves,
4583 bool redundant1);
4584
4585 private:
4586 /// convert this to redundant, i.e. have sum coefficients on all levels
4587 void make_redundant(const bool fence);
4588 public:
4589
4590 /// convert this from redundant to standard reconstructed form
4591 void undo_redundant(const bool fence);
4592
4593 void remove_internal_coefficients(const bool fence);
4594 void remove_leaf_coefficients(const bool fence);
4595
4596
4597 /// compute for each FunctionNode the norm of the function inside that node
4598 void norm_tree(bool fence);
4599
4600 double norm_tree_op(const keyT& key, const std::vector< Future<double> >& v);
4601
4603
4604 /// truncate using a tree in reconstructed form
4605
4606 /// must be invoked where key is local
4607 Future<coeffT> truncate_reconstructed_spawn(const keyT& key, const double tol);
4608
4609 /// given the sum coefficients of all children, truncate or not
4610
4611 /// @return new sum coefficients (empty if internal, non-empty if new leaf); might delete its children
4612 coeffT truncate_reconstructed_op(const keyT& key, const std::vector< Future<coeffT > >& v, const double tol);
4613
4614 /// calculate the wavelet coefficients using the sum coefficients of all child nodes
4615
4616 /// also compute the norm tree for all nodes
4617 /// @param[in] key this's key
4618 /// @param[in] v sum coefficients of the child nodes
4619 /// @param[in] nonstandard keep the sum coefficients with the wavelet coefficients
4620 /// @param[in] redundant keep only the sum coefficients, discard the wavelet coefficients
4621 /// @return the sum coefficients
4622 std::pair<coeffT,double> compress_op(const keyT& key, const std::vector< Future<std::pair<coeffT,double>> >& v, bool nonstandard);
4623
4624
4625 /// similar to compress_op, but insert only the sum coefficients in the tree
4626
4627 /// also compute the norm tree for all nodes
4628 /// @param[in] key this's key
4629 /// @param[in] v sum coefficients of the child nodes
4630 /// @return the sum coefficients
4631 std::pair<coeffT,double> make_redundant_op(const keyT& key,const std::vector< Future<std::pair<coeffT,double> > >& v);
4632
4633 /// Changes non-standard compressed form to standard compressed form
4634 void standard(bool fence);
4635
4636 /// Changes non-standard compressed form to standard compressed form
4639
4640 // threshold for rank reduction / SVD truncation
4642
4643 // constructor takes target precision
4644 do_standard() = default;
4646
4647 //
4648 bool operator()(typename rangeT::iterator& it) const {
4649
4650 const keyT& key = it->first;
4651 nodeT& node = it->second;
4652 if (key.level()> 0 && node.has_coeff()) {
4653 if (node.has_children()) {
4654 // Zero out scaling coeffs
4655 MADNESS_ASSERT(node.coeff().dim(0)==2*impl->get_k());
4656 node.coeff()(impl->cdata.s0)=0.0;
4657 node.reduceRank(impl->targs.thresh);
4658 } else {
4659 // Deleting both scaling and wavelet coeffs
4660 node.clear_coeff();
4661 }
4662 }
4663 return true;
4664 }
4665 template <typename Archive> void serialize(const Archive& ar) {
4666 MADNESS_EXCEPTION("no serialization of do_standard",1);
4667 }
4668 };
4669
4670
4671 /// laziness
4672 template<size_t OPDIM>
4673 struct do_op_args {
4676 double tol, fac, cnorm;
4677
4678 do_op_args() = default;
4679 do_op_args(const Key<OPDIM>& key, const Key<OPDIM>& d, const keyT& dest, double tol, double fac, double cnorm)
4680 : key(key), d(d), dest(dest), tol(tol), fac(fac), cnorm(cnorm) {}
4681 template <class Archive>
4682 void serialize(Archive& ar) {
4683 ar & archive::wrap_opaque(this,1);
4684 }
4685 };
4686
4687 /// for fine-grain parallelism: call the apply method of an operator in a separate task
4688
4689 /// @param[in] op the operator working on our function
4690 /// @param[in] c full rank tensor holding the NS coefficients
4691 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4692 template <typename opT, typename R, size_t OPDIM>
4693 void do_apply_kernel(const opT* op, const Tensor<R>& c, const do_op_args<OPDIM>& args) {
4694
4695 tensorT result = op->apply(args.key, args.d, c, args.tol/args.fac/args.cnorm);
4696
4697 // Screen here to reduce communication cost of negligible data
4698 // and also to ensure we don't needlessly widen the tree when
4699 // applying the operator
4700 if (result.normf()> 0.3*args.tol/args.fac) {
4702 //woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4703 // UGLY BUT ADDED THE OPTIMIZATION BACK IN HERE EXPLICITLY/
4704 if (args.dest == world.rank()) {
4705 coeffs.send(args.dest, &nodeT::accumulate, result, coeffs, args.dest);
4706 }
4707 else {
4709 }
4710 }
4711 }
4712
4713 /// same as do_apply_kernel, but use full rank tensors as input and low rank tensors as output
4714
4715 /// @param[in] op the operator working on our function
4716 /// @param[in] c full rank tensor holding the NS coefficients
4717 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4718 /// @param[in] apply_targs TensorArgs with tightened threshold for accumulation
4719 /// @return the norm of the result tensor; the result is accumulated into the destination node
4720 template <typename opT, typename R, size_t OPDIM>
4721 double do_apply_kernel2(const opT* op, const Tensor<R>& c, const do_op_args<OPDIM>& args,
4722 const TensorArgs& apply_targs) {
4723
4724 tensorT result_full = op->apply(args.key, args.d, c, args.tol/args.fac/args.cnorm);
4725 const double norm=result_full.normf();
4726
4727 // Screen here to reduce communication cost of negligible data
4728 // and also to ensure we don't needlessly widen the tree when
4729 // applying the operator
4730 // OPTIMIZATION NEEDED HERE ... CHANGING THIS TO TASK NOT SEND REMOVED
4731 // BUILTIN OPTIMIZATION TO SHORTCIRCUIT MSG IF DATA IS LOCAL
4732 if (norm > 0.3*args.tol/args.fac) {
4733
4734 small++;
4735 //double cpu0=cpu_time();
4736 coeffT result=coeffT(result_full,apply_targs);
4737 MADNESS_ASSERT(result.is_full_tensor() or result.is_svd_tensor());
4738 //double cpu1=cpu_time();
4739 //timer_lr_result.accumulate(cpu1-cpu0);
4740
4741 coeffs.task(args.dest, &nodeT::accumulate, result, coeffs, args.dest, apply_targs,
4743
4744 //woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4745 }
4746 return norm;
4747 }
4748
4749
4750
4751 /// same as do_apply_kernel2, but use low rank tensors as input and low rank tensors as output
4752
4753 /// @param[in] op the operator working on our function
4754 /// @param[in] coeff low rank tensor (GenTensor) holding the NS coefficients
4755 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4756 /// @param[in] apply_targs TensorArgs with tightened threshold for accumulation
4757 /// @return the norm of the result tensor; the result is accumulated into the destination node
4758 template <typename opT, typename R, size_t OPDIM>
4759 double do_apply_kernel3(const opT* op, const GenTensor<R>& coeff, const do_op_args<OPDIM>& args,
4760 const TensorArgs& apply_targs) {
4761
4762 coeffT result;
4763 if (2*OPDIM==NDIM) result= op->apply2_lowdim(args.key, args.d, coeff,
4764 args.tol/args.fac/args.cnorm, args.tol/args.fac);
4765 if (OPDIM==NDIM) result = op->apply2(args.key, args.d, coeff,
4766 args.tol/args.fac/args.cnorm, args.tol/args.fac);
4767
4768 const double result_norm=result.svd_normf();
4769
4770 if (result_norm> 0.3*args.tol/args.fac) {
4771 small++;
4772
4773 double cpu0=cpu_time();
4774 if (not result.is_of_tensortype(targs.tt)) result=result.convert(targs);
4775 double cpu1=cpu_time();
4776 timer_lr_result.accumulate(cpu1-cpu0);
4777
4778 // accumulate also expects result in SVD form
4779 coeffs.task(args.dest, &nodeT::accumulate, result, coeffs, args.dest, apply_targs,
4781// woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4782
4783 }
4784 return result_norm;
4785
4786 }
4787
4788 // volume of n-dimensional sphere of radius R
4789 double vol_nsphere(int n, double R) {
4790 return std::pow(madness::constants::pi,n*0.5)*std::pow(R,n)/std::tgamma(1+0.5*n);
4791 }
4792
4793
4794 /// apply an operator on the coeffs c (at node key)
4795
4796 /// the result is accumulated inplace to this's tree at various FunctionNodes
4797 /// @param[in] op the operator to act on the source function
4798 /// @param[in] key key of the source FunctionNode of f which is processed
4799 /// @param[in] c coeffs of the FunctionNode of f which is processed
4800 template <typename opT, typename R>
4801 void do_apply(const opT* op, const keyT& key, const Tensor<R>& c) {
4803
4804 // working assumption here WAS that the operator is
4805 // isotropic and monotonically decreasing with distance
4806 // ... however, now we are using derivative Gaussian
4807 // expansions (and also non-cubic boxes), so isotropy is
4808 // violated. While not strictly monotonically decreasing,
4809 // the derivative gaussian is still such that once it
4810 // becomes negligible we are in the asymptotic region.
4811
4812 typedef typename opT::keyT opkeyT;
4813 constexpr auto opdim = opT::opdim;
4814 const opkeyT source = op->get_source_key(key);
4815
4816 // Tuning here is based on observation that with
4817 // sufficiently high-order wavelet relative to the
4818 // precision, that only nearest neighbor boxes contribute,
4819 // whereas for low-order wavelets more neighbors will
4820 // contribute. Sufficiently high is picked as
4821 // k>=2-log10(eps) which is our empirical rule for
4822 // efficiency/accuracy and code instrumentation has
4823 // previously indicated that (in 3D) just unit
4824 // displacements are invoked. The error decays as R^-(k+1),
4825 // and the number of boxes increases as R^d.
4826 //
4827 // Fac is the expected number of contributions to a given
4828 // box, so the error permitted per contribution will be
4829 // tol/fac
4830
4831 // radius of shell (nearest neighbor is diameter of 3 boxes, so radius=1.5)
4832 double radius = 1.5 + 0.33 * std::max(0.0, 2 - std::log10(thresh) -
4833 k); // 0.33 was 0.5
4834 //double radius = 2.5;
4835 double fac = vol_nsphere(NDIM, radius);
4836 // previously fac=10.0 selected empirically constrained by qmprop
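            // Worked example (illustrative numbers): for thresh=1e-6 and k=8 the max()
            // term is 2 - log10(1e-6) - 8 = 0, so radius=1.5 and, in 3D,
            // fac = vol_nsphere(3,1.5) = (4/3)*pi*1.5^3 ~= 14.1 expected contributions
            // per box; for a lower-order wavelet such as k=6 the radius grows to
            // 1.5 + 0.33*2 ~= 2.2 and fac grows accordingly, tightening tol/fac.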
4837
4838 double cnorm = c.normf();
4839
4840 // BC handling:
4841 // - if operator is lattice-summed then treat this as nonperiodic (i.e. tell neighbor() to stay in simulation cell)
4842 // - if operator is NOT lattice-summed then obey BC (i.e. tell neighbor() to go outside the simulation cell along periodic dimensions)
4843 // - BUT user can force operator to treat its arguments as non-periodic (`op.set_domain_periodicity({true,true,true})`) so ... which dimensions of this function are treated as periodic by op?
4844 const array_of_bools<NDIM> this_is_treated_by_op_as_periodic =
4845 (op->particle() == 1)
4846 ? array_of_bools<NDIM>{false}.or_front(
4847 op->domain_is_periodic())
4848 : array_of_bools<NDIM>{false}.or_back(
4849 op->domain_is_periodic());
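            // Illustrative example (assuming or_front/or_back OR the opdim flags into the
            // leading/trailing dimensions): for NDIM=6 and opdim=3, an operator acting on
            // particle 1 treats dimensions 0..2 with its own periodicity and 3..5 as
            // non-periodic; for particle 2 the roles are swapped.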
4850
4851 const auto default_distance_squared = [&](const auto &displacement)
4852 -> std::uint64_t {
4853 return displacement.distsq_bc(op->lattice_summed());
4854 };
4855 const auto default_skip_predicate = [&](const auto &displacement)
4856 -> bool {
4857 return false;
4858 };
4859 const auto for_each = [&](const auto &displacements,
4860 const auto &distance_squared,
4861 const auto &skip_predicate) -> std::optional<std::uint64_t> {
4862
4863 // used to screen estimated and actual contributions
4864 //const double tol = truncate_tol(thresh, key);
4865 //const double tol = 0.1*truncate_tol(thresh, key);
4866 const double tol = truncate_tol(thresh, key);
4867
4868 // assume an isotropic decaying kernel and screen in shell-wise fashion by
4869 // monitoring the decay of the magnitude of the contribution norms with
4870 // distance ... as soon as we find a shell of displacements with at least
4871 // one member inside the simulation domain (see neighbor()) in which
4872 // all in-domain displacements produce negligible contributions, stop.
4873 // a displacement is negligible if ||op|| * ||c|| <= tol / fac,
4874 // where fac accounts for the expected number of contributions per box
4875 int nvalid = 1; // Counts #valid at each distance
4876 int nused = 1; // Counts #used at each distance
4877 std::optional<std::uint64_t> distsq;
4878
4879 // displacements to the kernel range boundary are typically of the same magnitude (modulo some variation), so
4880 // estimate the norm of the resulting contributions via a probing displacement and skip them all if it is too small
4881 if constexpr (std::is_same_v<std::decay_t<decltype(displacements)>,BoxSurfaceDisplacementRange<opdim>>) {
4882 const auto &probing_displacement =
4883 displacements.probing_displacement();
4884 const double opnorm =
4885 op->norm(key.level(), probing_displacement, source);
4886 if (cnorm * opnorm <= tol / fac) {
4887 return {};
4888 }
4889 }
4890
4891 const auto disp_end = displacements.end();
4892 for (auto disp_it = displacements.begin(); disp_it != disp_end;
4893 ++disp_it) {
4894 const auto &displacement = *disp_it;
4895 if (skip_predicate(displacement)) continue;
4896
4897 keyT d;
4898 Key<NDIM - opdim> nullkey(key.level());
4899 MADNESS_ASSERT(op->particle() == 1 || op->particle() == 2);
4900 if (op->particle() == 1)
4901 d = displacement.merge_with(nullkey);
4902 else
4903 d = nullkey.merge_with(displacement);
4904
4905 // shell-wise screening: assumes displacements are grouped into shells sorted so that the operator decays with shell index. N.B. a lattice-summed decaying kernel is periodic (i.e. does not decay w.r.t. r), so loop over shells of displacements sorted by distances modulated by periodicity (Key::distsq_bc)
4906 const uint64_t dsq = distance_squared(displacement);
4907 if (!distsq ||
4908 dsq != *distsq) { // Moved to next shell of neighbors
4909 if (nvalid > 0 && nused == 0 && dsq > 1) {
4910 // Have at least done the input box and all first
4911 // nearest neighbors, and none of the last set
4912 // of neighbors made significant contributions. Thus,
4913 // assuming monotonic decrease, we are done.
4914 break;
4915 }
4916 nused = 0;
4917 nvalid = 0;
4918 distsq = dsq;
4919 }
4920
4921 keyT dest = neighbor(key, d, this_is_treated_by_op_as_periodic);
4922 if (dest.is_valid()) {
4923 nvalid++;
4924 const double opnorm = op->norm(key.level(), displacement, source);
4925
4926 if (cnorm * opnorm > tol / fac) {
4927 tensorT result =
4928 op->apply(source, displacement, c, tol / fac / cnorm);
4929 if (result.normf() > 0.3 * tol / fac) {
4930 if (coeffs.is_local(dest))
4931 coeffs.send(dest, &nodeT::accumulate2, result, coeffs,
4932 dest);
4933 else
4934 coeffs.task(dest, &nodeT::accumulate2, result, coeffs,
4935 dest);
4936 nused++;
4937 }
4938 }
4939 }
4940 }
4941
4942 return distsq;
4943 };
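            // N.B. for_each returns the (squared) distance of the last shell it visited,
            // or an empty optional if the probing displacement screened out the whole batch;
            // this value is reused below to filter the range-boundary displacements.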
4944
4945 // process "standard" displacements, screening assumes monotonic decay of the kernel
4946 // list of displacements sorted in order of increasing distance
4947 // N.B. if op is lattice-summed use periodic displacements, else use
4948 // non-periodic even if op treats any modes of this as periodic
4949 const std::vector<opkeyT> &disp = op->get_disp(key.level());
4950 const auto max_distsq_reached = for_each(disp, default_distance_squared, default_skip_predicate);
4951
4952 // for range-restricted kernels displacements to the boundary of the kernel range also need to be included
4953 // N.B. hard range restriction will result in slow decay of operator matrix elements for the displacements
4954 // to the range boundary; one should either use a soft restriction or sacrifice precision
4955 if (op->range_restricted() && key.level() >= 1) {
4956
4957 std::array<std::optional<std::int64_t>, opdim> box_radius;
4958 std::array<std::optional<std::int64_t>, opdim> surface_thickness;
4959 auto &range = op->get_range();
4960 for (int d = 0; d != opdim; ++d) {
4961 if (range[d]) {
4962 box_radius[d] = range[d].N();
4963 surface_thickness[d] = range[d].finite_soft() ? 1 : 0;
4964 }
4965 }
4966
4968 // skip surface displacements that take us outside of the domain and/or were included in regular displacements
4969 // N.B. for lattice-summed axes the "filter" also maps the displacement back into the simulation cell
4970 if (max_distsq_reached)
4971 filter = BoxSurfaceDisplacementFilter<opdim>(/* domain_is_infinite= */ op->domain_is_periodic(), /* domain_is_periodic= */ op->lattice_summed(), range, default_distance_squared, *max_distsq_reached);
4972
4973 // this range iterates over the entire surface layer(s), and provides a probing displacement that can be used to screen out the entire box
4974 auto opkey = op->particle() == 1 ? key.template extract_front<opdim>() : key.template extract_front<opdim>();
4976 range_boundary_face_displacements(opkey, box_radius,
4977 surface_thickness,
4978 op->lattice_summed(), // along lattice-summed axes treat the box as periodic, make displacements to one side of the box
4979 filter);
4980 for_each(
4981 range_boundary_face_displacements,
4982 // surface displacements are not screened, all are included
4983 [](const auto &displacement) -> std::uint64_t { return 0; },
4984 default_skip_predicate);
4985 }
4986 }
4987
4988
4989 /// apply an operator to the function f and accumulate the result into this
4990 template <typename opT, typename R>
4991 void apply(opT& op, const FunctionImpl<R,NDIM>& f, bool fence) {
4993 MADNESS_ASSERT(!op.modified());
4994 typename dcT::const_iterator end = f.coeffs.end();
4995 for (typename dcT::const_iterator it=f.coeffs.begin(); it!=end; ++it) {
4996 // looping through all the coefficients in the source
4997 const keyT& key = it->first;
4998 const FunctionNode<R,NDIM>& node = it->second;
4999 if (node.has_coeff()) {
5000 if (node.coeff().dim(0) != k /* i.e. not a leaf */ || op.doleaves) {
5002// woT::task(p, &implT:: template do_apply<opT,R>, &op, key, node.coeff()); //.full_tensor_copy() ????? why copy ????
5003 woT::task(p, &implT:: template do_apply<opT,R>, &op, key, node.coeff().reconstruct_tensor());
5004 }
5005 }
5006 }
5007 if (fence)
5008 world.gop.fence();
5009
5011// this->compressed=true;
5012// this->nonstandard=true;
5013// this->redundant=false;
5014
5015 }
5016
5017
5018
5019 /// apply an operator on the coeffs c (at node key)
5020
5021 /// invoked by the result function; the result is accumulated in place into this function's tree at various FunctionNodes
5022 /// @param[in] op the operator to act on the source function
5023 /// @param[in] key key of the source FunctionNode of f which is processed (see "source")
5024 /// @param[in] coeff coeffs of FunctionNode being processed
5025 /// @param[in] do_kernel true: process only the zero-displacement (kernel) term; false: process everything but the kernel
5026 /// @return max norm, and will modify or include new nodes in this' tree
5027 template <typename opT, typename R>
5028 double do_apply_directed_screening(const opT* op, const keyT& key, const coeffT& coeff,
5029 const bool& do_kernel) {
5031 // insert timer here
5032 typedef typename opT::keyT opkeyT;
5033
5034 // screening: contains all displacement keys that had small result norms
5035 std::list<opkeyT> blacklist;
5036
5037 constexpr auto opdim=opT::opdim;
5038 Key<NDIM-opdim> nullkey(key.level());
5039
5040 // source is that part of key that corresponds to those dimensions being processed
5041 const opkeyT source=op->get_source_key(key);
5042
5043 const double tol = truncate_tol(thresh, key);
5044
5045 // fac is the square root of the number of neighbors in the first shell (3^NDIM boxes)
5046 double fac=std::pow(3,NDIM*0.5);
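            // e.g. (illustrative) for NDIM=6 the first shell holds 3^6 = 729 boxes, so fac = 3^3 = 27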
5047 double cnorm = coeff.normf();
5048
5049 // for accumulation: keep slightly tighter TensorArgs
5050 TensorArgs apply_targs(targs);
5051 apply_targs.thresh=tol/fac*0.03;
5052
5053 double maxnorm=0.0;
5054
5055 // for the kernel it may be more efficient to do the convolution in full rank
5056 tensorT coeff_full;
5057 // for partial application (exchange operator) it's more efficient to
5058 // use SVD tensors instead of tensor trains, because the addition in apply
5059 // can be done in full form for the specific particle
5060 coeffT coeff_SVD=coeff.convert(TensorArgs(-1.0,TT_2D));
5061#ifdef HAVE_GENTENSOR
5062 coeff_SVD.get_svdtensor().orthonormalize(tol*GenTensor<T>::fac_reduce());
5063#endif
5064
5065 // list of displacements sorted in order of increasing distance
5066 // N.B. if op is lattice-summed gives periodic displacements, else uses
5067 // non-periodic even if op treats any modes of this as periodic
5068 const std::vector<opkeyT>& disp = Displacements<opdim>().get_disp(key.level(), op->lattice_summed());
5069
5070 for (typename std::vector<opkeyT>::const_iterator it=disp.begin(); it != disp.end(); ++it) {
5071 const opkeyT& d = *it;
5072
5073 const int shell=d.distsq_bc(op->lattice_summed());
5074 if (do_kernel and (shell>0)) break;
5075 if ((not do_kernel) and (shell==0)) continue;
5076
5077 keyT disp1;
5078 if (op->particle()==1) disp1=it->merge_with(nullkey);
5079 else if (op->particle()==2) disp1=nullkey.merge_with(*it);
5080 else {
5081 MADNESS_EXCEPTION("confused particle in operator??",1);
5082 }
5083
5084 keyT dest = neighbor_in_volume(key, disp1);
5085
5086 if (not dest.is_valid()) continue;
5087
5088 // directed screening
5089 // working assumption here is that the operator is isotropic and
5090 // monotonically decreasing with distance
5091 bool screened=false;
5092 typename std::list<opkeyT>::const_iterator it2;
5093 for (it2=blacklist.begin(); it2!=blacklist.end(); it2++) {
5094 if (d.is_farther_out_than(*it2)) {
5095 screened=true;
5096 break;
5097 }
5098 }
5099 if (not screened) {
5100
5101 double opnorm = op->norm(key.level(), d, source);
5102 double norm=0.0;
5103
5104 if (cnorm*opnorm> tol/fac) {
5105
5106 double cost_ratio=op->estimate_costs(source, d, coeff_SVD, tol/fac/cnorm, tol/fac);
5107 // cost_ratio=1.5; // force low rank
5108 // cost_ratio=0.5; // force full rank
5109
5110 if (cost_ratio>0.0) {
5111
5112 do_op_args<opdim> args(source, d, dest, tol, fac, cnorm);
5113 norm=0.0;
5114 if (cost_ratio<1.0) {
5115 if (not coeff_full.has_data()) coeff_full=coeff.full_tensor_copy();
5116 norm=do_apply_kernel2(op, coeff_full,args,apply_targs);
5117 } else {
5118 if (2*opdim==NDIM) { // apply operator on one particle only
5119 norm=do_apply_kernel3(op,coeff_SVD,args,apply_targs);
5120 } else {
5121 norm=do_apply_kernel3(op,coeff,args,apply_targs);
5122 }
5123 }
5124 maxnorm=std::max(norm,maxnorm);
5125 }
5126
5127 } else if (shell >= 12) {
5128 break; // Assumes monotonic decay beyond nearest neighbor
5129 }
5130 if (norm<0.3*tol/fac) blacklist.push_back(d);
5131 }
5132 }
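            // N.B. the returned maxnorm is used by the recursive_apply drivers
            // (cf. recursive_apply_op::finalize) to decide whether a node may be
            // treated as a leaf.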
5133 return maxnorm;
5134 }
5135
5136
5137 /// similar to apply, but for low rank coeffs
5138 template <typename opT, typename R>
5139 void apply_source_driven(opT& op, const FunctionImpl<R,NDIM>& f, bool fence) {
5141
5142 MADNESS_ASSERT(not op.modified());
5143 // looping through all the coefficients of the source f
5144 typename dcT::const_iterator end = f.get_coeffs().end();
5145 for (typename dcT::const_iterator it=f.get_coeffs().begin(); it!=end; ++it) {
5146
5147 const keyT& key = it->first;
5148 const coeffT& coeff = it->second.coeff();
5149
5150 if (coeff.has_data() and (coeff.rank()!=0)) {
5152 woT::task(p, &implT:: template do_apply_directed_screening<opT,R>, &op, key, coeff, true);
5153 woT::task(p, &implT:: template do_apply_directed_screening<opT,R>, &op, key, coeff, false);
5154 }
5155 }
5156 if (fence) world.gop.fence();
5158 }
5159
5160 /// after apply we need to do some cleanup;
5161
5162 /// forces fence
5163 double finalize_apply();
5164
5165 /// after summing up we need to do some cleanup;
5166
5167 /// forces fence
5168 void finalize_sum();
5169
5170 /// traverse a non-existing tree, make its coeffs and apply an operator
5171
5172 /// invoked by result
5173 /// here we use the fact that the hi-dim NS coefficients on all scales are exactly
5174 /// the outer product of the underlying low-dim functions (also in NS form),
5175 /// so we don't need to construct the full hi-dim tree and then turn it into NS form.
5176 /// @param[in] apply_op the operator acting on the NS tree
5177 /// @param[in] fimpl the funcimpl of the function of particle 1
5178 /// @param[in] gimpl the funcimpl of the function of particle 2
5179 template<typename opT, std::size_t LDIM>
5180 void recursive_apply(opT& apply_op, const FunctionImpl<T,LDIM>* fimpl,
5181 const FunctionImpl<T,LDIM>* gimpl, const bool fence) {
5182
5183 //print("IN RECUR2");
5184 const keyT& key0=cdata.key0;
5185
5186 if (world.rank() == coeffs.owner(key0)) {
5187
5188 CoeffTracker<T,LDIM> ff(fimpl);
5189 CoeffTracker<T,LDIM> gg(gimpl);
5190
5191 typedef recursive_apply_op<opT,LDIM> coeff_opT;
5192 coeff_opT coeff_op(this,ff,gg,&apply_op);
5193
5194 typedef noop<T,NDIM> apply_opT;
5195 apply_opT apply_op;
5196
5198 woT::task(p, &implT:: template forward_traverse<coeff_opT,apply_opT>, coeff_op, apply_op, key0);
5199
5200 }
5201 if (fence) world.gop.fence();
5203 }
5204
5205 /// recursive part of recursive_apply
5206 template<typename opT, std::size_t LDIM>
5208 bool randomize() const {return true;}
5209
5211
5216
5217 // ctor
5221 const opT* apply_op) : result(result), iaf(iaf), iag(iag), apply_op(apply_op)
5222 {
5223 MADNESS_ASSERT(LDIM+LDIM==NDIM);
5224 }
5226 iag(other.iag), apply_op(other.apply_op) {}
5227
5228
5229 /// make the NS-coefficients and send off the application of the operator
5230
5231 /// @return a std::pair<bool,coeffT>(is_leaf,coeffT())
5232 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
5233
5234 // World& world=result->world;
5235 // break key into particles (these are the child keys; the parent keys come with datum1/2)
5236 Key<LDIM> key1,key2;
5237 key.break_apart(key1,key2);
5238
5239 // the lo-dim functions should be in full tensor form
5240 const tensorT fcoeff=iaf.coeff(key1).full_tensor();
5241 const tensorT gcoeff=iag.coeff(key2).full_tensor();
5242
5243 // would this be a leaf node? If so, then its sum coeffs have already been
5244 // processed by the parent node's wavelet coeffs. Therefore we won't
5245 // process it any more.
5247 bool is_leaf=leaf_op(key,fcoeff,gcoeff);
5248
5249 if (not is_leaf) {
5250 // new coeffs are simply the hartree/kronecker/outer product --
5251 const std::vector<Slice>& s0=iaf.get_impl()->cdata.s0;
5252 const coeffT coeff = (apply_op->modified())
5253 ? outer(copy(fcoeff(s0)),copy(gcoeff(s0)),result->targs)
5254 : outer(fcoeff,gcoeff,result->targs);
5255
5256 // now send off the application
5257 tensorT coeff_full;
5259 double norm0=result->do_apply_directed_screening<opT,T>(apply_op, key, coeff, true);
5260
5261 result->task(p,&implT:: template do_apply_directed_screening<opT,T>,
5262 apply_op,key,coeff,false);
5263
5264 return finalize(norm0,key,coeff);
5265
5266 } else {
5267 return std::pair<bool,coeffT> (is_leaf,coeffT());
5268 }
5269 }
5270
5271 /// sole purpose is to wait for the kernel norm, wrap it and send it back to caller
5272 std::pair<bool,coeffT> finalize(const double kernel_norm, const keyT& key,
5273 const coeffT& coeff) const {
5274 const double thresh=result->get_thresh()*0.1;
5275 bool is_leaf=(kernel_norm<result->truncate_tol(thresh,key));
5276 if (key.level()<2) is_leaf=false;
5277 return std::pair<bool,coeffT> (is_leaf,coeff);
5278 }
5279
5280
5281 this_type make_child(const keyT& child) const {
5282
5283 // break key into particles
5284 Key<LDIM> key1, key2;
5285 child.break_apart(key1,key2);
5286
5287 return this_type(result,iaf.make_child(key1),iag.make_child(key2),apply_op);
5288 }
5289
5293 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5294 &this_type::forward_ctor),result,f1,g1,apply_op);
5295 }
5296
5298 const opT* apply_op1) {
5299 return this_type(r,f1,g1,apply_op1);
5300 }
5301
5302 template <typename Archive> void serialize(const Archive& ar) {
5303 ar & result & iaf & iag & apply_op;
5304 }
5305 };
5306
5307 /// traverse an existing tree and apply an operator
5308
5309 /// invoked by result
5310 /// @param[in] apply_op the operator acting on the NS tree
5311 /// @param[in] fimpl the funcimpl of the source function
5312 /// @param[in] rimpl a dummy function for recursive_op to insert data
5313 template<typename opT>
5314 void recursive_apply(opT& apply_op, const implT* fimpl, implT* rimpl, const bool fence) {
5315
5316 print("IN RECUR1");
5317
5318 const keyT& key0=cdata.key0;
5319
5320 if (world.rank() == coeffs.owner(key0)) {
5321
5322 typedef recursive_apply_op2<opT> coeff_opT;
5323 coeff_opT coeff_op(this,fimpl,&apply_op);
5324
5325 typedef noop<T,NDIM> apply_opT;
5326 apply_opT apply_op;
5327
5328 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
5329 coeff_op, apply_op, cdata.key0);
5330
5331 }
5332 if (fence) world.gop.fence();
5334 }
5335
5336 /// recursive part of recursive_apply
5337 template<typename opT>
5339 bool randomize() const {return true;}
5340
5343 typedef std::pair<bool,coeffT> argT;
5344
5345 mutable implT* result;
5346 ctT iaf; /// need this for randomization
5347 const opT* apply_op;
5348
5349 // ctor
5353
5355 iaf(other.iaf), apply_op(other.apply_op) {}
5356
5357
5358 /// send off the application of the operator
5359
5360 /// the first (core) neighbor (i.e. the box itself) is processed
5361 /// immediately, all other ones are pushed into the task queue
5362 /// @return a pair<bool,coeffT>(is_leaf,coeffT())
5363 argT operator()(const Key<NDIM>& key) const {
5364
5365 const coeffT& coeff=iaf.coeff();
5366
5367 if (coeff.has_data()) {
5368
5369 // now send off the application for all neighbor boxes
5371 result->task(p,&implT:: template do_apply_directed_screening<opT,T>,
5372 apply_op, key, coeff, false);
5373
5374 // process the core box
5375 double norm0=result->do_apply_directed_screening<opT,T>(apply_op,key,coeff,true);
5376
5377 if (iaf.is_leaf()) return argT(true,coeff);
5378 return finalize(norm0,key,coeff,result);
5379
5380 } else {
5381 const bool is_leaf=true;
5382 return argT(is_leaf,coeffT());
5383 }
5384 }
5385
5386 /// sole purpose is to wait for the kernel norm, wrap it and send it back to caller
5387 argT finalize(const double kernel_norm, const keyT& key,
5388 const coeffT& coeff, const implT* r) const {
5389 const double thresh=r->get_thresh()*0.1;
5390 bool is_leaf=(kernel_norm<r->truncate_tol(thresh,key));
5391 if (key.level()<2) is_leaf=false;
5392 return argT(is_leaf,coeff);
5393 }
5394
5395
5396 this_type make_child(const keyT& child) const {
5397 return this_type(result,iaf.make_child(child),apply_op);
5398 }
5399
5400 /// retrieve the coefficients (parent coeffs might be remote)
5402 Future<ctT> f1=iaf.activate();
5403
5404// Future<ctL> g1=g.activate();
5405// return h->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5406// &this_type::forward_ctor),h,f1,g1,particle);
5407
5408 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5409 &this_type::forward_ctor),result,f1,apply_op);
5410 }
5411
5412 /// taskq-compatible ctor
5413 this_type forward_ctor(implT* result1, const ctT& iaf1, const opT* apply_op1) {
5414 return this_type(result1,iaf1,apply_op1);
5415 }
5416
5417 template <typename Archive> void serialize(const Archive& ar) {
5418 ar & result & iaf & apply_op;
5419 }
5420 };
5421
5422 /// Returns the square of the error norm in the box labeled by key
5423
5424 /// Assumed to be invoked locally but it would be easy to eliminate
5425 /// this assumption
5426 template <typename opT>
5427 double err_box(const keyT& key, const nodeT& node, const opT& func,
5428 int npt, const Tensor<double>& qx, const Tensor<double>& quad_phit,
5429 const Tensor<double>& quad_phiw) const {
5430
5431 std::vector<long> vq(NDIM);
5432 for (std::size_t i=0; i<NDIM; ++i)
5433 vq[i] = npt;
5434 tensorT fval(vq,false), work(vq,false), result(vq,false);
5435
5436 // Compute the "exact" function in this volume at npt points
5437 // where npt is usually this->npt+1.
5438 fcube(key, func, qx, fval);
5439
5440 // Transform into the scaling function basis of order npt
5441 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
5442 fval = fast_transform(fval,quad_phiw,result,work).scale(scale);
5443
5444 // Subtract to get the error ... the original coeffs are in the order k
5445 // basis but we just computed the coeffs in the order npt(=k+1) basis
5446 // so we can either use slices or an iterator macro.
5447 const tensorT coeff = node.coeff().full_tensor_copy();
5448 ITERATOR(coeff,fval(IND)-=coeff(IND););
5449 // flo note: we do want to keep a full tensor here!
5450
5451 // Compute the norm of what remains
5452 double err = fval.normf();
5453 return err*err;
5454 }
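        // In other words (illustrative summary): err_box returns || P_npt f - f_k ||^2 over
        // this box, where P_npt projects the "exact" function onto the order-npt scaling
        // basis and f_k is the stored order-k representation padded with zeros.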
5455
5456 template <typename opT>
5458 const implT* impl;
5459 const opT* func;
5460 int npt;
5464 public:
5465 do_err_box() = default;
5466
5470
5473
5474 double operator()(typename dcT::const_iterator& it) const {
5475 const keyT& key = it->first;
5476 const nodeT& node = it->second;
5477 if (node.has_coeff())
5478 return impl->err_box(key, node, *func, npt, qx, quad_phit, quad_phiw);
5479 else
5480 return 0.0;
5481 }
5482
5483 double operator()(double a, double b) const {
5484 return a+b;
5485 }
5486
5487 template <typename Archive>
5488 void serialize(const Archive& ar) {
5489 MADNESS_EXCEPTION("not yet", 1);
5490 }
5491 };
5492
5493 /// Returns the sum of squares of errors from local info ... no comms
5494 template <typename opT>
5495 double errsq_local(const opT& func) const {
5497 // Make quadrature rule of higher order
5498 const int npt = cdata.npt + 1;
5499 Tensor<double> qx, qw, quad_phi, quad_phiw, quad_phit;
5500 FunctionCommonData<T,NDIM>::_init_quadrature(k+1, npt, qx, qw, quad_phi, quad_phiw, quad_phit);
5501
5504 return world.taskq.reduce< double,rangeT,do_err_box<opT> >(range,
5505 do_err_box<opT>(this, &func, npt, qx, quad_phit, quad_phiw));
5506 }
5507
5508 /// Returns \c int(f(x),x) in local volume
5509 T trace_local() const;
5510
5512 double operator()(typename dcT::const_iterator& it) const {
5513 const nodeT& node = it->second;
5514 if (node.has_coeff()) {
5515 double norm = node.coeff().normf();
5516 return norm*norm;
5517 }
5518 else {
5519 return 0.0;
5520 }
5521 }
5522
5523 double operator()(double a, double b) const {
5524 return (a+b);
5525 }
5526
5527 template <typename Archive> void serialize(const Archive& ar) {
5528 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5529 }
5530 };
5531
5532
5533 /// Returns the square of the local norm ... no comms
5534 double norm2sq_local() const;
5535
5536 /// compute the inner product of this range with other
5537 template<typename R>
5541 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5542
5545 resultT operator()(typename dcT::const_iterator& it) const {
5546
5548 const keyT& key=it->first;
5549 const nodeT& fnode = it->second;
5550 if (fnode.has_coeff()) {
5551 if (other->coeffs.probe(it->first)) {
5552 const FunctionNode<R,NDIM>& gnode = other->coeffs.find(key).get()->second;
5553 if (gnode.has_coeff()) {
5554 if (gnode.coeff().dim(0) != fnode.coeff().dim(0)) {
5555 madness::print("INNER", it->first, gnode.coeff().dim(0),fnode.coeff().dim(0));
5556 MADNESS_EXCEPTION("functions have different k or compress/reconstruct error", 0);
5557 }
5558 if (leaves_only) {
5559 if (gnode.is_leaf() or fnode.is_leaf()) {
5560 sum += fnode.coeff().trace_conj(gnode.coeff());
5561 }
5562 } else {
5563 sum += fnode.coeff().trace_conj(gnode.coeff());
5564 }
5565 }
5566 }
5567 }
5568 return sum;
5569 }
5570
5571 resultT operator()(resultT a, resultT b) const {
5572 return (a+b);
5573 }
5574
5575 template <typename Archive> void serialize(const Archive& ar) {
5576 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5577 }
5578 };
5579
5580 /// Returns the inner product ASSUMING same distribution
5581
5582 /// handles compressed and redundant form
5583 template <typename R>
5587 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5588
5589 // make sure the states of the trees are consistent
5592 return world.taskq.reduce<resultT,rangeT,do_inner_local<R> >
5594 }
5595
5596
5597 /// compute the inner product of this range with other
5598 template<typename R>
5602 bool leaves_only=true;
5603 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5604
5608 resultT operator()(typename dcT::const_iterator& it) const {
5609
5610 constexpr std::size_t LDIM=std::max(NDIM/2,std::size_t(1));
5611
5612 const keyT& key=it->first;
5613 const nodeT& fnode = it->second;
5614 if (not fnode.has_coeff()) return resultT(0.0); // probably internal nodes
5615
5616 // assuming all boxes (esp the low-dim ones) are local, i.e. the functions are replicated
5617 auto find_valid_parent = [](auto& key, auto& impl, auto&& find_valid_parent) {
5618 MADNESS_CHECK(impl->get_coeffs().owner(key)==impl->world.rank()); // make sure everything is local!
5619 if (impl->get_coeffs().probe(key)) return key;
5620 auto parentkey=key.parent();
5621 return find_valid_parent(parentkey, impl, find_valid_parent);
5622 };
5623
5624 // returns coefficients, empty if no functor present
5625 auto get_coeff = [&find_valid_parent](const auto& key, const auto& v_impl) {
5626 if ((v_impl.size()>0) and v_impl.front().get()) {
5627 auto impl=v_impl.front();
5628
5629// bool have_impl=impl.get();
5630// if (have_impl) {
5631 auto parentkey = find_valid_parent(key, impl, find_valid_parent);
5632 MADNESS_CHECK(impl->get_coeffs().probe(parentkey));
5633 typename decltype(impl->coeffs)::accessor acc;
5634 impl->get_coeffs().find(acc,parentkey);
5635 auto parentcoeff=acc->second.coeff();
5636 auto coeff=impl->parent_to_child(parentcoeff, parentkey, key);
5637 return coeff;
5638 } else {
5639 // get type of vector elements
5640 typedef typename std::decay_t<decltype(v_impl)>::value_type::element_type::typeT S;
5641// typedef typename std::decay_t<decltype(v_impl)>::value_type S;
5642 return GenTensor<S>();
5643// return GenTensor<typename std::decay_t<decltype(*impl)>::typeT>();
5644 }
5645 };
5646
5647 auto make_vector = [](auto& arg) {
5648 return std::vector<std::decay_t<decltype(arg)>>(1,arg);
5649 };
5650
5651
5652 Key<LDIM> key1,key2;
5653 key.break_apart(key1,key2);
5654
5655 auto func=dynamic_cast<CompositeFunctorInterface<R,NDIM,LDIM>* >(ket->functor.get());
5657
5658 MADNESS_CHECK_THROW(func->impl_ket_vector.size()==0 or func->impl_ket_vector.size()==1,
5659 "only one ket function supported in inner_on_demand");
5660 MADNESS_CHECK_THROW(func->impl_p1_vector.size()==0 or func->impl_p1_vector.size()==1,
5661 "only one p1 function supported in inner_on_demand");
5662 MADNESS_CHECK_THROW(func->impl_p2_vector.size()==0 or func->impl_p2_vector.size()==1,
5663 "only one p2 function supported in inner_on_demand");
5664 auto coeff_bra=fnode.coeff();
5665 auto coeff_ket=get_coeff(key,func->impl_ket_vector);
5666 auto coeff_v1=get_coeff(key1,make_vector(func->impl_m1));
5667 auto coeff_v2=get_coeff(key2,make_vector(func->impl_m2));
5668 auto coeff_p1=get_coeff(key1,func->impl_p1_vector);
5669 auto coeff_p2=get_coeff(key2,func->impl_p2_vector);
5670
5671 // construct |ket(1,2)> or |p(1)p(2)> or |p(1)p(2) ket(1,2)>
5672 double error=0.0;
5673 if (coeff_ket.has_data() and coeff_p1.has_data()) {
5674 pointwise_multiplier<LDIM> pm(key,coeff_ket);
5675 coeff_ket=pm(key,outer(coeff_p1,coeff_p2,TensorArgs(TT_FULL,-1.0)).full_tensor());
5676 error+=pm.error;
5677 } else if (coeff_ket.has_data() or coeff_p1.has_data()) {
5678 coeff_ket = (coeff_ket.has_data()) ? coeff_ket : outer(coeff_p1,coeff_p2);
5679 } else { // not ket and no p1p2
5680 MADNESS_EXCEPTION("confused ket/p1p2 in do_inner_local_on_demand",1);
5681 }
5682
5683 // construct (v(1) + v(2)) |ket(1,2)>
5684 coeffT v1v2ket;
5685 if (coeff_v1.has_data()) {
5686 pointwise_multiplier<LDIM> pm(key,coeff_ket);
5687 v1v2ket = pm(key,coeff_v1.full_tensor(), 1);
5688 error+=pm.error;
5689 v1v2ket+= pm(key,coeff_v2.full_tensor(), 2);
5690 error+=pm.error;
5691 } else {
5692 v1v2ket = coeff_ket;
5693 }
5694
5695 resultT result;
5696 if (func->impl_eri) { // project bra*ket onto eri, avoid multiplication with eri
5697 MADNESS_CHECK(func->impl_eri->get_functor()->provides_coeff());
5698 coeffT coeff_eri=func->impl_eri->get_functor()->coeff(key).full_tensor();
5699 pointwise_multiplier<LDIM> pm(key,v1v2ket);
5700 tensorT braket=pm(key,coeff_bra.full_tensor_copy().conj());
5701 error+=pm.error;
5702 if (error>1.e-3) print("error in key",key,error);
5703 result=coeff_eri.full_tensor().trace(braket);
5704
5705 } else { // no eri, project ket onto bra
5706 result=coeff_bra.full_tensor_copy().trace_conj(v1v2ket.full_tensor_copy());
5707 }
5708 return result;
5709 }
5710
5711 resultT operator()(resultT a, resultT b) const {
5712 return (a+b);
5713 }
5714
5715 template <typename Archive> void serialize(const Archive& ar) {
5716 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5717 }
5718 };
5719
5720 /// Returns the inner product of this with function g constructed on-the-fly
5721
5722 /// the leaf boxes of this function's MRA tree define the inner product
5723 template <typename R>
5724 TENSOR_RESULT_TYPE(T,R) inner_local_on_demand(const FunctionImpl<R,NDIM>& gimpl) const {
5727
5731 do_inner_local_on_demand<R>(this, &gimpl));
5732 }
5733
5734 /// compute the inner product of this range with other
5735 template<typename R>
5739 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5740
5743 resultT operator()(typename dcT::const_iterator& it) const {
5744
5746 const keyT& key=it->first;
5747 const nodeT& fnode = it->second;
5748 if (fnode.has_coeff()) {
5749 if (other->coeffs.probe(it->first)) {
5750 const FunctionNode<R,NDIM>& gnode = other->coeffs.find(key).get()->second;
5751 if (gnode.has_coeff()) {
5752 if (gnode.coeff().dim(0) != fnode.coeff().dim(0)) {
5753 madness::print("DOT", it->first, gnode.coeff().dim(0),fnode.coeff().dim(0));
5754 MADNESS_EXCEPTION("functions have different k or compress/reconstruct error", 0);
5755 }
5756 if (leaves_only) {
5757 if (gnode.is_leaf() or fnode.is_leaf()) {
5758 sum += fnode.coeff().full_tensor().trace(gnode.coeff().full_tensor());
5759 }
5760 } else {
5761 sum += fnode.coeff().full_tensor().trace(gnode.coeff().full_tensor());
5762 }
5763 }
5764 }
5765 }
5766 return sum;
5767 }
5768
5769 resultT operator()(resultT a, resultT b) const {
5770 return (a+b);
5771 }
5772
5773 template <typename Archive> void serialize(const Archive& ar) {
5774 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5775 }
5776 };
5777
5778 /// Returns the dot product ASSUMING same distribution
5779
5780 /// handles compressed and redundant form
5781 template <typename R>
5785 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5786
5787 // make sure the states of the trees are consistent
5789 bool leaves_only=(this->is_redundant());
5790 return world.taskq.reduce<resultT,rangeT,do_dot_local<R> >
5792 }
5793
5794 /// Type of the entry in the map returned by make_key_vec_map
5795 typedef std::vector< std::pair<int,const coeffT*> > mapvecT;
5796
5797 /// Type of the map returned by make_key_vec_map
5799
5800 /// Adds keys to union of local keys with specified index
5801 void add_keys_to_map(mapT* map, int index) const {
5802 typename dcT::const_iterator end = coeffs.end();
5803 for (typename dcT::const_iterator it=coeffs.begin(); it!=end; ++it) {
5804 typename mapT::accessor acc;
5805 const keyT& key = it->first;
5806 const FunctionNode<T,NDIM>& node = it->second;
5807 if (node.has_coeff()) {
5808 [[maybe_unused]] auto inserted = map->insert(acc,key);
5809 acc->second.push_back(std::make_pair(index,&(node.coeff())));
5810 }
5811 }
5812 }
5813
5814 /// Returns map of union of local keys to vector of indexes of functions containing that key
5815
5816 /// Local concurrency and synchronization only; no communication
5817 static
5818 mapT
5819 make_key_vec_map(const std::vector<const FunctionImpl<T,NDIM>*>& v) {
5820 mapT map(100000);
5821 // This loop must be parallelized
5822 for (unsigned int i=0; i<v.size(); i++) {
5823 //v[i]->add_keys_to_map(&map,i);
5824 v[i]->world.taskq.add(*(v[i]), &FunctionImpl<T,NDIM>::add_keys_to_map, &map, int(i));
5825 }
5826 if (v.size()) v[0]->world.taskq.fence();
5827 return map;
5828 }
5829
5830#if 0
5831// Original
5832 template <typename R>
5833 static void do_inner_localX(const typename mapT::iterator lstart,
5834 const typename mapT::iterator lend,
5835 typename FunctionImpl<R,NDIM>::mapT* rmap_ptr,
5836 const bool sym,
5837 Tensor< TENSOR_RESULT_TYPE(T,R) >* result_ptr,
5838 Mutex* mutex) {
5839 Tensor< TENSOR_RESULT_TYPE(T,R) >& result = *result_ptr;
5840 Tensor< TENSOR_RESULT_TYPE(T,R) > r(result.dim(0),result.dim(1));
5841 for (typename mapT::iterator lit=lstart; lit!=lend; ++lit) {
5842 const keyT& key = lit->first;
5843 typename FunctionImpl<R,NDIM>::mapT::iterator rit=rmap_ptr->find(key);
5844 if (rit != rmap_ptr->end()) {
5845 const mapvecT& leftv = lit->second;
5846 const typename FunctionImpl<R,NDIM>::mapvecT& rightv =rit->second;
5847 const int nleft = leftv.size();
5848 const int nright= rightv.size();
5849
5850 for (int iv=0; iv<nleft; iv++) {
5851 const int i = leftv[iv].first;
5852 const GenTensor<T>* iptr = leftv[iv].second;
5853
5854 for (int jv=0; jv<nright; jv++) {
5855 const int j = rightv[jv].first;
5856 const GenTensor<R>* jptr = rightv[jv].second;
5857
5858 if (!sym || (sym && i<=j))
5859 r(i,j) += iptr->trace_conj(*jptr);
5860 }
5861 }
5862 }
5863 }
5864 mutex->lock();
5865 result += r;
5866 mutex->unlock();
5867 }
5868#else
5869 template <typename R>
5870 static void do_inner_localX(const typename mapT::iterator lstart,
5871 const typename mapT::iterator lend,
5872 typename FunctionImpl<R,NDIM>::mapT* rmap_ptr,
5873 const bool sym,
5874 Tensor< TENSOR_RESULT_TYPE(T,R) >* result_ptr,
5875 Mutex* mutex) {
5876 Tensor< TENSOR_RESULT_TYPE(T,R) >& result = *result_ptr;
5877 //Tensor< TENSOR_RESULT_TYPE(T,R) > r(result.dim(0),result.dim(1));
5878 for (typename mapT::iterator lit=lstart; lit!=lend; ++lit) {
5879 const keyT& key = lit->first;
5880 typename FunctionImpl<R,NDIM>::mapT::iterator rit=rmap_ptr->find(key);
5881 if (rit != rmap_ptr->end()) {
5882 const mapvecT& leftv = lit->second;
5883 const typename FunctionImpl<R,NDIM>::mapvecT& rightv =rit->second;
5884 const size_t nleft = leftv.size();
5885 const size_t nright= rightv.size();
5886
5887 unsigned int size = leftv[0].second->size();
5888 Tensor<T> Left(nleft, size);
5889 Tensor<R> Right(nright, size);
5890 Tensor< TENSOR_RESULT_TYPE(T,R)> r(nleft, nright);
5891 for(unsigned int iv = 0; iv < nleft; ++iv) Left(iv,_) = (*(leftv[iv].second)).full_tensor();
5892 for(unsigned int jv = 0; jv < nright; ++jv) Right(jv,_) = (*(rightv[jv].second)).full_tensor();
5893 // call mxmT from mxm.h in tensor
5894 if(TensorTypeData<T>::iscomplex) Left = Left.conj(); // Should handle complex case and leave real case alone
5895 mxmT(nleft, nright, size, r.ptr(), Left.ptr(), Right.ptr());
5896 mutex->lock();
5897 for(unsigned int iv = 0; iv < nleft; ++iv) {
5898 const int i = leftv[iv].first;
5899 for(unsigned int jv = 0; jv < nright; ++jv) {
5900 const int j = rightv[jv].first;
5901 if (!sym || (sym && i<=j)) result(i,j) += r(iv,jv);
5902 }
5903 }
5904 mutex->unlock();
5905 }
5906 }
5907 }
5908#endif
5909
5910#if 0
5911// Original
5912 template <typename R, typename = std::enable_if_t<std::is_floating_point_v<R>>>
5913 static void do_dot_localX(const typename mapT::iterator lstart,
5914 const typename mapT::iterator lend,
5915 typename FunctionImpl<R, NDIM>::mapT* rmap_ptr,
5916 const bool sym,
5917 Tensor<TENSOR_RESULT_TYPE(T, R)>* result_ptr,
5918 Mutex* mutex) {
5919 if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry", 1);
5920 Tensor<TENSOR_RESULT_TYPE(T, R)>& result = *result_ptr;
5921 Tensor<TENSOR_RESULT_TYPE(T, R)> r(result.dim(0), result.dim(1));
5922 for (typename mapT::iterator lit = lstart; lit != lend; ++lit) {
5923 const keyT& key = lit->first;
5924 typename FunctionImpl<R, NDIM>::mapT::iterator rit = rmap_ptr->find(key);
5925 if (rit != rmap_ptr->end()) {
5926 const mapvecT& leftv = lit->second;
5927 const typename FunctionImpl<R, NDIM>::mapvecT& rightv = rit->second;
5928 const int nleft = leftv.size();
5929 const int nright = rightv.size();
5930
5931 for (int iv = 0; iv < nleft; iv++) {
5932 const int i = leftv[iv].first;
5933 const GenTensor<T>* iptr = leftv[iv].second;
5934
5935 for (int jv = 0; jv < nright; jv++) {
5936 const int j = rightv[jv].first;
5937 const GenTensor<R>* jptr = rightv[jv].second;
5938
5939 if (!sym || (sym && i <= j))
5940 r(i, j) += iptr->trace_conj(*jptr);
5941 }
5942 }
5943 }
5944 }
5945 mutex->lock();
5946 result += r;
5947 mutex->unlock();
5948 }
5949#else
5950 template <typename R>
5951 static void do_dot_localX(const typename mapT::iterator lstart,
5952 const typename mapT::iterator lend,
5953 typename FunctionImpl<R, NDIM>::mapT* rmap_ptr,
5954 const bool sym,
5955 Tensor<TENSOR_RESULT_TYPE(T, R)>* result_ptr,
5956 Mutex* mutex) {
5957 Tensor<TENSOR_RESULT_TYPE(T, R)>& result = *result_ptr;
5958 // Tensor<TENSOR_RESULT_TYPE(T, R)> r(result.dim(0), result.dim(1));
5959 for (typename mapT::iterator lit = lstart; lit != lend; ++lit) {
5960 const keyT& key = lit->first;
5961 typename FunctionImpl<R, NDIM>::mapT::iterator rit = rmap_ptr->find(key);
5962 if (rit != rmap_ptr->end()) {
5963 const mapvecT& leftv = lit->second;
5964 const typename FunctionImpl<R, NDIM>::mapvecT& rightv = rit->second;
5965 const size_t nleft = leftv.size();
5966 const size_t nright= rightv.size();
5967
5968 unsigned int size = leftv[0].second->size();
5969 Tensor<T> Left(nleft, size);
5970 Tensor<R> Right(nright, size);
5971 Tensor< TENSOR_RESULT_TYPE(T, R)> r(nleft, nright);
5972 for(unsigned int iv = 0; iv < nleft; ++iv) Left(iv, _) = (*(leftv[iv].second)).full_tensor();
5973 for(unsigned int jv = 0; jv < nright; ++jv) Right(jv, _) = (*(rightv[jv].second)).full_tensor();
5974 // call mxmT from mxm.h in tensor
5975 mxmT(nleft, nright, size, r.ptr(), Left.ptr(), Right.ptr());
5976 mutex->lock();
5977 for(unsigned int iv = 0; iv < nleft; ++iv) {
5978 const int i = leftv[iv].first;
5979 for(unsigned int jv = 0; jv < nright; ++jv) {
5980 const int j = rightv[jv].first;
5981 if (!sym || (sym && i <= j)) result(i, j) += r(iv, jv);
5982 }
5983 }
5984 mutex->unlock();
5985 }
5986 }
5987 }
5988#endif
5989
5990 static double conj(float x) {
5991 return x;
5992 }
5993
5994 static std::complex<double> conj(const std::complex<double> x) {
5995 return std::conj(x);
5996 }
5997
5998 template <typename R>
5999 static Tensor< TENSOR_RESULT_TYPE(T,R) >
6000 inner_local(const std::vector<const FunctionImpl<T,NDIM>*>& left,
6001 const std::vector<const FunctionImpl<R,NDIM>*>& right,
6002 bool sym) {
6003
6004 // This is basically a sparse matrix^T * matrix product
6005 // Rij = sum(k) Aki * Bkj
6006 // where i and j index functions and k index the wavelet coeffs
6007 // eventually the goal is this structure (don't have jtile yet)
6008 //
6009 // do in parallel tiles of k (tensors of coeffs)
6010 // do tiles of j
6011 // do i
6012 // do j in jtile
6013 // do k in ktile
6014 // Rij += Aki*Bkj
6015
6016 mapT lmap = make_key_vec_map(left);
6017 typename FunctionImpl<R,NDIM>::mapT rmap;
6018 auto* rmap_ptr = (typename FunctionImpl<R,NDIM>::mapT*)(&lmap);
6019 if ((std::vector<const FunctionImpl<R,NDIM>*>*)(&left) != &right) {
6021 rmap_ptr = &rmap;
6022 }
6023
6024 size_t chunk = (lmap.size()-1)/(3*4*5)+1;
6025
6026 Tensor< TENSOR_RESULT_TYPE(T,R) > r(left.size(), right.size());
6027 Mutex mutex;
6028
6029 typename mapT::iterator lstart=lmap.begin();
6030 while (lstart != lmap.end()) {
6031 typename mapT::iterator lend = lstart;
6032 advance(lend,chunk);
6033 left[0]->world.taskq.add(&FunctionImpl<T,NDIM>::do_inner_localX<R>, lstart, lend, rmap_ptr, sym, &r, &mutex);
6034 lstart = lend;
6035 }
6036 left[0]->world.taskq.fence();
6037
6038 if (sym) {
6039 for (long i=0; i<r.dim(0); i++) {
6040 for (long j=0; j<i; j++) {
6041 TENSOR_RESULT_TYPE(T,R) sum = r(i,j)+conj(r(j,i));
6042 r(i,j) = sum;
6043 r(j,i) = conj(sum);
6044 }
6045 }
6046 }
6047 return r;
6048 }
6049
6050 template <typename R>
6051 static Tensor<TENSOR_RESULT_TYPE(T, R)>
6052 dot_local(const std::vector<const FunctionImpl<T, NDIM>*>& left,
6053 const std::vector<const FunctionImpl<R, NDIM>*>& right,
6054 bool sym) {
6055
6056 // This is basically a sparse matrix * matrix product
6057 // Rij = sum(k) Aik * Bkj
6058 // where i and j index functions and k index the wavelet coeffs
6059 // eventually the goal is this structure (don't have jtile yet)
6060 //
6061 // do in parallel tiles of k (tensors of coeffs)
6062 // do tiles of j
6063 // do i
6064 // do j in jtile
6065 // do k in ktile
6066 // Rij += Aik*Bkj
6067
6068 mapT lmap = make_key_vec_map(left);
6069 typename FunctionImpl<R, NDIM>::mapT rmap;
6070 auto* rmap_ptr = (typename FunctionImpl<R, NDIM>::mapT*)(&lmap);
6071 if ((std::vector<const FunctionImpl<R, NDIM>*>*)(&left) != &right) {
6073 rmap_ptr = &rmap;
6074 }
6075
6076 size_t chunk = (lmap.size() - 1) / (3 * 4 * 5) + 1;
6077
6078 Tensor<TENSOR_RESULT_TYPE(T, R)> r(left.size(), right.size());
6079 Mutex mutex;
6080
6081 typename mapT::iterator lstart=lmap.begin();
6082 while (lstart != lmap.end()) {
6083 typename mapT::iterator lend = lstart;
6084 advance(lend, chunk);
6085 left[0]->world.taskq.add(&FunctionImpl<T, NDIM>::do_dot_localX<R>, lstart, lend, rmap_ptr, sym, &r, &mutex);
6086 lstart = lend;
6087 }
6088 left[0]->world.taskq.fence();
6089
6090 // sym is for hermiticity
6091 if (sym) {
6092 for (long i = 0; i < r.dim(0); i++) {
6093 for (long j = 0; j < i; j++) {
6094 TENSOR_RESULT_TYPE(T, R) sum = r(i, j) + conj(r(j, i));
6095 r(i, j) = sum;
6096 r(j, i) = conj(sum);
6097 }
6098 }
6099 }
6100 return r;
6101 }
6102
6103 template <typename R>
6105 {
6106 static_assert(!std::is_same<R, int>::value &&
6107 std::is_same<R, int>::value,
6108 "Compilation failed because you wanted to know the type; see below:");
6109 }
6110
6111 /// invoked by result
6112
6113 /// contract 2 functions f(x,z) = \int g(x,y) * h(y,z) dy
6114 /// @tparam CDIM: the dimension of the contraction variable (y)
6115 /// @tparam NDIM: the dimension of the result (x,z)
6116 /// @tparam LDIM: the dimension of g(x,y)
6117 /// @tparam KDIM: the dimension of h(y,z)
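        /// A small worked example (illustrative only): contracting a 3-D g(x,y) with a
        /// 3-D h(y,z) over a 1-D variable y gives LDIM=3, KDIM=3, NDIM=4 and
        /// CDIM = (3 + 3 - 4)/2 = 1, i.e. f(x1,x2,z1,z2) = \int g(x1,x2,y) h(y,z1,z2) dy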
6118 template<typename Q, std::size_t LDIM, typename R, std::size_t KDIM,
6119 std::size_t CDIM = (KDIM + LDIM - NDIM) / 2>
6121 const std::array<int, CDIM> v1, const std::array<int, CDIM> v2) {
6122
6123 typedef std::multimap<Key<NDIM>, std::list<Key<CDIM>>> contractionmapT;
6124 //double wall_get_lists=0.0;
6125 //double wall_recur=0.0;
6126 //double wall_contract=0.0;
6129
6130 // auto print_map = [](const auto& map) {
6131 // for (const auto& kv : map) print(kv.first,"--",kv.second);
6132 // };
6133 // logical constness, not bitwise constness
6134 FunctionImpl<Q,LDIM>& g_nc=const_cast<FunctionImpl<Q,LDIM>&>(g);
6135 FunctionImpl<R,KDIM>& h_nc=const_cast<FunctionImpl<R,KDIM>&>(h);
6136
6137 std::list<contractionmapT> all_contraction_maps;
6138 for (std::size_t n=0; n<nmax; ++n) {
6139
6140 // list of nodes with d coefficients (and their parents)
6141 //double wall0 = wall_time();
6142 auto [g_ijlist, g_jlist] = g.get_contraction_node_lists(n, v1);
6143 auto [h_ijlist, h_jlist] = h.get_contraction_node_lists(n, v2);
6144 if ((g_ijlist.size() == 0) and (h_ijlist.size() == 0)) break;
6145 //double wall1 = wall_time();
6146 //wall_get_lists += (wall1 - wall0);
6147 //wall0 = wall1;
6148// print("g_jlist");
6149// for (const auto& kv : g_jlist) print(kv.first,kv.second);
6150// print("h_jlist");
6151// for (const auto& kv : h_jlist) print(kv.first,kv.second);
6152
6153 // next lines will insert s nodes into g and h -> possible race condition!
6154 bool this_first = true; // are the remaining indices of g before those of g: f(x,z) = g(x,y) h(y,z)
6155 // CDIM, NDIM, KDIM
6156 contractionmapT contraction_map = g_nc.recur_down_for_contraction_map(
6157 g_nc.key0(), g_nc.get_coeffs().find(g_nc.key0()).get()->second, v1, v2,
6158 h_ijlist, h_jlist, this_first, thresh);
6159
6160 this_first = false;
6161 // CDIM, NDIM, LDIM
6162 auto hnode0=h_nc.get_coeffs().find(h_nc.key0()).get()->second;
6163 contractionmapT contraction_map1 = h_nc.recur_down_for_contraction_map(
6164 h_nc.key0(), hnode0, v2, v1,
6165 g_ijlist, g_jlist, this_first, thresh);
6166
6167 // will contain duplicate entries
6168 contraction_map.merge(contraction_map1);
6169 // turn multimap into a map of list
6170 auto it = contraction_map.begin();
6171 while (it != contraction_map.end()) {
6172 auto it_end = contraction_map.upper_bound(it->first);
6173 auto it2 = it;
6174 it2++;
6175 while (it2 != it_end) {
6176 it->second.splice(it->second.end(), it2->second);
6177 it2 = contraction_map.erase(it2);
6178 }
6179 it = it_end;
6180 }
6181// print("thresh ",thresh);
6182// print("contraction list size",contraction_map.size());
6183
6184 // remove all double entries
6185 for (auto& elem: contraction_map) {
6186 elem.second.sort();
6187 elem.second.unique();
6188 }
6189 //wall1 = wall_time();
6190 //wall_recur += (wall1 - wall0);
6191// if (n==2) {
6192// print("contraction map for n=", n);
6193// print_map(contraction_map);
6194// }
6195 all_contraction_maps.push_back(contraction_map);
6196
6197 long mapsize=contraction_map.size();
6198 if (mapsize==0) break;
6199 }
6200
6201
6202 // finally do the contraction
6203 for (const auto& contraction_map : all_contraction_maps) {
6204 for (const auto& key_list : contraction_map) {
6205 const Key<NDIM>& key=key_list.first;
6206 const std::list<Key<CDIM>>& list=key_list.second;
6207 woT::task(coeffs.owner(key), &implT:: template partial_inner_contract<Q,LDIM,R,KDIM>,
6208 &g,&h,v1,v2,key,list);
6209 }
6210 }
6211 }
6212
6213 /// for contracting two functions f(x,z) = \int g(x,y) h(y,z) dy
6214
6215 /// find all nodes with d coefficients and return a list of complete keys and a list of
6216 /// keys holding only the y dimension, as well as the maximum norm of all d coefficients for the j dimension
6217 /// @param[in] n the scale
6218 /// @param[in] v array holding the indices of the integration variable
6219 /// @return ijlist: list of all nodes with d coeffs; jlist: j-part of ij list only
6220 template<std::size_t CDIM>
6221 std::tuple<std::set<Key<NDIM>>, std::map<Key<CDIM>,double>>
6222 get_contraction_node_lists(const std::size_t n, const std::array<int, CDIM>& v) const {
6223
6224 const auto& cdata=get_cdata();
6225 auto has_d_coeffs = [&cdata](const coeffT& coeff) {
6226 if (coeff.has_no_data()) return false;
6227 return (coeff.dim(0)==2*cdata.k);
6228 };
6229
6230 // keys to be contracted in g
6231 std::set<Key<NDIM>> ij_list; // full key
6232 std::map<Key<CDIM>,double> j_list; // only that dimension that will be contracted
6233
6234 for (auto it=get_coeffs().begin(); it!=get_coeffs().end(); ++it) {
6235 const Key<NDIM>& key=it->first;
6236 const FunctionNode<T,NDIM>& node=it->second;
6237 if ((key.level()==n) and (has_d_coeffs(node.coeff()))) {
6238 ij_list.insert(key);
6240 for (std::size_t i=0; i<CDIM; ++i) j_trans[i]=key.translation()[v[i]];
6241 Key<CDIM> jkey(n,j_trans);
6242 const double max_d_norm=j_list[jkey];
6243 j_list.insert_or_assign(jkey,std::max(max_d_norm,node.get_dnorm()));
6244 Key<CDIM> parent_jkey=jkey.parent();
6245 while (j_list.count(parent_jkey)==0) {
6246 j_list.insert({parent_jkey,1.0});
6247 parent_jkey=parent_jkey.parent();
6248 }
6249 }
6250 }
6251 return std::make_tuple(ij_list,j_list);
6252 }
6253
6254 /// make a map of all nodes that will contribute to a partial inner product
6255
6256 /// given the list of d coefficient-holding nodes of the other function:
6257 /// recur down h if snorm * dnorm > tol and the j-part of the key occurs in the other
6258 /// function's ij-list. Make s coefficients if necessary. Collect the contributing nodes as map(ik-key, list(j-keys)).
6259 ///
6260 /// !! WILL ADD NEW S NODES TO THIS TREE THAT MUST BE REMOVED TO AVOID INCONSISTENT TREE STRUCTURE !!
6261 ///
6262 /// @param[in] key for recursion
6263 /// @param[in] node corresponds to key
6264 /// @param[in] v_this this' dimension that are contracted
6265 /// @param[in] v_other other's dimension that are contracted
6266 /// @param[in] ij_other_list list of nodes of the other function that will be contracted (and their parents)
6267 /// @param[in] j_other_list list of column nodes of the other function that will be contracted (and their parents)
6268 /// @param[in] max_d_norm max d coeff norm of the nodes in j_list
6269 /// @param[in] this_first are the remaining coeffs of this functions first or last in the result function
6270 /// @param[in] thresh threshold for including nodes in the contraction: snorm*dnorm > thresh
6271 /// @tparam CDIM dimension to be contracted
6272 /// @tparam ODIM dimensions of the other function
6273 /// @tparam FDIM dimensions of the final function
6274 template<std::size_t CDIM, std::size_t ODIM, std::size_t FDIM=NDIM+ODIM-2*CDIM>
6275 std::multimap<Key<FDIM>, std::list<Key<CDIM>>> recur_down_for_contraction_map(
6276 const keyT& key, const nodeT& node,
6277 const std::array<int,CDIM>& v_this,
6278 const std::array<int,CDIM>& v_other,
6279 const std::set<Key<ODIM>>& ij_other_list,
6280 const std::map<Key<CDIM>,double>& j_other_list,
6281 bool this_first, const double thresh) {
6282
6283 std::multimap<Key<FDIM>, std::list<Key<CDIM>>> contraction_map;
6284
6285 // fast return if the other function has no d coeffs
6286 if (j_other_list.empty()) return contraction_map;
6287
6288 // continue recursion if this node may be contracted with the j column
6289 // extract relevant node translations from this node
6290 const auto j_this_key=key.extract_key(v_this);
6291
6292// print("\nkey, j_this_key", key, j_this_key);
6293 const double max_d_norm=j_other_list.find(j_this_key)->second;
6294 const bool sd_norm_product_large = node.get_snorm() * max_d_norm > truncate_tol(thresh,key);
6295// print("sd_product_norm",node.get_snorm() * max_d_norm, thresh);
6296
6297 // end recursion if we have reached the final scale n
6298 // with which nodes from other will this node be contracted?
6299 bool final_scale=key.level()==ij_other_list.begin()->level();
6300 if (final_scale and sd_norm_product_large) {
6301 for (auto& other_key : ij_other_list) {
6302 const auto j_other_key=other_key.extract_key(v_other);
6303 if (j_this_key != j_other_key) continue;
6304 auto i_key=key.extract_complement_key(v_this);
6305 auto k_key=other_key.extract_complement_key(v_other);
6306// print("key, ij_other_key",key,other_key);
6307// print("i, k, j key",i_key, k_key, j_this_key);
6308 Key<FDIM> ik_key=(this_first) ? i_key.merge_with(k_key) : k_key.merge_with(i_key);
6309// print("ik_key",ik_key);
6310// MADNESS_CHECK(contraction_map.count(ik_key)==0);
6311 contraction_map.insert(std::make_pair(ik_key,std::list<Key<CDIM>>{j_this_key}));
6312 }
6313 return contraction_map;
6314 }
6315
6316 bool continue_recursion = (j_other_list.count(j_this_key)==1);
6317 if (not continue_recursion) return contraction_map;
6318
6319
6320 // continue recursion if norms are large
6321 continue_recursion = (node.has_children() or sd_norm_product_large);
6322
6323 if (continue_recursion) {
6324 // in case we need to compute children's coefficients: unfilter only once
6325 bool compute_child_s_coeffs=true;
6326 coeffT d = node.coeff();
6327// print("continuing recursion from key",key);
6328
6329 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
6330 keyT child=kit.key();
6331 typename dcT::accessor acc;
6332
6333 // make the child's s coeffs if the child doesn't exist or if it has no s coeffs
6334 bool childnode_exists=get_coeffs().find(acc,child);
6335 bool need_s_coeffs= childnode_exists ? (acc->second.get_snorm()<=0.0) : true;
6336
6337 coeffT child_s_coeffs;
6338 if (need_s_coeffs and compute_child_s_coeffs) {
6339 if (d.dim(0)==cdata.vk[0]) { // s coeffs only in this node
6340 coeffT d1(cdata.v2k,get_tensor_args());
6341 d1(cdata.s0)+=d;
6342 d=d1;
6343 }
6344 d = unfilter(d);
6345 child_s_coeffs=copy(d(child_patch(child)));
6346 child_s_coeffs.reduce_rank(thresh);
6347 compute_child_s_coeffs=false;
6348 }
6349
6350 if (not childnode_exists) {
6351 get_coeffs().replace(child,nodeT(child_s_coeffs,false));
6352 get_coeffs().find(acc,child);
6353 } else if (childnode_exists and need_s_coeffs) {
6354 acc->second.coeff()=child_s_coeffs;
6355 }
6356 bool exists= get_coeffs().find(acc,child);
6357 MADNESS_CHECK(exists);
6358 nodeT& childnode = acc->second;
6359 if (need_s_coeffs) childnode.recompute_snorm_and_dnorm(get_cdata());
6360// print("recurring down to",child);
6361 contraction_map.merge(recur_down_for_contraction_map(child,childnode, v_this, v_other,
6362 ij_other_list, j_other_list, this_first, thresh));
6363// print("contraction_map.size()",contraction_map.size());
6364 }
6365
6366 }
6367
6368 return contraction_map;
6369 }
6370
6371
6372 /// tensor contraction part of partial_inner
6373
6374 /// @param[in] g rhs of the inner product
6375 /// @param[in] h lhs of the inner product
6376 /// @param[in] v1 dimensions of g to be contracted
6377 /// @param[in] v2 dimensions of h to be contracted
6378 /// @param[in] key key of result's (this) FunctionNode
6379 /// @param[in] j_key_list list of contraction index-j keys contributing to this' node
6380 template<typename Q, std::size_t LDIM, typename R, std::size_t KDIM,
6381 std::size_t CDIM = (KDIM + LDIM - NDIM) / 2>
6383 const std::array<int, CDIM> v1, const std::array<int, CDIM> v2,
6384 const Key<NDIM>& key, const std::list<Key<CDIM>>& j_key_list) {
6385
6386 Key<LDIM - CDIM> i_key;
6387 Key<KDIM - CDIM> k_key;
6388 key.break_apart(i_key, k_key);
6389
6390 coeffT result_coeff(get_cdata().v2k, get_tensor_type());
6391 for (const auto& j_key: j_key_list) {
6392
6393 auto v_complement = [](const auto& v, const auto& vc) {
6394 constexpr std::size_t VDIM = std::tuple_size<std::decay_t<decltype(v)>>::value;
6395 constexpr std::size_t VCDIM = std::tuple_size<std::decay_t<decltype(vc)>>::value;
6396 std::array<int, VCDIM> result;
6397 for (std::size_t i = 0; i < VCDIM; i++) result[i] = (v.back() + i + 1) % (VDIM + VCDIM);
6398 return result;
6399 };
6400 auto make_ij_key = [&v_complement](const auto i_key, const auto j_key, const auto& v) {
6401 constexpr std::size_t IDIM = std::decay_t<decltype(i_key)>::static_size;
6402 constexpr std::size_t JDIM = std::decay_t<decltype(j_key)>::static_size;
6403 static_assert(JDIM == std::tuple_size<std::decay_t<decltype(v)>>::value);
6404
6406 for (std::size_t i = 0; i < v.size(); ++i) l[v[i]] = j_key.translation()[i];
6407 std::array<int, IDIM> vc1;
6408 auto vc = v_complement(v, vc1);
6409 for (std::size_t i = 0; i < vc.size(); ++i) l[vc[i]] = i_key.translation()[i];
6410
6411 return Key<IDIM + JDIM>(i_key.level(), l);
6412 };
6413
6414 Key<LDIM> ij_key = make_ij_key(i_key, j_key, v1);
6415 Key<KDIM> jk_key = make_ij_key(k_key, j_key, v2);
6416
6417 MADNESS_CHECK(g->get_coeffs().probe(ij_key));
6418 MADNESS_CHECK(h->get_coeffs().probe(jk_key));
6419 const coeffT& gcoeff = g->get_coeffs().find(ij_key).get()->second.coeff();
6420 const coeffT& hcoeff = h->get_coeffs().find(jk_key).get()->second.coeff();
6421 coeffT gcoeff1, hcoeff1;
6422 if (gcoeff.dim(0) == g->get_cdata().k) {
6423 gcoeff1 = coeffT(g->get_cdata().v2k, g->get_tensor_args());
6424 gcoeff1(g->get_cdata().s0) += gcoeff;
6425 } else {
6426 gcoeff1 = gcoeff;
6427 }
6428 if (hcoeff.dim(0) == g->get_cdata().k) {
6429 hcoeff1 = coeffT(h->get_cdata().v2k, h->get_tensor_args());
6430 hcoeff1(h->get_cdata().s0) += hcoeff;
6431 } else {
6432 hcoeff1 = hcoeff;
6433 }
6434
6435 // offset: 0 for full tensor, 1 for svd representation with rank being the first dimension (r,d1,d2,d3) -> (r,d1*d2*d3)
6436 auto fuse = [](Tensor<T> tensor, const std::array<int, CDIM>& v, int offset) {
6437 for (std::size_t i = 0; i < CDIM - 1; ++i) {
6438 MADNESS_CHECK((v[i] + 1) == v[i + 1]); // make sure v is contiguous and ascending
6439 tensor = tensor.fusedim(v[0]+offset);
6440 }
6441 return tensor;
6442 };
6443
6444 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(2) h(1,2) d2
6445 // c_i = \sum_j a_j b_ij = \sum_jr a_j b_rj b'_rj
6446 // = \sum_jr ( a_j b_rj ) b'_rj
6447 auto contract2 = [](const auto& svdcoeff, const auto& tensor, const int particle) {
6448#if HAVE_GENTENSOR
6449 const int spectator_particle=(particle+1)%2;
6450 Tensor<Q> gtensor = svdcoeff.get_svdtensor().make_vector_with_weights(particle);
6451 gtensor=gtensor.reshape(svdcoeff.rank(),gtensor.size()/svdcoeff.rank());
6452 MADNESS_CHECK(gtensor.ndim()==2);
6453 Tensor<Q> gtensor_other = svdcoeff.get_svdtensor().ref_vector(spectator_particle);
6454 Tensor<T> tmp1=inner(gtensor,tensor.flat(),1,0); // tmp1(r) = sum_j a'_(r,j) b(j)
6455 MADNESS_CHECK(tmp1.ndim()==1);
6456 Tensor<T> tmp2=inner(gtensor_other,tmp1,0,0); // tmp2(i) = sum_r a_(r,i) tmp1(r)
6457 return tmp2;
6458#else
6459 MADNESS_EXCEPTION("no partial_inner using svd without GenTensor",1);
6460 return Tensor<T>();
6461#endif
6462 };
6463
6464 if (gcoeff.is_full_tensor() and hcoeff.is_full_tensor() and result_coeff.is_full_tensor()) {
6465 // merge multiple contraction dimensions into one
6466 int offset = 0;
6467 Tensor<Q> gtensor = fuse(gcoeff1.full_tensor(), v1, offset);
6468 Tensor<R> htensor = fuse(hcoeff1.full_tensor(), v2, offset);
6469 result_coeff.full_tensor() += inner(gtensor, htensor, v1[0], v2[0]);
6470 if (key.level() > 0) {
6471 gtensor = copy(gcoeff1.full_tensor()(g->get_cdata().s0));
6472 htensor = copy(hcoeff1.full_tensor()(h->get_cdata().s0));
6473 gtensor = fuse(gtensor, v1, offset);
6474 htensor = fuse(htensor, v2, offset);
6475 result_coeff.full_tensor()(get_cdata().s0) -= inner(gtensor, htensor, v1[0], v2[0]);
6476 }
6477 }
6478
6479
6480 // use case: 2-electron functions in svd representation f(1,3) = \int g(1,2) h(2,3) d2
6481 // c_ik = \sum_j a_ij b_jk = \sum_jrr' a_ri a'_rj b_r'j b_r'k
6482 // = \sum_jrr' ( a_ri (a'_rj b_r'j) ) b_r'k
6483 // = \sum_r' c_r'i b_r'k
6484 else if (gcoeff.is_svd_tensor() and hcoeff.is_svd_tensor() and result_coeff.is_svd_tensor()) {
6485 MADNESS_CHECK(v1[0]==0 or v1[CDIM-1]==LDIM-1);
6486 MADNESS_CHECK(v2[0]==0 or v2[CDIM-1]==KDIM-1);
6487 int gparticle= v1[0]==0 ? 0 : 1; // which particle to integrate over
6488 int hparticle= v2[0]==0 ? 0 : 1; // which particle to integrate over
6489 // merge multiple contraction dimensions into one
6490 Tensor<Q> gtensor = gcoeff1.get_svdtensor().flat_vector_with_weights(gparticle);
6491 Tensor<Q> gtensor_other = gcoeff1.get_svdtensor().flat_vector((gparticle+1)%2);
6492 Tensor<R> htensor = hcoeff1.get_svdtensor().flat_vector_with_weights(hparticle);
6493 Tensor<R> htensor_other = hcoeff1.get_svdtensor().flat_vector((hparticle+1)%2);
6494 Tensor<T> tmp1=inner(gtensor,htensor,1,1); // tmp1(r,r') = sum_j b(r,j) a(r',j)
6495 Tensor<T> tmp2=inner(tmp1,gtensor_other,0,0); // tmp2(r',i) = sum_r tmp1(r,r') a(r,i)
6496 Tensor<double> w(tmp2.dim(0)); // weights of the result
6497 MADNESS_CHECK(tmp2.dim(0)==htensor_other.dim(0));
6498 w=1.0;
6499 coeffT result_tmp(get_cdata().v2k, get_tensor_type());
6500 result_tmp.get_svdtensor().set_vectors_and_weights(w,tmp2,htensor_other);
6501 if (key.level() > 0) {
6502 GenTensor<Q> gcoeff2 = copy(gcoeff1(g->get_cdata().s0));
6503 GenTensor<R> hcoeff2 = copy(hcoeff1(h->get_cdata().s0));
6504 Tensor<Q> gtensor = gcoeff2.get_svdtensor().flat_vector_with_weights(gparticle);
6505 Tensor<Q> gtensor_other = gcoeff2.get_svdtensor().flat_vector((gparticle+1)%2);
6506 Tensor<R> htensor = hcoeff2.get_svdtensor().flat_vector_with_weights(hparticle);
6507 Tensor<R> htensor_other = hcoeff2.get_svdtensor().flat_vector((hparticle+1)%2);
6508 Tensor<T> tmp1=inner(gtensor,htensor,1,1); // tmp1(r,r') = sum_j b(r,j) a(r',j)
6509 Tensor<T> tmp2=inner(tmp1,gtensor_other,0,0); // tmp2(r',i) = sum_r tmp1(r,r') a(r,i)
6510 Tensor<double> w(tmp2.dim(0)); // weights of the level-n correction
6511 MADNESS_CHECK(tmp2.dim(0)==htensor_other.dim(0));
6512 w=1.0;
6513 coeffT result_coeff1(get_cdata().vk, get_tensor_type());
6514 result_coeff1.get_svdtensor().set_vectors_and_weights(w,tmp2,htensor_other);
6515 result_tmp(get_cdata().s0)-=result_coeff1;
6516 }
6517 result_coeff+=result_tmp;
6518 }
6519
6520 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(2) h(1,2) d2
6521 // c_i = \sum_j a_j b_ij = \sum_jr a_j b_rj b'_rj
6522 // = \sum_jr ( a_j b_rj ) b'_rj
6523 else if (gcoeff.is_full_tensor() and hcoeff.is_svd_tensor() and result_coeff.is_full_tensor()) {
6524 MADNESS_CHECK(v1[0]==0 and v1[CDIM-1]==LDIM-1);
6525 MADNESS_CHECK(v2[0]==0 or v2[CDIM-1]==KDIM-1);
6526 MADNESS_CHECK(LDIM==CDIM);
6527 int hparticle= v2[0]==0 ? 0 : 1; // which particle to integrate over
6528
6529 Tensor<T> r=contract2(hcoeff1,gcoeff1.full_tensor(),hparticle);
6530 if (key.level()>0) r(get_cdata().s0)-=contract2(copy(hcoeff1(h->get_cdata().s0)),copy(gcoeff.full_tensor()(g->get_cdata().s0)),hparticle);
6531 result_coeff.full_tensor()+=r;
6532 }
6533 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(1,2) h(2) d2
6534 // c_i = \sum_j a_ij b_j = \sum_jr a_ri a'_rj b_j
6535 // = \sum_jr ( a_ri (a'_rj b_j) )
6536 else if (gcoeff.is_svd_tensor() and hcoeff.is_full_tensor() and result_coeff.is_full_tensor()) {
6537 MADNESS_CHECK(v1[0]==0 or v1[CDIM-1]==LDIM-1);
6538 MADNESS_CHECK(v2[0]==0 and v2[CDIM-1]==KDIM-1);
6539 MADNESS_CHECK(KDIM==CDIM);
6540 int gparticle= v1[0]==0 ? 0 : 1; // which particle to integrate over
6541
6542 Tensor<T> r=contract2(gcoeff1,hcoeff1.full_tensor(),gparticle);
6543 if (key.level()>0) r(get_cdata().s0)-=contract2(copy(gcoeff1(g->get_cdata().s0)),copy(hcoeff.full_tensor()(h->get_cdata().s0)),gparticle);
6544 result_coeff.full_tensor()+=r;
6545
6546 } else {
6547 MADNESS_EXCEPTION("unknown case in partial_inner_contract",1);
6548 }
6549 }
6550
6551 MADNESS_CHECK(result_coeff.is_assigned());
6552 result_coeff.reduce_rank(get_thresh());
6553
6554 if (coeffs.is_local(key))
6555 coeffs.send(key, &nodeT::accumulate, result_coeff, coeffs, key, get_tensor_args());
6556 else
6557 coeffs.task(key, &nodeT::accumulate, result_coeff, coeffs, key, get_tensor_args(), TaskAttributes::hipri());
6558 }
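// Added illustration (not part of the original header): index bookkeeping for the contraction above.
// With LDIM=6, KDIM=6 and CDIM=3 the result has NDIM = LDIM + KDIM - 2*CDIM = 6 dimensions, i.e.
//   f(1,3) = \int g(1,2) h(2,3) d2,    c^f_{ik} = \sum_j c^g_{ij} c^h_{jk},
// where i, j, k each collect the scaling indices of one 3-dimensional particle; the key.level()>0
// branches subtract the parent (s0) contribution so that scales are not double counted.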
6559
6560 /// Return the inner product with an external function on a specified function node.
6561
6562 /// @param[in] key Key of the function node to compute the inner product on. (the domain of integration)
6563 /// @param[in] c Tensor of coefficients for the function at the function node given by key
6564 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6565 /// @return Returns the inner product over the domain of a single function node, no guarantee of accuracy.
6566 T inner_ext_node(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f) const {
6567 tensorT fvals = tensorT(this->cdata.vk);
6568 // Compute the value of the external function at the quadrature points.
6569 fcube(key, *(f), cdata.quad_x, fvals);
6570 // Convert quadrature point values to scaling coefficients.
6571 tensorT fc = tensorT(values2coeffs(key, fvals));
6572 // Return the inner product of the two functions' scaling coefficients.
6573 return c.trace_conj(fc);
6574 }
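// Added note: since the scaling functions are orthonormal within a box, the box contribution to the
// inner product reduces to a dot product of the two coefficient vectors (trace_conj supplies the
// complex conjugation); no refinement happens here, hence "no guarantee of accuracy" above.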
6575
6576 /// Call inner_ext_node recursively until convergence.
6577 /// @param[in] key Key of the function node on which to compute inner product (the domain of integration)
6578 /// @param[in] c coeffs for the function at the node given by key
6579 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6580 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6581 /// @param[in] old_inner the inner product on the parent function node
6582 /// @return Returns the inner product over the domain of a single function, checks for convergence.
6583 T inner_ext_recursive(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine, T old_inner=T(0)) const {
6584 int i = 0;
6585 tensorT c_child, inner_child;
6586 T new_inner, result = 0.0;
6587
6588 c_child = tensorT(cdata.v2k); // tensor of child coeffs
6589 inner_child = Tensor<double>(pow(2, NDIM)); // child inner products
6590
6591 // If old_inner is default value, assume this is the first call
6592 // and compute inner product on this node.
6593 if (old_inner == T(0)) {
6594 old_inner = inner_ext_node(key, c, f);
6595 }
6596
6597 if (coeffs.find(key).get()->second.has_children()) {
6598 // Since the key has children and we know the func is redundant,
6599 // iterate over all children of this compute node, computing
6600 // the inner product on each child node. new_inner will store
6601 // the sum of these, yielding a more accurate inner product.
6602 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6603 const keyT& child = it.key();
6604 tensorT cc = coeffs.find(child).get()->second.coeff().full_tensor_copy();
6605 inner_child(i) = inner_ext_node(child, cc, f);
6606 }
6607 new_inner = inner_child.sum();
6608 } else if (leaf_refine) {
6609 // We need the scaling coefficients of the numerical function
6610 // at each of the children nodes. We can't use project because
6611 // there is no guarantee that the numerical function will have
6612 // a functor. Instead, since we know we are at or below the
6613 // leaf nodes, the wavelet coefficients are zero (to within the
6614 // truncate tolerance). Thus, we can use unfilter() to
6615 // get the scaling coefficients at the next level.
6616 tensorT d = tensorT(cdata.v2k);
6617 d = T(0);
6618 d(cdata.s0) = copy(c);
6619 c_child = unfilter(d);
6620
6621 // Iterate over all children of this compute node, computing
6622 // the inner product on each child node. new_inner will store
6623 // the sum of these, yielding a more accurate inner product.
6624 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6625 const keyT& child = it.key();
6626 tensorT cc = tensorT(c_child(child_patch(child)));
6627 inner_child(i) = inner_ext_node(child, cc, f);
6628 }
6629 new_inner = inner_child.sum();
6630 } else {
6631 // If we get to here, we are at the leaf nodes and the user has
6632 // specified that they do not want refinement past leaf nodes.
6633 new_inner = old_inner;
6634 }
6635
6636 // Check for convergence. If converged...yay, we're done. If not,
6637 // call inner_ext_node_recursive on each child node and accumulate
6638 // the inner product in result.
6639 // if (std::abs(new_inner - old_inner) <= truncate_tol(thresh, key)) {
6640 if (std::abs(new_inner - old_inner) <= thresh) {
6641 result = new_inner;
6642 } else {
6643 i = 0;
6644 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6645 const keyT& child = it.key();
6646 tensorT cc = tensorT(c_child(child_patch(child)));
6647 result += inner_ext_recursive(child, cc, f, leaf_refine, inner_child(i));
6648 }
6649 }
6650
6651 return result;
6652 }
6653
6654 struct do_inner_ext_local_ffi {
6655 const std::shared_ptr< FunctionFunctorInterface<T, NDIM> > fref;
6656 const implT * impl;
6657 const bool leaf_refine;
6658 const bool do_leaves; ///< start with leaf nodes instead of initial_level
6659
6660 do_inner_ext_local_ffi(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f,
6661 const implT * impl, const bool leaf_refine, const bool do_leaves)
6662 : fref(f), impl(impl), leaf_refine(leaf_refine), do_leaves(do_leaves) {};
6663
6664 T operator()(typename dcT::const_iterator& it) const {
6665 if (do_leaves and it->second.is_leaf()) {
6666 tensorT cc = it->second.coeff().full_tensor();
6667 return impl->inner_adaptive_recursive(it->first, cc, fref, leaf_refine, T(0));
6668 } else if ((not do_leaves) and (it->first.level() == impl->initial_level)) {
6669 tensorT cc = it->second.coeff().full_tensor();
6670 return impl->inner_ext_recursive(it->first, cc, fref, leaf_refine, T(0));
6671 } else {
6672 return 0.0;
6673 }
6674 }
6675
6676 T operator()(T a, T b) const {
6677 return (a + b);
6678 }
6679
6680 template <typename Archive> void serialize(const Archive& ar) {
6681 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
6682 }
6683 };
6684
6685 /// Return the local part of inner product with external function ... no communication.
6686 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6687 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6688 /// @return Returns local part of the inner product, i.e. over the domain of all function nodes on this compute node.
6689 T inner_ext_local(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine) const {
6690 typedef Range<typename dcT::const_iterator> rangeT;
6691
6692 return world.taskq.reduce<T, rangeT, do_inner_ext_local_ffi>(rangeT(coeffs.begin(),coeffs.end()),
6693 do_inner_ext_local_ffi(f, this, leaf_refine, false));
6694 }
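// Minimal usage sketch (added; the functor and variable names are hypothetical):
//   std::shared_ptr<FunctionFunctorInterface<double,3>> gauss = ...;     // externally provided function
//   double inner = impl.inner_ext_local(gauss, /*leaf_refine=*/true);    // local contribution only
//   world.gop.sum(inner);    // the global inner product requires a reduction over all ranks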
6695
6696 /// Return the local part of inner product with external function ... no communication.
6697 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6698 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6699 /// @return Returns local part of the inner product, i.e. over the domain of all function nodes on this compute node.
6700 T inner_adaptive_local(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine) const {
6701 typedef Range<typename dcT::const_iterator> rangeT;
6702
6703 return world.taskq.reduce<T, rangeT, do_inner_ext_local_ffi>(rangeT(coeffs.begin(),coeffs.end()),
6704 do_inner_ext_local_ffi(f, this, leaf_refine, true));
6705 }
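// Added note: inner_adaptive_local seeds the reduction at the existing leaf nodes (do_leaves=true),
// whereas inner_ext_local starts from boxes at initial_level; in both cases leaf_refine controls
// whether the recursion may descend past the leaves of the numerical function.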
6706
6707 /// Call inner_ext_node recursively until convergence.
6708 /// @param[in] key Key of the function node on which to compute inner product (the domain of integration)
6709 /// @param[in] c coeffs for the function at the node given by key
6710 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6711 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6712 /// @param[in] old_inner the inner product on the parent function node
6713 /// @return Returns the inner product over the domain of a single function, checks for convergence.
6714 T inner_adaptive_recursive(keyT key, const tensorT& c,
6715 const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f,
6716 const bool leaf_refine, T old_inner=T(0)) const {
6717
6718 // the inner product in the current node
6719 old_inner = inner_ext_node(key, c, f);
6720 T result=0.0;
6721
6722 // the inner product in the child nodes
6723
6724 // compute the sum coefficients of the MRA function
6725 tensorT d = tensorT(cdata.v2k);
6726 d = T(0);
6727 d(cdata.s0) = copy(c);
6728 tensorT c_child = unfilter(d);
6729
6730 // compute the inner product in the child nodes
6731 T new_inner=0.0; // child inner products
6732 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6733 const keyT& child = it.key();
6734 tensorT cc = tensorT(c_child(child_patch(child)));
6735 new_inner+= inner_ext_node(child, cc, f);
6736 }
6737
6738 // continue recursion if needed
6739 const double tol=truncate_tol(thresh,key);
6740 if (leaf_refine and (std::abs(new_inner - old_inner) > tol)) {
6741 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6742 const keyT& child = it.key();
6743 tensorT cc = tensorT(c_child(child_patch(child)));
6744 result += inner_adaptive_recursive(child, cc, f, leaf_refine, T(0));
6745 }
6746 } else {
6747 result = new_inner;
6748 }
6749 return result;
6750
6751 }
6752
6753
6754 /// Return the gaxpy product with an external function on a specified
6755 /// function node.
6756 /// @param[in] key Key of the function node on which to compute gaxpy
6757 /// @param[in] lc Tensor of coefficients for the function at the
6758 /// function node given by key
6759 /// @param[in] f Pointer to function of type T that takes coordT
6760 /// arguments. This is the externally provided function and
6761 /// the right argument of gaxpy.
6762 /// @param[in] alpha prefactor of c Tensor for gaxpy
6763 /// @param[in] beta prefactor of fcoeffs for gaxpy
6764 /// @return Returns coefficient tensor of the gaxpy product at specified
6765 /// key, no guarantee of accuracy.
6766 template <typename L>
6767 tensorT gaxpy_ext_node(keyT key, Tensor<L> lc, T (*f)(const coordT&), T alpha, T beta) const {
6768 // Compute the value of external function at the quadrature points.
6769 tensorT fvals = madness::fcube(key, f, cdata.quad_x);
6770 // Convert quadrature point values to scaling coefficients.
6771 tensorT fcoeffs = values2coeffs(key, fvals);
6772 // Return the inner product of the two functions' scaling coeffs.
6773 tensorT c2 = copy(lc);
6774 c2.gaxpy(alpha, fcoeffs, beta);
6775 return c2;
6776 }
6777
6778 /// Return out of place gaxpy using recursive descent.
6779 /// @param[in] key Key of the function node on which to compute gaxpy
6780 /// @param[in] left FunctionImpl, left argument of gaxpy
6781 /// @param[in] lcin coefficients of left at this node
6782 /// @param[in] c coefficients of gaxpy product at this node
6783 /// @param[in] f pointer to function of type T that takes coordT
6784 /// arguments. This is the externally provided function and
6785 /// the right argument of gaxpy.
6786 /// @param[in] alpha prefactor of left argument for gaxpy
6787 /// @param[in] beta prefactor of right argument for gaxpy
6788 /// @param[in] tol convergence tolerance...when the norm of the gaxpy's
6789 /// difference coefficients is less than tol, we are done.
6790 template <typename L>
6791 void gaxpy_ext_recursive(const keyT& key, const FunctionImpl<L,NDIM>* left,
6792 Tensor<L> lcin, tensorT c, T (*f)(const coordT&),
6793 T alpha, T beta, double tol, bool below_leaf) {
6794 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
6795
6796 // If we haven't yet reached the leaf level, check whether the
6797 // current key is a leaf node of left. If so, set below_leaf to true
6798 // and continue. If not, make this a parent, recur down, return.
6799 if (not below_leaf) {
6800 bool left_leaf = left->coeffs.find(key).get()->second.is_leaf();
6801 if (left_leaf) {
6802 below_leaf = true;
6803 } else {
6804 this->coeffs.replace(key, nodeT(coeffT(), true));
6805 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6806 const keyT& child = it.key();
6807 woT::task(left->coeffs.owner(child), &implT:: template gaxpy_ext_recursive<L>,
6808 child, left, Tensor<L>(), tensorT(), f, alpha, beta, tol, below_leaf);
6809 }
6810 return;
6811 }
6812 }
6813
6814 // Compute left's coefficients if not provided
6815 Tensor<L> lc = lcin;
6816 if (lc.size() == 0) {
6817 literT it = left->coeffs.find(key).get();
6818 MADNESS_ASSERT(it != left->coeffs.end());
6819 if (it->second.has_coeff())
6820 lc = it->second.coeff().reconstruct_tensor();
6821 }
6822
6823 // Compute this node's coefficients if not provided in function call
6824 if (c.size() == 0) {
6825 c = gaxpy_ext_node(key, lc, f, alpha, beta);
6826 }
6827
6828 // We need the scaling coefficients of the numerical function at
6829 // each of the children nodes. We can't use project because there
6830 // is no guarantee that the numerical function will have a functor.
6831 // Instead, since we know we are at or below the leaf nodes, the
6832 // wavelet coefficients are zero (to within the truncate tolerance).
6833 // Thus, we can use unfilter() to get the scaling coefficients at
6834 // the next level.
6835 Tensor<L> lc_child = Tensor<L>(cdata.v2k); // left's child coeffs
6836 Tensor<L> ld = Tensor<L>(cdata.v2k);
6837 ld = L(0);
6838 ld(cdata.s0) = copy(lc);
6839 lc_child = unfilter(ld);
6840
6841 // Iterate over children of this node,
6842 // storing the gaxpy coeffs in c_child
6843 tensorT c_child = tensorT(cdata.v2k); // tensor of child coeffs
6844 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6845 const keyT& child = it.key();
6846 tensorT lcoeff = tensorT(lc_child(child_patch(child)));
6847 c_child(child_patch(child)) = gaxpy_ext_node(child, lcoeff, f, alpha, beta);
6848 }
6849
6850 // Compute the difference coefficients to test for convergence.
6851 tensorT d = tensorT(cdata.v2k);
6852 d = filter(c_child);
6853 // Filter returns both s and d coefficients, so set scaling
6854 // coefficient part of d to 0 so that we take only the
6855 // norm of the difference coefficients.
6856 d(cdata.s0) = T(0);
6857 double dnorm = d.normf();
6858
6859 // Small d.normf means we've reached a good level of resolution
6860 // Store the coefficients and return.
6861 if (dnorm <= truncate_tol(tol,key)) {
6862 this->coeffs.replace(key, nodeT(coeffT(c,targs), false));
6863 } else {
6864 // Otherwise, make this a parent node and recur down
6865 this->coeffs.replace(key, nodeT(coeffT(), true)); // Interior node
6866
6867 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6868 const keyT& child = it.key();
6869 tensorT child_coeff = tensorT(c_child(child_patch(child)));
6870 tensorT left_coeff = tensorT(lc_child(child_patch(child)));
6871 woT::task(left->coeffs.owner(child), &implT:: template gaxpy_ext_recursive<L>,
6872 child, left, left_coeff, child_coeff, f, alpha, beta, tol, below_leaf);
6873 }
6874 }
6875 }
6876
6877 template <typename L>
6878 void gaxpy_ext(const FunctionImpl<L,NDIM>* left, T (*f)(const coordT&), T alpha, T beta, double tol, bool fence) {
6879 if (world.rank() == coeffs.owner(cdata.key0))
6880 gaxpy_ext_recursive<L> (cdata.key0, left, Tensor<L>(), tensorT(), f, alpha, beta, tol, false);
6881 if (fence)
6882 world.gop.fence();
6883 }
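// Minimal usage sketch (added; `shift`, `result_impl` and `left_impl` are hypothetical):
//   static double shift(const Vector<double,3>& r) { return 1.0; }   // must be a plain function pointer
//   // result <- 2*left + 3*shift, refined until the difference coefficients drop below tol
//   result_impl->gaxpy_ext<double>(left_impl, &shift, 2.0, 3.0, /*tol=*/1e-6, /*fence=*/true);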
6884
6885 /// project the low-dim function g on the hi-dim function f: result(x) = <this(x,y) | g(y)>
6886
6887 /// invoked by the hi-dim function (this, a function of NDIM); the result has NDIM-LDIM dimensions
6888
6889 /// Upon return, result matches this, with contributions on all scales
6890 /// @param[in] result lo-dim function of NDIM-LDIM \todo Should this be param[out]?
6891 /// @param[in] gimpl lo-dim function of LDIM
6892 /// @param[in] dim over which dimensions to integrate: 0 (dimensions 0..LDIM-1) or 1 (dimensions LDIM..NDIM-1)
6893 template<size_t LDIM>
6894 void project_out(FunctionImpl<T,NDIM-LDIM>* result, const FunctionImpl<T,LDIM>* gimpl,
6895 const int dim, const bool fence) {
6896
6897 const keyT& key0=cdata.key0;
6898
6899 if (world.rank() == coeffs.owner(key0)) {
6900
6901 // coeff_op will accumulate the result
6902 typedef project_out_op<LDIM> coeff_opT;
6903 coeff_opT coeff_op(this,result,CoeffTracker<T,LDIM>(gimpl),dim);
6904
6905 // don't do anything on this -- coeff_op will accumulate into result
6906 typedef noop<T,NDIM> apply_opT;
6907 apply_opT apply_op;
6908
6909 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
6910 coeff_op, apply_op, cdata.key0);
6911
6912 }
6913 if (fence) world.gop.fence();
6914
6915 }
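// Worked example (added): with LDIM=3 and NDIM=6, `this` is a 6-d function f(x,y) of two 3-d
// particles and `result` is 3-d.
//   dim==0 :  result(y) = \int g(x) f(x,y) dx   (integrate over the first particle)
//   dim==1 :  result(x) = \int f(x,y) g(y) dy   (integrate over the second particle)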
6916
6917
6918 /// project the low-dim function g on the hi-dim function f: result(x) = <f(x,y) | g(y)>
6919 template<size_t LDIM>
6920 struct project_out_op {
6921 bool randomize() const {return false;}
6922
6923 typedef project_out_op<LDIM> this_type;
6924 typedef CoeffTracker<T,LDIM> ctL;
6925 typedef FunctionImpl<T,NDIM-LDIM> implL1;
6926 typedef std::pair<bool,coeffT> argT;
6927
6928 const implT* fimpl; ///< the hi dim function f
6929 mutable implL1* result; ///< the low dim result function
6930 ctL iag; ///< the low dim function g
6931 int dim; ///< 0: project 0..LDIM-1, 1: project LDIM..NDIM-1
6932
6933 // ctor
6934 project_out_op() = default;
6935 project_out_op(const implT* fimpl, implL1* result, const ctL& iag, const int dim)
6936 : fimpl(fimpl), result(result), iag(iag), dim(dim) {}
6937 project_out_op(const project_out_op& other)
6938 : fimpl(other.fimpl), result(other.result), iag(other.iag), dim(other.dim) {}
6939
6940
6941 /// do the actual contraction
6942 Future<argT> operator()(const Key<NDIM>& key) const {
6943
6944 Key<LDIM> key1,key2,dest;
6945 key.break_apart(key1,key2);
6946
6947 // make the right coefficients
6948 coeffT gcoeff;
6949 if (dim==0) {
6950 gcoeff=iag.get_impl()->parent_to_child(iag.coeff(),iag.key(),key1);
6951 dest=key2;
6952 }
6953 if (dim==1) {
6954 gcoeff=iag.get_impl()->parent_to_child(iag.coeff(),iag.key(),key2);
6955 dest=key1;
6956 }
6957
6958 MADNESS_ASSERT(fimpl->get_coeffs().probe(key)); // must be local!
6959 const nodeT& fnode=fimpl->get_coeffs().find(key).get()->second;
6960 const coeffT& fcoeff=fnode.coeff();
6961
6962 // fast return if possible
6963 if (fcoeff.has_no_data() or gcoeff.has_no_data())
6964 return Future<argT> (argT(fnode.is_leaf(),coeffT()));
6965
6966 MADNESS_CHECK(gcoeff.is_full_tensor());
6967 tensorT final(result->cdata.vk);
6968 const int k=fcoeff.dim(0);
6969 const int k_ldim=std::pow(k,LDIM);
6970 std::vector<long> shape(LDIM, k);
6971
6972 if (fcoeff.is_full_tensor()) {
6973 // result_i = \sum_j g_j f_ji
6974 const tensorT gtensor = gcoeff.full_tensor().reshape(k_ldim);
6975 const tensorT ftensor = fcoeff.full_tensor().reshape(k_ldim,k_ldim);
6976 final=inner(gtensor,ftensor,0,dim).reshape(shape);
6977
6978 } else if (fcoeff.is_svd_tensor()) {
6979 if (fcoeff.rank()>0) {
6980
6981 // result_i = \sum_jr g_j a_rj w_r b_ri
6982 const int otherdim = (dim + 1) % 2;
6983 const tensorT gtensor = gcoeff.full_tensor().flat();
6984 const tensorT atensor = fcoeff.get_svdtensor().flat_vector(dim); // a_rj
6985 const tensorT btensor = fcoeff.get_svdtensor().flat_vector(otherdim);
6986 const tensorT gatensor = inner(gtensor, atensor, 0, 1); // ga_r
6987 tensorT weights = copy(fcoeff.get_svdtensor().weights_);
6988 weights.emul(gatensor); // ga_r * w_r
6989 // sum over all ranks of b, include new weights:
6990 // result_i = \sum_r ga_r * w_r * b_ri
6991 for (int r = 0; r < fcoeff.rank(); ++r) final += weights(r) * btensor(r, _);
6992 final = final.reshape(shape);
6993 }
6994
6995 } else {
6996 MADNESS_EXCEPTION("unsupported tensor type in project_out_op",1);
6997 }
6998
6999 // accumulate the result
7000 result->coeffs.task(dest, &FunctionNode<T,LDIM>::accumulate2, final, result->coeffs, dest, TaskAttributes::hipri());
7001
7002 return Future<argT> (argT(fnode.is_leaf(),coeffT()));
7003 }
7004
7005 this_type make_child(const keyT& child) const {
7006 Key<LDIM> key1,key2;
7007 child.break_apart(key1,key2);
7008 const Key<LDIM> gkey = (dim==0) ? key1 : key2;
7009
7010 return this_type(fimpl,result,iag.make_child(gkey),dim);
7011 }
7012
7013 /// retrieve the coefficients (parent coeffs might be remote)
7014 Future<this_type> activate() const {
7015 Future<ctL> g1=iag.activate();
7016 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
7017 &this_type::forward_ctor),fimpl,result,g1,dim);
7018 }
7019
7020 /// taskq-compatible ctor
7021 this_type forward_ctor(const implT* fimpl1, implL1* result1, const ctL& iag1, const int dim1) {
7022 return this_type(fimpl1,result1,iag1,dim1);
7023 }
7024
7025 template <typename Archive> void serialize(const Archive& ar) {
7026 ar & result & iag & fimpl & dim;
7027 }
7028
7029 };
7030
7031
7032 /// project the low-dim function g on the hi-dim function f: this(x) = <f(x,y) | g(y)>
7033
7034 /// invoked by result, a function of NDIM
7035
7036 /// @param[in] f hi-dim function of LDIM+NDIM
7037 /// @param[in] g lo-dim function of LDIM
7038 /// @param[in] dim over which dimensions to integrate: 0 (dimensions 0..LDIM-1) or 1 (dimensions LDIM..LDIM+NDIM-1)
7039 template<size_t LDIM>
7040 void project_out2(const FunctionImpl<T,LDIM+NDIM>* f, const FunctionImpl<T,LDIM>* g, const int dim) {
7041
7042 typedef std::pair< keyT,coeffT > pairT;
7043 typedef typename FunctionImpl<T,NDIM+LDIM>::dcT::const_iterator fiterator;
7044
7045 // loop over all nodes of hi-dim f, compute the inner products with all
7046 // appropriate nodes of g, and accumulate in result
7047 fiterator end = f->get_coeffs().end();
7048 for (fiterator it=f->get_coeffs().begin(); it!=end; ++it) {
7049 const Key<LDIM+NDIM> key=it->first;
7050 const FunctionNode<T,LDIM+NDIM> fnode=it->second;
7051 const coeffT& fcoeff=fnode.coeff();
7052
7053 if (fnode.is_leaf() and fcoeff.has_data()) {
7054
7055 // break key into particle: over key1 will be summed, over key2 will be
7056 // accumulated, or vice versa, depending on dim
7057 if (dim==0) {
7058 Key<NDIM> key1;
7059 Key<LDIM> key2;
7060 key.break_apart(key1,key2);
7061
7062 Future<pairT> result;
7063 // sock_it_to_me(key1, result.remote_ref(world));
7064 g->task(coeffs.owner(key1), &implT::sock_it_to_me, key1, result.remote_ref(world), TaskAttributes::hipri());
7065 woT::task(world.rank(),&implT:: template do_project_out<LDIM>,fcoeff,result,key1,key2,dim);
7066
7067 } else if (dim==1) {
7068 Key<LDIM> key1;
7069 Key<NDIM> key2;
7070 key.break_apart(key1,key2);
7071
7072 Future<pairT> result;
7073 // sock_it_to_me(key2, result.remote_ref(world));
7074 g->task(coeffs.owner(key2), &implT::sock_it_to_me, key2, result.remote_ref(world), TaskAttributes::hipri());
7075 woT::task(world.rank(),&implT:: template do_project_out<LDIM>,fcoeff,result,key2,key1,dim);
7076
7077 } else {
7078 MADNESS_EXCEPTION("confused dim in project_out",1);
7079 }
7080 }
7081 }
7083// this->compressed=false;
7084// this->nonstandard=false;
7085// this->redundant=true;
7086 }
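// Added note: project_out() drives the calculation from the root of `this` via forward_traverse and
// project_out_op, whereas project_out2() loops over the local leaf nodes of the hi-dim f, requests the
// matching g coefficients with sock_it_to_me, and accumulates each contribution through do_project_out().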
7087
7088
7089 /// compute the inner product of two nodes of only some dimensions and accumulate on result
7090
7091 /// invoked by result
7092 /// @param[in] fcoeff coefficients of high dimension LDIM+NDIM
7093 /// @param[in] gpair key and coeffs of low dimension LDIM (possibly a parent node)
7094 /// @param[in] gkey key of actual low dim node (possibly the same as gpair.first, iff gnode exists)
7095 /// @param[in] dest destination node for the result
7096 /// @param[in] dim which dimensions should be contracted: 0..LDIM-1 or LDIM..NDIM+LDIM-1
7097 template<size_t LDIM>
7098 void do_project_out(const coeffT& fcoeff, const std::pair<keyT,coeffT> gpair, const keyT& gkey,
7099 const Key<NDIM>& dest, const int dim) const {
7100
7101 const coeffT gcoeff=parent_to_child(gpair.second,gpair.first,gkey);
7102
7103 // fast return if possible
7104 if (fcoeff.has_no_data() or gcoeff.has_no_data()) return;
7105
7106 // let's specialize for the time being on SVD tensors for f and full tensors of half dim for g
7107 MADNESS_ASSERT(gcoeff.tensor_type()==TT_FULL);
7108 MADNESS_ASSERT(fcoeff.tensor_type()==TT_2D);
7109 const tensorT gtensor=gcoeff.full_tensor();
7110 tensorT result(cdata.vk);
7111
7112 const int otherdim=(dim+1)%2;
7113 const int k=fcoeff.dim(0);
7114 std::vector<Slice> s(fcoeff.config().dim_per_vector()+1,_);
7115
7116 // do the actual contraction
7117 for (int r=0; r<fcoeff.rank(); ++r) {
7118 s[0]=Slice(r,r);
7119 const tensorT contracted_tensor=fcoeff.config().ref_vector(dim)(s).reshape(k,k,k);
7120 const tensorT other_tensor=fcoeff.config().ref_vector(otherdim)(s).reshape(k,k,k);
7121 const double ovlp= gtensor.trace_conj(contracted_tensor);
7122 const double fac=ovlp * fcoeff.config().weights(r);
7123 result+=fac*other_tensor;
7124 }
7125
7126 // accumulate the result
7127 coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
7128 }
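// Added illustration: for an SVD representation f = \sum_r w_r a_r(x) b_r(y) the rank loop above
// evaluates result_i = \sum_r w_r <g|a_r> (b_r)_i (with the roles of a and b set by dim), i.e. each
// rank contributes its overlap with g times its spectator vector.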
7129
7130
7131
7132
7133 /// Returns the maximum local depth of the tree ... no communications.
7134 std::size_t max_local_depth() const;
7135
7136
7137 /// Returns the maximum depth of the tree ... collective ... global sum/broadcast
7138 std::size_t max_depth() const;
7139
7140 /// Returns the max number of nodes on a processor
7141 std::size_t max_nodes() const;
7142
7143 /// Returns the min number of nodes on a processor
7144 std::size_t min_nodes() const;
7145
7146 /// Returns the size of the tree structure of the function ... collective global sum
7147 std::size_t tree_size() const;
7148
7149 /// Returns the number of coefficients in the function for each rank
7150 std::size_t size_local() const;
7151
7152 /// Returns the number of coefficients in the function ... collective global sum
7153 std::size_t size() const;
7154
7155 /// Returns the number of coefficients in the function for this MPI rank
7156 std::size_t nCoeff_local() const;
7157
7158 /// Returns the number of coefficients in the function ... collective global sum
7159 std::size_t nCoeff() const;
7160
7161 /// Returns the number of coefficients in the function ... collective global sum
7162 std::size_t real_size() const;
7163
7164 /// print tree size and size
7165 void print_size(const std::string name) const;
7166
7167 /// print the number of configurations per node
7168 void print_stats() const;
7169
7170 /// In-place scale by a constant
7171 void scale_inplace(const T q, bool fence);
7172
7173 /// Out-of-place scale by a constant
7174 template <typename Q, typename F>
7175 void scale_oop(const Q q, const FunctionImpl<F,NDIM>& f, bool fence) {
7176 typedef typename FunctionImpl<F,NDIM>::nodeT fnodeT;
7177 typedef typename FunctionImpl<F,NDIM>::dcT fdcT;
7178 typename fdcT::const_iterator end = f.coeffs.end();
7179 for (typename fdcT::const_iterator it=f.coeffs.begin(); it!=end; ++it) {
7180 const keyT& key = it->first;
7181 const fnodeT& node = it->second;
7182
7183 if (node.has_coeff()) {
7184 coeffs.replace(key,nodeT(node.coeff()*q,node.has_children()));
7185 }
7186 else {
7187 coeffs.replace(key,nodeT(coeffT(),node.has_children()));
7188 }
7189 }
7190 if (fence)
7191 world.gop.fence();
7192 }
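// Minimal usage sketch (added; `g_impl` and `f_impl` are hypothetical FunctionImpl objects):
//   g_impl.scale_oop(2.0, f_impl, /*fence=*/true);   // g <- 2*f node by node, keeping f's tree structure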
7193
7194 /// Hash a pointer to \c FunctionImpl
7195
7196 /// \param[in] pimpl pointer to a FunctionImpl
7197 /// \return The hash.
7198 inline friend hashT hash_value(const FunctionImpl<T,NDIM>* pimpl) {
7199 hashT seed = hash_value(pimpl->id().get_world_id());
7200 detail::combine_hash(seed, hash_value(pimpl->id().get_obj_id()));
7201 return seed;
7202 }
7203
7204 /// Hash a shared_ptr to \c FunctionImpl
7205
7206 /// \param[in] impl pointer to a FunctionImpl
7207 /// \return The hash.
7208 inline friend hashT hash_value(const std::shared_ptr<FunctionImpl<T,NDIM>> impl) {
7209 return hash_value(impl.get());
7210 }
7211 };
7212
7213 namespace archive {
7214 template <class Archive, class T, std::size_t NDIM>
7215 struct ArchiveLoadImpl<Archive,const FunctionImpl<T,NDIM>*> {
7216 static void load(const Archive& ar, const FunctionImpl<T,NDIM>*& ptr) {
7217 bool exists=false;
7218 ar & exists;
7219 if (exists) {
7220 uniqueidT id;
7221 ar & id;
7222 World* world = World::world_from_id(id.get_world_id());
7223 MADNESS_ASSERT(world);
7224 auto ptr_opt = world->ptr_from_id< WorldObject< FunctionImpl<T,NDIM> > >(id);
7225 if (!ptr_opt)
7226 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use a locally uninitialized object",0);
7227 ptr = static_cast< const FunctionImpl<T,NDIM>*>(*ptr_opt);
7228 if (!ptr)
7229 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use an unregistered object",0);
7230 } else {
7231 ptr=nullptr;
7232 }
7233 }
7234 };
7235
7236 template <class Archive, class T, std::size_t NDIM>
7237 struct ArchiveStoreImpl<Archive,const FunctionImpl<T,NDIM>*> {
7238 static void store(const Archive& ar, const FunctionImpl<T,NDIM>*const& ptr) {
7239 bool exists=(ptr) ? true : false;
7240 ar & exists;
7241 if (exists) ar & ptr->id();
7242 }
7243 };
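// Added note on the (de)serialization protocol used above and below: a bool "exists" flag is written
// first, followed (if true) by the impl's uniqueidT; on load the id is resolved back to a local pointer
// via World::world_from_id and ptr_from_id, so only FunctionImpls already registered in the destination
// world can be transmitted -- the object itself is never serialized.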
7244
7245 template <class Archive, class T, std::size_t NDIM>
7246 struct ArchiveLoadImpl<Archive, FunctionImpl<T,NDIM>*> {
7247 static void load(const Archive& ar, FunctionImpl<T,NDIM>*& ptr) {
7248 bool exists=false;
7249 ar & exists;
7250 if (exists) {
7251 uniqueidT id;
7252 ar & id;
7253 World* world = World::world_from_id(id.get_world_id());
7254 MADNESS_ASSERT(world);
7255 auto ptr_opt = world->ptr_from_id< WorldObject< FunctionImpl<T,NDIM> > >(id);
7256 if (!ptr_opt)
7257 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use a locally uninitialized object",0);
7258 ptr = static_cast< FunctionImpl<T,NDIM>*>(*ptr_opt);
7259 if (!ptr)
7260 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use an unregistered object",0);
7261 } else {
7262 ptr=nullptr;
7263 }
7264 }
7265 };
7266
7267 template <class Archive, class T, std::size_t NDIM>
7268 struct ArchiveStoreImpl<Archive, FunctionImpl<T,NDIM>*> {
7269 static void store(const Archive& ar, FunctionImpl<T,NDIM>*const& ptr) {
7270 bool exists=(ptr) ? true : false;
7271 ar & exists;
7272 if (exists) ar & ptr->id();
7273 // ar & ptr->id();
7274 }
7275 };
7276
7277 template <class Archive, class T, std::size_t NDIM>
7278 struct ArchiveLoadImpl<Archive, std::shared_ptr<const FunctionImpl<T,NDIM> > > {
7279 static void load(const Archive& ar, std::shared_ptr<const FunctionImpl<T,NDIM> >& ptr) {
7280 const FunctionImpl<T,NDIM>* f = nullptr;
7281 ArchiveLoadImpl<Archive, const FunctionImpl<T,NDIM>*>::load(ar, f);
7282 ptr.reset(f, [] (const FunctionImpl<T,NDIM> *p_) -> void {});
7283 }
7284 };
7285
7286 template <class Archive, class T, std::size_t NDIM>
7287 struct ArchiveStoreImpl<Archive, std::shared_ptr<const FunctionImpl<T,NDIM> > > {
7288 static void store(const Archive& ar, const std::shared_ptr<const FunctionImpl<T,NDIM> >& ptr) {
7289 ArchiveStoreImpl<Archive, const FunctionImpl<T,NDIM>*>::store(ar, ptr.get());
7290 }
7291 };
7292
7293 template <class Archive, class T, std::size_t NDIM>
7294 struct ArchiveLoadImpl<Archive, std::shared_ptr<FunctionImpl<T,NDIM> > > {
7295 static void load(const Archive& ar, std::shared_ptr<FunctionImpl<T,NDIM> >& ptr) {
7296 FunctionImpl<T,NDIM>* f = nullptr;
7297 ArchiveLoadImpl<Archive, FunctionImpl<T,NDIM>*>::load(ar, f);
7298 ptr.reset(f, [] (FunctionImpl<T,NDIM> *p_) -> void {});
7299 }
7300 };
7301
7302 template <class Archive, class T, std::size_t NDIM>
7303 struct ArchiveStoreImpl<Archive, std::shared_ptr<FunctionImpl<T,NDIM> > > {
7304 static void store(const Archive& ar, const std::shared_ptr<FunctionImpl<T,NDIM> >& ptr) {
7305 ArchiveStoreImpl<Archive, FunctionImpl<T,NDIM>*>::store(ar, ptr.get());
7306 }
7307 };
7308 }
7309
7310}
7311
7312#endif // MADNESS_MRA_FUNCIMPL_H__INCLUDED
double w(double t, double eps)
Definition DKops.h:22
double q(double t)
Definition DKops.h:18
This header should include pretty much everything needed for the parallel runtime.
An integer with atomic set, get, read+increment, read+decrement, and decrement+test operations.
Definition atomicint.h:126
long dim(int i) const
Returns the size of dimension i.
Definition basetensor.h:147
long ndim() const
Returns the number of dimensions in the tensor.
Definition basetensor.h:144
long size() const
Returns the number of elements in the tensor.
Definition basetensor.h:138
Definition displacements.h:717
Definition displacements.h:294
std::function< bool(Level, const PointPattern &, std::optional< Displacement > &)> Filter
this callable filters out points and/or displacements; note that the displacement is optional (this u...
Definition displacements.h:300
a class to track where relevant (parent) coeffs are
Definition funcimpl.h:791
const keyT & key() const
const reference to the key
Definition funcimpl.h:839
CoeffTracker(const CoeffTracker &other, const datumT &datum)
ctor with a pair<keyT,nodeT>
Definition funcimpl.h:821
const LeafStatus & is_leaf() const
const reference to is_leaf flag
Definition funcimpl.h:863
const implT * impl
the funcimpl that has the coeffs
Definition funcimpl.h:800
LeafStatus
Definition funcimpl.h:797
@ yes
Definition funcimpl.h:797
@ no
Definition funcimpl.h:797
@ unknown
Definition funcimpl.h:797
CoeffTracker(const CoeffTracker &other)
copy ctor
Definition funcimpl.h:829
double dnorm(const keyT &key) const
return the s and dnorm belonging to the passed-in key
Definition funcimpl.h:856
coeffT coeff_
the coefficients belonging to key
Definition funcimpl.h:806
const implT * get_impl() const
const reference to impl
Definition funcimpl.h:833
const coeffT & coeff() const
const reference to the coeffs
Definition funcimpl.h:836
keyT key_
the current key, which must exists in impl
Definition funcimpl.h:802
double dnorm_
norm of d coefficients corresponding to key
Definition funcimpl.h:808
CoeffTracker(const implT *impl)
the initial ctor making the root key
Definition funcimpl.h:816
void serialize(const Archive &ar)
serialization
Definition funcimpl.h:915
Future< CoeffTracker > activate() const
find the coefficients
Definition funcimpl.h:892
CoeffTracker()
default ctor
Definition funcimpl.h:813
GenTensor< T > coeffT
Definition funcimpl.h:795
CoeffTracker make_child(const keyT &child) const
make a child of this, ignoring the coeffs
Definition funcimpl.h:866
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:793
std::pair< Key< NDIM >, ShallowNode< T, NDIM > > datumT
Definition funcimpl.h:796
CoeffTracker forward_ctor(const CoeffTracker &other, const datumT &datum) const
taskq-compatible forwarding to the ctor
Definition funcimpl.h:909
LeafStatus is_leaf_
flag if key is a leaf node
Definition funcimpl.h:804
coeffT coeff(const keyT &key) const
return the coefficients belonging to the passed-in key
Definition funcimpl.h:847
Key< NDIM > keyT
Definition funcimpl.h:794
CompositeFunctorInterface implements a wrapper of holding several functions and functors.
Definition function_interface.h:165
Definition worldhashmap.h:396
Tri-diagonal operator traversing tree primarily for derivative operator.
Definition derivative.h:73
Holds displacements for applying operators to avoid replicating for all operators.
Definition displacements.h:51
const std::vector< Key< NDIM > > & get_disp(Level n, const array_of_bools< NDIM > &kernel_lattice_sum_axes)
Definition displacements.h:211
FunctionCommonData holds all Function data common for given k.
Definition function_common_data.h:52
Tensor< double > quad_phit
transpose of quad_phi
Definition function_common_data.h:102
Tensor< double > quad_phiw
quad_phiw(i,j) = at x[i] value of w[i]*phi[j]
Definition function_common_data.h:103
std::vector< long > vk
(k,...) used to initialize Tensors
Definition function_common_data.h:93
std::vector< Slice > s0
s[0] in each dimension to get scaling coeff
Definition function_common_data.h:91
static const FunctionCommonData< T, NDIM > & get(int k)
Definition function_common_data.h:111
static void _init_quadrature(int k, int npt, Tensor< double > &quad_x, Tensor< double > &quad_w, Tensor< double > &quad_phi, Tensor< double > &quad_phiw, Tensor< double > &quad_phit)
Initialize the quadrature information.
Definition mraimpl.h:87
collect common functionality does not need to be member function of funcimpl
Definition function_common_data.h:135
const FunctionCommonData< T, NDIM > & cdata
Definition function_common_data.h:138
GenTensor< T > coeffs2values(const Key< NDIM > &key, const GenTensor< T > &coeff) const
Definition function_common_data.h:142
Tensor< T > values2coeffs(const Key< NDIM > &key, const Tensor< T > &values) const
Definition function_common_data.h:155
FunctionDefaults holds default paramaters as static class members.
Definition funcdefaults.h:100
static const double & get_thresh()
Returns the default threshold.
Definition funcdefaults.h:176
static int get_max_refine_level()
Gets the default maximum adaptive refinement level.
Definition funcdefaults.h:213
static const Tensor< double > & get_cell_width()
Returns the width of each user cell dimension.
Definition funcdefaults.h:369
static bool get_apply_randomize()
Gets the random load balancing for integral operators flag.
Definition funcdefaults.h:289
static const Tensor< double > & get_cell()
Gets the user cell for the simulation.
Definition funcdefaults.h:347
FunctionFactory implements the named-parameter idiom for Function.
Definition function_factory.h:86
bool _refine
Definition function_factory.h:99
bool _empty
Definition function_factory.h:100
bool _fence
Definition function_factory.h:103
Abstract base class interface required for functors used as input to Functions.
Definition function_interface.h:68
Definition funcimpl.h:5457
double operator()(double a, double b) const
Definition funcimpl.h:5483
const opT * func
Definition funcimpl.h:5459
Tensor< double > qx
Definition funcimpl.h:5461
double operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5474
void serialize(const Archive &ar)
Definition funcimpl.h:5488
do_err_box(const implT *impl, const opT *func, int npt, const Tensor< double > &qx, const Tensor< double > &quad_phit, const Tensor< double > &quad_phiw)
Definition funcimpl.h:5467
int npt
Definition funcimpl.h:5460
Tensor< double > quad_phiw
Definition funcimpl.h:5463
const implT * impl
Definition funcimpl.h:5458
Tensor< double > quad_phit
Definition funcimpl.h:5462
do_err_box(const do_err_box &e)
Definition funcimpl.h:5471
FunctionImpl holds all Function state to facilitate shallow copy semantics.
Definition funcimpl.h:945
std::tuple< std::set< Key< NDIM > >, std::map< Key< CDIM >, double > > get_contraction_node_lists(const std::size_t n, const std::array< int, CDIM > &v) const
for contraction two functions f(x,z) = \int g(x,y) h(y,z) dy
Definition funcimpl.h:6222
void copy_coeffs(const FunctionImpl< Q, NDIM > &other, bool fence)
Copy coeffs from other into self.
Definition funcimpl.h:1118
bool is_nonstandard() const
Definition mraimpl.h:269
T eval_cube(Level n, coordT &x, const tensorT &c) const
Definition mraimpl.h:2021
void partial_inner_contract(const FunctionImpl< Q, LDIM > *g, const FunctionImpl< R, KDIM > *h, const std::array< int, CDIM > v1, const std::array< int, CDIM > v2, const Key< NDIM > &key, const std::list< Key< CDIM > > &j_key_list)
tensor contraction part of partial_inner
Definition funcimpl.h:6382
AtomicInt large
Definition funcimpl.h:1001
Timer timer_target_driven
Definition funcimpl.h:999
void binaryXX(const FunctionImpl< L, NDIM > *left, const FunctionImpl< R, NDIM > *right, const opT &op, bool fence)
Definition funcimpl.h:3194
void do_apply(const opT *op, const keyT &key, const Tensor< R > &c)
apply an operator on the coeffs c (at node key)
Definition funcimpl.h:4801
void do_print_tree_graphviz(const keyT &key, std::ostream &os, Level maxlevel) const
Functor for the do_print_tree method (using GraphViz)
Definition mraimpl.h:2755
void add_keys_to_map(mapT *map, int index) const
Adds keys to union of local keys with specified index.
Definition funcimpl.h:5801
void change_tensor_type1(const TensorArgs &targs, bool fence)
change the tensor type of the coefficients in the FunctionNode
Definition mraimpl.h:1095
void gaxpy_ext_recursive(const keyT &key, const FunctionImpl< L, NDIM > *left, Tensor< L > lcin, tensorT c, T(*f)(const coordT &), T alpha, T beta, double tol, bool below_leaf)
Definition funcimpl.h:6791
int initial_level
Initial level for refinement.
Definition funcimpl.h:974
int max_refine_level
Do not refine below this level.
Definition funcimpl.h:977
double do_apply_kernel3(const opT *op, const GenTensor< R > &coeff, const do_op_args< OPDIM > &args, const TensorArgs &apply_targs)
same as do_apply_kernel2, but use low rank tensors as input and low rank tensors as output
Definition funcimpl.h:4759
void hartree_product(const std::vector< std::shared_ptr< FunctionImpl< T, LDIM > > > p1, const std::vector< std::shared_ptr< FunctionImpl< T, LDIM > > > p2, const leaf_opT &leaf_op, bool fence)
given two functions of LDIM, perform the Hartree/Kronecker/outer product
Definition funcimpl.h:3718
void traverse_tree(const coeff_opT &coeff_op, const apply_opT &apply_op, const keyT &key) const
traverse a non-existing tree
Definition funcimpl.h:3688
void do_square_inplace(const keyT &key)
int special_level
Minimium level for refinement on special points.
Definition funcimpl.h:975
void do_apply_kernel(const opT *op, const Tensor< R > &c, const do_op_args< OPDIM > &args)
for fine-grain parallelism: call the apply method of an operator in a separate task
Definition funcimpl.h:4693
double errsq_local(const opT &func) const
Returns the sum of squares of errors from local info ... no comms.
Definition funcimpl.h:5495
WorldContainer< keyT, nodeT > dcT
Type of container holding the coefficients.
Definition funcimpl.h:957
void evaldepthpt(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< Level >::remote_refT &ref)
Get the depth of the tree at a point in simulation coordinates.
Definition mraimpl.h:2936
void scale_inplace(const T q, bool fence)
In-place scale by a constant.
Definition mraimpl.h:3107
void gaxpy_oop_reconstructed(const double alpha, const implT &f, const double beta, const implT &g, const bool fence)
perform: this= alpha*f + beta*g, invoked by result
Definition mraimpl.h:219
void unary_op_coeff_inplace(const opT &op, bool fence)
Definition funcimpl.h:2038
World & world
Definition funcimpl.h:964
void apply_1d_realspace_push_op(const archive::archive_ptr< const opT > &pop, int axis, const keyT &key, const Tensor< R > &c)
Definition funcimpl.h:3756
bool is_redundant() const
Returns true if the function is redundant.
Definition mraimpl.h:258
FunctionNode< T, NDIM > nodeT
Type of node.
Definition funcimpl.h:955
std::size_t nCoeff_local() const
Returns the number of coefficients in the function for this MPI rank.
Definition mraimpl.h:1918
void print_size(const std::string name) const
print tree size and size
Definition mraimpl.h:1937
FunctionImpl(const FunctionImpl< T, NDIM > &p)
void print_info() const
Prints summary of data distribution.
Definition mraimpl.h:829
void abs_inplace(bool fence)
Definition mraimpl.h:3119
void binaryXXa(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const FunctionImpl< R, NDIM > *right, const Tensor< R > &rcin, const opT &op)
Definition funcimpl.h:3063
void print_timer() const
Definition mraimpl.h:353
void evalR(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< long >::remote_refT &ref)
Get the rank of leaf box of the tree at a point in simulation coordinates.
Definition mraimpl.h:2978
const FunctionCommonData< T, NDIM > & cdata
Definition funcimpl.h:983
void do_print_grid(const std::string filename, const std::vector< keyT > &keys) const
print the grid in xyz format
Definition mraimpl.h:580
void mulXXa(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const FunctionImpl< R, NDIM > *right, const Tensor< R > &rcin, double tol)
Definition funcimpl.h:2977
const std::vector< Vector< double, NDIM > > & get_special_points() const
Definition funcimpl.h:969
std::size_t nCoeff() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1928
double vol_nsphere(int n, double R)
Definition funcimpl.h:4789
keyT neighbor_in_volume(const keyT &key, const keyT &disp) const
Returns key of general neighbor that resides in-volume.
Definition mraimpl.h:3226
void compress(const TreeState newstate, bool fence)
compress the wave function
Definition mraimpl.h:1496
void do_dirac_convolution(FunctionImpl< T, LDIM > *f, bool fence) const
Definition funcimpl.h:2121
std::pair< coeffT, double > compress_op(const keyT &key, const std::vector< Future< std::pair< coeffT, double > > > &v, bool nonstandard)
calculate the wavelet coefficients using the sum coefficients of all child nodes
Definition mraimpl.h:1664
Future< bool > truncate_spawn(const keyT &key, double tol)
Returns true if after truncation this node has coefficients.
Definition mraimpl.h:2600
void print_type_in_compilation_error(R &&)
Definition funcimpl.h:6104
Future< double > norm_tree_spawn(const keyT &key)
Definition mraimpl.h:1566
std::vector< keyT > local_leaf_keys() const
return the keys of the local leaf boxes
Definition mraimpl.h:554
MADNESS_ASSERT(this->is_redundant()==g.is_redundant())
void do_print_tree(const keyT &key, std::ostream &os, Level maxlevel) const
Functor for the do_print_tree method.
Definition mraimpl.h:2673
void vtransform(const std::vector< std::shared_ptr< FunctionImpl< R, NDIM > > > &vright, const Tensor< Q > &c, const std::vector< std::shared_ptr< FunctionImpl< T, NDIM > > > &vleft, double tol, bool fence)
Definition funcimpl.h:2838
void unset_functor()
Definition mraimpl.h:308
void refine_spawn(const opT &op, const keyT &key)
Definition funcimpl.h:4521
void apply_1d_realspace_push(const opT &op, const FunctionImpl< R, NDIM > *f, int axis, bool fence)
Definition funcimpl.h:3807
static double conj(float x)
Definition funcimpl.h:5990
void do_print_plane(const std::string filename, std::vector< Tensor< double > > plotinfo, const int xaxis, const int yaxis, const coordT el2)
print the MRA structure
Definition mraimpl.h:495
std::pair< Key< NDIM >, ShallowNode< T, NDIM > > find_datum(keyT key) const
return the a std::pair<key, node>, which MUST exist
Definition mraimpl.h:961
void set_functor(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > functor1)
Definition mraimpl.h:289
std::enable_if< NDIM==FDIM >::type read_grid2(const std::string gridfile, std::shared_ptr< FunctionFunctorInterface< double, NDIM > > vnuc_functor)
read data from a grid
Definition funcimpl.h:1572
bool verify_tree_state_local() const
check that the tree state and the coeffs are consistent
Definition mraimpl.h:165
const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > & get_pmap() const
Definition mraimpl.h:203
Tensor< Q > fcube_for_mul(const keyT &child, const keyT &parent, const Tensor< Q > &coeff) const
Compute the function values for multiplication.
Definition funcimpl.h:1885
Timer timer_filter
Definition funcimpl.h:997
void sock_it_to_me(const keyT &key, const RemoteReference< FutureImpl< std::pair< keyT, coeffT > > > &ref) const
Walk up the tree returning pair(key,node) for first node with coefficients.
Definition mraimpl.h:2813
void recursive_apply(opT &apply_op, const implT *fimpl, implT *rimpl, const bool fence)
traverse an existing tree and apply an operator
Definition funcimpl.h:5314
double get_thresh() const
Definition mraimpl.h:324
void trickle_down(bool fence)
sum all the contributions from all scales after applying an operator in mod-NS form
Definition mraimpl.h:1350
bool autorefine
If true, autorefine where appropriate.
Definition funcimpl.h:979
std::pair< coeffT, double > make_redundant_op(const keyT &key, const std::vector< Future< std::pair< coeffT, double > > > &v)
similar to compress_op, but insert only the sum coefficients in the tree
Definition mraimpl.h:1724
void set_autorefine(bool value)
Definition mraimpl.h:333
tensorT filter(const tensorT &s) const
Transform sum coefficients at level n to sums+differences at level n-1.
Definition mraimpl.h:1148
void chop_at_level(const int n, const bool fence=true)
remove all nodes with level higher than n
Definition mraimpl.h:1111
void unaryXXvalues(const FunctionImpl< Q, NDIM > *func, const opT &op, bool fence)
Definition funcimpl.h:3221
static std::complex< double > conj(const std::complex< double > x)
Definition funcimpl.h:5994
void partial_inner(const FunctionImpl< Q, LDIM > &g, const FunctionImpl< R, KDIM > &h, const std::array< int, CDIM > v1, const std::array< int, CDIM > v2)
invoked by result
Definition funcimpl.h:6120
TreeState tree_state
Definition funcimpl.h:986
void print_tree_json(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2693
coeffT parent_to_child_NS(const keyT &child, const keyT &parent, const coeffT &coeff) const
Directly project parent NS coeffs to child NS coeffs.
Definition mraimpl.h:703
void mapdim(const implT &f, const std::vector< long > &map, bool fence)
Permute the dimensions of f according to map, result on this.
Definition mraimpl.h:1053
bool is_compressed() const
Returns true if the function is compressed.
Definition mraimpl.h:246
Vector< double, NDIM > coordT
Type of vector holding coordinates.
Definition funcimpl.h:959
void apply(opT &op, const FunctionImpl< R, NDIM > &f, bool fence)
apply an operator on f to return this
Definition funcimpl.h:4991
Tensor< T > tensorT
Type of tensor for anything but to hold coeffs.
Definition funcimpl.h:952
void mirror(const implT &f, const std::vector< long > &mirror, bool fence)
mirror the dimensions of f according to map, result on this
Definition mraimpl.h:1062
T inner_adaptive_recursive(keyT key, const tensorT &c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine, T old_inner=T(0)) const
Definition funcimpl.h:6714
void store(Archive &ar)
Definition funcimpl.h:1247
void do_binary_op(const keyT &key, const Tensor< L > &left, const std::pair< keyT, Tensor< R > > &arg, const opT &op)
Functor for the binary_op method.
Definition funcimpl.h:1987
void gaxpy_ext(const FunctionImpl< L, NDIM > *left, T(*f)(const coordT &), T alpha, T beta, double tol, bool fence)
Definition funcimpl.h:6878
void accumulate_trees(FunctionImpl< Q, NDIM > &result, const R alpha, const bool fence=true) const
merge the trees of this and other, while multiplying them with the alpha or beta, resp
Definition funcimpl.h:1168
void print_stats() const
print the number of configurations per node
Definition mraimpl.h:1965
void broaden(const array_of_bools< NDIM > &is_periodic, bool fence)
Definition mraimpl.h:1299
coeffT truncate_reconstructed_op(const keyT &key, const std::vector< Future< coeffT > > &v, const double tol)
given the sum coefficients of all children, truncate or not
Definition mraimpl.h:1613
void refine_op(const opT &op, const keyT &key)
Definition funcimpl.h:4496
static Tensor< TENSOR_RESULT_TYPE(T, R) > inner_local(const std::vector< const FunctionImpl< T, NDIM > * > &left, const std::vector< const FunctionImpl< R, NDIM > * > &right, bool sym)
Definition funcimpl.h:6000
void fcube(const keyT &key, const FunctionFunctorInterface< T, NDIM > &f, const Tensor< double > &qx, tensorT &fval) const
Evaluate function at quadrature points in the specified box.
Definition mraimpl.h:2438
Timer timer_change_tensor_type
Definition funcimpl.h:995
void forward_do_diff1(const DerivativeBase< T, NDIM > *D, const implT *f, const keyT &key, const std::pair< keyT, coeffT > &left, const std::pair< keyT, coeffT > &center, const std::pair< keyT, coeffT > &right)
Definition mraimpl.h:919
std::vector< Slice > child_patch(const keyT &child) const
Returns patch referring to coeffs of child in parent box.
Definition mraimpl.h:692
void print_tree_graphviz(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2746
void set_tree_state(const TreeState &state)
Definition funcimpl.h:1278
std::size_t min_nodes() const
Returns the min number of nodes on a processor.
Definition mraimpl.h:1869
std::shared_ptr< FunctionFunctorInterface< T, NDIM > > functor
Definition funcimpl.h:985
Timer timer_compress_svd
Definition funcimpl.h:998
Tensor< TENSOR_RESULT_TYPE(T, R)> mul(const Tensor< T > &c1, const Tensor< R > &c2, const int npt, const keyT &key) const
multiply the values of two coefficient tensors using a custom number of grid points
Definition funcimpl.h:1960
void make_redundant(const bool fence)
convert this to redundant, i.e. have sum coefficients on all levels
Definition mraimpl.h:1524
void load(Archive &ar)
Definition funcimpl.h:1229
std::size_t max_nodes() const
Returns the max number of nodes on a processor.
Definition mraimpl.h:1860
T inner_ext_local(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine) const
Definition funcimpl.h:6689
coeffT upsample(const keyT &key, const coeffT &coeff) const
upsample the sum coefficients of level n to sum coeffs on level n+1
Definition mraimpl.h:1227
TensorArgs targs
type of tensor to be used in the FunctionNodes
Definition funcimpl.h:981
void flo_unary_op_node_inplace(const opT &op, bool fence)
Definition funcimpl.h:2150
std::size_t size_local() const
Returns the number of coefficients in the function for each rank.
Definition mraimpl.h:1887
GenTensor< Q > values2coeffs(const keyT &key, const GenTensor< Q > &values) const
Definition funcimpl.h:1864
void plot_cube_kernel(archive::archive_ptr< Tensor< T > > ptr, const keyT &key, const coordT &plotlo, const coordT &plothi, const std::vector< long > &npt, bool eval_refine) const
Definition mraimpl.h:3317
T trace_local() const
Returns int(f(x),x) in local volume.
Definition mraimpl.h:3161
void print_grid(const std::string filename) const
Definition mraimpl.h:538
Future< std::pair< coeffT, double > > compress_spawn(const keyT &key, bool nonstandard, bool keepleaves, bool redundant1)
Invoked on node where key is local.
Definition mraimpl.h:3254
bool get_autorefine() const
Definition mraimpl.h:330
int k
Wavelet order.
Definition funcimpl.h:972
void vtransform_doit(const std::shared_ptr< FunctionImpl< R, NDIM > > &right, const Tensor< Q > &c, const std::vector< std::shared_ptr< FunctionImpl< T, NDIM > > > &vleft, double tol)
Definition funcimpl.h:2682
MADNESS_CHECK(this->is_reconstructed())
void phi_for_mul(Level np, Translation lp, Level nc, Translation lc, Tensor< double > &phi) const
Compute the Legendre scaling functions for multiplication.
Definition mraimpl.h:3129
Future< std::pair< keyT, coeffT > > find_me(const keyT &key) const
find_me. Called by diff_bdry to get coefficients of boundary function
Definition mraimpl.h:3241
TensorType get_tensor_type() const
Definition mraimpl.h:315
void do_project_out(const coeffT &fcoeff, const std::pair< keyT, coeffT > gpair, const keyT &gkey, const Key< NDIM > &dest, const int dim) const
compute the inner product of two nodes of only some dimensions and accumulate on result
Definition funcimpl.h:7098
void remove_leaf_coefficients(const bool fence)
Definition mraimpl.h:1518
void insert_zero_down_to_initial_level(const keyT &key)
Initialize nodes to zero function at initial_level of refinement.
Definition mraimpl.h:2569
void do_diff1(const DerivativeBase< T, NDIM > *D, const implT *f, const keyT &key, const std::pair< keyT, coeffT > &left, const std::pair< keyT, coeffT > &center, const std::pair< keyT, coeffT > &right)
Definition mraimpl.h:930
typedef TENSOR_RESULT_TYPE(T, R) resultT
void unary_op_node_inplace(const opT &op, bool fence)
Definition funcimpl.h:2059
T inner_adaptive_local(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine) const
Definition funcimpl.h:6700
void do_print_tree_json(const keyT &key, std::multimap< Level, std::tuple< tranT, std::string > > &data, Level maxlevel) const
Functor for the do_print_tree_json method.
Definition mraimpl.h:2724
std::multimap< Key< FDIM >, std::list< Key< CDIM > > > recur_down_for_contraction_map(const keyT &key, const nodeT &node, const std::array< int, CDIM > &v_this, const std::array< int, CDIM > &v_other, const std::set< Key< ODIM > > &ij_other_list, const std::map< Key< CDIM >, double > &j_other_list, bool this_first, const double thresh)
make a map of all nodes that will contribute to a partial inner product
Definition funcimpl.h:6275
std::shared_ptr< FunctionImpl< T, NDIM > > pimplT
pointer to this class
Definition funcimpl.h:951
TENSOR_RESULT_TYPE(T, R) dot_local(const FunctionImpl< R
Returns the dot product ASSUMING same distribution.
void finalize_sum()
after summing up we need to do some cleanup;
Definition mraimpl.h:1817
std::enable_if< NDIM==FDIM >::type read_grid(const std::string keyfile, const std::string gridfile, std::shared_ptr< FunctionFunctorInterface< double, NDIM > > vnuc_functor)
read data from a grid
Definition funcimpl.h:1465
dcT coeffs
The coefficients.
Definition funcimpl.h:988
bool exists_and_is_leaf(const keyT &key) const
Definition mraimpl.h:1271
void make_Vphi(const opT &leaf_op, const bool fence=true)
assemble the function V*phi using V and phi given from the functor
Definition funcimpl.h:4288
void unaryXX(const FunctionImpl< Q, NDIM > *func, const opT &op, bool fence)
Definition funcimpl.h:3208
std::vector< std::pair< int, const coeffT * > > mapvecT
Type of the entry in the map returned by make_key_vec_map.
Definition funcimpl.h:5795
void project_out(FunctionImpl< T, NDIM-LDIM > *result, const FunctionImpl< T, LDIM > *gimpl, const int dim, const bool fence)
project the low-dim function g on the hi-dim function f: result(x) = <this(x,y) | g(y)>
Definition funcimpl.h:6894
void verify_tree() const
Verify tree is properly constructed ... global synchronization involved.
Definition mraimpl.h:107
void do_square_inplace2(const keyT &parent, const keyT &child, const tensorT &parent_coeff)
void gaxpy_inplace_reconstructed(const T &alpha, const FunctionImpl< Q, NDIM > &g, const R &beta, const bool fence)
Definition funcimpl.h:1136
void set_tensor_args(const TensorArgs &t)
Definition mraimpl.h:321
GenTensor< Q > fcube_for_mul(const keyT &child, const keyT &parent, const GenTensor< Q > &coeff) const
Compute the function values for multiplication.
Definition funcimpl.h:1913
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:5586
std::size_t real_size() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1905
bool exists_and_has_children(const keyT &key) const
Definition mraimpl.h:1266
void sum_down_spawn(const keyT &key, const coeffT &s)
is this the same as trickle_down()?
Definition mraimpl.h:872
void multi_to_multi_op_values(const opT &op, const std::vector< implT * > &vin, std::vector< implT * > &vout, const bool fence=true)
Inplace operate on many functions (impl's) with an operator within a certain box.
Definition funcimpl.h:2809
long box_interior[1000]
Definition funcimpl.h:3252
keyT neighbor(const keyT &key, const keyT &disp, const array_of_bools< NDIM > &is_periodic) const
Returns key of general neighbor enforcing BC.
Definition mraimpl.h:3211
GenTensor< Q > NS_fcube_for_mul(const keyT &child, const keyT &parent, const GenTensor< Q > &coeff, const bool s_only) const
Compute the function values for multiplication.
Definition funcimpl.h:1783
rangeT range(coeffs.begin(), coeffs.end())
void norm_tree(bool fence)
compute for each FunctionNode the norm of the function inside that node
Definition mraimpl.h:1543
void gaxpy_inplace(const T &alpha, const FunctionImpl< Q, NDIM > &other, const R &beta, bool fence)
Inplace general bilinear operation.
Definition funcimpl.h:1216
bool has_leaves() const
Definition mraimpl.h:284
bool verify_parents_and_children() const
check that parents and children are consistent
Definition mraimpl.h:115
void apply_source_driven(opT &op, const FunctionImpl< R, NDIM > &f, bool fence)
similar to apply, but for low rank coeffs
Definition funcimpl.h:5139
void distribute(std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > newmap) const
Definition funcimpl.h:1110
int get_special_level() const
Definition funcimpl.h:968
void reconstruct_op(const keyT &key, const coeffT &s, const bool accumulate_NS=true)
Definition mraimpl.h:2075
tensorT gaxpy_ext_node(keyT key, Tensor< L > lc, T(*f)(const coordT &), T alpha, T beta) const
Definition funcimpl.h:6767
const coeffT parent_to_child(const coeffT &s, const keyT &parent, const keyT &child) const
Directly project parent coeffs to child coeffs.
Definition mraimpl.h:3144
WorldObject< FunctionImpl< T, NDIM > > woT
Base class world object type.
Definition funcimpl.h:947
void undo_redundant(const bool fence)
convert this from redundant to standard reconstructed form
Definition mraimpl.h:1534
GenTensor< T > coeffT
Type of tensor used to hold coeffs.
Definition funcimpl.h:956
const keyT & key0() const
Returns cdata.key0.
Definition mraimpl.h:390
double finalize_apply()
after apply we need to do some cleanup;
Definition mraimpl.h:1774
bool leaves_only
Definition funcimpl.h:5591
friend hashT hash_value(const FunctionImpl< T, NDIM > *pimpl)
Hash a pointer to FunctionImpl.
Definition funcimpl.h:7198
const dcT & get_coeffs() const
Definition mraimpl.h:339
T inner_ext_node(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f) const
Return the inner product with an external function on a specified function node.
Definition funcimpl.h:6566
double norm2sq_local() const
Returns the square of the local norm ... no comms.
Definition mraimpl.h:1826
const FunctionCommonData< T, NDIM > & get_cdata() const
Definition mraimpl.h:345
void sum_down(bool fence)
After 1d push operator must sum coeffs down the tree to restore correct scaling function coefficients...
Definition mraimpl.h:911
T inner_ext_recursive(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine, T old_inner=T(0)) const
Definition funcimpl.h:6583
bool noautorefine(const keyT &key, const tensorT &t) const
Always returns false (for when autorefine is not wanted)
Definition mraimpl.h:855
double truncate_tol(double tol, const keyT &key) const
Returns the truncation threshold according to truncate_method.
Definition mraimpl.h:645
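The truncate_mode conventions documented further down (0=|d|<thresh, 1=|d|<thresh/2^n, 2=|d|<thresh/4^n) suggest the following simplified sketch of a level-dependent tolerance; it is an illustration only, not the library routine, which may apply additional cell-size factors. The function name level_tol is hypothetical.

#include <cmath>

// Sketch only: level-dependent truncation tolerance following the documented
// truncate_mode convention (n = refinement level of the key).
double level_tol(double tol, int truncate_mode, int n) {
    if (truncate_mode == 1) return tol * std::pow(0.5, n);   // |d| < thresh/2^n
    if (truncate_mode == 2) return tol * std::pow(0.25, n);  // |d| < thresh/4^n
    return tol;                                              // mode 0: |d| < thresh
}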
void flo_unary_op_node_inplace(const opT &op, bool fence) const
Definition funcimpl.h:2160
bool autorefine_square_test(const keyT &key, const nodeT &t) const
Returns true if this block of coeffs needs autorefining.
Definition mraimpl.h:861
void erase(const Level &max_level)
truncate tree at a certain level
Definition mraimpl.h:735
void mulXX(const FunctionImpl< L, NDIM > *left, const FunctionImpl< R, NDIM > *right, double tol, bool fence)
Definition funcimpl.h:3180
void reconstruct(bool fence)
reconstruct this tree – respects fence
Definition mraimpl.h:1464
void multiply(const implT *f, const FunctionImpl< T, LDIM > *g, const int particle)
multiply f (a pair function of NDIM) with an orbital g (LDIM=NDIM/2)
Definition funcimpl.h:3580
coeffT assemble_coefficients(const keyT &key, const coeffT &coeff_ket, const coeffT &vpotential1, const coeffT &vpotential2, const tensorT &veri) const
given several coefficient tensors, assemble a result tensor
Definition mraimpl.h:1009
static void tnorm(const tensorT &t, double *lo, double *hi)
Computes norm of low/high-order polyn. coeffs for autorefinement test.
Definition mraimpl.h:3021
std::pair< bool, T > eval_local_only(const Vector< double, NDIM > &xin, Level maxlevel)
Evaluate function only if point is local returning (true,value); otherwise return (false,...
Definition mraimpl.h:2907
std::size_t max_depth() const
Returns the maximum depth of the tree ... collective ... global sum/broadcast.
Definition mraimpl.h:1852
std::size_t size() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1897
void reduce_rank(const double thresh, bool fence)
reduce the rank of the coefficients tensors
Definition mraimpl.h:1103
TreeState get_tree_state() const
Definition funcimpl.h:1282
void merge_trees(const T alpha, const FunctionImpl< Q, NDIM > &other, const R beta, const bool fence=true)
merge the trees of this and other, while multiplying them with the alpha or beta, resp
Definition funcimpl.h:1156
std::shared_ptr< FunctionFunctorInterface< T, NDIM > > get_functor()
Definition mraimpl.h:296
double do_apply_directed_screening(const opT *op, const keyT &key, const coeffT &coeff, const bool &do_kernel)
apply an operator on the coeffs c (at node key)
Definition funcimpl.h:5028
tensorT unfilter(const tensorT &s) const
Transform sums+differences at level n to sum coefficients at level n+1.
Definition mraimpl.h:1177
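Schematically (a hedged sketch of the two-scale relation, not a quote from the code): with $s^n_l$ and $d^n_l$ the sum and difference coefficients of box $l$ at level $n$,
\[ \{\, s^{n+1}_{2l+i} \,\}_{i=0}^{2^{\mathrm{NDIM}}-1} \;=\; \mathrm{unfilter}\!\left(\left[\, s^n_l \;;\; d^n_l \,\right]\right), \]
so discarding $d^n_l$ before unfiltering reproduces the parent's polynomial on its children, which is roughly what upsample does.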
int get_initial_level() const
getter
Definition funcimpl.h:967
Tensor< T > eval_plot_cube(const coordT &plotlo, const coordT &plothi, const std::vector< long > &npt, const bool eval_refine=false) const
Definition mraimpl.h:3410
virtual ~FunctionImpl()
Definition funcimpl.h:1102
Vector< Translation, NDIM > tranT
Type of array holding translation.
Definition funcimpl.h:953
void change_tree_state(const TreeState finalstate, bool fence=true)
change the tree state of this function, might or might not respect fence!
Definition mraimpl.h:1403
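A hedged usage sketch of change_tree_state, relying only on the signature above and the TreeState values listed further down; the helper to_redundant and the double/3-d instantiation are illustrative.

#include <madness/mra/mra.h>
using namespace madness;

// Sketch only: drive a FunctionImpl through two tree states.
void to_redundant(FunctionImpl<double,3>& impl) {
    impl.change_tree_state(reconstructed);   // s coeffs at the leaves only
    impl.change_tree_state(redundant);       // s coeffs everywhere; fence defaults to true
}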
Future< coeffT > truncate_reconstructed_spawn(const keyT &key, const double tol)
truncate using a tree in reconstructed form
Definition mraimpl.h:1589
GenTensor< Q > coeffs2values(const keyT &key, const GenTensor< Q > &coeff) const
Definition funcimpl.h:1731
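As a hedged schematic of the coefficient/value duality behind coeffs2values and values2coeffs (up to the cell-volume normalization used in the code): for box $(n,l)$ with scaling coefficients $c_k$ and Gauss-Legendre quadrature points $x_i$,
\[ f(x_i) \;\approx\; 2^{n\,\mathrm{NDIM}/2} \sum_k c_k\, \phi_k(x_i), \]
and values2coeffs applies the inverse, quadrature-weighted transform.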
FunctionImpl(const FunctionFactory< T, NDIM > &factory)
Initialize function impl from data in factory.
Definition funcimpl.h:1004
void map_and_mirror(const implT &f, const std::vector< long > &map, const std::vector< long > &mirror, bool fence)
map and mirror the translation index and the coefficients, result on this
Definition mraimpl.h:1072
Timer timer_lr_result
Definition funcimpl.h:996
void gaxpy(T alpha, const FunctionImpl< L, NDIM > &left, T beta, const FunctionImpl< R, NDIM > &right, bool fence)
Invoked by result to perform result += alpha*left+beta*right in wavelet basis.
Definition funcimpl.h:2010
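Because the wavelet transform is linear, this gaxpy can act node by node on the compressed representation; schematically (hedged),
\[ d^{\mathrm{result}}_{n,l} \;\leftarrow\; d^{\mathrm{result}}_{n,l} + \alpha\, d^{\mathrm{left}}_{n,l} + \beta\, d^{\mathrm{right}}_{n,l}, \]
with the sum coefficients at the root treated in the same way.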
void truncate(double tol, bool fence)
Truncate according to the threshold with optional global fence.
Definition mraimpl.h:374
void do_mul(const keyT &key, const Tensor< L > &left, const std::pair< keyT, Tensor< R > > &arg)
Functor for the mul method.
Definition funcimpl.h:1935
void project_out2(const FunctionImpl< T, LDIM+NDIM > *f, const FunctionImpl< T, LDIM > *g, const int dim)
project the low-dim function g on the hi-dim function f: this(x) = <f(x,y) | g(y)>
Definition funcimpl.h:7040
double do_apply_kernel2(const opT *op, const Tensor< R > &c, const do_op_args< OPDIM > &args, const TensorArgs &apply_targs)
same as do_apply_kernel, but use full rank tensors as input and low rank tensors as output
Definition funcimpl.h:4721
static Tensor< TENSOR_RESULT_TYPE(T, R)> dot_local(const std::vector< const FunctionImpl< T, NDIM > * > &left, const std::vector< const FunctionImpl< R, NDIM > * > &right, bool sym)
Definition funcimpl.h:6052
Tensor< Q > coeffs2values(const keyT &key, const Tensor< Q > &coeff) const
Definition funcimpl.h:1857
Tensor< Q > values2coeffs(const keyT &key, const Tensor< Q > &values) const
Definition funcimpl.h:1871
void multi_to_multi_op_values_doit(const keyT &key, const opT &op, const std::vector< implT * > &vin, std::vector< implT * > &vout)
Inplace operate on many functions (impl's) with an operator within a certain box.
Definition funcimpl.h:2786
bool is_reconstructed() const
Returns true if the function is reconstructed.
Definition mraimpl.h:252
void replicate(bool fence=true)
Definition funcimpl.h:1106
double norm_tree_op(const keyT &key, const std::vector< Future< double > > &v)
Definition mraimpl.h:1551
void reset_timer()
Definition mraimpl.h:362
void refine_to_common_level(const std::vector< FunctionImpl< T, NDIM > * > &v, const std::vector< tensorT > &c, const keyT key)
Refine multiple functions down to the same finest level.
Definition mraimpl.h:765
int get_k() const
Definition mraimpl.h:336
void dirac_convolution_op(const keyT &key, const nodeT &node, FunctionImpl< T, LDIM > *f) const
The operator.
Definition funcimpl.h:2076
FunctionImpl< T, NDIM > implT
Type of this class (implementation)
Definition funcimpl.h:950
void eval(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< T >::remote_refT &ref)
Evaluate the function at a point in simulation coordinates.
Definition mraimpl.h:2863
bool truncate_op(const keyT &key, double tol, const std::vector< Future< bool > > &v)
Definition mraimpl.h:2636
void zero_norm_tree()
Definition mraimpl.h:1288
std::size_t max_local_depth() const
Returns the maximum local depth of the tree ... no communications.
Definition mraimpl.h:1838
tensorT project(const keyT &key) const
Definition mraimpl.h:2781
double thresh
Screening threshold.
Definition funcimpl.h:973
double check_symmetry_local() const
Returns some asymmetry measure ... no comms.
Definition mraimpl.h:751
Future< double > get_norm_tree_recursive(const keyT &key) const
Definition mraimpl.h:2802
bool is_redundant_after_merge() const
Returns true if the function is redundant_after_merge.
Definition mraimpl.h:264
void mulXXvec(const FunctionImpl< L, NDIM > *left, const std::vector< const FunctionImpl< R, NDIM > * > &vright, const std::vector< FunctionImpl< T, NDIM > * > &vresult, double tol, bool fence)
Definition funcimpl.h:3237
Key< NDIM > keyT
Type of key.
Definition funcimpl.h:954
friend hashT hash_value(const std::shared_ptr< FunctionImpl< T, NDIM > > impl)
Hash a shared_ptr to FunctionImpl.
Definition funcimpl.h:7208
std::vector< Vector< double, NDIM > > special_points
special points for further refinement (needed for composite functions or multiplication)
Definition funcimpl.h:976
bool truncate_on_project
If true projection inserts at level n-1 not n.
Definition funcimpl.h:980
AtomicInt small
Definition funcimpl.h:1000
static void do_dot_localX(const typename mapT::iterator lstart, const typename mapT::iterator lend, typename FunctionImpl< R, NDIM >::mapT *rmap_ptr, const bool sym, Tensor< TENSOR_RESULT_TYPE(T, R)> *result_ptr, Mutex *mutex)
Definition funcimpl.h:5951
bool is_on_demand() const
Definition mraimpl.h:279
double err_box(const keyT &key, const nodeT &node, const opT &func, int npt, const Tensor< double > &qx, const Tensor< double > &quad_phit, const Tensor< double > &quad_phiw) const
Returns the square of the error norm in the box labeled by key.
Definition funcimpl.h:5427
void accumulate_timer(const double time) const
Definition mraimpl.h:348
void trickle_down_op(const keyT &key, const coeffT &s)
sum all the contributions from all scales after applying an operator in mod-NS form
Definition mraimpl.h:1361
static void do_inner_localX(const typename mapT::iterator lstart, const typename mapT::iterator lend, typename FunctionImpl< R, NDIM >::mapT *rmap_ptr, const bool sym, Tensor< TENSOR_RESULT_TYPE(T, R) > *result_ptr, Mutex *mutex)
Definition funcimpl.h:5870
void mulXXveca(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const std::vector< const FunctionImpl< R, NDIM > * > vrightin, const std::vector< Tensor< R > > &vrcin, const std::vector< FunctionImpl< T, NDIM > * > vresultin, double tol)
Definition funcimpl.h:2873
void set_thresh(double value)
Definition mraimpl.h:327
Tensor< double > print_plane_local(const int xaxis, const int yaxis, const coordT &el2)
collect the data for a plot of the MRA structure locally on each node
Definition mraimpl.h:419
void sock_it_to_me_too(const keyT &key, const RemoteReference< FutureImpl< std::pair< keyT, coeffT > > > &ref) const
Definition mraimpl.h:2841
void broaden_op(const keyT &key, const std::vector< Future< bool > > &v)
Definition mraimpl.h:1277
void print_plane(const std::string filename, const int xaxis, const int yaxis, const coordT &el2)
Print a plane ("xy", "xz", or "yz") containing the point x to file.
Definition mraimpl.h:399
void print_tree(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2664
void project_refine_op(const keyT &key, bool do_refine, const std::vector< Vector< double, NDIM > > &specialpts)
Definition mraimpl.h:2450
void scale_oop(const Q q, const FunctionImpl< F, NDIM > &f, bool fence)
Out-of-place scale by a constant.
Definition funcimpl.h:7175
T typeT
Definition funcimpl.h:949
std::size_t tree_size() const
Returns the size of the tree structure of the function ... collective global sum.
Definition mraimpl.h:1878
ConcurrentHashMap< keyT, mapvecT > mapT
Type of the map returned by make_key_vec_map.
Definition funcimpl.h:5798
void add_scalar_inplace(T t, bool fence)
Adds a constant to the function. Local operation, optional fence.
Definition mraimpl.h:2528
void forward_traverse(const coeff_opT &coeff_op, const apply_opT &apply_op, const keyT &key) const
traverse a non-existing tree
Definition funcimpl.h:3674
tensorT downsample(const keyT &key, const std::vector< Future< coeffT > > &v) const
downsample the sum coefficients of level n+1 to sum coeffs on level n
Definition mraimpl.h:1197
void abs_square_inplace(bool fence)
Definition mraimpl.h:3124
FunctionImpl(const FunctionImpl< Q, NDIM > &other, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool dozero)
Copy constructor.
Definition funcimpl.h:1073
void refine(const opT &op, bool fence)
Definition funcimpl.h:4534
static mapT make_key_vec_map(const std::vector< const FunctionImpl< T, NDIM > * > &v)
Returns map of union of local keys to vector of indexes of functions containing that key.
Definition funcimpl.h:5819
void put_in_box(ProcessID from, long nl, long ni) const
Definition mraimpl.h:820
void unary_op_value_inplace(const opT &op, bool fence)
Definition funcimpl.h:2853
std::pair< const keyT, nodeT > datumT
Type of entry in container.
Definition funcimpl.h:958
Timer timer_accumulate
Definition funcimpl.h:994
TensorArgs get_tensor_args() const
Definition mraimpl.h:318
void unaryXXa(const keyT &key, const FunctionImpl< Q, NDIM > *func, const opT &op)
Definition funcimpl.h:3155
void make_Vphi_only(const opT &leaf_op, FunctionImpl< T, NDIM > *ket, FunctionImpl< T, LDIM > *v1, FunctionImpl< T, LDIM > *v2, FunctionImpl< T, LDIM > *p1, FunctionImpl< T, LDIM > *p2, FunctionImpl< T, NDIM > *eri, const bool fence=true)
assemble the function V*phi using V and phi given from the functor
Definition funcimpl.h:4349
void average(const implT &rhs)
take the average of two functions, similar to: this=0.5*(this+rhs)
Definition mraimpl.h:1084
void recursive_apply(opT &apply_op, const FunctionImpl< T, LDIM > *fimpl, const FunctionImpl< T, LDIM > *gimpl, const bool fence)
traverse a non-existing tree, make its coeffs and apply an operator
Definition funcimpl.h:5180
void diff(const DerivativeBase< T, NDIM > *D, const implT *f, bool fence)
Definition mraimpl.h:942
void square_inplace(bool fence)
Pointwise squaring of function with optional global fence.
Definition mraimpl.h:3113
void remove_internal_coefficients(const bool fence)
Definition mraimpl.h:1513
void compute_snorm_and_dnorm(bool fence=true)
compute norm of s and d coefficients for all nodes
Definition mraimpl.h:1127
long box_leaf[1000]
Definition funcimpl.h:3251
void standard(bool fence)
Changes non-standard compressed form to standard compressed form.
Definition mraimpl.h:1761
void multiop_values_doit(const keyT &key, const opT &op, const std::vector< implT * > &v)
Definition funcimpl.h:2744
bool is_nonstandard_with_leaves() const
Definition mraimpl.h:274
GenTensor< Q > values2NScoeffs(const keyT &key, const GenTensor< Q > &values) const
convert function values of the a child generation directly to NS coeffs
Definition funcimpl.h:1832
int truncate_mode
0=default=(|d|<thresh), 1=(|d|<thresh/2^n), 2=(|d|<thresh/4^n);
Definition funcimpl.h:978
void multiop_values(const opT &op, const std::vector< implT * > &v)
Definition funcimpl.h:2761
GenTensor< Q > NScoeffs2values(const keyT &key, const GenTensor< Q > &coeff, const bool s_only) const
convert S or NS coeffs to values on a 2k grid of the children
Definition funcimpl.h:1747
FunctionNode holds the coefficients, etc., at each node of the 2^NDIM-tree.
Definition funcimpl.h:127
FunctionNode< Q, NDIM > convert() const
Copy with possible type conversion of coefficients, copying all other state.
Definition funcimpl.h:194
GenTensor< T > coeffT
Definition funcimpl.h:129
bool has_coeff() const
Returns true if there are coefficients in this node.
Definition funcimpl.h:200
void recompute_snorm_and_dnorm(const FunctionCommonData< T, NDIM > &cdata)
Definition funcimpl.h:335
FunctionNode(const coeffT &coeff, bool has_children=false)
Constructor from given coefficients with optional children.
Definition funcimpl.h:156
FunctionNode()
Default constructor makes node without coeff or children.
Definition funcimpl.h:146
void serialize(Archive &ar)
Definition funcimpl.h:458
void consolidate_buffer(const TensorArgs &args)
Definition funcimpl.h:444
double get_dnorm() const
return the precomputed norm of the (virtual) d coefficients
Definition funcimpl.h:316
size_t size() const
Returns the number of coefficients in this node.
Definition funcimpl.h:242
void set_has_children_recursive(const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key)
Sets has_children attribute to true recurring up to ensure connected.
Definition funcimpl.h:259
FunctionNode< T, NDIM > & operator=(const FunctionNode< T, NDIM > &other)
Definition funcimpl.h:176
double snorm
norm of the s coefficients
Definition funcimpl.h:141
void clear_coeff()
Clears the coefficients (has_coeff() will subsequently return false)
Definition funcimpl.h:295
Tensor< T > tensorT
Definition funcimpl.h:130
coeffT buffer
The coefficients, if any.
Definition funcimpl.h:139
T trace_conj(const FunctionNode< T, NDIM > &rhs) const
Definition funcimpl.h:453
void scale(Q a)
Scale the coefficients of this node.
Definition funcimpl.h:301
bool is_leaf() const
Returns true if this does not have children.
Definition funcimpl.h:213
void set_has_children(bool flag)
Sets has_children attribute to value of flag.
Definition funcimpl.h:254
void accumulate(const coeffT &t, const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key, const TensorArgs &args)
Accumulate inplace and if necessary connect node to parent.
Definition funcimpl.h:416
double get_norm_tree() const
Gets the value of norm_tree.
Definition funcimpl.h:311
bool _has_children
True if there are children.
Definition funcimpl.h:138
FunctionNode(const coeffT &coeff, double norm_tree, double snorm, double dnorm, bool has_children)
Definition funcimpl.h:166
void set_snorm(const double sn)
set the precomputed norm of the (virtual) s coefficients
Definition funcimpl.h:321
coeffT _coeffs
The coefficients, if any.
Definition funcimpl.h:136
void accumulate2(const tensorT &t, const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key)
Accumulate inplace and if necessary connect node to parent.
Definition funcimpl.h:383
void reduceRank(const double &eps)
reduces the rank of the coefficients (if applicable)
Definition funcimpl.h:249
WorldContainer< Key< NDIM >, FunctionNode< T, NDIM > > dcT
Definition funcimpl.h:144
void gaxpy_inplace(const T &alpha, const FunctionNode< Q, NDIM > &other, const R &beta)
General bi-linear operation — this = this*alpha + other*beta.
Definition funcimpl.h:365
double _norm_tree
After norm_tree will contain norm of coefficients summed up tree.
Definition funcimpl.h:137
void set_is_leaf(bool flag)
Sets has_children attribute to value of !flag.
Definition funcimpl.h:280
void print_json(std::ostream &s) const
Definition funcimpl.h:466
double get_snorm() const
get the precomputed norm of the (virtual) s coefficients
Definition funcimpl.h:331
const coeffT & coeff() const
Returns a const reference to the tensor containing the coeffs.
Definition funcimpl.h:237
FunctionNode(const coeffT &coeff, double norm_tree, bool has_children)
Definition funcimpl.h:161
bool has_children() const
Returns true if this node has children.
Definition funcimpl.h:207
void set_coeff(const coeffT &coeffs)
Takes a shallow copy of the coeff — same as this->coeff()=coeff.
Definition funcimpl.h:285
void set_dnorm(const double dn)
set the precomputed norm of the (virtual) d coefficients
Definition funcimpl.h:326
double dnorm
norm of the d coefficients, also defined if there are no d coefficients
Definition funcimpl.h:140
bool is_invalid() const
Returns true if this node is invalid (no coeffs and no children)
Definition funcimpl.h:219
FunctionNode(const FunctionNode< T, NDIM > &other)
Definition funcimpl.h:170
coeffT & coeff()
Returns a non-const reference to the tensor containing the coeffs.
Definition funcimpl.h:227
void set_norm_tree(double norm_tree)
Sets the value of norm_tree.
Definition funcimpl.h:306
Implements the functionality of futures.
Definition future.h:74
A future is a possibly yet unevaluated value.
Definition future.h:373
remote_refT remote_ref(World &world) const
Returns a structure used to pass references to another process.
Definition future.h:675
RemoteReference< FutureImpl< T > > remote_refT
Definition future.h:398
Definition lowranktensor.h:59
bool is_of_tensortype(const TensorType &tt) const
Definition gentensor.h:225
GenTensor convert(const TensorArgs &targs) const
Definition gentensor.h:198
GenTensor full_tensor() const
Definition gentensor.h:200
long dim(const int i) const
return the number of entries in dimension i
Definition lowranktensor.h:391
Tensor< T > full_tensor_copy() const
Definition gentensor.h:206
long ndim() const
Definition lowranktensor.h:386
void add_SVD(const GenTensor< T > &rhs, const double &eps)
Definition gentensor.h:235
constexpr bool is_full_tensor() const
Definition gentensor.h:224
GenTensor get_tensor() const
Definition gentensor.h:203
GenTensor reconstruct_tensor() const
Definition gentensor.h:199
bool has_no_data() const
Definition gentensor.h:211
void normalize()
Definition gentensor.h:218
GenTensor< T > & emul(const GenTensor< T > &other)
Inplace multiply by corresponding elements of argument Tensor.
Definition lowranktensor.h:631
float_scalar_type normf() const
Definition lowranktensor.h:406
double svd_normf() const
Definition gentensor.h:213
SRConf< T > config() const
Definition gentensor.h:237
void reduce_rank(const double &eps)
Definition gentensor.h:217
long rank() const
Definition gentensor.h:212
long size() const
Definition lowranktensor.h:482
SVDTensor< T > & get_svdtensor()
Definition gentensor.h:228
TensorType tensor_type() const
Definition gentensor.h:221
bool has_data() const
Definition gentensor.h:210
GenTensor & gaxpy(const T alpha, const GenTensor &other, const T beta)
Definition lowranktensor.h:580
bool is_assigned() const
Definition gentensor.h:209
IsSupported< TensorTypeData< Q >, GenTensor< T > & >::type scale(Q fac)
Inplace multiplication by scalar of supported type (legacy name)
Definition lowranktensor.h:426
constexpr bool is_svd_tensor() const
Definition gentensor.h:222
Iterates in lexical order thru all children of a key.
Definition key.h:466
Key is the index for a node of the 2^NDIM-tree.
Definition key.h:69
Key< NDIM+LDIM > merge_with(const Key< LDIM > &rhs) const
merge with other key (ie concatenate), use level of rhs, not of this
Definition key.h:405
Level level() const
Definition key.h:168
bool is_valid() const
Checks if a key is valid.
Definition key.h:123
Key< NDIM-VDIM > extract_complement_key(const std::array< int, VDIM > &v) const
extract a new key with the Translations complementary to the ones indicated in the v array
Definition key.h:391
Key< VDIM > extract_key(const std::array< int, VDIM > &v) const
extract a new key with the Translations indicated in the v array
Definition key.h:383
Key parent(int generation=1) const
Returns the key of the parent.
Definition key.h:252
const Vector< Translation, NDIM > & translation() const
Definition key.h:173
void break_apart(Key< LDIM > &key1, Key< KDIM > &key2) const
break key into two low-dimensional keys
Definition key.h:343
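A hedged sketch of the Key manipulations listed above, as they are typically used for pair functions; the helper split_pair_key and the 6-d/3-d split are illustrative.

#include <madness/mra/key.h>
using namespace madness;

// Sketch only: split a 6-d pair-function key into its two 3-d particle keys
// and concatenate them again.
void split_pair_key(const Key<6>& key) {
    Key<3> k1, k2;
    key.break_apart(k1, k2);            // low-dimensional pieces
    Key<6> back = k1.merge_with(k2);    // concatenate; level taken from k2
    // 'back' indexes the same box as 'key'
}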
A pmap that locates children on odd levels with their even level parents.
Definition funcimpl.h:105
LevelPmap(World &world)
Definition funcimpl.h:111
const int nproc
Definition funcimpl.h:107
LevelPmap()
Definition funcimpl.h:109
ProcessID owner(const keyT &key) const
Find the owner of a given key.
Definition funcimpl.h:114
Definition funcimpl.h:77
Mutex using pthread mutex operations.
Definition worldmutex.h:131
void unlock() const
Free a mutex owned by this thread.
Definition worldmutex.h:165
void lock() const
Acquire the mutex waiting if necessary.
Definition worldmutex.h:155
Range, vaguely a la Intel TBB, to encapsulate a random-access, STL-like start and end iterator with c...
Definition range.h:64
Simple structure used to manage references/pointers to remote instances.
Definition worldref.h:395
Definition SVDTensor.h:42
A simple process map.
Definition funcimpl.h:86
SimplePmap(World &world)
Definition funcimpl.h:92
const int nproc
Definition funcimpl.h:88
const ProcessID me
Definition funcimpl.h:89
ProcessID owner(const keyT &key) const
Maps key to processor.
Definition funcimpl.h:95
A slice defines a sub-range or patch of a dimension.
Definition slice.h:103
static TaskAttributes hipri()
Definition thread.h:456
Traits class to specify support of numeric types.
Definition type_data.h:56
A tensor is a multidimensional array.
Definition tensor.h:317
float_scalar_type normf() const
Returns the Frobenius norm of the tensor.
Definition tensor.h:1726
T sum() const
Returns the sum of all elements of the tensor.
Definition tensor.h:1662
Tensor< T > reshape(int ndimnew, const long *d)
Returns new view/tensor reshaping size/number of dimensions to conforming tensor.
Definition tensor.h:1384
T * ptr()
Returns a pointer to the internal data.
Definition tensor.h:1825
Tensor< T > mapdim(const std::vector< long > &map)
Returns new view/tensor permuting the dimensions.
Definition tensor.h:1624
IsSupported< TensorTypeData< Q >, Tensor< T > & >::type scale(Q x)
Inplace multiplication by scalar of supported type (legacy name)
Definition tensor.h:686
Tensor< T > & emul(const Tensor< T > &t)
Inplace multiply by corresponding elements of argument Tensor.
Definition tensor.h:1799
bool has_data() const
Definition tensor.h:1887
const TensorIterator< T > & end() const
End point for forward iteration.
Definition tensor.h:1877
Tensor< T > fusedim(long i)
Returns new view/tensor fusing contiguous dimensions i and i+1.
Definition tensor.h:1587
Tensor< T > flat()
Returns new view/tensor rehshaping to flat (1-d) tensor.
Definition tensor.h:1555
Tensor< T > & gaxpy(T alpha, const Tensor< T > &t, T beta)
Inplace generalized saxpy ... this = this*alpha + other*beta.
Definition tensor.h:1805
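A hedged usage sketch of the inplace Tensor gaxpy above; demo_gaxpy and the 4x4 shape are illustrative only.

#include <madness/tensor/tensor.h>
using namespace madness;

// Sketch only: elementwise t = 2*t + 3*u via the inplace gaxpy.
void demo_gaxpy() {
    Tensor<double> t(4,4), u(4,4);
    t = 1.0;                   // fill with a constant
    u = 2.0;
    t.gaxpy(2.0, u, 3.0);      // this = this*alpha + other*beta
}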
Tensor< T > & conj()
Inplace complex conjugate.
Definition tensor.h:716
Definition function_common_data.h:169
void accumulate(const double time) const
accumulate timer
Definition function_common_data.h:183
A simple, fixed dimension vector.
Definition vector.h:64
Makes a distributed container with specified attributes.
Definition worlddc.h:866
void process_pending()
Process pending messages.
Definition worlddc.h:1166
bool find(accessor &acc, const keyT &key)
Write access to LOCAL value by key. Returns true if found, false otherwise (always false for remote).
Definition worlddc.h:987
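A hedged sketch of the accessor-based local lookup pattern implied by find above (it succeeds only for local data); inspect, and the double/3-d instantiation, are illustrative.

#include <madness/mra/mra.h>
using namespace madness;

// Sketch only: look up a local tree node and read its coefficients.
typedef FunctionNode<double,3> nodeT;
typedef WorldContainer<Key<3>, nodeT> dcT;

void inspect(dcT& coeffs, const Key<3>& key) {
    dcT::accessor acc;
    if (coeffs.find(acc, key)) {        // found and local
        nodeT& node = acc->second;
        if (node.has_coeff()) {
            double nrm = node.coeff().normf();   // Frobenius norm of the coeffs
            (void)nrm;
        }
    }
}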
bool probe(const keyT &key) const
Returns true if local data is immediately available (no communication)
Definition worlddc.h:1024
iterator begin()
Returns an iterator to the beginning of the local data (no communication)
Definition worlddc.h:1070
ProcessID owner(const keyT &key) const
Returns processor that logically owns key (no communication)
Definition worlddc.h:1034
implT::const_iterator const_iterator
Definition worlddc.h:872
void replicate(bool fence=true)
replicates this WorldContainer on all ProcessIDs
Definition worlddc.h:968
void replace(const pairT &datum)
Inserts/replaces key+value pair (non-blocking communication if key not local)
Definition worlddc.h:974
iterator end()
Returns an iterator past the end of the local data (no communication)
Definition worlddc.h:1084
const std::shared_ptr< WorldDCPmapInterface< keyT > > & get_pmap() const
Returns shared pointer to the process mapping.
Definition worlddc.h:1142
bool insert(accessor &acc, const keyT &key)
Write access to LOCAL value by key. Returns true if inserted, false if already exists (throws if remo...
Definition worlddc.h:1001
implT::iterator iterator
Definition worlddc.h:871
std::size_t size() const
Returns the number of local entries (no communication)
Definition worlddc.h:1135
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun()" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1426
bool is_local(const keyT &key) const
Returns true if the key maps to the local processor (no communication)
Definition worlddc.h:1041
Future< MEMFUN_RETURNT(memfunT)> send(const keyT &key, memfunT memfun)
Sends message "resultT memfun()" to item (non-blocking comm if remote)
Definition worlddc.h:1183
implT::accessor accessor
Definition worlddc.h:873
Interface to be provided by any process map.
Definition worlddc.h:82
void fence(bool debug=false)
Synchronizes all processes in communicator AND globally ensures no pending AM or tasks.
Definition worldgop.cc:161
Implements most parts of a globally addressable object (via unique ID).
Definition world_object.h:364
const uniqueidT & id() const
Returns the globally unique object ID.
Definition world_object.h:711
void process_pending()
To be called from derived constructor to process pending messages.
Definition world_object.h:656
ProcessID me
Rank of self.
Definition world_object.h:385
detail::task_result_type< memfnT >::futureT send(ProcessID dest, memfnT memfn) const
Definition world_object.h:731
detail::task_result_type< memfnT >::futureT task(ProcessID dest, memfnT memfn, const TaskAttributes &attr=TaskAttributes()) const
Sends task to derived class method returnT (this->*memfn)().
Definition world_object.h:1005
Future< bool > for_each(const rangeT &range, const opT &op)
Apply op(item) on all items in range.
Definition world_task_queue.h:572
void add(TaskInterface *t)
Add a new local task, taking ownership of the pointer.
Definition world_task_queue.h:466
Future< resultT > reduce(const rangeT &range, const opT &op)
Reduce op(item) for all items in range using op(sum,op(item)).
Definition world_task_queue.h:527
A parallel world class.
Definition world.h:132
static World * world_from_id(std::uint64_t id)
Convert a World ID to a World pointer.
Definition world.h:492
WorldTaskQueue & taskq
Task queue.
Definition world.h:206
ProcessID rank() const
Returns the process rank in this World (same as MPI_Comm_rank()).
Definition world.h:320
ProcessID size() const
Returns the number of processes in this World (same as MPI_Comm_size()).
Definition world.h:330
WorldGopInterface & gop
Global operations.
Definition world.h:207
std::optional< T * > ptr_from_id(uniqueidT id) const
Look up a local pointer from a world-wide unique ID.
Definition world.h:416
ProcessID random_proc()
Returns a random process number; that is, an integer in [0,world.size()).
Definition world.h:591
Wrapper for an opaque pointer for serialization purposes.
Definition archive.h:850
syntactic sugar for std::array<bool, N>
Definition array_of_bools.h:19
Class for unique global IDs.
Definition uniqueid.h:53
unsigned long get_obj_id() const
Access the object ID.
Definition uniqueid.h:97
unsigned long get_world_id() const
Access the World ID.
Definition uniqueid.h:90
static const double R
Definition csqrt.cc:46
double(* f1)(const coord_3d &)
Definition derivatives.cc:55
char * p(char *buf, const char *name, int k, int initial_level, double thresh, int order)
Definition derivatives.cc:72
static double lo
Definition dirac-hatom.cc:23
@ upper
Definition dirac-hatom.cc:15
Provides FunctionDefaults and utilities for coordinate transformation.
auto T(World &world, response_space &f) -> response_space
Definition global_functions.cc:34
archive_array< unsigned char > wrap_opaque(const T *, unsigned int)
Factory function to wrap a pointer to contiguous data as an opaque (uchar) archive_array.
Definition archive.h:925
Tensor< typename Tensor< T >::scalar_type > arg(const Tensor< T > &t)
Return a new tensor holding the argument of each element of t (complex types only)
Definition tensor.h:2503
Tensor< TENSOR_RESULT_TYPE(T, Q) > & fast_transform(const Tensor< T > &t, const Tensor< Q > &c, Tensor< TENSOR_RESULT_TYPE(T, Q) > &result, Tensor< TENSOR_RESULT_TYPE(T, Q) > &workspace)
Restricted but heavily optimized form of transform()
Definition tensor.h:2444
const double beta
Definition gygi_soltion.cc:62
static const double v
Definition hatom_sf_dirac.cc:20
Provides IndexIterator.
Tensor< double > op(const Tensor< double > &x)
Definition kain.cc:508
Multidimension Key for MRA tree and associated iterators.
static double pow(const double *a, const double *b)
Definition lda.h:74
#define MADNESS_CHECK(condition)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:182
#define MADNESS_EXCEPTION(msg, value)
Macro for throwing a MADNESS exception.
Definition madness_exception.h:119
#define MADNESS_ASSERT(condition)
Assert a condition that should be free of side-effects since in release builds this might be a no-op.
Definition madness_exception.h:134
#define MADNESS_CHECK_THROW(condition, msg)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:207
Header to declare stuff which has not yet found a home.
constexpr double pi
Mathematical constant π.
Definition constants.h:48
MemFuncWrapper< objT *, memfnT, typename result_of< memfnT >::type > wrap_mem_fn(objT &obj, memfnT memfn)
Create a member function wrapper (MemFuncWrapper) from an object and a member function pointer.
Definition mem_func_wrapper.h:251
void combine_hash(hashT &seed, hashT hash)
Internal use only.
Definition worldhash.h:248
Namespace for all elements and tools of MADNESS.
Definition DFParameters.h:10
std::ostream & operator<<(std::ostream &os, const particle< PDIM > &p)
Definition lowrankfunction.h:397
static const char * filename
Definition legendre.cc:96
static const std::vector< Slice > ___
Entire dimension.
Definition slice.h:128
static double cpu_time()
Returns the cpu time in seconds relative to an arbitrary origin.
Definition timers.h:127
GenTensor< TENSOR_RESULT_TYPE(R, Q)> general_transform(const GenTensor< R > &t, const Tensor< Q > c[])
Definition gentensor.h:274
response_space scale(response_space a, double b)
void finalize()
Call this once at the very end of your main program instead of MPI_Finalize().
Definition world.cc:232
void norm_tree(World &world, const std::vector< Function< T, NDIM > > &v, bool fence=true)
Makes the norm tree for all functions in a vector.
Definition vmra.h:1181
std::vector< Function< TENSOR_RESULT_TYPE(T, R), NDIM > > transform(World &world, const std::vector< Function< T, NDIM > > &v, const Tensor< R > &c, bool fence=true)
Transforms a vector of functions according to new[i] = sum[j] old[j]*c[j,i].
Definition vmra.h:707
TreeState
Definition funcdefaults.h:59
@ nonstandard_after_apply
s and d coeffs, state after operator application
Definition funcdefaults.h:64
@ redundant_after_merge
s coeffs everywhere, must be summed up to yield the result
Definition funcdefaults.h:66
@ reconstructed
s coeffs at the leaves only
Definition funcdefaults.h:60
@ nonstandard
s and d coeffs in internal nodes
Definition funcdefaults.h:62
@ redundant
s coeffs everywhere
Definition funcdefaults.h:65
static Tensor< double > weights[max_npt+1]
Definition legendre.cc:99
int64_t Translation
Definition key.h:57
Key< NDIM > displacement(const Key< NDIM > &source, const Key< NDIM > &target)
given a source and a target, return the displacement in translation
Definition key.h:451
static const Slice _(0,-1, 1)
std::shared_ptr< FunctionFunctorInterface< double, 3 > > func(new opT(g))
void change_tensor_type(GenTensor< T > &t, const TensorArgs &targs)
change representation to targ.tt
Definition gentensor.h:284
int Level
Definition key.h:58
std::enable_if< std::is_base_of< ProjectorBase, projT >::value, OuterProjector< projT, projQ > >::type outer(const projT &p0, const projQ &p1)
Definition projector.h:457
int RandomValue< int >()
Random int.
Definition ran.cc:250
static double pop(std::vector< double > &v)
Definition SCF.cc:113
void print(const T &t, const Ts &... ts)
Print items to std::cout (items separated by spaces) and terminate with a new line.
Definition print.h:225
Tensor< T > fcube(const Key< NDIM > &, T(*f)(const Vector< double, NDIM > &), const Tensor< double > &)
Definition mraimpl.h:2129
TensorType
low rank representations of tensors (see gentensor.h)
Definition gentensor.h:120
@ TT_2D
Definition gentensor.h:120
@ TT_FULL
Definition gentensor.h:120
NDIM & f
Definition mra.h:2481
void error(const char *msg)
Definition world.cc:139
NDIM const Function< R, NDIM > & g
Definition mra.h:2481
std::size_t hashT
The hash value type.
Definition worldhash.h:145
static const int kmax
Definition twoscale.cc:52
double inner(response_space &a, response_space &b)
Definition response_functions.h:442
GenTensor< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const GenTensor< R > &t, const Tensor< Q > &c, const int axis)
Definition lowranktensor.h:1099
std::string name(const FuncType &type, const int ex=-1)
Definition ccpairfunction.h:28
void mxmT(long dimi, long dimj, long dimk, T *MADNESS_RESTRICT c, const T *a, const T *b)
Matrix += Matrix * matrix transpose ... MKL interface version.
Definition mxm.h:225
Function< T, NDIM > copy(const Function< T, NDIM > &f, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool fence=true)
Create a new copy of the function with different distribution and optional fence.
Definition mra.h:2066
static const int MAXK
The maximum wavelet order presently supported.
Definition funcdefaults.h:54
Definition mraimpl.h:50
static long abs(long a)
Definition tensor.h:218
const double cc
Definition navstokes_cosines.cc:107
static const double b
Definition nonlinschro.cc:119
static const double d
Definition nonlinschro.cc:121
static const double a
Definition nonlinschro.cc:118
Defines simple templates for printing to std::cout "a la Python".
double Q(double a)
Definition relops.cc:20
static const double c
Definition relops.cc:10
static const double L
Definition rk.cc:46
static const double thresh
Definition rk.cc:45
Definition test_ar.cc:204
Definition test_ccpairfunction.cc:22
given a ket and the 1- and 2-electron potentials, construct the function V phi
Definition funcimpl.h:4009
implT * result
where to construct Vphi, no need to track parents
Definition funcimpl.h:4017
bool have_v2() const
Definition funcimpl.h:4026
ctL iav1
Definition funcimpl.h:4021
Vphi_op_NS(implT *result, const opT &leaf_op, const ctT &iaket, const ctL &iap1, const ctL &iap2, const ctL &iav1, const ctL &iav2, const implT *eri)
Definition funcimpl.h:4035
ctL iap1
Definition funcimpl.h:4020
bool have_v1() const
Definition funcimpl.h:4025
std::pair< bool, coeffT > continue_recursion(const std::vector< bool > child_is_leaf, const tensorT &coeffs, const keyT &key) const
loop over all children and either insert their sum coeffs or continue the recursion
Definition funcimpl.h:4101
opT leaf_op
deciding if a given FunctionNode will be a leaf node
Definition funcimpl.h:4018
std::pair< coeffT, double > make_sum_coeffs(const keyT &key) const
make the sum coeffs for key
Definition funcimpl.h:4194
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:4014
ctL iap2
the particles 1 and 2 (exclusive with ket)
Definition funcimpl.h:4020
bool have_ket() const
Definition funcimpl.h:4024
const implT * eri
2-particle potential, must be on-demand
Definition funcimpl.h:4022
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:4015
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
make and insert the coefficients into result's tree
Definition funcimpl.h:4046
void serialize(const Archive &ar)
serialize this (needed for use in recursive_op)
Definition funcimpl.h:4275
Vphi_op_NS< opT, LDIM > this_type
Definition funcimpl.h:4013
ctT iaket
the ket of a pair function (exclusive with p1, p2)
Definition funcimpl.h:4019
double compute_error_from_inaccurate_refinement(const keyT &key, const tensorT &ceri) const
the error is computed from the d coefficients of the constituent functions
Definition funcimpl.h:4147
void accumulate_into_result(const Key< NDIM > &key, const coeffT &coeff) const
Definition funcimpl.h:4029
this_type make_child(const keyT &child) const
Definition funcimpl.h:4246
tensorT eri_coeffs(const keyT &key) const
Definition funcimpl.h:4127
ctL iav2
potentials for particles 1 and 2
Definition funcimpl.h:4021
bool have_eri() const
Definition funcimpl.h:4027
this_type forward_ctor(implT *result1, const opT &leaf_op, const ctT &iaket1, const ctL &iap11, const ctL &iap21, const ctL &iav11, const ctL &iav21, const implT *eri1)
Definition funcimpl.h:4268
Vphi_op_NS()
Definition funcimpl.h:4034
Future< this_type > activate() const
Definition funcimpl.h:4257
bool randomize() const
Definition funcimpl.h:4011
add two functions f and g: result=alpha * f + beta * g
Definition funcimpl.h:3519
bool randomize() const
Definition funcimpl.h:3524
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:3554
add_op(const ctT &f, const ctT &g, const double alpha, const double beta)
Definition funcimpl.h:3532
ctT f
tracking coeffs of first and second addend
Definition funcimpl.h:3527
double alpha
prefactor for f, g
Definition funcimpl.h:3529
add_op this_type
Definition funcimpl.h:3522
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:3521
void serialize(const Archive &ar)
Definition funcimpl.h:3566
ctT g
Definition funcimpl.h:3527
std::pair< bool, coeffT > operator()(const keyT &key) const
if we are at the bottom of the trees, return the sum of the coeffs
Definition funcimpl.h:3536
double beta
Definition funcimpl.h:3529
this_type make_child(const keyT &child) const
Definition funcimpl.h:3549
this_type forward_ctor(const ctT &f1, const ctT &g1, const double alpha, const double beta)
taskq-compatible ctor
Definition funcimpl.h:3562
opT op
Definition funcimpl.h:3125
opT::resultT resultT
Definition funcimpl.h:3123
Tensor< resultT > operator()(const Key< NDIM > &key, const Tensor< Q > &t) const
Definition funcimpl.h:3132
coeff_value_adaptor(const FunctionImpl< Q, NDIM > *impl_func, const opT &op)
Definition funcimpl.h:3128
const FunctionImpl< Q, NDIM > * impl_func
Definition funcimpl.h:3124
void serialize(Archive &ar)
Definition funcimpl.h:3141
merge the coefficient boxes of this into result's tree
Definition funcimpl.h:2364
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2365
void serialize(const Archive &ar)
Definition funcimpl.h:2382
FunctionImpl< Q, NDIM > * result
Definition funcimpl.h:2366
do_accumulate_trees(FunctionImpl< Q, NDIM > &result, const T alpha)
Definition funcimpl.h:2369
T alpha
Definition funcimpl.h:2367
bool operator()(typename rangeT::iterator &it) const
accumulate the coefficients of this node, scaled by alpha, into the corresponding node of result's tree
Definition funcimpl.h:2373
"put" this on g
Definition funcimpl.h:2575
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2576
void serialize(const Archive &ar)
Definition funcimpl.h:2604
implT * g
Definition funcimpl.h:2578
do_average()
Definition funcimpl.h:2580
bool operator()(typename rangeT::iterator &it) const
iterator it points to this
Definition funcimpl.h:2584
do_average(implT &g)
Definition funcimpl.h:2581
change representation of nodes' coeffs to low rank, optional fence
Definition funcimpl.h:2608
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2609
void serialize(const Archive &ar)
Definition funcimpl.h:2632
TensorArgs targs
Definition funcimpl.h:2612
do_change_tensor_type(const TensorArgs &targs, implT &g)
Definition funcimpl.h:2618
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2621
implT * f
Definition funcimpl.h:2613
check symmetry wrt particle exchange
Definition funcimpl.h:2281
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2282
double operator()(typename rangeT::iterator &it) const
return the norm of the difference of this node and its "mirror" node
Definition funcimpl.h:2288
do_check_symmetry_local()
Definition funcimpl.h:2284
void serialize(const Archive &ar)
Definition funcimpl.h:2351
double operator()(double a, double b) const
Definition funcimpl.h:2347
do_check_symmetry_local(const implT &f)
Definition funcimpl.h:2285
const implT * f
Definition funcimpl.h:2283
compute the norm of the wavelet coefficients
Definition funcimpl.h:4416
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:4417
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:4423
do_compute_snorm_and_dnorm(const FunctionCommonData< T, NDIM > &cdata)
Definition funcimpl.h:4420
const FunctionCommonData< T, NDIM > & cdata
Definition funcimpl.h:4419
TensorArgs targs
Definition funcimpl.h:2639
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2644
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2636
do_consolidate_buffer(const TensorArgs &targs)
Definition funcimpl.h:2643
void serialize(const Archive &ar)
Definition funcimpl.h:2648
double operator()(double val) const
Definition funcimpl.h:1403
double limit
Definition funcimpl.h:1398
do_convert_to_color(const double limit, const bool log)
Definition funcimpl.h:1402
bool log
Definition funcimpl.h:1399
static double lower()
Definition funcimpl.h:1400
compute the inner product of this range with other
Definition funcimpl.h:5736
do_dot_local(const FunctionImpl< R, NDIM > *other, const bool leaves_only)
Definition funcimpl.h:5741
bool leaves_only
Definition funcimpl.h:5738
typedef TENSOR_RESULT_TYPE(T, R) resultT
resultT operator()(resultT a, resultT b) const
Definition funcimpl.h:5769
const FunctionImpl< R, NDIM > * other
Definition funcimpl.h:5737
void serialize(const Archive &ar)
Definition funcimpl.h:5773
resultT operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5743
functor for the gaxpy_inplace method
Definition funcimpl.h:1188
FunctionImpl< T, NDIM > * f
the current function impl
Definition funcimpl.h:1190
do_gaxpy_inplace(FunctionImpl< T, NDIM > *f, T alpha, R beta)
Definition funcimpl.h:1194
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:1195
R beta
prefactor for other function impl
Definition funcimpl.h:1192
void serialize(Archive &ar)
Definition funcimpl.h:1203
Range< typename FunctionImpl< Q, NDIM >::dcT::const_iterator > rangeT
Definition funcimpl.h:1189
T alpha
prefactor for the current function impl
Definition funcimpl.h:1191
const bool do_leaves
start with leaf nodes instead of initial_level
Definition funcimpl.h:6658
T operator()(T a, T b) const
Definition funcimpl.h:6676
do_inner_ext_local_ffi(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const implT *impl, const bool leaf_refine, const bool do_leaves)
Definition funcimpl.h:6660
void serialize(const Archive &ar)
Definition funcimpl.h:6680
const bool leaf_refine
Definition funcimpl.h:6657
const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > fref
Definition funcimpl.h:6655
T operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:6664
const implT * impl
Definition funcimpl.h:6656
compute the inner product of this range with other
Definition funcimpl.h:5599
const FunctionImpl< T, NDIM > * bra
Definition funcimpl.h:5600
void serialize(const Archive &ar)
Definition funcimpl.h:5715
const FunctionImpl< R, NDIM > * ket
Definition funcimpl.h:5601
bool leaves_only
Definition funcimpl.h:5602
do_inner_local_on_demand(const FunctionImpl< T, NDIM > *bra, const FunctionImpl< R, NDIM > *ket, const bool leaves_only=true)
Definition funcimpl.h:5605
resultT operator()(resultT a, resultT b) const
Definition funcimpl.h:5711
resultT operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5608
compute the inner product of this range with other
Definition funcimpl.h:5538
resultT operator()(resultT a, resultT b) const
Definition funcimpl.h:5571
bool leaves_only
Definition funcimpl.h:5540
void serialize(const Archive &ar)
Definition funcimpl.h:5575
do_inner_local(const FunctionImpl< R, NDIM > *other, const bool leaves_only)
Definition funcimpl.h:5543
const FunctionImpl< R, NDIM > * other
Definition funcimpl.h:5539
resultT operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5545
typedef TENSOR_RESULT_TYPE(T, R) resultT
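do_inner_local, do_inner_local_on_demand and do_dot_local above are per-node reductions; the full result is obtained by summing over the locally owned nodes and then over all ranks. A minimal sketch of the corresponding user-level call, assuming functions f and g set up as in the gaxpy sketch earlier (the helper name overlap_example is made up for illustration).

#include <madness/mra/mra.h>
using namespace madness;

// assumes f and g were built as in the gaxpy sketch above
double overlap_example(World& world,
                       const real_function_3d& f,
                       const real_function_3d& g) {
    // <f|g>: a sum over the locally owned tree nodes followed by a
    // global reduction over all ranks
    double s = inner(f, g);
    if (world.rank() == 0) print("<f|g> =", s);
    return s;
}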
keep only the sum coefficients in each node
Definition funcimpl.h:2235
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2236
do_keep_sum_coeffs(implT *impl)
constructor needs impl for cdata
Definition funcimpl.h:2240
implT * impl
Definition funcimpl.h:2237
void serialize(const Archive &ar)
Definition funcimpl.h:2249
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2242
map and mirror the dimensions of this, writing the result to f
Definition funcimpl.h:2509
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2519
implT * f
Definition funcimpl.h:2513
std::vector< long > mirror
Definition funcimpl.h:2512
void serialize(const Archive &ar)
Definition funcimpl.h:2566
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2510
std::vector< long > map
Definition funcimpl.h:2512
do_map_and_mirror(const std::vector< long > map, const std::vector< long > mirror, implT &f)
Definition funcimpl.h:2516
map the dimensions of this onto f
Definition funcimpl.h:2429
do_mapdim(const std::vector< long > map, implT &f)
Definition funcimpl.h:2436
void serialize(const Archive &ar)
Definition funcimpl.h:2452
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2430
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2438
std::vector< long > map
Definition funcimpl.h:2432
do_mapdim()
Definition funcimpl.h:2435
implT * f
Definition funcimpl.h:2433
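do_mapdim permutes the coordinate axes of a function node by node. A hedged sketch of requesting such a permutation at the Function level; it assumes a free helper mapdim(f, map) is available in mra.h (an assumption about the public API), and the permutation {1, 0, 2} is only an example.

#include <madness/mra/mra.h>
#include <vector>
using namespace madness;

// assumes f was built as in the gaxpy sketch above
real_function_3d swap_xy_example(const real_function_3d& f) {
    // a permutation of the three axes; {1, 0, 2} exchanges the first two.
    // mapdim() (assumed public helper) drives the node-wise do_mapdim functor.
    std::vector<long> map = {1, 0, 2};
    return mapdim(f, map);
}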
merge the coefficient boxes of this into other's tree
Definition funcimpl.h:2393
bool operator()(typename rangeT::iterator &it) const
return the norm of the difference of this node and its "mirror" node
Definition funcimpl.h:2403
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2394
FunctionImpl< Q, NDIM > * other
Definition funcimpl.h:2395
do_merge_trees(const T alpha, const R beta, FunctionImpl< Q, NDIM > &other)
Definition funcimpl.h:2399
T alpha
Definition funcimpl.h:2396
do_merge_trees()
Definition funcimpl.h:2398
R beta
Definition funcimpl.h:2397
void serialize(const Archive &ar)
Definition funcimpl.h:2422
mirror the dimensions of this, writing the result to f
Definition funcimpl.h:2459
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2468
implT * f
Definition funcimpl.h:2463
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2460
do_mirror()
Definition funcimpl.h:2465
do_mirror(const std::vector< long > mirror, implT &f)
Definition funcimpl.h:2466
void serialize(const Archive &ar)
Definition funcimpl.h:2502
std::vector< long > mirror
Definition funcimpl.h:2462
Definition funcimpl.h:5511
double operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5512
void serialize(const Archive &ar)
Definition funcimpl.h:5527
double operator()(double a, double b) const
Definition funcimpl.h:5523
bundles arguments for lazy (deferred) application of an operator kernel
Definition funcimpl.h:4673
void serialize(Archive &ar)
Definition funcimpl.h:4682
Key< OPDIM > d
Definition funcimpl.h:4674
Key< OPDIM > key
Definition funcimpl.h:4674
keyT dest
Definition funcimpl.h:4675
double fac
Definition funcimpl.h:4676
do_op_args(const Key< OPDIM > &key, const Key< OPDIM > &d, const keyT &dest, double tol, double fac, double cnorm)
Definition funcimpl.h:4679
double cnorm
Definition funcimpl.h:4676
double tol
Definition funcimpl.h:4676
reduce the rank of the nodes, optional fence
Definition funcimpl.h:2255
do_reduce_rank(const TensorArgs &targs)
Definition funcimpl.h:2263
TensorArgs args
Definition funcimpl.h:2259
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2269
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2256
do_reduce_rank(const double &thresh)
Definition funcimpl.h:2264
void serialize(const Archive &ar)
Definition funcimpl.h:2275
Changes non-standard compressed form to standard compressed form.
Definition funcimpl.h:4637
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:4648
do_standard(implT *impl)
Definition funcimpl.h:4645
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:4638
void serialize(const Archive &ar)
Definition funcimpl.h:4665
implT * impl
Definition funcimpl.h:4641
given an NS tree resulting from a convolution, truncate leaf nodes if appropriate
Definition funcimpl.h:2176
void serialize(const Archive &ar)
Definition funcimpl.h:2196
const implT * f
Definition funcimpl.h:2178
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2182
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2177
do_truncate_NS_leafs(const implT *f)
Definition funcimpl.h:2180
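do_truncate_NS_leafs prunes negligible leaf boxes of the nonstandard (NS) tree that results from applying an integral operator; at the user level the analogous step is an explicit truncation of the result. A minimal sketch, assuming a function f as in the earlier sketches.

#include <madness/mra/mra.h>
using namespace madness;

// assumes f was built as in the gaxpy sketch above
void truncate_example(real_function_3d& f) {
    // discard leaf coefficients that are negligible at the default threshold;
    // an explicit tolerance can be passed instead, e.g. f.truncate(1e-4)
    f.truncate();
}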
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2655
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2659
implT * impl
Definition funcimpl.h:2656
void serialize(const Archive &ar)
Definition funcimpl.h:2677
do_unary_op_value_inplace(implT *impl, const opT &op)
Definition funcimpl.h:2658
Hartree product of two LDIM functions to yield a NDIM = 2*LDIM function.
Definition funcimpl.h:3602
this_type forward_ctor(implT *result1, const ctL &p11, const ctL &p22, const leaf_opT &leaf_op)
Definition funcimpl.h:3658
bool randomize() const
Definition funcimpl.h:3603
void serialize(const Archive &ar)
Definition funcimpl.h:3662
hartree_op(implT *result, const ctL &p11, const ctL &p22, const leaf_opT &leaf_op)
Definition funcimpl.h:3614
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:3606
ctL p2
tracking coeffs of the two lo-dim functions
Definition funcimpl.h:3609
leaf_opT leaf_op
determine if a given node will be a leaf node
Definition funcimpl.h:3610
hartree_op()
Definition funcimpl.h:3613
implT * result
where to construct the pair function
Definition funcimpl.h:3608
hartree_op< LDIM, leaf_opT > this_type
Definition funcimpl.h:3605
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
Definition funcimpl.h:3619
ctL p1
Definition funcimpl.h:3609
this_type make_child(const keyT &child) const
Definition funcimpl.h:3642
Future< this_type > activate() const
Definition funcimpl.h:3651
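hartree_op builds the tree of the product f(1)g(2) adaptively. A hedged sketch of the user-level entry point, assuming the free function hartree_product() from mra.h; the 6-d defaults shown (polynomial order, threshold, TT_2D tensor type, cell) are illustrative values only.

#include <madness/mra/mra.h>
using namespace madness;

// assumes f and g are 3-d functions built as in the gaxpy sketch above
real_function_6d pair_example(const real_function_3d& f,
                              const real_function_3d& g) {
    // 6-d defaults must be set before the first 6-d function is created;
    // the values and the TT_2D low-rank tensor type are illustrative choices
    FunctionDefaults<6>::set_k(FunctionDefaults<3>::get_k());
    FunctionDefaults<6>::set_thresh(1e-3);
    FunctionDefaults<6>::set_tensor_type(TT_2D);
    FunctionDefaults<6>::set_cubic_cell(-10.0, 10.0);

    // h(1,2) = f(1) g(2); the tree is assembled by the hartree_op functor above
    return hartree_product(f, g);
}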
perform this multiplication: h(1,2) = f(1,2) * g(1)
Definition funcimpl.h:3410
multiply_op()
Definition funcimpl.h:3422
ctL g
Definition funcimpl.h:3419
Future< this_type > activate() const
Definition funcimpl.h:3501
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:3414
implT * h
the result function h(1,2) = f(1,2) * g(1)
Definition funcimpl.h:3417
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:3413
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
apply this multiplication to the FunctionNodes of f and g at Key key
Definition funcimpl.h:3449
this_type forward_ctor(implT *h1, const ctT &f1, const ctL &g1, const int particle)
Definition funcimpl.h:3508
static bool randomize()
Definition funcimpl.h:3412
int particle
whether g is g(1) or g(2)
Definition funcimpl.h:3420
ctT f
Definition funcimpl.h:3418
multiply_op< LDIM > this_type
Definition funcimpl.h:3415
multiply_op(implT *h1, const ctT &f1, const ctL &g1, const int particle1)
Definition funcimpl.h:3424
bool screen(const coeffT &fcoeff, const coeffT &gcoeff, const keyT &key) const
return true if this will be a leaf node
Definition funcimpl.h:3430
this_type make_child(const keyT &child) const
Definition funcimpl.h:3491
void serialize(const Archive &ar)
Definition funcimpl.h:3512
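multiply_op evaluates h(1,2) = f(1,2)*g(1) on a refined grid. A hedged sketch; it assumes a free helper multiply(f, g, particle) is declared in mra.h (an assumption about the exact name and signature), with particle selecting whether g depends on particle 1 or 2.

#include <madness/mra/mra.h>
using namespace madness;

// assumes f2 is a 6-d pair function (e.g. from the hartree_product sketch)
// and g is a 3-d function
real_function_6d multiply_example(const real_function_6d& f2,
                                  const real_function_3d& g) {
    // h(1,2) = f2(1,2) * g(1); pass particle=2 to multiply by g(2) instead.
    // copies are taken because the operation may change the tree state of its inputs
    return multiply(copy(f2), copy(g), 1);
}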
coeffT val_lhs
Definition funcimpl.h:3889
double lo
Definition funcimpl.h:3892
double lo1
Definition funcimpl.h:3892
long oversampling
Definition funcimpl.h:3890
double error
Definition funcimpl.h:3891
tensorT operator()(const Key< NDIM > key, const tensorT &coeff_rhs)
multiply the values of rhs and lhs, result on rhs; rhs and lhs have the same dimensions
Definition funcimpl.h:3907
coeffT coeff_lhs
Definition funcimpl.h:3889
void serialize(const Archive &ar)
Definition funcimpl.h:3995
double lo2
Definition funcimpl.h:3892
double hi1
Definition funcimpl.h:3892
pointwise_multiplier(const Key< NDIM > key, const coeffT &clhs)
Definition funcimpl.h:3895
coeffT operator()(const Key< NDIM > key, const tensorT &coeff_rhs, const int particle)
multiply the values of rhs and lhs, result on rhs; rhs and lhs have different dimensions
Definition funcimpl.h:3952
double hi2
Definition funcimpl.h:3892
double hi
Definition funcimpl.h:3892
project the low-dimensional function g onto the high-dimensional function f: result(x) = <f(x,y) | g(y)>
Definition funcimpl.h:6920
project_out_op(const implT *fimpl, implL1 *result, const ctL &iag, const int dim)
Definition funcimpl.h:6935
ctL iag
the low dim function g
Definition funcimpl.h:6930
FunctionImpl< T, NDIM-LDIM > implL1
Definition funcimpl.h:6925
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:7014
std::pair< bool, coeffT > argT
Definition funcimpl.h:6926
const implT * fimpl
the hi dim function f
Definition funcimpl.h:6928
this_type forward_ctor(const implT *fimpl1, implL1 *result1, const ctL &iag1, const int dim1)
taskq-compatible ctor
Definition funcimpl.h:7021
this_type make_child(const keyT &child) const
Definition funcimpl.h:7005
project_out_op< LDIM > this_type
Definition funcimpl.h:6923
implL1 * result
the low dim result function
Definition funcimpl.h:6929
Future< argT > operator()(const Key< NDIM > &key) const
do the actual contraction
Definition funcimpl.h:6942
void serialize(const Archive &ar)
Definition funcimpl.h:7025
project_out_op(const project_out_op &other)
Definition funcimpl.h:6937
int dim
0: project 0..LDIM-1, 1: project LDIM..NDIM-1
Definition funcimpl.h:6931
bool randomize() const
Definition funcimpl.h:6921
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:6924
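project_out_op contracts a high-dimensional function with a low-dimensional one over one particle, result(x) = <f(x,y) | g(y)>. A hedged sketch assuming the member Function::project_out(g, dim) (an assumption about the exact signature); per the dim flag above, dim=0 integrates over the first LDIM dimensions.

#include <madness/mra/mra.h>
using namespace madness;

// assumes f2 is a 6-d pair function and g a 3-d function, as in the sketches above
real_function_3d project_out_example(const real_function_6d& f2,
                                     const real_function_3d& g) {
    // result(2) = <g(1) | f2(1,2)>_1 : integrate particle 1 out of f2 against g;
    // dim=1 would integrate over the second particle instead
    return f2.project_out(g, 0);
}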
recursive part of recursive_apply
Definition funcimpl.h:5338
ctT iaf
Definition funcimpl.h:5346
recursive_apply_op2< opT > this_type
Definition funcimpl.h:5341
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:5401
const opT * apply_op
need this for randomization
Definition funcimpl.h:5347
bool randomize() const
Definition funcimpl.h:5339
recursive_apply_op2(const recursive_apply_op2 &other)
Definition funcimpl.h:5354
void serialize(const Archive &ar)
Definition funcimpl.h:5417
argT finalize(const double kernel_norm, const keyT &key, const coeffT &coeff, const implT *r) const
sole purpose is to wait for the kernel norm, wrap it and send it back to caller
Definition funcimpl.h:5387
this_type make_child(const keyT &child) const
Definition funcimpl.h:5396
recursive_apply_op2(implT *result, const ctT &iaf, const opT *apply_op)
Definition funcimpl.h:5351
std::pair< bool, coeffT > argT
Definition funcimpl.h:5343
implT * result
Definition funcimpl.h:5345
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:5342
argT operator()(const Key< NDIM > &key) const
send off the application of the operator
Definition funcimpl.h:5363
this_type forward_ctor(implT *result1, const ctT &iaf1, const opT *apply_op1)
taskq-compatible ctor
Definition funcimpl.h:5413
recursive part of recursive_apply
Definition funcimpl.h:5207
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
make the NS-coefficients and send off the application of the operator
Definition funcimpl.h:5232
this_type forward_ctor(implT *r, const CoeffTracker< T, LDIM > &f1, const CoeffTracker< T, LDIM > &g1, const opT *apply_op1)
Definition funcimpl.h:5297
opT * apply_op
Definition funcimpl.h:5215
recursive_apply_op(const recursive_apply_op &other)
Definition funcimpl.h:5225
recursive_apply_op< opT, LDIM > this_type
Definition funcimpl.h:5210
Future< this_type > activate() const
Definition funcimpl.h:5290
bool randomize() const
Definition funcimpl.h:5208
implT * result
Definition funcimpl.h:5212
CoeffTracker< T, LDIM > iaf
Definition funcimpl.h:5213
void serialize(const Archive &ar)
Definition funcimpl.h:5302
std::pair< bool, coeffT > finalize(const double kernel_norm, const keyT &key, const coeffT &coeff) const
sole purpose is to wait for the kernel norm, wrap it and send it back to caller
Definition funcimpl.h:5272
recursive_apply_op(implT *result, const CoeffTracker< T, LDIM > &iaf, const CoeffTracker< T, LDIM > &iag, const opT *apply_op)
Definition funcimpl.h:5219
this_type make_child(const keyT &child) const
Definition funcimpl.h:5281
CoeffTracker< T, LDIM > iag
Definition funcimpl.h:5214
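recursive_apply_op and recursive_apply_op2 walk the tree while an integral operator is applied to the source coefficients. A minimal sketch of the standard user-level pattern with the Coulomb (Poisson) kernel; the helper name coulomb_example and the accuracy parameters 1e-4/1e-6 are illustrative assumptions.

#include <madness/mra/mra.h>
#include <madness/mra/operator.h>
using namespace madness;

// assumes rho is a 3-d density built as in the gaxpy sketch above
real_function_3d coulomb_example(World& world, real_function_3d rho) {
    // separated representation of 1/r; 1e-4 and 1e-6 are illustrative accuracy knobs
    real_convolution_3d op = CoulombOperator(world, 1e-4, 1e-6);

    // v(r) = \int rho(r') / |r-r'| dr'; the tree walk is done by the
    // recursive_apply machinery documented above
    real_function_3d v = apply(op, rho);
    v.truncate();
    return v;
}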
remove all coefficients of internal nodes
Definition funcimpl.h:2201
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2202
remove_internal_coeffs()=default
constructor needs impl for cdata
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2207
void serialize(const Archive &ar)
Definition funcimpl.h:2213
remove all coefficients of leaf nodes
Definition funcimpl.h:2218
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2224
remove_leaf_coeffs()=default
constructor needs impl for cdata
void serialize(const Archive &ar)
Definition funcimpl.h:2229
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2219
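remove_internal_coeffs, remove_leaf_coeffs and do_keep_sum_coeffs above operate on individual nodes when the representation of a function is changed. At the user level this corresponds to the compress/reconstruct round trip sketched below (a minimal illustration; the helper name representation_example is made up).

#include <madness/mra/mra.h>
using namespace madness;

// assumes f was built as in the gaxpy sketch above
void representation_example(World& world, real_function_3d& f) {
    double n0 = f.norm2();   // reconstructed form: sum coefficients on the leaves

    f.compress();            // wavelet (difference) coefficients on internal nodes
    double n1 = f.norm2();   // the norm is independent of the representation

    f.reconstruct();         // back to sum coefficients on the leaves
    if (world.rank() == 0) print("norms before/after compress:", n0, n1);
}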
Definition funcimpl.h:4488
void serialize(Archive &ar)
Definition funcimpl.h:4492
bool operator()(const implT *f, const keyT &key, const nodeT &t) const
Definition funcimpl.h:4489
shallow-copy, pared-down version of FunctionNode, for special purposes only
Definition funcimpl.h:749
coeffT & coeff()
Definition funcimpl.h:763
GenTensor< T > coeffT
Definition funcimpl.h:750
bool is_leaf() const
Definition funcimpl.h:765
void serialize(Archive &ar)
Definition funcimpl.h:767
ShallowNode(const ShallowNode< T, NDIM > &node)
Definition funcimpl.h:758
ShallowNode(const FunctionNode< T, NDIM > &node)
Definition funcimpl.h:755
bool has_children() const
Definition funcimpl.h:764
ShallowNode()
Definition funcimpl.h:754
bool _has_children
Definition funcimpl.h:752
double dnorm
Definition funcimpl.h:753
const coeffT & coeff() const
Definition funcimpl.h:762
coeffT _coeffs
Definition funcimpl.h:751
TensorArgs holds the arguments for creating a LowRankTensor.
Definition gentensor.h:134
double thresh
Definition gentensor.h:135
TensorType tt
Definition gentensor.h:136
inserts/accumulates coefficients into impl's tree
Definition funcimpl.h:716
FunctionImpl< T, NDIM > * impl
Definition funcimpl.h:720
FunctionNode< T, NDIM > nodeT
Definition funcimpl.h:718
accumulate_op(const accumulate_op &other)=default
void operator()(const Key< NDIM > &key, const coeffT &coeff, const bool &is_leaf) const
Definition funcimpl.h:724
void serialize(Archive &ar)
Definition funcimpl.h:728
GenTensor< T > coeffT
Definition funcimpl.h:717
accumulate_op(FunctionImpl< T, NDIM > *f)
Definition funcimpl.h:722
static void load(const Archive &ar, FunctionImpl< T, NDIM > *&ptr)
Definition funcimpl.h:7247
static void load(const Archive &ar, const FunctionImpl< T, NDIM > *&ptr)
Definition funcimpl.h:7216
static void load(const Archive &ar, std::shared_ptr< FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7295
static void load(const Archive &ar, std::shared_ptr< const FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7279
Default load of an object via serialize(ar, t).
Definition archive.h:666
static void load(const A &ar, const U &t)
Load an object.
Definition archive.h:678
static void store(const Archive &ar, FunctionImpl< T, NDIM > *const &ptr)
Definition funcimpl.h:7269
static void store(const Archive &ar, const FunctionImpl< T, NDIM > *const &ptr)
Definition funcimpl.h:7238
static void store(const Archive &ar, const std::shared_ptr< FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7304
static void store(const Archive &ar, const std::shared_ptr< const FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7288
Default store of an object via serialize(ar, t).
Definition archive.h:611
static std::enable_if_t< is_output_archive_v< A > &&!std::is_function< U >::value &&(has_member_serialize_v< U, A >||has_nonmember_serialize_v< U, A >||has_freestanding_serialize_v< U, A >||has_freestanding_default_serialize_v< U, A >), void > store(const A &ar, const U &t)
Definition archive.h:621
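The ArchiveLoadImpl/ArchiveStoreImpl specializations above let FunctionImpl pointers pass through MADNESS archives. A hedged sketch of checkpointing a whole function to disk; it assumes the free helpers save()/load() from mra.h (an assumption about the public API), and the file name f_checkpoint is arbitrary.

#include <madness/mra/mra.h>
using namespace madness;

// assumes f was built as in the gaxpy sketch above
void checkpoint_example(World& world, const real_function_3d& f) {
    save(f, "f_checkpoint");                       // parallel archive on disk (assumed helper)

    real_function_3d g = real_factory_3d(world);   // empty function to load into
    load(g, "f_checkpoint");

    double err = (f - g).norm2();                  // should vanish up to I/O round-off
    if (world.rank() == 0) print("restore error:", err);
}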
Definition funcimpl.h:610
void serialize(Archive &ar)
Definition funcimpl.h:674
const opT * op
Definition funcimpl.h:617
hartree_convolute_leaf_op(const implT *f, const implL *g, const opT *op)
Definition funcimpl.h:621
bool operator()(const Key< NDIM > &key) const
no pre-determination
Definition funcimpl.h:625
bool operator()(const Key< NDIM > &key, const Tensor< T > &fcoeff, const Tensor< T > &gcoeff) const
post-determination: true if f is a leaf and the result is well-represented
Definition funcimpl.h:638
const implL * g
Definition funcimpl.h:616
const FunctionImpl< T, NDIM > * f
Definition funcimpl.h:615
FunctionImpl< T, LDIM > implL
Definition funcimpl.h:613
bool do_error_leaf_op() const
Definition funcimpl.h:618
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:612
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
no post-determination
Definition funcimpl.h:628
returns true if the result of a hartree_product is a leaf node (compute norm & error)
Definition funcimpl.h:500
bool do_error_leaf_op() const
Definition funcimpl.h:505
const FunctionImpl< T, NDIM > * f
Definition funcimpl.h:503
hartree_leaf_op(const implT *f, const long &k)
Definition funcimpl.h:508
long k
Definition funcimpl.h:504
void serialize(Archive &ar)
Definition funcimpl.h:556
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
no post-determination
Definition funcimpl.h:514
bool operator()(const Key< NDIM > &key, const Tensor< T > &fcoeff, const Tensor< T > &gcoeff) const
post-determination: true if f is a leaf and the result is well-represented
Definition funcimpl.h:524
bool operator()(const Key< NDIM > &key) const
no pre-determination
Definition funcimpl.h:511
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:502
inserts/replaces the coefficients in the function
Definition funcimpl.h:692
insert_op()
Definition funcimpl.h:699
implT * impl
Definition funcimpl.h:698
void operator()(const keyT &key, const coeffT &coeff, const bool &is_leaf) const
Definition funcimpl.h:702
FunctionNode< T, NDIM > nodeT
Definition funcimpl.h:696
Key< NDIM > keyT
Definition funcimpl.h:694
insert_op(const insert_op &other)
Definition funcimpl.h:701
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:693
GenTensor< T > coeffT
Definition funcimpl.h:695
insert_op(implT *f)
Definition funcimpl.h:700
void serialize(Archive &ar)
Definition funcimpl.h:706
Definition mra.h:112
Definition funcimpl.h:680
bool operator()(const Key< NDIM > &key, const GenTensor< T > &fcoeff, const GenTensor< T > &gcoeff) const
Definition funcimpl.h:682
void serialize(Archive &ar)
Definition funcimpl.h:686
void operator()(const Key< NDIM > &key, const GenTensor< T > &coeff, const bool &is_leaf) const
Definition funcimpl.h:681
Definition funcimpl.h:564
bool operator()(const Key< NDIM > &key, const double &cnorm) const
post-determination: return true if operator and coefficient norms are small
Definition funcimpl.h:585
void serialize(Archive &ar)
Definition funcimpl.h:600
const implT * f
the source or result function, needed for truncate_tol
Definition funcimpl.h:568
op_leaf_op(const opT *op, const implT *f)
Definition funcimpl.h:572
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:565
const opT * op
the convolution operator
Definition funcimpl.h:567
bool do_error_leaf_op() const
Definition funcimpl.h:569
bool operator()(const Key< NDIM > &key) const
pre-determination: we can't know if this will be a leaf node before we have the final coeffs
Definition funcimpl.h:575
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
post-determination: return true if operator and coefficient norms are small
Definition funcimpl.h:578
Definition lowrankfunction.h:332
Definition funcimpl.h:736
void serialize(Archive &ar)
Definition funcimpl.h:743
bool operator()(const Key< NDIM > &key, const T &t, const R &r) const
Definition funcimpl.h:742
bool operator()(const Key< NDIM > &key, const T &t) const
Definition funcimpl.h:739
Defines and implements most of Tensor.
#define ITERATOR(t, exp)
Definition tensor_macros.h:249
#define IND
Definition tensor_macros.h:204
#define TERNARY_OPTIMIZED_ITERATOR(X, x, Y, y, Z, z, exp)
Definition tensor_macros.h:719
#define TENSOR_RESULT_TYPE(L, R)
This macro simplifies access to TensorResultType.
Definition type_data.h:205
#define PROFILE_MEMBER_FUNC(classname)
Definition worldprofile.h:210
#define PROFILE_BLOCK(name)
Definition worldprofile.h:208
int ProcessID
Used to clearly identify process number/rank.
Definition worldtypes.h:43