1/*
2 This file is part of MADNESS.
3
4 Copyright (C) 2007,2010 Oak Ridge National Laboratory
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
20 For more information please contact:
21
22 Robert J. Harrison
23 Oak Ridge National Laboratory
24 One Bethel Valley Road
25 P.O. Box 2008, MS-6367
26
27 email: harrisonrj@ornl.gov
28 tel: 865-241-3937
29 fax: 865-572-0680
30*/
31
32#ifndef MADNESS_MRA_FUNCIMPL_H__INCLUDED
33#define MADNESS_MRA_FUNCIMPL_H__INCLUDED
34
35/// \file funcimpl.h
36/// \brief Provides FunctionCommonData, FunctionImpl and FunctionFactory
37
39#include <madness/world/print.h>
40#include <madness/misc/misc.h>
43
45#include <madness/mra/indexit.h>
46#include <madness/mra/key.h>
50
51#include <madness/mra/leafop.h>
52
53#include <array>
54#include <iostream>
55#include <type_traits>
56
57namespace madness {
58 template <typename T, std::size_t NDIM>
59 class DerivativeBase;
60
61 template<typename T, std::size_t NDIM>
62 class FunctionImpl;
63
64 template<typename T, std::size_t NDIM>
65 class FunctionNode;
66
67 template<typename T, std::size_t NDIM>
68 class Function;
69
70 template<typename T, std::size_t NDIM>
71 class FunctionFactory;
72
73 template<typename T, std::size_t NDIM, std::size_t MDIM>
74 class CompositeFunctorInterface;
75
76 template<int D>
78
79}
80
81namespace madness {
82
83
84 /// A simple process map
85 template<typename keyT>
86 class SimplePmap : public WorldDCPmapInterface<keyT> {
87 private:
88 const int nproc;
89 const ProcessID me;
90
91 public:
92 SimplePmap(World& world) : nproc(world.nproc()), me(world.rank())
93 { }
94
95 ProcessID owner(const keyT& key) const {
96 if (key.level() == 0)
97 return 0;
98 else
99 return key.hash() % nproc;
100 }
101 };
102
103 /// A pmap that locates children on odd levels with their even level parents
104 template <typename keyT>
105 class LevelPmap : public WorldDCPmapInterface<keyT> {
106 private:
107 const int nproc;
108 public:
109 LevelPmap() : nproc(0) {};
110
111 LevelPmap(World& world) : nproc(world.nproc()) {}
112
113 /// Find the owner of a given key
114 ProcessID owner(const keyT& key) const {
115 Level n = key.level();
116 if (n == 0) return 0;
117 hashT hash;
118 if (n <= 3 || (n&0x1)) hash = key.hash();
119 else hash = key.parent().hash();
120 return hash%nproc;
121 }
122 };
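    // A minimal usage sketch (assumes a madness::World instance named `world`;
    // Key and Vector are constructed as elsewhere in this file):
    //
    //   LevelPmap< Key<3> > pmap(world);
    //   const Vector<Translation,3> lp{1,2,3}, lc{2,4,6};
    //   Key<3> parent(3,lp), child(4,lc);
    //   // A key at an even level above 3 hashes via its parent, so
    //   // pmap.owner(child) == pmap.owner(parent): such parent/child pairs
    //   // are colocated on one process.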
123
124
125 /// FunctionNode holds the coefficients, etc., at each node of the 2^NDIM-tree
126 template<typename T, std::size_t NDIM>
127 class FunctionNode {
128 public:
129 typedef GenTensor<T> coeffT;
130 typedef Tensor<T> tensorT;
131 private:
132 // Should compile OK with these volatile but there should
133 // be no need to set as volatile since the container internally
134 // stores the entire entry as volatile
135
136 coeffT _coeffs; ///< The coefficients, if any
137 double _norm_tree; ///< After norm_tree will contain norm of coefficients summed up tree
138 bool _has_children; ///< True if there are children
139 coeffT buffer; ///< Accumulation buffer used by accumulate(); merged back by consolidate_buffer()
140 double dnorm=-1.0; ///< norm of the d coefficients, also defined if there are no d coefficients
141 double snorm=-1.0; ///< norm of the s coefficients
142
143 public:
144 typedef WorldContainer<Key<NDIM> , FunctionNode<T, NDIM> > dcT; ///< Type of container holding the nodes
145 /// Default constructor makes node without coeff or children
147 FunctionNode() : _coeffs(), _norm_tree(1e300), _has_children(false) {
148 }
149
150 /// Constructor from given coefficients with optional children
151
152 /// Note that only a shallow copy of the coeff are taken so
153 /// you should pass in a deep copy if you want the node to
154 /// take ownership.
155 explicit
159
160 explicit
164
165 explicit
169
172 dnorm(other.dnorm), snorm(other.snorm) {
173 }
174
176 FunctionNode<T,NDIM>& operator=(const FunctionNode<T,NDIM>& other) {
177 if (this != &other) {
178 coeff() = copy(other.coeff());
179 _norm_tree = other._norm_tree;
180 _has_children = other._has_children;
181 dnorm=other.dnorm;
182 snorm=other.snorm;
184 }
185 return *this;
186 }
187
188 /// Copy with possible type conversion of coefficients, copying all other state
189
190 /// Choose to not overload copy and type conversion operators
191 /// so there are no automatic type conversions.
192 template<typename Q>
193 FunctionNode<Q, NDIM>
194 convert() const {
195 return FunctionNode<Q, NDIM> (madness::convert<Q,T>(coeff()), _norm_tree, snorm, dnorm, _has_children);
196 }
197
198 /// Returns true if there are coefficients in this node
199 bool
200 has_coeff() const {
201 return _coeffs.has_data();
202 }
203
204
205 /// Returns true if this node has children
206 bool
207 has_children() const {
208 return _has_children;
209 }
210
211 /// Returns true if this does not have children
212 bool
213 is_leaf() const {
214 return !_has_children;
215 }
216
217 /// Returns true if this node is invalid (no coeffs and no children)
218 bool
219 is_invalid() const {
220 return !(has_coeff() || has_children());
221 }
222
223 /// Returns a non-const reference to the tensor containing the coeffs
224
225 /// Returns an empty tensor if there are no coefficients.
226 coeffT&
227 coeff() {
228 MADNESS_ASSERT(_coeffs.ndim() == -1 || (_coeffs.dim(0) <= 2
229 * MAXK && _coeffs.dim(0) >= 0));
230 return const_cast<coeffT&>(_coeffs);
231 }
232
233 /// Returns a const reference to the tensor containing the coeffs
234
235 /// Returns an empty tensor if there are no coefficients.
236 const coeffT&
237 coeff() const {
238 return const_cast<const coeffT&>(_coeffs);
239 }
240
241 /// Returns the number of coefficients in this node
242 size_t size() const {
243 return _coeffs.size();
244 }
245
246 public:
247
248 /// reduces the rank of the coefficients (if applicable)
249 void reduceRank(const double& eps) {
250 _coeffs.reduce_rank(eps);
251 }
252
253 /// Sets \c has_children attribute to value of \c flag.
254 void set_has_children(bool flag) {
255 _has_children = flag;
256 }
257
258 /// Sets \c has_children attribute to true, recurring up the tree to ensure the node stays connected to its parent
259 void set_has_children_recursive(const dcT& c, const Key<NDIM>& key) {
260 //madness::print(" set_chi_recu: ", key, *this);
261 //PROFILE_MEMBER_FUNC(FunctionNode); // Too fine grain for routine profiling
262 if (!(has_children() || has_coeff() || key.level()==0)) {
263 // If node already knows it has children or it has
264 // coefficients then it must already be connected to
265 // its parent. If not, the node was probably just
266 // created for this operation and must be connected to
267 // its parent.
268 Key<NDIM> parent = key.parent();
269 // Task on next line used to be TaskAttributes::hipri()) ... but deferring execution of this
270 // makes sense since it is not urgent and lazy connection will likely mean that less forwarding
271 // will happen since the upper level task will have already made the connection.
272 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
273 //const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
274 //madness::print(" set_chi_recu: forwarding",key,parent);
275 }
276 _has_children = true;
277 }
278
279 /// Sets \c has_children attribute to value of \c !flag
280 void set_is_leaf(bool flag) {
281 _has_children = !flag;
282 }
283
284 /// Takes a \em shallow copy of the coeff --- same as \c this->coeff()=coeff
285 void set_coeff(const coeffT& coeffs) {
286 coeff() = coeffs;
287 if ((_coeffs.has_data()) and ((_coeffs.dim(0) < 0) || (_coeffs.dim(0)>2*MAXK))) {
288 print("set_coeff: may have a problem");
289 print("set_coeff: coeff.dim[0] =", coeffs.dim(0), ", 2* MAXK =", 2*MAXK);
290 }
291 MADNESS_ASSERT(coeffs.dim(0)<=2*MAXK && coeffs.dim(0)>=0);
292 }
293
294 /// Clears the coefficients (has_coeff() will subsequently return false)
295 void clear_coeff() {
296 coeff()=coeffT();
297 }
298
299 /// Scale the coefficients of this node
300 template <typename Q>
301 void scale(Q a) {
302 _coeffs.scale(a);
303 }
304
305 /// Sets the value of norm_tree
306 void set_norm_tree(double norm_tree) {
307 _norm_tree = norm_tree;
308 }
309
310 /// Gets the value of norm_tree
311 double get_norm_tree() const {
312 return _norm_tree;
313 }
314
315 /// return the precomputed norm of the (virtual) d coefficients
316 double get_dnorm() const {
317 return dnorm;
318 }
319
320 /// set the precomputed norm of the (virtual) s coefficients
321 void set_snorm(const double sn) {
322 snorm=sn;
323 }
324
325 /// set the precomputed norm of the (virtual) d coefficients
326 void set_dnorm(const double dn) {
327 dnorm=dn;
328 }
329
330 /// get the precomputed norm of the (virtual) s coefficients
331 double get_snorm() const {
332 return snorm;
333 }
334
336 snorm = 0.0;
337 dnorm = 0.0;
338 if (coeff().size() == 0) { ;
339 } else if (coeff().dim(0) == cdata.vk[0]) {
340 snorm = coeff().normf();
341
342 } else if (coeff().is_full_tensor()) {
343 Tensor<T> c = copy(coeff().get_tensor());
344 snorm = c(cdata.s0).normf();
345 c(cdata.s0) = 0.0;
346 dnorm = c.normf();
347
348 } else if (coeff().is_svd_tensor()) {
349 coeffT c= coeff()(cdata.s0);
350 snorm = c.normf();
351 double norm = coeff().normf();
352 dnorm = sqrt(norm * norm - snorm * snorm);
353
354 } else {
355 MADNESS_EXCEPTION("cannot use compute_dnorm", 1);
356 }
357 }
358
359
360 /// General bi-linear operation --- this = this*alpha + other*beta
361
362 /// This/other may not have coefficients. Has_children will be
363 /// true in the result if either this/other have children.
364 template <typename Q, typename R>
365 void gaxpy_inplace(const T& alpha, const FunctionNode<Q,NDIM>& other, const R& beta) {
366 //PROFILE_MEMBER_FUNC(FuncNode); // Too fine grain for routine profiling
367 if (other.has_children())
368 _has_children = true;
369 if (has_coeff()) {
370 if (other.has_coeff()) {
371 coeff().gaxpy(alpha,other.coeff(),beta);
372 }
373 else {
374 coeff().scale(alpha);
375 }
376 }
377 else if (other.has_coeff()) {
378 coeff() = other.coeff()*beta; //? Is this the correct type conversion?
379 }
380 }
381
382 /// Accumulate inplace and if necessary connect node to parent
383 void accumulate2(const tensorT& t, const typename FunctionNode<T,NDIM>::dcT& c,
384 const Key<NDIM>& key) {
385 // double cpu0=cpu_time();
386 if (has_coeff()) {
387 MADNESS_ASSERT(coeff().is_full_tensor());
388 // if (coeff().type==TT_FULL) {
389 coeff() += coeffT(t,-1.0,TT_FULL);
390 // } else {
391 // tensorT cc=coeff().full_tensor_copy();;
392 // cc += t;
393 // coeff()=coeffT(cc,args);
394 // }
395 }
396 else {
397 // No coeff and no children means the node is newly
398 // created for this operation and therefore we must
399 // tell its parent that it exists.
400 coeff() = coeffT(t,-1.0,TT_FULL);
401 // coeff() = copy(t);
402 // coeff() = coeffT(t,args);
403 if ((!_has_children) && key.level()> 0) {
404 Key<NDIM> parent = key.parent();
405 if (c.is_local(parent))
406 const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
407 else
408 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
409 }
410 }
411 //double cpu1=cpu_time();
412 }
413
414
415 /// Accumulate inplace and if necessary connect node to parent
416 void accumulate(const coeffT& t, const typename FunctionNode<T,NDIM>::dcT& c,
417 const Key<NDIM>& key, const TensorArgs& args) {
418 if (has_coeff()) {
419 coeff().add_SVD(t,args.thresh);
420 if (buffer.rank()<coeff().rank()) {
421 if (buffer.has_data()) {
422 buffer.add_SVD(coeff(),args.thresh);
423 } else {
424 buffer=copy(coeff());
425 }
426 coeff()=coeffT();
427 }
428
429 } else {
430 // No coeff and no children means the node is newly
431 // created for this operation and therefore we must
432 // tell its parent that it exists.
433 coeff() = copy(t);
434 if ((!_has_children) && key.level()> 0) {
435 Key<NDIM> parent = key.parent();
436 if (c.is_local(parent))
437 const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
438 else
439 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
440 }
441 }
442 }
443
444 void consolidate_buffer(const TensorArgs& args) {
445 if ((coeff().has_data()) and (buffer.has_data())) {
446 coeff().add_SVD(buffer,args.thresh);
447 } else if (buffer.has_data()) {
448 coeff()=buffer;
449 }
450 buffer=coeffT();
451 }
452
453 T trace_conj(const FunctionNode<T,NDIM>& rhs) const {
454 return this->_coeffs.trace_conj((rhs._coeffs));
455 }
456
457 template <typename Archive>
458 void serialize(Archive& ar) {
459 ar & coeff() & _norm_tree & _has_children & dnorm & snorm;
460 }
461
462 /// like operator<<(ostream&, const FunctionNode<T,NDIM>&) but
463 /// produces a sequence of JSON-formatted key-value pairs
464 /// @warning enclose the output in curly braces to make
465 /// a valid JSON object
466 void print_json(std::ostream& s) const {
467 s << "\"has_coeff\":" << this->has_coeff()
468 << ",\"has_children\":" << this->has_children() << ",\"norm\":";
469 double norm = this->has_coeff() ? this->coeff().normf() : 0.0;
470 if (norm < 1e-12)
471 norm = 0.0;
472 double nt = this->get_norm_tree();
473 if (nt == 1e300)
474 nt = 0.0;
475 s << norm << ",\"norm_tree\":" << nt << ",\"snorm\":"
476 << this->get_snorm() << ",\"dnorm\":" << this->get_dnorm()
477 << ",\"rank\":" << this->coeff().rank();
478 if (this->coeff().is_assigned())
479 s << ",\"dim\":" << this->coeff().dim(0);
480 }
481
482 };
483
484 template <typename T, std::size_t NDIM>
485 std::ostream& operator<<(std::ostream& s, const FunctionNode<T,NDIM>& node) {
486 s << "(has_coeff=" << node.has_coeff() << ", has_children=" << node.has_children() << ", norm=";
487 double norm = node.has_coeff() ? node.coeff().normf() : 0.0;
488 if (norm < 1e-12)
489 norm = 0.0;
490 double nt = node.get_norm_tree();
491 if (nt == 1e300) nt = 0.0;
492 s << norm << ", norm_tree, s/dnorm =" << nt << ", " << node.get_snorm() << " " << node.get_dnorm() << "), rank="<< node.coeff().rank()<<")";
493 if (node.coeff().is_assigned()) s << " dim " << node.coeff().dim(0) << " ";
494 return s;
495 }
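    // A minimal sketch of inspecting a node while iterating a coefficient
    // container (iteration as in FunctionImpl::copy_coeffs further below):
    //
    //   const Key<3>& key = it->first;
    //   const FunctionNode<double,3>& node = it->second;
    //   if (node.has_coeff()) double nrm = node.coeff().normf();
    //   if (node.is_leaf())   std::cout << key << " " << node << std::endl;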
496
497
498 /// returns true if the result of a hartree_product is a leaf node (compute norm & error)
499 template<typename T, size_t NDIM>
500 struct hartree_leaf_op {
501 typedef FunctionImpl<T,NDIM> implT;
502
503 const implT* f;
504 long k;
505 bool do_error_leaf_op() const {return false;}
506
507 hartree_leaf_op() = default;
508 hartree_leaf_op(const implT* f, const long& k) : f(f), k(k) {}
509
510 /// no pre-determination
511 bool operator()(const Key<NDIM>& key) const {return false;}
512
513 /// no post-determination
514 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
515 MADNESS_EXCEPTION("no post-determination in hartree_leaf_op",1);
516 return true;
517 }
518
519 /// post-determination: true if f is a leaf and the result is well-represented
520
521 /// @param[in] key the hi-dimensional key (breaks into keys for f and g)
522 /// @param[in] fcoeff coefficients of f of its appropriate key in NS form
523 /// @param[in] gcoeff coefficients of g of its appropriate key in NS form
524 bool operator()(const Key<NDIM>& key, const Tensor<T>& fcoeff, const Tensor<T>& gcoeff) const {
525
526 if (key.level()<2) return false;
527 Slice s = Slice(0,k-1);
528 std::vector<Slice> s0(NDIM/2,s);
529
530 const double tol=f->get_thresh();
531 const double thresh=f->truncate_tol(tol, key)*0.3; // custom factor to "ensure" accuracy
532 // include the wavelets in the norm, makes it much more accurate
533 const double fnorm=fcoeff.normf();
534 const double gnorm=gcoeff.normf();
535
536 // if the final norm is small, perform the hartree product and return
537 const double norm=fnorm*gnorm; // computing the outer product
538 if (norm < thresh) return true;
539
540 // norm of the scaling function coefficients
541 const double sfnorm=fcoeff(s0).normf();
542 const double sgnorm=gcoeff(s0).normf();
543
544 // get the error of both functions and of the pair function;
545 // need the abs for numerics: sfnorm might be equal fnorm.
546 const double ferror=sqrt(std::abs(fnorm*fnorm-sfnorm*sfnorm));
547 const double gerror=sqrt(std::abs(gnorm*gnorm-sgnorm*sgnorm));
548
549 // if the expected error is small, perform the hartree product and return
550 const double error=fnorm*gerror + ferror*gnorm + ferror*gerror;
551 // const double error=sqrt(fnorm*fnorm*gnorm*gnorm - sfnorm*sfnorm*sgnorm*sgnorm);
552
553 if (error < thresh) return true;
554 return false;
555 }
556 template <typename Archive> void serialize (Archive& ar) {
557 ar & f & k;
558 }
559 };
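    // Note on the error estimate above: splitting each factor into scaling and
    // wavelet parts, f = sf + df and g = sg + dg, the outer product satisfies
    //   f x g - sf x sg = sf x dg + df x sg + df x dg,
    // and since ferror = ||df||, gerror = ||dg||, ||sf|| <= fnorm, ||sg|| <= gnorm,
    //   ||f x g - sf x sg|| <= fnorm*gerror + ferror*gnorm + ferror*gerror,
    // which is the quantity compared against the truncation threshold.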
560
561 /// returns true if the result of the convolution operator op with some provided
562 /// coefficients will be small
563 template<typename T, size_t NDIM, typename opT>
564 struct op_leaf_op {
565 typedef FunctionImpl<T,NDIM> implT;
566
567 const opT* op; ///< the convolution operator
568 const implT* f; ///< the source or result function, needed for truncate_tol
569 bool do_error_leaf_op() const {return true;}
570
571 op_leaf_op() = default;
572 op_leaf_op(const opT* op, const implT* f) : op(op), f(f) {}
573
574 /// pre-determination: we can't know if this will be a leaf node before we got the final coeffs
575 bool operator()(const Key<NDIM>& key) const {return true;}
576
577 /// post-determination: return true if operator and coefficient norms are small
578 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
579 if (key.level()<2) return false;
580 const double cnorm=coeff.normf();
581 return this->operator()(key,cnorm);
582 }
583
584 /// post-determination: return true if operator and coefficient norms are small
585 bool operator()(const Key<NDIM>& key, const double& cnorm) const {
586 if (key.level()<2) return false;
587
588 typedef Key<opT::opdim> opkeyT;
589 const opkeyT source=op->get_source_key(key);
590
591 const double thresh=f->truncate_tol(f->get_thresh(),key);
592 const std::vector<opkeyT>& disp = op->get_disp(key.level());
593 const opkeyT& d = *disp.begin(); // use the zero-displacement for screening
594 const double opnorm = op->norm(key.level(), d, source);
595 const double norm=opnorm*cnorm;
596 return norm<thresh;
597
598 }
599
600 template <typename Archive> void serialize (Archive& ar) {
601 ar & op & f;
602 }
603
604 };
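    // A minimal sketch of using op_leaf_op as a screening predicate; opT is
    // assumed to provide get_source_key, get_disp and norm as used above:
    //
    //   op_leaf_op<T,NDIM,opT> screen(&op, f_ptr);
    //   if (screen(key, coeff)) {
    //       // operator norm times coefficient norm is below the truncation
    //       // threshold -- this contribution may safely be skipped
    //   }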
605
606
607 /// returns true if the result of a hartree_product is a leaf node
608 /// criteria are error, norm and its effect on a convolution operator
609 template<typename T, size_t NDIM, size_t LDIM, typename opT>
610 struct hartree_convolute_leaf_op {
611
612 typedef FunctionImpl<T,NDIM> implT;
613 typedef FunctionImpl<T,LDIM> implL;
614
615 const implT* f;
616 const implL* g; // for use of its cdata only
617 const opT* op;
618 bool do_error_leaf_op() const {return false;}
619
621 hartree_convolute_leaf_op(const implT* f, const implL* g, const opT* op)
622 : f(f), g(g), op(op) {}
623
624 /// no pre-determination
625 bool operator()(const Key<NDIM>& key) const {return true;}
626
627 /// no post-determination
628 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
629 MADNESS_EXCEPTION("no post-determination in hartree_convolute_leaf_op",1);
630 return true;
631 }
632
633 /// post-determination: true if f is a leaf and the result is well-represented
634
635 /// @param[in] key the hi-dimensional key (breaks into keys for f and g)
636 /// @param[in] fcoeff coefficients of f of its appropriate key in NS form
637 /// @param[in] gcoeff coefficients of g of its appropriate key in NS form
638 bool operator()(const Key<NDIM>& key, const Tensor<T>& fcoeff, const Tensor<T>& gcoeff) const {
639 // bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
640
641 if (key.level()<2) return false;
642
643 const double tol=f->get_thresh();
644 const double thresh=f->truncate_tol(tol, key);
645 // include the wavelets in the norm, makes it much more accurate
646 const double fnorm=fcoeff.normf();
647 const double gnorm=gcoeff.normf();
648
649 // norm of the scaling function coefficients
650 const double sfnorm=fcoeff(g->get_cdata().s0).normf();
651 const double sgnorm=gcoeff(g->get_cdata().s0).normf();
652
653 // if the final norm is small, perform the hartree product and return
654 const double norm=fnorm*gnorm; // computing the outer product
655 if (norm < thresh) return true;
656
657 // get the error of both functions and of the pair function
658 const double ferror=sqrt(fnorm*fnorm-sfnorm*sfnorm);
659 const double gerror=sqrt(gnorm*gnorm-sgnorm*sgnorm);
660
661 // if the expected error is small, perform the hartree product and return
662 const double error=fnorm*gerror + ferror*gnorm + ferror*gerror;
663 if (error < thresh) return true;
664
665 // now check if the norm of this and the norm of the operator are significant
666 const std::vector<Key<NDIM> >& disp = op->get_disp(key.level());
667 const Key<NDIM>& d = *disp.begin(); // use the zero-displacement for screening
668 const double opnorm = op->norm(key.level(), d, key);
669 const double final_norm=opnorm*sfnorm*sgnorm;
670 if (final_norm < thresh) return true;
671
672 return false;
673 }
674 template <typename Archive> void serialize (Archive& ar) {
675 ar & f & op;
676 }
677 };
678
679 template<typename T, size_t NDIM>
680 struct noop {
681 void operator()(const Key<NDIM>& key, const GenTensor<T>& coeff, const bool& is_leaf) const {}
682 bool operator()(const Key<NDIM>& key, const GenTensor<T>& fcoeff, const GenTensor<T>& gcoeff) const {
683 MADNESS_EXCEPTION("in noop::operator()",1);
684 return true;
685 }
686 template <typename Archive> void serialize (Archive& ar) {}
687
688 };
689
690 /// insert/replaces the coefficients into the function
691 template<typename T, std::size_t NDIM>
692 struct insert_op {
693 typedef FunctionImpl<T,NDIM> implT;
694 typedef Key<NDIM> keyT;
695 typedef GenTensor<T> coeffT;
696 typedef FunctionNode<T,NDIM> nodeT;
697 implT* impl;
701 insert_op(const insert_op& other) : impl(other.impl) {}
702 void operator()(const keyT& key, const coeffT& coeff, const bool& is_leaf) const {
704 impl->get_coeffs().replace(key,nodeT(coeff,not is_leaf));
705 }
706 template <typename Archive> void serialize (Archive& ar) {
707 ar & impl;
708 }
709
710 };
711
712 /// inserts/accumulates coefficients into impl's tree
713
714 /// NOTE: will use buffer and will need consolidation after operation ended !! NOTE !!
715 template<typename T, std::size_t NDIM>
716 struct accumulate_op {
717 typedef GenTensor<T> coeffT;
718 typedef FunctionNode<T,NDIM> nodeT;
719 FunctionImpl<T,NDIM>* impl;
721 accumulate_op() = default;
723 accumulate_op(const accumulate_op& other) = default;
724 void operator()(const Key<NDIM>& key, const coeffT& coeff, const bool& is_leaf) const {
725 if (coeff.has_data())
726 impl->get_coeffs().task(key, &nodeT::accumulate, coeff, impl->get_coeffs(), key, impl->get_tensor_args());
727 }
728 template <typename Archive> void serialize (Archive& ar) {
729 ar & impl;
730 }
731
732 };
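    // A minimal sketch contrasting the two functors above: insert_op replaces
    // the node at key, while accumulate_op adds into it via nodeT::accumulate,
    // which may park contributions in the node-local buffer. Per the note
    // above, the buffers must be merged back (FunctionNode::consolidate_buffer)
    // before the tree is used again.
    //
    //   // given an accumulate_op<T,NDIM> `acc` bound to a FunctionImpl:
    //   acc(key, coeff, true);   // queues a nodeT::accumulate task for `key`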
733
734
735template<size_t NDIM>
736 struct true_op {
737
738 template<typename T>
739 bool operator()(const Key<NDIM>& key, const T& t) const {return true;}
740
741 template<typename T, typename R>
742 bool operator()(const Key<NDIM>& key, const T& t, const R& r) const {return true;}
743 template <typename Archive> void serialize (Archive& ar) {}
744
745 };
746
747 /// shallow-copy, pared-down version of FunctionNode, for special purpose only
748 template<typename T, std::size_t NDIM>
749 struct ShallowNode {
750 typedef GenTensor<T> coeffT;
751 coeffT _coeffs;
752 bool _has_children;
753 double dnorm=-1.0;
754 ShallowNode() : _coeffs(), _has_children(false) {}
755 ShallowNode(const FunctionNode<T,NDIM>& node)
756 : _coeffs(node.coeff()), _has_children(node.has_children()),
757 dnorm(node.get_dnorm()) {}
758 ShallowNode(const ShallowNode<T,NDIM>& node)
759 : _coeffs(node.coeff()), _has_children(node._has_children),
760 dnorm(node.dnorm) {}
761
762 const coeffT& coeff() const {return _coeffs;}
763 coeffT& coeff() {return _coeffs;}
764 bool has_children() const {return _has_children;}
765 bool is_leaf() const {return not _has_children;}
766 template <typename Archive>
767 void serialize(Archive& ar) {
768 ar & coeff() & _has_children & dnorm;
769 }
770 };
771
772
773 /// a class to track where relevant (parent) coeffs are
774
775 /// E.g. if a 6D function is composed of two 3D functions their coefficients must be tracked.
776 /// We might need coeffs from a box that does not exist, and to avoid searching for
777 /// parents we track which are their required respective boxes.
778 /// - CoeffTracker will refer either to a requested key, if it exists, or to its
779 /// outermost parent.
780 /// - Children must be made in sequential order to be able to track correctly.
781 ///
782 /// Usage: 1. make the child of a given CoeffTracker.
783 /// If the parent CoeffTracker refers to a leaf node (flag is_leaf)
784 /// the child will refer to the same node. Otherwise it will refer
785 /// to the child node.
786 /// 2. retrieve its coefficients (possible communication/ returns a Future).
787 /// Member variable key always refers to an existing node,
788 /// so we can fetch it. Once we have the node we can determine
789 /// if it has children which allows us to make a child (see 1. )
790 template<typename T, size_t NDIM>
791 class CoeffTracker {
792
793 typedef FunctionImpl<T,NDIM> implT;
794 typedef Key<NDIM> keyT;
795 typedef GenTensor<T> coeffT;
796 typedef std::pair<Key<NDIM>,ShallowNode<T,NDIM> > datumT;
797 enum LeafStatus { no, yes, unknown };
798
799 /// the funcimpl that has the coeffs
800 const implT* impl;
801 /// the current key, which must exist in impl
802 keyT key_;
803 /// flag if key is a leaf node
804 LeafStatus is_leaf_;
805 /// the coefficients belonging to key
806 coeffT coeff_;
807 /// norm of d coefficients corresponding to key
808 double dnorm_=-1.0;
809
810 public:
811
812 /// default ctor
814
815 /// the initial ctor making the root key
817 if (impl) key_=impl->get_cdata().key0;
818 }
819
820 /// ctor with a pair<keyT,nodeT>
821 explicit CoeffTracker(const CoeffTracker& other, const datumT& datum)
822 : impl(other.impl), key_(other.key_), coeff_(datum.second.coeff()),
823 dnorm_(datum.second.dnorm) {
824 if (datum.second.is_leaf()) is_leaf_=yes;
825 else is_leaf_=no;
826 }
827
828 /// copy ctor
829 CoeffTracker(const CoeffTracker& other) : impl(other.impl), key_(other.key_),
830 is_leaf_(other.is_leaf_), coeff_(other.coeff_), dnorm_(other.dnorm_) {};
831
832 /// const reference to impl
833 const implT* get_impl() const {return impl;}
834
835 /// const reference to the coeffs
836 const coeffT& coeff() const {return coeff_;}
837
838 /// const reference to the key
839 const keyT& key() const {return key_;}
840
841 /// return the coefficients belonging to the passed-in key
842
843 /// if key equals tracked key just return the coeffs, otherwise
844 /// make the child coefficients.
845 /// @param[in] key return coeffs corresponding to this key
846 /// @return coefficients belonging to key
854
855 /// return the s and dnorm belonging to the passed-in key
856 double dnorm(const keyT& key) const {
857 if (key==key_) return dnorm_;
858 MADNESS_ASSERT(key.is_child_of(key_));
859 return 0.0;
860 }
861
862 /// const reference to is_leaf flag
863 const LeafStatus& is_leaf() const {return is_leaf_;}
864
865 /// make a child of this, ignoring the coeffs
866 CoeffTracker make_child(const keyT& child) const {
867
868 // fast return
869 if ((not impl) or impl->is_on_demand()) return CoeffTracker(*this);
870
871 // can't make a child without knowing if this is a leaf -- activate first
873
874 CoeffTracker result;
875 if (impl) {
876 result.impl=impl;
877 if (is_leaf_==yes) result.key_=key_;
878 if (is_leaf_==no) {
879 result.key_=child;
880 // check if child is direct descendent of this, but root node is special case
881 if (child.level()>0) MADNESS_ASSERT(result.key().level()==key().level()+1);
882 }
883 result.is_leaf_=unknown;
884 }
885 return result;
886 }
887
888 /// find the coefficients
889
890 /// this involves communication to a remote node
891 /// @return a Future<CoeffTracker> with the coefficients that key refers to
893
894 // fast return
895 if (not impl) return Future<CoeffTracker>(CoeffTracker());
897
898 // this will return a <keyT,nodeT> from a remote node
901
902 // construct a new CoeffTracker locally
903 return impl->world.taskq.add(*const_cast<CoeffTracker*> (this),
904 &CoeffTracker::forward_ctor,*this,datum1);
905 }
906
907 private:
908 /// taskq-compatible forwarding to the ctor
909 CoeffTracker forward_ctor(const CoeffTracker& other, const datumT& datum) const {
910 return CoeffTracker(other,datum);
911 }
912
913 public:
914 /// serialization
915 template <typename Archive> void serialize(const Archive& ar) {
916 int il=int(is_leaf_);
917 ar & impl & key_ & il & coeff_ & dnorm_;
918 is_leaf_=LeafStatus(il);
919 }
920 };
921
922 template<typename T, std::size_t NDIM>
923 std::ostream&
924 operator<<(std::ostream& s, const CoeffTracker<T,NDIM>& ct) {
925 s << ct.key() << ct.is_leaf() << " " << ct.get_impl();
926 return s;
927 }
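    // A minimal sketch of the two-step usage described in the CoeffTracker
    // documentation (the name of the Future-returning fetch step is not
    // visible in this listing and is therefore omitted):
    //
    //   CoeffTracker<double,6> parent = ...;                 // tracks an existing node
    //   CoeffTracker<double,6> child  = parent.make_child(childkey);
    //   // fetching the node behind `child` returns a Future<CoeffTracker>,
    //   // possibly communicating with the owner of the key; afterwards
    //   // child.coeff(), child.is_leaf() and child.dnorm(childkey) refer to
    //   // the tracked (possibly parent) node.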
928
929 /// FunctionImpl holds all Function state to facilitate shallow copy semantics
930
931 /// Since Function assignment and copy constructors are shallow it
932 /// greatly simplifies maintaining consistent state to have all
933 /// (permanent) state encapsulated in a single class. The state
934 /// is shared between instances using a shared_ptr<FunctionImpl>.
935 ///
936 /// The FunctionImpl inherits all of the functionality of WorldContainer
937 /// (to store the coefficients) and WorldObject<WorldContainer> (used
938 /// for RMI and for its unique id).
939 ///
940 /// The class methods are public to avoid painful multiple friend template
941 /// declarations for Function and FunctionImpl ... but this trust should not be
942 /// abused ... NOTHING except FunctionImpl methods should mess with FunctionImplData.
943 /// The LB stuff might have to be an exception.
944 template <typename T, std::size_t NDIM>
945 class FunctionImpl : public WorldObject< FunctionImpl<T,NDIM> > {
946 private:
947 typedef WorldObject< FunctionImpl<T,NDIM> > woT; ///< Base class world object type
948 public:
949 typedef T typeT;
950 typedef FunctionImpl<T,NDIM> implT; ///< Type of this class (implementation)
951 typedef std::shared_ptr< FunctionImpl<T,NDIM> > pimplT; ///< pointer to this class
952 typedef Tensor<T> tensorT; ///< Type of tensor for anything but to hold coeffs
953 typedef Vector<Translation,NDIM> tranT; ///< Type of array holding translation
954 typedef Key<NDIM> keyT; ///< Type of key
955 typedef FunctionNode<T,NDIM> nodeT; ///< Type of node
956 typedef GenTensor<T> coeffT; ///< Type of tensor used to hold coeffs
957 typedef WorldContainer<keyT,nodeT> dcT; ///< Type of container holding the coefficients
958 typedef std::pair<const keyT,nodeT> datumT; ///< Type of entry in container
959 typedef Vector<double,NDIM> coordT; ///< Type of vector holding coordinates
960
961 //template <typename Q, int D> friend class Function;
962 template <typename Q, std::size_t D> friend class FunctionImpl;
963
965
966 /// getter
969 const std::vector<Vector<double,NDIM> >& get_special_points()const{return special_points;}
970
971 private:
972 int k; ///< Wavelet order
973 double thresh; ///< Screening threshold
974 int initial_level; ///< Initial level for refinement
975 int special_level; ///< Minimum level for refinement on special points
976 std::vector<Vector<double,NDIM> > special_points; ///< special points for further refinement (needed for composite functions or multiplication)
977 int max_refine_level; ///< Do not refine below this level
978 int truncate_mode; ///< 0=default=(|d|<thresh), 1=(|d|<thresh/2^n), 2=(|d|<thresh/4^n);
979 bool autorefine; ///< If true, autorefine where appropriate
980 bool truncate_on_project; ///< If true projection inserts at level n-1 not n
981 TensorArgs targs; ///< type of tensor to be used in the FunctionNodes
982
983 const FunctionCommonData<T,NDIM>& cdata;
984
985 std::shared_ptr< FunctionFunctorInterface<T,NDIM> > functor;
986 TreeState tree_state;
987
988 dcT coeffs; ///< The coefficients
989
990 // Disable the default copy constructor
991 FunctionImpl(const FunctionImpl<T,NDIM>& p);
992
993 public:
1002
1003 /// Initialize function impl from data in factory
1004 FunctionImpl(const FunctionFactory<T,NDIM>& factory)
1005 : WorldObject<implT>(factory._world)
1006 , world(factory._world)
1007 , k(factory._k)
1008 , thresh(factory._thresh)
1009 , initial_level(factory._initial_level)
1010 , special_level(factory._special_level)
1011 , special_points(factory._special_points)
1012 , max_refine_level(factory._max_refine_level)
1013 , truncate_mode(factory._truncate_mode)
1014 , autorefine(factory._autorefine)
1015 , truncate_on_project(factory._truncate_on_project)
1016// , nonstandard(false)
1017 , targs(factory._thresh,FunctionDefaults<NDIM>::get_tensor_type())
1018 , cdata(FunctionCommonData<T,NDIM>::get(k))
1019 , functor(factory.get_functor())
1020// , on_demand(factory._is_on_demand)
1021// , compressed(factory._compressed)
1022// , redundant(false)
1023 , tree_state(factory._tree_state)
1024 , coeffs(world,factory._pmap,false)
1025 //, bc(factory._bc)
1026 {
1027 // PROFILE_MEMBER_FUNC(FunctionImpl); // No need to profile this
1028 // !!! Ensure that all local state is correctly formed
1029 // before invoking process_pending for the coeffs and
1030 // for this. Otherwise, there is a race condition.
1031 MADNESS_ASSERT(k>0 && k<=MAXK);
1032
1033 bool empty = (factory._empty or is_on_demand());
1034 bool do_refine = factory._refine;
1035
1036 if (do_refine)
1037 initial_level = std::max(0,initial_level - 1);
1038
1039 if (empty) { // Do not set any coefficients at all
1040 // additional functors are only evaluated on-demand
1041 } else if (functor) { // Project function and optionally refine
1043 // set the union of the special points of functor and the ones explicitly given to FunctionFactory
1044 std::vector<coordT> functor_special_points=functor->special_points();
1045 if (!functor_special_points.empty()) special_points.insert(special_points.end(), functor_special_points.begin(), functor_special_points.end());
1046 // near special points refine as deeply as requested by the factory AND the functor
1047 special_level = std::max(special_level, functor->special_level());
1048
1049 typename dcT::const_iterator end = coeffs.end();
1050 for (typename dcT::const_iterator it=coeffs.begin(); it!=end; ++it) {
1051 if (it->second.is_leaf())
1052 woT::task(coeffs.owner(it->first), &implT::project_refine_op, it->first, do_refine,
1054 }
1055 }
1056 else { // Set as if a zero function
1057 initial_level = 1;
1059 }
1060
1062 this->process_pending();
1063 if (factory._fence && (functor || !empty)) world.gop.fence();
1064 }
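        // A minimal construction sketch, assuming the factory's chained
        // setters f(), k() and thresh() (3-D, double precision):
        //
        //   double gauss(const Vector<double,3>& r) {
        //       return exp(-r[0]*r[0]-r[1]*r[1]-r[2]*r[2]);
        //   }
        //   ...
        //   FunctionFactory<double,3> factory(world);
        //   factory.f(gauss).k(8).thresh(1e-5);
        //   FunctionImpl<double,3> impl(factory);   // projects and refines as above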
1065
1066 /// Copy constructor
1067
1068 /// Allocates a \em new function in preparation for a deep copy
1069 ///
1070 /// By default takes pmap from other but can also specify a different pmap.
1071 /// Does \em not copy the coefficients ... creates an empty container.
1072 template <typename Q>
1073 FunctionImpl(const FunctionImpl<Q,NDIM>& other,
1074 const std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > >& pmap,
1075 bool dozero)
1076 : WorldObject<implT>(other.world)
1077 , world(other.world)
1078 , k(other.k)
1079 , thresh(other.thresh)
1085 , autorefine(other.autorefine)
1087 , targs(other.targs)
1088 , cdata(FunctionCommonData<T,NDIM>::get(k))
1089 , functor()
1090 , tree_state(other.tree_state)
1091 , coeffs(world, pmap ? pmap : other.coeffs.get_pmap())
1092 {
1093 if (dozero) {
1094 initial_level = 1;
1096 //world.gop.fence(); <<<<<<<<<<<<<<<<<<<<<< needs a fence argument
1097 }
1099 this->process_pending();
1100 }
1101
1102 virtual ~FunctionImpl() { }
1103
1104 const std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > >& get_pmap() const;
1105
1106 void replicate(bool fence=true) {
1107 coeffs.replicate(fence);
1108 }
1109
1110 void distribute(std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > > newmap) const {
1111 auto currentmap=coeffs.get_pmap();
1112 currentmap->redistribute(world,newmap);
1113 }
1114
1115
1116 /// Copy coeffs from other into self
1117 template <typename Q>
1118 void copy_coeffs(const FunctionImpl<Q,NDIM>& other, bool fence) {
1119 typename FunctionImpl<Q,NDIM>::dcT::const_iterator end = other.coeffs.end();
1120 for (typename FunctionImpl<Q,NDIM>::dcT::const_iterator it=other.coeffs.begin();
1121 it!=end; ++it) {
1122 const keyT& key = it->first;
1123 const typename FunctionImpl<Q,NDIM>::nodeT& node = it->second;
1124 coeffs.replace(key,node. template convert<T>());
1125 }
1126 if (fence)
1127 world.gop.fence();
1128 }
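        // A minimal sketch of a deep copy: the templated copy constructor
        // above allocates an empty container, and copy_coeffs then clones
        // (and, for Q != T, converts) every node of the source tree:
        //
        //   FunctionImpl<double,3> target(source, source.get_pmap(), false);
        //   target.copy_coeffs(source, true);   // fence when done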
1129
1130 /// perform inplace gaxpy: this = alpha*this + beta*other
1131 /// @param[in] alpha prefactor for this
1132 /// @param[in] beta prefactor for other
1133 /// @param[in] g the other function, reconstructed
1134 /// @return *this = alpha*this + beta*other, in either reconstructed or redundant_after_merge state
1135 template<typename Q, typename R>
1136 void gaxpy_inplace_reconstructed(const T& alpha, const FunctionImpl<Q,NDIM>& g, const R& beta, const bool fence) {
1137 // merge g's tree into this' tree
1138 gaxpy_inplace(alpha,g,beta,fence);
1140 // this->merge_trees(beta,g,alpha,fence);
1141 // tree is now redundant_after_merge
1142 // sum down the sum coeffs into the leafs if possible to keep the state most clean
1143 if (fence) sum_down(fence);
1144 }
1145
1146 /// merge the trees of this and other, while multiplying them with the alpha or beta, resp
1147
1148 /// first step in an inplace gaxpy operation for reconstructed functions; assuming the same
1149 /// distribution for this and other
1150
1151 /// on output, *this = alpha* *this + beta * other
1152 /// @param[in] alpha prefactor for this
1153 /// @param[in] beta prefactor for other
1154 /// @param[in] other the other function, reconstructed
1155 template<typename Q, typename R>
1156 void merge_trees(const T alpha, const FunctionImpl<Q,NDIM>& other, const R beta, const bool fence=true) {
1157 MADNESS_ASSERT(get_pmap() == other.get_pmap());
1160 }
1161
1162 /// merge the trees of this and other, while multiplying them with the alpha or beta, resp
1163
1164 /// result and rhs do not have to have the same distribution or live in the same world
1165 /// result+=alpha* this
1166 /// @param[in] alpha prefactor for this
1167 template<typename Q, typename R>
1168 void accumulate_trees(FunctionImpl<Q,NDIM>& result, const R alpha, const bool fence=true) const {
1170 }
1171
1172 /// perform: this= alpha*f + beta*g, invoked by result
1173
1174 /// f and g are reconstructed, so we can save on the compress operation,
1175 /// walk down the joint tree, and add leaf coefficients; effectively refines
1176 /// to common finest level.
1177
1178 /// nothing returned, but leaves this's tree reconstructed and as sum of f and g
1179 /// @param[in] alpha prefactor for f
1180 /// @param[in] f first addend
1181 /// @param[in] beta prefactor for g
1182 /// @param[in] g second addend
1183 void gaxpy_oop_reconstructed(const double alpha, const implT& f,
1184 const double beta, const implT& g, const bool fence);
1185
1186 /// functor for the gaxpy_inplace method
1187 template <typename Q, typename R>
1188 struct do_gaxpy_inplace {
1189 typedef Range<typename FunctionImpl<Q,NDIM>::dcT::const_iterator> rangeT;
1190 FunctionImpl<T,NDIM>* f; ///< the current function impl
1191 T alpha; ///< prefactor for the current function impl
1192 R beta; ///< prefactor for the other function impl
1193 do_gaxpy_inplace() = default;
1194 do_gaxpy_inplace(FunctionImpl<T,NDIM>* f, T alpha, R beta) : f(f), alpha(alpha), beta(beta) {}
1195 bool operator()(typename rangeT::iterator& it) const {
1196 const keyT& key = it->first;
1197 const FunctionNode<Q,NDIM>& other_node = it->second;
1198 // Use send to get write accessor and automated construction if missing
1199 f->coeffs.send(key, &nodeT:: template gaxpy_inplace<Q,R>, alpha, other_node, beta);
1200 return true;
1201 }
1202 template <typename Archive>
1203 void serialize(Archive& ar) {
1204 ar & f & alpha & beta;
1205 }
1206 };
1207
1208 /// Inplace general bilinear operation
1209
1210 /// this's world can differ from other's world
1211 /// this = alpha * this + beta * other
1212 /// @param[in] alpha prefactor for the current function impl
1213 /// @param[in] other the other function impl
1214 /// @param[in] beta prefactor for other
1215 template <typename Q, typename R>
1216 void gaxpy_inplace(const T& alpha,const FunctionImpl<Q,NDIM>& other, const R& beta, bool fence) {
1217// MADNESS_ASSERT(get_pmap() == other.get_pmap());
1218 if (alpha != T(1.0)) scale_inplace(alpha,false);
1219 typedef Range<typename FunctionImpl<Q,NDIM>::dcT::const_iterator> rangeT;
1220 typedef do_gaxpy_inplace<Q,R> opT;
1221 other.world.taskq. template for_each<rangeT,opT>(rangeT(other.coeffs.begin(), other.coeffs.end()), opT(this, T(1.0), beta));
1222 if (fence)
1223 other.world.gop.fence();
1224 }
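        // A minimal sketch: form f := 2*f - g in place (f and g are
        // FunctionImpls with compatible trees), fencing so all per-node
        // tasks complete:
        //
        //   f.gaxpy_inplace(2.0, g, -1.0, true);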
1225
1226 // loads a function impl from persistence
1227 // @param[in] ar the archive where the function impl is stored
1228 template <typename Archive>
1229 void load(Archive& ar) {
1230 // WE RELY ON K BEING STORED FIRST
1231 int kk = 0;
1232 ar & kk;
1233
1234 MADNESS_ASSERT(kk==k);
1235
1236 // note that functor should not be (re)stored
1238 & autorefine & truncate_on_project & tree_state;//nonstandard & compressed ; //& bc;
1239
1240 ar & coeffs;
1241 world.gop.fence();
1242 }
1243
1244 // saves a function impl to persistence
1245 // @param[in] ar the archive where the function impl is to be stored
1246 template <typename Archive>
1247 void store(Archive& ar) {
1248 // WE RELY ON K BEING STORED FIRST
1249
1250 // note that functor should not be (re)stored
1252 & autorefine & truncate_on_project & tree_state;//nonstandard & compressed ; //& bc;
1253
1254 ar & coeffs;
1255 world.gop.fence();
1256 }
1257
1258 /// Returns true if the function is compressed.
1259 bool is_compressed() const;
1260
1261 /// Returns true if the function is compressed.
1262 bool is_reconstructed() const;
1263
1264 /// Returns true if the function is redundant.
1265 bool is_redundant() const;
1266
1267 /// Returns true if the function is redundant_after_merge.
1268 bool is_redundant_after_merge() const;
1269
1270 bool is_nonstandard() const;
1271
1272 bool is_nonstandard_with_leaves() const;
1273
1274 bool is_on_demand() const;
1275
1276 bool has_leaves() const;
1277
1278 void set_tree_state(const TreeState& state) {
1279 tree_state=state;
1280 }
1281
1283
1284 void set_functor(const std::shared_ptr<FunctionFunctorInterface<T,NDIM> > functor1);
1285
1286 std::shared_ptr<FunctionFunctorInterface<T,NDIM> > get_functor();
1287
1288 std::shared_ptr<FunctionFunctorInterface<T,NDIM> > get_functor() const;
1289
1290 void unset_functor();
1291
1292
1294
1296 void set_tensor_args(const TensorArgs& t);
1297
1298 double get_thresh() const;
1299
1300 void set_thresh(double value);
1301
1302 bool get_autorefine() const;
1303
1304 void set_autorefine(bool value);
1305
1306 int get_k() const;
1307
1308 const dcT& get_coeffs() const;
1309
1310 dcT& get_coeffs();
1311
1313
1314 void accumulate_timer(const double time) const; // !!!!!!!!!!!! REDUNDANT !!!!!!!!!!!!!!!
1315
1316 void print_timer() const;
1317
1318 void reset_timer();
1319
1320 /// Adds a constant to the function. Local operation, optional fence
1321
1322 /// In scaling function basis must add value to first polyn in
1323 /// each box with appropriate scaling for level. In wavelet basis
1324 /// need only add at level zero.
1325 /// @param[in] t the scalar to be added
1326 void add_scalar_inplace(T t, bool fence);
1327
1328 /// Initialize nodes to zero function at initial_level of refinement.
1329
1330 /// Works for either basis. No communication.
1331 void insert_zero_down_to_initial_level(const keyT& key);
1332
1333 /// Truncate according to the threshold with optional global fence
1334
1335 /// If thresh<=0 the default value of this->thresh is used
1336 /// @param[in] tol the truncation tolerance
1337 void truncate(double tol, bool fence);
1338
1339 /// Returns true if after truncation this node has coefficients
1340
1341 /// Assumed to be invoked on process owning key. Possible non-blocking
1342 /// communication.
1343 /// @param[in] key the key of the current function node
1344 Future<bool> truncate_spawn(const keyT& key, double tol);
1345
1346 /// Actually do the truncate operation
1347 /// @param[in] key the key to the current function node being evaluated for truncation
1348 /// @param[in] tol the tolerance for thresholding
1349 /// @param[in] v vector of Future<bool>'s that specify whether the current nodes children have coeffs
1350 bool truncate_op(const keyT& key, double tol, const std::vector< Future<bool> >& v);
1351
1352 /// Evaluate function at quadrature points in the specified box
1353
1354 /// @param[in] key the key indicating where the quadrature points are located
1355 /// @param[in] f the interface to the elementary function
1356 /// @param[in] qx quadrature points on a level=0 box
1357 /// @param[out] fval values
1358 void fcube(const keyT& key, const FunctionFunctorInterface<T,NDIM>& f, const Tensor<double>& qx, tensorT& fval) const;
1359
1360 /// Evaluate function at quadrature points in the specified box
1361
1362 /// @param[in] key the key indicating where the quadrature points are located
1363 /// @param[in] f the interface to the elementary function
1364 /// @param[in] qx quadrature points on a level=0 box
1365 /// @param[out] fval values
1366 void fcube(const keyT& key, T (*f)(const coordT&), const Tensor<double>& qx, tensorT& fval) const;
1367
1368 /// Returns cdata.key0
1369 const keyT& key0() const;
1370
1371 /// Prints the coeffs tree of the current function impl
1372 /// @param[in] maxlevel the maximum level of the tree for printing
1373 /// @param[out] os the ostream to where the output is sent
1374 void print_tree(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1375
1376 /// Functor for the do_print_tree method
1377 void do_print_tree(const keyT& key, std::ostream& os, Level maxlevel) const;
1378
1379 /// Prints the coeffs tree of the current function impl (using GraphViz)
1380 /// @param[in] maxlevel the maximum level of the tree for printing
1381 /// @param[out] os the ostream to where the output is sent
1382 void print_tree_graphviz(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1383
1384 /// Functor for the do_print_tree method (using GraphViz)
1385 void do_print_tree_graphviz(const keyT& key, std::ostream& os, Level maxlevel) const;
1386
1387 /// Same as print_tree() but in JSON format
1388 /// @param[out] os the ostream to where the output is sent
1389 /// @param[in] maxlevel the maximum level of the tree for printing
1390 void print_tree_json(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1391
1392 /// Functor for the do_print_tree_json method
1393 void do_print_tree_json(const keyT& key, std::multimap<Level, std::tuple<tranT, std::string>>& data, Level maxlevel) const;
1394
1395 /// convert a number [0,limit] to a hue color code [blue,red],
1396 /// or, if log is set, a number [1.e-10,limit]
1397 struct do_convert_to_color {
1398 double limit;
1399 bool log;
1400 static double lower() {return 1.e-10;};
1402 do_convert_to_color(const double limit, const bool log) : limit(limit), log(log) {}
1403 double operator()(double val) const {
1404 double color=0.0;
1405
1406 if (log) {
1407 double val2=log10(val) - log10(lower()); // will yield >0.0
1408 double upper=log10(limit) -log10(lower());
1409 val2=0.7-(0.7/upper)*val2;
1410 color= std::max(0.0,val2);
1411 color= std::min(0.7,color);
1412 } else {
1413 double hue=0.7-(0.7/limit)*(val);
1414 color= std::max(0.0,hue);
1415 }
1416 return color;
1417 }
1418 };
1419
1420
1421 /// Print a plane ("xy", "xz", or "yz") containing the point x to file
1422
1423 /// works for all dimensions; we walk through the tree, and if a leaf node
1424 /// inside the sub-cell touches the plane we print it in pstricks format
1425 void print_plane(const std::string filename, const int xaxis, const int yaxis, const coordT& el2);
1426
1427 /// collect the data for a plot of the MRA structure locally on each node
1428
1429 /// @param[in] xaxis the x-axis in the plot (can be any axis of the MRA box)
1430 /// @param[in] yaxis the y-axis in the plot (can be any axis of the MRA box)
1431 /// @param[in] el2 needs a description
1432 /// \todo Provide a description for el2
1433 Tensor<double> print_plane_local(const int xaxis, const int yaxis, const coordT& el2);
1434
1435 /// Functor for the print_plane method
1436 /// @param[in] filename the filename for the output
1437 /// @param[in] plotinfo plotting parameters
1438 /// @param[in] xaxis the x-axis in the plot (can be any axis of the MRA box)
1439 /// @param[in] yaxis the y-axis in the plot (can be any axis of the MRA box)
1440 void do_print_plane(const std::string filename, std::vector<Tensor<double> > plotinfo,
1441 const int xaxis, const int yaxis, const coordT el2);
1442
1443 /// print the grid (the roots of the quadrature of each leaf box)
1444 /// of this function in user xyz coordinates
1445 /// @param[in] filename the filename for the output
1446 void print_grid(const std::string filename) const;
1447
1448 /// return the keys of the local leaf boxes
1449 std::vector<keyT> local_leaf_keys() const;
1450
1451 /// print the grid in xyz format
1452
1453 /// the quadrature points and the key information will be written to file,
1454 /// @param[in] filename where the quadrature points will be written to
1455 /// @param[in] keys all leaf keys
1456 void do_print_grid(const std::string filename, const std::vector<keyT>& keys) const;
1457
1458 /// read data from a grid
1459
1460 /// @param[in] keyfile file with keys and grid points for each key
1461 /// @param[in] gridfile file with grid points, w/o key, but with same ordering
1462 /// @param[in] vnuc_functor subtract the values of this functor if regularization is needed
1463 template<size_t FDIM>
1464 typename std::enable_if<NDIM==FDIM>::type
1465 read_grid(const std::string keyfile, const std::string gridfile,
1466 std::shared_ptr< FunctionFunctorInterface<double,NDIM> > vnuc_functor) {
1467
1468 std::ifstream kfile(keyfile.c_str());
1469 std::ifstream gfile(gridfile.c_str());
1470 std::string line;
1471
1472 long ndata,ndata1;
1473 if (not (std::getline(kfile,line))) MADNESS_EXCEPTION("failed reading 1st line of key data",0);
1474 if (not (std::istringstream(line) >> ndata)) MADNESS_EXCEPTION("failed reading k",0);
1475 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 1st line of grid data",0);
1476 if (not (std::istringstream(line) >> ndata1)) MADNESS_EXCEPTION("failed reading k",0);
1477 MADNESS_CHECK(ndata==ndata1);
1478 if (not (std::getline(kfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of key data",0);
1479 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of grid data",0);
1480
1481 // the quadrature points in simulation coordinates of the root node
1482 const Tensor<double> qx=cdata.quad_x;
1483 const size_t npt = qx.dim(0);
1484
1485 // the number of coordinates (grid point tuples) per box ({x1},{x2},{x3},..,{xNDIM})
1486 long npoints=power<NDIM>(npt);
1487 // the number of boxes
1488 long nboxes=ndata/npoints;
1489 MADNESS_ASSERT(nboxes*npoints==ndata);
1490 print("reading ",nboxes,"boxes from file",gridfile,keyfile);
1491
1492 // these will be the data
1493 Tensor<T> values(cdata.vk,false);
1494
1495 int ii=0;
1496 std::string gline,kline;
1497 // while (1) {
1498 while (std::getline(kfile,kline)) {
1499
1500 double x,y,z,x1,y1,z1,val;
1501
1502 // get the key
1503 long nn;
1504 Translation l1,l2,l3;
1505 // line looks like: # key: n l1 l2 l3
1506 kline.erase(0,7);
1507 std::stringstream(kline) >> nn >> l1 >> l2 >> l3;
1508 // kfile >> s >> nn >> l1 >> l2 >> l3;
1509 const Vector<Translation,3> ll{ l1,l2,l3 };
1510 Key<3> key(nn,ll);
1511
1512 // this is borrowed from fcube
1513 const Vector<Translation,3>& l = key.translation();
1514 const Level n = key.level();
1515 const double h = std::pow(0.5,double(n));
1516 coordT c; // will hold the point in user coordinates
1517 const Tensor<double>& cell_width = FunctionDefaults<NDIM>::get_cell_width();
1518 const Tensor<double>& cell = FunctionDefaults<NDIM>::get_cell();
1519
1520
1521 if (NDIM == 3) {
1522 for (size_t i=0; i<npt; ++i) {
1523 c[0] = cell(0,0) + h*cell_width[0]*(l[0] + qx(i)); // x
1524 for (size_t j=0; j<npt; ++j) {
1525 c[1] = cell(1,0) + h*cell_width[1]*(l[1] + qx(j)); // y
1526 for (size_t k=0; k<npt; ++k) {
1527 c[2] = cell(2,0) + h*cell_width[2]*(l[2] + qx(k)); // z
1528 // fprintf(pFile,"%18.12f %18.12f %18.12f\n",c[0],c[1],c[2]);
1529 auto& success1 = std::getline(gfile,gline); MADNESS_CHECK(success1);
1530 auto& success2 = std::getline(kfile,kline); MADNESS_CHECK(success2);
1531 std::istringstream(gline) >> x >> y >> z >> val;
1532 std::istringstream(kline) >> x1 >> y1 >> z1;
1533 MADNESS_CHECK(std::fabs(x-c[0])<1.e-4);
1534 MADNESS_CHECK(std::fabs(x1-c[0])<1.e-4);
1535 MADNESS_CHECK(std::fabs(y-c[1])<1.e-4);
1536 MADNESS_CHECK(std::fabs(y1-c[1])<1.e-4);
1537 MADNESS_CHECK(std::fabs(z-c[2])<1.e-4);
1538 MADNESS_CHECK(std::fabs(z1-c[2])<1.e-4);
1539
1540 // regularize if a functor is given
1541 if (vnuc_functor) val-=(*vnuc_functor)(c);
1542 values(i,j,k)=val;
1543 }
1544 }
1545 }
1546 } else {
1547 MADNESS_EXCEPTION("only NDIM=3 in print_grid",0);
1548 }
1549
1550 // insert the new leaf node
1551 const bool has_children=false;
1552 coeffT coeff=coeffT(this->values2coeffs(key,values),targs);
1553 nodeT node(coeff,has_children);
1554 coeffs.replace(key,node);
1556 ii++;
1557 }
1558
1559 kfile.close();
1560 gfile.close();
1561 MADNESS_CHECK(ii==nboxes);
1562
1563 }
1564
1565
1566 /// read data from a grid
1567
1568 /// @param[in] gridfile file with keys and grid points and values for each key
1569 /// @param[in] vnuc_functor subtract the values of this functor if regularization is needed
1570 template<size_t FDIM>
1571 typename std::enable_if<NDIM==FDIM>::type
1572 read_grid2(const std::string gridfile,
1573 std::shared_ptr< FunctionFunctorInterface<double,NDIM> > vnuc_functor) {
1574
1575 std::ifstream gfile(gridfile.c_str());
1576 std::string line;
1577
1578 long ndata;
1579 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 1st line of grid data",0);
1580 if (not (std::istringstream(line) >> ndata)) MADNESS_EXCEPTION("failed reading k",0);
1581 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of grid data",0);
1582
1583 // the quadrature points in simulation coordinates of the root node
1584 const Tensor<double> qx=cdata.quad_x;
1585 const size_t npt = qx.dim(0);
1586
1587 // the number of coordinates (grid point tuples) per box ({x1},{x2},{x3},..,{xNDIM})
1588 long npoints=power<NDIM>(npt);
1589 // the number of boxes
1590 long nboxes=ndata/npoints;
1591 MADNESS_CHECK(nboxes*npoints==ndata);
1592 print("reading ",nboxes,"boxes from file",gridfile);
1593
1594 // these will be the data
1595 Tensor<T> values(cdata.vk,false);
1596
1597 int ii=0;
1598 std::string gline;
1599 // while (1) {
1600 while (std::getline(gfile,gline)) {
1601
1602 double x1,y1,z1,val;
1603
1604 // get the key
1605 long nn;
1606 Translation l1,l2,l3;
1607 // line looks like: # key: n l1 l2 l3
1608 gline.erase(0,7);
1609 std::stringstream(gline) >> nn >> l1 >> l2 >> l3;
1610 const Vector<Translation,3> ll{ l1,l2,l3 };
1611 Key<3> key(nn,ll);
1612
1613 // this is borrowed from fcube
1614 const Vector<Translation,3>& l = key.translation();
1615 const Level n = key.level();
1616 const double h = std::pow(0.5,double(n));
1617 coordT c; // will hold the point in user coordinates
1618 const Tensor<double>& cell_width = FunctionDefaults<NDIM>::get_cell_width();
1619 const Tensor<double>& cell = FunctionDefaults<NDIM>::get_cell();
1620
1621
1622 if (NDIM == 3) {
1623 for (int i=0; i<npt; ++i) {
1624 c[0] = cell(0,0) + h*cell_width[0]*(l[0] + qx(i)); // x
1625 for (int j=0; j<npt; ++j) {
1626 c[1] = cell(1,0) + h*cell_width[1]*(l[1] + qx(j)); // y
1627 for (int k=0; k<npt; ++k) {
1628 c[2] = cell(2,0) + h*cell_width[2]*(l[2] + qx(k)); // z
1629
1630 auto& success = std::getline(gfile,gline);
1631 MADNESS_CHECK(success);
1632 std::istringstream(gline) >> x1 >> y1 >> z1 >> val;
1633 MADNESS_CHECK(std::fabs(x1-c[0])<1.e-4);
1634 MADNESS_CHECK(std::fabs(y1-c[1])<1.e-4);
1635 MADNESS_CHECK(std::fabs(z1-c[2])<1.e-4);
1636
1637 // regularize if a functor is given
1638 if (vnuc_functor) val-=(*vnuc_functor)(c);
1639 values(i,j,k)=val;
1640 }
1641 }
1642 }
1643 } else {
1644 MADNESS_EXCEPTION("only NDIM=3 in print_grid",0);
1645 }
1646
1647 // insert the new leaf node
1648 const bool has_children=false;
1649 coeffT coeff=coeffT(this->values2coeffs(key,values),targs);
1650 nodeT node(coeff,has_children);
1651 coeffs.replace(key,node);
1652 const_cast<dcT&>(coeffs).send(key.parent(),
1654 coeffs, key.parent());
1655 ii++;
1656 }
1657
1658 gfile.close();
1659 MADNESS_CHECK(ii==nboxes);
1660
1661 }
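        // Sketch of the file layout expected by read_grid2 (as parsed above):
        //
        //   <ndata>                     total number of grid points in the file
        //   <header line, skipped>
        //   # key: n l1 l2 l3           one line per box, followed by
        //   x y z value                 npt^3 quadrature-point lines for that box
        //   ...                         (repeated for each box)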
1662
1663
1664 /// Compute by projection the scaling function coeffs in specified box
1665 /// @param[in] key the key to the current function node (box)
1666 tensorT project(const keyT& key) const;
1667
1668 /// Returns the truncation threshold according to truncate_method
1669
1670 /// here is our handwaving argument:
1671 /// this threshold will give each FunctionNode an error of less than tol. The
1672 /// total error can then be as high as sqrt(#nodes) * tol. Therefore in order
1673 /// to account for higher dimensions: divide tol by about the root of number
1674 /// of siblings (2^NDIM) that have a large error when we refine along a deep
1675 /// branch of the tree.
1676 double truncate_tol(double tol, const keyT& key) const;
1677
1678 int get_truncate_mode() const { return truncate_mode; };
1679
1680
1681 /// Returns patch referring to coeffs of child in parent box
1682 /// @param[in] child the key to the child function node (box)
1683 std::vector<Slice> child_patch(const keyT& child) const;
1684
1685 /// Projection with optional refinement w/ special points
1686 /// @param[in] key the key to the current function node (box)
1687 /// @param[in] do_refine should we continue refinement?
1688 /// @param[in] specialpts vector of special points in the function where we need
1689 /// to refine at a much finer level
1690 void project_refine_op(const keyT& key, bool do_refine,
1691 const std::vector<Vector<double,NDIM> >& specialpts);
1692
1693 /// Compute the Legendre scaling functions for multiplication
1694
1695 /// Evaluate parent polyn at quadrature points of a child. The prefactor of
1696 /// 2^n/2 is included. The tensor must be preallocated as phi(k,npt).
1697 /// Refer to the implementation notes for more info.
1698 /// @todo Robert please verify this comment. I don't understand this method.
1699 /// @param[in] np level of the parent function node (box)
1700 /// @param[in] lp translation of the parent function node (box)
1701 /// @param[in] nc level of the child function node (box)
1702 /// @param[in] lc translation of the child function node (box)
1703 /// @param[out] phi tensor of the Legendre scaling functions
1704 void phi_for_mul(Level np, Translation lp, Level nc, Translation lc, Tensor<double>& phi) const;
1705
1706 /// Directly project parent coeffs to child coeffs
1707
1708 /// Currently used by diff, but other uses can be anticipated
1709
1710 /// @todo is this documentation correct?
1711 /// @param[in] child the key whose coeffs we are requesting
1712 /// @param[in] parent the (leaf) key of our function
1713 /// @param[in] s the (leaf) coeffs belonging to parent
1714 /// @return coeffs
1715 const coeffT parent_to_child(const coeffT& s, const keyT& parent, const keyT& child) const;
1716
1717 /// Directly project parent NS coeffs to child NS coeffs
1718
1719 /// return the NS coefficients if parent and child are the same,
1720 /// or construct sum coeffs from the parents and "add" zero wavelet coeffs
1721 /// @param[in] child the key whose coeffs we are requesting
1722 /// @param[in] parent the (leaf) key of our function
1723 /// @param[in] coeff the (leaf) coeffs belonging to parent
1724 /// @return coeffs in NS form
1725 coeffT parent_to_child_NS(const keyT& child, const keyT& parent,
1726 const coeffT& coeff) const;
1727
1728 /// Return the values when given the coeffs in scaling function basis
1729 /// @param[in] key the key of the function node (box)
1730 /// @param[in] coeff the tensor of scaling function coefficients for function node (box)
1731 /// @return function values for function node (box)
1732 template <typename Q>
1733 GenTensor<Q> coeffs2values(const keyT& key, const GenTensor<Q>& coeff) const {
1734 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1735 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1736 return transform(coeff,cdata.quad_phit).scale(scale);
1737 }
1738
1739 /// convert S or NS coeffs to values on a 2k grid of the children
1740
1741 /// equivalent to unfiltering the NS coeffs and then converting all child S-coeffs
1742 /// to values in their respective boxes. If only S coeffs are provided d coeffs are
1743 /// assumed to be zero. Reverse operation to values2NScoeffs().
1744 /// @param[in] key the key of the current S or NS coeffs, level n
1745 /// @param[in] coeff coeffs in S or NS form; if S then d coeffs are assumed zero
1746 /// @param[in] s_only sanity check to avoid unintended discard of d coeffs
1747 /// @return function values on the quadrature points of the children of key (!)
1748 template <typename Q>
1749 GenTensor<Q> NScoeffs2values(const keyT& key, const GenTensor<Q>& coeff,
1750 const bool s_only) const {
1751 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1752
1753 // sanity checks
1754 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) == s_only);
1755 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) or (coeff.dim(0)==2*this->get_k()));
1756
1757 // this is a block-diagonal matrix with the quadrature points on the diagonal
1758 Tensor<double> quad_phit_2k(2*cdata.k,2*cdata.npt);
1759 quad_phit_2k(cdata.s[0],cdata.s[0])=cdata.quad_phit;
1760 quad_phit_2k(cdata.s[1],cdata.s[1])=cdata.quad_phit;
1761
1762 // the transformation matrix unfilters (cdata.hg) and transforms to values in one step
1763 const Tensor<double> transf = (s_only)
1764 ? inner(cdata.hg(Slice(0,k-1),_),quad_phit_2k) // S coeffs
1765 : inner(cdata.hg,quad_phit_2k); // NS coeffs
1766
1767 // increment the level since the coeffs2values part happens on level n+1
1768 const double scale = pow(2.0,0.5*NDIM*(key.level()+1))/
1769 sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1770
1771 return transform(coeff,transf).scale(scale);
1772 }
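 /// A minimal usage sketch for NScoeffs2values(), assuming an instance impl of this
 /// class and coefficient tensors ns_coeff (dimension 2k) and s_coeff (dimension k);
 /// all names are illustrative only:
 /// \code
 /// // NS coeffs of box key -> values on the 2k-point grid covering all children of key
 /// GenTensor<double> child_values   = impl.NScoeffs2values(key, ns_coeff, false);
 /// // with only S coeffs available the wavelet (d) coeffs are taken to be zero
 /// GenTensor<double> child_values_s = impl.NScoeffs2values(key, s_coeff, true);
 /// \endcode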
1773
1774 /// Compute the function values for multiplication
1775
1776 /// Given S or NS coefficients from a parent cell, compute the value of
1777 /// the functions at the quadrature points of a child
1778 /// currently restricted to special cases
1779 /// @param[in] child key of the box in which we compute values
1780 /// @param[in] parent key of the parent box holding the coeffs
1781 /// @param[in] coeff coeffs of the parent box
1782 /// @param[in] s_only sanity check to avoid unintended discard of d coeffs
1783 /// @return function values on the quadrature points of the children of child (!)
1784 template <typename Q>
1785 GenTensor<Q> NS_fcube_for_mul(const keyT& child, const keyT& parent,
1786 const GenTensor<Q>& coeff, const bool s_only) const {
1787 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1788
1789 // sanity checks
1790 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) == s_only);
1791 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) or (coeff.dim(0)==2*this->get_k()));
1792
1793 // fast return if possible
1794 // if (child.level()==parent.level()) return NScoeffs2values(child,coeff,s_only);
1795
1796 if (s_only) {
1797
1798 Tensor<double> quad_phi[NDIM];
1799 // tmp tensor
1800 Tensor<double> phi1(cdata.k,cdata.npt);
1801
1802 for (std::size_t d=0; d<NDIM; ++d) {
1803
1804 // input is S coeffs (dimension k), output is values on 2*npt grid points
1805 quad_phi[d]=Tensor<double>(cdata.k,2*cdata.npt);
1806
1807 // for both children of "child" evaluate the Legendre polynomials
1808 // first the left child on level n+1 and translations 2l
1809 phi_for_mul(parent.level(),parent.translation()[d],
1810 child.level()+1, 2*child.translation()[d], phi1);
1811 quad_phi[d](_,Slice(0,k-1))=phi1;
1812
1813 // next the right child on level n+1 and translations 2l+1
1814 phi_for_mul(parent.level(),parent.translation()[d],
1815 child.level()+1, 2*child.translation()[d]+1, phi1);
1816 quad_phi[d](_,Slice(k,2*k-1))=phi1;
1817 }
1818
1819 const double scale = 1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1820 return general_transform(coeff,quad_phi).scale(scale);
1821 }
1822 MADNESS_EXCEPTION("you should not be here in NS_fcube_for_mul",1);
1823 return GenTensor<Q>();
1824 }
1825
1826 /// convert function values of a child generation directly to NS coeffs
1827
1828 /// equivalent to converting the function values to 2^NDIM S coeffs and then
1829 /// filtering them to NS coeffs. Reverse operation to NScoeffs2values().
1830 /// @param[in] key key of the parent of the generation
1831 /// @param[in] values tensor holding function values of the 2^NDIM children of key
1832 /// @return NS coeffs belonging to key
1833 template <typename Q>
1834 GenTensor<Q> values2NScoeffs(const keyT& key, const GenTensor<Q>& values) const {
1835 //PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1836
1837 // sanity checks
1838 MADNESS_ASSERT(values.dim(0)==2*this->get_k());
1839
1840 // this is a block-diagonal matrix with the quadrature points on the diagonal
1841 Tensor<double> quad_phit_2k(2*cdata.npt,2*cdata.k);
1842 quad_phit_2k(cdata.s[0],cdata.s[0])=cdata.quad_phiw;
1843 quad_phit_2k(cdata.s[1],cdata.s[1])=cdata.quad_phiw;
1844
1845 // the transformation matrix transforms the values to coeffs and filters (cdata.hgT) in one step
1846 const Tensor<double> transf=inner(quad_phit_2k,cdata.hgT);
1847
1848 // increment the level since the values2coeffs part happens on level n+1
1849 const double scale = pow(0.5,0.5*NDIM*(key.level()+1))
1850 *sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1851
1852 return transform(values,transf).scale(scale);
1853 }
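 /// A minimal round-trip sketch, assuming an instance impl of this class and NS
 /// coefficients ns of box key (names illustrative only): values2NScoeffs() undoes
 /// NScoeffs2values() up to numerical precision.
 /// \code
 /// GenTensor<double> vals = impl.NScoeffs2values(key, ns, false);  // NS coeffs -> child values
 /// GenTensor<double> ns2  = impl.values2NScoeffs(key, vals);       // child values -> NS coeffs
 /// double err = (ns.full_tensor_copy() - ns2.full_tensor_copy()).normf();  // ~ machine precision
 /// \endcode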
1854
1855 /// Return the function values when given the scaling function coeffs
1856 /// @param[in] key the key of the function node (box)
1857 /// @return function values at the quadrature points of the function node (box)
1858 template <typename Q>
1859 Tensor<Q> coeffs2values(const keyT& key, const Tensor<Q>& coeff) const {
1860 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1861 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1862 return transform(coeff,cdata.quad_phit).scale(scale);
1863 }
1864
1865 template <typename Q>
1866 GenTensor<Q> values2coeffs(const keyT& key, const GenTensor<Q>& values) const {
1867 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1868 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1869 return transform(values,cdata.quad_phiw).scale(scale);
1870 }
1871
1872 template <typename Q>
1873 Tensor<Q> values2coeffs(const keyT& key, const Tensor<Q>& values) const {
1874 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1875 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1876 return transform(values,cdata.quad_phiw).scale(scale);
1877 }
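 /// A minimal sketch of the relation between coeffs2values() and values2coeffs(),
 /// assuming an instance impl of this class and a coefficient tensor c of box key
 /// (names illustrative only): the two are mutual inverses on a single box.
 /// \code
 /// Tensor<double> vals  = impl.coeffs2values(key, c);     // applies 2^(n*NDIM/2)/sqrt(V)
 /// Tensor<double> cback = impl.values2coeffs(key, vals);  // applies the inverse scale
 /// double err = (c - cback).normf();                      // ~ machine precision
 /// \endcode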
1878
1879 /// Compute the function values for multiplication
1880
1881 /// Given coefficients from a parent cell, compute the value of
1882 /// the functions at the quadrature points of a child
1883 /// @param[in] child the key for the child function node (box)
1884 /// @param[in] parent the key for the parent function node (box)
1885 /// @param[in] coeff the coefficients of scaling function basis of the parent box
1886 template <typename Q>
1887 Tensor<Q> fcube_for_mul(const keyT& child, const keyT& parent, const Tensor<Q>& coeff) const {
1888 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1889 if (child.level() == parent.level()) {
1890 return coeffs2values(parent, coeff);
1891 }
1892 else if (child.level() < parent.level()) {
1893 MADNESS_EXCEPTION("FunctionImpl: fcube_for_mul: child-parent relationship bad?",0);
1894 }
1895 else {
1896 Tensor<double> phi[NDIM];
1897 for (std::size_t d=0; d<NDIM; ++d) {
1898 phi[d] = Tensor<double>(cdata.k,cdata.npt);
1899 phi_for_mul(parent.level(),parent.translation()[d],
1900 child.level(), child.translation()[d], phi[d]);
1901 }
1902 return general_transform(coeff,phi).scale(1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume()));
1903 }
1904 }
1905
1906
1907 /// Compute the function values for multiplication
1908
1909 /// Given coefficients from a parent cell, compute the value of
1910 /// the functions at the quadrature points of a child
1911 /// @param[in] child the key for the child function node (box)
1912 /// @param[in] parent the key for the parent function node (box)
1913 /// @param[in] coeff the coefficients of scaling function basis of the parent box
1914 template <typename Q>
1915 GenTensor<Q> fcube_for_mul(const keyT& child, const keyT& parent, const GenTensor<Q>& coeff) const {
1916 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1917 if (child.level() == parent.level()) {
1918 return coeffs2values(parent, coeff);
1919 }
1920 else if (child.level() < parent.level()) {
1921 MADNESS_EXCEPTION("FunctionImpl: fcube_for_mul: child-parent relationship bad?",0);
1922 }
1923 else {
1924 Tensor<double> phi[NDIM];
1925 for (size_t d=0; d<NDIM; d++) {
1926 phi[d] = Tensor<double>(cdata.k,cdata.npt);
1927 phi_for_mul(parent.level(),parent.translation()[d],
1928 child.level(), child.translation()[d], phi[d]);
1929 }
1930 return general_transform(coeff,phi).scale(1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume()));
1931 }
1932 }
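 /// A minimal usage sketch for fcube_for_mul(), assuming an instance impl of this
 /// class, a box child at or below parent, and the parent's scaling-function
 /// coefficients parent_coeff (names illustrative only):
 /// \code
 /// // values of the parent polynomial on the quadrature grid of the child box;
 /// // child.level() < parent.level() would throw
 /// Tensor<double> child_values = impl.fcube_for_mul(child, parent, parent_coeff);
 /// \endcode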
1933
1934
1935 /// Functor for the mul method
1936 template <typename L, typename R>
1937 void do_mul(const keyT& key, const Tensor<L>& left, const std::pair< keyT, Tensor<R> >& arg) {
1938 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1939 const keyT& rkey = arg.first;
1940 const Tensor<R>& rcoeff = arg.second;
1941 //madness::print("do_mul: r", rkey, rcoeff.size());
1942 Tensor<R> rcube = fcube_for_mul(key, rkey, rcoeff);
1943 //madness::print("do_mul: l", key, left.size());
1944 Tensor<L> lcube = fcube_for_mul(key, key, left);
1945
1946 Tensor<T> tcube(cdata.vk,false);
1947 TERNARY_OPTIMIZED_ITERATOR(T, tcube, L, lcube, R, rcube, *_p0 = *_p1 * *_p2;);
1948 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1949 tcube = transform(tcube,cdata.quad_phiw).scale(scale);
1950 coeffs.replace(key, nodeT(coeffT(tcube,targs),false));
1951 }
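 /// A minimal sketch of the pointwise product that do_mul() performs, written with
 /// the conversion helpers of this class; impl, key, fcoeff and gcoeff are
 /// illustrative names and both coefficient tensors refer to the same box:
 /// \code
 /// Tensor<double> fvals = impl.coeffs2values(key, fcoeff);  // to the value representation
 /// Tensor<double> gvals = impl.coeffs2values(key, gcoeff);
 /// fvals.emul(gvals);                                       // pointwise product on the grid
 /// Tensor<double> prod  = impl.values2coeffs(key, fvals);   // back to scaling-function coeffs
 /// \endcode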
1952
1953
1954 /// multiply the values of two coefficient tensors using a custom number of grid points
1955
1956 /// note both coefficient tensors have to refer to the same key!
1957 /// @param[in] c1 a tensor holding coefficients
1958 /// @param[in] c2 another tensor holding coeffs
1959 /// @param[in] npt number of grid points (optional, default is cdata.npt)
1960 /// @return coefficient tensor holding the product of the values of c1 and c2
1961 template<typename R>
1962 Tensor<TENSOR_RESULT_TYPE(T,R)> mul(const Tensor<T>& c1, const Tensor<R>& c2,
1963 const int npt, const keyT& key) const {
1964 typedef TENSOR_RESULT_TYPE(T,R) resultT;
1965
1966 const FunctionCommonData<T,NDIM>& cdata2=FunctionCommonData<T,NDIM>::get(npt);
1967
1968 // construct a tensor with the npt coeffs
1969 Tensor<T> c11(cdata2.vk), c22(cdata2.vk);
1970 c11(this->cdata.s0)=c1;
1971 c22(this->cdata.s0)=c2;
1972
1973 // it's sufficient to scale once
1974 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1975 Tensor<T> c1value=transform(c11,cdata2.quad_phit).scale(scale);
1976 Tensor<R> c2value=transform(c22,cdata2.quad_phit);
1977 Tensor<resultT> resultvalue(cdata2.vk,false);
1978 TERNARY_OPTIMIZED_ITERATOR(resultT, resultvalue, T, c1value, R, c2value, *_p0 = *_p1 * *_p2;);
1979
1980 Tensor<resultT> result=transform(resultvalue,cdata2.quad_phiw);
1981
1982 // return a copy of the slice to have the tensor contiguous
1983 return copy(result(this->cdata.s0));
1984 }
1985
1986
1987 /// Functor for the binary_op method
1988 template <typename L, typename R, typename opT>
1989 void do_binary_op(const keyT& key, const Tensor<L>& left,
1990 const std::pair< keyT, Tensor<R> >& arg,
1991 const opT& op) {
1992 //PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1993 const keyT& rkey = arg.first;
1994 const Tensor<R>& rcoeff = arg.second;
1995 Tensor<R> rcube = fcube_for_mul(key, rkey, rcoeff);
1996 Tensor<L> lcube = fcube_for_mul(key, key, left);
1997
1998 Tensor<T> tcube(cdata.vk,false);
1999 op(key, tcube, lcube, rcube);
2000 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
2001 tcube = transform(tcube,cdata.quad_phiw).scale(scale);
2002 coeffs.replace(key, nodeT(coeffT(tcube,targs),false));
2003 }
2004
2005 /// Invoked by result to perform result += alpha*left+beta*right in wavelet basis
2006
2007 /// Does not assume that any of result, left, right have the same distribution.
2008 /// For most purposes result will start out empty, so this actually implements an
2009 /// out-of-place gaxpy. If all functions have the same distribution there is
2010 /// no communication except for the optional fence.
2011 template <typename L, typename R>
2012 void gaxpy(T alpha, const FunctionImpl<L,NDIM>& left,
2013 T beta, const FunctionImpl<R,NDIM>& right, bool fence) {
2014 // Loop over local nodes in both functions. Accumulate alpha*left and beta*right.
2015 // Not that efficient in terms of memory bandwidth but ensures we do
2016 // not miss any nodes.
2017 typename FunctionImpl<L,NDIM>::dcT::const_iterator left_end = left.coeffs.end();
2018 for (typename FunctionImpl<L,NDIM>::dcT::const_iterator it=left.coeffs.begin();
2019 it!=left_end;
2020 ++it) {
2021 const keyT& key = it->first;
2022 const typename FunctionImpl<L,NDIM>::nodeT& other_node = it->second;
2023 coeffs.send(key, &nodeT:: template gaxpy_inplace<T,L>, 1.0, other_node, alpha);
2024 }
2025 typename FunctionImpl<R,NDIM>::dcT::const_iterator right_end = right.coeffs.end();
2026 for (typename FunctionImpl<R,NDIM>::dcT::const_iterator it=right.coeffs.begin();
2027 it!=right_end;
2028 ++it) {
2029 const keyT& key = it->first;
2030 const typename FunctionImpl<R,NDIM>::nodeT& other_node = it->second;
2031 coeffs.send(key, &nodeT:: template gaxpy_inplace<T,R>, 1.0, other_node, beta);
2032 }
2033 if (fence)
2034 world.gop.fence();
2035 }
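 /// A minimal usage sketch for gaxpy(), assuming compressed FunctionImpl instances
 /// result, left and right (names illustrative only):
 /// \code
 /// result.gaxpy(2.0, left, -1.0, right, true);   // result += 2*left - right, then fence
 /// \endcode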
2036
2037 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2038 /// @param[in] op the unary operator for the coefficients
2039 template <typename opT>
2040 void unary_op_coeff_inplace(const opT& op, bool fence) {
2041 typename dcT::iterator end = coeffs.end();
2042 for (typename dcT::iterator it=coeffs.begin(); it!=end; ++it) {
2043 const keyT& parent = it->first;
2044 nodeT& node = it->second;
2045 if (node.has_coeff()) {
2046 // op(parent, node.coeff());
2047 TensorArgs full(-1.0,TT_FULL);
2048 change_tensor_type(node.coeff(),full);
2049 op(parent, node.coeff().full_tensor());
2050 change_tensor_type(node.coeff(),targs);
2051 // op(parent,node);
2052 }
2053 }
2054 if (fence)
2055 world.gop.fence();
2056 }
2057
2058 /// Unary operation applied inplace to the nodes WITHOUT refinement, optional fence
2059 /// @param[in] op the unary operator for the nodes
2060 template <typename opT>
2061 void unary_op_node_inplace(const opT& op, bool fence) {
2062 typename dcT::iterator end = coeffs.end();
2063 for (typename dcT::iterator it=coeffs.begin(); it!=end; ++it) {
2064 const keyT& parent = it->first;
2065 nodeT& node = it->second;
2066 op(parent, node);
2067 }
2068 if (fence)
2069 world.gop.fence();
2070 }
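 /// A minimal sketch of an operator usable with unary_op_node_inplace(); the functor
 /// name and its members are illustrative only:
 /// \code
 /// struct scale_node {
 ///     double s;
 ///     explicit scale_node(double s) : s(s) {}
 ///     void operator()(const Key<3>& key, FunctionNode<double,3>& node) const {
 ///         if (node.has_coeff()) node.coeff().scale(s);   // scale the coeffs of this node
 ///     }
 /// };
 /// // impl.unary_op_node_inplace(scale_node(0.5), true);
 /// \endcode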
2071
2072 /// Integrate over one particle of a two-particle function to obtain a one-particle function,
2073 /// e.g. \f$ \int g(1,2) \delta(2-1) d2 = f(1) \f$
2074 /// The overall dimension of g must be even.
2075
2076 /// The operation on a single (diagonal) leaf node of g
2077 template<std::size_t LDIM>
2078 void dirac_convolution_op(const keyT &key, const nodeT &node, FunctionImpl<T,LDIM>* f) const {
2079 // fast return if the node has children (not a leaf node)
2080 if(node.has_children()) return;
2081
2082 const implT* g=this;
2083
2084 // break the 6D key into two 3D keys (may also work for every even dimension)
2085 Key<LDIM> key1, key2;
2086 key.break_apart(key1,key2);
2087
2088 // get the coefficients of the 6D function g
2089 const coeffT& g_coeff = node.coeff();
2090
2091 // get the values of the 6D function g
2092 coeffT g_values = g->coeffs2values(key,g_coeff);
2093
2094 // Determine rank and k
2095 const long rank=g_values.rank();
2096 const long maxk=f->get_k();
2097 MADNESS_ASSERT(maxk==g_coeff.dim(0));
2098
2099 // get tensors for particle 1 and 2 (U and V in SVD)
2100 tensorT vec1=copy(g_values.get_svdtensor().ref_vector(0).reshape(rank,maxk,maxk,maxk));
2101 tensorT vec2=g_values.get_svdtensor().ref_vector(1).reshape(rank,maxk,maxk,maxk);
2102 tensorT result(maxk,maxk,maxk); // should give zero tensor
2103 // Multiply the values of each U and V vector
2104 for (long i=0; i<rank; ++i) {
2105 tensorT c1=vec1(Slice(i,i),_,_,_); // shallow copy (!)
2106 tensorT c2=vec2(Slice(i,i),_,_,_);
2107 c1.emul(c2); // this changes vec1 because of shallow copy, but not the g function because of the deep copy made above
2108 double singular_value_i = g_values.get_svdtensor().weights(i);
2109 result += (singular_value_i*c1);
2110 }
2111
2112 // accumulate coefficients (since only diagonal boxes are used the coefficients get just replaced, but accumulate is needed to create the right tree structure)
2113 tensorT f_coeff = f->values2coeffs(key1,result);
2114 f->coeffs.task(key1, &FunctionNode<T,LDIM>::accumulate2, f_coeff, f->coeffs, key1, TaskAttributes::hipri());
2115// coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
2116
2117
2118 return;
2119 }
2120
2121
2122 template<std::size_t LDIM>
2123 void do_dirac_convolution(FunctionImpl<T,LDIM>* f, bool fence) const {
2124 typename dcT::const_iterator end = this->coeffs.end();
2125 for (typename dcT::const_iterator it=this->coeffs.begin(); it!=end; ++it) {
2126 // looping through all the leaf(!) coefficients in the NDIM function ("this")
2127 const keyT& key = it->first;
2128 const FunctionNode<T,NDIM>& node = it->second;
2129 if (node.is_leaf()) {
2130 // only process the diagonal boxes
2131 Key<LDIM> key1, key2;
2132 key.break_apart(key1,key2);
2133 if(key1 == key2){
2134 ProcessID p = coeffs.owner(key);
2135 woT::task(p, &implT:: template dirac_convolution_op<LDIM>, key, node, f);
2136 }
2137 }
2138 }
2139 world.gop.fence(); // fence is necessary if trickle down is used afterwards
2140 // trickle down and undo_redundant shouldn't change anything if only the diagonal elements are considered above -> check this
2141 f->trickle_down(true); // fence must be true otherwise undo_redundant will have trouble
2142// f->undo_redundant(true);
2143 f->verify_tree();
2144 //if (fence) world.gop.fence(); // unnecessary, fence is activated in undo_redundant
2145
2146 }
2147
2148
2149 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2150 /// @param[in] op the unary operator for the coefficients
2151 template <typename opT>
2152 void flo_unary_op_node_inplace(const opT& op, bool fence) {
2153 typedef Range<typename dcT::iterator> rangeT;
2154// typedef do_unary_op_value_inplace<opT> xopT;
2155 world.taskq.for_each<rangeT,opT>(rangeT(coeffs.begin(),coeffs.end()),op);
2156 if (fence) world.gop.fence();
2157 }
2158
2159 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2160 /// @param[in] op the unary operator for the coefficients
2161 template <typename opT>
2162 void flo_unary_op_node_inplace(const opT& op, bool fence) const {
2163 typedef Range<typename dcT::const_iterator> rangeT;
2164// typedef do_unary_op_value_inplace<opT> xopT;
2165 world.taskq.for_each<rangeT,opT>(rangeT(coeffs.begin(),coeffs.end()),op);
2166 if (fence)
2167 world.gop.fence();
2168 }
2169
2170 /// truncate tree at a certain level
2171 /// @param[in] max_level truncate tree below this level
2172 void erase(const Level& max_level);
2173
2174 /// Returns some asymmetry measure ... no comms
2175 double check_symmetry_local() const;
2176
2177 /// given an NS tree resulting from a convolution, truncate leafs if appropriate
2180 const implT* f; // for calling its member functions
2181
2183
2184 bool operator()(typename rangeT::iterator& it) const {
2185
2186 const keyT& key = it->first;
2187 nodeT& node = it->second;
2188
2189 if (node.is_leaf() and node.coeff().has_data()) {
2190 coeffT d = copy(node.coeff());
2191 d(f->cdata.s0)=0.0;
2192 const double error=d.normf();
2193 const double tol=f->truncate_tol(f->get_thresh(),key);
2194 if (error<tol) node.coeff()=copy(node.coeff()(f->cdata.s0));
2195 }
2196 return true;
2197 }
2198 template <typename Archive> void serialize(const Archive& ar) {}
2199
2200 };
2201
2202 /// remove all coefficients of internal nodes
2205
2206 /// constructor need impl for cdata
2208
2209 bool operator()(typename rangeT::iterator& it) const {
2210
2211 nodeT& node = it->second;
2212 if (node.has_children()) node.clear_coeff();
2213 return true;
2214 }
2215 template <typename Archive> void serialize(const Archive& ar) {}
2216
2217 };
2218
2219 /// remove all coefficients of leaf nodes
2222
2223 /// constructor need impl for cdata
2225
2226 bool operator()(typename rangeT::iterator& it) const {
2227 nodeT& node = it->second;
2228 if (not node.has_children()) node.clear_coeff();
2229 return true;
2230 }
2231 template <typename Archive> void serialize(const Archive& ar) {}
2232
2233 };
2234
2235
2236 /// keep only the sum coefficients in each node
2240
2241 /// constructor need impl for cdata
2243
2244 bool operator()(typename rangeT::iterator& it) const {
2245
2246 nodeT& node = it->second;
2247 coeffT s=copy(node.coeff()(impl->cdata.s0));
2248 node.coeff()=s;
2249 return true;
2250 }
2251 template <typename Archive> void serialize(const Archive& ar) {}
2252
2253 };
2254
2255
2256 /// reduce the rank of the nodes, optional fence
2259
2260 // threshold for rank reduction / SVD truncation
2262
2263 // constructor takes target precision
2264 do_reduce_rank() = default;
2266 do_reduce_rank(const double& thresh) {
2268 }
2269
2270 //
2271 bool operator()(typename rangeT::iterator& it) const {
2272
2273 nodeT& node = it->second;
2274 node.reduceRank(args.thresh);
2275 return true;
2276 }
2277 template <typename Archive> void serialize(const Archive& ar) {}
2278 };
2279
2280
2281
2282 /// check symmetry wrt particle exchange
2285 const implT* f;
2288
2289 /// return the norm of the difference of this node and its "mirror" node
2290 double operator()(typename rangeT::iterator& it) const {
2291
2292 // Temporary fix to GCC whining about out of range access for NDIM!=6
2293 if constexpr(NDIM==6) {
2294 const keyT& key = it->first;
2295 const nodeT& fnode = it->second;
2296
2297 // skip internal nodes
2298 if (fnode.has_children()) return 0.0;
2299
2300 if (f->world.size()>1) return 0.0;
2301
2302 // exchange particles
2303 std::vector<long> map(NDIM);
2304 map[0]=3; map[1]=4; map[2]=5;
2305 map[3]=0; map[4]=1; map[5]=2;
2306
2307 // make mapped key
2308 Vector<Translation,NDIM> l;
2309 for (std::size_t i=0; i<NDIM; ++i) l[map[i]] = key.translation()[i];
2310 const keyT mapkey(key.level(),l);
2311
2312 double norm=0.0;
2313
2314
2315 // hope it's local
2316 if (f->get_coeffs().probe(mapkey)) {
2317 MADNESS_ASSERT(f->get_coeffs().probe(mapkey));
2318 const nodeT& mapnode=f->get_coeffs().find(mapkey).get()->second;
2319
2320// bool have_c1=fnode.coeff().has_data() and fnode.coeff().config().has_data();
2321// bool have_c2=mapnode.coeff().has_data() and mapnode.coeff().config().has_data();
2322 bool have_c1=fnode.coeff().has_data();
2323 bool have_c2=mapnode.coeff().has_data();
2324
2325 if (have_c1 and have_c2) {
2326 tensorT c1=fnode.coeff().full_tensor_copy();
2327 tensorT c2=mapnode.coeff().full_tensor_copy();
2328 c2 = copy(c2.mapdim(map));
2329 norm=(c1-c2).normf();
2330 } else if (have_c1) {
2331 tensorT c1=fnode.coeff().full_tensor_copy();
2332 norm=c1.normf();
2333 } else if (have_c2) {
2334 tensorT c2=mapnode.coeff().full_tensor_copy();
2335 norm=c2.normf();
2336 } else {
2337 norm=0.0;
2338 }
2339 } else {
2340 norm=fnode.coeff().normf();
2341 }
2342 return norm*norm;
2343 }
2344 else {
2345 MADNESS_EXCEPTION("ONLY FOR DIM 6!", 1);
2346 }
2347 }
2348
2349 double operator()(double a, double b) const {
2350 return (a+b);
2351 }
2352
2353 template <typename Archive> void serialize(const Archive& ar) {
2354 MADNESS_EXCEPTION("no serialization of do_check_symmetry yet",1);
2355 }
2356
2357
2358 };
2359
2360 /// merge the coefficient boxes of this into result's tree
2361
2362 /// result+= alpha*this
2363 /// this and result don't have to have the same distribution or live in the same world
2364 /// no comm, and the tree should be in a consistent state by virtue of FunctionNode::accumulate
2365 template<typename Q, typename R>
2369 T alpha=T(1.0);
2373
2374 /// accumulate the coefficients of this node into result's tree
2375 bool operator()(typename rangeT::iterator& it) const {
2376
2377 const keyT& key = it->first;
2378 const nodeT& node = it->second;
2379 if (node.has_coeff()) result->get_coeffs().task(key, &nodeT::accumulate,
2380 alpha*node.coeff(), result->get_coeffs(), key, result->targs);
2381 return true;
2382 }
2383
2384 template <typename Archive> void serialize(const Archive& ar) {
2385 MADNESS_EXCEPTION("no serialization of do_accumulate_trees",1);
2386 }
2387 };
2388
2389
2390 /// merge the coefficient boxes of this into other's tree
2391
2392 /// no comm, and the tree should be in a consistent state by virtue
2393 /// of FunctionNode::gaxpy_inplace
2394 template<typename Q, typename R>
2403
2404 /// merge the coefficients of this node into other's tree
2405 bool operator()(typename rangeT::iterator& it) const {
2406
2407 const keyT& key = it->first;
2408 const nodeT& fnode = it->second;
2409
2410 // if other's node exists: add this' coeffs to it
2411 // otherwise insert this' node into other's tree
2412 typename dcT::accessor acc;
2413 if (other->get_coeffs().find(acc,key)) {
2414 nodeT& gnode=acc->second;
2415 gnode.gaxpy_inplace(beta,fnode,alpha);
2416 } else {
2417 nodeT gnode=fnode;
2418 gnode.scale(alpha);
2419 other->get_coeffs().replace(key,gnode);
2420 }
2421 return true;
2422 }
2423
2424 template <typename Archive> void serialize(const Archive& ar) {
2425 MADNESS_EXCEPTION("no serialization of do_merge_trees",1);
2426 }
2427 };
2428
2429
2430 /// map this on f
2431 struct do_mapdim {
2433
2434 std::vector<long> map;
2436
2437 do_mapdim() : f(0) {};
2438 do_mapdim(const std::vector<long> map, implT& f) : map(map), f(&f) {}
2439
2440 bool operator()(typename rangeT::iterator& it) const {
2441
2442 const keyT& key = it->first;
2443 const nodeT& node = it->second;
2444
2445 Vector<Translation,NDIM> l;
2446 for (std::size_t i=0; i<NDIM; ++i) l[map[i]] = key.translation()[i];
2447 tensorT c = node.coeff().reconstruct_tensor();
2448 if (c.size()) c = copy(c.mapdim(map));
2449 coeffT cc(c,f->get_tensor_args());
2450 f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2451
2452 return true;
2453 }
2454 template <typename Archive> void serialize(const Archive& ar) {
2455 MADNESS_EXCEPTION("no serialization of do_mapdim",1);
2456 }
2457
2458 };
2459
2460 /// mirror dimensions of this, write result on f
2461 struct do_mirror {
2463
2464 std::vector<long> mirror;
2466
2467 do_mirror() : f(0) {};
2468 do_mirror(const std::vector<long> mirror, implT& f) : mirror(mirror), f(&f) {}
2469
2470 bool operator()(typename rangeT::iterator& it) const {
2471
2472 const keyT& key = it->first;
2473 const nodeT& node = it->second;
2474
2475 // mirror translation index: l_new + l_old = l_max
2476 Vector<Translation,NDIM> l=key.translation();
2477 Translation lmax = (Translation(1)<<key.level()) - 1;
2478 for (std::size_t i=0; i<NDIM; ++i) {
2479 if (mirror[i]==-1) l[i]= lmax - key.translation()[i];
2480 }
2481
2482 // mirror coefficients: multiply all odd-k slices with -1
2483 tensorT c = node.coeff().full_tensor_copy();
2484 if (c.size()) {
2485 std::vector<Slice> s(___);
2486
2487 // loop over dimensions and over k
2488 for (size_t i=0; i<NDIM; ++i) {
2489 std::size_t kmax=c.dim(i);
2490 if (mirror[i]==-1) {
2491 for (size_t k=1; k<kmax; k+=2) {
2492 s[i]=Slice(k,k,1);
2493 c(s)*=(-1.0);
2494 }
2495 s[i]=_;
2496 }
2497 }
2498 }
2499 coeffT cc(c,f->get_tensor_args());
2500 f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2501
2502 return true;
2503 }
2504 template <typename Archive> void serialize(const Archive& ar) {
2505 MADNESS_EXCEPTION("no serialization of do_mirror",1);
2506 }
2507
2508 };
2509
2510 /// map and mirror the dimensions of this, write result on f
2513
2514 std::vector<long> map,mirror;
2516
2518 do_map_and_mirror(const std::vector<long> map, const std::vector<long> mirror, implT& f)
2519 : map(map), mirror(mirror), f(&f) {}
2520
2521 bool operator()(typename rangeT::iterator& it) const {
2522
2523 const keyT& key = it->first;
2524 const nodeT& node = it->second;
2525
2526 tensorT c = node.coeff().full_tensor_copy();
2527 Vector<Translation,NDIM> l=key.translation();
2528
2529 // do the mapping first (if present)
2530 if (map.size()>0) {
2531 Vector<Translation,NDIM> l1;
2532 for (std::size_t i=0; i<NDIM; ++i) l1[map[i]] = l[i];
2533 std::swap(l,l1);
2534 if (c.size()) c = copy(c.mapdim(map));
2535 }
2536
2537 if (mirror.size()>0) {
2538 // mirror translation index: l_new + l_old = l_max
2539 Vector<Translation,NDIM> l1=l;
2540 Translation lmax = (Translation(1)<<key.level()) - 1;
2541 for (std::size_t i=0; i<NDIM; ++i) {
2542 if (mirror[i]==-1) l1[i]= lmax - l[i];
2543 }
2544 std::swap(l,l1);
2545
2546 // mirror coefficients: multiply all odd-k slices with -1
2547 if (c.size()) {
2548 std::vector<Slice> s(___);
2549
2550 // loop over dimensions and over k
2551 for (size_t i=0; i<NDIM; ++i) {
2552 std::size_t kmax=c.dim(i);
2553 if (mirror[i]==-1) {
2554 for (size_t k=1; k<kmax; k+=2) {
2555 s[i]=Slice(k,k,1);
2556 c(s)*=(-1.0);
2557 }
2558 s[i]=_;
2559 }
2560 }
2561 }
2562 }
2563
2564 coeffT cc(c,f->get_tensor_args());
2565 f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2566 return true;
2567 }
2568 template <typename Archive> void serialize(const Archive& ar) {
2569 MADNESS_EXCEPTION("no serialization of do_mirror",1);
2570 }
2571
2572 };
2573
2574
2575
2576 /// "put" this on g
2577 struct do_average {
2579
2581
2582 do_average() : g(0) {}
2584
2585 /// iterator it points to this
2586 bool operator()(typename rangeT::iterator& it) const {
2587
2588 const keyT& key = it->first;
2589 const nodeT& fnode = it->second;
2590
2591 // fast return if rhs has no coeff here
2592 if (fnode.has_coeff()) {
2593
2594 // check if there is a node already existing
2595 typename dcT::accessor acc;
2596 if (g->get_coeffs().find(acc,key)) {
2597 nodeT& gnode=acc->second;
2598 if (gnode.has_coeff()) gnode.coeff()+=fnode.coeff();
2599 } else {
2600 g->get_coeffs().replace(key,fnode);
2601 }
2602 }
2603
2604 return true;
2605 }
2606 template <typename Archive> void serialize(const Archive& ar) {}
2607 };
2608
2609 /// change representation of nodes' coeffs to low rank, optional fence
2612
2613 // threshold for rank reduction / SVD truncation
2616
2617 // constructor takes target precision
2619 // do_change_tensor_type(const TensorArgs& targs) : targs(targs) {}
2621
2622 //
2623 bool operator()(typename rangeT::iterator& it) const {
2624
2625 double cpu0=cpu_time();
2626 nodeT& node = it->second;
2628 double cpu1=cpu_time();
2630
2631 return true;
2632
2633 }
2634 template <typename Archive> void serialize(const Archive& ar) {}
2635 };
2636
2639
2640 // threshold for rank reduction / SVD truncation
2642
2643 // constructor takes target precision
2646 bool operator()(typename rangeT::iterator& it) const {
2647 it->second.consolidate_buffer(targs);
2648 return true;
2649 }
2650 template <typename Archive> void serialize(const Archive& ar) {}
2651 };
2652
2653
2654
2655 template <typename opT>
2659 opT op;
2661 bool operator()(typename rangeT::iterator& it) const {
2662 const keyT& key = it->first;
2663 nodeT& node = it->second;
2664 if (node.has_coeff()) {
2665 const TensorArgs full_args(-1.0,TT_FULL);
2666 change_tensor_type(node.coeff(),full_args);
2667 tensorT& t= node.coeff().full_tensor();
2668 //double before = t.normf();
2669 tensorT values = impl->fcube_for_mul(key, key, t);
2670 op(key, values);
2671 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
2672 t = transform(values,impl->cdata.quad_phiw).scale(scale);
2673 node.coeff()=coeffT(t,impl->get_tensor_args());
2674 //double after = t.normf();
2675 //madness::print("XOP:", key, before, after);
2676 }
2677 return true;
2678 }
2679 template <typename Archive> void serialize(const Archive& ar) {}
2680 };
2681
2682 template <typename Q, typename R>
2683 /// @todo I don't know what this does other than a transform
2684 void vtransform_doit(const std::shared_ptr< FunctionImpl<R,NDIM> >& right,
2685 const Tensor<Q>& c,
2686 const std::vector< std::shared_ptr< FunctionImpl<T,NDIM> > >& vleft,
2687 double tol) {
2688 // To reduce crunch on vectors being transformed each task
2689 // does them in a random order
2690 std::vector<unsigned int> ind(vleft.size());
2691 for (unsigned int i=0; i<vleft.size(); ++i) {
2692 ind[i] = i;
2693 }
2694 for (unsigned int i=0; i<vleft.size(); ++i) {
2695 unsigned int j = RandomValue<int>()%vleft.size();
2696 std::swap(ind[i],ind[j]);
2697 }
2698
2699 typename FunctionImpl<R,NDIM>::dcT::const_iterator end = right->coeffs.end();
2700 for (typename FunctionImpl<R,NDIM>::dcT::const_iterator it=right->coeffs.begin(); it != end; ++it) {
2701 if (it->second.has_coeff()) {
2702 const Key<NDIM>& key = it->first;
2703 const GenTensor<R>& r = it->second.coeff();
2704 double norm = r.normf();
2705 double keytol = truncate_tol(tol,key);
2706
2707 for (unsigned int j=0; j<vleft.size(); ++j) {
2708 unsigned int i = ind[j]; // Random permutation
2709 if (std::abs(norm*c(i)) > keytol) {
2710 implT* left = vleft[i].get();
2711 typename dcT::accessor acc;
2712 bool newnode = left->coeffs.insert(acc,key);
2713 if (newnode && key.level()>0) {
2714 Key<NDIM> parent = key.parent();
2715 if (left->coeffs.is_local(parent))
2716 left->coeffs.send(parent, &nodeT::set_has_children_recursive, left->coeffs, parent);
2717 else
2718 left->coeffs.task(parent, &nodeT::set_has_children_recursive, left->coeffs, parent);
2719
2720 }
2721 nodeT& node = acc->second;
2722 if (!node.has_coeff())
2723 node.set_coeff(coeffT(cdata.v2k,targs));
2724 coeffT& t = node.coeff();
2725 t.gaxpy(1.0, r, c(i));
2726 }
2727 }
2728 }
2729 }
2730 }
2731
2732 /// Refine multiple functions down to the same finest level
2733
2734 /// @param v the vector of functions we are refining.
2735 /// @param key the current node.
2736 /// @param c the vector of coefficients passed from above.
2737 void refine_to_common_level(const std::vector<FunctionImpl<T,NDIM>*>& v,
2738 const std::vector<tensorT>& c,
2739 const keyT key);
2740
2741 /// Inplace operate on many functions (impl's) with an operator within a certain box
2742 /// @param[in] key the key of the current function node (box)
2743 /// @param[in] op the operator
2744 /// @param[in] v the vector of function impl's on which to be operated
2745 template <typename opT>
2746 void multiop_values_doit(const keyT& key, const opT& op, const std::vector<implT*>& v) {
2747 std::vector<tensorT> c(v.size());
2748 for (unsigned int i=0; i<v.size(); i++) {
2749 if (v[i]) {
2750 coeffT cc = coeffs2values(key, v[i]->coeffs.find(key).get()->second.coeff());
2751 c[i]=cc.full_tensor();
2752 }
2753 }
2754 tensorT r = op(key, c);
2755 coeffs.replace(key, nodeT(coeffT(values2coeffs(key, r),targs),false));
2756 }
2757
2758 /// Inplace operate on many functions (impl's) with an operator within a certain box
2759 /// Assumes all functions have been refined down to the same level
2760 /// @param[in] op the operator
2761 /// @param[in] v the vector of function impl's on which to be operated
2762 template <typename opT>
2763 void multiop_values(const opT& op, const std::vector<implT*>& v) {
2764 // rough check on refinement level (ignore non-initialized functions)
2765 for (std::size_t i=1; i<v.size(); ++i) {
2766 if (v[i] and v[i-1]) {
2767 MADNESS_ASSERT(v[i]->coeffs.size()==v[i-1]->coeffs.size());
2768 }
2769 }
2770 typename dcT::iterator end = v[0]->coeffs.end();
2771 for (typename dcT::iterator it=v[0]->coeffs.begin(); it!=end; ++it) {
2772 const keyT& key = it->first;
2773 if (it->second.has_coeff())
2774 world.taskq.add(*this, &implT:: template multiop_values_doit<opT>, key, op, v);
2775 else
2776 coeffs.replace(key, nodeT(coeffT(),true));
2777 }
2778 world.gop.fence();
2779 }
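 /// A minimal sketch of an operator usable with multiop_values(): it receives the
 /// value tensors of all input functions in a box and returns one value tensor,
 /// which the caller converts back to coefficients; the functor name is illustrative only:
 /// \code
 /// struct sum_of_values {
 ///     Tensor<double> operator()(const Key<3>& key, const std::vector< Tensor<double> >& v) const {
 ///         Tensor<double> r = copy(v[0]);                     // values of the first function
 ///         for (std::size_t i=1; i<v.size(); ++i) r += v[i];  // pointwise sum of all functions
 ///         return r;
 ///     }
 /// };
 /// \endcode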
2780
2781 /// Inplace operate on many functions (impl's) with an operator within a certain box
2782
2783 /// @param[in] key the key of the current function node (box)
2784 /// @param[in] op the operator
2785 /// @param[in] vin the vector of function impl's on which to be operated
2786 /// @param[out] vout the resulting vector of function impl's
2787 template <typename opT>
2788 void multi_to_multi_op_values_doit(const keyT& key, const opT& op,
2789 const std::vector<implT*>& vin, std::vector<implT*>& vout) {
2790 std::vector<tensorT> c(vin.size());
2791 for (unsigned int i=0; i<vin.size(); i++) {
2792 if (vin[i]) {
2793 coeffT cc = coeffs2values(key, vin[i]->coeffs.find(key).get()->second.coeff());
2794 c[i]=cc.full_tensor();
2795 }
2796 }
2797 std::vector<tensorT> r = op(key, c);
2798 MADNESS_ASSERT(r.size()==vout.size());
2799 for (std::size_t i=0; i<vout.size(); ++i) {
2800 vout[i]->coeffs.replace(key, nodeT(coeffT(values2coeffs(key, r[i]),targs),false));
2801 }
2802 }
2803
2804 /// Inplace operate on many functions (impl's) with an operator within a certain box
2805
2806 /// Assumes all functions have been refined down to the same level
2807 /// @param[in] op the operator
2808 /// @param[in] vin the vector of function impl's on which to be operated
2809 /// @param[out] vout the resulting vector of function impl's
2810 template <typename opT>
2811 void multi_to_multi_op_values(const opT& op, const std::vector<implT*>& vin,
2812 std::vector<implT*>& vout, const bool fence=true) {
2813 // rough check on refinement level (ignore non-initialized functions)
2814 for (std::size_t i=1; i<vin.size(); ++i) {
2815 if (vin[i] and vin[i-1]) {
2816 MADNESS_ASSERT(vin[i]->coeffs.size()==vin[i-1]->coeffs.size());
2817 }
2818 }
2819 typename dcT::iterator end = vin[0]->coeffs.end();
2820 for (typename dcT::iterator it=vin[0]->coeffs.begin(); it!=end; ++it) {
2821 const keyT& key = it->first;
2822 if (it->second.has_coeff())
2823 world.taskq.add(*this, &implT:: template multi_to_multi_op_values_doit<opT>,
2824 key, op, vin, vout);
2825 else {
2826 // fill result functions with empty box in this key
2827 for (implT* it2 : vout) {
2828 it2->coeffs.replace(key, nodeT(coeffT(),true));
2829 }
2830 }
2831 }
2832 if (fence) world.gop.fence();
2833 }
2834
2835 /// Transforms a vector of functions left[i] = sum[j] right[j]*c[j,i] using sparsity
2836 /// @param[in] vright vector of functions (impl's) on which to be transformed
2837 /// @param[in] c the tensor (matrix) transformer
2838 /// @param[in] vleft vector of the *newly* transformed functions (impl's)
2839 template <typename Q, typename R>
2840 void vtransform(const std::vector< std::shared_ptr< FunctionImpl<R,NDIM> > >& vright,
2841 const Tensor<Q>& c,
2842 const std::vector< std::shared_ptr< FunctionImpl<T,NDIM> > >& vleft,
2843 double tol,
2844 bool fence) {
2845 for (unsigned int j=0; j<vright.size(); ++j) {
2846 world.taskq.add(*this, &implT:: template vtransform_doit<Q,R>, vright[j], copy(c(j,_)), vleft, tol);
2847 }
2848 if (fence)
2849 world.gop.fence();
2850 }
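 /// A minimal usage sketch for vtransform(), assuming an instance impl of the result
 /// type, vectors vright/vleft of shared_ptr<FunctionImpl> and a transformation
 /// matrix c with c(j,i) the weight of vright[j] in vleft[i] (names illustrative only):
 /// \code
 /// impl.vtransform(vright, c, vleft, 1e-8, true);   // vleft[i] <- sum_j vright[j]*c(j,i)
 /// \endcode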
2851
2852 /// Unary operation applied inplace to the values with optional refinement and fence
2853 /// @param[in] op the unary operator for the values
2854 template <typename opT>
2855 void unary_op_value_inplace(const opT& op, bool fence) {
2856 typedef Range<typename dcT::iterator> rangeT;
2857 typedef do_unary_op_value_inplace<opT> xopT;
2858 world.taskq.for_each<rangeT,xopT>(rangeT(coeffs.begin(), coeffs.end()), xopT(this,op));
2859 if (fence)
2860 world.gop.fence();
2861 }
2862
2863 // Multiplication assuming same distribution and recursive descent
2864 /// Both left and right functions are in the scaling function basis
2865 /// @param[in] key the key to the current function node (box)
2866 /// @param[in] left the function impl associated with the left function
2867 /// @param[in] lcin the scaling function coefficients associated with the
2868 /// current box in the left function
2869 /// @param[in] vrightin the vector of function impl's associated with
2870 /// the vector of right functions
2871 /// @param[in] vrcin the vector scaling function coefficients associated with the
2872 /// current box in the right functions
2873 /// @param[out] vresultin the vector of resulting functions (impl's)
2874 template <typename L, typename R>
2875 void mulXXveca(const keyT& key,
2876 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
2877 const std::vector<const FunctionImpl<R,NDIM>*> vrightin,
2878 const std::vector< Tensor<R> >& vrcin,
2879 const std::vector<FunctionImpl<T,NDIM>*> vresultin,
2880 double tol) {
2881 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
2882 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
2883
2884 double lnorm = 1e99;
2885 Tensor<L> lc = lcin;
2886 if (lc.size() == 0) {
2887 literT it = left->coeffs.find(key).get();
2888 MADNESS_ASSERT(it != left->coeffs.end());
2889 lnorm = it->second.get_norm_tree();
2890 if (it->second.has_coeff())
2891 lc = it->second.coeff().full_tensor_copy();
2892 }
2893
2894 // Loop thru RHS functions seeing if anything can be multiplied
2895 std::vector<FunctionImpl<T,NDIM>*> vresult;
2896 std::vector<const FunctionImpl<R,NDIM>*> vright;
2897 std::vector< Tensor<R> > vrc;
2898 vresult.reserve(vrightin.size());
2899 vright.reserve(vrightin.size());
2900 vrc.reserve(vrightin.size());
2901
2902 for (unsigned int i=0; i<vrightin.size(); ++i) {
2903 FunctionImpl<T,NDIM>* result = vresultin[i];
2904 const FunctionImpl<R,NDIM>* right = vrightin[i];
2905 Tensor<R> rc = vrcin[i];
2906 double rnorm;
2907 if (rc.size() == 0) {
2908 riterT it = right->coeffs.find(key).get();
2909 MADNESS_ASSERT(it != right->coeffs.end());
2910 rnorm = it->second.get_norm_tree();
2911 if (it->second.has_coeff())
2912 rc = it->second.coeff().full_tensor_copy();
2913 }
2914 else {
2915 rnorm = rc.normf();
2916 }
2917
2918 if (rc.size() && lc.size()) { // Yipee!
2919 result->task(world.rank(), &implT:: template do_mul<L,R>, key, lc, std::make_pair(key,rc));
2920 }
2921 else if (tol && lnorm*rnorm < truncate_tol(tol, key)) {
2922 result->coeffs.replace(key, nodeT(coeffT(cdata.vk,targs),false)); // Zero leaf
2923 }
2924 else { // Interior node
2925 result->coeffs.replace(key, nodeT(coeffT(),true));
2926 vresult.push_back(result);
2927 vright.push_back(right);
2928 vrc.push_back(rc);
2929 }
2930 }
2931
2932 if (vresult.size()) {
2933 Tensor<L> lss;
2934 if (lc.size()) {
2935 Tensor<L> ld(cdata.v2k);
2936 ld(cdata.s0) = lc(___);
2937 lss = left->unfilter(ld);
2938 }
2939
2940 std::vector< Tensor<R> > vrss(vresult.size());
2941 for (unsigned int i=0; i<vresult.size(); ++i) {
2942 if (vrc[i].size()) {
2943 Tensor<R> rd(cdata.v2k);
2944 rd(cdata.s0) = vrc[i](___);
2945 vrss[i] = vright[i]->unfilter(rd);
2946 }
2947 }
2948
2949 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
2950 const keyT& child = kit.key();
2951 Tensor<L> ll;
2952
2953 std::vector<Slice> cp = child_patch(child);
2954
2955 if (lc.size())
2956 ll = copy(lss(cp));
2957
2958 std::vector< Tensor<R> > vv(vresult.size());
2959 for (unsigned int i=0; i<vresult.size(); ++i) {
2960 if (vrc[i].size())
2961 vv[i] = copy(vrss[i](cp));
2962 }
2963
2964 woT::task(coeffs.owner(child), &implT:: template mulXXveca<L,R>, child, left, ll, vright, vv, vresult, tol);
2965 }
2966 }
2967 }
2968
2969 /// Multiplication using recursive descent and assuming same distribution
2970 /// Both left and right functions are in the scaling function basis
2971 /// @param[in] key the key to the current function node (box)
2972 /// @param[in] left the function impl associated with the left function
2973 /// @param[in] lcin the scaling function coefficients associated with the
2974 /// current box in the left function
2975 /// @param[in] right the function impl associated with the right function
2976 /// @param[in] rcin the scaling function coefficients associated with the
2977 /// current box in the right function
2978 template <typename L, typename R>
2979 void mulXXa(const keyT& key,
2980 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
2981 const FunctionImpl<R,NDIM>* right,const Tensor<R>& rcin,
2982 double tol) {
2983 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
2984 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
2985
2986 double lnorm=1e99, rnorm=1e99;
2987
2988 Tensor<L> lc = lcin;
2989 if (lc.size() == 0) {
2990 literT it = left->coeffs.find(key).get();
2991 MADNESS_ASSERT(it != left->coeffs.end());
2992 lnorm = it->second.get_norm_tree();
2993 if (it->second.has_coeff())
2994 lc = it->second.coeff().reconstruct_tensor();
2995 }
2996
2997 Tensor<R> rc = rcin;
2998 if (rc.size() == 0) {
2999 riterT it = right->coeffs.find(key).get();
3000 MADNESS_ASSERT(it != right->coeffs.end());
3001 rnorm = it->second.get_norm_tree();
3002 if (it->second.has_coeff())
3003 rc = it->second.coeff().reconstruct_tensor();
3004 }
3005
3006 // both nodes are leaf nodes: multiply and return
3007 if (rc.size() && lc.size()) { // Yipee!
3008 do_mul<L,R>(key, lc, std::make_pair(key,rc));
3009 return;
3010 }
3011
3012 if (tol) {
3013 if (lc.size())
3014 lnorm = lc.normf(); // Otherwise got from norm tree above
3015 if (rc.size())
3016 rnorm = rc.normf();
3017 if (lnorm*rnorm < truncate_tol(tol, key)) {
3018 coeffs.replace(key, nodeT(coeffT(cdata.vk,targs),false)); // Zero leaf node
3019 return;
3020 }
3021 }
3022
3023 // Recur down
3024 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3025
3026 Tensor<L> lss;
3027 if (lc.size()) {
3028 Tensor<L> ld(cdata.v2k);
3029 ld(cdata.s0) = lc(___);
3030 lss = left->unfilter(ld);
3031 }
3032
3033 Tensor<R> rss;
3034 if (rc.size()) {
3035 Tensor<R> rd(cdata.v2k);
3036 rd(cdata.s0) = rc(___);
3037 rss = right->unfilter(rd);
3038 }
3039
3040 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3041 const keyT& child = kit.key();
3042 Tensor<L> ll;
3043 Tensor<R> rr;
3044 if (lc.size())
3045 ll = copy(lss(child_patch(child)));
3046 if (rc.size())
3047 rr = copy(rss(child_patch(child)));
3048
3049 woT::task(coeffs.owner(child), &implT:: template mulXXa<L,R>, child, left, ll, right, rr, tol);
3050 }
3051 }
3052
3053
3054 // Binary operation on values using recursive descent and assuming same distribution
3055 /// Both left and right functions are in the scaling function basis
3056 /// @param[in] key the key to the current function node (box)
3057 /// @param[in] left the function impl associated with the left function
3058 /// @param[in] lcin the scaling function coefficients associated with the
3059 /// current box in the left function
3060 /// @param[in] right the function impl associated with the right function
3061 /// @param[in] rcin the scaling function coefficients associated with the
3062 /// current box in the right function
3063 /// @param[in] op the binary operator
3064 template <typename L, typename R, typename opT>
3065 void binaryXXa(const keyT& key,
3066 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
3067 const FunctionImpl<R,NDIM>* right,const Tensor<R>& rcin,
3068 const opT& op) {
3069 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
3070 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
3071
3072 Tensor<L> lc = lcin;
3073 if (lc.size() == 0) {
3074 literT it = left->coeffs.find(key).get();
3075 MADNESS_ASSERT(it != left->coeffs.end());
3076 if (it->second.has_coeff())
3077 lc = it->second.coeff().reconstruct_tensor();
3078 }
3079
3080 Tensor<R> rc = rcin;
3081 if (rc.size() == 0) {
3082 riterT it = right->coeffs.find(key).get();
3083 MADNESS_ASSERT(it != right->coeffs.end());
3084 if (it->second.has_coeff())
3085 rc = it->second.coeff().reconstruct_tensor();
3086 }
3087
3088 if (rc.size() && lc.size()) { // Yipee!
3089 do_binary_op<L,R>(key, lc, std::make_pair(key,rc), op);
3090 return;
3091 }
3092
3093 // Recur down
3094 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3095
3096 Tensor<L> lss;
3097 if (lc.size()) {
3098 Tensor<L> ld(cdata.v2k);
3099 ld(cdata.s0) = lc(___);
3100 lss = left->unfilter(ld);
3101 }
3102
3103 Tensor<R> rss;
3104 if (rc.size()) {
3105 Tensor<R> rd(cdata.v2k);
3106 rd(cdata.s0) = rc(___);
3107 rss = right->unfilter(rd);
3108 }
3109
3110 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3111 const keyT& child = kit.key();
3112 Tensor<L> ll;
3113 Tensor<R> rr;
3114 if (lc.size())
3115 ll = copy(lss(child_patch(child)));
3116 if (rc.size())
3117 rr = copy(rss(child_patch(child)));
3118
3119 woT::task(coeffs.owner(child), &implT:: template binaryXXa<L,R,opT>, child, left, ll, right, rr, op);
3120 }
3121 }
3122
3123 template <typename Q, typename opT>
3125 typedef typename opT::resultT resultT;
3127 opT op;
3128
3133
3134 Tensor<resultT> operator()(const Key<NDIM>& key, const Tensor<Q>& t) const {
3135 Tensor<Q> invalues = impl_func->coeffs2values(key, t);
3136
3137 Tensor<resultT> outvalues = op(key, invalues);
3138
3139 return impl_func->values2coeffs(key, outvalues);
3140 }
3141
3142 template <typename Archive>
3143 void serialize(Archive& ar) {
3144 ar & impl_func & op;
3145 }
3146 };
3147
3148 /// Out of place unary operation on function impl
3149 /// The skeleton algorithm should resemble something like
3150 ///
3151 /// *this = op(*func)
3152 ///
3153 /// @param[in] key the key of the current function node (box)
3154 /// @param[in] func the function impl on which to be operated
3155 /// @param[in] op the unary operator
3156 template <typename Q, typename opT>
3157 void unaryXXa(const keyT& key,
3158 const FunctionImpl<Q,NDIM>* func, const opT& op) {
3159
3160 // const Tensor<Q>& fc = func->coeffs.find(key).get()->second.full_tensor_copy();
3161 const Tensor<Q> fc = func->coeffs.find(key).get()->second.coeff().reconstruct_tensor();
3162
3163 if (fc.size() == 0) {
3164 // Recur down
3165 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3166 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3167 const keyT& child = kit.key();
3168 woT::task(coeffs.owner(child), &implT:: template unaryXXa<Q,opT>, child, func, op);
3169 }
3170 }
3171 else {
3172 tensorT t=op(key,fc);
3173 coeffs.replace(key, nodeT(coeffT(t,targs),false)); // Leaf node
3174 }
3175 }
3176
3177 /// Multiplies two functions (impl's) together. Delegates to the mulXXa() method
3178 /// @param[in] left pointer to the left function impl
3179 /// @param[in] right pointer to the right function impl
3180 /// @param[in] tol numerical tolerance
3181 template <typename L, typename R>
3182 void mulXX(const FunctionImpl<L,NDIM>* left, const FunctionImpl<R,NDIM>* right, double tol, bool fence) {
3183 if (world.rank() == coeffs.owner(cdata.key0))
3184 mulXXa(cdata.key0, left, Tensor<L>(), right, Tensor<R>(), tol);
3185 if (fence)
3186 world.gop.fence();
3187
3188 //verify_tree();
3189 }
3190
3191 /// Performs binary operation on two functions (impl's). Delegates to the binaryXXa() method
3192 /// @param[in] left pointer to the left function impl
3193 /// @param[in] right pointer to the right function impl
3194 /// @param[in] op the binary operator
3195 template <typename L, typename R, typename opT>
3197 const opT& op, bool fence) {
3198 if (world.rank() == coeffs.owner(cdata.key0))
3199 binaryXXa(cdata.key0, left, Tensor<L>(), right, Tensor<R>(), op);
3200 if (fence)
3201 world.gop.fence();
3202
3203 //verify_tree();
3204 }
3205
3206 /// Performs unary operation on function impl. Delegates to the unaryXXa() method
3207 /// @param[in] func function impl of the operand
3208 /// @param[in] op the unary operator
3209 template <typename Q, typename opT>
3210 void unaryXX(const FunctionImpl<Q,NDIM>* func, const opT& op, bool fence) {
3211 if (world.rank() == coeffs.owner(cdata.key0))
3212 unaryXXa(cdata.key0, func, op);
3213 if (fence)
3214 world.gop.fence();
3215
3216 //verify_tree();
3217 }
3218
3219 /// Performs unary operation on function impl. Delegates to the unaryXXa() method
3220 /// @param[in] func function impl of the operand
3221 /// @param[in] op the unary operator
3222 template <typename Q, typename opT>
3223 void unaryXXvalues(const FunctionImpl<Q,NDIM>* func, const opT& op, bool fence) {
3224 if (world.rank() == coeffs.owner(cdata.key0))
3226 if (fence)
3227 world.gop.fence();
3228
3229 //verify_tree();
3230 }
3231
3232 /// Multiplies a function (impl) with a vector of functions (impl's). Delegates to the
3233 /// mulXXveca() method.
3234 /// @param[in] left pointer to the left function impl
3235 /// @param[in] vright vector of pointers to the right function impl's
3236 /// @param[in] tol numerical tolerance
3237 /// @param[out] vresult vector of pointers to the resulting function impl's
3238 template <typename L, typename R>
3240 const std::vector<const FunctionImpl<R,NDIM>*>& vright,
3241 const std::vector<FunctionImpl<T,NDIM>*>& vresult,
3242 double tol,
3243 bool fence) {
3244 std::vector< Tensor<R> > vr(vright.size());
3245 if (world.rank() == coeffs.owner(cdata.key0))
3246 mulXXveca(cdata.key0, left, Tensor<L>(), vright, vr, vresult, tol);
3247 if (fence)
3248 world.gop.fence();
3249 }
3250
3252
3253 mutable long box_leaf[1000];
3254 mutable long box_interior[1000];
3255
3256 // horrifically non-scalable
3257 void put_in_box(ProcessID from, long nl, long ni) const;
3258
3259 /// Prints summary of data distribution
3260 void print_info() const;
3261
3262 /// Verify tree is properly constructed ... global synchronization involved
3263
3264 /// If an inconsistency is detected, prints a message describing the error and
3265 /// then throws a madness exception.
3266 ///
3267 /// This is a reasonably quick and scalable operation that is
3268 /// useful for debugging and paranoia.
3269 void verify_tree() const;
3270
3271 /// check that parents and children are consistent
3272
3273 /// will not check proper size of coefficients
3274 /// global communication
3275 bool verify_parents_and_children() const;
3276
3277 /// check that the tree state and the coeffs are consistent
3278
3279 /// will not check existence of children and/or parents
3280 /// no communication
3281 bool verify_tree_state_local() const;
3282
3283 /// Walk up the tree returning pair(key,node) for first node with coefficients
3284
3285 /// Three possibilities.
3286 ///
3287 /// 1) The coeffs are present and returned with the key of the containing node.
3288 ///
3289 /// 2) The coeffs are further up the tree ... the request is forwarded up.
3290 ///
3291 /// 3) The coeffs are further down the tree ... an empty tensor is returned.
3292 ///
3293 /// !! This routine is crying out for an optimization to
3294 /// manage the number of messages being sent ... presently
3295 /// each parent is fetched 2^(n*d) times where n is the no. of
3296 /// levels between the level of evaluation and the parent.
3297 /// Alternatively, reimplement multiply as a downward tree
3298 /// walk and just pass the parent down. Slightly less
3299 /// parallelism but much less communication.
3300 /// @todo Robert .... help!
3301 void sock_it_to_me(const keyT& key,
3302 const RemoteReference< FutureImpl< std::pair<keyT,coeffT> > >& ref) const;
3303 /// As above, except
3304 /// 3) The coeffs are constructed from the avg of nodes further down the tree
3305 /// @todo Robert .... help!
3306 void sock_it_to_me_too(const keyT& key,
3307 const RemoteReference< FutureImpl< std::pair<keyT,coeffT> > >& ref) const;
3308
3309 /// @todo help!
3311 const keyT& key,
3312 const coordT& plotlo, const coordT& plothi, const std::vector<long>& npt,
3313 bool eval_refine) const;
3314
3315
3316 /// Evaluate a cube/slice of points ... plotlo and plothi are already in simulation coordinates
3317 /// No communications
3318 /// @param[in] plotlo the coordinate of the starting point
3319 /// @param[in] plothi the coordinate of the ending point
3320 /// @param[in] npt the number of points in each dimension
3321 Tensor<T> eval_plot_cube(const coordT& plotlo,
3322 const coordT& plothi,
3323 const std::vector<long>& npt,
3324 const bool eval_refine = false) const;
3325
3326
3327 /// Evaluate function only if point is local returning (true,value); otherwise return (false,0.0)
3328
3329 /// maxlevel is the maximum depth to search down to --- the max local depth can be
3330 /// computed with max_local_depth();
3331 std::pair<bool,T> eval_local_only(const Vector<double,NDIM>& xin, Level maxlevel) ;
3332
3333
3334 /// Evaluate the function at a point in \em simulation coordinates
3335
3336 /// Only the invoking process will get the result via the
3337 /// remote reference to a future. Active messages may be sent
3338 /// to other nodes.
3339 void eval(const Vector<double,NDIM>& xin,
3340 const keyT& keyin,
3341 const typename Future<T>::remote_refT& ref);
3342
3343 /// Get the depth of the tree at a point in \em simulation coordinates
3344
3345 /// Only the invoking process will get the result via the
3346 /// remote reference to a future. Active messages may be sent
3347 /// to other nodes.
3348 ///
3349 /// This function is a minimally-modified version of eval()
3350 void evaldepthpt(const Vector<double,NDIM>& xin,
3351 const keyT& keyin,
3352 const typename Future<Level>::remote_refT& ref);
3353
3354 /// Get the rank of the leaf box of the tree at a point in \em simulation coordinates
3355
3356 /// Only the invoking process will get the result via the
3357 /// remote reference to a future. Active messages may be sent
3358 /// to other nodes.
3359 ///
3360 /// This function is a minimally-modified version of eval()
3361 void evalR(const Vector<double,NDIM>& xin,
3362 const keyT& keyin,
3363 const typename Future<long>::remote_refT& ref);
3364
3365
3366 /// Computes norm of low/high-order polyn. coeffs for autorefinement test
3367
3368 /// t is a k^d tensor. In order to screen the autorefinement
3369 /// during multiplication compute the norms of
3370 /// ... lo ... the block of t for all polynomials of order < k/2
3371 /// ... hi ... the block of t for all polynomials of order >= k/2
3372 ///
3373 /// k=5 0,1,2,3,4 --> 0,1,2 ... 3,4
3374 /// k=6 0,1,2,3,4,5 --> 0,1,2 ... 3,4,5
3375 ///
3376 /// k=number of wavelets, so k=5 means max order is 4, so max exactly
3377 /// representable squarable polynomial is of order 2.
3378 void static tnorm(const tensorT& t, double* lo, double* hi);
3379
3380 void static tnorm(const GenTensor<T>& t, double* lo, double* hi);
3381
3382 void static tnorm(const SVDTensor<T>& t, double* lo, double* hi, const int particle);
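/// A minimal illustration (not part of the original source) of how the lo/hi split
/// computed by tnorm() can drive the autorefinement screen described above; the
/// coefficient tensor t and the tolerance tol are assumed to be given:
/// \code
/// double lo = 0.0, hi = 0.0;
/// implT::tnorm(t, &lo, &hi);   // lo: polynomial orders < k/2, hi: orders >= k/2
/// bool refine = (hi > tol);    // large high-order content -> the square needs refinement
/// \endcode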
3383
3384 // This invoked if node has not been autorefined
3385 void do_square_inplace(const keyT& key);
3386
3387 // This invoked if node has been autorefined
3388 void do_square_inplace2(const keyT& parent, const keyT& child, const tensorT& parent_coeff);
3389
3390 /// Always returns false (for when autorefine is not wanted)
3391 bool noautorefine(const keyT& key, const tensorT& t) const;
3392
3393 /// Returns true if this block of coeffs needs autorefining
3394 bool autorefine_square_test(const keyT& key, const nodeT& t) const;
3395
3396 /// Pointwise squaring of function with optional global fence
3397
3398 /// If not autorefining, local computation only if not fencing.
3399 /// If autorefining, may result in asynchronous communication.
3400 void square_inplace(bool fence);
3401 void abs_inplace(bool fence);
3402 void abs_square_inplace(bool fence);
3403
3404 /// is this the same as trickle_down() ?
3405 void sum_down_spawn(const keyT& key, const coeffT& s);
3406
3407 /// After applying the 1d push operator, coeffs must be summed down the tree to restore the correct scaling function coefficients
3408 void sum_down(bool fence);
3409
3410 /// perform this multiplication: h(1,2) = f(1,2) * g(1)
3411 template<size_t LDIM>
3413
3414 static bool randomize() {return false;}
3418
3419 implT* h; ///< the result function h(1,2) = f(1,2) * g(1)
3422 int particle; ///< if g is g(1) or g(2)
3423
3424 multiply_op() : h(), f(), g(), particle(1) {}
3425
3426 multiply_op(implT* h1, const ctT& f1, const ctL& g1, const int particle1)
3427 : h(h1), f(f1), g(g1), particle(particle1) {};
3428
3429 /// return true if this will be a leaf node
3430
3431 /// use generalization of tnorm for a GenTensor
3432 bool screen(const coeffT& fcoeff, const coeffT& gcoeff, const keyT& key) const {
3434 MADNESS_ASSERT(fcoeff.is_svd_tensor());
3437
3438 double glo=0.0, ghi=0.0, flo=0.0, fhi=0.0;
3439 g.get_impl()->tnorm(gcoeff.get_tensor(), &glo, &ghi);
3440 g.get_impl()->tnorm(fcoeff.get_svdtensor(),&flo,&fhi,particle);
3441
3442 double total_hi=glo*fhi + ghi*flo + fhi*ghi;
3443 return (total_hi<h->truncate_tol(h->get_thresh(),key));
3444
3445 }
3446
3447 /// apply this on a FunctionNode of f and g of Key key
3448
3449 /// @param[in] key key for FunctionNode in f and g, (g: broken into particles)
3450 /// @return <this node is a leaf, coefficients of this node>
3451 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
3452
3453 // bool is_leaf=(not fdatum.second.has_children());
3454 // if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3455
3456 // break key into particles (these are the child keys, with f/gdatum come the parent keys)
3457 Key<LDIM> key1,key2;
3458 key.break_apart(key1,key2);
3459 const Key<LDIM> gkey= (particle==1) ? key1 : key2;
3460
3461 // get coefficients of the actual FunctionNode
3462 coeffT coeff1=f.get_impl()->parent_to_child(f.coeff(),f.key(),key);
3463 coeff1.normalize();
3464 const coeffT coeff2=g.get_impl()->parent_to_child(g.coeff(),g.key(),gkey);
3465
3466 // multiplication is done in TT_2D
3467 coeffT coeff1_2D=coeff1.convert(TensorArgs(h->get_thresh(),TT_2D));
3468 coeff1_2D.normalize();
3469
3470 bool is_leaf=screen(coeff1_2D,coeff2,key);
3471 if (key.level()<2) is_leaf=false;
3472
3473 coeffT hcoeff;
3474 if (is_leaf) {
3475
3476 // convert coefficients to values
3477 coeffT hvalues=f.get_impl()->coeffs2values(key,coeff1_2D);
3478 coeffT gvalues=g.get_impl()->coeffs2values(gkey,coeff2);
3479
3480 // perform multiplication
3481 coeffT result_val=h->multiply(hvalues,gvalues,particle-1);
3482
3483 hcoeff=h->values2coeffs(key,result_val);
3484
3485 // conversion on coeffs, not on values, because it implies truncation!
3486 if (not hcoeff.is_of_tensortype(h->get_tensor_type()))
3487 hcoeff=hcoeff.convert(h->get_tensor_args());
3488 }
3489
3490 return std::pair<bool,coeffT> (is_leaf,hcoeff);
3491 }
3492
3493 this_type make_child(const keyT& child) const {
3494
3495 // break key into particles
3496 Key<LDIM> key1, key2;
3497 child.break_apart(key1,key2);
3498 const Key<LDIM> gkey= (particle==1) ? key1 : key2;
3499
3500 return this_type(h,f.make_child(child),g.make_child(gkey),particle);
3501 }
3502
3504 Future<ctT> f1=f.activate();
3506 return h->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3507 &this_type::forward_ctor),h,f1,g1,particle);
3508 }
3509
3510 this_type forward_ctor(implT* h1, const ctT& f1, const ctL& g1, const int particle) {
3511 return this_type(h1,f1,g1,particle);
3512 }
3513
3514 template <typename Archive> void serialize(const Archive& ar) {
3515 ar & h & f & g & particle;
3516 }
3517 };
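/// A minimal usage sketch (not part of the original source), mirroring multiply()
/// further below: the coefficient operator is paired with an insert_op and recursed
/// over the (not yet existing) tree of h starting at the root key; ff and gg are
/// CoeffTrackers of f and g.
/// \code
/// multiply_op<LDIM> coeff_op(h, ff, gg, particle);   // builds h(1,2) = f(1,2) * g(particle)
/// insert_op<T,NDIM> apply_op(h);                     // inserts the new coeffs into h's tree
/// h->task(h->world.rank(),
///         &implT::template forward_traverse<multiply_op<LDIM>, insert_op<T,NDIM> >,
///         coeff_op, apply_op, h->cdata.key0);
/// \endcode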
3518
3519
3520 /// add two functions f and g: result=alpha * f + beta * g
3521 struct add_op {
3522
3525
3526 bool randomize() const {return false;}
3527
3528 /// tracking coeffs of first and second addend
3530 /// prefactor for f, g
3531 double alpha, beta;
3532
3533 add_op() = default;
3534 add_op(const ctT& f, const ctT& g, const double alpha, const double beta)
3535 : f(f), g(g), alpha(alpha), beta(beta){}
3536
3537 /// if we are at the bottom of the trees, return the sum of the coeffs
3538 std::pair<bool,coeffT> operator()(const keyT& key) const {
3539
3540 bool is_leaf=(f.is_leaf() and g.is_leaf());
3541 if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3542
3543 coeffT fcoeff=f.get_impl()->parent_to_child(f.coeff(),f.key(),key);
3544 coeffT gcoeff=g.get_impl()->parent_to_child(g.coeff(),g.key(),key);
3545 coeffT hcoeff=copy(fcoeff);
3546 hcoeff.gaxpy(alpha,gcoeff,beta);
3547 hcoeff.reduce_rank(f.get_impl()->get_tensor_args().thresh);
3548 return std::pair<bool,coeffT> (is_leaf,hcoeff);
3549 }
3550
3551 this_type make_child(const keyT& child) const {
3552 return this_type(f.make_child(child),g.make_child(child),alpha,beta);
3553 }
3554
3555 /// retrieve the coefficients (parent coeffs might be remote)
3557 Future<ctT> f1=f.activate();
3558 Future<ctT> g1=g.activate();
3559 return f.get_impl()->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3561 }
3562
3563 /// taskq-compatible ctor
3564 this_type forward_ctor(const ctT& f1, const ctT& g1, const double alpha, const double beta) {
3565 return this_type(f1,g1,alpha,beta);
3566 }
3567
3568 template <typename Archive> void serialize(const Archive& ar) {
3569 ar & f & g & alpha & beta;
3570 }
3571
3572 };
3573
3574 /// multiply f (a pair function of NDIM) with an orbital g (LDIM=NDIM/2)
3575
3576 /// as in (with h(1,2)=*this) : h(1,2) = g(1) * f(1,2)
3577 /// use tnorm as a measure to determine if f (=*this) must be refined
3578 /// @param[in] f the NDIM function f=f(1,2)
3579 /// @param[in] g the LDIM function g(1) (or g(2))
3580 /// @param[in] particle 1 or 2, as in g(1) or g(2)
3581 template<size_t LDIM>
3582 void multiply(const implT* f, const FunctionImpl<T,LDIM>* g, const int particle) {
3583
3586
3587 typedef multiply_op<LDIM> coeff_opT;
3588 coeff_opT coeff_op(this,ff,gg,particle);
3589
3590 typedef insert_op<T,NDIM> apply_opT;
3591 apply_opT apply_op(this);
3592
3593 keyT key0=f->cdata.key0;
3594 if (world.rank() == coeffs.owner(key0)) {
3596 woT::task(p, &implT:: template forward_traverse<coeff_opT,apply_opT>, coeff_op, apply_op, key0);
3597 }
3598
3600 }
3601
3602 /// Hartree product of two LDIM functions to yield a NDIM = 2*LDIM function
3603 template<size_t LDIM, typename leaf_opT>
3604 struct hartree_op {
3605 bool randomize() const {return false;}
3606
3609
3610 implT* result; ///< where to construct the pair function
3611 ctL p1, p2; ///< tracking coeffs of the two lo-dim functions
3612 leaf_opT leaf_op; ///< determine if a given node will be a leaf node
3613
3614 // ctor
3616 hartree_op(implT* result, const ctL& p11, const ctL& p22, const leaf_opT& leaf_op)
3617 : result(result), p1(p11), p2(p22), leaf_op(leaf_op) {
3618 MADNESS_ASSERT(LDIM+LDIM==NDIM);
3619 }
3620
3621 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
3622
3623 // break key into particles (these are the child keys, with datum1/2 come the parent keys)
3624 Key<LDIM> key1,key2;
3625 key.break_apart(key1,key2);
3626
3627 // this returns the appropriate NS coeffs for key1 and key2 resp.
3628 const coeffT fcoeff=p1.coeff(key1);
3629 const coeffT gcoeff=p2.coeff(key2);
3630 bool is_leaf=leaf_op(key,fcoeff.full_tensor(),gcoeff.full_tensor());
3631 if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3632
3633 // extract the sum coeffs from the NS coeffs
3634 const coeffT s1=fcoeff(p1.get_impl()->cdata.s0);
3635 const coeffT s2=gcoeff(p2.get_impl()->cdata.s0);
3636
3637 // new coeffs are simply the hartree/kronecker/outer product --
3638 coeffT coeff=outer(s1,s2,result->get_tensor_args());
3639 // no post-determination
3640 // is_leaf=leaf_op(key,coeff);
3641 return std::pair<bool,coeffT>(is_leaf,coeff);
3642 }
3643
3644 this_type make_child(const keyT& child) const {
3645
3646 // break key into particles
3647 Key<LDIM> key1, key2;
3648 child.break_apart(key1,key2);
3649
3650 return this_type(result,p1.make_child(key1),p2.make_child(key2),leaf_op);
3651 }
3652
3654 Future<ctL> p11=p1.activate();
3655 Future<ctL> p22=p2.activate();
3656 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3657 &this_type::forward_ctor),result,p11,p22,leaf_op);
3658 }
3659
3660 this_type forward_ctor(implT* result1, const ctL& p11, const ctL& p22, const leaf_opT& leaf_op) {
3661 return this_type(result1,p11,p22,leaf_op);
3662 }
3663
3664 template <typename Archive> void serialize(const Archive& ar) {
3665 ar & result & p1 & p2 & leaf_op;
3666 }
3667 };
3668
3669 /// traverse a non-existing tree
3670
3671 /// part II: activate coeff_op, i.e. retrieve all the necessary remote boxes (communication)
3672 /// @param[in] coeff_op operator making the coefficients that needs activation
3673 /// @param[in] apply_op just passing thru
3674 /// @param[in] key the key we are working on
3675 template<typename coeff_opT, typename apply_opT>
3676 void forward_traverse(const coeff_opT& coeff_op, const apply_opT& apply_op, const keyT& key) const {
3678 Future<coeff_opT> active_coeff=coeff_op.activate();
3679 woT::task(world.rank(), &implT:: template traverse_tree<coeff_opT,apply_opT>, active_coeff, apply_op, key);
3680 }
3681
3682
3683 /// traverse a non-existing tree
3684
3685 /// part I: make the coefficients, process them and continue the recursion if necessary
3686 /// @param[in] coeff_op operator making the coefficients and determining them being leaves
3687 /// @param[in] apply_op operator processing the coefficients
3688 /// @param[in] key the key we are currently working on
3689 template<typename coeff_opT, typename apply_opT>
3690 void traverse_tree(const coeff_opT& coeff_op, const apply_opT& apply_op, const keyT& key) const {
3692
3693 typedef typename std::pair<bool,coeffT> argT;
3694 const argT arg=coeff_op(key);
3695 apply_op.operator()(key,arg.second,arg.first);
3696
3697 const bool has_children=(not arg.first);
3698 if (has_children) {
3699 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3700 const keyT& child=kit.key();
3701 coeff_opT child_op=coeff_op.make_child(child);
3702 // spawn activation where child is local
3703 ProcessID p=coeffs.owner(child);
3704
3705 void (implT::*ft)(const coeff_opT&, const apply_opT&, const keyT&) const = &implT::forward_traverse<coeff_opT,apply_opT>;
3706
3707 woT::task(p, ft, child_op, apply_op, child);
3708 }
3709 }
3710 }
3711
3712
3713 /// given two functions of LDIM, perform the Hartree/Kronecker/outer product
3714
3715 /// |Phi(1,2)> = |phi(1)> x |phi(2)>
3716 /// @param[in] p1 FunctionImpl of particle 1
3717 /// @param[in] p2 FunctionImpl of particle 2
3718 /// @param[in] leaf_op operator determining if a given box will be a leaf
3719 template<std::size_t LDIM, typename leaf_opT>
3720 void hartree_product(const std::vector<std::shared_ptr<FunctionImpl<T,LDIM>>> p1,
3721 const std::vector<std::shared_ptr<FunctionImpl<T,LDIM>>> p2,
3722 const leaf_opT& leaf_op, bool fence) {
3723 MADNESS_CHECK_THROW(p1.size()==p2.size(),"hartree_product: p1 and p2 must have the same size");
3724 for (auto& p : p1) MADNESS_CHECK(p->is_nonstandard() or p->is_nonstandard_with_leaves());
3725 for (auto& p : p2) MADNESS_CHECK(p->is_nonstandard() or p->is_nonstandard_with_leaves());
3726
3727 const keyT key0=cdata.key0;
3728
3729 for (std::size_t i=0; i<p1.size(); ++i) {
3730 if (world.rank() == this->get_coeffs().owner(key0)) {
3731
3732 // prepare the CoeffTracker
3733 CoeffTracker<T,LDIM> iap1(p1[i].get());
3734 CoeffTracker<T,LDIM> iap2(p2[i].get());
3735
3736 // the operator making the coefficients
3737 typedef hartree_op<LDIM,leaf_opT> coeff_opT;
3738 coeff_opT coeff_op(this,iap1,iap2,leaf_op);
3739
3740 // this operator simply inserts the coeffs into this' tree
3741// typedef insert_op<T,NDIM> apply_opT;
3742 typedef accumulate_op<T,NDIM> apply_opT;
3743 apply_opT apply_op(this);
3744
3745 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
3746 coeff_op, apply_op, cdata.key0);
3747
3748 }
3749 }
3750
3752 if (fence) world.gop.fence();
3753 }
3754
3755
3756 template <typename opT, typename R>
3757 void
3759 const opT* op = pop.ptr;
3760 const Level n = key.level();
3761 const double cnorm = c.normf();
3762 const double tol = truncate_tol(thresh, key)*0.1; // ??? why this value????
3763
3765 const Translation lold = lnew[axis];
3766 const Translation maxs = Translation(1)<<n;
3767
3768 int nsmall = 0; // Counts neglected blocks to terminate s loop
3769 for (Translation s=0; s<maxs; ++s) {
3770 int maxdir = s ? 1 : -1;
3771 for (int direction=-1; direction<=maxdir; direction+=2) {
3772 lnew[axis] = lold + direction*s;
3773 if (lnew[axis] >= 0 && lnew[axis] < maxs) { // NON-ZERO BOUNDARY CONDITIONS IGNORED HERE !!!!!!!!!!!!!!!!!!!!
3774 const Tensor<typename opT::opT>& r = op->rnlij(n, s*direction, true);
3775 double Rnorm = r.normf();
3776
3777 if (Rnorm == 0.0) {
3778 return; // Hard zero means finished!
3779 }
3780
3781 if (s <= 1 || r.normf()*cnorm > tol) { // Always do kernel and neighbor
3782 nsmall = 0;
3783 tensorT result = transform_dir(c,r,axis);
3784
3785 if (result.normf() > tol*0.3) {
3786 Key<NDIM> dest(n,lnew);
3787 coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
3788 }
3789 }
3790 else {
3791 ++nsmall;
3792 }
3793 }
3794 else {
3795 ++nsmall;
3796 }
3797 }
3798 if (nsmall >= 4) {
3799 // If we have two negligible blocks in
3800 // succession in each direction, interpret
3801 // this as the operator being zero beyond this point
3802 break;
3803 }
3804 }
3805 }
3806
3807 template <typename opT, typename R>
3808 void
3809 apply_1d_realspace_push(const opT& op, const FunctionImpl<R,NDIM>* f, int axis, bool fence) {
3810 MADNESS_ASSERT(!f->is_compressed());
3811
3812 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator fiterT;
3813 typedef FunctionNode<R,NDIM> fnodeT;
3814 fiterT end = f->coeffs.end();
3815 ProcessID me = world.rank();
3816 for (fiterT it=f->coeffs.begin(); it!=end; ++it) {
3817 const fnodeT& node = it->second;
3818 if (node.has_coeff()) {
3819 const keyT& key = it->first;
3820 const Tensor<R>& c = node.coeff().full_tensor_copy();
3821 woT::task(me, &implT:: template apply_1d_realspace_push_op<opT,R>,
3823 }
3824 }
3825 if (fence) world.gop.fence();
3826 }
3827
3829 const implT* f,
3830 const keyT& key,
3831 const std::pair<keyT,coeffT>& left,
3832 const std::pair<keyT,coeffT>& center,
3833 const std::pair<keyT,coeffT>& right);
3834
3835 void do_diff1(const DerivativeBase<T,NDIM>* D,
3836 const implT* f,
3837 const keyT& key,
3838 const std::pair<keyT,coeffT>& left,
3839 const std::pair<keyT,coeffT>& center,
3840 const std::pair<keyT,coeffT>& right);
3841
3842 // Called by result function to differentiate f
3843 void diff(const DerivativeBase<T,NDIM>* D, const implT* f, bool fence);
3844
3845 /// Returns key of general neighbor enforcing BC
3846
3847 /// Out of volume keys are mapped to enforce the BC as follows.
3848 /// * Periodic BC map back into the volume and return the correct key
3849 /// * non-periodic BC - returns invalid() to indicate out of volume
3850 keyT neighbor(const keyT& key, const keyT& disp, const array_of_bools<NDIM>& is_periodic) const;
3851
3852 /// Returns key of general neighbor that resides in-volume
3853
3854 /// Out of volume keys are mapped to invalid()
3855 keyT neighbor_in_volume(const keyT& key, const keyT& disp) const;
3856
3857 /// find_me. Called by diff_bdry to get coefficients of boundary function
3858 Future< std::pair<keyT,coeffT> > find_me(const keyT& key) const;
3859
3860 /// return a std::pair<key, node>, which MUST exist
3861 std::pair<Key<NDIM>,ShallowNode<T,NDIM> > find_datum(keyT key) const;
3862
3863 /// multiply the ket with a one-electron potential rr(1,2)= f(1,2)*g(1)
3864
3865 /// @param[in] val_ket function values of f(1,2)
3866 /// @param[in] val_pot function values of g(1)
3867 /// @param[in] particle if 0 then g(1), if 1 then g(2)
3868 /// @return the resulting function values
3869 coeffT multiply(const coeffT& val_ket, const coeffT& val_pot, int particle) const;
3870
3871
3872 /// given several coefficient tensors, assemble a result tensor
3873
3874 /// the result looks like: (v(1,2) + v(1) + v(2)) |ket(1,2)>
3875 /// or (v(1,2) + v(1) + v(2)) |p(1) p(2)>
3876 /// i.e. coefficients for the ket and coefficients for the two particles are
3877 /// mutually exclusive. All potential terms are optional, just pass in empty coeffs.
3878 /// @param[in] key the key of the FunctionNode to which these coeffs belong
3879 /// @param[in] coeff_ket coefficients of the ket
3880 /// @param[in] vpotential1 function values of the potential for particle 1
3881 /// @param[in] vpotential2 function values of the potential for particle 2
3882 /// @param[in] veri function values for the 2-particle potential
3883 coeffT assemble_coefficients(const keyT& key, const coeffT& coeff_ket,
3884 const coeffT& vpotential1, const coeffT& vpotential2,
3885 const tensorT& veri) const;
3886
3887
3888
3889 template<std::size_t LDIM>
3893 double error=0.0;
3894 double lo=0.0, hi=0.0, lo1=0.0, hi1=0.0, lo2=0.0, hi2=0.0;
3895
3897 pointwise_multiplier(const Key<NDIM> key, const coeffT& clhs) : coeff_lhs(clhs) {
3899 val_lhs=fcf.coeffs2values(key,coeff_lhs);
3900 error=0.0;
3902 if (coeff_lhs.is_svd_tensor()) {
3905 }
3906 }
3907
3908 /// multiply values of rhs and lhs, result on rhs, rhs and lhs are of the same dimensions
3909 tensorT operator()(const Key<NDIM> key, const tensorT& coeff_rhs) {
3910
3911 MADNESS_ASSERT(coeff_rhs.dim(0)==coeff_lhs.dim(0));
3913
3914 // the tnorm estimate is not tight enough to be efficient, better use oversampling
3915 bool use_tnorm=false;
3916 if (use_tnorm) {
3917 double rlo, rhi;
3918 implT::tnorm(coeff_rhs,&rlo,&rhi);
3919 error = hi*rlo + rhi*lo + rhi*hi;
3920 tensorT val_rhs=fcf.coeffs2values(key, coeff_rhs);
3921 val_rhs.emul(val_lhs.full_tensor_copy());
3922 return fcf.values2coeffs(key,val_rhs);
3923 } else { // use quadrature of order k+1
3924
3925 auto& cdata=FunctionCommonData<T,NDIM>::get(coeff_rhs.dim(0)); // order k
3926 auto& cdata_npt=FunctionCommonData<T,NDIM>::get(coeff_rhs.dim(0)+oversampling); // oversampled order npt=k+oversampling
3927 FunctionCommonFunctionality<T,NDIM> fcf_hi_npt(cdata_npt);
3928
3929 // coeffs2values for rhs: k -> npt=k+1
3930 tensorT coeff1(cdata_npt.vk);
3931 coeff1(cdata.s0)=coeff_rhs; // s0 is smaller than vk!
3932 tensorT val_rhs_k1=fcf_hi_npt.coeffs2values(key,coeff1);
3933
3934 // coeffs2values for lhs: k -> npt=k+1
3935 tensorT coeff_lhs_k1(cdata_npt.vk);
3936 coeff_lhs_k1(cdata.s0)=coeff_lhs.full_tensor_copy();
3937 tensorT val_lhs_k1=fcf_hi_npt.coeffs2values(key,coeff_lhs_k1);
3938
3939 // multiply
3940 val_lhs_k1.emul(val_rhs_k1);
3941
3942 // values2coeffs: npt = k+1-> k
3943 tensorT result1=fcf_hi_npt.values2coeffs(key,val_lhs_k1);
3944
3945 // extract coeffs up to k
3946 tensorT result=copy(result1(cdata.s0));
3947 result1(cdata.s0)=0.0;
3948 error=result1.normf();
3949 return result;
3950 }
3951 }
3952
3953 /// multiply values of rhs and lhs, result on rhs, rhs and lhs are of different dimensions
3954 coeffT operator()(const Key<NDIM> key, const tensorT& coeff_rhs, const int particle) {
3955 Key<LDIM> key1, key2;
3956 key.break_apart(key1,key2);
3957 const long k=coeff_rhs.dim(0);
3959 auto& cdata_lowdim=FunctionCommonData<T,LDIM>::get(k);
3960 FunctionCommonFunctionality<T,LDIM> fcf_lo(cdata_lowdim);
3964
3965
3966 // make hi-dim values from lo-dim coeff_rhs on npt grid points
3967 tensorT ones=tensorT(fcf_lo_npt.cdata.vk);
3968 ones=1.0;
3969
3970 tensorT coeff_rhs_npt1(fcf_lo_npt.cdata.vk);
3971 coeff_rhs_npt1(fcf_lo.cdata.s0)=coeff_rhs;
3972 tensorT val_rhs_npt1=fcf_lo_npt.coeffs2values(key1,coeff_rhs_npt1);
3973
3974 TensorArgs targs(-1.0,TT_2D);
3975 coeffT val_rhs;
3976 if (particle==1) val_rhs=outer(val_rhs_npt1,ones,targs);
3977 if (particle==2) val_rhs=outer(ones,val_rhs_npt1,targs);
3978
3979 // make values from hi-dim coeff_lhs on npt grid points
3980 coeffT coeff_lhs_k1(fcf_hi_npt.cdata.vk,coeff_lhs.tensor_type());
3981 coeff_lhs_k1(fcf_hi.cdata.s0)+=coeff_lhs;
3982 coeffT val_lhs_npt=fcf_hi_npt.coeffs2values(key,coeff_lhs_k1);
3983
3984 // multiply
3985 val_lhs_npt.emul(val_rhs);
3986
3987 // values2coeffs: npt = k+1-> k
3988 coeffT result1=fcf_hi_npt.values2coeffs(key,val_lhs_npt);
3989
3990 // extract coeffs up to k
3991 coeffT result=copy(result1(cdata.s0));
3992 result1(cdata.s0)=0.0;
3993 error=result1.normf();
3994 return result;
3995 }
3996
3997 template <typename Archive> void serialize(const Archive& ar) {
3998 ar & error & lo & lo1 & lo2 & hi & hi1& hi2 & val_lhs & coeff_lhs;
3999 }
4000
4001
4002 };
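/// A minimal usage sketch (not part of the original source), mirroring
/// Vphi_op_NS::make_sum_coeffs() further below: the multiplier precomputes the values
/// of the lhs (the ket) once and can then be applied to several rhs coefficient tensors.
/// \code
/// pointwise_multiplier<LDIM> pm(key, coeff_ket);    // lhs values are computed here
/// coeffT prod = pm(key, cpot1.get_tensor(), 1);     // multiply with a particle-1 potential
/// double err  = pm.error;                           // oversampling error estimate of this product
/// \endcode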
4003
4004 /// given a ket and the 1- and 2-electron potentials, construct the function V phi
4005
4006 /// small memory footprint version of Vphi_op: use the NS form to have information
4007 /// about parent and children to determine if a box is a leaf. This will require
4008 /// compression of the constituent functions, which will lead to more memory usage
4009 /// there, but will avoid oversampling of the result function.
4010 template<typename opT, size_t LDIM>
4011 struct Vphi_op_NS {
4012
4013 bool randomize() const {return true;}
4014
4018
4019 implT* result; ///< where to construct Vphi, no need to track parents
4020 opT leaf_op; ///< deciding if a given FunctionNode will be a leaf node
4021 ctT iaket; ///< the ket of a pair function (exclusive with p1, p2)
4022 ctL iap1, iap2; ///< the particles 1 and 2 (exclusive with ket)
4023 ctL iav1, iav2; ///< potentials for particles 1 and 2
4024 const implT* eri; ///< 2-particle potential, must be on-demand
4025
4026 bool have_ket() const {return iaket.get_impl();}
4027 bool have_v1() const {return iav1.get_impl();}
4028 bool have_v2() const {return iav2.get_impl();}
4029 bool have_eri() const {return eri;}
4030
4031 void accumulate_into_result(const Key<NDIM>& key, const coeffT& coeff) const {
4033 }
4034
4035 // ctor
4037 Vphi_op_NS(implT* result, const opT& leaf_op, const ctT& iaket,
4038 const ctL& iap1, const ctL& iap2, const ctL& iav1, const ctL& iav2,
4039 const implT* eri)
4041 , iav1(iav1), iav2(iav2), eri(eri) {
4042
4043 // 2-particle potential must be on-demand
4045 }
4046
4047 /// make and insert the coefficients into result's tree
4048 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
4049
4051 if(leaf_op.do_pre_screening()){
4052 // this means that we only construct the boxes which are leaf boxes from the other function in the leaf_op
4053 if(leaf_op.pre_screening(key)){
4054 // construct sum_coefficients, insert them and leave
4055 auto [sum_coeff, error]=make_sum_coeffs(key);
4056 accumulate_into_result(key,sum_coeff);
4057 return std::pair<bool,coeffT> (true,coeffT());
4058 }else{
4059 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4060 }
4061 }
4062
4063 // this means that the function has to be completely constructed and not mirrored by another function
4064
4065 // if the initial level is not reached then this must not be a leaf box
4066 size_t il = result->get_initial_level();
4068 if(key.level()<int(il)){
4069 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4070 }
4071 // if further refinement is needed (because we are at a special box, special point)
4072 // and the special_level is not reached then this must not be a leaf box
4073 if(key.level()<result->get_special_level() and leaf_op.special_refinement_needed(key)){
4074 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4075 }
4076
4077 auto [sum_coeff,error]=make_sum_coeffs(key);
4078
4079 // coeffs are leaf (for whatever reason), insert into tree and stop recursion
4080 if(leaf_op.post_screening(key,sum_coeff)){
4081 accumulate_into_result(key,sum_coeff);
4082 return std::pair<bool,coeffT> (true,coeffT());
4083 }
4084
4085 // coeffs are accurate, insert into tree and stop recursion
4086 if(error<result->truncate_tol(result->get_thresh(),key)){
4087 accumulate_into_result(key,sum_coeff);
4088 return std::pair<bool,coeffT> (true,coeffT());
4089 }
4090
4091 // coeffs are inaccurate, continue recursion
4092 std::vector<bool> child_is_leaf(1<<NDIM,false);
4093 return continue_recursion(child_is_leaf,tensorT(),key);
4094 }
4095
4096
4097 /// loop over all children and either insert their sum coeffs or continue the recursion
4098
4099 /// @param[in] child_is_leaf for each child: is it a leaf?
4100 /// @param[in] coeffs coefficient tensor with 2^N sum coeffs (=unfiltered NS coeffs)
4101 /// @param[in] key the key for the NS coeffs (=parent key of the children)
4102 /// @return to avoid recursion outside this return: std::pair<is_leaf,coeff> = true,coeffT()
4103 std::pair<bool,coeffT> continue_recursion(const std::vector<bool> child_is_leaf,
4104 const tensorT& coeffs, const keyT& key) const {
4105 std::size_t i=0;
4106 for (KeyChildIterator<NDIM> kit(key); kit; ++kit, ++i) {
4107 keyT child=kit.key();
4108 bool is_leaf=child_is_leaf[i];
4109
4110 if (is_leaf) {
4111 // insert the sum coeffs
4113 iop(child,coeffT(copy(coeffs(result->child_patch(child))),result->get_tensor_args()),is_leaf);
4114 } else {
4115 this_type child_op=this->make_child(child);
4116 noop<T,NDIM> no;
4117 // spawn activation where child is local
4118 ProcessID p=result->get_coeffs().owner(child);
4119
4120 void (implT::*ft)(const Vphi_op_NS<opT,LDIM>&, const noop<T,NDIM>&, const keyT&) const = &implT:: template forward_traverse< Vphi_op_NS<opT,LDIM>, noop<T,NDIM> >;
4121 result->task(p, ft, child_op, no, child);
4122 }
4123 }
4124 // return the sum coeffs; always return is_leaf=true:
4125 // the recursion is continued within this struct, not outside in traverse_tree!
4126 return std::pair<bool,coeffT> (true,coeffT());
4127 }
4128
4129 tensorT eri_coeffs(const keyT& key) const {
4132 if (eri->get_functor()->provides_coeff()) {
4133 return eri->get_functor()->coeff(key).full_tensor();
4134 } else {
4135 tensorT val_eri(eri->cdata.vk);
4136 eri->fcube(key,*(eri->get_functor()),eri->cdata.quad_x,val_eri);
4137 return eri->values2coeffs(key,val_eri);
4138 }
4139 }
4140
4141 /// the error is computed from the d coefficients of the constituent functions
4142
4143 /// the result is h_n = P_n(f g), computed as h_n \approx Pn(f_n g_n)
4144 /// its error is therefore
4145 /// h_n = (f g)_n = (Pn(f) + Qn(f)) (Pn(g) + Qn(g))
4146 /// = Pn(fn gn) + Qn(fn gn) + Pn(f) Qn(g) + Qn(f) Pn(g) + Qn(f) Qn(g)
4147 /// the first term is what we compute, the second term is estimated by tnorm (in another function),
4148 /// the third to last terms are estimated in this function by e.g.: Qn(f)Pn(g) < ||Qn(f)|| ||Pn(g)||
4150 const tensorT& ceri) const {
4151 double error = 0.0;
4152 Key<LDIM> key1, key2;
4153 key.break_apart(key1,key2);
4154
4155 PROFILE_BLOCK(compute_error);
4156 double dnorm_ket, snorm_ket;
4157 if (have_ket()) {
4158 snorm_ket=iaket.coeff(key).normf();
4159 dnorm_ket=iaket.dnorm(key);
4160 } else {
4161 double s1=iap1.coeff(key1).normf();
4162 double s2=iap2.coeff(key2).normf();
4163 double d1=iap1.dnorm(key1);
4164 double d2=iap2.dnorm(key2);
4165 snorm_ket=s1*s2;
4166 dnorm_ket=s1*d2 + s2*d1 + d1*d2;
4167 }
4168
4169 if (have_v1()) {
4170 double snorm=iav1.coeff(key1).normf();
4171 double dnorm=iav1.dnorm(key1);
4172 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4173 }
4174 if (have_v2()) {
4175 double snorm=iav2.coeff(key2).normf();
4176 double dnorm=iav2.dnorm(key2);
4177 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4178 }
4179 if (have_eri()) {
4180 tensorT s_coeffs=ceri(result->cdata.s0);
4181 double snorm=s_coeffs.normf();
4182 tensorT d=copy(ceri);
4183 d(result->cdata.s0)=0.0;
4184 double dnorm=d.normf();
4185 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4186 }
4187
4188 bool no_potential=not ((have_v1() or have_v2() or have_eri()));
4189 if (no_potential) {
4190 error=dnorm_ket;
4191 }
4192 return error;
4193 }
4194
4195 /// make the sum coeffs for key
4196 std::pair<coeffT,double> make_sum_coeffs(const keyT& key) const {
4198 // break key into particles
4199 Key<LDIM> key1, key2;
4200 key.break_apart(key1,key2);
4201
4202 // bool printme=(int(key.translation()[0])==int(std::pow(key.level(),2)/2)) and
4203 // (int(key.translation()[1])==int(std::pow(key.level(),2)/2)) and
4204 // (int(key.translation()[2])==int(std::pow(key.level(),2)/2));
4205
4206// printme=false;
4207
4208 // get/make all coefficients
4209 const coeffT coeff_ket = (iaket.get_impl()) ? iaket.coeff(key)
4210 : outer(iap1.coeff(key1),iap2.coeff(key2),result->get_tensor_args());
4211 const coeffT cpot1 = (have_v1()) ? iav1.coeff(key1) : coeffT();
4212 const coeffT cpot2 = (have_v2()) ? iav2.coeff(key2) : coeffT();
4213 const tensorT ceri = (have_eri()) ? eri_coeffs(key) : tensorT();
4214
4215 // compute first part of the total error
4216 double refine_error=compute_error_from_inaccurate_refinement(key,ceri);
4217 double error=refine_error;
4218
4219 // prepare the multiplication
4220 pointwise_multiplier<LDIM> pm(key,coeff_ket);
4221
4222 // perform the multiplication, compute tnorm part of the total error
4223 coeffT cresult(result->cdata.vk,result->get_tensor_args());
4224 if (have_v1()) {
4225 cresult+=pm(key,cpot1.get_tensor(),1);
4226 error+=pm.error;
4227 }
4228 if (have_v2()) {
4229 cresult+=pm(key,cpot2.get_tensor(),2);
4230 error+=pm.error;
4231 }
4232
4233 if (have_eri()) {
4234 tensorT result1=cresult.full_tensor_copy();
4235 result1+=pm(key,copy(ceri(result->cdata.s0)));
4236 cresult=coeffT(result1,result->get_tensor_args());
4237 error+=pm.error;
4238 } else {
4240 }
4241 if ((not have_v1()) and (not have_v2()) and (not have_eri())) {
4242 cresult=coeff_ket;
4243 }
4244
4245 return std::make_pair(cresult,error);
4246 }
4247
4248 this_type make_child(const keyT& child) const {
4249
4250 // break key into particles
4251 Key<LDIM> key1, key2;
4252 child.break_apart(key1,key2);
4253
4254 return this_type(result,leaf_op,iaket.make_child(child),
4255 iap1.make_child(key1),iap2.make_child(key2),
4256 iav1.make_child(key1),iav2.make_child(key2),eri);
4257 }
4258
4260 Future<ctT> iaket1=iaket.activate();
4261 Future<ctL> iap11=iap1.activate();
4262 Future<ctL> iap21=iap2.activate();
4263 Future<ctL> iav11=iav1.activate();
4264 Future<ctL> iav21=iav2.activate();
4265 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
4266 &this_type::forward_ctor),result,leaf_op,
4267 iaket1,iap11,iap21,iav11,iav21,eri);
4268 }
4269
4270 this_type forward_ctor(implT* result1, const opT& leaf_op, const ctT& iaket1,
4271 const ctL& iap11, const ctL& iap21, const ctL& iav11, const ctL& iav21,
4272 const implT* eri1) {
4273 return this_type(result1,leaf_op,iaket1,iap11,iap21,iav11,iav21,eri1);
4274 }
4275
4276 /// serialize this (needed for use in recursive_op)
4277 template <typename Archive> void serialize(const Archive& ar) {
4278 ar & iaket & eri & result & leaf_op & iap1 & iap2 & iav1 & iav2;
4279 }
4280 };
4281
4282 /// assemble the function V*phi using V and phi given from the functor
4283
4284 /// this function must have been constructed using the CompositeFunctorInterface.
4285 /// The interface provides one- and two-electron potentials, and the ket, which are
4286 /// assembled to give V*phi.
4287 /// @param[in] leaf_op operator to decide if a given node is a leaf node
4288 /// @param[in] fence global fence
4289 template<typename opT>
4290 void make_Vphi(const opT& leaf_op, const bool fence=true) {
4291
4292 constexpr size_t LDIM=NDIM/2;
4293 MADNESS_CHECK_THROW(NDIM==LDIM*2,"make_Vphi only works for even dimensions");
4294
4295
4296 // keep the functor available, but remove it from the result
4297 // result will return false upon is_on_demand(), which is necessary for the
4298 // CoeffTracker to track the parent coeffs correctly for error_leaf_op
4299 std::shared_ptr< FunctionFunctorInterface<T,NDIM> > func2(this->get_functor());
4300 this->unset_functor();
4301
4303 dynamic_cast<CompositeFunctorInterface<T,NDIM,LDIM>* >(&(*func2));
4305
4306 // make sure everything is in place if no fence is requested
4307 if (fence) func->make_redundant(true); // no-op if already redundant
4308 MADNESS_CHECK_THROW(func->check_redundant(),"make_Vphi requires redundant functions");
4309
4310 // loop over all functions in the functor (either ket or particles)
4311 for (auto& ket : func->impl_ket_vector) {
4312 FunctionImpl<T,NDIM>* eri=func->impl_eri.get();
4313 FunctionImpl<T,LDIM>* v1=func->impl_m1.get();
4314 FunctionImpl<T,LDIM>* v2=func->impl_m2.get();
4315 FunctionImpl<T,LDIM>* p1=nullptr;
4316 FunctionImpl<T,LDIM>* p2=nullptr;
4317 make_Vphi_only(leaf_op,ket.get(),v1,v2,p1,p2,eri,false);
4318 }
4319
4320 for (std::size_t i=0; i<func->impl_p1_vector.size(); ++i) {
4321 FunctionImpl<T,NDIM>* ket=nullptr;
4322 FunctionImpl<T,NDIM>* eri=func->impl_eri.get();
4323 FunctionImpl<T,LDIM>* v1=func->impl_m1.get();
4324 FunctionImpl<T,LDIM>* v2=func->impl_m2.get();
4325 FunctionImpl<T,LDIM>* p1=func->impl_p1_vector[i].get();
4326 FunctionImpl<T,LDIM>* p2=func->impl_p2_vector[i].get();
4327 make_Vphi_only(leaf_op,ket,v1,v2,p1,p2,eri,false);
4328 }
4329
4330 // some post-processing:
4331 // - FunctionNode::accumulate() uses buffer -> add the buffer contents to the actual coefficients
4332 // - the operation constructs sum coefficients on all scales -> sum down to get a well-defined tree-state
4333 if (fence) {
4334 world.gop.fence();
4336 sum_down(true);
4338 }
4339
4340
4341 }
4342
4343 /// assemble the function V*phi using V and phi given from the functor
4344
4345 /// this function must have been constructed using the CompositeFunctorInterface.
4346 /// The interface provides one- and two-electron potentials, and the ket, which are
4347 /// assembled to give V*phi.
4348 /// @param[in] leaf_op operator to decide if a given node is a leaf node
4349 /// @param[in] fence global fence
4350 template<typename opT, std::size_t LDIM>
4355 const bool fence=true) {
4356
4357 // prepare the CoeffTracker
4358 CoeffTracker<T,NDIM> iaket(ket);
4359 CoeffTracker<T,LDIM> iap1(p1);
4360 CoeffTracker<T,LDIM> iap2(p2);
4361 CoeffTracker<T,LDIM> iav1(v1);
4362 CoeffTracker<T,LDIM> iav2(v2);
4363
4364 // the operator making the coefficients
4365 typedef Vphi_op_NS<opT,LDIM> coeff_opT;
4366 coeff_opT coeff_op(this,leaf_op,iaket,iap1,iap2,iav1,iav2,eri);
4367
4368 // this operator simply inserts the coeffs into this' tree
4369 typedef noop<T,NDIM> apply_opT;
4370 apply_opT apply_op;
4371
4372 if (world.rank() == coeffs.owner(cdata.key0)) {
4373 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
4374 coeff_op, apply_op, cdata.key0);
4375 }
4376
4378 if (fence) world.gop.fence();
4379
4380 }
4381
4382 /// Permute the dimensions of f according to map, result on this
4383 void mapdim(const implT& f, const std::vector<long>& map, bool fence);
4384
4385 /// mirror the dimensions of f according to map, result on this
4386 void mirror(const implT& f, const std::vector<long>& mirror, bool fence);
4387
4388 /// map and mirror the translation index and the coefficients, result on this
4389
4390 /// first map the dimensions, then mirror!
4391 /// this = mirror(map(f))
4392 void map_and_mirror(const implT& f, const std::vector<long>& map,
4393 const std::vector<long>& mirror, bool fence);
4394
4395 /// take the average of two functions, similar to: this=0.5*(this+rhs)
4396
4397 /// works in either basis and also in nonstandard form
4398 void average(const implT& rhs);
4399
4400 /// change the tensor type of the coefficients in the FunctionNode
4401
4402 /// @param[in] targs target tensor arguments (threshold and full/low rank)
4403 void change_tensor_type1(const TensorArgs& targs, bool fence);
4404
4405 /// reduce the rank of the coefficients tensors
4406
4407 /// @param[in] targs target tensor arguments (threshold and full/low rank)
4408 void reduce_rank(const double thresh, bool fence);
4409
4410
4411 /// remove all nodes with level higher than n
4412 void chop_at_level(const int n, const bool fence=true);
4413
4414 /// compute norm of s and d coefficients for all nodes
4415 void compute_snorm_and_dnorm(bool fence=true);
4416
4417 /// compute the norm of the wavelet coefficients
4420
4424
4425 bool operator()(typename rangeT::iterator& it) const {
4426 auto& node=it->second;
4427 node.recompute_snorm_and_dnorm(cdata);
4428 return true;
4429 }
4430 };
4431
4432
4433 T eval_cube(Level n, coordT& x, const tensorT& c) const;
4434
4435 /// Transform sum coefficients at level n to sums+differences at level n-1
4436
4437 /// Given scaling function coefficients s[n][l][i] and s[n][l+1][i]
4438 /// return the scaling function and wavelet coefficients at the
4439 /// coarser level. I.e., decompose Vn using Vn = Vn-1 + Wn-1.
4440 /// \code
4441 /// s_i = sum(j) h0_ij*s0_j + h1_ij*s1_j
4442 /// d_i = sum(j) g0_ij*s0_j + g1_ij*s1_j
4443 /// \endcode
4444 /// Returns a new tensor and has no side effects. Works for any
4445 /// number of dimensions.
4446 ///
4447 /// No communication involved.
4448 tensorT filter(const tensorT& s) const;
4449
4450 coeffT filter(const coeffT& s) const;
4451
4452 /// Transform sums+differences at level n to sum coefficients at level n+1
4453
4454 /// Given scaling function and wavelet coefficients (s and d)
4455 /// returns the scaling function coefficients at the next finer
4456 /// level. I.e., reconstruct Vn using Vn = Vn-1 + Wn-1.
4457 /// \code
4458 /// s0 = sum(j) h0_ji*s_j + g0_ji*d_j
4459 /// s1 = sum(j) h1_ji*s_j + g1_ji*d_j
4460 /// \endcode
4461 /// Returns a new tensor and has no side effects
4462 ///
4463 /// If (sonly) ... then ss is only the scaling function coeff (and
4464 /// assume the d are zero). Works for any number of dimensions.
4465 ///
4466 /// No communication involved.
4467 tensorT unfilter(const tensorT& s) const;
4468
4469 coeffT unfilter(const coeffT& s) const;
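/// A small consistency sketch (not part of the original source): filter() and unfilter()
/// are mutually inverse orthogonal transforms on a [2k]^NDIM coefficient block, so a
/// round trip reproduces the input up to round-off; here impl and ss (a tensor of
/// gathered child sum coefficients) are hypothetical names used only for illustration.
/// \code
/// tensorT sd  = impl.filter(ss);    // sums at level n -> sums+differences at level n-1
/// tensorT ss2 = impl.unfilter(sd);  // back to sums at level n; ss2 == ss up to round-off
/// \endcode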
4470
4471 /// downsample the sum coefficients of level n+1 to sum coeffs on level n
4472
4473 /// specialization of the filter method, will yield only the sum coefficients
4474 /// @param[in] key key of level n
4475 /// @param[in] v vector of sum coefficients of level n+1
4476 /// @return sum coefficients on level n in full tensor format
4477 tensorT downsample(const keyT& key, const std::vector< Future<coeffT > >& v) const;
4478
4479 /// upsample the sum coefficients of level n to sum coeffs on level n+1
4480
4481 /// specialization of the unfilter method, will transform only the sum coefficients
4482 /// @param[in] key key of level n+1
4483 /// @param[in] coeff sum coefficients of level n (does NOT belong to key!!)
4484 /// @return sum coefficients on level n+1
4485 coeffT upsample(const keyT& key, const coeffT& coeff) const;
4486
4487 /// Projects old function into new basis (only in reconstructed form)
4488 void project(const implT& old, bool fence);
4489
4491 bool operator()(const implT* f, const keyT& key, const nodeT& t) const {
4492 return true;
4493 }
4494 template <typename Archive> void serialize(Archive& ar) {}
4495 };
4496
4497 template <typename opT>
4498 void refine_op(const opT& op, const keyT& key) {
4499 // Must allow for someone already having autorefined the coeffs
4500 // and we get a write accessor just in case they are already executing
4501 typename dcT::accessor acc;
4502 const auto found = coeffs.find(acc,key);
4503 MADNESS_CHECK(found);
4504 nodeT& node = acc->second;
4505 if (node.has_coeff() && key.level() < max_refine_level && op(this, key, node)) {
4506 coeffT d(cdata.v2k,targs);
4507 d(cdata.s0) += copy(node.coeff());
4508 d = unfilter(d);
4509 node.clear_coeff();
4510 node.set_has_children(true);
4511 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
4512 const keyT& child = kit.key();
4513 coeffT ss = copy(d(child_patch(child)));
4515 // coeffs.replace(child,nodeT(ss,-1.0,false).node_to_low_rank());
4516 coeffs.replace(child,nodeT(ss,-1.0,false));
4517 // Note value -1.0 for norm tree to indicate result of refinement
4518 }
4519 }
4520 }
4521
4522 template <typename opT>
4523 void refine_spawn(const opT& op, const keyT& key) {
4524 nodeT& node = coeffs.find(key).get()->second;
4525 if (node.has_children()) {
4526 for (KeyChildIterator<NDIM> kit(key); kit; ++kit)
4527 woT::task(coeffs.owner(kit.key()), &implT:: template refine_spawn<opT>, op, kit.key(), TaskAttributes::hipri());
4528 }
4529 else {
4530 woT::task(coeffs.owner(key), &implT:: template refine_op<opT>, op, key);
4531 }
4532 }
4533
4534 // Refine in real space according to local user-defined criterion
4535 template <typename opT>
4536 void refine(const opT& op, bool fence) {
4537 if (world.rank() == coeffs.owner(cdata.key0))
4538 woT::task(coeffs.owner(cdata.key0), &implT:: template refine_spawn<opT>, op, cdata.key0, TaskAttributes::hipri());
4539 if (fence)
4540 world.gop.fence();
4541 }
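/// A minimal sketch (not part of the original source) of a user-defined criterion that
/// can be passed to refine(); it mirrors the trivial predicate defined above (an
/// operator()(const implT*, const keyT&, const nodeT&) plus serialize()). The norm
/// threshold 1e-4 is an arbitrary illustrative value.
/// \code
/// struct refine_if_large {
///     bool operator()(const implT* f, const keyT& key, const nodeT& t) const {
///         return t.has_coeff() && t.coeff().normf() > 1e-4;   // refine boxes with large coeffs
///     }
///     template <typename Archive> void serialize(Archive& ar) {}
/// };
/// // impl.refine(refine_if_large(), true);
/// \endcode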
4542
4543 bool exists_and_has_children(const keyT& key) const;
4544
4545 bool exists_and_is_leaf(const keyT& key) const;
4546
4547
4548 void broaden_op(const keyT& key, const std::vector< Future <bool> >& v);
4549
4550 // For each local node sets value of norm tree, snorm and dnorm to 0.0
4551 void zero_norm_tree();
4552
4553 // Broaden tree
4554 void broaden(const array_of_bools<NDIM>& is_periodic, bool fence);
4555
4556 /// sum all the contributions from all scales after applying an operator in mod-NS form
4557 void trickle_down(bool fence);
4558
4559 /// sum all the contributions from all scales after applying an operator in mod-NS form
4560
4561 /// cf reconstruct_op
4562 void trickle_down_op(const keyT& key, const coeffT& s);
4563
4564 /// reconstruct this tree -- respects fence
4565 void reconstruct(bool fence);
4566
4567 void change_tree_state(const TreeState finalstate, bool fence=true);
4568
4569 // Invoked on node where key is local
4570 // void reconstruct_op(const keyT& key, const tensorT& s);
4571 void reconstruct_op(const keyT& key, const coeffT& s, const bool accumulate_NS=true);
4572
4573 /// compress the wave function
4574
4575 /// after application there will be sum coefficients at the root level,
4576 /// and difference coefficients at all other levels; furthermore:
4577 /// @param[in] nonstandard keep sum coeffs at all other levels, except leaves
4578 /// @param[in] keepleaves keep sum coeffs (but no diff coeffs) at leaves
4579 /// @param[in] redundant keep only sum coeffs at all levels, discard difference coeffs
4580// void compress(bool nonstandard, bool keepleaves, bool redundant, bool fence);
4581 void compress(const TreeState newstate, bool fence);
4582
4583 /// Invoked on node where key is local
4584 Future<std::pair<coeffT,double> > compress_spawn(const keyT& key, bool nonstandard, bool keepleaves,
4585 bool redundant1);
4586
4587 private:
4588 /// convert this to redundant, i.e. have sum coefficients on all levels
4589 void make_redundant(const bool fence);
4590 public:
4591
4592 /// convert this from redundant to standard reconstructed form
4593 void undo_redundant(const bool fence);
4594
4595 void remove_internal_coefficients(const bool fence);
4596 void remove_leaf_coefficients(const bool fence);
4597
4598
4599 /// compute for each FunctionNode the norm of the function inside that node
4600 void norm_tree(bool fence);
4601
4602 double norm_tree_op(const keyT& key, const std::vector< Future<double> >& v);
4603
4605
4606 /// truncate using a tree in reconstructed form
4607
4608 /// must be invoked where key is local
4609 Future<coeffT> truncate_reconstructed_spawn(const keyT& key, const double tol);
4610
4611 /// given the sum coefficients of all children, truncate or not
4612
4613 /// @return new sum coefficients (empty if internal, not empty if new leaf); might delete its children
4614 coeffT truncate_reconstructed_op(const keyT& key, const std::vector< Future<coeffT > >& v, const double tol);
4615
4616 /// calculate the wavelet coefficients using the sum coefficients of all child nodes
4617
4618 /// also compute the norm tree for all nodes
4619 /// @param[in] key this's key
4620 /// @param[in] v sum coefficients of the child nodes
4621 /// @param[in] nonstandard keep the sum coefficients with the wavelet coefficients
4622 /// @param[in] redundant keep only the sum coefficients, discard the wavelet coefficients
4623 /// @return the sum coefficients
4624 std::pair<coeffT,double> compress_op(const keyT& key, const std::vector< Future<std::pair<coeffT,double>> >& v, bool nonstandard);
4625
4626
4627 /// similar to compress_op, but insert only the sum coefficients in the tree
4628
4629 /// also compute the norm tree for all nodes
4630 /// @param[in] key this's key
4631 /// @param[in] v sum coefficients of the child nodes
4632 /// @return the sum coefficients
4633 std::pair<coeffT,double> make_redundant_op(const keyT& key,const std::vector< Future<std::pair<coeffT,double> > >& v);
4634
4635 /// Changes non-standard compressed form to standard compressed form
4636 void standard(bool fence);
4637
4638 /// Changes non-standard compressed form to standard compressed form
4641
4642 // threshold for rank reduction / SVD truncation
4644
4645 // constructor takes target precision
4646 do_standard() = default;
4648
4649 //
4650 bool operator()(typename rangeT::iterator& it) const {
4651
4652 const keyT& key = it->first;
4653 nodeT& node = it->second;
4654 if (key.level()> 0 && node.has_coeff()) {
4655 if (node.has_children()) {
4656 // Zero out scaling coeffs
4657 MADNESS_ASSERT(node.coeff().dim(0)==2*impl->get_k());
4658 node.coeff()(impl->cdata.s0)=0.0;
4659 node.reduceRank(impl->targs.thresh);
4660 } else {
4661 // Deleting both scaling and wavelet coeffs
4662 node.clear_coeff();
4663 }
4664 }
4665 return true;
4666 }
4667 template <typename Archive> void serialize(const Archive& ar) {
4668 MADNESS_EXCEPTION("no serialization of do_standard",1);
4669 }
4670 };
4671
4672
4673 /// laziness
4674 template<size_t OPDIM>
4675 struct do_op_args {
4678 double tol, fac, cnorm;
4679
4680 do_op_args() = default;
4681 do_op_args(const Key<OPDIM>& key, const Key<OPDIM>& d, const keyT& dest, double tol, double fac, double cnorm)
4682 : key(key), d(d), dest(dest), tol(tol), fac(fac), cnorm(cnorm) {}
4683 template <class Archive>
4684 void serialize(Archive& ar) {
4685 ar & archive::wrap_opaque(this,1);
4686 }
4687 };
4688
4689 /// for fine-grain parallelism: call the apply method of an operator in a separate task
4690
4691 /// @param[in] op the operator working on our function
4692 /// @param[in] c full rank tensor holding the NS coefficients
4693 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4694 template <typename opT, typename R, size_t OPDIM>
4695 void do_apply_kernel(const opT* op, const Tensor<R>& c, const do_op_args<OPDIM>& args) {
4696
4697 tensorT result = op->apply(args.key, args.d, c, args.tol/args.fac/args.cnorm);
4698
4699 // Screen here to reduce communication cost of negligible data
4700 // and also to ensure we don't needlessly widen the tree when
4701 // applying the operator
4702 if (result.normf()> 0.3*args.tol/args.fac) {
4704 //woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4705 // UGLY BUT ADDED THE OPTIMIZATION BACK IN HERE EXPLICITLY/
4706 if (args.dest == world.rank()) {
4707 coeffs.send(args.dest, &nodeT::accumulate, result, coeffs, args.dest);
4708 }
4709 else {
4711 }
4712 }
4713 }
4714
4715 /// same as do_apply_kernel, but use full rank tensors as input and low rank tensors as output
4716
4717 /// @param[in] op the operator working on our function
4718 /// @param[in] c full rank tensor holding the NS coefficients
4719 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4720 /// @param[in] apply_targs TensorArgs with tightened threshold for accumulation
4721 /// @return nothing, but accumulate the result tensor into the destination node
4722 template <typename opT, typename R, size_t OPDIM>
4723 double do_apply_kernel2(const opT* op, const Tensor<R>& c, const do_op_args<OPDIM>& args,
4724 const TensorArgs& apply_targs) {
4725
4726 tensorT result_full = op->apply(args.key, args.d, c, args.tol/args.fac/args.cnorm);
4727 const double norm=result_full.normf();
4728
4729 // Screen here to reduce communication cost of negligible data
4730 // and also to ensure we don't needlessly widen the tree when
4731 // applying the operator
4732 // OPTIMIZATION NEEDED HERE ... CHANGING THIS TO TASK NOT SEND REMOVED
4733 // BUILTIN OPTIMIZATION TO SHORTCIRCUIT MSG IF DATA IS LOCAL
4734 if (norm > 0.3*args.tol/args.fac) {
4735
4736 small++;
4737 //double cpu0=cpu_time();
4738 coeffT result=coeffT(result_full,apply_targs);
4739 MADNESS_ASSERT(result.is_full_tensor() or result.is_svd_tensor());
4740 //double cpu1=cpu_time();
4741 //timer_lr_result.accumulate(cpu1-cpu0);
4742
4743 coeffs.task(args.dest, &nodeT::accumulate, result, coeffs, args.dest, apply_targs,
4745
4746 //woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4747 }
4748 return norm;
4749 }
4750
4751
4752
4753 /// same as do_apply_kernel2, but use low rank tensors as input and low rank tensors as output
4754
4755 /// @param[in] op the operator working on our function
4756 /// @param[in] coeff low rank tensor (GenTensor) holding the NS coefficients
4757 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4758 /// @param[in] apply_targs TensorArgs with tightened threshold for accumulation
4759 /// @return nothing, but accumulate the result tensor into the destination node
4760 template <typename opT, typename R, size_t OPDIM>
4761 double do_apply_kernel3(const opT* op, const GenTensor<R>& coeff, const do_op_args<OPDIM>& args,
4762 const TensorArgs& apply_targs) {
4763
4764 coeffT result;
4765 if (2*OPDIM==NDIM) result= op->apply2_lowdim(args.key, args.d, coeff,
4766 args.tol/args.fac/args.cnorm, args.tol/args.fac);
4767 if (OPDIM==NDIM) result = op->apply2(args.key, args.d, coeff,
4768 args.tol/args.fac/args.cnorm, args.tol/args.fac);
4769
4770 const double result_norm=result.svd_normf();
4771
4772 if (result_norm> 0.3*args.tol/args.fac) {
4773 small++;
4774
4775 double cpu0=cpu_time();
4776 if (not result.is_of_tensortype(targs.tt)) result=result.convert(targs);
4777 double cpu1=cpu_time();
4778 timer_lr_result.accumulate(cpu1-cpu0);
4779
4780 // accumulate also expects result in SVD form
4781 coeffs.task(args.dest, &nodeT::accumulate, result, coeffs, args.dest, apply_targs,
4783// woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4784
4785 }
4786 return result_norm;
4787
4788 }
4789
4790 // volume of n-dimensional sphere of radius R
4791 double vol_nsphere(int n, double R) {
4792 return std::pow(madness::constants::pi,n*0.5)*std::pow(R,n)/std::tgamma(1+0.5*n);
4793 }
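// Worked example (not part of the original source), assuming NDIM=3:
// vol_nsphere(3, 1.5) = pi^1.5 * 1.5^3 / tgamma(2.5) = (4/3)*pi*1.5^3 ~= 14.1,
// i.e. the default value of "fac" used in do_apply() for sufficiently high wavelet order.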
4794
4795
4796 /// apply an operator on the coeffs c (at node key)
4797
4798 /// the result is accumulated inplace to this's tree at various FunctionNodes
4799 /// @param[in] op the operator to act on the source function
4800 /// @param[in] key key of the source FunctionNode of f which is processed
4801 /// @param[in] c coeffs of the FunctionNode of f which is processed
4802 template <typename opT, typename R>
4803 void do_apply(const opT* op, const keyT& key, const Tensor<R>& c) {
4805
4806 // working assumption here WAS that the operator is
4807 // isotropic and monotonically decreasing with distance
4808 // ... however, now we are using derivative Gaussian
4809 // expansions (and also non-cubic boxes) isotropic is
4810 // violated. While not strictly monotonically decreasing,
4811 // the derivative gaussian is still such that once it
4812 // becomes negligible we are in the asymptotic region.
4813
4814 typedef typename opT::keyT opkeyT;
4815 constexpr auto opdim = opT::opdim;
4816 const opkeyT source = op->get_source_key(key);
4817
4818 // Tuning here is based on observation that with
4819 // sufficiently high-order wavelet relative to the
4820 // precision, that only nearest neighbor boxes contribute,
4821 // whereas for low-order wavelets more neighbors will
4822 // contribute. Sufficiently high is picked as
4823 // k>=2-log10(eps) which is our empirical rule for
4824 // efficiency/accuracy and code instrumentation has
4825 // previously indicated that (in 3D) just unit
4826 // displacements are invoked. The error decays as R^-(k+1),
4827 // and the number of boxes increases as R^d.
4828 //
4829 // Fac is the expected number of contributions to a given
4830 // box, so the error permitted per contribution will be
4831 // tol/fac
4832
4833 // radius of shell (nearest neighbor is diameter of 3 boxes, so radius=1.5)
4834 double radius = 1.5 + 0.33 * std::max(0.0, 2 - std::log10(thresh) -
4835 k); // 0.33 was 0.5
4836 //double radius = 2.5;
4837 double fac = vol_nsphere(NDIM, radius);
4838 // previously fac=10.0 selected empirically constrained by qmprop
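// Worked numbers (not part of the original source), assuming thresh=1e-6 and NDIM=3:
//   k=8: radius = 1.5 + 0.33*max(0, 2+6-8) = 1.5  -> fac = vol_nsphere(3,1.50) ~= 14
//   k=5: radius = 1.5 + 0.33*max(0, 2+6-5) = 2.49 -> fac = vol_nsphere(3,2.49) ~= 65
// i.e. low-order wavelets admit contributions from more neighbor boxes, so each
// individual contribution is screened against the tighter tolerance tol/fac.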
4839
4840 double cnorm = c.normf();
4841
4842 // BC handling:
4843 // - if operator is lattice-summed then treat this as nonperiodic (i.e. tell neighbor() to stay in simulation cell)
4844 // - if operator is NOT lattice-summed then obey BC (i.e. tell neighbor() to go outside the simulation cell along periodic dimensions)
4845 // - BUT the user can override which dimensions the operator treats as periodic (e.g. `op.set_domain_periodicity({true,true,true})`), so the question is: which dimensions of this function are treated as periodic by op?
4846 const array_of_bools<NDIM> this_is_treated_by_op_as_periodic =
4847 (op->particle() == 1)
4848 ? array_of_bools<NDIM>{false}.or_front(
4849 op->domain_is_periodic())
4850 : array_of_bools<NDIM>{false}.or_back(
4851 op->domain_is_periodic());
4852
4853 const auto default_distance_squared = [&](const auto &displacement)
4854 -> std::uint64_t {
4855 return displacement.distsq_bc(op->lattice_summed());
4856 };
4857 const auto default_skip_predicate = [&](const auto &displacement)
4858 -> bool {
4859 return false;
4860 };
4861 const auto for_each = [&](const auto &displacements,
4862 const auto &distance_squared,
4863 const auto &skip_predicate) -> std::optional<std::uint64_t> {
4864
4865 // used to screen estimated and actual contributions
4866 //const double tol = truncate_tol(thresh, key);
4867 //const double tol = 0.1*truncate_tol(thresh, key);
4868 const double tol = truncate_tol(thresh, key);
4869
4870 // assume isotropic decaying kernel, screen in shell-wise fashion by
4871 // monitoring the decay of magnitude of contribution norms with the
4872 // distance ... as soon as we have processed a shell of displacements with at
4873 // least one displacement landing inside the simulation domain (see neighbor()) and
4874 // all of these in-domain displacements produced negligible contributions, we stop.
4875 // a contribution is negligible if ||op|| * ||c|| <= tol / fac,
4876 // where fac accounts for the expected number of contributions to a given box
4877 int nvalid = 1; // Counts #valid at each distance
4878 int nused = 1; // Counts #used at each distance
4879 std::optional<std::uint64_t> distsq;
4880
4881 // displacements to the kernel range boundary are typically of similar magnitude (modulo some variation),
4882 // so estimate the norm of the resulting contributions once via the probing displacement and skip them all if it is negligible
4883 if constexpr (std::is_same_v<std::decay_t<decltype(displacements)>,BoxSurfaceDisplacementRange<opdim>>) {
4884 const auto &probing_displacement =
4885 displacements.probing_displacement();
4886 const double opnorm =
4887 op->norm(key.level(), probing_displacement, source);
4888 if (cnorm * opnorm <= tol / fac) {
4889 return {};
4890 }
4891 }
4892
4893 const auto disp_end = displacements.end();
4894 for (auto disp_it = displacements.begin(); disp_it != disp_end;
4895 ++disp_it) {
4896 const auto &displacement = *disp_it;
4897 if (skip_predicate(displacement)) continue;
4898
4899 keyT d;
4900 Key<NDIM - opdim> nullkey(key.level());
4901 MADNESS_ASSERT(op->particle() == 1 || op->particle() == 2);
4902 if (op->particle() == 1)
4903 d = displacement.merge_with(nullkey);
4904 else
4905 d = nullkey.merge_with(displacement);
4906
4907 // shell-wise screening: assumes displacements are grouped into shells sorted so that the operator decays with the shell index. N.B. a lattice-summed decaying kernel is periodic (i.e. it does not decay w.r.t. r), so loop over shells of displacements sorted by distances modulated by periodicity (Key::distsq_bc)
4908 const uint64_t dsq = distance_squared(displacement);
4909 if (!distsq ||
4910 dsq != *distsq) { // Moved to next shell of neighbors
4911 if (nvalid > 0 && nused == 0 && dsq > 1) {
4912 // Have at least done the input box and all first
4913 // nearest neighbors, and none of the last set
4914 // of neighbors made significant contributions. Thus,
4915 // assuming monotonic decrease, we are done.
4916 break;
4917 }
4918 nused = 0;
4919 nvalid = 0;
4920 distsq = dsq;
4921 }
4922
4923 keyT dest = neighbor(key, d, this_is_treated_by_op_as_periodic);
4924 if (dest.is_valid()) {
4925 nvalid++;
4926 const double opnorm = op->norm(key.level(), displacement, source);
4927
4928 if (cnorm * opnorm > tol / fac) {
4929 tensorT result =
4930 op->apply(source, displacement, c, tol / fac / cnorm);
4931 if (result.normf() > 0.3 * tol / fac) {
4932 if (coeffs.is_local(dest))
4933 coeffs.send(dest, &nodeT::accumulate2, result, coeffs,
4934 dest);
4935 else
4936 coeffs.task(dest, &nodeT::accumulate2, result, coeffs,
4937 dest);
4938 nused++;
4939 }
4940 }
4941 }
4942 }
4943
4944 return distsq;
4945 };
4946
4947 // process "standard" displacements, screening assumes monotonic decay of the kernel
4948 // list of displacements sorted in order of increasing distance
4949 // N.B. if op is lattice-summed use periodic displacements, else use
4950 // non-periodic even if op treats any modes of this as periodic
4951 const std::vector<opkeyT> &disp = op->get_disp(key.level());
4952 const auto max_distsq_reached = for_each(disp, default_distance_squared, default_skip_predicate);
4953
4954 // for range-restricted kernels displacements to the boundary of the kernel range also need to be included
4955 // N.B. hard range restriction will result in slow decay of operator matrix elements for the displacements
4956 // to the range boundary, should use soft restriction or sacrifice precision
4957 if (op->range_restricted() && key.level() >= 1) {
4958
4959 std::array<std::optional<std::int64_t>, opdim> box_radius;
4960 std::array<std::optional<std::int64_t>, opdim> surface_thickness;
4961 auto &range = op->get_range();
4962 for (int d = 0; d != opdim; ++d) {
4963 if (range[d]) {
4964 box_radius[d] = range[d].N();
4965 surface_thickness[d] = range[d].finite_soft() ? 1 : 0;
4966 }
4967 }
4968
4969 std::optional<BoxSurfaceDisplacementFilter<opdim>> filter;
4970 // skip surface displacements that would take us outside of the domain and/or were already included in the regular displacements
4971 // N.B. for lattice-summed axes the "filter" also maps the displacement back into the simulation cell
4972 if (max_distsq_reached)
4973 filter = BoxSurfaceDisplacementFilter<opdim>(/* domain_is_infinite= */ op->domain_is_periodic(), /* domain_is_periodic= */ op->lattice_summed(), range, default_distance_squared, *max_distsq_reached);
4974
4975 // this range iterates over the entire surface layer(s), and provides a probing displacement that can be used to screen out the entire box
4976 auto opkey = op->particle() == 1 ? key.template extract_front<opdim>() : key.template extract_back<opdim>();
4977 BoxSurfaceDisplacementRange<opdim>
4978 range_boundary_face_displacements(opkey, box_radius,
4979 surface_thickness,
4980 op->lattice_summed(), // along lattice-summed axes treat the box as periodic, make displacements to one side of the box
4981 filter);
4982 for_each(
4983 range_boundary_face_displacements,
4984 // surface displacements are not screened, all are included
4985 [](const auto &displacement) -> std::uint64_t { return 0; },
4986 default_skip_predicate);
4987 }
4988 }
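// The shell-wise screening used in do_apply above, reduced to its core logic (a sketch only;
// Displacement, norm_estimate and accumulate are placeholder names, not MADNESS classes):
//   std::optional<std::uint64_t> shell;   // distance^2 of the shell being processed
//   int nused = 1;                        // contributions made in the current shell
//   for (const Displacement& d : displacements_sorted_by_distance) {
//       if (!shell || d.distsq() != *shell) {         // entering the next shell
//           if (nused == 0 && d.distsq() > 1) break;  // previous shell was negligible: done
//           nused = 0;
//           shell = d.distsq();
//       }
//       if (norm_estimate(d) > tol/fac) { accumulate(d); ++nused; }
//   }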
4989
4990
4991 /// apply an operator on f to return this
4992 template <typename opT, typename R>
4993 void apply(opT& op, const FunctionImpl<R,NDIM>& f, bool fence) {
4995 MADNESS_ASSERT(!op.modified());
4996 typename dcT::const_iterator end = f.coeffs.end();
4997 for (typename dcT::const_iterator it=f.coeffs.begin(); it!=end; ++it) {
4998 // looping through all the coefficients in the source
4999 const keyT& key = it->first;
5000 const FunctionNode<R,NDIM>& node = it->second;
5001 if (node.has_coeff()) {
5002 if (node.coeff().dim(0) != k /* i.e. not a leaf */ || op.doleaves) {
5004// woT::task(p, &implT:: template do_apply<opT,R>, &op, key, node.coeff()); //.full_tensor_copy() ????? why copy ????
5005 woT::task(p, &implT:: template do_apply<opT,R>, &op, key, node.coeff().reconstruct_tensor());
5006 }
5007 }
5008 }
5009 if (fence)
5010 world.gop.fence();
5011
5013// this->compressed=true;
5014// this->nonstandard=true;
5015// this->redundant=false;
5016
5017 }
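// How this member is typically reached (a sketch via the public interface; rho_func and the
// parameter values are illustrative, not taken from this file):
//   real_function_3d rho = real_factory_3d(world).f(rho_func);
//   real_convolution_3d op = CoulombOperator(world, 1e-4, 1e-6);   // lo, thresh
//   real_function_3d v = apply(op, rho);        // ends up in FunctionImpl::apply / do_apply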
5018
5019
5020
5021 /// apply an operator on the coeffs c (at node key)
5022
5023 /// invoked by result; the result is accumulated in place into this's tree at various FunctionNodes
5024 /// @param[in] op the operator to act on the source function
5025 /// @param[in] key key of the source FunctionNode of f which is processed (see "source")
5026 /// @param[in] coeff coeffs of FunctionNode being processed
5027 /// @param[in] do_kernel true: do the 0-disp only; false: do everything but the kernel
5028 /// @return the max norm of the contributions; will modify existing or insert new nodes in this's tree
5029 template <typename opT, typename R>
5030 double do_apply_directed_screening(const opT* op, const keyT& key, const coeffT& coeff,
5031 const bool& do_kernel) {
5033 // insert timer here
5034 typedef typename opT::keyT opkeyT;
5035
5036 // screening: contains all displacement keys that had small result norms
5037 std::list<opkeyT> blacklist;
5038
5039 constexpr auto opdim=opT::opdim;
5040 Key<NDIM-opdim> nullkey(key.level());
5041
5042 // source is that part of key that corresponds to those dimensions being processed
5043 const opkeyT source=op->get_source_key(key);
5044
5045 const double tol = truncate_tol(thresh, key);
5046
5047 // fac is the square root of the number of boxes in the first shell of neighbors (3^NDIM)
5048 double fac=std::pow(3,NDIM*0.5);
5049 double cnorm = coeff.normf();
5050
5051 // for accumulation: keep slightly tighter TensorArgs
5052 TensorArgs apply_targs(targs);
5053 apply_targs.thresh=tol/fac*0.03;
5054
5055 double maxnorm=0.0;
5056
5057 // for the kernel it may be more efficient to do the convolution in full rank
5058 tensorT coeff_full;
5059 // for partial application (exchange operator) it's more efficient to
5060 // do SVD tensors instead of tensortrains, because addition in apply
5061 // can be done in full form for the specific particle
5062 coeffT coeff_SVD=coeff.convert(TensorArgs(-1.0,TT_2D));
5063#ifdef HAVE_GENTENSOR
5064 coeff_SVD.get_svdtensor().orthonormalize(tol*GenTensor<T>::fac_reduce());
5065#endif
5066
5067 // list of displacements sorted in order of increasing distance
5068 // N.B. if op is lattice-summed gives periodic displacements, else uses
5069 // non-periodic even if op treats any modes of this as periodic
5070 const std::vector<opkeyT>& disp = Displacements<opdim>().get_disp(key.level(), op->lattice_summed());
5071
5072 for (typename std::vector<opkeyT>::const_iterator it=disp.begin(); it != disp.end(); ++it) {
5073 const opkeyT& d = *it;
5074
5075 const int shell=d.distsq_bc(op->lattice_summed());
5076 if (do_kernel and (shell>0)) break;
5077 if ((not do_kernel) and (shell==0)) continue;
5078
5079 keyT disp1;
5080 if (op->particle()==1) disp1=it->merge_with(nullkey);
5081 else if (op->particle()==2) disp1=nullkey.merge_with(*it);
5082 else {
5083 MADNESS_EXCEPTION("confused particle in operator??",1);
5084 }
5085
5086 keyT dest = neighbor_in_volume(key, disp1);
5087
5088 if (not dest.is_valid()) continue;
5089
5090 // directed screening
5091 // working assumption here is that the operator is isotropic and
5092 // monotonically decreasing with distance
5093 bool screened=false;
5094 typename std::list<opkeyT>::const_iterator it2;
5095 for (it2=blacklist.begin(); it2!=blacklist.end(); it2++) {
5096 if (d.is_farther_out_than(*it2)) {
5097 screened=true;
5098 break;
5099 }
5100 }
5101 if (not screened) {
5102
5103 double opnorm = op->norm(key.level(), d, source);
5104 double norm=0.0;
5105
5106 if (cnorm*opnorm> tol/fac) {
5107
5108 double cost_ratio=op->estimate_costs(source, d, coeff_SVD, tol/fac/cnorm, tol/fac);
5109 // cost_ratio=1.5; // force low rank
5110 // cost_ratio=0.5; // force full rank
5111
5112 if (cost_ratio>0.0) {
5113
5114 do_op_args<opdim> args(source, d, dest, tol, fac, cnorm);
5115 norm=0.0;
5116 if (cost_ratio<1.0) {
5117 if (not coeff_full.has_data()) coeff_full=coeff.full_tensor_copy();
5118 norm=do_apply_kernel2(op, coeff_full,args,apply_targs);
5119 } else {
5120 if (2*opdim==NDIM) { // apply operator on one particle only
5121 norm=do_apply_kernel3(op,coeff_SVD,args,apply_targs);
5122 } else {
5123 norm=do_apply_kernel3(op,coeff,args,apply_targs);
5124 }
5125 }
5126 maxnorm=std::max(norm,maxnorm);
5127 }
5128
5129 } else if (shell >= 12) {
5130 break; // Assumes monotonic decay beyond nearest neighbor
5131 }
5132 if (norm<0.3*tol/fac) blacklist.push_back(d);
5133 }
5134 }
5135 return maxnorm;
5136 }
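// The directed screening above in a nutshell (sketch; apply_displacement stands for the
// do_apply_kernel2/3 dispatch): once a displacement yielded a negligible contribution, any
// displacement that is farther out in every direction can be skipped via the blacklist.
//   std::list<opkeyT> blacklist;
//   for (const opkeyT& d : displacements) {
//       const bool screened = std::any_of(blacklist.begin(), blacklist.end(),
//                                         [&](const opkeyT& b) { return d.is_farther_out_than(b); });
//       if (screened) continue;
//       const double norm = apply_displacement(d);
//       if (norm < 0.3*tol/fac) blacklist.push_back(d);
//   }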
5137
5138
5139 /// similar to apply, but for low rank coeffs
5140 template <typename opT, typename R>
5141 void apply_source_driven(opT& op, const FunctionImpl<R,NDIM>& f, bool fence) {
5143
5144 MADNESS_ASSERT(not op.modified());
5145 // looping through all the coefficients of the source f
5146 typename dcT::const_iterator end = f.get_coeffs().end();
5147 for (typename dcT::const_iterator it=f.get_coeffs().begin(); it!=end; ++it) {
5148
5149 const keyT& key = it->first;
5150 const coeffT& coeff = it->second.coeff();
5151
5152 if (coeff.has_data() and (coeff.rank()!=0)) {
5154 woT::task(p, &implT:: template do_apply_directed_screening<opT,R>, &op, key, coeff, true);
5155 woT::task(p, &implT:: template do_apply_directed_screening<opT,R>, &op, key, coeff, false);
5156 }
5157 }
5158 if (fence) world.gop.fence();
5160 }
5161
5162 /// after apply we need to do some cleanup;
5163
5164 /// forces fence
5165 double finalize_apply();
5166
5167 /// after summing up we need to do some cleanup;
5168
5169 /// forces fence
5170 void finalize_sum();
5171
5172 /// traverse a non-existing tree, make its coeffs and apply an operator
5173
5174 /// invoked by result
5175 /// here we use the fact that the hi-dim NS coefficients on all scales are exactly
5176 /// the outer product of the underlying low-dim functions (also in NS form),
5177 /// so we don't need to construct the full hi-dim tree and then turn it into NS form.
5178 /// @param[in] apply_op the operator acting on the NS tree
5179 /// @param[in] fimpl the funcimpl of the function of particle 1
5180 /// @param[in] gimpl the funcimpl of the function of particle 2
5181 template<typename opT, std::size_t LDIM>
5182 void recursive_apply(opT& apply_op, const FunctionImpl<T,LDIM>* fimpl,
5183 const FunctionImpl<T,LDIM>* gimpl, const bool fence) {
5184
5185 //print("IN RECUR2");
5186 const keyT& key0=cdata.key0;
5187
5188 if (world.rank() == coeffs.owner(key0)) {
5189
5190 CoeffTracker<T,LDIM> ff(fimpl);
5191 CoeffTracker<T,LDIM> gg(gimpl);
5192
5193 typedef recursive_apply_op<opT,LDIM> coeff_opT;
5194 coeff_opT coeff_op(this,ff,gg,&apply_op);
5195
5196 typedef noop<T,NDIM> apply_opT;
5197 apply_opT apply_op;
5198
5200 woT::task(p, &implT:: template forward_traverse<coeff_opT,apply_opT>, coeff_op, apply_op, key0);
5201
5202 }
5203 if (fence) world.gop.fence();
5205 }
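// The fact exploited above, spelled out (sketch): if f(1,2) = f1(1)*f2(2), then on every box the
// NS coefficient tensor of f is the outer (Kronecker) product of the NS coefficient tensors of
// f1 and f2,
//   d^n_{l1 l2}(f) = d^n_{l1}(f1) (x) d^n_{l2}(f2),
// which is what the outer(fcoeff,gcoeff,result->targs) call in recursive_apply_op::operator()
// computes, so the full 2*LDIM-dimensional NS tree never has to be built explicitly.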
5206
5207 /// recursive part of recursive_apply
5208 template<typename opT, std::size_t LDIM>
5209 struct recursive_apply_op {
5210 bool randomize() const {return true;}
5211
5212 typedef recursive_apply_op<opT,LDIM> this_type;
5213 typedef CoeffTracker<T,LDIM> ctL;
5214
5215 mutable implT* result;
5216 ctL iaf, iag;
5217 const opT* apply_op;
5218
5219 // ctor
5223 const opT* apply_op) : result(result), iaf(iaf), iag(iag), apply_op(apply_op)
5224 {
5225 MADNESS_ASSERT(LDIM+LDIM==NDIM);
5226 }
5228 iag(other.iag), apply_op(other.apply_op) {}
5229
5230
5231 /// make the NS-coefficients and send off the application of the operator
5232
5233 /// @return a std::pair<bool,coeffT>(is_leaf,coeffT())
5234 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
5235
5236 // World& world=result->world;
5237 // break key into particles (these are the child keys, with datum1/2 come the parent keys)
5238 Key<LDIM> key1,key2;
5239 key.break_apart(key1,key2);
5240
5241 // the lo-dim functions should be in full tensor form
5242 const tensorT fcoeff=iaf.coeff(key1).full_tensor();
5243 const tensorT gcoeff=iag.coeff(key2).full_tensor();
5244
5245 // would this be a leaf node? If so, then its sum coeffs have already been
5246 // processed by the parent node's wavelet coeffs. Therefore we won't
5247 // process it any more.
5249 bool is_leaf=leaf_op(key,fcoeff,gcoeff);
5250
5251 if (not is_leaf) {
5252 // new coeffs are simply the hartree/kronecker/outer product --
5253 const std::vector<Slice>& s0=iaf.get_impl()->cdata.s0;
5254 const coeffT coeff = (apply_op->modified())
5255 ? outer(copy(fcoeff(s0)),copy(gcoeff(s0)),result->targs)
5256 : outer(fcoeff,gcoeff,result->targs);
5257
5258 // now send off the application
5259 tensorT coeff_full;
5261 double norm0=result->do_apply_directed_screening<opT,T>(apply_op, key, coeff, true);
5262
5263 result->task(p,&implT:: template do_apply_directed_screening<opT,T>,
5264 apply_op,key,coeff,false);
5265
5266 return finalize(norm0,key,coeff);
5267
5268 } else {
5269 return std::pair<bool,coeffT> (is_leaf,coeffT());
5270 }
5271 }
5272
5273 /// sole purpose is to wait for the kernel norm, wrap it and send it back to caller
5274 std::pair<bool,coeffT> finalize(const double kernel_norm, const keyT& key,
5275 const coeffT& coeff) const {
5276 const double thresh=result->get_thresh()*0.1;
5277 bool is_leaf=(kernel_norm<result->truncate_tol(thresh,key));
5278 if (key.level()<2) is_leaf=false;
5279 return std::pair<bool,coeffT> (is_leaf,coeff);
5280 }
5281
5282
5283 this_type make_child(const keyT& child) const {
5284
5285 // break key into particles
5286 Key<LDIM> key1, key2;
5287 child.break_apart(key1,key2);
5288
5289 return this_type(result,iaf.make_child(key1),iag.make_child(key2),apply_op);
5290 }
5291
5292 Future<this_type> activate() const {
5293 Future<ctL> f1=iaf.activate();
5294 Future<ctL> g1=iag.activate();
5295 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5296 &this_type::forward_ctor),result,f1,g1,apply_op);
5297 }
5298
5299 this_type forward_ctor(implT* r, const ctL& f1, const ctL& g1,
5300 const opT* apply_op1) {
5301 return this_type(r,f1,g1,apply_op1);
5302 }
5303
5304 template <typename Archive> void serialize(const Archive& ar) {
5305 ar & result & iaf & iag & apply_op;
5306 }
5307 };
5308
5309 /// traverse an existing tree and apply an operator
5310
5311 /// invoked by result
5312 /// @param[in] apply_op the operator acting on the NS tree
5313 /// @param[in] fimpl the funcimpl of the source function
5314 /// @param[in] rimpl a dummy function for recursive_op to insert data
5315 template<typename opT>
5316 void recursive_apply(opT& apply_op, const implT* fimpl, implT* rimpl, const bool fence) {
5317
5318 print("IN RECUR1");
5319
5320 const keyT& key0=cdata.key0;
5321
5322 if (world.rank() == coeffs.owner(key0)) {
5323
5324 typedef recursive_apply_op2<opT> coeff_opT;
5325 coeff_opT coeff_op(this,fimpl,&apply_op);
5326
5327 typedef noop<T,NDIM> apply_opT;
5328 apply_opT apply_op;
5329
5330 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
5331 coeff_op, apply_op, cdata.key0);
5332
5333 }
5334 if (fence) world.gop.fence();
5336 }
5337
5338 /// recursive part of recursive_apply
5339 template<typename opT>
5340 struct recursive_apply_op2 {
5341 bool randomize() const {return true;}
5342
5343 typedef recursive_apply_op2<opT> this_type;
5344 typedef CoeffTracker<T,NDIM> ctT;
5345 typedef std::pair<bool,coeffT> argT;
5346
5347 mutable implT* result;
5348 ctT iaf; /// need this for randomization
5349 const opT* apply_op;
5350
5351 // ctor
5355
5357 iaf(other.iaf), apply_op(other.apply_op) {}
5358
5359
5360 /// send off the application of the operator
5361
5362 /// the first (core) neighbor (i.e. the box itself) is processed
5363 /// immediately, all other ones are shoved into the taskq
5364 /// @return a pair<bool,coeffT>(is_leaf,coeffT())
5365 argT operator()(const Key<NDIM>& key) const {
5366
5367 const coeffT& coeff=iaf.coeff();
5368
5369 if (coeff.has_data()) {
5370
5371 // now send off the application for all neighbor boxes
5373 result->task(p,&implT:: template do_apply_directed_screening<opT,T>,
5374 apply_op, key, coeff, false);
5375
5376 // process the core box
5377 double norm0=result->do_apply_directed_screening<opT,T>(apply_op,key,coeff,true);
5378
5379 if (iaf.is_leaf()) return argT(true,coeff);
5380 return finalize(norm0,key,coeff,result);
5381
5382 } else {
5383 const bool is_leaf=true;
5384 return argT(is_leaf,coeffT());
5385 }
5386 }
5387
5388 /// sole purpose is to wait for the kernel norm, wrap it and send it back to caller
5389 argT finalize(const double kernel_norm, const keyT& key,
5390 const coeffT& coeff, const implT* r) const {
5391 const double thresh=r->get_thresh()*0.1;
5392 bool is_leaf=(kernel_norm<r->truncate_tol(thresh,key));
5393 if (key.level()<2) is_leaf=false;
5394 return argT(is_leaf,coeff);
5395 }
5396
5397
5398 this_type make_child(const keyT& child) const {
5399 return this_type(result,iaf.make_child(child),apply_op);
5400 }
5401
5402 /// retrieve the coefficients (parent coeffs might be remote)
5403 Future<this_type> activate() const {
5404 Future<ctT> f1=iaf.activate();
5405
5406// Future<ctL> g1=g.activate();
5407// return h->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5408// &this_type::forward_ctor),h,f1,g1,particle);
5409
5410 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5411 &this_type::forward_ctor),result,f1,apply_op);
5412 }
5413
5414 /// taskq-compatible ctor
5415 this_type forward_ctor(implT* result1, const ctT& iaf1, const opT* apply_op1) {
5416 return this_type(result1,iaf1,apply_op1);
5417 }
5418
5419 template <typename Archive> void serialize(const Archive& ar) {
5420 ar & result & iaf & apply_op;
5421 }
5422 };
5423
5424 /// Returns the square of the error norm in the box labeled by key
5425
5426 /// Assumed to be invoked locally but it would be easy to eliminate
5427 /// this assumption
5428 template <typename opT>
5429 double err_box(const keyT& key, const nodeT& node, const opT& func,
5430 int npt, const Tensor<double>& qx, const Tensor<double>& quad_phit,
5431 const Tensor<double>& quad_phiw) const {
5432
5433 std::vector<long> vq(NDIM);
5434 for (std::size_t i=0; i<NDIM; ++i)
5435 vq[i] = npt;
5436 tensorT fval(vq,false), work(vq,false), result(vq,false);
5437
5438 // Compute the "exact" function in this volume at npt points
5439 // where npt is usually this->npt+1.
5440 fcube(key, func, qx, fval);
5441
5442 // Transform into the scaling function basis of order npt
5443 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
5444 fval = fast_transform(fval,quad_phiw,result,work).scale(scale);
5445
5446 // Subtract to get the error ... the original coeffs are in the order k
5447 // basis but we just computed the coeffs in the order npt(=k+1) basis
5448 // so we can either use slices or an iterator macro.
5449 const tensorT coeff = node.coeff().full_tensor_copy();
5450 ITERATOR(coeff,fval(IND)-=coeff(IND););
5451 // flo note: we do want to keep a full tensor here!
5452
5453 // Compute the norm of what remains
5454 double err = fval.normf();
5455 return err*err;
5456 }
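// In formulas (a sketch of what err_box returns for the box B labeled by key):
//   err^2 = int_B | f(x) - sum_i c_i phi_i(x) |^2 dx,
// approximated by projecting the "exact" f onto the order-npt (= k+1) scaling-function basis of B
// and subtracting the stored order-k coefficients before taking the Frobenius norm.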
5457
5458 template <typename opT>
5459 struct do_err_box {
5460 const implT* impl;
5461 const opT* func;
5462 int npt;
5463 Tensor<double> qx;
5464 Tensor<double> quad_phit;
5465 Tensor<double> quad_phiw;
5466 public:
5467 do_err_box() = default;
5468
5472
5475
5476 double operator()(typename dcT::const_iterator& it) const {
5477 const keyT& key = it->first;
5478 const nodeT& node = it->second;
5479 if (node.has_coeff())
5480 return impl->err_box(key, node, *func, npt, qx, quad_phit, quad_phiw);
5481 else
5482 return 0.0;
5483 }
5484
5485 double operator()(double a, double b) const {
5486 return a+b;
5487 }
5488
5489 template <typename Archive>
5490 void serialize(const Archive& ar) {
5491 MADNESS_EXCEPTION("not yet", 1);
5492 }
5493 };
5494
5495 /// Returns the sum of squares of errors from local info ... no comms
5496 template <typename opT>
5497 double errsq_local(const opT& func) const {
5499 // Make quadrature rule of higher order
5500 const int npt = cdata.npt + 1;
5501 Tensor<double> qx, qw, quad_phi, quad_phiw, quad_phit;
5502 FunctionCommonData<T,NDIM>::_init_quadrature(k+1, npt, qx, qw, quad_phi, quad_phiw, quad_phit);
5503
5504 typedef Range<typename dcT::const_iterator> rangeT;
5505 rangeT range(coeffs.begin(), coeffs.end());
5506 return world.taskq.reduce< double,rangeT,do_err_box<opT> >(range,
5507 do_err_box<opT>(this, &func, npt, qx, quad_phit, quad_phiw));
5508 }
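// Typical use of errsq_local (a sketch; `impl` and `exact_f` are placeholder names):
//   double err2 = impl.errsq_local(exact_f);   // sum of squared box errors, local contributions only
//   impl.world.gop.sum(err2);                  // global reduction over all ranks (collective)
//   double err  = std::sqrt(err2);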
5509
5510 /// Returns \c int(f(x),x) in local volume
5511 T trace_local() const;
5512
5513 struct do_norm2sq_local {
5514 double operator()(typename dcT::const_iterator& it) const {
5515 const nodeT& node = it->second;
5516 if (node.has_coeff()) {
5517 double norm = node.coeff().normf();
5518 return norm*norm;
5519 }
5520 else {
5521 return 0.0;
5522 }
5523 }
5524
5525 double operator()(double a, double b) const {
5526 return (a+b);
5527 }
5528
5529 template <typename Archive> void serialize(const Archive& ar) {
5530 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5531 }
5532 };
5533
5534
5535 /// Returns the square of the local norm ... no comms
5536 double norm2sq_local() const;
5537
5538 /// compute the inner product of this range with other
5539 template<typename R>
5540 struct do_inner_local {
5541 const FunctionImpl<R,NDIM>* other;
5542 bool leaves_only;
5543 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5544
5547 resultT operator()(typename dcT::const_iterator& it) const {
5548
5549 resultT sum=0.0;
5550 const keyT& key=it->first;
5551 const nodeT& fnode = it->second;
5552 if (fnode.has_coeff()) {
5553 if (other->coeffs.probe(it->first)) {
5554 const FunctionNode<R,NDIM>& gnode = other->coeffs.find(key).get()->second;
5555 if (gnode.has_coeff()) {
5556 if (gnode.coeff().dim(0) != fnode.coeff().dim(0)) {
5557 madness::print("INNER", it->first, gnode.coeff().dim(0),fnode.coeff().dim(0));
5558 MADNESS_EXCEPTION("functions have different k or compress/reconstruct error", 0);
5559 }
5560 if (leaves_only) {
5561 if (gnode.is_leaf() or fnode.is_leaf()) {
5562 sum += fnode.coeff().trace_conj(gnode.coeff());
5563 }
5564 } else {
5565 sum += fnode.coeff().trace_conj(gnode.coeff());
5566 }
5567 }
5568 }
5569 }
5570 return sum;
5571 }
5572
5573 resultT operator()(resultT a, resultT b) const {
5574 return (a+b);
5575 }
5576
5577 template <typename Archive> void serialize(const Archive& ar) {
5578 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5579 }
5580 };
5581
5582 /// Returns the inner product ASSUMING same distribution
5583
5584 /// handles compressed and redundant form
5585 template <typename R>
5589 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5590
5591 // make sure the states of the trees are consistent
5594 return world.taskq.reduce<resultT,rangeT,do_inner_local<R> >
5596 }
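// Normally reached through the public interface (sketch): for Functions f and g with the same k
// and consistent tree states,
//   double ovlp = inner(f, g);
// reduces do_inner_local over the locally owned nodes and then sums the per-rank contributions.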
5597
5598
5599 /// compute the inner product of this range with other
5600 template<typename R>
5601 struct do_inner_local_on_demand {
5602 const FunctionImpl<T,NDIM>* bra;
5603 const FunctionImpl<R,NDIM>* ket;
5604 bool leaves_only=true;
5605 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5606
5610 resultT operator()(typename dcT::const_iterator& it) const {
5611
5612 constexpr std::size_t LDIM=std::max(NDIM/2,std::size_t(1));
5613
5614 const keyT& key=it->first;
5615 const nodeT& fnode = it->second;
5616 if (not fnode.has_coeff()) return resultT(0.0); // probably internal nodes
5617
5618 // assuming all boxes (esp the low-dim ones) are local, i.e. the functions are replicated
5619 auto find_valid_parent = [](auto& key, auto& impl, auto&& find_valid_parent) {
5620 MADNESS_CHECK(impl->get_coeffs().owner(key)==impl->world.rank()); // make sure everything is local!
5621 if (impl->get_coeffs().probe(key)) return key;
5622 auto parentkey=key.parent();
5623 return find_valid_parent(parentkey, impl, find_valid_parent);
5624 };
5625
5626 // returns coefficients, empty if no functor present
5627 auto get_coeff = [&find_valid_parent](const auto& key, const auto& v_impl) {
5628 if ((v_impl.size()>0) and v_impl.front().get()) {
5629 auto impl=v_impl.front();
5630
5631// bool have_impl=impl.get();
5632// if (have_impl) {
5633 auto parentkey = find_valid_parent(key, impl, find_valid_parent);
5634 MADNESS_CHECK(impl->get_coeffs().probe(parentkey));
5635 typename decltype(impl->coeffs)::accessor acc;
5636 impl->get_coeffs().find(acc,parentkey);
5637 auto parentcoeff=acc->second.coeff();
5638 auto coeff=impl->parent_to_child(parentcoeff, parentkey, key);
5639 return coeff;
5640 } else {
5641 // get type of vector elements
5642 typedef typename std::decay_t<decltype(v_impl)>::value_type::element_type::typeT S;
5643// typedef typename std::decay_t<decltype(v_impl)>::value_type S;
5644 return GenTensor<S>();
5645// return GenTensor<typename std::decay_t<decltype(*impl)>::typeT>();
5646 }
5647 };
5648
5649 auto make_vector = [](auto& arg) {
5650 return std::vector<std::decay_t<decltype(arg)>>(1,arg);
5651 };
5652
5653
5654 Key<LDIM> key1,key2;
5655 key.break_apart(key1,key2);
5656
5657 auto func=dynamic_cast<CompositeFunctorInterface<R,NDIM,LDIM>* >(ket->functor.get());
5659
5660 MADNESS_CHECK_THROW(func->impl_ket_vector.size()==0 or func->impl_ket_vector.size()==1,
5661 "only one ket function supported in inner_on_demand");
5662 MADNESS_CHECK_THROW(func->impl_p1_vector.size()==0 or func->impl_p1_vector.size()==1,
5663 "only one p1 function supported in inner_on_demand");
5664 MADNESS_CHECK_THROW(func->impl_p2_vector.size()==0 or func->impl_p2_vector.size()==1,
5665 "only one p2 function supported in inner_on_demand");
5666 auto coeff_bra=fnode.coeff();
5667 auto coeff_ket=get_coeff(key,func->impl_ket_vector);
5668 auto coeff_v1=get_coeff(key1,make_vector(func->impl_m1));
5669 auto coeff_v2=get_coeff(key2,make_vector(func->impl_m2));
5670 auto coeff_p1=get_coeff(key1,func->impl_p1_vector);
5671 auto coeff_p2=get_coeff(key2,func->impl_p2_vector);
5672
5673 // construct |ket(1,2)> or |p(1)p(2)> or |p(1)p(2) ket(1,2)>
5674 double error=0.0;
5675 if (coeff_ket.has_data() and coeff_p1.has_data()) {
5676 pointwise_multiplier<LDIM> pm(key,coeff_ket);
5677 coeff_ket=pm(key,outer(coeff_p1,coeff_p2,TensorArgs(TT_FULL,-1.0)).full_tensor());
5678 error+=pm.error;
5679 } else if (coeff_ket.has_data() or coeff_p1.has_data()) {
5680 coeff_ket = (coeff_ket.has_data()) ? coeff_ket : outer(coeff_p1,coeff_p2);
5681 } else { // not ket and no p1p2
5682 MADNESS_EXCEPTION("confused ket/p1p2 in do_inner_local_on_demand",1);
5683 }
5684
5685 // construct (v(1) + v(2)) |ket(1,2)>
5686 coeffT v1v2ket;
5687 if (coeff_v1.has_data()) {
5688 pointwise_multiplier<LDIM> pm(key,coeff_ket);
5689 v1v2ket = pm(key,coeff_v1.full_tensor(), 1);
5690 error+=pm.error;
5691 v1v2ket+= pm(key,coeff_v2.full_tensor(), 2);
5692 error+=pm.error;
5693 } else {
5694 v1v2ket = coeff_ket;
5695 }
5696
5697 resultT result;
5698 if (func->impl_eri) { // project bra*ket onto eri, avoid multiplication with eri
5699 MADNESS_CHECK(func->impl_eri->get_functor()->provides_coeff());
5700 coeffT coeff_eri=func->impl_eri->get_functor()->coeff(key).full_tensor();
5701 pointwise_multiplier<LDIM> pm(key,v1v2ket);
5702 tensorT braket=pm(key,coeff_bra.full_tensor_copy().conj());
5703 error+=pm.error;
5704 if (error>1.e-3) print("error in key",key,error);
5705 result=coeff_eri.full_tensor().trace(braket);
5706
5707 } else { // no eri, project ket onto bra
5708 result=coeff_bra.full_tensor_copy().trace_conj(v1v2ket.full_tensor_copy());
5709 }
5710 return result;
5711 }
5712
5713 resultT operator()(resultT a, resultT b) const {
5714 return (a+b);
5715 }
5716
5717 template <typename Archive> void serialize(const Archive& ar) {
5718 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5719 }
5720 };
5721
5722 /// Returns the inner product of this with function g constructed on-the-fly
5723
5724 /// the leaf boxes of this's MRA tree define the inner product
5725 template <typename R>
5726 TENSOR_RESULT_TYPE(T,R) inner_local_on_demand(const FunctionImpl<R,NDIM>& gimpl) const {
5729
5733 do_inner_local_on_demand<R>(this, &gimpl));
5734 }
5735
5736 /// compute the inner product of this range with other
5737 template<typename R>
5738 struct do_dot_local {
5739 const FunctionImpl<R,NDIM>* other;
5740 bool leaves_only;
5741 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5742
5745 resultT operator()(typename dcT::const_iterator& it) const {
5746
5747 resultT sum=0.0;
5748 const keyT& key=it->first;
5749 const nodeT& fnode = it->second;
5750 if (fnode.has_coeff()) {
5751 if (other->coeffs.probe(it->first)) {
5752 const FunctionNode<R,NDIM>& gnode = other->coeffs.find(key).get()->second;
5753 if (gnode.has_coeff()) {
5754 if (gnode.coeff().dim(0) != fnode.coeff().dim(0)) {
5755 madness::print("DOT", it->first, gnode.coeff().dim(0),fnode.coeff().dim(0));
5756 MADNESS_EXCEPTION("functions have different k or compress/reconstruct error", 0);
5757 }
5758 if (leaves_only) {
5759 if (gnode.is_leaf() or fnode.is_leaf()) {
5760 sum += fnode.coeff().full_tensor().trace(gnode.coeff().full_tensor());
5761 }
5762 } else {
5763 sum += fnode.coeff().full_tensor().trace(gnode.coeff().full_tensor());
5764 }
5765 }
5766 }
5767 }
5768 return sum;
5769 }
5770
5771 resultT operator()(resultT a, resultT b) const {
5772 return (a+b);
5773 }
5774
5775 template <typename Archive> void serialize(const Archive& ar) {
5776 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5777 }
5778 };
5779
5780 /// Returns the dot product ASSUMING same distribution
5781
5782 /// handles compressed and redundant form
5783 template <typename R>
5787 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5788
5789 // make sure the states of the trees are consistent
5791 bool leaves_only=(this->is_redundant());
5792 return world.taskq.reduce<resultT,rangeT,do_dot_local<R> >
5794 }
5795
5796 /// Type of the entry in the map returned by make_key_vec_map
5797 typedef std::vector< std::pair<int,const coeffT*> > mapvecT;
5798
5799 /// Type of the map returned by make_key_vec_map
5800 typedef ConcurrentHashMap< keyT, mapvecT > mapT;
5801
5802 /// Adds keys to union of local keys with specified index
5803 void add_keys_to_map(mapT* map, int index) const {
5804 typename dcT::const_iterator end = coeffs.end();
5805 for (typename dcT::const_iterator it=coeffs.begin(); it!=end; ++it) {
5806 typename mapT::accessor acc;
5807 const keyT& key = it->first;
5808 const FunctionNode<T,NDIM>& node = it->second;
5809 if (node.has_coeff()) {
5810 [[maybe_unused]] auto inserted = map->insert(acc,key);
5811 acc->second.push_back(std::make_pair(index,&(node.coeff())));
5812 }
5813 }
5814 }
5815
5816 /// Returns map of union of local keys to vector of indexes of functions containing that key
5817
5818 /// Local concurrency and synchronization only; no communication
5819 static
5820 mapT
5821 make_key_vec_map(const std::vector<const FunctionImpl<T,NDIM>*>& v) {
5822 mapT map(100000);
5823 // This loop must be parallelized
5824 for (unsigned int i=0; i<v.size(); i++) {
5825 //v[i]->add_keys_to_map(&map,i);
5826 v[i]->world.taskq.add(*(v[i]), &FunctionImpl<T,NDIM>::add_keys_to_map, &map, int(i));
5827 }
5828 if (v.size()) v[0]->world.taskq.fence();
5829 return map;
5830 }
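// How the returned map is consumed (sketch; fimpl/gimpl stand for any local FunctionImpl pointers):
//   std::vector<const FunctionImpl<double,3>*> v = {fimpl, gimpl};
//   auto map = FunctionImpl<double,3>::make_key_vec_map(v);
//   for (auto it = map.begin(); it != map.end(); ++it) {
//       const keyT& key    = it->first;    // a key occurring locally in at least one input
//       const mapvecT& vec = it->second;   // (function index, pointer to its coeffs at key) pairs
//   }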
5831
5832#if 0
5833// Original
5834 template <typename R>
5835 static void do_inner_localX(const typename mapT::iterator lstart,
5836 const typename mapT::iterator lend,
5837 typename FunctionImpl<R,NDIM>::mapT* rmap_ptr,
5838 const bool sym,
5839 Tensor< TENSOR_RESULT_TYPE(T,R) >* result_ptr,
5840 Mutex* mutex) {
5841 Tensor< TENSOR_RESULT_TYPE(T,R) >& result = *result_ptr;
5842 Tensor< TENSOR_RESULT_TYPE(T,R) > r(result.dim(0),result.dim(1));
5843 for (typename mapT::iterator lit=lstart; lit!=lend; ++lit) {
5844 const keyT& key = lit->first;
5845 typename FunctionImpl<R,NDIM>::mapT::iterator rit=rmap_ptr->find(key);
5846 if (rit != rmap_ptr->end()) {
5847 const mapvecT& leftv = lit->second;
5848 const typename FunctionImpl<R,NDIM>::mapvecT& rightv =rit->second;
5849 const int nleft = leftv.size();
5850 const int nright= rightv.size();
5851
5852 for (int iv=0; iv<nleft; iv++) {
5853 const int i = leftv[iv].first;
5854 const GenTensor<T>* iptr = leftv[iv].second;
5855
5856 for (int jv=0; jv<nright; jv++) {
5857 const int j = rightv[jv].first;
5858 const GenTensor<R>* jptr = rightv[jv].second;
5859
5860 if (!sym || (sym && i<=j))
5861 r(i,j) += iptr->trace_conj(*jptr);
5862 }
5863 }
5864 }
5865 }
5866 mutex->lock();
5867 result += r;
5868 mutex->unlock();
5869 }
5870#else
5871 template <typename R>
5872 static void do_inner_localX(const typename mapT::iterator lstart,
5873 const typename mapT::iterator lend,
5874 typename FunctionImpl<R,NDIM>::mapT* rmap_ptr,
5875 const bool sym,
5876 Tensor< TENSOR_RESULT_TYPE(T,R) >* result_ptr,
5877 Mutex* mutex) {
5878 Tensor< TENSOR_RESULT_TYPE(T,R) >& result = *result_ptr;
5879 //Tensor< TENSOR_RESULT_TYPE(T,R) > r(result.dim(0),result.dim(1));
5880 for (typename mapT::iterator lit=lstart; lit!=lend; ++lit) {
5881 const keyT& key = lit->first;
5882 typename FunctionImpl<R,NDIM>::mapT::iterator rit=rmap_ptr->find(key);
5883 if (rit != rmap_ptr->end()) {
5884 const mapvecT& leftv = lit->second;
5885 const typename FunctionImpl<R,NDIM>::mapvecT& rightv =rit->second;
5886 const size_t nleft = leftv.size();
5887 const size_t nright= rightv.size();
5888
5889 unsigned int size = leftv[0].second->size();
5890 Tensor<T> Left(nleft, size);
5891 Tensor<R> Right(nright, size);
5892 Tensor< TENSOR_RESULT_TYPE(T,R)> r(nleft, nright);
5893 for(unsigned int iv = 0; iv < nleft; ++iv) Left(iv,_) = (*(leftv[iv].second)).full_tensor();
5894 for(unsigned int jv = 0; jv < nright; ++jv) Right(jv,_) = (*(rightv[jv].second)).full_tensor();
5895 // call mxmT from mxm.h in tensor
5896 if(TensorTypeData<T>::iscomplex) Left = Left.conj(); // Should handle complex case and leave real case alone
5897 mxmT(nleft, nright, size, r.ptr(), Left.ptr(), Right.ptr());
5898 mutex->lock();
5899 for(unsigned int iv = 0; iv < nleft; ++iv) {
5900 const int i = leftv[iv].first;
5901 for(unsigned int jv = 0; jv < nright; ++jv) {
5902 const int j = rightv[jv].first;
5903 if (!sym || (sym && i<=j)) result(i,j) += r(iv,jv);
5904 }
5905 }
5906 mutex->unlock();
5907 }
5908 }
5909 }
5910#endif
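// What the mxmT call above computes, written as explicit loops (equivalent sketch):
// r(i,j) += sum_k Left(i,k) * Right(j,k),  i.e.  r += Left * Right^T, where row i of Left holds
// the (conjugated, if complex) coeffs of left function i at this key and row j of Right holds
// the coeffs of right function j at the same key:
//   for (size_t i = 0; i < nleft; ++i)
//       for (size_t j = 0; j < nright; ++j)
//           for (size_t k = 0; k < size; ++k)
//               r(i, j) += Left(i, k) * Right(j, k);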
5911
5912#if 0
5913// Original
5914 template <typename R, typename = std::enable_if_t<std::is_floating_point_v<R>>>
5915 static void do_dot_localX(const typename mapT::iterator lstart,
5916 const typename mapT::iterator lend,
5917 typename FunctionImpl<R, NDIM>::mapT* rmap_ptr,
5918 const bool sym,
5919 Tensor<TENSOR_RESULT_TYPE(T, R)>* result_ptr,
5920 Mutex* mutex) {
5921 if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry", 1);
5922 Tensor<TENSOR_RESULT_TYPE(T, R)>& result = *result_ptr;
5923 Tensor<TENSOR_RESULT_TYPE(T, R)> r(result.dim(0), result.dim(1));
5924 for (typename mapT::iterator lit = lstart; lit != lend; ++lit) {
5925 const keyT& key = lit->first;
5926 typename FunctionImpl<R, NDIM>::mapT::iterator rit = rmap_ptr->find(key);
5927 if (rit != rmap_ptr->end()) {
5928 const mapvecT& leftv = lit->second;
5929 const typename FunctionImpl<R, NDIM>::mapvecT& rightv = rit->second;
5930 const int nleft = leftv.size();
5931 const int nright = rightv.size();
5932
5933 for (int iv = 0; iv < nleft; iv++) {
5934 const int i = leftv[iv].first;
5935 const GenTensor<T>* iptr = leftv[iv].second;
5936
5937 for (int jv = 0; jv < nright; jv++) {
5938 const int j = rightv[jv].first;
5939 const GenTensor<R>* jptr = rightv[jv].second;
5940
5941 if (!sym || (sym && i <= j))
5942 r(i, j) += iptr->trace_conj(*jptr);
5943 }
5944 }
5945 }
5946 }
5947 mutex->lock();
5948 result += r;
5949 mutex->unlock();
5950 }
5951#else
5952 template <typename R>
5953 static void do_dot_localX(const typename mapT::iterator lstart,
5954 const typename mapT::iterator lend,
5955 typename FunctionImpl<R, NDIM>::mapT* rmap_ptr,
5956 const bool sym,
5957 Tensor<TENSOR_RESULT_TYPE(T, R)>* result_ptr,
5958 Mutex* mutex) {
5959 Tensor<TENSOR_RESULT_TYPE(T, R)>& result = *result_ptr;
5960 // Tensor<TENSOR_RESULT_TYPE(T, R)> r(result.dim(0), result.dim(1));
5961 for (typename mapT::iterator lit = lstart; lit != lend; ++lit) {
5962 const keyT& key = lit->first;
5963 typename FunctionImpl<R, NDIM>::mapT::iterator rit = rmap_ptr->find(key);
5964 if (rit != rmap_ptr->end()) {
5965 const mapvecT& leftv = lit->second;
5966 const typename FunctionImpl<R, NDIM>::mapvecT& rightv = rit->second;
5967 const size_t nleft = leftv.size();
5968 const size_t nright= rightv.size();
5969
5970 unsigned int size = leftv[0].second->size();
5971 Tensor<T> Left(nleft, size);
5972 Tensor<R> Right(nright, size);
5973 Tensor< TENSOR_RESULT_TYPE(T, R)> r(nleft, nright);
5974 for(unsigned int iv = 0; iv < nleft; ++iv) Left(iv, _) = (*(leftv[iv].second)).full_tensor();
5975 for(unsigned int jv = 0; jv < nright; ++jv) Right(jv, _) = (*(rightv[jv].second)).full_tensor();
5976 // call mxmT from mxm.h in tensor
5977 mxmT(nleft, nright, size, r.ptr(), Left.ptr(), Right.ptr());
5978 mutex->lock();
5979 for(unsigned int iv = 0; iv < nleft; ++iv) {
5980 const int i = leftv[iv].first;
5981 for(unsigned int jv = 0; jv < nright; ++jv) {
5982 const int j = rightv[jv].first;
5983 if (!sym || (sym && i <= j)) result(i, j) += r(iv, jv);
5984 }
5985 }
5986 mutex->unlock();
5987 }
5988 }
5989 }
5990#endif
5991
5992 static double conj(float x) {
5993 return x;
5994 }
5995
5996 static std::complex<double> conj(const std::complex<double> x) {
5997 return std::conj(x);
5998 }
5999
6000 template <typename R>
6001 static Tensor< TENSOR_RESULT_TYPE(T,R) >
6002 inner_local(const std::vector<const FunctionImpl<T,NDIM>*>& left,
6003 const std::vector<const FunctionImpl<R,NDIM>*>& right,
6004 bool sym) {
6005
6006 // This is basically a sparse matrix^T * matrix product
6007 // Rij = sum(k) Aki * Bkj
6008 // where i and j index functions and k index the wavelet coeffs
6009 // eventually the goal is this structure (don't have jtile yet)
6010 //
6011 // do in parallel tiles of k (tensors of coeffs)
6012 // do tiles of j
6013 // do i
6014 // do j in jtile
6015 // do k in ktile
6016 // Rij += Aki*Bkj
6017
6018 mapT lmap = make_key_vec_map(left);
6019 typename FunctionImpl<R,NDIM>::mapT rmap;
6020 auto* rmap_ptr = (typename FunctionImpl<R,NDIM>::mapT*)(&lmap);
6021 if ((std::vector<const FunctionImpl<R,NDIM>*>*)(&left) != &right) {
6022 rmap = FunctionImpl<R,NDIM>::make_key_vec_map(right);
6023 rmap_ptr = &rmap;
6024 }
6025
6026 size_t chunk = (lmap.size()-1)/(3*4*5)+1;
6027
6028 Tensor< TENSOR_RESULT_TYPE(T,R) > r(left.size(), right.size());
6029 Mutex mutex;
6030
6031 typename mapT::iterator lstart=lmap.begin();
6032 while (lstart != lmap.end()) {
6033 typename mapT::iterator lend = lstart;
6034 advance(lend,chunk);
6035 left[0]->world.taskq.add(&FunctionImpl<T,NDIM>::do_inner_localX<R>, lstart, lend, rmap_ptr, sym, &r, &mutex);
6036 lstart = lend;
6037 }
6038 left[0]->world.taskq.fence();
6039
6040 if (sym) {
6041 for (long i=0; i<r.dim(0); i++) {
6042 for (long j=0; j<i; j++) {
6043 TENSOR_RESULT_TYPE(T,R) sum = r(i,j)+conj(r(j,i));
6044 r(i,j) = sum;
6045 r(j,i) = conj(sum);
6046 }
6047 }
6048 }
6049 return r;
6050 }
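// Normally reached through the public interface (sketch; f and g are std::vector<real_function_3d>
// living in the same world):
//   Tensor<double> S = matrix_inner(world, f, g, /* sym= */ false);   // S(i,j) = <f_i | g_j>
// which collects the underlying FunctionImpl pointers and ends up in this inner_local.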
6051
6052 template <typename R>
6053 static Tensor<TENSOR_RESULT_TYPE(T, R)>
6054 dot_local(const std::vector<const FunctionImpl<T, NDIM>*>& left,
6055 const std::vector<const FunctionImpl<R, NDIM>*>& right,
6056 bool sym) {
6057
6058 // This is basically a sparse matrix * matrix product
6059 // Rij = sum(k) Aik * Bkj
6060 // where i and j index functions and k index the wavelet coeffs
6061 // eventually the goal is this structure (don't have jtile yet)
6062 //
6063 // do in parallel tiles of k (tensors of coeffs)
6064 // do tiles of j
6065 // do i
6066 // do j in jtile
6067 // do k in ktile
6068 // Rij += Aik*Bkj
6069
6070 mapT lmap = make_key_vec_map(left);
6071 typename FunctionImpl<R, NDIM>::mapT rmap;
6072 auto* rmap_ptr = (typename FunctionImpl<R, NDIM>::mapT*)(&lmap);
6073 if ((std::vector<const FunctionImpl<R, NDIM>*>*)(&left) != &right) {
6074 rmap = FunctionImpl<R, NDIM>::make_key_vec_map(right);
6075 rmap_ptr = &rmap;
6076 }
6077
6078 size_t chunk = (lmap.size() - 1) / (3 * 4 * 5) + 1;
6079
6080 Tensor<TENSOR_RESULT_TYPE(T, R)> r(left.size(), right.size());
6081 Mutex mutex;
6082
6083 typename mapT::iterator lstart=lmap.begin();
6084 while (lstart != lmap.end()) {
6085 typename mapT::iterator lend = lstart;
6086 advance(lend, chunk);
6087 left[0]->world.taskq.add(&FunctionImpl<T, NDIM>::do_dot_localX<R>, lstart, lend, rmap_ptr, sym, &r, &mutex);
6088 lstart = lend;
6089 }
6090 left[0]->world.taskq.fence();
6091
6092 // sym is for hermiticity
6093 if (sym) {
6094 for (long i = 0; i < r.dim(0); i++) {
6095 for (long j = 0; j < i; j++) {
6096 TENSOR_RESULT_TYPE(T, R) sum = r(i, j) + conj(r(j, i));
6097 r(i, j) = sum;
6098 r(j, i) = conj(sum);
6099 }
6100 }
6101 }
6102 return r;
6103 }
6104
6105 template <typename R>
6107 {
6108 static_assert(!std::is_same<R, int>::value &&
6109 std::is_same<R, int>::value,
6110 "Compilation failed because you wanted to know the type; see below:");
6111 }
6112
6113 /// invoked by result
6114
6115 /// contract 2 functions f(x,z) = \int g(x,y) * h(y,z) dy
6116 /// @tparam CDIM: the dimension of the contraction variable (y)
6117 /// @tparam NDIM: the dimension of the result (x,z)
6118 /// @tparam LDIM: the dimension of g(x,y)
6119 /// @tparam KDIM: the dimension of h(y,z)
6120 template<typename Q, std::size_t LDIM, typename R, std::size_t KDIM,
6121 std::size_t CDIM = (KDIM + LDIM - NDIM) / 2>
6122 void partial_inner(const FunctionImpl<Q,LDIM>& g, const FunctionImpl<R,KDIM>& h,
6123 const std::array<int, CDIM> v1, const std::array<int, CDIM> v2) {
6124
6125 typedef std::multimap<Key<NDIM>, std::list<Key<CDIM>>> contractionmapT;
6126 //double wall_get_lists=0.0;
6127 //double wall_recur=0.0;
6128 //double wall_contract=0.0;
6131
6132 // auto print_map = [](const auto& map) {
6133 // for (const auto& kv : map) print(kv.first,"--",kv.second);
6134 // };
6135 // logical constness, not bitwise constness
6136 FunctionImpl<Q,LDIM>& g_nc=const_cast<FunctionImpl<Q,LDIM>&>(g);
6137 FunctionImpl<R,KDIM>& h_nc=const_cast<FunctionImpl<R,KDIM>&>(h);
6138
6139 std::list<contractionmapT> all_contraction_maps;
6140 for (std::size_t n=0; n<nmax; ++n) {
6141
6142 // list of nodes with d coefficients (and their parents)
6143 //double wall0 = wall_time();
6144 auto [g_ijlist, g_jlist] = g.get_contraction_node_lists(n, v1);
6145 auto [h_ijlist, h_jlist] = h.get_contraction_node_lists(n, v2);
6146 if ((g_ijlist.size() == 0) and (h_ijlist.size() == 0)) break;
6147 //double wall1 = wall_time();
6148 //wall_get_lists += (wall1 - wall0);
6149 //wall0 = wall1;
6150// print("g_jlist");
6151// for (const auto& kv : g_jlist) print(kv.first,kv.second);
6152// print("h_jlist");
6153// for (const auto& kv : h_jlist) print(kv.first,kv.second);
6154
6155 // next lines will insert s nodes into g and h -> possible race condition!
6156 bool this_first = true; // are the remaining indices of g before those of h: f(x,z) = g(x,y) h(y,z)
6157 // CDIM, NDIM, KDIM
6158 contractionmapT contraction_map = g_nc.recur_down_for_contraction_map(
6159 g_nc.key0(), g_nc.get_coeffs().find(g_nc.key0()).get()->second, v1, v2,
6160 h_ijlist, h_jlist, this_first, thresh);
6161
6162 this_first = false;
6163 // CDIM, NDIM, LDIM
6164 auto hnode0=h_nc.get_coeffs().find(h_nc.key0()).get()->second;
6165 contractionmapT contraction_map1 = h_nc.recur_down_for_contraction_map(
6166 h_nc.key0(), hnode0, v2, v1,
6167 g_ijlist, g_jlist, this_first, thresh);
6168
6169 // will contain duplicate entries
6170 contraction_map.merge(contraction_map1);
6171 // turn multimap into a map of list
6172 auto it = contraction_map.begin();
6173 while (it != contraction_map.end()) {
6174 auto it_end = contraction_map.upper_bound(it->first);
6175 auto it2 = it;
6176 it2++;
6177 while (it2 != it_end) {
6178 it->second.splice(it->second.end(), it2->second);
6179 it2 = contraction_map.erase(it2);
6180 }
6181 it = it_end;
6182 }
6183// print("thresh ",thresh);
6184// print("contraction list size",contraction_map.size());
6185
6186 // remove all double entries
6187 for (auto& elem: contraction_map) {
6188 elem.second.sort();
6189 elem.second.unique();
6190 }
6191 //wall1 = wall_time();
6192 //wall_recur += (wall1 - wall0);
6193// if (n==2) {
6194// print("contraction map for n=", n);
6195// print_map(contraction_map);
6196// }
6197 all_contraction_maps.push_back(contraction_map);
6198
6199 long mapsize=contraction_map.size();
6200 if (mapsize==0) break;
6201 }
6202
6203
6204 // finally do the contraction
6205 for (const auto& contraction_map : all_contraction_maps) {
6206 for (const auto& key_list : contraction_map) {
6207 const Key<NDIM>& key=key_list.first;
6208 const std::list<Key<CDIM>>& list=key_list.second;
6209 woT::task(coeffs.owner(key), &implT:: template partial_inner_contract<Q,LDIM,R,KDIM>,
6210 &g,&h,v1,v2,key,list);
6211 }
6212 }
6213 }
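// Example of the index convention (a sketch using the argument order of partial_inner above):
// to form f(x,z) = \int g(x,y) h(y,z) dy with g carrying (x,y) (LDIM=2) and h carrying (y,z)
// (KDIM=2), contract the second index of g with the first index of h,
//   result.partial_inner(g, h, std::array<int,1>{1}, std::array<int,1>{0});
// giving an NDIM = LDIM + KDIM - 2*CDIM = 2 dimensional result that carries (x,z).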
6214
6215 /// for contraction two functions f(x,z) = \int g(x,y) h(y,z) dy
6216
6217 /// find all nodes with d coefficients and return a list of complete keys and of
6218 /// keys holding only the y dimension, also the maximum norm of all d for the j dimension
6219 /// @param[in] n the scale
6220 /// @param[in] v array holding the indices of the integration variable
6221 /// @return ijlist: list of all nodes with d coeffs; jlist: j-part of ij list only
6222 template<std::size_t CDIM>
6223 std::tuple<std::set<Key<NDIM>>, std::map<Key<CDIM>,double>>
6224 get_contraction_node_lists(const std::size_t n, const std::array<int, CDIM>& v) const {
6225
6226 const auto& cdata=get_cdata();
6227 auto has_d_coeffs = [&cdata](const coeffT& coeff) {
6228 if (coeff.has_no_data()) return false;
6229 return (coeff.dim(0)==2*cdata.k);
6230 };
6231
6232 // keys to be contracted in g
6233 std::set<Key<NDIM>> ij_list; // full key
6234 std::map<Key<CDIM>,double> j_list; // only that dimension that will be contracted
6235
6236 for (auto it=get_coeffs().begin(); it!=get_coeffs().end(); ++it) {
6237 const Key<NDIM>& key=it->first;
6238 const FunctionNode<T,NDIM>& node=it->second;
6239 if ((key.level()==n) and (has_d_coeffs(node.coeff()))) {
6240 ij_list.insert(key);
6241 Vector<Translation,CDIM> j_trans;
6242 for (std::size_t i=0; i<CDIM; ++i) j_trans[i]=key.translation()[v[i]];
6243 Key<CDIM> jkey(n,j_trans);
6244 const double max_d_norm=j_list[jkey];
6245 j_list.insert_or_assign(jkey,std::max(max_d_norm,node.get_dnorm()));
6246 Key<CDIM> parent_jkey=jkey.parent();
6247 while (j_list.count(parent_jkey)==0) {
6248 j_list.insert({parent_jkey,1.0});
6249 parent_jkey=parent_jkey.parent();
6250 }
6251 }
6252 }
6253 return std::make_tuple(ij_list,j_list);
6254 }
6255
6256 /// make a map of all nodes that will contribute to a partial inner product
6257
6258 /// given the list of d coefficient-holding nodes of the other function:
6259 /// recur down h if snorm * dnorm > tol and key n−jx ∈ other−ij-list. Make s
6260 /// coefficients if necessary. Make list of nodes n − ijk as map(n-ik, list(j)).
6261 ///
6262 /// !! WILL ADD NEW S NODES TO THIS TREE THAT MUST BE REMOVED TO AVOID INCONSISTENT TREE STRUCTURE !!
6263 ///
6264 /// @param[in] key for recursion
6265 /// @param[in] node corresponds to key
6266 /// @param[in] v_this this' dimension that are contracted
6267 /// @param[in] v_other other's dimension that are contracted
6268 /// @param[in] ij_other_list list of nodes of the other function that will be contracted (and their parents)
6269 /// @param[in] j_other_list list of column nodes of the other function that will be contracted (and their parents),
6270 /// mapped to the maximum d coeff norm of the nodes in that column
6271 /// @param[in] this_first are the remaining coeffs of this functions first or last in the result function
6272 /// @param[in] thresh threshold for including nodes in the contraction: snorm*dnorm > thresh
6273 /// @tparam CDIM dimension to be contracted
6274 /// @tparam ODIM dimensions of the other function
6275 /// @tparam FDIM dimensions of the final function
6276 template<std::size_t CDIM, std::size_t ODIM, std::size_t FDIM=NDIM+ODIM-2*CDIM>
6277 std::multimap<Key<FDIM>, std::list<Key<CDIM>>> recur_down_for_contraction_map(
6278 const keyT& key, const nodeT& node,
6279 const std::array<int,CDIM>& v_this,
6280 const std::array<int,CDIM>& v_other,
6281 const std::set<Key<ODIM>>& ij_other_list,
6282 const std::map<Key<CDIM>,double>& j_other_list,
6283 bool this_first, const double thresh) {
6284
6285 std::multimap<Key<FDIM>, std::list<Key<CDIM>>> contraction_map;
6286
6287 // fast return if the other function has no d coeffs
6288 if (j_other_list.empty()) return contraction_map;
6289
6290 // continue recursion if this node may be contracted with the j column
6291 // extract relevant node translations from this node
6292 const auto j_this_key=key.extract_key(v_this);
6293
6294// print("\nkey, j_this_key", key, j_this_key);
6295 const double max_d_norm=j_other_list.find(j_this_key)->second;
6296 const bool sd_norm_product_large = node.get_snorm() * max_d_norm > truncate_tol(thresh,key);
6297// print("sd_product_norm",node.get_snorm() * max_d_norm, thresh);
6298
6299 // end recursion if we have reached the final scale n
6300 // with which nodes from other will this node be contracted?
6301 bool final_scale=key.level()==ij_other_list.begin()->level();
6302 if (final_scale and sd_norm_product_large) {
6303 for (auto& other_key : ij_other_list) {
6304 const auto j_other_key=other_key.extract_key(v_other);
6305 if (j_this_key != j_other_key) continue;
6306 auto i_key=key.extract_complement_key(v_this);
6307 auto k_key=other_key.extract_complement_key(v_other);
6308// print("key, ij_other_key",key,other_key);
6309// print("i, k, j key",i_key, k_key, j_this_key);
6310 Key<FDIM> ik_key=(this_first) ? i_key.merge_with(k_key) : k_key.merge_with(i_key);
6311// print("ik_key",ik_key);
6312// MADNESS_CHECK(contraction_map.count(ik_key)==0);
6313 contraction_map.insert(std::make_pair(ik_key,std::list<Key<CDIM>>{j_this_key}));
6314 }
6315 return contraction_map;
6316 }
6317
6318 bool continue_recursion = (j_other_list.count(j_this_key)==1);
6319 if (not continue_recursion) return contraction_map;
6320
6321
6322 // continue recursion if norms are large
6323 continue_recursion = (node.has_children() or sd_norm_product_large);
6324
6325 if (continue_recursion) {
6326 // in case we need to compute children's coefficients: unfilter only once
6327 bool compute_child_s_coeffs=true;
6328 coeffT d = node.coeff();
6329// print("continuing recursion from key",key);
6330
6331 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
6332 keyT child=kit.key();
6333 typename dcT::accessor acc;
6334
6335 // make child's s coeffs if it doesn't exist or if is has no s coeffs
6336 bool childnode_exists=get_coeffs().find(acc,child);
6337 bool need_s_coeffs= childnode_exists ? (acc->second.get_snorm()<=0.0) : true;
6338
6339 coeffT child_s_coeffs;
6340 if (need_s_coeffs and compute_child_s_coeffs) {
6341 if (d.dim(0)==cdata.vk[0]) { // s coeffs only in this node
6342 coeffT d1(cdata.v2k,get_tensor_args());
6343 d1(cdata.s0)+=d;
6344 d=d1;
6345 }
6346 d = unfilter(d);
6347 child_s_coeffs=copy(d(child_patch(child)));
6348 child_s_coeffs.reduce_rank(thresh);
6349 compute_child_s_coeffs=false;
6350 }
6351
6352 if (not childnode_exists) {
6353 get_coeffs().replace(child,nodeT(child_s_coeffs,false));
6354 get_coeffs().find(acc,child);
6355 } else if (childnode_exists and need_s_coeffs) {
6356 acc->second.coeff()=child_s_coeffs;
6357 }
6358 bool exists= get_coeffs().find(acc,child);
6359 MADNESS_CHECK(exists);
6360 nodeT& childnode = acc->second;
6361 if (need_s_coeffs) childnode.recompute_snorm_and_dnorm(get_cdata());
6362// print("recurring down to",child);
6363 contraction_map.merge(recur_down_for_contraction_map(child,childnode, v_this, v_other,
6364 ij_other_list, j_other_list, this_first, thresh));
6365// print("contraction_map.size()",contraction_map.size());
6366 }
6367
6368 }
6369
6370 return contraction_map;
6371 }
6372
6373
6374 /// tensor contraction part of partial_inner
6375
6376 /// @param[in] g rhs of the inner product
6377 /// @param[in] h lhs of the inner product
6378 /// @param[in] v1 dimensions of g to be contracted
6379 /// @param[in] v2 dimensions of h to be contracted
6380 /// @param[in] key key of result's (this) FunctionNode
6381 /// @param[in] j_key_list list of contraction index-j keys contributing to this' node
6382 template<typename Q, std::size_t LDIM, typename R, std::size_t KDIM,
6383 std::size_t CDIM = (KDIM + LDIM - NDIM) / 2>
6384 void partial_inner_contract(const FunctionImpl<Q,LDIM>* g, const FunctionImpl<R,KDIM>* h,
6385 const std::array<int, CDIM> v1, const std::array<int, CDIM> v2,
6386 const Key<NDIM>& key, const std::list<Key<CDIM>>& j_key_list) {
6387
6388 Key<LDIM - CDIM> i_key;
6389 Key<KDIM - CDIM> k_key;
6390 key.break_apart(i_key, k_key);
6391
6392 coeffT result_coeff(get_cdata().v2k, get_tensor_type());
6393 for (const auto& j_key: j_key_list) {
6394
6395 auto v_complement = [](const auto& v, const auto& vc) {
6396 constexpr std::size_t VDIM = std::tuple_size<std::decay_t<decltype(v)>>::value;
6397 constexpr std::size_t VCDIM = std::tuple_size<std::decay_t<decltype(vc)>>::value;
6398 std::array<int, VCDIM> result;
6399 for (std::size_t i = 0; i < VCDIM; i++) result[i] = (v.back() + i + 1) % (VDIM + VCDIM);
6400 return result;
6401 };
6402 auto make_ij_key = [&v_complement](const auto i_key, const auto j_key, const auto& v) {
6403 constexpr std::size_t IDIM = std::decay_t<decltype(i_key)>::static_size;
6404 constexpr std::size_t JDIM = std::decay_t<decltype(j_key)>::static_size;
6405 static_assert(JDIM == std::tuple_size<std::decay_t<decltype(v)>>::value);
6406
6407 Vector<Translation, IDIM + JDIM> l;
6408 for (std::size_t i = 0; i < v.size(); ++i) l[v[i]] = j_key.translation()[i];
6409 std::array<int, IDIM> vc1;
6410 auto vc = v_complement(v, vc1);
6411 for (std::size_t i = 0; i < vc.size(); ++i) l[vc[i]] = i_key.translation()[i];
6412
6413 return Key<IDIM + JDIM>(i_key.level(), l);
6414 };
6415
6416 Key<LDIM> ij_key = make_ij_key(i_key, j_key, v1);
6417 Key<KDIM> jk_key = make_ij_key(k_key, j_key, v2);
6418
6419 MADNESS_CHECK(g->get_coeffs().probe(ij_key));
6420 MADNESS_CHECK(h->get_coeffs().probe(jk_key));
6421 const coeffT& gcoeff = g->get_coeffs().find(ij_key).get()->second.coeff();
6422 const coeffT& hcoeff = h->get_coeffs().find(jk_key).get()->second.coeff();
6423 coeffT gcoeff1, hcoeff1;
6424 if (gcoeff.dim(0) == g->get_cdata().k) {
6425 gcoeff1 = coeffT(g->get_cdata().v2k, g->get_tensor_args());
6426 gcoeff1(g->get_cdata().s0) += gcoeff;
6427 } else {
6428 gcoeff1 = gcoeff;
6429 }
6430 if (hcoeff.dim(0) == g->get_cdata().k) {
6431 hcoeff1 = coeffT(h->get_cdata().v2k, h->get_tensor_args());
6432 hcoeff1(h->get_cdata().s0) += hcoeff;
6433 } else {
6434 hcoeff1 = hcoeff;
6435 }
6436
 6437 // offset: 0 for full tensor, 1 for svd representation with the rank being the first dimension (r,d1,d2,d3) -> (r,d1*d2*d3)
6438 auto fuse = [](Tensor<T> tensor, const std::array<int, CDIM>& v, int offset) {
6439 for (std::size_t i = 0; i < CDIM - 1; ++i) {
6440 MADNESS_CHECK((v[i] + 1) == v[i + 1]); // make sure v is contiguous and ascending
6441 tensor = tensor.fusedim(v[0]+offset);
6442 }
6443 return tensor;
6444 };
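 // Added illustration: for a full-rank block c(k,k,k) with contiguous contraction
 // dimensions v={0,1}, fuse(c,v,0) applies fusedim(0) once and returns a (k*k,k)
 // tensor, so both contracted dimensions are handled by a single inner() call.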
6445
6446 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(2) h(1,2) d2
 6447 // c_i = \sum_j a_j b_ij = \sum_jr a_j b_rj b'_ri
 6448 // = \sum_r ( \sum_j a_j b_rj ) b'_ri
6449 auto contract2 = [](const auto& svdcoeff, const auto& tensor, const int particle) {
6450#if HAVE_GENTENSOR
6451 const int spectator_particle=(particle+1)%2;
6452 Tensor<Q> gtensor = svdcoeff.get_svdtensor().make_vector_with_weights(particle);
6453 gtensor=gtensor.reshape(svdcoeff.rank(),gtensor.size()/svdcoeff.rank());
6454 MADNESS_CHECK(gtensor.ndim()==2);
6455 Tensor<Q> gtensor_other = svdcoeff.get_svdtensor().ref_vector(spectator_particle);
6456 Tensor<T> tmp1=inner(gtensor,tensor.flat(),1,0); // tmp1(r) = sum_j a'_(r,j) b(j)
6457 MADNESS_CHECK(tmp1.ndim()==1);
6458 Tensor<T> tmp2=inner(gtensor_other,tmp1,0,0); // tmp2(i) = sum_r a_(r,i) tmp1(r)
6459 return tmp2;
6460#else
6461 MADNESS_EXCEPTION("no partial_inner using svd without GenTensor",1);
6462 return Tensor<T>();
6463#endif
6464 };
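 // Added note: contract2 factorizes the sum over the contracted index j through the
 // SVD rank r: tmp1(r) = \sum_j b_rj a_j is one inner() over j, and
 // result_i = \sum_r b'_ri tmp1(r) is one inner() over r, so the full tensor b_ij is
 // never reassembled.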
6465
6466 if (gcoeff.is_full_tensor() and hcoeff.is_full_tensor() and result_coeff.is_full_tensor()) {
6467 // merge multiple contraction dimensions into one
6468 int offset = 0;
6469 Tensor<Q> gtensor = fuse(gcoeff1.full_tensor(), v1, offset);
6470 Tensor<R> htensor = fuse(hcoeff1.full_tensor(), v2, offset);
6471 result_coeff.full_tensor() += inner(gtensor, htensor, v1[0], v2[0]);
6472 if (key.level() > 0) {
6473 gtensor = copy(gcoeff1.full_tensor()(g->get_cdata().s0));
6474 htensor = copy(hcoeff1.full_tensor()(h->get_cdata().s0));
6475 gtensor = fuse(gtensor, v1, offset);
6476 htensor = fuse(htensor, v2, offset);
6477 result_coeff.full_tensor()(get_cdata().s0) -= inner(gtensor, htensor, v1[0], v2[0]);
6478 }
6479 }
6480
6481
6482 // use case: 2-electron functions in svd representation f(1,3) = \int g(1,2) h(2,3) d2
6483 // c_ik = \sum_j a_ij b_jk = \sum_jrr' a_ri a'_rj b_r'j b_r'k
 6484 // = \sum_rr' ( a_ri (\sum_j a'_rj b_r'j) ) b_r'k
 6485 // = \sum_r' c_r'i b_r'k
6486 else if (gcoeff.is_svd_tensor() and hcoeff.is_svd_tensor() and result_coeff.is_svd_tensor()) {
6487 MADNESS_CHECK(v1[0]==0 or v1[CDIM-1]==LDIM-1);
6488 MADNESS_CHECK(v2[0]==0 or v2[CDIM-1]==KDIM-1);
6489 int gparticle= v1[0]==0 ? 0 : 1; // which particle to integrate over
6490 int hparticle= v2[0]==0 ? 0 : 1; // which particle to integrate over
6491 // merge multiple contraction dimensions into one
6492 Tensor<Q> gtensor = gcoeff1.get_svdtensor().flat_vector_with_weights(gparticle);
6493 Tensor<Q> gtensor_other = gcoeff1.get_svdtensor().flat_vector((gparticle+1)%2);
6494 Tensor<R> htensor = hcoeff1.get_svdtensor().flat_vector_with_weights(hparticle);
6495 Tensor<R> htensor_other = hcoeff1.get_svdtensor().flat_vector((hparticle+1)%2);
6496 Tensor<T> tmp1=inner(gtensor,htensor,1,1); // tmp1(r,r') = sum_j b(r,j) a(r',j)
6497 Tensor<T> tmp2=inner(tmp1,gtensor_other,0,0); // tmp2(r',i) = sum_r tmp1(r,r') a(r,i)
 6498 Tensor<double> w(tmp2.dim(0));
 6499 MADNESS_CHECK(tmp2.dim(0)==htensor_other.dim(0));
6500 w=1.0;
6501 coeffT result_tmp(get_cdata().v2k, get_tensor_type());
6502 result_tmp.get_svdtensor().set_vectors_and_weights(w,tmp2,htensor_other);
6503 if (key.level() > 0) {
6504 GenTensor<Q> gcoeff2 = copy(gcoeff1(g->get_cdata().s0));
6505 GenTensor<R> hcoeff2 = copy(hcoeff1(h->get_cdata().s0));
6506 Tensor<Q> gtensor = gcoeff2.get_svdtensor().flat_vector_with_weights(gparticle);
6507 Tensor<Q> gtensor_other = gcoeff2.get_svdtensor().flat_vector((gparticle+1)%2);
6508 Tensor<R> htensor = hcoeff2.get_svdtensor().flat_vector_with_weights(hparticle);
6509 Tensor<R> htensor_other = hcoeff2.get_svdtensor().flat_vector((hparticle+1)%2);
6510 Tensor<T> tmp1=inner(gtensor,htensor,1,1); // tmp1(r,r') = sum_j b(r,j) a(r',j)
6511 Tensor<T> tmp2=inner(tmp1,gtensor_other,0,0); // tmp2(r',i) = sum_r tmp1(r,r') a(r,i)
 6512 Tensor<double> w(tmp2.dim(0));
 6513 MADNESS_CHECK(tmp2.dim(0)==htensor_other.dim(0));
6514 w=1.0;
6515 coeffT result_coeff1(get_cdata().vk, get_tensor_type());
6516 result_coeff1.get_svdtensor().set_vectors_and_weights(w,tmp2,htensor_other);
6517 result_tmp(get_cdata().s0)-=result_coeff1;
6518 }
6519 result_coeff+=result_tmp;
6520 }
6521
6522 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(2) h(1,2) d2
 6523 // c_i = \sum_j a_j b_ij = \sum_jr a_j b_rj b'_ri
 6524 // = \sum_r ( \sum_j a_j b_rj ) b'_ri
6525 else if (gcoeff.is_full_tensor() and hcoeff.is_svd_tensor() and result_coeff.is_full_tensor()) {
6526 MADNESS_CHECK(v1[0]==0 and v1[CDIM-1]==LDIM-1);
6527 MADNESS_CHECK(v2[0]==0 or v2[CDIM-1]==KDIM-1);
6528 MADNESS_CHECK(LDIM==CDIM);
6529 int hparticle= v2[0]==0 ? 0 : 1; // which particle to integrate over
6530
6531 Tensor<T> r=contract2(hcoeff1,gcoeff1.full_tensor(),hparticle);
6532 if (key.level()>0) r(get_cdata().s0)-=contract2(copy(hcoeff1(h->get_cdata().s0)),copy(gcoeff.full_tensor()(g->get_cdata().s0)),hparticle);
6533 result_coeff.full_tensor()+=r;
6534 }
6535 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(1,2) h(2) d2
6536 // c_i = \sum_j a_ij b_j = \sum_jr a_ri a'_rj b_j
6537 // = \sum_jr ( a_ri (a'_rj b_j) )
6538 else if (gcoeff.is_svd_tensor() and hcoeff.is_full_tensor() and result_coeff.is_full_tensor()) {
6539 MADNESS_CHECK(v1[0]==0 or v1[CDIM-1]==LDIM-1);
6540 MADNESS_CHECK(v2[0]==0 and v2[CDIM-1]==KDIM-1);
6541 MADNESS_CHECK(KDIM==CDIM);
6542 int gparticle= v1[0]==0 ? 0 : 1; // which particle to integrate over
6543
6544 Tensor<T> r=contract2(gcoeff1,hcoeff1.full_tensor(),gparticle);
6545 if (key.level()>0) r(get_cdata().s0)-=contract2(copy(gcoeff1(g->get_cdata().s0)),copy(hcoeff.full_tensor()(h->get_cdata().s0)),gparticle);
6546 result_coeff.full_tensor()+=r;
6547
6548 } else {
6549 MADNESS_EXCEPTION("unknown case in partial_inner_contract",1);
6550 }
6551 }
6552
6553 MADNESS_CHECK(result_coeff.is_assigned());
6554 result_coeff.reduce_rank(get_thresh());
6555
6556 if (coeffs.is_local(key))
6557 coeffs.send(key, &nodeT::accumulate, result_coeff, coeffs, key, get_tensor_args());
 6558 else
 6559 coeffs.task(key, &nodeT::accumulate, result_coeff, coeffs, key, get_tensor_args(), TaskAttributes::hipri());
 6560 }
6561
6562 /// Return the inner product with an external function on a specified function node.
6563
 6564 /// @param[in] key Key of the function node (the domain of integration) on which to compute the inner product
6565 /// @param[in] c Tensor of coefficients for the function at the function node given by key
6566 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6567 /// @return Returns the inner product over the domain of a single function node, no guarantee of accuracy.
6568 T inner_ext_node(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f) const {
6569 tensorT fvals = tensorT(this->cdata.vk);
6570 // Compute the value of the external function at the quadrature points.
6571 fcube(key, *(f), cdata.quad_x, fvals);
6572 // Convert quadrature point values to scaling coefficients.
6573 tensorT fc = tensorT(values2coeffs(key, fvals));
6574 // Return the inner product of the two functions' scaling coefficients.
6575 return c.trace_conj(fc);
6576 }
6577
6578 /// Call inner_ext_node recursively until convergence.
6579 /// @param[in] key Key of the function node on which to compute inner product (the domain of integration)
6580 /// @param[in] c coeffs for the function at the node given by key
6581 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6582 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6583 /// @param[in] old_inner the inner product on the parent function node
6584 /// @return Returns the inner product over the domain of a single function, checks for convergence.
6585 T inner_ext_recursive(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine, T old_inner=T(0)) const {
6586 int i = 0;
6587 tensorT c_child, inner_child;
6588 T new_inner, result = 0.0;
6589
6590 c_child = tensorT(cdata.v2k); // tensor of child coeffs
6591 inner_child = Tensor<double>(pow(2, NDIM)); // child inner products
6592
6593 // If old_inner is default value, assume this is the first call
6594 // and compute inner product on this node.
6595 if (old_inner == T(0)) {
6596 old_inner = inner_ext_node(key, c, f);
6597 }
6598
6599 if (coeffs.find(key).get()->second.has_children()) {
6600 // Since the key has children and we know the func is redundant,
6601 // Iterate over all children of this compute node, computing
6602 // the inner product on each child node. new_inner will store
6603 // the sum of these, yielding a more accurate inner product.
6604 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6605 const keyT& child = it.key();
6606 tensorT cc = coeffs.find(child).get()->second.coeff().full_tensor_copy();
6607 inner_child(i) = inner_ext_node(child, cc, f);
6608 }
6609 new_inner = inner_child.sum();
6610 } else if (leaf_refine) {
6611 // We need the scaling coefficients of the numerical function
6612 // at each of the children nodes. We can't use project because
6613 // there is no guarantee that the numerical function will have
6614 // a functor. Instead, since we know we are at or below the
6615 // leaf nodes, the wavelet coefficients are zero (to within the
6616 // truncate tolerance). Thus, we can use unfilter() to
6617 // get the scaling coefficients at the next level.
6618 tensorT d = tensorT(cdata.v2k);
6619 d = T(0);
6620 d(cdata.s0) = copy(c);
6621 c_child = unfilter(d);
6622
6623 // Iterate over all children of this compute node, computing
6624 // the inner product on each child node. new_inner will store
6625 // the sum of these, yielding a more accurate inner product.
6626 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6627 const keyT& child = it.key();
6628 tensorT cc = tensorT(c_child(child_patch(child)));
6629 inner_child(i) = inner_ext_node(child, cc, f);
6630 }
6631 new_inner = inner_child.sum();
6632 } else {
6633 // If we get to here, we are at the leaf nodes and the user has
6634 // specified that they do not want refinement past leaf nodes.
6635 new_inner = old_inner;
6636 }
6637
6638 // Check for convergence. If converged...yay, we're done. If not,
6639 // call inner_ext_node_recursive on each child node and accumulate
6640 // the inner product in result.
6641 // if (std::abs(new_inner - old_inner) <= truncate_tol(thresh, key)) {
6642 if (std::abs(new_inner - old_inner) <= thresh) {
6643 result = new_inner;
6644 } else {
6645 i = 0;
6646 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6647 const keyT& child = it.key();
6648 tensorT cc = tensorT(c_child(child_patch(child)));
6649 result += inner_ext_recursive(child, cc, f, leaf_refine, inner_child(i));
6650 }
6651 }
6652
6653 return result;
6654 }
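 // Added note (illustrative numbers): the recursion above stops once the child-refined
 // estimate changes the inner product by no more than thresh, e.g. with thresh=1e-6 a
 // change from 0.12345678 to 0.12345701 (|delta| ~ 2.3e-7) terminates, while a change
 // of 1e-5 descends one more level and sums the children's contributions instead.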
6655
 6656 struct do_inner_ext_local_ffi {
 6657 const std::shared_ptr< FunctionFunctorInterface<T, NDIM> > fref;
6658 const implT * impl;
6659 const bool leaf_refine;
6660 const bool do_leaves; ///< start with leaf nodes instead of initial_level
6661
 6662 do_inner_ext_local_ffi(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f,
 6663 const implT * impl, const bool leaf_refine, const bool do_leaves)
6664 : fref(f), impl(impl), leaf_refine(leaf_refine), do_leaves(do_leaves) {};
6665
6666 T operator()(typename dcT::const_iterator& it) const {
6667 if (do_leaves and it->second.is_leaf()) {
6668 tensorT cc = it->second.coeff().full_tensor();
6669 return impl->inner_adaptive_recursive(it->first, cc, fref, leaf_refine, T(0));
6670 } else if ((not do_leaves) and (it->first.level() == impl->initial_level)) {
6671 tensorT cc = it->second.coeff().full_tensor();
6672 return impl->inner_ext_recursive(it->first, cc, fref, leaf_refine, T(0));
6673 } else {
6674 return 0.0;
6675 }
6676 }
6677
6678 T operator()(T a, T b) const {
6679 return (a + b);
6680 }
6681
6682 template <typename Archive> void serialize(const Archive& ar) {
6683 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
6684 }
6685 };
6686
6687 /// Return the local part of inner product with external function ... no communication.
6688 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6689 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6690 /// @return Returns local part of the inner product, i.e. over the domain of all function nodes on this compute node.
6691 T inner_ext_local(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine) const {
 6692 typedef Range<typename dcT::const_iterator> rangeT;
 6693
 6694 return world.taskq.reduce<T, rangeT, do_inner_ext_local_ffi>(rangeT(coeffs.begin(), coeffs.end()),
 6695 do_inner_ext_local_ffi(f, this, leaf_refine, false));
6696 }
6697
6698 /// Return the local part of inner product with external function ... no communication.
6699 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6700 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6701 /// @return Returns local part of the inner product, i.e. over the domain of all function nodes on this compute node.
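 /// \par Example (added illustration; the functor and `impl` are hypothetical)
 /// Integrate an externally defined Gaussian against this function on the local rank
 /// (a global sum over ranks is still required for the full inner product):
 /// \code
 /// struct Gauss : public FunctionFunctorInterface<double,3> {
 ///     double operator()(const Vector<double,3>& r) const override {
 ///         return std::exp(-(r[0]*r[0] + r[1]*r[1] + r[2]*r[2]));
 ///     }
 /// };
 /// auto g = std::make_shared<Gauss>();
 /// double local = impl.inner_adaptive_local(g, true);   // leaf_refine=true refines past leaves
 /// \endcode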
6702 T inner_adaptive_local(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine) const {
 6703 typedef Range<typename dcT::const_iterator> rangeT;
 6704
 6705 return world.taskq.reduce<T, rangeT, do_inner_ext_local_ffi>(rangeT(coeffs.begin(), coeffs.end()),
 6706 do_inner_ext_local_ffi(f, this, leaf_refine, true));
6707 }
6708
6709 /// Call inner_ext_node recursively until convergence.
6710 /// @param[in] key Key of the function node on which to compute inner product (the domain of integration)
6711 /// @param[in] c coeffs for the function at the node given by key
6712 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6713 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6714 /// @param[in] old_inner the inner product on the parent function node
6715 /// @return Returns the inner product over the domain of a single function, checks for convergence.
 6716 T inner_adaptive_recursive(keyT key, const tensorT& c,
 6717 const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f,
6718 const bool leaf_refine, T old_inner=T(0)) const {
6719
6720 // the inner product in the current node
6721 old_inner = inner_ext_node(key, c, f);
6722 T result=0.0;
6723
6724 // the inner product in the child nodes
6725
6726 // compute the sum coefficients of the MRA function
6727 tensorT d = tensorT(cdata.v2k);
6728 d = T(0);
6729 d(cdata.s0) = copy(c);
6730 tensorT c_child = unfilter(d);
6731
6732 // compute the inner product in the child nodes
6733 T new_inner=0.0; // child inner products
6734 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6735 const keyT& child = it.key();
6736 tensorT cc = tensorT(c_child(child_patch(child)));
6737 new_inner+= inner_ext_node(child, cc, f);
6738 }
6739
6740 // continue recursion if needed
6741 const double tol=truncate_tol(thresh,key);
6742 if (leaf_refine and (std::abs(new_inner - old_inner) > tol)) {
6743 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6744 const keyT& child = it.key();
6745 tensorT cc = tensorT(c_child(child_patch(child)));
6746 result += inner_adaptive_recursive(child, cc, f, leaf_refine, T(0));
6747 }
6748 } else {
6749 result = new_inner;
6750 }
6751 return result;
6752
6753 }
6754
6755
6756 /// Return the gaxpy product with an external function on a specified
6757 /// function node.
6758 /// @param[in] key Key of the function node on which to compute gaxpy
6759 /// @param[in] lc Tensor of coefficients for the function at the
6760 /// function node given by key
6761 /// @param[in] f Pointer to function of type T that takes coordT
6762 /// arguments. This is the externally provided function and
6763 /// the right argument of gaxpy.
6764 /// @param[in] alpha prefactor of c Tensor for gaxpy
6765 /// @param[in] beta prefactor of fcoeffs for gaxpy
6766 /// @return Returns coefficient tensor of the gaxpy product at specified
6767 /// key, no guarantee of accuracy.
6768 template <typename L>
6769 tensorT gaxpy_ext_node(keyT key, Tensor<L> lc, T (*f)(const coordT&), T alpha, T beta) const {
6770 // Compute the value of external function at the quadrature points.
6771 tensorT fvals = madness::fcube(key, f, cdata.quad_x);
6772 // Convert quadrature point values to scaling coefficients.
6773 tensorT fcoeffs = values2coeffs(key, fvals);
6774 // Return the inner product of the two functions' scaling coeffs.
6775 tensorT c2 = copy(lc);
6776 c2.gaxpy(alpha, fcoeffs, beta);
6777 return c2;
6778 }
6779
6780 /// Return out of place gaxpy using recursive descent.
6781 /// @param[in] key Key of the function node on which to compute gaxpy
6782 /// @param[in] left FunctionImpl, left argument of gaxpy
6783 /// @param[in] lcin coefficients of left at this node
6784 /// @param[in] c coefficients of gaxpy product at this node
6785 /// @param[in] f pointer to function of type T that takes coordT
6786 /// arguments. This is the externally provided function and
6787 /// the right argument of gaxpy.
6788 /// @param[in] alpha prefactor of left argument for gaxpy
6789 /// @param[in] beta prefactor of right argument for gaxpy
6790 /// @param[in] tol convergence tolerance...when the norm of the gaxpy's
6791 /// difference coefficients is less than tol, we are done.
6792 template <typename L>
6793 void gaxpy_ext_recursive(const keyT& key, const FunctionImpl<L,NDIM>* left,
6794 Tensor<L> lcin, tensorT c, T (*f)(const coordT&),
6795 T alpha, T beta, double tol, bool below_leaf) {
6796 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
6797
6798 // If we haven't yet reached the leaf level, check whether the
6799 // current key is a leaf node of left. If so, set below_leaf to true
6800 // and continue. If not, make this a parent, recur down, return.
6801 if (not below_leaf) {
6802 bool left_leaf = left->coeffs.find(key).get()->second.is_leaf();
6803 if (left_leaf) {
6804 below_leaf = true;
6805 } else {
6806 this->coeffs.replace(key, nodeT(coeffT(), true));
6807 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6808 const keyT& child = it.key();
6809 woT::task(left->coeffs.owner(child), &implT:: template gaxpy_ext_recursive<L>,
6810 child, left, Tensor<L>(), tensorT(), f, alpha, beta, tol, below_leaf);
6811 }
6812 return;
6813 }
6814 }
6815
6816 // Compute left's coefficients if not provided
6817 Tensor<L> lc = lcin;
6818 if (lc.size() == 0) {
6819 literT it = left->coeffs.find(key).get();
6820 MADNESS_ASSERT(it != left->coeffs.end());
6821 if (it->second.has_coeff())
6822 lc = it->second.coeff().reconstruct_tensor();
6823 }
6824
6825 // Compute this node's coefficients if not provided in function call
6826 if (c.size() == 0) {
6827 c = gaxpy_ext_node(key, lc, f, alpha, beta);
6828 }
6829
6830 // We need the scaling coefficients of the numerical function at
6831 // each of the children nodes. We can't use project because there
6832 // is no guarantee that the numerical function will have a functor.
6833 // Instead, since we know we are at or below the leaf nodes, the
6834 // wavelet coefficients are zero (to within the truncate tolerance).
6835 // Thus, we can use unfilter() to get the scaling coefficients at
6836 // the next level.
6837 Tensor<L> lc_child = Tensor<L>(cdata.v2k); // left's child coeffs
6838 Tensor<L> ld = Tensor<L>(cdata.v2k);
6839 ld = L(0);
6840 ld(cdata.s0) = copy(lc);
6841 lc_child = unfilter(ld);
6842
6843 // Iterate over children of this node,
6844 // storing the gaxpy coeffs in c_child
6845 tensorT c_child = tensorT(cdata.v2k); // tensor of child coeffs
6846 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6847 const keyT& child = it.key();
6848 tensorT lcoeff = tensorT(lc_child(child_patch(child)));
6849 c_child(child_patch(child)) = gaxpy_ext_node(child, lcoeff, f, alpha, beta);
6850 }
6851
6852 // Compute the difference coefficients to test for convergence.
6853 tensorT d = tensorT(cdata.v2k);
6854 d = filter(c_child);
6855 // Filter returns both s and d coefficients, so set scaling
6856 // coefficient part of d to 0 so that we take only the
6857 // norm of the difference coefficients.
6858 d(cdata.s0) = T(0);
6859 double dnorm = d.normf();
6860
6861 // Small d.normf means we've reached a good level of resolution
6862 // Store the coefficients and return.
6863 if (dnorm <= truncate_tol(tol,key)) {
6864 this->coeffs.replace(key, nodeT(coeffT(c,targs), false));
6865 } else {
6866 // Otherwise, make this a parent node and recur down
6867 this->coeffs.replace(key, nodeT(coeffT(), true)); // Interior node
6868
6869 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6870 const keyT& child = it.key();
6871 tensorT child_coeff = tensorT(c_child(child_patch(child)));
6872 tensorT left_coeff = tensorT(lc_child(child_patch(child)));
6873 woT::task(left->coeffs.owner(child), &implT:: template gaxpy_ext_recursive<L>,
6874 child, left, left_coeff, child_coeff, f, alpha, beta, tol, below_leaf);
6875 }
6876 }
6877 }
6878
6879 template <typename L>
6880 void gaxpy_ext(const FunctionImpl<L,NDIM>* left, T (*f)(const coordT&), T alpha, T beta, double tol, bool fence) {
6881 if (world.rank() == coeffs.owner(cdata.key0))
6882 gaxpy_ext_recursive<L> (cdata.key0, left, Tensor<L>(), tensorT(), f, alpha, beta, tol, false);
6883 if (fence)
6884 world.gop.fence();
6885 }
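 // Added example (illustrative; `f_ext`, `left_impl` and `result_impl` are hypothetical):
 // form result = 2*left + 3*f_ext, with f_ext supplied as a plain function pointer:
 //
 //     double f_ext(const Vector<double,3>& r) { return r[0]*r[1]*r[2]; }
 //     // ...
 //     result_impl.gaxpy_ext(&left_impl, f_ext, 2.0, 3.0, 1e-6, true);   // fenced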
6886
6887 /// project the low-dim function g on the hi-dim function f: result(x) = <this(x,y) | g(y)>
6888
 6889 /// invoked by the hi-dim function (this), a function of NDIM dimensions
 6890
 6891 /// Upon return, result matches this, with contributions on all scales
 6892 /// @param[out] result lo-dim function of NDIM-LDIM; accumulated into on return
 6893 /// @param[in] gimpl lo-dim function of LDIM
 6894 /// @param[in] dim which dimensions to integrate over: 0..LDIM-1 or LDIM..NDIM-1
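 /// \par Example (added illustration; the impl objects and the reconstructed signature below are assumptions)
 /// Contract a 6D pair function against a 3D function over the first particle and
 /// accumulate the 3D remainder into result, r(x2) = <g(x1)|f(x1,x2)>_1:
 /// \code
 /// // FunctionImpl<double,6> f6;   FunctionImpl<double,3> g3, r3;
 /// f6.project_out(&r3, &g3, 0, true);   // dim=0: integrate over the first LDIM dimensions
 /// \endcode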
6895 template<size_t LDIM>
 6896 void project_out(FunctionImpl<T,NDIM-LDIM>* result, const FunctionImpl<T,LDIM>* gimpl,
 6897 const int dim, const bool fence) {
6898
6899 const keyT& key0=cdata.key0;
6900
6901 if (world.rank() == coeffs.owner(key0)) {
6902
6903 // coeff_op will accumulate the result
6904 typedef project_out_op<LDIM> coeff_opT;
6905 coeff_opT coeff_op(this,result,CoeffTracker<T,LDIM>(gimpl),dim);
6906
6907 // don't do anything on this -- coeff_op will accumulate into result
6908 typedef noop<T,NDIM> apply_opT;
6909 apply_opT apply_op;
6910
6911 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
6912 coeff_op, apply_op, cdata.key0);
6913
6914 }
6915 if (fence) world.gop.fence();
6916
6917 }
6918
6919
6920 /// project the low-dim function g on the hi-dim function f: result(x) = <f(x,y) | g(y)>
6921 template<size_t LDIM>
 6922 struct project_out_op {
 6923 bool randomize() const {return false;}
 6925 typedef project_out_op<LDIM> this_type;
 6926 typedef CoeffTracker<T,LDIM> ctL;
6927 typedef FunctionImpl<T,NDIM-LDIM> implL1;
6928 typedef std::pair<bool,coeffT> argT;
6929
6930 const implT* fimpl; ///< the hi dim function f
6931 mutable implL1* result; ///< the low dim result function
6932 ctL iag; ///< the low dim function g
6933 int dim; ///< 0: project 0..LDIM-1, 1: project LDIM..NDIM-1
6934
6935 // ctor
6936 project_out_op() = default;
6937 project_out_op(const implT* fimpl, implL1* result, const ctL& iag, const int dim)
6938 : fimpl(fimpl), result(result), iag(iag), dim(dim) {}
 6939 project_out_op(const project_out_op& other)
 6940 : fimpl(other.fimpl), result(other.result), iag(other.iag), dim(other.dim) {}
6941
6942
6943 /// do the actual contraction
 6945 Future<argT> operator()(const Key<NDIM>& key) const {
6946 Key<LDIM> key1,key2,dest;
6947 key.break_apart(key1,key2);
6948
6949 // make the right coefficients
6950 coeffT gcoeff;
6951 if (dim==0) {
6952 gcoeff=iag.get_impl()->parent_to_child(iag.coeff(),iag.key(),key1);
6953 dest=key2;
6954 }
6955 if (dim==1) {
6956 gcoeff=iag.get_impl()->parent_to_child(iag.coeff(),iag.key(),key2);
6957 dest=key1;
6958 }
6959
6960 MADNESS_ASSERT(fimpl->get_coeffs().probe(key)); // must be local!
6961 const nodeT& fnode=fimpl->get_coeffs().find(key).get()->second;
6962 const coeffT& fcoeff=fnode.coeff();
6963
6964 // fast return if possible
6965 if (fcoeff.has_no_data() or gcoeff.has_no_data())
 6966 return Future<argT> (argT(fnode.is_leaf(),coeffT()));
6967
6968 MADNESS_CHECK(gcoeff.is_full_tensor());
6969 tensorT final(result->cdata.vk);
6970 const int k=fcoeff.dim(0);
6971 const int k_ldim=std::pow(k,LDIM);
6972 std::vector<long> shape(LDIM, k);
6973
6974 if (fcoeff.is_full_tensor()) {
6975 // result_i = \sum_j g_j f_ji
6976 const tensorT gtensor = gcoeff.full_tensor().reshape(k_ldim);
6977 const tensorT ftensor = fcoeff.full_tensor().reshape(k_ldim,k_ldim);
6978 final=inner(gtensor,ftensor,0,dim).reshape(shape);
6979
6980 } else if (fcoeff.is_svd_tensor()) {
6981 if (fcoeff.rank()>0) {
6982
6983 // result_i = \sum_jr g_j a_rj w_r b_ri
6984 const int otherdim = (dim + 1) % 2;
6985 const tensorT gtensor = gcoeff.full_tensor().flat();
6986 const tensorT atensor = fcoeff.get_svdtensor().flat_vector(dim); // a_rj
6987 const tensorT btensor = fcoeff.get_svdtensor().flat_vector(otherdim);
6988 const tensorT gatensor = inner(gtensor, atensor, 0, 1); // ga_r
6989 tensorT weights = copy(fcoeff.get_svdtensor().weights_);
6990 weights.emul(gatensor); // ga_r * w_r
6991 // sum over all ranks of b, include new weights:
6992 // result_i = \sum_r ga_r * w_r * b_ri
6993 for (int r = 0; r < fcoeff.rank(); ++r) final += weights(r) * btensor(r, _);
6994 final = final.reshape(shape);
6995 }
6996
6997 } else {
6998 MADNESS_EXCEPTION("unsupported tensor type in project_out_op",1);
6999 }
7000
7001 // accumulate the result
7002 result->coeffs.task(dest, &FunctionNode<T,LDIM>::accumulate2, final, result->coeffs, dest, TaskAttributes::hipri());
7003
7004 return Future<argT> (argT(fnode.is_leaf(),coeffT()));
7005 }
7006
7007 this_type make_child(const keyT& child) const {
7008 Key<LDIM> key1,key2;
7009 child.break_apart(key1,key2);
7010 const Key<LDIM> gkey = (dim==0) ? key1 : key2;
7011
7012 return this_type(fimpl,result,iag.make_child(gkey),dim);
7013 }
7014
 7015 /// retrieve the coefficients (parent coeffs might be remote)
 7016 Future<this_type> activate() const {
 7017 Future<ctL> g1=iag.activate();
 7018 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
7019 &this_type::forward_ctor),fimpl,result,g1,dim);
7020 }
7021
7022 /// taskq-compatible ctor
7023 this_type forward_ctor(const implT* fimpl1, implL1* result1, const ctL& iag1, const int dim1) {
7024 return this_type(fimpl1,result1,iag1,dim1);
7025 }
7026
7027 template <typename Archive> void serialize(const Archive& ar) {
7028 ar & result & iag & fimpl & dim;
7029 }
7030
7031 };
7032
7033
7034 /// project the low-dim function g on the hi-dim function f: this(x) = <f(x,y) | g(y)>
7035
7036 /// invoked by result, a function of NDIM
7037
7038 /// @param[in] f hi-dim function of LDIM+NDIM
7039 /// @param[in] g lo-dim function of LDIM
 7040 /// @param[in] dim which dimensions of f to integrate over: 0..LDIM-1 or LDIM..LDIM+NDIM-1
7041 template<size_t LDIM>
7042 void project_out2(const FunctionImpl<T,LDIM+NDIM>* f, const FunctionImpl<T,LDIM>* g, const int dim) {
7043
7044 typedef std::pair< keyT,coeffT > pairT;
7045 typedef typename FunctionImpl<T,NDIM+LDIM>::dcT::const_iterator fiterator;
7046
7047 // loop over all nodes of hi-dim f, compute the inner products with all
7048 // appropriate nodes of g, and accumulate in result
7049 fiterator end = f->get_coeffs().end();
7050 for (fiterator it=f->get_coeffs().begin(); it!=end; ++it) {
7051 const Key<LDIM+NDIM> key=it->first;
7052 const FunctionNode<T,LDIM+NDIM> fnode=it->second;
7053 const coeffT& fcoeff=fnode.coeff();
7054
7055 if (fnode.is_leaf() and fcoeff.has_data()) {
7056
 7057 // break key into two parts: depending on dim, one part addresses g and is
 7058 // integrated (contracted) over, the other is the destination key for the result
7059 if (dim==0) {
7060 Key<NDIM> key1;
7061 Key<LDIM> key2;
7062 key.break_apart(key1,key2);
7063
7064 Future<pairT> result;
7065 // sock_it_to_me(key1, result.remote_ref(world));
7066 g->task(coeffs.owner(key1), &implT::sock_it_to_me, key1, result.remote_ref(world), TaskAttributes::hipri());
7067 woT::task(world.rank(),&implT:: template do_project_out<LDIM>,fcoeff,result,key1,key2,dim);
7068
7069 } else if (dim==1) {
7070 Key<LDIM> key1;
7071 Key<NDIM> key2;
7072 key.break_apart(key1,key2);
7073
7074 Future<pairT> result;
7075 // sock_it_to_me(key2, result.remote_ref(world));
7076 g->task(coeffs.owner(key2), &implT::sock_it_to_me, key2, result.remote_ref(world), TaskAttributes::hipri());
7077 woT::task(world.rank(),&implT:: template do_project_out<LDIM>,fcoeff,result,key2,key1,dim);
7078
7079 } else {
7080 MADNESS_EXCEPTION("confused dim in project_out",1);
7081 }
7082 }
7083 }
7085// this->compressed=false;
7086// this->nonstandard=false;
7087// this->redundant=true;
7088 }
7089
7090
7091 /// compute the inner product of two nodes of only some dimensions and accumulate on result
7092
7093 /// invoked by result
7094 /// @param[in] fcoeff coefficients of high dimension LDIM+NDIM
7095 /// @param[in] gpair key and coeffs of low dimension LDIM (possibly a parent node)
7096 /// @param[in] gkey key of actual low dim node (possibly the same as gpair.first, iff gnode exists)
7097 /// @param[in] dest destination node for the result
7098 /// @param[in] dim which dimensions should be contracted: 0..LDIM-1 or LDIM..NDIM+LDIM-1
7099 template<size_t LDIM>
7100 void do_project_out(const coeffT& fcoeff, const std::pair<keyT,coeffT> gpair, const keyT& gkey,
7101 const Key<NDIM>& dest, const int dim) const {
7102
7103 const coeffT gcoeff=parent_to_child(gpair.second,gpair.first,gkey);
7104
7105 // fast return if possible
7106 if (fcoeff.has_no_data() or gcoeff.has_no_data()) return;
7107
7108 // let's specialize for the time being on SVD tensors for f and full tensors of half dim for g
7110 MADNESS_ASSERT(fcoeff.tensor_type()==TT_2D);
7111 const tensorT gtensor=gcoeff.full_tensor();
7112 tensorT result(cdata.vk);
7113
7114 const int otherdim=(dim+1)%2;
7115 const int k=fcoeff.dim(0);
7116 std::vector<Slice> s(fcoeff.config().dim_per_vector()+1,_);
7117
7118 // do the actual contraction
7119 for (int r=0; r<fcoeff.rank(); ++r) {
7120 s[0]=Slice(r,r);
7121 const tensorT contracted_tensor=fcoeff.config().ref_vector(dim)(s).reshape(k,k,k);
7122 const tensorT other_tensor=fcoeff.config().ref_vector(otherdim)(s).reshape(k,k,k);
7123 const double ovlp= gtensor.trace_conj(contracted_tensor);
7124 const double fac=ovlp * fcoeff.config().weights(r);
7125 result+=fac*other_tensor;
7126 }
7127
7128 // accumulate the result
7129 coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
7130 }
7131
7132
7133
7134
7135 /// Returns the maximum local depth of the tree ... no communications.
7136 std::size_t max_local_depth() const;
7137
7138
7139 /// Returns the maximum depth of the tree ... collective ... global sum/broadcast
7140 std::size_t max_depth() const;
7141
7142 /// Returns the max number of nodes on a processor
7143 std::size_t max_nodes() const;
7144
7145 /// Returns the min number of nodes on a processor
7146 std::size_t min_nodes() const;
7147
7148 /// Returns the size of the tree structure of the function ... collective global sum
7149 std::size_t tree_size() const;
7150
7151 /// Returns the number of coefficients in the function for each rank
7152 std::size_t size_local() const;
7153
7154 /// Returns the number of coefficients in the function ... collective global sum
7155 std::size_t size() const;
7156
7157 /// Returns the number of coefficients in the function for this MPI rank
7158 std::size_t nCoeff_local() const;
7159
7160 /// Returns the number of coefficients in the function ... collective global sum
7161 std::size_t nCoeff() const;
7162
7163 /// Returns the number of coefficients in the function ... collective global sum
7164 std::size_t real_size() const;
7165
7166 /// print tree size and size
7167 void print_size(const std::string name) const;
7168
7169 /// print the number of configurations per node
7170 void print_stats() const;
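 // Added example (illustrative; `impl` is a hypothetical FunctionImpl instance). The
 // collective queries below must be called on all ranks of the world:
 //
 //     std::size_t nboxes = impl.tree_size();   // global number of tree nodes
 //     std::size_t ncoeff = impl.size();        // global number of coefficients
 //     std::size_t depth  = impl.max_depth();   // global maximum refinement depth
 //     impl.print_size("phi");                  // prints tree size and size, labelled "phi"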
7171
7172 /// In-place scale by a constant
7173 void scale_inplace(const T q, bool fence);
7174
7175 /// Out-of-place scale by a constant
7176 template <typename Q, typename F>
7177 void scale_oop(const Q q, const FunctionImpl<F,NDIM>& f, bool fence) {
7178 typedef typename FunctionImpl<F,NDIM>::nodeT fnodeT;
7179 typedef typename FunctionImpl<F,NDIM>::dcT fdcT;
7180 typename fdcT::const_iterator end = f.coeffs.end();
7181 for (typename fdcT::const_iterator it=f.coeffs.begin(); it!=end; ++it) {
7182 const keyT& key = it->first;
7183 const fnodeT& node = it->second;
7184
7185 if (node.has_coeff()) {
7186 coeffs.replace(key,nodeT(node.coeff()*q,node.has_children()));
7187 }
7188 else {
7189 coeffs.replace(key,nodeT(coeffT(),node.has_children()));
7190 }
7191 }
7192 if (fence)
7193 world.gop.fence();
7194 }
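 // Added example (illustrative; `f` and `result` are hypothetical FunctionImpl<double,3>
 // instances): out-of-place scaling copies f's tree node by node, multiplying each
 // coefficient block by q:
 //
 //     result.scale_oop(0.5, f, true);   // result = 0.5*f, fenced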
7195
7196 /// Hash a pointer to \c FunctionImpl
7197
7198 /// \param[in] impl pointer to a FunctionImpl
7199 /// \return The hash.
7200 inline friend hashT hash_value(const FunctionImpl<T,NDIM>* pimpl) {
7201 hashT seed = hash_value(pimpl->id().get_world_id());
7202 detail::combine_hash(seed, hash_value(pimpl->id().get_obj_id()));
7203 return seed;
7204 }
7205
7206 /// Hash a shared_ptr to \c FunctionImpl
7207
7208 /// \param[in] impl pointer to a FunctionImpl
7209 /// \return The hash.
7210 inline friend hashT hash_value(const std::shared_ptr<FunctionImpl<T,NDIM>> impl) {
7211 return hash_value(impl.get());
7212 }
7213 };
7214
7215 namespace archive {
7216 template <class Archive, class T, std::size_t NDIM>
7217 struct ArchiveLoadImpl<Archive,const FunctionImpl<T,NDIM>*> {
7218 static void load(const Archive& ar, const FunctionImpl<T,NDIM>*& ptr) {
7219 bool exists=false;
7220 ar & exists;
7221 if (exists) {
7222 uniqueidT id;
7223 ar & id;
7224 World* world = World::world_from_id(id.get_world_id());
7225 MADNESS_ASSERT(world);
7226 auto ptr_opt = world->ptr_from_id< WorldObject< FunctionImpl<T,NDIM> > >(id);
7227 if (!ptr_opt)
7228 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use a locally uninitialized object",0);
7229 ptr = static_cast< const FunctionImpl<T,NDIM>*>(*ptr_opt);
7230 if (!ptr)
7231 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use an unregistered object",0);
7232 } else {
7233 ptr=nullptr;
7234 }
7235 }
7236 };
7237
7238 template <class Archive, class T, std::size_t NDIM>
7239 struct ArchiveStoreImpl<Archive,const FunctionImpl<T,NDIM>*> {
7240 static void store(const Archive& ar, const FunctionImpl<T,NDIM>*const& ptr) {
7241 bool exists=(ptr) ? true : false;
7242 ar & exists;
7243 if (exists) ar & ptr->id();
7244 }
7245 };
7246
7247 template <class Archive, class T, std::size_t NDIM>
7248 struct ArchiveLoadImpl<Archive, FunctionImpl<T,NDIM>*> {
7249 static void load(const Archive& ar, FunctionImpl<T,NDIM>*& ptr) {
7250 bool exists=false;
7251 ar & exists;
7252 if (exists) {
7253 uniqueidT id;
7254 ar & id;
7255 World* world = World::world_from_id(id.get_world_id());
7256 MADNESS_ASSERT(world);
7257 auto ptr_opt = world->ptr_from_id< WorldObject< FunctionImpl<T,NDIM> > >(id);
7258 if (!ptr_opt)
7259 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use a locally uninitialized object",0);
7260 ptr = static_cast< FunctionImpl<T,NDIM>*>(*ptr_opt);
7261 if (!ptr)
7262 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use an unregistered object",0);
7263 } else {
7264 ptr=nullptr;
7265 }
7266 }
7267 };
7268
7269 template <class Archive, class T, std::size_t NDIM>
 7270 struct ArchiveStoreImpl<Archive, FunctionImpl<T,NDIM>*> {
 7271 static void store(const Archive& ar, FunctionImpl<T,NDIM>*const& ptr) {
7272 bool exists=(ptr) ? true : false;
7273 ar & exists;
7274 if (exists) ar & ptr->id();
7275 // ar & ptr->id();
7276 }
7277 };
7278
7279 template <class Archive, class T, std::size_t NDIM>
7280 struct ArchiveLoadImpl<Archive, std::shared_ptr<const FunctionImpl<T,NDIM> > > {
7281 static void load(const Archive& ar, std::shared_ptr<const FunctionImpl<T,NDIM> >& ptr) {
 7282 const FunctionImpl<T,NDIM>* f = nullptr;
 7283 ar & f;
 7284 ptr.reset(f, [] (const FunctionImpl<T,NDIM> *p_) -> void {});
7285 }
7286 };
7287
7288 template <class Archive, class T, std::size_t NDIM>
7289 struct ArchiveStoreImpl<Archive, std::shared_ptr<const FunctionImpl<T,NDIM> > > {
 7290 static void store(const Archive& ar, const std::shared_ptr<const FunctionImpl<T,NDIM> >& ptr) {
 7291 ar & ptr.get();
 7292 }
7293 };
7294
7295 template <class Archive, class T, std::size_t NDIM>
7296 struct ArchiveLoadImpl<Archive, std::shared_ptr<FunctionImpl<T,NDIM> > > {
7297 static void load(const Archive& ar, std::shared_ptr<FunctionImpl<T,NDIM> >& ptr) {
 7298 FunctionImpl<T,NDIM>* f = nullptr;
 7299 ar & f;
 7300 ptr.reset(f, [] (FunctionImpl<T,NDIM> *p_) -> void {});
7301 }
7302 };
7303
7304 template <class Archive, class T, std::size_t NDIM>
7305 struct ArchiveStoreImpl<Archive, std::shared_ptr<FunctionImpl<T,NDIM> > > {
 7306 static void store(const Archive& ar, const std::shared_ptr<FunctionImpl<T,NDIM> >& ptr) {
 7307 ar & ptr.get();
 7308 }
7309 };
7310 }
7311
7312}
7313
7314#endif // MADNESS_MRA_FUNCIMPL_H__INCLUDED
double w(double t, double eps)
Definition DKops.h:22
double q(double t)
Definition DKops.h:18
This header should include pretty much everything needed for the parallel runtime.
An integer with atomic set, get, read+increment, read+decrement, and decrement+test operations.
Definition atomicint.h:126
long dim(int i) const
Returns the size of dimension i.
Definition basetensor.h:147
long ndim() const
Returns the number of dimensions in the tensor.
Definition basetensor.h:144
long size() const
Returns the number of elements in the tensor.
Definition basetensor.h:138
Definition displacements.h:717
Definition displacements.h:294
std::function< bool(Level, const PointPattern &, std::optional< Displacement > &)> Filter
this callable filters out points and/or displacements; note that the displacement is optional (this u...
Definition displacements.h:300
a class to track where relevant (parent) coeffs are
Definition funcimpl.h:791
const keyT & key() const
const reference to the key
Definition funcimpl.h:839
CoeffTracker(const CoeffTracker &other, const datumT &datum)
ctor with a pair<keyT,nodeT>
Definition funcimpl.h:821
const LeafStatus & is_leaf() const
const reference to is_leaf flag
Definition funcimpl.h:863
const implT * impl
the funcimpl that has the coeffs
Definition funcimpl.h:800
LeafStatus
Definition funcimpl.h:797
@ yes
Definition funcimpl.h:797
@ no
Definition funcimpl.h:797
@ unknown
Definition funcimpl.h:797
CoeffTracker(const CoeffTracker &other)
copy ctor
Definition funcimpl.h:829
double dnorm(const keyT &key) const
return the s and dnorm belonging to the passed-in key
Definition funcimpl.h:856
coeffT coeff_
the coefficients belonging to key
Definition funcimpl.h:806
const implT * get_impl() const
const reference to impl
Definition funcimpl.h:833
const coeffT & coeff() const
const reference to the coeffs
Definition funcimpl.h:836
keyT key_
the current key, which must exists in impl
Definition funcimpl.h:802
double dnorm_
norm of d coefficients corresponding to key
Definition funcimpl.h:808
CoeffTracker(const implT *impl)
the initial ctor making the root key
Definition funcimpl.h:816
void serialize(const Archive &ar)
serialization
Definition funcimpl.h:915
Future< CoeffTracker > activate() const
find the coefficients
Definition funcimpl.h:892
CoeffTracker()
default ctor
Definition funcimpl.h:813
GenTensor< T > coeffT
Definition funcimpl.h:795
CoeffTracker make_child(const keyT &child) const
make a child of this, ignoring the coeffs
Definition funcimpl.h:866
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:793
std::pair< Key< NDIM >, ShallowNode< T, NDIM > > datumT
Definition funcimpl.h:796
CoeffTracker forward_ctor(const CoeffTracker &other, const datumT &datum) const
taskq-compatible forwarding to the ctor
Definition funcimpl.h:909
LeafStatus is_leaf_
flag if key is a leaf node
Definition funcimpl.h:804
coeffT coeff(const keyT &key) const
return the coefficients belonging to the passed-in key
Definition funcimpl.h:847
Key< NDIM > keyT
Definition funcimpl.h:794
CompositeFunctorInterface implements a wrapper of holding several functions and functors.
Definition function_interface.h:165
Definition worldhashmap.h:396
Tri-diagonal operator traversing tree primarily for derivative operator.
Definition derivative.h:73
Holds displacements for applying operators to avoid replicating for all operators.
Definition displacements.h:51
const std::vector< Key< NDIM > > & get_disp(Level n, const array_of_bools< NDIM > &kernel_lattice_sum_axes)
Definition displacements.h:211
FunctionCommonData holds all Function data common for given k.
Definition function_common_data.h:52
Tensor< double > quad_phit
transpose of quad_phi
Definition function_common_data.h:102
Tensor< double > quad_phiw
quad_phiw(i,j) = at x[i] value of w[i]*phi[j]
Definition function_common_data.h:103
std::vector< long > vk
(k,...) used to initialize Tensors
Definition function_common_data.h:93
std::vector< Slice > s0
s[0] in each dimension to get scaling coeff
Definition function_common_data.h:91
static const FunctionCommonData< T, NDIM > & get(int k)
Definition function_common_data.h:111
static void _init_quadrature(int k, int npt, Tensor< double > &quad_x, Tensor< double > &quad_w, Tensor< double > &quad_phi, Tensor< double > &quad_phiw, Tensor< double > &quad_phit)
Initialize the quadrature information.
Definition mraimpl.h:87
collect common functionality does not need to be member function of funcimpl
Definition function_common_data.h:135
const FunctionCommonData< T, NDIM > & cdata
Definition function_common_data.h:138
GenTensor< T > coeffs2values(const Key< NDIM > &key, const GenTensor< T > &coeff) const
Definition function_common_data.h:142
Tensor< T > values2coeffs(const Key< NDIM > &key, const Tensor< T > &values) const
Definition function_common_data.h:155
FunctionDefaults holds default paramaters as static class members.
Definition funcdefaults.h:100
static const double & get_thresh()
Returns the default threshold.
Definition funcdefaults.h:176
static int get_max_refine_level()
Gets the default maximum adaptive refinement level.
Definition funcdefaults.h:213
static const Tensor< double > & get_cell_width()
Returns the width of each user cell dimension.
Definition funcdefaults.h:369
static bool get_apply_randomize()
Gets the random load balancing for integral operators flag.
Definition funcdefaults.h:289
static const Tensor< double > & get_cell()
Gets the user cell for the simulation.
Definition funcdefaults.h:347
FunctionFactory implements the named-parameter idiom for Function.
Definition function_factory.h:86
bool _refine
Definition function_factory.h:99
bool _empty
Definition function_factory.h:100
bool _fence
Definition function_factory.h:103
Abstract base class interface required for functors used as input to Functions.
Definition function_interface.h:68
Definition funcimpl.h:5459
double operator()(double a, double b) const
Definition funcimpl.h:5485
const opT * func
Definition funcimpl.h:5461
Tensor< double > qx
Definition funcimpl.h:5463
double operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5476
void serialize(const Archive &ar)
Definition funcimpl.h:5490
do_err_box(const implT *impl, const opT *func, int npt, const Tensor< double > &qx, const Tensor< double > &quad_phit, const Tensor< double > &quad_phiw)
Definition funcimpl.h:5469
int npt
Definition funcimpl.h:5462
Tensor< double > quad_phiw
Definition funcimpl.h:5465
const implT * impl
Definition funcimpl.h:5460
Tensor< double > quad_phit
Definition funcimpl.h:5464
do_err_box(const do_err_box &e)
Definition funcimpl.h:5473
FunctionImpl holds all Function state to facilitate shallow copy semantics.
Definition funcimpl.h:945
std::tuple< std::set< Key< NDIM > >, std::map< Key< CDIM >, double > > get_contraction_node_lists(const std::size_t n, const std::array< int, CDIM > &v) const
for contraction two functions f(x,z) = \int g(x,y) h(y,z) dy
Definition funcimpl.h:6224
void copy_coeffs(const FunctionImpl< Q, NDIM > &other, bool fence)
Copy coeffs from other into self.
Definition funcimpl.h:1118
bool is_nonstandard() const
Definition mraimpl.h:269
T eval_cube(Level n, coordT &x, const tensorT &c) const
Definition mraimpl.h:2021
void partial_inner_contract(const FunctionImpl< Q, LDIM > *g, const FunctionImpl< R, KDIM > *h, const std::array< int, CDIM > v1, const std::array< int, CDIM > v2, const Key< NDIM > &key, const std::list< Key< CDIM > > &j_key_list)
tensor contraction part of partial_inner
Definition funcimpl.h:6384
AtomicInt large
Definition funcimpl.h:1001
Timer timer_target_driven
Definition funcimpl.h:999
void binaryXX(const FunctionImpl< L, NDIM > *left, const FunctionImpl< R, NDIM > *right, const opT &op, bool fence)
Definition funcimpl.h:3196
void do_apply(const opT *op, const keyT &key, const Tensor< R > &c)
apply an operator on the coeffs c (at node key)
Definition funcimpl.h:4803
void do_print_tree_graphviz(const keyT &key, std::ostream &os, Level maxlevel) const
Functor for the do_print_tree method (using GraphViz)
Definition mraimpl.h:2755
void add_keys_to_map(mapT *map, int index) const
Adds keys to union of local keys with specified index.
Definition funcimpl.h:5803
void change_tensor_type1(const TensorArgs &targs, bool fence)
change the tensor type of the coefficients in the FunctionNode
Definition mraimpl.h:1095
void gaxpy_ext_recursive(const keyT &key, const FunctionImpl< L, NDIM > *left, Tensor< L > lcin, tensorT c, T(*f)(const coordT &), T alpha, T beta, double tol, bool below_leaf)
Definition funcimpl.h:6793
int initial_level
Initial level for refinement.
Definition funcimpl.h:974
int max_refine_level
Do not refine below this level.
Definition funcimpl.h:977
double do_apply_kernel3(const opT *op, const GenTensor< R > &coeff, const do_op_args< OPDIM > &args, const TensorArgs &apply_targs)
same as do_apply_kernel2, but use low rank tensors as input and low rank tensors as output
Definition funcimpl.h:4761
void hartree_product(const std::vector< std::shared_ptr< FunctionImpl< T, LDIM > > > p1, const std::vector< std::shared_ptr< FunctionImpl< T, LDIM > > > p2, const leaf_opT &leaf_op, bool fence)
given two functions of LDIM, perform the Hartree/Kronecker/outer product
Definition funcimpl.h:3720
void traverse_tree(const coeff_opT &coeff_op, const apply_opT &apply_op, const keyT &key) const
traverse a non-existing tree
Definition funcimpl.h:3690
void do_square_inplace(const keyT &key)
int special_level
Minimium level for refinement on special points.
Definition funcimpl.h:975
void do_apply_kernel(const opT *op, const Tensor< R > &c, const do_op_args< OPDIM > &args)
for fine-grain parallelism: call the apply method of an operator in a separate task
Definition funcimpl.h:4695
double errsq_local(const opT &func) const
Returns the sum of squares of errors from local info ... no comms.
Definition funcimpl.h:5497
WorldContainer< keyT, nodeT > dcT
Type of container holding the coefficients.
Definition funcimpl.h:957
void evaldepthpt(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< Level >::remote_refT &ref)
Get the depth of the tree at a point in simulation coordinates.
Definition mraimpl.h:2936
void scale_inplace(const T q, bool fence)
In-place scale by a constant.
Definition mraimpl.h:3107
void gaxpy_oop_reconstructed(const double alpha, const implT &f, const double beta, const implT &g, const bool fence)
perform: this= alpha*f + beta*g, invoked by result
Definition mraimpl.h:219
void unary_op_coeff_inplace(const opT &op, bool fence)
Definition funcimpl.h:2040
World & world
Definition funcimpl.h:964
void apply_1d_realspace_push_op(const archive::archive_ptr< const opT > &pop, int axis, const keyT &key, const Tensor< R > &c)
Definition funcimpl.h:3758
bool is_redundant() const
Returns true if the function is redundant.
Definition mraimpl.h:258
FunctionNode< T, NDIM > nodeT
Type of node.
Definition funcimpl.h:955
std::size_t nCoeff_local() const
Returns the number of coefficients in the function for this MPI rank.
Definition mraimpl.h:1918
void print_size(const std::string name) const
print tree size and size
Definition mraimpl.h:1937
FunctionImpl(const FunctionImpl< T, NDIM > &p)
void print_info() const
Prints summary of data distribution.
Definition mraimpl.h:829
void abs_inplace(bool fence)
Definition mraimpl.h:3119
void binaryXXa(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const FunctionImpl< R, NDIM > *right, const Tensor< R > &rcin, const opT &op)
Definition funcimpl.h:3065
void print_timer() const
Definition mraimpl.h:353
void evalR(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< long >::remote_refT &ref)
Get the rank of leaf box of the tree at a point in simulation coordinates.
Definition mraimpl.h:2978
const FunctionCommonData< T, NDIM > & cdata
Definition funcimpl.h:983
void do_print_grid(const std::string filename, const std::vector< keyT > &keys) const
print the grid in xyz format
Definition mraimpl.h:580
void mulXXa(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const FunctionImpl< R, NDIM > *right, const Tensor< R > &rcin, double tol)
Definition funcimpl.h:2979
int get_truncate_mode() const
Definition funcimpl.h:1678
const std::vector< Vector< double, NDIM > > & get_special_points() const
Definition funcimpl.h:969
std::size_t nCoeff() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1928
double vol_nsphere(int n, double R)
Definition funcimpl.h:4791
keyT neighbor_in_volume(const keyT &key, const keyT &disp) const
Returns key of general neighbor that resides in-volume.
Definition mraimpl.h:3226
void compress(const TreeState newstate, bool fence)
compress the wave function
Definition mraimpl.h:1496
void do_dirac_convolution(FunctionImpl< T, LDIM > *f, bool fence) const
Definition funcimpl.h:2123
std::pair< coeffT, double > compress_op(const keyT &key, const std::vector< Future< std::pair< coeffT, double > > > &v, bool nonstandard)
calculate the wavelet coefficients using the sum coefficients of all child nodes
Definition mraimpl.h:1664
Future< bool > truncate_spawn(const keyT &key, double tol)
Returns true if after truncation this node has coefficients.
Definition mraimpl.h:2600
void print_type_in_compilation_error(R &&)
Definition funcimpl.h:6106
Future< double > norm_tree_spawn(const keyT &key)
Definition mraimpl.h:1566
std::vector< keyT > local_leaf_keys() const
return the keys of the local leaf boxes
Definition mraimpl.h:554
MADNESS_ASSERT(this->is_redundant()==g.is_redundant())
void do_print_tree(const keyT &key, std::ostream &os, Level maxlevel) const
Functor for the do_print_tree method.
Definition mraimpl.h:2673
void vtransform(const std::vector< std::shared_ptr< FunctionImpl< R, NDIM > > > &vright, const Tensor< Q > &c, const std::vector< std::shared_ptr< FunctionImpl< T, NDIM > > > &vleft, double tol, bool fence)
Definition funcimpl.h:2840
void unset_functor()
Definition mraimpl.h:308
void refine_spawn(const opT &op, const keyT &key)
Definition funcimpl.h:4523
void apply_1d_realspace_push(const opT &op, const FunctionImpl< R, NDIM > *f, int axis, bool fence)
Definition funcimpl.h:3809
static double conj(float x)
Definition funcimpl.h:5992
void do_print_plane(const std::string filename, std::vector< Tensor< double > > plotinfo, const int xaxis, const int yaxis, const coordT el2)
print the MRA structure
Definition mraimpl.h:495
std::pair< Key< NDIM >, ShallowNode< T, NDIM > > find_datum(keyT key) const
return the a std::pair<key, node>, which MUST exist
Definition mraimpl.h:961
void set_functor(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > functor1)
Definition mraimpl.h:289
std::enable_if< NDIM==FDIM >::type read_grid2(const std::string gridfile, std::shared_ptr< FunctionFunctorInterface< double, NDIM > > vnuc_functor)
read data from a grid
Definition funcimpl.h:1572
bool verify_tree_state_local() const
check that the tree state and the coeffs are consistent
Definition mraimpl.h:165
const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > & get_pmap() const
Definition mraimpl.h:203
Tensor< Q > fcube_for_mul(const keyT &child, const keyT &parent, const Tensor< Q > &coeff) const
Compute the function values for multiplication.
Definition funcimpl.h:1887
Timer timer_filter
Definition funcimpl.h:997
void sock_it_to_me(const keyT &key, const RemoteReference< FutureImpl< std::pair< keyT, coeffT > > > &ref) const
Walk up the tree returning pair(key,node) for first node with coefficients.
Definition mraimpl.h:2813
void recursive_apply(opT &apply_op, const implT *fimpl, implT *rimpl, const bool fence)
traverse an existing tree and apply an operator
Definition funcimpl.h:5316
double get_thresh() const
Definition mraimpl.h:324
void trickle_down(bool fence)
sum all the contributions from all scales after applying an operator in mod-NS form
Definition mraimpl.h:1350
bool autorefine
If true, autorefine where appropriate.
Definition funcimpl.h:979
std::pair< coeffT, double > make_redundant_op(const keyT &key, const std::vector< Future< std::pair< coeffT, double > > > &v)
similar to compress_op, but insert only the sum coefficients in the tree
Definition mraimpl.h:1724
void set_autorefine(bool value)
Definition mraimpl.h:333
tensorT filter(const tensorT &s) const
Transform sum coefficients at level n to sums+differences at level n-1.
Definition mraimpl.h:1148
void chop_at_level(const int n, const bool fence=true)
remove all nodes with level higher than n
Definition mraimpl.h:1111
void unaryXXvalues(const FunctionImpl< Q, NDIM > *func, const opT &op, bool fence)
Definition funcimpl.h:3223
static std::complex< double > conj(const std::complex< double > x)
Definition funcimpl.h:5996
void partial_inner(const FunctionImpl< Q, LDIM > &g, const FunctionImpl< R, KDIM > &h, const std::array< int, CDIM > v1, const std::array< int, CDIM > v2)
invoked by result
Definition funcimpl.h:6122
TreeState tree_state
Definition funcimpl.h:986
void print_tree_json(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2693
coeffT parent_to_child_NS(const keyT &child, const keyT &parent, const coeffT &coeff) const
Directly project parent NS coeffs to child NS coeffs.
Definition mraimpl.h:703
void mapdim(const implT &f, const std::vector< long > &map, bool fence)
Permute the dimensions of f according to map, result on this.
Definition mraimpl.h:1053
bool is_compressed() const
Returns true if the function is compressed.
Definition mraimpl.h:246
Vector< double, NDIM > coordT
Type of vector holding coordinates.
Definition funcimpl.h:959
void apply(opT &op, const FunctionImpl< R, NDIM > &f, bool fence)
apply an operator on f to return this
Definition funcimpl.h:4993
Tensor< T > tensorT
Type of tensor for anything but to hold coeffs.
Definition funcimpl.h:952
void mirror(const implT &f, const std::vector< long > &mirror, bool fence)
mirror the dimensions of f according to map, result on this
Definition mraimpl.h:1062
T inner_adaptive_recursive(keyT key, const tensorT &c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine, T old_inner=T(0)) const
Definition funcimpl.h:6716
void store(Archive &ar)
Definition funcimpl.h:1247
void do_binary_op(const keyT &key, const Tensor< L > &left, const std::pair< keyT, Tensor< R > > &arg, const opT &op)
Functor for the binary_op method.
Definition funcimpl.h:1989
void gaxpy_ext(const FunctionImpl< L, NDIM > *left, T(*f)(const coordT &), T alpha, T beta, double tol, bool fence)
Definition funcimpl.h:6880
void accumulate_trees(FunctionImpl< Q, NDIM > &result, const R alpha, const bool fence=true) const
merge the trees of this and other, while multiplying them with the alpha or beta, resp
Definition funcimpl.h:1168
void print_stats() const
print the number of configurations per node
Definition mraimpl.h:1965
void broaden(const array_of_bools< NDIM > &is_periodic, bool fence)
Definition mraimpl.h:1299
coeffT truncate_reconstructed_op(const keyT &key, const std::vector< Future< coeffT > > &v, const double tol)
given the sum coefficients of all children, truncate or not
Definition mraimpl.h:1613
void refine_op(const opT &op, const keyT &key)
Definition funcimpl.h:4498
static Tensor< TENSOR_RESULT_TYPE(T, R) > inner_local(const std::vector< const FunctionImpl< T, NDIM > * > &left, const std::vector< const FunctionImpl< R, NDIM > * > &right, bool sym)
Definition funcimpl.h:6002
void fcube(const keyT &key, const FunctionFunctorInterface< T, NDIM > &f, const Tensor< double > &qx, tensorT &fval) const
Evaluate function at quadrature points in the specified box.
Definition mraimpl.h:2438
Timer timer_change_tensor_type
Definition funcimpl.h:995
void forward_do_diff1(const DerivativeBase< T, NDIM > *D, const implT *f, const keyT &key, const std::pair< keyT, coeffT > &left, const std::pair< keyT, coeffT > &center, const std::pair< keyT, coeffT > &right)
Definition mraimpl.h:919
std::vector< Slice > child_patch(const keyT &child) const
Returns patch referring to coeffs of child in parent box.
Definition mraimpl.h:692
void print_tree_graphviz(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2746
void set_tree_state(const TreeState &state)
Definition funcimpl.h:1278
std::size_t min_nodes() const
Returns the min number of nodes on a processor.
Definition mraimpl.h:1869
std::shared_ptr< FunctionFunctorInterface< T, NDIM > > functor
Definition funcimpl.h:985
Timer timer_compress_svd
Definition funcimpl.h:998
Tensor< TENSOR_RESULT_TYPE(T, R)> mul(const Tensor< T > &c1, const Tensor< R > &c2, const int npt, const keyT &key) const
multiply the values of two coefficient tensors using a custom number of grid points
Definition funcimpl.h:1962
void make_redundant(const bool fence)
convert this to redundant, i.e. have sum coefficients on all levels
Definition mraimpl.h:1524
void load(Archive &ar)
Definition funcimpl.h:1229
std::size_t max_nodes() const
Returns the max number of nodes on a processor.
Definition mraimpl.h:1860
T inner_ext_local(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine) const
Definition funcimpl.h:6691
coeffT upsample(const keyT &key, const coeffT &coeff) const
upsample the sum coefficients of level n to sum coefficients on level n+1
Definition mraimpl.h:1227
TensorArgs targs
type of tensor to be used in the FunctionNodes
Definition funcimpl.h:981
void flo_unary_op_node_inplace(const opT &op, bool fence)
Definition funcimpl.h:2152
std::size_t size_local() const
Returns the number of coefficients in the function for each rank.
Definition mraimpl.h:1887
GenTensor< Q > values2coeffs(const keyT &key, const GenTensor< Q > &values) const
Definition funcimpl.h:1866
void plot_cube_kernel(archive::archive_ptr< Tensor< T > > ptr, const keyT &key, const coordT &plotlo, const coordT &plothi, const std::vector< long > &npt, bool eval_refine) const
Definition mraimpl.h:3317
T trace_local() const
Returns int(f(x),x) in local volume.
Definition mraimpl.h:3161
void print_grid(const std::string filename) const
Definition mraimpl.h:538
Future< std::pair< coeffT, double > > compress_spawn(const keyT &key, bool nonstandard, bool keepleaves, bool redundant1)
Invoked on node where key is local.
Definition mraimpl.h:3254
bool get_autorefine() const
Definition mraimpl.h:330
int k
Wavelet order.
Definition funcimpl.h:972
void vtransform_doit(const std::shared_ptr< FunctionImpl< R, NDIM > > &right, const Tensor< Q > &c, const std::vector< std::shared_ptr< FunctionImpl< T, NDIM > > > &vleft, double tol)
Definition funcimpl.h:2684
MADNESS_CHECK(this->is_reconstructed())
void phi_for_mul(Level np, Translation lp, Level nc, Translation lc, Tensor< double > &phi) const
Compute the Legendre scaling functions for multiplication.
Definition mraimpl.h:3129
Future< std::pair< keyT, coeffT > > find_me(const keyT &key) const
find_me. Called by diff_bdry to get coefficients of boundary function
Definition mraimpl.h:3241
TensorType get_tensor_type() const
Definition mraimpl.h:315
void do_project_out(const coeffT &fcoeff, const std::pair< keyT, coeffT > gpair, const keyT &gkey, const Key< NDIM > &dest, const int dim) const
compute the inner product of two nodes of only some dimensions and accumulate on result
Definition funcimpl.h:7100
void remove_leaf_coefficients(const bool fence)
Definition mraimpl.h:1518
void insert_zero_down_to_initial_level(const keyT &key)
Initialize nodes to zero function at initial_level of refinement.
Definition mraimpl.h:2569
void do_diff1(const DerivativeBase< T, NDIM > *D, const implT *f, const keyT &key, const std::pair< keyT, coeffT > &left, const std::pair< keyT, coeffT > &center, const std::pair< keyT, coeffT > &right)
Definition mraimpl.h:930
typedef TENSOR_RESULT_TYPE(T, R) resultT
void unary_op_node_inplace(const opT &op, bool fence)
Definition funcimpl.h:2061
T inner_adaptive_local(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine) const
Definition funcimpl.h:6702
void do_print_tree_json(const keyT &key, std::multimap< Level, std::tuple< tranT, std::string > > &data, Level maxlevel) const
Functor for the do_print_tree_json method.
Definition mraimpl.h:2724
std::multimap< Key< FDIM >, std::list< Key< CDIM > > > recur_down_for_contraction_map(const keyT &key, const nodeT &node, const std::array< int, CDIM > &v_this, const std::array< int, CDIM > &v_other, const std::set< Key< ODIM > > &ij_other_list, const std::map< Key< CDIM >, double > &j_other_list, bool this_first, const double thresh)
make a map of all nodes that will contribute to a partial inner product
Definition funcimpl.h:6277
std::shared_ptr< FunctionImpl< T, NDIM > > pimplT
pointer to this class
Definition funcimpl.h:951
TENSOR_RESULT_TYPE(T, R) dot_local(const FunctionImpl< R
Returns the dot product ASSUMING same distribution.
void finalize_sum()
after summing up we need to do some cleanup;
Definition mraimpl.h:1817
std::enable_if< NDIM==FDIM >::type read_grid(const std::string keyfile, const std::string gridfile, std::shared_ptr< FunctionFunctorInterface< double, NDIM > > vnuc_functor)
read data from a grid
Definition funcimpl.h:1465
dcT coeffs
The coefficients.
Definition funcimpl.h:988
bool exists_and_is_leaf(const keyT &key) const
Definition mraimpl.h:1271
void make_Vphi(const opT &leaf_op, const bool fence=true)
assemble the function V*phi using V and phi given from the functor
Definition funcimpl.h:4290
void unaryXX(const FunctionImpl< Q, NDIM > *func, const opT &op, bool fence)
Definition funcimpl.h:3210
std::vector< std::pair< int, const coeffT * > > mapvecT
Type of the entry in the map returned by make_key_vec_map.
Definition funcimpl.h:5797
void project_out(FunctionImpl< T, NDIM-LDIM > *result, const FunctionImpl< T, LDIM > *gimpl, const int dim, const bool fence)
project the low-dim function g on the hi-dim function f: result(x) = <this(x,y) | g(y)>
Definition funcimpl.h:6896
void verify_tree() const
Verify tree is properly constructed ... global synchronization involved.
Definition mraimpl.h:107
void do_square_inplace2(const keyT &parent, const keyT &child, const tensorT &parent_coeff)
void gaxpy_inplace_reconstructed(const T &alpha, const FunctionImpl< Q, NDIM > &g, const R &beta, const bool fence)
Definition funcimpl.h:1136
void set_tensor_args(const TensorArgs &t)
Definition mraimpl.h:321
GenTensor< Q > fcube_for_mul(const keyT &child, const keyT &parent, const GenTensor< Q > &coeff) const
Compute the function values for multiplication.
Definition funcimpl.h:1915
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:5588
std::size_t real_size() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1905
bool exists_and_has_children(const keyT &key) const
Definition mraimpl.h:1266
void sum_down_spawn(const keyT &key, const coeffT &s)
is this the same as trickle_down() ?
Definition mraimpl.h:872
void multi_to_multi_op_values(const opT &op, const std::vector< implT * > &vin, std::vector< implT * > &vout, const bool fence=true)
Inplace operate on many functions (impl's) with an operator within a certain box.
Definition funcimpl.h:2811
long box_interior[1000]
Definition funcimpl.h:3254
keyT neighbor(const keyT &key, const keyT &disp, const array_of_bools< NDIM > &is_periodic) const
Returns key of general neighbor enforcing BC.
Definition mraimpl.h:3211
GenTensor< Q > NS_fcube_for_mul(const keyT &child, const keyT &parent, const GenTensor< Q > &coeff, const bool s_only) const
Compute the function values for multiplication.
Definition funcimpl.h:1785
rangeT range(coeffs.begin(), coeffs.end())
void norm_tree(bool fence)
compute for each FunctionNode the norm of the function inside that node
Definition mraimpl.h:1543
void gaxpy_inplace(const T &alpha, const FunctionImpl< Q, NDIM > &other, const R &beta, bool fence)
Inplace general bilinear operation.
Definition funcimpl.h:1216
bool has_leaves() const
Definition mraimpl.h:284
bool verify_parents_and_children() const
check that parents and children are consistent
Definition mraimpl.h:115
void apply_source_driven(opT &op, const FunctionImpl< R, NDIM > &f, bool fence)
similar to apply, but for low rank coeffs
Definition funcimpl.h:5141
void distribute(std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > newmap) const
Definition funcimpl.h:1110
int get_special_level() const
Definition funcimpl.h:968
void reconstruct_op(const keyT &key, const coeffT &s, const bool accumulate_NS=true)
Definition mraimpl.h:2075
tensorT gaxpy_ext_node(keyT key, Tensor< L > lc, T(*f)(const coordT &), T alpha, T beta) const
Definition funcimpl.h:6769
const coeffT parent_to_child(const coeffT &s, const keyT &parent, const keyT &child) const
Directly project parent coeffs to child coeffs.
Definition mraimpl.h:3144
WorldObject< FunctionImpl< T, NDIM > > woT
Base class world object type.
Definition funcimpl.h:947
void undo_redundant(const bool fence)
convert this from redundant to standard reconstructed form
Definition mraimpl.h:1534
GenTensor< T > coeffT
Type of tensor used to hold coeffs.
Definition funcimpl.h:956
const keyT & key0() const
Returns cdata.key0.
Definition mraimpl.h:390
double finalize_apply()
after apply we need to do some cleanup;
Definition mraimpl.h:1774
bool leaves_only
Definition funcimpl.h:5593
friend hashT hash_value(const FunctionImpl< T, NDIM > *pimpl)
Hash a pointer to FunctionImpl.
Definition funcimpl.h:7200
const dcT & get_coeffs() const
Definition mraimpl.h:339
T inner_ext_node(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f) const
Return the inner product with an external function on a specified function node.
Definition funcimpl.h:6568
double norm2sq_local() const
Returns the square of the local norm ... no comms.
Definition mraimpl.h:1826
const FunctionCommonData< T, NDIM > & get_cdata() const
Definition mraimpl.h:345
void sum_down(bool fence)
After 1d push operator must sum coeffs down the tree to restore correct scaling function coefficients...
Definition mraimpl.h:911
T inner_ext_recursive(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine, T old_inner=T(0)) const
Definition funcimpl.h:6585
bool noautorefine(const keyT &key, const tensorT &t) const
Always returns false (for when autorefine is not wanted)
Definition mraimpl.h:855
double truncate_tol(double tol, const keyT &key) const
Returns the truncation threshold according to truncate_method.
Definition mraimpl.h:645
void flo_unary_op_node_inplace(const opT &op, bool fence) const
Definition funcimpl.h:2162
bool autorefine_square_test(const keyT &key, const nodeT &t) const
Returns true if this block of coeffs needs autorefining.
Definition mraimpl.h:861
void erase(const Level &max_level)
truncate tree at a certain level
Definition mraimpl.h:735
void mulXX(const FunctionImpl< L, NDIM > *left, const FunctionImpl< R, NDIM > *right, double tol, bool fence)
Definition funcimpl.h:3182
void reconstruct(bool fence)
reconstruct this tree – respects fence
Definition mraimpl.h:1464
void multiply(const implT *f, const FunctionImpl< T, LDIM > *g, const int particle)
multiply f (a pair function of NDIM) with an orbital g (LDIM=NDIM/2)
Definition funcimpl.h:3582
coeffT assemble_coefficients(const keyT &key, const coeffT &coeff_ket, const coeffT &vpotential1, const coeffT &vpotential2, const tensorT &veri) const
given several coefficient tensors, assemble a result tensor
Definition mraimpl.h:1009
static void tnorm(const tensorT &t, double *lo, double *hi)
Computes norm of low/high-order polyn. coeffs for autorefinement test.
Definition mraimpl.h:3021
std::pair< bool, T > eval_local_only(const Vector< double, NDIM > &xin, Level maxlevel)
Evaluate function only if point is local returning (true,value); otherwise return (false,...
Definition mraimpl.h:2907
std::size_t max_depth() const
Returns the maximum depth of the tree ... collective ... global sum/broadcast.
Definition mraimpl.h:1852
std::size_t size() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1897
void reduce_rank(const double thresh, bool fence)
reduce the rank of the coefficients tensors
Definition mraimpl.h:1103
TreeState get_tree_state() const
Definition funcimpl.h:1282
void merge_trees(const T alpha, const FunctionImpl< Q, NDIM > &other, const R beta, const bool fence=true)
merge the trees of this and other, while multiplying them with the alpha or beta, resp
Definition funcimpl.h:1156
std::shared_ptr< FunctionFunctorInterface< T, NDIM > > get_functor()
Definition mraimpl.h:296
double do_apply_directed_screening(const opT *op, const keyT &key, const coeffT &coeff, const bool &do_kernel)
apply an operator on the coeffs c (at node key)
Definition funcimpl.h:5030
tensorT unfilter(const tensorT &s) const
Transform sums+differences at level n to sum coefficients at level n+1.
Definition mraimpl.h:1177
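Schematically, unfilter (and its inverse, filter) realize the standard two-scale relation between adjacent refinement levels; a hedged sketch using generic filter matrices H and G (the exact MADNESS filter conventions may differ):

    s_n = H s_{n+1},  d_n = G s_{n+1}        (filter: level n+1 -> level n)
    s_{n+1} = H^T s_n + G^T d_n              (unfilter: level n -> level n+1)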
int get_initial_level() const
getter
Definition funcimpl.h:967
Tensor< T > eval_plot_cube(const coordT &plotlo, const coordT &plothi, const std::vector< long > &npt, const bool eval_refine=false) const
Definition mraimpl.h:3410
virtual ~FunctionImpl()
Definition funcimpl.h:1102
Vector< Translation, NDIM > tranT
Type of array holding translation.
Definition funcimpl.h:953
void change_tree_state(const TreeState finalstate, bool fence=true)
change the tree state of this function, might or might not respect fence!
Definition mraimpl.h:1403
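A minimal usage sketch combining change_tree_state with the state queries listed in this index (impl is assumed to be a pointer to an already populated FunctionImpl<double,3>; illustrative only, not taken from the MADNESS tests):

    // ensure the tree carries sum coefficients at the leaves
    if (!impl->is_reconstructed())
        impl->change_tree_state(madness::TreeState::reconstructed, /*fence=*/true);
    MADNESS_ASSERT(impl->get_tree_state() == madness::TreeState::reconstructed);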
Future< coeffT > truncate_reconstructed_spawn(const keyT &key, const double tol)
truncate using a tree in reconstructed form
Definition mraimpl.h:1589
GenTensor< Q > coeffs2values(const keyT &key, const GenTensor< Q > &coeff) const
Definition funcimpl.h:1733
FunctionImpl(const FunctionFactory< T, NDIM > &factory)
Initialize function impl from data in factory.
Definition funcimpl.h:1004
void map_and_mirror(const implT &f, const std::vector< long > &map, const std::vector< long > &mirror, bool fence)
map and mirror the translation index and the coefficients, result on this
Definition mraimpl.h:1072
Timer timer_lr_result
Definition funcimpl.h:996
void gaxpy(T alpha, const FunctionImpl< L, NDIM > &left, T beta, const FunctionImpl< R, NDIM > &right, bool fence)
Invoked by result to perform result += alpha*left+beta*right in wavelet basis.
Definition funcimpl.h:2012
void truncate(double tol, bool fence)
Truncate according to the threshold with optional global fence.
Definition mraimpl.h:374
void do_mul(const keyT &key, const Tensor< L > &left, const std::pair< keyT, Tensor< R > > &arg)
Functor for the mul method.
Definition funcimpl.h:1937
void project_out2(const FunctionImpl< T, LDIM+NDIM > *f, const FunctionImpl< T, LDIM > *g, const int dim)
project the low-dim function g on the hi-dim function f: this(x) = <f(x,y) | g(y)>
Definition funcimpl.h:7042
double do_apply_kernel2(const opT *op, const Tensor< R > &c, const do_op_args< OPDIM > &args, const TensorArgs &apply_targs)
same as do_apply_kernel, but use full rank tensors as input and low rank tensors as output
Definition funcimpl.h:4723
static Tensor< TENSOR_RESULT_TYPE(T, R)> dot_local(const std::vector< const FunctionImpl< T, NDIM > * > &left, const std::vector< const FunctionImpl< R, NDIM > * > &right, bool sym)
Definition funcimpl.h:6054
Tensor< Q > coeffs2values(const keyT &key, const Tensor< Q > &coeff) const
Definition funcimpl.h:1859
Tensor< Q > values2coeffs(const keyT &key, const Tensor< Q > &values) const
Definition funcimpl.h:1873
void multi_to_multi_op_values_doit(const keyT &key, const opT &op, const std::vector< implT * > &vin, std::vector< implT * > &vout)
Inplace operate on many functions (impl's) with an operator within a certain box.
Definition funcimpl.h:2788
bool is_reconstructed() const
Returns true if the function is reconstructed.
Definition mraimpl.h:252
void replicate(bool fence=true)
Definition funcimpl.h:1106
double norm_tree_op(const keyT &key, const std::vector< Future< double > > &v)
Definition mraimpl.h:1551
void reset_timer()
Definition mraimpl.h:362
void refine_to_common_level(const std::vector< FunctionImpl< T, NDIM > * > &v, const std::vector< tensorT > &c, const keyT key)
Refine multiple functions down to the same finest level.
Definition mraimpl.h:765
int get_k() const
Definition mraimpl.h:336
void dirac_convolution_op(const keyT &key, const nodeT &node, FunctionImpl< T, LDIM > *f) const
The operator.
Definition funcimpl.h:2078
FunctionImpl< T, NDIM > implT
Type of this class (implementation)
Definition funcimpl.h:950
void eval(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< T >::remote_refT &ref)
Evaluate the function at a point in simulation coordinates.
Definition mraimpl.h:2863
bool truncate_op(const keyT &key, double tol, const std::vector< Future< bool > > &v)
Definition mraimpl.h:2636
void zero_norm_tree()
Definition mraimpl.h:1288
std::size_t max_local_depth() const
Returns the maximum local depth of the tree ... no communications.
Definition mraimpl.h:1838
tensorT project(const keyT &key) const
Definition mraimpl.h:2781
double thresh
Screening threshold.
Definition funcimpl.h:973
double check_symmetry_local() const
Returns some asymmetry measure ... no comms.
Definition mraimpl.h:751
Future< double > get_norm_tree_recursive(const keyT &key) const
Definition mraimpl.h:2802
bool is_redundant_after_merge() const
Returns true if the function is redundant_after_merge.
Definition mraimpl.h:264
void mulXXvec(const FunctionImpl< L, NDIM > *left, const std::vector< const FunctionImpl< R, NDIM > * > &vright, const std::vector< FunctionImpl< T, NDIM > * > &vresult, double tol, bool fence)
Definition funcimpl.h:3239
Key< NDIM > keyT
Type of key.
Definition funcimpl.h:954
friend hashT hash_value(const std::shared_ptr< FunctionImpl< T, NDIM > > impl)
Hash a shared_ptr to FunctionImpl.
Definition funcimpl.h:7210
std::vector< Vector< double, NDIM > > special_points
special points for further refinement (needed for composite functions or multiplication)
Definition funcimpl.h:976
bool truncate_on_project
If true projection inserts at level n-1 not n.
Definition funcimpl.h:980
AtomicInt small
Definition funcimpl.h:1000
static void do_dot_localX(const typename mapT::iterator lstart, const typename mapT::iterator lend, typename FunctionImpl< R, NDIM >::mapT *rmap_ptr, const bool sym, Tensor< TENSOR_RESULT_TYPE(T, R)> *result_ptr, Mutex *mutex)
Definition funcimpl.h:5953
bool is_on_demand() const
Definition mraimpl.h:279
double err_box(const keyT &key, const nodeT &node, const opT &func, int npt, const Tensor< double > &qx, const Tensor< double > &quad_phit, const Tensor< double > &quad_phiw) const
Returns the square of the error norm in the box labeled by key.
Definition funcimpl.h:5429
void accumulate_timer(const double time) const
Definition mraimpl.h:348
void trickle_down_op(const keyT &key, const coeffT &s)
sum all the contributions from all scales after applying an operator in mod-NS form
Definition mraimpl.h:1361
static void do_inner_localX(const typename mapT::iterator lstart, const typename mapT::iterator lend, typename FunctionImpl< R, NDIM >::mapT *rmap_ptr, const bool sym, Tensor< TENSOR_RESULT_TYPE(T, R) > *result_ptr, Mutex *mutex)
Definition funcimpl.h:5872
void mulXXveca(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const std::vector< const FunctionImpl< R, NDIM > * > vrightin, const std::vector< Tensor< R > > &vrcin, const std::vector< FunctionImpl< T, NDIM > * > vresultin, double tol)
Definition funcimpl.h:2875
void set_thresh(double value)
Definition mraimpl.h:327
Tensor< double > print_plane_local(const int xaxis, const int yaxis, const coordT &el2)
collect the data for a plot of the MRA structure locally on each node
Definition mraimpl.h:419
void sock_it_to_me_too(const keyT &key, const RemoteReference< FutureImpl< std::pair< keyT, coeffT > > > &ref) const
Definition mraimpl.h:2841
void broaden_op(const keyT &key, const std::vector< Future< bool > > &v)
Definition mraimpl.h:1277
void print_plane(const std::string filename, const int xaxis, const int yaxis, const coordT &el2)
Print a plane ("xy", "xz", or "yz") containing the point x to file.
Definition mraimpl.h:399
void print_tree(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2664
void project_refine_op(const keyT &key, bool do_refine, const std::vector< Vector< double, NDIM > > &specialpts)
Definition mraimpl.h:2450
void scale_oop(const Q q, const FunctionImpl< F, NDIM > &f, bool fence)
Out-of-place scale by a constant.
Definition funcimpl.h:7177
T typeT
Definition funcimpl.h:949
std::size_t tree_size() const
Returns the size of the tree structure of the function ... collective global sum.
Definition mraimpl.h:1878
ConcurrentHashMap< keyT, mapvecT > mapT
Type of the map returned by make_key_vec_map.
Definition funcimpl.h:5800
void add_scalar_inplace(T t, bool fence)
Adds a constant to the function. Local operation, optional fence.
Definition mraimpl.h:2528
void forward_traverse(const coeff_opT &coeff_op, const apply_opT &apply_op, const keyT &key) const
traverse a non-existing tree
Definition funcimpl.h:3676
tensorT downsample(const keyT &key, const std::vector< Future< coeffT > > &v) const
downsample the sum coefficients of level n+1 to sum coeffs on level n
Definition mraimpl.h:1197
void abs_square_inplace(bool fence)
Definition mraimpl.h:3124
FunctionImpl(const FunctionImpl< Q, NDIM > &other, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool dozero)
Copy constructor.
Definition funcimpl.h:1073
void refine(const opT &op, bool fence)
Definition funcimpl.h:4536
static mapT make_key_vec_map(const std::vector< const FunctionImpl< T, NDIM > * > &v)
Returns map of union of local keys to vector of indexes of functions containing that key.
Definition funcimpl.h:5821
void put_in_box(ProcessID from, long nl, long ni) const
Definition mraimpl.h:820
void unary_op_value_inplace(const opT &op, bool fence)
Definition funcimpl.h:2855
std::pair< const keyT, nodeT > datumT
Type of entry in container.
Definition funcimpl.h:958
Timer timer_accumulate
Definition funcimpl.h:994
TensorArgs get_tensor_args() const
Definition mraimpl.h:318
void unaryXXa(const keyT &key, const FunctionImpl< Q, NDIM > *func, const opT &op)
Definition funcimpl.h:3157
void make_Vphi_only(const opT &leaf_op, FunctionImpl< T, NDIM > *ket, FunctionImpl< T, LDIM > *v1, FunctionImpl< T, LDIM > *v2, FunctionImpl< T, LDIM > *p1, FunctionImpl< T, LDIM > *p2, FunctionImpl< T, NDIM > *eri, const bool fence=true)
assemble the function V*phi using V and phi given from the functor
Definition funcimpl.h:4351
void average(const implT &rhs)
take the average of two functions, similar to: this=0.5*(this+rhs)
Definition mraimpl.h:1084
void recursive_apply(opT &apply_op, const FunctionImpl< T, LDIM > *fimpl, const FunctionImpl< T, LDIM > *gimpl, const bool fence)
traverse a non-existing tree, make its coeffs and apply an operator
Definition funcimpl.h:5182
void diff(const DerivativeBase< T, NDIM > *D, const implT *f, bool fence)
Definition mraimpl.h:942
void square_inplace(bool fence)
Pointwise squaring of function with optional global fence.
Definition mraimpl.h:3113
void remove_internal_coefficients(const bool fence)
Definition mraimpl.h:1513
void compute_snorm_and_dnorm(bool fence=true)
compute norm of s and d coefficients for all nodes
Definition mraimpl.h:1127
long box_leaf[1000]
Definition funcimpl.h:3253
void standard(bool fence)
Changes non-standard compressed form to standard compressed form.
Definition mraimpl.h:1761
void multiop_values_doit(const keyT &key, const opT &op, const std::vector< implT * > &v)
Definition funcimpl.h:2746
bool is_nonstandard_with_leaves() const
Definition mraimpl.h:274
GenTensor< Q > values2NScoeffs(const keyT &key, const GenTensor< Q > &values) const
convert function values of a child generation directly to NS coeffs
Definition funcimpl.h:1834
int truncate_mode
0=default=(|d|<thresh), 1=(|d|<thresh/2^n), 2=(|d|<thresh/4^n);
Definition funcimpl.h:978
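The three modes above translate into a level-dependent tolerance; a minimal sketch of that rule as a plain switch (illustration only; the actual FunctionImpl::truncate_tol in mraimpl.h may differ in detail):

    #include <cmath>
    #include <stdexcept>

    // level-dependent truncation tolerance following the truncate_mode description above
    inline double truncate_tol_sketch(double thresh, int truncate_mode, int n) {
        switch (truncate_mode) {
            case 0:  return thresh;                     // |d| < thresh
            case 1:  return thresh * std::pow(0.5, n);  // |d| < thresh/2^n
            case 2:  return thresh * std::pow(0.25, n); // |d| < thresh/4^n
            default: throw std::invalid_argument("unknown truncate_mode");
        }
    }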
void multiop_values(const opT &op, const std::vector< implT * > &v)
Definition funcimpl.h:2763
GenTensor< Q > NScoeffs2values(const keyT &key, const GenTensor< Q > &coeff, const bool s_only) const
convert S or NS coeffs to values on a 2k grid of the children
Definition funcimpl.h:1749
FunctionNode holds the coefficients, etc., at each node of the 2^NDIM-tree.
Definition funcimpl.h:127
FunctionNode< Q, NDIM > convert() const
Copy with possible type conversion of coefficients, copying all other state.
Definition funcimpl.h:194
GenTensor< T > coeffT
Definition funcimpl.h:129
bool has_coeff() const
Returns true if there are coefficients in this node.
Definition funcimpl.h:200
void recompute_snorm_and_dnorm(const FunctionCommonData< T, NDIM > &cdata)
Definition funcimpl.h:335
FunctionNode(const coeffT &coeff, bool has_children=false)
Constructor from given coefficients with optional children.
Definition funcimpl.h:156
FunctionNode()
Default constructor makes node without coeff or children.
Definition funcimpl.h:146
void serialize(Archive &ar)
Definition funcimpl.h:458
void consolidate_buffer(const TensorArgs &args)
Definition funcimpl.h:444
double get_dnorm() const
return the precomputed norm of the (virtual) d coefficients
Definition funcimpl.h:316
size_t size() const
Returns the number of coefficients in this node.
Definition funcimpl.h:242
void set_has_children_recursive(const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key)
Sets has_children attribute to true, recurring up the tree to ensure the node stays connected to its parents.
Definition funcimpl.h:259
FunctionNode< T, NDIM > & operator=(const FunctionNode< T, NDIM > &other)
Definition funcimpl.h:176
double snorm
norm of the s coefficients
Definition funcimpl.h:141
void clear_coeff()
Clears the coefficients (has_coeff() will subsequently return false)
Definition funcimpl.h:295
Tensor< T > tensorT
Definition funcimpl.h:130
coeffT buffer
The coefficients, if any.
Definition funcimpl.h:139
T trace_conj(const FunctionNode< T, NDIM > &rhs) const
Definition funcimpl.h:453
void scale(Q a)
Scale the coefficients of this node.
Definition funcimpl.h:301
bool is_leaf() const
Returns true if this does not have children.
Definition funcimpl.h:213
void set_has_children(bool flag)
Sets has_children attribute to value of flag.
Definition funcimpl.h:254
void accumulate(const coeffT &t, const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key, const TensorArgs &args)
Accumulate inplace and if necessary connect node to parent.
Definition funcimpl.h:416
double get_norm_tree() const
Gets the value of norm_tree.
Definition funcimpl.h:311
bool _has_children
True if there are children.
Definition funcimpl.h:138
FunctionNode(const coeffT &coeff, double norm_tree, double snorm, double dnorm, bool has_children)
Definition funcimpl.h:166
void set_snorm(const double sn)
set the precomputed norm of the (virtual) s coefficients
Definition funcimpl.h:321
coeffT _coeffs
The coefficients, if any.
Definition funcimpl.h:136
void accumulate2(const tensorT &t, const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key)
Accumulate inplace and if necessary connect node to parent.
Definition funcimpl.h:383
void reduceRank(const double &eps)
reduces the rank of the coefficients (if applicable)
Definition funcimpl.h:249
WorldContainer< Key< NDIM >, FunctionNode< T, NDIM > > dcT
Definition funcimpl.h:144
void gaxpy_inplace(const T &alpha, const FunctionNode< Q, NDIM > &other, const R &beta)
General bi-linear operation — this = this*alpha + other*beta.
Definition funcimpl.h:365
double _norm_tree
After norm_tree will contain norm of coefficients summed up tree.
Definition funcimpl.h:137
void set_is_leaf(bool flag)
Sets has_children attribute to value of !flag.
Definition funcimpl.h:280
void print_json(std::ostream &s) const
Definition funcimpl.h:466
double get_snorm() const
get the precomputed norm of the (virtual) s coefficients
Definition funcimpl.h:331
const coeffT & coeff() const
Returns a const reference to the tensor containing the coeffs.
Definition funcimpl.h:237
FunctionNode(const coeffT &coeff, double norm_tree, bool has_children)
Definition funcimpl.h:161
bool has_children() const
Returns true if this node has children.
Definition funcimpl.h:207
void set_coeff(const coeffT &coeffs)
Takes a shallow copy of the coeff — same as this->coeff()=coeff.
Definition funcimpl.h:285
void set_dnorm(const double dn)
set the precomputed norm of the (virtual) d coefficients
Definition funcimpl.h:326
double dnorm
norm of the d coefficients, also defined if there are no d coefficients
Definition funcimpl.h:140
bool is_invalid() const
Returns true if this node is invalid (no coeffs and no children)
Definition funcimpl.h:219
FunctionNode(const FunctionNode< T, NDIM > &other)
Definition funcimpl.h:170
coeffT & coeff()
Returns a non-const reference to the tensor containing the coeffs.
Definition funcimpl.h:227
void set_norm_tree(double norm_tree)
Sets the value of norm_tree.
Definition funcimpl.h:306
Implements the functionality of futures.
Definition future.h:74
A future is a possibly yet unevaluated value.
Definition future.h:373
remote_refT remote_ref(World &world) const
Returns a structure used to pass references to another process.
Definition future.h:675
RemoteReference< FutureImpl< T > > remote_refT
Definition future.h:398
Definition lowranktensor.h:59
bool is_of_tensortype(const TensorType &tt) const
Definition gentensor.h:225
GenTensor convert(const TensorArgs &targs) const
Definition gentensor.h:198
GenTensor full_tensor() const
Definition gentensor.h:200
long dim(const int i) const
return the number of entries in dimension i
Definition lowranktensor.h:391
Tensor< T > full_tensor_copy() const
Definition gentensor.h:206
long ndim() const
Definition lowranktensor.h:386
void add_SVD(const GenTensor< T > &rhs, const double &eps)
Definition gentensor.h:235
constexpr bool is_full_tensor() const
Definition gentensor.h:224
GenTensor get_tensor() const
Definition gentensor.h:203
GenTensor reconstruct_tensor() const
Definition gentensor.h:199
bool has_no_data() const
Definition gentensor.h:211
void normalize()
Definition gentensor.h:218
GenTensor< T > & emul(const GenTensor< T > &other)
Inplace multiply by corresponding elements of argument Tensor.
Definition lowranktensor.h:631
float_scalar_type normf() const
Definition lowranktensor.h:406
double svd_normf() const
Definition gentensor.h:213
SRConf< T > config() const
Definition gentensor.h:237
void reduce_rank(const double &eps)
Definition gentensor.h:217
long rank() const
Definition gentensor.h:212
long size() const
Definition lowranktensor.h:482
SVDTensor< T > & get_svdtensor()
Definition gentensor.h:228
TensorType tensor_type() const
Definition gentensor.h:221
bool has_data() const
Definition gentensor.h:210
GenTensor & gaxpy(const T alpha, const GenTensor &other, const T beta)
Definition lowranktensor.h:580
bool is_assigned() const
Definition gentensor.h:209
IsSupported< TensorTypeData< Q >, GenTensor< T > & >::type scale(Q fac)
Inplace multiplication by scalar of supported type (legacy name)
Definition lowranktensor.h:426
constexpr bool is_svd_tensor() const
Definition gentensor.h:222
Iterates in lexical order thru all children of a key.
Definition key.h:466
Key is the index for a node of the 2^NDIM-tree.
Definition key.h:69
Key< NDIM+LDIM > merge_with(const Key< LDIM > &rhs) const
merge with other key (ie concatenate), use level of rhs, not of this
Definition key.h:405
Level level() const
Definition key.h:168
bool is_valid() const
Checks if a key is valid.
Definition key.h:123
Key< NDIM-VDIM > extract_complement_key(const std::array< int, VDIM > &v) const
extract a new key with the Translations complementary to the ones indicated in the v array
Definition key.h:391
Key< VDIM > extract_key(const std::array< int, VDIM > &v) const
extract a new key with the Translations indicated in the v array
Definition key.h:383
Key parent(int generation=1) const
Returns the key of the parent.
Definition key.h:252
const Vector< Translation, NDIM > & translation() const
Definition key.h:173
void break_apart(Key< LDIM > &key1, Key< KDIM > &key2) const
break key into two low-dimensional keys
Definition key.h:343
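A hedged sketch of splitting and re-merging a high-dimensional key with break_apart and merge_with (the level and translations are made up; the split into leading and trailing translations is the natural reading of the declarations above):

    madness::Vector<madness::Translation, 6> l;
    for (int i = 0; i < 6; ++i) l[i] = i;        // hypothetical translations (0,1,2,3,4,5)
    madness::Key<6> key6(2, l);                  // a level-2 key in 6 dimensions
    madness::Key<3> k1, k2;
    key6.break_apart(k1, k2);                    // two 3-d sub-keys
    madness::Key<6> again = k1.merge_with(k2);   // concatenate the translations again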
A pmap that locates children on odd levels with their even level parents.
Definition funcimpl.h:105
LevelPmap(World &world)
Definition funcimpl.h:111
const int nproc
Definition funcimpl.h:107
LevelPmap()
Definition funcimpl.h:109
ProcessID owner(const keyT &key) const
Find the owner of a given key.
Definition funcimpl.h:114
Definition funcimpl.h:77
Mutex using pthread mutex operations.
Definition worldmutex.h:131
void unlock() const
Free a mutex owned by this thread.
Definition worldmutex.h:165
void lock() const
Acquire the mutex waiting if necessary.
Definition worldmutex.h:155
Range, vaguely a la Intel TBB, to encapsulate a random-access, STL-like start and end iterator with c...
Definition range.h:64
Simple structure used to manage references/pointers to remote instances.
Definition worldref.h:395
Definition SVDTensor.h:42
A simple process map.
Definition funcimpl.h:86
SimplePmap(World &world)
Definition funcimpl.h:92
const int nproc
Definition funcimpl.h:88
const ProcessID me
Definition funcimpl.h:89
ProcessID owner(const keyT &key) const
Maps key to processor.
Definition funcimpl.h:95
A slice defines a sub-range or patch of a dimension.
Definition slice.h:103
static TaskAttributes hipri()
Definition thread.h:456
Traits class to specify support of numeric types.
Definition type_data.h:56
A tensor is a multidimensional array.
Definition tensor.h:317
float_scalar_type normf() const
Returns the Frobenius norm of the tensor.
Definition tensor.h:1726
T sum() const
Returns the sum of all elements of the tensor.
Definition tensor.h:1662
Tensor< T > reshape(int ndimnew, const long *d)
Returns new view/tensor reshaping size/number of dimensions to conforming tensor.
Definition tensor.h:1384
T * ptr()
Returns a pointer to the internal data.
Definition tensor.h:1825
Tensor< T > mapdim(const std::vector< long > &map)
Returns new view/tensor permuting the dimensions.
Definition tensor.h:1624
IsSupported< TensorTypeData< Q >, Tensor< T > & >::type scale(Q x)
Inplace multiplication by scalar of supported type (legacy name)
Definition tensor.h:686
Tensor< T > & emul(const Tensor< T > &t)
Inplace multiply by corresponding elements of argument Tensor.
Definition tensor.h:1799
bool has_data() const
Definition tensor.h:1887
const TensorIterator< T > & end() const
End point for forward iteration.
Definition tensor.h:1877
Tensor< T > fusedim(long i)
Returns new view/tensor fusing contiguous dimensions i and i+1.
Definition tensor.h:1587
Tensor< T > flat()
Returns new view/tensor rehshaping to flat (1-d) tensor.
Definition tensor.h:1555
Tensor< T > & gaxpy(T alpha, const Tensor< T > &t, T beta)
Inplace generalized saxpy ... this = this*alpha + other*beta.
Definition tensor.h:1805
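A small illustration of the gaxpy semantics documented above (the values are hypothetical):

    madness::Tensor<double> a(3), b(3);
    for (long i = 0; i < 3; ++i) { a(i) = 1.0; b(i) = 2.0; }
    a.gaxpy(0.5, b, 2.0);   // a <- 0.5*a + 2.0*b, so every a(i) is now 4.5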
Tensor< T > & conj()
Inplace complex conjugate.
Definition tensor.h:716
Definition function_common_data.h:169
void accumulate(const double time) const
accumulate timer
Definition function_common_data.h:183
A simple, fixed dimension vector.
Definition vector.h:64
Makes a distributed container with specified attributes.
Definition worlddc.h:866
void process_pending()
Process pending messages.
Definition worlddc.h:1166
bool find(accessor &acc, const keyT &key)
Write access to LOCAL value by key. Returns true if found, false otherwise (always false for remote).
Definition worlddc.h:987
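A minimal sketch of local write access through the accessor interface documented here (coeffs and key are assumed to be a FunctionImpl coefficient container and a key owned by this rank):

    typename dcT::accessor acc;              // dcT = WorldContainer<Key<NDIM>, FunctionNode<T,NDIM>>
    if (coeffs.find(acc, key)) {             // true only if the data is local
        acc->second.set_has_children(true);  // acc->second is the local FunctionNode
    }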
bool probe(const keyT &key) const
Returns true if local data is immediately available (no communication)
Definition worlddc.h:1024
iterator begin()
Returns an iterator to the beginning of the local data (no communication)
Definition worlddc.h:1070
ProcessID owner(const keyT &key) const
Returns processor that logically owns key (no communication)
Definition worlddc.h:1034
implT::const_iterator const_iterator
Definition worlddc.h:872
void replicate(bool fence=true)
replicates this WorldContainer on all ProcessIDs
Definition worlddc.h:968
void replace(const pairT &datum)
Inserts/replaces key+value pair (non-blocking communication if key not local)
Definition worlddc.h:974
iterator end()
Returns an iterator past the end of the local data (no communication)
Definition worlddc.h:1084
const std::shared_ptr< WorldDCPmapInterface< keyT > > & get_pmap() const
Returns shared pointer to the process mapping.
Definition worlddc.h:1142
bool insert(accessor &acc, const keyT &key)
Write access to LOCAL value by key. Returns true if inserted, false if already exists (throws if remo...
Definition worlddc.h:1001
implT::iterator iterator
Definition worlddc.h:871
std::size_t size() const
Returns the number of local entries (no communication)
Definition worlddc.h:1135
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun()" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1426
bool is_local(const keyT &key) const
Returns true if the key maps to the local processor (no communication)
Definition worlddc.h:1041
Future< MEMFUN_RETURNT(memfunT)> send(const keyT &key, memfunT memfun)
Sends message "resultT memfun()" to item (non-blocking comm if remote)
Definition worlddc.h:1183
implT::accessor accessor
Definition worlddc.h:873
Interface to be provided by any process map.
Definition worlddc.h:82
void fence(bool debug=false)
Synchronizes all processes in communicator AND globally ensures no pending AM or tasks.
Definition worldgop.cc:161
Implements most parts of a globally addressable object (via unique ID).
Definition world_object.h:364
const uniqueidT & id() const
Returns the globally unique object ID.
Definition world_object.h:711
void process_pending()
To be called from derived constructor to process pending messages.
Definition world_object.h:656
ProcessID me
Rank of self.
Definition world_object.h:385
detail::task_result_type< memfnT >::futureT send(ProcessID dest, memfnT memfn) const
Definition world_object.h:731
detail::task_result_type< memfnT >::futureT task(ProcessID dest, memfnT memfn, const TaskAttributes &attr=TaskAttributes()) const
Sends task to derived class method returnT (this->*memfn)().
Definition world_object.h:1005
Future< bool > for_each(const rangeT &range, const opT &op)
Apply op(item) on all items in range.
Definition world_task_queue.h:572
void add(TaskInterface *t)
Add a new local task, taking ownership of the pointer.
Definition world_task_queue.h:466
Future< resultT > reduce(const rangeT &range, const opT &op)
Reduce op(item) for all items in range using op(sum,op(item)).
Definition world_task_queue.h:527
A parallel world class.
Definition world.h:132
static World * world_from_id(std::uint64_t id)
Convert a World ID to a World pointer.
Definition world.h:492
WorldTaskQueue & taskq
Task queue.
Definition world.h:206
ProcessID rank() const
Returns the process rank in this World (same as MPI_Comm_rank()).
Definition world.h:320
ProcessID size() const
Returns the number of processes in this World (same as MPI_Comm_size()).
Definition world.h:330
WorldGopInterface & gop
Global operations.
Definition world.h:207
std::optional< T * > ptr_from_id(uniqueidT id) const
Look up a local pointer from a world-wide unique ID.
Definition world.h:416
ProcessID random_proc()
Returns a random process number; that is, an integer in [0,world.size()).
Definition world.h:591
Wrapper for an opaque pointer for serialization purposes.
Definition archive.h:851
syntactic sugar for std::array<bool, N>
Definition array_of_bools.h:19
Class for unique global IDs.
Definition uniqueid.h:53
unsigned long get_obj_id() const
Access the object ID.
Definition uniqueid.h:97
unsigned long get_world_id() const
Access the World ID.
Definition uniqueid.h:90
static const double R
Definition csqrt.cc:46
double(* f1)(const coord_3d &)
Definition derivatives.cc:55
char * p(char *buf, const char *name, int k, int initial_level, double thresh, int order)
Definition derivatives.cc:72
static double lo
Definition dirac-hatom.cc:23
@ upper
Definition dirac-hatom.cc:15
Provides FunctionDefaults and utilities for coordinate transformation.
auto T(World &world, response_space &f) -> response_space
Definition global_functions.cc:34
archive_array< unsigned char > wrap_opaque(const T *, unsigned int)
Factory function to wrap a pointer to contiguous data as an opaque (uchar) archive_array.
Definition archive.h:926
Tensor< typename Tensor< T >::scalar_type > arg(const Tensor< T > &t)
Return a new tensor holding the argument of each element of t (complex types only)
Definition tensor.h:2503
Tensor< TENSOR_RESULT_TYPE(T, Q) > & fast_transform(const Tensor< T > &t, const Tensor< Q > &c, Tensor< TENSOR_RESULT_TYPE(T, Q) > &result, Tensor< TENSOR_RESULT_TYPE(T, Q) > &workspace)
Restricted but heavily optimized form of transform()
Definition tensor.h:2444
const double beta
Definition gygi_soltion.cc:62
static const double v
Definition hatom_sf_dirac.cc:20
Provides IndexIterator.
Tensor< double > op(const Tensor< double > &x)
Definition kain.cc:508
Multidimension Key for MRA tree and associated iterators.
static double pow(const double *a, const double *b)
Definition lda.h:74
#define MADNESS_CHECK(condition)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:182
#define MADNESS_EXCEPTION(msg, value)
Macro for throwing a MADNESS exception.
Definition madness_exception.h:119
#define MADNESS_ASSERT(condition)
Assert a condition that should be free of side-effects since in release builds this might be a no-op.
Definition madness_exception.h:134
#define MADNESS_CHECK_THROW(condition, msg)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:207
Header to declare stuff which has not yet found a home.
constexpr double pi
Mathematical constant π.
Definition constants.h:48
MemFuncWrapper< objT *, memfnT, typename result_of< memfnT >::type > wrap_mem_fn(objT &obj, memfnT memfn)
Create a member function wrapper (MemFuncWrapper) from an object and a member function pointer.
Definition mem_func_wrapper.h:251
void combine_hash(hashT &seed, hashT hash)
Internal use only.
Definition worldhash.h:248
Namespace for all elements and tools of MADNESS.
Definition DFParameters.h:10
std::ostream & operator<<(std::ostream &os, const particle< PDIM > &p)
Definition lowrankfunction.h:397
static const char * filename
Definition legendre.cc:96
static const std::vector< Slice > ___
Entire dimension.
Definition slice.h:128
static double cpu_time()
Returns the cpu time in seconds relative to an arbitrary origin.
Definition timers.h:127
GenTensor< TENSOR_RESULT_TYPE(R, Q)> general_transform(const GenTensor< R > &t, const Tensor< Q > c[])
Definition gentensor.h:274
response_space scale(response_space a, double b)
void finalize()
Call this once at the very end of your main program instead of MPI_Finalize().
Definition world.cc:232
void norm_tree(World &world, const std::vector< Function< T, NDIM > > &v, bool fence=true)
Makes the norm tree for all functions in a vector.
Definition vmra.h:1181
std::vector< Function< TENSOR_RESULT_TYPE(T, R), NDIM > > transform(World &world, const std::vector< Function< T, NDIM > > &v, const Tensor< R > &c, bool fence=true)
Transforms a vector of functions according to new[i] = sum[j] old[j]*c[j,i].
Definition vmra.h:707
TreeState
Definition funcdefaults.h:59
@ nonstandard_after_apply
s and d coeffs, state after operator application
Definition funcdefaults.h:64
@ redundant_after_merge
s coeffs everywhere, must be summed up to yield the result
Definition funcdefaults.h:66
@ reconstructed
s coeffs at the leaves only
Definition funcdefaults.h:60
@ nonstandard
s and d coeffs in internal nodes
Definition funcdefaults.h:62
@ redundant
s coeffs everywhere
Definition funcdefaults.h:65
static Tensor< double > weights[max_npt+1]
Definition legendre.cc:99
int64_t Translation
Definition key.h:57
Key< NDIM > displacement(const Key< NDIM > &source, const Key< NDIM > &target)
given a source and a target, return the displacement in translation
Definition key.h:451
static const Slice _(0,-1, 1)
std::shared_ptr< FunctionFunctorInterface< double, 3 > > func(new opT(g))
void change_tensor_type(GenTensor< T > &t, const TensorArgs &targs)
change representation to targ.tt
Definition gentensor.h:284
int Level
Definition key.h:58
std::enable_if< std::is_base_of< ProjectorBase, projT >::value, OuterProjector< projT, projQ > >::type outer(const projT &p0, const projQ &p1)
Definition projector.h:457
int RandomValue< int >()
Random int.
Definition ran.cc:250
static double pop(std::vector< double > &v)
Definition SCF.cc:113
void print(const T &t, const Ts &... ts)
Print items to std::cout (items separated by spaces) and terminate with a new line.
Definition print.h:225
Tensor< T > fcube(const Key< NDIM > &, T(*f)(const Vector< double, NDIM > &), const Tensor< double > &)
Definition mraimpl.h:2129
TensorType
low rank representations of tensors (see gentensor.h)
Definition gentensor.h:120
@ TT_2D
Definition gentensor.h:120
@ TT_FULL
Definition gentensor.h:120
NDIM & f
Definition mra.h:2481
void error(const char *msg)
Definition world.cc:139
NDIM const Function< R, NDIM > & g
Definition mra.h:2481
std::size_t hashT
The hash value type.
Definition worldhash.h:145
static const int kmax
Definition twoscale.cc:52
double inner(response_space &a, response_space &b)
Definition response_functions.h:442
GenTensor< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const GenTensor< R > &t, const Tensor< Q > &c, const int axis)
Definition lowranktensor.h:1099
std::string name(const FuncType &type, const int ex=-1)
Definition ccpairfunction.h:28
void mxmT(long dimi, long dimj, long dimk, T *MADNESS_RESTRICT c, const T *a, const T *b)
Matrix += Matrix * matrix transpose ... MKL interface version.
Definition mxm.h:225
Function< T, NDIM > copy(const Function< T, NDIM > &f, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool fence=true)
Create a new copy of the function with different distribution and optional fence.
Definition mra.h:2066
static const int MAXK
The maximum wavelet order presently supported.
Definition funcdefaults.h:54
Definition mraimpl.h:50
static long abs(long a)
Definition tensor.h:218
const double cc
Definition navstokes_cosines.cc:107
static const double b
Definition nonlinschro.cc:119
static const double d
Definition nonlinschro.cc:121
static const double a
Definition nonlinschro.cc:118
Defines simple templates for printing to std::cout "a la Python".
double Q(double a)
Definition relops.cc:20
static const double c
Definition relops.cc:10
static const double L
Definition rk.cc:46
static const double thresh
Definition rk.cc:45
Definition test_ar.cc:204
Definition test_ccpairfunction.cc:22
given a ket and the 1- and 2-electron potentials, construct the function V phi
Definition funcimpl.h:4011
implT * result
where to construct Vphi, no need to track parents
Definition funcimpl.h:4019
bool have_v2() const
Definition funcimpl.h:4028
ctL iav1
Definition funcimpl.h:4023
Vphi_op_NS(implT *result, const opT &leaf_op, const ctT &iaket, const ctL &iap1, const ctL &iap2, const ctL &iav1, const ctL &iav2, const implT *eri)
Definition funcimpl.h:4037
ctL iap1
Definition funcimpl.h:4022
bool have_v1() const
Definition funcimpl.h:4027
std::pair< bool, coeffT > continue_recursion(const std::vector< bool > child_is_leaf, const tensorT &coeffs, const keyT &key) const
loop over all children and either insert their sum coeffs or continue the recursion
Definition funcimpl.h:4103
opT leaf_op
deciding if a given FunctionNode will be a leaf node
Definition funcimpl.h:4020
std::pair< coeffT, double > make_sum_coeffs(const keyT &key) const
make the sum coeffs for key
Definition funcimpl.h:4196
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:4016
ctL iap2
the particles 1 and 2 (exclusive with ket)
Definition funcimpl.h:4022
bool have_ket() const
Definition funcimpl.h:4026
const implT * eri
2-particle potential, must be on-demand
Definition funcimpl.h:4024
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:4017
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
make and insert the coefficients into result's tree
Definition funcimpl.h:4048
void serialize(const Archive &ar)
serialize this (needed for use in recursive_op)
Definition funcimpl.h:4277
Vphi_op_NS< opT, LDIM > this_type
Definition funcimpl.h:4015
ctT iaket
the ket of a pair function (exclusive with p1, p2)
Definition funcimpl.h:4021
double compute_error_from_inaccurate_refinement(const keyT &key, const tensorT &ceri) const
the error is computed from the d coefficients of the constituent functions
Definition funcimpl.h:4149
void accumulate_into_result(const Key< NDIM > &key, const coeffT &coeff) const
Definition funcimpl.h:4031
this_type make_child(const keyT &child) const
Definition funcimpl.h:4248
tensorT eri_coeffs(const keyT &key) const
Definition funcimpl.h:4129
ctL iav2
potentials for particles 1 and 2
Definition funcimpl.h:4023
bool have_eri() const
Definition funcimpl.h:4029
this_type forward_ctor(implT *result1, const opT &leaf_op, const ctT &iaket1, const ctL &iap11, const ctL &iap21, const ctL &iav11, const ctL &iav21, const implT *eri1)
Definition funcimpl.h:4270
Vphi_op_NS()
Definition funcimpl.h:4036
Future< this_type > activate() const
Definition funcimpl.h:4259
bool randomize() const
Definition funcimpl.h:4013
add two functions f and g: result=alpha * f + beta * g
Definition funcimpl.h:3521
bool randomize() const
Definition funcimpl.h:3526
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:3556
add_op(const ctT &f, const ctT &g, const double alpha, const double beta)
Definition funcimpl.h:3534
ctT f
tracking coeffs of first and second addend
Definition funcimpl.h:3529
double alpha
prefactor for f, g
Definition funcimpl.h:3531
add_op this_type
Definition funcimpl.h:3524
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:3523
void serialize(const Archive &ar)
Definition funcimpl.h:3568
ctT g
Definition funcimpl.h:3529
std::pair< bool, coeffT > operator()(const keyT &key) const
if we are at the bottom of the trees, return the sum of the coeffs
Definition funcimpl.h:3538
double beta
Definition funcimpl.h:3531
this_type make_child(const keyT &child) const
Definition funcimpl.h:3551
this_type forward_ctor(const ctT &f1, const ctT &g1, const double alpha, const double beta)
taskq-compatible ctor
Definition funcimpl.h:3564
opT op
Definition funcimpl.h:3127
opT::resultT resultT
Definition funcimpl.h:3125
Tensor< resultT > operator()(const Key< NDIM > &key, const Tensor< Q > &t) const
Definition funcimpl.h:3134
coeff_value_adaptor(const FunctionImpl< Q, NDIM > *impl_func, const opT &op)
Definition funcimpl.h:3130
const FunctionImpl< Q, NDIM > * impl_func
Definition funcimpl.h:3126
void serialize(Archive &ar)
Definition funcimpl.h:3143
merge the coefficient boxes of this into result's tree
Definition funcimpl.h:2366
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2367
void serialize(const Archive &ar)
Definition funcimpl.h:2384
FunctionImpl< Q, NDIM > * result
Definition funcimpl.h:2368
do_accumulate_trees(FunctionImpl< Q, NDIM > &result, const T alpha)
Definition funcimpl.h:2371
T alpha
Definition funcimpl.h:2369
bool operator()(typename rangeT::iterator &it) const
accumulate the coefficients of this node into result's tree, scaled by alpha
Definition funcimpl.h:2375
"put" this on g
Definition funcimpl.h:2577
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2578
void serialize(const Archive &ar)
Definition funcimpl.h:2606
implT * g
Definition funcimpl.h:2580
do_average()
Definition funcimpl.h:2582
bool operator()(typename rangeT::iterator &it) const
iterator it points to this
Definition funcimpl.h:2586
do_average(implT &g)
Definition funcimpl.h:2583
change representation of nodes' coeffs to low rank, optional fence
Definition funcimpl.h:2610
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2611
void serialize(const Archive &ar)
Definition funcimpl.h:2634
TensorArgs targs
Definition funcimpl.h:2614
do_change_tensor_type(const TensorArgs &targs, implT &g)
Definition funcimpl.h:2620
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2623
implT * f
Definition funcimpl.h:2615
check symmetry wrt particle exchange
Definition funcimpl.h:2283
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2284
double operator()(typename rangeT::iterator &it) const
return the norm of the difference of this node and its "mirror" node
Definition funcimpl.h:2290
do_check_symmetry_local()
Definition funcimpl.h:2286
void serialize(const Archive &ar)
Definition funcimpl.h:2353
double operator()(double a, double b) const
Definition funcimpl.h:2349
do_check_symmetry_local(const implT &f)
Definition funcimpl.h:2287
const implT * f
Definition funcimpl.h:2285
compute the norm of the wavelet coefficients
Definition funcimpl.h:4418
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:4419
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:4425
do_compute_snorm_and_dnorm(const FunctionCommonData< T, NDIM > &cdata)
Definition funcimpl.h:4422
const FunctionCommonData< T, NDIM > & cdata
Definition funcimpl.h:4421
TensorArgs targs
Definition funcimpl.h:2641
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2646
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2638
do_consolidate_buffer(const TensorArgs &targs)
Definition funcimpl.h:2645
void serialize(const Archive &ar)
Definition funcimpl.h:2650
double operator()(double val) const
Definition funcimpl.h:1403
double limit
Definition funcimpl.h:1398
do_convert_to_color(const double limit, const bool log)
Definition funcimpl.h:1402
bool log
Definition funcimpl.h:1399
static double lower()
Definition funcimpl.h:1400
compute the inner product of this range with other
Definition funcimpl.h:5738
do_dot_local(const FunctionImpl< R, NDIM > *other, const bool leaves_only)
Definition funcimpl.h:5743
bool leaves_only
Definition funcimpl.h:5740
typedef TENSOR_RESULT_TYPE(T, R) resultT
resultT operator()(resultT a, resultT b) const
Definition funcimpl.h:5771
const FunctionImpl< R, NDIM > * other
Definition funcimpl.h:5739
void serialize(const Archive &ar)
Definition funcimpl.h:5775
resultT operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5745
functor for the gaxpy_inplace method
Definition funcimpl.h:1188
FunctionImpl< T, NDIM > * f
the current function impl
Definition funcimpl.h:1190
do_gaxpy_inplace(FunctionImpl< T, NDIM > *f, T alpha, R beta)
Definition funcimpl.h:1194
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:1195
R beta
prefactor for other function impl
Definition funcimpl.h:1192
void serialize(Archive &ar)
Definition funcimpl.h:1203
Range< typename FunctionImpl< Q, NDIM >::dcT::const_iterator > rangeT
Definition funcimpl.h:1189
T alpha
prefactor for the current function impl
Definition funcimpl.h:1191
const bool do_leaves
start with leaf nodes instead of initial_level
Definition funcimpl.h:6660
T operator()(T a, T b) const
Definition funcimpl.h:6678
do_inner_ext_local_ffi(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const implT *impl, const bool leaf_refine, const bool do_leaves)
Definition funcimpl.h:6662
void serialize(const Archive &ar)
Definition funcimpl.h:6682
const bool leaf_refine
Definition funcimpl.h:6659
const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > fref
Definition funcimpl.h:6657
T operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:6666
const implT * impl
Definition funcimpl.h:6658
compute the inner product of this range with other
Definition funcimpl.h:5601
const FunctionImpl< T, NDIM > * bra
Definition funcimpl.h:5602
void serialize(const Archive &ar)
Definition funcimpl.h:5717
const FunctionImpl< R, NDIM > * ket
Definition funcimpl.h:5603
bool leaves_only
Definition funcimpl.h:5604
do_inner_local_on_demand(const FunctionImpl< T, NDIM > *bra, const FunctionImpl< R, NDIM > *ket, const bool leaves_only=true)
Definition funcimpl.h:5607
resultT operator()(resultT a, resultT b) const
Definition funcimpl.h:5713
resultT operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5610
compute the inner product of this range with other
Definition funcimpl.h:5540
resultT operator()(resultT a, resultT b) const
Definition funcimpl.h:5573
bool leaves_only
Definition funcimpl.h:5542
void serialize(const Archive &ar)
Definition funcimpl.h:5577
do_inner_local(const FunctionImpl< R, NDIM > *other, const bool leaves_only)
Definition funcimpl.h:5545
const FunctionImpl< R, NDIM > * other
Definition funcimpl.h:5541
resultT operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5547
typedef TENSOR_RESULT_TYPE(T, R) resultT
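The reduction structure above (a per-node operator() plus a binary combiner) admits a compact restatement; the LaTeX sketch below assumes the standard MRA convention that node coefficients are orthonormal across the tree, which is an assumption of this note rather than something stated in this index:
    \langle f \,|\, g \rangle \;=\; \sum_{\text{local nodes } n} \big\langle c^{f}_{n},\, c^{g}_{n} \big\rangle
with leaves_only restricting the sum to leaf nodes, and operator()(resultT a, resultT b) presumably accumulating the partial sums of different sub-ranges.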
keep only the sum coefficients in each node
Definition funcimpl.h:2237
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2238
do_keep_sum_coeffs(implT *impl)
constructor needs impl for cdata

Definition funcimpl.h:2242
implT * impl
Definition funcimpl.h:2239
void serialize(const Archive &ar)
Definition funcimpl.h:2251
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2244
mirror dimensions of this, write result on f
Definition funcimpl.h:2511
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2521
implT * f
Definition funcimpl.h:2515
std::vector< long > mirror
Definition funcimpl.h:2514
void serialize(const Archive &ar)
Definition funcimpl.h:2568
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2512
std::vector< long > map
Definition funcimpl.h:2514
do_map_and_mirror(const std::vector< long > map, const std::vector< long > mirror, implT &f)
Definition funcimpl.h:2518
map this onto f
Definition funcimpl.h:2431
do_mapdim(const std::vector< long > map, implT &f)
Definition funcimpl.h:2438
void serialize(const Archive &ar)
Definition funcimpl.h:2454
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2432
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2440
std::vector< long > map
Definition funcimpl.h:2434
do_mapdim()
Definition funcimpl.h:2437
implT * f
Definition funcimpl.h:2435
merge the coefficient boxes of this into other's tree
Definition funcimpl.h:2395
bool operator()(typename rangeT::iterator &it) const
return the norm of the difference of this node and its "mirror" node
Definition funcimpl.h:2405
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2396
FunctionImpl< Q, NDIM > * other
Definition funcimpl.h:2397
do_merge_trees(const T alpha, const R beta, FunctionImpl< Q, NDIM > &other)
Definition funcimpl.h:2401
T alpha
Definition funcimpl.h:2398
do_merge_trees()
Definition funcimpl.h:2400
R beta
Definition funcimpl.h:2399
void serialize(const Archive &ar)
Definition funcimpl.h:2424
mirror dimensions of this, write result on f
Definition funcimpl.h:2461
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2470
implT * f
Definition funcimpl.h:2465
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2462
do_mirror()
Definition funcimpl.h:2467
do_mirror(const std::vector< long > mirror, implT &f)
Definition funcimpl.h:2468
void serialize(const Archive &ar)
Definition funcimpl.h:2504
std::vector< long > mirror
Definition funcimpl.h:2464
Definition funcimpl.h:5513
double operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5514
void serialize(const Archive &ar)
Definition funcimpl.h:5529
double operator()(double a, double b) const
Definition funcimpl.h:5525
laziness
Definition funcimpl.h:4675
void serialize(Archive &ar)
Definition funcimpl.h:4684
Key< OPDIM > d
Definition funcimpl.h:4676
Key< OPDIM > key
Definition funcimpl.h:4676
keyT dest
Definition funcimpl.h:4677
double fac
Definition funcimpl.h:4678
do_op_args(const Key< OPDIM > &key, const Key< OPDIM > &d, const keyT &dest, double tol, double fac, double cnorm)
Definition funcimpl.h:4681
double cnorm
Definition funcimpl.h:4678
double tol
Definition funcimpl.h:4678
reduce the rank of the nodes, optional fence
Definition funcimpl.h:2257
do_reduce_rank(const TensorArgs &targs)
Definition funcimpl.h:2265
TensorArgs args
Definition funcimpl.h:2261
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2271
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2258
do_reduce_rank(const double &thresh)
Definition funcimpl.h:2266
void serialize(const Archive &ar)
Definition funcimpl.h:2277
Changes non-standard compressed form to standard compressed form.
Definition funcimpl.h:4639
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:4650
do_standard(implT *impl)
Definition funcimpl.h:4647
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:4640
void serialize(const Archive &ar)
Definition funcimpl.h:4667
implT * impl
Definition funcimpl.h:4643
given an NS tree resulting from a convolution, truncate leaves if appropriate
Definition funcimpl.h:2178
void serialize(const Archive &ar)
Definition funcimpl.h:2198
const implT * f
Definition funcimpl.h:2180
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2184
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2179
do_truncate_NS_leafs(const implT *f)
Definition funcimpl.h:2182
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2657
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2661
implT * impl
Definition funcimpl.h:2658
void serialize(const Archive &ar)
Definition funcimpl.h:2679
do_unary_op_value_inplace(implT *impl, const opT &op)
Definition funcimpl.h:2660
Hartree product of two LDIM functions to yield a NDIM = 2*LDIM function.
Definition funcimpl.h:3604
this_type forward_ctor(implT *result1, const ctL &p11, const ctL &p22, const leaf_opT &leaf_op)
Definition funcimpl.h:3660
bool randomize() const
Definition funcimpl.h:3605
void serialize(const Archive &ar)
Definition funcimpl.h:3664
hartree_op(implT *result, const ctL &p11, const ctL &p22, const leaf_opT &leaf_op)
Definition funcimpl.h:3616
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:3608
ctL p2
tracking coeffs of the two lo-dim functions
Definition funcimpl.h:3611
leaf_opT leaf_op
determine if a given node will be a leaf node
Definition funcimpl.h:3612
hartree_op()
Definition funcimpl.h:3615
implT * result
where to construct the pair function
Definition funcimpl.h:3610
hartree_op< LDIM, leaf_opT > this_type
Definition funcimpl.h:3607
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
Definition funcimpl.h:3621
ctL p1
Definition funcimpl.h:3611
this_type make_child(const keyT &child) const
Definition funcimpl.h:3644
Future< this_type > activate() const
Definition funcimpl.h:3653
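The Hartree product named in this entry admits a one-line restatement; the LaTeX below simply rewrites the brief, with p1 and p2 denoting the two tracked LDIM factors and NDIM = 2*LDIM:
    F(x_1, x_2) \;=\; p_1(x_1)\, p_2(x_2), \qquad x_1, x_2 \in \mathbb{R}^{\mathrm{LDIM}}
leaf_op then decides, per key, whether the resulting node is kept as a leaf.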
perform this multiplication: h(1,2) = f(1,2) * g(1)
Definition funcimpl.h:3412
multiply_op()
Definition funcimpl.h:3424
ctL g
Definition funcimpl.h:3421
Future< this_type > activate() const
Definition funcimpl.h:3503
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:3416
implT * h
the result function h(1,2) = f(1,2) * g(1)
Definition funcimpl.h:3419
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:3415
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
apply this to the FunctionNodes of f and g at Key key
Definition funcimpl.h:3451
this_type forward_ctor(implT *h1, const ctT &f1, const ctL &g1, const int particle)
Definition funcimpl.h:3510
static bool randomize()
Definition funcimpl.h:3414
int particle
whether g is g(1) or g(2)
Definition funcimpl.h:3422
ctT f
Definition funcimpl.h:3420
multiply_op< LDIM > this_type
Definition funcimpl.h:3417
multiply_op(implT *h1, const ctT &f1, const ctL &g1, const int particle1)
Definition funcimpl.h:3426
bool screen(const coeffT &fcoeff, const coeffT &gcoeff, const keyT &key) const
return true if this will be a leaf node
Definition funcimpl.h:3432
this_type make_child(const keyT &child) const
Definition funcimpl.h:3493
void serialize(const Archive &ar)
Definition funcimpl.h:3514
coeffT val_lhs
Definition funcimpl.h:3891
double lo
Definition funcimpl.h:3894
double lo1
Definition funcimpl.h:3894
long oversampling
Definition funcimpl.h:3892
double error
Definition funcimpl.h:3893
tensorT operator()(const Key< NDIM > key, const tensorT &coeff_rhs)
multiply the values of rhs and lhs, storing the result in rhs; rhs and lhs have the same dimensions
Definition funcimpl.h:3909
coeffT coeff_lhs
Definition funcimpl.h:3891
void serialize(const Archive &ar)
Definition funcimpl.h:3997
double lo2
Definition funcimpl.h:3894
double hi1
Definition funcimpl.h:3894
pointwise_multiplier(const Key< NDIM > key, const coeffT &clhs)
Definition funcimpl.h:3897
coeffT operator()(const Key< NDIM > key, const tensorT &coeff_rhs, const int particle)
multiply the values of rhs and lhs, storing the result in rhs; rhs and lhs have different dimensions
Definition funcimpl.h:3954
double hi2
Definition funcimpl.h:3894
double hi
Definition funcimpl.h:3894
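The value-space multiplication usually behind such a pointwise multiplier can be sketched as follows; this is an assumption suggested by the oversampling member (an enlarged quadrature grid), not a statement read off the implementation:
    c^{\,\mathrm{lhs}\cdot\mathrm{rhs}} \;=\; T^{-1}\Big[ \big(T\, c^{\mathrm{lhs}}\big) \odot \big(T\, c^{\mathrm{rhs}}\big) \Big]
where T maps scaling-function coefficients to function values on the quadrature grid and \odot denotes the element-wise product; the equal-dimension and mixed-dimension overloads presumably differ only in how the lower-dimensional values are broadcast.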
project the low-dim function g onto the high-dim function f: result(x) = <f(x,y) | g(y)>
Definition funcimpl.h:6922
project_out_op(const implT *fimpl, implL1 *result, const ctL &iag, const int dim)
Definition funcimpl.h:6937
ctL iag
the low-dim function g
Definition funcimpl.h:6932
FunctionImpl< T, NDIM-LDIM > implL1
Definition funcimpl.h:6927
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:7016
std::pair< bool, coeffT > argT
Definition funcimpl.h:6928
const implT * fimpl
the high-dim function f
Definition funcimpl.h:6930
this_type forward_ctor(const implT *fimpl1, implL1 *result1, const ctL &iag1, const int dim1)
taskq-compatible ctor
Definition funcimpl.h:7023
this_type make_child(const keyT &child) const
Definition funcimpl.h:7007
project_out_op< LDIM > this_type
Definition funcimpl.h:6925
implL1 * result
the low-dim result function
Definition funcimpl.h:6931
Future< argT > operator()(const Key< NDIM > &key) const
do the actual contraction
Definition funcimpl.h:6944
void serialize(const Archive &ar)
Definition funcimpl.h:7027
project_out_op(const project_out_op &other)
Definition funcimpl.h:6939
int dim
0: project 0..LDIM-1, 1: project LDIM..NDIM-1
Definition funcimpl.h:6933
bool randomize() const
Definition funcimpl.h:6923
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:6926
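The bra-ket in the brief above denotes integration over the projected-out block of coordinates; in LaTeX, restating the brief with dim selecting which LDIM block plays the role of y (complex conjugation for complex types is glossed over here):
    \mathrm{result}(x) \;=\; \big\langle f(x,y) \,\big|\, g(y) \big\rangle_{y} \;=\; \int f(x,y)\, g(y)\, \mathrm{d}y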
recursive part of recursive_apply
Definition funcimpl.h:5340
ctT iaf
Definition funcimpl.h:5348
recursive_apply_op2< opT > this_type
Definition funcimpl.h:5343
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:5403
const opT * apply_op
need this for randomization
Definition funcimpl.h:5349
bool randomize() const
Definition funcimpl.h:5341
recursive_apply_op2(const recursive_apply_op2 &other)
Definition funcimpl.h:5356
void serialize(const Archive &ar)
Definition funcimpl.h:5419
argT finalize(const double kernel_norm, const keyT &key, const coeffT &coeff, const implT *r) const
sole purpose is to wait for the kernel norm, wrap it, and send it back to the caller
Definition funcimpl.h:5389
this_type make_child(const keyT &child) const
Definition funcimpl.h:5398
recursive_apply_op2(implT *result, const ctT &iaf, const opT *apply_op)
Definition funcimpl.h:5353
std::pair< bool, coeffT > argT
Definition funcimpl.h:5345
implT * result
Definition funcimpl.h:5347
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:5344
argT operator()(const Key< NDIM > &key) const
send off the application of the operator
Definition funcimpl.h:5365
this_type forward_ctor(implT *result1, const ctT &iaf1, const opT *apply_op1)
taskq-compatible ctor
Definition funcimpl.h:5415
recursive part of recursive_apply
Definition funcimpl.h:5209
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
make the NS-coefficients and send off the application of the operator
Definition funcimpl.h:5234
this_type forward_ctor(implT *r, const CoeffTracker< T, LDIM > &f1, const CoeffTracker< T, LDIM > &g1, const opT *apply_op1)
Definition funcimpl.h:5299
opT * apply_op
Definition funcimpl.h:5217
recursive_apply_op(const recursive_apply_op &other)
Definition funcimpl.h:5227
recursive_apply_op< opT, LDIM > this_type
Definition funcimpl.h:5212
Future< this_type > activate() const
Definition funcimpl.h:5292
bool randomize() const
Definition funcimpl.h:5210
implT * result
Definition funcimpl.h:5214
CoeffTracker< T, LDIM > iaf
Definition funcimpl.h:5215
void serialize(const Archive &ar)
Definition funcimpl.h:5304
std::pair< bool, coeffT > finalize(const double kernel_norm, const keyT &key, const coeffT &coeff) const
sole purpose is to wait for the kernel norm, wrap it, and send it back to the caller
Definition funcimpl.h:5274
recursive_apply_op(implT *result, const CoeffTracker< T, LDIM > &iaf, const CoeffTracker< T, LDIM > &iag, const opT *apply_op)
Definition funcimpl.h:5221
this_type make_child(const keyT &child) const
Definition funcimpl.h:5283
CoeffTracker< T, LDIM > iag
Definition funcimpl.h:5216
remove all coefficients of internal nodes
Definition funcimpl.h:2203
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2204
remove_internal_coeffs()=default
constructor needs impl for cdata
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2209
void serialize(const Archive &ar)
Definition funcimpl.h:2215
remove all coefficients of leaf nodes
Definition funcimpl.h:2220
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2226
remove_leaf_coeffs()=default
constructor needs impl for cdata
void serialize(const Archive &ar)
Definition funcimpl.h:2231
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2221
Definition funcimpl.h:4490
void serialize(Archive &ar)
Definition funcimpl.h:4494
bool operator()(const implT *f, const keyT &key, const nodeT &t) const
Definition funcimpl.h:4491
shallow-copy, pared-down version of FunctionNode, for special purposes only
Definition funcimpl.h:749
coeffT & coeff()
Definition funcimpl.h:763
GenTensor< T > coeffT
Definition funcimpl.h:750
bool is_leaf() const
Definition funcimpl.h:765
void serialize(Archive &ar)
Definition funcimpl.h:767
ShallowNode(const ShallowNode< T, NDIM > &node)
Definition funcimpl.h:758
ShallowNode(const FunctionNode< T, NDIM > &node)
Definition funcimpl.h:755
bool has_children() const
Definition funcimpl.h:764
ShallowNode()
Definition funcimpl.h:754
bool _has_children
Definition funcimpl.h:752
double dnorm
Definition funcimpl.h:753
const coeffT & coeff() const
Definition funcimpl.h:762
coeffT _coeffs
Definition funcimpl.h:751
TensorArgs holds the arguments for creating a LowRankTensor.
Definition gentensor.h:134
double thresh
Definition gentensor.h:135
TensorType tt
Definition gentensor.h:136
inserts/accumulates coefficients into impl's tree
Definition funcimpl.h:716
FunctionImpl< T, NDIM > * impl
Definition funcimpl.h:720
FunctionNode< T, NDIM > nodeT
Definition funcimpl.h:718
accumulate_op(const accumulate_op &other)=default
void operator()(const Key< NDIM > &key, const coeffT &coeff, const bool &is_leaf) const
Definition funcimpl.h:724
void serialize(Archive &ar)
Definition funcimpl.h:728
GenTensor< T > coeffT
Definition funcimpl.h:717
accumulate_op(FunctionImpl< T, NDIM > *f)
Definition funcimpl.h:722
static void load(const Archive &ar, FunctionImpl< T, NDIM > *&ptr)
Definition funcimpl.h:7249
static void load(const Archive &ar, const FunctionImpl< T, NDIM > *&ptr)
Definition funcimpl.h:7218
static void load(const Archive &ar, std::shared_ptr< FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7297
static void load(const Archive &ar, std::shared_ptr< const FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7281
Default load of an object via serialize(ar, t).
Definition archive.h:667
static void load(const A &ar, const U &t)
Load an object.
Definition archive.h:679
static void store(const Archive &ar, FunctionImpl< T, NDIM > *const &ptr)
Definition funcimpl.h:7271
static void store(const Archive &ar, const FunctionImpl< T, NDIM > *const &ptr)
Definition funcimpl.h:7240
static void store(const Archive &ar, const std::shared_ptr< FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7306
static void store(const Archive &ar, const std::shared_ptr< const FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7290
Default store of an object via serialize(ar, t).
Definition archive.h:612
static std::enable_if_t< is_output_archive_v< A > &&!std::is_function< U >::value &&(has_member_serialize_v< U, A >||has_nonmember_serialize_v< U, A >||has_freestanding_serialize_v< U, A >||has_freestanding_default_serialize_v< U, A >), void > store(const A &ar, const U &t)
Definition archive.h:622
Definition funcimpl.h:610
void serialize(Archive &ar)
Definition funcimpl.h:674
const opT * op
Definition funcimpl.h:617
hartree_convolute_leaf_op(const implT *f, const implL *g, const opT *op)
Definition funcimpl.h:621
bool operator()(const Key< NDIM > &key) const
no pre-determination
Definition funcimpl.h:625
bool operator()(const Key< NDIM > &key, const Tensor< T > &fcoeff, const Tensor< T > &gcoeff) const
post-determination: true if f is a leaf and the result is well-represented
Definition funcimpl.h:638
const implL * g
Definition funcimpl.h:616
const FunctionImpl< T, NDIM > * f
Definition funcimpl.h:615
FunctionImpl< T, LDIM > implL
Definition funcimpl.h:613
bool do_error_leaf_op() const
Definition funcimpl.h:618
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:612
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
no post-determination
Definition funcimpl.h:628
returns true if the result of a hartree_product is a leaf node (compute norm & error)
Definition funcimpl.h:500
bool do_error_leaf_op() const
Definition funcimpl.h:505
const FunctionImpl< T, NDIM > * f
Definition funcimpl.h:503
hartree_leaf_op(const implT *f, const long &k)
Definition funcimpl.h:508
long k
Definition funcimpl.h:504
void serialize(Archive &ar)
Definition funcimpl.h:556
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
no post-determination
Definition funcimpl.h:514
bool operator()(const Key< NDIM > &key, const Tensor< T > &fcoeff, const Tensor< T > &gcoeff) const
post-determination: true if f is a leaf and the result is well-represented
Definition funcimpl.h:524
bool operator()(const Key< NDIM > &key) const
no pre-determination
Definition funcimpl.h:511
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:502
inserts/replaces the coefficients in the function
Definition funcimpl.h:692
insert_op()
Definition funcimpl.h:699
implT * impl
Definition funcimpl.h:698
void operator()(const keyT &key, const coeffT &coeff, const bool &is_leaf) const
Definition funcimpl.h:702
FunctionNode< T, NDIM > nodeT
Definition funcimpl.h:696
Key< NDIM > keyT
Definition funcimpl.h:694
insert_op(const insert_op &other)
Definition funcimpl.h:701
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:693
GenTensor< T > coeffT
Definition funcimpl.h:695
insert_op(implT *f)
Definition funcimpl.h:700
void serialize(Archive &ar)
Definition funcimpl.h:706
Definition mra.h:112
Definition funcimpl.h:680
bool operator()(const Key< NDIM > &key, const GenTensor< T > &fcoeff, const GenTensor< T > &gcoeff) const
Definition funcimpl.h:682
void serialize(Archive &ar)
Definition funcimpl.h:686
void operator()(const Key< NDIM > &key, const GenTensor< T > &coeff, const bool &is_leaf) const
Definition funcimpl.h:681
Definition funcimpl.h:564
bool operator()(const Key< NDIM > &key, const double &cnorm) const
post-determination: return true if operator and coefficient norms are small
Definition funcimpl.h:585
void serialize(Archive &ar)
Definition funcimpl.h:600
const implT * f
the source or result function, needed for truncate_tol
Definition funcimpl.h:568
op_leaf_op(const opT *op, const implT *f)
Definition funcimpl.h:572
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:565
const opT * op
the convolution operator
Definition funcimpl.h:567
bool do_error_leaf_op() const
Definition funcimpl.h:569
bool operator()(const Key< NDIM > &key) const
pre-determination: we cannot know if this will be a leaf node before we have the final coeffs
Definition funcimpl.h:575
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
post-determination: return true if operator and coefficient norms are small
Definition funcimpl.h:578
Definition lowrankfunction.h:332
Definition funcimpl.h:736
void serialize(Archive &ar)
Definition funcimpl.h:743
bool operator()(const Key< NDIM > &key, const T &t, const R &r) const
Definition funcimpl.h:742
bool operator()(const Key< NDIM > &key, const T &t) const
Definition funcimpl.h:739
int np
Definition tdse1d.cc:165
static const double s0
Definition tdse4.cc:83
Defines and implements most of Tensor.
#define ITERATOR(t, exp)
Definition tensor_macros.h:249
#define IND
Definition tensor_macros.h:204
#define TERNARY_OPTIMIZED_ITERATOR(X, x, Y, y, Z, z, exp)
Definition tensor_macros.h:719
AtomicInt sum
Definition test_atomicint.cc:46
double norm(const T i1)
Definition test_cloud.cc:72
int task(int i)
Definition test_runtime.cpp:4
void e()
Definition test_sig.cc:75
static const double alpha
Definition testcosine.cc:10
const double offset
Definition testfuns.cc:143
constexpr std::size_t NDIM
Definition testgconv.cc:54
double h(const coord_1d &r)
Definition testgconv.cc:175
double g1(const coord_t &r)
Definition testgconv.cc:122
std::size_t axis
Definition testpdiff.cc:59
double source(const coordT &r)
Definition testperiodic.cc:48
#define TENSOR_RESULT_TYPE(L, R)
This macro simplifies access to TensorResultType.
Definition type_data.h:205
#define PROFILE_MEMBER_FUNC(classname)
Definition worldprofile.h:210
#define PROFILE_BLOCK(name)
Definition worldprofile.h:208
int ProcessID
Used to clearly identify process number/rank.
Definition worldtypes.h:43