MADNESS 0.10.1
funcimpl.h
1/*
2 This file is part of MADNESS.
3
4 Copyright (C) 2007,2010 Oak Ridge National Laboratory
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
20 For more information please contact:
21
22 Robert J. Harrison
23 Oak Ridge National Laboratory
24 One Bethel Valley Road
25 P.O. Box 2008, MS-6367
26
27 email: harrisonrj@ornl.gov
28 tel: 865-241-3937
29 fax: 865-572-0680
30*/
31
32#ifndef MADNESS_MRA_FUNCIMPL_H__INCLUDED
33#define MADNESS_MRA_FUNCIMPL_H__INCLUDED
34
35/// \file funcimpl.h
36/// \brief Provides FunctionCommonData, FunctionImpl and FunctionFactory
37
39#include <madness/world/print.h>
40#include <madness/misc/misc.h>
43
45#include <madness/mra/indexit.h>
46#include <madness/mra/key.h>
50
51#include <madness/mra/leafop.h>
52
53#include <array>
54#include <iostream>
55#include <type_traits>
56
57namespace madness {
58 template <typename T, std::size_t NDIM>
59 class DerivativeBase;
60
61 template<typename T, std::size_t NDIM>
62 class FunctionImpl;
63
64 template<typename T, std::size_t NDIM>
65 class FunctionNode;
66
67 template<typename T, std::size_t NDIM>
68 class Function;
69
70 template<typename T, std::size_t NDIM>
71 class FunctionFactory;
72
73 template<typename T, std::size_t NDIM, std::size_t MDIM>
74 class CompositeFunctorInterface;
75
76 template<int D>
77 class LoadBalImpl;
78
79}
80
81namespace madness {
82
83
84 /// A simple process map
85 template<typename keyT>
86 class SimplePmap : public WorldDCPmapInterface<keyT> {
87 private:
88 const int nproc;
89 const ProcessID me;
90
91 public:
92 SimplePmap(World& world) : nproc(world.nproc()), me(world.rank())
93 { }
94
95 ProcessID owner(const keyT& key) const {
96 if (key.level() == 0)
97 return 0;
98 else
99 return key.hash() % nproc;
100 }
101 };
102
103 /// A pmap that locates children on odd levels with their even level parents
104 template <typename keyT>
105 class LevelPmap : public WorldDCPmapInterface<keyT> {
106 private:
107 const int nproc;
108 public:
109 LevelPmap() : nproc(0) {};
110
111 LevelPmap(World& world) : nproc(world.nproc()) {}
112
113 /// Find the owner of a given key
114 ProcessID owner(const keyT& key) const {
115 Level n = key.level();
116 if (n == 0) return 0;
117 hashT hash;
118 if (n <= 3 || (n&0x1)) hash = key.hash();
119 else hash = key.parent().hash();
120 return hash%nproc;
121 }
122 };
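// Added illustrative sketch (not part of the original header): both pmaps above
// implement WorldDCPmapInterface<keyT>::owner(), which the WorldContainer of tree
// nodes consults to decide which rank stores a given key. Assuming a madness::World
// instance named "world" exists, a key is mapped like this:
//
//   Vector<Translation,3> l0{0,0,0};
//   LevelPmap< Key<3> > pmap(world);
//   ProcessID p = pmap.owner(Key<3>(0, l0));   // a level-0 key always maps to rank 0
//   // for n>3 and even n, a key is hashed with its level n-1 parent's key, so a
//   // parent and its children tend to be co-located on the same rank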
123
124
125 /// FunctionNode holds the coefficients, etc., at each node of the 2^NDIM-tree
126 template<typename T, std::size_t NDIM>
127 class FunctionNode {
128 public:
129 typedef GenTensor<T> coeffT; ///< Type of tensor used to hold coeffs
130 typedef Tensor<T> tensorT; ///< Type of tensor for anything but to hold coeffs
131 private:
132 // Should compile OK with these volatile but there should
133 // be no need to set as volatile since the container internally
134 // stores the entire entry as volatile
135
136 coeffT _coeffs; ///< The coefficients, if any
137 double _norm_tree; ///< After norm_tree() this contains the norm of the coefficients summed up the tree
138 bool _has_children; ///< True if there are children
139 coeffT buffer; ///< Accumulation buffer used by accumulate(); folded back by consolidate_buffer()
140 double dnorm=-1.0; ///< norm of the d coefficients, also defined if there are no d coefficients
141 double snorm=-1.0; ///< norm of the s coefficients
142
143 public:
144 typedef WorldContainer<Key<NDIM> , FunctionNode<T, NDIM> > dcT; ///< Type of container holding the nodes
145 /// Default constructor makes node without coeff or children
146 FunctionNode() :
147 _coeffs(), _norm_tree(1e300), _has_children(false) {
148 }
149
150 /// Constructor from given coefficients with optional children
151
152 /// Note that only a shallow copy of the coeffs is taken, so
153 /// you should pass in a deep copy if you want the node to
154 /// take ownership.
155 explicit
159
160 explicit
164
165 explicit
169
172 dnorm(other.dnorm), snorm(other.snorm) {
173 }
174
176 FunctionNode<T, NDIM>& operator=(const FunctionNode<T, NDIM>& other) {
177 if (this != &other) {
178 coeff() = copy(other.coeff());
179 _norm_tree = other._norm_tree;
180 _has_children = other._has_children;
181 dnorm=other.dnorm;
182 snorm=other.snorm;
184 }
185 return *this;
186 }
187
188 /// Copy with possible type conversion of coefficients, copying all other state
189
190 /// Choose to not overload copy and type conversion operators
191 /// so there are no automatic type conversions.
192 template<typename Q>
193 FunctionNode<Q, NDIM>
194 convert() const {
195 return FunctionNode<Q, NDIM> (madness::convert<Q,T>(coeff()), _norm_tree, snorm, dnorm, _has_children);
196 }
197
198 /// Returns true if there are coefficients in this node
199 bool
200 has_coeff() const {
201 return _coeffs.has_data();
202 }
203
204
205 /// Returns true if this node has children
206 bool
207 has_children() const {
208 return _has_children;
209 }
210
211 /// Returns true if this does not have children
212 bool
213 is_leaf() const {
214 return !_has_children;
215 }
216
217 /// Returns true if this node is invalid (no coeffs and no children)
218 bool
219 is_invalid() const {
220 return !(has_coeff() || has_children());
221 }
222
223 /// Returns a non-const reference to the tensor containing the coeffs
224
225 /// Returns an empty tensor if there are no coefficients.
226 coeffT&
227 coeff() {
228 MADNESS_ASSERT(_coeffs.ndim() == -1 || (_coeffs.dim(0) <= 2
229 * MAXK && _coeffs.dim(0) >= 0));
230 return const_cast<coeffT&>(_coeffs);
231 }
232
233 /// Returns a const reference to the tensor containing the coeffs
234
235 /// Returns an empty tensor if there are no coefficients.
236 const coeffT&
237 coeff() const {
238 return const_cast<const coeffT&>(_coeffs);
239 }
240
241 /// Returns the number of coefficients in this node
242 size_t size() const {
243 return _coeffs.size();
244 }
245
246 public:
247
248 /// reduces the rank of the coefficients (if applicable)
249 void reduceRank(const double& eps) {
250 _coeffs.reduce_rank(eps);
251 }
252
253 /// Sets \c has_children attribute to value of \c flag.
254 void set_has_children(bool flag) {
255 _has_children = flag;
256 }
257
258 /// Sets \c has_children attribute to true, recursing up the tree to ensure the node is connected to its parent
259 void set_has_children_recursive(const typename FunctionNode<T,NDIM>::dcT& c, const Key<NDIM>& key) {
260 //madness::print(" set_chi_recu: ", key, *this);
261 //PROFILE_MEMBER_FUNC(FunctionNode); // Too fine grain for routine profiling
262 if (!(has_children() || has_coeff() || key.level()==0)) {
263 // If node already knows it has children or it has
264 // coefficients then it must already be connected to
265 // its parent. If not, the node was probably just
266 // created for this operation and must be connected to
267 // its parent.
268 Key<NDIM> parent = key.parent();
269 // Task on next line used to be TaskAttributes::hipri()) ... but deferring execution of this
270 // makes sense since it is not urgent and lazy connection will likely mean that less forwarding
271 // will happen since the upper level task will have already made the connection.
272 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
273 //const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
274 //madness::print(" set_chi_recu: forwarding",key,parent);
275 }
276 _has_children = true;
277 }
278
279 /// Sets \c has_children attribute to value of \c !flag
280 void set_is_leaf(bool flag) {
281 _has_children = !flag;
282 }
283
284 /// Takes a \em shallow copy of the coeff --- same as \c this->coeff()=coeff
285 void set_coeff(const coeffT& coeffs) {
286 coeff() = coeffs;
287 if ((_coeffs.has_data()) and ((_coeffs.dim(0) < 0) || (_coeffs.dim(0)>2*MAXK))) {
288 print("set_coeff: may have a problem");
289 print("set_coeff: coeff.dim[0] =", coeffs.dim(0), ", 2* MAXK =", 2*MAXK);
290 }
291 MADNESS_ASSERT(coeffs.dim(0)<=2*MAXK && coeffs.dim(0)>=0);
292 }
293
294 /// Clears the coefficients (has_coeff() will subsequently return false)
295 void clear_coeff() {
296 coeff()=coeffT();
297 }
298
299 /// Scale the coefficients of this node
300 template <typename Q>
301 void scale(Q a) {
302 _coeffs.scale(a);
303 }
304
305 /// Sets the value of norm_tree
306 void set_norm_tree(double norm_tree) {
307 _norm_tree = norm_tree;
308 }
309
310 /// Gets the value of norm_tree
311 double get_norm_tree() const {
312 return _norm_tree;
313 }
314
315 /// return the precomputed norm of the (virtual) d coefficients
316 double get_dnorm() const {
317 return dnorm;
318 }
319
320 /// set the precomputed norm of the (virtual) s coefficients
321 void set_snorm(const double sn) {
322 snorm=sn;
323 }
324
325 /// set the precomputed norm of the (virtual) d coefficients
326 void set_dnorm(const double dn) {
327 dnorm=dn;
328 }
329
330 /// get the precomputed norm of the (virtual) s coefficients
331 double get_snorm() const {
332 return snorm;
333 }
334
335 void recompute_snorm_and_dnorm(const FunctionCommonData<T,NDIM>& cdata) {
336 snorm = 0.0;
337 dnorm = 0.0;
338 if (coeff().size() == 0) { ;
339 } else if (coeff().dim(0) == cdata.vk[0]) {
340 snorm = coeff().normf();
341
342 } else if (coeff().is_full_tensor()) {
343 Tensor<T> c = copy(coeff().get_tensor());
344 snorm = c(cdata.s0).normf();
345 c(cdata.s0) = 0.0;
346 dnorm = c.normf();
347
348 } else if (coeff().is_svd_tensor()) {
349 coeffT c= coeff()(cdata.s0);
350 snorm = c.normf();
351 double norm = coeff().normf();
352 dnorm = sqrt(norm * norm - snorm * snorm);
353
354 } else {
355 MADNESS_EXCEPTION("cannot use compute_dnorm", 1);
356 }
357 }
358
359
360 /// General bi-linear operation --- this = this*alpha + other*beta
361
362 /// This/other may not have coefficients. Has_children will be
363 /// true in the result if either this/other have children.
364 template <typename Q, typename R>
365 void gaxpy_inplace(const T& alpha, const FunctionNode<Q,NDIM>& other, const R& beta) {
366 //PROFILE_MEMBER_FUNC(FuncNode); // Too fine grain for routine profiling
367 if (other.has_children())
368 _has_children = true;
369 if (has_coeff()) {
370 if (other.has_coeff()) {
371 coeff().gaxpy(alpha,other.coeff(),beta);
372 }
373 else {
374 coeff().scale(alpha);
375 }
376 }
377 else if (other.has_coeff()) {
378 coeff() = other.coeff()*beta; //? Is this the correct type conversion?
379 }
380 }
381
382 /// Accumulate inplace and if necessary connect node to parent
383 void accumulate2(const tensorT& t, const typename FunctionNode<T,NDIM>::dcT& c,
384 const Key<NDIM>& key) {
385 // double cpu0=cpu_time();
386 if (has_coeff()) {
387 MADNESS_ASSERT(coeff().is_full_tensor());
388 // if (coeff().type==TT_FULL) {
389 coeff() += coeffT(t,-1.0,TT_FULL);
390 // } else {
391 // tensorT cc=coeff().full_tensor_copy();;
392 // cc += t;
393 // coeff()=coeffT(cc,args);
394 // }
395 }
396 else {
397 // No coeff and no children means the node is newly
398 // created for this operation and therefore we must
399 // tell its parent that it exists.
400 coeff() = coeffT(t,-1.0,TT_FULL);
401 // coeff() = copy(t);
402 // coeff() = coeffT(t,args);
403 if ((!_has_children) && key.level()> 0) {
404 Key<NDIM> parent = key.parent();
405 if (c.is_local(parent))
406 const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
407 else
408 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
409 }
410 }
411 //double cpu1=cpu_time();
412 }
413
414
415 /// Accumulate inplace and if necessary connect node to parent
416 void accumulate(const coeffT& t, const typename FunctionNode<T,NDIM>::dcT& c,
417 const Key<NDIM>& key, const TensorArgs& args) {
418 if (has_coeff()) {
419 coeff().add_SVD(t,args.thresh);
420 if (buffer.rank()<coeff().rank()) {
421 if (buffer.has_data()) {
422 buffer.add_SVD(coeff(),args.thresh);
423 } else {
424 buffer=copy(coeff());
425 }
426 coeff()=coeffT();
427 }
428
429 } else {
430 // No coeff and no children means the node is newly
431 // created for this operation and therefore we must
432 // tell its parent that it exists.
433 coeff() = copy(t);
434 if ((!_has_children) && key.level()> 0) {
435 Key<NDIM> parent = key.parent();
436 if (c.is_local(parent))
437 const_cast<dcT&>(c).send(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
438 else
439 const_cast<dcT&>(c).task(parent, &FunctionNode<T,NDIM>::set_has_children_recursive, c, parent);
440 }
441 }
442 }
443
444 void consolidate_buffer(const TensorArgs& args) {
445 if ((coeff().has_data()) and (buffer.has_data())) {
446 coeff().add_SVD(buffer,args.thresh);
447 } else if (buffer.has_data()) {
448 coeff()=buffer;
449 }
450 buffer=coeffT();
451 }
452
453 T trace_conj(const FunctionNode<T,NDIM>& rhs) const {
454 return this->_coeffs.trace_conj((rhs._coeffs));
455 }
456
457 template <typename Archive>
458 void serialize(Archive& ar) {
459 ar & coeff() & _norm_tree & _has_children & dnorm & snorm;
460 }
461
462 /// like operator<<(ostream&, const FunctionNode<T,NDIM>&) but
463 /// produces a sequence of JSON-formatted key-value pairs
464 /// @warning enclose the output in curly braces to make
465 /// a valid JSON object
466 void print_json(std::ostream& s) const {
467 s << "\"has_coeff\":" << this->has_coeff()
468 << ",\"has_children\":" << this->has_children() << ",\"norm\":";
469 double norm = this->has_coeff() ? this->coeff().normf() : 0.0;
470 if (norm < 1e-12)
471 norm = 0.0;
472 double nt = this->get_norm_tree();
473 if (nt == 1e300)
474 nt = 0.0;
475 s << norm << ",\"norm_tree\":" << nt << ",\"snorm\":"
476 << this->get_snorm() << ",\"dnorm\":" << this->get_dnorm()
477 << ",\"rank\":" << this->coeff().rank();
478 if (this->coeff().is_assigned())
479 s << ",\"dim\":" << this->coeff().dim(0);
480 }
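// Added usage note: print_json() emits only the key-value pairs; per the warning
// above, the caller must supply the enclosing braces to obtain valid JSON, e.g.
//   os << "{"; node.print_json(os); os << "}";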
481
482 };
483
484 template <typename T, std::size_t NDIM>
485 std::ostream& operator<<(std::ostream& s, const FunctionNode<T,NDIM>& node) {
486 s << "(has_coeff=" << node.has_coeff() << ", has_children=" << node.has_children() << ", norm=";
487 double norm = node.has_coeff() ? node.coeff().normf() : 0.0;
488 if (norm < 1e-12)
489 norm = 0.0;
490 double nt = node.get_norm_tree();
491 if (nt == 1e300) nt = 0.0;
492 s << norm << ", norm_tree, s/dnorm =" << nt << ", " << node.get_snorm() << " " << node.get_dnorm() << "), rank="<< node.coeff().rank()<<")";
493 if (node.coeff().is_assigned()) s << " dim " << node.coeff().dim(0) << " ";
494 return s;
495 }
496
497
498 /// returns true if the result of a hartree_product is a leaf node (computes norm & error)
499 template<typename T, size_t NDIM>
500 struct hartree_leaf_op {
501
502 typedef FunctionImpl<T,NDIM> implT;
503 const implT* f;
504 long k;
505 bool do_error_leaf_op() const {return false;}
506
507 hartree_leaf_op() = default;
508 hartree_leaf_op(const implT* f, const long& k) : f(f), k(k) {}
509
510 /// no pre-determination
511 bool operator()(const Key<NDIM>& key) const {return false;}
512
513 /// no post-determination
514 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
515 MADNESS_EXCEPTION("no post-determination in hartree_leaf_op",1);
516 return true;
517 }
518
519 /// post-determination: true if f is a leaf and the result is well-represented
520
521 /// @param[in] key the hi-dimensional key (breaks into keys for f and g)
522 /// @param[in] fcoeff coefficients of f of its appropriate key in NS form
523 /// @param[in] gcoeff coefficients of g of its appropriate key in NS form
524 bool operator()(const Key<NDIM>& key, const Tensor<T>& fcoeff, const Tensor<T>& gcoeff) const {
525
526 if (key.level()<2) return false;
527 Slice s = Slice(0,k-1);
528 std::vector<Slice> s0(NDIM/2,s);
529
530 const double tol=f->get_thresh();
531 const double thresh=f->truncate_tol(tol, key)*0.3; // custom factor to "ensure" accuracy
532 // include the wavelets in the norm, makes it much more accurate
533 const double fnorm=fcoeff.normf();
534 const double gnorm=gcoeff.normf();
535
536 // if the final norm is small, perform the hartree product and return
537 const double norm=fnorm*gnorm; // computing the outer product
538 if (norm < thresh) return true;
539
540 // norm of the scaling function coefficients
541 const double sfnorm=fcoeff(s0).normf();
542 const double sgnorm=gcoeff(s0).normf();
543
544 // get the error of both functions and of the pair function;
545 // need the abs for numerics: sfnorm might be equal to fnorm.
546 const double ferror=sqrt(std::abs(fnorm*fnorm-sfnorm*sfnorm));
547 const double gerror=sqrt(std::abs(gnorm*gnorm-sgnorm*sgnorm));
548
549 // if the expected error is small, perform the hartree product and return
550 const double error=fnorm*gerror + ferror*gnorm + ferror*gerror;
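// Added note on the estimate above: writing P for the projection onto the
// scaling-function (s) part, the outer-product error factorizes as
//   f (x) g - Pf (x) Pg = (f-Pf)(x)Pg + Pf(x)(g-Pg) + (f-Pf)(x)(g-Pg),
// so by the triangle inequality and ||Pf|| <= fnorm, ||Pg|| <= gnorm the error
// is bounded by ferror*gnorm + fnorm*gerror + ferror*gerror, the expression used here.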
551 // const double error=sqrt(fnorm*fnorm*gnorm*gnorm - sfnorm*sfnorm*sgnorm*sgnorm);
552
553 if (error < thresh) return true;
554 return false;
555 }
556 template <typename Archive> void serialize (Archive& ar) {
557 ar & f & k;
558 }
559 };
560
561 /// returns true if the result of the convolution operator op with some provided
562 /// coefficients will be small
563 template<typename T, size_t NDIM, typename opT>
564 struct op_leaf_op {
565 typedef FunctionImpl<T,NDIM> implT;
566
567 const opT* op; ///< the convolution operator
568 const implT* f; ///< the source or result function, needed for truncate_tol
569 bool do_error_leaf_op() const {return true;}
570
571 op_leaf_op() = default;
572 op_leaf_op(const opT* op, const implT* f) : op(op), f(f) {}
573
574 /// pre-determination: we can't know if this will be a leaf node before we have the final coeffs
575 bool operator()(const Key<NDIM>& key) const {return true;}
576
577 /// post-determination: return true if operator and coefficient norms are small
578 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
579 if (key.level()<2) return false;
580 const double cnorm=coeff.normf();
581 return this->operator()(key,cnorm);
582 }
583
584 /// post-determination: return true if operator and coefficient norms are small
585 bool operator()(const Key<NDIM>& key, const double& cnorm) const {
586 if (key.level()<2) return false;
587
588 typedef Key<opT::opdim> opkeyT;
589 const opkeyT source=op->get_source_key(key);
590
591 const double thresh=f->truncate_tol(f->get_thresh(),key);
592 const std::vector<opkeyT>& disp = op->get_disp(key.level());
593 const opkeyT& d = *disp.begin(); // use the zero-displacement for screening
594 const double opnorm = op->norm(key.level(), d, source);
595 const double norm=opnorm*cnorm;
596 return norm<thresh;
597
598 }
599
600 template <typename Archive> void serialize (Archive& ar) {
601 ar & op & f;
602 }
603
604 };
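// Added illustrative sketch (the names "op" and "f" are hypothetical; any operator
// type providing opdim, get_source_key(), get_disp() and norm() works): the screening
// above discards a contribution when the zero-displacement operator norm times the
// coefficient norm is already below the truncation tolerance.
//
//   op_leaf_op<double,3,SeparatedConvolution<double,3> > screen(&op, &f);
//   if (screen(key, cnorm)) { /* negligible result: skip applying op on this box */ }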
605
606
607 /// returns true if the result of a hartree_product is a leaf node
608 /// criteria are error, norm and its effect on a convolution operator
609 template<typename T, size_t NDIM, size_t LDIM, typename opT>
610 struct hartree_convolute_leaf_op {
611
612 typedef FunctionImpl<T,NDIM> implT;
613 typedef FunctionImpl<T,LDIM> implL;
614
615 const implT* f;
616 const implL* g; // for use of its cdata only
617 const opT* op;
618 bool do_error_leaf_op() const {return false;}
619
620 hartree_convolute_leaf_op() = default;
621 hartree_convolute_leaf_op(const implT* f, const implL* g, const opT* op)
622 : f(f), g(g), op(op) {}
623
624 /// no pre-determination
625 bool operator()(const Key<NDIM>& key) const {return true;}
626
627 /// no post-determination
628 bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
629 MADNESS_EXCEPTION("no post-determination in hartree_convolute_leaf_op",1);
630 return true;
631 }
632
633 /// post-determination: true if f is a leaf and the result is well-represented
634
635 /// @param[in] key the hi-dimensional key (breaks into keys for f and g)
636 /// @param[in] fcoeff coefficients of f of its appropriate key in NS form
637 /// @param[in] gcoeff coefficients of g of its appropriate key in NS form
638 bool operator()(const Key<NDIM>& key, const Tensor<T>& fcoeff, const Tensor<T>& gcoeff) const {
639 // bool operator()(const Key<NDIM>& key, const GenTensor<T>& coeff) const {
640
641 if (key.level()<2) return false;
642
643 const double tol=f->get_thresh();
644 const double thresh=f->truncate_tol(tol, key);
645 // include the wavelets in the norm, makes it much more accurate
646 const double fnorm=fcoeff.normf();
647 const double gnorm=gcoeff.normf();
648
649 // norm of the scaling function coefficients
650 const double sfnorm=fcoeff(g->get_cdata().s0).normf();
651 const double sgnorm=gcoeff(g->get_cdata().s0).normf();
652
653 // if the final norm is small, perform the hartree product and return
654 const double norm=fnorm*gnorm; // computing the outer product
655 if (norm < thresh) return true;
656
657 // get the error of both functions and of the pair function
658 const double ferror=sqrt(fnorm*fnorm-sfnorm*sfnorm);
659 const double gerror=sqrt(gnorm*gnorm-sgnorm*sgnorm);
660
661 // if the expected error is small, perform the hartree product and return
662 const double error=fnorm*gerror + ferror*gnorm + ferror*gerror;
663 if (error < thresh) return true;
664
665 // now check if the norm of this and the norm of the operator are significant
666 const std::vector<Key<NDIM> >& disp = op->get_disp(key.level());
667 const Key<NDIM>& d = *disp.begin(); // use the zero-displacement for screening
668 const double opnorm = op->norm(key.level(), d, key);
669 const double final_norm=opnorm*sfnorm*sgnorm;
670 if (final_norm < thresh) return true;
671
672 return false;
673 }
674 template <typename Archive> void serialize (Archive& ar) {
675 ar & f & op;
676 }
677 };
678
679 template<typename T, size_t NDIM>
680 struct noop {
681 void operator()(const Key<NDIM>& key, const GenTensor<T>& coeff, const bool& is_leaf) const {}
682 bool operator()(const Key<NDIM>& key, const GenTensor<T>& fcoeff, const GenTensor<T>& gcoeff) const {
683 MADNESS_EXCEPTION("in noop::operator()",1);
684 return true;
685 }
686 template <typename Archive> void serialize (Archive& ar) {}
687
688 };
689
690 /// inserts/replaces the coefficients into the function
691 template<typename T, std::size_t NDIM>
692 struct insert_op {
693 typedef FunctionImpl<T,NDIM> implT;
694 typedef Key<NDIM> keyT;
695 typedef GenTensor<T> coeffT;
696 typedef FunctionNode<T,NDIM> nodeT;
697 implT* impl;
701 insert_op(const insert_op& other) : impl(other.impl) {}
702 void operator()(const keyT& key, const coeffT& coeff, const bool& is_leaf) const {
704 impl->get_coeffs().replace(key,nodeT(coeff,not is_leaf));
705 }
706 template <typename Archive> void serialize (Archive& ar) {
707 ar & impl;
708 }
709
710 };
711
712 /// inserts/accumulates coefficients into impl's tree
713
714 /// NOTE: uses the node-internal buffer, so consolidate_buffer() must be called after the operation has ended !! NOTE !!
715 template<typename T, std::size_t NDIM>
716 struct accumulate_op {
717 typedef GenTensor<T> coeffT;
718 typedef FunctionNode<T,NDIM> nodeT;
719 FunctionImpl<T,NDIM>* impl;
721 accumulate_op() = default;
722 accumulate_op(FunctionImpl<T,NDIM>* f) : impl(f) {}
723 accumulate_op(const accumulate_op& other) = default;
724 void operator()(const Key<NDIM>& key, const coeffT& coeff, const bool& is_leaf) const {
725 if (coeff.has_data())
726 impl->get_coeffs().task(key, &nodeT::accumulate, coeff, impl->get_coeffs(), key, impl->get_tensor_args());
727 }
728 template <typename Archive> void serialize (Archive& ar) {
729 ar & impl;
730 }
731
732 };
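// Added usage note (hedged sketch, not the library's canonical recipe): accumulate_op
// only queues FunctionNode::accumulate() tasks on impl's container, and contributions
// may be parked in each node's internal SVD buffer. After the operation has ended
// (e.g. after a fence) the buffers must be folded back node by node via
// FunctionNode::consolidate_buffer() with the same TensorArgs:
//
//   accumulate_op<double,3> acc(impl);     // impl: FunctionImpl<double,3>* (hypothetical)
//   acc(key, coeff, true);                 // queues nodeT::accumulate on impl's coeffs
//   // ... fence, then for every local node:
//   //   node.consolidate_buffer(impl->get_tensor_args());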
733
734
735template<size_t NDIM>
736 struct true_op {
737
738 template<typename T>
739 bool operator()(const Key<NDIM>& key, const T& t) const {return true;}
740
741 template<typename T, typename R>
742 bool operator()(const Key<NDIM>& key, const T& t, const R& r) const {return true;}
743 template <typename Archive> void serialize (Archive& ar) {}
744
745 };
746
747 /// shallow-copy, pared-down version of FunctionNode, for special purpose only
748 template<typename T, std::size_t NDIM>
749 struct ShallowNode {
750 typedef GenTensor<T> coeffT;
751 coeffT _coeffs;
752 bool _has_children;
753 double dnorm=-1.0;
754 ShallowNode() : _coeffs(), _has_children(false) {}
755 ShallowNode(const FunctionNode<T,NDIM>& node)
756 : _coeffs(node.coeff()), _has_children(node.has_children()),
757 dnorm(node.get_dnorm()) {}
758 ShallowNode(const ShallowNode<T,NDIM>& node)
759 : _coeffs(node.coeff()), _has_children(node._has_children),
760 dnorm(node.dnorm) {}
761
762 const coeffT& coeff() const {return _coeffs;}
763 coeffT& coeff() {return _coeffs;}
764 bool has_children() const {return _has_children;}
765 bool is_leaf() const {return not _has_children;}
766 template <typename Archive>
767 void serialize(Archive& ar) {
768 ar & coeff() & _has_children & dnorm;
769 }
770 };
771
772
773 /// a class to track where relevant (parent) coeffs are
774
775 /// E.g. if a 6D function is composed of two 3D functions their coefficients must be tracked.
776 /// We might need coeffs from a box that does not exist, and to avoid searching for
777 /// parents we keep track of which boxes actually hold the required coefficients.
778 /// - CoeffTracker will refer either to a requested key, if it exists, or to its
779 /// outermost parent.
780 /// - Children must be made in sequential order to be able to track correctly.
781 ///
782 /// Usage: 1. make the child of a given CoeffTracker.
783 /// If the parent CoeffTracker refers to a leaf node (flag is_leaf)
784 /// the child will refer to the same node. Otherwise it will refer
785 /// to the child node.
786 /// 2. retrieve its coefficients (possible communication/ returns a Future).
787 /// Member variable key always refers to an existing node,
788 /// so we can fetch it. Once we have the node we can determine
789 /// if it has children, which allows us to make a child (see 1.). A usage sketch follows the class body below.
790 template<typename T, size_t NDIM>
791 class CoeffTracker {
792
793 typedef FunctionImpl<T,NDIM> implT;
794 typedef Key<NDIM> keyT;
795 typedef GenTensor<T> coeffT;
796 typedef std::pair<Key<NDIM>,ShallowNode<T,NDIM> > datumT;
797 enum LeafStatus {no, yes, unknown};
798
799 /// the funcimpl that has the coeffs
800 const implT* impl;
801 /// the current key, which must exist in impl
802 keyT key_;
803 /// flag if key is a leaf node
804 LeafStatus is_leaf_;
805 /// the coefficients belonging to key
806 coeffT coeff_;
807 /// norm of d coefficients corresponding to key
808 double dnorm_=-1.0;
809
810 public:
811
812 /// default ctor
813 CoeffTracker() : impl(), key_(), is_leaf_(unknown), coeff_() {}
814
815 /// the initial ctor making the root key
816 CoeffTracker(const implT* impl) : impl(impl), is_leaf_(unknown) {
817 if (impl) key_=impl->get_cdata().key0;
818 }
819
820 /// ctor with a pair<keyT,nodeT>
821 explicit CoeffTracker(const CoeffTracker& other, const datumT& datum)
822 : impl(other.impl), key_(other.key_), coeff_(datum.second.coeff()),
823 dnorm_(datum.second.dnorm) {
824 if (datum.second.is_leaf()) is_leaf_=yes;
825 else is_leaf_=no;
826 }
827
828 /// copy ctor
829 CoeffTracker(const CoeffTracker& other) : impl(other.impl), key_(other.key_),
830 is_leaf_(other.is_leaf_), coeff_(other.coeff_), dnorm_(other.dnorm_) {};
831
832 /// const reference to impl
833 const implT* get_impl() const {return impl;}
834
835 /// const reference to the coeffs
836 const coeffT& coeff() const {return coeff_;}
837
838 /// const reference to the key
839 const keyT& key() const {return key_;}
840
841 /// return the coefficients belonging to the passed-in key
842
843 /// if key equals tracked key just return the coeffs, otherwise
844 /// make the child coefficients.
845 /// @param[in] key return coeffs corresponding to this key
846 /// @return coefficients belonging to key
854
855 /// return the s and dnorm belonging to the passed-in key
856 double dnorm(const keyT& key) const {
857 if (key==key_) return dnorm_;
858 MADNESS_ASSERT(key.is_child_of(key_));
859 return 0.0;
860 }
861
862 /// const reference to is_leaf flag
863 const LeafStatus& is_leaf() const {return is_leaf_;}
864
865 /// make a child of this, ignoring the coeffs
866 CoeffTracker make_child(const keyT& child) const {
867
868 // fast return
869 if ((not impl) or impl->is_on_demand()) return CoeffTracker(*this);
870
871 // can't make a child without knowing if this is a leaf -- activate first
872 MADNESS_ASSERT(is_leaf_ != unknown);
873
874 CoeffTracker result;
875 if (impl) {
876 result.impl=impl;
877 if (is_leaf_==yes) result.key_=key_;
878 if (is_leaf_==no) {
879 result.key_=child;
880 // check if child is direct descendent of this, but root node is special case
881 if (child.level()>0) MADNESS_ASSERT(result.key().level()==key().level()+1);
882 }
883 result.is_leaf_=unknown;
884 }
885 return result;
886 }
887
888 /// find the coefficients
889
890 /// this involves communication to a remote node
891 /// @return a Future<CoeffTracker> with the coefficients that key refers to
892 Future<CoeffTracker> activate() const {
893
894 // fast return
895 if (not impl) return Future<CoeffTracker>(CoeffTracker());
897
898 // this will return a <keyT,nodeT> from a remote node
901
902 // construct a new CoeffTracker locally
903 return impl->world.taskq.add(*const_cast<CoeffTracker*> (this),
904 &CoeffTracker::forward_ctor,*this,datum1);
905 }
906
907 private:
908 /// taskq-compatible forwarding to the ctor
909 CoeffTracker forward_ctor(const CoeffTracker& other, const datumT& datum) const {
910 return CoeffTracker(other,datum);
911 }
912
913 public:
914 /// serialization
915 template <typename Archive> void serialize(const Archive& ar) {
916 int il=int(is_leaf_);
917 ar & impl & key_ & il & coeff_ & dnorm_;
918 is_leaf_=LeafStatus(il);
919 }
920 };
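// Added usage sketch for CoeffTracker (names taken from this header; activate() is
// the fetch step reconstructed above and may involve remote communication; "f6_impl"
// is a hypothetical FunctionImpl<double,6>*):
//
//   CoeffTracker<double,6> root(f6_impl);                        // refers to impl's key0
//   Future<CoeffTracker<double,6> > active = root.activate();    // fetch node data
//   // once available: make children in sequential order and read their data, e.g.
//   //   CoeffTracker<double,6> child = active.get().make_child(childkey);
//   //   const GenTensor<double>& c = active.get().coeff();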
921
922 template<typename T, std::size_t NDIM>
923 std::ostream&
924 operator<<(std::ostream& s, const CoeffTracker<T,NDIM>& ct) {
925 s << ct.key() << ct.is_leaf() << " " << ct.get_impl();
926 return s;
927 }
928
929 /// FunctionImpl holds all Function state to facilitate shallow copy semantics
930
931 /// Since Function assignment and copy constructors are shallow it
932 /// greatly simplifies maintaining consistent state to have all
933 /// (permanent) state encapsulated in a single class. The state
934 /// is shared between instances using a shared_ptr<FunctionImpl>.
935 ///
936 /// The FunctionImpl inherits all of the functionality of WorldContainer
937 /// (to store the coefficients) and WorldObject<WorldContainer> (used
938 /// for RMI and for its unique id).
939 ///
940 /// The class methods are public to avoid painful multiple friend template
941 /// declarations for Function and FunctionImpl ... but this trust should not be
942 /// abused ... NOTHING except FunctionImpl methods should mess with FunctionImplData.
943 /// The LB stuff might have to be an exception.
944 template <typename T, std::size_t NDIM>
945 class FunctionImpl : public WorldObject< FunctionImpl<T,NDIM> > {
946 private:
947 typedef WorldObject< FunctionImpl<T,NDIM> > woT; ///< Base class world object type
948 public:
949 typedef T typeT;
950 typedef FunctionImpl<T,NDIM> implT; ///< Type of this class (implementation)
951 typedef std::shared_ptr< FunctionImpl<T,NDIM> > pimplT; ///< pointer to this class
952 typedef Tensor<T> tensorT; ///< Type of tensor for anything but to hold coeffs
953 typedef Vector<Translation,NDIM> tranT; ///< Type of array holding translation
954 typedef Key<NDIM> keyT; ///< Type of key
955 typedef FunctionNode<T,NDIM> nodeT; ///< Type of node
956 typedef GenTensor<T> coeffT; ///< Type of tensor used to hold coeffs
957 typedef WorldContainer<keyT,nodeT> dcT; ///< Type of container holding the coefficients
958 typedef std::pair<const keyT,nodeT> datumT; ///< Type of entry in container
959 typedef Vector<double,NDIM> coordT; ///< Type of vector holding coordinates
960
961 //template <typename Q, int D> friend class Function;
962 template <typename Q, std::size_t D> friend class FunctionImpl;
963
964 World& world;
965
966 /// getter
969 const std::vector<Vector<double,NDIM> >& get_special_points()const{return special_points;}
970
971 private:
972 int k; ///< Wavelet order
973 double thresh; ///< Screening threshold
974 int initial_level; ///< Initial level for refinement
975 int special_level; ///< Minimum level for refinement on special points
976 std::vector<Vector<double,NDIM> > special_points; ///< special points for further refinement (needed for composite functions or multiplication)
977 int max_refine_level; ///< Do not refine below this level
978 int truncate_mode; ///< 0=default=(|d|<thresh), 1=(|d|<thresh/2^n), 2=(|d|<thresh/4^n);
979 bool autorefine; ///< If true, autorefine where appropriate
980 bool truncate_on_project; ///< If true projection inserts at level n-1 not n
981 TensorArgs targs; ///< type of tensor to be used in the FunctionNodes
982
983 const FunctionCommonData<T,NDIM>& cdata;
984
985 std::shared_ptr< FunctionFunctorInterface<T,NDIM> > functor;
986 TreeState tree_state;
987
988 dcT coeffs; ///< The coefficients
989
990 // Disable the default copy constructor
991 FunctionImpl(const FunctionImpl<T,NDIM>& p);
992
993 public:
1002
1003 /// Initialize function impl from data in factory
1004 FunctionImpl(const FunctionFactory<T,NDIM>& factory)
1005 : WorldObject<implT>(factory._world)
1006 , world(factory._world)
1007 , k(factory._k)
1008 , thresh(factory._thresh)
1009 , initial_level(factory._initial_level)
1010 , special_level(factory._special_level)
1011 , special_points(factory._special_points)
1012 , max_refine_level(factory._max_refine_level)
1013 , truncate_mode(factory._truncate_mode)
1014 , autorefine(factory._autorefine)
1015 , truncate_on_project(factory._truncate_on_project)
1016// , nonstandard(false)
1017 , targs(factory._thresh,FunctionDefaults<NDIM>::get_tensor_type())
1018 , cdata(FunctionCommonData<T,NDIM>::get(k))
1019 , functor(factory.get_functor())
1020// , on_demand(factory._is_on_demand)
1021// , compressed(factory._compressed)
1022// , redundant(false)
1023 , tree_state(factory._tree_state)
1024 , coeffs(world,factory._pmap,false)
1025 //, bc(factory._bc)
1026 {
1027 // PROFILE_MEMBER_FUNC(FunctionImpl); // No need to profile this
1028 // !!! Ensure that all local state is correctly formed
1029 // before invoking process_pending for the coeffs and
1030 // for this. Otherwise, there is a race condition.
1031 MADNESS_ASSERT(k>0 && k<=MAXK);
1032
1033 bool empty = (factory._empty or is_on_demand());
1034 bool do_refine = factory._refine;
1035
1036 if (do_refine)
1037 initial_level = std::max(0,initial_level - 1);
1038
1039 if (empty) { // Do not set any coefficients at all
1040 // additional functors are only evaluated on-demand
1041 } else if (functor) { // Project function and optionally refine
1043 // set the union of the special points of functor and the ones explicitly given to FunctionFactory
1044 std::vector<coordT> functor_special_points=functor->special_points();
1045 if (!functor_special_points.empty()) special_points.insert(special_points.end(), functor_special_points.begin(), functor_special_points.end());
1046 // near special points refine as deeply as requested by the factory AND the functor
1047 special_level = std::max(special_level, functor->special_level());
1048
1049 typename dcT::const_iterator end = coeffs.end();
1050 for (typename dcT::const_iterator it=coeffs.begin(); it!=end; ++it) {
1051 if (it->second.is_leaf())
1052 woT::task(coeffs.owner(it->first), &implT::project_refine_op, it->first, do_refine,
1053 special_points);
1054 }
1055 }
1056 else { // Set as if a zero function
1057 initial_level = 1;
1058 insert_zero_down_to_initial_level(cdata.key0);
1059 }
1060
1061 coeffs.process_pending();
1062 this->process_pending();
1063 if (factory._fence && (functor || !empty)) world.gop.fence();
1064 }
1065
1066 /// Copy constructor
1067
1068 /// Allocates a \em new function in preparation for a deep copy
1069 ///
1070 /// By default takes pmap from other but can also specify a different pmap.
1071 /// Does \em not copy the coefficients ... creates an empty container.
1072 template <typename Q>
1073 FunctionImpl(const FunctionImpl<Q,NDIM>& other,
1074 const std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > >& pmap,
1075 bool dozero)
1076 : WorldObject<implT>(other.world)
1077 , world(other.world)
1078 , k(other.k)
1079 , thresh(other.thresh)
1080 , initial_level(other.initial_level)
1081 , special_level(other.special_level)
1082 , special_points(other.special_points)
1083 , max_refine_level(other.max_refine_level)
1084 , truncate_mode(other.truncate_mode)
1085 , autorefine(other.autorefine)
1086 , truncate_on_project(other.truncate_on_project)
1087 , targs(other.targs)
1088 , cdata(FunctionCommonData<T,NDIM>::get(k))
1089 , functor()
1090 , tree_state(other.tree_state)
1091 , coeffs(world, pmap ? pmap : other.coeffs.get_pmap())
1092 {
1093 if (dozero) {
1094 initial_level = 1;
1095 insert_zero_down_to_initial_level(cdata.key0);
1096 //world.gop.fence(); <<<<<<<<<<<<<<<<<<<<<< needs a fence argument
1097 }
1098 coeffs.process_pending();
1099 this->process_pending();
1100 }
1101
1102 virtual ~FunctionImpl() { }
1103
1104 const std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > >& get_pmap() const;
1105
1106 void replicate(bool fence=true) {
1107 coeffs.replicate(fence);
1108 }
1109
1110 void distribute(std::shared_ptr< WorldDCPmapInterface< Key<NDIM> > > newmap) const {
1111 auto currentmap=coeffs.get_pmap();
1112 currentmap->redistribute(world,newmap);
1113 }
1114
1115
1116 /// Copy coeffs from other into self
1117 template <typename Q>
1118 void copy_coeffs(const FunctionImpl<Q,NDIM>& other, bool fence) {
1119 typename FunctionImpl<Q,NDIM>::dcT::const_iterator end = other.coeffs.end();
1120 for (typename FunctionImpl<Q,NDIM>::dcT::const_iterator it=other.coeffs.begin();
1121 it!=end; ++it) {
1122 const keyT& key = it->first;
1123 const typename FunctionImpl<Q,NDIM>::nodeT& node = it->second;
1124 coeffs.replace(key,node. template convert<T>());
1125 }
1126 if (fence)
1127 world.gop.fence();
1128 }
1129
1130 /// perform inplace gaxpy: this = alpha*this + beta*other
1131 /// @param[in] alpha prefactor for this
1132 /// @param[in] beta prefactor for other
1133 /// @param[in] g the other function, reconstructed
1134 /// @return *this = alpha*this + beta*other, in either reconstructed or redundant_after_merge state
1135 template<typename Q, typename R>
1136 void gaxpy_inplace_reconstructed(const T& alpha, const FunctionImpl<Q,NDIM>& g, const R& beta, const bool fence) {
1137 // merge g's tree into this' tree
1138 this->merge_trees(beta,g,alpha,fence);
1139 // tree is now redundant_after_merge
1140 // sum down the sum coeffs into the leaves if possible to keep the state as clean as possible
1141 if (fence) sum_down(fence);
1142 }
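// Added hedged example: for two reconstructed impls f and g of the same type and
// pmap, the call below merges g's tree into f's and leaves f holding 1.0*f + 2.0*g
// (reconstructed, possibly redundant_after_merge as described above):
//
//   f.gaxpy_inplace_reconstructed(1.0, g, 2.0, /*fence=*/true);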
1143
1144 /// merge the trees of this and other, while multiplying them with the alpha or beta, resp
1145
1146 /// first step in an inplace gaxpy operation for reconstructed functions; assuming the same
1147 /// distribution for this and other
1148
1149 /// on output, *this = alpha* *this + beta * other
1150 /// @param[in] alpha prefactor for this
1151 /// @param[in] beta prefactor for other
1152 /// @param[in] other the other function, reconstructed
1153 template<typename Q, typename R>
1154 void merge_trees(const T alpha, const FunctionImpl<Q,NDIM>& other, const R beta, const bool fence=true) {
1155 MADNESS_ASSERT(get_pmap() == other.get_pmap());
1158 }
1159
1160 /// merge the trees of this and other, while multiplying them with the alpha or beta, resp
1161
1162 /// result and rhs do not have to have the same distribution or live in the same world
1163 /// result+=alpha* this
1164 /// @param[in] alpha prefactor for this
1165 template<typename Q, typename R>
1166 void accumulate_trees(FunctionImpl<Q,NDIM>& result, const R alpha, const bool fence=true) const {
1168 }
1169
1170 /// perform: this= alpha*f + beta*g, invoked by result
1171
1172 /// f and g are reconstructed, so we can save on the compress operation,
1173 /// walk down the joint tree, and add leaf coefficients; effectively refines
1174 /// to common finest level.
1175
1176 /// nothing returned, but leaves this's tree reconstructed and as sum of f and g
1177 /// @param[in] alpha prefactor for f
1178 /// @param[in] f first addend
1179 /// @param[in] beta prefactor for g
1180 /// @param[in] g second addend
1181 void gaxpy_oop_reconstructed(const double alpha, const implT& f,
1182 const double beta, const implT& g, const bool fence);
1183
1184 /// functor for the gaxpy_inplace method
1185 template <typename Q, typename R>
1186 struct do_gaxpy_inplace {
1187 typedef Range<typename FunctionImpl<Q,NDIM>::dcT::const_iterator> rangeT;
1188 FunctionImpl<T,NDIM>* f; ///< the current function impl
1189 T alpha; ///< prefactor for the current function impl
1190 R beta; ///< prefactor for the other function impl
1191 do_gaxpy_inplace() = default;
1192 do_gaxpy_inplace(FunctionImpl<T,NDIM>* f, T alpha, R beta) : f(f), alpha(alpha), beta(beta) {}
1193 bool operator()(typename rangeT::iterator& it) const {
1194 const keyT& key = it->first;
1195 const FunctionNode<Q,NDIM>& other_node = it->second;
1196 // Use send to get write accessor and automated construction if missing
1197 f->coeffs.send(key, &nodeT:: template gaxpy_inplace<Q,R>, alpha, other_node, beta);
1198 return true;
1199 }
1200 template <typename Archive>
1201 void serialize(Archive& ar) {
1202 ar & f & alpha & beta;
1203 }
1204 };
1205
1206 /// Inplace general bilinear operation
1207 /// @param[in] alpha prefactor for the current function impl
1208 /// @param[in] other the other function impl
1209 /// @param[in] beta prefactor for other
1210 template <typename Q, typename R>
1211 void gaxpy_inplace(const T& alpha,const FunctionImpl<Q,NDIM>& other, const R& beta, bool fence) {
1212// MADNESS_ASSERT(get_pmap() == other.get_pmap());
1213 if (alpha != T(1.0)) scale_inplace(alpha,false);
1214 typedef Range<typename FunctionImpl<Q,NDIM>::dcT::const_iterator> rangeT;
1215 typedef do_gaxpy_inplace<Q,R> opT;
1216 other.world.taskq. template for_each<rangeT,opT>(rangeT(other.coeffs.begin(), other.coeffs.end()), opT(this, T(1.0), beta));
1217 if (fence)
1218 other.world.gop.fence();
1219 }
1220
1221 // loads a function impl from persistence
1222 // @param[in] ar the archive where the function impl is stored
1223 template <typename Archive>
1224 void load(Archive& ar) {
1225 // WE RELY ON K BEING STORED FIRST
1226 int kk = 0;
1227 ar & kk;
1228
1229 MADNESS_ASSERT(kk==k);
1230
1231 // note that functor should not be (re)stored
1233 & autorefine & truncate_on_project & tree_state;//nonstandard & compressed ; //& bc;
1234
1235 ar & coeffs;
1236 world.gop.fence();
1237 }
1238
1239 // saves a function impl to persistence
1240 // @param[in] ar the archive where the function impl is to be stored
1241 template <typename Archive>
1242 void store(Archive& ar) {
1243 // WE RELY ON K BEING STORED FIRST
1244
1245 // note that functor should not be (re)stored
1247 & autorefine & truncate_on_project & tree_state;//nonstandard & compressed ; //& bc;
1248
1249 ar & coeffs;
1250 world.gop.fence();
1251 }
1252
1253 /// Returns true if the function is compressed.
1254 bool is_compressed() const;
1255
1256 /// Returns true if the function is reconstructed.
1257 bool is_reconstructed() const;
1258
1259 /// Returns true if the function is redundant.
1260 bool is_redundant() const;
1261
1262 bool is_nonstandard() const;
1263
1264 bool is_nonstandard_with_leaves() const;
1265
1266 bool is_on_demand() const;
1267
1268 bool has_leaves() const;
1269
1270 void set_tree_state(const TreeState& state) {
1271 tree_state=state;
1272 }
1273
1275
1276 void set_functor(const std::shared_ptr<FunctionFunctorInterface<T,NDIM> > functor1);
1277
1278 std::shared_ptr<FunctionFunctorInterface<T,NDIM> > get_functor();
1279
1280 std::shared_ptr<FunctionFunctorInterface<T,NDIM> > get_functor() const;
1281
1282 void unset_functor();
1283
1284
1286
1287 TensorArgs get_tensor_args() const;
1288 void set_tensor_args(const TensorArgs& t);
1289
1290 double get_thresh() const;
1291
1292 void set_thresh(double value);
1293
1294 bool get_autorefine() const;
1295
1296 void set_autorefine(bool value);
1297
1298 int get_k() const;
1299
1300 const dcT& get_coeffs() const;
1301
1302 dcT& get_coeffs();
1303
1304 const FunctionCommonData<T,NDIM>& get_cdata() const;
1305
1306 void accumulate_timer(const double time) const; // !!!!!!!!!!!! REDUNDANT !!!!!!!!!!!!!!!
1307
1308 void print_timer() const;
1309
1310 void reset_timer();
1311
1312 /// Adds a constant to the function. Local operation, optional fence
1313
1314 /// In scaling function basis must add value to first polyn in
1315 /// each box with appropriate scaling for level. In wavelet basis
1316 /// need only add at level zero.
1317 /// @param[in] t the scalar to be added
1318 void add_scalar_inplace(T t, bool fence);
1319
1320 /// Initialize nodes to zero function at initial_level of refinement.
1321
1322 /// Works for either basis. No communication.
1323 void insert_zero_down_to_initial_level(const keyT& key);
1324
1325 /// Truncate according to the threshold with optional global fence
1326
1327 /// If thresh<=0 the default value of this->thresh is used
1328 /// @param[in] tol the truncation tolerance
1329 void truncate(double tol, bool fence);
1330
1331 /// Returns true if after truncation this node has coefficients
1332
1333 /// Assumed to be invoked on process owning key. Possible non-blocking
1334 /// communication.
1335 /// @param[in] key the key of the current function node
1336 Future<bool> truncate_spawn(const keyT& key, double tol);
1337
1338 /// Actually do the truncate operation
1339 /// @param[in] key the key to the current function node being evaluated for truncation
1340 /// @param[in] tol the tolerance for thresholding
1341 /// @param[in] v vector of Future<bool>'s that specify whether the current nodes children have coeffs
1342 bool truncate_op(const keyT& key, double tol, const std::vector< Future<bool> >& v);
1343
1344 /// Evaluate function at quadrature points in the specified box
1345
1346 /// @param[in] key the key indicating where the quadrature points are located
1347 /// @param[in] f the interface to the elementary function
1348 /// @param[in] qx quadrature points on a level=0 box
1349 /// @param[out] fval values
1350 void fcube(const keyT& key, const FunctionFunctorInterface<T,NDIM>& f, const Tensor<double>& qx, tensorT& fval) const;
1351
1352 /// Evaluate function at quadrature points in the specified box
1353
1354 /// @param[in] key the key indicating where the quadrature points are located
1355 /// @param[in] f the interface to the elementary function
1356 /// @param[in] qx quadrature points on a level=0 box
1357 /// @param[out] fval values
1358 void fcube(const keyT& key, T (*f)(const coordT&), const Tensor<double>& qx, tensorT& fval) const;
1359
1360 /// Returns cdata.key0
1361 const keyT& key0() const;
1362
1363 /// Prints the coeffs tree of the current function impl
1364 /// @param[in] maxlevel the maximum level of the tree for printing
1365 /// @param[out] os the ostream to where the output is sent
1366 void print_tree(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1367
1368 /// Functor for the do_print_tree method
1369 void do_print_tree(const keyT& key, std::ostream& os, Level maxlevel) const;
1370
1371 /// Prints the coeffs tree of the current function impl (using GraphViz)
1372 /// @param[in] maxlevel the maximum level of the tree for printing
1373 /// @param[out] os the ostream to where the output is sent
1374 void print_tree_graphviz(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1375
1376 /// Functor for the do_print_tree method (using GraphViz)
1377 void do_print_tree_graphviz(const keyT& key, std::ostream& os, Level maxlevel) const;
1378
1379 /// Same as print_tree() but in JSON format
1380 /// @param[out] os the ostream to where the output is sent
1381 /// @param[in] maxlevel the maximum level of the tree for printing
1382 void print_tree_json(std::ostream& os = std::cout, Level maxlevel = 10000) const;
1383
1384 /// Functor for the do_print_tree_json method
1385 void do_print_tree_json(const keyT& key, std::multimap<Level, std::tuple<tranT, std::string>>& data, Level maxlevel) const;
1386
1387 /// convert a number [0,limit] to a hue color code [blue,red],
1388 /// or, if log is set, a number [1.e-10,limit]
1389 struct do_convert_to_color {
1390 double limit;
1391 bool log;
1392 static double lower() {return 1.e-10;};
1394 do_convert_to_color(const double limit, const bool log) : limit(limit), log(log) {}
1395 double operator()(double val) const {
1396 double color=0.0;
1397
1398 if (log) {
1399 double val2=log10(val) - log10(lower()); // will yield >0.0
1400 double upper=log10(limit) -log10(lower());
1401 val2=0.7-(0.7/upper)*val2;
1402 color= std::max(0.0,val2);
1403 color= std::min(0.7,color);
1404 } else {
1405 double hue=0.7-(0.7/limit)*(val);
1406 color= std::max(0.0,hue);
1407 }
1408 return color;
1409 }
1410 };
1411
1412
1413 /// Print a plane ("xy", "xz", or "yz") containing the point x to file
1414
1415 /// works for all dimensions; we walk through the tree, and if a leaf node
1416 /// inside the sub-cell touches the plane we print it in pstricks format
1417 void print_plane(const std::string filename, const int xaxis, const int yaxis, const coordT& el2);
1418
1419 /// collect the data for a plot of the MRA structure locally on each node
1420
1421 /// @param[in] xaxis the x-axis in the plot (can be any axis of the MRA box)
1422 /// @param[in] yaxis the y-axis in the plot (can be any axis of the MRA box)
1423 /// @param[in] el2 needs a description
1424 /// \todo Provide a description for el2
1425 Tensor<double> print_plane_local(const int xaxis, const int yaxis, const coordT& el2);
1426
1427 /// Functor for the print_plane method
1428 /// @param[in] filename the filename for the output
1429 /// @param[in] plotinfo plotting parameters
1430 /// @param[in] xaxis the x-axis in the plot (can be any axis of the MRA box)
1431 /// @param[in] yaxis the y-axis in the plot (can be any axis of the MRA box)
1432 void do_print_plane(const std::string filename, std::vector<Tensor<double> > plotinfo,
1433 const int xaxis, const int yaxis, const coordT el2);
1434
1435 /// print the grid (the roots of the quadrature of each leaf box)
1436 /// of this function in user xyz coordinates
1437 /// @param[in] filename the filename for the output
1438 void print_grid(const std::string filename) const;
1439
1440 /// return the keys of the local leaf boxes
1441 std::vector<keyT> local_leaf_keys() const;
1442
1443 /// print the grid in xyz format
1444
1445 /// the quadrature points and the key information will be written to file,
1446 /// @param[in] filename where the quadrature points will be written to
1447 /// @param[in] keys all leaf keys
1448 void do_print_grid(const std::string filename, const std::vector<keyT>& keys) const;
1449
1450 /// read data from a grid
1451
1452 /// @param[in] keyfile file with keys and grid points for each key
1453 /// @param[in] gridfile file with grid points, w/o key, but with same ordering
1454 /// @param[in] vnuc_functor subtract the values of this functor if regularization is needed
1455 template<size_t FDIM>
1456 typename std::enable_if<NDIM==FDIM>::type
1457 read_grid(const std::string keyfile, const std::string gridfile,
1458 std::shared_ptr< FunctionFunctorInterface<double,NDIM> > vnuc_functor) {
1459
1460 std::ifstream kfile(keyfile.c_str());
1461 std::ifstream gfile(gridfile.c_str());
1462 std::string line;
1463
1464 long ndata,ndata1;
1465 if (not (std::getline(kfile,line))) MADNESS_EXCEPTION("failed reading 1st line of key data",0);
1466 if (not (std::istringstream(line) >> ndata)) MADNESS_EXCEPTION("failed reading k",0);
1467 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 1st line of grid data",0);
1468 if (not (std::istringstream(line) >> ndata1)) MADNESS_EXCEPTION("failed reading k",0);
1469 MADNESS_CHECK(ndata==ndata1);
1470 if (not (std::getline(kfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of key data",0);
1471 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of grid data",0);
1472
1473 // the quadrature points in simulation coordinates of the root node
1474 const Tensor<double> qx=cdata.quad_x;
1475 const size_t npt = qx.dim(0);
1476
1477 // the number of coordinates (grid point tuples) per box ({x1},{x2},{x3},..,{xNDIM})
1478 long npoints=power<NDIM>(npt);
1479 // the number of boxes
1480 long nboxes=ndata/npoints;
1481 MADNESS_ASSERT(nboxes*npoints==ndata);
1482 print("reading ",nboxes,"boxes from file",gridfile,keyfile);
1483
1484 // these will be the data
1485 Tensor<T> values(cdata.vk,false);
1486
1487 int ii=0;
1488 std::string gline,kline;
1489 // while (1) {
1490 while (std::getline(kfile,kline)) {
1491
1492 double x,y,z,x1,y1,z1,val;
1493
1494 // get the key
1495 long nn;
1496 Translation l1,l2,l3;
1497 // line looks like: # key: n l1 l2 l3
1498 kline.erase(0,7);
1499 std::stringstream(kline) >> nn >> l1 >> l2 >> l3;
1500 // kfile >> s >> nn >> l1 >> l2 >> l3;
1501 const Vector<Translation,3> ll{ l1,l2,l3 };
1502 Key<3> key(nn,ll);
1503
1504 // this is borrowed from fcube
1505 const Vector<Translation,3>& l = key.translation();
1506 const Level n = key.level();
1507 const double h = std::pow(0.5,double(n));
1508 coordT c; // will hold the point in user coordinates
1509 const Tensor<double>& cell_width=FunctionDefaults<NDIM>::get_cell_width();
1510 const Tensor<double>& cell=FunctionDefaults<NDIM>::get_cell();
1511
1512
1513 if (NDIM == 3) {
1514 for (size_t i=0; i<npt; ++i) {
1515 c[0] = cell(0,0) + h*cell_width[0]*(l[0] + qx(i)); // x
1516 for (size_t j=0; j<npt; ++j) {
1517 c[1] = cell(1,0) + h*cell_width[1]*(l[1] + qx(j)); // y
1518 for (size_t k=0; k<npt; ++k) {
1519 c[2] = cell(2,0) + h*cell_width[2]*(l[2] + qx(k)); // z
1520 // fprintf(pFile,"%18.12f %18.12f %18.12f\n",c[0],c[1],c[2]);
1521 auto& success1 = std::getline(gfile,gline); MADNESS_CHECK(success1);
1522 auto& success2 = std::getline(kfile,kline); MADNESS_CHECK(success2);
1523 std::istringstream(gline) >> x >> y >> z >> val;
1524 std::istringstream(kline) >> x1 >> y1 >> z1;
1525 MADNESS_CHECK(std::fabs(x-c[0])<1.e-4);
1526 MADNESS_CHECK(std::fabs(x1-c[0])<1.e-4);
1527 MADNESS_CHECK(std::fabs(y-c[1])<1.e-4);
1528 MADNESS_CHECK(std::fabs(y1-c[1])<1.e-4);
1529 MADNESS_CHECK(std::fabs(z-c[2])<1.e-4);
1530 MADNESS_CHECK(std::fabs(z1-c[2])<1.e-4);
1531
1532 // regularize if a functor is given
1533 if (vnuc_functor) val-=(*vnuc_functor)(c);
1534 values(i,j,k)=val;
1535 }
1536 }
1537 }
1538 } else {
1539 MADNESS_EXCEPTION("only NDIM=3 in print_grid",0);
1540 }
1541
1542 // insert the new leaf node
1543 const bool has_children=false;
1544 coeffT coeff=coeffT(this->values2coeffs(key,values),targs);
1545 nodeT node(coeff,has_children);
1546 coeffs.replace(key,node);
1548 ii++;
1549 }
1550
1551 kfile.close();
1552 gfile.close();
1553 MADNESS_CHECK(ii==nboxes);
1554
1555 }
1556
1557
1558 /// read data from a grid
1559
1560 /// @param[in] gridfile file with keys and grid points and values for each key
1561 /// @param[in] vnuc_functor subtract the values of this functor if regularization is needed
1562 template<size_t FDIM>
1563 typename std::enable_if<NDIM==FDIM>::type
1564 read_grid2(const std::string gridfile,
1565 std::shared_ptr< FunctionFunctorInterface<double,NDIM> > vnuc_functor) {
1566
1567 std::ifstream gfile(gridfile.c_str());
1568 std::string line;
1569
1570 long ndata;
1571 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 1st line of grid data",0);
1572 if (not (std::istringstream(line) >> ndata)) MADNESS_EXCEPTION("failed reading k",0);
1573 if (not (std::getline(gfile,line))) MADNESS_EXCEPTION("failed reading 2nd line of grid data",0);
1574
1575 // the quadrature points in simulation coordinates of the root node
1576 const Tensor<double> qx=cdata.quad_x;
1577 const size_t npt = qx.dim(0);
1578
1579 // the number of coordinates (grid point tuples) per box ({x1},{x2},{x3},..,{xNDIM})
1580 long npoints=power<NDIM>(npt);
1581 // the number of boxes
1582 long nboxes=ndata/npoints;
1583 MADNESS_CHECK(nboxes*npoints==ndata);
1584 print("reading ",nboxes,"boxes from file",gridfile);
1585
1586 // these will be the data
1587 Tensor<T> values(cdata.vk,false);
1588
1589 int ii=0;
1590 std::string gline;
1591 // while (1) {
1592 while (std::getline(gfile,gline)) {
1593
1594 double x1,y1,z1,val;
1595
1596 // get the key
1597 long nn;
1598 Translation l1,l2,l3;
1599 // line looks like: # key: n l1 l2 l3
1600 gline.erase(0,7);
1601 std::stringstream(gline) >> nn >> l1 >> l2 >> l3;
1602 const Vector<Translation,3> ll{ l1,l2,l3 };
1603 Key<3> key(nn,ll);
1604
1605 // this is borrowed from fcube
1606 const Vector<Translation,3>& l = key.translation();
1607 const Level n = key.level();
1608 const double h = std::pow(0.5,double(n));
1609 coordT c; // will hold the point in user coordinates
1610 const Tensor<double>& cell_width=FunctionDefaults<NDIM>::get_cell_width();
1611 const Tensor<double>& cell=FunctionDefaults<NDIM>::get_cell();
1612
1613
1614 if (NDIM == 3) {
1615 for (int i=0; i<npt; ++i) {
1616 c[0] = cell(0,0) + h*cell_width[0]*(l[0] + qx(i)); // x
1617 for (int j=0; j<npt; ++j) {
1618 c[1] = cell(1,0) + h*cell_width[1]*(l[1] + qx(j)); // y
1619 for (int k=0; k<npt; ++k) {
1620 c[2] = cell(2,0) + h*cell_width[2]*(l[2] + qx(k)); // z
1621
1622 auto& success = std::getline(gfile,gline);
1623 MADNESS_CHECK(success);
1624 std::istringstream(gline) >> x1 >> y1 >> z1 >> val;
1625 MADNESS_CHECK(std::fabs(x1-c[0])<1.e-4);
1626 MADNESS_CHECK(std::fabs(y1-c[1])<1.e-4);
1627 MADNESS_CHECK(std::fabs(z1-c[2])<1.e-4);
1628
1629 // regularize if a functor is given
1630 if (vnuc_functor) val-=(*vnuc_functor)(c);
1631 values(i,j,k)=val;
1632 }
1633 }
1634 }
1635 } else {
1636 MADNESS_EXCEPTION("only NDIM=3 in print_grid",0);
1637 }
1638
1639 // insert the new leaf node
1640 const bool has_children=false;
1641 coeffT coeff=coeffT(this->values2coeffs(key,values),targs);
1642 nodeT node(coeff,has_children);
1643 coeffs.replace(key,node);
1644 const_cast<dcT&>(coeffs).send(key.parent(),
1645 &FunctionNode<T,NDIM>::set_has_children_recursive,
1646 coeffs, key.parent());
1647 ii++;
1648 }
1649
1650 gfile.close();
1651 MADNESS_CHECK(ii==nboxes);
1652
1653 }
1654
1655
1656 /// Compute by projection the scaling function coeffs in specified box
1657 /// @param[in] key the key to the current function node (box)
1658 tensorT project(const keyT& key) const;
1659
1660 /// Returns the truncation threshold according to truncate_method
1661
1662 /// here is our handwaving argument:
1663 /// this threshold will give each FunctionNode an error of less than tol. The
1664 /// total error can then be as high as sqrt(#nodes) * tol. Therefore in order
1665 /// to account for higher dimensions: divide tol by about the square root of the number
1666 /// of siblings (2^NDIM) that have a large error when we refine along a deep
1667 /// branch of the tree.
1668 double truncate_tol(double tol, const keyT& key) const;
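// Added hedged illustration, based only on the truncate_mode documentation above
// (0: |d|<thresh, 1: |d|<thresh/2^n, 2: |d|<thresh/4^n); the actual implementation
// lives elsewhere in MADNESS and may apply additional cell-size factors:
//
//   const Level n = key.level();
//   if (truncate_mode == 1) tol *= std::pow(0.5, double(n));    // thresh/2^n
//   if (truncate_mode == 2) tol *= std::pow(0.25, double(n));   // thresh/4^n
//   return tol;                                                 // mode 0: unchanged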
1669
1670
1671 /// Returns patch referring to coeffs of child in parent box
1672 /// @param[in] child the key to the child function node (box)
1673 std::vector<Slice> child_patch(const keyT& child) const;
1674
1675 /// Projection with optional refinement w/ special points
1676 /// @param[in] key the key to the current function node (box)
1677 /// @param[in] do_refine should we continue refinement?
1678 /// @param[in] specialpts vector of special points in the function where we need
1679 /// to refine at a much finer level
1680 void project_refine_op(const keyT& key, bool do_refine,
1681 const std::vector<Vector<double,NDIM> >& specialpts);
1682
1683 /// Compute the Legendre scaling functions for multiplication
1684
1685 /// Evaluate parent polyn at quadrature points of a child. The prefactor of
1686 /// 2^(n/2) is included. The tensor must be preallocated as phi(k,npt).
1687 /// Refer to the implementation notes for more info.
1688 /// @todo Robert please verify this comment. I don't understand this method.
1689 /// @param[in] np level of the parent function node (box)
1690 /// @param[in] nc level of the child function node (box)
1691 /// @param[in] lp translation of the parent function node (box)
1692 /// @param[in] lc translation of the child function node (box)
1693 /// @param[out] phi tensor of the Legendre scaling functions
1694 void phi_for_mul(Level np, Translation lp, Level nc, Translation lc, Tensor<double>& phi) const;
1695
1696 /// Directly project parent coeffs to child coeffs
1697
1698 /// Currently used by diff, but other uses can be anticipated
1699
1700 /// @todo is this documentation correct?
1701 /// @param[in] child the key whose coeffs we are requesting
1702 /// @param[in] parent the (leaf) key of our function
1703 /// @param[in] s the (leaf) coeffs belonging to parent
1704 /// @return coeffs
1705 const coeffT parent_to_child(const coeffT& s, const keyT& parent, const keyT& child) const;
1706
1707 /// Directly project parent NS coeffs to child NS coeffs
1708
1709 /// return the NS coefficients if parent and child are the same,
1710 /// or construct sum coeffs from the parents and "add" zero wavelet coeffs
1711 /// @param[in] child the key whose coeffs we are requesting
1712 /// @param[in] parent the (leaf) key of our function
1713 /// @param[in] coeff the (leaf) coeffs belonging to parent
1714 /// @return coeffs in NS form
1715 coeffT parent_to_child_NS(const keyT& child, const keyT& parent,
1716 const coeffT& coeff) const;
1717
1718 /// Return the values when given the coeffs in scaling function basis
1719 /// @param[in] key the key of the function node (box)
1720 /// @param[in] coeff the tensor of scaling function coefficients for function node (box)
1721 /// @return function values for function node (box)
1722 template <typename Q>
1723 GenTensor<Q> coeffs2values(const keyT& key, const GenTensor<Q>& coeff) const {
1724 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1725 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1726 return transform(coeff,cdata.quad_phit).scale(scale);
1727 }
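 // Illustrative round trip (values2coeffs() below applies the inverse scaling and the
 // transposed transform, so the two routines are numerical inverses):
 //
 //   coeffT values = coeffs2values(key, coeff);
 //   coeffT back   = values2coeffs(key, values);   // back ~= coeff up to round-off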
1728
1729 /// convert S or NS coeffs to values on a 2k grid of the children
1730
1731 /// equivalent to unfiltering the NS coeffs and then converting all child S-coeffs
1732 /// to values in their respective boxes. If only S coeffs are provided d coeffs are
1733 /// assumed to be zero. Reverse operation to values2NScoeffs().
1734 /// @param[in] key the key of the current S or NS coeffs, level n
1735 /// @param[in] coeff coeffs in S or NS form; if S then d coeffs are assumed zero
1736 /// @param[in] s_only sanity check to avoid unintended discard of d coeffs
1737 /// @return function values on the quadrature points of the children of key (!)
1738 template <typename Q>
1739 GenTensor<Q> NScoeffs2values(const keyT& key, const GenTensor<Q>& coeff,
1740 const bool s_only) const {
1741 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1742
1743 // sanity checks
1744 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) == s_only);
1745 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) or (coeff.dim(0)==2*this->get_k()));
1746
1747 // this is a block-diagonal matrix with the quadrature points on the diagonal
1748 Tensor<double> quad_phit_2k(2*cdata.k,2*cdata.npt);
1749 quad_phit_2k(cdata.s[0],cdata.s[0])=cdata.quad_phit;
1750 quad_phit_2k(cdata.s[1],cdata.s[1])=cdata.quad_phit;
1751
1752 // the transformation matrix unfilters (cdata.hg) and transforms to values in one step
1753 const Tensor<double> transf = (s_only)
1754 ? inner(cdata.hg(Slice(0,k-1),_),quad_phit_2k) // S coeffs
1755 : inner(cdata.hg,quad_phit_2k); // NS coeffs
1756
1757 // increment the level since the coeffs2values part happens on level n+1
1758 const double scale = pow(2.0,0.5*NDIM*(key.level()+1))/
1759 sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1760
1761 return transform(coeff,transf).scale(scale);
1762 }
1763
1764 /// Compute the function values for multiplication
1765
1766 /// Given S or NS coefficients from a parent cell, compute the value of
1767 /// the functions at the quadrature points of a child
1768 /// currently restricted to special cases
1769 /// @param[in] child key of the box in which we compute values
1770 /// @param[in] parent key of the parent box holding the coeffs
1771 /// @param[in] coeff coeffs of the parent box
1772 /// @param[in] s_only sanity check to avoid unintended discard of d coeffs
1773 /// @return function values on the quadrature points of the children of child (!)
1774 template <typename Q>
1775 GenTensor<Q> NS_fcube_for_mul(const keyT& child, const keyT& parent,
1776 const GenTensor<Q>& coeff, const bool s_only) const {
1777 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1778
1779 // sanity checks
1780 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) == s_only);
1781 MADNESS_ASSERT((coeff.dim(0)==this->get_k()) or (coeff.dim(0)==2*this->get_k()));
1782
1783 // fast return if possible
1784 // if (child.level()==parent.level()) return NScoeffs2values(child,coeff,s_only);
1785
1786 if (s_only) {
1787
1788 Tensor<double> quad_phi[NDIM];
1789 // tmp tensor
1790 Tensor<double> phi1(cdata.k,cdata.npt);
1791
1792 for (std::size_t d=0; d<NDIM; ++d) {
1793
1794 // input is S coeffs (dimension k), output is values on 2*npt grid points
1795 quad_phi[d]=Tensor<double>(cdata.k,2*cdata.npt);
1796
1797 // for both children of "child" evaluate the Legendre polynomials
1798 // first the left child on level n+1 and translations 2l
1799 phi_for_mul(parent.level(),parent.translation()[d],
1800 child.level()+1, 2*child.translation()[d], phi1);
1801 quad_phi[d](_,Slice(0,k-1))=phi1;
1802
1803 // next the right child on level n+1 and translations 2l+1
1804 phi_for_mul(parent.level(),parent.translation()[d],
1805 child.level()+1, 2*child.translation()[d]+1, phi1);
1806 quad_phi[d](_,Slice(k,2*k-1))=phi1;
1807 }
1808
1809 const double scale = 1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1810 return general_transform(coeff,quad_phi).scale(scale);
1811 }
1812 MADNESS_EXCEPTION("you should not be here in NS_fcube_for_mul",1);
1813 return GenTensor<Q>();
1814 }
1815
1816 /// convert function values of a child generation directly to NS coeffs
1817
1818 /// equivalent to converting the function values to 2^NDIM S coeffs and then
1819 /// filtering them to NS coeffs. Reverse operation to NScoeffs2values().
1820 /// @param[in] key key of the parent of the generation
1821 /// @param[in] values tensor holding function values of the 2^NDIM children of key
1822 /// @return NS coeffs belonging to key
1823 template <typename Q>
1824 GenTensor<Q> values2NScoeffs(const keyT& key, const GenTensor<Q>& values) const {
1825 //PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1826
1827 // sanity checks
1828 MADNESS_ASSERT(values.dim(0)==2*this->get_k());
1829
1830 // this is a block-diagonal matrix with the quadrature points on the diagonal
1831 Tensor<double> quad_phit_2k(2*cdata.npt,2*cdata.k);
1832 quad_phit_2k(cdata.s[0],cdata.s[0])=cdata.quad_phiw;
1833 quad_phit_2k(cdata.s[1],cdata.s[1])=cdata.quad_phiw;
1834
1835 // the transformation matrix maps the values to coefficients and filters (cdata.hgT) in one step
1836 const Tensor<double> transf=inner(quad_phit_2k,cdata.hgT);
1837
1838 // increment the level since the values2coeffs part happens on level n+1
1839 const double scale = pow(0.5,0.5*NDIM*(key.level()+1))
1840 *sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1841
1842 return transform(values,transf).scale(scale);
1843 }
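 // Illustrative round trip with NScoeffs2values(), which this routine inverts:
 //
 //   coeffT child_values = NScoeffs2values(key, ns_coeff, false); // values of the 2^NDIM children
 //   coeffT ns_back      = values2NScoeffs(key, child_values);    // ns_back ~= ns_coeff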
1844
1845 /// Return the function values at the quadrature points when given the scaling function coeffs
1846 /// @param[in] key the key of the function node (box)
1847 /// @return function values for function node (box)
1848 template <typename Q>
1849 Tensor<Q> coeffs2values(const keyT& key, const Tensor<Q>& coeff) const {
1850 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1851 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1852 return transform(coeff,cdata.quad_phit).scale(scale);
1853 }
1854
1855 template <typename Q>
1856 GenTensor<Q> values2coeffs(const keyT& key, const GenTensor<Q>& values) const {
1857 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1858 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1859 return transform(values,cdata.quad_phiw).scale(scale);
1860 }
1861
1862 template <typename Q>
1863 Tensor<Q> values2coeffs(const keyT& key, const Tensor<Q>& values) const {
1864 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1865 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1866 return transform(values,cdata.quad_phiw).scale(scale);
1867 }
1868
1869 /// Compute the function values for multiplication
1870
1871 /// Given coefficients from a parent cell, compute the value of
1872 /// the functions at the quadrature points of a child
1873 /// @param[in] child the key for the child function node (box)
1874 /// @param[in] parent the key for the parent function node (box)
1875 /// @param[in] coeff the coefficients of scaling function basis of the parent box
1876 template <typename Q>
1877 Tensor<Q> fcube_for_mul(const keyT& child, const keyT& parent, const Tensor<Q>& coeff) const {
1878 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1879 if (child.level() == parent.level()) {
1880 return coeffs2values(parent, coeff);
1881 }
1882 else if (child.level() < parent.level()) {
1883 MADNESS_EXCEPTION("FunctionImpl: fcube_for_mul: child-parent relationship bad?",0);
1884 }
1885 else {
1886 Tensor<double> phi[NDIM];
1887 for (std::size_t d=0; d<NDIM; ++d) {
1888 phi[d] = Tensor<double>(cdata.k,cdata.npt);
1889 phi_for_mul(parent.level(),parent.translation()[d],
1890 child.level(), child.translation()[d], phi[d]);
1891 }
1892 return general_transform(coeff,phi).scale(1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume()));
1893 }
1894 }
1895
1896
1897 /// Compute the function values for multiplication
1898
1899 /// Given coefficients from a parent cell, compute the value of
1900 /// the functions at the quadrature points of a child
1901 /// @param[in] child the key for the child function node (box)
1902 /// @param[in] parent the key for the parent function node (box)
1903 /// @param[in] coeff the coefficients of scaling function basis of the parent box
1904 template <typename Q>
1905 GenTensor<Q> fcube_for_mul(const keyT& child, const keyT& parent, const GenTensor<Q>& coeff) const {
1906 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1907 if (child.level() == parent.level()) {
1908 return coeffs2values(parent, coeff);
1909 }
1910 else if (child.level() < parent.level()) {
1911 MADNESS_EXCEPTION("FunctionImpl: fcube_for_mul: child-parent relationship bad?",0);
1912 }
1913 else {
1914 Tensor<double> phi[NDIM];
1915 for (size_t d=0; d<NDIM; d++) {
1916 phi[d] = Tensor<double>(cdata.k,cdata.npt);
1917 phi_for_mul(parent.level(),parent.translation()[d],
1918 child.level(), child.translation()[d], phi[d]);
1919 }
1920 return general_transform(coeff,phi).scale(1.0/sqrt(FunctionDefaults<NDIM>::get_cell_volume()));
1921 }
1922 }
1923
1924
1925 /// Functor for the mul method
1926 template <typename L, typename R>
1927 void do_mul(const keyT& key, const Tensor<L>& left, const std::pair< keyT, Tensor<R> >& arg) {
1928 // PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1929 const keyT& rkey = arg.first;
1930 const Tensor<R>& rcoeff = arg.second;
1931 //madness::print("do_mul: r", rkey, rcoeff.size());
1932 Tensor<R> rcube = fcube_for_mul(key, rkey, rcoeff);
1933 //madness::print("do_mul: l", key, left.size());
1934 Tensor<L> lcube = fcube_for_mul(key, key, left);
1935
1936 Tensor<T> tcube(cdata.vk,false);
1937 TERNARY_OPTIMIZED_ITERATOR(T, tcube, L, lcube, R, rcube, *_p0 = *_p1 * *_p2;);
1938 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1939 tcube = transform(tcube,cdata.quad_phiw).scale(scale);
1940 coeffs.replace(key, nodeT(coeffT(tcube,targs),false));
1941 }
1942
1943
1944 /// multiply the values of two coefficient tensors using a custom number of grid points
1945
1946 /// note both coefficient tensors have to refer to the same key!
1947 /// @param[in] c1 a tensor holding coefficients
1948 /// @param[in] c2 another tensor holding coeffs
1949 /// @param[in] npt number of grid points (optional, default is cdata.npt)
1950 /// @return coefficient tensor holding the product of the values of c1 and c2
1951 template<typename R>
1952 Tensor<TENSOR_RESULT_TYPE(T,R)> mul(const Tensor<T>& c1, const Tensor<R>& c2,
1953 const int npt, const keyT& key) const {
1954 typedef TENSOR_RESULT_TYPE(T,R) resultT;
1955
1956 const FunctionCommonData<T,NDIM>& cdata2=FunctionCommonData<T,NDIM>::get(npt);
1957
1958 // construct a tensor with the npt coeffs
1959 Tensor<T> c11(cdata2.vk), c22(cdata2.vk);
1960 c11(this->cdata.s0)=c1;
1961 c22(this->cdata.s0)=c2;
1962
1963 // it's sufficient to scale once
1964 double scale = pow(2.0,0.5*NDIM*key.level())/sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1965 Tensor<T> c1value=transform(c11,cdata2.quad_phit).scale(scale);
1966 Tensor<R> c2value=transform(c22,cdata2.quad_phit);
1967 Tensor<resultT> resultvalue(cdata2.vk,false);
1968 TERNARY_OPTIMIZED_ITERATOR(resultT, resultvalue, T, c1value, R, c2value, *_p0 = *_p1 * *_p2;);
1969
1970 Tensor<resultT> result=transform(resultvalue,cdata2.quad_phiw);
1971
1972 // return a copy of the slice to have the tensor contiguous
1973 return copy(result(this->cdata.s0));
1974 }
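 // Example (a sketch; the oversampling factor is illustrative, not prescribed):
 // multiply two same-box coefficient tensors on an oversampled grid, e.g.
 //
 //   Tensor<T> prod = mul(c1, c2, 2*cdata.npt, key);  // c1, c2: k^NDIM coeffs of box `key`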
1975
1976
1977 /// Functor for the binary_op method
1978 template <typename L, typename R, typename opT>
1979 void do_binary_op(const keyT& key, const Tensor<L>& left,
1980 const std::pair< keyT, Tensor<R> >& arg,
1981 const opT& op) {
1982 //PROFILE_MEMBER_FUNC(FunctionImpl); // Too fine grain for routine profiling
1983 const keyT& rkey = arg.first;
1984 const Tensor<R>& rcoeff = arg.second;
1985 Tensor<R> rcube = fcube_for_mul(key, rkey, rcoeff);
1986 Tensor<L> lcube = fcube_for_mul(key, key, left);
1987
1988 Tensor<T> tcube(cdata.vk,false);
1989 op(key, tcube, lcube, rcube);
1990 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
1991 tcube = transform(tcube,cdata.quad_phiw).scale(scale);
1992 coeffs.replace(key, nodeT(coeffT(tcube,targs),false));
1993 }
1994
1995 /// Invoked by result to perform result += alpha*left+beta*right in wavelet basis
1996
1997 /// Does not assume that any of result, left, right have the same distribution.
1998 /// For most purposes result will start out empty, so this effectively implements an
1999 /// out-of-place gaxpy. If all functions have the same distribution there is
2000 /// no communication except for the optional fence.
2001 template <typename L, typename R>
2002 void gaxpy(T alpha, const FunctionImpl<L,NDIM>& left,
2003 T beta, const FunctionImpl<R,NDIM>& right, bool fence) {
2004 // Loop over local nodes in both functions, adding alpha*left and beta*right.
2005 // Not that efficient in terms of memory bandwidth but ensures we do
2006 // not miss any nodes.
2007 typename FunctionImpl<L,NDIM>::dcT::const_iterator left_end = left.coeffs.end();
2008 for (typename FunctionImpl<L,NDIM>::dcT::const_iterator it=left.coeffs.begin();
2009 it!=left_end;
2010 ++it) {
2011 const keyT& key = it->first;
2012 const typename FunctionImpl<L,NDIM>::nodeT& other_node = it->second;
2013 coeffs.send(key, &nodeT:: template gaxpy_inplace<T,L>, 1.0, other_node, alpha);
2014 }
2015 typename FunctionImpl<R,NDIM>::dcT::const_iterator right_end = right.coeffs.end();
2016 for (typename FunctionImpl<R,NDIM>::dcT::const_iterator it=right.coeffs.begin();
2017 it!=right_end;
2018 ++it) {
2019 const keyT& key = it->first;
2020 const typename FunctionImpl<R,NDIM>::nodeT& other_node = it->second;
2021 coeffs.send(key, &nodeT:: template gaxpy_inplace<T,R>, 1.0, other_node, beta);
2022 }
2023 if (fence)
2024 world.gop.fence();
2025 }
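 // Example (sketch): form the difference of two functions of the same type;
 // `result`, `left` and `right` stand for existing FunctionImpl objects:
 //
 //   result.gaxpy(1.0, left, -1.0, right, true);   // result += 1.0*left - 1.0*right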
2026
2027 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2028 /// @param[in] op the unary operator for the coefficients
2029 template <typename opT>
2030 void unary_op_coeff_inplace(const opT& op, bool fence) {
2031 typename dcT::iterator end = coeffs.end();
2032 for (typename dcT::iterator it=coeffs.begin(); it!=end; ++it) {
2033 const keyT& parent = it->first;
2034 nodeT& node = it->second;
2035 if (node.has_coeff()) {
2036 // op(parent, node.coeff());
2037 TensorArgs full(-1.0,TT_FULL);
2038 change_tensor_type(node.coeff(),full);
2039 op(parent, node.coeff().full_tensor());
2041 // op(parent,node);
2042 }
2043 }
2044 if (fence)
2045 world.gop.fence();
2046 }
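 // Example op (sketch): any callable with operator()(const keyT&, Tensor<T>&) works,
 // e.g. an illustrative functor (not part of the interface) that scales each block:
 //
 //   struct scale_op {
 //       double fac;
 //       void operator()(const Key<NDIM>& key, Tensor<T>& t) const {t.scale(fac);}
 //   };
 //   impl.unary_op_coeff_inplace(scale_op{2.0}, true);  // `impl`: some FunctionImpl<T,NDIM>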
2047
2048 /// Unary operation applied inplace to the nodes WITHOUT refinement, optional fence
2049 /// @param[in] op the unary operator for the nodes
2050 template <typename opT>
2051 void unary_op_node_inplace(const opT& op, bool fence) {
2052 typename dcT::iterator end = coeffs.end();
2053 for (typename dcT::iterator it=coeffs.begin(); it!=end; ++it) {
2054 const keyT& parent = it->first;
2055 nodeT& node = it->second;
2056 op(parent, node);
2057 }
2058 if (fence)
2059 world.gop.fence();
2060 }
2061
2062 /// Integrate over one particle of a two particle function and get a one particle function
2063 /// e.g. \int g(1,2) \delta(2-1) d2 = f(1)
2064 /// The overall dimension of g should be even
2065
2066 /// The operator is applied at the leaf nodes of the diagonal boxes
2067 template<std::size_t LDIM>
2068 void dirac_convolution_op(const keyT &key, const nodeT &node, FunctionImpl<T,LDIM>* f) const {
2069 // fast return if the node has children (not a leaf node)
2070 if(node.has_children()) return;
2071
2072 const implT* g=this;
2073
2074 // break the 6D key into two 3D keys (may also work for every even dimension)
2075 Key<LDIM> key1, key2;
2076 key.break_apart(key1,key2);
2077
2078 // get the coefficients of the 6D function g
2079 const coeffT& g_coeff = node.coeff();
2080
2081 // get the values of the 6D function g
2082 coeffT g_values = g->coeffs2values(key,g_coeff);
2083
2084 // Determine rank and k
2085 const long rank=g_values.rank();
2086 const long maxk=f->get_k();
2087 MADNESS_ASSERT(maxk==g_coeff.dim(0));
2088
2089 // get tensors for particle 1 and 2 (U and V in SVD)
2090 tensorT vec1=copy(g_values.get_svdtensor().ref_vector(0).reshape(rank,maxk,maxk,maxk));
2091 tensorT vec2=g_values.get_svdtensor().ref_vector(1).reshape(rank,maxk,maxk,maxk);
2092 tensorT result(maxk,maxk,maxk); // should give zero tensor
2093 // Multiply the values of each U and V vector
2094 for (long i=0; i<rank; ++i) {
2095 tensorT c1=vec1(Slice(i,i),_,_,_); // shallow copy (!)
2096 tensorT c2=vec2(Slice(i,i),_,_,_);
2097 c1.emul(c2); // this changes vec1 because of shallow copy, but not the g function because of the deep copy made above
2098 double singular_value_i = g_values.get_svdtensor().weights(i);
2099 result += (singular_value_i*c1);
2100 }
2101
2102 // accumulate coefficients (since only diagonal boxes are used the coefficients just get replaced, but accumulate is needed to create the right tree structure)
2103 tensorT f_coeff = f->values2coeffs(key1,result);
2104 f->coeffs.task(key1, &FunctionNode<T,LDIM>::accumulate2, f_coeff, f->coeffs, key1, TaskAttributes::hipri());
2105// coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
2106
2107
2108 return;
2109 }
2110
2111
2112 template<std::size_t LDIM>
2113 void do_dirac_convolution(FunctionImpl<T,LDIM>* f, bool fence) const {
2114 typename dcT::const_iterator end = this->coeffs.end();
2115 for (typename dcT::const_iterator it=this->coeffs.begin(); it!=end; ++it) {
2116 // looping through all the leaf(!) coefficients in the NDIM function ("this")
2117 const keyT& key = it->first;
2118 const FunctionNode<T,NDIM>& node = it->second;
2119 if (node.is_leaf()) {
2120 // only process the diagonal boxes
2121 Key<LDIM> key1, key2;
2122 key.break_apart(key1,key2);
2123 if(key1 == key2){
2124 ProcessID p = coeffs.owner(key);
2125 woT::task(p, &implT:: template dirac_convolution_op<LDIM>, key, node, f);
2126 }
2127 }
2128 }
2129 world.gop.fence(); // fence is necessary if trickle down is used afterwards
2130 // trickle down and undo_redundant shouldn't change anything if only the diagonal elements are considered above -> check this
2131 f->trickle_down(true); // fence must be true otherwise undo_redundant will have trouble
2132// f->undo_redundant(true);
2133 f->verify_tree();
2134 //if (fence) world.gop.fence(); // unnecessary, fence is activated in undo_redundant
2135
2136 }
2137
2138
2139 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2140 /// @param[in] op the unary operator for the coefficients
2141 template <typename opT>
2142 void flo_unary_op_node_inplace(const opT& op, bool fence) {
2143 typedef Range<typename dcT::iterator> rangeT;
2144// typedef do_unary_op_value_inplace<opT> xopT;
2145 world.taskq.for_each<rangeT,opT>(rangeT(coeffs.begin(), coeffs.end()), op);
2146 if (fence) world.gop.fence();
2147 }
2148
2149 /// Unary operation applied inplace to the coefficients WITHOUT refinement, optional fence
2150 /// @param[in] op the unary operator for the coefficients
2151 template <typename opT>
2152 void flo_unary_op_node_inplace(const opT& op, bool fence) const {
2153 typedef Range<typename dcT::const_iterator> rangeT;
2154// typedef do_unary_op_value_inplace<opT> xopT;
2155 world.taskq.for_each<rangeT,opT>(rangeT(coeffs.begin(), coeffs.end()), op);
2156 if (fence)
2157 world.gop.fence();
2158 }
2159
2160 /// truncate tree at a certain level
2161 /// @param[in] max_level truncate tree below this level
2162 void erase(const Level& max_level);
2163
2164 /// Returns some asymmetry measure ... no comms
2165 double check_symmetry_local() const;
2166
2167 /// given an NS tree resulting from a convolution, truncate leafs if appropriate
2170 const implT* f; // for calling its member functions
2171
2173
2174 bool operator()(typename rangeT::iterator& it) const {
2175
2176 const keyT& key = it->first;
2177 nodeT& node = it->second;
2178
2179 if (node.is_leaf() and node.coeff().has_data()) {
2180 coeffT d = copy(node.coeff());
2181 d(f->cdata.s0)=0.0;
2182 const double error=d.normf();
2183 const double tol=f->truncate_tol(f->get_thresh(),key);
2184 if (error<tol) node.coeff()=copy(node.coeff()(f->cdata.s0));
2185 }
2186 return true;
2187 }
2188 template <typename Archive> void serialize(const Archive& ar) {}
2189
2190 };
2191
2192 /// remove all coefficients of internal nodes
2195
2196 /// constructor needs impl for cdata
2198
2199 bool operator()(typename rangeT::iterator& it) const {
2200
2201 nodeT& node = it->second;
2202 if (node.has_children()) node.clear_coeff();
2203 return true;
2204 }
2205 template <typename Archive> void serialize(const Archive& ar) {}
2206
2207 };
2208
2209 /// remove all coefficients of leaf nodes
2212
2213 /// constructor needs impl for cdata
2215
2216 bool operator()(typename rangeT::iterator& it) const {
2217 nodeT& node = it->second;
2218 if (not node.has_children()) node.clear_coeff();
2219 return true;
2220 }
2221 template <typename Archive> void serialize(const Archive& ar) {}
2222
2223 };
2224
2225
2226 /// keep only the sum coefficients in each node
2230
2231 /// constructor needs impl for cdata
2233
2234 bool operator()(typename rangeT::iterator& it) const {
2235
2236 nodeT& node = it->second;
2237 coeffT s=copy(node.coeff()(impl->cdata.s0));
2238 node.coeff()=s;
2239 return true;
2240 }
2241 template <typename Archive> void serialize(const Archive& ar) {}
2242
2243 };
2244
2245
2246 /// reduce the rank of the nodes, optional fence
2249
2250 // threshold for rank reduction / SVD truncation
2252
2253 // constructor takes target precision
2254 do_reduce_rank() = default;
2256 do_reduce_rank(const double& thresh) {
2257 args.thresh=thresh;
2258 }
2259
2260 //
2261 bool operator()(typename rangeT::iterator& it) const {
2262
2263 nodeT& node = it->second;
2264 node.reduceRank(args.thresh);
2265 return true;
2266 }
2267 template <typename Archive> void serialize(const Archive& ar) {}
2268 };
2269
2270
2271
2272 /// check symmetry wrt particle exchange
2275 const implT* f;
2278
2279 /// return the norm of the difference of this node and its "mirror" node
2280 double operator()(typename rangeT::iterator& it) const {
2281
2282 // Temporary fix to GCC whining about out of range access for NDIM!=6
2283 if constexpr(NDIM==6) {
2284 const keyT& key = it->first;
2285 const nodeT& fnode = it->second;
2286
2287 // skip internal nodes
2288 if (fnode.has_children()) return 0.0;
2289
2290 if (f->world.size()>1) return 0.0;
2291
2292 // exchange particles
2293 std::vector<long> map(NDIM);
2294 map[0]=3; map[1]=4; map[2]=5;
2295 map[3]=0; map[4]=1; map[5]=2;
2296
2297 // make mapped key
2299 for (std::size_t i=0; i<NDIM; ++i) l[map[i]] = key.translation()[i];
2300 const keyT mapkey(key.level(),l);
2301
2302 double norm=0.0;
2303
2304
2305 // hope it's local
2306 if (f->get_coeffs().probe(mapkey)) {
2307 MADNESS_ASSERT(f->get_coeffs().probe(mapkey));
2308 const nodeT& mapnode=f->get_coeffs().find(mapkey).get()->second;
2309
2310// bool have_c1=fnode.coeff().has_data() and fnode.coeff().config().has_data();
2311// bool have_c2=mapnode.coeff().has_data() and mapnode.coeff().config().has_data();
2312 bool have_c1=fnode.coeff().has_data();
2313 bool have_c2=mapnode.coeff().has_data();
2314
2315 if (have_c1 and have_c2) {
2316 tensorT c1=fnode.coeff().full_tensor_copy();
2317 tensorT c2=mapnode.coeff().full_tensor_copy();
2318 c2 = copy(c2.mapdim(map));
2319 norm=(c1-c2).normf();
2320 } else if (have_c1) {
2321 tensorT c1=fnode.coeff().full_tensor_copy();
2322 norm=c1.normf();
2323 } else if (have_c2) {
2324 tensorT c2=mapnode.coeff().full_tensor_copy();
2325 norm=c2.normf();
2326 } else {
2327 norm=0.0;
2328 }
2329 } else {
2330 norm=fnode.coeff().normf();
2331 }
2332 return norm*norm;
2333 }
2334 else {
2335 MADNESS_EXCEPTION("ONLY FOR DIM 6!", 1);
2336 }
2337 }
2338
2339 double operator()(double a, double b) const {
2340 return (a+b);
2341 }
2342
2343 template <typename Archive> void serialize(const Archive& ar) {
2344 MADNESS_EXCEPTION("no serialization of do_check_symmetry yet",1);
2345 }
2346
2347
2348 };
2349
2350 /// merge the coefficient boxes of this into result's tree
2351
2352 /// result += alpha*this
2353 /// this and result don't have to have the same distribution or live in the same world
2354 /// no comm, and the tree is left in a consistent state by virtue of FunctionNode::accumulate
2355 template<typename Q, typename R>
2359 T alpha=T(1.0);
2363
2364 /// accumulate the coefficients of this node into result's tree
2365 bool operator()(typename rangeT::iterator& it) const {
2366
2367 const keyT& key = it->first;
2368 const nodeT& node = it->second;
2369 if (node.has_coeff()) result->get_coeffs().task(key, &nodeT::accumulate,
2370 alpha*node.coeff(), result->get_coeffs(), key, result->targs);
2371 return true;
2372 }
2373
2374 template <typename Archive> void serialize(const Archive& ar) {
2375 MADNESS_EXCEPTION("no serialization of do_accumulate_trees",1);
2376 }
2377 };
2378
2379
2380 /// merge the coefficient boxes of this into other's tree
2381
2382 /// no comm, and the tree is left in a consistent state by virtue
2383 /// of FunctionNode::gaxpy_inplace
2384 template<typename Q, typename R>
2393
2394 /// merge the coefficients of this node into other's tree
2395 bool operator()(typename rangeT::iterator& it) const {
2396
2397 const keyT& key = it->first;
2398 const nodeT& fnode = it->second;
2399
2400 // if other's node exists: add this' coeffs to it
2401 // otherwise insert this' node into other's tree
2402 typename dcT::accessor acc;
2403 if (other->get_coeffs().find(acc,key)) {
2404 nodeT& gnode=acc->second;
2405 gnode.gaxpy_inplace(beta,fnode,alpha);
2406 } else {
2407 nodeT gnode=fnode;
2408 gnode.scale(alpha);
2409 other->get_coeffs().replace(key,gnode);
2410 }
2411 return true;
2412 }
2413
2414 template <typename Archive> void serialize(const Archive& ar) {
2415 MADNESS_EXCEPTION("no serialization of do_merge_trees",1);
2416 }
2417 };
2418
2419
2420 /// map this on f
2421 struct do_mapdim {
2423
2424 std::vector<long> map;
2426
2427 do_mapdim() : f(0) {};
2428 do_mapdim(const std::vector<long> map, implT& f) : map(map), f(&f) {}
2429
2430 bool operator()(typename rangeT::iterator& it) const {
2431
2432 const keyT& key = it->first;
2433 const nodeT& node = it->second;
2434
2436 for (std::size_t i=0; i<NDIM; ++i) l[map[i]] = key.translation()[i];
2437 tensorT c = node.coeff().reconstruct_tensor();
2438 if (c.size()) c = copy(c.mapdim(map));
2440 f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2441
2442 return true;
2443 }
2444 template <typename Archive> void serialize(const Archive& ar) {
2445 MADNESS_EXCEPTION("no serialization of do_mapdim",1);
2446 }
2447
2448 };
2449
2450 /// mirror dimensions of this, write result on f
2451 struct do_mirror {
2453
2454 std::vector<long> mirror;
2456
2457 do_mirror() : f(0) {};
2458 do_mirror(const std::vector<long> mirror, implT& f) : mirror(mirror), f(&f) {}
2459
2460 bool operator()(typename rangeT::iterator& it) const {
2461
2462 const keyT& key = it->first;
2463 const nodeT& node = it->second;
2464
2465 // mirror translation index: l_new + l_old = l_max
2467 Translation lmax = (Translation(1)<<key.level()) - 1;
2468 for (std::size_t i=0; i<NDIM; ++i) {
2469 if (mirror[i]==-1) l[i]= lmax - key.translation()[i];
2470 }
2471
2472 // mirror coefficients: multiply all odd-k slices with -1
2473 tensorT c = node.coeff().full_tensor_copy();
2474 if (c.size()) {
2475 std::vector<Slice> s(___);
2476
2477 // loop over dimensions and over k
2478 for (size_t i=0; i<NDIM; ++i) {
2479 std::size_t kmax=c.dim(i);
2480 if (mirror[i]==-1) {
2481 for (size_t k=1; k<kmax; k+=2) {
2482 s[i]=Slice(k,k,1);
2483 c(s)*=(-1.0);
2484 }
2485 s[i]=_;
2486 }
2487 }
2488 }
2490 f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2491
2492 return true;
2493 }
2494 template <typename Archive> void serialize(const Archive& ar) {
2495 MADNESS_EXCEPTION("no serialization of do_mirror",1);
2496 }
2497
2498 };
2499
2500 /// map and mirror dimensions of this, write result on f
2503
2504 std::vector<long> map,mirror;
2506
2508 do_map_and_mirror(const std::vector<long> map, const std::vector<long> mirror, implT& f)
2509 : map(map), mirror(mirror), f(&f) {}
2510
2511 bool operator()(typename rangeT::iterator& it) const {
2512
2513 const keyT& key = it->first;
2514 const nodeT& node = it->second;
2515
2516 tensorT c = node.coeff().full_tensor_copy();
2518
2519 // do the mapping first (if present)
2520 if (map.size()>0) {
2522 for (std::size_t i=0; i<NDIM; ++i) l1[map[i]] = l[i];
2523 std::swap(l,l1);
2524 if (c.size()) c = copy(c.mapdim(map));
2525 }
2526
2527 if (mirror.size()>0) {
2528 // mirror translation index: l_new + l_old = l_max
2530 Translation lmax = (Translation(1)<<key.level()) - 1;
2531 for (std::size_t i=0; i<NDIM; ++i) {
2532 if (mirror[i]==-1) l1[i]= lmax - l[i];
2533 }
2534 std::swap(l,l1);
2535
2536 // mirror coefficients: multiply all odd-k slices with -1
2537 if (c.size()) {
2538 std::vector<Slice> s(___);
2539
2540 // loop over dimensions and over k
2541 for (size_t i=0; i<NDIM; ++i) {
2542 std::size_t kmax=c.dim(i);
2543 if (mirror[i]==-1) {
2544 for (size_t k=1; k<kmax; k+=2) {
2545 s[i]=Slice(k,k,1);
2546 c(s)*=(-1.0);
2547 }
2548 s[i]=_;
2549 }
2550 }
2551 }
2552 }
2553
2555 f->get_coeffs().replace(keyT(key.level(),l), nodeT(cc,node.has_children()));
2556 return true;
2557 }
2558 template <typename Archive> void serialize(const Archive& ar) {
2559 MADNESS_EXCEPTION("no serialization of do_mirror",1);
2560 }
2561
2562 };
2563
2564
2565
2566 /// "put" this on g
2567 struct do_average {
2569
2571
2572 do_average() : g(0) {}
2574
2575 /// iterator it points to this
2576 bool operator()(typename rangeT::iterator& it) const {
2577
2578 const keyT& key = it->first;
2579 const nodeT& fnode = it->second;
2580
2581 // fast return if rhs has no coeff here
2582 if (fnode.has_coeff()) {
2583
2584 // check if there is a node already existing
2585 typename dcT::accessor acc;
2586 if (g->get_coeffs().find(acc,key)) {
2587 nodeT& gnode=acc->second;
2588 if (gnode.has_coeff()) gnode.coeff()+=fnode.coeff();
2589 } else {
2590 g->get_coeffs().replace(key,fnode);
2591 }
2592 }
2593
2594 return true;
2595 }
2596 template <typename Archive> void serialize(const Archive& ar) {}
2597 };
2598
2599 /// change representation of nodes' coeffs to low rank, optional fence
2602
2603 // threshold for rank reduction / SVD truncation
2606
2607 // constructor takes target precision
2609 // do_change_tensor_type(const TensorArgs& targs) : targs(targs) {}
2611
2612 //
2613 bool operator()(typename rangeT::iterator& it) const {
2614
2615 double cpu0=cpu_time();
2616 nodeT& node = it->second;
2617 change_tensor_type(node.coeff(),targs);
2618 double cpu1=cpu_time();
2620
2621 return true;
2622
2623 }
2624 template <typename Archive> void serialize(const Archive& ar) {}
2625 };
2626
2629
2630 // threshold for rank reduction / SVD truncation
2632
2633 // constructor takes target precision
2636 bool operator()(typename rangeT::iterator& it) const {
2637 it->second.consolidate_buffer(targs);
2638 return true;
2639 }
2640 template <typename Archive> void serialize(const Archive& ar) {}
2641 };
2642
2643
2644
2645 template <typename opT>
2649 opT op;
2651 bool operator()(typename rangeT::iterator& it) const {
2652 const keyT& key = it->first;
2653 nodeT& node = it->second;
2654 if (node.has_coeff()) {
2655 const TensorArgs full_args(-1.0,TT_FULL);
2656 change_tensor_type(node.coeff(),full_args);
2657 tensorT& t= node.coeff().full_tensor();
2658 //double before = t.normf();
2659 tensorT values = impl->fcube_for_mul(key, key, t);
2660 op(key, values);
2661 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
2662 t = transform(values,impl->cdata.quad_phiw).scale(scale);
2663 node.coeff()=coeffT(t,impl->get_tensor_args());
2664 //double after = t.normf();
2665 //madness::print("XOP:", key, before, after);
2666 }
2667 return true;
2668 }
2669 template <typename Archive> void serialize(const Archive& ar) {}
2670 };
2671
2672 template <typename Q, typename R>
2673 /// @todo I don't know what this does other than a transform
2674 void vtransform_doit(const std::shared_ptr< FunctionImpl<R,NDIM> >& right,
2675 const Tensor<Q>& c,
2676 const std::vector< std::shared_ptr< FunctionImpl<T,NDIM> > >& vleft,
2677 double tol) {
2678 // To reduce crunch on vectors being transformed each task
2679 // does them in a random order
2680 std::vector<unsigned int> ind(vleft.size());
2681 for (unsigned int i=0; i<vleft.size(); ++i) {
2682 ind[i] = i;
2683 }
2684 for (unsigned int i=0; i<vleft.size(); ++i) {
2685 unsigned int j = RandomValue<int>()%vleft.size();
2686 std::swap(ind[i],ind[j]);
2687 }
2688
2689 typename FunctionImpl<R,NDIM>::dcT::const_iterator end = right->coeffs.end();
2690 for (typename FunctionImpl<R,NDIM>::dcT::const_iterator it=right->coeffs.begin(); it != end; ++it) {
2691 if (it->second.has_coeff()) {
2692 const Key<NDIM>& key = it->first;
2693 const GenTensor<R>& r = it->second.coeff();
2694 double norm = r.normf();
2695 double keytol = truncate_tol(tol,key);
2696
2697 for (unsigned int j=0; j<vleft.size(); ++j) {
2698 unsigned int i = ind[j]; // Random permutation
2699 if (std::abs(norm*c(i)) > keytol) {
2700 implT* left = vleft[i].get();
2701 typename dcT::accessor acc;
2702 bool newnode = left->coeffs.insert(acc,key);
2703 if (newnode && key.level()>0) {
2704 Key<NDIM> parent = key.parent();
2705 if (left->coeffs.is_local(parent))
2706 left->coeffs.send(parent, &nodeT::set_has_children_recursive, left->coeffs, parent);
2707 else
2708 left->coeffs.task(parent, &nodeT::set_has_children_recursive, left->coeffs, parent);
2709
2710 }
2711 nodeT& node = acc->second;
2712 if (!node.has_coeff())
2713 node.set_coeff(coeffT(cdata.v2k,targs));
2714 coeffT& t = node.coeff();
2715 t.gaxpy(1.0, r, c(i));
2716 }
2717 }
2718 }
2719 }
2720 }
2721
2722 /// Refine multiple functions down to the same finest level
2723
2724 /// @param v the vector of functions we are refining.
2725 /// @param key the current node.
2726 /// @param c the vector of coefficients passed from above.
2727 void refine_to_common_level(const std::vector<FunctionImpl<T,NDIM>*>& v,
2728 const std::vector<tensorT>& c,
2729 const keyT key);
2730
2731 /// Inplace operate on many functions (impl's) with an operator within a certain box
2732 /// @param[in] key the key of the current function node (box)
2733 /// @param[in] op the operator
2734 /// @param[in] v the vector of function impl's on which to be operated
2735 template <typename opT>
2736 void multiop_values_doit(const keyT& key, const opT& op, const std::vector<implT*>& v) {
2737 std::vector<tensorT> c(v.size());
2738 for (unsigned int i=0; i<v.size(); i++) {
2739 if (v[i]) {
2740 coeffT cc = coeffs2values(key, v[i]->coeffs.find(key).get()->second.coeff());
2741 c[i]=cc.full_tensor();
2742 }
2743 }
2744 tensorT r = op(key, c);
2745 coeffs.replace(key, nodeT(coeffT(values2coeffs(key, r),targs),false));
2746 }
2747
2748 /// Inplace operate on many functions (impl's) with an operator within a certain box
2749 /// Assumes all functions have been refined down to the same level
2750 /// @param[in] op the operator
2751 /// @param[in] v the vector of function impl's on which to be operated
2752 template <typename opT>
2753 void multiop_values(const opT& op, const std::vector<implT*>& v) {
2754 // rough check on refinement level (ignore non-initialized functions)
2755 for (std::size_t i=1; i<v.size(); ++i) {
2756 if (v[i] and v[i-1]) {
2757 MADNESS_ASSERT(v[i]->coeffs.size()==v[i-1]->coeffs.size());
2758 }
2759 }
2760 typename dcT::iterator end = v[0]->coeffs.end();
2761 for (typename dcT::iterator it=v[0]->coeffs.begin(); it!=end; ++it) {
2762 const keyT& key = it->first;
2763 if (it->second.has_coeff())
2764 world.taskq.add(*this, &implT:: template multiop_values_doit<opT>, key, op, v);
2765 else
2766 coeffs.replace(key, nodeT(coeffT(),true));
2767 }
2768 world.gop.fence();
2769 }
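 // Example op (sketch, illustrative only): return the pointwise sum of the value
 // tensors of all input functions in the box; `impl` and `v` stand for existing objects:
 //
 //   struct sum_values_op {
 //       Tensor<T> operator()(const Key<NDIM>& key, const std::vector<Tensor<T> >& values) const {
 //           Tensor<T> r = copy(values[0]);
 //           for (std::size_t i=1; i<values.size(); ++i) r += values[i];
 //           return r;
 //       }
 //   };
 //   impl.multiop_values(sum_values_op(), v);  // v: std::vector<implT*>, refined to a common level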
2770
2771 /// Inplace operate on many functions (impl's) with an operator within a certain box
2772
2773 /// @param[in] key the key of the current function node (box)
2774 /// @param[in] op the operator
2775 /// @param[in] vin the vector of function impl's on which to be operated
2776 /// @param[out] vout the resulting vector of function impl's
2777 template <typename opT>
2778 void multi_to_multi_op_values_doit(const keyT& key, const opT& op,
2779 const std::vector<implT*>& vin, std::vector<implT*>& vout) {
2780 std::vector<tensorT> c(vin.size());
2781 for (unsigned int i=0; i<vin.size(); i++) {
2782 if (vin[i]) {
2783 coeffT cc = coeffs2values(key, vin[i]->coeffs.find(key).get()->second.coeff());
2784 c[i]=cc.full_tensor();
2785 }
2786 }
2787 std::vector<tensorT> r = op(key, c);
2788 MADNESS_ASSERT(r.size()==vout.size());
2789 for (std::size_t i=0; i<vout.size(); ++i) {
2790 vout[i]->coeffs.replace(key, nodeT(coeffT(values2coeffs(key, r[i]),targs),false));
2791 }
2792 }
2793
2794 /// Inplace operate on many functions (impl's) with an operator within a certain box
2795
2796 /// Assumes all functions have been refined down to the same level
2797 /// @param[in] op the operator
2798 /// @param[in] vin the vector of function impl's on which to be operated
2799 /// @param[out] vout the resulting vector of function impl's
2800 template <typename opT>
2801 void multi_to_multi_op_values(const opT& op, const std::vector<implT*>& vin,
2802 std::vector<implT*>& vout, const bool fence=true) {
2803 // rough check on refinement level (ignore non-initialized functions)
2804 for (std::size_t i=1; i<vin.size(); ++i) {
2805 if (vin[i] and vin[i-1]) {
2806 MADNESS_ASSERT(vin[i]->coeffs.size()==vin[i-1]->coeffs.size());
2807 }
2808 }
2809 typename dcT::iterator end = vin[0]->coeffs.end();
2810 for (typename dcT::iterator it=vin[0]->coeffs.begin(); it!=end; ++it) {
2811 const keyT& key = it->first;
2812 if (it->second.has_coeff())
2813 world.taskq.add(*this, &implT:: template multi_to_multi_op_values_doit<opT>,
2814 key, op, vin, vout);
2815 else {
2816 // fill result functions with empty box in this key
2817 for (implT* it2 : vout) {
2818 it2->coeffs.replace(key, nodeT(coeffT(),true));
2819 }
2820 }
2821 }
2822 if (fence) world.gop.fence();
2823 }
2824
2825 /// Transforms a vector of functions left[i] = sum[j] right[j]*c[j,i] using sparsity
2826 /// @param[in] vright vector of functions (impl's) on which to be transformed
2827 /// @param[in] c the tensor (matrix) transformer
2828 /// @param[in] vleft vector of the *newly* transformed functions (impl's)
2829 template <typename Q, typename R>
2830 void vtransform(const std::vector< std::shared_ptr< FunctionImpl<R,NDIM> > >& vright,
2831 const Tensor<Q>& c,
2832 const std::vector< std::shared_ptr< FunctionImpl<T,NDIM> > >& vleft,
2833 double tol,
2834 bool fence) {
2835 for (unsigned int j=0; j<vright.size(); ++j) {
2836 world.taskq.add(*this, &implT:: template vtransform_doit<Q,R>, vright[j], copy(c(j,_)), vleft, tol);
2837 }
2838 if (fence)
2839 world.gop.fence();
2840 }
2841
2842 /// Unary operation applied inplace to the values with optional refinement and fence
2843 /// @param[in] op the unary operator for the values
2844 template <typename opT>
2845 void unary_op_value_inplace(const opT& op, bool fence) {
2846 typedef Range<typename dcT::iterator> rangeT;
2847 typedef do_unary_op_value_inplace<opT> xopT;
2848 world.taskq.for_each<rangeT,xopT>(rangeT(coeffs.begin(), coeffs.end()), xopT(this,op));
2849 if (fence)
2850 world.gop.fence();
2851 }
2852
2853 // Multiplication assuming same distribution and recursive descent
2854 /// Both left and right functions are in the scaling function basis
2855 /// @param[in] key the key to the current function node (box)
2856 /// @param[in] left the function impl associated with the left function
2857 /// @param[in] lcin the scaling function coefficients associated with the
2858 /// current box in the left function
2859 /// @param[in] vrightin the vector of function impl's associated with
2860 /// the vector of right functions
2861 /// @param[in] vrcin the vector scaling function coefficients associated with the
2862 /// current box in the right functions
2863 /// @param[out] vresultin the vector of resulting functions (impl's)
2864 template <typename L, typename R>
2865 void mulXXveca(const keyT& key,
2866 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
2867 const std::vector<const FunctionImpl<R,NDIM>*> vrightin,
2868 const std::vector< Tensor<R> >& vrcin,
2869 const std::vector<FunctionImpl<T,NDIM>*> vresultin,
2870 double tol) {
2871 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
2872 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
2873
2874 double lnorm = 1e99;
2875 Tensor<L> lc = lcin;
2876 if (lc.size() == 0) {
2877 literT it = left->coeffs.find(key).get();
2878 MADNESS_ASSERT(it != left->coeffs.end());
2879 lnorm = it->second.get_norm_tree();
2880 if (it->second.has_coeff())
2881 lc = it->second.coeff().full_tensor_copy();
2882 }
2883
2884 // Loop thru RHS functions seeing if anything can be multiplied
2885 std::vector<FunctionImpl<T,NDIM>*> vresult;
2886 std::vector<const FunctionImpl<R,NDIM>*> vright;
2887 std::vector< Tensor<R> > vrc;
2888 vresult.reserve(vrightin.size());
2889 vright.reserve(vrightin.size());
2890 vrc.reserve(vrightin.size());
2891
2892 for (unsigned int i=0; i<vrightin.size(); ++i) {
2893 FunctionImpl<T,NDIM>* result = vresultin[i];
2894 const FunctionImpl<R,NDIM>* right = vrightin[i];
2895 Tensor<R> rc = vrcin[i];
2896 double rnorm;
2897 if (rc.size() == 0) {
2898 riterT it = right->coeffs.find(key).get();
2899 MADNESS_ASSERT(it != right->coeffs.end());
2900 rnorm = it->second.get_norm_tree();
2901 if (it->second.has_coeff())
2902 rc = it->second.coeff().full_tensor_copy();
2903 }
2904 else {
2905 rnorm = rc.normf();
2906 }
2907
2908 if (rc.size() && lc.size()) { // Yipee!
2909 result->task(world.rank(), &implT:: template do_mul<L,R>, key, lc, std::make_pair(key,rc));
2910 }
2911 else if (tol && lnorm*rnorm < truncate_tol(tol, key)) {
2912 result->coeffs.replace(key, nodeT(coeffT(cdata.vk,targs),false)); // Zero leaf
2913 }
2914 else { // Interior node
2915 result->coeffs.replace(key, nodeT(coeffT(),true));
2916 vresult.push_back(result);
2917 vright.push_back(right);
2918 vrc.push_back(rc);
2919 }
2920 }
2921
2922 if (vresult.size()) {
2923 Tensor<L> lss;
2924 if (lc.size()) {
2925 Tensor<L> ld(cdata.v2k);
2926 ld(cdata.s0) = lc(___);
2927 lss = left->unfilter(ld);
2928 }
2929
2930 std::vector< Tensor<R> > vrss(vresult.size());
2931 for (unsigned int i=0; i<vresult.size(); ++i) {
2932 if (vrc[i].size()) {
2933 Tensor<R> rd(cdata.v2k);
2934 rd(cdata.s0) = vrc[i](___);
2935 vrss[i] = vright[i]->unfilter(rd);
2936 }
2937 }
2938
2939 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
2940 const keyT& child = kit.key();
2941 Tensor<L> ll;
2942
2943 std::vector<Slice> cp = child_patch(child);
2944
2945 if (lc.size())
2946 ll = copy(lss(cp));
2947
2948 std::vector< Tensor<R> > vv(vresult.size());
2949 for (unsigned int i=0; i<vresult.size(); ++i) {
2950 if (vrc[i].size())
2951 vv[i] = copy(vrss[i](cp));
2952 }
2953
2954 woT::task(coeffs.owner(child), &implT:: template mulXXveca<L,R>, child, left, ll, vright, vv, vresult, tol);
2955 }
2956 }
2957 }
2958
2959 /// Multiplication using recursive descent and assuming same distribution
2960 /// Both left and right functions are in the scaling function basis
2961 /// @param[in] key the key to the current function node (box)
2962 /// @param[in] left the function impl associated with the left function
2963 /// @param[in] lcin the scaling function coefficients associated with the
2964 /// current box in the left function
2965 /// @param[in] right the function impl associated with the right function
2966 /// @param[in] rcin the scaling function coefficients associated with the
2967 /// current box in the right function
2968 template <typename L, typename R>
2969 void mulXXa(const keyT& key,
2970 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
2971 const FunctionImpl<R,NDIM>* right,const Tensor<R>& rcin,
2972 double tol) {
2973 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
2974 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
2975
2976 double lnorm=1e99, rnorm=1e99;
2977
2978 Tensor<L> lc = lcin;
2979 if (lc.size() == 0) {
2980 literT it = left->coeffs.find(key).get();
2981 MADNESS_ASSERT(it != left->coeffs.end());
2982 lnorm = it->second.get_norm_tree();
2983 if (it->second.has_coeff())
2984 lc = it->second.coeff().reconstruct_tensor();
2985 }
2986
2987 Tensor<R> rc = rcin;
2988 if (rc.size() == 0) {
2989 riterT it = right->coeffs.find(key).get();
2990 MADNESS_ASSERT(it != right->coeffs.end());
2991 rnorm = it->second.get_norm_tree();
2992 if (it->second.has_coeff())
2993 rc = it->second.coeff().reconstruct_tensor();
2994 }
2995
2996 // both nodes are leaf nodes: multiply and return
2997 if (rc.size() && lc.size()) { // Yipee!
2998 do_mul<L,R>(key, lc, std::make_pair(key,rc));
2999 return;
3000 }
3001
3002 if (tol) {
3003 if (lc.size())
3004 lnorm = lc.normf(); // Otherwise got from norm tree above
3005 if (rc.size())
3006 rnorm = rc.normf();
3007 if (lnorm*rnorm < truncate_tol(tol, key)) {
3008 coeffs.replace(key, nodeT(coeffT(cdata.vk,targs),false)); // Zero leaf node
3009 return;
3010 }
3011 }
3012
3013 // Recur down
3014 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3015
3016 Tensor<L> lss;
3017 if (lc.size()) {
3018 Tensor<L> ld(cdata.v2k);
3019 ld(cdata.s0) = lc(___);
3020 lss = left->unfilter(ld);
3021 }
3022
3023 Tensor<R> rss;
3024 if (rc.size()) {
3025 Tensor<R> rd(cdata.v2k);
3026 rd(cdata.s0) = rc(___);
3027 rss = right->unfilter(rd);
3028 }
3029
3030 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3031 const keyT& child = kit.key();
3032 Tensor<L> ll;
3033 Tensor<R> rr;
3034 if (lc.size())
3035 ll = copy(lss(child_patch(child)));
3036 if (rc.size())
3037 rr = copy(rss(child_patch(child)));
3038
3039 woT::task(coeffs.owner(child), &implT:: template mulXXa<L,R>, child, left, ll, right, rr, tol);
3040 }
3041 }
3042
3043
3044 // Binary operation on values using recursive descent and assuming same distribution
3045 /// Both left and right functions are in the scaling function basis
3046 /// @param[in] key the key to the current function node (box)
3047 /// @param[in] left the function impl associated with the left function
3048 /// @param[in] lcin the scaling function coefficients associated with the
3049 /// current box in the left function
3050 /// @param[in] right the function impl associated with the right function
3051 /// @param[in] rcin the scaling function coefficients associated with the
3052 /// current box in the right function
3053 /// @param[in] op the binary operator
3054 template <typename L, typename R, typename opT>
3055 void binaryXXa(const keyT& key,
3056 const FunctionImpl<L,NDIM>* left, const Tensor<L>& lcin,
3057 const FunctionImpl<R,NDIM>* right,const Tensor<R>& rcin,
3058 const opT& op) {
3059 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
3060 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator riterT;
3061
3062 Tensor<L> lc = lcin;
3063 if (lc.size() == 0) {
3064 literT it = left->coeffs.find(key).get();
3065 MADNESS_ASSERT(it != left->coeffs.end());
3066 if (it->second.has_coeff())
3067 lc = it->second.coeff().reconstruct_tensor();
3068 }
3069
3070 Tensor<R> rc = rcin;
3071 if (rc.size() == 0) {
3072 riterT it = right->coeffs.find(key).get();
3073 MADNESS_ASSERT(it != right->coeffs.end());
3074 if (it->second.has_coeff())
3075 rc = it->second.coeff().reconstruct_tensor();
3076 }
3077
3078 if (rc.size() && lc.size()) { // Yipee!
3079 do_binary_op<L,R>(key, lc, std::make_pair(key,rc), op);
3080 return;
3081 }
3082
3083 // Recur down
3084 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3085
3086 Tensor<L> lss;
3087 if (lc.size()) {
3088 Tensor<L> ld(cdata.v2k);
3089 ld(cdata.s0) = lc(___);
3090 lss = left->unfilter(ld);
3091 }
3092
3093 Tensor<R> rss;
3094 if (rc.size()) {
3095 Tensor<R> rd(cdata.v2k);
3096 rd(cdata.s0) = rc(___);
3097 rss = right->unfilter(rd);
3098 }
3099
3100 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3101 const keyT& child = kit.key();
3102 Tensor<L> ll;
3103 Tensor<R> rr;
3104 if (lc.size())
3105 ll = copy(lss(child_patch(child)));
3106 if (rc.size())
3107 rr = copy(rss(child_patch(child)));
3108
3109 woT::task(coeffs.owner(child), &implT:: template binaryXXa<L,R,opT>, child, left, ll, right, rr, op);
3110 }
3111 }
3112
3113 template <typename Q, typename opT>
3114 struct coeff_value_adaptor {
3115 typedef typename opT::resultT resultT;
3116 const FunctionImpl<Q,NDIM>* impl_func;
3117 opT op;
3118
3123
3124 Tensor<resultT> operator()(const Key<NDIM>& key, const Tensor<Q>& t) const {
3125 Tensor<Q> invalues = impl_func->coeffs2values(key, t);
3126
3127 Tensor<resultT> outvalues = op(key, invalues);
3128
3129 return impl_func->values2coeffs(key, outvalues);
3130 }
3131
3132 template <typename Archive>
3133 void serialize(Archive& ar) {
3134 ar & impl_func & op;
3135 }
3136 };
3137
3138 /// Out of place unary operation on function impl
3139 /// The skeleton algorithm should resemble something like
3140 ///
3141 /// *this = op(*func)
3142 ///
3143 /// @param[in] key the key of the current function node (box)
3144 /// @param[in] func the function impl on which to be operated
3145 /// @param[in] op the unary operator
3146 template <typename Q, typename opT>
3147 void unaryXXa(const keyT& key,
3148 const FunctionImpl<Q,NDIM>* func, const opT& op) {
3149
3150 // const Tensor<Q>& fc = func->coeffs.find(key).get()->second.full_tensor_copy();
3151 const Tensor<Q> fc = func->coeffs.find(key).get()->second.coeff().reconstruct_tensor();
3152
3153 if (fc.size() == 0) {
3154 // Recur down
3155 coeffs.replace(key, nodeT(coeffT(),true)); // Interior node
3156 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3157 const keyT& child = kit.key();
3158 woT::task(coeffs.owner(child), &implT:: template unaryXXa<Q,opT>, child, func, op);
3159 }
3160 }
3161 else {
3162 tensorT t=op(key,fc);
3163 coeffs.replace(key, nodeT(coeffT(t,targs),false)); // Leaf node
3164 }
3165 }
3166
3167 /// Multiplies two functions (impl's) together. Delegates to the mulXXa() method
3168 /// @param[in] left pointer to the left function impl
3169 /// @param[in] right pointer to the right function impl
3170 /// @param[in] tol numerical tolerance
3171 template <typename L, typename R>
3172 void mulXX(const FunctionImpl<L,NDIM>* left, const FunctionImpl<R,NDIM>* right, double tol, bool fence) {
3173 if (world.rank() == coeffs.owner(cdata.key0))
3174 mulXXa(cdata.key0, left, Tensor<L>(), right, Tensor<R>(), tol);
3175 if (fence)
3176 world.gop.fence();
3177
3178 //verify_tree();
3179 }
3180
3181 /// Performs binary operation on two functions (impl's). Delegates to the binaryXXa() method
3182 /// @param[in] left pointer to the left function impl
3183 /// @param[in] right pointer to the right function impl
3184 /// @param[in] op the binary operator
3185 template <typename L, typename R, typename opT>
3186 void binaryXX(const FunctionImpl<L,NDIM>* left, const FunctionImpl<R,NDIM>* right,
3187 const opT& op, bool fence) {
3188 if (world.rank() == coeffs.owner(cdata.key0))
3189 binaryXXa(cdata.key0, left, Tensor<L>(), right, Tensor<R>(), op);
3190 if (fence)
3191 world.gop.fence();
3192
3193 //verify_tree();
3194 }
3195
3196 /// Performs unary operation on function impl. Delegates to the unaryXXa() method
3197 /// @param[in] func function impl of the operand
3198 /// @param[in] op the unary operator
3199 template <typename Q, typename opT>
3200 void unaryXX(const FunctionImpl<Q,NDIM>* func, const opT& op, bool fence) {
3201 if (world.rank() == coeffs.owner(cdata.key0))
3202 unaryXXa(cdata.key0, func, op);
3203 if (fence)
3204 world.gop.fence();
3205
3206 //verify_tree();
3207 }
3208
3209 /// Performs unary operation on function impl. Delegates to the unaryXXa() method
3210 /// @param[in] func function impl of the operand
3211 /// @param[in] op the unary operator
3212 template <typename Q, typename opT>
3213 void unaryXXvalues(const FunctionImpl<Q,NDIM>* func, const opT& op, bool fence) {
3214 if (world.rank() == coeffs.owner(cdata.key0))
3215 unaryXXa(cdata.key0, func, coeff_value_adaptor<Q,opT>(func,op));
3216 if (fence)
3217 world.gop.fence();
3218
3219 //verify_tree();
3220 }
3221
3222 /// Multiplies a function (impl) with a vector of functions (impl's). Delegates to the
3223 /// mulXXveca() method.
3224 /// @param[in] left pointer to the left function impl
3225 /// @param[in] vright vector of pointers to the right function impl's
3226 /// @param[in] tol numerical tolerance
3227 /// @param[out] vresult vector of pointers to the resulting function impl's
3228 template <typename L, typename R>
3229 void mulXXvec(const FunctionImpl<L,NDIM>* left,
3230 const std::vector<const FunctionImpl<R,NDIM>*>& vright,
3231 const std::vector<FunctionImpl<T,NDIM>*>& vresult,
3232 double tol,
3233 bool fence) {
3234 std::vector< Tensor<R> > vr(vright.size());
3235 if (world.rank() == coeffs.owner(cdata.key0))
3236 mulXXveca(cdata.key0, left, Tensor<L>(), vright, vr, vresult, tol);
3237 if (fence)
3238 world.gop.fence();
3239 }
3240
3242
3243 mutable long box_leaf[1000];
3244 mutable long box_interior[1000];
3245
3246 // horrifically non-scalable
3247 void put_in_box(ProcessID from, long nl, long ni) const;
3248
3249 /// Prints summary of data distribution
3250 void print_info() const;
3251
3252 /// Verify tree is properly constructed ... global synchronization involved
3253
3254 /// If an inconsistency is detected, prints a message describing the error and
3255 /// then throws a madness exception.
3256 ///
3257 /// This is a reasonably quick and scalable operation that is
3258 /// useful for debugging and paranoia.
3259 void verify_tree() const;
3260
3261 /// Walk up the tree returning pair(key,node) for first node with coefficients
3262
3263 /// Three possibilities.
3264 ///
3265 /// 1) The coeffs are present and returned with the key of the containing node.
3266 ///
3267 /// 2) The coeffs are further up the tree ... the request is forwarded up.
3268 ///
3269 /// 3) The coeffs are further down the tree ... an empty tensor is returned.
3270 ///
3271 /// !! This routine is crying out for an optimization to
3272 /// manage the number of messages being sent ... presently
3273 /// each parent is fetched 2^(n*d) times where n is the no. of
3274 /// levels between the level of evaluation and the parent.
3275 /// Alternatively, reimplement multiply as a downward tree
3276 /// walk and just pass the parent down. Slightly less
3277 /// parallelism but much less communication.
3278 /// @todo Robert .... help!
3279 void sock_it_to_me(const keyT& key,
3280 const RemoteReference< FutureImpl< std::pair<keyT,coeffT> > >& ref) const;
3281 /// As above, except
3282 /// 3) The coeffs are constructed from the avg of nodes further down the tree
3283 /// @todo Robert .... help!
3284 void sock_it_to_me_too(const keyT& key,
3285 const RemoteReference< FutureImpl< std::pair<keyT,coeffT> > >& ref) const;
3286
3287 /// @todo help!
3289 const keyT& key,
3290 const coordT& plotlo, const coordT& plothi, const std::vector<long>& npt,
3291 bool eval_refine) const;
3292
3293
3294 /// Evaluate a cube/slice of points ... plotlo and plothi are already in simulation coordinates
3295 /// No communications
3296 /// @param[in] plotlo the coordinate of the starting point
3297 /// @param[in] plothi the coordinate of the ending point
3298 /// @param[in] npt the number of points in each dimension
3299 Tensor<T> eval_plot_cube(const coordT& plotlo,
3300 const coordT& plothi,
3301 const std::vector<long>& npt,
3302 const bool eval_refine = false) const;
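 // Example (sketch): sample 101 points along x through the midpoint of the cell,
 // in simulation coordinates; `impl` stands for an existing FunctionImpl<T,NDIM>:
 //
 //   coordT plotlo(0.5), plothi(0.5);
 //   plotlo[0]=0.0; plothi[0]=1.0;
 //   std::vector<long> npt(NDIM,1); npt[0]=101;
 //   Tensor<T> line = impl.eval_plot_cube(plotlo, plothi, npt);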
3303
3304
3305 /// Evaluate function only if point is local returning (true,value); otherwise return (false,0.0)
3306
3307 /// maxlevel is the maximum depth to search down to --- the max local depth can be
3308 /// computed with max_local_depth();
3309 std::pair<bool,T> eval_local_only(const Vector<double,NDIM>& xin, Level maxlevel) ;
3310
3311
3312 /// Evaluate the function at a point in \em simulation coordinates
3313
3314 /// Only the invoking process will get the result via the
3315 /// remote reference to a future. Active messages may be sent
3316 /// to other nodes.
3317 void eval(const Vector<double,NDIM>& xin,
3318 const keyT& keyin,
3319 const typename Future<T>::remote_refT& ref);
3320
3321 /// Get the depth of the tree at a point in \em simulation coordinates
3322
3323 /// Only the invoking process will get the result via the
3324 /// remote reference to a future. Active messages may be sent
3325 /// to other nodes.
3326 ///
3327 /// This function is a minimally-modified version of eval()
3328 void evaldepthpt(const Vector<double,NDIM>& xin,
3329 const keyT& keyin,
3330 const typename Future<Level>::remote_refT& ref);
3331
3332 /// Get the rank of leaf box of the tree at a point in \em simulation coordinates
3333
3334 /// Only the invoking process will get the result via the
3335 /// remote reference to a future. Active messages may be sent
3336 /// to other nodes.
3337 ///
3338 /// This function is a minimally-modified version of eval()
3339 void evalR(const Vector<double,NDIM>& xin,
3340 const keyT& keyin,
3341 const typename Future<long>::remote_refT& ref);
3342
3343
3344 /// Computes norm of low/high-order polyn. coeffs for autorefinement test
3345
3346 /// t is a k^d tensor. In order to screen the autorefinement
3347 /// during multiplication compute the norms of
3348 /// ... lo ... the block of t for all polynomials of order < k/2
3349 /// ... hi ... the block of t for all polynomials of order >= k/2
3350 ///
3351 /// k=5 0,1,2,3,4 --> 0,1,2 ... 3,4
3352 /// k=6 0,1,2,3,4,5 --> 0,1,2 ... 3,4,5
3353 ///
3354 /// k=number of wavelets, so k=5 means max order is 4, so max exactly
3355 /// representable squarable polynomial is of order 2.
3356 void static tnorm(const tensorT& t, double* lo, double* hi);
3357
3358 void static tnorm(const GenTensor<T>& t, double* lo, double* hi);
3359
3360 void static tnorm(const SVDTensor<T>& t, double* lo, double* hi, const int particle);
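 // [Illustration only, not part of the MADNESS API] A minimal 1-d sketch of the
 // lo/hi split performed by tnorm, with a plain std::vector standing in for a
 // k^d tensor; lo collects the polynomial orders < k/2, hi the orders >= k/2
 // (k=5: {0,1,2 | 3,4}).
 //
 // #include <cmath>
 // #include <cstddef>
 // #include <vector>
 //
 // void tnorm_1d(const std::vector<double>& c, double* lo, double* hi) {
 //     const std::size_t k = c.size();            // number of wavelets
 //     double lo2 = 0.0, hi2 = 0.0;
 //     for (std::size_t i = 0; i < k; ++i)        // orders 0 .. k-1
 //         (i < (k + 1) / 2 ? lo2 : hi2) += c[i] * c[i];
 //     *lo = std::sqrt(lo2);                      // norm of the low-order block
 //     *hi = std::sqrt(hi2);                      // norm of the high-order block
 // }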
3361
3362 // This invoked if node has not been autorefined
3363 void do_square_inplace(const keyT& key);
3364
3365 // This invoked if node has been autorefined
3366 void do_square_inplace2(const keyT& parent, const keyT& child, const tensorT& parent_coeff);
3367
3368 /// Always returns false (for when autorefine is not wanted)
3369 bool noautorefine(const keyT& key, const tensorT& t) const;
3370
3371 /// Returns true if this block of coeffs needs autorefining
3372 bool autorefine_square_test(const keyT& key, const nodeT& t) const;
3373
3374 /// Pointwise squaring of function with optional global fence
3375
3376 /// If not autorefining, local computation only if not fencing.
3377 /// If autorefining, may result in asynchronous communication.
3378 void square_inplace(bool fence);
3379 void abs_inplace(bool fence);
3380 void abs_square_inplace(bool fence);
3381
3382 /// is this the same as trickle_down() ?
3383 void sum_down_spawn(const keyT& key, const coeffT& s);
3384
3385 /// After applying the 1d push operator, sum coeffs down the tree to restore the correct scaling function coefficients
3386 void sum_down(bool fence);
3387
3388 /// perform this multiplication: h(1,2) = f(1,2) * g(1)
3389 template<size_t LDIM>
3391
3392 static bool randomize() {return false;}
3396
3397 implT* h; ///< the result function h(1,2) = f(1,2) * g(1)
3400 int particle; ///< if g is g(1) or g(2)
3401
3402 multiply_op() : h(), f(), g(), particle(1) {}
3403
3404 multiply_op(implT* h1, const ctT& f1, const ctL& g1, const int particle1)
3405 : h(h1), f(f1), g(g1), particle(particle1) {};
3406
3407 /// return true if this will be a leaf node
3408
3409 /// use generalization of tnorm for a GenTensor
3410 bool screen(const coeffT& fcoeff, const coeffT& gcoeff, const keyT& key) const {
3412 MADNESS_ASSERT(fcoeff.is_svd_tensor());
3415
3416 double glo=0.0, ghi=0.0, flo=0.0, fhi=0.0;
3417 g.get_impl()->tnorm(gcoeff.get_tensor(), &glo, &ghi);
3418 g.get_impl()->tnorm(fcoeff.get_svdtensor(),&flo,&fhi,particle);
3419
3420 double total_hi=glo*fhi + ghi*flo + fhi*ghi;
3421 return (total_hi<h->truncate_tol(h->get_thresh(),key));
3422
3423 }
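 // Worked form of the bound used in screen() above (illustration only): with
 // f = Pf + Qf and g = Pg + Qg, where P keeps polynomial orders < k/2 and Q
 // keeps orders >= k/2,
 //
 //    f*g - (Pf)(Pg) = (Pf)(Qg) + (Qf)(Pg) + (Qf)(Qg)
 //
 // so the high-order part of the product is bounded (up to projection) by
 // flo*ghi + fhi*glo + fhi*ghi = total_hi, which is compared against the
 // truncation tolerance to decide whether this node can remain a leaf.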
3424
3425 /// apply this on a FunctionNode of f and g of Key key
3426
3427 /// @param[in] key key for FunctionNode in f and g, (g: broken into particles)
3428 /// @return <this node is a leaf, coefficients of this node>
3429 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
3430
3431 // bool is_leaf=(not fdatum.second.has_children());
3432 // if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3433
3434 // break key into particles (these are the child keys, with f/gdatum come the parent keys)
3435 Key<LDIM> key1,key2;
3436 key.break_apart(key1,key2);
3437 const Key<LDIM> gkey= (particle==1) ? key1 : key2;
3438
3439 // get coefficients of the actual FunctionNode
3440 coeffT coeff1=f.get_impl()->parent_to_child(f.coeff(),f.key(),key);
3441 coeff1.normalize();
3442 const coeffT coeff2=g.get_impl()->parent_to_child(g.coeff(),g.key(),gkey);
3443
3444 // multiplication is done in TT_2D
3445 coeffT coeff1_2D=coeff1.convert(TensorArgs(h->get_thresh(),TT_2D));
3446 coeff1_2D.normalize();
3447
3448 bool is_leaf=screen(coeff1_2D,coeff2,key);
3449 if (key.level()<2) is_leaf=false;
3450
3451 coeffT hcoeff;
3452 if (is_leaf) {
3453
3454 // convert coefficients to values
3455 coeffT hvalues=f.get_impl()->coeffs2values(key,coeff1_2D);
3456 coeffT gvalues=g.get_impl()->coeffs2values(gkey,coeff2);
3457
3458 // perform multiplication
3459 coeffT result_val=h->multiply(hvalues,gvalues,particle-1);
3460
3461 hcoeff=h->values2coeffs(key,result_val);
3462
3463 // conversion on coeffs, not on values, because it implies truncation!
3464 if (not hcoeff.is_of_tensortype(h->get_tensor_type()))
3465 hcoeff=hcoeff.convert(h->get_tensor_args());
3466 }
3467
3468 return std::pair<bool,coeffT> (is_leaf,hcoeff);
3469 }
3470
3471 this_type make_child(const keyT& child) const {
3472
3473 // break key into particles
3474 Key<LDIM> key1, key2;
3475 child.break_apart(key1,key2);
3476 const Key<LDIM> gkey= (particle==1) ? key1 : key2;
3477
3478 return this_type(h,f.make_child(child),g.make_child(gkey),particle);
3479 }
3480
3482 Future<ctT> f1=f.activate();
3484 return h->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3485 &this_type::forward_ctor),h,f1,g1,particle);
3486 }
3487
3488 this_type forward_ctor(implT* h1, const ctT& f1, const ctL& g1, const int particle) {
3489 return this_type(h1,f1,g1,particle);
3490 }
3491
3492 template <typename Archive> void serialize(const Archive& ar) {
3493 ar & h & f & g & particle;
3494 }
3495 };
3496
3497
3498 /// add two functions f and g: result=alpha * f + beta * g
3499 struct add_op {
3500
3503
3504 bool randomize() const {return false;}
3505
3506 /// tracking coeffs of first and second addend
3508 /// prefactor for f, g
3509 double alpha, beta;
3510
3511 add_op() = default;
3512 add_op(const ctT& f, const ctT& g, const double alpha, const double beta)
3513 : f(f), g(g), alpha(alpha), beta(beta){}
3514
3515 /// if we are at the bottom of the trees, return the sum of the coeffs
3516 std::pair<bool,coeffT> operator()(const keyT& key) const {
3517
3518 bool is_leaf=(f.is_leaf() and g.is_leaf());
3519 if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3520
3521 coeffT fcoeff=f.get_impl()->parent_to_child(f.coeff(),f.key(),key);
3522 coeffT gcoeff=g.get_impl()->parent_to_child(g.coeff(),g.key(),key);
3523 coeffT hcoeff=copy(fcoeff);
3524 hcoeff.gaxpy(alpha,gcoeff,beta);
3525 hcoeff.reduce_rank(f.get_impl()->get_tensor_args().thresh);
3526 return std::pair<bool,coeffT> (is_leaf,hcoeff);
3527 }
3528
3529 this_type make_child(const keyT& child) const {
3530 return this_type(f.make_child(child),g.make_child(child),alpha,beta);
3531 }
3532
3533 /// retrieve the coefficients (parent coeffs might be remote)
3535 Future<ctT> f1=f.activate();
3536 Future<ctT> g1=g.activate();
3537 return f.get_impl()->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3539 }
3540
3541 /// taskq-compatible ctor
3542 this_type forward_ctor(const ctT& f1, const ctT& g1, const double alpha, const double beta) {
3543 return this_type(f1,g1,alpha,beta);
3544 }
3545
3546 template <typename Archive> void serialize(const Archive& ar) {
3547 ar & f & g & alpha & beta;
3548 }
3549
3550 };
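 // [Illustration only, not part of the MADNESS API] At a leaf node add_op boils
 // down to an axpy on the coefficients followed by a rank reduction; a sketch
 // with plain std::vector standing in for coeffT:
 //
 // #include <cstddef>
 // #include <vector>
 //
 // // h = alpha*f + beta*g on the scaling coefficients of one box
 // std::vector<double> add_leaf(const std::vector<double>& f,
 //                              const std::vector<double>& g,
 //                              double alpha, double beta) {
 //     std::vector<double> h(f.size());
 //     for (std::size_t i = 0; i < f.size(); ++i) h[i] = alpha*f[i] + beta*g[i];
 //     return h;   // the low-rank coeffT additionally calls reduce_rank(thresh)
 // }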
3551
3552 /// multiply f (a pair function of NDIM) with an orbital g (LDIM=NDIM/2)
3553
3554 /// as in (with h(1,2)=*this) : h(1,2) = g(1) * f(1,2)
3555 /// use tnorm as a measure to determine if f (=*this) must be refined
3556 /// @param[in] f the NDIM function f=f(1,2)
3557 /// @param[in] g the LDIM function g(1) (or g(2))
3558 /// @param[in] particle 1 or 2, as in g(1) or g(2)
3559 template<size_t LDIM>
3560 void multiply(const implT* f, const FunctionImpl<T,LDIM>* g, const int particle) {
3561
3564
3565 typedef multiply_op<LDIM> coeff_opT;
3566 coeff_opT coeff_op(this,ff,gg,particle);
3567
3568 typedef insert_op<T,NDIM> apply_opT;
3569 apply_opT apply_op(this);
3570
3571 keyT key0=f->cdata.key0;
3572 if (world.rank() == coeffs.owner(key0)) {
3574 woT::task(p, &implT:: template forward_traverse<coeff_opT,apply_opT>, coeff_op, apply_op, key0);
3575 }
3576
3578 }
3579
3580 /// Hartree product of two LDIM functions to yield a NDIM = 2*LDIM function
3581 template<size_t LDIM, typename leaf_opT>
3582 struct hartree_op {
3583 bool randomize() const {return false;}
3584
3587
3588 implT* result; ///< where to construct the pair function
3589 ctL p1, p2; ///< tracking coeffs of the two lo-dim functions
3590 leaf_opT leaf_op; ///< determine if a given node will be a leaf node
3591
3592 // ctor
3594 hartree_op(implT* result, const ctL& p11, const ctL& p22, const leaf_opT& leaf_op)
3595 : result(result), p1(p11), p2(p22), leaf_op(leaf_op) {
3596 MADNESS_ASSERT(LDIM+LDIM==NDIM);
3597 }
3598
3599 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
3600
3601 // break key into particles (these are the child keys, with datum1/2 come the parent keys)
3602 Key<LDIM> key1,key2;
3603 key.break_apart(key1,key2);
3604
3605 // this returns the appropriate NS coeffs for key1 and key2 resp.
3606 const coeffT fcoeff=p1.coeff(key1);
3607 const coeffT gcoeff=p2.coeff(key2);
3608 bool is_leaf=leaf_op(key,fcoeff.full_tensor(),gcoeff.full_tensor());
3609 if (not is_leaf) return std::pair<bool,coeffT> (is_leaf,coeffT());
3610
3611 // extract the sum coeffs from the NS coeffs
3612 const coeffT s1=fcoeff(p1.get_impl()->cdata.s0);
3613 const coeffT s2=gcoeff(p2.get_impl()->cdata.s0);
3614
3615 // new coeffs are simply the hartree/kronecker/outer product --
3616 coeffT coeff=outer(s1,s2,result->get_tensor_args());
3617 // no post-determination
3618 // is_leaf=leaf_op(key,coeff);
3619 return std::pair<bool,coeffT>(is_leaf,coeff);
3620 }
3621
3622 this_type make_child(const keyT& child) const {
3623
3624 // break key into particles
3625 Key<LDIM> key1, key2;
3626 child.break_apart(key1,key2);
3627
3628 return this_type(result,p1.make_child(key1),p2.make_child(key2),leaf_op);
3629 }
3630
3632 Future<ctL> p11=p1.activate();
3633 Future<ctL> p22=p2.activate();
3634 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
3635 &this_type::forward_ctor),result,p11,p22,leaf_op);
3636 }
3637
3638 this_type forward_ctor(implT* result1, const ctL& p11, const ctL& p22, const leaf_opT& leaf_op) {
3639 return this_type(result1,p11,p22,leaf_op);
3640 }
3641
3642 template <typename Archive> void serialize(const Archive& ar) {
3643 ar & result & p1 & p2 & leaf_op;
3644 }
3645 };
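 // [Illustration only, not part of the MADNESS API] For LDIM=1 the essential
 // step of hartree_op is the outer product of the two sum-coefficient vectors,
 // sketched here with plain std::vector in place of coeffT:
 //
 // #include <cstddef>
 // #include <vector>
 //
 // // coefficients of Phi(1,2) = phi(1) x phi(2):  c_ij = s1_i * s2_j
 // std::vector<double> outer_1d(const std::vector<double>& s1,
 //                              const std::vector<double>& s2) {
 //     std::vector<double> c(s1.size() * s2.size());
 //     for (std::size_t i = 0; i < s1.size(); ++i)
 //         for (std::size_t j = 0; j < s2.size(); ++j)
 //             c[i*s2.size() + j] = s1[i] * s2[j];
 //     return c;
 // }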
3646
3647 /// traverse a non-existing tree
3648
3649 /// part II: activate coeff_op, i.e. retrieve all the necessary remote boxes (communication)
3650 /// @param[in] coeff_op operator making the coefficients that needs activation
3651 /// @param[in] apply_op just passing thru
3652 /// @param[in] key the key we are working on
3653 template<typename coeff_opT, typename apply_opT>
3654 void forward_traverse(const coeff_opT& coeff_op, const apply_opT& apply_op, const keyT& key) const {
3656 Future<coeff_opT> active_coeff=coeff_op.activate();
3657 woT::task(world.rank(), &implT:: template traverse_tree<coeff_opT,apply_opT>, active_coeff, apply_op, key);
3658 }
3659
3660
3661 /// traverse a non-existing tree
3662
3663 /// part I: make the coefficients, process them and continue the recursion if necessary
3664 /// @param[in] coeff_op operator making the coefficients and determining whether they are leaves
3665 /// @param[in] apply_op operator processing the coefficients
3666 /// @param[in] key the key we are currently working on
3667 template<typename coeff_opT, typename apply_opT>
3668 void traverse_tree(const coeff_opT& coeff_op, const apply_opT& apply_op, const keyT& key) const {
3670
3671 typedef typename std::pair<bool,coeffT> argT;
3672 const argT arg=coeff_op(key);
3673 apply_op.operator()(key,arg.second,arg.first);
3674
3675 const bool has_children=(not arg.first);
3676 if (has_children) {
3677 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
3678 const keyT& child=kit.key();
3679 coeff_opT child_op=coeff_op.make_child(child);
3680 // spawn activation where child is local
3681 ProcessID p=coeffs.owner(child);
3682
3683 void (implT::*ft)(const coeff_opT&, const apply_opT&, const keyT&) const = &implT::forward_traverse<coeff_opT,apply_opT>;
3684
3685 woT::task(p, ft, child_op, apply_op, child);
3686 }
3687 }
3688 }
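 // [Illustration only] A serial sketch of the forward_traverse/traverse_tree
 // pattern above; children() is a hypothetical helper returning the 2^NDIM
 // child keys, and the task spawns are replaced by direct recursion:
 //
 // template <typename coeff_opT, typename apply_opT, typename keyT>
 // void traverse_serial(const coeff_opT& coeff_op, const apply_opT& apply_op,
 //                      const keyT& key) {
 //     auto [is_leaf, coeff] = coeff_op(key);       // make coeffs, decide leaf
 //     apply_op(key, coeff, is_leaf);               // e.g. insert into the tree
 //     if (!is_leaf)
 //         for (const keyT& child : children(key))  // hypothetical helper
 //             traverse_serial(coeff_op.make_child(child), apply_op, child);
 // }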
3689
3690
3691 /// given two functions of LDIM, perform the Hartree/Kronecker/outer product
3692
3693 /// |Phi(1,2)> = |phi(1)> x |phi(2)>
3694 /// @param[in] p1 FunctionImpl of particle 1
3695 /// @param[in] p2 FunctionImpl of particle 2
3696 /// @param[in] leaf_op operator determining whether a given box will be a leaf
3697 template<std::size_t LDIM, typename leaf_opT>
3698 void hartree_product(const std::vector<std::shared_ptr<FunctionImpl<T,LDIM>>> p1,
3699 const std::vector<std::shared_ptr<FunctionImpl<T,LDIM>>> p2,
3700 const leaf_opT& leaf_op, bool fence) {
3701 MADNESS_CHECK_THROW(p1.size()==p2.size(),"hartree_product: p1 and p2 must have the same size");
3702 for (auto& p : p1) MADNESS_CHECK(p->is_nonstandard() or p->is_nonstandard_with_leaves());
3703 for (auto& p : p2) MADNESS_CHECK(p->is_nonstandard() or p->is_nonstandard_with_leaves());
3704
3705 const keyT key0=cdata.key0;
3706
3707 for (std::size_t i=0; i<p1.size(); ++i) {
3708 if (world.rank() == this->get_coeffs().owner(key0)) {
3709
3710 // prepare the CoeffTracker
3711 CoeffTracker<T,LDIM> iap1(p1[i].get());
3712 CoeffTracker<T,LDIM> iap2(p2[i].get());
3713
3714 // the operator making the coefficients
3715 typedef hartree_op<LDIM,leaf_opT> coeff_opT;
3716 coeff_opT coeff_op(this,iap1,iap2,leaf_op);
3717
3718 // this operator simply inserts the coeffs into this' tree
3719// typedef insert_op<T,NDIM> apply_opT;
3720 typedef accumulate_op<T,NDIM> apply_opT;
3721 apply_opT apply_op(this);
3722
3723 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
3724 coeff_op, apply_op, cdata.key0);
3725
3726 }
3727 }
3728
3730 if (fence) world.gop.fence();
3731 }
3732
3733
3734 template <typename opT, typename R>
3735 void
3737 const opT* op = pop.ptr;
3738 const Level n = key.level();
3739 const double cnorm = c.normf();
3740 const double tol = truncate_tol(thresh, key)*0.1; // ??? why this value????
3741
3743 const Translation lold = lnew[axis];
3744 const Translation maxs = Translation(1)<<n;
3745
3746 int nsmall = 0; // Counts neglected blocks to terminate s loop
3747 for (Translation s=0; s<maxs; ++s) {
3748 int maxdir = s ? 1 : -1;
3749 for (int direction=-1; direction<=maxdir; direction+=2) {
3750 lnew[axis] = lold + direction*s;
3751 if (lnew[axis] >= 0 && lnew[axis] < maxs) { // NON-ZERO BOUNDARY CONDITIONS IGNORED HERE !!!!!!!!!!!!!!!!!!!!
3752 const Tensor<typename opT::opT>& r = op->rnlij(n, s*direction, true);
3753 double Rnorm = r.normf();
3754
3755 if (Rnorm == 0.0) {
3756 return; // Hard zero means finished!
3757 }
3758
3759 if (s <= 1 || r.normf()*cnorm > tol) { // Always do kernel and neighbor
3760 nsmall = 0;
3761 tensorT result = transform_dir(c,r,axis);
3762
3763 if (result.normf() > tol*0.3) {
3764 Key<NDIM> dest(n,lnew);
3765 coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
3766 }
3767 }
3768 else {
3769 ++nsmall;
3770 }
3771 }
3772 else {
3773 ++nsmall;
3774 }
3775 }
3776 if (nsmall >= 4) {
3777 // If we have encountered two negligible blocks in
3778 // succession in each direction, interpret
3779 // this as the operator being zero beyond this point and stop
3780 break;
3781 }
3782 }
3783 }
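 // The loop above sweeps the displacement s outward in both directions along
 // the chosen axis and terminates once four consecutive contributions (two
 // shells, both directions) are negligible. A worked example of the screening,
 // assuming truncate_tol(thresh,key) simply returns thresh: with thresh = 1e-6
 // the local tolerance is tol = 1e-7, a block is transformed only if
 // ||r||*||c|| exceeds 1e-7 (or s <= 1), and the transformed block is
 // accumulated only if its norm exceeds 0.3e-7.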
3784
3785 template <typename opT, typename R>
3786 void
3787 apply_1d_realspace_push(const opT& op, const FunctionImpl<R,NDIM>* f, int axis, bool fence) {
3788 MADNESS_ASSERT(!f->is_compressed());
3789
3790 typedef typename FunctionImpl<R,NDIM>::dcT::const_iterator fiterT;
3791 typedef FunctionNode<R,NDIM> fnodeT;
3792 fiterT end = f->coeffs.end();
3793 ProcessID me = world.rank();
3794 for (fiterT it=f->coeffs.begin(); it!=end; ++it) {
3795 const fnodeT& node = it->second;
3796 if (node.has_coeff()) {
3797 const keyT& key = it->first;
3798 const Tensor<R>& c = node.coeff().full_tensor_copy();
3799 woT::task(me, &implT:: template apply_1d_realspace_push_op<opT,R>,
3801 }
3802 }
3803 if (fence) world.gop.fence();
3804 }
3805
3807 const implT* f,
3808 const keyT& key,
3809 const std::pair<keyT,coeffT>& left,
3810 const std::pair<keyT,coeffT>& center,
3811 const std::pair<keyT,coeffT>& right);
3812
3813 void do_diff1(const DerivativeBase<T,NDIM>* D,
3814 const implT* f,
3815 const keyT& key,
3816 const std::pair<keyT,coeffT>& left,
3817 const std::pair<keyT,coeffT>& center,
3818 const std::pair<keyT,coeffT>& right);
3819
3820 // Called by result function to differentiate f
3821 void diff(const DerivativeBase<T,NDIM>* D, const implT* f, bool fence);
3822
3823 /// Returns key of general neighbor enforcing BC
3824
3825 /// Out of volume keys are mapped to enforce the BC as follows.
3826 /// * Periodic BC map back into the volume and return the correct key
3827 /// * non-periodic BC - returns invalid() to indicate out of volume
3828 keyT neighbor(const keyT& key, const keyT& disp, const array_of_bools<NDIM>& is_periodic) const;
3829
3830 /// Returns key of general neighbor that resides in-volume
3831
3832 /// Out of volume keys are mapped to invalid()
3833 keyT neighbor_in_volume(const keyT& key, const keyT& disp) const;
3834
3835 /// find_me. Called by diff_bdry to get coefficients of boundary function
3836 Future< std::pair<keyT,coeffT> > find_me(const keyT& key) const;
3837
3838 /// return a std::pair<key, node>, which MUST exist
3839 std::pair<Key<NDIM>,ShallowNode<T,NDIM> > find_datum(keyT key) const;
3840
3841 /// multiply the ket with a one-electron potential rr(1,2)= f(1,2)*g(1)
3842
3843 /// @param[in] val_ket function values of f(1,2)
3844 /// @param[in] val_pot function values of g(1)
3845 /// @param[in] particle if 0 then g(1), if 1 then g(2)
3846 /// @return the resulting function values
3847 coeffT multiply(const coeffT& val_ket, const coeffT& val_pot, int particle) const;
3848
3849
3850 /// given several coefficient tensors, assemble a result tensor
3851
3852 /// the result looks like: (v(1,2) + v(1) + v(2)) |ket(1,2)>
3853 /// or (v(1,2) + v(1) + v(2)) |p(1) p(2)>
3854 /// i.e. coefficients for the ket and coefficients for the two particles are
3855 /// mutually exclusive. All potential terms are optional, just pass in empty coeffs.
3856 /// @param[in] key the key of the FunctionNode to which these coeffs belong
3857 /// @param[in] coeff_ket coefficients of the ket
3858 /// @param[in] vpotential1 function values of the potential for particle 1
3859 /// @param[in] vpotential2 function values of the potential for particle 2
3860 /// @param[in] veri function values for the 2-particle potential
3861 coeffT assemble_coefficients(const keyT& key, const coeffT& coeff_ket,
3862 const coeffT& vpotential1, const coeffT& vpotential2,
3863 const tensorT& veri) const;
3864
3865
3866
3867 template<std::size_t LDIM>
3871 double error=0.0;
3872 double lo=0.0, hi=0.0, lo1=0.0, hi1=0.0, lo2=0.0, hi2=0.0;
3873
3875 pointwise_multiplier(const Key<NDIM> key, const coeffT& clhs) : coeff_lhs(clhs) {
3877 val_lhs=fcf.coeffs2values(key,coeff_lhs);
3878 error=0.0;
3880 if (coeff_lhs.is_svd_tensor()) {
3883 }
3884 }
3885
3886 /// multiply values of rhs and lhs, result on rhs, rhs and lhs are of the same dimensions
3887 tensorT operator()(const Key<NDIM> key, const tensorT& coeff_rhs) {
3888
3889 MADNESS_ASSERT(coeff_rhs.dim(0)==coeff_lhs.dim(0));
3891
3892 // the tnorm estimate is not tight enough to be efficient, better use oversampling
3893 bool use_tnorm=false;
3894 if (use_tnorm) {
3895 double rlo, rhi;
3896 implT::tnorm(coeff_rhs,&rlo,&rhi);
3897 error = hi*rlo + rhi*lo + rhi*hi;
3898 tensorT val_rhs=fcf.coeffs2values(key, coeff_rhs);
3899 val_rhs.emul(val_lhs.full_tensor_copy());
3900 return fcf.values2coeffs(key,val_rhs);
3901 } else { // use quadrature of order k+1
3902
3903 auto& cdata=FunctionCommonData<T,NDIM>::get(coeff_rhs.dim(0)); // npt=k+1
3904 auto& cdata_npt=FunctionCommonData<T,NDIM>::get(coeff_rhs.dim(0)+oversampling); // npt=k+1
3905 FunctionCommonFunctionality<T,NDIM> fcf_hi_npt(cdata_npt);
3906
3907 // coeffs2values for rhs: k -> npt=k+1
3908 tensorT coeff1(cdata_npt.vk);
3909 coeff1(cdata.s0)=coeff_rhs; // s0 is smaller than vk!
3910 tensorT val_rhs_k1=fcf_hi_npt.coeffs2values(key,coeff1);
3911
3912 // coeffs2values for lhs: k -> npt=k+1
3913 tensorT coeff_lhs_k1(cdata_npt.vk);
3914 coeff_lhs_k1(cdata.s0)=coeff_lhs.full_tensor_copy();
3915 tensorT val_lhs_k1=fcf_hi_npt.coeffs2values(key,coeff_lhs_k1);
3916
3917 // multiply
3918 val_lhs_k1.emul(val_rhs_k1);
3919
3920 // values2coeffs: npt = k+1-> k
3921 tensorT result1=fcf_hi_npt.values2coeffs(key,val_lhs_k1);
3922
3923 // extract coeffs up to k
3924 tensorT result=copy(result1(cdata.s0));
3925 result1(cdata.s0)=0.0;
3926 error=result1.normf();
3927 return result;
3928 }
3929 }
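 // Why the oversampled grid is needed (illustration only): if both factors are
 // polynomials of order <= k-1 on this box, their product has order up to
 //
 //    (k-1) + (k-1) = 2k-2,
 //
 // which cannot be represented in the order-k basis. Multiplying the values on
 // the (k+oversampling)-point grid and transforming back captures the product
 // more faithfully, and the norm of the discarded coefficients of order >= k
 // (result1 above, after zeroing its s0 block) serves as the local error.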
3930
3931 /// multiply values of rhs and lhs, result on rhs; rhs and lhs are of different dimensions
3932 coeffT operator()(const Key<NDIM> key, const tensorT& coeff_rhs, const int particle) {
3933 Key<LDIM> key1, key2;
3934 key.break_apart(key1,key2);
3935 const long k=coeff_rhs.dim(0);
3937 auto& cdata_lowdim=FunctionCommonData<T,LDIM>::get(k);
3938 FunctionCommonFunctionality<T,LDIM> fcf_lo(cdata_lowdim);
3942
3943
3944 // make hi-dim values from lo-dim coeff_rhs on npt grid points
3945 tensorT ones=tensorT(fcf_lo_npt.cdata.vk);
3946 ones=1.0;
3947
3948 tensorT coeff_rhs_npt1(fcf_lo_npt.cdata.vk);
3949 coeff_rhs_npt1(fcf_lo.cdata.s0)=coeff_rhs;
3950 tensorT val_rhs_npt1=fcf_lo_npt.coeffs2values(key1,coeff_rhs_npt1);
3951
3952 TensorArgs targs(-1.0,TT_2D);
3953 coeffT val_rhs;
3954 if (particle==1) val_rhs=outer(val_rhs_npt1,ones,targs);
3955 if (particle==2) val_rhs=outer(ones,val_rhs_npt1,targs);
3956
3957 // make values from hi-dim coeff_lhs on npt grid points
3958 coeffT coeff_lhs_k1(fcf_hi_npt.cdata.vk,coeff_lhs.tensor_type());
3959 coeff_lhs_k1(fcf_hi.cdata.s0)+=coeff_lhs;
3960 coeffT val_lhs_npt=fcf_hi_npt.coeffs2values(key,coeff_lhs_k1);
3961
3962 // multiply
3963 val_lhs_npt.emul(val_rhs);
3964
3965 // values2coeffs: npt = k+1-> k
3966 coeffT result1=fcf_hi_npt.values2coeffs(key,val_lhs_npt);
3967
3968 // extract coeffs up to k
3969 coeffT result=copy(result1(cdata.s0));
3970 result1(cdata.s0)=0.0;
3971 error=result1.normf();
3972 return result;
3973 }
3974
3975 template <typename Archive> void serialize(const Archive& ar) {
3976 ar & error & lo & lo1 & lo2 & hi & hi1& hi2 & val_lhs & coeff_lhs;
3977 }
3978
3979
3980 };
3981
3982 /// given a ket and the 1- and 2-electron potentials, construct the function V phi
3983
3984 /// small memory footprint version of Vphi_op_NS: use the NS form to have information
3985 /// about parent and children to determine if a box is a leaf. This will require
3986 /// compression of the constituent functions, which will lead to more memory usage
3987 /// there, but will avoid oversampling of the result function.
3988 template<typename opT, size_t LDIM>
3989 struct Vphi_op_NS {
3990
3991 bool randomize() const {return true;}
3992
3996
3997 implT* result; ///< where to construct Vphi, no need to track parents
3998 opT leaf_op; ///< deciding if a given FunctionNode will be a leaf node
3999 ctT iaket; ///< the ket of a pair function (exclusive with p1, p2)
4000 ctL iap1, iap2; ///< the particles 1 and 2 (exclusive with ket)
4001 ctL iav1, iav2; ///< potentials for particles 1 and 2
4002 const implT* eri; ///< 2-particle potential, must be on-demand
4003
4004 bool have_ket() const {return iaket.get_impl();}
4005 bool have_v1() const {return iav1.get_impl();}
4006 bool have_v2() const {return iav2.get_impl();}
4007 bool have_eri() const {return eri;}
4008
4009 void accumulate_into_result(const Key<NDIM>& key, const coeffT& coeff) const {
4011 }
4012
4013 // ctor
4015 Vphi_op_NS(implT* result, const opT& leaf_op, const ctT& iaket,
4016 const ctL& iap1, const ctL& iap2, const ctL& iav1, const ctL& iav2,
4017 const implT* eri)
4019 , iav1(iav1), iav2(iav2), eri(eri) {
4020
4021 // 2-particle potential must be on-demand
4023 }
4024
4025 /// make and insert the coefficients into result's tree
4026 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
4027
4029 if(leaf_op.do_pre_screening()){
4030 // this means that we only construct the boxes which are leaf boxes from the other function in the leaf_op
4031 if(leaf_op.pre_screening(key)){
4032 // construct sum_coefficients, insert them and leave
4033 auto [sum_coeff, error]=make_sum_coeffs(key);
4034 accumulate_into_result(key,sum_coeff);
4035 return std::pair<bool,coeffT> (true,coeffT());
4036 }else{
4037 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4038 }
4039 }
4040
4041 // this means that the function has to be completely constructed and not mirrored by another function
4042
4043 // if the initial level is not reached then this must not be a leaf box
4044 size_t il = result->get_initial_level();
4046 if(key.level()<int(il)){
4047 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4048 }
4049 // if further refinement is needed (because we are at a special box, special point)
4050 // and the special_level is not reached then this must not be a leaf box
4051 if(key.level()<result->get_special_level() and leaf_op.special_refinement_needed(key)){
4052 return continue_recursion(std::vector<bool>(1<<NDIM,false),tensorT(),key);
4053 }
4054
4055 auto [sum_coeff,error]=make_sum_coeffs(key);
4056
4057 // coeffs are leaf (for whatever reason), insert into tree and stop recursion
4058 if(leaf_op.post_screening(key,sum_coeff)){
4059 accumulate_into_result(key,sum_coeff);
4060 return std::pair<bool,coeffT> (true,coeffT());
4061 }
4062
4063 // coeffs are accurate, insert into tree and stop recursion
4064 if(error<result->truncate_tol(result->get_thresh(),key)){
4065 accumulate_into_result(key,sum_coeff);
4066 return std::pair<bool,coeffT> (true,coeffT());
4067 }
4068
4069 // coeffs are inaccurate, continue recursion
4070 std::vector<bool> child_is_leaf(1<<NDIM,false);
4071 return continue_recursion(child_is_leaf,tensorT(),key);
4072 }
4073
4074
4075 /// loop over all children and either insert their sum coeffs or continue the recursion
4076
4077 /// @param[in] child_is_leaf for each child: is it a leaf?
4078 /// @param[in] coeffs coefficient tensor with 2^N sum coeffs (=unfiltered NS coeffs)
4079 /// @param[in] key the key for the NS coeffs (=parent key of the children)
4080 /// @return to avoid recursion outside this return: std::pair<is_leaf,coeff> = true,coeffT()
4081 std::pair<bool,coeffT> continue_recursion(const std::vector<bool> child_is_leaf,
4082 const tensorT& coeffs, const keyT& key) const {
4083 std::size_t i=0;
4084 for (KeyChildIterator<NDIM> kit(key); kit; ++kit, ++i) {
4085 keyT child=kit.key();
4086 bool is_leaf=child_is_leaf[i];
4087
4088 if (is_leaf) {
4089 // insert the sum coeffs
4091 iop(child,coeffT(copy(coeffs(result->child_patch(child))),result->get_tensor_args()),is_leaf);
4092 } else {
4093 this_type child_op=this->make_child(child);
4094 noop<T,NDIM> no;
4095 // spawn activation where child is local
4096 ProcessID p=result->get_coeffs().owner(child);
4097
4098 void (implT::*ft)(const Vphi_op_NS<opT,LDIM>&, const noop<T,NDIM>&, const keyT&) const = &implT:: template forward_traverse< Vphi_op_NS<opT,LDIM>, noop<T,NDIM> >;
4099 result->task(p, ft, child_op, no, child);
4100 }
4101 }
4102 // return the sum coeffs; always return is_leaf=true:
4103 // the recursion is continued within this struct, not outside in traverse_tree!
4104 return std::pair<bool,coeffT> (true,coeffT());
4105 }
4106
4107 tensorT eri_coeffs(const keyT& key) const {
4110 if (eri->get_functor()->provides_coeff()) {
4111 return eri->get_functor()->coeff(key).full_tensor();
4112 } else {
4113 tensorT val_eri(eri->cdata.vk);
4114 eri->fcube(key,*(eri->get_functor()),eri->cdata.quad_x,val_eri);
4115 return eri->values2coeffs(key,val_eri);
4116 }
4117 }
4118
4119 /// the error is computed from the d coefficients of the constituent functions
4120
4121 /// the result is h_n = P_n(f g), computed as h_n \approx Pn(f_n g_n)
4122 /// its error is therefore
4123 /// h_n = (f g)_n = (Pn(f) + Qn(f)) (Pn(g) + Qn(g))
4124 /// = Pn(fn gn) + Qn(fn gn) + Pn(f) Qn(g) + Qn(f) Pn(g) + Qn(f) Qn(g)
4125 /// the first term is what we compute, the second term is estimated by tnorm (in another function),
4126 /// the third through fifth terms are estimated in this function by e.g.: Qn(f)Pn(g) < ||Qn(f)|| ||Pn(g)||
4128 const tensorT& ceri) const {
4129 double error = 0.0;
4130 Key<LDIM> key1, key2;
4131 key.break_apart(key1,key2);
4132
4133 PROFILE_BLOCK(compute_error);
4134 double dnorm_ket, snorm_ket;
4135 if (have_ket()) {
4136 snorm_ket=iaket.coeff(key).normf();
4137 dnorm_ket=iaket.dnorm(key);
4138 } else {
4139 double s1=iap1.coeff(key1).normf();
4140 double s2=iap2.coeff(key2).normf();
4141 double d1=iap1.dnorm(key1);
4142 double d2=iap2.dnorm(key2);
4143 snorm_ket=s1*s2;
4144 dnorm_ket=s1*d2 + s2*d1 + d1*d2;
4145 }
4146
4147 if (have_v1()) {
4148 double snorm=iav1.coeff(key1).normf();
4149 double dnorm=iav1.dnorm(key1);
4150 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4151 }
4152 if (have_v2()) {
4153 double snorm=iav2.coeff(key2).normf();
4154 double dnorm=iav2.dnorm(key2);
4155 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4156 }
4157 if (have_eri()) {
4158 tensorT s_coeffs=ceri(result->cdata.s0);
4159 double snorm=s_coeffs.normf();
4160 tensorT d=copy(ceri);
4161 d(result->cdata.s0)=0.0;
4162 double dnorm=d.normf();
4163 error+=snorm*dnorm_ket + dnorm*snorm_ket + dnorm*dnorm_ket;
4164 }
4165
4166 bool no_potential=not ((have_v1() or have_v2() or have_eri()));
4167 if (no_potential) {
4168 error=dnorm_ket;
4169 }
4170 return error;
4171 }
4172
4173 /// make the sum coeffs for key
4174 std::pair<coeffT,double> make_sum_coeffs(const keyT& key) const {
4176 // break key into particles
4177 Key<LDIM> key1, key2;
4178 key.break_apart(key1,key2);
4179
4180 // bool printme=(int(key.translation()[0])==int(std::pow(key.level(),2)/2)) and
4181 // (int(key.translation()[1])==int(std::pow(key.level(),2)/2)) and
4182 // (int(key.translation()[2])==int(std::pow(key.level(),2)/2));
4183
4184// printme=false;
4185
4186 // get/make all coefficients
4187 const coeffT coeff_ket = (iaket.get_impl()) ? iaket.coeff(key)
4188 : outer(iap1.coeff(key1),iap2.coeff(key2),result->get_tensor_args());
4189 const coeffT cpot1 = (have_v1()) ? iav1.coeff(key1) : coeffT();
4190 const coeffT cpot2 = (have_v2()) ? iav2.coeff(key2) : coeffT();
4191 const tensorT ceri = (have_eri()) ? eri_coeffs(key) : tensorT();
4192
4193 // compute first part of the total error
4194 double refine_error=compute_error_from_inaccurate_refinement(key,ceri);
4195 double error=refine_error;
4196
4197 // prepare the multiplication
4198 pointwise_multiplier<LDIM> pm(key,coeff_ket);
4199
4200 // perform the multiplication, compute tnorm part of the total error
4201 coeffT cresult(result->cdata.vk,result->get_tensor_args());
4202 if (have_v1()) {
4203 cresult+=pm(key,cpot1.get_tensor(),1);
4204 error+=pm.error;
4205 }
4206 if (have_v2()) {
4207 cresult+=pm(key,cpot2.get_tensor(),2);
4208 error+=pm.error;
4209 }
4210
4211 if (have_eri()) {
4212 tensorT result1=cresult.full_tensor_copy();
4213 result1+=pm(key,copy(ceri(result->cdata.s0)));
4214 cresult=coeffT(result1,result->get_tensor_args());
4215 error+=pm.error;
4216 } else {
4218 }
4219 if ((not have_v1()) and (not have_v2()) and (not have_eri())) {
4220 cresult=coeff_ket;
4221 }
4222
4223 return std::make_pair(cresult,error);
4224 }
4225
4226 this_type make_child(const keyT& child) const {
4227
4228 // break key into particles
4229 Key<LDIM> key1, key2;
4230 child.break_apart(key1,key2);
4231
4232 return this_type(result,leaf_op,iaket.make_child(child),
4233 iap1.make_child(key1),iap2.make_child(key2),
4234 iav1.make_child(key1),iav2.make_child(key2),eri);
4235 }
4236
4238 Future<ctT> iaket1=iaket.activate();
4239 Future<ctL> iap11=iap1.activate();
4240 Future<ctL> iap21=iap2.activate();
4241 Future<ctL> iav11=iav1.activate();
4242 Future<ctL> iav21=iav2.activate();
4243 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
4244 &this_type::forward_ctor),result,leaf_op,
4245 iaket1,iap11,iap21,iav11,iav21,eri);
4246 }
4247
4248 this_type forward_ctor(implT* result1, const opT& leaf_op, const ctT& iaket1,
4249 const ctL& iap11, const ctL& iap21, const ctL& iav11, const ctL& iav21,
4250 const implT* eri1) {
4251 return this_type(result1,leaf_op,iaket1,iap11,iap21,iav11,iav21,eri1);
4252 }
4253
4254 /// serialize this (needed for use in recursive_op)
4255 template <typename Archive> void serialize(const Archive& ar) {
4256 ar & iaket & eri & result & leaf_op & iap1 & iap2 & iav1 & iav2;
4257 }
4258 };
4259
4260 /// assemble the function V*phi using V and phi given from the functor
4261
4262 /// this function must have been constructed using the CompositeFunctorInterface.
4263 /// The interface provides one- and two-electron potentials, and the ket, which are
4264 /// assembled to give V*phi.
4265 /// @param[in] leaf_op operator to decide if a given node is a leaf node
4266 /// @param[in] fence global fence
4267 template<typename opT>
4268 void make_Vphi(const opT& leaf_op, const bool fence=true) {
4269
4270 constexpr size_t LDIM=NDIM/2;
4271 MADNESS_CHECK_THROW(NDIM==LDIM*2,"make_Vphi only works for even dimensions");
4272
4273
4274 // keep the functor available, but remove it from the result
4275 // result will return false upon is_on_demand(), which is necessary for the
4276 // CoeffTracker to track the parent coeffs correctly for error_leaf_op
4277 std::shared_ptr< FunctionFunctorInterface<T,NDIM> > func2(this->get_functor());
4278 this->unset_functor();
4279
4281 dynamic_cast<CompositeFunctorInterface<T,NDIM,LDIM>* >(&(*func2));
4283
4284 // make sure everything is in place if no fence is requested
4285 if (fence) func->make_redundant(true); // no-op if already redundant
4286 MADNESS_CHECK_THROW(func->check_redundant(),"make_Vphi requires redundant functions");
4287
4288 // loop over all functions in the functor (either ket or particles)
4289 for (auto& ket : func->impl_ket_vector) {
4290 FunctionImpl<T,NDIM>* eri=func->impl_eri.get();
4291 FunctionImpl<T,LDIM>* v1=func->impl_m1.get();
4292 FunctionImpl<T,LDIM>* v2=func->impl_m2.get();
4293 FunctionImpl<T,LDIM>* p1=nullptr;
4294 FunctionImpl<T,LDIM>* p2=nullptr;
4295 make_Vphi_only(leaf_op,ket.get(),v1,v2,p1,p2,eri,false);
4296 }
4297
4298 for (std::size_t i=0; i<func->impl_p1_vector.size(); ++i) {
4299 FunctionImpl<T,NDIM>* ket=nullptr;
4300 FunctionImpl<T,NDIM>* eri=func->impl_eri.get();
4301 FunctionImpl<T,LDIM>* v1=func->impl_m1.get();
4302 FunctionImpl<T,LDIM>* v2=func->impl_m2.get();
4303 FunctionImpl<T,LDIM>* p1=func->impl_p1_vector[i].get();
4304 FunctionImpl<T,LDIM>* p2=func->impl_p2_vector[i].get();
4305 make_Vphi_only(leaf_op,ket,v1,v2,p1,p2,eri,false);
4306 }
4307
4308 // some post-processing:
4309 // - FunctionNode::accumulate() uses buffer -> add the buffer contents to the actual coefficients
4310 // - the operation constructs sum coefficients on all scales -> sum down to get a well-defined tree-state
4311 if (fence) {
4312 world.gop.fence();
4314 sum_down(true);
4316 }
4317
4318
4319 }
4320
4321 /// assemble the function V*phi using V and phi given from the functor
4322
4323 /// this function must have been constructed using the CompositeFunctorInterface.
4324 /// The interface provides one- and two-electron potentials, and the ket, which are
4325 /// assembled to give V*phi.
4326 /// @param[in] leaf_op operator to decide if a given node is a leaf node
4327 /// @param[in] fence global fence
4328 template<typename opT, std::size_t LDIM>
4333 const bool fence=true) {
4334
4335 // prepare the CoeffTracker
4336 CoeffTracker<T,NDIM> iaket(ket);
4337 CoeffTracker<T,LDIM> iap1(p1);
4338 CoeffTracker<T,LDIM> iap2(p2);
4339 CoeffTracker<T,LDIM> iav1(v1);
4340 CoeffTracker<T,LDIM> iav2(v2);
4341
4342 // the operator making the coefficients
4343 typedef Vphi_op_NS<opT,LDIM> coeff_opT;
4344 coeff_opT coeff_op(this,leaf_op,iaket,iap1,iap2,iav1,iav2,eri);
4345
4346 // this operator simply inserts the coeffs into this' tree
4347 typedef noop<T,NDIM> apply_opT;
4348 apply_opT apply_op;
4349
4350 if (world.rank() == coeffs.owner(cdata.key0)) {
4351 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
4352 coeff_op, apply_op, cdata.key0);
4353 }
4354
4356 if (fence) world.gop.fence();
4357
4358 }
4359
4360 /// Permute the dimensions of f according to map, result on this
4361 void mapdim(const implT& f, const std::vector<long>& map, bool fence);
4362
4363 /// mirror the dimensions of f according to map, result on this
4364 void mirror(const implT& f, const std::vector<long>& mirror, bool fence);
4365
4366 /// map and mirror the translation index and the coefficients, result on this
4367
4368 /// first map the dimensions, then mirror!
4369 /// this = mirror(map(f))
4370 void map_and_mirror(const implT& f, const std::vector<long>& map,
4371 const std::vector<long>& mirror, bool fence);
4372
4373 /// take the average of two functions, similar to: this=0.5*(this+rhs)
4374
4375 /// works in either basis and also in nonstandard form
4376 void average(const implT& rhs);
4377
4378 /// change the tensor type of the coefficients in the FunctionNode
4379
4380 /// @param[in] targs target tensor arguments (threshold and full/low rank)
4381 void change_tensor_type1(const TensorArgs& targs, bool fence);
4382
4383 /// reduce the rank of the coefficients tensors
4384
4385 /// @param[in] targs target tensor arguments (threshold and full/low rank)
4386 void reduce_rank(const double thresh, bool fence);
4387
4388
4389 /// remove all nodes with level higher than n
4390 void chop_at_level(const int n, const bool fence=true);
4391
4392 /// compute norm of s and d coefficients for all nodes
4393 void compute_snorm_and_dnorm(bool fence=true);
4394
4395 /// compute the norm of the wavelet coefficients
4398
4402
4403 bool operator()(typename rangeT::iterator& it) const {
4404 auto& node=it->second;
4405 node.recompute_snorm_and_dnorm(cdata);
4406 return true;
4407 }
4408 };
4409
4410
4411 T eval_cube(Level n, coordT& x, const tensorT& c) const;
4412
4413 /// Transform sum coefficients at level n to sums+differences at level n-1
4414
4415 /// Given scaling function coefficients s[n][l][i] and s[n][l+1][i]
4416 /// return the scaling function and wavelet coefficients at the
4417 /// coarser level. I.e., decompose Vn using Vn = Vn-1 + Wn-1.
4418 /// \code
4419 /// s_i = sum(j) h0_ij*s0_j + h1_ij*s1_j
4420 /// d_i = sum(j) g0_ij*s0_j + g1_ij*s1_j
4421 /// \endcode
4422 /// Returns a new tensor and has no side effects. Works for any
4423 /// number of dimensions.
4424 ///
4425 /// No communication involved.
4426 tensorT filter(const tensorT& s) const;
4427
4428 coeffT filter(const coeffT& s) const;
4429
4430 /// Transform sums+differences at level n to sum coefficients at level n+1
4431
4432 /// Given scaling function and wavelet coefficients (s and d)
4433 /// returns the scaling function coefficients at the next finer
4434 /// level. I.e., reconstruct Vn using Vn = Vn-1 + Wn-1.
4435 /// \code
4436 /// s0 = sum(j) h0_ji*s_j + g0_ji*d_j
4437 /// s1 = sum(j) h1_ji*s_j + g1_ji*d_j
4438 /// \endcode
4439 /// Returns a new tensor and has no side effects
4440 ///
4441 /// If (sonly) ... then ss is only the scaling function coeff (and
4442 /// assume the d are zero). Works for any number of dimensions.
4443 ///
4444 /// No communication involved.
4445 tensorT unfilter(const tensorT& s) const;
4446
4447 coeffT unfilter(const coeffT& s) const;
4448
4449 /// downsample the sum coefficients of level n+1 to sum coeffs on level n
4450
4451 /// specialization of the filter method, will yield only the sum coefficients
4452 /// @param[in] key key of level n
4453 /// @param[in] v vector of sum coefficients of level n+1
4454 /// @return sum coefficients on level n in full tensor format
4455 tensorT downsample(const keyT& key, const std::vector< Future<coeffT > >& v) const;
4456
4457 /// upsample the sum coefficients of level n to sum coeffs on level n+1
4458
4459 /// specialization of the unfilter method, will transform only the sum coefficients
4460 /// @param[in] key key of level n+1
4461 /// @param[in] coeff sum coefficients of level n (does NOT belong to key!!)
4462 /// @return sum coefficients on level n+1
4463 coeffT upsample(const keyT& key, const coeffT& coeff) const;
4464
4465 /// Projects old function into new basis (only in reconstructed form)
4466 void project(const implT& old, bool fence);
4467
4469 bool operator()(const implT* f, const keyT& key, const nodeT& t) const {
4470 return true;
4471 }
4472 template <typename Archive> void serialize(Archive& ar) {}
4473 };
4474
4475 template <typename opT>
4476 void refine_op(const opT& op, const keyT& key) {
4477 // Must allow for someone already having autorefined the coeffs
4478 // and we get a write accessor just in case they are already executing
4479 typename dcT::accessor acc;
4480 const auto found = coeffs.find(acc,key);
4481 MADNESS_CHECK(found);
4482 nodeT& node = acc->second;
4483 if (node.has_coeff() && key.level() < max_refine_level && op(this, key, node)) {
4484 coeffT d(cdata.v2k,targs);
4485 d(cdata.s0) += copy(node.coeff());
4486 d = unfilter(d);
4487 node.clear_coeff();
4488 node.set_has_children(true);
4489 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
4490 const keyT& child = kit.key();
4491 coeffT ss = copy(d(child_patch(child)));
4493 // coeffs.replace(child,nodeT(ss,-1.0,false).node_to_low_rank());
4494 coeffs.replace(child,nodeT(ss,-1.0,false));
4495 // Note value -1.0 for norm tree to indicate result of refinement
4496 }
4497 }
4498 }
4499
4500 template <typename opT>
4501 void refine_spawn(const opT& op, const keyT& key) {
4502 nodeT& node = coeffs.find(key).get()->second;
4503 if (node.has_children()) {
4504 for (KeyChildIterator<NDIM> kit(key); kit; ++kit)
4505 woT::task(coeffs.owner(kit.key()), &implT:: template refine_spawn<opT>, op, kit.key(), TaskAttributes::hipri());
4506 }
4507 else {
4508 woT::task(coeffs.owner(key), &implT:: template refine_op<opT>, op, key);
4509 }
4510 }
4511
4512 // Refine in real space according to local user-defined criterion
4513 template <typename opT>
4514 void refine(const opT& op, bool fence) {
4515 if (world.rank() == coeffs.owner(cdata.key0))
4516 woT::task(coeffs.owner(cdata.key0), &implT:: template refine_spawn<opT>, op, cdata.key0, TaskAttributes::hipri());
4517 if (fence)
4518 world.gop.fence();
4519 }
4520
4521 bool exists_and_has_children(const keyT& key) const;
4522
4523 bool exists_and_is_leaf(const keyT& key) const;
4524
4525
4526 void broaden_op(const keyT& key, const std::vector< Future <bool> >& v);
4527
4528 // For each local node sets value of norm tree, snorm and dnorm to 0.0
4529 void zero_norm_tree();
4530
4531 // Broaden tree
4532 void broaden(const array_of_bools<NDIM>& is_periodic, bool fence);
4533
4534 /// sum all the contributions from all scales after applying an operator in mod-NS form
4535 void trickle_down(bool fence);
4536
4537 /// sum all the contributions from all scales after applying an operator in mod-NS form
4538
4539 /// cf reconstruct_op
4540 void trickle_down_op(const keyT& key, const coeffT& s);
4541
4542 /// reconstruct this tree -- respects fence
4543 void reconstruct(bool fence);
4544
4545 void change_tree_state(const TreeState finalstate, bool fence=true);
4546
4547 // Invoked on node where key is local
4548 // void reconstruct_op(const keyT& key, const tensorT& s);
4549 void reconstruct_op(const keyT& key, const coeffT& s, const bool accumulate_NS=true);
4550
4551 /// compress the wave function
4552
4553 /// after application there will be sum coefficients at the root level,
4554 /// and difference coefficients at all other levels; furthermore:
4555 /// @param[in] nonstandard keep sum coeffs at all other levels, except leaves
4556 /// @param[in] keepleaves keep sum coeffs (but no diff coeffs) at leaves
4557 /// @param[in] redundant keep only sum coeffs at all levels, discard difference coeffs
4558// void compress(bool nonstandard, bool keepleaves, bool redundant, bool fence);
4559 void compress(const TreeState newstate, bool fence);
4560
4561 /// Invoked on node where key is local
4562 Future<std::pair<coeffT,double> > compress_spawn(const keyT& key, bool nonstandard, bool keepleaves,
4563 bool redundant1);
4564
4565 private:
4566 /// convert this to redundant, i.e. have sum coefficients on all levels
4567 void make_redundant(const bool fence);
4568 public:
4569
4570 /// convert this from redundant to standard reconstructed form
4571 void undo_redundant(const bool fence);
4572
4573 void remove_internal_coefficients(const bool fence);
4574 void remove_leaf_coefficients(const bool fence);
4575
4576
4577 /// compute for each FunctionNode the norm of the function inside that node
4578 void norm_tree(bool fence);
4579
4580 double norm_tree_op(const keyT& key, const std::vector< Future<double> >& v);
4581
4583
4584 /// truncate using a tree in reconstructed form
4585
4586 /// must be invoked where key is local
4587 Future<coeffT> truncate_reconstructed_spawn(const keyT& key, const double tol);
4588
4589 /// given the sum coefficients of all children, truncate or not
4590
4591 /// @return new sum coefficients (empty if internal, not empty, if new leaf); might delete its children
4592 coeffT truncate_reconstructed_op(const keyT& key, const std::vector< Future<coeffT > >& v, const double tol);
4593
4594 /// calculate the wavelet coefficients using the sum coefficients of all child nodes
4595
4596 /// also compute the norm tree for all nodes
4597 /// @param[in] key this's key
4598 /// @param[in] v sum coefficients of the child nodes
4599 /// @param[in] nonstandard keep the sum coefficients with the wavelet coefficients
4600 /// @param[in] redundant keep only the sum coefficients, discard the wavelet coefficients
4601 /// @return the sum coefficients
4602 std::pair<coeffT,double> compress_op(const keyT& key, const std::vector< Future<std::pair<coeffT,double>> >& v, bool nonstandard);
4603
4604
4605 /// similar to compress_op, but insert only the sum coefficients in the tree
4606
4607 /// also compute the norm tree for all nodes
4608 /// @param[in] key this's key
4609 /// @param[in] v sum coefficients of the child nodes
4610 /// @return the sum coefficients
4611 std::pair<coeffT,double> make_redundant_op(const keyT& key,const std::vector< Future<std::pair<coeffT,double> > >& v);
4612
4613 /// Changes non-standard compressed form to standard compressed form
4614 void standard(bool fence);
4615
4616 /// Changes non-standard compressed form to standard compressed form
4619
4620 // threshold for rank reduction / SVD truncation
4622
4623 // constructor takes target precision
4624 do_standard() = default;
4626
4627 //
4628 bool operator()(typename rangeT::iterator& it) const {
4629
4630 const keyT& key = it->first;
4631 nodeT& node = it->second;
4632 if (key.level()> 0 && node.has_coeff()) {
4633 if (node.has_children()) {
4634 // Zero out scaling coeffs
4635 MADNESS_ASSERT(node.coeff().dim(0)==2*impl->get_k());
4636 node.coeff()(impl->cdata.s0)=0.0;
4637 node.reduceRank(impl->targs.thresh);
4638 } else {
4639 // Deleting both scaling and wavelet coeffs
4640 node.clear_coeff();
4641 }
4642 }
4643 return true;
4644 }
4645 template <typename Archive> void serialize(const Archive& ar) {
4646 MADNESS_EXCEPTION("no serialization of do_standard",1);
4647 }
4648 };
4649
4650
4651 /// laziness
4652 template<size_t OPDIM>
4653 struct do_op_args {
4656 double tol, fac, cnorm;
4657
4658 do_op_args() = default;
4659 do_op_args(const Key<OPDIM>& key, const Key<OPDIM>& d, const keyT& dest, double tol, double fac, double cnorm)
4660 : key(key), d(d), dest(dest), tol(tol), fac(fac), cnorm(cnorm) {}
4661 template <class Archive>
4662 void serialize(Archive& ar) {
4663 ar & archive::wrap_opaque(this,1);
4664 }
4665 };
4666
4667 /// for fine-grain parallelism: call the apply method of an operator in a separate task
4668
4669 /// @param[in] op the operator working on our function
4670 /// @param[in] c full rank tensor holding the NS coefficients
4671 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4672 template <typename opT, typename R, size_t OPDIM>
4673 void do_apply_kernel(const opT* op, const Tensor<R>& c, const do_op_args<OPDIM>& args) {
4674
4675 tensorT result = op->apply(args.key, args.d, c, args.tol/args.fac/args.cnorm);
4676
4677 // Screen here to reduce communication cost of negligible data
4678 // and also to ensure we don't needlessly widen the tree when
4679 // applying the operator
4680 if (result.normf()> 0.3*args.tol/args.fac) {
4682 //woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4683 // UGLY BUT ADDED THE OPTIMIZATION BACK IN HERE EXPLICITLY/
4684 if (args.dest == world.rank()) {
4685 coeffs.send(args.dest, &nodeT::accumulate, result, coeffs, args.dest);
4686 }
4687 else {
4689 }
4690 }
4691 }
4692
4693 /// same as do_apply_kernel, but use full rank tensors as input and low rank tensors as output
4694
4695 /// @param[in] op the operator working on our function
4696 /// @param[in] c full rank tensor holding the NS coefficients
4697 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4698 /// @param[in] apply_targs TensorArgs with tightened threshold for accumulation
4699 /// @return nothing, but accumulate the result tensor into the destination node
4700 template <typename opT, typename R, size_t OPDIM>
4701 double do_apply_kernel2(const opT* op, const Tensor<R>& c, const do_op_args<OPDIM>& args,
4702 const TensorArgs& apply_targs) {
4703
4704 tensorT result_full = op->apply(args.key, args.d, c, args.tol/args.fac/args.cnorm);
4705 const double norm=result_full.normf();
4706
4707 // Screen here to reduce communication cost of negligible data
4708 // and also to ensure we don't needlessly widen the tree when
4709 // applying the operator
4710 // OPTIMIZATION NEEDED HERE ... CHANGING THIS TO TASK NOT SEND REMOVED
4711 // BUILTIN OPTIMIZATION TO SHORTCIRCUIT MSG IF DATA IS LOCAL
4712 if (norm > 0.3*args.tol/args.fac) {
4713
4714 small++;
4715 //double cpu0=cpu_time();
4716 coeffT result=coeffT(result_full,apply_targs);
4717 MADNESS_ASSERT(result.is_full_tensor() or result.is_svd_tensor());
4718 //double cpu1=cpu_time();
4719 //timer_lr_result.accumulate(cpu1-cpu0);
4720
4721 coeffs.task(args.dest, &nodeT::accumulate, result, coeffs, args.dest, apply_targs,
4723
4724 //woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4725 }
4726 return norm;
4727 }
4728
4729
4730
4731 /// same as do_apply_kernel2, but use low rank tensors as input and low rank tensors as output
4732
4733 /// @param[in] op the operator working on our function
4734 /// @param[in] coeff low rank tensor (GenTensor) holding the NS coefficients
4735 /// @param[in] args laziness holding norm of the coefficients, displacement, destination, ..
4736 /// @param[in] apply_targs TensorArgs with tightened threshold for accumulation
4737 /// @return nothing, but accumulate the result tensor into the destination node
4738 template <typename opT, typename R, size_t OPDIM>
4739 double do_apply_kernel3(const opT* op, const GenTensor<R>& coeff, const do_op_args<OPDIM>& args,
4740 const TensorArgs& apply_targs) {
4741
4742 coeffT result;
4743 if (2*OPDIM==NDIM) result= op->apply2_lowdim(args.key, args.d, coeff,
4744 args.tol/args.fac/args.cnorm, args.tol/args.fac);
4745 if (OPDIM==NDIM) result = op->apply2(args.key, args.d, coeff,
4746 args.tol/args.fac/args.cnorm, args.tol/args.fac);
4747
4748 const double result_norm=result.svd_normf();
4749
4750 if (result_norm> 0.3*args.tol/args.fac) {
4751 small++;
4752
4753 double cpu0=cpu_time();
4754 if (not result.is_of_tensortype(targs.tt)) result=result.convert(targs);
4755 double cpu1=cpu_time();
4756 timer_lr_result.accumulate(cpu1-cpu0);
4757
4758 // accumulate also expects result in SVD form
4759 coeffs.task(args.dest, &nodeT::accumulate, result, coeffs, args.dest, apply_targs,
4761// woT::task(world.rank(),&implT::accumulate_timer,time,TaskAttributes::hipri());
4762
4763 }
4764 return result_norm;
4765
4766 }
4767
4768 // volume of n-dimensional sphere of radius R
4769 double vol_nsphere(int n, double R) {
4770 return std::pow(madness::constants::pi,n*0.5)*std::pow(R,n)/std::tgamma(1+0.5*n);
4771 }
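 // Worked example: in 3d this reduces to the familiar sphere volume,
 // vol_nsphere(3, R) = pi^{3/2} R^3 / Gamma(5/2) = (4/3) pi R^3, so
 // vol_nsphere(3, 1.5) is roughly 14.1.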
4772
4773
4774 /// apply an operator on the coeffs c (at node key)
4775
4776 /// the result is accumulated inplace to this's tree at various FunctionNodes
4777 /// @param[in] op the operator to act on the source function
4778 /// @param[in] key key of the source FunctionNode of f which is processed
4779 /// @param[in] c coeffs of the FunctionNode of f which is processed
4780 template <typename opT, typename R>
4781 void do_apply(const opT* op, const keyT& key, const Tensor<R>& c) {
4783
4784 // working assumption here WAS that the operator is
4785 // isotropic and monotonically decreasing with distance
4786 // ... however, now we are using derivative Gaussian
4787 // expansions (and also non-cubic boxes), so isotropy is
4788 // violated. While not strictly monotonically decreasing,
4789 // the derivative gaussian is still such that once it
4790 // becomes negligible we are in the asymptotic region.
4791
4792 typedef typename opT::keyT opkeyT;
4793 constexpr auto opdim = opT::opdim;
4794 const opkeyT source = op->get_source_key(key);
4795
4796 // Tuning here is based on observation that with
4797 // sufficiently high-order wavelet relative to the
4798 // precision, that only nearest neighbor boxes contribute,
4799 // whereas for low-order wavelets more neighbors will
4800 // contribute. Sufficiently high is picked as
4801 // k>=2-log10(eps) which is our empirical rule for
4802 // efficiency/accuracy and code instrumentation has
4803 // previously indicated that (in 3D) just unit
4804 // displacements are invoked. The error decays as R^-(k+1),
4805 // and the number of boxes increases as R^d.
4806 //
4807 // Fac is the expected number of contributions to a given
4808 // box, so the error permitted per contribution will be
4809 // tol/fac
4810
4811 // radius of shell (nearest neighbor is diameter of 3 boxes, so radius=1.5)
4812 double radius = 1.5 + 0.33 * std::max(0.0, 2 - std::log10(thresh) -
4813 k); // 0.33 was 0.5
4814 double fac = vol_nsphere(NDIM, radius);
4815 // previously fac=10.0 selected empirically constrained by qmprop
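 // Worked example (assuming NDIM=3): with thresh = 1e-6 and k = 8 the formula
 // gives radius = 1.5 + 0.33*max(0, 2 + 6 - 8) = 1.5 and fac = vol_nsphere(3, 1.5),
 // roughly 14, i.e. each displacement may contribute an error of about tol/14.
 // A low-order run with k = 4 gives radius = 1.5 + 0.33*4 = 2.82 and fac of
 // roughly 94, reflecting the larger number of neighbor boxes that contribute.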
4816
4817 double cnorm = c.normf();
4818
4819 // BC handling:
4820 // - if operator is lattice-summed then treat this as nonperiodic (i.e. tell neighbor() to stay in simulation cell)
4821 // - if operator is NOT lattice-summed then obey BC (i.e. tell neighbor() to go outside the simulation cell along periodic dimensions)
4822 // - BUT user can force operator to treat its arguments as non-periodic (`op.set_domain_periodicity({true,true,true})`) so ... which dimensions of this function are treated as periodic by op?
4823 const array_of_bools<NDIM> this_is_treated_by_op_as_periodic =
4824 (op->particle() == 1)
4825 ? array_of_bools<NDIM>{false}.or_front(
4826 op->domain_is_periodic())
4827 : array_of_bools<NDIM>{false}.or_back(
4828 op->domain_is_periodic());
4829
4830 const auto default_distance_squared = [&](const auto &displacement)
4831 -> std::uint64_t {
4832 return displacement.distsq_bc(op->lattice_summed());
4833 };
4834 const auto default_skip_predicate = [&](const auto &displacement)
4835 -> bool {
4836 return false;
4837 };
4838 const auto for_each = [&](const auto &displacements,
4839 const auto &distance_squared,
4840 const auto &skip_predicate) -> std::optional<std::uint64_t> {
4841
4842 // used to screen estimated and actual contributions
4843 const double tol = truncate_tol(thresh, key);
4844
4845 // assume isotropic decaying kernel, screen in shell-wise fashion by
4846 // monitoring the decay of magnitude of contribution norms with the
4847 // distance ... as soon as we have processed a shell that contains at
4848 // least one displacement landing inside the simulation domain (see neighbor())
4849 // and none of its in-domain contributions is significant, stop.
4850 // a displacement is negligible if ||op|| * ||c|| <= tol / fac,
4851 // where fac takes into account the expected number of contributions to a given box.
4852 int nvalid = 1; // Counts #valid at each distance
4853 int nused = 1; // Counts #used at each distance
4854 std::optional<std::uint64_t> distsq;
4855
4856 // displacements to the kernel range boundary are typically of similar magnitude (modulo some variation),
4857 // so estimate the norm of the resulting contributions from a single probing displacement and skip all of them if that one is too small
4858 if constexpr (std::is_same_v<std::decay_t<decltype(displacements)>,BoxSurfaceDisplacementRange<opdim>>) {
4859 const auto &probing_displacement =
4860 displacements.probing_displacement();
4861 const double opnorm =
4862 op->norm(key.level(), probing_displacement, source);
4863 if (cnorm * opnorm <= tol / fac) {
4864 return {};
4865 }
4866 }
4867
4868 const auto disp_end = displacements.end();
4869 for (auto disp_it = displacements.begin(); disp_it != disp_end;
4870 ++disp_it) {
4871 const auto &displacement = *disp_it;
4872 if (skip_predicate(displacement)) continue;
4873
4874 keyT d;
4875 Key<NDIM - opdim> nullkey(key.level());
4876 MADNESS_ASSERT(op->particle() == 1 || op->particle() == 2);
4877 if (op->particle() == 1)
4878 d = displacement.merge_with(nullkey);
4879 else
4880 d = nullkey.merge_with(displacement);
4881
4882 // shell-wise screening: assumes displacements are grouped into shells sorted so that the operator decays with shell index. N.B. a lattice-summed decaying kernel is periodic (i.e. does decay w.r.t. r), so loop over shells of displacements sorted by distances modulated by periodicity (Key::distsq_bc)
4883 const uint64_t dsq = distance_squared(displacement);
4884 if (!distsq ||
4885 dsq != *distsq) { // Moved to next shell of neighbors
4886 if (nvalid > 0 && nused == 0 && dsq > 1) {
4887 // Have at least done the input box and all first
4888 // nearest neighbors, and none of the last set
4889 // of neighbors made significant contributions. Thus,
4890 // assuming monotonic decrease, we are done.
4891 break;
4892 }
4893 nused = 0;
4894 nvalid = 0;
4895 distsq = dsq;
4896 }
4897
4898 keyT dest = neighbor(key, d, this_is_treated_by_op_as_periodic);
4899 if (dest.is_valid()) {
4900 nvalid++;
4901 const double opnorm = op->norm(key.level(), displacement, source);
4902
4903 if (cnorm * opnorm > tol / fac) {
4904 tensorT result =
4905 op->apply(source, displacement, c, tol / fac / cnorm);
4906 if (result.normf() > 0.3 * tol / fac) {
4907 if (coeffs.is_local(dest))
4908 coeffs.send(dest, &nodeT::accumulate2, result, coeffs,
4909 dest);
4910 else
4911 coeffs.task(dest, &nodeT::accumulate2, result, coeffs,
4912 dest);
4913 nused++;
4914 }
4915 }
4916 }
4917 }
4918
4919 return distsq;
4920 };
4921
4922 // process "standard" displacements, screening assumes monotonic decay of the kernel
4923 // list of displacements sorted in order of increasing distance
4924 // N.B. if op is lattice-summed use periodic displacements, else use
4925 // non-periodic even if op treats any modes of this as periodic
4926 const std::vector<opkeyT> &disp = op->get_disp(key.level());
4927 const auto max_distsq_reached = for_each(disp, default_distance_squared, default_skip_predicate);
4928
4929 // for range-restricted kernels displacements to the boundary of the kernel range also need to be included
4930 // N.B. hard range restriction will result in slow decay of operator matrix elements for the displacements
4931 // to the range boundary, should use soft restriction or sacrifice precision
4932 if (op->range_restricted() && key.level() >= 1) {
4933
4934 std::array<std::optional<std::int64_t>, opdim> box_radius;
4935 std::array<std::optional<std::int64_t>, opdim> surface_thickness;
4936 auto &range = op->get_range();
4937 for (int d = 0; d != opdim; ++d) {
4938 if (range[d]) {
4939 box_radius[d] = range[d].N();
4940 surface_thickness[d] = range[d].finite_soft() ? 1 : 0;
4941 }
4942 }
4943
4945 // skip surface displacements that take us outside of the domain and/or were included in regular displacements
4946 // N.B. for lattice-summed axes the "filter" also maps the displacement back into the simulation cell
4947 if (max_distsq_reached)
4948 filter = BoxSurfaceDisplacementFilter<opdim>(/* domain_is_infinite= */ op->domain_is_periodic(), /* domain_is_periodic= */ op->lattice_summed(), range, default_distance_squared, *max_distsq_reached);
4949
4950 // this range iterates over the entire surface layer(s), and provides a probing displacement that can be used to screen out the entire box
4951 auto opkey = op->particle() == 1 ? key.template extract_front<opdim>() : key.template extract_front<opdim>();
4953 range_boundary_face_displacements(opkey, box_radius,
4954 surface_thickness,
4955 op->lattice_summed(), // along lattice-summed axes treat the box as periodic, make displacements to one side of the box
4956 filter);
4957 for_each(
4958 range_boundary_face_displacements,
4959 // surface displacements are not screened, all are included
4960 [](const auto &displacement) -> std::uint64_t { return 0; },
4961 default_skip_predicate);
4962 }
4963 }
4964
4965
4966 /// apply an operator on f to return this
4967 template <typename opT, typename R>
4968 void apply(opT& op, const FunctionImpl<R,NDIM>& f, bool fence) {
4970 MADNESS_ASSERT(!op.modified());
4971 typename dcT::const_iterator end = f.coeffs.end();
4972 for (typename dcT::const_iterator it=f.coeffs.begin(); it!=end; ++it) {
4973 // looping through all the coefficients in the source
4974 const keyT& key = it->first;
4975 const FunctionNode<R,NDIM>& node = it->second;
4976 if (node.has_coeff()) {
4977 if (node.coeff().dim(0) != k /* i.e. not a leaf */ || op.doleaves) {
4978 ProcessID p = FunctionDefaults<NDIM>::get_apply_randomize() ? world.random_proc() : coeffs.owner(key);
4979// woT::task(p, &implT:: template do_apply<opT,R>, &op, key, node.coeff()); //.full_tensor_copy() ????? why copy ????
4980 woT::task(p, &implT:: template do_apply<opT,R>, &op, key, node.coeff().reconstruct_tensor());
4981 }
4982 }
4983 }
4984 if (fence)
4985 world.gop.fence();
4986
4988// this->compressed=true;
4989// this->nonstandard=true;
4990// this->redundant=false;
4991
4992 }
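// Illustrative usage sketch, not part of the original source: this member is normally
// reached through the free function madness::apply() acting on a Function; the operator
// and typedef names below are taken from the MADNESS operator headers and shown here
// only as an assumed example, e.g. for a 3-D density rho and a Coulomb Green's function:
//
//   real_convolution_3d G = CoulombOperator(world, 1e-4, 1e-6);   // lo, thresh
//   real_function_3d v = madness::apply(G, rho);                  // drives FunctionImpl::apply(G, *rho_impl, fence)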
4993
4994
4995
4996 /// apply an operator on the coeffs c (at node key)
4997
4998 /// invoked by result; the result is accumulated inplace to this's tree at various FunctionNodes
4999 /// @param[in] op the operator to act on the source function
5000 /// @param[in] key key of the source FunctionNode of f which is processed (see "source")
5001 /// @param[in] coeff coeffs of FunctionNode being processed
5002 /// @param[in] do_kernel true: do the 0-disp only; false: do everything but the kernel
5003 /// @return max norm, and will modify or include new nodes in this' tree
5004 template <typename opT, typename R>
5005 double do_apply_directed_screening(const opT* op, const keyT& key, const coeffT& coeff,
5006 const bool& do_kernel) {
5008 // insert timer here
5009 typedef typename opT::keyT opkeyT;
5010
5011 // screening: contains all displacement keys that had small result norms
5012 std::list<opkeyT> blacklist;
5013
5014 constexpr auto opdim=opT::opdim;
5015 Key<NDIM-opdim> nullkey(key.level());
5016
5017 // source is that part of key that corresponds to those dimensions being processed
5018 const opkeyT source=op->get_source_key(key);
5019
5020 const double tol = truncate_tol(thresh, key);
5021
5022 // fac is the root of the number of contributing neighbors (1st shell)
5023 double fac=std::pow(3,NDIM*0.5);
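// e.g. (illustrative, not part of the original source): the first shell contains
// 3^NDIM boxes (the box plus all of its nearest neighbors), so fac = sqrt(3^NDIM):
// for NDIM=3 fac ~ 5.2 (sqrt of 27), for NDIM=6 fac = 27 (sqrt of 729).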
5024 double cnorm = coeff.normf();
5025
5026 // for accumulation: keep slightly tighter TensorArgs
5027 TensorArgs apply_targs(targs);
5028 apply_targs.thresh=tol/fac*0.03;
5029
5030 double maxnorm=0.0;
5031
5032 // for the kernel it may be more efficient to do the convolution in full rank
5033 tensorT coeff_full;
5034 // for partial application (exchange operator) it's more efficient to
5035 // do SVD tensors instead of tensortrains, because addition in apply
5036 // can be done in full form for the specific particle
5037 coeffT coeff_SVD=coeff.convert(TensorArgs(-1.0,TT_2D));
5038#ifdef HAVE_GENTENSOR
5039 coeff_SVD.get_svdtensor().orthonormalize(tol*GenTensor<T>::fac_reduce());
5040#endif
5041
5042 // BC handling:
5043 // - if operator is lattice-summed then treat this as nonperiodic (i.e. tell neighbor() to stay in simulation cell)
5044 // - if operator is NOT lattice-summed then obey BC (i.e. tell neighbor() to go outside the simulation cell along periodic dimensions)
5045 // - BUT user can force operator to treat its arguments as non-periodic (op.domain_is_simulation_cell(true))
5046 // so ... which dimensions of this function are treated as periodic by op?
5047 const array_of_bools<NDIM> this_is_treated_by_op_as_periodic = (op->particle() == 1) ? array_of_bools<NDIM>{false}.or_front(op->domain_is_periodic()) : array_of_bools<NDIM>{false}.or_back(op->domain_is_periodic());
5048
5049 // list of displacements sorted in order of increasing distance
5050 // N.B. if op is lattice-summed gives periodic displacements, else uses
5051 // non-periodic even if op treats any modes of this as periodic
5052 const std::vector<opkeyT>& disp = Displacements<opdim>().get_disp(key.level(), op->lattice_summed());
5053
5054 for (typename std::vector<opkeyT>::const_iterator it=disp.begin(); it != disp.end(); ++it) {
5055 const opkeyT& d = *it;
5056
5057 const int shell=d.distsq_bc(op->lattice_summed());
5058 if (do_kernel and (shell>0)) break;
5059 if ((not do_kernel) and (shell==0)) continue;
5060
5061 keyT disp1;
5062 if (op->particle()==1) disp1=it->merge_with(nullkey);
5063 else if (op->particle()==2) disp1=nullkey.merge_with(*it);
5064 else {
5065 MADNESS_EXCEPTION("confused particle in operator??",1);
5066 }
5067
5068 keyT dest = neighbor_in_volume(key, disp1);
5069
5070 if (not dest.is_valid()) continue;
5071
5072 // directed screening
5073 // working assumption here is that the operator is isotropic and
5074 // monotonically decreasing with distance
5075 bool screened=false;
5076 typename std::list<opkeyT>::const_iterator it2;
5077 for (it2=blacklist.begin(); it2!=blacklist.end(); it2++) {
5078 if (d.is_farther_out_than(*it2)) {
5079 screened=true;
5080 break;
5081 }
5082 }
5083 if (not screened) {
5084
5085 double opnorm = op->norm(key.level(), d, source);
5086 double norm=0.0;
5087
5088 if (cnorm*opnorm> tol/fac) {
5089
5090 double cost_ratio=op->estimate_costs(source, d, coeff_SVD, tol/fac/cnorm, tol/fac);
5091 // cost_ratio=1.5; // force low rank
5092 // cost_ratio=0.5; // force full rank
5093
5094 if (cost_ratio>0.0) {
5095
5096 do_op_args<opdim> args(source, d, dest, tol, fac, cnorm);
5097 norm=0.0;
5098 if (cost_ratio<1.0) {
5099 if (not coeff_full.has_data()) coeff_full=coeff.full_tensor_copy();
5100 norm=do_apply_kernel2(op, coeff_full,args,apply_targs);
5101 } else {
5102 if (2*opdim==NDIM) { // apply operator on one particle only
5103 norm=do_apply_kernel3(op,coeff_SVD,args,apply_targs);
5104 } else {
5105 norm=do_apply_kernel3(op,coeff,args,apply_targs);
5106 }
5107 }
5108 maxnorm=std::max(norm,maxnorm);
5109 }
5110
5111 } else if (shell >= 12) {
5112 break; // Assumes monotonic decay beyond nearest neighbor
5113 }
5114 if (norm<0.3*tol/fac) blacklist.push_back(d);
5115 }
5116 }
5117 return maxnorm;
5118 }
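// Illustrative sketch, not part of the original source: the blacklist above implements a
// directional screen. If the displacement d=(2,0,0) produced a contribution with
// norm < 0.3*tol/fac, then any later displacement that is_farther_out_than((2,0,0)),
// e.g. (3,0,0) or (4,1,0), is skipped without evaluating its operator norm, while
// displacements in other directions, e.g. (0,2,0), are still examined.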
5119
5120
5121 /// similar to apply, but for low rank coeffs
5122 template <typename opT, typename R>
5123 void apply_source_driven(opT& op, const FunctionImpl<R,NDIM>& f, bool fence) {
5125
5126 MADNESS_ASSERT(not op.modified());
5127 // looping through all the coefficients of the source f
5128 typename dcT::const_iterator end = f.get_coeffs().end();
5129 for (typename dcT::const_iterator it=f.get_coeffs().begin(); it!=end; ++it) {
5130
5131 const keyT& key = it->first;
5132 const coeffT& coeff = it->second.coeff();
5133
5134 if (coeff.has_data() and (coeff.rank()!=0)) {
5135 ProcessID p = FunctionDefaults<NDIM>::get_apply_randomize() ? world.random_proc() : coeffs.owner(key);
5136 woT::task(p, &implT:: template do_apply_directed_screening<opT,R>, &op, key, coeff, true);
5137 woT::task(p, &implT:: template do_apply_directed_screening<opT,R>, &op, key, coeff, false);
5138 }
5139 }
5140 if (fence) world.gop.fence();
5142 }
5143
5144 /// after apply we need to do some cleanup;
5145
5146 /// forces fence
5147 double finalize_apply();
5148
5149 /// after summing up we need to do some cleanup;
5150
5151 /// forces fence
5152 void finalize_sum();
5153
5154 /// traverse a non-existing tree, make its coeffs and apply an operator
5155
5156 /// invoked by result
5157 /// here we use the fact that the hi-dim NS coefficients on all scales are exactly
5158 /// the outer product of the underlying low-dim functions (also in NS form),
5159 /// so we don't need to construct the full hi-dim tree and then turn it into NS form.
5160 /// @param[in] apply_op the operator acting on the NS tree
5161 /// @param[in] fimpl the funcimpl of the function of particle 1
5162 /// @param[in] gimpl the funcimpl of the function of particle 2
5163 template<typename opT, std::size_t LDIM>
5164 void recursive_apply(opT& apply_op, const FunctionImpl<T,LDIM>* fimpl,
5165 const FunctionImpl<T,LDIM>* gimpl, const bool fence) {
5166
5167 //print("IN RECUR2");
5168 const keyT& key0=cdata.key0;
5169
5170 if (world.rank() == coeffs.owner(key0)) {
5171
5172 CoeffTracker<T,LDIM> ff(fimpl);
5173 CoeffTracker<T,LDIM> gg(gimpl);
5174
5175 typedef recursive_apply_op<opT,LDIM> coeff_opT;
5176 coeff_opT coeff_op(this,ff,gg,&apply_op);
5177
5178 typedef noop<T,NDIM> apply_opT;
5179 apply_opT apply_op;
5180
5182 woT::task(p, &implT:: template forward_traverse<coeff_opT,apply_opT>, coeff_op, apply_op, key0);
5183
5184 }
5185 if (fence) world.gop.fence();
5187 }
5188
5189 /// recursive part of recursive_apply
5190 template<typename opT, std::size_t LDIM>
5192 bool randomize() const {return true;}
5193
5195
5200
5201 // ctor
5205 const opT* apply_op) : result(result), iaf(iaf), iag(iag), apply_op(apply_op)
5206 {
5207 MADNESS_ASSERT(LDIM+LDIM==NDIM);
5208 }
5210 iag(other.iag), apply_op(other.apply_op) {}
5211
5212
5213 /// make the NS-coefficients and send off the application of the operator
5214
5215 /// @return a Future<bool,coeffT>(is_leaf,coeffT())
5216 std::pair<bool,coeffT> operator()(const Key<NDIM>& key) const {
5217
5218 // World& world=result->world;
5219 // break key into particles (these are the child keys, with datum1/2 come the parent keys)
5220 Key<LDIM> key1,key2;
5221 key.break_apart(key1,key2);
5222
5223 // the lo-dim functions should be in full tensor form
5224 const tensorT fcoeff=iaf.coeff(key1).full_tensor();
5225 const tensorT gcoeff=iag.coeff(key2).full_tensor();
5226
5227 // would this be a leaf node? If so, then its sum coeffs have already been
5228 // processed by the parent node's wavelet coeffs. Therefore we won't
5229 // process it any more.
5231 bool is_leaf=leaf_op(key,fcoeff,gcoeff);
5232
5233 if (not is_leaf) {
5234 // new coeffs are simply the hartree/kronecker/outer product --
5235 const std::vector<Slice>& s0=iaf.get_impl()->cdata.s0;
5236 const coeffT coeff = (apply_op->modified())
5237 ? outer(copy(fcoeff(s0)),copy(gcoeff(s0)),result->targs)
5238 : outer(fcoeff,gcoeff,result->targs);
5239
5240 // now send off the application
5241 tensorT coeff_full;
5243 double norm0=result->do_apply_directed_screening<opT,T>(apply_op, key, coeff, true);
5244
5245 result->task(p,&implT:: template do_apply_directed_screening<opT,T>,
5246 apply_op,key,coeff,false);
5247
5248 return finalize(norm0,key,coeff);
5249
5250 } else {
5251 return std::pair<bool,coeffT> (is_leaf,coeffT());
5252 }
5253 }
5254
5255 /// sole purpose is to wait for the kernel norm, wrap it and send it back to caller
5256 std::pair<bool,coeffT> finalize(const double kernel_norm, const keyT& key,
5257 const coeffT& coeff) const {
5258 const double thresh=result->get_thresh()*0.1;
5259 bool is_leaf=(kernel_norm<result->truncate_tol(thresh,key));
5260 if (key.level()<2) is_leaf=false;
5261 return std::pair<bool,coeffT> (is_leaf,coeff);
5262 }
5263
5264
5265 this_type make_child(const keyT& child) const {
5266
5267 // break key into particles
5268 Key<LDIM> key1, key2;
5269 child.break_apart(key1,key2);
5270
5271 return this_type(result,iaf.make_child(key1),iag.make_child(key2),apply_op);
5272 }
5273
5277 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5278 &this_type::forward_ctor),result,f1,g1,apply_op);
5279 }
5280
5282 const opT* apply_op1) {
5283 return this_type(r,f1,g1,apply_op1);
5284 }
5285
5286 template <typename Archive> void serialize(const Archive& ar) {
5287 ar & result & iaf & iag & apply_op;
5288 }
5289 };
5290
5291 /// traverse an existing tree and apply an operator
5292
5293 /// invoked by result
5294 /// @param[in] apply_op the operator acting on the NS tree
5295 /// @param[in] fimpl the funcimpl of the source function
5296 /// @param[in] rimpl a dummy function for recursive_op to insert data
5297 template<typename opT>
5298 void recursive_apply(opT& apply_op, const implT* fimpl, implT* rimpl, const bool fence) {
5299
5300 print("IN RECUR1");
5301
5302 const keyT& key0=cdata.key0;
5303
5304 if (world.rank() == coeffs.owner(key0)) {
5305
5306 typedef recursive_apply_op2<opT> coeff_opT;
5307 coeff_opT coeff_op(this,fimpl,&apply_op);
5308
5309 typedef noop<T,NDIM> apply_opT;
5310 apply_opT apply_op;
5311
5312 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
5313 coeff_op, apply_op, cdata.key0);
5314
5315 }
5316 if (fence) world.gop.fence();
5318 }
5319
5320 /// recursive part of recursive_apply
5321 template<typename opT>
5323 bool randomize() const {return true;}
5324
5327 typedef std::pair<bool,coeffT> argT;
5328
5329 mutable implT* result;
5330 ctT iaf; /// need this for randomization
5331 const opT* apply_op;
5332
5333 // ctor
5337
5339 iaf(other.iaf), apply_op(other.apply_op) {}
5340
5341
5342 /// send off the application of the operator
5343
5344 /// the first (core) neighbor (i.e. the box itself) is processed
5345 /// immediately, all other ones are shoved into the taskq
5346 /// @return a pair<bool,coeffT>(is_leaf,coeffT())
5347 argT operator()(const Key<NDIM>& key) const {
5348
5349 const coeffT& coeff=iaf.coeff();
5350
5351 if (coeff.has_data()) {
5352
5353 // now send off the application for all neighbor boxes
5355 result->task(p,&implT:: template do_apply_directed_screening<opT,T>,
5356 apply_op, key, coeff, false);
5357
5358 // process the core box
5359 double norm0=result->do_apply_directed_screening<opT,T>(apply_op,key,coeff,true);
5360
5361 if (iaf.is_leaf()) return argT(true,coeff);
5362 return finalize(norm0,key,coeff,result);
5363
5364 } else {
5365 const bool is_leaf=true;
5366 return argT(is_leaf,coeffT());
5367 }
5368 }
5369
5370 /// sole purpose is to wait for the kernel norm, wrap it and send it back to caller
5371 argT finalize(const double kernel_norm, const keyT& key,
5372 const coeffT& coeff, const implT* r) const {
5373 const double thresh=r->get_thresh()*0.1;
5374 bool is_leaf=(kernel_norm<r->truncate_tol(thresh,key));
5375 if (key.level()<2) is_leaf=false;
5376 return argT(is_leaf,coeff);
5377 }
5378
5379
5380 this_type make_child(const keyT& child) const {
5381 return this_type(result,iaf.make_child(child),apply_op);
5382 }
5383
5384 /// retrieve the coefficients (parent coeffs might be remote)
5386 Future<ctT> f1=iaf.activate();
5387
5388// Future<ctL> g1=g.activate();
5389// return h->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5390// &this_type::forward_ctor),h,f1,g1,particle);
5391
5392 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
5393 &this_type::forward_ctor),result,f1,apply_op);
5394 }
5395
5396 /// taskq-compatible ctor
5397 this_type forward_ctor(implT* result1, const ctT& iaf1, const opT* apply_op1) {
5398 return this_type(result1,iaf1,apply_op1);
5399 }
5400
5401 template <typename Archive> void serialize(const Archive& ar) {
5402 ar & result & iaf & apply_op;
5403 }
5404 };
5405
5406 /// Returns the square of the error norm in the box labeled by key
5407
5408 /// Assumed to be invoked locally but it would be easy to eliminate
5409 /// this assumption
5410 template <typename opT>
5411 double err_box(const keyT& key, const nodeT& node, const opT& func,
5412 int npt, const Tensor<double>& qx, const Tensor<double>& quad_phit,
5413 const Tensor<double>& quad_phiw) const {
5414
5415 std::vector<long> vq(NDIM);
5416 for (std::size_t i=0; i<NDIM; ++i)
5417 vq[i] = npt;
5418 tensorT fval(vq,false), work(vq,false), result(vq,false);
5419
5420 // Compute the "exact" function in this volume at npt points
5421 // where npt is usually this->npt+1.
5422 fcube(key, func, qx, fval);
5423
5424 // Transform into the scaling function basis of order npt
5425 double scale = pow(0.5,0.5*NDIM*key.level())*sqrt(FunctionDefaults<NDIM>::get_cell_volume());
5426 fval = fast_transform(fval,quad_phiw,result,work).scale(scale);
5427
5428 // Subtract to get the error ... the original coeffs are in the order k
5429 // basis but we just computed the coeffs in the order npt(=k+1) basis
5430 // so we can either use slices or an iterator macro.
5431 const tensorT coeff = node.coeff().full_tensor_copy();
5432 ITERATOR(coeff,fval(IND)-=coeff(IND););
5433 // flo note: we do want to keep a full tensor here!
5434
5435 // Compute the norm of what remains
5436 double err = fval.normf();
5437 return err*err;
5438 }
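// In formulas (sketch of what the code above computes): with the scale factor
//   s = 2^{-n*NDIM/2} * sqrt(cell_volume),
// the transform yields the order-(k+1) scaling coefficients of the exact function in this
// box; subtracting the stored order-k coefficients and taking normf()^2 gives
//   err_box ~ int_box |f(x) - (P_k f)(x)|^2 dx,
// which errsq_local() below sums over all local nodes that hold coefficients.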
5439
5440 template <typename opT>
5442 const implT* impl;
5443 const opT* func;
5444 int npt;
5448 public:
5449 do_err_box() = default;
5450
5454
5457
5458 double operator()(typename dcT::const_iterator& it) const {
5459 const keyT& key = it->first;
5460 const nodeT& node = it->second;
5461 if (node.has_coeff())
5462 return impl->err_box(key, node, *func, npt, qx, quad_phit, quad_phiw);
5463 else
5464 return 0.0;
5465 }
5466
5467 double operator()(double a, double b) const {
5468 return a+b;
5469 }
5470
5471 template <typename Archive>
5472 void serialize(const Archive& ar) {
5473 MADNESS_EXCEPTION("not yet", 1);
5474 }
5475 };
5476
5477 /// Returns the sum of squares of errors from local info ... no comms
5478 template <typename opT>
5479 double errsq_local(const opT& func) const {
5481 // Make quadrature rule of higher order
5482 const int npt = cdata.npt + 1;
5483 Tensor<double> qx, qw, quad_phi, quad_phiw, quad_phit;
5484 FunctionCommonData<T,NDIM>::_init_quadrature(k+1, npt, qx, qw, quad_phi, quad_phiw, quad_phit);
5485
5488 return world.taskq.reduce< double,rangeT,do_err_box<opT> >(range,
5489 do_err_box<opT>(this, &func, npt, qx, quad_phit, quad_phiw));
5490 }
5491
5492 /// Returns \c int(f(x),x) in local volume
5493 T trace_local() const;
5494
5496 double operator()(typename dcT::const_iterator& it) const {
5497 const nodeT& node = it->second;
5498 if (node.has_coeff()) {
5499 double norm = node.coeff().normf();
5500 return norm*norm;
5501 }
5502 else {
5503 return 0.0;
5504 }
5505 }
5506
5507 double operator()(double a, double b) const {
5508 return (a+b);
5509 }
5510
5511 template <typename Archive> void serialize(const Archive& ar) {
5512 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5513 }
5514 };
5515
5516
5517 /// Returns the square of the local norm ... no comms
5518 double norm2sq_local() const;
5519
5520 /// compute the inner product of this range with other
5521 template<typename R>
5525 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5526
5529 resultT operator()(typename dcT::const_iterator& it) const {
5530
5532 const keyT& key=it->first;
5533 const nodeT& fnode = it->second;
5534 if (fnode.has_coeff()) {
5535 if (other->coeffs.probe(it->first)) {
5536 const FunctionNode<R,NDIM>& gnode = other->coeffs.find(key).get()->second;
5537 if (gnode.has_coeff()) {
5538 if (gnode.coeff().dim(0) != fnode.coeff().dim(0)) {
5539 madness::print("INNER", it->first, gnode.coeff().dim(0),fnode.coeff().dim(0));
5540 MADNESS_EXCEPTION("functions have different k or compress/reconstruct error", 0);
5541 }
5542 if (leaves_only) {
5543 if (gnode.is_leaf() or fnode.is_leaf()) {
5544 sum += fnode.coeff().trace_conj(gnode.coeff());
5545 }
5546 } else {
5547 sum += fnode.coeff().trace_conj(gnode.coeff());
5548 }
5549 }
5550 }
5551 }
5552 return sum;
5553 }
5554
5555 resultT operator()(resultT a, resultT b) const {
5556 return (a+b);
5557 }
5558
5559 template <typename Archive> void serialize(const Archive& ar) {
5560 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5561 }
5562 };
5563
5564 /// Returns the inner product ASSUMING same distribution
5565
5566 /// handles compressed and redundant form
5567 template <typename R>
5571 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5572
5573 // make sure the states of the trees are consistent
5576 return world.taskq.reduce<resultT,rangeT,do_inner_local<R> >
5578 }
5579
5580
5581 /// compute the inner product of this range with other
5582 template<typename R>
5586 bool leaves_only=true;
5587 typedef TENSOR_RESULT_TYPE(T,R) resultT;
5588
5592 resultT operator()(typename dcT::const_iterator& it) const {
5593
5594 constexpr std::size_t LDIM=std::max(NDIM/2,std::size_t(1));
5595
5596 const keyT& key=it->first;
5597 const nodeT& fnode = it->second;
5598 if (not fnode.has_coeff()) return resultT(0.0); // probably internal nodes
5599
5600 // assuming all boxes (esp the low-dim ones) are local, i.e. the functions are replicated
5601 auto find_valid_parent = [](auto& key, auto& impl, auto&& find_valid_parent) {
5602 MADNESS_CHECK(impl->get_coeffs().owner(key)==impl->world.rank()); // make sure everything is local!
5603 if (impl->get_coeffs().probe(key)) return key;
5604 auto parentkey=key.parent();
5605 return find_valid_parent(parentkey, impl, find_valid_parent);
5606 };
5607
5608 // returns coefficients, empty if no functor present
5609 auto get_coeff = [&find_valid_parent](const auto& key, const auto& v_impl) {
5610 if ((v_impl.size()>0) and v_impl.front().get()) {
5611 auto impl=v_impl.front();
5612
5613// bool have_impl=impl.get();
5614// if (have_impl) {
5615 auto parentkey = find_valid_parent(key, impl, find_valid_parent);
5616 MADNESS_CHECK(impl->get_coeffs().probe(parentkey));
5617 typename decltype(impl->coeffs)::accessor acc;
5618 impl->get_coeffs().find(acc,parentkey);
5619 auto parentcoeff=acc->second.coeff();
5620 auto coeff=impl->parent_to_child(parentcoeff, parentkey, key);
5621 return coeff;
5622 } else {
5623 // get type of vector elements
5624 typedef typename std::decay_t<decltype(v_impl)>::value_type::element_type::typeT S;
5625// typedef typename std::decay_t<decltype(v_impl)>::value_type S;
5626 return GenTensor<S>();
5627// return GenTensor<typename std::decay_t<decltype(*impl)>::typeT>();
5628 }
5629 };
5630
5631 auto make_vector = [](auto& arg) {
5632 return std::vector<std::decay_t<decltype(arg)>>(1,arg);
5633 };
5634
5635
5636 Key<LDIM> key1,key2;
5637 key.break_apart(key1,key2);
5638
5639 auto func=dynamic_cast<CompositeFunctorInterface<R,NDIM,LDIM>* >(ket->functor.get());
5641
5642 MADNESS_CHECK_THROW(func->impl_ket_vector.size()==0 or func->impl_ket_vector.size()==1,
5643 "only one ket function supported in inner_on_demand");
5644 MADNESS_CHECK_THROW(func->impl_p1_vector.size()==0 or func->impl_p1_vector.size()==1,
5645 "only one p1 function supported in inner_on_demand");
5646 MADNESS_CHECK_THROW(func->impl_p2_vector.size()==0 or func->impl_p2_vector.size()==1,
5647 "only one p2 function supported in inner_on_demand");
5648 auto coeff_bra=fnode.coeff();
5649 auto coeff_ket=get_coeff(key,func->impl_ket_vector);
5650 auto coeff_v1=get_coeff(key1,make_vector(func->impl_m1));
5651 auto coeff_v2=get_coeff(key2,make_vector(func->impl_m2));
5652 auto coeff_p1=get_coeff(key1,func->impl_p1_vector);
5653 auto coeff_p2=get_coeff(key2,func->impl_p2_vector);
5654
5655 // construct |ket(1,2)> or |p(1)p(2)> or |p(1)p(2) ket(1,2)>
5656 double error=0.0;
5657 if (coeff_ket.has_data() and coeff_p1.has_data()) {
5658 pointwise_multiplier<LDIM> pm(key,coeff_ket);
5659 coeff_ket=pm(key,outer(coeff_p1,coeff_p2,TensorArgs(TT_FULL,-1.0)).full_tensor());
5660 error+=pm.error;
5661 } else if (coeff_ket.has_data() or coeff_p1.has_data()) {
5662 coeff_ket = (coeff_ket.has_data()) ? coeff_ket : outer(coeff_p1,coeff_p2);
5663 } else { // not ket and no p1p2
5664 MADNESS_EXCEPTION("confused ket/p1p2 in do_inner_local_on_demand",1);
5665 }
5666
5667 // construct (v(1) + v(2)) |ket(1,2)>
5668 coeffT v1v2ket;
5669 if (coeff_v1.has_data()) {
5670 pointwise_multiplier<LDIM> pm(key,coeff_ket);
5671 v1v2ket = pm(key,coeff_v1.full_tensor(), 1);
5672 error+=pm.error;
5673 v1v2ket+= pm(key,coeff_v2.full_tensor(), 2);
5674 error+=pm.error;
5675 } else {
5676 v1v2ket = coeff_ket;
5677 }
5678
5679 resultT result;
5680 if (func->impl_eri) { // project bra*ket onto eri, avoid multiplication with eri
5681 MADNESS_CHECK(func->impl_eri->get_functor()->provides_coeff());
5682 coeffT coeff_eri=func->impl_eri->get_functor()->coeff(key).full_tensor();
5683 pointwise_multiplier<LDIM> pm(key,v1v2ket);
5684 tensorT braket=pm(key,coeff_bra.full_tensor_copy().conj());
5685 error+=pm.error;
5686 if (error>1.e-3) print("error in key",key,error);
5687 result=coeff_eri.full_tensor().trace(braket);
5688
5689 } else { // no eri, project ket onto bra
5690 result=coeff_bra.full_tensor_copy().trace_conj(v1v2ket.full_tensor_copy());
5691 }
5692 return result;
5693 }
5694
5695 resultT operator()(resultT a, resultT b) const {
5696 return (a+b);
5697 }
5698
5699 template <typename Archive> void serialize(const Archive& ar) {
5700 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
5701 }
5702 };
5703
5704 /// Returns the inner product of this with function g constructed on-the-fly
5705
5706 /// the leaf boxes of this' MRA tree defines the inner product
5707 template <typename R>
5708 TENSOR_RESULT_TYPE(T,R) inner_local_on_demand(const FunctionImpl<R,NDIM>& gimpl) const {
5711
5715 do_inner_local_on_demand<R>(this, &gimpl));
5716 }
5717
5718 /// Type of the entry in the map returned by make_key_vec_map
5719 typedef std::vector< std::pair<int,const coeffT*> > mapvecT;
5720
5721 /// Type of the map returned by make_key_vec_map
5723
5724 /// Adds keys to union of local keys with specified index
5725 void add_keys_to_map(mapT* map, int index) const {
5726 typename dcT::const_iterator end = coeffs.end();
5727 for (typename dcT::const_iterator it=coeffs.begin(); it!=end; ++it) {
5728 typename mapT::accessor acc;
5729 const keyT& key = it->first;
5730 const FunctionNode<T,NDIM>& node = it->second;
5731 if (node.has_coeff()) {
5732 [[maybe_unused]] auto inserted = map->insert(acc,key);
5733 acc->second.push_back(std::make_pair(index,&(node.coeff())));
5734 }
5735 }
5736 }
5737
5738 /// Returns map of union of local keys to vector of indexes of functions containing that key
5739
5740 /// Local concurrency and synchronization only; no communication
5741 static
5742 mapT
5743 make_key_vec_map(const std::vector<const FunctionImpl<T,NDIM>*>& v) {
5744 mapT map(100000);
5745 // This loop must be parallelized
5746 for (unsigned int i=0; i<v.size(); i++) {
5747 //v[i]->add_keys_to_map(&map,i);
5748 v[i]->world.taskq.add(*(v[i]), &FunctionImpl<T,NDIM>::add_keys_to_map, &map, int(i));
5749 }
5750 if (v.size()) v[0]->world.taskq.fence();
5751 return map;
5752 }
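// Illustrative sketch, not part of the original source: the returned map sends each key
// present in any of the inputs to the list of (function index, coefficient pointer) pairs
// that hold data for that key. With two hypothetical impls fA and fB:
//
//   std::vector<const FunctionImpl<T,NDIM>*> v{&fA, &fB};
//   mapT m = make_key_vec_map(v);
//   // a key held by both trees maps to { {0, &coeff_of_fA}, {1, &coeff_of_fB} },
//   // a key held only by fB maps to { {1, &coeff_of_fB} }.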
5753
5754#if 0
5755// Original
5756 template <typename R>
5757 static void do_inner_localX(const typename mapT::iterator lstart,
5758 const typename mapT::iterator lend,
5759 typename FunctionImpl<R,NDIM>::mapT* rmap_ptr,
5760 const bool sym,
5761 Tensor< TENSOR_RESULT_TYPE(T,R) >* result_ptr,
5762 Mutex* mutex) {
5763 Tensor< TENSOR_RESULT_TYPE(T,R) >& result = *result_ptr;
5764 Tensor< TENSOR_RESULT_TYPE(T,R) > r(result.dim(0),result.dim(1));
5765 for (typename mapT::iterator lit=lstart; lit!=lend; ++lit) {
5766 const keyT& key = lit->first;
5767 typename FunctionImpl<R,NDIM>::mapT::iterator rit=rmap_ptr->find(key);
5768 if (rit != rmap_ptr->end()) {
5769 const mapvecT& leftv = lit->second;
5770 const typename FunctionImpl<R,NDIM>::mapvecT& rightv =rit->second;
5771 const int nleft = leftv.size();
5772 const int nright= rightv.size();
5773
5774 for (int iv=0; iv<nleft; iv++) {
5775 const int i = leftv[iv].first;
5776 const GenTensor<T>* iptr = leftv[iv].second;
5777
5778 for (int jv=0; jv<nright; jv++) {
5779 const int j = rightv[jv].first;
5780 const GenTensor<R>* jptr = rightv[jv].second;
5781
5782 if (!sym || (sym && i<=j))
5783 r(i,j) += iptr->trace_conj(*jptr);
5784 }
5785 }
5786 }
5787 }
5788 mutex->lock();
5789 result += r;
5790 mutex->unlock();
5791 }
5792#else
5793 template <typename R>
5794 static void do_inner_localX(const typename mapT::iterator lstart,
5795 const typename mapT::iterator lend,
5796 typename FunctionImpl<R,NDIM>::mapT* rmap_ptr,
5797 const bool sym,
5798 Tensor< TENSOR_RESULT_TYPE(T,R) >* result_ptr,
5799 Mutex* mutex) {
5800 Tensor< TENSOR_RESULT_TYPE(T,R) >& result = *result_ptr;
5801 //Tensor< TENSOR_RESULT_TYPE(T,R) > r(result.dim(0),result.dim(1));
5802 for (typename mapT::iterator lit=lstart; lit!=lend; ++lit) {
5803 const keyT& key = lit->first;
5804 typename FunctionImpl<R,NDIM>::mapT::iterator rit=rmap_ptr->find(key);
5805 if (rit != rmap_ptr->end()) {
5806 const mapvecT& leftv = lit->second;
5807 const typename FunctionImpl<R,NDIM>::mapvecT& rightv =rit->second;
5808 const size_t nleft = leftv.size();
5809 const size_t nright= rightv.size();
5810
5811 unsigned int size = leftv[0].second->size();
5812 Tensor<T> Left(nleft, size);
5813 Tensor<R> Right(nright, size);
5814 Tensor< TENSOR_RESULT_TYPE(T,R)> r(nleft, nright);
5815 for(unsigned int iv = 0; iv < nleft; ++iv) Left(iv,_) = (*(leftv[iv].second)).full_tensor();
5816 for(unsigned int jv = 0; jv < nright; ++jv) Right(jv,_) = (*(rightv[jv].second)).full_tensor();
5817 // call mxmT from mxm.h in tensor
5818 if(TensorTypeData<T>::iscomplex) Left = Left.conj(); // Should handle complex case and leave real case alone
5819 mxmT(nleft, nright, size, r.ptr(), Left.ptr(), Right.ptr());
5820 mutex->lock();
5821 for(unsigned int iv = 0; iv < nleft; ++iv) {
5822 const int i = leftv[iv].first;
5823 for(unsigned int jv = 0; jv < nright; ++jv) {
5824 const int j = rightv[jv].first;
5825 if (!sym || (sym && i<=j)) result(i,j) += r(iv,jv);
5826 }
5827 }
5828 mutex->unlock();
5829 }
5830 }
5831 }
5832#endif
5833
5834#if 0
5835// Original
5836 template <typename R, typename = std::enable_if_t<std::is_floating_point_v<R>>>
5837 static void do_dot_localX(const typename mapT::iterator lstart,
5838 const typename mapT::iterator lend,
5839 typename FunctionImpl<R, NDIM>::mapT* rmap_ptr,
5840 const bool sym,
5841 Tensor<TENSOR_RESULT_TYPE(T, R)>* result_ptr,
5842 Mutex* mutex) {
5843 if (TensorTypeData<T>::iscomplex) MADNESS_EXCEPTION("no complex trace in LowRankTensor, sorry", 1);
5844 Tensor<TENSOR_RESULT_TYPE(T, R)>& result = *result_ptr;
5845 Tensor<TENSOR_RESULT_TYPE(T, R)> r(result.dim(0), result.dim(1));
5846 for (typename mapT::iterator lit = lstart; lit != lend; ++lit) {
5847 const keyT& key = lit->first;
5848 typename FunctionImpl<R, NDIM>::mapT::iterator rit = rmap_ptr->find(key);
5849 if (rit != rmap_ptr->end()) {
5850 const mapvecT& leftv = lit->second;
5851 const typename FunctionImpl<R, NDIM>::mapvecT& rightv = rit->second;
5852 const int nleft = leftv.size();
5853 const int nright = rightv.size();
5854
5855 for (int iv = 0; iv < nleft; iv++) {
5856 const int i = leftv[iv].first;
5857 const GenTensor<T>* iptr = leftv[iv].second;
5858
5859 for (int jv = 0; jv < nright; jv++) {
5860 const int j = rightv[jv].first;
5861 const GenTensor<R>* jptr = rightv[jv].second;
5862
5863 if (!sym || (sym && i <= j))
5864 r(i, j) += iptr->trace_conj(*jptr);
5865 }
5866 }
5867 }
5868 }
5869 mutex->lock();
5870 result += r;
5871 mutex->unlock();
5872 }
5873#else
5874 template <typename R>
5875 static void do_dot_localX(const typename mapT::iterator lstart,
5876 const typename mapT::iterator lend,
5877 typename FunctionImpl<R, NDIM>::mapT* rmap_ptr,
5878 const bool sym,
5879 Tensor<TENSOR_RESULT_TYPE(T, R)>* result_ptr,
5880 Mutex* mutex) {
5881 Tensor<TENSOR_RESULT_TYPE(T, R)>& result = *result_ptr;
5882 // Tensor<TENSOR_RESULT_TYPE(T, R)> r(result.dim(0), result.dim(1));
5883 for (typename mapT::iterator lit = lstart; lit != lend; ++lit) {
5884 const keyT& key = lit->first;
5885 typename FunctionImpl<R, NDIM>::mapT::iterator rit = rmap_ptr->find(key);
5886 if (rit != rmap_ptr->end()) {
5887 const mapvecT& leftv = lit->second;
5888 const typename FunctionImpl<R, NDIM>::mapvecT& rightv = rit->second;
5889 const size_t nleft = leftv.size();
5890 const size_t nright= rightv.size();
5891
5892 unsigned int size = leftv[0].second->size();
5893 Tensor<T> Left(nleft, size);
5894 Tensor<R> Right(nright, size);
5895 Tensor< TENSOR_RESULT_TYPE(T, R)> r(nleft, nright);
5896 for(unsigned int iv = 0; iv < nleft; ++iv) Left(iv, _) = (*(leftv[iv].second)).full_tensor();
5897 for(unsigned int jv = 0; jv < nright; ++jv) Right(jv, _) = (*(rightv[jv].second)).full_tensor();
5898 // call mxmT from mxm.h in tensor
5899 mxmT(nleft, nright, size, r.ptr(), Left.ptr(), Right.ptr());
5900 mutex->lock();
5901 for(unsigned int iv = 0; iv < nleft; ++iv) {
5902 const int i = leftv[iv].first;
5903 for(unsigned int jv = 0; jv < nright; ++jv) {
5904 const int j = rightv[jv].first;
5905 if (!sym || (sym && i <= j)) result(i, j) += r(iv, jv);
5906 }
5907 }
5908 mutex->unlock();
5909 }
5910 }
5911 }
5912#endif
5913
5914 static double conj(float x) {
5915 return x;
5916 }
5917
5918 static std::complex<double> conj(const std::complex<double> x) {
5919 return std::conj(x);
5920 }
5921
5922 template <typename R>
5923 static Tensor< TENSOR_RESULT_TYPE(T,R) >
5924 inner_local(const std::vector<const FunctionImpl<T,NDIM>*>& left,
5925 const std::vector<const FunctionImpl<R,NDIM>*>& right,
5926 bool sym) {
5927
5928 // This is basically a sparse matrix^T * matrix product
5929 // Rij = sum(k) Aki * Bkj
5930 // where i and j index functions and k index the wavelet coeffs
5931 // eventually the goal is this structure (don't have jtile yet)
5932 //
5933 // do in parallel tiles of k (tensors of coeffs)
5934 // do tiles of j
5935 // do i
5936 // do j in jtile
5937 // do k in ktile
5938 // Rij += Aki*Bkj
5939
5940 mapT lmap = make_key_vec_map(left);
5941 typename FunctionImpl<R,NDIM>::mapT rmap;
5942 auto* rmap_ptr = (typename FunctionImpl<R,NDIM>::mapT*)(&lmap);
5943 if ((std::vector<const FunctionImpl<R,NDIM>*>*)(&left) != &right) {
5944 rmap = FunctionImpl<R,NDIM>::make_key_vec_map(right);
5945 rmap_ptr = &rmap;
5946 }
5947
5948 size_t chunk = (lmap.size()-1)/(3*4*5)+1;
5949
5950 Tensor< TENSOR_RESULT_TYPE(T,R) > r(left.size(), right.size());
5951 Mutex mutex;
5952
5953 typename mapT::iterator lstart=lmap.begin();
5954 while (lstart != lmap.end()) {
5955 typename mapT::iterator lend = lstart;
5956 advance(lend,chunk);
5957 left[0]->world.taskq.add(&FunctionImpl<T,NDIM>::do_inner_localX<R>, lstart, lend, rmap_ptr, sym, &r, &mutex);
5958 lstart = lend;
5959 }
5960 left[0]->world.taskq.fence();
5961
5962 if (sym) {
5963 for (long i=0; i<r.dim(0); i++) {
5964 for (long j=0; j<i; j++) {
5965 TENSOR_RESULT_TYPE(T,R) sum = r(i,j)+conj(r(j,i));
5966 r(i,j) = sum;
5967 r(j,i) = conj(sum);
5968 }
5969 }
5970 }
5971 return r;
5972 }
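// Illustrative sketch, not part of the original source: conceptually inner_local() computes
// the (possibly rectangular) matrix of pairwise inner products
//
//   r(i,j) = sum_{keys} conj(coeffs of left[i] at key) . (coeffs of right[j] at key),
//
// chunked over the key map and accumulated under the mutex; when sym==true only the
// triangle with i<=j is computed and the remainder is filled in by hermitian symmetry.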
5973
5974 template <typename R>
5975 static Tensor<TENSOR_RESULT_TYPE(T, R)>
5976 dot_local(const std::vector<const FunctionImpl<T, NDIM>*>& left,
5977 const std::vector<const FunctionImpl<R, NDIM>*>& right,
5978 bool sym) {
5979
5980 // This is basically a sparse matrix * matrix product
5981 // Rij = sum(k) Aik * Bkj
5982 // where i and j index functions and k index the wavelet coeffs
5983 // eventually the goal is this structure (don't have jtile yet)
5984 //
5985 // do in parallel tiles of k (tensors of coeffs)
5986 // do tiles of j
5987 // do i
5988 // do j in jtile
5989 // do k in ktile
5990 // Rij += Aik*Bkj
5991
5992 mapT lmap = make_key_vec_map(left);
5993 typename FunctionImpl<R, NDIM>::mapT rmap;
5994 auto* rmap_ptr = (typename FunctionImpl<R, NDIM>::mapT*)(&lmap);
5995 if ((std::vector<const FunctionImpl<R, NDIM>*>*)(&left) != &right) {
5996 rmap = FunctionImpl<R, NDIM>::make_key_vec_map(right);
5997 rmap_ptr = &rmap;
5998 }
5999
6000 size_t chunk = (lmap.size() - 1) / (3 * 4 * 5) + 1;
6001
6002 Tensor<TENSOR_RESULT_TYPE(T, R)> r(left.size(), right.size());
6003 Mutex mutex;
6004
6005 typename mapT::iterator lstart=lmap.begin();
6006 while (lstart != lmap.end()) {
6007 typename mapT::iterator lend = lstart;
6008 advance(lend, chunk);
6009 left[0]->world.taskq.add(&FunctionImpl<T, NDIM>::do_dot_localX<R>, lstart, lend, rmap_ptr, sym, &r, &mutex);
6010 lstart = lend;
6011 }
6012 left[0]->world.taskq.fence();
6013
6014 // sym is for hermiticity
6015 if (sym) {
6016 for (long i = 0; i < r.dim(0); i++) {
6017 for (long j = 0; j < i; j++) {
6018 TENSOR_RESULT_TYPE(T, R) sum = r(i, j) + conj(r(j, i));
6019 r(i, j) = sum;
6020 r(j, i) = conj(sum);
6021 }
6022 }
6023 }
6024 return r;
6025 }
6026
6027 template <typename R>
6029 {
6030 static_assert(!std::is_same<R, int>::value &&
6031 std::is_same<R, int>::value,
6032 "Compilation failed because you wanted to know the type; see below:");
6033 }
6034
6035 /// invoked by result
6036
6037 /// contract 2 functions f(x,z) = \int g(x,y) * h(y,z) dy
6038 /// @tparam CDIM: the dimension of the contraction variable (y)
6039 /// @tparam NDIM: the dimension of the result (x,z)
6040 /// @tparam LDIM: the dimension of g(x,y)
6041 /// @tparam KDIM: the dimension of h(y,z)
6042 template<typename Q, std::size_t LDIM, typename R, std::size_t KDIM,
6043 std::size_t CDIM = (KDIM + LDIM - NDIM) / 2>
6044 void partial_inner(const FunctionImpl<Q, LDIM>& g, const FunctionImpl<R, KDIM>& h,
6045 const std::array<int, CDIM> v1, const std::array<int, CDIM> v2) {
6046
6047 typedef std::multimap<Key<NDIM>, std::list<Key<CDIM>>> contractionmapT;
6048 //double wall_get_lists=0.0;
6049 //double wall_recur=0.0;
6050 //double wall_contract=0.0;
6053
6054 // auto print_map = [](const auto& map) {
6055 // for (const auto& kv : map) print(kv.first,"--",kv.second);
6056 // };
6057 // logical constness, not bitwise constness
6058 FunctionImpl<Q,LDIM>& g_nc=const_cast<FunctionImpl<Q,LDIM>&>(g);
6059 FunctionImpl<R,KDIM>& h_nc=const_cast<FunctionImpl<R,KDIM>&>(h);
6060
6061 std::list<contractionmapT> all_contraction_maps;
6062 for (std::size_t n=0; n<nmax; ++n) {
6063
6064 // list of nodes with d coefficients (and their parents)
6065 //double wall0 = wall_time();
6066 auto [g_ijlist, g_jlist] = g.get_contraction_node_lists(n, v1);
6067 auto [h_ijlist, h_jlist] = h.get_contraction_node_lists(n, v2);
6068 if ((g_ijlist.size() == 0) and (h_ijlist.size() == 0)) break;
6069 //double wall1 = wall_time();
6070 //wall_get_lists += (wall1 - wall0);
6071 //wall0 = wall1;
6072// print("g_jlist");
6073// for (const auto& kv : g_jlist) print(kv.first,kv.second);
6074// print("h_jlist");
6075// for (const auto& kv : h_jlist) print(kv.first,kv.second);
6076
6077 // next lines will insert s nodes into g and h -> possible race condition!
6078 bool this_first = true; // are the remaining indices of g before those of g: f(x,z) = g(x,y) h(y,z)
6079 // CDIM, NDIM, KDIM
6080 contractionmapT contraction_map = g_nc.recur_down_for_contraction_map(
6081 g_nc.key0(), g_nc.get_coeffs().find(g_nc.key0()).get()->second, v1, v2,
6082 h_ijlist, h_jlist, this_first, thresh);
6083
6084 this_first = false;
6085 // CDIM, NDIM, LDIM
6086 auto hnode0=h_nc.get_coeffs().find(h_nc.key0()).get()->second;
6087 contractionmapT contraction_map1 = h_nc.recur_down_for_contraction_map(
6088 h_nc.key0(), hnode0, v2, v1,
6089 g_ijlist, g_jlist, this_first, thresh);
6090
6091 // will contain duplicate entries
6092 contraction_map.merge(contraction_map1);
6093 // turn multimap into a map of list
6094 auto it = contraction_map.begin();
6095 while (it != contraction_map.end()) {
6096 auto it_end = contraction_map.upper_bound(it->first);
6097 auto it2 = it;
6098 it2++;
6099 while (it2 != it_end) {
6100 it->second.splice(it->second.end(), it2->second);
6101 it2 = contraction_map.erase(it2);
6102 }
6103 it = it_end;
6104 }
6105// print("thresh ",thresh);
6106// print("contraction list size",contraction_map.size());
6107
6108 // remove all double entries
6109 for (auto& elem: contraction_map) {
6110 elem.second.sort();
6111 elem.second.unique();
6112 }
6113 //wall1 = wall_time();
6114 //wall_recur += (wall1 - wall0);
6115// if (n==2) {
6116// print("contraction map for n=", n);
6117// print_map(contraction_map);
6118// }
6119 all_contraction_maps.push_back(contraction_map);
6120
6121 long mapsize=contraction_map.size();
6122 if (mapsize==0) break;
6123 }
6124
6125
6126 // finally do the contraction
6127 for (const auto& contraction_map : all_contraction_maps) {
6128 for (const auto& key_list : contraction_map) {
6129 const Key<NDIM>& key=key_list.first;
6130 const std::list<Key<CDIM>>& list=key_list.second;
6131 woT::task(coeffs.owner(key), &implT:: template partial_inner_contract<Q,LDIM,R,KDIM>,
6132 &g,&h,v1,v2,key,list);
6133 }
6134 }
6135 }
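// Illustrative sketch, not part of the original source: for f(x,z) = int g(x,y) h(y,z) dy
// with 1-D x, y, z one has LDIM=KDIM=2, CDIM=1, NDIM=2, and the call on the impl of f
// would look like
//
//   std::array<int,1> v1{1};   // y is dimension 1 of g(x,y)
//   std::array<int,1> v2{0};   // y is dimension 0 of h(y,z)
//   f_impl->partial_inner(*g_impl, *h_impl, v1, v2);
//
// where the hypothetical names f_impl, g_impl, h_impl stand for the FunctionImpl objects
// of f, g and h respectively.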
6136
6137 /// for contracting two functions f(x,z) = \int g(x,y) h(y,z) dy
6138
6139 /// find all nodes with d coefficients and return a list of complete keys and of
6140 /// keys holding only the y dimension, also the maximum norm of all d for the j dimension
6141 /// @param[in] n the scale
6142 /// @param[in] v array holding the indices of the integration variable
6143 /// @return ijlist: list of all nodes with d coeffs; jlist: j-part of ij list only
6144 template<std::size_t CDIM>
6145 std::tuple<std::set<Key<NDIM>>, std::map<Key<CDIM>,double>>
6146 get_contraction_node_lists(const std::size_t n, const std::array<int, CDIM>& v) const {
6147
6148 const auto& cdata=get_cdata();
6149 auto has_d_coeffs = [&cdata](const coeffT& coeff) {
6150 if (coeff.has_no_data()) return false;
6151 return (coeff.dim(0)==2*cdata.k);
6152 };
6153
6154 // keys to be contracted in g
6155 std::set<Key<NDIM>> ij_list; // full key
6156 std::map<Key<CDIM>,double> j_list; // only that dimension that will be contracted
6157
6158 for (auto it=get_coeffs().begin(); it!=get_coeffs().end(); ++it) {
6159 const Key<NDIM>& key=it->first;
6160 const FunctionNode<T,NDIM>& node=it->second;
6161 if ((key.level()==n) and (has_d_coeffs(node.coeff()))) {
6162 ij_list.insert(key);
6163 Vector<Translation,CDIM> j_trans;
6164 for (std::size_t i=0; i<CDIM; ++i) j_trans[i]=key.translation()[v[i]];
6165 Key<CDIM> jkey(n,j_trans);
6166 const double max_d_norm=j_list[jkey];
6167 j_list.insert_or_assign(jkey,std::max(max_d_norm,node.get_dnorm()));
6168 Key<CDIM> parent_jkey=jkey.parent();
6169 while (j_list.count(parent_jkey)==0) {
6170 j_list.insert({parent_jkey,1.0});
6171 parent_jkey=parent_jkey.parent();
6172 }
6173 }
6174 }
6175 return std::make_tuple(ij_list,j_list);
6176 }
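// Illustrative sketch, not part of the original source: for NDIM=2 and v={1}
// (i.e. the second dimension is the contraction variable), a node at key (n, [lx,ly])
// holding d coefficients contributes
//   ij_list: (n, [lx,ly])
//   j_list : (n, [ly]) -> maximum d-norm seen for that column, plus all parents of
//            (n, [ly]) inserted with the placeholder value 1.0.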
6177
6178 /// make a map of all nodes that will contribute to a partial inner product
6179
6180 /// given the list of d coefficient-holding nodes of the other function:
6181 /// recur down h if snorm * dnorm > tol and key n-jx is in the other's ij-list. Make s
6182 /// coefficients if necessary. Make a list of nodes n-ijk as map(n-ik, list(j)).
6183 ///
6184 /// !! WILL ADD NEW S NODES TO THIS TREE THAT MUST BE REMOVED TO AVOID INCONSISTENT TREE STRUCTURE !!
6185 ///
6186 /// @param[in] key for recursion
6187 /// @param[in] node corresponds to key
6188 /// @param[in] v_this this' dimension that are contracted
6189 /// @param[in] v_other other's dimension that are contracted
6190 /// @param[in] ij_other_list list of nodes of the other function that will be contracted (and their parents)
6191 /// @param[in] j_other_list list of column nodes of the other function that will be contracted (and their parents)
6192 /// @param[in] max_d_norm max d coeff norm of the nodes in j_list
6193 /// @param[in] this_first are the remaining coeffs of this functions first or last in the result function
6194 /// @param[in] thresh threshold for including nodes in the contraction: snorm*dnorm > thresh
6195 /// @tparam CDIM dimension to be contracted
6196 /// @tparam ODIM dimensions of the other function
6197 /// @tparam FDIM dimensions of the final function
6198 template<std::size_t CDIM, std::size_t ODIM, std::size_t FDIM=NDIM+ODIM-2*CDIM>
6199 std::multimap<Key<FDIM>, std::list<Key<CDIM>>> recur_down_for_contraction_map(
6200 const keyT& key, const nodeT& node,
6201 const std::array<int,CDIM>& v_this,
6202 const std::array<int,CDIM>& v_other,
6203 const std::set<Key<ODIM>>& ij_other_list,
6204 const std::map<Key<CDIM>,double>& j_other_list,
6205 bool this_first, const double thresh) {
6206
6207 std::multimap<Key<FDIM>, std::list<Key<CDIM>>> contraction_map;
6208
6209 // fast return if the other function has no d coeffs
6210 if (j_other_list.empty()) return contraction_map;
6211
6212 // continue recursion if this node may be contracted with the j column
6213 // extract relevant node translations from this node
6214 const auto j_this_key=key.extract_key(v_this);
6215
6216// print("\nkey, j_this_key", key, j_this_key);
6217 const double max_d_norm=j_other_list.find(j_this_key)->second;
6218 const bool sd_norm_product_large = node.get_snorm() * max_d_norm > truncate_tol(thresh,key);
6219// print("sd_product_norm",node.get_snorm() * max_d_norm, thresh);
6220
6221 // end recursion if we have reached the final scale n
6222 // with which nodes from other will this node be contracted?
6223 bool final_scale=key.level()==ij_other_list.begin()->level();
6224 if (final_scale and sd_norm_product_large) {
6225 for (auto& other_key : ij_other_list) {
6226 const auto j_other_key=other_key.extract_key(v_other);
6227 if (j_this_key != j_other_key) continue;
6228 auto i_key=key.extract_complement_key(v_this);
6229 auto k_key=other_key.extract_complement_key(v_other);
6230// print("key, ij_other_key",key,other_key);
6231// print("i, k, j key",i_key, k_key, j_this_key);
6232 Key<FDIM> ik_key=(this_first) ? i_key.merge_with(k_key) : k_key.merge_with(i_key);
6233// print("ik_key",ik_key);
6234// MADNESS_CHECK(contraction_map.count(ik_key)==0);
6235 contraction_map.insert(std::make_pair(ik_key,std::list<Key<CDIM>>{j_this_key}));
6236 }
6237 return contraction_map;
6238 }
6239
6240 bool continue_recursion = (j_other_list.count(j_this_key)==1);
6241 if (not continue_recursion) return contraction_map;
6242
6243
6244 // continue recursion if norms are large
6245 continue_recursion = (node.has_children() or sd_norm_product_large);
6246
6247 if (continue_recursion) {
6248 // in case we need to compute children's coefficients: unfilter only once
6249 bool compute_child_s_coeffs=true;
6250 coeffT d = node.coeff();
6251// print("continuing recursion from key",key);
6252
6253 for (KeyChildIterator<NDIM> kit(key); kit; ++kit) {
6254 keyT child=kit.key();
6255 typename dcT::accessor acc;
6256
6257 // make child's s coeffs if it doesn't exist or if is has no s coeffs
6258 bool childnode_exists=get_coeffs().find(acc,child);
6259 bool need_s_coeffs= childnode_exists ? (acc->second.get_snorm()<=0.0) : true;
6260
6261 coeffT child_s_coeffs;
6262 if (need_s_coeffs and compute_child_s_coeffs) {
6263 if (d.dim(0)==cdata.vk[0]) { // s coeffs only in this node
6264 coeffT d1(cdata.v2k,get_tensor_args());
6265 d1(cdata.s0)+=d;
6266 d=d1;
6267 }
6268 d = unfilter(d);
6269 child_s_coeffs=copy(d(child_patch(child)));
6270 child_s_coeffs.reduce_rank(thresh);
6271 compute_child_s_coeffs=false;
6272 }
6273
6274 if (not childnode_exists) {
6275 get_coeffs().replace(child,nodeT(child_s_coeffs,false));
6276 get_coeffs().find(acc,child);
6277 } else if (childnode_exists and need_s_coeffs) {
6278 acc->second.coeff()=child_s_coeffs;
6279 }
6280 bool exists= get_coeffs().find(acc,child);
6281 MADNESS_CHECK(exists);
6282 nodeT& childnode = acc->second;
6283 if (need_s_coeffs) childnode.recompute_snorm_and_dnorm(get_cdata());
6284// print("recurring down to",child);
6285 contraction_map.merge(recur_down_for_contraction_map(child,childnode, v_this, v_other,
6286 ij_other_list, j_other_list, this_first, thresh));
6287// print("contraction_map.size()",contraction_map.size());
6288 }
6289
6290 }
6291
6292 return contraction_map;
6293 }
6294
6295
6296 /// tensor contraction part of partial_inner
6297
6298 /// @param[in] g rhs of the inner product
6299 /// @param[in] h lhs of the inner product
6300 /// @param[in] v1 dimensions of g to be contracted
6301 /// @param[in] v2 dimensions of h to be contracted
6302 /// @param[in] key key of result's (this) FunctionNode
6303 /// @param[in] j_key_list list of contraction index-j keys contributing to this' node
6304 template<typename Q, std::size_t LDIM, typename R, std::size_t KDIM,
6305 std::size_t CDIM = (KDIM + LDIM - NDIM) / 2>
6306 void partial_inner_contract(const FunctionImpl<Q, LDIM>* g, const FunctionImpl<R, KDIM>* h,
6307 const std::array<int, CDIM> v1, const std::array<int, CDIM> v2,
6308 const Key<NDIM>& key, const std::list<Key<CDIM>>& j_key_list) {
6309
6310 Key<LDIM - CDIM> i_key;
6311 Key<KDIM - CDIM> k_key;
6312 key.break_apart(i_key, k_key);
6313
6314 coeffT result_coeff(get_cdata().v2k, get_tensor_type());
6315 for (const auto& j_key: j_key_list) {
6316
6317 auto v_complement = [](const auto& v, const auto& vc) {
6318 constexpr std::size_t VDIM = std::tuple_size<std::decay_t<decltype(v)>>::value;
6319 constexpr std::size_t VCDIM = std::tuple_size<std::decay_t<decltype(vc)>>::value;
6320 std::array<int, VCDIM> result;
6321 for (std::size_t i = 0; i < VCDIM; i++) result[i] = (v.back() + i + 1) % (VDIM + VCDIM);
6322 return result;
6323 };
6324 auto make_ij_key = [&v_complement](const auto i_key, const auto j_key, const auto& v) {
6325 constexpr std::size_t IDIM = std::decay_t<decltype(i_key)>::static_size;
6326 constexpr std::size_t JDIM = std::decay_t<decltype(j_key)>::static_size;
6327 static_assert(JDIM == std::tuple_size<std::decay_t<decltype(v)>>::value);
6328
6329 Vector<Translation, IDIM + JDIM> l;
6330 for (std::size_t i = 0; i < v.size(); ++i) l[v[i]] = j_key.translation()[i];
6331 std::array<int, IDIM> vc1;
6332 auto vc = v_complement(v, vc1);
6333 for (std::size_t i = 0; i < vc.size(); ++i) l[vc[i]] = i_key.translation()[i];
6334
6335 return Key<IDIM + JDIM>(i_key.level(), l);
6336 };
6337
6338 Key<LDIM> ij_key = make_ij_key(i_key, j_key, v1);
6339 Key<KDIM> jk_key = make_ij_key(k_key, j_key, v2);
6340
6341 MADNESS_CHECK(g->get_coeffs().probe(ij_key));
6342 MADNESS_CHECK(h->get_coeffs().probe(jk_key));
6343 const coeffT& gcoeff = g->get_coeffs().find(ij_key).get()->second.coeff();
6344 const coeffT& hcoeff = h->get_coeffs().find(jk_key).get()->second.coeff();
6345 coeffT gcoeff1, hcoeff1;
6346 if (gcoeff.dim(0) == g->get_cdata().k) {
6347 gcoeff1 = coeffT(g->get_cdata().v2k, g->get_tensor_args());
6348 gcoeff1(g->get_cdata().s0) += gcoeff;
6349 } else {
6350 gcoeff1 = gcoeff;
6351 }
6352 if (hcoeff.dim(0) == g->get_cdata().k) {
6353 hcoeff1 = coeffT(h->get_cdata().v2k, h->get_tensor_args());
6354 hcoeff1(h->get_cdata().s0) += hcoeff;
6355 } else {
6356 hcoeff1 = hcoeff;
6357 }
6358
6359 // offset: 0 for full tensor, 1 for svd representation with rank being the first dimension (r,d1,d2,d3) -> (r,d1*d2*d3)
6360 auto fuse = [](Tensor<T> tensor, const std::array<int, CDIM>& v, int offset) {
6361 for (std::size_t i = 0; i < CDIM - 1; ++i) {
6362 MADNESS_CHECK((v[i] + 1) == v[i + 1]); // make sure v is contiguous and ascending
6363 tensor = tensor.fusedim(v[0]+offset);
6364 }
6365 return tensor;
6366 };
6367
6368 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(2) h(1,2) d2
6369 // c_i = \sum_j a_j b_ij = \sum_jr a_j b_rj b'_rj
6370 // = \sum_jr ( a_j b_rj) b'_rj )
6371 auto contract2 = [](const auto& svdcoeff, const auto& tensor, const int particle) {
6372#if HAVE_GENTENSOR
6373 const int spectator_particle=(particle+1)%2;
6374 Tensor<Q> gtensor = svdcoeff.get_svdtensor().make_vector_with_weights(particle);
6375 gtensor=gtensor.reshape(svdcoeff.rank(),gtensor.size()/svdcoeff.rank());
6376 MADNESS_CHECK(gtensor.ndim()==2);
6377 Tensor<Q> gtensor_other = svdcoeff.get_svdtensor().ref_vector(spectator_particle);
6378 Tensor<T> tmp1=inner(gtensor,tensor.flat(),1,0); // tmp1(r) = sum_j a'_(r,j) b(j)
6379 MADNESS_CHECK(tmp1.ndim()==1);
6380 Tensor<T> tmp2=inner(gtensor_other,tmp1,0,0); // tmp2(i) = sum_r a_(r,i) tmp1(r)
6381 return tmp2;
6382#else
6383 MADNESS_EXCEPTION("no partial_inner using svd without GenTensor",1);
6384 return Tensor<T>();
6385#endif
6386 };
6387
6388 if (gcoeff.is_full_tensor() and hcoeff.is_full_tensor() and result_coeff.is_full_tensor()) {
6389 // merge multiple contraction dimensions into one
6390 int offset = 0;
6391 Tensor<Q> gtensor = fuse(gcoeff1.full_tensor(), v1, offset);
6392 Tensor<R> htensor = fuse(hcoeff1.full_tensor(), v2, offset);
6393 result_coeff.full_tensor() += inner(gtensor, htensor, v1[0], v2[0]);
6394 if (key.level() > 0) {
6395 gtensor = copy(gcoeff1.full_tensor()(g->get_cdata().s0));
6396 htensor = copy(hcoeff1.full_tensor()(h->get_cdata().s0));
6397 gtensor = fuse(gtensor, v1, offset);
6398 htensor = fuse(htensor, v2, offset);
6399 result_coeff.full_tensor()(get_cdata().s0) -= inner(gtensor, htensor, v1[0], v2[0]);
6400 }
6401 }
6402
6403
6404 // use case: 2-electron functions in svd representation f(1,3) = \int g(1,2) h(2,3) d2
6405 // c_ik = \sum_j a_ij b_jk = \sum_jrr' a_ri a'_rj b'_r'j b_r'k
6406 //      = \sum_rr' a_ri ( \sum_j a'_rj b'_r'j ) b_r'k
6407 //      = \sum_r' c_r'i b_r'k
6408 else if (gcoeff.is_svd_tensor() and hcoeff.is_svd_tensor() and result_coeff.is_svd_tensor()) {
6409 MADNESS_CHECK(v1[0]==0 or v1[CDIM-1]==LDIM-1);
6410 MADNESS_CHECK(v2[0]==0 or v2[CDIM-1]==KDIM-1);
6411 int gparticle= v1[0]==0 ? 0 : 1; // which particle to integrate over
6412 int hparticle= v2[0]==0 ? 0 : 1; // which particle to integrate over
6413 // merge multiple contraction dimensions into one
6414 Tensor<Q> gtensor = gcoeff1.get_svdtensor().flat_vector_with_weights(gparticle);
6415 Tensor<Q> gtensor_other = gcoeff1.get_svdtensor().flat_vector((gparticle+1)%2);
6416 Tensor<R> htensor = hcoeff1.get_svdtensor().flat_vector_with_weights(hparticle);
6417 Tensor<R> htensor_other = hcoeff1.get_svdtensor().flat_vector((hparticle+1)%2);
6418 Tensor<T> tmp1=inner(gtensor,htensor,1,1); // tmp1(r,r') = sum_j a'(r,j) b'(r',j)
6419 Tensor<T> tmp2=inner(tmp1,gtensor_other,0,0); // tmp2(r',i) = sum_r tmp1(r,r') a(r,i)
6420 Tensor<typename Tensor<T>::scalar_type> w(tmp2.dim(0)); // unit weights for the new SVD representation (type assumed)
6421 MADNESS_CHECK(tmp2.dim(0)==htensor_other.dim(0));
6422 w=1.0;
6423 coeffT result_tmp(get_cdata().v2k, get_tensor_type());
6424 result_tmp.get_svdtensor().set_vectors_and_weights(w,tmp2,htensor_other);
6425 if (key.level() > 0) {
6426 GenTensor<Q> gcoeff2 = copy(gcoeff1(g->get_cdata().s0));
6427 GenTensor<R> hcoeff2 = copy(hcoeff1(h->get_cdata().s0));
6428 Tensor<Q> gtensor = gcoeff2.get_svdtensor().flat_vector_with_weights(gparticle);
6429 Tensor<Q> gtensor_other = gcoeff2.get_svdtensor().flat_vector((gparticle+1)%2);
6430 Tensor<R> htensor = hcoeff2.get_svdtensor().flat_vector_with_weights(hparticle);
6431 Tensor<R> htensor_other = hcoeff2.get_svdtensor().flat_vector((hparticle+1)%2);
6432 Tensor<T> tmp1=inner(gtensor,htensor,1,1); // tmp1(r,r') = sum_j a'(r,j) b'(r',j)
6433 Tensor<T> tmp2=inner(tmp1,gtensor_other,0,0); // tmp2(r',i) = sum_r tmp1(r,r') a(r,i)
6434 Tensor<typename Tensor<T>::scalar_type> w(tmp2.dim(0)); // unit weights (type assumed)
6435 MADNESS_CHECK(tmp2.dim(0)==htensor_other.dim(0));
6436 w=1.0;
6437 coeffT result_coeff1(get_cdata().vk, get_tensor_type());
6438 result_coeff1.get_svdtensor().set_vectors_and_weights(w,tmp2,htensor_other);
6439 result_tmp(get_cdata().s0)-=result_coeff1;
6440 }
6441 result_coeff+=result_tmp;
6442 }
6443
6444 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(2) h(1,2) d2
6445 // c_i = \sum_j a_j b_ij = \sum_jr a_j b_rj b'_ri
6446 //     = \sum_r ( \sum_j a_j b_rj ) b'_ri
6447 else if (gcoeff.is_full_tensor() and hcoeff.is_svd_tensor() and result_coeff.is_full_tensor()) {
6448 MADNESS_CHECK(v1[0]==0 and v1[CDIM-1]==LDIM-1);
6449 MADNESS_CHECK(v2[0]==0 or v2[CDIM-1]==KDIM-1);
6450 MADNESS_CHECK(LDIM==CDIM);
6451 int hparticle= v2[0]==0 ? 0 : 1; // which particle to integrate over
6452
6453 Tensor<T> r=contract2(hcoeff1,gcoeff1.full_tensor(),hparticle);
6454 if (key.level()>0) r(get_cdata().s0)-=contract2(copy(hcoeff1(h->get_cdata().s0)),copy(gcoeff.full_tensor()(g->get_cdata().s0)),hparticle);
6455 result_coeff.full_tensor()+=r;
6456 }
6457 // use case: partial_projection of 2-electron functions in svd representation f(1) = \int g(1,2) h(2) d2
6458 // c_i = \sum_j a_ij b_j = \sum_jr a_ri a'_rj b_j
6459 //     = \sum_r a_ri ( \sum_j a'_rj b_j )
6460 else if (gcoeff.is_svd_tensor() and hcoeff.is_full_tensor() and result_coeff.is_full_tensor()) {
6461 MADNESS_CHECK(v1[0]==0 or v1[CDIM-1]==LDIM-1);
6462 MADNESS_CHECK(v2[0]==0 and v2[CDIM-1]==KDIM-1);
6463 MADNESS_CHECK(KDIM==CDIM);
6464 int gparticle= v1[0]==0 ? 0 : 1; // which particle to integrate over
6465
6466 Tensor<T> r=contract2(gcoeff1,hcoeff1.full_tensor(),gparticle);
6467 if (key.level()>0) r(get_cdata().s0)-=contract2(copy(gcoeff1(g->get_cdata().s0)),copy(hcoeff.full_tensor()(h->get_cdata().s0)),gparticle);
6468 result_coeff.full_tensor()+=r;
6469
6470 } else {
6471 MADNESS_EXCEPTION("unknown case in partial_inner_contract",1);
6472 }
6473 }
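                    // Note (not part of the original header): in each branch above the product of the
                    // pure scaling-function parts, schematically inner(g(s0), h(s0)), is subtracted from
                    // the s0 sub-block of the result whenever key.level() > 0. This appears to keep the
                    // sum-coefficient contribution, which is already carried by the coarser scales of the
                    // non-standard representation, from being counted twice.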
6474
6475 MADNESS_CHECK(result_coeff.is_assigned());
6476 result_coeff.reduce_rank(get_thresh());
6477
6478 if (coeffs.is_local(key))
6479 coeffs.send(key, &nodeT::accumulate, result_coeff, coeffs, key, get_tensor_args());
6480 else
6481 coeffs.task(key, &nodeT::accumulate, result_coeff, coeffs, key, get_tensor_args(), TaskAttributes::hipri());
6482 }
6483
6484 /// Return the inner product with an external function on a specified function node.
6485
6486 /// @param[in] key Key of the function node to compute the inner product on. (the domain of integration)
6487 /// @param[in] c Tensor of coefficients for the function at the function node given by key
6488 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6489 /// @return Returns the inner product over the domain of a single function node, no guarantee of accuracy.
6490 T inner_ext_node(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f) const {
6491 tensorT fvals = tensorT(this->cdata.vk);
6492 // Compute the value of the external function at the quadrature points.
6493 fcube(key, *(f), cdata.quad_x, fvals);
6494 // Convert quadrature point values to scaling coefficients.
6495 tensorT fc = tensorT(values2coeffs(key, fvals));
6496 // Return the inner product of the two functions' scaling coefficients.
6497 return c.trace_conj(fc);
6498 }
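            // Worked form of the value returned above (not part of the original header):
            //   inner_ext_node(key, c, f) = sum_p conj(c_p) * fc_p,  with  fc = values2coeffs(key, f(x_q)),
            // i.e. f is sampled at the quadrature points x_q of this box, converted to scaling
            // coefficients fc, and traced against the node's own coefficients c; the accuracy is
            // limited by the quadrature of this single box, hence the recursive drivers below.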
6499
6500 /// Call inner_ext_node recursively until convergence.
6501 /// @param[in] key Key of the function node on which to compute inner product (the domain of integration)
6502 /// @param[in] c coeffs for the function at the node given by key
6503 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6504 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6505 /// @param[in] old_inner the inner product on the parent function node
6506 /// @return Returns the inner product over the domain of a single function, checks for convergence.
6507 T inner_ext_recursive(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine, T old_inner=T(0)) const {
6508 int i = 0;
6509 tensorT c_child, inner_child;
6510 T new_inner, result = 0.0;
6511
6512 c_child = tensorT(cdata.v2k); // tensor of child coeffs
6513 inner_child = Tensor<double>(pow(2, NDIM)); // child inner products
6514
6515 // If old_inner is default value, assume this is the first call
6516 // and compute inner product on this node.
6517 if (old_inner == T(0)) {
6518 old_inner = inner_ext_node(key, c, f);
6519 }
6520
6521 if (coeffs.find(key).get()->second.has_children()) {
6522 // Since the key has children and we know the func is redundant,
6523 // Iterate over all children of this compute node, computing
6524 // the inner product on each child node. new_inner will store
6525 // the sum of these, yielding a more accurate inner product.
6526 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6527 const keyT& child = it.key();
6528 tensorT cc = coeffs.find(child).get()->second.coeff().full_tensor_copy();
6529 inner_child(i) = inner_ext_node(child, cc, f);
6530 }
6531 new_inner = inner_child.sum();
6532 } else if (leaf_refine) {
6533 // We need the scaling coefficients of the numerical function
6534 // at each of the children nodes. We can't use project because
6535 // there is no guarantee that the numerical function will have
6536 // a functor. Instead, since we know we are at or below the
6537 // leaf nodes, the wavelet coefficients are zero (to within the
6538 // truncate tolerance). Thus, we can use unfilter() to
6539 // get the scaling coefficients at the next level.
6540 tensorT d = tensorT(cdata.v2k);
6541 d = T(0);
6542 d(cdata.s0) = copy(c);
6543 c_child = unfilter(d);
6544
6545 // Iterate over all children of this compute node, computing
6546 // the inner product on each child node. new_inner will store
6547 // the sum of these, yielding a more accurate inner product.
6548 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6549 const keyT& child = it.key();
6550 tensorT cc = tensorT(c_child(child_patch(child)));
6551 inner_child(i) = inner_ext_node(child, cc, f);
6552 }
6553 new_inner = inner_child.sum();
6554 } else {
6555 // If we get to here, we are at the leaf nodes and the user has
6556 // specified that they do not want refinement past leaf nodes.
6557 new_inner = old_inner;
6558 }
6559
6560 // Check for convergence. If converged...yay, we're done. If not,
6561 // call inner_ext_node_recursive on each child node and accumulate
6562 // the inner product in result.
6563 // if (std::abs(new_inner - old_inner) <= truncate_tol(thresh, key)) {
6564 if (std::abs(new_inner - old_inner) <= thresh) {
6565 result = new_inner;
6566 } else {
6567 i = 0;
6568 for (KeyChildIterator<NDIM> it(key); it; ++it, ++i) {
6569 const keyT& child = it.key();
6570 tensorT cc = tensorT(c_child(child_patch(child)));
6571 result += inner_ext_recursive(child, cc, f, leaf_refine, inner_child(i));
6572 }
6573 }
6574
6575 return result;
6576 }
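            // Note (not part of the original header): the recursion above accepts a box once
            //   |new_inner - old_inner| <= thresh,
            // where new_inner re-evaluates the integral on the 2^NDIM children using either their stored
            // coefficients or, below the leaves, coefficients obtained from the parent via unfilter()
            // (the two-scale relation); otherwise every child recurses with its own child-level value
            // passed in as old_inner.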
6577
6578 struct do_inner_ext_local_ffi {
6579 const std::shared_ptr< FunctionFunctorInterface<T, NDIM> > fref;
6580 const implT * impl;
6581 const bool leaf_refine;
6582 const bool do_leaves; ///< start with leaf nodes instead of initial_level
6583
6584 do_inner_ext_local_ffi(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f,
6585 const implT * impl, const bool leaf_refine, const bool do_leaves)
6586 : fref(f), impl(impl), leaf_refine(leaf_refine), do_leaves(do_leaves) {};
6587
6588 T operator()(typename dcT::const_iterator& it) const {
6589 if (do_leaves and it->second.is_leaf()) {
6590 tensorT cc = it->second.coeff().full_tensor();
6591 return impl->inner_adaptive_recursive(it->first, cc, fref, leaf_refine, T(0));
6592 } else if ((not do_leaves) and (it->first.level() == impl->initial_level)) {
6593 tensorT cc = it->second.coeff().full_tensor();
6594 return impl->inner_ext_recursive(it->first, cc, fref, leaf_refine, T(0));
6595 } else {
6596 return 0.0;
6597 }
6598 }
6599
6600 T operator()(T a, T b) const {
6601 return (a + b);
6602 }
6603
6604 template <typename Archive> void serialize(const Archive& ar) {
6605 MADNESS_EXCEPTION("NOT IMPLEMENTED", 1);
6606 }
6607 };
6608
6609 /// Return the local part of inner product with external function ... no communication.
6610 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6611 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6612 /// @return Returns local part of the inner product, i.e. over the domain of all function nodes on this compute node.
6613 T inner_ext_local(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine) const {
6614 typedef Range<typename dcT::const_iterator> rangeT;
6615
6616 return world.taskq.reduce<T, rangeT, do_inner_ext_local_ffi>(rangeT(coeffs.begin(), coeffs.end()),
6617 do_inner_ext_local_ffi(f, this, leaf_refine, false));
6618 }
6619
6620 /// Return the local part of inner product with external function ... no communication.
6621 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6622 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6623 /// @return Returns local part of the inner product, i.e. over the domain of all function nodes on this compute node.
6624 T inner_adaptive_local(const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f, const bool leaf_refine) const {
6625 typedef Range<typename dcT::const_iterator> rangeT;
6626
6627 return world.taskq.reduce<T, rangeT, do_inner_ext_local_ffi>(rangeT(coeffs.begin(), coeffs.end()),
6628 do_inner_ext_local_ffi(f, this, leaf_refine, true));
6629 }
6630
6631 /// Call inner_ext_node recursively until convergence.
6632 /// @param[in] key Key of the function node on which to compute inner product (the domain of integration)
6633 /// @param[in] c coeffs for the function at the node given by key
6634 /// @param[in] f Reference to FunctionFunctorInterface. This is the externally provided function
6635 /// @param[in] leaf_refine boolean switch to turn on/off refinement past leaf nodes
6636 /// @param[in] old_inner the inner product on the parent function node
6637 /// @return Returns the inner product over the domain of a single function, checks for convergence.
6638 T inner_adaptive_recursive(keyT key, const tensorT& c,
6639 const std::shared_ptr< FunctionFunctorInterface<T,NDIM> > f,
6640 const bool leaf_refine, T old_inner=T(0)) const {
6641
6642 // the inner product in the current node
6643 old_inner = inner_ext_node(key, c, f);
6644 T result=0.0;
6645
6646 // the inner product in the child nodes
6647
6648 // compute the sum coefficients of the MRA function
6649 tensorT d = tensorT(cdata.v2k);
6650 d = T(0);
6651 d(cdata.s0) = copy(c);
6652 tensorT c_child = unfilter(d);
6653
6654 // compute the inner product in the child nodes
6655 T new_inner=0.0; // child inner products
6656 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6657 const keyT& child = it.key();
6658 tensorT cc = tensorT(c_child(child_patch(child)));
6659 new_inner+= inner_ext_node(child, cc, f);
6660 }
6661
6662 // continue recursion if needed
6663 const double tol=truncate_tol(thresh,key);
6664 if (leaf_refine and (std::abs(new_inner - old_inner) > tol)) {
6665 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6666 const keyT& child = it.key();
6667 tensorT cc = tensorT(c_child(child_patch(child)));
6668 result += inner_adaptive_recursive(child, cc, f, leaf_refine, T(0));
6669 }
6670 } else {
6671 result = new_inner;
6672 }
6673 return result;
6674
6675 }
6676
6677
6678 /// Return the gaxpy product with an external function on a specified
6679 /// function node.
6680 /// @param[in] key Key of the function node on which to compute gaxpy
6681 /// @param[in] lc Tensor of coefficients for the function at the
6682 /// function node given by key
6683 /// @param[in] f Pointer to function of type T that takes coordT
6684 /// arguments. This is the externally provided function and
6685 /// the right argument of gaxpy.
6686 /// @param[in] alpha prefactor of c Tensor for gaxpy
6687 /// @param[in] beta prefactor of fcoeffs for gaxpy
6688 /// @return Returns coefficient tensor of the gaxpy product at specified
6689 /// key, no guarantee of accuracy.
6690 template <typename L>
6691 tensorT gaxpy_ext_node(keyT key, Tensor<L> lc, T (*f)(const coordT&), T alpha, T beta) const {
6692 // Compute the value of external function at the quadrature points.
6693 tensorT fvals = madness::fcube(key, f, cdata.quad_x);
6694 // Convert quadrature point values to scaling coefficients.
6695 tensorT fcoeffs = values2coeffs(key, fvals);
6696 // Return the inner product of the two functions' scaling coeffs.
6697 tensorT c2 = copy(lc);
6698 c2.gaxpy(alpha, fcoeffs, beta);
6699 return c2;
6700 }
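            // Worked example (not part of the original header): with alpha = 1 and beta = -1 the block
            // returned above holds the scaling coefficients of (left - f) on this box,
            //   c2 = lc - values2coeffs(key, f(x_q)),
            // f again being sampled at the box quadrature points x_q.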
6701
6702 /// Return out of place gaxpy using recursive descent.
6703 /// @param[in] key Key of the function node on which to compute gaxpy
6704 /// @param[in] left FunctionImpl, left argument of gaxpy
6705 /// @param[in] lcin coefficients of left at this node
6706 /// @param[in] c coefficients of gaxpy product at this node
6707 /// @param[in] f pointer to function of type T that takes coordT
6708 /// arguments. This is the externally provided function and
6709 /// the right argument of gaxpy.
6710 /// @param[in] alpha prefactor of left argument for gaxpy
6711 /// @param[in] beta prefactor of right argument for gaxpy
6712 /// @param[in] tol convergence tolerance...when the norm of the gaxpy's
6713 /// difference coefficients is less than tol, we are done.
6714 template <typename L>
6715 void gaxpy_ext_recursive(const keyT& key, const FunctionImpl<L,NDIM>* left,
6716 Tensor<L> lcin, tensorT c, T (*f)(const coordT&),
6717 T alpha, T beta, double tol, bool below_leaf) {
6718 typedef typename FunctionImpl<L,NDIM>::dcT::const_iterator literT;
6719
6720 // If we haven't yet reached the leaf level, check whether the
6721 // current key is a leaf node of left. If so, set below_leaf to true
6722 // and continue. If not, make this a parent, recur down, return.
6723 if (not below_leaf) {
6724 bool left_leaf = left->coeffs.find(key).get()->second.is_leaf();
6725 if (left_leaf) {
6726 below_leaf = true;
6727 } else {
6728 this->coeffs.replace(key, nodeT(coeffT(), true));
6729 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6730 const keyT& child = it.key();
6731 woT::task(left->coeffs.owner(child), &implT:: template gaxpy_ext_recursive<L>,
6732 child, left, Tensor<L>(), tensorT(), f, alpha, beta, tol, below_leaf);
6733 }
6734 return;
6735 }
6736 }
6737
6738 // Compute left's coefficients if not provided
6739 Tensor<L> lc = lcin;
6740 if (lc.size() == 0) {
6741 literT it = left->coeffs.find(key).get();
6742 MADNESS_ASSERT(it != left->coeffs.end());
6743 if (it->second.has_coeff())
6744 lc = it->second.coeff().reconstruct_tensor();
6745 }
6746
6747 // Compute this node's coefficients if not provided in function call
6748 if (c.size() == 0) {
6749 c = gaxpy_ext_node(key, lc, f, alpha, beta);
6750 }
6751
6752 // We need the scaling coefficients of the numerical function at
6753 // each of the children nodes. We can't use project because there
6754 // is no guarantee that the numerical function will have a functor.
6755 // Instead, since we know we are at or below the leaf nodes, the
6756 // wavelet coefficients are zero (to within the truncate tolerance).
6757 // Thus, we can use unfilter() to get the scaling coefficients at
6758 // the next level.
6759 Tensor<L> lc_child = Tensor<L>(cdata.v2k); // left's child coeffs
6760 Tensor<L> ld = Tensor<L>(cdata.v2k);
6761 ld = L(0);
6762 ld(cdata.s0) = copy(lc);
6763 lc_child = unfilter(ld);
6764
6765 // Iterate over children of this node,
6766 // storing the gaxpy coeffs in c_child
6767 tensorT c_child = tensorT(cdata.v2k); // tensor of child coeffs
6768 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6769 const keyT& child = it.key();
6770 tensorT lcoeff = tensorT(lc_child(child_patch(child)));
6771 c_child(child_patch(child)) = gaxpy_ext_node(child, lcoeff, f, alpha, beta);
6772 }
6773
6774 // Compute the difference coefficients to test for convergence.
6775 tensorT d = tensorT(cdata.v2k);
6776 d = filter(c_child);
6777 // Filter returns both s and d coefficients, so set scaling
6778 // coefficient part of d to 0 so that we take only the
6779 // norm of the difference coefficients.
6780 d(cdata.s0) = T(0);
6781 double dnorm = d.normf();
6782
6783 // Small d.normf means we've reached a good level of resolution
6784 // Store the coefficients and return.
6785 if (dnorm <= truncate_tol(tol,key)) {
6786 this->coeffs.replace(key, nodeT(coeffT(c,targs), false));
6787 } else {
6788 // Otherwise, make this a parent node and recur down
6789 this->coeffs.replace(key, nodeT(coeffT(), true)); // Interior node
6790
6791 for (KeyChildIterator<NDIM> it(key); it; ++it) {
6792 const keyT& child = it.key();
6793 tensorT child_coeff = tensorT(c_child(child_patch(child)));
6794 tensorT left_coeff = tensorT(lc_child(child_patch(child)));
6795 woT::task(left->coeffs.owner(child), &implT:: template gaxpy_ext_recursive<L>,
6796 child, left, left_coeff, child_coeff, f, alpha, beta, tol, below_leaf);
6797 }
6798 }
6799 }
6800
6801 template <typename L>
6802 void gaxpy_ext(const FunctionImpl<L,NDIM>* left, T (*f)(const coordT&), T alpha, T beta, double tol, bool fence) {
6803 if (world.rank() == coeffs.owner(cdata.key0))
6804 gaxpy_ext_recursive<L> (cdata.key0, left, Tensor<L>(), tensorT(), f, alpha, beta, tol, false);
6805 if (fence)
6806 world.gop.fence();
6807 }
6808
6809 /// project the low-dim function g on the hi-dim function f: result(x) = <this(x,y) | g(y)>
6810
6811 /// invoked by the hi-dim function (this), a function of NDIM; the result has NDIM-LDIM dimensions
6812
6813 /// Upon return, result matches this, with contributions on all scales
6814 /// @param[in] result lo-dim function of NDIM-LDIM \todo Should this be param[out]?
6815 /// @param[in] gimpl lo-dim function of LDIM
6816 /// @param[in] dim the dimensions over which to integrate: 0..LDIM-1 or NDIM-LDIM..NDIM-1
6817 template<size_t LDIM>
6818 void project_out(FunctionImpl<T,NDIM-LDIM>* result, const FunctionImpl<T,LDIM>* gimpl,
6819 const int dim, const bool fence) {
6820
6821 const keyT& key0=cdata.key0;
6822
6823 if (world.rank() == coeffs.owner(key0)) {
6824
6825 // coeff_op will accumulate the result
6826 typedef project_out_op<LDIM> coeff_opT;
6827 coeff_opT coeff_op(this,result,CoeffTracker<T,LDIM>(gimpl),dim);
6828
6829 // don't do anything on this -- coeff_op will accumulate into result
6830 typedef noop<T,NDIM> apply_opT;
6831 apply_opT apply_op;
6832
6833 woT::task(world.rank(), &implT:: template forward_traverse<coeff_opT,apply_opT>,
6834 coeff_op, apply_op, cdata.key0);
6835
6836 }
6837 if (fence) world.gop.fence();
6838
6839 }
6840
6841
6842 /// project the low-dim function g on the hi-dim function f: result(x) = <f(x,y) | g(y)>
6843 template<size_t LDIM>
6844 struct project_out_op {
6845 bool randomize() const {return false;}
6846
6847 typedef project_out_op<LDIM> this_type;
6848 typedef CoeffTracker<T,LDIM> ctL;
6849 typedef FunctionImpl<T,NDIM-LDIM> implL1;
6850 typedef std::pair<bool,coeffT> argT;
6851
6852 const implT* fimpl; ///< the hi dim function f
6853 mutable implL1* result; ///< the low dim result function
6854 ctL iag; ///< the low dim function g
6855 int dim; ///< 0: project 0..LDIM-1, 1: project LDIM..NDIM-1
6856
6857 // ctor
6858 project_out_op() = default;
6859 project_out_op(const implT* fimpl, implL1* result, const ctL& iag, const int dim)
6860 : fimpl(fimpl), result(result), iag(iag), dim(dim) {}
6861 project_out_op(const project_out_op& other)
6862 : fimpl(other.fimpl), result(other.result), iag(other.iag), dim(other.dim) {}
6863
6864
6865 /// do the actual contraction
6866 Future<argT> operator()(const Key<NDIM>& key) const {
6867
6868 Key<LDIM> key1,key2,dest;
6869 key.break_apart(key1,key2);
6870
6871 // make the right coefficients
6872 coeffT gcoeff;
6873 if (dim==0) {
6874 gcoeff=iag.get_impl()->parent_to_child(iag.coeff(),iag.key(),key1);
6875 dest=key2;
6876 }
6877 if (dim==1) {
6878 gcoeff=iag.get_impl()->parent_to_child(iag.coeff(),iag.key(),key2);
6879 dest=key1;
6880 }
6881
6882 MADNESS_ASSERT(fimpl->get_coeffs().probe(key)); // must be local!
6883 const nodeT& fnode=fimpl->get_coeffs().find(key).get()->second;
6884 const coeffT& fcoeff=fnode.coeff();
6885
6886 // fast return if possible
6887 if (fcoeff.has_no_data() or gcoeff.has_no_data())
6888 return Future<argT> (argT(fnode.is_leaf(),coeffT()));
6889
6890 MADNESS_CHECK(gcoeff.is_full_tensor());
6891 tensorT final(result->cdata.vk);
6892 const int k=fcoeff.dim(0);
6893 const int k_ldim=std::pow(k,LDIM);
6894 std::vector<long> shape(LDIM, k);
6895
6896 if (fcoeff.is_full_tensor()) {
6897 // result_i = \sum_j g_j f_ji
6898 const tensorT gtensor = gcoeff.full_tensor().reshape(k_ldim);
6899 const tensorT ftensor = fcoeff.full_tensor().reshape(k_ldim,k_ldim);
6900 final=inner(gtensor,ftensor,0,dim).reshape(shape);
6901
6902 } else if (fcoeff.is_svd_tensor()) {
6903 if (fcoeff.rank()>0) {
6904
6905 // result_i = \sum_jr g_j a_rj w_r b_ri
6906 const int otherdim = (dim + 1) % 2;
6907 const tensorT gtensor = gcoeff.full_tensor().flat();
6908 const tensorT atensor = fcoeff.get_svdtensor().flat_vector(dim); // a_rj
6909 const tensorT btensor = fcoeff.get_svdtensor().flat_vector(otherdim);
6910 const tensorT gatensor = inner(gtensor, atensor, 0, 1); // ga_r
6911 tensorT weights = copy(fcoeff.get_svdtensor().weights_);
6912 weights.emul(gatensor); // ga_r * w_r
6913 // sum over all ranks of b, include new weights:
6914 // result_i = \sum_r ga_r * w_r * b_ri
6915 for (int r = 0; r < fcoeff.rank(); ++r) final += weights(r) * btensor(r, _);
6916 final = final.reshape(shape);
6917 }
6918
6919 } else {
6920 MADNESS_EXCEPTION("unsupported tensor type in project_out_op",1);
6921 }
6922
6923 // accumulate the result
6924 result->coeffs.task(dest, &FunctionNode<T,LDIM>::accumulate2, final, result->coeffs, dest, TaskAttributes::hipri());
6925
6926 return Future<argT> (argT(fnode.is_leaf(),coeffT()));
6927 }
6928
6929 this_type make_child(const keyT& child) const {
6930 Key<LDIM> key1,key2;
6931 child.break_apart(key1,key2);
6932 const Key<LDIM> gkey = (dim==0) ? key1 : key2;
6933
6934 return this_type(fimpl,result,iag.make_child(gkey),dim);
6935 }
6936
6937 /// retrieve the coefficients (parent coeffs might be remote)
6938 Future<this_type> activate() const {
6939 Future<ctL> g1=iag.activate();
6940 return result->world.taskq.add(detail::wrap_mem_fn(*const_cast<this_type *> (this),
6941 &this_type::forward_ctor),fimpl,result,g1,dim);
6942 }
6943
6944 /// taskq-compatible ctor
6945 this_type forward_ctor(const implT* fimpl1, implL1* result1, const ctL& iag1, const int dim1) {
6946 return this_type(fimpl1,result1,iag1,dim1);
6947 }
6948
6949 template <typename Archive> void serialize(const Archive& ar) {
6950 ar & result & iag & fimpl & dim;
6951 }
6952
6953 };
6954
6955
6956 /// project the low-dim function g on the hi-dim function f: this(x) = <f(x,y) | g(y)>
6957
6958 /// invoked by result, a function of NDIM
6959
6960 /// @param[in] f hi-dim function of LDIM+NDIM
6961 /// @param[in] g lo-dim function of LDIM
6962 /// @param[in] dim the dimensions over which to integrate: 0..LDIM-1 or NDIM..NDIM+LDIM-1
6963 template<size_t LDIM>
6964 void project_out2(const FunctionImpl<T,LDIM+NDIM>* f, const FunctionImpl<T,LDIM>* g, const int dim) {
6965
6966 typedef std::pair< keyT,coeffT > pairT;
6967 typedef typename FunctionImpl<T,NDIM+LDIM>::dcT::const_iterator fiterator;
6968
6969 // loop over all nodes of hi-dim f, compute the inner products with all
6970 // appropriate nodes of g, and accumulate in result
6971 fiterator end = f->get_coeffs().end();
6972 for (fiterator it=f->get_coeffs().begin(); it!=end; ++it) {
6973 const Key<LDIM+NDIM> key=it->first;
6974 const FunctionNode<T,LDIM+NDIM> fnode=it->second;
6975 const coeffT& fcoeff=fnode.coeff();
6976
6977 if (fnode.is_leaf() and fcoeff.has_data()) {
6978
6979 // break key into particle: over key1 will be summed, over key2 will be
6980 // accumulated, or vice versa, depending on dim
6981 if (dim==0) {
6982 Key<NDIM> key1;
6983 Key<LDIM> key2;
6984 key.break_apart(key1,key2);
6985
6986 Future<pairT> result;
6987 // sock_it_to_me(key1, result.remote_ref(world));
6988 g->task(coeffs.owner(key1), &implT::sock_it_to_me, key1, result.remote_ref(world), TaskAttributes::hipri());
6989 woT::task(world.rank(),&implT:: template do_project_out<LDIM>,fcoeff,result,key1,key2,dim);
6990
6991 } else if (dim==1) {
6992 Key<LDIM> key1;
6993 Key<NDIM> key2;
6994 key.break_apart(key1,key2);
6995
6996 Future<pairT> result;
6997 // sock_it_to_me(key2, result.remote_ref(world));
6998 g->task(coeffs.owner(key2), &implT::sock_it_to_me, key2, result.remote_ref(world), TaskAttributes::hipri());
6999 woT::task(world.rank(),&implT:: template do_project_out<LDIM>,fcoeff,result,key2,key1,dim);
7000
7001 } else {
7002 MADNESS_EXCEPTION("confused dim in project_out",1);
7003 }
7004 }
7005 }
7007// this->compressed=false;
7008// this->nonstandard=false;
7009// this->redundant=true;
7010 }
7011
7012
7013 /// compute the inner product of two nodes of only some dimensions and accumulate on result
7014
7015 /// invoked by result
7016 /// @param[in] fcoeff coefficients of high dimension LDIM+NDIM
7017 /// @param[in] gpair key and coeffs of low dimension LDIM (possibly a parent node)
7018 /// @param[in] gkey key of actual low dim node (possibly the same as gpair.first, iff gnode exists)
7019 /// @param[in] dest destination node for the result
7020 /// @param[in] dim which dimensions should be contracted: 0..LDIM-1 or NDIM..NDIM+LDIM-1
7021 template<size_t LDIM>
7022 void do_project_out(const coeffT& fcoeff, const std::pair<keyT,coeffT> gpair, const keyT& gkey,
7023 const Key<NDIM>& dest, const int dim) const {
7024
7025 const coeffT gcoeff=parent_to_child(gpair.second,gpair.first,gkey);
7026
7027 // fast return if possible
7028 if (fcoeff.has_no_data() or gcoeff.has_no_data()) return;
7029
7030 // let's specialize for the time being on SVD tensors for f and full tensors of half dim for g
7031 MADNESS_ASSERT(gcoeff.tensor_type()==TT_FULL);
7032 MADNESS_ASSERT(fcoeff.tensor_type()==TT_2D);
7033 const tensorT gtensor=gcoeff.full_tensor();
7034 tensorT result(cdata.vk);
7035
7036 const int otherdim=(dim+1)%2;
7037 const int k=fcoeff.dim(0);
7038 std::vector<Slice> s(fcoeff.config().dim_per_vector()+1,_);
7039
7040 // do the actual contraction
7041 for (int r=0; r<fcoeff.rank(); ++r) {
7042 s[0]=Slice(r,r);
7043 const tensorT contracted_tensor=fcoeff.config().ref_vector(dim)(s).reshape(k,k,k);
7044 const tensorT other_tensor=fcoeff.config().ref_vector(otherdim)(s).reshape(k,k,k);
7045 const double ovlp= gtensor.trace_conj(contracted_tensor);
7046 const double fac=ovlp * fcoeff.config().weights(r);
7047 result+=fac*other_tensor;
7048 }
7049
7050 // accumulate the result
7051 coeffs.task(dest, &nodeT::accumulate2, result, coeffs, dest, TaskAttributes::hipri());
7052 }
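            // Worked form of the rank loop above (not part of the original header; R = fcoeff.rank()):
            //   result_i = sum_{r<R} w_r * <g | a_r^(dim)> * a_r^(otherdim)_i
            // where a_r^(dim) is the r-th SVD vector of the integrated particle (reshaped to (k,k,k)),
            // w_r its weight, and <g | a_r^(dim)> = gtensor.trace_conj(contracted_tensor) the per-rank
            // overlap with the low-dimensional function g.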
7053
7054
7055
7056
7057 /// Returns the maximum local depth of the tree ... no communications.
7058 std::size_t max_local_depth() const;
7059
7060
7061 /// Returns the maximum depth of the tree ... collective ... global sum/broadcast
7062 std::size_t max_depth() const;
7063
7064 /// Returns the max number of nodes on a processor
7065 std::size_t max_nodes() const;
7066
7067 /// Returns the min number of nodes on a processor
7068 std::size_t min_nodes() const;
7069
7070 /// Returns the size of the tree structure of the function ... collective global sum
7071 std::size_t tree_size() const;
7072
7073 /// Returns the number of coefficients in the function for each rank
7074 std::size_t size_local() const;
7075
7076 /// Returns the number of coefficients in the function ... collective global sum
7077 std::size_t size() const;
7078
7079 /// Returns the number of coefficients in the function ... collective global sum
7080 std::size_t nCoeff() const;
7081
7082 /// Returns the number of coefficients in the function ... collective global sum
7083 std::size_t real_size() const;
7084
7085 /// print tree size and size
7086 void print_size(const std::string name) const;
7087
7088 /// print the number of configurations per node
7089 void print_stats() const;
7090
7091 /// In-place scale by a constant
7092 void scale_inplace(const T q, bool fence);
7093
7094 /// Out-of-place scale by a constant
7095 template <typename Q, typename F>
7096 void scale_oop(const Q q, const FunctionImpl<F,NDIM>& f, bool fence) {
7097 typedef typename FunctionImpl<F,NDIM>::nodeT fnodeT;
7098 typedef typename FunctionImpl<F,NDIM>::dcT fdcT;
7099 typename fdcT::const_iterator end = f.coeffs.end();
7100 for (typename fdcT::const_iterator it=f.coeffs.begin(); it!=end; ++it) {
7101 const keyT& key = it->first;
7102 const fnodeT& node = it->second;
7103
7104 if (node.has_coeff()) {
7105 coeffs.replace(key,nodeT(node.coeff()*q,node.has_children()));
7106 }
7107 else {
7108 coeffs.replace(key,nodeT(coeffT(),node.has_children()));
7109 }
7110 }
7111 if (fence)
7112 world.gop.fence();
7113 }
7114
7115 /// Hash a pointer to \c FunctionImpl
7116
7117 /// \param[in] pimpl pointer to a FunctionImpl
7118 /// \return The hash.
7119 inline friend hashT hash_value(const FunctionImpl<T,NDIM>* pimpl) {
7120 hashT seed = hash_value(pimpl->id().get_world_id());
7121 detail::combine_hash(seed, hash_value(pimpl->id().get_obj_id()));
7122 return seed;
7123 }
7124
7125 /// Hash a shared_ptr to \c FunctionImpl
7126
7127 /// \param[in] impl pointer to a FunctionImpl
7128 /// \return The hash.
7129 inline friend hashT hash_value(const std::shared_ptr<FunctionImpl<T,NDIM>> impl) {
7130 return hash_value(impl.get());
7131 }
7132 };
7133
7134 namespace archive {
7135 template <class Archive, class T, std::size_t NDIM>
7136 struct ArchiveLoadImpl<Archive,const FunctionImpl<T,NDIM>*> {
7137 static void load(const Archive& ar, const FunctionImpl<T,NDIM>*& ptr) {
7138 bool exists=false;
7139 ar & exists;
7140 if (exists) {
7141 uniqueidT id;
7142 ar & id;
7143 World* world = World::world_from_id(id.get_world_id());
7144 MADNESS_ASSERT(world);
7145 auto ptr_opt = world->ptr_from_id< WorldObject< FunctionImpl<T,NDIM> > >(id);
7146 if (!ptr_opt)
7147 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use a locally uninitialized object",0);
7148 ptr = static_cast< const FunctionImpl<T,NDIM>*>(*ptr_opt);
7149 if (!ptr)
7150 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use an unregistered object",0);
7151 } else {
7152 ptr=nullptr;
7153 }
7154 }
7155 };
7156
7157 template <class Archive, class T, std::size_t NDIM>
7158 struct ArchiveStoreImpl<Archive,const FunctionImpl<T,NDIM>*> {
7159 static void store(const Archive& ar, const FunctionImpl<T,NDIM>*const& ptr) {
7160 bool exists=(ptr) ? true : false;
7161 ar & exists;
7162 if (exists) ar & ptr->id();
7163 }
7164 };
7165
7166 template <class Archive, class T, std::size_t NDIM>
7167 struct ArchiveLoadImpl<Archive, FunctionImpl<T,NDIM>*> {
7168 static void load(const Archive& ar, FunctionImpl<T,NDIM>*& ptr) {
7169 bool exists=false;
7170 ar & exists;
7171 if (exists) {
7172 uniqueidT id;
7173 ar & id;
7174 World* world = World::world_from_id(id.get_world_id());
7175 MADNESS_ASSERT(world);
7176 auto ptr_opt = world->ptr_from_id< WorldObject< FunctionImpl<T,NDIM> > >(id);
7177 if (!ptr_opt)
7178 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use a locally uninitialized object",0);
7179 ptr = static_cast< FunctionImpl<T,NDIM>*>(*ptr_opt);
7180 if (!ptr)
7181 MADNESS_EXCEPTION("FunctionImpl: remote operation attempting to use an unregistered object",0);
7182 } else {
7183 ptr=nullptr;
7184 }
7185 }
7186 };
7187
7188 template <class Archive, class T, std::size_t NDIM>
7190 static void store(const Archive& ar, FunctionImpl<T,NDIM>*const& ptr) {
7191 bool exists=(ptr) ? true : false;
7192 ar & exists;
7193 if (exists) ar & ptr->id();
7194 // ar & ptr->id();
7195 }
7196 };
7197
7198 template <class Archive, class T, std::size_t NDIM>
7199 struct ArchiveLoadImpl<Archive, std::shared_ptr<const FunctionImpl<T,NDIM> > > {
7200 static void load(const Archive& ar, std::shared_ptr<const FunctionImpl<T,NDIM> >& ptr) {
7201 const FunctionImpl<T,NDIM>* f = nullptr;
7203 ptr.reset(f, [] (const FunctionImpl<T,NDIM> *p_) -> void {});
7204 }
7205 };
7206
7207 template <class Archive, class T, std::size_t NDIM>
7208 struct ArchiveStoreImpl<Archive, std::shared_ptr<const FunctionImpl<T,NDIM> > > {
7209 static void store(const Archive& ar, const std::shared_ptr<const FunctionImpl<T,NDIM> >& ptr) {
7211 }
7212 };
7213
7214 template <class Archive, class T, std::size_t NDIM>
7215 struct ArchiveLoadImpl<Archive, std::shared_ptr<FunctionImpl<T,NDIM> > > {
7216 static void load(const Archive& ar, std::shared_ptr<FunctionImpl<T,NDIM> >& ptr) {
7217 FunctionImpl<T,NDIM>* f = nullptr;
7219 ptr.reset(f, [] (FunctionImpl<T,NDIM> *p_) -> void {});
7220 }
7221 };
7222
7223 template <class Archive, class T, std::size_t NDIM>
7224 struct ArchiveStoreImpl<Archive, std::shared_ptr<FunctionImpl<T,NDIM> > > {
7225 static void store(const Archive& ar, const std::shared_ptr<FunctionImpl<T,NDIM> >& ptr) {
7227 }
7228 };
7229 }
7230
7231}
7232
7233#endif // MADNESS_MRA_FUNCIMPL_H__INCLUDED
double w(double t, double eps)
Definition DKops.h:22
double q(double t)
Definition DKops.h:18
This header should include pretty much everything needed for the parallel runtime.
An integer with atomic set, get, read+increment, read+decrement, and decrement+test operations.
Definition atomicint.h:126
long dim(int i) const
Returns the size of dimension i.
Definition basetensor.h:147
long ndim() const
Returns the number of dimensions in the tensor.
Definition basetensor.h:144
long size() const
Returns the number of elements in the tensor.
Definition basetensor.h:138
Definition displacements.h:713
Definition displacements.h:294
std::function< bool(Level, const PointPattern &, std::optional< Displacement > &)> Filter
this callable filters out points and/or displacements; note that the displacement is optional (this u...
Definition displacements.h:300
a class to track where relevant (parent) coeffs are
Definition funcimpl.h:791
const keyT & key() const
const reference to the key
Definition funcimpl.h:839
CoeffTracker(const CoeffTracker &other, const datumT &datum)
ctor with a pair<keyT,nodeT>
Definition funcimpl.h:821
const LeafStatus & is_leaf() const
const reference to is_leaf flag
Definition funcimpl.h:863
const implT * impl
the funcimpl that has the coeffs
Definition funcimpl.h:800
LeafStatus
Definition funcimpl.h:797
@ yes
Definition funcimpl.h:797
@ no
Definition funcimpl.h:797
@ unknown
Definition funcimpl.h:797
CoeffTracker(const CoeffTracker &other)
copy ctor
Definition funcimpl.h:829
double dnorm(const keyT &key) const
return the s and dnorm belonging to the passed-in key
Definition funcimpl.h:856
coeffT coeff_
the coefficients belonging to key
Definition funcimpl.h:806
const implT * get_impl() const
const reference to impl
Definition funcimpl.h:833
const coeffT & coeff() const
const reference to the coeffs
Definition funcimpl.h:836
keyT key_
the current key, which must exists in impl
Definition funcimpl.h:802
double dnorm_
norm of d coefficients corresponding to key
Definition funcimpl.h:808
CoeffTracker(const implT *impl)
the initial ctor making the root key
Definition funcimpl.h:816
void serialize(const Archive &ar)
serialization
Definition funcimpl.h:915
Future< CoeffTracker > activate() const
find the coefficients
Definition funcimpl.h:892
CoeffTracker()
default ctor
Definition funcimpl.h:813
GenTensor< T > coeffT
Definition funcimpl.h:795
CoeffTracker make_child(const keyT &child) const
make a child of this, ignoring the coeffs
Definition funcimpl.h:866
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:793
std::pair< Key< NDIM >, ShallowNode< T, NDIM > > datumT
Definition funcimpl.h:796
CoeffTracker forward_ctor(const CoeffTracker &other, const datumT &datum) const
taskq-compatible forwarding to the ctor
Definition funcimpl.h:909
LeafStatus is_leaf_
flag if key is a leaf node
Definition funcimpl.h:804
coeffT coeff(const keyT &key) const
return the coefficients belonging to the passed-in key
Definition funcimpl.h:847
Key< NDIM > keyT
Definition funcimpl.h:794
CompositeFunctorInterface implements a wrapper of holding several functions and functors.
Definition function_interface.h:165
Definition worldhashmap.h:396
Tri-diagonal operator traversing tree primarily for derivative operator.
Definition derivative.h:73
Holds displacements for applying operators to avoid replicating for all operators.
Definition displacements.h:51
const std::vector< Key< NDIM > > & get_disp(Level n, const array_of_bools< NDIM > &kernel_lattice_sum_axes)
Definition displacements.h:211
FunctionCommonData holds all Function data common for given k.
Definition function_common_data.h:52
Tensor< double > quad_phit
transpose of quad_phi
Definition function_common_data.h:102
Tensor< double > quad_phiw
quad_phiw(i,j) = at x[i] value of w[i]*phi[j]
Definition function_common_data.h:103
std::vector< long > vk
(k,...) used to initialize Tensors
Definition function_common_data.h:93
std::vector< Slice > s0
s[0] in each dimension to get scaling coeff
Definition function_common_data.h:91
static const FunctionCommonData< T, NDIM > & get(int k)
Definition function_common_data.h:111
static void _init_quadrature(int k, int npt, Tensor< double > &quad_x, Tensor< double > &quad_w, Tensor< double > &quad_phi, Tensor< double > &quad_phiw, Tensor< double > &quad_phit)
Initialize the quadrature information.
Definition mraimpl.h:88
collect common functionality does not need to be member function of funcimpl
Definition function_common_data.h:135
const FunctionCommonData< T, NDIM > & cdata
Definition function_common_data.h:138
GenTensor< T > coeffs2values(const Key< NDIM > &key, const GenTensor< T > &coeff) const
Definition function_common_data.h:142
Tensor< T > values2coeffs(const Key< NDIM > &key, const Tensor< T > &values) const
Definition function_common_data.h:155
FunctionDefaults holds default paramaters as static class members.
Definition funcdefaults.h:100
static const double & get_thresh()
Returns the default threshold.
Definition funcdefaults.h:176
static int get_max_refine_level()
Gets the default maximum adaptive refinement level.
Definition funcdefaults.h:213
static const Tensor< double > & get_cell_width()
Returns the width of each user cell dimension.
Definition funcdefaults.h:369
static bool get_apply_randomize()
Gets the random load balancing for integral operators flag.
Definition funcdefaults.h:289
static const Tensor< double > & get_cell()
Gets the user cell for the simulation.
Definition funcdefaults.h:347
FunctionFactory implements the named-parameter idiom for Function.
Definition function_factory.h:86
bool _refine
Definition function_factory.h:99
bool _empty
Definition function_factory.h:100
bool _fence
Definition function_factory.h:103
Abstract base class interface required for functors used as input to Functions.
Definition function_interface.h:68
Definition funcimpl.h:5441
double operator()(double a, double b) const
Definition funcimpl.h:5467
const opT * func
Definition funcimpl.h:5443
Tensor< double > qx
Definition funcimpl.h:5445
double operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5458
void serialize(const Archive &ar)
Definition funcimpl.h:5472
do_err_box(const implT *impl, const opT *func, int npt, const Tensor< double > &qx, const Tensor< double > &quad_phit, const Tensor< double > &quad_phiw)
Definition funcimpl.h:5451
int npt
Definition funcimpl.h:5444
Tensor< double > quad_phiw
Definition funcimpl.h:5447
const implT * impl
Definition funcimpl.h:5442
Tensor< double > quad_phit
Definition funcimpl.h:5446
do_err_box(const do_err_box &e)
Definition funcimpl.h:5455
FunctionImpl holds all Function state to facilitate shallow copy semantics.
Definition funcimpl.h:945
std::tuple< std::set< Key< NDIM > >, std::map< Key< CDIM >, double > > get_contraction_node_lists(const std::size_t n, const std::array< int, CDIM > &v) const
for contraction two functions f(x,z) = \int g(x,y) h(y,z) dy
Definition funcimpl.h:6146
void copy_coeffs(const FunctionImpl< Q, NDIM > &other, bool fence)
Copy coeffs from other into self.
Definition funcimpl.h:1118
bool is_nonstandard() const
Definition mraimpl.h:252
T eval_cube(Level n, coordT &x, const tensorT &c) const
Definition mraimpl.h:2047
void partial_inner_contract(const FunctionImpl< Q, LDIM > *g, const FunctionImpl< R, KDIM > *h, const std::array< int, CDIM > v1, const std::array< int, CDIM > v2, const Key< NDIM > &key, const std::list< Key< CDIM > > &j_key_list)
tensor contraction part of partial_inner
Definition funcimpl.h:6306
AtomicInt large
Definition funcimpl.h:1001
Timer timer_target_driven
Definition funcimpl.h:999
void binaryXX(const FunctionImpl< L, NDIM > *left, const FunctionImpl< R, NDIM > *right, const opT &op, bool fence)
Definition funcimpl.h:3186
void do_apply(const opT *op, const keyT &key, const Tensor< R > &c)
apply an operator on the coeffs c (at node key)
Definition funcimpl.h:4781
void do_print_tree_graphviz(const keyT &key, std::ostream &os, Level maxlevel) const
Functor for the do_print_tree method (using GraphViz)
Definition mraimpl.h:2781
void add_keys_to_map(mapT *map, int index) const
Adds keys to union of local keys with specified index.
Definition funcimpl.h:5725
void change_tensor_type1(const TensorArgs &targs, bool fence)
change the tensor type of the coefficients in the FunctionNode
Definition mraimpl.h:1078
void gaxpy_ext_recursive(const keyT &key, const FunctionImpl< L, NDIM > *left, Tensor< L > lcin, tensorT c, T(*f)(const coordT &), T alpha, T beta, double tol, bool below_leaf)
Definition funcimpl.h:6715
int initial_level
Initial level for refinement.
Definition funcimpl.h:974
int max_refine_level
Do not refine below this level.
Definition funcimpl.h:977
double do_apply_kernel3(const opT *op, const GenTensor< R > &coeff, const do_op_args< OPDIM > &args, const TensorArgs &apply_targs)
same as do_apply_kernel2, but use low rank tensors as input and low rank tensors as output
Definition funcimpl.h:4739
void hartree_product(const std::vector< std::shared_ptr< FunctionImpl< T, LDIM > > > p1, const std::vector< std::shared_ptr< FunctionImpl< T, LDIM > > > p2, const leaf_opT &leaf_op, bool fence)
given two functions of LDIM, perform the Hartree/Kronecker/outer product
Definition funcimpl.h:3698
void traverse_tree(const coeff_opT &coeff_op, const apply_opT &apply_op, const keyT &key) const
traverse a non-existing tree
Definition funcimpl.h:3668
void do_square_inplace(const keyT &key)
int special_level
Minimium level for refinement on special points.
Definition funcimpl.h:975
void do_apply_kernel(const opT *op, const Tensor< R > &c, const do_op_args< OPDIM > &args)
for fine-grain parallelism: call the apply method of an operator in a separate task
Definition funcimpl.h:4673
double errsq_local(const opT &func) const
Returns the sum of squares of errors from local info ... no comms.
Definition funcimpl.h:5479
WorldContainer< keyT, nodeT > dcT
Type of container holding the coefficients.
Definition funcimpl.h:957
void evaldepthpt(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< Level >::remote_refT &ref)
Get the depth of the tree at a point in simulation coordinates.
Definition mraimpl.h:2962
void scale_inplace(const T q, bool fence)
In-place scale by a constant.
Definition mraimpl.h:3133
void gaxpy_oop_reconstructed(const double alpha, const implT &f, const double beta, const implT &g, const bool fence)
perform: this= alpha*f + beta*g, invoked by result
Definition mraimpl.h:208
void unary_op_coeff_inplace(const opT &op, bool fence)
Definition funcimpl.h:2030
World & world
Definition funcimpl.h:964
void apply_1d_realspace_push_op(const archive::archive_ptr< const opT > &pop, int axis, const keyT &key, const Tensor< R > &c)
Definition funcimpl.h:3736
bool is_redundant() const
Returns true if the function is redundant.
Definition mraimpl.h:247
FunctionNode< T, NDIM > nodeT
Type of node.
Definition funcimpl.h:955
void print_size(const std::string name) const
print tree size and size
Definition mraimpl.h:1963
FunctionImpl(const FunctionImpl< T, NDIM > &p)
void print_info() const
Prints summary of data distribution.
Definition mraimpl.h:812
void abs_inplace(bool fence)
Definition mraimpl.h:3145
void binaryXXa(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const FunctionImpl< R, NDIM > *right, const Tensor< R > &rcin, const opT &op)
Definition funcimpl.h:3055
void print_timer() const
Definition mraimpl.h:336
void evalR(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< long >::remote_refT &ref)
Get the rank of leaf box of the tree at a point in simulation coordinates.
Definition mraimpl.h:3004
const FunctionCommonData< T, NDIM > & cdata
Definition funcimpl.h:983
void do_print_grid(const std::string filename, const std::vector< keyT > &keys) const
print the grid in xyz format
Definition mraimpl.h:563
void mulXXa(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const FunctionImpl< R, NDIM > *right, const Tensor< R > &rcin, double tol)
Definition funcimpl.h:2969
const std::vector< Vector< double, NDIM > > & get_special_points() const
Definition funcimpl.h:969
std::size_t nCoeff() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1949
double vol_nsphere(int n, double R)
Definition funcimpl.h:4769
keyT neighbor_in_volume(const keyT &key, const keyT &disp) const
Returns key of general neighbor that resides in-volume.
Definition mraimpl.h:3252
void compress(const TreeState newstate, bool fence)
compress the wave function
Definition mraimpl.h:1527
void do_dirac_convolution(FunctionImpl< T, LDIM > *f, bool fence) const
Definition funcimpl.h:2113
std::pair< coeffT, double > compress_op(const keyT &key, const std::vector< Future< std::pair< coeffT, double > > > &v, bool nonstandard)
calculate the wavelet coefficients using the sum coefficients of all child nodes
Definition mraimpl.h:1695
Future< bool > truncate_spawn(const keyT &key, double tol)
Returns true if after truncation this node has coefficients.
Definition mraimpl.h:2626
void print_type_in_compilation_error(R &&)
Definition funcimpl.h:6028
Future< double > norm_tree_spawn(const keyT &key)
Definition mraimpl.h:1597
std::vector< keyT > local_leaf_keys() const
return the keys of the local leaf boxes
Definition mraimpl.h:537
MADNESS_ASSERT(this->is_redundant()==g.is_redundant())
void do_print_tree(const keyT &key, std::ostream &os, Level maxlevel) const
Functor for the do_print_tree method.
Definition mraimpl.h:2699
void vtransform(const std::vector< std::shared_ptr< FunctionImpl< R, NDIM > > > &vright, const Tensor< Q > &c, const std::vector< std::shared_ptr< FunctionImpl< T, NDIM > > > &vleft, double tol, bool fence)
Definition funcimpl.h:2830
void unset_functor()
Definition mraimpl.h:291
void refine_spawn(const opT &op, const keyT &key)
Definition funcimpl.h:4501
void apply_1d_realspace_push(const opT &op, const FunctionImpl< R, NDIM > *f, int axis, bool fence)
Definition funcimpl.h:3787
static double conj(float x)
Definition funcimpl.h:5914
void do_print_plane(const std::string filename, std::vector< Tensor< double > > plotinfo, const int xaxis, const int yaxis, const coordT el2)
print the MRA structure
Definition mraimpl.h:478
std::pair< Key< NDIM >, ShallowNode< T, NDIM > > find_datum(keyT key) const
return the a std::pair<key, node>, which MUST exist
Definition mraimpl.h:944
void set_functor(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > functor1)
Definition mraimpl.h:272
std::enable_if< NDIM==FDIM >::type read_grid2(const std::string gridfile, std::shared_ptr< FunctionFunctorInterface< double, NDIM > > vnuc_functor)
read data from a grid
Definition funcimpl.h:1564
const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > & get_pmap() const
Definition mraimpl.h:192
Tensor< Q > fcube_for_mul(const keyT &child, const keyT &parent, const Tensor< Q > &coeff) const
Compute the function values for multiplication.
Definition funcimpl.h:1877
Timer timer_filter
Definition funcimpl.h:997
void sock_it_to_me(const keyT &key, const RemoteReference< FutureImpl< std::pair< keyT, coeffT > > > &ref) const
Walk up the tree returning pair(key,node) for first node with coefficients.
Definition mraimpl.h:2839
void recursive_apply(opT &apply_op, const implT *fimpl, implT *rimpl, const bool fence)
traverse an existing tree and apply an operator
Definition funcimpl.h:5298
double get_thresh() const
Definition mraimpl.h:307
void trickle_down(bool fence)
sum all the contributions from all scales after applying an operator in mod-NS form
Definition mraimpl.h:1333
bool autorefine
If true, autorefine where appropriate.
Definition funcimpl.h:979
std::pair< coeffT, double > make_redundant_op(const keyT &key, const std::vector< Future< std::pair< coeffT, double > > > &v)
similar to compress_op, but insert only the sum coefficients in the tree
Definition mraimpl.h:1755
void set_autorefine(bool value)
Definition mraimpl.h:316
tensorT filter(const tensorT &s) const
Transform sum coefficients at level n to sums+differences at level n-1.
Definition mraimpl.h:1131
void chop_at_level(const int n, const bool fence=true)
remove all nodes with level higher than n
Definition mraimpl.h:1094
void unaryXXvalues(const FunctionImpl< Q, NDIM > *func, const opT &op, bool fence)
Definition funcimpl.h:3213
static std::complex< double > conj(const std::complex< double > x)
Definition funcimpl.h:5918
void partial_inner(const FunctionImpl< Q, LDIM > &g, const FunctionImpl< R, KDIM > &h, const std::array< int, CDIM > v1, const std::array< int, CDIM > v2)
invoked by result
Definition funcimpl.h:6044
TreeState tree_state
Definition funcimpl.h:986
void print_tree_json(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2719
coeffT parent_to_child_NS(const keyT &child, const keyT &parent, const coeffT &coeff) const
Directly project parent NS coeffs to child NS coeffs.
Definition mraimpl.h:686
void mapdim(const implT &f, const std::vector< long > &map, bool fence)
Permute the dimensions of f according to map, result on this.
Definition mraimpl.h:1036
bool is_compressed() const
Returns true if the function is compressed.
Definition mraimpl.h:235
Vector< double, NDIM > coordT
Type of vector holding coordinates.
Definition funcimpl.h:959
void apply(opT &op, const FunctionImpl< R, NDIM > &f, bool fence)
apply an operator on f to return this
Definition funcimpl.h:4968
Tensor< T > tensorT
Type of tensor for anything but to hold coeffs.
Definition funcimpl.h:952
void mirror(const implT &f, const std::vector< long > &mirror, bool fence)
mirror the dimensions of f according to map, result on this
Definition mraimpl.h:1045
T inner_adaptive_recursive(keyT key, const tensorT &c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine, T old_inner=T(0)) const
Definition funcimpl.h:6638
void store(Archive &ar)
Definition funcimpl.h:1242
void do_binary_op(const keyT &key, const Tensor< L > &left, const std::pair< keyT, Tensor< R > > &arg, const opT &op)
Functor for the binary_op method.
Definition funcimpl.h:1979
void gaxpy_ext(const FunctionImpl< L, NDIM > *left, T(*f)(const coordT &), T alpha, T beta, double tol, bool fence)
Definition funcimpl.h:6802
void accumulate_trees(FunctionImpl< Q, NDIM > &result, const R alpha, const bool fence=true) const
merge the trees of this and other, while multiplying them with the alpha or beta, resp
Definition funcimpl.h:1166
void print_stats() const
print the number of configurations per node
Definition mraimpl.h:1991
void broaden(const array_of_bools< NDIM > &is_periodic, bool fence)
Definition mraimpl.h:1282
coeffT truncate_reconstructed_op(const keyT &key, const std::vector< Future< coeffT > > &v, const double tol)
given the sum coefficients of all children, truncate or not
Definition mraimpl.h:1644
void refine_op(const opT &op, const keyT &key)
Definition funcimpl.h:4476
static Tensor< TENSOR_RESULT_TYPE(T, R) > inner_local(const std::vector< const FunctionImpl< T, NDIM > * > &left, const std::vector< const FunctionImpl< R, NDIM > * > &right, bool sym)
Definition funcimpl.h:5924
void fcube(const keyT &key, const FunctionFunctorInterface< T, NDIM > &f, const Tensor< double > &qx, tensorT &fval) const
Evaluate function at quadrature points in the specified box.
Definition mraimpl.h:2464
Timer timer_change_tensor_type
Definition funcimpl.h:995
void forward_do_diff1(const DerivativeBase< T, NDIM > *D, const implT *f, const keyT &key, const std::pair< keyT, coeffT > &left, const std::pair< keyT, coeffT > &center, const std::pair< keyT, coeffT > &right)
Definition mraimpl.h:902
std::vector< Slice > child_patch(const keyT &child) const
Returns patch referring to coeffs of child in parent box.
Definition mraimpl.h:675
void print_tree_graphviz(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2772
void set_tree_state(const TreeState &state)
Definition funcimpl.h:1270
std::size_t min_nodes() const
Returns the min number of nodes on a processor.
Definition mraimpl.h:1900
std::shared_ptr< FunctionFunctorInterface< T, NDIM > > functor
Definition funcimpl.h:985
Timer timer_compress_svd
Definition funcimpl.h:998
Tensor< TENSOR_RESULT_TYPE(T, R)> mul(const Tensor< T > &c1, const Tensor< R > &c2, const int npt, const keyT &key) const
multiply the values of two coefficient tensors using a custom number of grid points
Definition funcimpl.h:1952
void make_redundant(const bool fence)
convert this to redundant, i.e. have sum coefficients on all levels
Definition mraimpl.h:1555
void load(Archive &ar)
Definition funcimpl.h:1224
std::size_t max_nodes() const
Returns the max number of nodes on a processor.
Definition mraimpl.h:1891
T inner_ext_local(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine) const
Definition funcimpl.h:6613
coeffT upsample(const keyT &key, const coeffT &coeff) const
upsample the sum coefficients of level 1 to sum coeffs on level n+1
Definition mraimpl.h:1210
TensorArgs targs
type of tensor to be used in the FunctionNodes
Definition funcimpl.h:981
void flo_unary_op_node_inplace(const opT &op, bool fence)
Definition funcimpl.h:2142
std::size_t size_local() const
Returns the number of coefficients in the function for each rank.
Definition mraimpl.h:1918
GenTensor< Q > values2coeffs(const keyT &key, const GenTensor< Q > &values) const
Definition funcimpl.h:1856
void plot_cube_kernel(archive::archive_ptr< Tensor< T > > ptr, const keyT &key, const coordT &plotlo, const coordT &plothi, const std::vector< long > &npt, bool eval_refine) const
Definition mraimpl.h:3343
T trace_local() const
Returns int(f(x),x) in local volume.
Definition mraimpl.h:3187
void print_grid(const std::string filename) const
Definition mraimpl.h:521
Future< std::pair< coeffT, double > > compress_spawn(const keyT &key, bool nonstandard, bool keepleaves, bool redundant1)
Invoked on node where key is local.
Definition mraimpl.h:3280
bool get_autorefine() const
Definition mraimpl.h:313
int k
Wavelet order.
Definition funcimpl.h:972
void vtransform_doit(const std::shared_ptr< FunctionImpl< R, NDIM > > &right, const Tensor< Q > &c, const std::vector< std::shared_ptr< FunctionImpl< T, NDIM > > > &vleft, double tol)
Definition funcimpl.h:2674
MADNESS_CHECK(this->is_reconstructed())
void phi_for_mul(Level np, Translation lp, Level nc, Translation lc, Tensor< double > &phi) const
Compute the Legendre scaling functions for multiplication.
Definition mraimpl.h:3155
Future< std::pair< keyT, coeffT > > find_me(const keyT &key) const
find_me. Called by diff_bdry to get coefficients of boundary function
Definition mraimpl.h:3267
TensorType get_tensor_type() const
Definition mraimpl.h:298
void do_project_out(const coeffT &fcoeff, const std::pair< keyT, coeffT > gpair, const keyT &gkey, const Key< NDIM > &dest, const int dim) const
compute the inner product of two nodes of only some dimensions and accumulate on result
Definition funcimpl.h:7022
void remove_leaf_coefficients(const bool fence)
Definition mraimpl.h:1549
void insert_zero_down_to_initial_level(const keyT &key)
Initialize nodes to zero function at initial_level of refinement.
Definition mraimpl.h:2595
void do_diff1(const DerivativeBase< T, NDIM > *D, const implT *f, const keyT &key, const std::pair< keyT, coeffT > &left, const std::pair< keyT, coeffT > &center, const std::pair< keyT, coeffT > &right)
Definition mraimpl.h:913
typedef TENSOR_RESULT_TYPE(T, R) resultT
void unary_op_node_inplace(const opT &op, bool fence)
Definition funcimpl.h:2051
T inner_adaptive_local(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine) const
Definition funcimpl.h:6624
void do_print_tree_json(const keyT &key, std::multimap< Level, std::tuple< tranT, std::string > > &data, Level maxlevel) const
Functor for the do_print_tree_json method.
Definition mraimpl.h:2750
std::multimap< Key< FDIM >, std::list< Key< CDIM > > > recur_down_for_contraction_map(const keyT &key, const nodeT &node, const std::array< int, CDIM > &v_this, const std::array< int, CDIM > &v_other, const std::set< Key< ODIM > > &ij_other_list, const std::map< Key< CDIM >, double > &j_other_list, bool this_first, const double thresh)
make a map of all nodes that will contribute to a partial inner product
Definition funcimpl.h:6199
std::shared_ptr< FunctionImpl< T, NDIM > > pimplT
pointer to this class
Definition funcimpl.h:951
void finalize_sum()
after summing up we need to do some cleanup.
Definition mraimpl.h:1848
std::enable_if< NDIM==FDIM >::type read_grid(const std::string keyfile, const std::string gridfile, std::shared_ptr< FunctionFunctorInterface< double, NDIM > > vnuc_functor)
read data from a grid
Definition funcimpl.h:1457
dcT coeffs
The coefficients.
Definition funcimpl.h:988
bool exists_and_is_leaf(const keyT &key) const
Definition mraimpl.h:1254
void make_Vphi(const opT &leaf_op, const bool fence=true)
assemble the function V*phi using V and phi given from the functor
Definition funcimpl.h:4268
void unaryXX(const FunctionImpl< Q, NDIM > *func, const opT &op, bool fence)
Definition funcimpl.h:3200
std::vector< std::pair< int, const coeffT * > > mapvecT
Type of the entry in the map returned by make_key_vec_map.
Definition funcimpl.h:5719
void project_out(FunctionImpl< T, NDIM-LDIM > *result, const FunctionImpl< T, LDIM > *gimpl, const int dim, const bool fence)
project the low-dim function g on the hi-dim function f: result(x) = <this(x,y) | g(y)>
Definition funcimpl.h:6818
void verify_tree() const
Verify tree is properly constructed ... global synchronization involved.
Definition mraimpl.h:109
void do_square_inplace2(const keyT &parent, const keyT &child, const tensorT &parent_coeff)
void gaxpy_inplace_reconstructed(const T &alpha, const FunctionImpl< Q, NDIM > &g, const R &beta, const bool fence)
Definition funcimpl.h:1136
void set_tensor_args(const TensorArgs &t)
Definition mraimpl.h:304
GenTensor< Q > fcube_for_mul(const keyT &child, const keyT &parent, const GenTensor< Q > &coeff) const
Compute the function values for multiplication.
Definition funcimpl.h:1905
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:5570
std::size_t real_size() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1936
bool exists_and_has_children(const keyT &key) const
Definition mraimpl.h:1249
void sum_down_spawn(const keyT &key, const coeffT &s)
is this the same as trickle_down()?
Definition mraimpl.h:855
void multi_to_multi_op_values(const opT &op, const std::vector< implT * > &vin, std::vector< implT * > &vout, const bool fence=true)
Inplace operate on many functions (impl's) with an operator within a certain box.
Definition funcimpl.h:2801
long box_interior[1000]
Definition funcimpl.h:3244
keyT neighbor(const keyT &key, const keyT &disp, const array_of_bools< NDIM > &is_periodic) const
Returns key of general neighbor enforcing BC.
Definition mraimpl.h:3237
GenTensor< Q > NS_fcube_for_mul(const keyT &child, const keyT &parent, const GenTensor< Q > &coeff, const bool s_only) const
Compute the function values for multiplication.
Definition funcimpl.h:1775
rangeT range(coeffs.begin(), coeffs.end())
void norm_tree(bool fence)
compute for each FunctionNode the norm of the function inside that node
Definition mraimpl.h:1574
void gaxpy_inplace(const T &alpha, const FunctionImpl< Q, NDIM > &other, const R &beta, bool fence)
Definition funcimpl.h:1211
bool has_leaves() const
Definition mraimpl.h:267
void apply_source_driven(opT &op, const FunctionImpl< R, NDIM > &f, bool fence)
similar to apply, but for low rank coeffs
Definition funcimpl.h:5123
void distribute(std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > newmap) const
Definition funcimpl.h:1110
int get_special_level() const
Definition funcimpl.h:968
void reconstruct_op(const keyT &key, const coeffT &s, const bool accumulate_NS=true)
Definition mraimpl.h:2101
tensorT gaxpy_ext_node(keyT key, Tensor< L > lc, T(*f)(const coordT &), T alpha, T beta) const
Definition funcimpl.h:6691
const coeffT parent_to_child(const coeffT &s, const keyT &parent, const keyT &child) const
Directly project parent coeffs to child coeffs.
Definition mraimpl.h:3170
WorldObject< FunctionImpl< T, NDIM > > woT
Base class world object type.
Definition funcimpl.h:947
void undo_redundant(const bool fence)
convert this from redundant to standard reconstructed form
Definition mraimpl.h:1565
GenTensor< T > coeffT
Type of tensor used to hold coeffs.
Definition funcimpl.h:956
const keyT & key0() const
Returns cdata.key0.
Definition mraimpl.h:373
double finalize_apply()
after apply we need to do some cleanup.
Definition mraimpl.h:1805
bool leaves_only
Definition funcimpl.h:5575
friend hashT hash_value(const FunctionImpl< T, NDIM > *pimpl)
Hash a pointer to FunctionImpl.
Definition funcimpl.h:7119
const dcT & get_coeffs() const
Definition mraimpl.h:322
T inner_ext_node(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f) const
Return the inner product with an external function on a specified function node.
Definition funcimpl.h:6490
double norm2sq_local() const
Returns the square of the local norm ... no comms.
Definition mraimpl.h:1857
const FunctionCommonData< T, NDIM > & get_cdata() const
Definition mraimpl.h:328
void sum_down(bool fence)
After 1d push operator must sum coeffs down the tree to restore correct scaling function coefficients...
Definition mraimpl.h:894
T inner_ext_recursive(keyT key, tensorT c, const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const bool leaf_refine, T old_inner=T(0)) const
Definition funcimpl.h:6507
bool noautorefine(const keyT &key, const tensorT &t) const
Always returns false (for when autorefine is not wanted)
Definition mraimpl.h:838
double truncate_tol(double tol, const keyT &key) const
Returns the truncation threshold according to truncate_mode.
Definition mraimpl.h:628
void flo_unary_op_node_inplace(const opT &op, bool fence) const
Definition funcimpl.h:2152
bool autorefine_square_test(const keyT &key, const nodeT &t) const
Returns true if this block of coeffs needs autorefining.
Definition mraimpl.h:844
void erase(const Level &max_level)
truncate tree at a certain level
Definition mraimpl.h:718
void mulXX(const FunctionImpl< L, NDIM > *left, const FunctionImpl< R, NDIM > *right, double tol, bool fence)
Definition funcimpl.h:3172
void reconstruct(bool fence)
reconstruct this tree – respects fence
Definition mraimpl.h:1495
void multiply(const implT *f, const FunctionImpl< T, LDIM > *g, const int particle)
multiply f (a pair function of NDIM) with an orbital g (LDIM=NDIM/2)
Definition funcimpl.h:3560
coeffT assemble_coefficients(const keyT &key, const coeffT &coeff_ket, const coeffT &vpotential1, const coeffT &vpotential2, const tensorT &veri) const
given several coefficient tensors, assemble a result tensor
Definition mraimpl.h:992
static void tnorm(const tensorT &t, double *lo, double *hi)
Computes the norms of the low/high-order polynomial coefficients for the autorefinement test.
Definition mraimpl.h:3047
std::pair< bool, T > eval_local_only(const Vector< double, NDIM > &xin, Level maxlevel)
Evaluate function only if point is local returning (true,value); otherwise return (false,...
Definition mraimpl.h:2933
std::size_t max_depth() const
Returns the maximum depth of the tree ... collective ... global sum/broadcast.
Definition mraimpl.h:1883
std::size_t size() const
Returns the number of coefficients in the function ... collective global sum.
Definition mraimpl.h:1928
void reduce_rank(const double thresh, bool fence)
reduce the rank of the coefficients tensors
Definition mraimpl.h:1086
TreeState get_tree_state() const
Definition funcimpl.h:1274
void merge_trees(const T alpha, const FunctionImpl< Q, NDIM > &other, const R beta, const bool fence=true)
merge the trees of this and other, multiplying them by alpha and beta, respectively
Definition funcimpl.h:1154
std::shared_ptr< FunctionFunctorInterface< T, NDIM > > get_functor()
Definition mraimpl.h:279
double do_apply_directed_screening(const opT *op, const keyT &key, const coeffT &coeff, const bool &do_kernel)
apply an operator on the coeffs c (at node key)
Definition funcimpl.h:5005
tensorT unfilter(const tensorT &s) const
Transform sums+differences at level n to sum coefficients at level n+1.
Definition mraimpl.h:1160
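A worked restatement of the brief above, with H and G standing for the two-scale synthesis and analysis matrices (placeholder symbols, not names used in the code): unfilter maps a box's sum and difference coefficients at level n onto the sum coefficients of its children at level n+1, while downsample (mraimpl.h:1180) inverts the step,

    s^{n+1}_{2l,2l+1} = H [ s^n_l ; d^n_l ],        s^n_l = G s^{n+1}_{2l,2l+1}.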
int get_initial_level() const
getter
Definition funcimpl.h:967
Tensor< T > eval_plot_cube(const coordT &plotlo, const coordT &plothi, const std::vector< long > &npt, const bool eval_refine=false) const
Definition mraimpl.h:3436
virtual ~FunctionImpl()
Definition funcimpl.h:1102
Vector< Translation, NDIM > tranT
Type of array holding translation.
Definition funcimpl.h:953
void change_tree_state(const TreeState finalstate, bool fence=true)
change the tree state of this function, might or might not respect fence!
Definition mraimpl.h:1386
Future< coeffT > truncate_reconstructed_spawn(const keyT &key, const double tol)
truncate using a tree in reconstructed form
Definition mraimpl.h:1620
GenTensor< Q > coeffs2values(const keyT &key, const GenTensor< Q > &coeff) const
Definition funcimpl.h:1723
FunctionImpl(const FunctionFactory< T, NDIM > &factory)
Initialize function impl from data in factory.
Definition funcimpl.h:1004
void map_and_mirror(const implT &f, const std::vector< long > &map, const std::vector< long > &mirror, bool fence)
map and mirror the translation index and the coefficients, result on this
Definition mraimpl.h:1055
Timer timer_lr_result
Definition funcimpl.h:996
void gaxpy(T alpha, const FunctionImpl< L, NDIM > &left, T beta, const FunctionImpl< R, NDIM > &right, bool fence)
Invoked by result to perform result += alpha*left+beta*right in wavelet basis.
Definition funcimpl.h:2002
void truncate(double tol, bool fence)
Truncate according to the threshold with optional global fence.
Definition mraimpl.h:357
void do_mul(const keyT &key, const Tensor< L > &left, const std::pair< keyT, Tensor< R > > &arg)
Functor for the mul method.
Definition funcimpl.h:1927
void project_out2(const FunctionImpl< T, LDIM+NDIM > *f, const FunctionImpl< T, LDIM > *g, const int dim)
project the low-dim function g on the hi-dim function f: this(x) = <f(x,y) | g(y)>
Definition funcimpl.h:6964
double do_apply_kernel2(const opT *op, const Tensor< R > &c, const do_op_args< OPDIM > &args, const TensorArgs &apply_targs)
same as do_apply_kernel, but use full rank tensors as input and low rank tensors as output
Definition funcimpl.h:4701
static Tensor< TENSOR_RESULT_TYPE(T, R)> dot_local(const std::vector< const FunctionImpl< T, NDIM > * > &left, const std::vector< const FunctionImpl< R, NDIM > * > &right, bool sym)
Definition funcimpl.h:5976
Tensor< Q > coeffs2values(const keyT &key, const Tensor< Q > &coeff) const
Definition funcimpl.h:1849
Tensor< Q > values2coeffs(const keyT &key, const Tensor< Q > &values) const
Definition funcimpl.h:1863
void multi_to_multi_op_values_doit(const keyT &key, const opT &op, const std::vector< implT * > &vin, std::vector< implT * > &vout)
Inplace operate on many functions (impl's) with an operator within a certain box.
Definition funcimpl.h:2778
bool is_reconstructed() const
Returns true if the function is reconstructed.
Definition mraimpl.h:241
void replicate(bool fence=true)
Definition funcimpl.h:1106
double norm_tree_op(const keyT &key, const std::vector< Future< double > > &v)
Definition mraimpl.h:1582
void reset_timer()
Definition mraimpl.h:345
void refine_to_common_level(const std::vector< FunctionImpl< T, NDIM > * > &v, const std::vector< tensorT > &c, const keyT key)
Refine multiple functions down to the same finest level.
Definition mraimpl.h:748
int get_k() const
Definition mraimpl.h:319
void dirac_convolution_op(const keyT &key, const nodeT &node, FunctionImpl< T, LDIM > *f) const
The operator.
Definition funcimpl.h:2068
FunctionImpl< T, NDIM > implT
Type of this class (implementation)
Definition funcimpl.h:950
void eval(const Vector< double, NDIM > &xin, const keyT &keyin, const typename Future< T >::remote_refT &ref)
Evaluate the function at a point in simulation coordinates.
Definition mraimpl.h:2889
bool truncate_op(const keyT &key, double tol, const std::vector< Future< bool > > &v)
Definition mraimpl.h:2662
void zero_norm_tree()
Definition mraimpl.h:1271
std::size_t max_local_depth() const
Returns the maximum local depth of the tree ... no communications.
Definition mraimpl.h:1869
tensorT project(const keyT &key) const
Definition mraimpl.h:2807
double thresh
Screening threshold.
Definition funcimpl.h:973
double check_symmetry_local() const
Returns some asymmetry measure ... no comms.
Definition mraimpl.h:734
Future< double > get_norm_tree_recursive(const keyT &key) const
Definition mraimpl.h:2828
void mulXXvec(const FunctionImpl< L, NDIM > *left, const std::vector< const FunctionImpl< R, NDIM > * > &vright, const std::vector< FunctionImpl< T, NDIM > * > &vresult, double tol, bool fence)
Definition funcimpl.h:3229
Key< NDIM > keyT
Type of key.
Definition funcimpl.h:954
friend hashT hash_value(const std::shared_ptr< FunctionImpl< T, NDIM > > impl)
Hash a shared_ptr to FunctionImpl.
Definition funcimpl.h:7129
std::vector< Vector< double, NDIM > > special_points
special points for further refinement (needed for composite functions or multiplication)
Definition funcimpl.h:976
bool truncate_on_project
If true projection inserts at level n-1 not n.
Definition funcimpl.h:980
AtomicInt small
Definition funcimpl.h:1000
static void do_dot_localX(const typename mapT::iterator lstart, const typename mapT::iterator lend, typename FunctionImpl< R, NDIM >::mapT *rmap_ptr, const bool sym, Tensor< TENSOR_RESULT_TYPE(T, R)> *result_ptr, Mutex *mutex)
Definition funcimpl.h:5875
bool is_on_demand() const
Definition mraimpl.h:262
double err_box(const keyT &key, const nodeT &node, const opT &func, int npt, const Tensor< double > &qx, const Tensor< double > &quad_phit, const Tensor< double > &quad_phiw) const
Returns the square of the error norm in the box labeled by key.
Definition funcimpl.h:5411
void accumulate_timer(const double time) const
Definition mraimpl.h:331
void trickle_down_op(const keyT &key, const coeffT &s)
sum all the contributions from all scales after applying an operator in mod-NS form
Definition mraimpl.h:1344
static void do_inner_localX(const typename mapT::iterator lstart, const typename mapT::iterator lend, typename FunctionImpl< R, NDIM >::mapT *rmap_ptr, const bool sym, Tensor< TENSOR_RESULT_TYPE(T, R) > *result_ptr, Mutex *mutex)
Definition funcimpl.h:5794
void mulXXveca(const keyT &key, const FunctionImpl< L, NDIM > *left, const Tensor< L > &lcin, const std::vector< const FunctionImpl< R, NDIM > * > vrightin, const std::vector< Tensor< R > > &vrcin, const std::vector< FunctionImpl< T, NDIM > * > vresultin, double tol)
Definition funcimpl.h:2865
void set_thresh(double value)
Definition mraimpl.h:310
Tensor< double > print_plane_local(const int xaxis, const int yaxis, const coordT &el2)
collect the data for a plot of the MRA structure locally on each node
Definition mraimpl.h:402
void sock_it_to_me_too(const keyT &key, const RemoteReference< FutureImpl< std::pair< keyT, coeffT > > > &ref) const
Definition mraimpl.h:2867
void broaden_op(const keyT &key, const std::vector< Future< bool > > &v)
Definition mraimpl.h:1260
void print_plane(const std::string filename, const int xaxis, const int yaxis, const coordT &el2)
Print a plane ("xy", "xz", or "yz") containing the point x to file.
Definition mraimpl.h:382
void print_tree(std::ostream &os=std::cout, Level maxlevel=10000) const
Definition mraimpl.h:2690
void project_refine_op(const keyT &key, bool do_refine, const std::vector< Vector< double, NDIM > > &specialpts)
Definition mraimpl.h:2476
void scale_oop(const Q q, const FunctionImpl< F, NDIM > &f, bool fence)
Out-of-place scale by a constant.
Definition funcimpl.h:7096
T typeT
Definition funcimpl.h:949
std::size_t tree_size() const
Returns the size of the tree structure of the function ... collective global sum.
Definition mraimpl.h:1909
ConcurrentHashMap< keyT, mapvecT > mapT
Type of the map returned by make_key_vec_map.
Definition funcimpl.h:5722
void add_scalar_inplace(T t, bool fence)
Adds a constant to the function. Local operation, optional fence.
Definition mraimpl.h:2554
void forward_traverse(const coeff_opT &coeff_op, const apply_opT &apply_op, const keyT &key) const
traverse a non-existing tree
Definition funcimpl.h:3654
tensorT downsample(const keyT &key, const std::vector< Future< coeffT > > &v) const
downsample the sum coefficients of level n+1 to sum coeffs on level n
Definition mraimpl.h:1180
void abs_square_inplace(bool fence)
Definition mraimpl.h:3150
FunctionImpl(const FunctionImpl< Q, NDIM > &other, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool dozero)
Copy constructor.
Definition funcimpl.h:1073
void refine(const opT &op, bool fence)
Definition funcimpl.h:4514
static mapT make_key_vec_map(const std::vector< const FunctionImpl< T, NDIM > * > &v)
Returns map of union of local keys to vector of indexes of functions containing that key.
Definition funcimpl.h:5743
void put_in_box(ProcessID from, long nl, long ni) const
Definition mraimpl.h:803
void unary_op_value_inplace(const opT &op, bool fence)
Definition funcimpl.h:2845
std::pair< const keyT, nodeT > datumT
Type of entry in container.
Definition funcimpl.h:958
Timer timer_accumulate
Definition funcimpl.h:994
TensorArgs get_tensor_args() const
Definition mraimpl.h:301
void unaryXXa(const keyT &key, const FunctionImpl< Q, NDIM > *func, const opT &op)
Definition funcimpl.h:3147
void make_Vphi_only(const opT &leaf_op, FunctionImpl< T, NDIM > *ket, FunctionImpl< T, LDIM > *v1, FunctionImpl< T, LDIM > *v2, FunctionImpl< T, LDIM > *p1, FunctionImpl< T, LDIM > *p2, FunctionImpl< T, NDIM > *eri, const bool fence=true)
assemble the function V*phi using V and phi given from the functor
Definition funcimpl.h:4329
void average(const implT &rhs)
take the average of two functions, similar to: this=0.5*(this+rhs)
Definition mraimpl.h:1067
void recursive_apply(opT &apply_op, const FunctionImpl< T, LDIM > *fimpl, const FunctionImpl< T, LDIM > *gimpl, const bool fence)
traverse a non-existing tree, make its coeffs and apply an operator
Definition funcimpl.h:5164
void diff(const DerivativeBase< T, NDIM > *D, const implT *f, bool fence)
Definition mraimpl.h:925
void square_inplace(bool fence)
Pointwise squaring of function with optional global fence.
Definition mraimpl.h:3139
void remove_internal_coefficients(const bool fence)
Definition mraimpl.h:1544
void compute_snorm_and_dnorm(bool fence=true)
compute norm of s and d coefficients for all nodes
Definition mraimpl.h:1110
long box_leaf[1000]
Definition funcimpl.h:3243
void standard(bool fence)
Changes non-standard compressed form to standard compressed form.
Definition mraimpl.h:1792
void multiop_values_doit(const keyT &key, const opT &op, const std::vector< implT * > &v)
Definition funcimpl.h:2736
bool is_nonstandard_with_leaves() const
Definition mraimpl.h:257
GenTensor< Q > values2NScoeffs(const keyT &key, const GenTensor< Q > &values) const
convert function values of a child generation directly to NS coeffs
Definition funcimpl.h:1824
int truncate_mode
0=default=(|d|<thresh), 1=(|d|<thresh/2^n), 2=(|d|<thresh/4^n);
Definition funcimpl.h:978
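A minimal sketch of the level scaling documented above, for illustration only; it is not the library's truncate_tol (mraimpl.h:628), which should be consulted for the threshold actually applied:

#include <cmath>
#include <stdexcept>

// How the per-node threshold scales with level n for the three documented modes.
double truncate_tol_sketch(double thresh, int truncate_mode, int n) {
    switch (truncate_mode) {
        case 0: return thresh;                      // |d| < thresh
        case 1: return thresh * std::pow(0.5, n);   // |d| < thresh/2^n
        case 2: return thresh * std::pow(0.25, n);  // |d| < thresh/4^n
        default: throw std::invalid_argument("unknown truncate_mode");
    }
}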
void multiop_values(const opT &op, const std::vector< implT * > &v)
Definition funcimpl.h:2753
GenTensor< Q > NScoeffs2values(const keyT &key, const GenTensor< Q > &coeff, const bool s_only) const
convert S or NS coeffs to values on a 2k grid of the children
Definition funcimpl.h:1739
FunctionNode holds the coefficients, etc., at each node of the 2^NDIM-tree.
Definition funcimpl.h:127
FunctionNode< Q, NDIM > convert() const
Copy with possible type conversion of coefficients, copying all other state.
Definition funcimpl.h:194
GenTensor< T > coeffT
Definition funcimpl.h:129
bool has_coeff() const
Returns true if there are coefficients in this node.
Definition funcimpl.h:200
void recompute_snorm_and_dnorm(const FunctionCommonData< T, NDIM > &cdata)
Definition funcimpl.h:335
FunctionNode(const coeffT &coeff, bool has_children=false)
Constructor from given coefficients with optional children.
Definition funcimpl.h:156
FunctionNode()
Default constructor makes node without coeff or children.
Definition funcimpl.h:146
void serialize(Archive &ar)
Definition funcimpl.h:458
void consolidate_buffer(const TensorArgs &args)
Definition funcimpl.h:444
double get_dnorm() const
return the precomputed norm of the (virtual) d coefficients
Definition funcimpl.h:316
size_t size() const
Returns the number of coefficients in this node.
Definition funcimpl.h:242
void set_has_children_recursive(const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key)
Sets the has_children attribute to true, recursing up through the parents to keep the tree connected.
Definition funcimpl.h:259
FunctionNode< T, NDIM > & operator=(const FunctionNode< T, NDIM > &other)
Definition funcimpl.h:176
double snorm
norm of the s coefficients
Definition funcimpl.h:141
void clear_coeff()
Clears the coefficients (has_coeff() will subsequently return false)
Definition funcimpl.h:295
Tensor< T > tensorT
Definition funcimpl.h:130
coeffT buffer
The coefficients, if any.
Definition funcimpl.h:139
T trace_conj(const FunctionNode< T, NDIM > &rhs) const
Definition funcimpl.h:453
void scale(Q a)
Scale the coefficients of this node.
Definition funcimpl.h:301
bool is_leaf() const
Returns true if this does not have children.
Definition funcimpl.h:213
void set_has_children(bool flag)
Sets has_children attribute to value of flag.
Definition funcimpl.h:254
void accumulate(const coeffT &t, const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key, const TensorArgs &args)
Accumulate inplace and if necessary connect node to parent.
Definition funcimpl.h:416
double get_norm_tree() const
Gets the value of norm_tree.
Definition funcimpl.h:311
bool _has_children
True if there are children.
Definition funcimpl.h:138
FunctionNode(const coeffT &coeff, double norm_tree, double snorm, double dnorm, bool has_children)
Definition funcimpl.h:166
void set_snorm(const double sn)
set the precomputed norm of the (virtual) s coefficients
Definition funcimpl.h:321
coeffT _coeffs
The coefficients, if any.
Definition funcimpl.h:136
void accumulate2(const tensorT &t, const typename FunctionNode< T, NDIM >::dcT &c, const Key< NDIM > &key)
Accumulate inplace and if necessary connect node to parent.
Definition funcimpl.h:383
void reduceRank(const double &eps)
reduces the rank of the coefficients (if applicable)
Definition funcimpl.h:249
WorldContainer< Key< NDIM >, FunctionNode< T, NDIM > > dcT
Definition funcimpl.h:144
void gaxpy_inplace(const T &alpha, const FunctionNode< Q, NDIM > &other, const R &beta)
General bi-linear operation — this = this*alpha + other*beta.
Definition funcimpl.h:365
double _norm_tree
After norm_tree will contain norm of coefficients summed up tree.
Definition funcimpl.h:137
void set_is_leaf(bool flag)
Sets has_children attribute to value of !flag.
Definition funcimpl.h:280
void print_json(std::ostream &s) const
Definition funcimpl.h:466
double get_snorm() const
get the precomputed norm of the (virtual) s coefficients
Definition funcimpl.h:331
const coeffT & coeff() const
Returns a const reference to the tensor containing the coeffs.
Definition funcimpl.h:237
FunctionNode(const coeffT &coeff, double norm_tree, bool has_children)
Definition funcimpl.h:161
bool has_children() const
Returns true if this node has children.
Definition funcimpl.h:207
void set_coeff(const coeffT &coeffs)
Takes a shallow copy of the coeff — same as this->coeff()=coeff.
Definition funcimpl.h:285
void set_dnorm(const double dn)
set the precomputed norm of the (virtual) d coefficients
Definition funcimpl.h:326
double dnorm
norm of the d coefficients, also defined if there are no d coefficients
Definition funcimpl.h:140
bool is_invalid() const
Returns true if this node is invalid (no coeffs and no children)
Definition funcimpl.h:219
FunctionNode(const FunctionNode< T, NDIM > &other)
Definition funcimpl.h:170
coeffT & coeff()
Returns a non-const reference to the tensor containing the coeffs.
Definition funcimpl.h:227
void set_norm_tree(double norm_tree)
Sets the value of norm_tree.
Definition funcimpl.h:306
Implements the functionality of futures.
Definition future.h:74
A future is a possibly yet unevaluated value.
Definition future.h:373
remote_refT remote_ref(World &world) const
Returns a structure used to pass references to another process.
Definition future.h:675
RemoteReference< FutureImpl< T > > remote_refT
Definition future.h:398
Definition lowranktensor.h:59
bool is_of_tensortype(const TensorType &tt) const
Definition gentensor.h:225
GenTensor convert(const TensorArgs &targs) const
Definition gentensor.h:198
GenTensor full_tensor() const
Definition gentensor.h:200
long dim(const int i) const
return the number of entries in dimension i
Definition lowranktensor.h:391
Tensor< T > full_tensor_copy() const
Definition gentensor.h:206
long ndim() const
Definition lowranktensor.h:386
void add_SVD(const GenTensor< T > &rhs, const double &eps)
Definition gentensor.h:235
constexpr bool is_full_tensor() const
Definition gentensor.h:224
GenTensor get_tensor() const
Definition gentensor.h:203
GenTensor reconstruct_tensor() const
Definition gentensor.h:199
bool has_no_data() const
Definition gentensor.h:211
void normalize()
Definition gentensor.h:218
GenTensor< T > & emul(const GenTensor< T > &other)
Inplace multiply by corresponding elements of argument Tensor.
Definition lowranktensor.h:631
float_scalar_type normf() const
Definition lowranktensor.h:406
double svd_normf() const
Definition gentensor.h:213
SRConf< T > config() const
Definition gentensor.h:237
void reduce_rank(const double &eps)
Definition gentensor.h:217
long rank() const
Definition gentensor.h:212
long size() const
Definition lowranktensor.h:482
SVDTensor< T > & get_svdtensor()
Definition gentensor.h:228
TensorType tensor_type() const
Definition gentensor.h:221
bool has_data() const
Definition gentensor.h:210
GenTensor & gaxpy(const T alpha, const GenTensor &other, const T beta)
Definition lowranktensor.h:580
bool is_assigned() const
Definition gentensor.h:209
IsSupported< TensorTypeData< Q >, GenTensor< T > & >::type scale(Q fac)
Inplace multiplication by scalar of supported type (legacy name)
Definition lowranktensor.h:426
constexpr bool is_svd_tensor() const
Definition gentensor.h:222
Iterates in lexical order thru all children of a key.
Definition key.h:459
Key is the index for a node of the 2^NDIM-tree.
Definition key.h:68
Key< NDIM+LDIM > merge_with(const Key< LDIM > &rhs) const
merge with other key (ie concatenate), use level of rhs, not of this
Definition key.h:398
Level level() const
Definition key.h:161
bool is_valid() const
Checks if a key is valid.
Definition key.h:116
Key< NDIM-VDIM > extract_complement_key(const std::array< int, VDIM > &v) const
extract a new key with the Translations complementary to the ones indicated in the v array
Definition key.h:384
Key< VDIM > extract_key(const std::array< int, VDIM > &v) const
extract a new key with the Translations indicated in the v array
Definition key.h:376
Key parent(int generation=1) const
Returns the key of the parent.
Definition key.h:245
const Vector< Translation, NDIM > & translation() const
Definition key.h:166
void break_apart(Key< LDIM > &key1, Key< KDIM > &key2) const
break key into two low-dimensional keys
Definition key.h:336
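Putting the key-splitting members above together, a small sketch for pair functions; the function name split_and_merge and the split 6 = 3 + 3 are illustrative assumptions:

#include <madness/mra/key.h>

// Break a 6-d key into two 3-d keys and reassemble it with the members listed above.
madness::Key<6> split_and_merge(const madness::Key<6>& key6) {
    madness::Key<3> k1, k2;
    key6.break_apart(k1, k2);   // break key into two low-dimensional keys
    return k1.merge_with(k2);   // concatenate the translations again
}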
A pmap that locates children on odd levels with their even level parents.
Definition funcimpl.h:105
LevelPmap(World &world)
Definition funcimpl.h:111
const int nproc
Definition funcimpl.h:107
LevelPmap()
Definition funcimpl.h:109
ProcessID owner(const keyT &key) const
Find the owner of a given key.
Definition funcimpl.h:114
Definition funcimpl.h:77
Mutex using pthread mutex operations.
Definition worldmutex.h:131
void unlock() const
Free a mutex owned by this thread.
Definition worldmutex.h:165
void lock() const
Acquire the mutex waiting if necessary.
Definition worldmutex.h:155
Range, vaguely a la Intel TBB, to encapsulate a random-access, STL-like start and end iterator with c...
Definition range.h:64
Simple structure used to manage references/pointers to remote instances.
Definition worldref.h:395
Definition SVDTensor.h:42
A simple process map.
Definition funcimpl.h:86
SimplePmap(World &world)
Definition funcimpl.h:92
const int nproc
Definition funcimpl.h:88
const ProcessID me
Definition funcimpl.h:89
ProcessID owner(const keyT &key) const
Maps key to processor.
Definition funcimpl.h:95
A slice defines a sub-range or patch of a dimension.
Definition slice.h:103
static TaskAttributes hipri()
Definition thread.h:450
Traits class to specify support of numeric types.
Definition type_data.h:56
A tensor is a multidimensional array.
Definition tensor.h:317
float_scalar_type normf() const
Returns the Frobenius norm of the tensor.
Definition tensor.h:1726
T sum() const
Returns the sum of all elements of the tensor.
Definition tensor.h:1662
Tensor< T > reshape(int ndimnew, const long *d)
Returns new view/tensor reshaping size/number of dimensions to conforming tensor.
Definition tensor.h:1384
T * ptr()
Returns a pointer to the internal data.
Definition tensor.h:1824
Tensor< T > mapdim(const std::vector< long > &map)
Returns new view/tensor permuting the dimensions.
Definition tensor.h:1624
IsSupported< TensorTypeData< Q >, Tensor< T > & >::type scale(Q x)
Inplace multiplication by scalar of supported type (legacy name)
Definition tensor.h:686
T trace(const Tensor< T > &t) const
Return the trace of two tensors (no complex conjugate invoked)
Definition tensor.h:1776
Tensor< T > & emul(const Tensor< T > &t)
Inplace multiply by corresponding elements of argument Tensor.
Definition tensor.h:1798
bool has_data() const
Definition tensor.h:1886
const TensorIterator< T > & end() const
End point for forward iteration.
Definition tensor.h:1876
Tensor< T > fusedim(long i)
Returns new view/tensor fusing contiguous dimensions i and i+1.
Definition tensor.h:1587
Tensor< T > flat()
Returns new view/tensor rehshaping to flat (1-d) tensor.
Definition tensor.h:1555
Tensor< T > & gaxpy(T alpha, const Tensor< T > &t, T beta)
Inplace generalized saxpy ... this = this*alpha + other*beta.
Definition tensor.h:1804
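A short usage sketch of the inplace gaxpy listed above; the fill() calls only give the tensors some data and are otherwise incidental:

#include <madness/tensor/tensor.h>

// Elementwise a = 2.0*a + 0.5*b, as documented for Tensor<T>::gaxpy.
void gaxpy_example() {
    madness::Tensor<double> a(3, 3), b(3, 3);
    a.fill(1.0);
    b.fill(2.0);
    a.gaxpy(2.0, b, 0.5);
}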
Tensor< T > & conj()
Inplace complex conjugate.
Definition tensor.h:716
Definition function_common_data.h:169
void accumulate(const double time) const
accumulate timer
Definition function_common_data.h:183
A simple, fixed dimension vector.
Definition vector.h:64
Makes a distributed container with specified attributes.
Definition worlddc.h:866
void process_pending()
Process pending messages.
Definition worlddc.h:1166
bool find(accessor &acc, const keyT &key)
Write access to LOCAL value by key. Returns true if found, false otherwise (always false for remote).
Definition worlddc.h:987
bool probe(const keyT &key) const
Returns true if local data is immediately available (no communication)
Definition worlddc.h:1024
iterator begin()
Returns an iterator to the beginning of the local data (no communication)
Definition worlddc.h:1070
ProcessID owner(const keyT &key) const
Returns processor that logically owns key (no communication)
Definition worlddc.h:1034
implT::const_iterator const_iterator
Definition worlddc.h:872
void replicate(bool fence=true)
replicates this WorldContainer on all ProcessIDs
Definition worlddc.h:968
void replace(const pairT &datum)
Inserts/replaces key+value pair (non-blocking communication if key not local)
Definition worlddc.h:974
iterator end()
Returns an iterator past the end of the local data (no communication)
Definition worlddc.h:1084
const std::shared_ptr< WorldDCPmapInterface< keyT > > & get_pmap() const
Returns shared pointer to the process mapping.
Definition worlddc.h:1142
bool insert(accessor &acc, const keyT &key)
Write access to LOCAL value by key. Returns true if inserted, false if already exists (throws if remo...
Definition worlddc.h:1001
implT::iterator iterator
Definition worlddc.h:871
std::size_t size() const
Returns the number of local entries (no communication)
Definition worlddc.h:1135
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun()" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1426
bool is_local(const keyT &key) const
Returns true if the key maps to the local processor (no communication)
Definition worlddc.h:1041
Future< MEMFUN_RETURNT(memfunT)> send(const keyT &key, memfunT memfun)
Sends message "resultT memfun()" to item (non-blocking comm if remote)
Definition worlddc.h:1183
implT::accessor accessor
Definition worlddc.h:873
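A hedged sketch that combines a few of the WorldContainer members listed above for purely local access to the coefficient container; the alias dc3T, the function inspect_node, and the choice T=double, NDIM=3 are illustrative assumptions:

#include <madness/mra/funcimpl.h>

using dc3T = madness::WorldContainer<madness::Key<3>, madness::FunctionNode<double, 3>>;

// Touch a node only if it is stored on this rank; otherwise just report its owner.
void inspect_node(dc3T& coeffs, const madness::Key<3>& key) {
    if (coeffs.is_local(key)) {
        dc3T::accessor acc;
        if (coeffs.find(acc, key))              // write access to the LOCAL value
            acc->second.set_has_children(true); // e.g. mark the node as interior
    } else {
        madness::print("node", key, "lives on rank", coeffs.owner(key));
    }
}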
Interface to be provided by any process map.
Definition worlddc.h:82
void fence(bool debug=false)
Synchronizes all processes in communicator AND globally ensures no pending AM or tasks.
Definition worldgop.cc:161
Implements most parts of a globally addressable object (via unique ID).
Definition world_object.h:364
const uniqueidT & id() const
Returns the globally unique object ID.
Definition world_object.h:711
void process_pending()
To be called from derived constructor to process pending messages.
Definition world_object.h:656
ProcessID me
Rank of self.
Definition world_object.h:385
detail::task_result_type< memfnT >::futureT send(ProcessID dest, memfnT memfn) const
Definition world_object.h:731
detail::task_result_type< memfnT >::futureT task(ProcessID dest, memfnT memfn, const TaskAttributes &attr=TaskAttributes()) const
Sends task to derived class method returnT (this->*memfn)().
Definition world_object.h:1005
Future< bool > for_each(const rangeT &range, const opT &op)
Apply op(item) on all items in range.
Definition world_task_queue.h:572
void add(TaskInterface *t)
Add a new local task, taking ownership of the pointer.
Definition world_task_queue.h:466
Future< resultT > reduce(const rangeT &range, const opT &op)
Reduce op(item) for all items in range using op(sum,op(item)).
Definition world_task_queue.h:527
A parallel world class.
Definition world.h:132
static World * world_from_id(std::uint64_t id)
Convert a World ID to a World pointer.
Definition world.h:492
WorldTaskQueue & taskq
Task queue.
Definition world.h:206
ProcessID rank() const
Returns the process rank in this World (same as MPI_Comm_rank()).
Definition world.h:320
ProcessID size() const
Returns the number of processes in this World (same as MPI_Comm_size()).
Definition world.h:330
WorldGopInterface & gop
Global operations.
Definition world.h:207
std::optional< T * > ptr_from_id(uniqueidT id) const
Look up a local pointer from a world-wide unique ID.
Definition world.h:416
ProcessID random_proc()
Returns a random process number; that is, an integer in [0,world.size()).
Definition world.h:591
Wrapper for an opaque pointer for serialization purposes.
Definition archive.h:850
syntactic sugar for std::array<bool, N>
Definition array_of_bools.h:19
Class for unique global IDs.
Definition uniqueid.h:53
unsigned long get_obj_id() const
Access the object ID.
Definition uniqueid.h:97
unsigned long get_world_id() const
Access the World ID.
Definition uniqueid.h:90
static const double R
Definition csqrt.cc:46
double(* f1)(const coord_3d &)
Definition derivatives.cc:55
char * p(char *buf, const char *name, int k, int initial_level, double thresh, int order)
Definition derivatives.cc:72
static double lo
Definition dirac-hatom.cc:23
@ upper
Definition dirac-hatom.cc:15
Provides FunctionDefaults and utilities for coordinate transformation.
auto T(World &world, response_space &f) -> response_space
Definition global_functions.cc:34
archive_array< unsigned char > wrap_opaque(const T *, unsigned int)
Factory function to wrap a pointer to contiguous data as an opaque (uchar) archive_array.
Definition archive.h:925
Tensor< typename Tensor< T >::scalar_type > arg(const Tensor< T > &t)
Return a new tensor holding the argument of each element of t (complex types only)
Definition tensor.h:2502
Tensor< TENSOR_RESULT_TYPE(T, Q) > & fast_transform(const Tensor< T > &t, const Tensor< Q > &c, Tensor< TENSOR_RESULT_TYPE(T, Q) > &result, Tensor< TENSOR_RESULT_TYPE(T, Q) > &workspace)
Restricted but heavily optimized form of transform()
Definition tensor.h:2443
const double beta
Definition gygi_soltion.cc:62
static const double v
Definition hatom_sf_dirac.cc:20
Provides IndexIterator.
Tensor< double > op(const Tensor< double > &x)
Definition kain.cc:508
Multidimension Key for MRA tree and associated iterators.
static double pow(const double *a, const double *b)
Definition lda.h:74
#define MADNESS_CHECK(condition)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:182
#define MADNESS_EXCEPTION(msg, value)
Macro for throwing a MADNESS exception.
Definition madness_exception.h:119
#define MADNESS_ASSERT(condition)
Assert a condition that should be free of side-effects since in release builds this might be a no-op.
Definition madness_exception.h:134
#define MADNESS_CHECK_THROW(condition, msg)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:207
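A short sketch contrasting the three macros above, based only on the briefs listed here; the function check_macros and its argument are illustrative:

#include <madness/world/madness_exception.h>

void check_macros(int n) {
    MADNESS_ASSERT(n >= 0);                            // may be a no-op in release builds
    MADNESS_CHECK(n < 100);                            // always evaluated, even in release builds
    MADNESS_CHECK_THROW(n != 42, "n must not be 42");  // always evaluated, with an explicit message
}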
Header to declare stuff which has not yet found a home.
constexpr double pi
Mathematical constant π.
Definition constants.h:48
MemFuncWrapper< objT *, memfnT, typename result_of< memfnT >::type > wrap_mem_fn(objT &obj, memfnT memfn)
Create a member function wrapper (MemFuncWrapper) from an object and a member function pointer.
Definition mem_func_wrapper.h:251
void combine_hash(hashT &seed, hashT hash)
Internal use only.
Definition worldhash.h:248
Namespace for all elements and tools of MADNESS.
Definition DFParameters.h:10
std::ostream & operator<<(std::ostream &os, const particle< PDIM > &p)
Definition lowrankfunction.h:397
static const char * filename
Definition legendre.cc:96
static const std::vector< Slice > ___
Entire dimension.
Definition slice.h:128
static double cpu_time()
Returns the cpu time in seconds relative to an arbitrary origin.
Definition timers.h:127
GenTensor< TENSOR_RESULT_TYPE(R, Q)> general_transform(const GenTensor< R > &t, const Tensor< Q > c[])
Definition gentensor.h:274
response_space scale(response_space a, double b)
void finalize()
Call this once at the very end of your main program instead of MPI_Finalize().
Definition world.cc:232
void norm_tree(World &world, const std::vector< Function< T, NDIM > > &v, bool fence=true)
Makes the norm tree for all functions in a vector.
Definition vmra.h:1125
std::vector< Function< TENSOR_RESULT_TYPE(T, R), NDIM > > transform(World &world, const std::vector< Function< T, NDIM > > &v, const Tensor< R > &c, bool fence=true)
Transforms a vector of functions according to new[i] = sum[j] old[j]*c[j,i].
Definition vmra.h:664
TreeState
Definition funcdefaults.h:59
@ nonstandard_after_apply
s and d coeffs, state after operator application
Definition funcdefaults.h:64
@ redundant_after_merge
s coeffs everywhere, must be summed up to yield the result
Definition funcdefaults.h:66
@ reconstructed
s coeffs at the leaves only
Definition funcdefaults.h:60
@ nonstandard
s and d coeffs in internal nodes
Definition funcdefaults.h:62
@ redundant
s coeffs everywhere
Definition funcdefaults.h:65
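A hedged sketch of switching between these states through change_tree_state (mraimpl.h:1386); the function to_redundant_and_back and the template arguments double,3 are illustrative assumptions:

#include <madness/mra/funcimpl.h>

// Go to the redundant representation (s coeffs everywhere), do some local work,
// then return to the reconstructed form (s coeffs at the leaves only).
void to_redundant_and_back(madness::FunctionImpl<double, 3>& impl) {
    impl.change_tree_state(madness::redundant);
    // ... operate on sum coefficients at every level ...
    impl.change_tree_state(madness::reconstructed);
}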
static Tensor< double > weights[max_npt+1]
Definition legendre.cc:99
int64_t Translation
Definition key.h:56
Key< NDIM > displacement(const Key< NDIM > &source, const Key< NDIM > &target)
given a source and a target, return the displacement in translation
Definition key.h:444
static const Slice _(0,-1, 1)
std::shared_ptr< FunctionFunctorInterface< double, 3 > > func(new opT(g))
void change_tensor_type(GenTensor< T > &t, const TensorArgs &targs)
change representation to targs.tt
Definition gentensor.h:284
int Level
Definition key.h:57
std::enable_if< std::is_base_of< ProjectorBase, projT >::value, OuterProjector< projT, projQ > >::type outer(const projT &p0, const projQ &p1)
Definition projector.h:457
int RandomValue< int >()
Random int.
Definition ran.cc:250
static double pop(std::vector< double > &v)
Definition SCF.cc:113
void print(const T &t, const Ts &... ts)
Print items to std::cout (items separated by spaces) and terminate with a new line.
Definition print.h:225
Tensor< T > fcube(const Key< NDIM > &, T(*f)(const Vector< double, NDIM > &), const Tensor< double > &)
Definition mraimpl.h:2155
TensorType
low rank representations of tensors (see gentensor.h)
Definition gentensor.h:120
@ TT_2D
Definition gentensor.h:120
@ TT_FULL
Definition gentensor.h:120
NDIM & f
Definition mra.h:2451
void error(const char *msg)
Definition world.cc:139
NDIM const Function< R, NDIM > & g
Definition mra.h:2451
std::size_t hashT
The hash value type.
Definition worldhash.h:145
static const int kmax
Definition twoscale.cc:52
double inner(response_space &a, response_space &b)
Definition response_functions.h:442
GenTensor< TENSOR_RESULT_TYPE(R, Q)> transform_dir(const GenTensor< R > &t, const Tensor< Q > &c, const int axis)
Definition lowranktensor.h:1099
std::string name(const FuncType &type, const int ex=-1)
Definition ccpairfunction.h:28
void mxmT(long dimi, long dimj, long dimk, T *MADNESS_RESTRICT c, const T *a, const T *b)
Matrix += Matrix * matrix transpose ... MKL interface version.
Definition mxm.h:225
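A plain reference loop for what the optimized mxmT above computes, assuming row-major storage with a of shape dimi x dimk, b of shape dimj x dimk, and c of shape dimi x dimj; the storage assumption is mine, not stated in the brief:

// c(i,j) += sum_k a(i,k) * b(j,k), i.e. C += A * B^T.
void mxmT_reference(long dimi, long dimj, long dimk,
                    double* c, const double* a, const double* b) {
    for (long i = 0; i < dimi; ++i) {
        for (long j = 0; j < dimj; ++j) {
            double sum = 0.0;
            for (long k = 0; k < dimk; ++k) sum += a[i*dimk + k] * b[j*dimk + k];
            c[i*dimj + j] += sum;
        }
    }
}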
Function< T, NDIM > copy(const Function< T, NDIM > &f, const std::shared_ptr< WorldDCPmapInterface< Key< NDIM > > > &pmap, bool fence=true)
Create a new copy of the function with different distribution and optional fence.
Definition mra.h:2037
static const int MAXK
The maximum wavelet order presently supported.
Definition funcdefaults.h:54
Definition mraimpl.h:50
static long abs(long a)
Definition tensor.h:218
const double cc
Definition navstokes_cosines.cc:107
static const double b
Definition nonlinschro.cc:119
static const double d
Definition nonlinschro.cc:121
static const double a
Definition nonlinschro.cc:118
Defines simple templates for printing to std::cout "a la Python".
double Q(double a)
Definition relops.cc:20
static const double c
Definition relops.cc:10
static const double L
Definition rk.cc:46
static const double thresh
Definition rk.cc:45
Definition test_ar.cc:204
Definition test_ccpairfunction.cc:22
given a ket and the 1- and 2-electron potentials, construct the function V phi
Definition funcimpl.h:3989
implT * result
where to construct Vphi, no need to track parents
Definition funcimpl.h:3997
bool have_v2() const
Definition funcimpl.h:4006
ctL iav1
Definition funcimpl.h:4001
Vphi_op_NS(implT *result, const opT &leaf_op, const ctT &iaket, const ctL &iap1, const ctL &iap2, const ctL &iav1, const ctL &iav2, const implT *eri)
Definition funcimpl.h:4015
ctL iap1
Definition funcimpl.h:4000
bool have_v1() const
Definition funcimpl.h:4005
std::pair< bool, coeffT > continue_recursion(const std::vector< bool > child_is_leaf, const tensorT &coeffs, const keyT &key) const
loop over all children and either insert their sum coeffs or continue the recursion
Definition funcimpl.h:4081
opT leaf_op
deciding if a given FunctionNode will be a leaf node
Definition funcimpl.h:3998
std::pair< coeffT, double > make_sum_coeffs(const keyT &key) const
make the sum coeffs for key
Definition funcimpl.h:4174
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:3994
ctL iap2
the particles 1 and 2 (exclusive with ket)
Definition funcimpl.h:4000
bool have_ket() const
Definition funcimpl.h:4004
const implT * eri
2-particle potential, must be on-demand
Definition funcimpl.h:4002
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:3995
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
make and insert the coefficients into result's tree
Definition funcimpl.h:4026
void serialize(const Archive &ar)
serialize this (needed for use in recursive_op)
Definition funcimpl.h:4255
Vphi_op_NS< opT, LDIM > this_type
Definition funcimpl.h:3993
ctT iaket
the ket of a pair function (exclusive with p1, p2)
Definition funcimpl.h:3999
double compute_error_from_inaccurate_refinement(const keyT &key, const tensorT &ceri) const
the error is computed from the d coefficients of the constituent functions
Definition funcimpl.h:4127
void accumulate_into_result(const Key< NDIM > &key, const coeffT &coeff) const
Definition funcimpl.h:4009
this_type make_child(const keyT &child) const
Definition funcimpl.h:4226
tensorT eri_coeffs(const keyT &key) const
Definition funcimpl.h:4107
ctL iav2
potentials for particles 1 and 2
Definition funcimpl.h:4001
bool have_eri() const
Definition funcimpl.h:4007
this_type forward_ctor(implT *result1, const opT &leaf_op, const ctT &iaket1, const ctL &iap11, const ctL &iap21, const ctL &iav11, const ctL &iav21, const implT *eri1)
Definition funcimpl.h:4248
Vphi_op_NS()
Definition funcimpl.h:4014
Future< this_type > activate() const
Definition funcimpl.h:4237
bool randomize() const
Definition funcimpl.h:3991
add two functions f and g: result=alpha * f + beta * g
Definition funcimpl.h:3499
bool randomize() const
Definition funcimpl.h:3504
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:3534
add_op(const ctT &f, const ctT &g, const double alpha, const double beta)
Definition funcimpl.h:3512
ctT f
tracking coeffs of first and second addend
Definition funcimpl.h:3507
double alpha
prefactor for f, g
Definition funcimpl.h:3509
add_op this_type
Definition funcimpl.h:3502
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:3501
void serialize(const Archive &ar)
Definition funcimpl.h:3546
ctT g
Definition funcimpl.h:3507
std::pair< bool, coeffT > operator()(const keyT &key) const
if we are at the bottom of the trees, return the sum of the coeffs
Definition funcimpl.h:3516
double beta
Definition funcimpl.h:3509
this_type make_child(const keyT &child) const
Definition funcimpl.h:3529
this_type forward_ctor(const ctT &f1, const ctT &g1, const double alpha, const double beta)
taskq-compatible ctor
Definition funcimpl.h:3542
opT op
Definition funcimpl.h:3117
opT::resultT resultT
Definition funcimpl.h:3115
Tensor< resultT > operator()(const Key< NDIM > &key, const Tensor< Q > &t) const
Definition funcimpl.h:3124
coeff_value_adaptor(const FunctionImpl< Q, NDIM > *impl_func, const opT &op)
Definition funcimpl.h:3120
const FunctionImpl< Q, NDIM > * impl_func
Definition funcimpl.h:3116
void serialize(Archive &ar)
Definition funcimpl.h:3133
merge the coefficient boxes of this into result's tree
Definition funcimpl.h:2356
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2357
void serialize(const Archive &ar)
Definition funcimpl.h:2374
FunctionImpl< Q, NDIM > * result
Definition funcimpl.h:2358
do_accumulate_trees(FunctionImpl< Q, NDIM > &result, const T alpha)
Definition funcimpl.h:2361
T alpha
Definition funcimpl.h:2359
bool operator()(typename rangeT::iterator &it) const
accumulate the coefficients of this node, scaled by alpha, into the corresponding node of result's tree
Definition funcimpl.h:2365
"put" this on g
Definition funcimpl.h:2567
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2568
void serialize(const Archive &ar)
Definition funcimpl.h:2596
implT * g
Definition funcimpl.h:2570
do_average()
Definition funcimpl.h:2572
bool operator()(typename rangeT::iterator &it) const
iterator it points to this
Definition funcimpl.h:2576
do_average(implT &g)
Definition funcimpl.h:2573
change representation of nodes' coeffs to low rank, optional fence
Definition funcimpl.h:2600
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2601
void serialize(const Archive &ar)
Definition funcimpl.h:2624
TensorArgs targs
Definition funcimpl.h:2604
do_change_tensor_type(const TensorArgs &targs, implT &g)
Definition funcimpl.h:2610
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2613
implT * f
Definition funcimpl.h:2605
check symmetry wrt particle exchange
Definition funcimpl.h:2273
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2274
double operator()(typename rangeT::iterator &it) const
return the norm of the difference of this node and its "mirror" node
Definition funcimpl.h:2280
do_check_symmetry_local()
Definition funcimpl.h:2276
void serialize(const Archive &ar)
Definition funcimpl.h:2343
double operator()(double a, double b) const
Definition funcimpl.h:2339
do_check_symmetry_local(const implT &f)
Definition funcimpl.h:2277
const implT * f
Definition funcimpl.h:2275
compute the norm of the wavelet coefficients
Definition funcimpl.h:4396
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:4397
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:4403
do_compute_snorm_and_dnorm(const FunctionCommonData< T, NDIM > &cdata)
Definition funcimpl.h:4400
const FunctionCommonData< T, NDIM > & cdata
Definition funcimpl.h:4399
TensorArgs targs
Definition funcimpl.h:2631
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2636
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2628
do_consolidate_buffer(const TensorArgs &targs)
Definition funcimpl.h:2635
void serialize(const Archive &ar)
Definition funcimpl.h:2640
double operator()(double val) const
Definition funcimpl.h:1395
double limit
Definition funcimpl.h:1390
do_convert_to_color(const double limit, const bool log)
Definition funcimpl.h:1394
bool log
Definition funcimpl.h:1391
static double lower()
Definition funcimpl.h:1392
functor for the gaxpy_inplace method
Definition funcimpl.h:1186
FunctionImpl< T, NDIM > * f
the current function impl
Definition funcimpl.h:1188
do_gaxpy_inplace(FunctionImpl< T, NDIM > *f, T alpha, R beta)
Definition funcimpl.h:1192
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:1193
R beta
prefactor for other function impl
Definition funcimpl.h:1190
void serialize(Archive &ar)
Definition funcimpl.h:1201
Range< typename FunctionImpl< Q, NDIM >::dcT::const_iterator > rangeT
Definition funcimpl.h:1187
T alpha
prefactor for the current function impl
Definition funcimpl.h:1189
const bool do_leaves
start with leaf nodes instead of initial_level
Definition funcimpl.h:6582
T operator()(T a, T b) const
Definition funcimpl.h:6600
do_inner_ext_local_ffi(const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > f, const implT *impl, const bool leaf_refine, const bool do_leaves)
Definition funcimpl.h:6584
void serialize(const Archive &ar)
Definition funcimpl.h:6604
const bool leaf_refine
Definition funcimpl.h:6581
const std::shared_ptr< FunctionFunctorInterface< T, NDIM > > fref
Definition funcimpl.h:6579
T operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:6588
const implT * impl
Definition funcimpl.h:6580
compute the inner product of this range with other
Definition funcimpl.h:5583
const FunctionImpl< T, NDIM > * bra
Definition funcimpl.h:5584
void serialize(const Archive &ar)
Definition funcimpl.h:5699
const FunctionImpl< R, NDIM > * ket
Definition funcimpl.h:5585
bool leaves_only
Definition funcimpl.h:5586
do_inner_local_on_demand(const FunctionImpl< T, NDIM > *bra, const FunctionImpl< R, NDIM > *ket, const bool leaves_only=true)
Definition funcimpl.h:5589
resultT operator()(resultT a, resultT b) const
Definition funcimpl.h:5695
resultT operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5592
compute the inner product of this range with other
Definition funcimpl.h:5522
resultT operator()(resultT a, resultT b) const
Definition funcimpl.h:5555
bool leaves_only
Definition funcimpl.h:5524
void serialize(const Archive &ar)
Definition funcimpl.h:5559
do_inner_local(const FunctionImpl< R, NDIM > *other, const bool leaves_only)
Definition funcimpl.h:5527
const FunctionImpl< R, NDIM > * other
Definition funcimpl.h:5523
resultT operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5529
typedef TENSOR_RESULT_TYPE(T, R) resultT
keep only the sum coefficients in each node
Definition funcimpl.h:2227
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2228
do_keep_sum_coeffs(implT *impl)
constructor needs impl for cdata
Definition funcimpl.h:2232
implT * impl
Definition funcimpl.h:2229
void serialize(const Archive &ar)
Definition funcimpl.h:2241
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2234
map and mirror the dimensions of this, write result on f
Definition funcimpl.h:2501
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2511
implT * f
Definition funcimpl.h:2505
std::vector< long > mirror
Definition funcimpl.h:2504
void serialize(const Archive &ar)
Definition funcimpl.h:2558
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2502
std::vector< long > map
Definition funcimpl.h:2504
do_map_and_mirror(const std::vector< long > map, const std::vector< long > mirror, implT &f)
Definition funcimpl.h:2508
map this on f
Definition funcimpl.h:2421
do_mapdim(const std::vector< long > map, implT &f)
Definition funcimpl.h:2428
void serialize(const Archive &ar)
Definition funcimpl.h:2444
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2422
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2430
std::vector< long > map
Definition funcimpl.h:2424
do_mapdim()
Definition funcimpl.h:2427
implT * f
Definition funcimpl.h:2425
merge the coefficient boxes of this into other's tree
Definition funcimpl.h:2385
bool operator()(typename rangeT::iterator &it) const
merge the coefficients of this node (scaled by alpha and beta) into the corresponding node of other's tree
Definition funcimpl.h:2395
Range< typename dcT::const_iterator > rangeT
Definition funcimpl.h:2386
FunctionImpl< Q, NDIM > * other
Definition funcimpl.h:2387
do_merge_trees(const T alpha, const R beta, FunctionImpl< Q, NDIM > &other)
Definition funcimpl.h:2391
T alpha
Definition funcimpl.h:2388
do_merge_trees()
Definition funcimpl.h:2390
R beta
Definition funcimpl.h:2389
void serialize(const Archive &ar)
Definition funcimpl.h:2414
mirror dimensions of this, write result on f
Definition funcimpl.h:2451
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2460
implT * f
Definition funcimpl.h:2455
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2452
do_mirror()
Definition funcimpl.h:2457
do_mirror(const std::vector< long > mirror, implT &f)
Definition funcimpl.h:2458
void serialize(const Archive &ar)
Definition funcimpl.h:2494
std::vector< long > mirror
Definition funcimpl.h:2454
Definition funcimpl.h:5495
double operator()(typename dcT::const_iterator &it) const
Definition funcimpl.h:5496
void serialize(const Archive &ar)
Definition funcimpl.h:5511
double operator()(double a, double b) const
Definition funcimpl.h:5507
laziness
Definition funcimpl.h:4653
void serialize(Archive &ar)
Definition funcimpl.h:4662
Key< OPDIM > d
Definition funcimpl.h:4654
Key< OPDIM > key
Definition funcimpl.h:4654
keyT dest
Definition funcimpl.h:4655
double fac
Definition funcimpl.h:4656
do_op_args(const Key< OPDIM > &key, const Key< OPDIM > &d, const keyT &dest, double tol, double fac, double cnorm)
Definition funcimpl.h:4659
double cnorm
Definition funcimpl.h:4656
double tol
Definition funcimpl.h:4656
reduce the rank of the nodes, optional fence
Definition funcimpl.h:2247
do_reduce_rank(const TensorArgs &targs)
Definition funcimpl.h:2255
TensorArgs args
Definition funcimpl.h:2251
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2261
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2248
do_reduce_rank(const double &thresh)
Definition funcimpl.h:2256
void serialize(const Archive &ar)
Definition funcimpl.h:2267
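do_reduce_rank walks the local nodes and re-compresses each low-rank coefficient tensor to the requested threshold. As a standalone illustration of the idea only (not the GenTensor implementation), here is a rank truncation of a toy separated representation, dropping trailing terms whose accumulated weight stays below the threshold:

#include <algorithm>
#include <functional>
#include <vector>

// Toy separated representation: sum_r w[r] * u_r(x) v_r(y), stored as weights only.
struct LowRank {
    std::vector<double> weights;
};

// Drop terms from the tail while their accumulated contribution stays below
// 'thresh' -- the same spirit as reducing the rank of each node's coefficients.
void reduce_rank(LowRank& t, double thresh) {
    std::sort(t.weights.begin(), t.weights.end(), std::greater<double>());
    double tail = 0.0;
    std::size_t keep = t.weights.size();
    while (keep > 0 && tail + t.weights[keep - 1] < thresh) {
        tail += t.weights[keep - 1];
        --keep;
    }
    t.weights.resize(keep);
}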
Changes non-standard compressed form to standard compressed form.
Definition funcimpl.h:4617
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:4628
do_standard(implT *impl)
Definition funcimpl.h:4625
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:4618
void serialize(const Archive &ar)
Definition funcimpl.h:4645
implT * impl
Definition funcimpl.h:4621
given an NS tree resulting from a convolution, truncate leaves if appropriate
Definition funcimpl.h:2168
void serialize(const Archive &ar)
Definition funcimpl.h:2188
const implT * f
Definition funcimpl.h:2170
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2174
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2169
do_truncate_NS_leafs(const implT *f)
Definition funcimpl.h:2172
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2647
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2651
implT * impl
Definition funcimpl.h:2648
void serialize(const Archive &ar)
Definition funcimpl.h:2669
do_unary_op_value_inplace(implT *impl, const opT &op)
Definition funcimpl.h:2650
Hartree product of two LDIM functions to yield an NDIM = 2*LDIM function.
Definition funcimpl.h:3582
this_type forward_ctor(implT *result1, const ctL &p11, const ctL &p22, const leaf_opT &leaf_op)
Definition funcimpl.h:3638
bool randomize() const
Definition funcimpl.h:3583
void serialize(const Archive &ar)
Definition funcimpl.h:3642
hartree_op(implT *result, const ctL &p11, const ctL &p22, const leaf_opT &leaf_op)
Definition funcimpl.h:3594
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:3586
ctL p2
tracking coeffs of the two lo-dim functions
Definition funcimpl.h:3589
leaf_opT leaf_op
determine if a given node will be a leaf node
Definition funcimpl.h:3590
hartree_op()
Definition funcimpl.h:3593
implT * result
where to construct the pair function
Definition funcimpl.h:3588
hartree_op< LDIM, leaf_opT > this_type
Definition funcimpl.h:3585
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
Definition funcimpl.h:3599
ctL p1
Definition funcimpl.h:3589
this_type make_child(const keyT &child) const
Definition funcimpl.h:3622
Future< this_type > activate() const
Definition funcimpl.h:3631
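hartree_op builds the pair function from its two low-dimensional factors: on each NDIM = 2*LDIM key the coefficient blocks of the two LDIM functions are combined by an outer product. A worked standalone sketch of that outer product on plain tensors stored as flat vectors (illustrative types; the real code works on scaling-function coefficient blocks):

#include <cstddef>
#include <vector>

// c[i*n2 + j] = a[i] * b[j]: the outer (Hartree) product of two coefficient
// blocks, which is how a 2*LDIM node is assembled from two LDIM nodes.
std::vector<double> hartree_product(const std::vector<double>& a,
                                    const std::vector<double>& b) {
    std::vector<double> c(a.size() * b.size());
    for (std::size_t i = 0; i < a.size(); ++i)
        for (std::size_t j = 0; j < b.size(); ++j)
            c[i * b.size() + j] = a[i] * b[j];
    return c;
}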
perform this multiplication: h(1,2) = f(1,2) * g(1)
Definition funcimpl.h:3390
multiply_op()
Definition funcimpl.h:3402
ctL g
Definition funcimpl.h:3399
Future< this_type > activate() const
Definition funcimpl.h:3481
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:3394
implT * h
the result function h(1,2) = f(1,2) * g(1)
Definition funcimpl.h:3397
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:3393
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
apply this to a FunctionNode of f and g at Key key
Definition funcimpl.h:3429
this_type forward_ctor(implT *h1, const ctT &f1, const ctL &g1, const int particle)
Definition funcimpl.h:3488
static bool randomize()
Definition funcimpl.h:3392
int particle
whether g is g(1) or g(2)
Definition funcimpl.h:3400
ctT f
Definition funcimpl.h:3398
multiply_op< LDIM > this_type
Definition funcimpl.h:3395
multiply_op(implT *h1, const ctT &f1, const ctL &g1, const int particle1)
Definition funcimpl.h:3404
bool screen(const coeffT &fcoeff, const coeffT &gcoeff, const keyT &key) const
return true if this will be a leaf node
Definition funcimpl.h:3410
this_type make_child(const keyT &child) const
Definition funcimpl.h:3471
void serialize(const Archive &ar)
Definition funcimpl.h:3492
coeffT val_lhs
Definition funcimpl.h:3869
double lo
Definition funcimpl.h:3872
double lo1
Definition funcimpl.h:3872
long oversampling
Definition funcimpl.h:3870
double error
Definition funcimpl.h:3871
tensorT operator()(const Key< NDIM > key, const tensorT &coeff_rhs)
multiply the values of rhs and lhs, with the result written on rhs; rhs and lhs have the same dimensions
Definition funcimpl.h:3887
coeffT coeff_lhs
Definition funcimpl.h:3869
void serialize(const Archive &ar)
Definition funcimpl.h:3975
double lo2
Definition funcimpl.h:3872
double hi1
Definition funcimpl.h:3872
pointwise_multiplier(const Key< NDIM > key, const coeffT &clhs)
Definition funcimpl.h:3875
coeffT operator()(const Key< NDIM > key, const tensorT &coeff_rhs, const int particle)
multiply the values of rhs and lhs, with the result written on rhs; rhs and lhs have different dimensions
Definition funcimpl.h:3932
double hi2
Definition funcimpl.h:3872
double hi
Definition funcimpl.h:3872
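pointwise_multiplier multiplies two functions node by node in value space: transform the coefficients to function values on the quadrature grid, multiply pointwise (broadcasting over the other particle when lhs and rhs have different dimensions), and transform back. A simplified sketch of the same-dimension case with placeholder transforms (the real transforms are the coefficient-to-value maps of the basis; every name here is an assumption):

#include <cstddef>
#include <functional>
#include <vector>

using Block     = std::vector<double>;
using Transform = std::function<Block(const Block&)>;

// Multiply lhs and rhs pointwise in value space and return the coefficients
// of the product: coeffs -> values -> pointwise product -> coeffs.
Block multiply_node(const Block& lhs_coeff, const Block& rhs_coeff,
                    const Transform& to_values, const Transform& to_coeffs) {
    Block lv = to_values(lhs_coeff);
    Block rv = to_values(rhs_coeff);
    for (std::size_t i = 0; i < rv.size(); ++i) rv[i] *= lv[i];
    return to_coeffs(rv);   // result written on the rhs block, as in the original
}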
project the low-dim function g onto the hi-dim function f: result(x) = <f(x,y) | g(y)>
Definition funcimpl.h:6844
project_out_op(const implT *fimpl, implL1 *result, const ctL &iag, const int dim)
Definition funcimpl.h:6859
ctL iag
the low dim function g
Definition funcimpl.h:6854
FunctionImpl< T, NDIM-LDIM > implL1
Definition funcimpl.h:6849
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:6938
std::pair< bool, coeffT > argT
Definition funcimpl.h:6850
const implT * fimpl
the hi dim function f
Definition funcimpl.h:6852
this_type forward_ctor(const implT *fimpl1, implL1 *result1, const ctL &iag1, const int dim1)
taskq-compatible ctor
Definition funcimpl.h:6945
this_type make_child(const keyT &child) const
Definition funcimpl.h:6929
project_out_op< LDIM > this_type
Definition funcimpl.h:6847
implL1 * result
the low dim result function
Definition funcimpl.h:6853
Future< argT > operator()(const Key< NDIM > &key) const
do the actual contraction
Definition funcimpl.h:6866
void serialize(const Archive &ar)
Definition funcimpl.h:6949
project_out_op(const project_out_op &other)
Definition funcimpl.h:6861
int dim
0: project 0..LDIM-1, 1: project LDIM..NDIM-1
Definition funcimpl.h:6855
bool randomize() const
Definition funcimpl.h:6845
CoeffTracker< T, LDIM > ctL
Definition funcimpl.h:6848
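project_out_op contracts the high-dimensional function with the low-dimensional one over one particle: result(x) = <f(x,y) | g(y)>. With a node's coefficients viewed as a matrix f_ij (i indexing particle 1, j particle 2), projecting out particle 2 is the matrix-vector product r_i = sum_j f_ij g_j; 'dim' selects which particle is contracted. A small illustrative sketch (flat row-major storage is an assumption):

#include <cstddef>
#include <vector>

// r_i = sum_j f[i][j] * g[j], i.e. <f(x,y) | g(y)> at the level of one
// coefficient block.
std::vector<double> project_out(const std::vector<double>& f,   // n1 x n2, row-major
                                const std::vector<double>& g,   // length n2
                                std::size_t n1, std::size_t n2) {
    std::vector<double> r(n1, 0.0);
    for (std::size_t i = 0; i < n1; ++i)
        for (std::size_t j = 0; j < n2; ++j)
            r[i] += f[i * n2 + j] * g[j];
    return r;
}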
recursive part of recursive_apply
Definition funcimpl.h:5322
ctT iaf
Definition funcimpl.h:5330
recursive_apply_op2< opT > this_type
Definition funcimpl.h:5325
Future< this_type > activate() const
retrieve the coefficients (parent coeffs might be remote)
Definition funcimpl.h:5385
const opT * apply_op
need this for randomization
Definition funcimpl.h:5331
bool randomize() const
Definition funcimpl.h:5323
recursive_apply_op2(const recursive_apply_op2 &other)
Definition funcimpl.h:5338
void serialize(const Archive &ar)
Definition funcimpl.h:5401
argT finalize(const double kernel_norm, const keyT &key, const coeffT &coeff, const implT *r) const
its sole purpose is to wait for the kernel norm, wrap it, and send it back to the caller
Definition funcimpl.h:5371
this_type make_child(const keyT &child) const
Definition funcimpl.h:5380
recursive_apply_op2(implT *result, const ctT &iaf, const opT *apply_op)
Definition funcimpl.h:5335
std::pair< bool, coeffT > argT
Definition funcimpl.h:5327
implT * result
Definition funcimpl.h:5329
CoeffTracker< T, NDIM > ctT
Definition funcimpl.h:5326
argT operator()(const Key< NDIM > &key) const
send off the application of the operator
Definition funcimpl.h:5347
this_type forward_ctor(implT *result1, const ctT &iaf1, const opT *apply_op1)
taskq-compatible ctor
Definition funcimpl.h:5397
recursive part of recursive_apply
Definition funcimpl.h:5191
std::pair< bool, coeffT > operator()(const Key< NDIM > &key) const
make the NS-coefficients and send off the application of the operator
Definition funcimpl.h:5216
this_type forward_ctor(implT *r, const CoeffTracker< T, LDIM > &f1, const CoeffTracker< T, LDIM > &g1, const opT *apply_op1)
Definition funcimpl.h:5281
opT * apply_op
Definition funcimpl.h:5199
recursive_apply_op(const recursive_apply_op &other)
Definition funcimpl.h:5209
recursive_apply_op< opT, LDIM > this_type
Definition funcimpl.h:5194
Future< this_type > activate() const
Definition funcimpl.h:5274
bool randomize() const
Definition funcimpl.h:5192
implT * result
Definition funcimpl.h:5196
CoeffTracker< T, LDIM > iaf
Definition funcimpl.h:5197
void serialize(const Archive &ar)
Definition funcimpl.h:5286
std::pair< bool, coeffT > finalize(const double kernel_norm, const keyT &key, const coeffT &coeff) const
its sole purpose is to wait for the kernel norm, wrap it, and send it back to the caller
Definition funcimpl.h:5256
recursive_apply_op(implT *result, const CoeffTracker< T, LDIM > &iaf, const CoeffTracker< T, LDIM > &iag, const opT *apply_op)
Definition funcimpl.h:5203
this_type make_child(const keyT &child) const
Definition funcimpl.h:5265
CoeffTracker< T, LDIM > iag
Definition funcimpl.h:5198
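Both recursive_apply_op variants implement the same control flow: at each key, apply the operator to the coefficients, decide from the resulting norm whether the node is accurate enough, and otherwise recurse on the children (make_child/activate spawn the child tasks, whose parent coefficients may be remote). A serial, purely local sketch of that recursion, with user-supplied apply and leaf-test callables standing in for the real operator machinery (all names are illustrative):

#include <functional>
#include <map>
#include <vector>

using Key    = long;                      // illustrative key: children of k are 2k+1, 2k+2
using Coeffs = std::vector<double>;
using Tree   = std::map<Key, Coeffs>;

// Recursive skeleton: apply 'op' at this key, store the result, and recurse
// until 'is_leaf' reports that the result is converged at this key.
void recursive_apply(const Tree& input, Tree& result, Key key,
                     const std::function<Coeffs(Key, const Coeffs&)>& op,
                     const std::function<bool(Key, const Coeffs&)>& is_leaf) {
    auto it = input.find(key);
    if (it == input.end()) return;
    Coeffs r = op(key, it->second);
    result[key] = r;
    if (!is_leaf(key, r)) {
        recursive_apply(input, result, 2 * key + 1, op, is_leaf);
        recursive_apply(input, result, 2 * key + 2, op, is_leaf);
    }
}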
remove all coefficients of internal nodes
Definition funcimpl.h:2193
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2194
remove_internal_coeffs()=default
constructor needs impl for cdata
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2199
void serialize(const Archive &ar)
Definition funcimpl.h:2205
remove all coefficients of leaf nodes
Definition funcimpl.h:2210
bool operator()(typename rangeT::iterator &it) const
Definition funcimpl.h:2216
remove_leaf_coeffs()=default
constructor needs impl for cdata
void serialize(const Archive &ar)
Definition funcimpl.h:2221
Range< typename dcT::iterator > rangeT
Definition funcimpl.h:2211
Definition funcimpl.h:4468
void serialize(Archive &ar)
Definition funcimpl.h:4472
bool operator()(const implT *f, const keyT &key, const nodeT &t) const
Definition funcimpl.h:4469
shallow-copy, pared-down version of FunctionNode, for special purposes only
Definition funcimpl.h:749
coeffT & coeff()
Definition funcimpl.h:763
GenTensor< T > coeffT
Definition funcimpl.h:750
bool is_leaf() const
Definition funcimpl.h:765
void serialize(Archive &ar)
Definition funcimpl.h:767
ShallowNode(const ShallowNode< T, NDIM > &node)
Definition funcimpl.h:758
ShallowNode(const FunctionNode< T, NDIM > &node)
Definition funcimpl.h:755
bool has_children() const
Definition funcimpl.h:764
ShallowNode()
Definition funcimpl.h:754
bool _has_children
Definition funcimpl.h:752
double dnorm
Definition funcimpl.h:753
const coeffT & coeff() const
Definition funcimpl.h:762
coeffT _coeffs
Definition funcimpl.h:751
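ShallowNode carries just enough of a FunctionNode to be shipped around cheaply: the coefficients, the has-children flag, and a norm. A rough standalone equivalent of that idea, with member names following the listing above and a simplified coefficient type (this is a sketch, not the actual class):

#include <vector>

// Pared-down node: only what a remote task needs in order to decide how to proceed.
template <typename T>
struct ShallowNodeSketch {
    std::vector<T> coeffs;       // the node's coefficients, if any
    bool has_children = false;   // true if the node has children in the tree
    double dnorm = 0.0;          // norm of the difference (wavelet) coefficients

    bool is_leaf() const { return !has_children; }
};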
TensorArgs holds the arguments for creating a LowRankTensor.
Definition gentensor.h:134
double thresh
Definition gentensor.h:135
TensorType tt
Definition gentensor.h:136
inserts/accumulates coefficients into impl's tree
Definition funcimpl.h:716
FunctionImpl< T, NDIM > * impl
Definition funcimpl.h:720
FunctionNode< T, NDIM > nodeT
Definition funcimpl.h:718
accumulate_op(const accumulate_op &other)=default
void operator()(const Key< NDIM > &key, const coeffT &coeff, const bool &is_leaf) const
Definition funcimpl.h:724
void serialize(Archive &ar)
Definition funcimpl.h:728
GenTensor< T > coeffT
Definition funcimpl.h:717
accumulate_op(FunctionImpl< T, NDIM > *f)
Definition funcimpl.h:722
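accumulate_op differs from insert_op in that coefficients arriving for a key that already holds data are added to the existing node rather than replacing it. A minimal sketch of that insert-or-accumulate semantics on a plain map (illustrative container, not the distributed dcT):

#include <cstddef>
#include <map>
#include <vector>

using Key    = long;
using Coeffs = std::vector<double>;
using Tree   = std::map<Key, Coeffs>;

// insert_op semantics: overwrite whatever is stored at 'key'.
void insert(Tree& t, Key key, const Coeffs& c) { t[key] = c; }

// accumulate_op semantics: add into the existing node, creating it if absent.
void accumulate(Tree& t, Key key, const Coeffs& c) {
    Coeffs& node = t[key];
    if (node.size() < c.size()) node.resize(c.size(), 0.0);
    for (std::size_t i = 0; i < c.size(); ++i) node[i] += c[i];
}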
static void load(const Archive &ar, FunctionImpl< T, NDIM > *&ptr)
Definition funcimpl.h:7168
static void load(const Archive &ar, const FunctionImpl< T, NDIM > *&ptr)
Definition funcimpl.h:7137
static void load(const Archive &ar, std::shared_ptr< FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7216
static void load(const Archive &ar, std::shared_ptr< const FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7200
Default load of an object via serialize(ar, t).
Definition archive.h:666
static void load(const A &ar, const U &t)
Load an object.
Definition archive.h:678
static void store(const Archive &ar, FunctionImpl< T, NDIM > *const &ptr)
Definition funcimpl.h:7190
static void store(const Archive &ar, const FunctionImpl< T, NDIM > *const &ptr)
Definition funcimpl.h:7159
static void store(const Archive &ar, const std::shared_ptr< FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7225
static void store(const Archive &ar, const std::shared_ptr< const FunctionImpl< T, NDIM > > &ptr)
Definition funcimpl.h:7209
Default store of an object via serialize(ar, t).
Definition archive.h:611
static std::enable_if_t< is_output_archive_v< A > &&!std::is_function< U >::value &&(has_member_serialize_v< U, A >||has_nonmember_serialize_v< U, A >||has_freestanding_serialize_v< U, A >||has_freestanding_default_serialize_v< U, A >), void > store(const A &ar, const U &t)
Definition archive.h:621
Definition funcimpl.h:610
void serialize(Archive &ar)
Definition funcimpl.h:674
const opT * op
Definition funcimpl.h:617
hartree_convolute_leaf_op(const implT *f, const implL *g, const opT *op)
Definition funcimpl.h:621
bool operator()(const Key< NDIM > &key) const
no pre-determination
Definition funcimpl.h:625
bool operator()(const Key< NDIM > &key, const Tensor< T > &fcoeff, const Tensor< T > &gcoeff) const
post-determination: true if f is a leaf and the result is well-represented
Definition funcimpl.h:638
const implL * g
Definition funcimpl.h:616
const FunctionImpl< T, NDIM > * f
Definition funcimpl.h:615
FunctionImpl< T, LDIM > implL
Definition funcimpl.h:613
bool do_error_leaf_op() const
Definition funcimpl.h:618
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:612
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
no post-determination
Definition funcimpl.h:628
returns true if the result of a hartree_product is a leaf node (compute norm & error)
Definition funcimpl.h:500
bool do_error_leaf_op() const
Definition funcimpl.h:505
const FunctionImpl< T, NDIM > * f
Definition funcimpl.h:503
hartree_leaf_op(const implT *f, const long &k)
Definition funcimpl.h:508
long k
Definition funcimpl.h:504
void serialize(Archive &ar)
Definition funcimpl.h:556
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
no post-determination
Definition funcimpl.h:514
bool operator()(const Key< NDIM > &key, const Tensor< T > &fcoeff, const Tensor< T > &gcoeff) const
post-determination: true if f is a leaf and the result is well-represented
Definition funcimpl.h:524
bool operator()(const Key< NDIM > &key) const
no pre-determination
Definition funcimpl.h:511
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:502
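The leaf-op predicates (hartree_leaf_op, op_leaf_op, hartree_convolute_leaf_op) all answer the same question: given the coefficients produced at a key, is the result already well represented there, or must the tree be refined further? The usual post-determination test compares an error estimate against a (possibly level-dependent) truncation tolerance. A hedged sketch of such a test; the error estimate and the level scaling below are generic placeholders, not the exact MADNESS formulas:

#include <cmath>
#include <numeric>
#include <vector>

// Simple 2-norm of a coefficient block.
double norm2(const std::vector<double>& c) {
    return std::sqrt(std::inner_product(c.begin(), c.end(), c.begin(), 0.0));
}

// Post-determination: the node is a leaf if the estimated error of stopping
// here (measured by the high-frequency part of the coefficients) is below a
// level-dependent tolerance.
bool is_leaf(const std::vector<double>& high_freq_coeffs,
             double thresh, int level) {
    const double tol = thresh * std::pow(0.5, 0.5 * level); // placeholder scaling
    return norm2(high_freq_coeffs) < tol;
}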
inserts/replaces the coefficients in the function
Definition funcimpl.h:692
insert_op()
Definition funcimpl.h:699
implT * impl
Definition funcimpl.h:698
void operator()(const keyT &key, const coeffT &coeff, const bool &is_leaf) const
Definition funcimpl.h:702
FunctionNode< T, NDIM > nodeT
Definition funcimpl.h:696
Key< NDIM > keyT
Definition funcimpl.h:694
insert_op(const insert_op &other)
Definition funcimpl.h:701
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:693
GenTensor< T > coeffT
Definition funcimpl.h:695
insert_op(implT *f)
Definition funcimpl.h:700
void serialize(Archive &ar)
Definition funcimpl.h:706
Definition mra.h:112
Definition funcimpl.h:680
bool operator()(const Key< NDIM > &key, const GenTensor< T > &fcoeff, const GenTensor< T > &gcoeff) const
Definition funcimpl.h:682
void serialize(Archive &ar)
Definition funcimpl.h:686
void operator()(const Key< NDIM > &key, const GenTensor< T > &coeff, const bool &is_leaf) const
Definition funcimpl.h:681
Definition funcimpl.h:564
bool operator()(const Key< NDIM > &key, const double &cnorm) const
post-determination: return true if operator and coefficient norms are small
Definition funcimpl.h:585
void serialize(Archive &ar)
Definition funcimpl.h:600
const implT * f
the source or result function, needed for truncate_tol
Definition funcimpl.h:568
op_leaf_op(const opT *op, const implT *f)
Definition funcimpl.h:572
FunctionImpl< T, NDIM > implT
Definition funcimpl.h:565
const opT * op
the convolution operator
Definition funcimpl.h:567
bool do_error_leaf_op() const
Definition funcimpl.h:569
bool operator()(const Key< NDIM > &key) const
pre-determination: we can't know whether this will be a leaf node before we have the final coeffs
Definition funcimpl.h:575
bool operator()(const Key< NDIM > &key, const GenTensor< T > &coeff) const
post-determination: return true if operator and coefficient norms are small
Definition funcimpl.h:578
Definition lowrankfunction.h:332
Definition funcimpl.h:736
void serialize(Archive &ar)
Definition funcimpl.h:743
bool operator()(const Key< NDIM > &key, const T &t, const R &r) const
Definition funcimpl.h:742
bool operator()(const Key< NDIM > &key, const T &t) const
Definition funcimpl.h:739
Defines and implements most of Tensor.
#define ITERATOR(t, exp)
Definition tensor_macros.h:249
#define IND
Definition tensor_macros.h:204
#define TERNARY_OPTIMIZED_ITERATOR(X, x, Y, y, Z, z, exp)
Definition tensor_macros.h:719
#define TENSOR_RESULT_TYPE(L, R)
This macro simplifies access to TensorResultType.
Definition type_data.h:205
#define PROFILE_MEMBER_FUNC(classname)
Definition worldprofile.h:210
#define PROFILE_BLOCK(name)
Definition worldprofile.h:208
int ProcessID
Used to clearly identify process number/rank.
Definition worldtypes.h:43