MADNESS 0.10.1
worlddc.h
Go to the documentation of this file.
1/*
2 This file is part of MADNESS.
3
4 Copyright (C) 2007,2010 Oak Ridge National Laboratory
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License as published by
8 the Free Software Foundation; either version 2 of the License, or
9 (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19
20 For more information please contact:
21
22 Robert J. Harrison
23 Oak Ridge National Laboratory
24 One Bethel Valley Road
25 P.O. Box 2008, MS-6367
26
27 email: harrisonrj@ornl.gov
28 tel: 865-241-3937
29 fax: 865-572-0680
30*/
31
32#ifndef MADNESS_WORLD_WORLDDC_H__INCLUDED
33#define MADNESS_WORLD_WORLDDC_H__INCLUDED
34
35/*!
36 \file worlddc.h
37 \brief Implements WorldContainer
38 \addtogroup worlddc
39 @{
40
41*/
42
43#include <functional>
44#include <set>
45#include <unordered_set>
46
47
53
54namespace madness
55{
56
57 template <typename keyT, typename valueT, typename hashfunT>
58 class WorldContainer;
59
60 template <typename keyT, typename valueT, typename hashfunT>
61 class WorldContainerImpl;
62
63 template <typename keyT, typename valueT, typename hashfunT>
64 void swap(WorldContainer<keyT, valueT, hashfunT> &, WorldContainer<keyT, valueT, hashfunT> &);
65
66 template <typename keyT>
67 class WorldDCPmapInterface;
68
69 template <typename keyT>
71 {
72 public:
73 virtual std::size_t size() const = 0;
74 virtual void redistribute_phase1(const std::shared_ptr<WorldDCPmapInterface<keyT>> &newmap) = 0;
75 virtual void redistribute_phase2() = 0;
76 virtual void redistribute_phase3() = 0;
78 };
79
80 /// some introspection of how data is distributed
82 Distributed, ///< no replication of the container, the container is distributed over the world
83 RankReplicated, ///< replicate the container over all world ranks
84 NodeReplicated ///< replicate the container over all hosts (compute nodes), once per node,
85 ///< even if there are several ranks per node
86 };
87
88 template<typename T=long>
89 std::ostream& operator<<(std::ostream& os, const DistributionType type) {
90 if (type==DistributionType::Distributed) os << "Distributed";
91 if (type==DistributionType::RankReplicated) os << "RankReplicated";
92 if (type==DistributionType::NodeReplicated) os << "NodeReplicated";
93 return os;
94 }
95
96 template<typename T=long>
97 std::string to_string(const DistributionType type) {
98 std::stringstream ss; ss << type;
99 return ss.str();
100 }
101
    /// parse a case-insensitive string ("distributed", "rankreplicated", "nodereplicated")
    /// into a DistributionType; throws on unknown input.
    /// NOTE(review): the extraction dropped the signature line here; presumably
    /// `DistributionType string2distribution_type(std::string type)` -- confirm against the repository.
    template<typename T=long>
        // to lower case (unsigned char cast avoids UB for negative chars in std::tolower)
        std::transform(type.begin(), type.end(), type.begin(), [](unsigned char c){ return std::tolower(c); });
        if (type=="distributed") return Distributed;
        if (type=="rankreplicated") return RankReplicated;
        if (type=="nodereplicated") return NodeReplicated;
        std::string msg="unknown distribution type:"+type;
        MADNESS_EXCEPTION(msg.c_str(),1)
        return Distributed;  // unreachable; silences missing-return warnings
    }
113
114
115
116 /// Interface to be provided by any process map
117
118 /// NOTE: if the map is not distributed, but replicated, you must override the distribution_type() method.
119 /// \ingroup worlddc
120 template <typename keyT>
122 {
123 public:
125
126
127 private:
128 std::set<ptrT> ptrs;
129
130 public:
131 /// Maps key to processor
132
133 /// @param[in] key Key for container
134 /// @return Processor that logically owns the key
135 virtual ProcessID owner(const keyT &key) const = 0;
136
138
139 virtual void print() const {}
140
141 /// by default the map is distributed
143 {
144 return Distributed;
145 }
146
147 /// Registers object for receipt of redistribute callbacks
148
149 /// @param[in] ptr Pointer to class derived from WorldDCRedistributedInterface
151 {
152 ptrs.insert(ptr);
153 }
154
155 /// Deregisters object for receipt of redistribute callbacks
156
157 /// @param[in] ptr Pointer to class derived from WorldDCRedistributedInterface
159 {
160 ptrs.erase(ptr);
161 }
162
163 /// Invoking this switches all registered objects from this process map to the new one
164
165 /// After invoking this routine all objects will be registered with the
166 /// new map and no objects will be registered in the current map.
167 /// @param[in] world The associated world
168 /// @param[in] newpmap The new process map
169 void redistribute(World &world, const std::shared_ptr<WorldDCPmapInterface<keyT>> &newpmap)
170 {
171 print_data_sizes(world, "before redistributing");
172 world.gop.fence();
173 for (typename std::set<ptrT>::iterator iter = ptrs.begin();
174 iter != ptrs.end();
175 ++iter)
176 {
177 (*iter)->redistribute_phase1(newpmap);
178 }
179 world.gop.fence();
180 for (typename std::set<ptrT>::iterator iter = ptrs.begin();
181 iter != ptrs.end();
182 ++iter)
183 {
184 (*iter)->redistribute_phase2();
185 newpmap->register_callback(*iter);
186 }
187 world.gop.fence();
188 for (typename std::set<ptrT>::iterator iter = ptrs.begin();
189 iter != ptrs.end();
190 ++iter)
191 {
192 (*iter)->redistribute_phase3();
193 }
194 world.gop.fence();
195 ptrs.clear();
196 newpmap->print_data_sizes(world, "after redistributing");
197 }
198
199 /// Counts global number of entries in all containers associated with this process map
200
201 /// Collective operation with global fence
        std::size_t global_size(World &world) const
        {
            world.gop.fence();
            std::size_t sum = local_size(); // entries owned by this rank
            world.gop.sum(sum);             // all-reduce over the world
            world.gop.fence();
            return sum;
        }
210
211 /// Counts local number of entries in all containers associated with this process map
212 std::size_t local_size() const
213 {
214 std::size_t sum = 0;
215 for (typename std::set<ptrT>::iterator iter = ptrs.begin(); iter != ptrs.end(); ++iter)
216 {
217 sum += (*iter)->size();
218 }
219 return sum;
220 }
221
222 /// Prints size info to std::cout
223
224 /// Collective operation with global fence
225 void print_data_sizes(World &world, const std::string msg = "") const
226 {
227 world.gop.fence();
228 std::size_t total = global_size(world);
229 std::vector<std::size_t> sizes(world.size());
230 sizes[world.rank()] = local_size();
231 world.gop.sum(&sizes[0], world.size());
232 if (world.rank() == 0)
233 {
234 madness::print("data distribution info", msg);
235 madness::print(" total: ", total);
236 std::cout << " procs: ";
237 for (int i = 0; i < world.size(); i++)
238 std::cout << sizes[i] << " ";
239 std::cout << std::endl;
240 }
241 world.gop.fence();
242 }
243 };
244
245 /// Default process map is "random" using madness::hash(key)
246
247 /// \ingroup worlddc
248 template <typename keyT, typename hashfunT = Hash<keyT>>
250 {
251 private:
252 const int nproc;
253 hashfunT hashfun;
254
255 public:
        /// Construct a default map over @p world; @p hf hashes keys onto ranks
        WorldDCDefaultPmap(World &world, const hashfunT &hf = hashfunT()) : nproc(world.mpi.nproc()),
                                                                            hashfun(hf)
        {
        }
260
261 ProcessID owner(const keyT &key) const
262 {
263 if (nproc == 1)
264 return 0;
265 return hashfun(key) % nproc;
266 }
267 };
268
269 /// Local process map will always return the current process as owner
270
271 /// \ingroup worlddc
272 template <typename keyT, typename hashfunT = Hash<keyT>>
274 {
275 private:
277
278 public:
279 WorldDCLocalPmap(World &world) : me(world.rank()) {}
        /// every key is owned by this rank, making all data local
        ProcessID owner(const keyT &key) const override
        {
            return me;
        }
284
286 {
287 return RankReplicated;
288 }
289 };
290
291 /// node-replicated map will return the lowest rank on the node as owner
292 ///
293 /// \ingroup worlddc
294 template <typename keyT, typename hashfunT = Hash<keyT>>
297
298 public:
299 /// ctor makes a map of all ranks to their owners (lowest rank on the host)
300 /// calls a fence
301 /// @param[in] world the associated world
302 explicit WorldDCNodeReplicatedPmap(World& world, const std::map<std::string,std::vector<long>> ranks_per_host) {
304 }
305
306 /// owner is the lowest rank on the node, same for all keys
307 ProcessID owner(const keyT &key) const override {
308 return myowner;
309 }
310
312 {
313 return NodeReplicated;
314 }
315 };
316
    /// check distribution type of WorldContainer -- global communication
    ///
    /// Verifies the pmap's claimed DistributionType against actual data placement by
    /// counting entries and duplicated key hashes across all ranks.  Collective:
    /// every rank must call this.  Throws via MADNESS_CHECK_THROW on inconsistency.
    /// NOTE(review): the extraction dropped the signature line here; presumably
    /// `DistributionType validate_distribution_type(const dcT& dc)` -- confirm against the repository.
    template <typename dcT>
    {
        // assume pmap distribution type is the correct result
        World& world=dc.get_world();
        auto result= dc.get_pmap()->distribution_type();

        auto local_size=dc.size();
        auto global_size=local_size;
        world.gop.sum(global_size);  // total entries across all ranks, replicas included

        // counts every occurrence of a hash beyond its first one
        auto number_of_duplicates = [](const std::vector<hashT>& v) {
            std::unordered_map<hashT, size_t> counts;
            size_t duplicates = 0;
            for (const auto& elem : v) {
                if (++counts[elem] > 1) duplicates++;
            }
            return duplicates;
        };

        // collect all hash values and determine the number of duplicates
        std::vector<hashT> all_hashes(local_size);
        const auto& hashfun=dc.get_hash();
        int i=0;
        for (auto it=dc.begin();it!=dc.end();++it,++i) all_hashes[i]=hashfun(it->first);
        all_hashes=world.gop.concat0(all_hashes);  // concatenated on rank 0 only
        std::size_t ndup=number_of_duplicates(all_hashes);
        world.gop.broadcast(ndup,0);  // ndup is only meaningful on rank 0 before this
        // print("rank, local, global, duplicates", world.rank(),local_size,global_size,ndup);

        // consistency checks
        if (result==Distributed) {
            // all keys should exist only on one process
            MADNESS_CHECK_THROW(ndup==0,"WorldDC inconsistent -- distributed has duplicates");
        }
        else if (result==RankReplicated) {
            // all keys should exist on all processes
            std::size_t nrank=world.size();
            MADNESS_CHECK_THROW(global_size==nrank*local_size,"WorldDC inconsistent");
            MADNESS_CHECK_THROW(ndup==local_size*(nrank-1),"WorldDC inconsistent - duplicates");
        }
        else if (result==NodeReplicated) {
            // all keys should exist on all nodes, not on all ranks
            auto ranks_per_host1=ranks_per_host(world);
            std::vector<int> primary_ranks=primary_ranks_per_host(world,ranks_per_host1);
            world.gop.broadcast_serializable(primary_ranks,0);
            world.gop.fence();
            print("primary_rank_per_host",primary_ranks);

            std::size_t nnodes=primary_ranks.size();
            // primary rank == lowest rank on its host; only primaries hold data
            bool is_primary=(std::find(primary_ranks.begin(),primary_ranks.end(),world.rank())!=primary_ranks.end());

            if (is_primary) {
                MADNESS_CHECK_THROW(global_size==(nnodes*local_size),"WorldDC inconsistent - global size");
                MADNESS_CHECK_THROW(ndup==local_size*(nnodes-1),"WorldDC inconsistent - duplicates");
            } else {
                MADNESS_CHECK_THROW(local_size==0,"WorldDC inconsistent -- secondary");
            }
        }
        return result;
    }
379
380
381 /// Iterator for distributed container wraps the local iterator
382
383 /// \ingroup worlddc
384 template <class internal_iteratorT>
386 {
387 public:
388 typedef typename std::iterator_traits<internal_iteratorT>::iterator_category iterator_category;
389 typedef typename std::iterator_traits<internal_iteratorT>::value_type value_type;
390 typedef typename std::iterator_traits<internal_iteratorT>::difference_type difference_type;
391 typedef typename std::iterator_traits<internal_iteratorT>::pointer pointer;
392 typedef typename std::iterator_traits<internal_iteratorT>::reference reference;
393
394 private:
395 internal_iteratorT it; ///< Iterator from local container
396 // TODO: Convert this to a scoped pointer.
397 mutable value_type *value; ///< holds the remote values
398
399 public:
400 /// Default constructor makes a local uninitialized value
402 : it(), value(nullptr) {}
403
404 /// Initializes from a local iterator
405 explicit WorldContainerIterator(const internal_iteratorT &it)
406 : it(it), value(nullptr) {}
407
408 /// Initializes to cache a remote value
410 : it(), value(nullptr)
411 {
412 value = new value_type(v);
413 }
414
416 : it(), value(nullptr)
417 {
418 copy(other);
419 }
420
421 template <class iteratorT>
423 : it(), value(nullptr)
424 {
425 copy(other);
426 }
427
429 {
430 delete value;
431 }
432
433 /// Assignment
435 {
436 copy(other);
437 return *this;
438 }
439
440 /// Determines if two iterators are identical
441 bool operator==(const WorldContainerIterator &other) const
442 {
443 return (((!is_cached()) && (!other.is_cached())) && it == other.it) ||
444 ((is_cached() && other.is_cached()) && value->first == other.value->first);
445 }
446
447 /// Determines if two iterators are different
        /// Determines if two iterators are different (negation of operator==)
        bool operator!=(const WorldContainerIterator &other) const
        {
            return !(*this == other);
        }
452
453 /// Pre-increment of an iterator (i.e., ++it) --- \em local iterators only
454
455 /// Trying to increment a remote iterator will throw
457 {
459 ++it;
460 return *this;
461 }
462
464 {
467 ++it;
468 return result;
469 }
470
471 /// Iterators dereference to std::pair<const keyT,valueT>
473 {
474 return (is_cached() ? value : it.operator->());
475 }
476
477 /// Iterators dereference to std::pair<const keyT,valueT>
479 {
480 return (is_cached() ? *value : *it);
481 }
482
483 /// Private: (or should be) Returns iterator of internal container
        /// Private: (or should be) Returns iterator of internal container
        const internal_iteratorT &get_internal_iterator() const
        {
            return it;
        }

        /// Returns true if this is non-local or cached value

        /// A non-null \c value means a remote datum was copied into this iterator.
        bool is_cached() const
        {
            return value != nullptr;
        }
494
495 template <typename Archive>
496 void serialize(const Archive &)
497 {
498 MADNESS_EXCEPTION("Serializing DC iterator ... why?", false);
499 }
500
501 private:
502 template <class iteratorT>
504
505 template <class iteratorT>
507 {
508 if (static_cast<const void *>(this) != static_cast<const void *>(&other))
509 {
510 delete value;
511 if (other.is_cached())
512 {
513 value = new value_type(*other.value);
514 it = internal_iteratorT();
515 }
516 else
517 {
518 it = other.it;
519 value = nullptr;
520 }
521 }
522 }
523 };
524
525 /// Internal implementation of distributed container to facilitate shallow copy
526
527 /// \ingroup worlddc
528 template <typename keyT, typename valueT, typename hashfunT>
530 : public WorldObject<WorldContainerImpl<keyT, valueT, hashfunT>>,
532#ifndef MADNESS_DISABLE_SHARED_FROM_THIS
533 ,
534 public std::enable_shared_from_this<WorldContainerImpl<keyT, valueT, hashfunT>>
535#endif // MADNESS_DISABLE_SHARED_FROM_THIS
536 {
537 public:
538 typedef typename std::pair<const keyT, valueT> pairT;
539 typedef const pairT const_pairT;
541
543
544 // typedef WorldObject< WorldContainerImpl<keyT, valueT, hashfunT> > worldobjT;
545
554
555 friend class WorldContainer<keyT, valueT, hashfunT>;
556
557 // template <typename containerT, typename datumT>
558 // inline
559 // static
560 // typename containerT::iterator replace(containerT& c, const datumT& d) {
561 // std::pair<typename containerT::iterator,bool> p = c.insert(d);
562 // if (!p.second) p.first->second = d.second; // Who's on first?
563 // return p.first;
564 // }
565
566 private:
567 WorldContainerImpl(); // Inhibit default constructor
568
569 std::shared_ptr<WorldDCPmapInterface<keyT>> pmap; ///< Function/class to map from keys to owning process
570 const ProcessID me; ///< My MPI rank
571 internal_containerT local; ///< Locally owned data
572 std::vector<keyT> *move_list; ///< Tempoary used to record data that needs redistributing
573
574 /// Handles find request
575 void find_handler(ProcessID requestor, const keyT &key, const RemoteReference<FutureImpl<iterator>> &ref)
576 {
578 if (r == local.end())
579 {
580 // print("find_handler: failure:", key);
581 this->send(requestor, &implT::find_failure_handler, ref);
582 }
583 else
584 {
585 // print("find_handler: success:", key, r->first, r->second);
586 this->send(requestor, &implT::find_success_handler, ref, *r);
587 }
588 }
589
590 /// Handles successful find response
592 {
593 FutureImpl<iterator> *f = ref.get();
594 f->set(iterator(datum));
595 // print("find_success_handler: success:", datum.first, datum.second, f->get()->first, f->get()->second);
596 // Todo: Look at this again.
597 // ref.reset(); // Matching inc() in find() where ref was made
598 }
599
600 /// Handles unsuccessful find response
602 {
603 FutureImpl<iterator> *f = ref.get();
604 f->set(end());
605 // print("find_failure_handler");
606 // Todo: Look at this again.
607 // ref.reset(); // Matching inc() in find() where ref was made
608 }
609
610 public:
612 const std::shared_ptr<WorldDCPmapInterface<keyT>> &pm,
613 const hashfunT &hf)
614 : WorldObject<WorldContainerImpl<keyT, valueT, hashfunT>>(world), pmap(pm), me(world.mpi.rank()), local(5011, hf)
615 {
616 pmap->register_callback(this);
617 }
618
620 {
621 pmap->deregister_callback(this);
622 }
623
        /// Returns shared pointer to the process map (const)
        const std::shared_ptr<WorldDCPmapInterface<keyT>> &get_pmap() const
        {
            return pmap;
        }

        /// Returns shared pointer to the process map (non-const)
        std::shared_ptr<WorldDCPmapInterface<keyT>> &get_pmap()
        {
            return pmap;
        }
633
635 {
636 pmap->deregister_callback(this);
637 pmap.reset(new WorldDCLocalPmap<keyT>(this->get_world()));
638 pmap->register_callback(this);
639 }
640
641 /// replicates this WorldContainer on all ProcessIDs and generates a
642 /// ProcessMap where all nodes are local
643 void replicate(bool fence) {
644 World &world = this->get_world();
645 pmap->deregister_callback(this);
647 pmap->register_callback(this);
648
650 if (fence) world.gop.fence();
651 }
652
653 /// replicates this WorldContainer on all hosts and generates a
654 /// ProcessMap where all nodes are host-local (not rank-local)
655 /// will always fence
656 void replicate_on_hosts(bool fence) {
657 MADNESS_CHECK(fence);
658
659 /// print in rank-order
660// auto oprint = [&](World& world, auto &&... args) {
661// world.gop.fence();
662// for (int r=0; r<world.size(); ++r) {
663// if (r==world.rank()) {
664// std::cout << "rank " << world.rank() << ": ";
665// print(std::forward<decltype(args)>(args)...);
666// }
667// world.gop.fence();
668// }
669// };
670
671 World &world = this->get_world();
672
673 // find primary ranks per host (lowest rank on each host)
674 auto ranks_per_host1=ranks_per_host(world);
675 std::vector<int> primary_ranks=primary_ranks_per_host(world,ranks_per_host1);
676 world.gop.broadcast_serializable(primary_ranks,0);
677 world.gop.fence();
678
679// auto sizes =[&](std::string msg) {
680// world.gop.fence();
681// auto local_size=size();
682// auto global_size=local_size;
683// world.gop.sum(global_size);
684// oprint(world,"rank, local, global",msg, world.rank(),local_size,global_size);
685// world.gop.fence();
686// };
687
688 // change pmap to replicated
689 pmap->deregister_callback(this);
691 pmap->register_callback(this);
692
693 // shortcut: replace pmap and return
694 if (world.size()==1) {
695 // change pmap to node replicated
696 pmap->deregister_callback(this);
697 pmap.reset(new WorldDCNodeReplicatedPmap<keyT>(world,ranks_per_host1));
698 pmap->register_callback(this);
699 return;
700 }
701 world.gop.fence();
702
703 // get a list of all other ranks that are not primary
704 std::vector<int> secondary_ranks;
705 for (int r=0; r<world.size(); ++r) {
706 if (std::find(primary_ranks.begin(),primary_ranks.end(),r)==primary_ranks.end())
707 secondary_ranks.push_back(r);
708 }
709
710 // phase 1: for all ranks send data to the lowest rank on host
711
712 // step 1-2: send data to lowest rank on host (which will become the owner)
713 long myowner = lowest_rank_on_host_of_rank(ranks_per_host1, world.rank());
714 // oprint(world,"my owner, size:", myowner,size());
715 if (world.rank() != myowner) {
716 // send data to myowner
717 for (auto it = begin(); it != end(); ++it) {
718 keyT key = it->first;
719 valueT value = it->second;
720 this->send(myowner,&implT::insert,pairT(key,value));
721 // insert(pairT(key,value)); // this won't work with LocalPmap
722 }
723 // remove all local data after sending
724 // clear();
725 }
726 // need a fence here to make sure send is finished
727 world.gop.fence();
728 // sizes("after step 1, before clear");
729 // world.gop.fence();
730 if (world.rank()!=myowner) clear();
731 world.gop.fence();
732 // sizes("after step 1");
733
734 // change pmap to replicated
735 pmap->deregister_callback(this);
736 pmap.reset(new WorldDCNodeReplicatedPmap<keyT>(world,ranks_per_host1));
737 pmap->register_callback(this);
738
739 // check if this rank is in the primary list
740 bool i_am_in_primary_list=world.rank()==myowner;
741 if (i_am_in_primary_list) {
742
743 // step 2-1: create a world with only the primary ranks and replicate there (see test_world.cc)
744
745 SafeMPI::Group primary_group = world.mpi.comm().Get_group().Incl(primary_ranks.size(), &primary_ranks[0]);
746 SafeMPI::Intracomm comm_primary = world.mpi.comm().Create(primary_group);
747 // step 2-2: replicate in the primary world
748 {
749 World world_primary(comm_primary);
750 // auto ranks_per_host1=ranks_per_host(world_primary);
751 // if (world_primary.rank()==0) {
752 // print("host/rank map in primary world:");
753 // for (auto& p : ranks_per_host1) print(p.first, p.second);
754 // }
755
756 world_primary.gop.fence(); // this fence seems necessary, why??
757 do_replicate(world_primary);
758 world_primary.gop.fence();
759 }
760 } else {
761 // need this to avoid deadlock in MPI_Comm_create (why??)
762 SafeMPI::Group secondary_group = world.mpi.comm().Get_group().Incl(secondary_ranks.size(), &secondary_ranks[0]);
763 SafeMPI::Intracomm comm_secondary = world.mpi.comm().Create(secondary_group);
764 }
765
766 // phase 3: done
767 if (fence) world.gop.fence();
768 // validate_distribution_type(*this);
769 }
770
772 for (ProcessID rank = 0; rank < world.size(); rank++)
773 {
774 if (rank == world.rank())
775 {
776 std::size_t sz = size();
778
779 for (auto it = begin(); it != end(); ++it)
780 {
781 keyT key = it->first;
782 valueT value = it->second;
784 world.gop.broadcast_serializable(value, rank);
785 }
786 }
787 else
788 {
789 size_t sz;
791 for (size_t i = 0; i < sz; i++)
792 {
793 keyT key;
794 valueT value;
796 world.gop.broadcast_serializable(value, rank);
797 insert(pairT(key, value));
798 }
799 }
800 }
801 }
802
        /// Returns the hash functor used by the local container
        const hashfunT &get_hash() const { return local.get_hash(); }

        /// True if @p key is owned by this rank according to the process map
        bool is_local(const keyT &key) const
        {
            return owner(key) == me;
        }

        /// Rank owning @p key (delegates to the process map)
        ProcessID owner(const keyT &key) const
        {
            return pmap->owner(key);
        }
814
815 bool probe(const keyT &key) const
816 {
817 ProcessID dest = owner(key);
818 if (dest == me)
819 return local.find(key) != local.end();
820 else
821 return false;
822 }
823
        /// Number of entries held on this rank only (no communication)
        std::size_t size() const
        {
            return local.size();
        }
828
829 void insert(const pairT &datum)
830 {
831 ProcessID dest = owner(datum.first);
832 if (dest == me)
833 {
834 // Was using iterator ... try accessor ?????
835 accessor acc;
836 // N.B. key might already exist if want to simply replace
837 [[maybe_unused]] auto inserted = local.insert(acc, datum.first);
838 acc->second = datum.second;
839 }
840 else
841 {
842 // Must be send (not task) for sequential consistency (and relies on single-threaded remote server)
843 this->send(dest, &implT::insert, datum);
844 }
845 }
846
        /// Insert-or-locate @p key locally; lock held via @p acc on return

        /// @return true if a new (default-constructed) element was inserted
        bool insert_acc(accessor &acc, const keyT &key)
        {
            MADNESS_ASSERT(owner(key) == me);
            return local.insert(acc, key);
        }

        /// Insert-or-locate @p key locally; lock held via const @p acc on return
        bool insert_const_acc(const_accessor &acc, const keyT &key)
        {
            MADNESS_ASSERT(owner(key) == me);
            return local.insert(acc, key);
        }
858
        /// Removes all local entries (no communication; remote data untouched)
        void clear()
        {
            local.clear();
        }
863
864 void erase(const keyT &key)
865 {
866 ProcessID dest = owner(key);
867 if (dest == me)
868 {
869 [[maybe_unused]] auto erased = local.try_erase(key);
870 MADNESS_ASSERT(erased);
871 }
872 else
873 {
874 void (implT::*eraser)(const keyT &) = &implT::erase;
875 this->send(dest, eraser, key);
876 }
877 }
878
        /// Erases the entry pointed to by a \em local (non-cached) iterator
        template <typename InIter>
        void erase(InIter it)
        {
            MADNESS_ASSERT(!it.is_cached());
            MADNESS_ASSERT(it != end());
            erase(it->first);
        }
886
887 template <typename InIter>
888 void erase(InIter first, InIter last)
889 {
890 InIter it = first;
891 do
892 {
893 first++;
894 erase(it->first);
895 it = first;
896 } while (first != last);
897 }
898
900 {
901 return iterator(local.begin());
902 }
903
905 {
906 return const_iterator(local.begin());
907 }
908
910 {
911 return iterator(local.end());
912 }
913
915 {
916 return const_iterator(local.end());
917 }
918
920 {
921 // Ugliness here to avoid replicating find() and
922 // associated handlers for const. Assumption is that
923 // const and non-const iterators are identical except for
924 // const attribute ... at some point probably need to do
925 // the right thing.
926 Future<iterator> r = const_cast<implT *>(this)->find(key);
927 return *(Future<const_iterator> *)(&r);
928 }
929
931 {
932 ProcessID dest = owner(key);
933 if (dest == me)
934 {
935 return Future<iterator>(iterator(local.find(key)));
936 }
937 else
938 {
939 Future<iterator> result;
940 this->send(dest, &implT::find_handler, me, key, result.remote_ref(this->get_world()));
941 return result;
942 }
943 }
944
        /// Local-only find; @return false if @p key is remote or absent locally

        /// On success the lock is held via @p acc.
        bool find(accessor &acc, const keyT &key)
        {
            if (owner(key) != me)
                return false;
            return local.find(acc, key);
        }

        /// Local-only find (const accessor); @return false if @p key is remote or absent locally
        bool find(const_accessor &acc, const keyT &key) const
        {
            if (owner(key) != me)
                return false;
            return local.find(acc, key);
        }
958
959 // Used to forward call to item member function
960 template <typename memfunT>
961 MEMFUN_RETURNT(memfunT)
962 itemfun(const keyT &key, memfunT memfun)
963 {
964 accessor acc;
965 // N.B. key may already exist, this is just to ensure lock is held by acc
966 [[maybe_unused]] auto inserted = local.insert(acc, key);
967 return (acc->second.*memfun)();
968 }
969
970 // Used to forward call to item member function
971 template <typename memfunT, typename arg1T>
972 MEMFUN_RETURNT(memfunT)
973 itemfun(const keyT &key, memfunT memfun, const arg1T &arg1)
974 {
975 accessor acc;
976 // N.B. key may already exist, this is just to ensure lock is held by acc
977 [[maybe_unused]] auto inserted = local.insert(acc, key);
978 return (acc->second.*memfun)(arg1);
979 }
980
981 // Used to forward call to item member function
982 template <typename memfunT, typename arg1T, typename arg2T>
983 MEMFUN_RETURNT(memfunT)
984 itemfun(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2)
985 {
986 accessor acc;
987 // N.B. key may already exist, this is just to ensure lock is held by acc
988 [[maybe_unused]] auto inserted = local.insert(acc, key);
989 return (acc->second.*memfun)(arg1, arg2);
990 }
991
992 // Used to forward call to item member function
993 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T>
994 MEMFUN_RETURNT(memfunT)
995 itemfun(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3)
996 {
997 accessor acc;
998 // N.B. key may already exist, this is just to ensure lock is held by acc
999 [[maybe_unused]] auto inserted = local.insert(acc, key);
1000 return (acc->second.*memfun)(arg1, arg2, arg3);
1001 }
1002
1003 // Used to forward call to item member function
1004 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T>
1005 MEMFUN_RETURNT(memfunT)
1006 itemfun(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4)
1007 {
1008 accessor acc;
1009 // N.B. key may already exist, this is just to ensure lock is held by acc
1010 [[maybe_unused]] auto inserted = local.insert(acc, key);
1011 return (acc->second.*memfun)(arg1, arg2, arg3, arg4);
1012 }
1013
1014 // Used to forward call to item member function
1015 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T>
1016 MEMFUN_RETURNT(memfunT)
1017 itemfun(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5)
1018 {
1019 accessor acc;
1020 // N.B. key may already exist, this is just to ensure lock is held by acc
1021 [[maybe_unused]] auto inserted = local.insert(acc, key);
1022 return (acc->second.*memfun)(arg1, arg2, arg3, arg4, arg5);
1023 }
1024
1025 // Used to forward call to item member function
1026 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T>
1027 MEMFUN_RETURNT(memfunT)
1028 itemfun(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6)
1029 {
1030 accessor acc;
1031 // N.B. key may already exist, this is just to ensure lock is held by acc
1032 [[maybe_unused]] auto inserted = local.insert(acc, key);
1033 return (acc->second.*memfun)(arg1, arg2, arg3, arg4, arg5, arg6);
1034 }
1035
1036 // Used to forward call to item member function
1037 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T, typename arg7T>
1038 MEMFUN_RETURNT(memfunT)
1039 itemfun(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3,
1040 const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const arg7T &arg7)
1041 {
1042 accessor acc;
1043 // N.B. key may already exist, this is just to ensure lock is held by acc
1044 [[maybe_unused]] auto inserted = local.insert(acc, key);
1045 return (acc->second.*memfun)(arg1, arg2, arg3, arg4, arg5, arg6, arg7);
1046 }
1047
1048 // First phase of redistributions changes pmap and makes list of stuff to move
1049 void redistribute_phase1(const std::shared_ptr<WorldDCPmapInterface<keyT>> &newpmap)
1050 {
1051 pmap = newpmap;
1052 move_list = new std::vector<keyT>();
1053 for (typename internal_containerT::iterator iter = local.begin(); iter != local.end(); ++iter)
1054 {
1055 if (owner(iter->first) != me)
1056 move_list->push_back(iter->first);
1057 }
1058 }
1059
1060 struct P2Op
1061 {
1065 P2Op(const P2Op &p) : impl(p.impl) {}
1066 bool operator()(typename rangeT::iterator &iterator) const
1067 {
1068 typename internal_containerT::iterator iter = impl->local.find(*iterator);
1069 MADNESS_ASSERT(iter != impl->local.end());
1070
1071 // impl->insert(*iter);
1072 impl->task(impl->owner(*iterator), &implT::insert, *iter);
1073
1074 impl->local.erase(iter); // delete local copy of the data
1075 return true;
1076 }
1077 };
1078
1079 // Second phase moves data
1081 {
1082 this->get_world().taskq.for_each(typename P2Op::rangeT(move_list->begin(), move_list->end()), P2Op(this));
1083 // std::vector<keyT>& mvlist = *move_list;
1084 // for (unsigned int i=0; i<move_list->size(); ++i) {
1085 // typename internal_containerT::iterator iter = local.find(mvlist[i]);
1086 // MADNESS_ASSERT(iter != local.end());
1087 // insert(*iter);
1088 // local.erase(iter);
1089 // }
1090 // delete move_list;
1091 }
1092
1093 // Third phase cleans up
1095 {
1096 delete move_list;
1097 }
1098 };
1099
1100 /// Makes a distributed container with specified attributes
1101
1102 /// \ingroup worlddc
1103 ///
1104 /// There is no communication or syncronization associated with
1105 /// making a new container, but every process must invoke the
1106 /// constructor for each container in the same order. This is so
1107 /// that we can assign each container a unique ID without any
1108 /// communication. Since remotely invoked operations may start
1109 /// happening before local construction, messages on not yet
1110 /// constructed containers are buffered pending construction.
1111 ///
1112 /// Similarly, when a container is destroyed, the actual
1113 /// destruction is deferred until a synchronization point
1114 /// (world.gop.fence()) in order to eliminate the need to fence
1115 /// before destroying every container.
1116 ///
1117 /// The distribution of data between processes is controlled by
1118 /// the process map (Pmap) class. The default is uniform
1119 /// hashing based upon a strong (Bob Jenkins, lookup3) bytewise
1120 /// hash of the key.
1121 ///
1122 /// All operations, including constructors and destructors, are
1123 /// non-blocking and return immediately. If communication occurs
1124 /// it is asynchronous, otherwise operations are local.
1125 template <typename keyT, typename valueT, typename hashfunT = Hash<keyT>>
// NOTE(review): the class declaration itself (original line 1126,
// "class WorldContainer : ...") was a hyperlink and was dropped by the
// doxygen extraction; confirm the base class against the real worlddc.h.
1127 {
1128 public:
1129 // access keyT and valueT types for serialization
1133 typedef typename implT::pairT pairT;
1134 typedef typename implT::iterator iterator;
1135 typedef typename implT::const_iterator const_iterator;
1136 typedef typename implT::accessor accessor;
1137 typedef typename implT::const_accessor const_accessor;
1140
1141 private:
// Shared pointer to the implementation; empty for a default-constructed
// (uninitialized) container.
1142 std::shared_ptr<implT> p;
1143
// Guards against use of an uninitialized container.
// NOTE(review): the body (original line 1146, presumably a MADNESS_ASSERT
// on p) was dropped by the extraction — confirm against worlddc.h.
1144 inline void check_initialized() const
1145 {
1147 }
1148
1149 public:
// NOTE(review): the constructor/operator signature lines (original lines
// 1155, 1183, 1197, 1200 and 1207) were hyperlinks and were dropped by the
// doxygen extraction; only the initializer lists and bodies remain below.
1150 /// Makes an uninitialized container (no communication)
1151
1152 /// The container is useless until assigned to from a fully
1153 /// constructed container. There is no need to worry about
1154 /// default constructors being executed in order.
// (default constructor WorldContainer() — signature line dropped)
1156 : p()
1157 {
1158 }
1159
1160 /// Makes an initialized, empty container with default data distribution (no communication)
1161
1162 /// A unique ID is associated with every distributed container
1163 /// within a world. In order to avoid synchronization when
1164 /// making a container, we have to assume that all processes
1165 /// execute this constructor in the same order (does not apply
1166 /// to the non-initializing, default constructor).
1167 WorldContainer(World &world, bool do_pending = true, const hashfunT &hf = hashfunT())
1168 : p(new implT(world,
1169 std::shared_ptr<WorldDCPmapInterface<keyT>>(new WorldDCDefaultPmap<keyT, hashfunT>(world, hf)),
1170 hf))
1171 {
// Optionally replay messages that arrived before this rank constructed
// the container (remote ops may start before local construction).
1172 if (do_pending)
1173 p->process_pending();
1174 }
1175
1176 /// Makes an initialized, empty container (no communication)
1177
1178 /// A unique ID is associated with every distributed container
1179 /// within a world. In order to avoid synchronization when
1180 /// making a container, we have to assume that all processes
1181 /// execute this constructor in the same order (does not apply
1182 /// to the non-initializing, default constructor).
// (first line of this constructor's signature dropped — takes World& plus:)
1184 const std::shared_ptr<WorldDCPmapInterface<keyT>> &pmap,
1185 bool do_pending = true,
1186 const hashfunT &hf = hashfunT())
1187 : p(new implT(world, pmap, hf))
1188 {
1189 if (do_pending)
1190 p->process_pending();
1191 }
1192
1193 /// Copy constructor is shallow (no communication)
1194
1195 /// The copy refers to exactly the same container as other
1196 /// which must be initialized.
// (copy-constructor signature line dropped)
1198 : p(other.p)
1199 {
1200 }
1201 }
1202
1203 /// Assignment is shallow (no communication)
1204
1205 /// The copy refers to exactly the same container as other
1206 /// which must be initialized.
// (operator= signature line dropped)
1208 {
1209 if (this != &other)
1210 {
1211 other.check_initialized();
1212 p = other.p;
1213 }
1214 return *this;
1215 }
1216
// NOTE(review): several signature and body lines in this section were
// hyperlinks and were dropped by the doxygen extraction (e.g. the
// signatures of distribution_type() and get_world(), and the bodies of
// is_distributed/is_replicated/is_host_replicated); confirm against
// the real worlddc.h.
1217 /// return the way data is distributed
// (signature line dropped; throws rather than asserting so it is safe on
// an uninitialized container)
1219 if (!p) MADNESS_EXCEPTION("Uninitialized container", false);
1220 return p->get_pmap()->distribution_type();
1221 }
1222
1223 bool is_distributed() const {
1225 }
1226
1227 bool is_replicated() const {
1229 }
1230
1231 bool is_host_replicated() const {
1233 }
1234
1235 /// Returns the world associated with this container
1237 {
1239 return p->get_world();
1240 }
1241
// NOTE(review): this declared return type looks mangled by the extraction —
// the body returns p, which is std::shared_ptr<implT>, not a pmap pointer;
// the real return type is presumably std::shared_ptr<implT>& — confirm.
1242 std::shared_ptr<WorldDCPmapInterface<keyT>> &get_impl()
1243 {
1245 return p;
1246 }
1247
1248 /// replicates this WorldContainer on all ProcessIDs
1249 void replicate(bool fence = true)
1250 {
1251 p->replicate(fence);
1252 }
1253
1254 /// replicates this WorldContainer on all hosts (one PID per host)
1255 void replicate_on_hosts(bool fence = true)
1256 {
1257 p->replicate_on_hosts(fence);
1258 }
1259
// NOTE(review): the check_initialized(); line at the top of each of these
// bodies (original lines 1263, 1276, 1283, 1290, 1297) was a hyperlink and
// was dropped by the doxygen extraction.
1260 /// Inserts/replaces key+value pair (non-blocking communication if key not local)
1261 void replace(const pairT &datum)
1262 {
1264 p->insert(datum);
1265 }
1266
1267 /// Inserts/replaces key+value pair (non-blocking communication if key not local)
1268 void replace(const keyT &key, const valueT &value)
1269 {
1270 replace(pairT(key, value));
1271 }
1272
1273 /// Write access to LOCAL value by key. Returns true if found, false otherwise (always false for remote).
1274 bool find(accessor &acc, const keyT &key)
1275 {
1277 return p->find(acc, key);
1278 }
1279
1280 /// Read access to LOCAL value by key. Returns true if found, false otherwise (always false for remote).
1281 bool find(const_accessor &acc, const keyT &key) const
1282 {
1284 return p->find(acc, key);
1285 }
1286
1287 /// Write access to LOCAL value by key. Returns true if inserted, false if already exists (throws if remote)
1288 bool insert(accessor &acc, const keyT &key)
1289 {
1291 return p->insert_acc(acc, key);
1292 }
1293
1294 /// Read access to LOCAL value by key. Returns true if inserted, false if already exists (throws if remote)
1295 bool insert(const_accessor &acc, const keyT &key)
1296 {
1298 return p->insert_acc(acc, key);
1299 }
1300
1301 /// Inserts pairs (non-blocking communication if key(s) not local)
1302 template <typename input_iterator>
1303 void replace(input_iterator &start, input_iterator &end)
1304 {
1306 using std::placeholders::_1;
1307 std::for_each(start, end, std::bind(this, std::mem_fn(&containerT::insert), _1));
1308 }
1309
// NOTE(review): throughout this section the doxygen extraction dropped the
// hyperlinked lines: the check_initialized(); call at the top of each body,
// and the iterator/const_iterator/Future return-type + signature lines of
// find/begin/end (original lines 1339, 1350, 1357, 1359, 1364, 1366, 1371,
// 1373, 1378, 1380, 1436). Confirm against the real worlddc.h.
1310 /// Returns true if local data is immediately available (no communication)
1311 bool probe(const keyT &key) const
1312 {
1314 return p->probe(key);
1315 }
1316
1317 /// Returns processor that logically owns key (no communication)
1318
1319 /// Local remapping may have changed its physical location, but all
1320 /// operations should forward correctly.
1321 inline ProcessID owner(const keyT &key) const
1322 {
1324 return p->owner(key);
1325 }
1326
1327 /// Returns true if the key maps to the local processor (no communication)
1328 bool is_local(const keyT &key) const
1329 {
1331 return p->is_local(key);
1332 }
1333
1334 /// Returns a future iterator (non-blocking communication if key not local)
1335
1336 /// Like an std::map an iterator "points" to an std::pair<const keyT,valueT>.
1337 ///
1338 /// Refer to Future for info on how to avoid blocking.
// (non-const find(key) — signature line dropped)
1340 { //
1342 return p->find(key);
1343 }
1344
1345 /// Returns a future iterator (non-blocking communication if key not local)
1346
1347 /// Like an std::map an iterator "points" to an std::pair<const keyT,valueT>.
1348 ///
1349 /// Refer to Future for info on how to avoid blocking.
// (const find(key) — signature line dropped; const_cast selects the
// const overload of the impl so a const_iterator is produced)
1351 {
1353 return const_cast<const implT *>(p.get())->find(key);
1354 }
1355
1356 /// Returns an iterator to the beginning of the \em local data (no communication)
1358 {
1360 return p->begin();
1361 }
1362
1363 /// Returns an iterator to the beginning of the \em local data (no communication)
1365 {
1367 return const_cast<const implT *>(p.get())->begin();
1368 }
1369
1370 /// Returns an iterator past the end of the \em local data (no communication)
1372 {
1374 return p->end();
1375 }
1376
1377 /// Returns an iterator past the end of the \em local data (no communication)
1379 {
1381 return const_cast<const implT *>(p.get())->end();
1382 }
1383
1384 /// Erases entry from container (non-blocking comm if remote)
1385
1386 /// Missing keys are quietly ignored.
1387 ///
1388 /// Note that erasing an entry may invalidate iterators on the
1389 /// remote end. This is just the same as what happens when
1390 /// using STL iterators on an STL container in a sequential
1391 /// algorithm.
1392 void erase(const keyT &key)
1393 {
1395 p->erase(key);
1396 }
1397
1398 /// Erases entry corresponding to \em local iterator (no communication)
1399 void erase(const iterator &it)
1400 {
1402 p->erase(it);
1403 }
1404
1405 /// Erases range defined by \em local iterators (no communication)
1406 void erase(const iterator &start, const iterator &finish)
1407 {
1409 p->erase(start, finish);
1410 }
1411
1412 /// Clears all \em local data (no communication)
1413
1414 /// Invalidates all iterators
1415 void clear()
1416 {
1418 p->clear();
1419 }
1420
1421 /// Returns the number of \em local entries (no communication)
1422 std::size_t size() const
1423 {
1425 return p->size();
1426 }
1427
1428 /// Returns shared pointer to the process mapping
1429 inline const std::shared_ptr<WorldDCPmapInterface<keyT>> &get_pmap() const
1430 {
1432 return p->get_pmap();
1433 }
1434
// NOTE(review): the doc line below was copy-pasted from get_pmap; this
// method resets the process map to a local map (it forwards to
// p->reset_pmap_to_local()).
1435 /// Returns shared pointer to the process mapping
1437 {
1438 p->reset_pmap_to_local();
1439 }
1440
1441 /// Returns a reference to the hashing functor
1442 const hashfunT &get_hash() const
1443 {
1445 return p->get_hash();
1446 }
1447
1448 /// Process pending messages
1449
1450 /// If the constructor was given \c do_pending=false then you
1451 /// \em must invoke this routine in order to process both
1452 /// prior and future messages.
1453 inline void process_pending()
1454 {
1456 p->process_pending();
1457 }
1458
// NOTE(review): in this family the doxygen extraction dropped the hyperlinked
// Future<MEMFUN_RETURNT(memfunT)> return-type lines (original lines 1488,
// 1511, 1533, 1552, 1571, 1590, 1609) and the check_initialized(); calls at
// the top of each body. Each method builds a pointer-to-member to
// implT::itemfun (which applies memfun to the item under a write lock) and
// forwards it, with the key and arguments, to the owning process.
1459 /// Sends message "resultT memfun()" to item (non-blocking comm if remote)
1460
1461 /// If item does not exist it is made with the default constructor.
1462 ///
1463 /// Future arguments must be ready for remote messages.
1464 ///
1465 /// Returns a future result (Future<void> may be ignored).
1466 ///
1467 /// The method executes with a write lock on the item.
1468 template <typename memfunT>
1469 Future<MEMFUN_RETURNT(memfunT)>
1470 send(const keyT &key, memfunT memfun)
1471 {
1473 MEMFUN_RETURNT(memfunT)
1474 (implT::*itemfun)(const keyT &, memfunT) = &implT::template itemfun<memfunT>;
1475 return p->send(owner(key), itemfun, key, memfun);
1476 }
1477
1478 /// Sends message "resultT memfun(arg1T)" to item (non-blocking comm if remote)
1479
1480 /// If item does not exist it is made with the default constructor.
1481 ///
1482 /// Future arguments must be ready for remote messages.
1483 ///
1484 /// Returns a future result (Future<void> may be ignored).
1485 ///
1486 /// The method executes with a write lock on the item.
1487 template <typename memfunT, typename arg1T>
1489 send(const keyT &key, const memfunT &memfun, const arg1T &arg1)
1490 {
1492 // To work around bug in g++ 4.3.* use static cast as alternative mechanism to force type deduction
1493 MEMFUN_RETURNT(memfunT)
1494 (implT::*itemfun)(const keyT &, memfunT, const arg1T &) = &implT::template itemfun<memfunT, arg1T>;
1495 return p->send(owner(key), itemfun, key, memfun, arg1);
1496 /*return p->send(owner(key),
1497 static_cast<MEMFUN_RETURNT(memfunT)(implT::*)(const keyT&, memfunT, const arg1T&)>(&implT:: template itemfun<memfunT,arg1T>),
1498 key, memfun, arg1);*/
1499 }
1500
1501 /// Sends message "resultT memfun(arg1T,arg2T)" to item (non-blocking comm if remote)
1502
1503 /// If item does not exist it is made with the default constructor.
1504 ///
1505 /// Future arguments must be ready for both local and remote messages.
1506 ///
1507 /// Returns a future result (Future<void> may be ignored).
1508 ///
1509 /// The method executes with a write lock on the item.
1510 template <typename memfunT, typename arg1T, typename arg2T>
1512 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2)
1513 {
1515 // To work around bug in g++ 4.3.* use static cast as alternative mechanism to force type deduction
1516 MEMFUN_RETURNT(memfunT)
1517 (implT::*itemfun)(const keyT &, memfunT, const arg1T &, const arg2T &) = &implT::template itemfun<memfunT, arg1T, arg2T>;
1518 return p->send(owner(key), itemfun, key, memfun, arg1, arg2);
1519 /*return p->send(owner(key),
1520 static_cast<MEMFUN_RETURNT(memfunT)(implT::*)(const keyT&, memfunT, const arg1T&, const arg2T&)>(&implT:: template itemfun<memfunT,arg1T,arg2T>), key, memfun, arg1, arg2);*/
1521 }
1522
1523 /// Sends message "resultT memfun(arg1T,arg2T,arg3T)" to item (non-blocking comm if remote)
1524
1525 /// If item does not exist it is made with the default constructor.
1526 ///
1527 /// Future arguments must be ready for both local and remote messages.
1528 ///
1529 /// Returns a future result (Future<void> may be ignored).
1530 ///
1531 /// The method executes with a write lock on the item.
1532 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T>
1534 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3)
1535 {
1537 MEMFUN_RETURNT(memfunT)
1538 (implT::*itemfun)(const keyT &, memfunT, const arg1T &, const arg2T &, const arg3T &) = &implT::template itemfun<memfunT, arg1T, arg2T, arg3T>;
1539 return p->send(owner(key), itemfun, key, memfun, arg1, arg2, arg3);
1540 }
1541
1542 /// Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T)" to item (non-blocking comm if remote)
1543
1544 /// If item does not exist it is made with the default constructor.
1545 ///
1546 /// Future arguments must be ready for both local and remote messages.
1547 ///
1548 /// Returns a future result (Future<void> may be ignored).
1549 ///
1550 /// The method executes with a write lock on the item.
1551 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T>
1553 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4)
1554 {
1556 MEMFUN_RETURNT(memfunT)
1557 (implT::*itemfun)(const keyT &, memfunT, const arg1T &, const arg2T &, const arg3T &, const arg4T &) = &implT::template itemfun<memfunT, arg1T, arg2T, arg3T, arg4T>;
1558 return p->send(owner(key), itemfun, key, memfun, arg1, arg2, arg3, arg4);
1559 }
1560
1561 /// Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T)" to item (non-blocking comm if remote)
1562
1563 /// If item does not exist it is made with the default constructor.
1564 ///
1565 /// Future arguments must be ready for both local and remote messages.
1566 ///
1567 /// Returns a future result (Future<void> may be ignored).
1568 ///
1569 /// The method executes with a write lock on the item.
1570 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T>
1572 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5)
1573 {
1575 MEMFUN_RETURNT(memfunT)
1576 (implT::*itemfun)(const keyT &, memfunT, const arg1T &, const arg2T &, const arg3T &, const arg4T &, const arg5T &) = &implT::template itemfun<memfunT, arg1T, arg2T, arg3T, arg4T, arg5T>;
1577 return p->send(owner(key), itemfun, key, memfun, arg1, arg2, arg3, arg4, arg5);
1578 }
1579
1580 /// Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T)" to item (non-blocking comm if remote)
1581
1582 /// If item does not exist it is made with the default constructor.
1583 ///
1584 /// Future arguments must be ready for both local and remote messages.
1585 ///
1586 /// Returns a future result (Future<void> may be ignored).
1587 ///
1588 /// The method executes with a write lock on the item.
1589 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T>
1591 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6)
1592 {
1594 MEMFUN_RETURNT(memfunT)
1595 (implT::*itemfun)(const keyT &, memfunT, const arg1T &, const arg2T &, const arg3T &, const arg4T &, const arg5T &, const arg6T &) = &implT::template itemfun<memfunT, arg1T, arg2T, arg3T, arg4T, arg5T, arg6T>;
1596 return p->send(owner(key), itemfun, key, memfun, arg1, arg2, arg3, arg4, arg5, arg6);
1597 }
1598
1599 /// Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T,arg7T)" to item (non-blocking comm if remote)
1600
1601 /// If item does not exist it is made with the default constructor.
1602 ///
1603 /// Future arguments must be ready for both local and remote messages.
1604 ///
1605 /// Returns a future result (Future<void> may be ignored).
1606 ///
1607 /// The method executes with a write lock on the item.
1608 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T, typename arg7T>
1610 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4,
1611 const arg5T &arg5, const arg6T &arg6, const arg7T &arg7)
1612 {
1614 MEMFUN_RETURNT(memfunT)
1615 (implT::*itemfun)(const keyT &, memfunT, const arg1T &, const arg2T &, const arg3T &, const arg4T &, const arg5T &, const arg6T &, const arg7T &) = &implT::template itemfun<memfunT, arg1T, arg2T, arg3T, arg4T, arg5T, arg6T, arg7T>;
1616 return p->send(owner(key), itemfun, key, memfun, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
1617 }
1618
// NOTE(review): these const overloads simply forward to the non-const send()
// by casting away const on *this (the container itself is a shallow handle,
// so this is the established idiom in this file). The hyperlinked
// Future<MEMFUN_RETURNT(memfunT)> return-type lines (original lines 1623,
// 1633, 1643, 1653, 1663, 1673, 1683, 1694) were dropped by the doxygen
// extraction.
1619 /// Sends message "resultT memfun() const" to item (non-blocking comm if remote)
1620
1621 /// The method executes with a write lock on the item.
1622 template <typename memfunT>
1624 send(const keyT &key, memfunT memfun) const
1625 {
1626 return const_cast<containerT *>(this)->send(key, memfun);
1627 }
1628
1629 /// Sends message "resultT memfun(arg1T) const" to item (non-blocking comm if remote)
1630
1631 /// The method executes with a write lock on the item.
1632 template <typename memfunT, typename arg1T>
1634 send(const keyT &key, memfunT memfun, const arg1T &arg1) const
1635 {
1636 return const_cast<containerT *>(this)->send(key, memfun, arg1);
1637 }
1638
1639 /// Sends message "resultT memfun(arg1T,arg2T) const" to item (non-blocking comm if remote)
1640
1641 /// The method executes with a write lock on the item.
1642 template <typename memfunT, typename arg1T, typename arg2T>
1644 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2) const
1645 {
1646 return const_cast<containerT *>(this)->send(key, memfun, arg1, arg2);
1647 }
1648
1649 /// Sends message "resultT memfun(arg1T,arg2T,arg3T) const" to item (non-blocking comm if remote)
1650
1651 /// The method executes with a write lock on the item.
1652 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T>
1654 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3) const
1655 {
1656 return const_cast<containerT *>(this)->send(key, memfun, arg1, arg2, arg3);
1657 }
1658
1659 /// Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T) const" to item (non-blocking comm if remote)
1660
1661 /// The method executes with a write lock on the item.
1662 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T>
1664 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4) const
1665 {
1666 return const_cast<containerT *>(this)->send(key, memfun, arg1, arg2, arg3, arg4);
1667 }
1668
1669 /// Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T) const" to item (non-blocking comm if remote)
1670
1671 /// The method executes with a write lock on the item.
1672 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T>
1674 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5) const
1675 {
1676 return const_cast<containerT *>(this)->send(key, memfun, arg1, arg2, arg3, arg4, arg5);
1677 }
1678
1679 /// Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T) const" to item (non-blocking comm if remote)
1680
1681 /// The method executes with a write lock on the item.
1682 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T>
1684 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3,
1685 const arg4T &arg4, const arg5T &arg5, const arg6T &arg6) const
1686 {
1687 return const_cast<containerT *>(this)->send(key, memfun, arg1, arg2, arg3, arg4, arg5, arg6);
1688 }
1689
1690 /// Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T,arg7T) const" to item (non-blocking comm if remote)
1691
1692 /// The method executes with a write lock on the item.
1693 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T, typename arg7T>
1695 send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3,
1696 const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const arg7T &arg7) const
1697 {
1698 return const_cast<containerT *>(this)->send(key, memfun, arg1, arg2, arg3, arg4, arg5, arg6, arg7);
1699 }
1700
// NOTE(review): same pattern as the send() family, but submits a task to the
// owning process's task queue instead of an active message. REMFUTURE strips
// Future<> from each argument type so itemfun receives the plain value. The
// hyperlinked Future<MEMFUN_RETURNT(memfunT)> return-type lines (original
// lines 1712, 1732, 1753, 1775, 1798, 1822, 1847, 1873) and the
// check_initialized(); calls were dropped by the doxygen extraction.
1701 /// Adds task "resultT memfun()" in process owning item (non-blocking comm if remote)
1702
1703 /// If item does not exist it is made with the default constructor.
1704 ///
1705 /// Future arguments for local tasks can generate dependencies, but for remote
1706 /// tasks all futures must be ready.
1707 ///
1708 /// Returns a future result (Future<void> may be ignored).
1709 ///
1710 /// The method executes with a write lock on the item.
1711 template <typename memfunT>
1713 task(const keyT &key, memfunT memfun, const TaskAttributes &attr = TaskAttributes())
1714 {
1716 MEMFUN_RETURNT(memfunT)
1717 (implT::*itemfun)(const keyT &, memfunT) = &implT::template itemfun<memfunT>;
1718 return p->task(owner(key), itemfun, key, memfun, attr);
1719 }
1720
1721 /// Adds task "resultT memfun(arg1T)" in process owning item (non-blocking comm if remote)
1722
1723 /// If item does not exist it is made with the default constructor.
1724 ///
1725 /// Future arguments for local tasks can generate dependencies, but for remote
1726 /// tasks all futures must be ready.
1727 ///
1728 /// Returns a future result (Future<void> may be ignored).
1729 ///
1730 /// The method executes with a write lock on the item.
1731 template <typename memfunT, typename arg1T>
1733 task(const keyT &key, memfunT memfun, const arg1T &arg1, const TaskAttributes &attr = TaskAttributes())
1734 {
1736 typedef REMFUTURE(arg1T) a1T;
1737 MEMFUN_RETURNT(memfunT)
1738 (implT::*itemfun)(const keyT &, memfunT, const a1T &) = &implT::template itemfun<memfunT, a1T>;
1739 return p->task(owner(key), itemfun, key, memfun, arg1, attr);
1740 }
1741
1742 /// Adds task "resultT memfun(arg1T,arg2T)" in process owning item (non-blocking comm if remote)
1743
1744 /// If item does not exist it is made with the default constructor.
1745 ///
1746 /// Future arguments for local tasks can generate dependencies, but for remote
1747 /// tasks all futures must be ready.
1748 ///
1749 /// Returns a future result (Future<void> may be ignored).
1750 ///
1751 /// The method executes with a write lock on the item.
1752 template <typename memfunT, typename arg1T, typename arg2T>
1754 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const TaskAttributes &attr = TaskAttributes())
1755 {
1757 typedef REMFUTURE(arg1T) a1T;
1758 typedef REMFUTURE(arg2T) a2T;
1759 MEMFUN_RETURNT(memfunT)
1760 (implT::*itemfun)(const keyT &, memfunT, const a1T &, const a2T &) = &implT::template itemfun<memfunT, a1T, a2T>;
1761 return p->task(owner(key), itemfun, key, memfun, arg1, arg2, attr);
1762 }
1763
1764 /// Adds task "resultT memfun(arg1T,arg2T,arg3T)" in process owning item (non-blocking comm if remote)
1765
1766 /// If item does not exist it is made with the default constructor.
1767 ///
1768 /// Future arguments for local tasks can generate dependencies, but for remote
1769 /// tasks all futures must be ready.
1770 ///
1771 /// Returns a future result (Future<void> may be ignored).
1772 ///
1773 /// The method executes with a write lock on the item.
1774 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T>
1776 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const TaskAttributes &attr = TaskAttributes())
1777 {
1779 typedef REMFUTURE(arg1T) a1T;
1780 typedef REMFUTURE(arg2T) a2T;
1781 typedef REMFUTURE(arg3T) a3T;
1782 MEMFUN_RETURNT(memfunT)
1783 (implT::*itemfun)(const keyT &, memfunT, const a1T &, const a2T &, const a3T &) = &implT::template itemfun<memfunT, a1T, a2T, a3T>;
1784 return p->task(owner(key), itemfun, key, memfun, arg1, arg2, arg3, attr);
1785 }
1786
1787 /// Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T)" in process owning item (non-blocking comm if remote)
1788
1789 /// If item does not exist it is made with the default constructor.
1790 ///
1791 /// Future arguments for local tasks can generate dependencies, but for remote
1792 /// tasks all futures must be ready.
1793 ///
1794 /// Returns a future result (Future<void> may be ignored).
1795 ///
1796 /// The method executes with a write lock on the item.
1797 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T>
1799 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const TaskAttributes &attr = TaskAttributes())
1800 {
1802 typedef REMFUTURE(arg1T) a1T;
1803 typedef REMFUTURE(arg2T) a2T;
1804 typedef REMFUTURE(arg3T) a3T;
1805 typedef REMFUTURE(arg4T) a4T;
1806 MEMFUN_RETURNT(memfunT)
1807 (implT::*itemfun)(const keyT &, memfunT, const a1T &, const a2T &, const a3T &, const a4T &) = &implT::template itemfun<memfunT, a1T, a2T, a3T, a4T>;
1808 return p->task(owner(key), itemfun, key, memfun, arg1, arg2, arg3, arg4, attr);
1809 }
1810
1811 /// Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T)" in process owning item (non-blocking comm if remote)
1812
1813 /// If item does not exist it is made with the default constructor.
1814 ///
1815 /// Future arguments for local tasks can generate dependencies, but for remote
1816 /// tasks all futures must be ready.
1817 ///
1818 /// Returns a future result (Future<void> may be ignored).
1819 ///
1820 /// The method executes with a write lock on the item.
1821 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T>
1823 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const TaskAttributes &attr = TaskAttributes())
1824 {
1826 typedef REMFUTURE(arg1T) a1T;
1827 typedef REMFUTURE(arg2T) a2T;
1828 typedef REMFUTURE(arg3T) a3T;
1829 typedef REMFUTURE(arg4T) a4T;
1830 typedef REMFUTURE(arg5T) a5T;
1831 MEMFUN_RETURNT(memfunT)
1832 (implT::*itemfun)(const keyT &, memfunT, const a1T &, const a2T &, const a3T &, const a4T &, const a5T &) = &implT::template itemfun<memfunT, a1T, a2T, a3T, a4T, a5T>;
1833 return p->task(owner(key), itemfun, key, memfun, arg1, arg2, arg3, arg4, arg5, attr);
1834 }
1835
1836 /// Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T)" in process owning item (non-blocking comm if remote)
1837
1838 /// If item does not exist it is made with the default constructor.
1839 ///
1840 /// Future arguments for local tasks can generate dependencies, but for remote
1841 /// tasks all futures must be ready.
1842 ///
1843 /// Returns a future result (Future<void> may be ignored).
1844 ///
1845 /// The method executes with a write lock on the item.
1846 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T>
1848 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const TaskAttributes &attr = TaskAttributes())
1849 {
1851 typedef REMFUTURE(arg1T) a1T;
1852 typedef REMFUTURE(arg2T) a2T;
1853 typedef REMFUTURE(arg3T) a3T;
1854 typedef REMFUTURE(arg4T) a4T;
1855 typedef REMFUTURE(arg5T) a5T;
1856 typedef REMFUTURE(arg6T) a6T;
1857 MEMFUN_RETURNT(memfunT)
1858 (implT::*itemfun)(const keyT &, memfunT, const a1T &, const a2T &, const a3T &, const a4T &, const a5T &, const a6T &) = &implT::template itemfun<memfunT, a1T, a2T, a3T, a4T, a5T, a6T>;
1859 return p->task(owner(key), itemfun, key, memfun, arg1, arg2, arg3, arg4, arg5, arg6, attr);
1860 }
1861
1862 /// Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T,arg7T)" in process owning item (non-blocking comm if remote)
1863
1864 /// If item does not exist it is made with the default constructor.
1865 ///
1866 /// Future arguments for local tasks can generate dependencies, but for remote
1867 /// tasks all futures must be ready.
1868 ///
1869 /// Returns a future result (Future<void> may be ignored).
1870 ///
1871 /// The method executes with a write lock on the item.
1872 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T, typename arg7T>
1874 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const arg7T &arg7, const TaskAttributes &attr = TaskAttributes())
1875 {
1877 typedef REMFUTURE(arg1T) a1T;
1878 typedef REMFUTURE(arg2T) a2T;
1879 typedef REMFUTURE(arg3T) a3T;
1880 typedef REMFUTURE(arg4T) a4T;
1881 typedef REMFUTURE(arg5T) a5T;
1882 typedef REMFUTURE(arg6T) a6T;
1883 typedef REMFUTURE(arg7T) a7T;
1884 MEMFUN_RETURNT(memfunT)
1885 (implT::*itemfun)(const keyT &, memfunT, const a1T &, const a2T &, const a3T &, const a4T &, const a5T &, const a6T &, const a7T &) = &implT::template itemfun<memfunT, a1T, a2T, a3T, a4T, a5T, a6T, a7T>;
1886 return p->task(owner(key), itemfun, key, memfun, arg1, arg2, arg3, arg4, arg5, arg6, arg7, attr);
1887 }
1888
// NOTE(review): const overloads forwarding to the non-const task() via
// const_cast on *this (same shallow-handle idiom as the const send()
// overloads). The hyperlinked Future<MEMFUN_RETURNT(memfunT)> return-type
// lines (original lines 1893, 1903, 1913, 1923, 1933, 1943, 1953, 1963)
// were dropped by the doxygen extraction.
1889 /// Adds task "resultT memfun() const" in process owning item (non-blocking comm if remote)
1890
1891 /// The method executes with a write lock on the item.
1892 template <typename memfunT>
1894 task(const keyT &key, memfunT memfun, const TaskAttributes &attr = TaskAttributes()) const
1895 {
1896 return const_cast<containerT *>(this)->task(key, memfun, attr);
1897 }
1898
1899 /// Adds task "resultT memfun(arg1T) const" in process owning item (non-blocking comm if remote)
1900
1901 /// The method executes with a write lock on the item.
1902 template <typename memfunT, typename arg1T>
1904 task(const keyT &key, memfunT memfun, const arg1T &arg1, const TaskAttributes &attr = TaskAttributes()) const
1905 {
1906 return const_cast<containerT *>(this)->task(key, memfun, arg1, attr);
1907 }
1908
1909 /// Adds task "resultT memfun(arg1T,arg2T) const" in process owning item (non-blocking comm if remote)
1910
1911 /// The method executes with a write lock on the item.
1912 template <typename memfunT, typename arg1T, typename arg2T>
1914 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const TaskAttributes &attr = TaskAttributes()) const
1915 {
1916 return const_cast<containerT *>(this)->task(key, memfun, arg1, arg2, attr);
1917 }
1918
1919 /// Adds task "resultT memfun(arg1T,arg2T,arg3T) const" in process owning item (non-blocking comm if remote)
1920
1921 /// The method executes with a write lock on the item.
1922 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T>
1924 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const TaskAttributes &attr = TaskAttributes()) const
1925 {
1926 return const_cast<containerT *>(this)->task(key, memfun, arg1, arg2, arg3, attr);
1927 }
1928
1929 /// Adds task "resultT memfun(arg1T,arg2T,arg3T, arg4T) const" in process owning item (non-blocking comm if remote)
1930
1931 /// The method executes with a write lock on the item.
1932 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T>
1934 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const TaskAttributes &attr = TaskAttributes()) const
1935 {
1936 return const_cast<containerT *>(this)->task(key, memfun, arg1, arg2, arg3, arg4, attr);
1937 }
1938
1939 /// Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T) const" in process owning item (non-blocking comm if remote)
1940
1941 /// The method executes with a write lock on the item.
1942 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T>
1944 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const TaskAttributes &attr = TaskAttributes()) const
1945 {
1946 return const_cast<containerT *>(this)->task(key, memfun, arg1, arg2, arg3, arg4, arg5, attr);
1947 }
1948
1949 /// Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T) const" in process owning item (non-blocking comm if remote)
1950
1951 /// The method executes with a write lock on the item.
1952 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T>
1954 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const TaskAttributes &attr = TaskAttributes()) const
1955 {
1956 return const_cast<containerT *>(this)->task(key, memfun, arg1, arg2, arg3, arg4, arg5, arg6, attr);
1957 }
1958
1959 /// Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T,arg7T) const" in process owning item (non-blocking comm if remote)
1960
1961 /// The method executes with a write lock on the item.
1962 template <typename memfunT, typename arg1T, typename arg2T, typename arg3T, typename arg4T, typename arg5T, typename arg6T, typename arg7T>
1964 task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const arg7T &arg7, const TaskAttributes &attr = TaskAttributes()) const
1965 {
1966 return const_cast<containerT *>(this)->task(key, memfun, arg1, arg2, arg3, arg4, arg5, arg6, arg7, attr);
1967 }
1968
1969 /// (de)Serialize --- *Local* data only to/from anything *except* Buffer*Archive and Parallel*Archive
1970
1971 /// Advisable for *you* to fence before and after this to ensure consistency
1972 template <typename Archive>
1973 void serialize(const Archive &ar)
1974 {
1975 //
1976 // !! If you change the format of this stream make sure that
1977 // !! the parallel in/out archive below is compatible
1978 //
1979 // Stream format: magic cookie, element count, then count (key,value) pairs.
1980 const long magic = 5881828; // Sitar Indian restaurant in Knoxville
1981 unsigned long count = 0;
1982
1983 if (Archive::is_output_archive)
1984 {
1985 ar & magic;
1986 // First pass counts the local entries; second pass writes them.
1987 for (iterator it = begin(); it != end(); ++it)
1988 count++;
1989 ar & count;
1990 for (iterator it = begin(); it != end(); ++it)
1991 ar &*it;
1992 }
1993 else
1994 {
1995 // Input path: validate the cookie, then insert each datum via
1996 // replace(), which also routes entries not owned by this process.
1997 long cookie = 0l;
1998 ar & cookie;
1999 MADNESS_ASSERT(cookie == magic);
2000 ar & count;
2001 while (count--)
2002 {
2003 pairT datum;
2004 ar & datum;
2005 replace(datum);
2006 }
2007 }
2008 }
2006
2007 /// (de)Serialize --- !! ONLY for purpose of interprocess communication
2008
2009 /// This just writes/reads the unique id to/from the Buffer*Archive.
2011 {
2012 // Only the WorldObject base (i.e. its unique id) crosses the wire;
2013 // no container data is serialized here.
2013 ar &static_cast<WorldObject<implT> *>(p.get());
2014 }
2015
2016 /// (de)Serialize --- !! ONLY for purpose of interprocess communication
2017
2018 /// This just writes/reads the unique id to/from the Buffer*Archive.
2020 {
2021 WorldObject<implT> *ptr = nullptr;
2022 ar & ptr;
2023 MADNESS_ASSERT(ptr);
2024
2025#ifdef MADNESS_DISABLE_SHARED_FROM_THIS
2026 // No-op deleter: this handle must NOT delete the impl — ownership is
2027 // presumably retained by the originating container (TODO confirm).
2026 p.reset(static_cast<implT *>(ptr), [](implT *p_) -> void{});
2027#else
2028 // Share ownership with the existing impl instance.
2028 p = static_cast<implT *>(ptr)->shared_from_this();
2029#endif // MADNESS_DISABLE_SHARED_FROM_THIS
2030 }
2031
2032 /// Returns the associated unique id ... must be initialized
2033 const uniqueidT &id() const
2034 {
2035 // NOTE(review): source line 2035 is not visible in this extraction;
2035 // "must be initialized" is the caller's contract before dereferencing p.
2036 return p->id();
2037 }
2038
2039 /// Destructor passes ownership of implementation to world for deferred cleanup
2041 {
2042 // Hand the impl to the world's deferred-cleanup machinery rather than
2043 // destroying it here — presumably so in-flight remote operations can
2044 // still reach it (confirm in detail::deferred_cleanup).
2042 detail::deferred_cleanup(p->get_world(), p);
2043 }
2044
2045 friend void swap<>(WorldContainer &, WorldContainer &);
2046 };
2047
2048 /// Swaps the content of two WorldContainer objects. It should be called on all nodes.
2049
2050 /// \ingroup worlddc
2051 template <typename keyT, typename valueT, typename hashfunT>
2053 {
2054 // Shallow swap: only the shared impl pointers are exchanged, which is
2055 // why every rank must perform the same swap (see note above).
2054 std::swap(dc0.p, dc1.p);
2055 }
2056
2057
2058 namespace archive
2059 {
2060
2061 /// Write container to parallel archive
2062
2063 /// specialization for parallel serialization of a WorldContainer:
2064 /// all threads on each process serialize some values into a buffer, which gets concatenated
2065 /// and finally serialized to localarchive (aka VectorOutputArchive).
2066 template <class keyT, class valueT>
2068 {
2070 {
2071 using localarchiveT = VectorOutputArchive;
2072 const long magic = -5881828; // Sitar Indian restaurant in Knoxville (negative to indicate parallel!)
2073 typedef WorldContainer<keyT, valueT> dcT;
2074 using const_iterator = typename dcT::const_iterator;
2075 int count = t.size(); // Must be INT for MPI and NOT const since we'll do a global sum eventually
2076
2077 // Strategy:
2078 // 1. Serialize local data to a buffer in parallel over threads
2079 // a) Compute the size of the buffer needed by each task
2080 // b) Sum sizes and allocate the buffer of exact sizes needed for all threads
2081 // c) Serialize the data into the buffer in parallel over threads
2082 // 2. Gather all buffers to process 0
2083
2084 World *world = ar.get_world();
2085 world->gop.fence(); // Global fence here
2086
2087 // op_inspector measures how many bytes its sub-range [start,end)
2088 // serializes to ('bo' is declared on a source line not visible here —
2089 // presumably a counting BufferOutputArchive; TODO confirm).
2087 class op_inspector : public TaskInterface
2088 {
2089 const_iterator start, end;
2090 size_t &size;
2091
2092 public:
2093 op_inspector(const_iterator start, const_iterator end, size_t &size)
2094 : start(start), end(end), size(size) {}
2095 void run(World &world)
2096 {
2098 for (const_iterator it = start; it != end; ++it)
2099 bo &*it;
2100 size = bo.size();
2101 }
2102 };
2103
2104 // op_executor serializes its sub-range into a pre-sized slice of the
2105 // shared buffer; the final MADNESS_CHECK guards against the inspector
2106 // and executor disagreeing on the byte count.
2104 class op_executor : public TaskInterface
2105 {
2106 const_iterator start, end;
2107 unsigned char *buf;
2108 const size_t size;
2109
2110 public:
2111 op_executor(const_iterator start, const_iterator end, unsigned char *buf, size_t size)
2112 : start(start), end(end), buf(buf), size(size) {}
2113 void run(World &world)
2114 {
2115 BufferOutputArchive bo(buf, size);
2116 for (const_iterator it = start; it != end; ++it)
2117 {
2118 bo &*it;
2119 }
2120 MADNESS_CHECK(size == bo.size());
2121 }
2122 };
2123
2124 // No need for LOCAL fence here since only master thread is busy
2125 double wall0 = wall_time();
2126 const size_t ntasks = std::min(size_t(count), std::max(size_t(1), ThreadPool::size()));
2127 size_t local_size = 0;
2128 double wall1 = wall0;
2129 unsigned char* buf = 0;
2130 if (ntasks > 0)
2131 {
2132 const size_t max_items_per_task = (std::max(1, count) - 1) / ntasks + 1;
2133 // Compute the size of the buffer needed by each task
2134 std::vector<const_iterator> starts(ntasks), ends(ntasks);
2135 std::vector<size_t> local_sizes(ntasks);
2136 const_iterator start = t.begin();
2137 size_t nleft = count;
2138 for (size_t taskid = 0; taskid < ntasks; taskid++)
2139 {
2140 const_iterator end = start;
2141 if (taskid == (ntasks - 1))
2142 {
2143 end = t.end();
2144 }
2145 else
2146 {
2147 size_t nitems = std::min(max_items_per_task, nleft);
2148 // NOTE(review): 'end' is advanced by max_items_per_task even when
2149 // nitems (= min(max_items_per_task, nleft)) is smaller; if nleft
2150 // drops below max_items_per_task before the final task, this steps
2151 // past t.end() — should this be std::advance(end, nitems)?
2148 std::advance(end, max_items_per_task);
2149 nleft -= nitems;
2150 }
2151 starts[taskid] = start;
2152 ends[taskid] = end;
2153 world->taskq.add(new op_inspector(start, end, local_sizes[taskid])); // Be sure to pass iterators by value!!
2154 start = end;
2155 }
2156 world->taskq.fence(); // just need LOCAL fence
2157 wall1 = wall_time();
2158 // if (world->rank() == 0)
2159 // printf("time in op_inspector: %8.4fs\n", wall1 - wall0);
2160 wall0 = wall1;
2161
2162 // total size over all threads
2163 for (size_t taskid = 0; taskid < ntasks; taskid++)
2164 {
2165 local_size += local_sizes[taskid];
2166 // print("taskid",taskid,"size",local_sizes[taskid]);
2167 }
2168
2169 // Allocate the buffer for all threads
2170 buf = new unsigned char[local_size];
2171
2172 // Now execute the serialization
2173 size_t offset = 0;
2174 for (size_t taskid = 0; taskid < ntasks; taskid++)
2175 {
2176 world->taskq.add(new op_executor(starts[taskid], ends[taskid], buf + offset, local_sizes[taskid]));
2177 offset += local_sizes[taskid];
2178 }
2179 world->taskq.fence(); // just need LOCAL fence
2180
2181 wall1 = wall_time();
2182 // if (world->rank() == 0)
2183 // printf("time in op_executor: %8.4fs\n", wall1 - wall0);
2184 wall0 = wall1;
2185 }
2186 // Verify that the serialization worked!!
2187 // {
2188 // BufferInputArchive bi(buf, local_size);
2189 // for (int item=0; item<count; item++) {
2190 // std::pair<keyT, valueT> datum;
2191 // bi & datum;
2192 // print("deserializing",datum.first);
2193 // }
2194 // }
2195
2196 // Gather all buffers to process 0
2197 // first gather all of the sizes and counts to a vector in process 0
2198 // NOTE(review): local_size is narrowed to int for MPI_Gather/MPI_Gatherv;
2199 // a per-rank buffer over INT_MAX bytes would overflow here.
2198 const int size = local_size;
2199 std::vector<int> sizes(world->size());
2200 MPI_Gather(&size, 1, MPI_INT, sizes.data(), 1, MPI_INT, 0, world->mpi.comm().Get_mpi_comm());
2201 world->gop.sum(count); // just need total number of elements
2202
2203 // print("time 3",wall_time());
2204 // build the cumulative sum of sizes
2205 // sizes[] is filled in only at the root by MPI_Gather; off-root these are
2206 // the vector's value-initialized zeros, so total_size is meaningful only
2207 // on rank 0 (the only rank that uses it).
2205 std::vector<int> offsets(world->size());
2206 offsets[0] = 0;
2207 for (int i = 1; i < world->size(); ++i)
2208 offsets[i] = offsets[i - 1] + sizes[i - 1];
2209 size_t total_size = offsets.back() + sizes.back();
2210 // if (world->rank() == 0)
2211 // print("total_size", total_size);
2212
2213 // print("time 4",wall_time());
2214 // gather the vector of data v from each process to process 0
2215 unsigned char *all_data = 0;
2216 if (world->rank() == 0)
2217 {
2218 all_data = new unsigned char[total_size];
2219 }
2220 MPI_Gatherv(buf, local_size, MPI_BYTE, all_data, sizes.data(), offsets.data(), MPI_BYTE, 0, world->mpi.comm().Get_mpi_comm());
2221
2222 wall1 = wall_time();
2223 // if (world->rank() == 0)
2224 // printf("time in gather+gatherv: %8.4fs\n", wall1 - wall0);
2225 wall0 = wall1;
2226
2227 delete[] buf;
2228
2229 // print("time 5",wall_time());
2230 if (world->rank() == 0)
2231 {
2232 auto &localar = ar.local_archive();
2233 localar & magic & 1; // 1 client
2234 // localar & t;
2235 // Mimic the sequential-archive stream layout: cookie, count, data.
2236 localar & -magic &(unsigned long)(count);
2237 localar.store(all_data, total_size);
2239 wall1 = wall_time();
2240 // if (world->rank() == 0)
2241 // printf("time in final copy on node 0: %8.4fs\n", wall1 - wall0);
2242
2243 delete[] all_data;
2244 }
2245 world->gop.fence();
2246 // print("time 6",wall_time());
2247 }
2248 };
2249
2250 /// Write container to parallel archive with optional fence
2251
2252 /// \ingroup worlddc
2253 /// Each node (process) is served by a designated IO node.
2254 /// The IO node has a binary local file archive to which is
2255 /// first written a cookie and the number of servers. The IO
2256 /// node then loops thru all of its clients and in turn tells
2257 /// each to write its data over an MPI stream, which is copied
2258 /// directly to the output file. The stream contents are then
2259 /// cookie, no. of clients, foreach client (usual sequential archive).
2260 ///
2261 /// If ar.dofence() is true (default) fence is invoked before and
2262 /// after the IO. The fence is optional but it is of course
2263 /// necessary to be sure that all updates have completed
2264 /// before doing IO, and that all IO has completed before
2265 /// subsequent modifications. Also, there is always at least
2266 /// some synchronization between a client and its IO server.
2267 template <class keyT, class valueT, class localarchiveT>
2269 {
2271 {
2272 const long magic = -5881828; // Sitar Indian restaurant in Knoxville (negative to indicate parallel!)
2273 typedef WorldContainer<keyT, valueT> dcT;
2274 // typedef typename dcT::const_iterator iterator; // unused?
2275 typedef typename dcT::pairT pairT;
2276 World *world = ar.get_world();
2277 Tag tag = world->mpi.unique_tag();
2278 ProcessID me = world->rank();
2279 if (ar.dofence())
2280 world->gop.fence();
2281 if (ar.is_io_node())
2282 {
2283 auto &localar = ar.local_archive();
2284 localar & magic & ar.num_io_clients();
2285 for (ProcessID p = 0; p < world->size(); ++p)
2286 {
2287 if (p == me)
2288 {
2289 // Our own data goes straight into the local archive.
2289 localar & t;
2290 }
2291 else if (ar.io_node(p) == me)
2292 {
2293 world->mpi.Send(int(1), p, tag); // Tell client to start sending
2294 // 'source' — an MPI input archive bound to client p (its
2295 // declaration is on a source line not shown here) — streams the
2296 // client's cookie, count, and data straight into localar.
2295 long cookie = 0l;
2296 unsigned long count = 0ul;
2297
2299
2300 source & cookie & count;
2301 localar & cookie & count;
2302 while (count--)
2303 {
2304 pairT datum;
2305 source & datum;
2306 localar & datum;
2307 }
2308
2310 }
2311 }
2312 }
2313 else
2314 {
2315 // Client side: wait for the go-ahead from our IO node, then stream
2316 // the whole container to it over MPI.
2315 ProcessID p = ar.my_io_node();
2316 int flag;
2317 world->mpi.Recv(flag, p, tag);
2318 MPIOutputArchive dest(*world, p);
2319 dest & t;
2320 dest.flush();
2321 }
2322 if (ar.dofence())
2323 world->gop.fence();
2324 }
2325 };
2326
2327 template <class keyT, class valueT, class localarchiveT>
2329 {
2330 /// Read container from parallel archive
2331
2332 /// \ingroup worlddc
2333 /// See store method above for format of file content.
2334 /// !!! We presently ASSUME that the number of writers and readers are
2335 /// the same. This is frustrating but not a show stopper since you
2336 /// can always run a separate job to copy to a different number.
2337 ///
2338 /// The IO node simply reads all data and inserts entries.
2340 {
2341 const long magic = -5881828; // Sitar Indian restaurant in Knoxville (negative to indicate parallel!)
2342 // typedef WorldContainer<keyT,valueT> dcT; // unused
2343 // typedef typename dcT::iterator iterator; // unused
2344 // typedef typename dcT::pairT pairT; // unused
2345 World *world = ar.get_world();
2346 if (ar.dofence())
2347 world->gop.fence();
2348 if (ar.is_io_node())
2349 {
2350 long cookie = 0l;
2351 int nclient = 0;
2352 auto &localar = ar.local_archive();
2353 localar & cookie & nclient;
2354 MADNESS_CHECK(cookie == magic);
2355 while (nclient--)
2356 {
2357 // Each '& t' deserializes one client's block; entries are
2358 // presumably routed to their owning process during insertion
2359 // (cf. replace() in WorldContainer::serialize above).
2357 localar & t;
2358 }
2359 }
2360 if (ar.dofence())
2361 world->gop.fence();
2362 }
2363 };
2364 }
2365
2366}
2367
2368///@}
2369
2370#endif // MADNESS_WORLD_WORLDDC_H__INCLUDED
Definition safempi.h:428
Group Incl(int n, const int *ranks) const
Definition safempi.h:430
Wrapper around MPI_Comm. Has a shallow copy constructor; use Create(Get_group()) for deep copy.
Definition safempi.h:490
Intracomm Create(Group group) const
Definition safempi.h:609
MPI_Comm & Get_mpi_comm() const
Definition safempi.h:709
int unique_tag()
Returns a unique tag for temporary use (1023<tag<4095)
Definition safempi.h:830
Group Get_group() const
Definition safempi.h:702
Definition worldhashmap.h:396
size_t size() const
Definition worldhashmap.h:560
iterator begin()
Definition worldhashmap.h:571
std::pair< iterator, bool > insert(const datumT &datum)
Definition worldhashmap.h:468
const hashfunT & get_hash() const
Definition worldhashmap.h:595
bool try_erase(const keyT &key)
Definition worldhashmap.h:502
iterator end()
Definition worldhashmap.h:583
iterator find(const keyT &key)
Definition worldhashmap.h:524
void clear()
Definition worldhashmap.h:556
Implements the functionality of futures.
Definition future.h:74
T & get(bool dowork=true)
Gets/forces the value, waiting if necessary.
Definition future.h:288
A future is a possibly yet unevaluated value.
Definition future.h:369
remote_refT remote_ref(World &world) const
Returns a structure used to pass references to another process.
Definition future.h:671
Definition worldhashmap.h:330
iterator for hash
Definition worldhashmap.h:188
Range, vaguely a la Intel TBB, to encapsulate a random-access, STL-like start and end iterator with c...
Definition range.h:64
Simple structure used to manage references/pointers to remote instances.
Definition worldref.h:395
Contains attributes of a task.
Definition thread.h:329
All world tasks must be derived from this public interface.
Definition taskfn.h:69
static std::size_t size()
Returns the number of threads in the pool.
Definition thread.h:1419
Internal implementation of distributed container to facilitate shallow copy.
Definition worlddc.h:536
void erase(const keyT &key)
Definition worlddc.h:864
WorldContainerIterator< internal_iteratorT > iterator
Definition worlddc.h:551
const hashfunT & get_hash() const
Definition worlddc.h:803
bool find(const_accessor &acc, const keyT &key) const
Definition worlddc.h:952
internal_containerT::accessor accessor
Definition worlddc.h:548
void find_handler(ProcessID requestor, const keyT &key, const RemoteReference< FutureImpl< iterator > > &ref)
Handles find request.
Definition worlddc.h:575
bool probe(const keyT &key) const
Definition worlddc.h:815
void find_success_handler(const RemoteReference< FutureImpl< iterator > > &ref, const pairT &datum)
Handles successful find response.
Definition worlddc.h:591
std::pair< const keyT, valueT > pairT
Definition worlddc.h:538
bool insert_const_acc(const_accessor &acc, const keyT &key)
Definition worlddc.h:853
WorldContainerIterator< internal_const_iteratorT > const_iteratorT
Definition worlddc.h:552
bool find(accessor &acc, const keyT &key)
Definition worlddc.h:945
void redistribute_phase2()
Definition worlddc.h:1080
void insert(const pairT &datum)
Definition worlddc.h:829
WorldContainerIterator< internal_iteratorT > iteratorT
Definition worlddc.h:550
void clear()
Definition worlddc.h:859
bool insert_acc(accessor &acc, const keyT &key)
Definition worlddc.h:847
Future< iterator > find(const keyT &key)
Definition worlddc.h:930
WorldContainerImpl(World &world, const std::shared_ptr< WorldDCPmapInterface< keyT > > &pm, const hashfunT &hf)
Definition worlddc.h:611
void reset_pmap_to_local()
Definition worlddc.h:634
itemfun(const keyT &key, memfunT memfun)
Definition worlddc.h:962
const pairT const_pairT
Definition worlddc.h:539
std::vector< keyT > * move_list
Temporary used to record data that needs redistributing.
Definition worlddc.h:572
internal_containerT::iterator internal_iteratorT
Definition worlddc.h:546
WorldContainerImpl< keyT, valueT, hashfunT > implT
Definition worlddc.h:540
std::size_t size() const
Definition worlddc.h:824
void redistribute_phase1(const std::shared_ptr< WorldDCPmapInterface< keyT > > &newpmap)
Definition worlddc.h:1049
internal_containerT::const_iterator internal_const_iteratorT
Definition worlddc.h:547
std::shared_ptr< WorldDCPmapInterface< keyT > > pmap
Function/class to map from keys to owning process.
Definition worlddc.h:569
virtual ~WorldContainerImpl()
Definition worlddc.h:619
void do_replicate(World &world)
Definition worlddc.h:771
std::shared_ptr< WorldDCPmapInterface< keyT > > & get_pmap()
Definition worlddc.h:629
internal_containerT local
Locally owned data.
Definition worlddc.h:571
ConcurrentHashMap< keyT, valueT, hashfunT > internal_containerT
Definition worlddc.h:542
const_iterator begin() const
Definition worlddc.h:904
void erase(InIter it)
Definition worlddc.h:880
void replicate(bool fence)
Definition worlddc.h:643
const_iterator end() const
Definition worlddc.h:914
bool is_local(const keyT &key) const
Definition worlddc.h:805
void redistribute_phase3()
Definition worlddc.h:1094
void find_failure_handler(const RemoteReference< FutureImpl< iterator > > &ref)
Handles unsuccessful find response.
Definition worlddc.h:601
ProcessID owner(const keyT &key) const
Definition worlddc.h:810
void erase(InIter first, InIter last)
Definition worlddc.h:888
void replicate_on_hosts(bool fence)
Definition worlddc.h:656
const std::shared_ptr< WorldDCPmapInterface< keyT > > & get_pmap() const
Definition worlddc.h:624
iterator begin()
Definition worlddc.h:899
Future< const_iterator > find(const keyT &key) const
Definition worlddc.h:919
const ProcessID me
My MPI rank.
Definition worlddc.h:570
iterator end()
Definition worlddc.h:909
WorldContainerIterator< internal_const_iteratorT > const_iterator
Definition worlddc.h:553
internal_containerT::const_accessor const_accessor
Definition worlddc.h:549
Iterator for distributed container wraps the local iterator.
Definition worlddc.h:386
WorldContainerIterator(const WorldContainerIterator &other)
Definition worlddc.h:415
WorldContainerIterator(const internal_iteratorT &it)
Initializes from a local iterator.
Definition worlddc.h:405
WorldContainerIterator & operator++()
Pre-increment of an iterator (i.e., ++it) — local iterators only.
Definition worlddc.h:456
std::iterator_traits< internal_iteratorT >::iterator_category iterator_category
Definition worlddc.h:388
void copy(const WorldContainerIterator< iteratorT > &other)
Definition worlddc.h:506
std::iterator_traits< internal_iteratorT >::pointer pointer
Definition worlddc.h:391
bool operator==(const WorldContainerIterator &other) const
Determines if two iterators are identical.
Definition worlddc.h:441
const internal_iteratorT & get_internal_iterator() const
Private: (or should be) Returns iterator of internal container.
Definition worlddc.h:484
WorldContainerIterator & operator=(const WorldContainerIterator &other)
Assignment.
Definition worlddc.h:434
value_type * value
holds the remote values
Definition worlddc.h:397
bool is_cached() const
Returns true if this is non-local or cached value.
Definition worlddc.h:490
WorldContainerIterator operator++(int)
Definition worlddc.h:463
WorldContainerIterator(const value_type &v)
Initializes to cache a remote value.
Definition worlddc.h:409
pointer operator->() const
Iterators dereference to std::pair<const keyT,valueT>
Definition worlddc.h:472
std::iterator_traits< internal_iteratorT >::reference reference
Definition worlddc.h:392
void serialize(const Archive &)
Definition worlddc.h:496
std::iterator_traits< internal_iteratorT >::value_type value_type
Definition worlddc.h:389
WorldContainerIterator(const WorldContainerIterator< iteratorT > &other)
Definition worlddc.h:422
WorldContainerIterator()
Default constructor makes a local uninitialized value.
Definition worlddc.h:401
internal_iteratorT it
Iterator from local container.
Definition worlddc.h:395
reference operator*() const
Iterators dereference to std::pair<const keyT,valueT>
Definition worlddc.h:478
std::iterator_traits< internal_iteratorT >::difference_type difference_type
Definition worlddc.h:390
bool operator!=(const WorldContainerIterator &other) const
Determines if two iterators are different.
Definition worlddc.h:448
~WorldContainerIterator()
Definition worlddc.h:428
Makes a distributed container with specified attributes.
Definition worlddc.h:1127
void process_pending()
Process pending messages.
Definition worlddc.h:1453
WorldContainer(World &world, bool do_pending=true, const hashfunT &hf=hashfunT())
Makes an initialized, empty container with default data distribution (no communication)
Definition worlddc.h:1167
const hashfunT & get_hash() const
Returns a reference to the hashing functor.
Definition worlddc.h:1442
bool find(accessor &acc, const keyT &key)
Write access to LOCAL value by key. Returns true if found, false otherwise (always false for remote).
Definition worlddc.h:1274
bool probe(const keyT &key) const
Returns true if local data is immediately available (no communication)
Definition worlddc.h:1311
const_iterator begin() const
Returns an iterator to the beginning of the local data (no communication)
Definition worlddc.h:1364
void replace(const keyT &key, const valueT &value)
Inserts/replaces key+value pair (non-blocking communication if key not local)
Definition worlddc.h:1268
bool insert(const_accessor &acc, const keyT &key)
Read access to LOCAL value by key. Returns true if inserted, false if already exists (throws if remot...
Definition worlddc.h:1295
iterator begin()
Returns an iterator to the beginning of the local data (no communication)
Definition worlddc.h:1357
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const TaskAttributes &attr=TaskAttributes()) const
Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T) const" in process owning item (non-blocking ...
Definition worlddc.h:1944
WorldContainer(World &world, const std::shared_ptr< WorldDCPmapInterface< keyT > > &pmap, bool do_pending=true, const hashfunT &hf=hashfunT())
Makes an initialized, empty container (no communication)
Definition worlddc.h:1183
Future< iterator > find(const keyT &key)
Returns a future iterator (non-blocking communication if key not local)
Definition worlddc.h:1339
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6) const
Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T) const" to item (non-blocking comm ...
Definition worlddc.h:1684
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const arg7T &arg7) const
Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T,arg7T) const" to item (non-blocking...
Definition worlddc.h:1695
Future< const_iterator > const_futureT
Definition worlddc.h:1139
bool is_replicated() const
Definition worlddc.h:1227
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun(arg1T)" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1733
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5) const
Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T) const" to item (non-blocking comm if rem...
Definition worlddc.h:1674
ProcessID owner(const keyT &key) const
Returns processor that logically owns key (no communication)
Definition worlddc.h:1321
implT::const_iterator const_iterator
Definition worlddc.h:1135
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const TaskAttributes &attr=TaskAttributes()) const
Adds task "resultT memfun(arg1T,arg2T) const" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1914
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const TaskAttributes &attr=TaskAttributes()) const
Adds task "resultT memfun() const" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1894
WorldContainer()
Makes an uninitialized container (no communication)
Definition worlddc.h:1155
void serialize(const archive::BufferOutputArchive &ar)
(de)Serialize — !! ONLY for purpose of interprocess communication
Definition worlddc.h:2010
keyT key_type
Definition worlddc.h:1130
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3)
Sends message "resultT memfun(arg1T,arg2T,arg3T)" to item (non-blocking comm if remote)
Definition worlddc.h:1534
void replicate(bool fence=true)
replicates this WorldContainer on all ProcessIDs
Definition worlddc.h:1249
virtual ~WorldContainer()
Destructor passes ownership of implementation to world for deferred cleanup.
Definition worlddc.h:2040
void erase(const keyT &key)
Erases entry from container (non-blocking comm if remote)
Definition worlddc.h:1392
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5)
Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T)" to item (non-blocking comm if remote)
Definition worlddc.h:1572
void reset_pmap_to_local()
Resets the process map so that all data is treated as local (brief previously duplicated get_pmap's).
Definition worlddc.h:1436
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, const memfunT &memfun, const arg1T &arg1)
Sends message "resultT memfun(arg1T)" to item (non-blocking comm if remote)
Definition worlddc.h:1489
const uniqueidT & id() const
Returns the associated unique id ... must be initialized.
Definition worlddc.h:2033
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const arg7T &arg7, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T,arg7T)" in process owning item (non-blo...
Definition worlddc.h:1874
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun(arg1T,arg2T)" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1754
WorldContainerImpl< keyT, valueT, hashfunT > implT
Definition worlddc.h:1132
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2) const
Sends message "resultT memfun(arg1T,arg2T) const" to item (non-blocking comm if remote)
Definition worlddc.h:1644
void replace(const pairT &datum)
Inserts/replaces key+value pair (non-blocking communication if key not local)
Definition worlddc.h:1261
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const TaskAttributes &attr=TaskAttributes()) const
Adds task "resultT memfun(arg1T,arg2T,arg3T) const" in process owning item (non-blocking comm if remo...
Definition worlddc.h:1924
iterator end()
Returns an iterator past the end of the local data (no communication)
Definition worlddc.h:1371
const std::shared_ptr< WorldDCPmapInterface< keyT > > & get_pmap() const
Returns shared pointer to the process mapping.
Definition worlddc.h:1429
std::shared_ptr< WorldDCPmapInterface< keyT > > & get_impl()
Definition worlddc.h:1242
void replace(input_iterator &start, input_iterator &end)
Inserts pairs (non-blocking communication if key(s) not local)
Definition worlddc.h:1303
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const TaskAttributes &attr=TaskAttributes()) const
Adds task "resultT memfun(arg1T) const" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1904
bool insert(accessor &acc, const keyT &key)
Write access to LOCAL value by key. Returns true if inserted, false if already exists (throws if remo...
Definition worlddc.h:1288
Future< iterator > futureT
Definition worlddc.h:1138
void erase(const iterator &it)
Erases entry corresponding to local iterator (no communication)
Definition worlddc.h:1399
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const TaskAttributes &attr=TaskAttributes()) const
Adds task "resultT memfun(arg1T,arg2T,arg3T, arg4T) const" in process owning item (non-blocking comm ...
Definition worlddc.h:1934
void erase(const iterator &start, const iterator &finish)
Erases range defined by local iterators (no communication)
Definition worlddc.h:1406
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4) const
Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T) const" to item (non-blocking comm if remote)
Definition worlddc.h:1664
bool is_distributed() const
Definition worlddc.h:1223
WorldContainer(const WorldContainer &other)
Copy constructor is shallow (no communication)
Definition worlddc.h:1197
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T)" in process owning item (non-blocking ...
Definition worlddc.h:1848
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const arg7T &arg7)
Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T,arg7T)" to item (non-blocking comm ...
Definition worlddc.h:1610
World & get_world() const
Returns the world associated with this container.
Definition worlddc.h:1236
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun) const
Sends message "resultT memfun() const" to item (non-blocking comm if remote)
Definition worlddc.h:1624
implT::iterator iterator
Definition worlddc.h:1134
DistributionType get_distribution_type() const
return the way data is distributed
Definition worlddc.h:1218
implT::pairT pairT
Definition worlddc.h:1133
std::size_t size() const
Returns the number of local entries (no communication)
Definition worlddc.h:1422
containerT & operator=(const containerT &other)
Assignment is shallow (no communication)
Definition worlddc.h:1207
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T)" in process owning item (non-blocking comm i...
Definition worlddc.h:1823
bool find(const_accessor &acc, const keyT &key) const
Read access to LOCAL value by key. Returns true if found, false otherwise (always false for remote).
Definition worlddc.h:1281
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun(arg1T,arg2T,arg3T)" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1776
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4)
Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T)" to item (non-blocking comm if remote)
Definition worlddc.h:1553
void serialize(const archive::BufferInputArchive &ar)
(de)Serialize — !! ONLY for purpose of interprocess communication
Definition worlddc.h:2019
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun()" in process owning item (non-blocking comm if remote)
Definition worlddc.h:1713
bool is_local(const keyT &key) const
Returns true if the key maps to the local processor (no communication)
Definition worlddc.h:1328
const_iterator end() const
Returns an iterator past the end of the local data (no communication)
Definition worlddc.h:1378
void serialize(const Archive &ar)
(de)Serialize — Local data only to/from anything except Buffer*Archive and Parallel*Archive
Definition worlddc.h:1973
bool is_host_replicated() const
Definition worlddc.h:1231
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const TaskAttributes &attr=TaskAttributes()) const
Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T) const" in process owning item (non-blo...
Definition worlddc.h:1954
void clear()
Clears all local data (no communication)
Definition worlddc.h:1415
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const TaskAttributes &attr=TaskAttributes())
Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T)" in process owning item (non-blocking comm if remo...
Definition worlddc.h:1799
Future< MEMFUN_RETURNT(memfunT)> send(const keyT &key, memfunT memfun)
Sends message "resultT memfun()" to item (non-blocking comm if remote)
Definition worlddc.h:1470
implT::const_accessor const_accessor
Definition worlddc.h:1137
std::shared_ptr< implT > p
Definition worlddc.h:1142
Future< const_iterator > find(const keyT &key) const
Returns a future iterator (non-blocking communication if key not local)
Definition worlddc.h:1350
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3) const
Sends message "resultT memfun(arg1T,arg2T,arg3T) const" to item (non-blocking comm if remote)
Definition worlddc.h:1654
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6)
Sends message "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T)" to item (non-blocking comm if rem...
Definition worlddc.h:1591
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2)
Sends message "resultT memfun(arg1T,arg2T)" to item (non-blocking comm if remote)
Definition worlddc.h:1512
void replicate_on_hosts(bool fence=true)
replicates this WorldContainer on all hosts (one PID per host)
Definition worlddc.h:1255
void check_initialized() const
Definition worlddc.h:1144
WorldContainer< keyT, valueT, hashfunT > containerT
Definition worlddc.h:1131
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> task(const keyT &key, memfunT memfun, const arg1T &arg1, const arg2T &arg2, const arg3T &arg3, const arg4T &arg4, const arg5T &arg5, const arg6T &arg6, const arg7T &arg7, const TaskAttributes &attr=TaskAttributes()) const
Adds task "resultT memfun(arg1T,arg2T,arg3T,arg4T,arg5T,arg6T,arg7T) const" in process owning item (n...
Definition worlddc.h:1964
Future< REMFUTURE(MEMFUN_RETURNT(memfunT))> send(const keyT &key, memfunT memfun, const arg1T &arg1) const
Sends message "resultT memfun(arg1T) const" to item (non-blocking comm if remote)
Definition worlddc.h:1634
implT::accessor accessor
Definition worlddc.h:1136
Default process map is "random" using madness::hash(key)
Definition worlddc.h:250
ProcessID owner(const keyT &key) const
Maps key to processor.
Definition worlddc.h:261
WorldDCDefaultPmap(World &world, const hashfunT &hf=hashfunT())
Definition worlddc.h:256
const int nproc
Definition worlddc.h:252
hashfunT hashfun
Definition worlddc.h:253
Local process map will always return the current process as owner.
Definition worlddc.h:274
ProcessID owner(const keyT &key) const override
Maps key to processor.
Definition worlddc.h:280
WorldDCLocalPmap(World &world)
Definition worlddc.h:279
ProcessID me
Definition worlddc.h:276
DistributionType distribution_type() const override
by default the map is distributed
Definition worlddc.h:285
Definition worlddc.h:295
DistributionType distribution_type() const override
by default the map is distributed
Definition worlddc.h:311
WorldDCNodeReplicatedPmap(World &world, const std::map< std::string, std::vector< long > > ranks_per_host)
Definition worlddc.h:302
ProcessID myowner
Definition worlddc.h:296
ProcessID owner(const keyT &key) const override
owner is the lowest rank on the node, same for all keys
Definition worlddc.h:307
Interface to be provided by any process map.
Definition worlddc.h:122
void redistribute(World &world, const std::shared_ptr< WorldDCPmapInterface< keyT > > &newpmap)
Invoking this switches all registered objects from this process map to the new one.
Definition worlddc.h:169
virtual DistributionType distribution_type() const
by default the map is distributed
Definition worlddc.h:142
void print_data_sizes(World &world, const std::string msg="") const
Prints size info to std::cout.
Definition worlddc.h:225
std::size_t global_size(World &world) const
Counts global number of entries in all containers associated with this process map.
Definition worlddc.h:202
virtual ProcessID owner(const keyT &key) const =0
Maps key to processor.
void deregister_callback(ptrT ptr)
Deregisters object for receipt of redistribute callbacks.
Definition worlddc.h:158
void register_callback(ptrT ptr)
Registers object for receipt of redistribute callbacks.
Definition worlddc.h:150
virtual void print() const
Definition worlddc.h:139
std::size_t local_size() const
Counts local number of entries in all containers associated with this process map.
Definition worlddc.h:212
std::set< ptrT > ptrs
Definition worlddc.h:128
virtual ~WorldDCPmapInterface()
Definition worlddc.h:137
WorldDCRedistributeInterface< keyT > * ptrT
Definition worlddc.h:124
virtual std::size_t size() const =0
virtual void redistribute_phase1(const std::shared_ptr< WorldDCPmapInterface< keyT > > &newmap)=0
virtual ~WorldDCRedistributeInterface()
Definition worlddc.h:77
void broadcast_serializable(objT &obj, ProcessID root)
Broadcast a serializable object.
Definition worldgop.h:756
void fence(bool debug=false)
Synchronizes all processes in communicator AND globally ensures no pending AM or tasks.
Definition worldgop.cc:161
void broadcast(void *buf, size_t nbyte, ProcessID root, bool dowork=true, Tag bcast_tag=-1)
Broadcasts bytes from process root while still processing AM & tasks.
Definition worldgop.cc:173
std::vector< T > concat0(const std::vector< T > &v, size_t bufsz=1024 *1024)
Concatenate an STL vector of serializable stuff onto node 0.
Definition worldgop.h:955
void sum(T *buf, size_t nelem)
Inplace global sum while still processing AM & tasks.
Definition worldgop.h:872
SafeMPI::Intracomm & comm()
Returns the associated SafeMPI communicator.
Definition worldmpi.h:286
void Send(const T *buf, long lenbuf, int dest, int tag=SafeMPI::DEFAULT_SEND_RECV_TAG) const
Send array of lenbuf elements to process dest.
Definition worldmpi.h:347
void Recv(T *buf, long lenbuf, int src, int tag) const
Receive data of up to lenbuf elements from process src.
Definition worldmpi.h:374
Implements most parts of a globally addressable object (via unique ID).
Definition world_object.h:366
World & get_world() const
Returns a reference to the world.
Definition world_object.h:719
World & world
The World this object belongs to. (Think globally, act locally).
Definition world_object.h:383
detail::task_result_type< memfnT >::futureT send(ProcessID dest, memfnT memfn) const
Definition world_object.h:733
Future< bool > for_each(const rangeT &range, const opT &op)
Apply op(item) on all items in range.
Definition world_task_queue.h:572
void add(TaskInterface *t)
Add a new local task, taking ownership of the pointer.
Definition world_task_queue.h:466
void fence()
Returns after all local tasks have completed.
Definition world_task_queue.h:1384
A parallel world class.
Definition world.h:132
WorldTaskQueue & taskq
Task queue.
Definition world.h:206
ProcessID rank() const
Returns the process rank in this World (same as MPI_Comm_rank()).
Definition world.h:320
WorldMpiInterface & mpi
MPI interface.
Definition world.h:204
ProcessID size() const
Returns the number of processes in this World (same as MPI_Comm_size()).
Definition world.h:330
WorldGopInterface & gop
Global operations.
Definition world.h:207
bool dofence() const
Check if we should fence around a read/write operation.
Definition parallel_archive.h:295
World * get_world() const
Returns a pointer to the world.
Definition parallel_archive.h:130
bool is_io_node() const
Returns true if this node is doing physical I/O.
Definition parallel_archive.h:122
int num_io_clients() const
Returns the number of I/O clients for this node, including self (zero if not an I/O node).
Definition parallel_archive.h:114
ProcessID io_node(ProcessID rank) const
Returns the process doing I/O for given node.
Definition parallel_archive.h:99
ProcessID my_io_node() const
Returns the process doing I/O for this node.
Definition parallel_archive.h:106
Archive & local_archive() const
Returns a reference to the local archive.
Definition parallel_archive.h:248
Wraps an archive around a memory buffer for input.
Definition buffer_archive.h:134
Wraps an archive around a memory buffer for output.
Definition buffer_archive.h:59
std::size_t size() const
Return the amount of data stored (counted) in the buffer.
Definition buffer_archive.h:123
Archive allowing buffering, deserialization of data, and point-to-point communication between process...
Definition mpi_archive.h:180
Archive allowing buffering, serialization of data, and point-to-point communication between processes...
Definition mpi_archive.h:118
void flush() const
Send all data in the buffer to the destination process.
Definition mpi_archive.h:158
An archive for storing local or parallel data, wrapping a BinaryFstreamInputArchive.
Definition parallel_archive.h:366
An archive for storing local or parallel data wrapping a BinaryFstreamOutputArchive.
Definition parallel_archive.h:321
Objects that implement their own parallel archive interface should derive from this class.
Definition parallel_archive.h:58
Wraps an archive around an STL vector for output.
Definition vector_archive.h:55
Class for unique global IDs.
Definition uniqueid.h:53
char * p(char *buf, const char *name, int k, int initial_level, double thresh, int order)
Definition derivatives.cc:72
void run(World &world, ansatzT ansatz, const int nuclear_charge, const commandlineparser &parser, const int nstates)
Definition dirac-hatom.cc:1392
static void load(const ParallelInputArchive< localarchiveT > &ar, WorldContainer< keyT, valueT > &t)
Read container from parallel archive.
Definition worlddc.h:2339
static const double v
Definition hatom_sf_dirac.cc:20
#define MADNESS_CHECK(condition)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:182
#define MADNESS_EXCEPTION(msg, value)
Macro for throwing a MADNESS exception.
Definition madness_exception.h:119
#define MADNESS_ASSERT(condition)
Assert a condition that should be free of side-effects since in release builds this might be a no-op.
Definition madness_exception.h:134
#define MADNESS_CHECK_THROW(condition, msg)
Check a condition — even in a release build the condition is always evaluated so it can have side eff...
Definition madness_exception.h:207
Implements archives to serialize data for MPI.
void deferred_cleanup(World &world, const std::shared_ptr< objT > &p, bool assume_p_is_unique=false)
Defer the cleanup of a shared pointer to the end of the next fence.
Definition deferred_cleanup.h:135
Namespace for all elements and tools of MADNESS.
Definition DFParameters.h:10
DistributionType from_string(std::string type)
Definition worlddc.h:103
std::ostream & operator<<(std::ostream &os, const particle< PDIM > &p)
Definition lowrankfunction.h:401
DistributionType
some introspection of how data is distributed
Definition worlddc.h:81
@ NodeReplicated
even if there are several ranks per node
Definition worlddc.h:84
@ Distributed
no replication of the container, the container is distributed over the world
Definition worlddc.h:82
@ RankReplicated
replicate the container over all world ranks
Definition worlddc.h:83
std::string to_string(const DistributionType type)
Definition worlddc.h:97
std::vector< int > primary_ranks_per_host(World &world, const std::map< std::string, std::vector< long > > &ranks_per_host1)
Definition ranks_and_hosts.cpp:86
DistributionType validate_distribution_type(const dcT &dc)
check distribution type of WorldContainer – global communication
Definition worlddc.h:319
void print(const T &t, const Ts &... ts)
Print items to std::cout (items separated by spaces) and terminate with a new line.
Definition print.h:226
NDIM & f
Definition mra.h:2528
double wall_time()
Returns the wall time in seconds relative to an arbitrary origin.
Definition timers.cc:48
std::string type(const PairType &n)
Definition PNOParameters.h:18
long lowest_rank_on_host_of_rank(const std::map< std::string, std::vector< long > > ranks_per_host1, int rank)
Definition ranks_and_hosts.cpp:73
std::map< std::string, std::vector< long > > ranks_per_host(World &universe)
for each host, return a list of its ranks
Definition ranks_and_hosts.cpp:52
void swap(Function< R, MDIM > &f1, Function< R, MDIM > &f2)
Definition mra.h:2860
Definition mraimpl.h:51
void advance(madness::Hash_private::HashIterator< hashT > &it, const distT &dist)
Definition worldhashmap.h:610
Implements ParallelInputArchive and ParallelOutputArchive for parallel serialization of data.
static const double c
Definition relops.cc:10
Definition test_dc.cc:47
Definition worlddc.h:1061
bool operator()(typename rangeT::iterator &iterator) const
Definition worlddc.h:1066
implT * impl
Definition worlddc.h:1062
P2Op(const P2Op &p)
Definition worlddc.h:1065
P2Op(implT *impl)
Definition worlddc.h:1064
Range< typename std::vector< keyT >::const_iterator > rangeT
Definition worlddc.h:1063
Default load of an object via serialize(ar, t).
Definition archive.h:667
static void postamble_store(const Archive &)
By default there is no postamble.
Definition archive.h:546
static void preamble_store(const Archive &ar)
Serialize a cookie for type checking.
Definition archive.h:536
static void store(const ParallelOutputArchive< VectorOutputArchive > &ar, const WorldContainer< keyT, valueT > &t)
Definition worlddc.h:2069
static void store(const ParallelOutputArchive< localarchiveT > &ar, const WorldContainer< keyT, valueT > &t)
Definition worlddc.h:2270
Default store of an object via serialize(ar, t).
Definition archive.h:612
#define MPI_INT
Definition stubmpi.h:81
#define MPI_BYTE
Definition stubmpi.h:77
int MPI_Gatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, const int recvcounts[], const int displs[], MPI_Datatype recvtype, int root, MPI_Comm)
Definition stubmpi.h:218
int MPI_Gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
Definition stubmpi.h:233
AtomicInt sum
Definition test_atomicint.cc:46
std::pair< int, double > valueT
Definition test_binsorter.cc:6
int me
Definition test_binsorter.cc:10
const double offset
Definition testfuns.cc:143
double source(const coordT &r)
Definition testperiodic.cc:48
#define REMFUTURE(T)
Macro to determine type of future (by removing wrapping Future template).
Definition type_traits.h:163
#define MEMFUN_RETURNT(MEMFUN)
Macro to make member function type traits easier to use.
Definition type_traits.h:773
Defines and implements WorldObject.
Defines and implements a concurrent hashmap.
int ProcessID
Used to clearly identify process number/rank.
Definition worldtypes.h:43
int Tag
Used to clearly identify message tag/type.
Definition worldtypes.h:44