MFEM v4.6.0
Finite element discretization library
communication.hpp
1 // Copyright (c) 2010-2023, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #ifndef MFEM_COMMUNICATION
13 #define MFEM_COMMUNICATION
14 
15 #include "../config/config.hpp"
16 
17 #ifdef MFEM_USE_MPI
18 
19 #include "array.hpp"
20 #include "table.hpp"
21 #include "sets.hpp"
22 #include "globals.hpp"
23 #include <mpi.h>
24 
25 
26 namespace mfem
27 {
28 
29 /** @brief A simple singleton class that calls MPI_Init() at construction and
30  MPI_Finalize() at destruction. It also provides easy access to
31  MPI_COMM_WORLD's rank and size. */
32 class Mpi
33 {
34 public:
35  /// Singleton creation with Mpi::Init();
36  static void Init() { Init_(NULL, NULL); }
37  /// Singleton creation with Mpi::Init(argc,argv);
38  static void Init(int &argc, char **&argv) { Init_(&argc, &argv); }
39  /// Finalize MPI (if it has been initialized and not yet finalized).
40  static void Finalize()
41  {
42  if (IsInitialized() && !IsFinalized()) { MPI_Finalize(); }
43  }
44  /// Return true if MPI has been initialized.
45  static bool IsInitialized()
46  {
47  int mpi_is_initialized;
48  int mpi_err = MPI_Initialized(&mpi_is_initialized);
49  return (mpi_err == MPI_SUCCESS) && mpi_is_initialized;
50  }
51  /// Return true if MPI has been finalized.
52  static bool IsFinalized()
53  {
54  int mpi_is_finalized;
55  int mpi_err = MPI_Finalized(&mpi_is_finalized);
56  return (mpi_err == MPI_SUCCESS) && mpi_is_finalized;
57  }
58  /// Return the MPI rank in MPI_COMM_WORLD.
59  static int WorldRank()
60  {
61  int world_rank;
62  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
63  return world_rank;
64  }
65  /// Return the size of MPI_COMM_WORLD.
66  static int WorldSize()
67  {
68  int world_size;
69  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
70  return world_size;
71  }
72  /// Return true if the rank in MPI_COMM_WORLD is zero.
73  static bool Root() { return WorldRank() == 0; }
74 private:
75  /// Initialize MPI
76  static void Init_(int *argc, char ***argv)
77  {
78  MFEM_VERIFY(!IsInitialized(), "MPI already initialized!")
79  MPI_Init(argc, argv);
80  // The "mpi" object below needs to be created after MPI_Init() for some
81  // MPI implementations
82  static Mpi mpi;
83  }
84  /// Finalize MPI
85  ~Mpi() { Finalize(); }
86  /// Prevent direct construction of objects of this class
87  Mpi() { }
88 };
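// Usage sketch (illustrative; not part of this header): a typical MFEM driver
// initializes MPI once through the Mpi singleton, after which MPI_Finalize()
// runs automatically at program exit. Only members declared above are used.
//
//    int main(int argc, char *argv[])
//    {
//       mfem::Mpi::Init(argc, argv);
//       if (mfem::Mpi::Root())
//       {
//          mfem::out << "MPI ranks: " << mfem::Mpi::WorldSize() << std::endl;
//       }
//       // ... parallel work ...
//       return 0; // no explicit Mpi::Finalize() call is needed
//    }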
89 
90 /** @brief A simple convenience class based on the Mpi singleton class above.
91  Preserved for backward compatibility. New code should use Mpi::Init() and
92  other Mpi methods instead. */
93 class MPI_Session
94 {
95 public:
96  MPI_Session() { Mpi::Init(); }
97  MPI_Session(int &argc, char **&argv) { Mpi::Init(argc, argv); }
98  /// Return MPI_COMM_WORLD's rank.
99  int WorldRank() const { return Mpi::WorldRank(); }
100  /// Return MPI_COMM_WORLD's size.
101  int WorldSize() const { return Mpi::WorldSize(); }
102  /// Return true if WorldRank() == 0.
103  bool Root() const { return Mpi::Root(); }
104 };
105 
106 
107 /** The shared entities (e.g. vertices, faces and edges) are split into groups,
108  each group determined by the set of participating processors. They are
109  numbered locally in lproc. Assumptions:
110  - group 0 is the 'local' group
111  - groupmaster_lproc[0] = 0
112  - lproc_proc[0] = MyRank */
113 class GroupTopology
114 {
115 private:
116  MPI_Comm MyComm;
117 
118  /// Neighbor ids (lproc) in each group.
119  Table group_lproc;
120  /// Master neighbor id for each group.
121  Array<int> groupmaster_lproc;
122  /// MPI rank of each neighbor.
123  Array<int> lproc_proc;
124  /// Group --> Group number in the master.
125  Array<int> group_mgroup;
126 
127  void ProcToLProc();
128 
129 public:
130  /// Constructor with the MPI communicator = 0.
131  GroupTopology() : MyComm(0) {}
132 
133  /// Constructor given the MPI communicator 'comm'.
134  GroupTopology(MPI_Comm comm) { MyComm = comm; }
135 
136  /// Copy constructor
137  GroupTopology(const GroupTopology &gt);
138 
139  /// Set the MPI communicator to 'comm'.
140  void SetComm(MPI_Comm comm) { MyComm = comm; }
141 
142  /// Return the MPI communicator.
143  MPI_Comm GetComm() const { return MyComm; }
144 
145  /// Return the MPI rank within this object's communicator.
146  int MyRank() const { int r; MPI_Comm_rank(MyComm, &r); return r; }
147 
148  /// Return the number of MPI ranks within this object's communicator.
149  int NRanks() const { int s; MPI_Comm_size(MyComm, &s); return s; }
150 
151  /// Set up the group topology given the list of sets of shared entities.
152  void Create(ListOfIntegerSets &groups, int mpitag);
153 
154  /// Return the number of groups.
155  int NGroups() const { return group_lproc.Size(); }
156 
157  /// Return the number of neighbors including the local processor.
158  int GetNumNeighbors() const { return lproc_proc.Size(); }
159 
160  /// Return the MPI rank of neighbor 'i'.
161  int GetNeighborRank(int i) const { return lproc_proc[i]; }
162 
163  /// Return true if I am master for group 'g'.
164  bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }
165 
166  /** @brief Return the neighbor index of the group master for a given group.
167  Neighbor 0 is the local processor. */
168  int GetGroupMaster(int g) const { return groupmaster_lproc[g]; }
169 
170  /// Return the rank of the group master for group 'g'.
171  int GetGroupMasterRank(int g) const
172  { return lproc_proc[groupmaster_lproc[g]]; }
173 
174  /// Return the group number in the master for group 'g'.
175  int GetGroupMasterGroup(int g) const { return group_mgroup[g]; }
176 
177  /// Get the number of processors in a group
178  int GetGroupSize(int g) const { return group_lproc.RowSize(g); }
179 
180  /** @brief Return a pointer to a list of neighbors for a given group.
181  Neighbor 0 is the local processor */
182  const int *GetGroup(int g) const { return group_lproc.GetRow(g); }
183 
184  /// Save the data in a stream.
185  void Save(std::ostream &out) const;
186 
187  /// Load the data from a stream.
188  void Load(std::istream &in);
189 
190  /// Copy the internal data to the external 'copy'.
191  void Copy(GroupTopology & copy) const;
192 
193  /// Swap the internal data with another @a GroupTopology object.
194  void Swap(GroupTopology &other);
195 
196  virtual ~GroupTopology() {}
197 };
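// Query sketch (illustrative): given an already constructed GroupTopology
// (for example, one set up with Create() above), the accessors of this class
// can be used to inspect the shared groups. Only methods declared above are
// assumed; the function name PrintGroups is a placeholder.
//
//    void PrintGroups(const mfem::GroupTopology &gt)
//    {
//       for (int g = 1; g < gt.NGroups(); g++) // group 0 is the local group
//       {
//          mfem::out << "group " << g << ": size " << gt.GetGroupSize(g)
//                    << ", master rank " << gt.GetGroupMasterRank(g)
//                    << (gt.IAmMaster(g) ? " (this rank)" : "") << '\n';
//       }
//    }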
198 
199 /** @brief Communicator performing operations within groups defined by a
200  GroupTopology with arbitrary-size data associated with each group. */
201 class GroupCommunicator
202 {
203 public:
204  /// Communication mode.
205  enum Mode
206  {
207  byGroup, ///< Communications are performed one group at a time.
208  byNeighbor /**< Communications are performed one neighbor at a time,
209  aggregating over groups. */
210  };
211 
212 protected:
213  const GroupTopology &gtopo;
214  Mode mode;
215  Table group_ldof;
216  Table group_ltdof; // only for groups for which this processor is master.
217  int group_buf_size;
218  mutable Array<char> group_buf;
219  MPI_Request *requests;
220  // MPI_Status *statuses;
221  // comm_lock: 0 - no lock, 1 - locked for Bcast, 2 - locked for Reduce
222  mutable int comm_lock;
223  mutable int num_requests;
224  int *request_marker;
225  int *buf_offsets; // size = max(number of groups, number of neighbors)
226  Table nbr_send_groups, nbr_recv_groups; // nbr 0 = me
227 
228 public:
229  /// Construct a GroupCommunicator object.
230  /** The object must be initialized before it can be used to perform any
231  operations. To initialize the object, either
232  - call Create() or
233  - initialize the Table reference returned by GroupLDofTable() and then
234  call Finalize().
235  */
236  GroupCommunicator(const GroupTopology &gt, Mode m = byNeighbor);
237 
238  /** @brief Initialize the communicator from a local-dof to group map.
239  Finalize() is called internally. */
240  void Create(const Array<int> &ldof_group);
241 
242  /** @brief Fill in the returned Table reference to initialize the
243  GroupCommunicator, then call Finalize(). */
244  Table &GroupLDofTable() { return group_ldof; }
245 
246  /// Read-only access to group-ldof Table.
247  const Table &GroupLDofTable() const { return group_ldof; }
248 
249  /// Allocate internal buffers after the GroupLDofTable is defined
250  void Finalize();
251 
252  /// Initialize the internal group_ltdof Table.
253  /** This method must be called before performing operations that use local
254  data layout 2, see CopyGroupToBuffer() for layout descriptions. */
255  void SetLTDofTable(const Array<int> &ldof_ltdof);
256 
257  /// Get a reference to the associated GroupTopology object
258  const GroupTopology &GetGroupTopology() { return gtopo; }
259 
260  /// Get a const reference to the associated GroupTopology object
261  const GroupTopology &GetGroupTopology() const { return gtopo; }
262 
263  /// Dofs to be sent to communication neighbors
264  void GetNeighborLTDofTable(Table &nbr_ltdof) const;
265 
266  /// Dofs to be received from communication neighbors
267  void GetNeighborLDofTable(Table &nbr_ldof) const;
268 
269  /** @brief Data structure on which we define reduce operations.
270  The data is associated with (and the operation is performed on) one
271  group at a time. */
272  template <class T> struct OpData
273  {
274  int nldofs, nb;
275  const int *ldofs;
276  T *ldata, *buf;
277  };
278 
279  /** @brief Copy the entries corresponding to the group @a group from the
280  local array @a ldata to the buffer @a buf. */
281  /** The @a layout of the local array can be:
282  - 0 - @a ldata is an array on all ldofs: copied indices:
283  `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
284  - 1 - @a ldata is an array on the shared ldofs: copied indices:
285  `{ j : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
286  - 2 - @a ldata is an array on the true ldofs, ltdofs: copied indices:
287  `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ltdof.{I,J}`.
288  @returns The pointer @a buf plus the number of elements in the group. */
289  template <class T>
290  T *CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const;
291 
292  /** @brief Copy the entries corresponding to the group @a group from the
293  buffer @a buf to the local array @a ldata. */
294  /** For a description of @a layout, see CopyGroupToBuffer().
295  @returns The pointer @a buf plus the number of elements in the group. */
296  template <class T>
297  const T *CopyGroupFromBuffer(const T *buf, T *ldata, int group,
298  int layout) const;
299 
300  /** @brief Perform the reduction operation @a Op to the entries of group
301  @a group using the values from the buffer @a buf and the values from the
302  local array @a ldata, saving the result in the latter. */
303  /** For a description of @a layout, see CopyGroupToBuffer().
304  @returns The pointer @a buf plus the number of elements in the group. */
305  template <class T>
306  const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group,
307  int layout, void (*Op)(OpData<T>)) const;
308 
309  /// Begin a broadcast within each group where the master is the root.
310  /** For a description of @a layout, see CopyGroupToBuffer(). */
311  template <class T> void BcastBegin(T *ldata, int layout) const;
312 
313  /** @brief Finalize a broadcast started with BcastBegin().
314 
315  The output data @a layout can be:
316  - 0 - @a ldata is an array on all ldofs; the input layout should be
317  either 0 or 2
318  - 1 - @a ldata is the same array as given to BcastBegin(); the input
319  layout should be 1.
320 
321  For more details about @a layout, see CopyGroupToBuffer(). */
322  template <class T> void BcastEnd(T *ldata, int layout) const;
323 
324  /** @brief Broadcast within each group where the master is the root.
325 
326  The data @a layout can be either 0 or 1.
327 
328  For a description of @a layout, see CopyGroupToBuffer(). */
329  template <class T> void Bcast(T *ldata, int layout) const
330  {
331  BcastBegin(ldata, layout);
332  BcastEnd(ldata, layout);
333  }
334 
335  /// Broadcast within each group where the master is the root.
336  template <class T> void Bcast(T *ldata) const { Bcast<T>(ldata, 0); }
337  /// Broadcast within each group where the master is the root.
338  template <class T> void Bcast(Array<T> &ldata) const
339  { Bcast<T>((T *)ldata); }
340 
341  /** @brief Begin reduction operation within each group where the master is
342  the root. */
343  /** The input data layout is an array on all ldofs, i.e. layout 0, see
344  CopyGroupToBuffer().
345 
346  The reduce operation will be specified when calling ReduceEnd(). This
347  method is instantiated for int and double. */
348  template <class T> void ReduceBegin(const T *ldata) const;
349 
350  /** @brief Finalize reduction operation started with ReduceBegin().
351 
352  The output data @a layout can be either 0 or 2, see CopyGroupToBuffer().
353 
354  The reduce operation is given by the third argument (see below for the
355  list of supported operations). This method is instantiated for int and
356  double.
357 
358  @note If the output data layout is 2, then the data from the @a ldata
359  array passed to this call is used in the reduction operation, instead of
360  the data from the @a ldata array passed to ReduceBegin(). Therefore, the
361  data for master-groups has to be identical in both arrays.
362  */
363  template <class T> void ReduceEnd(T *ldata, int layout,
364  void (*Op)(OpData<T>)) const;
365 
366  /** @brief Reduce within each group where the master is the root.
367 
368  The reduce operation is given by the second argument (see below for the
369  list of supported operations). */
370  template <class T> void Reduce(T *ldata, void (*Op)(OpData<T>)) const
371  {
372  ReduceBegin(ldata);
373  ReduceEnd(ldata, 0, Op);
374  }
375 
376  /// Reduce within each group where the master is the root.
377  template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>)) const
378  { Reduce<T>((T *)ldata, Op); }
379 
380  /// Reduce operation Sum, instantiated for int and double
381  template <class T> static void Sum(OpData<T>);
382  /// Reduce operation Min, instantiated for int and double
383  template <class T> static void Min(OpData<T>);
384  /// Reduce operation Max, instantiated for int and double
385  template <class T> static void Max(OpData<T>);
386  /// Reduce operation bitwise OR, instantiated for int only
387  template <class T> static void BitOR(OpData<T>);
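// Sketch of a user-defined reduce operation (illustrative): it follows the
// same signature as the built-in operations above and assumes the buffer
// holds 'nb' contiguous blocks of 'nldofs' entries each, as suggested by the
// OpData fields; see ReduceGroupFromBuffer() in communication.cpp for the
// authoritative buffer layout. The name AbsMax is a placeholder (needs
// <cmath>/<cstdlib> for std::abs).
//
//    template <class T>
//    static void AbsMax(mfem::GroupCommunicator::OpData<T> opd)
//    {
//       for (int i = 0; i < opd.nldofs; i++)
//       {
//          T v = opd.ldata[opd.ldofs[i]];
//          for (int j = 0; j < opd.nb; j++)
//          {
//             const T b = opd.buf[j*opd.nldofs + i]; // j-th neighbor block
//             if (std::abs(b) > std::abs(v)) { v = b; }
//          }
//          opd.ldata[opd.ldofs[i]] = v;
//       }
//    }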
388 
389  /// Print information about the GroupCommunicator from all MPI ranks.
390  void PrintInfo(std::ostream &out = mfem::out) const;
391 
392  /** @brief Destroy a GroupCommunicator object, deallocating internal data
393  structures and buffers. */
394  ~GroupCommunicator();
395 };
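// End-to-end sketch (illustrative): synchronize shared degrees of freedom
// with a GroupCommunicator built from an existing GroupTopology and a
// local-dof-to-group map. The names SyncShared, 'gt', 'ldof_group' and
// 'ldata' are placeholders; only members declared above are used.
//
//    void SyncShared(const mfem::GroupTopology &gt,
//                    const mfem::Array<int> &ldof_group, // group of each ldof
//                    mfem::Array<double> &ldata)         // data in layout 0
//    {
//       mfem::GroupCommunicator gcomm(gt);
//       gcomm.Create(ldof_group); // calls Finalize() internally
//       // accumulate the contributions from all group members on the master,
//       gcomm.Reduce<double>(ldata, mfem::GroupCommunicator::Sum<double>);
//       // then broadcast the master's values back to every group member
//       gcomm.Bcast(ldata);
//    }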
396 
397 
398 /// \brief Variable-length MPI message containing arbitrary binary data.
399 template<int Tag>
400 struct VarMessage
401 {
402  std::string data;
403  MPI_Request send_request;
404 
405  /** @brief Non-blocking send to processor 'rank'.
406  Returns immediately. Completion (as tested by MPI_Wait/Test) does not
407  mean the message was received -- it may be on its way or just buffered
408  locally. */
409  void Isend(int rank, MPI_Comm comm)
410  {
411  Encode(rank);
412  MPI_Isend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
413  &send_request);
414  }
415 
416  /** @brief Non-blocking synchronous send to processor 'rank'.
417  Returns immediately. Completion (MPI_Wait/Test) means that the message
418  was received. */
419  void Issend(int rank, MPI_Comm comm)
420  {
421  Encode(rank);
422  MPI_Issend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
423  &send_request);
424  }
425 
426  /// Helper to send all messages in a rank-to-message map container.
427  template<typename MapT>
428  static void IsendAll(MapT& rank_msg, MPI_Comm comm)
429  {
430  for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
431  {
432  it->second.Isend(it->first, comm);
433  }
434  }
435 
436  /// Helper to wait for all messages in a map container to be sent.
437  template<typename MapT>
438  static void WaitAllSent(MapT& rank_msg)
439  {
440  for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
441  {
442  MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
443  it->second.Clear();
444  }
445  }
446 
447  /** @brief Return true if all messages in the map container were sent,
448  otherwise return false, without waiting. */
449  template<typename MapT>
450  static bool TestAllSent(MapT& rank_msg)
451  {
452  for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
453  {
454  VarMessage &msg = it->second;
455  if (msg.send_request != MPI_REQUEST_NULL)
456  {
457  int sent;
458  MPI_Test(&msg.send_request, &sent, MPI_STATUS_IGNORE);
459  if (!sent) { return false; }
460  msg.Clear();
461  }
462  }
463  return true;
464  }
465 
466  /** @brief Blocking probe for incoming message of this type from any rank.
467  Returns the rank and message size. */
468  static void Probe(int &rank, int &size, MPI_Comm comm)
469  {
470  MPI_Status status;
471  MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
472  rank = status.MPI_SOURCE;
473  MPI_Get_count(&status, MPI_BYTE, &size);
474  }
475 
476  /** @brief Non-blocking probe for incoming message of this type from any
477  rank. If there is an incoming message, returns true and sets 'rank' and
478  'size'. Otherwise returns false. */
479  static bool IProbe(int &rank, int &size, MPI_Comm comm)
480  {
481  int flag;
482  MPI_Status status;
483  MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
484  if (!flag) { return false; }
485 
486  rank = status.MPI_SOURCE;
487  MPI_Get_count(&status, MPI_BYTE, &size);
488  return true;
489  }
490 
491  /// Post-probe receive from processor 'rank' of message size 'size'.
492  void Recv(int rank, int size, MPI_Comm comm)
493  {
494  MFEM_ASSERT(size >= 0, "");
495  data.resize(size);
496  MPI_Status status;
497  MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
498 #ifdef MFEM_DEBUG
499  int count;
500  MPI_Get_count(&status, MPI_BYTE, &count);
501  MFEM_VERIFY(count == size, "");
502 #endif
503  Decode(rank);
504  }
505 
506  /// Like Recv(), but throw away the message.
507  void RecvDrop(int rank, int size, MPI_Comm comm)
508  {
509  data.resize(size);
510  MPI_Status status;
511  MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
512  data.resize(0); // don't decode
513  }
514 
515  /// Helper to receive all messages in a rank-to-message map container.
516  template<typename MapT>
517  static void RecvAll(MapT& rank_msg, MPI_Comm comm)
518  {
519  int recv_left = rank_msg.size();
520  while (recv_left > 0)
521  {
522  int rank, size;
523  Probe(rank, size, comm);
524  MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
525  " (tag " << Tag << ") from rank " << rank);
526  // NOTE: no guard against receiving two messages from the same rank
527  rank_msg[rank].Recv(rank, size, comm);
528  --recv_left;
529  }
530  }
531 
532  VarMessage() : send_request(MPI_REQUEST_NULL) {}
533 
534  /// Clear the message and associated request.
535  void Clear() { data.clear(); send_request = MPI_REQUEST_NULL; }
536 
537  virtual ~VarMessage()
538  {
539  MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
540  "WaitAllSent was not called after Isend");
541  }
542 
543  VarMessage(const VarMessage &other)
544  : data(other.data), send_request(other.send_request)
545  {
546  MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
547  "Cannot copy message with a pending send.");
548  }
549 
550 protected:
551  virtual void Encode(int rank) {}
552  virtual void Decode(int rank) {}
553 };
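// Sketch of a concrete message type (illustrative): a class derived from
// VarMessage packs its payload into the 'data' string in Encode() and unpacks
// it in Decode(). The tag value 937, the struct name IntListMessage and the
// payload are arbitrary placeholders (needs <vector>, <cstring> and <map>).
//
//    struct IntListMessage : public mfem::VarMessage<937>
//    {
//       std::vector<int> values; // payload
//    protected:
//       virtual void Encode(int)
//       {
//          data.resize(values.size()*sizeof(int));
//          if (!values.empty()) { std::memcpy(&data[0], &values[0], data.size()); }
//       }
//       virtual void Decode(int)
//       {
//          values.resize(data.size()/sizeof(int));
//          if (!values.empty()) { std::memcpy(&values[0], data.data(), data.size()); }
//          data.clear();
//       }
//    };
//
// A typical exchange keeps one message per neighbor rank in a std::map and
// uses the static helpers declared above:
//
//    std::map<int, IntListMessage> send_msgs, recv_msgs; // rank -> message
//    // ... fill send_msgs[rank].values, insert empty recv_msgs entries ...
//    IntListMessage::IsendAll(send_msgs, MPI_COMM_WORLD);
//    IntListMessage::RecvAll(recv_msgs, MPI_COMM_WORLD);
//    IntListMessage::WaitAllSent(send_msgs);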
554 
555 
556 /// Helper struct to convert a C++ type to an MPI type
557 template <typename Type> struct MPITypeMap;
558 
559 // Specializations of MPITypeMap; mpi_type initialized in communication.cpp:
560 template<> struct MPITypeMap<int> { static const MPI_Datatype mpi_type; };
561 template<> struct MPITypeMap<double> { static const MPI_Datatype mpi_type; };
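// Sketch (illustrative): MPITypeMap lets templated code select the matching
// MPI_Datatype at compile time. Only the two specializations above (int and
// double) are provided, so T must be one of them; the function name GlobalSum
// is a placeholder.
//
//    template <typename T>
//    T GlobalSum(T loc, MPI_Comm comm)
//    {
//       T glob;
//       MPI_Allreduce(&loc, &glob, 1, mfem::MPITypeMap<T>::mpi_type,
//                     MPI_SUM, comm);
//       return glob;
//    }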
562 
563 
564 /** Reorder MPI ranks to follow the Z-curve within the physical machine topology
565  (provided that functions to query physical node coordinates are available).
566  Returns a new communicator with reordered ranks. */
567 MPI_Comm ReorderRanksZCurve(MPI_Comm comm);
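// Sketch (illustrative): the reordered communicator is typically created once,
// right after MPI initialization, and then used in place of MPI_COMM_WORLD;
// the objects built on it below are placeholders.
//
//    mfem::Mpi::Init(argc, argv);
//    MPI_Comm comm = mfem::ReorderRanksZCurve(MPI_COMM_WORLD);
//    // ... construct ParMesh, solvers, etc. on 'comm' instead of MPI_COMM_WORLD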
568 
569 
570 } // namespace mfem
571 
572 #endif
573 
574 #endif