MFEM  v4.2.0
Finite element discretization library
communication.hpp
// Copyright (c) 2010-2020, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.

#ifndef MFEM_COMMUNICATION
#define MFEM_COMMUNICATION

#include "../config/config.hpp"

#ifdef MFEM_USE_MPI

#include "array.hpp"
#include "table.hpp"
#include "sets.hpp"
#include "globals.hpp"
#include <mpi.h>


namespace mfem
{

/** @brief A simple convenience class that calls MPI_Init() at construction and
    MPI_Finalize() at destruction. It also provides easy access to
    MPI_COMM_WORLD's rank and size. */
class MPI_Session
{
protected:
   int world_rank, world_size;
   void GetRankAndSize();
public:
   MPI_Session() { MPI_Init(NULL, NULL); GetRankAndSize(); }
   MPI_Session(int &argc, char **&argv)
   { MPI_Init(&argc, &argv); GetRankAndSize(); }
   ~MPI_Session() { MPI_Finalize(); }
   /// Return MPI_COMM_WORLD's rank.
   int WorldRank() const { return world_rank; }
   /// Return MPI_COMM_WORLD's size.
   int WorldSize() const { return world_size; }
   /// Return true if WorldRank() == 0.
   bool Root() const { return world_rank == 0; }
};
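
/* Example (illustrative sketch, not part of the original header): a minimal
   driver that uses MPI_Session to tie MPI_Init()/MPI_Finalize() to a scope.
   The top-level include "mfem.hpp" is assumed.

      #include "mfem.hpp"
      #include <iostream>

      int main(int argc, char *argv[])
      {
         mfem::MPI_Session mpi(argc, argv);   // calls MPI_Init()
         if (mpi.Root())
         {
            std::cout << "running on " << mpi.WorldSize() << " ranks\n";
         }
         // ... parallel work on MPI_COMM_WORLD ...
         return 0;                            // MPI_Finalize() at destruction
      }
*/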


/** The shared entities (e.g. vertices, faces and edges) are split into groups,
    each group determined by the set of participating processors. They are
    numbered locally in lproc. Assumptions:
    - group 0 is the 'local' group
    - groupmaster_lproc[0] = 0
    - lproc_proc[0] = MyRank */
class GroupTopology
{
private:
   MPI_Comm   MyComm;

   /// Neighbor ids (lproc) in each group.
   Table      group_lproc;
   /// Master neighbor id for each group.
   Array<int> groupmaster_lproc;
   /// MPI rank of each neighbor.
   Array<int> lproc_proc;
   /// Group --> Group number in the master.
   Array<int> group_mgroup;

   void ProcToLProc();

public:
   /// Constructor with the MPI communicator = 0.
   GroupTopology() : MyComm(0) {}

   /// Constructor given the MPI communicator 'comm'.
   GroupTopology(MPI_Comm comm) { MyComm = comm; }

   /// Copy constructor
   GroupTopology(const GroupTopology &gt);

   /// Set the MPI communicator to 'comm'.
   void SetComm(MPI_Comm comm) { MyComm = comm; }

   /// Return the MPI communicator.
   MPI_Comm GetComm() const { return MyComm; }

   /// Return the MPI rank within this object's communicator.
   int MyRank() const { int r; MPI_Comm_rank(MyComm, &r); return r; }

   /// Return the number of MPI ranks within this object's communicator.
   int NRanks() const { int s; MPI_Comm_size(MyComm, &s); return s; }

   /// Set up the group topology given the list of sets of shared entities.
   void Create(ListOfIntegerSets &groups, int mpitag);

   /// Return the number of groups.
   int NGroups() const { return group_lproc.Size(); }

   /// Return the number of neighbors including the local processor.
   int GetNumNeighbors() const { return lproc_proc.Size(); }

   /// Return the MPI rank of neighbor 'i'.
   int GetNeighborRank(int i) const { return lproc_proc[i]; }

   /// Return true if I am master for group 'g'.
   bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }

   /** @brief Return the neighbor index of the group master for a given group.
       Neighbor 0 is the local processor. */
   int GetGroupMaster(int g) const { return groupmaster_lproc[g]; }

   /// Return the rank of the group master for group 'g'.
   int GetGroupMasterRank(int g) const
   { return lproc_proc[groupmaster_lproc[g]]; }

   /// Return the group number in the master for group 'g'.
   int GetGroupMasterGroup(int g) const { return group_mgroup[g]; }

   /// Get the number of processors in a group.
   int GetGroupSize(int g) const { return group_lproc.RowSize(g); }

   /** @brief Return a pointer to a list of neighbors for a given group.
       Neighbor 0 is the local processor. */
   const int *GetGroup(int g) const { return group_lproc.GetRow(g); }

   /// Save the data in a stream.
   void Save(std::ostream &out) const;

   /// Load the data from a stream.
   void Load(std::istream &in);

   /// Copy the internal data to the external 'copy'.
   void Copy(GroupTopology &copy) const;

   virtual ~GroupTopology() {}
};
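
/* Example (illustrative sketch, not part of the original header): building a
   GroupTopology from sets of ranks that share entities. The neighbor rank
   'nbr_rank' and the MPI tag value 822 are made up for illustration; in MFEM
   this setup is normally done by ParMesh.

      mfem::GroupTopology gtopo(MPI_COMM_WORLD);
      int my_rank = gtopo.MyRank();

      mfem::ListOfIntegerSets groups;
      mfem::IntegerSet        group;

      // Group 0 must be the 'local' group, i.e. the set { MyRank }.
      group.Recreate(1, &my_rank);
      groups.Insert(group);

      // One set per collection of ranks sharing an entity, e.g. this rank and
      // a neighbor that share a vertex/edge/face.
      int pair[2] = { my_rank, nbr_rank };
      group.Recreate(2, pair);
      groups.Insert(group);

      gtopo.Create(groups, 822);   // the second argument is an MPI tag
*/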

/** @brief Communicator performing operations within groups defined by a
    GroupTopology with arbitrary-size data associated with each group. */
class GroupCommunicator
{
public:
   /// Communication mode.
   enum Mode
   {
      byGroup,    ///< Communications are performed one group at a time.
      byNeighbor  /**< Communications are performed one neighbor at a time,
                       aggregating over groups. */
   };

protected:
   GroupTopology &gtopo;
   Mode mode;
   Table group_ldof;
   Table group_ltdof; // only for groups for which this processor is master.
   int group_buf_size;
   mutable Array<char> group_buf;
   MPI_Request *requests;
   // MPI_Status *statuses;
   // comm_lock: 0 - no lock, 1 - locked for Bcast, 2 - locked for Reduce
   mutable int comm_lock;
   mutable int num_requests;
   MPI_Request *request_marker;
   int *buf_offsets; // size = max(number of groups, number of neighbors)
   Table nbr_send_groups, nbr_recv_groups; // nbr 0 = me

public:
   /// Construct a GroupCommunicator object.
   /** The object must be initialized before it can be used to perform any
       operations. To initialize the object, either
       - call Create() or
       - initialize the Table reference returned by GroupLDofTable() and then
         call Finalize().
   */
   GroupCommunicator(GroupTopology &gt, Mode m = byNeighbor);

   /** @brief Initialize the communicator from a local-dof to group map.
       Finalize() is called internally. */
   void Create(const Array<int> &ldof_group);

   /** @brief Fill in the returned Table reference to initialize the
       GroupCommunicator, then call Finalize(). */
   Table &GroupLDofTable() { return group_ldof; }

   /// Read-only access to the group-ldof Table.
   const Table &GroupLDofTable() const { return group_ldof; }

   /// Allocate internal buffers after the GroupLDofTable is defined.
   void Finalize();
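
   /* Example (illustrative sketch, not part of the original header): the two
      initialization paths described above. 'gtopo' is an already created
      GroupTopology and 'ldof_group' (size = number of local dofs, entry i =
      group of ldof i) is assumed to be provided by the caller; in MFEM it is
      normally built by ParFiniteElementSpace.

         mfem::GroupCommunicator gc(gtopo);   // default mode: byNeighbor

         // Path 1: one call; Finalize() is invoked internally.
         gc.Create(ldof_group);

         // Path 2: fill the group-ldof Table returned by GroupLDofTable()
         // (one row per group listing the ldofs in that group), then call
         // gc.Finalize() to allocate the internal buffers.
   */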

   /// Initialize the internal group_ltdof Table.
   /** This method must be called before performing operations that use local
       data layout 2, see CopyGroupToBuffer() for layout descriptions. */
   void SetLTDofTable(const Array<int> &ldof_ltdof);

   /// Get a reference to the associated GroupTopology object.
   GroupTopology &GetGroupTopology() { return gtopo; }

   /// Get a const reference to the associated GroupTopology object.
   const GroupTopology &GetGroupTopology() const { return gtopo; }

   /// Dofs to be sent to communication neighbors
   void GetNeighborLTDofTable(Table &nbr_ltdof) const;

   /// Dofs to be received from communication neighbors
   void GetNeighborLDofTable(Table &nbr_ldof) const;

   /** @brief Data structure on which we define reduce operations.
       The data is associated with (and the operation is performed on) one
       group at a time. */
   template <class T> struct OpData
   {
      int nldofs, nb;
      const int *ldofs;
      T *ldata, *buf;
   };

   /** @brief Copy the entries corresponding to the group @a group from the
       local array @a ldata to the buffer @a buf. */
   /** The @a layout of the local array can be:
       - 0 - @a ldata is an array on all ldofs: copied indices:
         `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
       - 1 - @a ldata is an array on the shared ldofs: copied indices:
         `{ j : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
       - 2 - @a ldata is an array on the true ldofs, ltdofs: copied indices:
         `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ltdof.{I,J}`.
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   T *CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const;

   /** @brief Copy the entries corresponding to the group @a group from the
       buffer @a buf to the local array @a ldata. */
   /** For a description of @a layout, see CopyGroupToBuffer().
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   const T *CopyGroupFromBuffer(const T *buf, T *ldata, int group,
                                int layout) const;

   /** @brief Apply the reduction operation @a Op to the entries of group
       @a group using the values from the buffer @a buf and the values from the
       local array @a ldata, saving the result in the latter. */
   /** For a description of @a layout, see CopyGroupToBuffer().
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group,
                                  int layout, void (*Op)(OpData<T>)) const;

   /// Begin a broadcast within each group where the master is the root.
   /** For a description of @a layout, see CopyGroupToBuffer(). */
   template <class T> void BcastBegin(T *ldata, int layout) const;

   /** @brief Finalize a broadcast started with BcastBegin().

       The output data @a layout can be:
       - 0 - @a ldata is an array on all ldofs; the input layout should be
         either 0 or 2
       - 1 - @a ldata is the same array as given to BcastBegin(); the input
         layout should be 1.

       For more details about @a layout, see CopyGroupToBuffer(). */
   template <class T> void BcastEnd(T *ldata, int layout) const;

   /** @brief Broadcast within each group where the master is the root.

       The data @a layout can be either 0 or 1.

       For a description of @a layout, see CopyGroupToBuffer(). */
   template <class T> void Bcast(T *ldata, int layout) const
   {
      BcastBegin(ldata, layout);
      BcastEnd(ldata, layout);
   }

   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(T *ldata) const { Bcast<T>(ldata, 0); }
   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(Array<T> &ldata) const
   { Bcast<T>((T *)ldata); }
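
   /* Example (illustrative sketch, not part of the original header): making
      shared values consistent by overwriting each non-master copy with the
      group master's value. 'gc' is an initialized GroupCommunicator and
      'num_ldofs' is the number of local dofs (both assumed).

         mfem::Array<double> x(num_ldofs);   // layout 0: one entry per ldof
         // ... each rank fills the entries of the groups it masters ...
         gc.Bcast(x);   // non-masters now hold the master values
   */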

   /** @brief Begin reduction operation within each group where the master is
       the root. */
   /** The input data layout is an array on all ldofs, i.e. layout 0, see
       CopyGroupToBuffer().

       The reduce operation will be specified when calling ReduceEnd(). This
       method is instantiated for int and double. */
   template <class T> void ReduceBegin(const T *ldata) const;

   /** @brief Finalize reduction operation started with ReduceBegin().

       The output data @a layout can be either 0 or 2, see CopyGroupToBuffer().

       The reduce operation is given by the third argument (see below for a
       list of the supported operations). This method is instantiated for int
       and double.

       @note If the output data layout is 2, then the data from the @a ldata
       array passed to this call is used in the reduction operation, instead of
       the data from the @a ldata array passed to ReduceBegin(). Therefore, the
       data for master-groups has to be identical in both arrays.
   */
   template <class T> void ReduceEnd(T *ldata, int layout,
                                     void (*Op)(OpData<T>)) const;

   /** @brief Reduce within each group where the master is the root.

       The reduce operation is given by the second argument (see below for a
       list of the supported operations). */
   template <class T> void Reduce(T *ldata, void (*Op)(OpData<T>)) const
   {
      ReduceBegin(ldata);
      ReduceEnd(ldata, 0, Op);
   }

   /// Reduce within each group where the master is the root.
   template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>)) const
   { Reduce<T>((T *)ldata, Op); }
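
   /* Example (illustrative sketch, not part of the original header): the usual
      assemble-then-distribute pattern, i.e. sum the contributions of all
      copies of each shared dof on its master, then broadcast the sums back.
      'gc' and the ldof-sized array 'x' are as in the Bcast example above.

         gc.Reduce<double>(x, mfem::GroupCommunicator::Sum);
         gc.Bcast(x);
   */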

   /// Reduce operation Sum, instantiated for int and double
   template <class T> static void Sum(OpData<T>);
   /// Reduce operation Min, instantiated for int and double
   template <class T> static void Min(OpData<T>);
   /// Reduce operation Max, instantiated for int and double
   template <class T> static void Max(OpData<T>);
   /// Reduce operation bitwise OR, instantiated for int only
   template <class T> static void BitOR(OpData<T>);
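
   /* Example (illustrative sketch, not part of the original header): a custom
      reduction with the same signature as Sum/Min/Max above. For one group,
      OpData packs the number of group ldofs (nldofs), the number of received
      buffers (nb), the ldof indices (ldofs), the local array (ldata), and the
      buffers (buf: nb blocks of nldofs entries). Note that Reduce()/ReduceEnd()
      are only instantiated for int and double.

         static void AbsMax(mfem::GroupCommunicator::OpData<double> opd)
         {
            for (int i = 0; i < opd.nldofs; i++)
            {
               double m = opd.ldata[opd.ldofs[i]];
               if (m < 0.0) { m = -m; }
               for (int j = 0; j < opd.nb; j++)
               {
                  double a = opd.buf[j*opd.nldofs + i];
                  if (a < 0.0) { a = -a; }
                  if (a > m) { m = a; }
               }
               opd.ldata[opd.ldofs[i]] = m;
            }
         }

         // usage: gc.Reduce<double>(x, AbsMax);
   */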

   /// Print information about the GroupCommunicator from all MPI ranks.
   void PrintInfo(std::ostream &out = mfem::out) const;

   /** @brief Destroy a GroupCommunicator object, deallocating internal data
       structures and buffers. */
   ~GroupCommunicator();
};


/// \brief Variable-length MPI message containing unspecified binary data.
template<int Tag>
struct VarMessage
{
   std::string data;
   MPI_Request send_request;

   /** @brief Non-blocking send to processor 'rank'.
       Returns immediately. Completion (as tested by MPI_Wait/Test) does not
       mean the message was received -- it may be on its way or just buffered
       locally. */
   void Isend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Isend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                &send_request);
   }

   /** @brief Non-blocking synchronous send to processor 'rank'.
       Returns immediately. Completion (MPI_Wait/Test) means that the message
       was received. */
   void Issend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Issend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                 &send_request);
   }

   /// Helper to send all messages in a rank-to-message map container.
   template<typename MapT>
   static void IsendAll(MapT& rank_msg, MPI_Comm comm)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         it->second.Isend(it->first, comm);
      }
   }

   /// Helper to wait for all messages in a map container to be sent.
   template<typename MapT>
   static void WaitAllSent(MapT& rank_msg)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
         it->second.Clear();
      }
   }

   /** @brief Return true if all messages in the map container were sent,
       otherwise return false, without waiting. */
   template<typename MapT>
   static bool TestAllSent(MapT& rank_msg)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         VarMessage &msg = it->second;
         if (msg.send_request != MPI_REQUEST_NULL)
         {
            int sent;
            MPI_Test(&msg.send_request, &sent, MPI_STATUS_IGNORE);
            if (!sent) { return false; }
            msg.Clear();
         }
      }
      return true;
   }

   /** @brief Blocking probe for incoming message of this type from any rank.
       Returns the rank and message size. */
   static void Probe(int &rank, int &size, MPI_Comm comm)
   {
      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
   }

   /** @brief Non-blocking probe for incoming message of this type from any
       rank. If there is an incoming message, returns true and sets 'rank' and
       'size'. Otherwise returns false. */
   static bool IProbe(int &rank, int &size, MPI_Comm comm)
   {
      int flag;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
      if (!flag) { return false; }

      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
      return true;
   }

   /// Post-probe receive from processor 'rank' of message size 'size'.
   void Recv(int rank, int size, MPI_Comm comm)
   {
      MFEM_ASSERT(size >= 0, "");
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
#ifdef MFEM_DEBUG
      int count;
      MPI_Get_count(&status, MPI_BYTE, &count);
      MFEM_VERIFY(count == size, "");
#endif
      Decode(rank);
   }

   /// Like Recv(), but throw away the message.
   void RecvDrop(int rank, int size, MPI_Comm comm)
   {
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
      data.resize(0); // don't decode
   }

   /// Helper to receive all messages in a rank-to-message map container.
   template<typename MapT>
   static void RecvAll(MapT& rank_msg, MPI_Comm comm)
   {
      int recv_left = rank_msg.size();
      while (recv_left > 0)
      {
         int rank, size;
         Probe(rank, size, comm);
         MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
                     " (tag " << Tag << ") from rank " << rank);
         // NOTE: no guard against receiving two messages from the same rank
         rank_msg[rank].Recv(rank, size, comm);
         --recv_left;
      }
   }

   VarMessage() : send_request(MPI_REQUEST_NULL) {}

   /// Clear the message and associated request.
   void Clear() { data.clear(); send_request = MPI_REQUEST_NULL; }

   virtual ~VarMessage()
   {
      MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
                  "WaitAllSent was not called after Isend");
   }

   VarMessage(const VarMessage &other)
      : data(other.data), send_request(other.send_request)
   {
      MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
                  "Cannot copy message with a pending send.");
   }

protected:
   virtual void Encode(int rank) {}
   virtual void Decode(int rank) {}
};
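
/* Example (illustrative sketch, not part of the original header): a derived
   message type that serializes a list of integers, exchanged with the
   IsendAll/RecvAll/WaitAllSent helpers. The tag value 1234 and all names below
   are made up; note that 'recv_msgs' must already contain one (empty) entry
   per expected sender rank, since RecvAll() receives rank_msg.size() messages.

      #include <map>
      #include <sstream>
      #include <vector>

      struct IntListMessage : public mfem::VarMessage<1234>
      {
         std::vector<int> values;

      protected:
         void Encode(int) override
         {
            std::ostringstream os;
            int n = values.size();
            os.write((const char *)&n, sizeof(n));
            os.write((const char *)values.data(), n*sizeof(int));
            data = os.str();
         }
         void Decode(int) override
         {
            std::istringstream is(data);
            int n = 0;
            is.read((char *)&n, sizeof(n));
            values.resize(n);
            is.read((char *)values.data(), n*sizeof(int));
         }
      };

      std::map<int, IntListMessage> send_msgs, recv_msgs; // keyed by rank
      // ... fill send_msgs[dest].values, create empty recv_msgs[src] entries ...
      IntListMessage::IsendAll(send_msgs, MPI_COMM_WORLD);
      IntListMessage::RecvAll(recv_msgs, MPI_COMM_WORLD);  // blocks until done
      IntListMessage::WaitAllSent(send_msgs);
*/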


/// Helper struct to convert a C++ type to an MPI type
template <typename Type> struct MPITypeMap;

// Specializations of MPITypeMap; mpi_type initialized in communication.cpp:
template<> struct MPITypeMap<int>    { static const MPI_Datatype mpi_type; };
template<> struct MPITypeMap<double> { static const MPI_Datatype mpi_type; };
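
/* Example (illustrative sketch, not part of the original header): using
   MPITypeMap in generic code so that the MPI datatype follows the template
   parameter. The helper function below is hypothetical.

      template <typename T>
      T GlobalSum(T local, MPI_Comm comm)
      {
         T global;
         MPI_Allreduce(&local, &global, 1, mfem::MPITypeMap<T>::mpi_type,
                       MPI_SUM, comm);
         return global;
      }
*/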


/** Reorder MPI ranks to follow the Z-curve within the physical machine topology
    (provided that functions to query physical node coordinates are available).
    Returns a new communicator with reordered ranks. */
MPI_Comm ReorderRanksZCurve(MPI_Comm comm);
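
/* Example (illustrative sketch, not part of the original header): use the
   reordered communicator instead of MPI_COMM_WORLD for subsequent parallel
   objects.

      MPI_Comm comm = mfem::ReorderRanksZCurve(MPI_COMM_WORLD);
      // ... construct ParMesh / GroupTopology / solvers on 'comm' ...
*/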


} // namespace mfem

#endif

#endif