MFEM  v4.3.0
Finite element discretization library
communication.hpp
// Copyright (c) 2010-2021, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.

#ifndef MFEM_COMMUNICATION
#define MFEM_COMMUNICATION

#include "../config/config.hpp"

#ifdef MFEM_USE_MPI

#include "array.hpp"
#include "table.hpp"
#include "sets.hpp"
#include "globals.hpp"
#include <mpi.h>


namespace mfem
{

/** @brief A simple convenience class that calls MPI_Init() at construction and
    MPI_Finalize() at destruction. It also provides easy access to
    MPI_COMM_WORLD's rank and size. */
class MPI_Session
{
protected:
   int world_rank, world_size;
   void GetRankAndSize();
public:
   MPI_Session() { MPI_Init(NULL, NULL); GetRankAndSize(); }
   MPI_Session(int &argc, char **&argv)
   { MPI_Init(&argc, &argv); GetRankAndSize(); }
   ~MPI_Session() { MPI_Finalize(); }
   /// Return MPI_COMM_WORLD's rank.
   int WorldRank() const { return world_rank; }
   /// Return MPI_COMM_WORLD's size.
   int WorldSize() const { return world_size; }
   /// Return true if WorldRank() == 0.
   bool Root() const { return world_rank == 0; }
};
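
/* Usage sketch (not part of this header, names in the body are illustrative):
   a typical main() drives MPI through MPI_Session so that MPI_Init() and
   MPI_Finalize() are tied to the object's lifetime.

      #include "mfem.hpp"
      int main(int argc, char *argv[])
      {
         mfem::MPI_Session mpi(argc, argv);
         if (mpi.Root())
         {
            mfem::out << "Running on " << mpi.WorldSize() << " MPI ranks.\n";
         }
         // ... parallel work using MPI_COMM_WORLD ...
         return 0; // MPI_Finalize() is called by ~MPI_Session()
      }
*/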


/** The shared entities (e.g. vertices, faces and edges) are split into groups,
    each group determined by the set of participating processors. They are
    numbered locally in lproc. Assumptions:
    - group 0 is the 'local' group
    - groupmaster_lproc[0] = 0
    - lproc_proc[0] = MyRank */
class GroupTopology
{
private:
   MPI_Comm MyComm;

   /// Neighbor ids (lproc) in each group.
   Table group_lproc;
   /// Master neighbor id for each group.
   Array<int> groupmaster_lproc;
   /// MPI rank of each neighbor.
   Array<int> lproc_proc;
   /// Group --> Group number in the master.
   Array<int> group_mgroup;

   void ProcToLProc();

public:
   /// Constructor with the MPI communicator = 0.
   GroupTopology() : MyComm(0) {}

   /// Constructor given the MPI communicator 'comm'.
   GroupTopology(MPI_Comm comm) { MyComm = comm; }

   /// Copy constructor
   GroupTopology(const GroupTopology &gt);

   /// Set the MPI communicator to 'comm'.
   void SetComm(MPI_Comm comm) { MyComm = comm; }

   /// Return the MPI communicator.
   MPI_Comm GetComm() const { return MyComm; }

   /// Return the MPI rank within this object's communicator.
   int MyRank() const { int r; MPI_Comm_rank(MyComm, &r); return r; }

   /// Return the number of MPI ranks within this object's communicator.
   int NRanks() const { int s; MPI_Comm_size(MyComm, &s); return s; }

   /// Set up the group topology given the list of sets of shared entities.
   void Create(ListOfIntegerSets &groups, int mpitag);

   /// Return the number of groups.
   int NGroups() const { return group_lproc.Size(); }

   /// Return the number of neighbors including the local processor.
   int GetNumNeighbors() const { return lproc_proc.Size(); }

   /// Return the MPI rank of neighbor 'i'.
   int GetNeighborRank(int i) const { return lproc_proc[i]; }

   /// Return true if I am master for group 'g'.
   bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }

   /** @brief Return the neighbor index of the group master for a given group.
       Neighbor 0 is the local processor. */
   int GetGroupMaster(int g) const { return groupmaster_lproc[g]; }

   /// Return the rank of the group master for group 'g'.
   int GetGroupMasterRank(int g) const
   { return lproc_proc[groupmaster_lproc[g]]; }

   /// Return the group number in the master for group 'g'.
   int GetGroupMasterGroup(int g) const { return group_mgroup[g]; }

   /// Get the number of processors in a group.
   int GetGroupSize(int g) const { return group_lproc.RowSize(g); }

   /** @brief Return a pointer to a list of neighbors for a given group.
       Neighbor 0 is the local processor. */
   const int *GetGroup(int g) const { return group_lproc.GetRow(g); }

   /// Save the data in a stream.
   void Save(std::ostream &out) const;

   /// Load the data from a stream.
   void Load(std::istream &in);

   /// Copy the internal data to the external 'copy'.
   void Copy(GroupTopology &copy) const;

   /// Swap the internal data with another @a GroupTopology object.
   void Swap(GroupTopology &other);

   virtual ~GroupTopology() {}
};
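
/* Usage sketch (not part of this header): once a GroupTopology has been set up
   with Create() -- or obtained from an object that owns one, e.g. a parallel
   mesh -- the groups and their masters can be inspected as follows. The helper
   function below is illustrative only.

      void PrintGroups(const mfem::GroupTopology &gtopo)
      {
         for (int g = 1; g < gtopo.NGroups(); g++) // group 0 is the local group
         {
            mfem::out << "rank " << gtopo.MyRank() << ", group " << g
                      << ": size " << gtopo.GetGroupSize(g)
                      << ", master rank " << gtopo.GetGroupMasterRank(g)
                      << (gtopo.IAmMaster(g) ? " (this rank)" : "") << '\n';
         }
      }
*/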


/** @brief Communicator performing operations within groups defined by a
    GroupTopology with arbitrary-size data associated with each group. */
class GroupCommunicator
{
public:
   /// Communication mode.
   enum Mode
   {
      byGroup,    ///< Communications are performed one group at a time.
      byNeighbor  /**< Communications are performed one neighbor at a time,
                       aggregating over groups. */
   };

protected:
   GroupTopology &gtopo;
   Mode mode;
   Table group_ldof;
   Table group_ltdof; // only for groups for which this processor is master.
   MPI_Request *requests;
   // MPI_Status *statuses;
   // comm_lock: 0 - no lock, 1 - locked for Bcast, 2 - locked for Reduce
   mutable int comm_lock;
   mutable int num_requests;
   int *buf_offsets; // size = max(number of groups, number of neighbors)

public:
   /// Construct a GroupCommunicator object.
   /** The object must be initialized before it can be used to perform any
       operations. To initialize the object, either
       - call Create() or
       - initialize the Table reference returned by GroupLDofTable() and then
         call Finalize().
   */
   GroupCommunicator(GroupTopology &gt, Mode m = byNeighbor);

   /** @brief Initialize the communicator from a local-dof to group map.
       Finalize() is called internally. */
   void Create(const Array<int> &ldof_group);

   /** @brief Fill-in the returned Table reference to initialize the
       GroupCommunicator then call Finalize(). */
   Table &GroupLDofTable() { return group_ldof; }

   /// Read-only access to group-ldof Table.
   const Table &GroupLDofTable() const { return group_ldof; }

   /// Allocate internal buffers after the GroupLDofTable is defined.
   void Finalize();

   /// Initialize the internal group_ltdof Table.
   /** This method must be called before performing operations that use local
       data layout 2, see CopyGroupToBuffer() for layout descriptions. */
   void SetLTDofTable(const Array<int> &ldof_ltdof);

   /// Get a reference to the associated GroupTopology object.
   GroupTopology &GetGroupTopology() { return gtopo; }

   /// Get a const reference to the associated GroupTopology object.
   const GroupTopology &GetGroupTopology() const { return gtopo; }

   /// Dofs to be sent to communication neighbors.
   void GetNeighborLTDofTable(Table &nbr_ltdof) const;

   /// Dofs to be received from communication neighbors.
   void GetNeighborLDofTable(Table &nbr_ldof) const;

   /** @brief Data structure on which we define reduce operations.
       The data is associated with (and the operation is performed on) one
       group at a time. */
   template <class T> struct OpData
   {
      int nldofs, nb;
      const int *ldofs;
      T *ldata, *buf;
   };

   /** @brief Copy the entries corresponding to the group @a group from the
       local array @a ldata to the buffer @a buf. */
   /** The @a layout of the local array can be:
       - 0 - @a ldata is an array on all ldofs: copied indices:
         `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
       - 1 - @a ldata is an array on the shared ldofs: copied indices:
         `{ j : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
       - 2 - @a ldata is an array on the true ldofs, ltdofs: copied indices:
         `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ltdof.{I,J}`.
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   T *CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const;

   /** @brief Copy the entries corresponding to the group @a group from the
       buffer @a buf to the local array @a ldata. */
   /** For a description of @a layout, see CopyGroupToBuffer().
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   const T *CopyGroupFromBuffer(const T *buf, T *ldata, int group,
                                int layout) const;

   /** @brief Perform the reduction operation @a Op to the entries of group
       @a group using the values from the buffer @a buf and the values from the
       local array @a ldata, saving the result in the latter. */
   /** For a description of @a layout, see CopyGroupToBuffer().
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group,
                                  int layout, void (*Op)(OpData<T>)) const;

   /// Begin a broadcast within each group where the master is the root.
   /** For a description of @a layout, see CopyGroupToBuffer(). */
   template <class T> void BcastBegin(T *ldata, int layout) const;

   /** @brief Finalize a broadcast started with BcastBegin().

       The output data @a layout can be:
       - 0 - @a ldata is an array on all ldofs; the input layout should be
         either 0 or 2
       - 1 - @a ldata is the same array as given to BcastBegin(); the input
         layout should be 1.

       For more details about @a layout, see CopyGroupToBuffer(). */
   template <class T> void BcastEnd(T *ldata, int layout) const;

   /** @brief Broadcast within each group where the master is the root.

       The data @a layout can be either 0 or 1.

       For a description of @a layout, see CopyGroupToBuffer(). */
   template <class T> void Bcast(T *ldata, int layout) const
   {
      BcastBegin(ldata, layout);
      BcastEnd(ldata, layout);
   }

   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(T *ldata) const { Bcast<T>(ldata, 0); }
   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(Array<T> &ldata) const
   { Bcast<T>((T *)ldata); }

   /** @brief Begin reduction operation within each group where the master is
       the root. */
   /** The input data layout is an array on all ldofs, i.e. layout 0, see
       CopyGroupToBuffer().

       The reduce operation will be specified when calling ReduceEnd(). This
       method is instantiated for int and double. */
   template <class T> void ReduceBegin(const T *ldata) const;

   /** @brief Finalize reduction operation started with ReduceBegin().

       The output data @a layout can be either 0 or 2, see CopyGroupToBuffer().

       The reduce operation is given by the third argument (see below for a
       list of the supported operations). This method is instantiated for int
       and double.

       @note If the output data layout is 2, then the data from the @a ldata
       array passed to this call is used in the reduction operation, instead of
       the data from the @a ldata array passed to ReduceBegin(). Therefore, the
       data for master-groups has to be identical in both arrays.
   */
   template <class T> void ReduceEnd(T *ldata, int layout,
                                     void (*Op)(OpData<T>)) const;

   /** @brief Reduce within each group where the master is the root.

       The reduce operation is given by the second argument (see below for a
       list of the supported operations). */
   template <class T> void Reduce(T *ldata, void (*Op)(OpData<T>)) const
   {
      ReduceBegin(ldata);
      ReduceEnd(ldata, 0, Op);
   }

   /// Reduce within each group where the master is the root.
   template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>)) const
   { Reduce<T>((T *)ldata, Op); }

   /// Reduce operation Sum, instantiated for int and double.
   template <class T> static void Sum(OpData<T>);
   /// Reduce operation Min, instantiated for int and double.
   template <class T> static void Min(OpData<T>);
   /// Reduce operation Max, instantiated for int and double.
   template <class T> static void Max(OpData<T>);
   /// Reduce operation bitwise OR, instantiated for int only.
   template <class T> static void BitOR(OpData<T>);

   /// Print information about the GroupCommunicator from all MPI ranks.
   void PrintInfo(std::ostream &out = mfem::out) const;

   /** @brief Destroy a GroupCommunicator object, deallocating internal data
       structures and buffers. */
   ~GroupCommunicator();
};
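
/* Usage sketch (not part of this header): given a GroupTopology 'gtopo' and an
   array 'ldof_group' mapping each local dof to its group (0 for purely local
   dofs), a GroupCommunicator can synchronize shared values across ranks. The
   function and variable names below are illustrative only.

      void SyncShared(mfem::GroupTopology &gtopo,
                      const mfem::Array<int> &ldof_group,
                      mfem::Array<double> &ldata)   // layout 0: one entry per ldof
      {
         mfem::GroupCommunicator gcomm(gtopo);
         gcomm.Create(ldof_group);   // builds the group-ldof Table, calls Finalize()

         // Accumulate the contributions of all ranks sharing each dof on the
         // group master ...
         gcomm.Reduce<double>(ldata, mfem::GroupCommunicator::Sum);
         // ... then make the masters' values visible on all ranks in the group.
         gcomm.Bcast(ldata);
      }
*/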


/// \brief Variable-length MPI message containing unspecific binary data.
template<int Tag>
struct VarMessage
{
   std::string data;
   MPI_Request send_request;

   /** @brief Non-blocking send to processor 'rank'.
       Returns immediately. Completion (as tested by MPI_Wait/Test) does not
       mean the message was received -- it may be on its way or just buffered
       locally. */
   void Isend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Isend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                &send_request);
   }

   /** @brief Non-blocking synchronous send to processor 'rank'.
       Returns immediately. Completion (MPI_Wait/Test) means that the message
       was received. */
   void Issend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Issend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                 &send_request);
   }

   /// Helper to send all messages in a rank-to-message map container.
   template<typename MapT>
   static void IsendAll(MapT &rank_msg, MPI_Comm comm)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         it->second.Isend(it->first, comm);
      }
   }

   /// Helper to wait for all messages in a map container to be sent.
   template<typename MapT>
   static void WaitAllSent(MapT &rank_msg)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
         it->second.Clear();
      }
   }

   /** @brief Return true if all messages in the map container were sent,
       otherwise return false, without waiting. */
   template<typename MapT>
   static bool TestAllSent(MapT &rank_msg)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         VarMessage &msg = it->second;
         if (msg.send_request != MPI_REQUEST_NULL)
         {
            int sent;
            MPI_Test(&msg.send_request, &sent, MPI_STATUS_IGNORE);
            if (!sent) { return false; }
            msg.Clear();
         }
      }
      return true;
   }

   /** @brief Blocking probe for incoming message of this type from any rank.
       Returns the rank and message size. */
   static void Probe(int &rank, int &size, MPI_Comm comm)
   {
      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
   }

   /** @brief Non-blocking probe for incoming message of this type from any
       rank. If there is an incoming message, returns true and sets 'rank' and
       'size'. Otherwise returns false. */
   static bool IProbe(int &rank, int &size, MPI_Comm comm)
   {
      int flag;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
      if (!flag) { return false; }

      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
      return true;
   }

   /// Post-probe receive from processor 'rank' of message size 'size'.
   void Recv(int rank, int size, MPI_Comm comm)
   {
      MFEM_ASSERT(size >= 0, "");
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
#ifdef MFEM_DEBUG
      int count;
      MPI_Get_count(&status, MPI_BYTE, &count);
      MFEM_VERIFY(count == size, "");
#endif
      Decode(rank);
   }

   /// Like Recv(), but throw away the message.
   void RecvDrop(int rank, int size, MPI_Comm comm)
   {
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
      data.resize(0); // don't decode
   }

   /// Helper to receive all messages in a rank-to-message map container.
   template<typename MapT>
   static void RecvAll(MapT &rank_msg, MPI_Comm comm)
   {
      int recv_left = rank_msg.size();
      while (recv_left > 0)
      {
         int rank, size;
         Probe(rank, size, comm);
         MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
                     " (tag " << Tag << ") from rank " << rank);
         // NOTE: no guard against receiving two messages from the same rank
         rank_msg[rank].Recv(rank, size, comm);
         --recv_left;
      }
   }

   VarMessage() : send_request(MPI_REQUEST_NULL) {}

   /// Clear the message and associated request.
   void Clear() { data.clear(); send_request = MPI_REQUEST_NULL; }

   virtual ~VarMessage()
   {
      MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
                  "WaitAllSent was not called after Isend");
   }

   VarMessage(const VarMessage &other)
      : data(other.data), send_request(other.send_request)
   {
      MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
                  "Cannot copy message with a pending send.");
   }

protected:
   virtual void Encode(int rank) {}
   virtual void Decode(int rank) {}
};
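
/* Usage sketch (not part of this header): a concrete message type derives from
   VarMessage with a unique tag and overrides Encode()/Decode() to (de)serialize
   its payload into the 'data' string. The type, tag value, and members below
   are illustrative only (and require <sstream>).

      struct ValueMessage : public mfem::VarMessage<291>
      {
         double value = 0.0;
      protected:
         void Encode(int rank) override
         {
            std::ostringstream os;
            os.write(reinterpret_cast<const char*>(&value), sizeof(value));
            data = os.str();
         }
         void Decode(int rank) override
         {
            std::istringstream is(data);
            is.read(reinterpret_cast<char*>(&value), sizeof(value));
         }
      };

      // A typical exchange with a std::map<int, ValueMessage> per direction:
      // call IsendAll() on the outgoing map, RecvAll() on the map of expected
      // senders, and finally WaitAllSent() on the outgoing map.
*/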


/// Helper struct to convert a C++ type to an MPI type
template <typename Type> struct MPITypeMap;

// Specializations of MPITypeMap; mpi_type initialized in communication.cpp:
template<> struct MPITypeMap<int>    { static const MPI_Datatype mpi_type; };
template<> struct MPITypeMap<double> { static const MPI_Datatype mpi_type; };
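
/* Usage sketch (not part of this header): MPITypeMap lets templated code pick
   the MPI_Datatype matching its scalar type, e.g. in a generic reduction. The
   helper function below is illustrative only and works for the instantiated
   types (int and double).

      template <typename T>
      T GlobalSum(T local, MPI_Comm comm)
      {
         T global;
         MPI_Allreduce(&local, &global, 1, mfem::MPITypeMap<T>::mpi_type,
                       MPI_SUM, comm);
         return global;
      }
*/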


/** Reorder MPI ranks to follow the Z-curve within the physical machine topology
    (provided that functions to query physical node coordinates are available).
    Returns a new communicator with reordered ranks. */
MPI_Comm ReorderRanksZCurve(MPI_Comm comm);
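
/* Usage sketch (not part of this header): the reordered communicator is
   typically created once, right after MPI is initialized, and then used in
   place of MPI_COMM_WORLD when constructing parallel objects.

      MPI_Comm reordered_comm = mfem::ReorderRanksZCurve(MPI_COMM_WORLD);
      // ... build parallel objects on 'reordered_comm' instead of
      // MPI_COMM_WORLD; free it with MPI_Comm_free() when no longer needed ...
*/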


} // namespace mfem

#endif

#endif