#ifndef MFEM_COMMUNICATION
#define MFEM_COMMUNICATION

#include "../config/config.hpp"
   /// Set the MPI communicator to 'comm'.
   void SetComm(MPI_Comm comm) { MyComm = comm; }

   /// Return the MPI communicator.
   MPI_Comm GetComm() const { return MyComm; }
   /// Return the MPI rank within this object's communicator.
   int MyRank() const { int r; MPI_Comm_rank(MyComm, &r); return r; }
   /// Return the number of MPI ranks within this object's communicator.
   int NRanks() const { int s; MPI_Comm_size(MyComm, &s); return s; }

   /// Return true if I am master for group 'g'.
   bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }

   /// Return the rank of the group master for group 'g'.
   int GetGroupMasterRank(int g) const
   { return lproc_proc[groupmaster_lproc[g]]; }

   /// Save the data in a stream.
   void Save(std::ostream &out) const;
   /// Load the data from a stream.
   void Load(std::istream &in);
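For orientation, here is a minimal sketch (not part of the header) of how these accessors are typically queried; the helper name PrintGroupSummary and the assumption that the GroupTopology has already been populated (e.g. by a ParMesh) are illustrative only.

#include "mfem.hpp"
#include <iostream>

// Illustrative helper: summarize an already-created GroupTopology.
void PrintGroupSummary(const mfem::GroupTopology &gt)
{
   std::cout << "rank " << gt.MyRank() << "/" << gt.NRanks()
             << ": " << gt.NGroups() << " groups\n";
   for (int g = 1; g < gt.NGroups(); g++) // group 0 is the purely local group
   {
      std::cout << "  group " << g << ": " << gt.GetGroupSize(g) << " ranks,"
                << " master rank " << gt.GetGroupMasterRank(g)
                << (gt.IAmMaster(g) ? " (this rank)" : "") << "\n";
   }
}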
   /** Perform the reduction operation Op to the entries of group 'group'
       using the values from the buffer 'buf'. */
   template <class T>
   const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group,
                                  int layout, void (*Op)(OpData<T>)) const;
   /// Begin a broadcast within each group where the master is the root.
   template <class T>
   void BcastBegin(T *ldata, int layout) const;

   /// Finalize a broadcast started with BcastBegin().
   template <class T>
   void BcastEnd(T *ldata, int layout) const;
   /// Broadcast within each group where the master is the root.
   template <class T>
   void Bcast(T *ldata, int layout) const
   {
      BcastBegin(ldata, layout);
      BcastEnd(ldata, layout);
   }

   /// Broadcast within each group where the master is the root.
   template <class T>
   void Bcast(T *ldata) const { Bcast<T>(ldata, 0); }

   /// Broadcast within each group where the master is the root.
   template <class T>
   void Bcast(Array<T> &ldata) const { Bcast<T>((T *)ldata); }
   /// Begin reduction operation within each group where the master is the root.
   template <class T>
   void ReduceBegin(const T *ldata) const;

   /// Finalize reduction operation started with ReduceBegin().
   template <class T>
   void ReduceEnd(T *ldata, int layout, void (*Op)(OpData<T>)) const;

   /// Reduce within each group where the master is the root.
   template <class T>
   void Reduce(Array<T> &ldata, void (*Op)(OpData<T>)) const
   { Reduce<T>((T *)ldata, Op); }
   /// Reduce operation Sum, instantiated for int and double.
   template <class T> static void Sum(OpData<T>);
   /// Reduce operation Min, instantiated for int and double.
   template <class T> static void Min(OpData<T>);
   /// Reduce operation Max, instantiated for int and double.
   template <class T> static void Max(OpData<T>);
   /// Reduce operation bitwise OR, instantiated for int only.
   template <class T> static void BitOR(OpData<T>);
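The split-phase Begin/End pairs above allow unrelated local work to overlap the group communication. A minimal sketch of that pattern, assuming 'gcomm' is a fully set-up GroupCommunicator, 'ldata' uses layout 0 (one entry per local dof), and 'do_local_work' is a placeholder for independent computation:

#include "mfem.hpp"

// Illustrative only: accumulate shared entries on each group master, then
// broadcast the result back, overlapping both phases with local work.
void SyncSharedValues(const mfem::GroupCommunicator &gcomm, double *ldata,
                      void (*do_local_work)())
{
   gcomm.ReduceBegin(ldata);                 // post sends toward the masters
   do_local_work();                          // independent work overlaps here
   gcomm.ReduceEnd(ldata, 0, mfem::GroupCommunicator::Sum<double>);

   gcomm.BcastBegin(ldata, 0);               // masters send reduced values out
   do_local_work();
   gcomm.BcastEnd(ldata, 0);
}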
   /// Non-blocking send to processor 'rank'. Returns immediately.
   void Isend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Isend((void*)data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                &send_request);
   }

   /// Non-blocking synchronous send to processor 'rank'. Returns immediately.
   void Issend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Issend((void*)data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                 &send_request);
   }
   /// Helper to send all messages in a rank-to-message map container.
   template<typename MapT>
   static void IsendAll(MapT& rank_msg, MPI_Comm comm)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         it->second.Isend(it->first, comm);
      }
   }
   template<typename MapT>
   static void WaitAllSent(MapT& rank_msg)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
         it->second.Clear();
      }
   }
   /// Return true if all messages were sent; otherwise return false, without waiting.
   template<typename MapT>
   static bool TestAllSent(MapT& rank_msg)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         int sent;
         MPI_Test(&it->second.send_request, &sent, MPI_STATUS_IGNORE);
         if (!sent) { return false; }
      }
      return true;
   }
   /** Blocking probe for incoming message of this type from any rank. Returns
       the rank and message size. */
   static void Probe(int &rank, int &size, MPI_Comm comm)
   {
      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
   }
   /** Non-blocking probe for incoming message of this type from any rank. If
       there is an incoming message, returns true and sets 'rank' and 'size'.
       Otherwise returns false. */
   static bool IProbe(int &rank, int &size, MPI_Comm comm)
   {
      int flag;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
      if (!flag) { return false; }
      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
      return true;
   }
   /// Post-probe receive from processor 'rank' of message size 'size'.
   void Recv(int rank, int size, MPI_Comm comm)
   {
      MFEM_ASSERT(size >= 0, "");
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*)data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
      int count;
      MPI_Get_count(&status, MPI_BYTE, &count);
      MFEM_VERIFY(count == size, "");
      Decode(rank);
   }
   /// Like Recv(), but throw away the message.
   void RecvDrop(int rank, int size, MPI_Comm comm)
   {
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*)data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
   }
   /// Helper to receive all messages in a rank-to-message map container.
   template<typename MapT>
   static void RecvAll(MapT& rank_msg, MPI_Comm comm)
   {
      int recv_left = rank_msg.size();
      while (recv_left > 0)
      {
         int rank, size;
         Probe(rank, size, comm);
         MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
                     " (tag " << Tag << ") from rank " << rank);
         rank_msg[rank].Recv(rank, size, comm);
         --recv_left;
      }
   }
484 "WaitAllSent was not called after Isend");
491 "Cannot copy message with a pending send.");
int WorldSize() const
Return MPI_COMM_WORLD's size.
int GetGroupMasterRank(int g) const
Return the rank of the group master for group 'g'.
void Create(ListOfIntegerSets &groups, int mpitag)
Set up the group topology given the list of sets of shared entities.
void ReduceEnd(T *ldata, int layout, void(*Op)(OpData< T >)) const
Finalize reduction operation started with ReduceBegin().
int Size() const
Return the logical size of the array.
void Reduce(T *ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
void GetNeighborLDofTable(Table &nbr_ldof) const
Dofs to be received from communication neighbors.
int GetGroupMasterGroup(int g) const
Return the group number in the master for group 'g'.
void Bcast(T *ldata, int layout) const
Broadcast within each group where the master is the root.
template <class Type> struct MPITypeMap
Helper struct to convert a C++ type to an MPI type.
void GetNeighborLTDofTable(Table &nbr_ltdof) const
Dofs to be sent to communication neighbors.
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
GroupTopology(MPI_Comm comm)
Constructor given the MPI communicator 'comm'.
int NRanks() const
Return the number of MPI ranks within this object's communicator.
void Create(const Array< int > &ldof_group)
Initialize the communicator from a local-dof to group map. Finalize() is called internally.
GroupTopology()
Constructor with the MPI communicator = 0.
int GetGroupSize(int g) const
Get the number of processors in a group.
static const MPI_Datatype mpi_type
const int * GetGroup(int g) const
Return a pointer to a list of neighbors for a given group. Neighbor 0 is the local processor...
const Table & GroupLDofTable() const
Read-only access to group-ldof Table.
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
void RecvDrop(int rank, int size, MPI_Comm comm)
Like Recv(), but throw away the message.
void Swap(GroupTopology &other)
Swap the internal data with another GroupTopology object.
const T * CopyGroupFromBuffer(const T *buf, T *ldata, int group, int layout) const
Copy the entries corresponding to the group group from the buffer buf to the local array ldata...
void Recv(int rank, int size, MPI_Comm comm)
Post-probe receive from processor 'rank' of message size 'size'.
void BcastEnd(T *ldata, int layout) const
Finalize a broadcast started with BcastBegin().
bool IAmMaster(int g) const
Return true if I am master for group 'g'.
VarMessage(const VarMessage &other)
void Bcast(T *ldata) const
Broadcast within each group where the master is the root.
class MPI_Session
A simple convenience class that calls MPI_Init() at construction and MPI_Finalize() at destruction...
class GroupCommunicator
Communicator performing operations within groups defined by a GroupTopology with arbitrary-size data ...
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
int GetNumNeighbors() const
Return the number of neighbors including the local processor.
int MyRank() const
Return the MPI rank within this object's communicator.
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
MPI_Comm GetComm() const
Return the MPI communicator.
bool Root() const
Return true if WorldRank() == 0.
T * CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const
Copy the entries corresponding to the group group from the local array ldata to the buffer buf...
virtual void Encode(int rank)
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
static void IsendAll(MapT &rank_msg, MPI_Comm comm)
Helper to send all messages in a rank-to-message map container.
void Reduce(Array< T > &ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
int Size() const
Returns the number of TYPE I elements.
GroupCommunicator(GroupTopology &gt, Mode m=byNeighbor)
Construct a GroupCommunicator object.
GroupTopology & GetGroupTopology()
Get a reference to the associated GroupTopology object.
static bool TestAllSent(MapT &rank_msg)
Return true if all messages in the map container were sent, otherwise return false, without waiting.
int GetGroupMaster(int g) const
Return the neighbor index of the group master for a given group. Neighbor 0 is the local processor...
void Copy(GroupTopology &copy) const
Copy the internal data to the external 'copy'.
void Save(std::ostream &out) const
Save the data in a stream.
int WorldRank() const
Return MPI_COMM_WORLD's rank.
const T * ReduceGroupFromBuffer(const T *buf, T *ldata, int group, int layout, void(*Op)(OpData< T >)) const
Perform the reduction operation Op to the entries of group group using the values from the buffer buf...
virtual void Decode(int rank)
int GetNeighborRank(int i) const
Return the MPI rank of neighbor 'i'.
static const MPI_Datatype mpi_type
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
GroupCommunicator::byGroup
Communications are performed one group at a time.
static void RecvAll(MapT &rank_msg, MPI_Comm comm)
Helper to receive all messages in a rank-to-message map container.
void SetLTDofTable(const Array< int > &ldof_ltdof)
Initialize the internal group_ltdof Table.
Table & GroupLDofTable()
Fill-in the returned Table reference to initialize the GroupCommunicator then call Finalize()...
void BcastBegin(T *ldata, int layout) const
Begin a broadcast within each group where the master is the root.
template <class T> struct GroupCommunicator::OpData
Data structure on which we define reduce operations. The data is associated with (and the operation i...
void Issend(int rank, MPI_Comm comm)
Non-blocking synchronous send to processor 'rank'. Returns immediately. Completion (MPI_Wait/Test) me...
~GroupCommunicator()
Destroy a GroupCommunicator object, deallocating internal data structures and buffers.
void Clear()
Clear the message and associated request.
MPI_Session(int &argc, char **&argv)
void Isend(int rank, MPI_Comm comm)
Non-blocking send to processor 'rank'. Returns immediately. Completion (as tested by MPI_Wait/Test) d...
void Bcast(Array< T > &ldata) const
Broadcast within each group where the master is the root.
int NGroups() const
Return the number of groups.
void ReduceBegin(const T *ldata) const
Begin reduction operation within each group where the master is the root.
void SetComm(MPI_Comm comm)
Set the MPI communicator to 'comm'.
static void WaitAllSent(MapT &rank_msg)
Helper to wait for all messages in a map container to be sent.
static bool IProbe(int &rank, int &size, MPI_Comm comm)
Non-blocking probe for incoming message of this type from any rank. If there is an incoming message...
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
void PrintInfo(std::ostream &out=mfem::out) const
Print information about the GroupCommunicator from all MPI ranks.
template <int Tag> struct VarMessage
Variable-length MPI message containing unspecific binary data.
void Load(std::istream &in)
Load the data from a stream.
const GroupTopology & GetGroupTopology() const
Get a const reference to the associated GroupTopology object.
static void Probe(int &rank, int &size, MPI_Comm comm)
Blocking probe for incoming message of this type from any rank. Returns the rank and message size...
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.
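Putting the pieces together, a hedged end-to-end sketch (the names MakeConsistent, ldof_group, and values are illustrative): build a GroupCommunicator from a GroupTopology and a local-dof-to-group map, then make shared values consistent by reducing onto the group masters and broadcasting back.

#include "mfem.hpp"

// Illustrative only: 'gt' is an existing GroupTopology (e.g. from a ParMesh),
// 'ldof_group' maps each local dof to its group, 'values' has one double per
// local dof (layout 0).
void MakeConsistent(mfem::GroupTopology &gt,
                    const mfem::Array<int> &ldof_group, double *values)
{
   mfem::GroupCommunicator gcomm(gt);
   gcomm.Create(ldof_group);   // fills the group-ldof Table and calls Finalize()

   gcomm.Reduce<double>(values, mfem::GroupCommunicator::Sum); // sum on masters
   gcomm.Bcast(values);        // distribute the reduced values to all copies
}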