12 #ifndef MFEM_COMMUNICATION
13 #define MFEM_COMMUNICATION
15 #include "../config/config.hpp"
84 void SetComm(MPI_Comm comm) { MyComm = comm; }
87 MPI_Comm
GetComm()
const {
return MyComm; }
90 int MyRank()
const {
int r; MPI_Comm_rank(MyComm, &r);
return r; }
93 int NRanks()
const {
int s; MPI_Comm_size(MyComm, &s);
return s; }
108 bool IAmMaster(
int g)
const {
return (groupmaster_lproc[g] == 0); }
116 {
return lproc_proc[groupmaster_lproc[g]]; }
129 void Save(std::ostream &
out)
const;
132 void Load(std::istream &in);
248 int layout,
void (*Op)(
OpData<T>))
const;
252 template <
class T>
void BcastBegin(T *ldata,
int layout)
const;
263 template <
class T>
void BcastEnd(T *ldata,
int layout)
const;
270 template <
class T>
void Bcast(T *ldata,
int layout)
const
277 template <
class T>
void Bcast(T *ldata)
const { Bcast<T>(ldata, 0); }
280 { Bcast<T>((T *)ldata); }
289 template <
class T>
void ReduceBegin(
const T *ldata)
const;
304 template <
class T>
void ReduceEnd(T *ldata,
int layout,
305 void (*Op)(OpData<T>))
const;
319 { Reduce<T>((T *)ldata, Op); }
322 template <
class T>
static void Sum(OpData<T>);
324 template <
class T>
static void Min(OpData<T>);
326 template <
class T>
static void Max(OpData<T>);
328 template <
class T>
static void BitOR(OpData<T>);
353 MPI_Isend((
void*)
data.data(),
data.length(), MPI_BYTE, rank, Tag, comm,
363 MPI_Issend((
void*)
data.data(),
data.length(), MPI_BYTE, rank, Tag, comm,
368 template<
typename MapT>
369 static void IsendAll(MapT& rank_msg, MPI_Comm comm)
371 for (
auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
373 it->second.Isend(it->first, comm);
378 template<
typename MapT>
381 for (
auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
383 MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
390 template<
typename MapT>
393 for (
auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
400 if (!sent) {
return false; }
409 static void Probe(
int &rank,
int &size, MPI_Comm comm)
412 MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
413 rank = status.MPI_SOURCE;
414 MPI_Get_count(&status, MPI_BYTE, &size);
420 static bool IProbe(
int &rank,
int &size, MPI_Comm comm)
424 MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
425 if (!flag) {
return false; }
427 rank = status.MPI_SOURCE;
428 MPI_Get_count(&status, MPI_BYTE, &size);
433 void Recv(
int rank,
int size, MPI_Comm comm)
435 MFEM_ASSERT(size >= 0,
"");
438 MPI_Recv((
void*)
data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
441 MPI_Get_count(&status, MPI_BYTE, &count);
442 MFEM_VERIFY(count == size,
"");
452 MPI_Recv((
void*)
data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
457 template<
typename MapT>
458 static void RecvAll(MapT& rank_msg, MPI_Comm comm)
460 int recv_left = rank_msg.size();
461 while (recv_left > 0)
464 Probe(rank, size, comm);
465 MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(),
"Unexpected message"
466 " (tag " << Tag <<
") from rank " << rank);
468 rank_msg[rank].Recv(rank, size, comm);
481 "WaitAllSent was not called after Isend");
488 "Cannot copy message with a pending send.");
int WorldSize() const
Return MPI_COMM_WORLD's size.
int GetGroupMasterRank(int g) const
Return the rank of the group master for group 'g'.
void Create(ListOfIntegerSets &groups, int mpitag)
Set up the group topology given the list of sets of shared entities.
void ReduceEnd(T *ldata, int layout, void(*Op)(OpData< T >)) const
Finalize reduction operation started with ReduceBegin().
int Size() const
Return the logical size of the array.
void Reduce(T *ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
void GetNeighborLDofTable(Table &nbr_ldof) const
Dofs to be received from communication neighbors.
int GetGroupMasterGroup(int g) const
Return the group number in the master for group 'g'.
void Bcast(T *ldata, int layout) const
Broadcast within each group where the master is the root.
Helper struct to convert a C++ type to an MPI type.
void GetNeighborLTDofTable(Table &nbr_ltdof) const
Dofs to be sent to communication neighbors.
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
GroupTopology(MPI_Comm comm)
Constructor given the MPI communicator 'comm'.
int NRanks() const
Return the number of MPI ranks within this object's communicator.
void Create(const Array< int > &ldof_group)
Initialize the communicator from a local-dof to group map. Finalize() is called internally.
GroupTopology()
Constructor with the MPI communicator = 0.
int GetGroupSize(int g) const
Get the number of processors in a group.
static const MPI_Datatype mpi_type
const int * GetGroup(int g) const
Return a pointer to a list of neighbors for a given group. Neighbor 0 is the local processor...
const Table & GroupLDofTable() const
Read-only access to group-ldof Table.
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
void RecvDrop(int rank, int size, MPI_Comm comm)
Like Recv(), but throw away the message.
const T * CopyGroupFromBuffer(const T *buf, T *ldata, int group, int layout) const
Copy the entries corresponding to the group group from the buffer buf to the local array ldata...
void Recv(int rank, int size, MPI_Comm comm)
Post-probe receive from processor 'rank' of message size 'size'.
void BcastEnd(T *ldata, int layout) const
Finalize a broadcast started with BcastBegin().
bool IAmMaster(int g) const
Return true if I am master for group 'g'.
VarMessage(const VarMessage &other)
void Bcast(T *ldata) const
Broadcast within each group where the master is the root.
A simple convenience class that calls MPI_Init() at construction and MPI_Finalize() at destruction...
Communicator performing operations within groups defined by a GroupTopology with arbitrary-size data ...
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
int GetNumNeighbors() const
Return the number of neighbors including the local processor.
int MyRank() const
Return the MPI rank within this object's communicator.
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
MPI_Comm GetComm() const
Return the MPI communicator.
bool Root() const
Return true if WorldRank() == 0.
T * CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const
Copy the entries corresponding to the group group from the local array ldata to the buffer buf...
virtual void Encode(int rank)
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
static void IsendAll(MapT &rank_msg, MPI_Comm comm)
Helper to send all messages in a rank-to-message map container.
void Reduce(Array< T > &ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
int Size() const
Returns the number of TYPE I elements.
GroupCommunicator(GroupTopology &gt, Mode m=byNeighbor)
Construct a GroupCommunicator object.
GroupTopology & GetGroupTopology()
Get a reference to the associated GroupTopology object.
static bool TestAllSent(MapT &rank_msg)
Return true if all messages in the map container were sent, otherwise return false, without waiting.
int GetGroupMaster(int g) const
Return the neighbor index of the group master for a given group. Neighbor 0 is the local processor...
void Copy(GroupTopology &copy) const
Copy the internal data to the external 'copy'.
void Save(std::ostream &out) const
Save the data in a stream.
int WorldRank() const
Return MPI_COMM_WORLD's rank.
const T * ReduceGroupFromBuffer(const T *buf, T *ldata, int group, int layout, void(*Op)(OpData< T >)) const
Perform the reduction operation Op to the entries of group group using the values from the buffer buf...
virtual void Decode(int rank)
int GetNeighborRank(int i) const
Return the MPI rank of neighbor 'i'.
static const MPI_Datatype mpi_type
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
Communications are performed one group at a time.
static void RecvAll(MapT &rank_msg, MPI_Comm comm)
Helper to receive all messages in a rank-to-message map container.
void SetLTDofTable(const Array< int > &ldof_ltdof)
Initialize the internal group_ltdof Table.
Table & GroupLDofTable()
Fill-in the returned Table reference to initialize the GroupCommunicator then call Finalize()...
void BcastBegin(T *ldata, int layout) const
Begin a broadcast within each group where the master is the root.
Data structure on which we define reduce operations. The data is associated with (and the operation i...
void Issend(int rank, MPI_Comm comm)
Non-blocking synchronous send to processor 'rank'. Returns immediately. Completion (MPI_Wait/Test) me...
~GroupCommunicator()
Destroy a GroupCommunicator object, deallocating internal data structures and buffers.
void Clear()
Clear the message and associated request.
MPI_Session(int &argc, char **&argv)
void Isend(int rank, MPI_Comm comm)
Non-blocking send to processor 'rank'. Returns immediately. Completion (as tested by MPI_Wait/Test) d...
void Bcast(Array< T > &ldata) const
Broadcast within each group where the master is the root.
int NGroups() const
Return the number of groups.
void ReduceBegin(const T *ldata) const
Begin reduction operation within each group where the master is the root.
void SetComm(MPI_Comm comm)
Set the MPI communicator to 'comm'.
static void WaitAllSent(MapT &rank_msg)
Helper to wait for all messages in a map container to be sent.
static bool IProbe(int &rank, int &size, MPI_Comm comm)
Non-blocking probe for incoming message of this type from any rank. If there is an incoming message...
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
void PrintInfo(std::ostream &out=mfem::out) const
Print information about the GroupCommunicator from all MPI ranks.
Variable-length MPI message containing unspecific binary data.
void Load(std::istream &in)
Load the data from a stream.
const GroupTopology & GetGroupTopology() const
Get a const reference to the associated GroupTopology object.
static void Probe(int &rank, int &size, MPI_Comm comm)
Blocking probe for incoming message of this type from any rank. Returns the rank and message size...
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.