#ifndef MFEM_COMMUNICATION
#define MFEM_COMMUNICATION
static void Init(int &argc, char **&argv,
                 int required = default_thread_required,
                 int *provided = nullptr)
{ Init(&argc, &argv, required, provided); }
static void Init(int *argc = nullptr, char ***argv = nullptr,
                 int required = default_thread_required,
                 int *provided = nullptr)
{
   if (required == MPI_THREAD_SINGLE)
   {
      int mpi_err = MPI_Init(argc, argv);
      MFEM_VERIFY(!mpi_err, "error in MPI_Init()!");
      if (provided) { *provided = MPI_THREAD_SINGLE; }
   }
   else
   {
      int mpi_provided;
      int mpi_err = MPI_Init_thread(argc, argv, required, &mpi_provided);
      MFEM_VERIFY(!mpi_err, "error in MPI_Init_thread()!");
      if (provided) { *provided = mpi_provided; }
   }
}
int mpi_is_initialized;
int mpi_err = MPI_Initialized(&mpi_is_initialized);
return (mpi_err == MPI_SUCCESS) && mpi_is_initialized;
int mpi_is_finalized;
int mpi_err = MPI_Finalized(&mpi_is_finalized);
return (mpi_err == MPI_SUCCESS) && mpi_is_finalized;
MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
MPI_Comm_size(MPI_COMM_WORLD, &world_size);
static Mpi &Singleton()
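A minimal usage sketch (not part of this header) showing how the Mpi singleton is typically used from an application's main(): Init() forwards to MPI_Init()/MPI_Init_thread(), and MPI_Finalize() runs automatically when the singleton is destroyed at program exit.

#include "mfem.hpp"
#include <iostream>

int main(int argc, char *argv[])
{
   mfem::Mpi::Init(argc, argv);   // initialize MPI once, at the start of main()

   if (mfem::Mpi::Root())         // true only on rank 0 of MPI_COMM_WORLD
   {
      std::cout << "world size: " << mfem::Mpi::WorldSize() << std::endl;
   }
   std::cout << "hello from rank " << mfem::Mpi::WorldRank() << std::endl;

   return 0;                      // MPI_Finalize() runs via the Mpi singleton's destructor
}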
void SetComm(MPI_Comm comm) { MyComm = comm; }
int MyRank() const { int r; MPI_Comm_rank(MyComm, &r); return r; }
int NRanks() const { int s; MPI_Comm_size(MyComm, &s); return s; }
bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }
int GetGroupMasterRank(int g) const { return lproc_proc[groupmaster_lproc[g]]; }
void Save(std::ostream &out) const;
void Load(std::istream &in);
template <class T>
const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group, int layout,
                               void (*Op)(OpData<T>)) const;
template <class T> void BcastBegin(T *ldata, int layout) const;
template <class T> void BcastEnd(T *ldata, int layout) const;
template <class T> void Bcast(T *ldata, int layout) const
{
   BcastBegin(ldata, layout);
   BcastEnd(ldata, layout);
}
template <class T> void ReduceBegin(const T *ldata) const;
template <class T> void ReduceEnd(T *ldata, int layout,
                                  void (*Op)(OpData<T>)) const;
template <class T> static void Sum(OpData<T>);
template <class T> static void Min(OpData<T>);
template <class T> static void Max(OpData<T>);
template <class T> static void BitOR(OpData<T>);
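A hedged usage sketch of the Bcast/Reduce pair declared above, assuming an already constructed GroupTopology gtopo and a local-dof-to-group Array<int> ldof_group (both application data, not defined in this header):

mfem::GroupCommunicator gcomm(gtopo);   // gtopo: an existing GroupTopology (assumed)
gcomm.Create(ldof_group);               // builds the group-ldof Table; calls Finalize() internally

mfem::Array<int> ldata(ldof_group.Size());
ldata = 0;
// ... fill the locally owned entries of ldata ...

// Accumulate the contributions of all group members on each group master,
// then broadcast the master's value back so every rank sees the same data.
gcomm.Reduce<int>(ldata, mfem::GroupCommunicator::Sum);
gcomm.Bcast(ldata);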
MPI_Isend((void*)data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
          &send_request);
MPI_Issend((void*)data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
           &send_request);
template<typename MapT>
static void IsendAll(MapT& rank_msg, MPI_Comm comm)
{
   for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
   {
      it->second.Isend(it->first, comm);
   }
}
template<typename MapT>
static void WaitAllSent(MapT& rank_msg)
{
   for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
   {
      MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
   }
}
template<typename MapT>
static bool TestAllSent(MapT& rank_msg)
{
   for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
   {
      int sent;
      MPI_Test(&it->second.send_request, &sent, MPI_STATUS_IGNORE);
      if (!sent) { return false; }
   }
   return true;
}
static void Probe(int &rank, int &size, MPI_Comm comm)
{
   MPI_Status status;
   MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
   rank = status.MPI_SOURCE;
   MPI_Get_count(&status, MPI_BYTE, &size);
}
static bool IProbe(int &rank, int &size, MPI_Comm comm)
{
   int flag;
   MPI_Status status;
   MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
   if (!flag) { return false; }
   rank = status.MPI_SOURCE;
   MPI_Get_count(&status, MPI_BYTE, &size);
   return true;
}
void Recv(int rank, int size, MPI_Comm comm)
{
   MFEM_ASSERT(size >= 0, "");
   data.resize(size);
   MPI_Status status;
   MPI_Recv((void*)data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
   int count;
   MPI_Get_count(&status, MPI_BYTE, &count);
   MFEM_VERIFY(count == size, "");
   Decode(rank);
}

void RecvDrop(int rank, int size, MPI_Comm comm)
{
   data.resize(size);
   MPI_Status status;
   MPI_Recv((void*)data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
   // the received bytes are discarded without calling Decode()
}
template<typename MapT>
static void RecvAll(MapT& rank_msg, MPI_Comm comm)
{
   int recv_left = rank_msg.size();
   while (recv_left > 0)
   {
      int rank, size;
      Probe(rank, size, comm);
      MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
                  " (tag " << Tag << ") from rank " << rank);
      rank_msg[rank].Recv(rank, size, comm);
      --recv_left;
   }
}
// in the destructor:
MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
            "WaitAllSent was not called after Isend");
// in the copy constructor:
MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
            "Cannot copy message with a pending send.");
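A hedged end-to-end sketch of the VarMessage exchange pattern shown above, assuming VarMessage is the class template VarMessage<Tag> with a std::string data buffer (as the fragments suggest); the derived type MyMsg, its payload, and the tag value 314 are illustrative only:

#include "mfem.hpp"
#include <cstring>
#include <map>
#include <vector>

struct MyMsg : public mfem::VarMessage<314>   // 314: arbitrary example tag
{
   std::vector<double> values;                // application payload (illustrative)

   // Serialize 'values' into the raw byte buffer 'data' before sending.
   virtual void Encode(int /*rank*/) override
   {
      data.assign(reinterpret_cast<const char*>(values.data()),
                  values.size()*sizeof(double));
   }
   // Deserialize the received bytes back into 'values'.
   virtual void Decode(int /*rank*/) override
   {
      values.resize(data.size()/sizeof(double));
      std::memcpy(values.data(), data.data(), data.size());
   }
};

// Exchange one message with each neighbor rank (symmetric communication assumed).
void ExchangeWithNeighbors(const std::vector<int> &neighbors)
{
   std::map<int, MyMsg> send_msg, recv_msg;
   for (int nbr : neighbors)
   {
      send_msg[nbr].values = { double(nbr) };   // illustrative payload
      recv_msg[nbr];                            // expect one message from 'nbr'
   }
   MyMsg::IsendAll(send_msg, MPI_COMM_WORLD);   // Encode() + MPI_Isend() per rank
   MyMsg::RecvAll(recv_msg, MPI_COMM_WORLD);    // Probe() + Recv() until all arrive
   MyMsg::WaitAllSent(send_msg);                // complete the non-blocking sends
}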
int Size() const
Return the logical size of the array.
Communicator performing operations within groups defined by a GroupTopology with arbitrary-size data ...
const Table & GroupLDofTable() const
Read-only access to group-ldof Table.
GroupCommunicator(const GroupTopology &gt, Mode m=byNeighbor)
Construct a GroupCommunicator object.
void GetNeighborLTDofTable(Table &nbr_ltdof) const
Dofs to be sent to communication neighbors.
Table & GroupLDofTable()
Fill-in the returned Table reference to initialize the GroupCommunicator then call Finalize().
@ byGroup
Communications are performed one group at a time.
void GetNeighborLDofTable(Table &nbr_ldof) const
Dofs to be received from communication neighbors.
void BcastBegin(T *ldata, int layout) const
Begin a broadcast within each group where the master is the root.
const T * ReduceGroupFromBuffer(const T *buf, T *ldata, int group, int layout, void(*Op)(OpData< T >)) const
Perform the reduction operation Op to the entries of group group using the values from the buffer buf...
void BcastEnd(T *ldata, int layout) const
Finalize a broadcast started with BcastBegin().
void ReduceBegin(const T *ldata) const
Begin reduction operation within each group where the master is the root.
void ReduceEnd(T *ldata, int layout, void(*Op)(OpData< T >)) const
Finalize reduction operation started with ReduceBegin().
void Reduce(Array< T > &ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
const GroupTopology & GetGroupTopology() const
Get a const reference to the associated GroupTopology object.
const GroupTopology & gtopo
void Reduce(T *ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
T * CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const
Copy the entries corresponding to the group group from the local array ldata to the buffer buf.
const T * CopyGroupFromBuffer(const T *buf, T *ldata, int group, int layout) const
Copy the entries corresponding to the group group from the buffer buf to the local array ldata.
const GroupTopology & GetGroupTopology()
Get a reference to the associated GroupTopology object.
void Create(const Array< int > &ldof_group)
Initialize the communicator from a local-dof to group map. Finalize() is called internally.
~GroupCommunicator()
Destroy a GroupCommunicator object, deallocating internal data structures and buffers.
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
void Bcast(T *ldata, int layout) const
Broadcast within each group where the master is the root.
void SetLTDofTable(const Array< int > &ldof_ltdof)
Initialize the internal group_ltdof Table.
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
void Bcast(Array< T > &ldata) const
Broadcast within each group where the master is the root.
void Bcast(T *ldata) const
Broadcast within each group where the master is the root.
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
void PrintInfo(std::ostream &out=mfem::out) const
Print information about the GroupCommunicator from all MPI ranks.
int GetNeighborRank(int i) const
Return the MPI rank of neighbor 'i'.
void SetComm(MPI_Comm comm)
Set the MPI communicator to 'comm'.
int NRanks() const
Return the number of MPI ranks within this object's communicator.
bool IAmMaster(int g) const
Return true if I am master for group 'g'.
void Swap(GroupTopology &other)
Swap the internal data with another GroupTopology object.
void Save(std::ostream &out) const
Save the data in a stream.
const int * GetGroup(int g) const
Return a pointer to a list of neighbors for a given group. Neighbor 0 is the local processor.
int MyRank() const
Return the MPI rank within this object's communicator.
int GetGroupSize(int g) const
Get the number of processors in a group.
int GetGroupMaster(int g) const
Return the neighbor index of the group master for a given group. Neighbor 0 is the local processor.
void Load(std::istream &in)
Load the data from a stream.
GroupTopology()
Constructor with the MPI communicator = 0.
GroupTopology(MPI_Comm comm)
Constructor given the MPI communicator 'comm'.
int GetGroupMasterRank(int g) const
Return the rank of the group master for group 'g'.
void Create(ListOfIntegerSets &groups, int mpitag)
Set up the group topology given the list of sets of shared entities.
MPI_Comm GetComm() const
Return the MPI communicator.
void Copy(GroupTopology &copy) const
Copy the internal data to the external 'copy'.
int GetNumNeighbors() const
Return the number of neighbors including the local processor.
int NGroups() const
Return the number of groups.
int GetGroupMasterGroup(int g) const
Return the group number in the master for group 'g'.
A simple convenience class based on the Mpi singleton class above. Preserved for backward compatibili...
bool Root() const
Return true if WorldRank() == 0.
int WorldSize() const
Return MPI_COMM_WORLD's size.
MPI_Session(int &argc, char **&argv)
int WorldRank() const
Return MPI_COMM_WORLD's rank.
A simple singleton class that calls MPI_Init() at construction and MPI_Finalize() at destruction....
static MFEM_EXPORT int default_thread_required
Default level of thread support for MPI_Init_thread.
static bool IsFinalized()
Return true if MPI has been finalized.
static bool Root()
Return true if the rank in MPI_COMM_WORLD is zero.
static void Finalize()
Finalize MPI (if it has been initialized and not yet already finalized).
static void Init(int *argc=nullptr, char ***argv=nullptr, int required=default_thread_required, int *provided=nullptr)
Singleton creation with Mpi::Init().
static bool IsInitialized()
Return true if MPI has been initialized.
static int WorldRank()
Return the MPI rank in MPI_COMM_WORLD.
static int WorldSize()
Return the size of MPI_COMM_WORLD.
static void Init(int &argc, char **&argv, int required=default_thread_required, int *provided=nullptr)
Singleton creation with Mpi::Init(argc, argv).
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
int Size() const
Returns the number of TYPE I elements.
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
Data structure on which we define reduce operations. The data is associated with (and the operation i...
static MFEM_EXPORT const MPI_Datatype mpi_type
Helper struct to convert a C++ type to an MPI type.
Variable-length MPI message containing unspecific binary data.
void Issend(int rank, MPI_Comm comm)
Non-blocking synchronous send to processor 'rank'. Returns immediately. Completion (MPI_Wait/Test) me...
static void WaitAllSent(MapT &rank_msg)
Helper to wait for all messages in a map container to be sent.
virtual void Encode(int rank)=0
static bool TestAllSent(MapT &rank_msg)
Return true if all messages in the map container were sent, otherwise return false,...
void Isend(int rank, MPI_Comm comm)
Non-blocking send to processor 'rank'. Returns immediately. Completion (as tested by MPI_Wait/Test) d...
static void IsendAll(MapT &rank_msg, MPI_Comm comm)
Helper to send all messages in a rank-to-message map container.
virtual void Decode(int rank)=0
static void RecvAll(MapT &rank_msg, MPI_Comm comm)
Helper to receive all messages in a rank-to-message map container.
VarMessage(const VarMessage &other)
void Clear()
Clear the message and associated request.
void RecvDrop(int rank, int size, MPI_Comm comm)
Like Recv(), but throw away the message.
static bool IProbe(int &rank, int &size, MPI_Comm comm)
Non-blocking probe for incoming message of this type from any rank. If there is an incoming message,...
void Recv(int rank, int size, MPI_Comm comm)
Post-probe receive from processor 'rank' of message size 'size'.
static void Probe(int &rank, int &size, MPI_Comm comm)
Blocking probe for incoming message of this type from any rank. Returns the rank and message size.
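A hedged sketch of a polling alternative to the blocking Probe()/RecvAll() receive loop, using IProbe() so other work can be overlapped with communication; MyMsg, send_msg and recv_msg reuse the illustrative names from the VarMessage sketch earlier on this page:

void PollAndReceive(std::map<int, MyMsg> &send_msg, std::map<int, MyMsg> &recv_msg)
{
   int recv_left = recv_msg.size();
   while (recv_left > 0)
   {
      int rank, size;
      if (MyMsg::IProbe(rank, size, MPI_COMM_WORLD))
      {
         recv_msg[rank].Recv(rank, size, MPI_COMM_WORLD);  // post-probe receive
         --recv_left;
      }
      else
      {
         // no message pending yet: overlap other useful work here before polling again
      }
   }
   MyMsg::WaitAllSent(send_msg);  // finally complete our own non-blocking sends
}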