12 #ifndef MFEM_COMMUNICATION
13 #define MFEM_COMMUNICATION
15 #include "../config/config.hpp"
36 static void Init() { Init_(NULL, NULL); }
38 static void Init(
int &argc,
char **&argv) { Init_(&argc, &argv); }
47 int mpi_is_initialized;
48 int mpi_err = MPI_Initialized(&mpi_is_initialized);
49 return (mpi_err == MPI_SUCCESS) && mpi_is_initialized;
55 int mpi_err = MPI_Finalized(&mpi_is_finalized);
56 return (mpi_err == MPI_SUCCESS) && mpi_is_finalized;
62 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
69 MPI_Comm_size(MPI_COMM_WORLD, &world_size);
76 static void Init_(
int *argc,
char ***argv)
140 void SetComm(MPI_Comm comm) { MyComm = comm; }
146 int MyRank()
const {
int r; MPI_Comm_rank(MyComm, &r);
return r; }
149 int NRanks()
const {
int s; MPI_Comm_size(MyComm, &s);
return s; }
155 int NGroups()
const {
return group_lproc.Size(); }
164 bool IAmMaster(
int g)
const {
return (groupmaster_lproc[g] == 0); }
172 {
return lproc_proc[groupmaster_lproc[g]]; }
182 const int *
GetGroup(
int g)
const {
return group_lproc.GetRow(g); }
185 void Save(std::ostream &
out)
const;
188 void Load(std::istream &in);
255 void SetLTDofTable(
const Array<int> &ldof_ltdof);
264 void GetNeighborLTDofTable(
Table &nbr_ltdof)
const;
267 void GetNeighborLDofTable(
Table &nbr_ldof)
const;
290 T *CopyGroupToBuffer(
const T *ldata, T *buf,
int group,
int layout)
const;
297 const T *CopyGroupFromBuffer(
const T *buf, T *ldata,
int group,
306 const T *ReduceGroupFromBuffer(
const T *buf, T *ldata,
int group,
307 int layout,
void (*Op)(
OpData<T>))
const;
311 template <
class T>
void BcastBegin(T *ldata,
int layout)
const;
322 template <
class T>
void BcastEnd(T *ldata,
int layout)
const;
329 template <
class T>
void Bcast(T *ldata,
int layout)
const
331 BcastBegin(ldata, layout);
332 BcastEnd(ldata, layout);
336 template <
class T>
void Bcast(T *ldata)
const { Bcast<T>(ldata, 0); }
339 { Bcast<T>((T *)ldata); }
348 template <
class T>
void ReduceBegin(
const T *ldata)
const;
363 template <
class T>
void ReduceEnd(T *ldata,
int layout,
364 void (*Op)(OpData<T>))
const;
373 ReduceEnd(ldata, 0, Op);
378 { Reduce<T>((T *)ldata, Op); }
381 template <
class T>
static void Sum(OpData<T>);
383 template <
class T>
static void Min(OpData<T>);
385 template <
class T>
static void Max(OpData<T>);
387 template <
class T>
static void BitOR(OpData<T>);
412 MPI_Isend((
void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
422 MPI_Issend((
void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
427 template<
typename MapT>
428 static void IsendAll(MapT& rank_msg, MPI_Comm comm)
430 for (
auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
432 it->second.Isend(it->first, comm);
437 template<
typename MapT>
440 for (
auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
442 MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
449 template<
typename MapT>
452 for (
auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
459 if (!sent) {
return false; }
468 static void Probe(
int &rank,
int &size, MPI_Comm comm)
471 MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
472 rank = status.MPI_SOURCE;
473 MPI_Get_count(&status, MPI_BYTE, &size);
479 static bool IProbe(
int &rank,
int &size, MPI_Comm comm)
483 MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
484 if (!flag) {
return false; }
486 rank = status.MPI_SOURCE;
487 MPI_Get_count(&status, MPI_BYTE, &size);
492 void Recv(
int rank,
int size, MPI_Comm comm)
494 MFEM_ASSERT(size >= 0,
"");
497 MPI_Recv((
void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
500 MPI_Get_count(&status, MPI_BYTE, &count);
501 MFEM_VERIFY(count == size,
"");
511 MPI_Recv((
void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
516 template<
typename MapT>
517 static void RecvAll(MapT& rank_msg, MPI_Comm comm)
519 int recv_left = rank_msg.size();
520 while (recv_left > 0)
523 Probe(rank, size, comm);
524 MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(),
"Unexpected message"
525 " (tag " << Tag <<
") from rank " << rank);
527 rank_msg[rank].Recv(rank, size, comm);
535 void Clear() { data.clear(); send_request = MPI_REQUEST_NULL; }
539 MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
540 "WaitAllSent was not called after Isend");
544 : data(other.data), send_request(other.send_request)
546 MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
547 "Cannot copy message with a pending send.");
int WorldSize() const
Return MPI_COMM_WORLD's size.
int GetGroupMasterRank(int g) const
Return the rank of the group master for group 'g'.
void Reduce(T *ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
int GetGroupMasterGroup(int g) const
Return the group number in the master for group 'g'.
void Bcast(T *ldata, int layout) const
Broadcast within each group where the master is the root.
Helper struct to convert a C++ type to an MPI type.
static void Init(int &argc, char **&argv)
Singleton creation with Mpi::Init(argc,argv);.
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
GroupTopology(MPI_Comm comm)
Constructor given the MPI communicator 'comm'.
static void Finalize()
Finalize MPI (if it has been initialized and not yet already finalized).
int NRanks() const
Return the number of MPI ranks within this object's communicator.
GroupTopology()
Constructor with the MPI communicator = 0.
int GetGroupSize(int g) const
Get the number of processors in a group.
static const MPI_Datatype mpi_type
const int * GetGroup(int g) const
Return a pointer to a list of neighbors for a given group. Neighbor 0 is the local processor...
const Table & GroupLDofTable() const
Read-only access to group-ldof Table.
static int WorldSize()
Return the size of MPI_COMM_WORLD.
static bool IsFinalized()
Return true if MPI has been finalized.
void RecvDrop(int rank, int size, MPI_Comm comm)
Like Recv(), but throw away the message.
void Recv(int rank, int size, MPI_Comm comm)
Post-probe receive from processor 'rank' of message size 'size'.
bool IAmMaster(int g) const
Return true if I am master for group 'g'.
VarMessage(const VarMessage &other)
void Bcast(T *ldata) const
Broadcast within each group where the master is the root.
A simple convenience class based on the Mpi singleton class above. Preserved for backward compatibility; new code should use the Mpi class instead.
Communicator performing operations within groups defined by a GroupTopology with arbitrary-size data associated with each group.
int GetNumNeighbors() const
Return the number of neighbors including the local processor.
int MyRank() const
Return the MPI rank within this object's communicator.
MPI_Comm GetComm() const
Return the MPI communicator.
bool Root() const
Return true if WorldRank() == 0.
virtual void Encode(int rank)
static void IsendAll(MapT &rank_msg, MPI_Comm comm)
Helper to send all messages in a rank-to-message map container.
void Reduce(Array< T > &ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
static void Init()
Singleton creation with Mpi::Init();.
GroupTopology & GetGroupTopology()
Get a reference to the associated GroupTopology object.
static bool TestAllSent(MapT &rank_msg)
Return true if all messages in the map container were sent, otherwise return false, without waiting.
int GetGroupMaster(int g) const
Return the neighbor index of the group master for a given group. Neighbor 0 is the local processor...
static bool IsInitialized()
Return true if MPI has been initialized.
static bool Root()
Return true if the rank in MPI_COMM_WORLD is zero.
void Swap(Array< T > &, Array< T > &)
int WorldRank() const
Return MPI_COMM_WORLD's rank.
virtual void Decode(int rank)
int GetNeighborRank(int i) const
Return the MPI rank of neighbor 'i'.
static const MPI_Datatype mpi_type
Communications are performed one group at a time.
static void RecvAll(MapT &rank_msg, MPI_Comm comm)
Helper to receive all messages in a rank-to-message map container.
Table & GroupLDofTable()
Fill-in the returned Table reference to initialize the GroupCommunicator then call Finalize()...
A simple singleton class that calls MPI_Init() at construction and MPI_Finalize() at destruction...
Data structure on which we define reduce operations. The data is associated with (and the operation is performed on) one group at a time.
void Issend(int rank, MPI_Comm comm)
Non-blocking synchronous send to processor 'rank'. Returns immediately. Completion (MPI_Wait/Test) means that the message was received.
void Clear()
Clear the message and associated request.
MPI_Session(int &argc, char **&argv)
void Isend(int rank, MPI_Comm comm)
Non-blocking send to processor 'rank'. Returns immediately. Completion (as tested by MPI_Wait/Test) does not mean the message was received — it may be buffered or still in transit.
void Bcast(Array< T > &ldata) const
Broadcast within each group where the master is the root.
int NGroups() const
Return the number of groups.
static int WorldRank()
Return the MPI rank in MPI_COMM_WORLD.
void SetComm(MPI_Comm comm)
Set the MPI communicator to 'comm'.
static void WaitAllSent(MapT &rank_msg)
Helper to wait for all messages in a map container to be sent.
static bool IProbe(int &rank, int &size, MPI_Comm comm)
Non-blocking probe for incoming message of this type from any rank. If there is an incoming message, returns true and sets 'rank' and 'size'; otherwise returns false.
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as std::cout; this can be changed.
Variable-length MPI message containing unspecific binary data.
const GroupTopology & GetGroupTopology() const
Get a const reference to the associated GroupTopology object.
static void Probe(int &rank, int &size, MPI_Comm comm)
Blocking probe for incoming message of this type from any rank. Returns the rank and message size...