#ifndef MFEM_COMMUNICATION
#define MFEM_COMMUNICATION

#include "../config/config.hpp"
void SetComm(MPI_Comm comm) { MyComm = comm; }
int MyRank() { int r; MPI_Comm_rank(MyComm, &r); return r; }
int NRanks() { int s; MPI_Comm_size(MyComm, &s); return s; }
bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }
int GetGroupMasterRank(int g) const { return lproc_proc[groupmaster_lproc[g]]; }
/// Save the data in a stream.
void Save(std::ostream &out) const;
/// Load the data from a stream.
void Load(std::istream &in);
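For illustration, a minimal sketch (not part of the header) of how these GroupTopology accessors are typically queried. It assumes an already populated GroupTopology 'gt' and uses NGroups(), which is not shown in the excerpt above.

void PrintGroupOwnership(mfem::GroupTopology &gt)
{
   // 'gt' is assumed to have been set up, e.g. via GroupTopology::Create().
   for (int g = 0; g < gt.NGroups(); g++)   // NGroups() assumed available
   {
      if (gt.IAmMaster(g))
      {
         mfem::out << "rank " << gt.MyRank() << '/' << gt.NRanks()
                   << " is the master of group " << g << '\n';
      }
      else
      {
         mfem::out << "group " << g << " is mastered by rank "
                   << gt.GetGroupMasterRank(g) << '\n';
      }
   }
}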
MPI_Request *requests;
Table nbr_send_groups, nbr_recv_groups;
template <class T>
const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group,
                               int layout, void (*Op)(OpData<T>)) const;
template <class T> void BcastBegin(T *ldata, int layout);
template <class T> void BcastEnd(T *ldata, int layout);
template <class T> void Bcast(T *ldata, int layout)
{ BcastBegin(ldata, layout); BcastEnd(ldata, layout); }
template <class T> void Bcast(T *ldata) { Bcast<T>(ldata, 0); }
template <class T> void ReduceBegin(const T *ldata);
template <class T> void ReduceEnd(T *ldata, int layout, void (*Op)(OpData<T>));
template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>))
{ Reduce<T>((T *)ldata, Op); }
template <class T> static void Sum(OpData<T>);
template <class T> static void Min(OpData<T>);
template <class T> static void Max(OpData<T>);
template <class T> static void BitOR(OpData<T>);
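As a usage sketch (not from the header): assuming a GroupCommunicator 'gcomm' that has already been set up via Create()/Finalize(), a group-wise sum followed by a broadcast of the result looks roughly like this.

void SynchronizeSum(mfem::GroupCommunicator &gcomm, mfem::Array<int> &ldata)
{
   // Accumulate all group members' contributions on each group master ...
   gcomm.Reduce<int>(ldata, mfem::GroupCommunicator::Sum);
   // ... then broadcast the master's values back to every group member.
   gcomm.Bcast(ldata);
}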
/// Non-blocking send to processor 'rank'.
void Isend(int rank, MPI_Comm comm)
{
   MPI_Isend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
             &send_request);
}
/// Helper to send all messages in a rank-to-message map container.
template<typename MapT>
static void IsendAll(MapT& rank_msg, MPI_Comm comm)
{
   typename MapT::iterator it;
   for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
   {
      it->second.Isend(it->first, comm);
   }
}
/// Helper to wait for all messages in a map container to be sent.
template<typename MapT>
static void WaitAllSent(MapT& rank_msg)
{
   typename MapT::iterator it;
   for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
   {
      MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
   }
}
static void Probe(int &rank, int &size, MPI_Comm comm)
{
   MPI_Status status;
   MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
   rank = status.MPI_SOURCE;
   MPI_Get_count(&status, MPI_BYTE, &size);
}
static bool IProbe(int &rank, int &size, MPI_Comm comm)
{
   int flag;
   MPI_Status status;
   MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
   if (!flag) { return false; }
   rank = status.MPI_SOURCE;
   MPI_Get_count(&status, MPI_BYTE, &size);
   return true;
}
/// Post-probe receive from processor 'rank' of message size 'size'.
void Recv(int rank, int size, MPI_Comm comm)
{
   MFEM_ASSERT(size >= 0, "");
   data.resize(size);
   MPI_Status status;
   MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
   int count;
   MPI_Get_count(&status, MPI_BYTE, &count);
   MFEM_VERIFY(count == size, "");
}
/// Helper to receive all messages in a rank-to-message map container.
template<typename MapT>
static void RecvAll(MapT& rank_msg, MPI_Comm comm)
{
   int recv_left = rank_msg.size();
   while (recv_left > 0)
   {
      int rank, size;
      Probe(rank, size, comm);
      MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
                  " (tag " << Tag << ") from rank " << rank);
      rank_msg[rank].Recv(rank, size, comm);
      --recv_left;
   }
}
virtual ~VarMessage()
{
   MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
               "WaitAllSent was not called after Isend");
}

VarMessage(const VarMessage &other)
   : data(other.data), send_request(other.send_request)
{
   MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
               "Cannot copy message with a pending send.");
}
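A sketch (not from the header) of the intended exchange pattern for a VarMessage-derived type, assuming VarMessage is parametrized by the integer tag used in the MPI calls above. 'NeighborMsg' and the tag value 1234 are invented for the example; a real message type would also serialize its payload into 'data' before sending and decode it after receiving.

#include <map>

// Hypothetical message type; the tag value is arbitrary for this example.
struct NeighborMsg : public mfem::VarMessage<1234> { };

void Exchange(std::map<int, NeighborMsg> &send_msgs,
              std::map<int, NeighborMsg> &recv_msgs, MPI_Comm comm)
{
   // recv_msgs must already contain one (empty) entry per rank we expect a
   // message from; otherwise RecvAll()'s "Unexpected message" assertion fires.
   NeighborMsg::IsendAll(send_msgs, comm);  // post all non-blocking sends
   NeighborMsg::RecvAll(recv_msgs, comm);   // probe and receive every expected message
   NeighborMsg::WaitAllSent(send_msgs);     // complete the sends before reusing the buffers
}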
/// Helper struct to convert a C++ type to an MPI type.
template <typename Type>
struct MPITypeMap { static const MPI_Datatype mpi_type; };
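A small sketch (not from the header) of how the type map is meant to be used, e.g. inside a generic reduction helper. It assumes 'T' is one of the types for which mpi_type is actually defined; int and double are the types this header's reduce operations mention.

template <typename T>
T GlobalSum(T local, MPI_Comm comm)
{
   T global;
   // MPITypeMap<T>::mpi_type supplies the MPI datatype matching T.
   MPI_Allreduce(&local, &global, 1, mfem::MPITypeMap<T>::mpi_type,
                 MPI_SUM, comm);
   return global;
}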
int WorldSize() const
Return MPI_COMM_WORLD's size.
int GetGroupMasterRank(int g) const
void Create(ListOfIntegerSets &groups, int mpitag)
int Size() const
Logical size of the array.
void Bcast(Array< T > &ldata)
Broadcast within each group where the master is the root.
int GetGroupMasterGroup(int g) const
void ReduceEnd(T *ldata, int layout, void(*Op)(OpData< T >))
Finalize reduction operation started with ReduceBegin().
Helper struct to convert a C++ type to an MPI type.
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
GroupTopology(MPI_Comm comm)
int GetGroupSize(int g) const
const int * GetGroup(int g) const
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
void Bcast(T *ldata)
Broadcast within each group where the master is the root.
void Reduce(Array< T > &ldata, void(*Op)(OpData< T >))
Reduce within each group where the master is the root.
const T * CopyGroupFromBuffer(const T *buf, T *ldata, int group, int layout) const
Copy the entries corresponding to group 'group' from the buffer 'buf' to the local array 'ldata'...
void Recv(int rank, int size, MPI_Comm comm)
Post-probe receive from processor 'rank' of message size 'size'.
bool IAmMaster(int g) const
VarMessage(const VarMessage &other)
A simple convenience class that calls MPI_Init() at construction and MPI_Finalize() at destruction...
Communicator performing operations within groups defined by a GroupTopology with arbitrary-size data ...
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
int GetNumNeighbors() const
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
bool Root() const
Return true if WorldRank() == 0.
T * CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const
Copy the entries corresponding to group 'group' from the local array 'ldata' to the buffer 'buf'...
void BcastBegin(T *ldata, int layout)
Begin a broadcast within each group where the master is the root.
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
static void IsendAll(MapT &rank_msg, MPI_Comm comm)
Helper to send all messages in a rank-to-message map container.
void ReduceBegin(const T *ldata)
Begin reduction operation within each group where the master is the root.
int Size() const
Returns the number of TYPE I elements.
GroupCommunicator(GroupTopology &gt, Mode m=byNeighbor)
Construct a GroupCommunicator object.
GroupTopology & GetGroupTopology()
Get a reference to the associated GroupTopology object.
int GetGroupMaster(int g) const
void Save(std::ostream &out) const
Save the data in a stream.
int WorldRank() const
Return MPI_COMM_WORLD's rank.
const T * ReduceGroupFromBuffer(const T *buf, T *ldata, int group, int layout, void(*Op)(OpData< T >)) const
Apply the reduction operation 'Op' to the entries of group 'group' using the values from the buffer 'buf'...
int GetNeighborRank(int i) const
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
Communications are performed one group at a time.
static void RecvAll(MapT &rank_msg, MPI_Comm comm)
Helper to receive all messages in a rank-to-message map container.
Table & GroupLDofTable()
Fill in the returned Table reference to initialize the GroupCommunicator, then call Finalize()...
void BcastEnd(T *ldata, int layout)
Finalize a broadcast started with BcastBegin().
Data structure on which we define reduce operations.
~GroupCommunicator()
Destroy a GroupCommunicator object, deallocating internal data structures and buffers.
MPI_Session(int &argc, char **&argv)
void Reduce(T *ldata, void(*Op)(OpData< T >))
Reduce within each group where the master is the root.
void Isend(int rank, MPI_Comm comm)
Non-blocking send to processor 'rank'.
void Create(Array< int > &ldof_group)
Initialize the communicator from a local-dof to group map. Finalize() is called internally.
void SetComm(MPI_Comm comm)
static void WaitAllSent(MapT &rank_msg)
Helper to wait for all messages in a map container to be sent.
void SetLTDofTable(Array< int > &ldof_ltdof)
Initialize the internal group_ltdof Table.
static bool IProbe(int &rank, int &size, MPI_Comm comm)
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
static const MPI_Datatype mpi_type
void PrintInfo(std::ostream &out=mfem::out) const
Print information about the GroupCommunicator from all MPI ranks.
Variable-length MPI message containing unspecific binary data.
void Load(std::istream &in)
Load the data from a stream.
static void Probe(int &rank, int &size, MPI_Comm comm)
void Bcast(T *ldata, int layout)
Broadcast within each group where the master is the root.
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.
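Tying a few of the listed pieces together, a minimal driver sketch (not from the header) that relies on MPI_Session for MPI setup and teardown:

int main(int argc, char *argv[])
{
   // MPI_Session calls MPI_Init() here and MPI_Finalize() when 'mpi' leaves scope.
   mfem::MPI_Session mpi(argc, argv);

   if (mpi.Root())   // true only for rank 0 in MPI_COMM_WORLD
   {
      mfem::out << "Running on " << mpi.WorldSize() << " MPI ranks\n";
   }

   // ... build a GroupTopology / GroupCommunicator and do the parallel work ...
   return 0;
}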