#ifndef MFEM_COMMUNICATION
#define MFEM_COMMUNICATION

#include "../config/config.hpp"
   /// Set the MPI communicator to 'comm'.
   void SetComm(MPI_Comm comm) { MyComm = comm; }

   /// Return the MPI communicator.
   MPI_Comm GetComm() const { return MyComm; }
   /// Return the MPI rank within this object's communicator.
   int MyRank() const { int r; MPI_Comm_rank(MyComm, &r); return r; }
   /// Return the number of MPI ranks within this object's communicator.
   int NRanks() const { int s; MPI_Comm_size(MyComm, &s); return s; }

   /// Return true if I am master for group 'g'.
   bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }

   /// Return the rank of the group master for group 'g'.
   int GetGroupMaster(int g) const { return lproc_proc[groupmaster_lproc[g]]; }

   /// Save the data in a stream.
   void Save(std::ostream &out) const;

   /// Load the data from a stream.
   void Load(std::istream &in);
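For orientation, a minimal sketch (not part of the header) of how these accessors might be used to inspect an existing GroupTopology; 'gtopo' is assumed to be provided by the application (e.g. by a parallel mesh), the NGroups() accessor is assumed from the same class, and by MFEM convention group 0 contains only the local rank.

#include "mfem.hpp"

void PrintGroupInfo(const mfem::GroupTopology &gtopo)
{
   for (int g = 1; g < gtopo.NGroups(); g++) // skip the trivial local group 0
   {
      if (gtopo.IAmMaster(g))
      {
         mfem::out << "rank " << gtopo.MyRank() << " is master of group "
                   << g << " (size " << gtopo.GetGroupSize(g) << ")\n";
      }
   }
}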
   /** @brief Perform the reduction operation Op on the entries of group
       'group' using the values from the buffer 'buf'. */
   template <class T>
   const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group,
                                  int layout, void (*Op)(OpData<T>)) const;
   /// Begin a broadcast within each group where the master is the root.
   template <class T> void BcastBegin(T *ldata, int layout) const;

   /// Finalize a broadcast started with BcastBegin().
   template <class T> void BcastEnd(T *ldata, int layout) const;

   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(T *ldata, int layout) const
   {
      BcastBegin(ldata, layout);
      BcastEnd(ldata, layout);
   }

   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(T *ldata) const { Bcast<T>(ldata, 0); }

   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(Array<T> &ldata) const
   { Bcast<T>((T *)ldata); }
   /** @brief Begin reduction operation within each group where the master is
       the root. */
   template <class T> void ReduceBegin(const T *ldata) const;

   /// Finalize reduction operation started with ReduceBegin().
   template <class T> void ReduceEnd(T *ldata, int layout,
                                     void (*Op)(OpData<T>)) const;

   /// Reduce within each group where the master is the root.
   template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>)) const
   { Reduce<T>((T *)ldata, Op); }
   /// Reduce operation Sum, instantiated for int and double.
   template <class T> static void Sum(OpData<T>);
   /// Reduce operation Min, instantiated for int and double.
   template <class T> static void Min(OpData<T>);
   /// Reduce operation Max, instantiated for int and double.
   template <class T> static void Max(OpData<T>);
   /// Reduce operation bitwise OR, instantiated for int only.
   template <class T> static void BitOR(OpData<T>);
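The following is a minimal usage sketch, not part of the header: it builds a GroupCommunicator from a local-dof-to-group map and uses Reduce()/Bcast() to make shared entries consistent. 'gtopo' and 'ldof_group' are assumed to come from the application (for example a parallel mesh or finite element space), and layout 0 denotes the default ordering of the local data.

#include "mfem.hpp"

void SumSharedEntries(mfem::GroupTopology &gtopo,
                      const mfem::Array<int> &ldof_group,
                      mfem::Array<double> &ldata)
{
   mfem::GroupCommunicator gcomm(gtopo);
   gcomm.Create(ldof_group); // fills the group-ldof Table; calls Finalize()

   // Accumulate the shared entries on each group master, then broadcast the
   // master's values back to all group members.
   gcomm.Reduce<double>(ldata, mfem::GroupCommunicator::Sum);
   gcomm.Bcast(ldata);
}

Reduce() and Bcast() here wrap the ReduceBegin()/ReduceEnd() and BcastBegin()/BcastEnd() pairs declared above.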
   /// Non-blocking send to processor 'rank'.
   void Isend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Isend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                &send_request);
   }
   /// Helper to send all messages in a rank-to-message map container.
   template<typename MapT>
   static void IsendAll(MapT& rank_msg, MPI_Comm comm)
   {
      typename MapT::iterator it;
      for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         it->second.Isend(it->first, comm);
      }
   }
   /// Helper to wait for all messages in a map container to be sent.
   template<typename MapT>
   static void WaitAllSent(MapT& rank_msg)
   {
      typename MapT::iterator it;
      for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
      }
   }
   /** @brief Blocking probe for an incoming message of this type from any
       rank; returns the source rank and the message size. */
   static void Probe(int &rank, int &size, MPI_Comm comm)
   {
      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
   }
   /** @brief Non-blocking probe for an incoming message of this type from any
       rank; if a message is available, returns true and sets 'rank' and
       'size', otherwise returns false. */
   static bool IProbe(int &rank, int &size, MPI_Comm comm)
   {
      int flag;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
      if (!flag) { return false; }

      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
      return true;
   }
   /// Post-probe receive from processor 'rank' of message size 'size'.
   void Recv(int rank, int size, MPI_Comm comm)
   {
      MFEM_ASSERT(size >= 0, "");
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
      int count;
      MPI_Get_count(&status, MPI_BYTE, &count);
      MFEM_VERIFY(count == size, "");
      Decode(rank);
   }

   /// Like Recv(), but throw away the message.
   void RecvDrop(int rank, int size, MPI_Comm comm)
   {
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
      data.resize(0); // the received data is not decoded
   }
   /// Helper to receive all messages in a rank-to-message map container.
   template<typename MapT>
   static void RecvAll(MapT& rank_msg, MPI_Comm comm)
   {
      int recv_left = rank_msg.size();
      while (recv_left > 0)
      {
         int rank, size;
         Probe(rank, size, comm);
         MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
                     " (tag " << Tag << ") from rank " << rank);
         rank_msg[rank].Recv(rank, size, comm);
         --recv_left;
      }
   }
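A sketch of the exchange pattern these helpers support, with a hypothetical message type DemoMsg using tag 155 (neither name is part of MFEM): all sends are posted first, every expected message is then received, and finally the posted sends are completed. Note that RecvAll() requires recv_msg to already contain an entry for every rank a message is expected from.

#include "mfem.hpp"
#include <map>

struct DemoMsg : public mfem::VarMessage<155>
{
   // A real message type would pack/unpack application data here.
   void Encode(int /*rank*/) override { /* fill 'data' before sending */ }
   void Decode(int /*rank*/) override { /* read 'data' after receiving */ }
};

void Exchange(std::map<int, DemoMsg> &send_msg,
              std::map<int, DemoMsg> &recv_msg, MPI_Comm comm)
{
   DemoMsg::IsendAll(send_msg, comm); // post non-blocking sends to all peers
   DemoMsg::RecvAll(recv_msg, comm);  // probe and receive each expected message
   DemoMsg::WaitAllSent(send_msg);    // complete the posted sends
}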
422 "WaitAllSent was not called after Isend");
429 "Cannot copy message with a pending send.");
int WorldSize() const
Return MPI_COMM_WORLD's size.
int GetGroupMasterRank(int g) const
void Create(ListOfIntegerSets &groups, int mpitag)
void ReduceEnd(T *ldata, int layout, void(*Op)(OpData< T >)) const
Finalize reduction operation started with ReduceBegin().
int Size() const
Logical size of the array.
void Reduce(T *ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
int GetGroupMasterGroup(int g) const
void Bcast(T *ldata, int layout) const
Broadcast within each group where the master is the root.
Helper struct to convert a C++ type to an MPI type.
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
GroupTopology(MPI_Comm comm)
void Create(const Array< int > &ldof_group)
Initialize the communicator from a local-dof to group map. Finalize() is called internally.
int GetGroupSize(int g) const
static const MPI_Datatype mpi_type
const int * GetGroup(int g) const
const Table & GroupLDofTable() const
Read-only access to group-ldof Table.
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
void RecvDrop(int rank, int size, MPI_Comm comm)
Like Recv(), but throw away the message.
const T * CopyGroupFromBuffer(const T *buf, T *ldata, int group, int layout) const
Copy the entries corresponding to group 'group' from the buffer 'buf' to the local array 'ldata'...
void Recv(int rank, int size, MPI_Comm comm)
Post-probe receive from processor 'rank' of message size 'size'.
void BcastEnd(T *ldata, int layout) const
Finalize a broadcast started with BcastBegin().
bool IAmMaster(int g) const
VarMessage(const VarMessage &other)
void Bcast(T *ldata) const
Broadcast within each group where the master is the root.
A simple convenience class that calls MPI_Init() at construction and MPI_Finalize() at destruction...
Communicator performing operations within groups defined by a GroupTopology with arbitrary-size data ...
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
int GetNumNeighbors() const
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
bool Root() const
Return true if WorldRank() == 0.
T * CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const
Copy the entries corresponding to group 'group' from the local array 'ldata' to the buffer 'buf'...
virtual void Encode(int rank)
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
static void IsendAll(MapT &rank_msg, MPI_Comm comm)
Helper to send all messages in a rank-to-message map container.
void Reduce(Array< T > &ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
int Size() const
Returns the number of TYPE I elements.
GroupCommunicator(GroupTopology &gt, Mode m=byNeighbor)
Construct a GroupCommunicator object.
GroupTopology & GetGroupTopology()
Get a reference to the associated GroupTopology object.
int GetGroupMaster(int g) const
void Copy(GroupTopology &copy) const
Copy.
void Save(std::ostream &out) const
Save the data in a stream.
int WorldRank() const
Return MPI_COMM_WORLD's rank.
const T * ReduceGroupFromBuffer(const T *buf, T *ldata, int group, int layout, void(*Op)(OpData< T >)) const
Perform the reduction operation Op on the entries of group 'group' using the values from the buffer 'buf'...
virtual void Decode(int rank)
int GetNeighborRank(int i) const
static const MPI_Datatype mpi_type
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
Communications are performed one group at a time.
static void RecvAll(MapT &rank_msg, MPI_Comm comm)
Helper to receive all messages in a rank-to-message map container.
void SetLTDofTable(const Array< int > &ldof_ltdof)
Initialize the internal group_ltdof Table.
Table & GroupLDofTable()
Fill in the returned Table reference to initialize the GroupCommunicator, then call Finalize()...
void BcastBegin(T *ldata, int layout) const
Begin a broadcast within each group where the master is the root.
Data structure on which we define reduce operations.
~GroupCommunicator()
Destroy a GroupCommunicator object, deallocating internal data structures and buffers.
MPI_Session(int &argc, char **&argv)
void Isend(int rank, MPI_Comm comm)
Non-blocking send to processor 'rank'.
void Bcast(Array< T > &ldata) const
Broadcast within each group where the master is the root.
void ReduceBegin(const T *ldata) const
Begin reduction operation within each group where the master is the root.
void SetComm(MPI_Comm comm)
static void WaitAllSent(MapT &rank_msg)
Helper to wait for all messages in a map container to be sent.
static bool IProbe(int &rank, int &size, MPI_Comm comm)
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
void PrintInfo(std::ostream &out=mfem::out) const
Print information about the GroupCommunicator from all MPI ranks.
Variable-length MPI message containing unspecific binary data.
void Load(std::istream &in)
Load the data from a stream.
const GroupTopology & GetGroupTopology() const
Get a const reference to the associated GroupTopology object.
static void Probe(int &rank, int &size, MPI_Comm comm)
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.
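To round out the index above, a minimal sketch of the MPI_Session entry: constructing it first in main() calls MPI_Init(), and MPI_Finalize() runs when the object goes out of scope; Root(), WorldSize() and mfem::out are the entries documented above.

#include "mfem.hpp"

int main(int argc, char *argv[])
{
   mfem::MPI_Session mpi(argc, argv); // calls MPI_Init()

   if (mpi.Root()) // true only on MPI_COMM_WORLD rank 0
   {
      mfem::out << "Running on " << mpi.WorldSize() << " MPI ranks.\n";
   }
   // ... parallel MFEM code ...
   return 0;
} // 'mpi' goes out of scope here and MPI_Finalize() is called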