12 #ifndef MFEM_COMMUNICATION
13 #define MFEM_COMMUNICATION
15 #include "../config/config.hpp"
81 void SetComm(MPI_Comm comm) { MyComm = comm; }
84 int MyRank() {
int r; MPI_Comm_rank(MyComm, &r);
return r; }
85 int NRanks() {
int s; MPI_Comm_size(MyComm, &s);
return s; }
94 bool IAmMaster(
int g)
const {
return (groupmaster_lproc[g] == 0); }
100 {
return lproc_proc[groupmaster_lproc[g]]; }
/// Save the data in a stream.
110 void Save(std::ostream &out)
const;
/// Load the data from a stream (inverse of Save()).
112 void Load(std::istream &in);
/// Array of MPI request handles for pending non-blocking operations.
124 MPI_Request *requests;
/// Array of MPI status objects; presumably sized to match 'requests' --
/// TODO(review): confirm allocation site (not visible in this chunk).
125 MPI_Status *statuses;
/// Broadcast within each group where the master is the root; the data
/// layout is selected by 'layout'. Per the generated docs, this method
/// is instantiated for int and double.
147 template <
class T>
void Bcast(T *data,
int layout);
/// Broadcast within each group where the master is the root, using the
/// default data layout (0). Forwards to Bcast(T*, int).
template <class T>
void Bcast(T *ldata)
{
   Bcast<T>(ldata, 0);
}
171 { Reduce<T>((T *)ldata, Op); }
/// Reduce operation Sum; per the generated docs, instantiated for int
/// and double.
174 template <
class T>
static void Sum(OpData<T>);
/// Reduce operation Min; per the generated docs, instantiated for int
/// and double.
176 template <
class T>
static void Min(OpData<T>);
/// Reduce operation Max; per the generated docs, instantiated for int
/// and double.
178 template <
class T>
static void Max(OpData<T>);
/// Reduce operation bitwise OR; per the generated docs, instantiated
/// for int only.
180 template <
class T>
static void BitOR(OpData<T>);
197 MPI_Isend((
void*)
data.data(),
data.length(), MPI_BYTE, rank, Tag, comm,
202 template<
typename MapT>
203 static void IsendAll(MapT& rank_msg, MPI_Comm comm)
205 typename MapT::iterator it;
206 for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
208 it->second.Isend(it->first, comm);
213 template<
typename MapT>
216 typename MapT::iterator it;
217 for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
219 MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
226 static void Probe(
int &rank,
int &size, MPI_Comm comm)
229 MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
230 rank = status.MPI_SOURCE;
231 MPI_Get_count(&status, MPI_BYTE, &size);
237 static bool IProbe(
int &rank,
int &size, MPI_Comm comm)
241 MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
242 if (!flag) {
return false; }
244 rank = status.MPI_SOURCE;
245 MPI_Get_count(&status, MPI_BYTE, &size);
250 void Recv(
int rank,
int size, MPI_Comm comm)
252 MFEM_ASSERT(size >= 0,
"");
255 MPI_Recv((
void*)
data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
258 MPI_Get_count(&status, MPI_BYTE, &count);
259 MFEM_VERIFY(count == size,
"");
265 template<
typename MapT>
266 static void RecvAll(MapT& rank_msg, MPI_Comm comm)
268 int recv_left = rank_msg.size();
269 while (recv_left > 0)
272 Probe(rank, size, comm);
273 MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(),
"Unexpected message"
274 " (tag " << Tag <<
") from rank " << rank);
276 rank_msg[rank].Recv(rank, size, comm);
287 "WaitAllSent was not called after Isend");
294 "Cannot copy message with a pending send.");
304 template <
typename Type>
int WorldSize() const
Return MPI_COMM_WORLD's size.
int GetGroupMasterRank(int g) const
void Create(ListOfIntegerSets &groups, int mpitag)
int Size() const
Logical size of the array.
void Bcast(Array< T > &ldata)
int GetGroupMasterGroup(int g) const
GroupCommunicator(GroupTopology &gt)
Helper struct to convert a C++ type to an MPI type.
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
GroupTopology(MPI_Comm comm)
int GetGroupSize(int g) const
const int * GetGroup(int g) const
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
void Bcast(T *ldata)
Broadcast within each group where the master is the root. This method is instantiated for int and double.
void Reduce(Array< T > &ldata, void(*Op)(OpData< T >))
void Recv(int rank, int size, MPI_Comm comm)
Post-probe receive from processor 'rank' of message size 'size'.
bool IAmMaster(int g) const
VarMessage(const VarMessage &other)
A simple convenience class that calls MPI_Init() at construction and MPI_Finalize() at destruction...
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
int GetNumNeighbors() const
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
bool Root() const
Return true if WorldRank() == 0.
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
static void IsendAll(MapT &rank_msg, MPI_Comm comm)
Helper to send all messages in a rank-to-message map container.
int Size() const
Returns the number of TYPE I elements.
GroupTopology & GetGroupTopology()
Get a reference to the group topology object.
int GetGroupMaster(int g) const
void Save(std::ostream &out) const
Save the data in a stream.
int WorldRank() const
Return MPI_COMM_WORLD's rank.
int GetNeighborRank(int i) const
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
static void RecvAll(MapT &rank_msg, MPI_Comm comm)
Helper to receive all messages in a rank-to-message map container.
Data structure on which we define reduce operations.
MPI_Session(int &argc, char **&argv)
void Reduce(T *ldata, void(*Op)(OpData< T >))
Reduce within each group where the master is the root.
void Isend(int rank, MPI_Comm comm)
Non-blocking send to processor 'rank'.
void Create(Array< int > &ldof_group)
void SetComm(MPI_Comm comm)
static void WaitAllSent(MapT &rank_msg)
Helper to wait for all messages in a map container to be sent.
static bool IProbe(int &rank, int &size, MPI_Comm comm)
static const MPI_Datatype mpi_type
Variable-length MPI message containing unspecific binary data.
void Bcast(T *data, int layout)
Broadcast within each group where the master is the root. The data layout can be: ...
void Load(std::istream &in)
Load the data from a stream.
static void Probe(int &rank, int &size, MPI_Comm comm)
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.