communication.hpp
// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#ifndef MFEM_COMMUNICATION
#define MFEM_COMMUNICATION

#include "../config/config.hpp"

#ifdef MFEM_USE_MPI

#include <mpi.h>
#include <string> // std::string, used by VarMessage below
#include "array.hpp"
#include "table.hpp"
#include "sets.hpp"

namespace mfem
{

/** A simple convenience class that calls MPI_Init() at construction and
    MPI_Finalize() at destruction. It also provides easy access to
    MPI_COMM_WORLD's rank and size. */
class MPI_Session
{
protected:
   int world_rank, world_size;
   void GetRankAndSize()
   {
      MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
      MPI_Comm_size(MPI_COMM_WORLD, &world_size);
   }
public:
   MPI_Session() { MPI_Init(NULL, NULL); GetRankAndSize(); }
   MPI_Session(int &argc, char **&argv)
   { MPI_Init(&argc, &argv); GetRankAndSize(); }
   ~MPI_Session() { MPI_Finalize(); }
   /// Return MPI_COMM_WORLD's rank.
   int WorldRank() const { return world_rank; }
   /// Return MPI_COMM_WORLD's size.
   int WorldSize() const { return world_size; }
   /// Return true if WorldRank() == 0.
   bool Root() const { return world_rank == 0; }
};
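
/* Usage sketch (illustrative addition, not part of the upstream header):
   MPI_Session can replace an explicit MPI_Init()/MPI_Finalize() pair in a
   parallel MFEM driver. A minimal, hypothetical main() could look like:

      #include "mfem.hpp"
      #include <iostream>

      int main(int argc, char *argv[])
      {
         mfem::MPI_Session mpi(argc, argv);   // calls MPI_Init(&argc, &argv)
         if (mpi.Root())
         {
            std::cout << "Running on " << mpi.WorldSize() << " ranks.\n";
         }
         return 0;                            // ~MPI_Session() calls MPI_Finalize()
      }
*/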

class GroupTopology
{
private:
   MPI_Comm   MyComm;

   /* The shared entities (e.g. vertices, faces and edges) are split into
      groups, each group determined by the set of participating processors.
      They are numbered locally in lproc. Assumptions:
      - group 0 is the 'local' group
      - groupmaster_lproc[0] = 0
      - lproc_proc[0] = MyRank */
   Table      group_lproc;
   Array<int> groupmaster_lproc;
   Array<int> lproc_proc;
   Array<int> group_mgroup; // group --> group number in the master

   void ProcToLProc();

public:
   GroupTopology() : MyComm(0) {}
   GroupTopology(MPI_Comm comm) { MyComm = comm; }

   /// Copy constructor
   GroupTopology(const GroupTopology &gt);
   void SetComm(MPI_Comm comm) { MyComm = comm; }

   MPI_Comm GetComm() { return MyComm; }
   int MyRank() { int r; MPI_Comm_rank(MyComm, &r); return r; }
   int NRanks() { int s; MPI_Comm_size(MyComm, &s); return s; }

   void Create(ListOfIntegerSets &groups, int mpitag);

   int NGroups() const { return group_lproc.Size(); }
   // return the number of neighbors including the local processor
   int GetNumNeighbors() const { return lproc_proc.Size(); }
   int GetNeighborRank(int i) const { return lproc_proc[i]; }
   // am I master for group 'g'?
   bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }
   // return the neighbor index of the group master for a given group.
   // neighbor 0 is the local processor
   int GetGroupMaster(int g) const { return groupmaster_lproc[g]; }
   // return the rank of the group master for a given group
   int GetGroupMasterRank(int g) const
   { return lproc_proc[groupmaster_lproc[g]]; }
   // for a given group return the group number in the master
   int GetGroupMasterGroup(int g) const { return group_mgroup[g]; }
   // get the number of processors in a group
   int GetGroupSize(int g) const { return group_lproc.RowSize(g); }
   // return a pointer to a list of neighbors for a given group.
   // neighbor 0 is the local processor
   const int *GetGroup(int g) const { return group_lproc.GetRow(g); }

   virtual ~GroupTopology() {}
};
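
/* Inspection sketch (illustrative addition): after Create() has built the
   group structure, it can be queried with the accessors above. 'gt' below is
   a hypothetical, already initialized GroupTopology:

      void PrintGroups(const mfem::GroupTopology &gt)
      {
         for (int g = 0; g < gt.NGroups(); g++)
         {
            // local (lproc) indices of the processors in the group;
            // lproc 0 is always the local processor
            const int *nbr = gt.GetGroup(g);
            std::cout << "group " << g << ": size " << gt.GetGroupSize(g)
                      << ", master rank " << gt.GetGroupMasterRank(g)
                      << (gt.IAmMaster(g) ? " (this rank)" : "") << '\n';
            for (int i = 0; i < gt.GetGroupSize(g); i++)
            {
               std::cout << "   rank " << gt.GetNeighborRank(nbr[i]) << '\n';
            }
         }
      }
*/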

/// Communicator for data (e.g. dof values) associated with the groups of a
/// GroupTopology object.
class GroupCommunicator
{
private:
   GroupTopology &gtopo;
   Table group_ldof;
   int group_buf_size;
   Array<char> group_buf;
   MPI_Request *requests;
   MPI_Status  *statuses;

public:
   GroupCommunicator(GroupTopology &gt);
   /** Initialize the communicator from a local-dof to group map.
       Finalize() is called internally. */
   void Create(Array<int> &ldof_group);
   /** Fill-in the returned Table reference to initialize the communicator,
       then call Finalize(). */
   Table &GroupLDofTable() { return group_ldof; }
   /// Allocate internal buffers after the GroupLDofTable is defined.
   void Finalize();

   /// Get a reference to the group topology object.
   GroupTopology & GetGroupTopology() { return gtopo; }

   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(T *ldata);
   template <class T> void Bcast(Array<T> &ldata) { Bcast<T>((T *)ldata); }

   /** Data structure on which the reduce operations are defined. The data is
       associated with (and the operation is performed on) one group at a
       time. */
   template <class T> struct OpData
   {
      int nldofs, nb, *ldofs;
      T *ldata, *buf;
   };

   /** Reduce within each group where the master is the root. The reduce
       operation is given by the second argument (see below for the predefined
       operations). */
   template <class T> void Reduce(T *ldata, void (*Op)(OpData<T>));
   template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>))
   { Reduce<T>((T *)ldata, Op); }

   /// Reduce operation Sum, instantiated for int and double
   template <class T> static void Sum(OpData<T>);
   /// Reduce operation Min, instantiated for int and double
   template <class T> static void Min(OpData<T>);
   /// Reduce operation Max, instantiated for int and double
   template <class T> static void Max(OpData<T>);
   /// Reduce operation bitwise OR, instantiated for int only
   template <class T> static void BitOR(OpData<T>);

   ~GroupCommunicator();
};
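
/* Usage sketch (illustrative addition): the usual pattern is to describe which
   local dofs belong to which group, allocate the internal buffers, and then
   use Reduce()/Bcast() to make shared values consistent across processors.
   The variable names below ('gtopo', 'ldof_group', 'ldata') are hypothetical:

      mfem::GroupCommunicator gc(gtopo);   // gtopo: an initialized GroupTopology
      gc.Create(ldof_group);               // ldof_group: Array<int>, group of each ldof

      mfem::Array<double> ldata(ldof_group.Size());
      // ... fill ldata with the local contributions ...
      gc.Reduce<double>(ldata, mfem::GroupCommunicator::Sum); // accumulate on masters
      gc.Bcast(ldata);                     // masters broadcast the reduced values back
*/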


/// Variable-length MPI message containing unspecific binary data.
template<int Tag>
struct VarMessage
{
   std::string data;
   MPI_Request send_request;

   /// Non-blocking send to processor 'rank'.
   void Isend(int rank, MPI_Comm comm)
   {
      Encode();
      MPI_Isend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                &send_request);
   }

   /// Helper to send all messages in a rank-to-message map container.
   template<typename MapT>
   static void IsendAll(MapT& rank_msg, MPI_Comm comm)
   {
      typename MapT::iterator it;
      for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         it->second.Isend(it->first, comm);
      }
   }

   /// Helper to wait for all messages in a map container to be sent.
   template<typename MapT>
   static void WaitAllSent(MapT& rank_msg)
   {
      typename MapT::iterator it;
      for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
         it->second.Clear();
      }
   }

   /** Blocking probe for an incoming message of this type from any rank.
       Returns the rank and size of the incoming message. */
   static void Probe(int &rank, int &size, MPI_Comm comm)
   {
      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
   }

   /** Non-blocking probe for an incoming message of this type from any rank.
       If there is an incoming message, returns true and sets 'rank' and
       'size'. Otherwise returns false. */
   static bool IProbe(int &rank, int &size, MPI_Comm comm)
   {
      int flag;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
      if (!flag) { return false; }

      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
      return true;
   }

   /// Post-probe receive from processor 'rank' of message size 'size'.
   void Recv(int rank, int size, MPI_Comm comm)
   {
      MFEM_ASSERT(size >= 0, "");
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
#ifdef MFEM_DEBUG
      int count;
      MPI_Get_count(&status, MPI_BYTE, &count);
      MFEM_VERIFY(count == size, "");
#endif
      Decode();
   }

   /// Helper to receive all messages in a rank-to-message map container.
   template<typename MapT>
   static void RecvAll(MapT& rank_msg, MPI_Comm comm)
   {
      int recv_left = rank_msg.size();
      while (recv_left > 0)
      {
         int rank, size;
         Probe(rank, size, comm);
         MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
                     " (tag " << Tag << ") from rank " << rank);
         // NOTE: no guard against receiving two messages from the same rank
         rank_msg[rank].Recv(rank, size, comm);
         --recv_left;
      }
   }

   VarMessage() : send_request(MPI_REQUEST_NULL) {}
   void Clear() { data.clear(); send_request = MPI_REQUEST_NULL; }

   virtual ~VarMessage()
   {
      MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
                  "WaitAllSent was not called after Isend");
   }

   VarMessage(const VarMessage &other)
      : data(other.data), send_request(other.send_request)
   {
      MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
                  "Cannot copy message with a pending send.");
   }

protected:
   virtual void Encode() {}
   virtual void Decode() {}
};
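
/* Usage sketch (illustrative addition): concrete message types derive from
   VarMessage<Tag>, override Encode()/Decode() to (de)serialize their payload
   into/from the 'data' string, and are exchanged with the static helpers.
   All names below ('DblMessage', 'values', tag 1234) are hypothetical, and
   <vector> and <map> are assumed to be included:

      struct DblMessage : public mfem::VarMessage<1234>
      {
         std::vector<double> values;   // the actual payload

      protected:
         virtual void Encode()         // payload -> raw bytes in 'data'
         {
            data.assign((const char*) values.data(),
                        values.size()*sizeof(double));
         }
         virtual void Decode()         // raw bytes in 'data' -> payload
         {
            values.assign((const double*) data.data(),
                          (const double*) data.data() + data.size()/sizeof(double));
         }
      };

      // 'send_msgs' maps destination rank -> message to send; 'recv_msgs' must
      // be pre-populated with the ranks we expect to receive from, since
      // RecvAll() receives exactly rank_msg.size() messages.
      std::map<int, DblMessage> send_msgs, recv_msgs;
      // ... fill send_msgs[rank].values and insert expected ranks in recv_msgs ...
      DblMessage::IsendAll(send_msgs, MPI_COMM_WORLD);
      DblMessage::RecvAll(recv_msgs, MPI_COMM_WORLD);  // blocking probe + receive
      DblMessage::WaitAllSent(send_msgs);              // complete sends, Clear() them
*/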


/// Helper struct to convert a C++ type to an MPI type.
template <typename Type>
struct MPITypeMap { static const MPI_Datatype mpi_type; };

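
/* Note (illustrative addition): only the declaration of MPITypeMap appears in
   this header; the library's source is expected to define 'mpi_type' for the
   types it uses (at least int and double), along these assumed lines:

      namespace mfem
      {
         template <> const MPI_Datatype MPITypeMap<int>::mpi_type    = MPI_INT;
         template <> const MPI_Datatype MPITypeMap<double>::mpi_type = MPI_DOUBLE;
      }

   Generic code can then pass MPITypeMap<T>::mpi_type directly to MPI calls,
   e.g. MPI_Allreduce(..., MPITypeMap<T>::mpi_type, MPI_SUM, comm). */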

/** Reorder MPI ranks to follow the Z-curve within the physical machine
    topology (provided that functions to query physical node coordinates are
    available). Returns a new communicator with reordered ranks. */
MPI_Comm ReorderRanksZCurve(MPI_Comm comm);
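
/* Usage sketch (illustrative addition): one possible use is to obtain the
   reordered communicator once, right after initializing MPI, and to build all
   parallel objects on it instead of MPI_COMM_WORLD:

      MPI_Comm comm = mfem::ReorderRanksZCurve(MPI_COMM_WORLD);
      // ... construct the parallel mesh, spaces and solvers on 'comm' ...
*/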


} // namespace mfem

#endif // MFEM_USE_MPI

#endif // MFEM_COMMUNICATION