MFEM  v3.1
Finite element discretization library
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Pages
communication.hpp
Go to the documentation of this file.
1 // Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
3 // reserved. See file COPYRIGHT for details.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability see http://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the GNU Lesser General Public License (as published by the Free
10 // Software Foundation) version 2.1 dated February 1999.
11 
12 #ifndef MFEM_COMMUNICATION
13 #define MFEM_COMMUNICATION
14 
15 #include "../config/config.hpp"
16 
17 #ifdef MFEM_USE_MPI
18 
19 #include <mpi.h>
20 #include "array.hpp"
21 #include "table.hpp"
22 #include "sets.hpp"
23 
24 namespace mfem
25 {
26 
28 {
29 private:
30  MPI_Comm MyComm;
31 
32  /* The shared entities (e.g. vertices, faces and edges) are split into
33  groups, each group determined by the set of participating processors.
34  They are numbered locally in lproc. Assumptions:
35  - group 0 is the 'local' group
36  - groupmaster_lproc[0] = 0
37  - lproc_proc[0] = MyRank */
38  Table group_lproc;
39  Array<int> groupmaster_lproc;
40  Array<int> lproc_proc;
41  Array<int> group_mgroup; // group --> group number in the master
42 
43  void ProcToLProc();
44 
45 public:
46  GroupTopology(MPI_Comm comm) { MyComm = comm; }
47 
49  GroupTopology(const GroupTopology &gt);
50 
51  MPI_Comm GetComm() { return MyComm; }
52  int MyRank() { int r; MPI_Comm_rank(MyComm, &r); return r; }
53  int NRanks() { int s; MPI_Comm_size(MyComm, &s); return s; }
54 
55  void Create(ListOfIntegerSets &groups, int mpitag);
56 
57  int NGroups() const { return group_lproc.Size(); }
58  // return the number of neighbors including the local processor
59  int GetNumNeighbors() const { return lproc_proc.Size(); }
60  int GetNeighborRank(int i) const { return lproc_proc[i]; }
61  // am I master for group 'g'?
62  bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }
63  // return the neighbor index of the group master for a given group.
64  // neighbor 0 is the local processor
65  int GetGroupMaster(int g) const { return groupmaster_lproc[g]; }
66  // return the rank of the group master for a given group
67  int GetGroupMasterRank(int g) const
68  { return lproc_proc[groupmaster_lproc[g]]; }
69  // for a given group return the group number in the master
70  int GetGroupMasterGroup(int g) const { return group_mgroup[g]; }
71  // get the number of processors in a group
72  int GetGroupSize(int g) const { return group_lproc.RowSize(g); }
73  // return a pointer to a list of neighbors for a given group.
74  // neighbor 0 is the local processor
75  const int *GetGroup(int g) const { return group_lproc.GetRow(g); }
76 };
77 
79 {
80 private:
81  GroupTopology &gtopo;
82  Table group_ldof;
83  int group_buf_size;
84  Array<char> group_buf;
85  MPI_Request *requests;
86  MPI_Status *statuses;
87 
90  template <class T> static inline MPI_Datatype Get_MPI_Datatype();
91 
92 public:
96  void Create(Array<int> &ldof_group);
99  Table &GroupLDofTable() { return group_ldof; }
101  void Finalize();
102 
104  GroupTopology & GetGroupTopology() { return gtopo; }
105 
108  template <class T> void Bcast(T *ldata);
109  template <class T> void Bcast(Array<T> &ldata) { Bcast<T>((T *)ldata); }
110 
114  template <class T> struct OpData
115  {
116  int nldofs, nb, *ldofs;
117  T *ldata, *buf;
118  };
119 
123  template <class T> void Reduce(T *ldata, void (*Op)(OpData<T>));
124  template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>))
125  { Reduce<T>((T *)ldata, Op); }
126 
128  template <class T> static void Sum(OpData<T>);
130  template <class T> static void Min(OpData<T>);
132  template <class T> static void Max(OpData<T>);
134  template <class T> static void BitOR(OpData<T>);
135 
137 };
138 
139 
141 template<int Tag>
143 {
144  std::string data;
145  MPI_Request send_request;
146 
148  void Isend(int rank, MPI_Comm comm)
149  {
150  Encode();
151  MPI_Isend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
152  &send_request);
153  }
154 
156  template<typename MapT>
157  static void IsendAll(MapT& rank_msg, MPI_Comm comm)
158  {
159  typename MapT::iterator it;
160  for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
161  {
162  it->second.Isend(it->first, comm);
163  }
164  }
165 
167  template<typename MapT>
168  static void WaitAllSent(MapT& rank_msg)
169  {
170  typename MapT::iterator it;
171  for (it = rank_msg.begin(); it != rank_msg.end(); ++it)
172  {
173  MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
174  it->second.Clear();
175  }
176  }
177 
180  static void Probe(int &rank, int &size, MPI_Comm comm)
181  {
182  MPI_Status status;
183  MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
184  rank = status.MPI_SOURCE;
185  MPI_Get_count(&status, MPI_BYTE, &size);
186  }
187 
191  static bool IProbe(int &rank, int &size, MPI_Comm comm)
192  {
193  int flag;
194  MPI_Status status;
195  MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
196  if (!flag) { return false; }
197 
198  rank = status.MPI_SOURCE;
199  MPI_Get_count(&status, MPI_BYTE, &size);
200  return true;
201  }
202 
204  void Recv(int rank, int size, MPI_Comm comm)
205  {
206  MFEM_ASSERT(size >= 0, "");
207  data.resize(size);
208  MPI_Status status;
209  MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
210 #ifdef MFEM_DEBUG
211  int count;
212  MPI_Get_count(&status, MPI_BYTE, &count);
213  MFEM_VERIFY(count == size, "");
214 #endif
215  Decode();
216  }
217 
219  template<typename MapT>
220  static void RecvAll(MapT& rank_msg, MPI_Comm comm)
221  {
222  int recv_left = rank_msg.size();
223  while (recv_left > 0)
224  {
225  int rank, size;
226  Probe(rank, size, comm);
227  MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "");
228  // No guard against receiving two messages from the same rank
229  rank_msg[rank].Recv(rank, size, comm);
230  --recv_left;
231  }
232  }
233 
234  VarMessage() : send_request(MPI_REQUEST_NULL) {}
235  void Clear() { data.clear(); send_request = MPI_REQUEST_NULL; }
236 
237  virtual ~VarMessage()
238  {
239  MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
240  "WaitAllSent was not called after Isend");
241  }
242 
243  VarMessage(const VarMessage &other)
244  : data(other.data), send_request(other.send_request)
245  {
246  MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
247  "Cannot copy message with a pending send.");
248  }
249 
250 protected:
251  virtual void Encode() {}
252  virtual void Decode() {}
253 };
254 
255 }
256 
257 #endif
258 
259 #endif
int GetGroupMasterRank(int g) const
void Create(ListOfIntegerSets &groups, int mpitag)
int Size() const
Logical size of the array.
Definition: array.hpp:109
void Bcast(Array< T > &ldata)
int GetGroupMasterGroup(int g) const
GroupCommunicator(GroupTopology &gt)
GroupTopology(MPI_Comm comm)
int GetGroupSize(int g) const
virtual void Decode()
const int * GetGroup(int g) const
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
Definition: table.cpp:179
void Reduce(Array< T > &ldata, void(*Op)(OpData< T >))
void Recv(int rank, int size, MPI_Comm comm)
Post-probe receive from processor 'rank' of message size 'size'.
bool IAmMaster(int g) const
VarMessage(const VarMessage &other)
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
int GetNumNeighbors() const
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
MPI_Request send_request
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
static void IsendAll(MapT &rank_msg, MPI_Comm comm)
Helper to send all messages in a rank-to-message map container.
int Size() const
Returns the number of TYPE I elements.
Definition: table.hpp:86
GroupTopology & GetGroupTopology()
Get a reference to the group topology object.
int GetGroupMaster(int g) const
virtual void Encode()
int GetNeighborRank(int i) const
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
static void RecvAll(MapT &rank_msg, MPI_Comm comm)
Helper to receive all messages in a rank-to-message map container.
void Reduce(T *ldata, void(*Op)(OpData< T >))
void Isend(int rank, MPI_Comm comm)
Non-blocking send to processor &#39;rank&#39;.
int NGroups() const
void Create(Array< int > &ldof_group)
static void WaitAllSent(MapT &rank_msg)
Helper to wait for all messages in a map container to be sent.
int RowSize(int i) const
Definition: table.hpp:102
static bool IProbe(int &rank, int &size, MPI_Comm comm)
List of integer sets.
Definition: sets.hpp:49
Variable-length MPI message containing unspecific binary data.
static void Probe(int &rank, int &size, MPI_Comm comm)
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.