MFEM v3.0
communication.cpp
// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.googlecode.com.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#include "../config/config.hpp"

#ifdef MFEM_USE_MPI

#include <mpi.h>

#include "array.hpp"
#include "table.hpp"
#include "sets.hpp"
#include "communication.hpp"
#include <iostream>
using namespace std;

namespace mfem
{
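// ProcToLProc() renumbers the global MPI ranks stored in group_lproc into a
// compact set of "local processor" (lproc) indices, with the calling rank
// always mapped to lproc 0. A minimal sketch of the mapping it builds
// (hypothetical ranks, with MyRank == 5 appearing first in group_lproc):
//
//    ranks referenced : {5, 2, 9}        (5 == MyRank)
//    proc_lproc       : proc 5 -> 0, proc 2 -> 1, proc 9 -> 2
//    lproc_proc       : [5, 2, 9]        (inverse map, lproc -> proc)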
void GroupTopology::ProcToLProc()
{
   int NRanks;
   MPI_Comm_size(MyComm, &NRanks);

   Array<int> proc_lproc(NRanks); // array of size number of processors!
   proc_lproc = -1;

   int lproc_counter = 0;
   for (int i = 0; i < group_lproc.Size_of_connections(); i++)
      if (proc_lproc[group_lproc.GetJ()[i]] < 0)
         proc_lproc[group_lproc.GetJ()[i]] = lproc_counter++;
   // Note: group_lproc.GetJ()[0] == MyRank --> proc_lproc[MyRank] == 0

   lproc_proc.SetSize(lproc_counter);
   for (int i = 0; i < NRanks; i++)
      if (proc_lproc[i] >= 0)
         lproc_proc[proc_lproc[i]] = i;

   for (int i = 0; i < group_lproc.Size_of_connections(); i++)
      group_lproc.GetJ()[i] = proc_lproc[group_lproc.GetJ()[i]];

   for (int i = 0; i < NGroups(); i++)
      groupmaster_lproc[i] = proc_lproc[groupmaster_lproc[i]];
}
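// In Create() below, each group's master rank sends the pair (master's group
// id, list of group members) to the other members of the group; every
// non-master rank matches the received member list against its own 'groups'
// via Lookup() and fills in group_mgroup, i.e. the master's numbering of each
// shared group. Each row of group_mgroupandproc has the layout
//
//    [ group id on this rank | proc_0, proc_1, ..., proc_{n-1} ]
//
// which is exactly what is packed into the MPI_Isend and decoded after
// MPI_Recv.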
void GroupTopology::Create(ListOfIntegerSets &groups, int mpitag)
{
   groups.AsTable(group_lproc); // group_lproc = group_proc

   Table group_mgroupandproc;
   group_mgroupandproc.SetDims(NGroups(),
                               group_lproc.Size_of_connections() + NGroups());
   for (int i = 0; i < NGroups(); i++)
   {
      int j = group_mgroupandproc.GetI()[i];
      group_mgroupandproc.GetI()[i+1] = j + group_lproc.RowSize(i) + 1;
      group_mgroupandproc.GetJ()[j] = i;
      j++;
      for (int k = group_lproc.GetI()[i];
           j < group_mgroupandproc.GetI()[i+1]; j++, k++)
         group_mgroupandproc.GetJ()[j] = group_lproc.GetJ()[k];
   }

   // build groupmaster_lproc with lproc = proc
   groupmaster_lproc.SetSize(NGroups());

   // simplest choice of the group owner
   for (int i = 0; i < NGroups(); i++)
      groupmaster_lproc[i] = groups.PickElementInSet(i);

   // load-balanced choice of the group owner, which however can lead to
   // isolated dofs
   // for (i = 0; i < NGroups(); i++)
   //    groupmaster_lproc[i] = groups.PickRandomElementInSet(i);

   ProcToLProc();

   // build group_mgroup
   group_mgroup.SetSize(NGroups());

   int send_counter = 0;
   int recv_counter = 0;
   for (int i = 1; i < NGroups(); i++)
      if (groupmaster_lproc[i] != 0) // we are not the master
         recv_counter++;
      else
         send_counter += group_lproc.RowSize(i)-1;

   MPI_Request *requests = new MPI_Request[send_counter];
   MPI_Status  *statuses = new MPI_Status[send_counter];

   int max_recv_size = 0;
   send_counter = 0;
   for (int i = 1; i < NGroups(); i++)
   {
      if (groupmaster_lproc[i] == 0) // we are the master
      {
         group_mgroup[i] = i;

         for (int j = group_lproc.GetI()[i];
              j < group_lproc.GetI()[i+1]; j++)
         {
            if (group_lproc.GetJ()[j] != 0)
            {
               MPI_Isend(group_mgroupandproc.GetRow(i),
                         group_mgroupandproc.RowSize(i),
                         MPI_INT,
                         lproc_proc[group_lproc.GetJ()[j]],
                         mpitag,
                         MyComm,
                         &requests[send_counter]);
               send_counter++;
            }
         }
      }
      else // we are not the master
         if (max_recv_size < group_lproc.RowSize(i))
            max_recv_size = group_lproc.RowSize(i);
   }
   max_recv_size++;

   IntegerSet group;
   if (recv_counter > 0)
   {
      int count;
      MPI_Status status;
      int *recv_buf = new int[max_recv_size];
      for ( ; recv_counter > 0; recv_counter--)
      {
         MPI_Recv(recv_buf, max_recv_size, MPI_INT,
                  MPI_ANY_SOURCE, mpitag, MyComm, &status);

         MPI_Get_count(&status, MPI_INT, &count);

         group.Recreate(count-1, recv_buf+1);
         int g = groups.Lookup(group);
         group_mgroup[g] = recv_buf[0];

         if (lproc_proc[groupmaster_lproc[g]] != status.MPI_SOURCE)
         {
            cerr << "\n\n\nGroupTopology::GroupTopology: "
                 << MyRank() << ": ERROR\n\n\n" << endl;
            mfem_error();
         }
      }
      delete [] recv_buf;
   }

   MPI_Waitall(send_counter, requests, statuses);

   delete [] statuses;
   delete [] requests;
}

GroupCommunicator::GroupCommunicator(GroupTopology &gt)
   : gtopo(gt)
{
   group_buf_size = 0;
   requests = NULL;
   statuses = NULL;
}
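// Create() below builds the group -> ldof table with the usual two-pass
// Table construction: first count the entries per row (MakeI /
// AddAColumnInRow / MakeJ), then insert them (AddConnection / ShiftUpI).
// A minimal sketch of the same pattern on a standalone Table (hypothetical
// 'pairs' data, not part of this file):
//
//    Table t;
//    t.MakeI(nrows);
//    for (each (r, c) in pairs) t.AddAColumnInRow(r);
//    t.MakeJ();
//    for (each (r, c) in pairs) t.AddConnection(r, c);
//    t.ShiftUpI();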
void GroupCommunicator::Create(Array<int> &ldof_group)
{
   group_ldof.MakeI(gtopo.NGroups());
   for (int i = 0; i < ldof_group.Size(); i++)
   {
      int group = ldof_group[i];
      if (group != 0)
         group_ldof.AddAColumnInRow(group);
   }
   group_ldof.MakeJ();

   for (int i = 0; i < ldof_group.Size(); i++)
   {
      int group = ldof_group[i];
      if (group != 0)
         group_ldof.AddConnection(group, i);
   }
   group_ldof.ShiftUpI();

   Finalize();
}
void GroupCommunicator::Finalize()
{
   int request_counter = 0;

   for (int gr = 1; gr < group_ldof.Size(); gr++)
      if (group_ldof.RowSize(gr) != 0)
      {
         int gr_requests;
         if (!gtopo.IAmMaster(gr)) // we are not the master
            gr_requests = 1;
         else
            gr_requests = gtopo.GetGroupSize(gr)-1;

         request_counter += gr_requests;
         group_buf_size += gr_requests * group_ldof.RowSize(gr);
      }

   requests = new MPI_Request[request_counter];
   statuses = new MPI_Status[request_counter];
}
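// Bcast() below copies the master's values of the shared ldofs to every
// other member of each group: the master packs its ldof values into
// group_buf and posts one MPI_Isend per other group member, while
// non-masters post a single MPI_Irecv and, after MPI_Waitall, copy the
// received block back into ldata. Messages from different groups are kept
// apart by the tag (40822 + the master's group id).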
template <class T>
void GroupCommunicator::Bcast(T *ldata)
{
   if (group_buf_size == 0)
      return;

   group_buf.SetSize(group_buf_size*sizeof(T));
   T *buf = (T *)group_buf.GetData();

   int i, gr, request_counter = 0;

   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      const int nldofs = group_ldof.RowSize(gr);

      // ignore groups without dofs
      if (nldofs == 0)
         continue;

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         MPI_Irecv(buf,
                   nldofs,
                   Get_MPI_Datatype<T>(),
                   gtopo.GetGroupMasterRank(gr),
                   40822 + gtopo.GetGroupMasterGroup(gr),
                   gtopo.GetComm(),
                   &requests[request_counter]);
         request_counter++;
      }
      else // we are the master
      {
         // fill send buffer
         const int *ldofs = group_ldof.GetRow(gr);
         for (i = 0; i < nldofs; i++)
            buf[i] = ldata[ldofs[i]];

         const int  gs  = gtopo.GetGroupSize(gr);
         const int *nbs = gtopo.GetGroup(gr);
         for (i = 0; i < gs; i++)
         {
            if (nbs[i] != 0)
            {
               MPI_Isend(buf,
                         nldofs,
                         Get_MPI_Datatype<T>(),
                         gtopo.GetNeighborRank(nbs[i]),
                         40822 + gtopo.GetGroupMasterGroup(gr),
                         gtopo.GetComm(),
                         &requests[request_counter]);
               request_counter++;
            }
         }
      }
      buf += nldofs;
   }

   MPI_Waitall(request_counter, requests, statuses);

   // copy the received data from the buffer to ldata
   buf = (T *)group_buf.GetData();
   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      const int nldofs = group_ldof.RowSize(gr);

      // ignore groups without dofs
      if (nldofs == 0)
         continue;

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         const int *ldofs = group_ldof.GetRow(gr);
         for (i = 0; i < nldofs; i++)
            ldata[ldofs[i]] = buf[i];
      }
      buf += nldofs;
   }
}
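// Reduce() below runs the communication in the opposite direction:
// non-masters send their ldof values to the group master, the master
// receives one block of nldofs entries per other group member into
// group_buf, and the supplied Op (Sum, Min, Max, BitOR) combines those
// blocks with the master's own copy in ldata. Only the master's ldata is
// updated, so a call to Bcast() is typically used afterwards to make the
// result consistent on all ranks.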
template <class T>
void GroupCommunicator::Reduce(T *ldata, void (*Op)(OpData<T>))
{
   if (group_buf_size == 0)
      return;

   int i, gr, request_counter = 0;
   OpData<T> opd;

   group_buf.SetSize(group_buf_size*sizeof(T));
   opd.ldata = ldata;
   opd.buf = (T *)group_buf.GetData();
   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      opd.nldofs = group_ldof.RowSize(gr);

      // ignore groups without dofs
      if (opd.nldofs == 0)
         continue;

      opd.ldofs = group_ldof.GetRow(gr);

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         for (i = 0; i < opd.nldofs; i++)
            opd.buf[i] = ldata[opd.ldofs[i]];

         MPI_Isend(opd.buf,
                   opd.nldofs,
                   Get_MPI_Datatype<T>(),
                   gtopo.GetGroupMasterRank(gr),
                   43822 + gtopo.GetGroupMasterGroup(gr),
                   gtopo.GetComm(),
                   &requests[request_counter]);
         request_counter++;
         opd.buf += opd.nldofs;
      }
      else // we are the master
      {
         const int  gs  = gtopo.GetGroupSize(gr);
         const int *nbs = gtopo.GetGroup(gr);
         for (i = 0; i < gs; i++)
         {
            if (nbs[i] != 0)
            {
               MPI_Irecv(opd.buf,
                         opd.nldofs,
                         Get_MPI_Datatype<T>(),
                         gtopo.GetNeighborRank(nbs[i]),
                         43822 + gtopo.GetGroupMasterGroup(gr),
                         gtopo.GetComm(),
                         &requests[request_counter]);
               request_counter++;
               opd.buf += opd.nldofs;
            }
         }
      }
   }

   MPI_Waitall(request_counter, requests, statuses);

   // perform the reduce operation
   opd.buf = (T *)group_buf.GetData();
   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      opd.nldofs = group_ldof.RowSize(gr);

      // ignore groups without dofs
      if (opd.nldofs == 0)
         continue;

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         opd.buf += opd.nldofs;
      }
      else // we are the master
      {
         opd.ldofs = group_ldof.GetRow(gr);
         opd.nb = gtopo.GetGroupSize(gr)-1;
         Op(opd);
         opd.buf += opd.nb * opd.nldofs;
      }
   }
}
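// The reduce operators below all see the same OpData<T> layout on the
// master: opd.buf holds opd.nb consecutive blocks of opd.nldofs entries
// (one block per other group member), and opd.ldofs gives the position of
// each shared dof in opd.ldata. For example, Sum() computes
//
//    ldata[ldofs[i]] += buf[0*nldofs+i] + ... + buf[(nb-1)*nldofs+i]
//
// for every i in [0, nldofs).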
template <class T>
void GroupCommunicator::Sum(OpData<T> opd)
{
   for (int i = 0; i < opd.nldofs; i++)
   {
      T data = opd.ldata[opd.ldofs[i]];
      for (int j = 0; j < opd.nb; j++)
         data += opd.buf[j*opd.nldofs+i];
      opd.ldata[opd.ldofs[i]] = data;
   }
}
template <class T>
void GroupCommunicator::Min(OpData<T> opd)
{
   for (int i = 0; i < opd.nldofs; i++)
   {
      T data = opd.ldata[opd.ldofs[i]];
      for (int j = 0; j < opd.nb; j++)
      {
         T b = opd.buf[j*opd.nldofs+i];
         if (data > b)
            data = b;
      }
      opd.ldata[opd.ldofs[i]] = data;
   }
}
template <class T>
void GroupCommunicator::Max(OpData<T> opd)
{
   for (int i = 0; i < opd.nldofs; i++)
   {
      T data = opd.ldata[opd.ldofs[i]];
      for (int j = 0; j < opd.nb; j++)
      {
         T b = opd.buf[j*opd.nldofs+i];
         if (data < b)
            data = b;
      }
      opd.ldata[opd.ldofs[i]] = data;
   }
}
template <class T>
void GroupCommunicator::BitOR(OpData<T> opd)
{
   for (int i = 0; i < opd.nldofs; i++)
   {
      T data = opd.ldata[opd.ldofs[i]];
      for (int j = 0; j < opd.nb; j++)
         data |= opd.buf[j*opd.nldofs+i];
      opd.ldata[opd.ldofs[i]] = data;
   }
}
GroupCommunicator::~GroupCommunicator()
{
   delete [] statuses;
   delete [] requests;
}
// explicitly define GroupCommunicator::Get_MPI_Datatype for int and double
template <> inline MPI_Datatype GroupCommunicator::Get_MPI_Datatype<int>()
{
   return MPI_INT;
}

template <> inline MPI_Datatype GroupCommunicator::Get_MPI_Datatype<double>()
{
   return MPI_DOUBLE;
}
// instantiate GroupCommunicator::Bcast and Reduce for int and double
template void GroupCommunicator::Bcast<int>(int *);
template void GroupCommunicator::Reduce<int>(int *, void (*)(OpData<int>));

template void GroupCommunicator::Bcast<double>(double *);
template void GroupCommunicator::Reduce<double>(
   double *, void (*)(OpData<double>));
// instantiate reduce operators for int and double
template void GroupCommunicator::Sum<int>(OpData<int>);
template void GroupCommunicator::Min<int>(OpData<int>);
template void GroupCommunicator::Max<int>(OpData<int>);
template void GroupCommunicator::BitOR<int>(OpData<int>);

template void GroupCommunicator::Sum<double>(OpData<double>);
template void GroupCommunicator::Min<double>(OpData<double>);
template void GroupCommunicator::Max<double>(OpData<double>);
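// Typical calling sequence (a minimal sketch, not part of this file; it
// assumes a GroupTopology 'gt' and an ldof -> group map 'ldof_group'
// prepared elsewhere in the library, e.g. by the parallel mesh/space
// classes):
//
//    GroupCommunicator gc(gt);
//    gc.Create(ldof_group);          // build the GroupLDofTable and buffers
//    gc.Reduce<double>(data, GroupCommunicator::Sum); // accumulate on masters
//    gc.Bcast(data);                 // masters overwrite the other copies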
}

#endif