MFEM v3.1
Finite element discretization library
communication.cpp
// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#include "../config/config.hpp"

#ifdef MFEM_USE_MPI

#include <mpi.h>

#include "array.hpp"
#include "table.hpp"
#include "sets.hpp"
#include "communication.hpp"
#include <iostream>
#include <map>

using namespace std;

namespace mfem
{

GroupTopology::GroupTopology(const GroupTopology &gt)
   : MyComm(gt.MyComm),
     group_lproc(gt.group_lproc)
{
   gt.groupmaster_lproc.Copy(groupmaster_lproc);
   gt.lproc_proc.Copy(lproc_proc);
   gt.group_mgroup.Copy(group_mgroup);
}

void GroupTopology::ProcToLProc()
{
   int NRanks;
   MPI_Comm_size(MyComm, &NRanks);

   map<int, int> proc_lproc;

   int lproc_counter = 0;
   for (int i = 0; i < group_lproc.Size_of_connections(); i++)
   {
      const pair<const int, int> p(group_lproc.GetJ()[i], lproc_counter);
      if (proc_lproc.insert(p).second)
      {
         lproc_counter++;
      }
   }
   // Note: group_lproc.GetJ()[0] == MyRank --> proc_lproc[MyRank] == 0

   lproc_proc.SetSize(lproc_counter);
   for (map<int, int>::iterator it = proc_lproc.begin();
        it != proc_lproc.end(); ++it)
   {
      lproc_proc[it->second] = it->first;
   }

   for (int i = 0; i < group_lproc.Size_of_connections(); i++)
   {
      group_lproc.GetJ()[i] = proc_lproc[group_lproc.GetJ()[i]];
   }

   for (int i = 0; i < NGroups(); i++)
   {
      groupmaster_lproc[i] = proc_lproc[groupmaster_lproc[i]];
   }
}
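
// Illustration (not from the library): the insert-based compression above
// maps each distinct global MPI rank to a dense local index in order of
// first appearance, which lets the inverse table lproc_proc be built in a
// single pass. A minimal standalone sketch of the idiom with made-up ranks:
//
//    const int ranks[] = { 5, 5, 2, 7, 2 };
//    std::map<int, int> proc_lproc;
//    int lproc_counter = 0;
//    for (int i = 0; i < 5; i++)
//    {
//       // insert() returns {iterator, bool}; the bool is true only the
//       // first time a rank is seen, so each new rank gets the next index
//       if (proc_lproc.insert(std::make_pair(ranks[i], lproc_counter)).second)
//       {
//          lproc_counter++;
//       }
//    }
//    // result: proc_lproc = {2->1, 5->0, 7->2} and lproc_proc = {5, 2, 7}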

void GroupTopology::Create(ListOfIntegerSets &groups, int mpitag)
{
   groups.AsTable(group_lproc); // group_lproc = group_proc

   Table group_mgroupandproc;
   group_mgroupandproc.SetDims(NGroups(),
                               group_lproc.Size_of_connections() + NGroups());
   for (int i = 0; i < NGroups(); i++)
   {
      int j = group_mgroupandproc.GetI()[i];
      group_mgroupandproc.GetI()[i+1] = j + group_lproc.RowSize(i) + 1;
      group_mgroupandproc.GetJ()[j] = i;
      j++;
      for (int k = group_lproc.GetI()[i];
           j < group_mgroupandproc.GetI()[i+1]; j++, k++)
      {
         group_mgroupandproc.GetJ()[j] = group_lproc.GetJ()[k];
      }
   }

   // build groupmaster_lproc with lproc = proc
   groupmaster_lproc.SetSize(NGroups());

   // simplest choice of the group owner
   for (int i = 0; i < NGroups(); i++)
   {
      groupmaster_lproc[i] = groups.PickElementInSet(i);
   }

   // load-balanced choice of the group owner, which however can lead to
   // isolated dofs
   // for (i = 0; i < NGroups(); i++)
   //    groupmaster_lproc[i] = groups.PickRandomElementInSet(i);

   ProcToLProc();

   // build group_mgroup
   group_mgroup.SetSize(NGroups());

   int send_counter = 0;
   int recv_counter = 0;
   for (int i = 1; i < NGroups(); i++)
      if (groupmaster_lproc[i] != 0) // we are not the master
      {
         recv_counter++;
      }
      else
      {
         send_counter += group_lproc.RowSize(i)-1;
      }

   MPI_Request *requests = new MPI_Request[send_counter];
   MPI_Status  *statuses = new MPI_Status[send_counter];

   int max_recv_size = 0;
   send_counter = 0;
   for (int i = 1; i < NGroups(); i++)
   {
      if (groupmaster_lproc[i] == 0) // we are the master
      {
         group_mgroup[i] = i;

         for (int j = group_lproc.GetI()[i];
              j < group_lproc.GetI()[i+1]; j++)
         {
            if (group_lproc.GetJ()[j] != 0)
            {
               MPI_Isend(group_mgroupandproc.GetRow(i),
                         group_mgroupandproc.RowSize(i),
                         MPI_INT,
                         lproc_proc[group_lproc.GetJ()[j]],
                         mpitag,
                         MyComm,
                         &requests[send_counter]);
               send_counter++;
            }
         }
      }
      else // we are not the master
         if (max_recv_size < group_lproc.RowSize(i))
         {
            max_recv_size = group_lproc.RowSize(i);
         }
   }
   max_recv_size++;

   IntegerSet group;
   if (recv_counter > 0)
   {
      int count;
      MPI_Status status;
      int *recv_buf = new int[max_recv_size];
      for ( ; recv_counter > 0; recv_counter--)
      {
         MPI_Recv(recv_buf, max_recv_size, MPI_INT,
                  MPI_ANY_SOURCE, mpitag, MyComm, &status);

         MPI_Get_count(&status, MPI_INT, &count);

         group.Recreate(count-1, recv_buf+1);
         int g = groups.Lookup(group);
         group_mgroup[g] = recv_buf[0];

         if (lproc_proc[groupmaster_lproc[g]] != status.MPI_SOURCE)
         {
            cerr << "\n\n\nGroupTopology::Create: "
                 << MyRank() << ": ERROR\n\n\n" << endl;
            mfem_error();
         }
      }
      delete [] recv_buf;
   }

   MPI_Waitall(send_counter, requests, statuses);

   delete [] statuses;
   delete [] requests;
}
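
// Usage sketch (illustrative; the rank data and tag below are hypothetical).
// A caller such as ParMesh describes each communication group as the set of
// MPI ranks sharing some mesh entity, with group 0 always being the local
// rank by itself, then hands the list to Create():
//
//    GroupTopology gtopo(MPI_COMM_WORLD);
//    ListOfIntegerSets groups;
//    IntegerSet s;
//    Array<int> &ranks = s;        // IntegerSet is an Array<int> of ranks
//    ranks.Append(my_rank);        // group 0: this processor alone
//    groups.Insert(s);
//    // ... one more Insert(s) per set of ranks sharing an entity ...
//    gtopo.Create(groups, 822);    // 822: any otherwise unused MPI tag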

GroupCommunicator::GroupCommunicator(GroupTopology &gt)
   : gtopo(gt)
{
   group_buf_size = 0;
   requests = NULL;
   statuses = NULL;
}

void GroupCommunicator::Create(Array<int> &ldof_group)
{
   group_ldof.MakeI(gtopo.NGroups());
   for (int i = 0; i < ldof_group.Size(); i++)
   {
      int group = ldof_group[i];
      if (group != 0)
      {
         group_ldof.AddAColumnInRow(group);
      }
   }
   group_ldof.MakeJ();

   for (int i = 0; i < ldof_group.Size(); i++)
   {
      int group = ldof_group[i];
      if (group != 0)
      {
         group_ldof.AddConnection(group, i);
      }
   }
   group_ldof.ShiftUpI();

   Finalize();
}
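
// Note on the two-pass construction above: MakeI() allocates the row
// counters, the first loop counts the ldofs in each group, MakeJ() turns
// the counts into CSR-style row offsets, the second loop writes each ldof
// into its group's row, and ShiftUpI() restores the offsets that the writes
// advanced. A small worked example with made-up data:
//
//    Array<int> ldof_group(4);
//    ldof_group[0] = 0; ldof_group[1] = 2;   // ldofs 1 and 3 in group 2,
//    ldof_group[2] = 1; ldof_group[3] = 2;   // ldof 2 in group 1
//    gc.Create(ldof_group);
//    // group_ldof: row 1 = {2}, row 2 = {1, 3}; group 0 (purely local
//    // dofs) is skipped, so it never enters the exchange buffers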

void GroupCommunicator::Finalize()
{
   int request_counter = 0;

   for (int gr = 1; gr < group_ldof.Size(); gr++)
      if (group_ldof.RowSize(gr) != 0)
      {
         int gr_requests;
         if (!gtopo.IAmMaster(gr)) // we are not the master
         {
            gr_requests = 1;
         }
         else
         {
            gr_requests = gtopo.GetGroupSize(gr)-1;
         }

         request_counter += gr_requests;
         group_buf_size += gr_requests * group_ldof.RowSize(gr);
      }

   requests = new MPI_Request[request_counter];
   statuses = new MPI_Status[request_counter];
}
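
// Worked example (hypothetical sizes): suppose this processor belongs to
// two nontrivial groups, as master of group A (3 ranks, 5 ldofs) and as a
// non-master in group B (2 ranks, 4 ldofs). Then the loop above yields
//
//    request_counter = (3-1) + 1     = 3 MPI requests,
//    group_buf_size  = (3-1)*5 + 1*4 = 14 entries of type T,
//
// i.e. one buffer segment of nldofs entries per outstanding send/receive.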

template <class T>
void GroupCommunicator::Bcast(T *ldata)
{
   if (group_buf_size == 0)
   {
      return;
   }

   group_buf.SetSize(group_buf_size*sizeof(T));
   T *buf = (T *)group_buf.GetData();

   int i, gr, request_counter = 0;

   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      const int nldofs = group_ldof.RowSize(gr);

      // ignore groups without dofs
      if (nldofs == 0)
      {
         continue;
      }

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         MPI_Irecv(buf,
                   nldofs,
                   Get_MPI_Datatype<T>(),
                   gtopo.GetGroupMasterRank(gr),
                   40822 + gtopo.GetGroupMasterGroup(gr),
                   gtopo.GetComm(),
                   &requests[request_counter]);
         request_counter++;
      }
      else // we are the master
      {
         // fill send buffer
         const int *ldofs = group_ldof.GetRow(gr);
         for (i = 0; i < nldofs; i++)
         {
            buf[i] = ldata[ldofs[i]];
         }

         const int gs = gtopo.GetGroupSize(gr);
         const int *nbs = gtopo.GetGroup(gr);
         for (i = 0; i < gs; i++)
         {
            if (nbs[i] != 0)
            {
               MPI_Isend(buf,
                         nldofs,
                         Get_MPI_Datatype<T>(),
                         gtopo.GetNeighborRank(nbs[i]),
                         40822 + gtopo.GetGroupMasterGroup(gr),
                         gtopo.GetComm(),
                         &requests[request_counter]);
               request_counter++;
            }
         }
      }
      buf += nldofs;
   }

   MPI_Waitall(request_counter, requests, statuses);

   // copy the received data from the buffer to ldata
   buf = (T *)group_buf.GetData();
   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      const int nldofs = group_ldof.RowSize(gr);

      // ignore groups without dofs
      if (nldofs == 0)
      {
         continue;
      }

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         const int *ldofs = group_ldof.GetRow(gr);
         for (i = 0; i < nldofs; i++)
         {
            ldata[ldofs[i]] = buf[i];
         }
      }
      buf += nldofs;
   }
}
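
// Usage sketch (illustrative): once the communicator is set up, Bcast()
// overwrites every non-master copy of a shared ldof with the master's
// value, making shared data consistent across processors:
//
//    GroupCommunicator gc(gtopo);
//    gc.Create(ldof_group);     // builds group_ldof and calls Finalize()
//    double *data = new double[ldof_group.Size()];
//    // ... fill the entries mastered by this processor ...
//    gc.Bcast(data);            // shared entries now match their masters
//    // ... use data, eventually delete [] data ...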

template <class T>
void GroupCommunicator::Reduce(T *ldata, void (*Op)(OpData<T>))
{
   if (group_buf_size == 0)
   {
      return;
   }

   int i, gr, request_counter = 0;
   OpData<T> opd;

   group_buf.SetSize(group_buf_size*sizeof(T));
   opd.ldata = ldata;
   opd.buf = (T *)group_buf.GetData();
   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      opd.nldofs = group_ldof.RowSize(gr);

      // ignore groups without dofs
      if (opd.nldofs == 0)
      {
         continue;
      }

      opd.ldofs = group_ldof.GetRow(gr);

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         for (i = 0; i < opd.nldofs; i++)
         {
            opd.buf[i] = ldata[opd.ldofs[i]];
         }

         MPI_Isend(opd.buf,
                   opd.nldofs,
                   Get_MPI_Datatype<T>(),
                   gtopo.GetGroupMasterRank(gr),
                   43822 + gtopo.GetGroupMasterGroup(gr),
                   gtopo.GetComm(),
                   &requests[request_counter]);
         request_counter++;
         opd.buf += opd.nldofs;
      }
      else // we are the master
      {
         const int gs = gtopo.GetGroupSize(gr);
         const int *nbs = gtopo.GetGroup(gr);
         for (i = 0; i < gs; i++)
         {
            if (nbs[i] != 0)
            {
               MPI_Irecv(opd.buf,
                         opd.nldofs,
                         Get_MPI_Datatype<T>(),
                         gtopo.GetNeighborRank(nbs[i]),
                         43822 + gtopo.GetGroupMasterGroup(gr),
                         gtopo.GetComm(),
                         &requests[request_counter]);
               request_counter++;
               opd.buf += opd.nldofs;
            }
         }
      }
   }

   MPI_Waitall(request_counter, requests, statuses);

   // perform the reduce operation
   opd.buf = (T *)group_buf.GetData();
   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      opd.nldofs = group_ldof.RowSize(gr);

      // ignore groups without dofs
      if (opd.nldofs == 0)
      {
         continue;
      }

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         opd.buf += opd.nldofs;
      }
      else // we are the master
      {
         opd.ldofs = group_ldof.GetRow(gr);
         opd.nb = gtopo.GetGroupSize(gr)-1;
         Op(opd);
         opd.buf += opd.nb * opd.nldofs;
      }
   }
}
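
// Usage sketch: Reduce() is the dual of Bcast(): every non-master sends its
// shared entries to the group master, which folds all copies together with
// Op. This is how duplicated dofs are accumulated in MFEM, e.g.
//
//    gc.Reduce<double>(data, GroupCommunicator::Sum);
//    gc.Bcast(data);   // optionally push the reduced values back out
//
// Note that only the masters hold the reduced result until the Bcast().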
435 
436 template <class T>
438 {
439  for (int i = 0; i < opd.nldofs; i++)
440  {
441  T data = opd.ldata[opd.ldofs[i]];
442  for (int j = 0; j < opd.nb; j++)
443  {
444  data += opd.buf[j*opd.nldofs+i];
445  }
446  opd.ldata[opd.ldofs[i]] = data;
447  }
448 }
449 
450 template <class T>
452 {
453  for (int i = 0; i < opd.nldofs; i++)
454  {
455  T data = opd.ldata[opd.ldofs[i]];
456  for (int j = 0; j < opd.nb; j++)
457  {
458  T b = opd.buf[j*opd.nldofs+i];
459  if (data > b)
460  {
461  data = b;
462  }
463  }
464  opd.ldata[opd.ldofs[i]] = data;
465  }
466 }
467 
468 template <class T>
470 {
471  for (int i = 0; i < opd.nldofs; i++)
472  {
473  T data = opd.ldata[opd.ldofs[i]];
474  for (int j = 0; j < opd.nb; j++)
475  {
476  T b = opd.buf[j*opd.nldofs+i];
477  if (data < b)
478  {
479  data = b;
480  }
481  }
482  opd.ldata[opd.ldofs[i]] = data;
483  }
484 }
485 
486 template <class T>
488 {
489  for (int i = 0; i < opd.nldofs; i++)
490  {
491  T data = opd.ldata[opd.ldofs[i]];
492  for (int j = 0; j < opd.nb; j++)
493  {
494  data |= opd.buf[j*opd.nldofs+i];
495  }
496  opd.ldata[opd.ldofs[i]] = data;
497  }
498 }
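
// The four operators above all follow the same pattern, and a custom
// reduction (hypothetical, not part of MFEM) only needs to match the Op
// signature: fold the opd.nb received blocks, each opd.nldofs entries long,
// into the master's local data. For example, a product reduction:
//
//    template <class T>
//    void Product(GroupCommunicator::OpData<T> opd)
//    {
//       for (int i = 0; i < opd.nldofs; i++)
//       {
//          T data = opd.ldata[opd.ldofs[i]];
//          for (int j = 0; j < opd.nb; j++)
//          {
//             data *= opd.buf[j*opd.nldofs+i];
//          }
//          opd.ldata[opd.ldofs[i]] = data;
//       }
//    }
//
// used as gc.Reduce<double>(data, Product);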

GroupCommunicator::~GroupCommunicator()
{
   delete [] statuses;
   delete [] requests;
}

// explicitly define GroupCommunicator::Get_MPI_Datatype for int and double
template <> inline MPI_Datatype GroupCommunicator::Get_MPI_Datatype<int>()
{
   return MPI_INT;
}

template <> inline MPI_Datatype GroupCommunicator::Get_MPI_Datatype<double>()
{
   return MPI_DOUBLE;
}
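
// Supporting another data type would take one more specialization plus the
// matching explicit instantiations; a hypothetical sketch for float:
//
//    template <>
//    inline MPI_Datatype GroupCommunicator::Get_MPI_Datatype<float>()
//    {
//       return MPI_FLOAT;
//    }
//    template void GroupCommunicator::Bcast<float>(float *);
//    template void GroupCommunicator::Reduce<float>(
//       float *, void (*)(OpData<float>));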

// @cond DOXYGEN_SKIP

// instantiate GroupCommunicator::Bcast and Reduce for int and double
template void GroupCommunicator::Bcast<int>(int *);
template void GroupCommunicator::Reduce<int>(int *, void (*)(OpData<int>));

template void GroupCommunicator::Bcast<double>(double *);
template void GroupCommunicator::Reduce<double>(
   double *, void (*)(OpData<double>));

// @endcond

// instantiate reduce operators for int and double
template void GroupCommunicator::Sum<int>(OpData<int>);
template void GroupCommunicator::Min<int>(OpData<int>);
template void GroupCommunicator::Max<int>(OpData<int>);
template void GroupCommunicator::BitOR<int>(OpData<int>);

template void GroupCommunicator::Sum<double>(OpData<double>);
template void GroupCommunicator::Min<double>(OpData<double>);
template void GroupCommunicator::Max<double>(OpData<double>);

} // namespace mfem

#endif