MFEM v4.2.0
Finite element discretization library
communication.cpp
1 // Copyright (c) 2010-2020, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #include "../config/config.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 #include <mpi.h>
17 #ifdef __bgq__
18 #include <mpix.h>
19 #endif
20 
21 #include "array.hpp"
22 #include "table.hpp"
23 #include "sets.hpp"
24 #include "communication.hpp"
25 #include "text.hpp"
26 #include "sort_pairs.hpp"
27 #include "globals.hpp"
28 
29 #include <iostream>
30 #include <map>
31 
32 using namespace std;
33 
34 namespace mfem
35 {
36 
37 void MPI_Session::GetRankAndSize()
38 {
39  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
40  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
41 }
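
// Usage sketch (illustrative, not part of this file): MPI_Session wraps
// MPI_Init()/MPI_Finalize() and caches the world rank and size via
// GetRankAndSize() above.
//
//   int main(int argc, char *argv[])
//   {
//      mfem::MPI_Session mpi(argc, argv);
//      if (mpi.Root())
//      {
//         mfem::out << "Running on " << mpi.WorldSize() << " ranks.\n";
//      }
//      return 0;
//   }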
42 
43 
44 GroupTopology::GroupTopology(const GroupTopology &gt)
45  : MyComm(gt.MyComm),
46  group_lproc(gt.group_lproc)
47 {
48  gt.groupmaster_lproc.Copy(groupmaster_lproc);
49  gt.lproc_proc.Copy(lproc_proc);
50  gt.group_mgroup.Copy(group_mgroup);
51 }
52 
53 void GroupTopology::ProcToLProc()
54 {
55  int NRanks;
56  MPI_Comm_size(MyComm, &NRanks);
57 
58  map<int, int> proc_lproc;
59 
60  // The local processor ids are assigned in group order and, within each
61  // group, in the order the processors appear in that group, i.e. the ids
62  // follow the order of first appearance in the J array of group_lproc.
63  int lproc_counter = 0;
64  for (int i = 0; i < group_lproc.Size_of_connections(); i++)
65  {
66  const pair<const int, int> p(group_lproc.GetJ()[i], lproc_counter);
67  if (proc_lproc.insert(p).second)
68  {
69  lproc_counter++;
70  }
71  }
72  // Note: group_lproc.GetJ()[0] == MyRank --> proc_lproc[MyRank] == 0
73 
74  lproc_proc.SetSize(lproc_counter);
75  for (map<int, int>::iterator it = proc_lproc.begin();
76  it != proc_lproc.end(); ++it)
77  {
78  lproc_proc[it->second] = it->first;
79  }
80 
81  for (int i = 0; i < group_lproc.Size_of_connections(); i++)
82  {
83  group_lproc.GetJ()[i] = proc_lproc[group_lproc.GetJ()[i]];
84  }
85 
86  for (int i = 0; i < NGroups(); i++)
87  {
88  groupmaster_lproc[i] = proc_lproc[groupmaster_lproc[i]];
89  }
90 }
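
// Worked example (hypothetical data): if this rank is 7 and group_lproc
// initially stores the MPI ranks {7}, {3,7}, {3,7,12}, the map built above
// assigns local ids by first appearance in the J array: 7->0, 3->1, 12->2.
// Hence lproc_proc = {7,3,12} and the rows become {0}, {1,0}, {1,0,2}, with
// lproc 0 always being the local rank.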
91 
92 void GroupTopology::Create(ListOfIntegerSets &groups, int mpitag)
93 {
94  groups.AsTable(group_lproc); // group_lproc = group_proc
95 
96  Table group_mgroupandproc;
97  group_mgroupandproc.SetDims(NGroups(),
98  group_lproc.Size_of_connections() + NGroups());
99  for (int i = 0; i < NGroups(); i++)
100  {
101  int j = group_mgroupandproc.GetI()[i];
102  group_mgroupandproc.GetI()[i+1] = j + group_lproc.RowSize(i) + 1;
103  group_mgroupandproc.GetJ()[j] = i;
104  j++;
105  for (int k = group_lproc.GetI()[i];
106  j < group_mgroupandproc.GetI()[i+1]; j++, k++)
107  {
108  group_mgroupandproc.GetJ()[j] = group_lproc.GetJ()[k];
109  }
110  }
111 
112  // build groupmaster_lproc with lproc = proc
113  groupmaster_lproc.SetSize(NGroups());
114 
115  // simplest choice of the group owner
116  for (int i = 0; i < NGroups(); i++)
117  {
118  groupmaster_lproc[i] = groups.PickElementInSet(i);
119  }
120 
121  // load-balanced choice of the group owner, which however can lead to
122  // isolated dofs
123  // for (i = 0; i < NGroups(); i++)
124  // groupmaster_lproc[i] = groups.PickRandomElementInSet(i);
125 
126  ProcToLProc();
127 
128  // Build 'group_mgroup':
129 
130  // Use aggregated neighbor communication: at most one send to and/or one
131  // receive from each neighbor.
132 
133  group_mgroup.SetSize(NGroups());
134  MFEM_DEBUG_DO(group_mgroup = -1);
135  for (int g = 0; g < NGroups(); g++)
136  {
137  if (IAmMaster(g)) { group_mgroup[g] = g; }
138  }
139 
140  // The Table 'lproc_cgroup': for each lproc, list the groups that are owned
141  // by this rank or by that lproc.
142  Table lproc_cgroup;
143  {
144  Array<Connection> lproc_cgroup_list;
145  for (int g = 1; g < NGroups(); g++)
146  {
147  if (IAmMaster(g))
148  {
149  const int gs = GetGroupSize(g);
150  const int *lprocs = GetGroup(g);
151  for (int i = 0; i < gs; i++)
152  {
153  if (lprocs[i])
154  {
155  lproc_cgroup_list.Append(Connection(lprocs[i],g));
156  }
157  }
158  }
159  else
160  {
161  lproc_cgroup_list.Append(Connection(GetGroupMaster(g),g));
162  }
163  }
164  lproc_cgroup_list.Sort();
165  lproc_cgroup_list.Unique();
166  lproc_cgroup.MakeFromList(GetNumNeighbors(), lproc_cgroup_list);
167  }
168 
169  // Determine size of the send-receive buffer. For each neighbor the buffer
170  // contains: <send-part><receive-part> with each part consisting of a list of
171  // groups. Each group, g, has group_lproc.RowSize(g)+2 integers: the first
172  // entry is group_lproc.RowSize(g) - the number of processors in the group,
173  // followed by the group-id in the master processor, followed by the ranks of
174  // the processors in the group.
175  Table buffer;
176  buffer.MakeI(2*lproc_cgroup.Size()-2); // excluding the "local" lproc, 0
177  for (int nbr = 1; nbr < lproc_cgroup.Size(); nbr++)
178  {
179  const int send_row = 2*(nbr-1);
180  const int recv_row = send_row+1;
181  const int ng = lproc_cgroup.RowSize(nbr);
182  const int *g = lproc_cgroup.GetRow(nbr);
183  for (int j = 0; j < ng; j++)
184  {
185  const int gs = group_lproc.RowSize(g[j]);
186  if (IAmMaster(g[j]))
187  {
188  buffer.AddColumnsInRow(send_row, gs+2);
189  }
190  else
191  {
192  MFEM_ASSERT(GetGroupMaster(g[j]) == nbr, "internal error");
193  buffer.AddColumnsInRow(recv_row, gs+2);
194  }
195  }
196  }
197  buffer.MakeJ();
198  for (int nbr = 1; nbr < lproc_cgroup.Size(); nbr++)
199  {
200  const int send_row = 2*(nbr-1);
201  const int recv_row = send_row+1;
202  const int ng = lproc_cgroup.RowSize(nbr);
203  const int *g = lproc_cgroup.GetRow(nbr);
204  for (int j = 0; j < ng; j++)
205  {
206  const int gs = group_lproc.RowSize(g[j]);
207  if (IAmMaster(g[j]))
208  {
209  buffer.AddConnection(send_row, gs);
210  buffer.AddConnections(
211  send_row, group_mgroupandproc.GetRow(g[j]), gs+1);
212  }
213  else
214  {
215  buffer.AddColumnsInRow(recv_row, gs+2);
216  }
217  }
218  }
219  buffer.ShiftUpI();
220  Array<MPI_Request> send_requests(lproc_cgroup.Size()-1);
221  Array<MPI_Request> recv_requests(lproc_cgroup.Size()-1);
222  send_requests = MPI_REQUEST_NULL;
223  recv_requests = MPI_REQUEST_NULL;
224  for (int nbr = 1; nbr < lproc_cgroup.Size(); nbr++)
225  {
226  const int send_row = 2*(nbr-1);
227  const int recv_row = send_row+1;
228  const int send_size = buffer.RowSize(send_row);
229  const int recv_size = buffer.RowSize(recv_row);
230  if (send_size > 0)
231  {
232  MPI_Isend(buffer.GetRow(send_row), send_size, MPI_INT, lproc_proc[nbr],
233  mpitag, MyComm, &send_requests[nbr-1]);
234  }
235  if (recv_size > 0)
236  {
237  MPI_Irecv(buffer.GetRow(recv_row), recv_size, MPI_INT, lproc_proc[nbr],
238  mpitag, MyComm, &recv_requests[nbr-1]);
239  }
240  }
241 
242  if (recv_requests.Size() > 0)
243  {
244  int idx;
245  IntegerSet group;
246  while (MPI_Waitany(recv_requests.Size(), recv_requests.GetData(), &idx,
247  MPI_STATUS_IGNORE),
248  idx != MPI_UNDEFINED)
249  {
250  const int recv_size = buffer.RowSize(2*idx+1);
251  const int *recv_buf = buffer.GetRow(2*idx+1);
252  for (int s = 0; s < recv_size; s += recv_buf[s]+2)
253  {
254  group.Recreate(recv_buf[s], recv_buf+s+2);
255  const int g = groups.Lookup(group);
256  MFEM_ASSERT(group_mgroup[g] == -1, "communication error");
257  group_mgroup[g] = recv_buf[s+1];
258  }
259  }
260  }
261 
262  MPI_Waitall(send_requests.Size(), send_requests.GetData(),
263  MPI_STATUSES_IGNORE);
264 
265  // debug barrier: MPI_Barrier(MyComm);
266 }
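
// Usage sketch (illustrative, not part of this file): a GroupTopology is
// typically built from a ListOfIntegerSets in which set 0 contains only the
// local rank and every other set lists the ranks sharing some mesh entity.
// The variable names below are hypothetical.
//
//   ListOfIntegerSets groups;
//   IntegerSet local;
//   Array<int> &local_ranks = local;   // IntegerSet is usable as Array<int>
//   local_ranks.Append(my_rank);       // group 0: the local rank alone
//   groups.Insert(local);
//   // ... one IntegerSet per shared entity, listing all sharing ranks ...
//   GroupTopology gtopo(MPI_COMM_WORLD);
//   gtopo.Create(groups, 822);         // 822 is an arbitrary MPI tag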
267 
268 void GroupTopology::Save(ostream &out) const
269 {
270  out << "\ncommunication_groups\n";
271  out << "number_of_groups " << NGroups() << "\n\n";
272 
273  out << "# number of entities in each group, followed by group ids in group\n";
274  for (int group_id = 0; group_id < NGroups(); ++group_id)
275  {
276  int group_size = GetGroupSize(group_id);
277  const int * group_ptr = GetGroup(group_id);
278  out << group_size;
279  for ( int group_member_index = 0; group_member_index < group_size;
280  ++group_member_index)
281  {
282  out << " " << GetNeighborRank( group_ptr[group_member_index] );
283  }
284  out << "\n";
285  }
286 
287  // For future use, optional ownership strategy.
288  // out << "# ownership";
289 }
290 
291 void GroupTopology::Load(istream &in)
292 {
293  // Load the group topology from the stream: read the groups into a list
294  // of integer sets and rebuild the topology from it by calling Create().
295  std::string ident;
296 
297  // Read in number of groups
298  int number_of_groups = -1;
299  in >> ident;
300  MFEM_VERIFY(ident == "number_of_groups",
301  "GroupTopology::Load - expected 'number_of_groups' entry.");
302  in >> number_of_groups;
303 
304  // Skip the '# number of entities in each group ...' comment line.
305  skip_comment_lines(in, '#');
306 
307  ListOfIntegerSets integer_sets;
308  for (int group_id = 0; group_id < number_of_groups; ++group_id)
309  {
310  IntegerSet integer_set;
311  Array<int>& array = integer_set;
312  int group_size;
313  in >> group_size;
314  array.Reserve(group_size);
315  for ( int index = 0; index < group_size; ++index )
316  {
317  int value;
318  in >> value;
319  array.Append(value);
320  }
321  integer_sets.Insert(integer_set);
322  }
323 
324  Create(integer_sets, 823);
325 }
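
// Format sketch (hypothetical values): for a rank belonging to three groups,
// Save() above writes something like
//
//   communication_groups
//   number_of_groups 3
//
//   # number of entities in each group, followed by group ids in group
//   1 17
//   2 17 18
//   3 16 17 19
//
// i.e. each row holds the group size followed by the MPI ranks in the group,
// with group 0 containing only the local rank. Load() reads this back and
// rebuilds the topology through Create() with a fixed tag of 823.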
326 
327 void GroupTopology::Copy(GroupTopology &copy) const
328 {
329  copy.SetComm(MyComm);
330  group_lproc.Copy(copy.group_lproc);
331  groupmaster_lproc.Copy(copy.groupmaster_lproc);
332  lproc_proc.Copy(copy.lproc_proc);
333  group_mgroup.Copy(copy.group_mgroup);
334 }
335 
336 // Initialize the static mpi_type for the specializations of MPITypeMap:
337 const MPI_Datatype MPITypeMap<int>::mpi_type = MPI_INT;
338 const MPI_Datatype MPITypeMap<double>::mpi_type = MPI_DOUBLE;
339 
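// Sketch (hedged): these specializations let the templated methods below pick
// the MPI datatype from the C++ element type, e.g.
//
//   MPI_Isend(buf, n, MPITypeMap<T>::mpi_type, rank, tag, comm, &request);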
340 
341 GroupCommunicator::GroupCommunicator(GroupTopology &gt, Mode m)
342  : gtopo(gt), mode(m)
343 {
344  group_buf_size = 0;
345  requests = NULL;
346  // statuses = NULL;
347  comm_lock = 0;
348  num_requests = 0;
349  request_marker = NULL;
350  buf_offsets = NULL;
351 }
352 
353 void GroupCommunicator::Create(const Array<int> &ldof_group)
354 {
355  group_ldof.MakeI(gtopo.NGroups());
356  for (int i = 0; i < ldof_group.Size(); i++)
357  {
358  int group = ldof_group[i];
359  if (group != 0)
360  {
361  group_ldof.AddAColumnInRow(group);
362  }
363  }
364  group_ldof.MakeJ();
365 
366  for (int i = 0; i < ldof_group.Size(); i++)
367  {
368  int group = ldof_group[i];
369  if (group != 0)
370  {
371  group_ldof.AddConnection(group, i);
372  }
373  }
374  group_ldof.ShiftUpI();
375 
376  Finalize();
377 }
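
// Usage sketch (illustrative, hypothetical names): 'ldof_group' maps every
// local dof to the group that shares it, with 0 marking purely local dofs;
// in MFEM it is normally supplied by a parallel finite element space.
//
//   Array<int> ldof_group(num_local_dofs);
//   ldof_group = 0;                  // most dofs are not shared
//   ldof_group[42] = 3;              // e.g. dof 42 belongs to group 3
//   GroupCommunicator gcomm(gtopo);  // mode defaults to byNeighbor
//   gcomm.Create(ldof_group);        // Finalize() is called internally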
378 
379 void GroupCommunicator::Finalize()
380 {
381  int request_counter = 0;
382 
383  // size buf_offsets = max(number of groups, number of neighbors)
384  buf_offsets = new int[max(group_ldof.Size(), gtopo.GetNumNeighbors())];
385  buf_offsets[0] = 0;
386  for (int gr = 1; gr < group_ldof.Size(); gr++)
387  {
388  if (group_ldof.RowSize(gr) != 0)
389  {
390  int gr_requests;
391  if (!gtopo.IAmMaster(gr)) // we are not the master
392  {
393  gr_requests = 1;
394  }
395  else
396  {
397  gr_requests = gtopo.GetGroupSize(gr)-1;
398  }
399 
400  request_counter += gr_requests;
401  group_buf_size += gr_requests * group_ldof.RowSize(gr);
402  }
403  }
404 
405  requests = new MPI_Request[request_counter];
406  // statuses = new MPI_Status[request_counter];
407  request_marker = new int[request_counter];
408 
409  // Construct nbr_send_groups and nbr_recv_groups: (nbr 0 = me)
410  nbr_send_groups.MakeI(gtopo.GetNumNeighbors());
411  nbr_recv_groups.MakeI(gtopo.GetNumNeighbors());
412  for (int gr = 1; gr < group_ldof.Size(); gr++)
413  {
414  const int nldofs = group_ldof.RowSize(gr);
415  if (nldofs == 0) { continue; }
416 
417  if (!gtopo.IAmMaster(gr)) // we are not the master
418  {
419  nbr_recv_groups.AddAColumnInRow(gtopo.GetGroupMaster(gr));
420  }
421  else // we are the master
422  {
423  const int grp_size = gtopo.GetGroupSize(gr);
424  const int *grp_nbr_list = gtopo.GetGroup(gr);
425  for (int i = 0; i < grp_size; i++)
426  {
427  if (grp_nbr_list[i] != 0)
428  {
429  nbr_send_groups.AddAColumnInRow(grp_nbr_list[i]);
430  }
431  }
432  }
433  }
434  nbr_send_groups.MakeJ();
435  nbr_recv_groups.MakeJ();
436  for (int gr = 1; gr < group_ldof.Size(); gr++)
437  {
438  const int nldofs = group_ldof.RowSize(gr);
439  if (nldofs == 0) { continue; }
440 
441  if (!gtopo.IAmMaster(gr)) // we are not the master
442  {
443  nbr_recv_groups.AddConnection(gtopo.GetGroupMaster(gr), gr);
444  }
445  else // we are the master
446  {
447  const int grp_size = gtopo.GetGroupSize(gr);
448  const int *grp_nbr_list = gtopo.GetGroup(gr);
449  for (int i = 0; i < grp_size; i++)
450  {
451  if (grp_nbr_list[i] != 0)
452  {
453  nbr_send_groups.AddConnection(grp_nbr_list[i], gr);
454  }
455  }
456  }
457  }
458  nbr_send_groups.ShiftUpI();
459  nbr_recv_groups.ShiftUpI();
460  // The above construction creates the Tables with the column indices
461  // sorted, i.e. the group lists are sorted. To coordinate this order between
462  // processors, we will sort the group lists in the nbr_recv_groups Table
463  // according to their indices in the master. This does not require any
464  // communication because we have access to the group indices in the master
465  // by calling: master_group_id = gtopo.GetGroupMasterGroup(my_group_id).
466  Array<Pair<int,int> > group_ids;
467  for (int nbr = 1; nbr < nbr_recv_groups.Size(); nbr++)
468  {
469  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
470  if (num_recv_groups > 0)
471  {
472  int *grp_list = nbr_recv_groups.GetRow(nbr);
473  group_ids.SetSize(num_recv_groups);
474  for (int i = 0; i < num_recv_groups; i++)
475  {
476  group_ids[i].one = gtopo.GetGroupMasterGroup(grp_list[i]);
477  group_ids[i].two = grp_list[i]; // my_group_id
478  }
479  group_ids.Sort();
480  for (int i = 0; i < num_recv_groups; i++)
481  {
482  grp_list[i] = group_ids[i].two;
483  }
484  }
485  }
486 }
487 
488 void GroupCommunicator::SetLTDofTable(const Array<int> &ldof_ltdof)
489 {
490  if (group_ltdof.Size() == group_ldof.Size()) { return; }
491 
492  group_ltdof.MakeI(group_ldof.Size());
493  for (int gr = 1; gr < group_ldof.Size(); gr++)
494  {
495  if (gtopo.IAmMaster(gr))
496  {
497  group_ltdof.AddColumnsInRow(gr, group_ldof.RowSize(gr));
498  }
499  }
500  group_ltdof.MakeJ();
501  for (int gr = 1; gr < group_ldof.Size(); gr++)
502  {
503  if (gtopo.IAmMaster(gr))
504  {
505  const int *ldofs = group_ldof.GetRow(gr);
506  const int nldofs = group_ldof.RowSize(gr);
507  for (int i = 0; i < nldofs; i++)
508  {
509  group_ltdof.AddConnection(gr, ldof_ltdof[ldofs[i]]);
510  }
511  }
512  }
513  group_ltdof.ShiftUpI();
514 }
515 
516 void GroupCommunicator::GetNeighborLTDofTable(Table &nbr_ltdof) const
517 {
518  nbr_ltdof.MakeI(nbr_send_groups.Size());
519  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
520  {
521  const int num_send_groups = nbr_send_groups.RowSize(nbr);
522  if (num_send_groups > 0)
523  {
524  const int *grp_list = nbr_send_groups.GetRow(nbr);
525  for (int i = 0; i < num_send_groups; i++)
526  {
527  const int group = grp_list[i];
528  const int nltdofs = group_ltdof.RowSize(group);
529  nbr_ltdof.AddColumnsInRow(nbr, nltdofs);
530  }
531  }
532  }
533  nbr_ltdof.MakeJ();
534  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
535  {
536  const int num_send_groups = nbr_send_groups.RowSize(nbr);
537  if (num_send_groups > 0)
538  {
539  const int *grp_list = nbr_send_groups.GetRow(nbr);
540  for (int i = 0; i < num_send_groups; i++)
541  {
542  const int group = grp_list[i];
543  const int nltdofs = group_ltdof.RowSize(group);
544  const int *ltdofs = group_ltdof.GetRow(group);
545  nbr_ltdof.AddConnections(nbr, ltdofs, nltdofs);
546  }
547  }
548  }
549  nbr_ltdof.ShiftUpI();
550 }
551 
552 void GroupCommunicator::GetNeighborLDofTable(Table &nbr_ldof) const
553 {
554  nbr_ldof.MakeI(nbr_recv_groups.Size());
555  for (int nbr = 1; nbr < nbr_recv_groups.Size(); nbr++)
556  {
557  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
558  if (num_recv_groups > 0)
559  {
560  const int *grp_list = nbr_recv_groups.GetRow(nbr);
561  for (int i = 0; i < num_recv_groups; i++)
562  {
563  const int group = grp_list[i];
564  const int nldofs = group_ldof.RowSize(group);
565  nbr_ldof.AddColumnsInRow(nbr, nldofs);
566  }
567  }
568  }
569  nbr_ldof.MakeJ();
570  for (int nbr = 1; nbr < nbr_recv_groups.Size(); nbr++)
571  {
572  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
573  if (num_recv_groups > 0)
574  {
575  const int *grp_list = nbr_recv_groups.GetRow(nbr);
576  for (int i = 0; i < num_recv_groups; i++)
577  {
578  const int group = grp_list[i];
579  const int nldofs = group_ldof.RowSize(group);
580  const int *ldofs = group_ldof.GetRow(group);
581  nbr_ldof.AddConnections(nbr, ldofs, nldofs);
582  }
583  }
584  }
585  nbr_ldof.ShiftUpI();
586 }
587 
588 template <class T>
589 T *GroupCommunicator::CopyGroupToBuffer(const T *ldata, T *buf, int group,
590  int layout) const
591 {
592  switch (layout)
593  {
594  case 1:
595  {
596  return std::copy(ldata + group_ldof.GetI()[group],
597  ldata + group_ldof.GetI()[group+1],
598  buf);
599  }
600  case 2:
601  {
602  const int nltdofs = group_ltdof.RowSize(group);
603  const int *ltdofs = group_ltdof.GetRow(group);
604  for (int j = 0; j < nltdofs; j++)
605  {
606  buf[j] = ldata[ltdofs[j]];
607  }
608  return buf + nltdofs;
609  }
610  default:
611  {
612  const int nldofs = group_ldof.RowSize(group);
613  const int *ldofs = group_ldof.GetRow(group);
614  for (int j = 0; j < nldofs; j++)
615  {
616  buf[j] = ldata[ldofs[j]];
617  }
618  return buf + nldofs;
619  }
620  }
621 }
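
// Worked example (hypothetical indices): suppose row 'group' of group_ldof is
// {5, 9, 12}. With layout 0 the method above reads ldata[5], ldata[9],
// ldata[12]; with layout 1 it copies the contiguous range
// ldata[I[group]..I[group+1]) where I = group_ldof.GetI(); with layout 2 it
// reads the true-dof indices stored in row 'group' of group_ltdof. In every
// case the advanced buffer pointer is returned, so calls can be chained:
//
//   buf = CopyGroupToBuffer(ldata, buf, group, layout);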
622 
623 template <class T>
624 const T *GroupCommunicator::CopyGroupFromBuffer(const T *buf, T *ldata,
625  int group, int layout) const
626 {
627  const int nldofs = group_ldof.RowSize(group);
628  switch (layout)
629  {
630  case 1:
631  {
632  std::copy(buf, buf + nldofs, ldata + group_ldof.GetI()[group]);
633  break;
634  }
635  case 2:
636  {
637  const int *ltdofs = group_ltdof.GetRow(group);
638  for (int j = 0; j < nldofs; j++)
639  {
640  ldata[ltdofs[j]] = buf[j];
641  }
642  break;
643  }
644  default:
645  {
646  const int *ldofs = group_ldof.GetRow(group);
647  for (int j = 0; j < nldofs; j++)
648  {
649  ldata[ldofs[j]] = buf[j];
650  }
651  break;
652  }
653  }
654  return buf + nldofs;
655 }
656 
657 template <class T>
658 const T *GroupCommunicator::ReduceGroupFromBuffer(const T *buf, T *ldata,
659  int group, int layout,
660  void (*Op)(OpData<T>)) const
661 {
662  OpData<T> opd;
663  opd.ldata = ldata;
664  opd.nldofs = group_ldof.RowSize(group);
665  opd.nb = 1;
666  opd.buf = const_cast<T*>(buf);
667 
668  switch (layout)
669  {
670  case 1:
671  {
672  MFEM_ABORT("layout 1 is not supported");
673  T *dest = ldata + group_ldof.GetI()[group];
674  for (int j = 0; j < opd.nldofs; j++)
675  {
676  dest[j] += buf[j];
677  }
678  break;
679  }
680  case 2:
681  {
682  opd.ldofs = const_cast<int*>(group_ltdof.GetRow(group));
683  Op(opd);
684  break;
685  }
686  default:
687  {
688  opd.ldofs = const_cast<int*>(group_ldof.GetRow(group));
689  Op(opd);
690  break;
691  }
692  }
693  return buf + opd.nldofs;
694 }
695 
696 template <class T>
697 void GroupCommunicator::BcastBegin(T *ldata, int layout) const
698 {
699  MFEM_VERIFY(comm_lock == 0, "object is already in use");
700 
701  if (group_buf_size == 0) { return; }
702 
703  int request_counter = 0;
704  switch (mode)
705  {
706  case byGroup: // ***** Communication by groups *****
707  {
708  T *buf;
709  if (layout != 1)
710  {
711  group_buf.SetSize(group_buf_size*sizeof(T));
712  buf = (T *)group_buf.GetData();
713  MFEM_VERIFY(layout != 2 || group_ltdof.Size() == group_ldof.Size(),
714  "'group_ltdof' is not set, use SetLTDofTable()");
715  }
716  else
717  {
718  buf = ldata;
719  }
720 
721  for (int gr = 1; gr < group_ldof.Size(); gr++)
722  {
723  const int nldofs = group_ldof.RowSize(gr);
724 
725  // ignore groups without dofs
726  if (nldofs == 0) { continue; }
727 
728  if (!gtopo.IAmMaster(gr)) // we are not the master
729  {
730  MPI_Irecv(buf,
731  nldofs,
732  MPITypeMap<T>::mpi_type,
733  gtopo.GetGroupMasterRank(gr),
734  40822 + gtopo.GetGroupMasterGroup(gr),
735  gtopo.GetComm(),
736  &requests[request_counter]);
737  request_marker[request_counter] = gr;
738  request_counter++;
739  }
740  else // we are the master
741  {
742  if (layout != 1)
743  {
744  CopyGroupToBuffer(ldata, buf, gr, layout);
745  }
746  const int gs = gtopo.GetGroupSize(gr);
747  const int *nbs = gtopo.GetGroup(gr);
748  for (int i = 0; i < gs; i++)
749  {
750  if (nbs[i] != 0)
751  {
752  MPI_Isend(buf,
753  nldofs,
754  MPITypeMap<T>::mpi_type,
755  gtopo.GetNeighborRank(nbs[i]),
756  40822 + gtopo.GetGroupMasterGroup(gr),
757  gtopo.GetComm(),
758  &requests[request_counter]);
759  request_marker[request_counter] = -1; // mark as send req.
760  request_counter++;
761  }
762  }
763  }
764  buf += nldofs;
765  }
766  break;
767  }
768 
769  case byNeighbor: // ***** Communication by neighbors *****
770  {
771  group_buf.SetSize(group_buf_size*sizeof(T));
772  T *buf = (T *)group_buf.GetData();
773  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
774  {
775  const int num_send_groups = nbr_send_groups.RowSize(nbr);
776  if (num_send_groups > 0)
777  {
778  // Possible optimization:
779  // if (num_send_groups == 1) and (layout == 1) then we do not
780  // need to copy the data in order to send it.
781  T *buf_start = buf;
782  const int *grp_list = nbr_send_groups.GetRow(nbr);
783  for (int i = 0; i < num_send_groups; i++)
784  {
785  buf = CopyGroupToBuffer(ldata, buf, grp_list[i], layout);
786  }
787  MPI_Isend(buf_start,
788  buf - buf_start,
789  MPITypeMap<T>::mpi_type,
790  gtopo.GetNeighborRank(nbr),
791  40822,
792  gtopo.GetComm(),
793  &requests[request_counter]);
794  request_marker[request_counter] = -1; // mark as send request
795  request_counter++;
796  }
797 
798  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
799  if (num_recv_groups > 0)
800  {
801  // Possible optimization (requires interface change):
802  // if (num_recv_groups == 1) and the (output layout == 1) then
803  // we can receive directly in the output buffer; however, at
804  // this point we do not have that information.
805  const int *grp_list = nbr_recv_groups.GetRow(nbr);
806  int recv_size = 0;
807  for (int i = 0; i < num_recv_groups; i++)
808  {
809  recv_size += group_ldof.RowSize(grp_list[i]);
810  }
811  MPI_Irecv(buf,
812  recv_size,
813  MPITypeMap<T>::mpi_type,
814  gtopo.GetNeighborRank(nbr),
815  40822,
816  gtopo.GetComm(),
817  &requests[request_counter]);
818  request_marker[request_counter] = nbr;
819  request_counter++;
820  buf_offsets[nbr] = buf - (T*)group_buf.GetData();
821  buf += recv_size;
822  }
823  }
824  MFEM_ASSERT(buf - (T*)group_buf.GetData() == group_buf_size, "");
825  break;
826  }
827  }
828 
829  comm_lock = 1; // 1 - locked for Bcast
830  num_requests = request_counter;
831 }
832 
833 template <class T>
834 void GroupCommunicator::BcastEnd(T *ldata, int layout) const
835 {
836  if (comm_lock == 0) { return; }
837  // The above also handles the case (group_buf_size == 0).
838  MFEM_VERIFY(comm_lock == 1, "object is NOT locked for Bcast");
839 
840  switch (mode)
841  {
842  case byGroup: // ***** Communication by groups *****
843  {
844  if (layout == 1)
845  {
846  MPI_Waitall(num_requests, requests, MPI_STATUSES_IGNORE);
847  }
848  else if (layout == 0)
849  {
850  // copy the received data from the buffer to ldata, as it arrives
851  int idx;
852  while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
853  idx != MPI_UNDEFINED)
854  {
855  int gr = request_marker[idx];
856  if (gr == -1) { continue; } // skip send requests
857 
858  // groups without dofs are skipped, so here nldofs > 0.
859  T *buf = (T *)group_buf.GetData() + group_ldof.GetI()[gr];
860  CopyGroupFromBuffer(buf, ldata, gr, layout);
861  }
862  }
863  break;
864  }
865 
866  case byNeighbor: // ***** Communication by neighbors *****
867  {
868  // copy the received data from the buffer to ldata, as it arrives
869  int idx;
870  while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
871  idx != MPI_UNDEFINED)
872  {
873  int nbr = request_marker[idx];
874  if (nbr == -1) { continue; } // skip send requests
875 
876  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
877  if (num_recv_groups > 0)
878  {
879  const int *grp_list = nbr_recv_groups.GetRow(nbr);
880  const T *buf = (T*)group_buf.GetData() + buf_offsets[nbr];
881  for (int i = 0; i < num_recv_groups; i++)
882  {
883  buf = CopyGroupFromBuffer(buf, ldata, grp_list[i], layout);
884  }
885  }
886  }
887  break;
888  }
889  }
890 
891  comm_lock = 0; // 0 - no lock
892  num_requests = 0;
893 }
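
// Usage sketch (illustrative, hypothetical names): broadcast shared values
// from each group master to the other group members, with 'values' laid out
// by local dof (layout 0).
//
//   Array<double> values(num_local_dofs);
//   // ... the master ranks fill the entries of the groups they own ...
//   gcomm.BcastBegin(values.GetData(), 0);  // post the sends and receives
//   // ... independent local work may overlap the communication here ...
//   gcomm.BcastEnd(values.GetData(), 0);    // wait and unpack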
894 
895 template <class T>
896 void GroupCommunicator::ReduceBegin(const T *ldata) const
897 {
898  MFEM_VERIFY(comm_lock == 0, "object is already in use");
899 
900  if (group_buf_size == 0) { return; }
901 
902  int request_counter = 0;
903  group_buf.SetSize(group_buf_size*sizeof(T));
904  T *buf = (T *)group_buf.GetData();
905  switch (mode)
906  {
907  case byGroup: // ***** Communication by groups *****
908  {
909  for (int gr = 1; gr < group_ldof.Size(); gr++)
910  {
911  const int nldofs = group_ldof.RowSize(gr);
912  // ignore groups without dofs
913  if (nldofs == 0) { continue; }
914 
915  if (!gtopo.IAmMaster(gr)) // we are not the master
916  {
917  const int layout = 0;
918  CopyGroupToBuffer(ldata, buf, gr, layout);
919  MPI_Isend(buf,
920  nldofs,
921  MPITypeMap<T>::mpi_type,
922  gtopo.GetGroupMasterRank(gr),
923  43822 + gtopo.GetGroupMasterGroup(gr),
924  gtopo.GetComm(),
925  &requests[request_counter]);
926  request_marker[request_counter] = -1; // mark as send request
927  request_counter++;
928  buf += nldofs;
929  }
930  else // we are the master
931  {
932  const int gs = gtopo.GetGroupSize(gr);
933  const int *nbs = gtopo.GetGroup(gr);
934  buf_offsets[gr] = buf - (T *)group_buf.GetData();
935  for (int i = 0; i < gs; i++)
936  {
937  if (nbs[i] != 0)
938  {
939  MPI_Irecv(buf,
940  nldofs,
941  MPITypeMap<T>::mpi_type,
942  gtopo.GetNeighborRank(nbs[i]),
943  43822 + gtopo.GetGroupMasterGroup(gr),
944  gtopo.GetComm(),
945  &requests[request_counter]);
946  request_marker[request_counter] = gr;
947  request_counter++;
948  buf += nldofs;
949  }
950  }
951  }
952  }
953  break;
954  }
955 
956  case byNeighbor: // ***** Communication by neighbors *****
957  {
958  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
959  {
960  // In Reduce operation: send_groups <--> recv_groups
961  const int num_send_groups = nbr_recv_groups.RowSize(nbr);
962  if (num_send_groups > 0)
963  {
964  T *buf_start = buf;
965  const int *grp_list = nbr_recv_groups.GetRow(nbr);
966  for (int i = 0; i < num_send_groups; i++)
967  {
968  const int layout = 0; // ldata is an array on all ldofs
969  buf = CopyGroupToBuffer(ldata, buf, grp_list[i], layout);
970  }
971  MPI_Isend(buf_start,
972  buf - buf_start,
973  MPITypeMap<T>::mpi_type,
974  gtopo.GetNeighborRank(nbr),
975  43822,
976  gtopo.GetComm(),
977  &requests[request_counter]);
978  request_marker[request_counter] = -1; // mark as send request
979  request_counter++;
980  }
981 
982  // In Reduce operation: send_groups <--> recv_groups
983  const int num_recv_groups = nbr_send_groups.RowSize(nbr);
984  if (num_recv_groups > 0)
985  {
986  const int *grp_list = nbr_send_groups.GetRow(nbr);
987  int recv_size = 0;
988  for (int i = 0; i < num_recv_groups; i++)
989  {
990  recv_size += group_ldof.RowSize(grp_list[i]);
991  }
992  MPI_Irecv(buf,
993  recv_size,
994  MPITypeMap<T>::mpi_type,
995  gtopo.GetNeighborRank(nbr),
996  43822,
997  gtopo.GetComm(),
998  &requests[request_counter]);
999  request_marker[request_counter] = nbr;
1000  request_counter++;
1001  buf_offsets[nbr] = buf - (T*)group_buf.GetData();
1002  buf += recv_size;
1003  }
1004  }
1005  MFEM_ASSERT(buf - (T*)group_buf.GetData() == group_buf_size, "");
1006  break;
1007  }
1008  }
1009 
1010  comm_lock = 2;
1011  num_requests = request_counter;
1012 }
1013 
1014 template <class T>
1015 void GroupCommunicator::ReduceEnd(T *ldata, int layout,
1016  void (*Op)(OpData<T>)) const
1017 {
1018  if (comm_lock == 0) { return; }
1019  // The above also handles the case (group_buf_size == 0).
1020  MFEM_VERIFY(comm_lock == 2, "object is NOT locked for Reduce");
1021 
1022  switch (mode)
1023  {
1024  case byGroup: // ***** Communication by groups *****
1025  {
1026  OpData<T> opd;
1027  opd.ldata = ldata;
1028  Array<int> group_num_req(group_ldof.Size());
1029  for (int gr = 1; gr < group_ldof.Size(); gr++)
1030  {
1031  group_num_req[gr] =
1032  gtopo.IAmMaster(gr) ? gtopo.GetGroupSize(gr)-1 : 0;
1033  }
1034  int idx;
1035  while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
1036  idx != MPI_UNDEFINED)
1037  {
1038  int gr = request_marker[idx];
1039  if (gr == -1) { continue; } // skip send requests
1040 
1041  // Delay the processing of a group until all receive requests, for
1042  // that group, are done:
1043  if ((--group_num_req[gr]) != 0) { continue; }
1044 
1045  opd.nldofs = group_ldof.RowSize(gr);
1046  // groups without dofs are skipped, so here nldofs > 0.
1047 
1048  opd.buf = (T *)group_buf.GetData() + buf_offsets[gr];
1049  opd.ldofs = (layout == 0) ?
1050  group_ldof.GetRow(gr) : group_ltdof.GetRow(gr);
1051  opd.nb = gtopo.GetGroupSize(gr)-1;
1052  Op(opd);
1053  }
1054  break;
1055  }
1056 
1057  case byNeighbor: // ***** Communication by neighbors *****
1058  {
1059  MPI_Waitall(num_requests, requests, MPI_STATUSES_IGNORE);
1060 
1061  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
1062  {
1063  // In Reduce operation: send_groups <--> recv_groups
1064  const int num_recv_groups = nbr_send_groups.RowSize(nbr);
1065  if (num_recv_groups > 0)
1066  {
1067  const int *grp_list = nbr_send_groups.GetRow(nbr);
1068  const T *buf = (T*)group_buf.GetData() + buf_offsets[nbr];
1069  for (int i = 0; i < num_recv_groups; i++)
1070  {
1071  buf = ReduceGroupFromBuffer(buf, ldata, grp_list[i],
1072  layout, Op);
1073  }
1074  }
1075  }
1076  break;
1077  }
1078  }
1079 
1080  comm_lock = 0; // 0 - no lock
1081  num_requests = 0;
1082 }
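
// Usage sketch (illustrative, hypothetical names): accumulate the
// contributions of all copies of each shared dof on the group master using
// the Sum operation defined below, then broadcast the result back.
//
//   gcomm.ReduceBegin(values.GetData());
//   gcomm.ReduceEnd(values.GetData(), 0, GroupCommunicator::Sum<double>);
//   gcomm.BcastBegin(values.GetData(), 0);
//   gcomm.BcastEnd(values.GetData(), 0);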
1083 
1084 template <class T>
1085 void GroupCommunicator::Sum(OpData<T> opd)
1086 {
1087  if (opd.nb == 1)
1088  {
1089  for (int i = 0; i < opd.nldofs; i++)
1090  {
1091  opd.ldata[opd.ldofs[i]] += opd.buf[i];
1092  }
1093  }
1094  else
1095  {
1096  for (int i = 0; i < opd.nldofs; i++)
1097  {
1098  T data = opd.ldata[opd.ldofs[i]];
1099  for (int j = 0; j < opd.nb; j++)
1100  {
1101  data += opd.buf[j*opd.nldofs+i];
1102  }
1103  opd.ldata[opd.ldofs[i]] = data;
1104  }
1105  }
1106 }
1107 
1108 template <class T>
1109 void GroupCommunicator::Min(OpData<T> opd)
1110 {
1111  for (int i = 0; i < opd.nldofs; i++)
1112  {
1113  T data = opd.ldata[opd.ldofs[i]];
1114  for (int j = 0; j < opd.nb; j++)
1115  {
1116  T b = opd.buf[j*opd.nldofs+i];
1117  if (data > b)
1118  {
1119  data = b;
1120  }
1121  }
1122  opd.ldata[opd.ldofs[i]] = data;
1123  }
1124 }
1125 
1126 template <class T>
1127 void GroupCommunicator::Max(OpData<T> opd)
1128 {
1129  for (int i = 0; i < opd.nldofs; i++)
1130  {
1131  T data = opd.ldata[opd.ldofs[i]];
1132  for (int j = 0; j < opd.nb; j++)
1133  {
1134  T b = opd.buf[j*opd.nldofs+i];
1135  if (data < b)
1136  {
1137  data = b;
1138  }
1139  }
1140  opd.ldata[opd.ldofs[i]] = data;
1141  }
1142 }
1143 
1144 template <class T>
1145 void GroupCommunicator::BitOR(OpData<T> opd)
1146 {
1147  for (int i = 0; i < opd.nldofs; i++)
1148  {
1149  T data = opd.ldata[opd.ldofs[i]];
1150  for (int j = 0; j < opd.nb; j++)
1151  {
1152  data |= opd.buf[j*opd.nldofs+i];
1153  }
1154  opd.ldata[opd.ldofs[i]] = data;
1155  }
1156 }
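
// Sketch of a user-defined reduction (hypothetical, not part of MFEM): any
// function with signature void(OpData<T>) can be passed to ReduceEnd(). Like
// Sum/Min/Max/BitOR above, it must fold the opd.nb buffered contributions of
// each of the opd.nldofs entries into opd.ldata at the indices opd.ldofs.
//
//   template <class T>
//   void ProductOp(mfem::GroupCommunicator::OpData<T> opd)
//   {
//      for (int i = 0; i < opd.nldofs; i++)
//      {
//         T data = opd.ldata[opd.ldofs[i]];
//         for (int j = 0; j < opd.nb; j++)
//         {
//            data *= opd.buf[j*opd.nldofs + i];
//         }
//         opd.ldata[opd.ldofs[i]] = data;
//      }
//   }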
1157 
1158 void GroupCommunicator::PrintInfo(std::ostream &out) const
1159 {
1160  char c = '\0';
1161  const int tag = 46800;
1162  const int myid = gtopo.MyRank();
1163 
1164  int num_sends = 0, num_recvs = 0;
1165  size_t mem_sends = 0, mem_recvs = 0;
1166  int num_master_groups = 0, num_empty_groups = 0;
1167  int num_active_neighbors = 0; // for mode == byNeighbor
1168  switch (mode)
1169  {
1170  case byGroup:
1171  for (int gr = 1; gr < group_ldof.Size(); gr++)
1172  {
1173  const int nldofs = group_ldof.RowSize(gr);
1174  if (nldofs == 0)
1175  {
1176  num_empty_groups++;
1177  continue;
1178  }
1179  if (gtopo.IAmMaster(gr))
1180  {
1181  num_sends += (gtopo.GetGroupSize(gr)-1);
1182  mem_sends += sizeof(double)*nldofs*(gtopo.GetGroupSize(gr)-1);
1183  num_master_groups++;
1184  }
1185  else
1186  {
1187  num_recvs++;
1188  mem_recvs += sizeof(double)*nldofs;
1189  }
1190  }
1191  break;
1192 
1193  case byNeighbor:
1194  for (int gr = 1; gr < group_ldof.Size(); gr++)
1195  {
1196  const int nldofs = group_ldof.RowSize(gr);
1197  if (nldofs == 0)
1198  {
1199  num_empty_groups++;
1200  continue;
1201  }
1202  if (gtopo.IAmMaster(gr))
1203  {
1204  num_master_groups++;
1205  }
1206  }
1207  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
1208  {
1209  const int num_send_groups = nbr_send_groups.RowSize(nbr);
1210  if (num_send_groups > 0)
1211  {
1212  const int *grp_list = nbr_send_groups.GetRow(nbr);
1213  for (int i = 0; i < num_send_groups; i++)
1214  {
1215  mem_sends += sizeof(double)*group_ldof.RowSize(grp_list[i]);
1216  }
1217  num_sends++;
1218  }
1219 
1220  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
1221  if (num_recv_groups > 0)
1222  {
1223  const int *grp_list = nbr_recv_groups.GetRow(nbr);
1224  for (int i = 0; i < num_recv_groups; i++)
1225  {
1226  mem_recvs += sizeof(double)*group_ldof.RowSize(grp_list[i]);
1227  }
1228  num_recvs++;
1229  }
1230  if (num_send_groups > 0 || num_recv_groups > 0)
1231  {
1232  num_active_neighbors++;
1233  }
1234  }
1235  break;
1236  }
1237  if (myid != 0)
1238  {
1239  MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag, gtopo.GetComm(),
1240  MPI_STATUS_IGNORE);
1241  }
1242  else
1243  {
1244  out << "\nGroupCommunicator:\n";
1245  }
1246  out << "Rank " << myid << ":\n"
1247  " mode = " <<
1248  (mode == byGroup ? "byGroup" : "byNeighbor") << "\n"
1249  " number of sends = " << num_sends <<
1250  " (" << mem_sends << " bytes)\n"
1251  " number of recvs = " << num_recvs <<
1252  " (" << mem_recvs << " bytes)\n";
1253  out <<
1254  " num groups = " << group_ldof.Size() << " = " <<
1255  num_master_groups << " + " <<
1256  group_ldof.Size()-num_master_groups-num_empty_groups << " + " <<
1257  num_empty_groups << " (master + slave + empty)\n";
1258  if (mode == byNeighbor)
1259  {
1260  out <<
1261  " num neighbors = " << nbr_send_groups.Size() << " = " <<
1262  num_active_neighbors << " + " <<
1263  nbr_send_groups.Size()-num_active_neighbors <<
1264  " (active + inactive)\n";
1265  }
1266  if (myid != gtopo.NRanks()-1)
1267  {
1268  out << std::flush;
1269  MPI_Send(&c, 1, MPI_CHAR, myid+1, tag, gtopo.GetComm());
1270  }
1271  else
1272  {
1273  out << std::endl;
1274  }
1275  MPI_Barrier(gtopo.GetComm());
1276 }
1277 
1278 GroupCommunicator::~GroupCommunicator()
1279 {
1280  delete [] buf_offsets;
1281  delete [] request_marker;
1282  // delete [] statuses;
1283  delete [] requests;
1284 }
1285 
1286 // @cond DOXYGEN_SKIP
1287 
1288 // instantiate GroupCommunicator::Bcast and Reduce for int and double
1289 template void GroupCommunicator::BcastBegin<int>(int *, int) const;
1290 template void GroupCommunicator::BcastEnd<int>(int *, int) const;
1291 template void GroupCommunicator::ReduceBegin<int>(const int *) const;
1292 template void GroupCommunicator::ReduceEnd<int>(
1293  int *, int, void (*)(OpData<int>)) const;
1294 
1295 template void GroupCommunicator::BcastBegin<double>(double *, int) const;
1296 template void GroupCommunicator::BcastEnd<double>(double *, int) const;
1297 template void GroupCommunicator::ReduceBegin<double>(const double *) const;
1298 template void GroupCommunicator::ReduceEnd<double>(
1299  double *, int, void (*)(OpData<double>)) const;
1300 
1301 // @endcond
1302 
1303 // instantiate reduce operators for int and double
1304 template void GroupCommunicator::Sum<int>(OpData<int>);
1305 template void GroupCommunicator::Min<int>(OpData<int>);
1306 template void GroupCommunicator::Max<int>(OpData<int>);
1307 template void GroupCommunicator::BitOR<int>(OpData<int>);
1308 
1309 template void GroupCommunicator::Sum<double>(OpData<double>);
1310 template void GroupCommunicator::Min<double>(OpData<double>);
1311 template void GroupCommunicator::Max<double>(OpData<double>);
1312 
1313 
1314 #ifdef __bgq__
1315 static void DebugRankCoords(int** coords, int dim, int size)
1316 {
1317  for (int i = 0; i < size; i++)
1318  {
1319  mfem::out << "Rank " << i << " coords: ";
1320  for (int j = 0; j < dim; j++)
1321  {
1322  mfem::out << coords[i][j] << " ";
1323  }
1324  mfem::out << endl;
1325  }
1326 }
1327 
1328 struct CompareCoords
1329 {
1330  CompareCoords(int coord) : coord(coord) {}
1331  int coord;
1332 
1333  bool operator()(int* const &a, int* const &b) const
1334  { return a[coord] < b[coord]; }
1335 };
1336 
1337 void KdTreeSort(int** coords, int d, int dim, int size)
1338 {
1339  if (size > 1)
1340  {
1341  bool all_same = true;
1342  for (int i = 1; i < size && all_same; i++)
1343  {
1344  for (int j = 0; j < dim; j++)
1345  {
1346  if (coords[i][j] != coords[0][j]) { all_same = false; break; }
1347  }
1348  }
1349  if (all_same) { return; }
1350 
1351  // sort by coordinate 'd'
1352  std::sort(coords, coords + size, CompareCoords(d));
1353  int next = (d + 1) % dim;
1354 
1355  if (coords[0][d] < coords[size-1][d])
1356  {
1357  KdTreeSort(coords, next, dim, size/2);
1358  KdTreeSort(coords + size/2, next, dim, size - size/2);
1359  }
1360  else
1361  {
1362  // skip constant dimension
1363  KdTreeSort(coords, next, dim, size);
1364  }
1365  }
1366 }
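
// Worked example (hypothetical coordinates): KdTreeSort recursively halves the
// rank list along alternating torus dimensions, so ranks that are close on the
// machine stay close in the resulting order. For the 2D coordinates
// {(0,0),(1,1),(0,1),(1,0)} with d = 0 it first splits on x, then each half on
// y, producing (0,0),(0,1),(1,0),(1,1).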
1367 
1368 MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
1369 {
1370  MPI_Status status;
1371 
1372  int rank, size;
1373  MPI_Comm_rank(comm, &rank);
1374  MPI_Comm_size(comm, &size);
1375 
1376  int dim;
1377  MPIX_Torus_ndims(&dim);
1378 
1379  int* mycoords = new int[dim + 1];
1380  MPIX_Rank2torus(rank, mycoords);
1381 
1382  MPI_Send(mycoords, dim, MPI_INT, 0, 111, comm);
1383  delete [] mycoords;
1384 
1385  if (rank == 0)
1386  {
1387  int** coords = new int*[size];
1388  for (int i = 0; i < size; i++)
1389  {
1390  coords[i] = new int[dim + 1];
1391  coords[i][dim] = i;
1392  MPI_Recv(coords[i], dim, MPI_INT, i, 111, comm, &status);
1393  }
1394 
1395  KdTreeSort(coords, 0, dim, size);
1396 
1397  // DebugRankCoords(coords, dim, size);
1398 
1399  for (int i = 0; i < size; i++)
1400  {
1401  MPI_Send(&coords[i][dim], 1, MPI_INT, i, 112, comm);
1402  delete [] coords[i];
1403  }
1404  delete [] coords;
1405  }
1406 
1407  int new_rank;
1408  MPI_Recv(&new_rank, 1, MPI_INT, 0, 112, comm, &status);
1409 
1410  MPI_Comm new_comm;
1411  MPI_Comm_split(comm, 0, new_rank, &new_comm);
1412  return new_comm;
1413 }
1414 
1415 #else // __bgq__
1416 
1417 MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
1418 {
1419  // pass
1420  return comm;
1421 }
1422 #endif // __bgq__
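
// Usage sketch (illustrative): the reordered communicator can be used in place
// of MPI_COMM_WORLD so that nearby ranks map to nearby torus nodes; on
// non-Blue Gene/Q builds the function simply returns the input communicator.
//
//   MPI_Comm cart_comm = mfem::ReorderRanksZCurve(MPI_COMM_WORLD);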
1423 
1424 } // namespace mfem
1425 
1426 #endif