MFEM  v3.4
Finite element discretization library
communication.cpp
1 // Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
3 // reserved. See file COPYRIGHT for details.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability see http://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the GNU Lesser General Public License (as published by the Free
10 // Software Foundation) version 2.1 dated February 1999.
11 
12 #include "../config/config.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 #include <mpi.h>
17 #ifdef __bgq__
18 #include <mpix.h>
19 #endif
20 
21 #include "array.hpp"
22 #include "table.hpp"
23 #include "sets.hpp"
24 #include "communication.hpp"
25 #include "text.hpp"
26 #include "sort_pairs.hpp"
27 #include "globals.hpp"
28 
29 #include <iostream>
30 #include <map>
31 
32 using namespace std;
33 
34 namespace mfem
35 {
36 
37 void MPI_Session::GetRankAndSize()
38 {
39  MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
40  MPI_Comm_size(MPI_COMM_WORLD, &world_size);
41 }
42 
43 
44 GroupTopology::GroupTopology(const GroupTopology &gt)
45  : MyComm(gt.MyComm),
46  group_lproc(gt.group_lproc)
47 {
48  gt.groupmaster_lproc.Copy(groupmaster_lproc);
49  gt.lproc_proc.Copy(lproc_proc);
50  gt.group_mgroup.Copy(group_mgroup);
51 }
52 
53 void GroupTopology::ProcToLProc()
54 {
55  int NRanks;
56  MPI_Comm_size(MyComm, &NRanks);
57 
58  map<int, int> proc_lproc;
59 
60  // The local processor ids are assigned following the group order and within
61  // a group following their ordering in the group. In other words, the ids are
62  // assigned based on their order in the J array of group_lproc.
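 // For example, if group_lproc.GetJ() is {MyRank, 5, 9, 5, 2}, the first
 // occurrences are numbered in order: MyRank->0, 5->1, 9->2, 2->3.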
63  int lproc_counter = 0;
64  for (int i = 0; i < group_lproc.Size_of_connections(); i++)
65  {
66  const pair<const int, int> p(group_lproc.GetJ()[i], lproc_counter);
67  if (proc_lproc.insert(p).second)
68  {
69  lproc_counter++;
70  }
71  }
72  // Note: group_lproc.GetJ()[0] == MyRank --> proc_lproc[MyRank] == 0
73 
74  lproc_proc.SetSize(lproc_counter);
75  for (map<int, int>::iterator it = proc_lproc.begin();
76  it != proc_lproc.end(); ++it)
77  {
78  lproc_proc[it->second] = it->first;
79  }
80 
81  for (int i = 0; i < group_lproc.Size_of_connections(); i++)
82  {
83  group_lproc.GetJ()[i] = proc_lproc[group_lproc.GetJ()[i]];
84  }
85 
86  for (int i = 0; i < NGroups(); i++)
87  {
88  groupmaster_lproc[i] = proc_lproc[groupmaster_lproc[i]];
89  }
90 }
91 
92 void GroupTopology::Create(ListOfIntegerSets &groups, int mpitag)
93 {
94  groups.AsTable(group_lproc); // group_lproc = group_proc
95 
96  Table group_mgroupandproc;
97  group_mgroupandproc.SetDims(NGroups(),
98  group_lproc.Size_of_connections() + NGroups());
99  for (int i = 0; i < NGroups(); i++)
100  {
101  int j = group_mgroupandproc.GetI()[i];
102  group_mgroupandproc.GetI()[i+1] = j + group_lproc.RowSize(i) + 1;
103  group_mgroupandproc.GetJ()[j] = i;
104  j++;
105  for (int k = group_lproc.GetI()[i];
106  j < group_mgroupandproc.GetI()[i+1]; j++, k++)
107  {
108  group_mgroupandproc.GetJ()[j] = group_lproc.GetJ()[k];
109  }
110  }
111 
112  // build groupmaster_lproc with lproc = proc
113  groupmaster_lproc.SetSize(NGroups());
114 
115  // simplest choice of the group owner
116  for (int i = 0; i < NGroups(); i++)
117  {
118  groupmaster_lproc[i] = groups.PickElementInSet(i);
119  }
120 
121  // load-balanced choice of the group owner, which however can lead to
122  // isolated dofs
123  // for (i = 0; i < NGroups(); i++)
124  // groupmaster_lproc[i] = groups.PickRandomElementInSet(i);
125 
126  ProcToLProc();
127 
128  // Build 'group_mgroup':
129 
130  // Use aggregated neighbor communication: at most one send to and/or one
131  // receive from each neighbor.
132 
133  group_mgroup.SetSize(NGroups());
134  MFEM_DEBUG_DO(group_mgroup = -1);
135  for (int g = 0; g < NGroups(); g++)
136  {
137  if (IAmMaster(g)) { group_mgroup[g] = g; }
138  }
139 
140  // The Table 'lproc_cgroup': for each lproc, list the groups that are owned
141  // by this rank or by that lproc.
142  Table lproc_cgroup;
143  {
144  Array<Connection> lproc_cgroup_list;
145  for (int g = 1; g < NGroups(); g++)
146  {
147  if (IAmMaster(g))
148  {
149  const int gs = GetGroupSize(g);
150  const int *lprocs = GetGroup(g);
151  for (int i = 0; i < gs; i++)
152  {
153  if (lprocs[i])
154  {
155  lproc_cgroup_list.Append(Connection(lprocs[i],g));
156  }
157  }
158  }
159  else
160  {
161  lproc_cgroup_list.Append(Connection(GetGroupMaster(g),g));
162  }
163  }
164  lproc_cgroup_list.Sort();
165  lproc_cgroup_list.Unique();
166  lproc_cgroup.MakeFromList(GetNumNeighbors(), lproc_cgroup_list);
167  }
168 
169  // Determine size of the send-receive buffer. For each neighbor the buffer
170  // contains: <send-part><receive-part> with each part consisting of a list of
171  // groups. Each group, g, has group_lproc.RowSize(g)+2 integers: the first
172  // entry is group_lproc.RowSize(g) - the number of processors in the group,
173  // followed by the group-id in the master processor, followed by the ranks of
174  // the processors in the group.
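 // For example, if this rank is the master of a group with id 5 (on this
 // rank) and members with MPI ranks {2, 7, 9}, the block written to the
 // send-part for each non-master member is {3, 5, 2, 7, 9}: the group size,
 // the master's group id, and the member ranks, i.e. 3+2 integers.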
175  Table buffer;
176  buffer.MakeI(2*lproc_cgroup.Size()-2); // excluding the "local" lproc, 0
177  for (int nbr = 1; nbr < lproc_cgroup.Size(); nbr++)
178  {
179  const int send_row = 2*(nbr-1);
180  const int recv_row = send_row+1;
181  const int ng = lproc_cgroup.RowSize(nbr);
182  const int *g = lproc_cgroup.GetRow(nbr);
183  for (int j = 0; j < ng; j++)
184  {
185  const int gs = group_lproc.RowSize(g[j]);
186  if (IAmMaster(g[j]))
187  {
188  buffer.AddColumnsInRow(send_row, gs+2);
189  }
190  else
191  {
192  MFEM_ASSERT(GetGroupMaster(g[j]) == nbr, "internal error");
193  buffer.AddColumnsInRow(recv_row, gs+2);
194  }
195  }
196  }
197  buffer.MakeJ();
198  for (int nbr = 1; nbr < lproc_cgroup.Size(); nbr++)
199  {
200  const int send_row = 2*(nbr-1);
201  const int recv_row = send_row+1;
202  const int ng = lproc_cgroup.RowSize(nbr);
203  const int *g = lproc_cgroup.GetRow(nbr);
204  for (int j = 0; j < ng; j++)
205  {
206  const int gs = group_lproc.RowSize(g[j]);
207  if (IAmMaster(g[j]))
208  {
209  buffer.AddConnection(send_row, gs);
210  buffer.AddConnections(
211  send_row, group_mgroupandproc.GetRow(g[j]), gs+1);
212  }
213  else
214  {
215  buffer.AddColumnsInRow(recv_row, gs+2);
216  }
217  }
218  }
219  buffer.ShiftUpI();
220  Array<MPI_Request> send_requests(lproc_cgroup.Size()-1);
221  Array<MPI_Request> recv_requests(lproc_cgroup.Size()-1);
222  send_requests = MPI_REQUEST_NULL;
223  recv_requests = MPI_REQUEST_NULL;
224  for (int nbr = 1; nbr < lproc_cgroup.Size(); nbr++)
225  {
226  const int send_row = 2*(nbr-1);
227  const int recv_row = send_row+1;
228  const int send_size = buffer.RowSize(send_row);
229  const int recv_size = buffer.RowSize(recv_row);
230  if (send_size > 0)
231  {
232  MPI_Isend(buffer.GetRow(send_row), send_size, MPI_INT, lproc_proc[nbr],
233  mpitag, MyComm, &send_requests[nbr-1]);
234  }
235  if (recv_size > 0)
236  {
237  MPI_Irecv(buffer.GetRow(recv_row), recv_size, MPI_INT, lproc_proc[nbr],
238  mpitag, MyComm, &recv_requests[nbr-1]);
239  }
240  }
241 
242  if (recv_requests.Size() > 0)
243  {
244  int idx;
245  IntegerSet group;
246  while (MPI_Waitany(recv_requests.Size(), recv_requests.GetData(), &idx,
247  MPI_STATUS_IGNORE),
248  idx != MPI_UNDEFINED)
249  {
250  const int recv_size = buffer.RowSize(2*idx+1);
251  const int *recv_buf = buffer.GetRow(2*idx+1);
252  for (int s = 0; s < recv_size; s += recv_buf[s]+2)
253  {
254  group.Recreate(recv_buf[s], recv_buf+s+2);
255  const int g = groups.Lookup(group);
256  MFEM_ASSERT(group_mgroup[g] == -1, "communication error");
257  group_mgroup[g] = recv_buf[s+1];
258  }
259  }
260  }
261 
262  MPI_Waitall(send_requests.Size(), send_requests.GetData(),
263  MPI_STATUSES_IGNORE);
264 
265  // debug barrier: MPI_Barrier(MyComm);
266 }
267 
268 void GroupTopology::Save(ostream &out) const
269 {
270  out << "\ncommunication_groups\n";
271  out << "number_of_groups " << NGroups() << "\n\n";
272 
273  out << "# number of entities in each group, followed by group ids in group\n";
274  for (int group_id = 0; group_id < NGroups(); ++group_id)
275  {
276  int group_size = GetGroupSize(group_id);
277  const int * group_ptr = GetGroup(group_id);
278  out << group_size;
279  for ( int group_member_index = 0; group_member_index < group_size;
280  ++group_member_index)
281  {
282  out << " " << GetNeighborRank( group_ptr[group_member_index] );
283  }
284  out << "\n";
285  }
286 
287  // For future use, optional ownership strategy.
288  // out << "# ownership";
289 }
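 // For example, on rank 3 that shares one group with rank 7, Save() writes:
 //
 //   communication_groups
 //   number_of_groups 2
 //
 //   # number of entities in each group, followed by group ids in group
 //   1 3
 //   2 3 7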
290 
291 void GroupTopology::Load(istream &in)
292 {
293  // Load in group topology and create list of integer sets. Use constructor
294  // that uses list of integer sets.
295  std::string ident;
296 
297  // Read in number of groups
298  int number_of_groups = -1;
299  in >> ident;
300  MFEM_VERIFY(ident == "number_of_groups",
301  "GroupTopology::Load - expected 'number_of_groups' entry.");
302  in >> number_of_groups;
303 
304  // Skip number of entries in each group comment.
305  skip_comment_lines(in, '#');
306 
307  ListOfIntegerSets integer_sets;
308  for (int group_id = 0; group_id < number_of_groups; ++group_id)
309  {
310  IntegerSet integer_set;
311  Array<int>& array = integer_set;
312  int group_size;
313  in >> group_size;
314  array.Reserve(group_size);
315  for ( int index = 0; index < group_size; ++index )
316  {
317  int value;
318  in >> value;
319  array.Append(value);
320  }
321  integer_sets.Insert(integer_set);
322  }
323 
324  Create(integer_sets, 823);
325 }
326 
327 void GroupTopology::Copy(GroupTopology &copy) const
328 {
329  copy.SetComm(MyComm);
330  group_lproc.Copy(copy.group_lproc);
331  groupmaster_lproc.Copy(copy.groupmaster_lproc);
332  lproc_proc.Copy(copy.lproc_proc);
333  group_mgroup.Copy(copy.group_mgroup);
334 }
335 
336 // Initialize the static mpi_type for the specializations of MPITypeMap:
337 const MPI_Datatype MPITypeMap<int>::mpi_type = MPI_INT;
338 const MPI_Datatype MPITypeMap<double>::mpi_type = MPI_DOUBLE;
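 // These are the MPI_Datatype values passed to the MPI_Isend/MPI_Irecv calls
 // below, so the <int> instantiations of Bcast/Reduce communicate with
 // MPI_INT and the <double> instantiations with MPI_DOUBLE.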
339 
340 
341 GroupCommunicator::GroupCommunicator(GroupTopology &gt, Mode m)
342  : gtopo(gt), mode(m)
343 {
344  group_buf_size = 0;
345  requests = NULL;
346  // statuses = NULL;
347  comm_lock = 0;
348  num_requests = 0;
349  request_marker = NULL;
350  buf_offsets = NULL;
351 }
352 
353 void GroupCommunicator::Create(const Array<int> &ldof_group)
354 {
355  group_ldof.MakeI(gtopo.NGroups());
356  for (int i = 0; i < ldof_group.Size(); i++)
357  {
358  int group = ldof_group[i];
359  if (group != 0)
360  {
361  group_ldof.AddAColumnInRow(group);
362  }
363  }
364  group_ldof.MakeJ();
365 
366  for (int i = 0; i < ldof_group.Size(); i++)
367  {
368  int group = ldof_group[i];
369  if (group != 0)
370  {
371  group_ldof.AddConnection(group, i);
372  }
373  }
374  group_ldof.ShiftUpI();
375 
376  Finalize();
377 }
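 // For example, ldof_group = {0, 2, 2, 1} (dof 0 purely local, dofs 1-2 in
 // group 2, dof 3 in group 1) yields group_ldof rows: {}, {3}, {1, 2}.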
378 
379 void GroupCommunicator::Finalize()
380 {
381  int request_counter = 0;
382 
383  // size buf_offsets = max(number of groups, number of neighbors)
384  buf_offsets = new int[max(group_ldof.Size(), gtopo.GetNumNeighbors())];
385  buf_offsets[0] = 0;
386  for (int gr = 1; gr < group_ldof.Size(); gr++)
387  {
388  if (group_ldof.RowSize(gr) != 0)
389  {
390  int gr_requests;
391  if (!gtopo.IAmMaster(gr)) // we are not the master
392  {
393  gr_requests = 1;
394  }
395  else
396  {
397  gr_requests = gtopo.GetGroupSize(gr)-1;
398  }
399 
400  request_counter += gr_requests;
401  group_buf_size += gr_requests * group_ldof.RowSize(gr);
402  }
403  }
404 
405  requests = new MPI_Request[request_counter];
406  // statuses = new MPI_Status[request_counter];
407  request_marker = new int[request_counter];
408 
409  // Construct nbr_send_groups and nbr_recv_groups: (nbr 0 = me)
410  nbr_send_groups.MakeI(gtopo.GetNumNeighbors());
411  nbr_recv_groups.MakeI(gtopo.GetNumNeighbors());
412  for (int gr = 1; gr < group_ldof.Size(); gr++)
413  {
414  const int nldofs = group_ldof.RowSize(gr);
415  if (nldofs == 0) { continue; }
416 
417  if (!gtopo.IAmMaster(gr)) // we are not the master
418  {
419  nbr_recv_groups.AddAColumnInRow(gtopo.GetGroupMaster(gr));
420  }
421  else // we are the master
422  {
423  const int grp_size = gtopo.GetGroupSize(gr);
424  const int *grp_nbr_list = gtopo.GetGroup(gr);
425  for (int i = 0; i < grp_size; i++)
426  {
427  if (grp_nbr_list[i] != 0)
428  {
429  nbr_send_groups.AddAColumnInRow(grp_nbr_list[i]);
430  }
431  }
432  }
433  }
434  nbr_send_groups.MakeJ();
435  nbr_recv_groups.MakeJ();
436  for (int gr = 1; gr < group_ldof.Size(); gr++)
437  {
438  const int nldofs = group_ldof.RowSize(gr);
439  if (nldofs == 0) { continue; }
440 
441  if (!gtopo.IAmMaster(gr)) // we are not the master
442  {
443  nbr_recv_groups.AddConnection(gtopo.GetGroupMaster(gr), gr);
444  }
445  else // we are the master
446  {
447  const int grp_size = gtopo.GetGroupSize(gr);
448  const int *grp_nbr_list = gtopo.GetGroup(gr);
449  for (int i = 0; i < grp_size; i++)
450  {
451  if (grp_nbr_list[i] != 0)
452  {
453  nbr_send_groups.AddConnection(grp_nbr_list[i], gr);
454  }
455  }
456  }
457  }
458  nbr_send_groups.ShiftUpI();
459  nbr_recv_groups.ShiftUpI();
460  // The above construction creates the Tables with the column indices
461  // sorted, i.e. the group lists are sorted. To coordinate this order between
462  // processors, we will sort the group lists in the nbr_recv_groups Table
463  // according to their indices in the master. This does not require any
464  // communication because we have access to the group indices in the master
465  // by calling: master_group_id = gtopo.GetGroupMasterGroup(my_group_id).
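 // For example, if a neighbor's row lists the local groups {4, 7} and their
 // ids on the master are {12, 3}, the row is reordered to {7, 4}, so both
 // sides traverse the shared groups in the same (master's) order.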
466  Array<Pair<int,int> > group_ids;
467  for (int nbr = 1; nbr < nbr_recv_groups.Size(); nbr++)
468  {
469  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
470  if (num_recv_groups > 0)
471  {
472  int *grp_list = nbr_recv_groups.GetRow(nbr);
473  group_ids.SetSize(num_recv_groups);
474  for (int i = 0; i < num_recv_groups; i++)
475  {
476  group_ids[i].one = gtopo.GetGroupMasterGroup(grp_list[i]);
477  group_ids[i].two = grp_list[i]; // my_group_id
478  }
479  group_ids.Sort();
480  for (int i = 0; i < num_recv_groups; i++)
481  {
482  grp_list[i] = group_ids[i].two;
483  }
484  }
485  }
486 }
487 
488 void GroupCommunicator::SetLTDofTable(const Array<int> &ldof_ltdof)
489 {
490  if (group_ltdof.Size() == group_ldof.Size()) { return; }
491 
492  group_ltdof.MakeI(group_ldof.Size());
493  for (int gr = 1; gr < group_ldof.Size(); gr++)
494  {
495  if (gtopo.IAmMaster(gr))
496  {
497  group_ltdof.AddColumnsInRow(gr, group_ldof.RowSize(gr));
498  }
499  }
500  group_ltdof.MakeJ();
501  for (int gr = 1; gr < group_ldof.Size(); gr++)
502  {
503  if (gtopo.IAmMaster(gr))
504  {
505  const int *ldofs = group_ldof.GetRow(gr);
506  const int nldofs = group_ldof.RowSize(gr);
507  for (int i = 0; i < nldofs; i++)
508  {
509  group_ltdof.AddConnection(gr, ldof_ltdof[ldofs[i]]);
510  }
511  }
512  }
513  group_ltdof.ShiftUpI();
514 }
515 
516 template <class T>
517 T *GroupCommunicator::CopyGroupToBuffer(const T *ldata, T *buf, int group,
518  int layout) const
519 {
520  switch (layout)
521  {
522  case 1:
523  {
524  return std::copy(ldata + group_ldof.GetI()[group],
525  ldata + group_ldof.GetI()[group+1],
526  buf);
527  }
528  case 2:
529  {
530  const int nltdofs = group_ltdof.RowSize(group);
531  const int *ltdofs = group_ltdof.GetRow(group);
532  for (int j = 0; j < nltdofs; j++)
533  {
534  buf[j] = ldata[ltdofs[j]];
535  }
536  return buf + nltdofs;
537  }
538  default:
539  {
540  const int nldofs = group_ldof.RowSize(group);
541  const int *ldofs = group_ldof.GetRow(group);
542  for (int j = 0; j < nldofs; j++)
543  {
544  buf[j] = ldata[ldofs[j]];
545  }
546  return buf + nldofs;
547  }
548  }
549 }
550 
551 template <class T>
552 const T *GroupCommunicator::CopyGroupFromBuffer(const T *buf, T *ldata,
553  int group, int layout) const
554 {
555  const int nldofs = group_ldof.RowSize(group);
556  switch (layout)
557  {
558  case 1:
559  {
560  std::copy(buf, buf + nldofs, ldata + group_ldof.GetI()[group]);
561  break;
562  }
563  case 2:
564  {
565  const int *ltdofs = group_ltdof.GetRow(group);
566  for (int j = 0; j < nldofs; j++)
567  {
568  ldata[ltdofs[j]] = buf[j];
569  }
570  break;
571  }
572  default:
573  {
574  const int *ldofs = group_ldof.GetRow(group);
575  for (int j = 0; j < nldofs; j++)
576  {
577  ldata[ldofs[j]] = buf[j];
578  }
579  break;
580  }
581  }
582  return buf + nldofs;
583 }
584 
585 template <class T>
586 const T *GroupCommunicator::ReduceGroupFromBuffer(const T *buf, T *ldata,
587  int group, int layout,
588  void (*Op)(OpData<T>)) const
589 {
590  OpData<T> opd;
591  opd.ldata = ldata;
592  opd.nldofs = group_ldof.RowSize(group);
593  opd.nb = 1;
594  opd.buf = const_cast<T*>(buf);
595 
596  switch (layout)
597  {
598  case 1:
599  {
600  MFEM_ABORT("layout 1 is not supported");
601  T *dest = ldata + group_ldof.GetI()[group];
602  for (int j = 0; j < opd.nldofs; j++)
603  {
604  dest[j] += buf[j];
605  }
606  break;
607  }
608  case 2:
609  {
610  opd.ldofs = const_cast<int*>(group_ltdof.GetRow(group));
611  Op(opd);
612  break;
613  }
614  default:
615  {
616  opd.ldofs = const_cast<int*>(group_ldof.GetRow(group));
617  Op(opd);
618  break;
619  }
620  }
621  return buf + opd.nldofs;
622 }
623 
624 template <class T>
625 void GroupCommunicator::BcastBegin(T *ldata, int layout) const
626 {
627  MFEM_VERIFY(comm_lock == 0, "object is already in use");
628 
629  if (group_buf_size == 0) { return; }
630 
631  int request_counter = 0;
632  switch (mode)
633  {
634  case byGroup: // ***** Communication by groups *****
635  {
636  T *buf;
637  if (layout != 1)
638  {
639  group_buf.SetSize(group_buf_size*sizeof(T));
640  buf = (T *)group_buf.GetData();
641  MFEM_VERIFY(layout != 2 || group_ltdof.Size() == group_ldof.Size(),
642  "'group_ltdof' is not set, use SetLTDofTable()");
643  }
644  else
645  {
646  buf = ldata;
647  }
648 
649  for (int gr = 1; gr < group_ldof.Size(); gr++)
650  {
651  const int nldofs = group_ldof.RowSize(gr);
652 
653  // ignore groups without dofs
654  if (nldofs == 0) { continue; }
655 
656  if (!gtopo.IAmMaster(gr)) // we are not the master
657  {
658  MPI_Irecv(buf,
659  nldofs,
660  MPITypeMap<T>::mpi_type,
661  gtopo.GetGroupMasterRank(gr),
662  40822 + gtopo.GetGroupMasterGroup(gr),
663  gtopo.GetComm(),
664  &requests[request_counter]);
665  request_marker[request_counter] = gr;
666  request_counter++;
667  }
668  else // we are the master
669  {
670  if (layout != 1)
671  {
672  CopyGroupToBuffer(ldata, buf, gr, layout);
673  }
674  const int gs = gtopo.GetGroupSize(gr);
675  const int *nbs = gtopo.GetGroup(gr);
676  for (int i = 0; i < gs; i++)
677  {
678  if (nbs[i] != 0)
679  {
680  MPI_Isend(buf,
681  nldofs,
682  MPITypeMap<T>::mpi_type,
683  gtopo.GetNeighborRank(nbs[i]),
684  40822 + gtopo.GetGroupMasterGroup(gr),
685  gtopo.GetComm(),
686  &requests[request_counter]);
687  request_marker[request_counter] = -1; // mark as send req.
688  request_counter++;
689  }
690  }
691  }
692  buf += nldofs;
693  }
694  break;
695  }
696 
697  case byNeighbor: // ***** Communication by neighbors *****
698  {
699  group_buf.SetSize(group_buf_size*sizeof(T));
700  T *buf = (T *)group_buf.GetData();
701  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
702  {
703  const int num_send_groups = nbr_send_groups.RowSize(nbr);
704  if (num_send_groups > 0)
705  {
706  // Possible optimization:
707  // if (num_send_groups == 1) and (layout == 1) then we do not
708  // need to copy the data in order to send it.
709  T *buf_start = buf;
710  const int *grp_list = nbr_send_groups.GetRow(nbr);
711  for (int i = 0; i < num_send_groups; i++)
712  {
713  buf = CopyGroupToBuffer(ldata, buf, grp_list[i], layout);
714  }
715  MPI_Isend(buf_start,
716  buf - buf_start,
717  MPITypeMap<T>::mpi_type,
718  gtopo.GetNeighborRank(nbr),
719  40822,
720  gtopo.GetComm(),
721  &requests[request_counter]);
722  request_marker[request_counter] = -1; // mark as send request
723  request_counter++;
724  }
725 
726  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
727  if (num_recv_groups > 0)
728  {
729  // Possible optimization (requires interface change):
730  // if (num_recv_groups == 1) and the (output layout == 1) then
731  // we can receive directly in the output buffer; however, at
732  // this point we do not have that information.
733  const int *grp_list = nbr_recv_groups.GetRow(nbr);
734  int recv_size = 0;
735  for (int i = 0; i < num_recv_groups; i++)
736  {
737  recv_size += group_ldof.RowSize(grp_list[i]);
738  }
739  MPI_Irecv(buf,
740  recv_size,
741  MPITypeMap<T>::mpi_type,
742  gtopo.GetNeighborRank(nbr),
743  40822,
744  gtopo.GetComm(),
745  &requests[request_counter]);
746  request_marker[request_counter] = nbr;
747  request_counter++;
748  buf_offsets[nbr] = buf - (T*)group_buf.GetData();
749  buf += recv_size;
750  }
751  }
752  MFEM_ASSERT(buf - (T*)group_buf.GetData() == group_buf_size, "");
753  break;
754  }
755  }
756 
757  comm_lock = 1; // 1 - locked for Bcast
758  num_requests = request_counter;
759 }
760 
761 template <class T>
762 void GroupCommunicator::BcastEnd(T *ldata, int layout) const
763 {
764  if (comm_lock == 0) { return; }
765  // The above also handles the case (group_buf_size == 0).
766  MFEM_VERIFY(comm_lock == 1, "object is NOT locked for Bcast");
767 
768  switch (mode)
769  {
770  case byGroup: // ***** Communication by groups *****
771  {
772  if (layout == 1)
773  {
774  MPI_Waitall(num_requests, requests, MPI_STATUSES_IGNORE);
775  }
776  else if (layout == 0)
777  {
778  // copy the received data from the buffer to ldata, as it arrives
779  int idx;
780  while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
781  idx != MPI_UNDEFINED)
782  {
783  int gr = request_marker[idx];
784  if (gr == -1) { continue; } // skip send requests
785 
786  // groups without dofs are skipped, so here nldofs > 0.
787  T *buf = (T *)group_buf.GetData() + group_ldof.GetI()[gr];
788  CopyGroupFromBuffer(buf, ldata, gr, layout);
789  }
790  }
791  break;
792  }
793 
794  case byNeighbor: // ***** Communication by neighbors *****
795  {
796  // copy the received data from the buffer to ldata, as it arrives
797  int idx;
798  while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
799  idx != MPI_UNDEFINED)
800  {
801  int nbr = request_marker[idx];
802  if (nbr == -1) { continue; } // skip send requests
803 
804  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
805  if (num_recv_groups > 0)
806  {
807  const int *grp_list = nbr_recv_groups.GetRow(nbr);
808  const T *buf = (T*)group_buf.GetData() + buf_offsets[nbr];
809  for (int i = 0; i < num_recv_groups; i++)
810  {
811  buf = CopyGroupFromBuffer(buf, ldata, grp_list[i], layout);
812  }
813  }
814  }
815  break;
816  }
817  }
818 
819  comm_lock = 0; // 0 - no lock
820  num_requests = 0;
821 }
822 
823 template <class T>
824 void GroupCommunicator::ReduceBegin(const T *ldata) const
825 {
826  MFEM_VERIFY(comm_lock == 0, "object is already in use");
827 
828  if (group_buf_size == 0) { return; }
829 
830  int request_counter = 0;
831  group_buf.SetSize(group_buf_size*sizeof(T));
832  T *buf = (T *)group_buf.GetData();
833  switch (mode)
834  {
835  case byGroup: // ***** Communication by groups *****
836  {
837  for (int gr = 1; gr < group_ldof.Size(); gr++)
838  {
839  const int nldofs = group_ldof.RowSize(gr);
840  // ignore groups without dofs
841  if (nldofs == 0) { continue; }
842 
843  if (!gtopo.IAmMaster(gr)) // we are not the master
844  {
845  const int layout = 0;
846  CopyGroupToBuffer(ldata, buf, gr, layout);
847  MPI_Isend(buf,
848  nldofs,
849  MPITypeMap<T>::mpi_type,
850  gtopo.GetGroupMasterRank(gr),
851  43822 + gtopo.GetGroupMasterGroup(gr),
852  gtopo.GetComm(),
853  &requests[request_counter]);
854  request_marker[request_counter] = -1; // mark as send request
855  request_counter++;
856  buf += nldofs;
857  }
858  else // we are the master
859  {
860  const int gs = gtopo.GetGroupSize(gr);
861  const int *nbs = gtopo.GetGroup(gr);
862  buf_offsets[gr] = buf - (T *)group_buf.GetData();
863  for (int i = 0; i < gs; i++)
864  {
865  if (nbs[i] != 0)
866  {
867  MPI_Irecv(buf,
868  nldofs,
869  MPITypeMap<T>::mpi_type,
870  gtopo.GetNeighborRank(nbs[i]),
871  43822 + gtopo.GetGroupMasterGroup(gr),
872  gtopo.GetComm(),
873  &requests[request_counter]);
874  request_marker[request_counter] = gr;
875  request_counter++;
876  buf += nldofs;
877  }
878  }
879  }
880  }
881  break;
882  }
883 
884  case byNeighbor: // ***** Communication by neighbors *****
885  {
886  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
887  {
888  // In Reduce operation: send_groups <--> recv_groups
889  const int num_send_groups = nbr_recv_groups.RowSize(nbr);
890  if (num_send_groups > 0)
891  {
892  T *buf_start = buf;
893  const int *grp_list = nbr_recv_groups.GetRow(nbr);
894  for (int i = 0; i < num_send_groups; i++)
895  {
896  const int layout = 0; // ldata is an array on all ldofs
897  buf = CopyGroupToBuffer(ldata, buf, grp_list[i], layout);
898  }
899  MPI_Isend(buf_start,
900  buf - buf_start,
901  MPITypeMap<T>::mpi_type,
902  gtopo.GetNeighborRank(nbr),
903  43822,
904  gtopo.GetComm(),
905  &requests[request_counter]);
906  request_marker[request_counter] = -1; // mark as send request
907  request_counter++;
908  }
909 
910  // In Reduce operation: send_groups <--> recv_groups
911  const int num_recv_groups = nbr_send_groups.RowSize(nbr);
912  if (num_recv_groups > 0)
913  {
914  const int *grp_list = nbr_send_groups.GetRow(nbr);
915  int recv_size = 0;
916  for (int i = 0; i < num_recv_groups; i++)
917  {
918  recv_size += group_ldof.RowSize(grp_list[i]);
919  }
920  MPI_Irecv(buf,
921  recv_size,
922  MPITypeMap<T>::mpi_type,
923  gtopo.GetNeighborRank(nbr),
924  43822,
925  gtopo.GetComm(),
926  &requests[request_counter]);
927  request_marker[request_counter] = nbr;
928  request_counter++;
929  buf_offsets[nbr] = buf - (T*)group_buf.GetData();
930  buf += recv_size;
931  }
932  }
933  MFEM_ASSERT(buf - (T*)group_buf.GetData() == group_buf_size, "");
934  break;
935  }
936  }
937 
938  comm_lock = 2;
939  num_requests = request_counter;
940 }
941 
942 template <class T>
943 void GroupCommunicator::ReduceEnd(T *ldata, int layout,
944  void (*Op)(OpData<T>)) const
945 {
946  if (comm_lock == 0) { return; }
947  // The above also handles the case (group_buf_size == 0).
948  MFEM_VERIFY(comm_lock == 2, "object is NOT locked for Reduce");
949 
950  switch (mode)
951  {
952  case byGroup: // ***** Communication by groups *****
953  {
954  OpData<T> opd;
955  opd.ldata = ldata;
956  Array<int> group_num_req(group_ldof.Size());
957  for (int gr = 1; gr < group_ldof.Size(); gr++)
958  {
959  group_num_req[gr] =
960  gtopo.IAmMaster(gr) ? gtopo.GetGroupSize(gr)-1 : 0;
961  }
962  int idx;
963  while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
964  idx != MPI_UNDEFINED)
965  {
966  int gr = request_marker[idx];
967  if (gr == -1) { continue; } // skip send requests
968 
969  // Delay the processing of a group until all receive requests, for
970  // that group, are done:
971  if ((--group_num_req[gr]) != 0) { continue; }
972 
973  opd.nldofs = group_ldof.RowSize(gr);
974  // groups without dofs are skipped, so here nldofs > 0.
975 
976  opd.buf = (T *)group_buf.GetData() + buf_offsets[gr];
977  opd.ldofs = (layout == 0) ?
978  group_ldof.GetRow(gr) : group_ltdof.GetRow(gr);
979  opd.nb = gtopo.GetGroupSize(gr)-1;
980  Op(opd);
981  }
982  break;
983  }
984 
985  case byNeighbor: // ***** Communication by neighbors *****
986  {
987  MPI_Waitall(num_requests, requests, MPI_STATUSES_IGNORE);
988 
989  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
990  {
991  // In Reduce operation: send_groups <--> recv_groups
992  const int num_recv_groups = nbr_send_groups.RowSize(nbr);
993  if (num_recv_groups > 0)
994  {
995  const int *grp_list = nbr_send_groups.GetRow(nbr);
996  const T *buf = (T*)group_buf.GetData() + buf_offsets[nbr];
997  for (int i = 0; i < num_recv_groups; i++)
998  {
999  buf = ReduceGroupFromBuffer(buf, ldata, grp_list[i],
1000  layout, Op);
1001  }
1002  }
1003  }
1004  break;
1005  }
1006  }
1007 
1008  comm_lock = 0; // 0 - no lock
1009  num_requests = 0;
1010 }
1011 
1012 template <class T>
1013 void GroupCommunicator::Sum(OpData<T> opd)
1014 {
1015  if (opd.nb == 1)
1016  {
1017  for (int i = 0; i < opd.nldofs; i++)
1018  {
1019  opd.ldata[opd.ldofs[i]] += opd.buf[i];
1020  }
1021  }
1022  else
1023  {
1024  for (int i = 0; i < opd.nldofs; i++)
1025  {
1026  T data = opd.ldata[opd.ldofs[i]];
1027  for (int j = 0; j < opd.nb; j++)
1028  {
1029  data += opd.buf[j*opd.nldofs+i];
1030  }
1031  opd.ldata[opd.ldofs[i]] = data;
1032  }
1033  }
1034 }
1035 
1036 template <class T>
1037 void GroupCommunicator::Min(OpData<T> opd)
1038 {
1039  for (int i = 0; i < opd.nldofs; i++)
1040  {
1041  T data = opd.ldata[opd.ldofs[i]];
1042  for (int j = 0; j < opd.nb; j++)
1043  {
1044  T b = opd.buf[j*opd.nldofs+i];
1045  if (data > b)
1046  {
1047  data = b;
1048  }
1049  }
1050  opd.ldata[opd.ldofs[i]] = data;
1051  }
1052 }
1053 
1054 template <class T>
1055 void GroupCommunicator::Max(OpData<T> opd)
1056 {
1057  for (int i = 0; i < opd.nldofs; i++)
1058  {
1059  T data = opd.ldata[opd.ldofs[i]];
1060  for (int j = 0; j < opd.nb; j++)
1061  {
1062  T b = opd.buf[j*opd.nldofs+i];
1063  if (data < b)
1064  {
1065  data = b;
1066  }
1067  }
1068  opd.ldata[opd.ldofs[i]] = data;
1069  }
1070 }
1071 
1072 template <class T>
1073 void GroupCommunicator::BitOR(OpData<T> opd)
1074 {
1075  for (int i = 0; i < opd.nldofs; i++)
1076  {
1077  T data = opd.ldata[opd.ldofs[i]];
1078  for (int j = 0; j < opd.nb; j++)
1079  {
1080  data |= opd.buf[j*opd.nldofs+i];
1081  }
1082  opd.ldata[opd.ldofs[i]] = data;
1083  }
1084 }
1085 
1086 void GroupCommunicator::PrintInfo(std::ostream &out) const
1087 {
1088  char c = '\0';
1089  const int tag = 46800;
1090  const int myid = gtopo.MyRank();
1091 
1092  int num_sends = 0, num_recvs = 0;
1093  size_t mem_sends = 0, mem_recvs = 0;
1094  int num_master_groups = 0, num_empty_groups = 0;
1095  int num_active_neighbors = 0; // for mode == byNeighbor
1096  switch (mode)
1097  {
1098  case byGroup:
1099  for (int gr = 1; gr < group_ldof.Size(); gr++)
1100  {
1101  const int nldofs = group_ldof.RowSize(gr);
1102  if (nldofs == 0)
1103  {
1104  num_empty_groups++;
1105  continue;
1106  }
1107  if (gtopo.IAmMaster(gr))
1108  {
1109  num_sends += (gtopo.GetGroupSize(gr)-1);
1110  mem_sends += sizeof(double)*nldofs*(gtopo.GetGroupSize(gr)-1);
1111  num_master_groups++;
1112  }
1113  else
1114  {
1115  num_recvs++;
1116  mem_recvs += sizeof(double)*nldofs;
1117  }
1118  }
1119  break;
1120 
1121  case byNeighbor:
1122  for (int gr = 1; gr < group_ldof.Size(); gr++)
1123  {
1124  const int nldofs = group_ldof.RowSize(gr);
1125  if (nldofs == 0)
1126  {
1127  num_empty_groups++;
1128  continue;
1129  }
1130  if (gtopo.IAmMaster(gr))
1131  {
1132  num_master_groups++;
1133  }
1134  }
1135  for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
1136  {
1137  const int num_send_groups = nbr_send_groups.RowSize(nbr);
1138  if (num_send_groups > 0)
1139  {
1140  const int *grp_list = nbr_send_groups.GetRow(nbr);
1141  for (int i = 0; i < num_send_groups; i++)
1142  {
1143  mem_sends += sizeof(double)*group_ldof.RowSize(grp_list[i]);
1144  }
1145  num_sends++;
1146  }
1147 
1148  const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
1149  if (num_recv_groups > 0)
1150  {
1151  const int *grp_list = nbr_recv_groups.GetRow(nbr);
1152  for (int i = 0; i < num_recv_groups; i++)
1153  {
1154  mem_recvs += sizeof(double)*group_ldof.RowSize(grp_list[i]);
1155  }
1156  num_recvs++;
1157  }
1158  if (num_send_groups > 0 || num_recv_groups > 0)
1159  {
1160  num_active_neighbors++;
1161  }
1162  }
1163  break;
1164  }
1165  if (myid != 0)
1166  {
1167  MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag, gtopo.GetComm(),
1168  MPI_STATUS_IGNORE);
1169  }
1170  else
1171  {
1172  out << "\nGroupCommunicator:\n";
1173  }
1174  out << "Rank " << myid << ":\n"
1175  " mode = " <<
1176  (mode == byGroup ? "byGroup" : "byNeighbor") << "\n"
1177  " number of sends = " << num_sends <<
1178  " (" << mem_sends << " bytes)\n"
1179  " number of recvs = " << num_recvs <<
1180  " (" << mem_recvs << " bytes)\n";
1181  out <<
1182  " num groups = " << group_ldof.Size() << " = " <<
1183  num_master_groups << " + " <<
1184  group_ldof.Size()-num_master_groups-num_empty_groups << " + " <<
1185  num_empty_groups << " (master + slave + empty)\n";
1186  if (mode == byNeighbor)
1187  {
1188  out <<
1189  " num neighbors = " << nbr_send_groups.Size() << " = " <<
1190  num_active_neighbors << " + " <<
1191  nbr_send_groups.Size()-num_active_neighbors <<
1192  " (active + inactive)\n";
1193  }
1194  if (myid != gtopo.NRanks()-1)
1195  {
1196  out << std::flush;
1197  MPI_Send(&c, 1, MPI_CHAR, myid+1, tag, gtopo.GetComm());
1198  }
1199  else
1200  {
1201  out << std::endl;
1202  }
1203  MPI_Barrier(gtopo.GetComm());
1204 }
1205 
1206 GroupCommunicator::~GroupCommunicator()
1207 {
1208  delete [] buf_offsets;
1209  delete [] request_marker;
1210  // delete [] statuses;
1211  delete [] requests;
1212 }
1213 
1214 // @cond DOXYGEN_SKIP
1215 
1216 // instantiate GroupCommunicator::Bcast and Reduce for int and double
1217 template void GroupCommunicator::BcastBegin<int>(int *, int) const;
1218 template void GroupCommunicator::BcastEnd<int>(int *, int) const;
1219 template void GroupCommunicator::ReduceBegin<int>(const int *) const;
1220 template void GroupCommunicator::ReduceEnd<int>(
1221  int *, int, void (*)(OpData<int>)) const;
1222 
1223 template void GroupCommunicator::BcastBegin<double>(double *, int) const;
1224 template void GroupCommunicator::BcastEnd<double>(double *, int) const;
1225 template void GroupCommunicator::ReduceBegin<double>(const double *) const;
1226 template void GroupCommunicator::ReduceEnd<double>(
1227  double *, int, void (*)(OpData<double>)) const;
1228 
1229 // @endcond
1230 
1231 // instantiate reduce operators for int and double
1232 template void GroupCommunicator::Sum<int>(OpData<int>);
1233 template void GroupCommunicator::Min<int>(OpData<int>);
1234 template void GroupCommunicator::Max<int>(OpData<int>);
1235 template void GroupCommunicator::BitOR<int>(OpData<int>);
1236 
1237 template void GroupCommunicator::Sum<double>(OpData<double>);
1238 template void GroupCommunicator::Min<double>(OpData<double>);
1239 template void GroupCommunicator::Max<double>(OpData<double>);
1240 
1241 
1242 #ifdef __bgq__
1243 static void DebugRankCoords(int** coords, int dim, int size)
1244 {
1245  for (int i = 0; i < size; i++)
1246  {
1247  mfem::out << "Rank " << i << " coords: ";
1248  for (int j = 0; j < dim; j++)
1249  {
1250  mfem::out << coords[i][j] << " ";
1251  }
1252  mfem::out << endl;
1253  }
1254 }
1255 
1256 struct CompareCoords
1257 {
1258  CompareCoords(int coord) : coord(coord) {}
1259  int coord;
1260 
1261  bool operator()(int* const &a, int* const &b) const
1262  { return a[coord] < b[coord]; }
1263 };
1264 
1265 void KdTreeSort(int** coords, int d, int dim, int size)
1266 {
1267  if (size > 1)
1268  {
1269  bool all_same = true;
1270  for (int i = 1; i < size && all_same; i++)
1271  {
1272  for (int j = 0; j < dim; j++)
1273  {
1274  if (coords[i][j] != coords[0][j]) { all_same = false; break; }
1275  }
1276  }
1277  if (all_same) { return; }
1278 
1279  // sort by coordinate 'd'
1280  std::sort(coords, coords + size, CompareCoords(d));
1281  int next = (d + 1) % dim;
1282 
1283  if (coords[0][d] < coords[size-1][d])
1284  {
1285  KdTreeSort(coords, next, dim, size/2);
1286  KdTreeSort(coords + size/2, next, dim, size - size/2);
1287  }
1288  else
1289  {
1290  // skip constant dimension
1291  KdTreeSort(coords, next, dim, size);
1292  }
1293  }
1294 }
1295 
1296 MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
1297 {
1298  MPI_Status status;
1299 
1300  int rank, size;
1301  MPI_Comm_rank(comm, &rank);
1302  MPI_Comm_size(comm, &size);
1303 
1304  int dim;
1305  MPIX_Torus_ndims(&dim);
1306 
1307  int* mycoords = new int[dim + 1];
1308  MPIX_Rank2torus(rank, mycoords);
1309 
1310  MPI_Send(mycoords, dim, MPI_INT, 0, 111, comm);
1311  delete [] mycoords;
1312 
1313  if (rank == 0)
1314  {
1315  int** coords = new int*[size];
1316  for (int i = 0; i < size; i++)
1317  {
1318  coords[i] = new int[dim + 1];
1319  coords[i][dim] = i;
1320  MPI_Recv(coords[i], dim, MPI_INT, i, 111, comm, &status);
1321  }
1322 
1323  KdTreeSort(coords, 0, dim, size);
1324 
1325  //DebugRankCoords(coords, dim, size);
1326 
1327  for (int i = 0; i < size; i++)
1328  {
1329  MPI_Send(&coords[i][dim], 1, MPI_INT, i, 112, comm);
1330  delete [] coords[i];
1331  }
1332  delete [] coords;
1333  }
1334 
1335  int new_rank;
1336  MPI_Recv(&new_rank, 1, MPI_INT, 0, 112, comm, &status);
1337 
1338  MPI_Comm new_comm;
1339  MPI_Comm_split(comm, 0, new_rank, &new_comm);
1340  return new_comm;
1341 }
1342 
1343 #else // __bgq__
1344 
1345 MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
1346 {
1347  // pass
1348  return comm;
1349 }
1350 #endif // __bgq__
1351 
1352 } // namespace mfem
1353 
1354 #endif
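
A minimal usage sketch of the split-phase broadcast/reduce cycle implemented above; this is an editorial illustration rather than part of communication.cpp. The GroupTopology, the ldof-to-group map, and the data array are assumed to be provided by the caller (for example, by a parallel finite element space), and only members defined in this file or declared in communication.hpp are used; MPI is assumed to be initialized.

#include "mfem.hpp"
using namespace mfem;

// Make shared entries consistent, then accumulate contributions from all
// ranks that share each entry (the sums end up on the group masters).
void SyncSharedDofs(GroupTopology &gtopo, const Array<int> &ldof_group,
                    Array<double> &ldata)
{
   GroupCommunicator gcomm(gtopo);   // default communication mode: byNeighbor
   gcomm.Create(ldof_group);         // builds group_ldof; calls Finalize()

   // Broadcast: each group master sends its values to the other members.
   gcomm.BcastBegin(ldata.GetData(), 0);   // layout 0: plain ldof array
   // ... independent local work may overlap the communication here ...
   gcomm.BcastEnd(ldata.GetData(), 0);

   // Reduce: non-masters send their values back and the masters sum them.
   gcomm.ReduceBegin(ldata.GetData());
   gcomm.ReduceEnd(ldata.GetData(), 0, GroupCommunicator::Sum<double>);
   // A second Bcast would redistribute the reduced values if needed.
}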