#include "../config/config.hpp"
void MPI_Session::GetRankAndSize()
{
   MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
   MPI_Comm_size(MPI_COMM_WORLD, &world_size);
}
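// Commentary: MPI_Session is a small RAII helper that calls MPI_Init in its
// constructor and MPI_Finalize in its destructor, caching the world rank and
// size via the two calls above. A minimal usage sketch (assuming the usual
// MFEM interface with Root() and WorldSize()):
//
//    int main(int argc, char *argv[])
//    {
//       mfem::MPI_Session mpi(argc, argv);
//       if (mpi.Root()) { mfem::out << mpi.WorldSize() << " ranks\n"; }
//       // ... MFEM's parallel classes can be used here ...
//    }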
// Copy constructor: duplicate the communicator and all group tables.
GroupTopology::GroupTopology(const GroupTopology &gt)
   : MyComm(gt.MyComm),
     group_lproc(gt.group_lproc)
{
   gt.groupmaster_lproc.Copy(groupmaster_lproc);
   gt.lproc_proc.Copy(lproc_proc);
   gt.group_mgroup.Copy(group_mgroup);
}
void GroupTopology::ProcToLProc()
{
   int NRanks;
   MPI_Comm_size(MyComm, &NRanks);

   map<int, int> proc_lproc;

   // Assign local processor ids (lprocs) in order of first appearance in
   // group_lproc; lproc 0 is always MyRank.
   int lproc_counter = 0;
   for (int i = 0; i < group_lproc.Size_of_connections(); i++)
   {
      const pair<const int, int> p(group_lproc.GetJ()[i], lproc_counter);
      if (proc_lproc.insert(p).second)
      {
         lproc_counter++;
      }
   }

   // Build the inverse map: lproc id -> global rank.
   lproc_proc.SetSize(lproc_counter);
   for (map<int, int>::iterator it = proc_lproc.begin();
        it != proc_lproc.end(); ++it)
   {
      lproc_proc[it->second] = it->first;
   }

   // Relabel the group tables from global ranks to local processor ids.
   for (int i = 0; i < group_lproc.Size_of_connections(); i++)
   {
      group_lproc.GetJ()[i] = proc_lproc[group_lproc.GetJ()[i]];
   }

   for (int i = 0; i < NGroups(); i++)
   {
      groupmaster_lproc[i] = proc_lproc[groupmaster_lproc[i]];
   }
}
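// Commentary: after ProcToLProc, all group tables refer to ranks through
// dense local ids. For example (illustrative numbers), if this rank's groups
// reference global ranks {5, 17, 3} and MyRank == 5, they are relabeled to
// lprocs {0, 1, 2} in order of first appearance, with lproc 0 == MyRank.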
   // For each group, store its local group id followed by its member lprocs,
   // so a whole row can be sent to the group's members in one message.
   Table group_mgroupandproc;
   group_mgroupandproc.SetDims(NGroups(),
                               group_lproc.Size_of_connections() + NGroups());
   for (int i = 0; i < NGroups(); i++)
   {
      int j = group_mgroupandproc.GetI()[i];
      group_mgroupandproc.GetI()[i+1] = j + group_lproc.RowSize(i) + 1;
      group_mgroupandproc.GetJ()[j] = i;
      j++;
      for (int k = group_lproc.GetI()[i];
           j < group_mgroupandproc.GetI()[i+1]; j++, k++)
      {
         group_mgroupandproc.GetJ()[j] = group_lproc.GetJ()[k];
      }
   }
   // Simplest choice of the group owner: pick an element of each set.
   for (int i = 0; i < NGroups(); i++)
   {
      groupmaster_lproc[i] = groups.PickElementInSet(i);
   }
   // Count the group ids we receive from the masters (recv_counter) and the
   // messages we send as a master (send_counter).
   int send_counter = 0;
   int recv_counter = 0;
   for (int i = 1; i < NGroups(); i++)
      if (groupmaster_lproc[i] != 0) // we are not the master
      {
         recv_counter++;
      }
      else // we are the master
      {
         send_counter += group_lproc.RowSize(i)-1;
      }

   MPI_Request *requests = new MPI_Request[send_counter];
   MPI_Status  *statuses = new MPI_Status[send_counter];
   int max_recv_size = 0;
   send_counter = 0;
   for (int i = 1; i < NGroups(); i++)
   {
      if (groupmaster_lproc[i] == 0) // we are the master
      {
         group_mgroup[i] = i;
         // send the group's id and member list to the other members
         for (int j = group_lproc.GetI()[i];
              j < group_lproc.GetI()[i+1]; j++)
         {
            if (group_lproc.GetJ()[j] != 0)
            {
               MPI_Isend(group_mgroupandproc.GetRow(i),
                         group_mgroupandproc.RowSize(i),
                         MPI_INT,
                         lproc_proc[group_lproc.GetJ()[j]],
                         mpitag,
                         MyComm,
                         &requests[send_counter]);
               send_counter++;
            }
         }
      }
      else // we are not the master: track the largest incoming message
      {
         if (max_recv_size < group_lproc.RowSize(i))
         {
            max_recv_size = group_lproc.RowSize(i);
         }
      }
   }
   max_recv_size++; // +1 for the group id prefix
   if (recv_counter > 0)
   {
      int count;
      MPI_Status status;
      IntegerSet group;
      int *recv_buf = new int[max_recv_size];
      for ( ; recv_counter > 0; recv_counter--)
      {
         MPI_Recv(recv_buf, max_recv_size, MPI_INT,
                  MPI_ANY_SOURCE, mpitag, MyComm, &status);

         MPI_Get_count(&status, MPI_INT, &count);

         // recv_buf[0] is the master's group id; the rest are the members
         group.Recreate(count-1, recv_buf+1);
         int g = groups.Lookup(group);
         group_mgroup[g] = recv_buf[0];

         if (lproc_proc[groupmaster_lproc[g]] != status.MPI_SOURCE)
         {
            mfem::err << "\n\n\nGroupTopology::GroupTopology: "
                      << MyRank() << ": ERROR\n\n\n" << endl;
            mfem_error();
         }
      }
      delete [] recv_buf;
   }

   MPI_Waitall(send_counter, requests, statuses);

   delete [] statuses;
   delete [] requests;
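// Commentary on the exchange above: each group master Isends a row of
// group_mgroupandproc -- its group id followed by the member list -- to every
// other member; each non-master Recvs such a row, rebuilds the member set,
// looks it up in its own 'groups' list, and records the master's id in
// group_mgroup. The MPI_SOURCE check verifies the sender really is the
// group's master.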
void GroupTopology::Save(ostream &out) const
{
   out << "\ncommunication_groups\n";
   out << "number_of_groups " << NGroups() << "\n\n";

   out << "# number of entities in each group, followed by group ids in group\n";
   for (int group_id = 0; group_id < NGroups(); ++group_id)
   {
      int group_size = GetGroupSize(group_id);
      const int *group_ptr = GetGroup(group_id);
      out << group_size;
      for (int group_member_index = 0; group_member_index < group_size;
           ++group_member_index)
      {
         out << " " << GetNeighborRank(group_ptr[group_member_index]);
      }
      out << "\n";
   }

   out << "\n";
}
   std::string ident;
   int number_of_groups = -1;
   in >> ident;
   MFEM_VERIFY(ident == "number_of_groups",
               "GroupTopology::Load - expected 'number_of_groups' entry.");
   in >> number_of_groups;

   ListOfIntegerSets integer_sets;
   for (int group_id = 0; group_id < number_of_groups; ++group_id)
   {
      int group_size;
      in >> group_size;
      IntegerSet integer_set;
      Array<int> &array = integer_set; // IntegerSet converts to Array<int>
      array.SetSize(group_size);
      for (int index = 0; index < group_size; ++index)
      {
         in >> array[index];
      }
      integer_sets.Insert(integer_set);
   }

   Create(integer_sets, 823);
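// Commentary: the constant 823 is simply the MPI tag handed to Create() for
// its group-id exchange; it only needs to be distinct from other tags in
// flight on this communicator.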
   request_marker = NULL;
   // Two-pass Table construction: count the ldofs per group, then fill them.
   for (int i = 0; i < ldof_group.Size(); i++)
   {
      int group = ldof_group[i];
      if (group != 0) { group_ldof.AddAColumnInRow(group); }
   }
   group_ldof.MakeJ();
   for (int i = 0; i < ldof_group.Size(); i++)
   {
      int group = ldof_group[i];
      if (group != 0) { group_ldof.AddConnection(group, i); }
   }
   group_ldof.ShiftUpI();
   int request_counter = 0;
   for (int gr = 1; gr < group_ldof.Size(); gr++)
      if (group_ldof.RowSize(gr) != 0)
      {
         // master: one send per other member; non-master: one receive
         int gr_requests;
         if (!gtopo.IAmMaster(gr))
         {
            gr_requests = 1;
         }
         else
         {
            gr_requests = gtopo.GetGroupSize(gr)-1;
         }

         request_counter += gr_requests;
         group_buf_size += gr_requests * group_ldof.RowSize(gr);
      }

   requests = new MPI_Request[request_counter];
   request_marker = new int[request_counter];
   // In 'byNeighbor' mode: count the send/recv groups for each neighbor.
   for (int gr = 1; gr < group_ldof.Size(); gr++)
   {
      const int nldofs = group_ldof.RowSize(gr);
      if (nldofs == 0) { continue; }

      if (!gtopo.IAmMaster(gr)) // receive this group from its master
      {
         nbr_recv_groups.AddAColumnInRow(gtopo.GetGroupMaster(gr));
      }
      else // send this group to every other member
      {
         const int grp_size = gtopo.GetGroupSize(gr);
         const int *grp_nbr_list = gtopo.GetGroup(gr);
         for (int i = 0; i < grp_size; i++)
         {
            if (grp_nbr_list[i] != 0)
            {
               nbr_send_groups.AddAColumnInRow(grp_nbr_list[i]);
            }
         }
      }
   }
   nbr_send_groups.MakeJ();
   nbr_recv_groups.MakeJ();
   for (int gr = 1; gr < group_ldof.Size(); gr++)
   {
      const int nldofs = group_ldof.RowSize(gr);
      if (nldofs == 0) { continue; }

      if (!gtopo.IAmMaster(gr))
      {
         nbr_recv_groups.AddConnection(gtopo.GetGroupMaster(gr), gr);
      }
      else
      {
         const int grp_size = gtopo.GetGroupSize(gr);
         const int *grp_nbr_list = gtopo.GetGroup(gr);
         for (int i = 0; i < grp_size; i++)
         {
            if (grp_nbr_list[i] != 0)
            {
               nbr_send_groups.AddConnection(grp_nbr_list[i], gr);
            }
         }
      }
   }
   nbr_send_groups.ShiftUpI();
   nbr_recv_groups.ShiftUpI();
   // Sort each neighbor's recv groups by the master's group id, so sender
   // and receiver agree on the order of groups inside one combined message.
   Array<Pair<int,int> > group_ids;
   for (int nbr = 1; nbr < nbr_recv_groups.Size(); nbr++)
   {
      const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
      if (num_recv_groups > 0)
      {
         int *grp_list = nbr_recv_groups.GetRow(nbr);
         group_ids.SetSize(num_recv_groups);
         for (int i = 0; i < num_recv_groups; i++)
         {
            group_ids[i].one = gtopo.GetGroupMasterGroup(grp_list[i]);
            group_ids[i].two = grp_list[i]; // local group id
         }
         group_ids.Sort();
         for (int i = 0; i < num_recv_groups; i++)
         {
            grp_list[i] = group_ids[i].two;
         }
      }
   }
void GroupCommunicator::SetLTDofTable(Array<int> &ldof_ltdof)
{
   if (group_ltdof.Size() == group_ldof.Size()) { return; }

   group_ltdof.MakeI(group_ldof.Size());
   for (int gr = 1; gr < group_ldof.Size(); gr++)
   {
      if (gtopo.IAmMaster(gr))
      {
         group_ltdof.AddColumnsInRow(gr, group_ldof.RowSize(gr));
      }
   }
   group_ltdof.MakeJ();
   for (int gr = 1; gr < group_ldof.Size(); gr++)
   {
      if (gtopo.IAmMaster(gr))
      {
         const int *ldofs = group_ldof.GetRow(gr);
         const int nldofs = group_ldof.RowSize(gr);
         for (int i = 0; i < nldofs; i++)
         {
            group_ltdof.AddConnection(gr, ldof_ltdof[ldofs[i]]);
         }
      }
   }
   group_ltdof.ShiftUpI();
}
template <class T>
T *GroupCommunicator::CopyGroupToBuffer(const T *ldata, T *buf, int group,
                                        int layout) const
{
   switch (layout)
   {
      case 1: // the group's entries are contiguous in ldata
      {
         return std::copy(ldata + group_ldof.GetI()[group],
                          ldata + group_ldof.GetI()[group+1],
                          buf);
      }
      case 2: // ldata is indexed by local true dof
      {
         const int nltdofs = group_ltdof.RowSize(group);
         const int *ltdofs = group_ltdof.GetRow(group);
         for (int j = 0; j < nltdofs; j++)
         {
            buf[j] = ldata[ltdofs[j]];
         }
         return buf + nltdofs;
      }
      default: // layout == 0: ldata is indexed by local dof
      {
         const int nldofs = group_ldof.RowSize(group);
         const int *ldofs = group_ldof.GetRow(group);
         for (int j = 0; j < nldofs; j++)
         {
            buf[j] = ldata[ldofs[j]];
         }
         return buf + nldofs;
      }
   }
}
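// Commentary on the 'layout' convention visible above: 0 means ldata is
// indexed through the group_ldof rows; 1 means ldata already stores each
// group's entries contiguously (so whole ranges can be block-copied); 2
// means ldata is indexed through the group_ltdof rows, i.e. by local true
// dof, which only the group masters can do.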
template <class T>
const T *GroupCommunicator::CopyGroupFromBuffer(const T *buf, T *ldata,
                                                int group, int layout) const
{
   const int nldofs = group_ldof.RowSize(group);
   switch (layout)
   {
      case 1:
      {
         std::copy(buf, buf + nldofs, ldata + group_ldof.GetI()[group]);
         break;
      }
      case 2:
      {
         const int *ltdofs = group_ltdof.GetRow(group);
         for (int j = 0; j < nldofs; j++)
         {
            ldata[ltdofs[j]] = buf[j];
         }
         break;
      }
      default:
      {
         const int *ldofs = group_ldof.GetRow(group);
         for (int j = 0; j < nldofs; j++)
         {
            ldata[ldofs[j]] = buf[j];
         }
         break;
      }
   }
   return buf + nldofs;
}
template <class T>
const T *GroupCommunicator::ReduceGroupFromBuffer(const T *buf, T *ldata,
                                                  int group, int layout,
                                                  void (*Op)(OpData<T>)) const
{
   OpData<T> opd;
   opd.ldata = ldata;
   opd.nldofs = group_ldof.RowSize(group);
   opd.nb = 1;
   opd.buf = const_cast<T*>(buf);

   switch (layout)
   {
      case 1:
      {
         MFEM_ABORT("layout 1 is not supported");
         T *dest = ldata + group_ldof.GetI()[group];
         for (int j = 0; j < opd.nldofs; j++)
         {
            dest[j] += buf[j];
         }
         break;
      }
      case 2:
      {
         opd.ldofs = const_cast<int*>(group_ltdof.GetRow(group));
         Op(opd);
         break;
      }
      default:
      {
         opd.ldofs = const_cast<int*>(group_ldof.GetRow(group));
         Op(opd);
         break;
      }
   }
   return buf + opd.nldofs;
}
template <class T>
void GroupCommunicator::BcastBegin(T *ldata, int layout)
{
   MFEM_VERIFY(comm_lock == 0, "object is already in use");
   if (group_buf_size == 0) { return; }

   int request_counter = 0;
   switch (mode)
   {
      case byGroup: // separate messages within each group
      {
         T *buf;
         if (layout != 1)
         {
            group_buf.SetSize(group_buf_size*sizeof(T));
            buf = (T *)group_buf.GetData();
            MFEM_VERIFY(layout != 2 || group_ltdof.Size() == group_ldof.Size(),
                        "'group_ltdof' is not set, use SetLTDofTable()");
         }
         else
         {
            buf = ldata; // layout 1: send/receive directly in ldata
         }
         for (int gr = 1; gr < group_ldof.Size(); gr++)
         {
            const int nldofs = group_ldof.RowSize(gr);
            // ignore groups without dofs
            if (nldofs == 0) { continue; }

            if (!gtopo.IAmMaster(gr)) // not the master: post one receive
            {
               MPI_Irecv(buf, nldofs, MPITypeMap<T>::mpi_type,
                         gtopo.GetGroupMasterRank(gr),
                         40822 + gtopo.GetGroupMasterGroup(gr),
                         gtopo.GetComm(), &requests[request_counter]);
               request_marker[request_counter] = gr;
               request_counter++;
            }
            else // the master: send to all other members of group gr
            {
               if (layout != 1) { CopyGroupToBuffer(ldata, buf, gr, layout); }
               const int gs = gtopo.GetGroupSize(gr);
               const int *nbs = gtopo.GetGroup(gr);
               for (int i = 0; i < gs; i++)
               {
                  if (nbs[i] != 0)
                  {
                     MPI_Isend(buf, nldofs, MPITypeMap<T>::mpi_type,
                               gtopo.GetNeighborRank(nbs[i]),
                               40822 + gtopo.GetGroupMasterGroup(gr),
                               gtopo.GetComm(), &requests[request_counter]);
                     request_marker[request_counter] = -1; // send request
                     request_counter++;
                  }
               }
            }
            buf += nldofs;
         }
         break;
      }
      case byNeighbor: // one combined message per neighbor
      {
         group_buf.SetSize(group_buf_size*sizeof(T));
         T *buf = (T *)group_buf.GetData();
         for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
         {
            const int num_send_groups = nbr_send_groups.RowSize(nbr);
            if (num_send_groups > 0)
            {
               // pack all send groups for this neighbor into one message
               T *buf_start = buf;
               const int *grp_list = nbr_send_groups.GetRow(nbr);
               for (int i = 0; i < num_send_groups; i++)
               {
                  buf = CopyGroupToBuffer(ldata, buf, grp_list[i], layout);
               }
               MPI_Isend(buf_start, buf - buf_start, MPITypeMap<T>::mpi_type,
                         gtopo.GetNeighborRank(nbr), 40822,
                         gtopo.GetComm(), &requests[request_counter]);
               request_marker[request_counter] = -1; // mark as a send request
               request_counter++;
            }

            const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
            if (num_recv_groups > 0)
            {
               // compute the size of one combined receive from this neighbor
               const int *grp_list = nbr_recv_groups.GetRow(nbr);
               int recv_size = 0;
               for (int i = 0; i < num_recv_groups; i++)
               {
                  recv_size += group_ldof.RowSize(grp_list[i]);
               }
               MPI_Irecv(buf, recv_size, MPITypeMap<T>::mpi_type,
                         gtopo.GetNeighborRank(nbr), 40822,
                         gtopo.GetComm(), &requests[request_counter]);
               request_marker[request_counter] = nbr;
               request_counter++;
               buf_offsets[nbr] = buf - (T*)group_buf.GetData();
               buf += recv_size;
            }
         }
         MFEM_ASSERT(buf - (T*)group_buf.GetData() == group_buf_size, "");
         break;
      }
   }

   comm_lock = 1; // 1 - locked for Bcast
   num_requests = request_counter;
}
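// Commentary: a typical call sequence. The split Begin/End API exists so the
// posted sends/receives can be overlapped with unrelated local work:
//
//    comm.BcastBegin(data, 0); // 'comm' finalized, 'data' in layout 0
//    // ... independent local computation ...
//    comm.BcastEnd(data, 0);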
template <class T>
void GroupCommunicator::BcastEnd(T *ldata, int layout)
{
   if (comm_lock == 0) { return; }
   // The above also handles the case (group_buf_size == 0).
   MFEM_VERIFY(comm_lock == 1, "object is NOT locked for Bcast");

   switch (mode)
   {
      case byGroup:
      {
         if (layout == 1 || layout == 2)
         {
            MPI_Waitall(num_requests, requests, MPI_STATUSES_IGNORE);
         }
         else if (layout == 0)
         {
            // copy the received data from the buffer to ldata as it arrives
            int idx;
            while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
                   idx != MPI_UNDEFINED)
            {
               int gr = request_marker[idx];
               if (gr == -1) { continue; } // skip send requests

               T *buf = (T *)group_buf.GetData() + group_ldof.GetI()[gr];
               CopyGroupFromBuffer(buf, ldata, gr, layout);
            }
         }
         break;
      }
      case byNeighbor:
      {
         // unpack each neighbor's combined message as it arrives
         int idx;
         while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
                idx != MPI_UNDEFINED)
         {
            int nbr = request_marker[idx];
            if (nbr == -1) { continue; } // skip send requests

            const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
            if (num_recv_groups > 0)
            {
               const int *grp_list = nbr_recv_groups.GetRow(nbr);
               const T *buf = (T*)group_buf.GetData() + buf_offsets[nbr];
               for (int i = 0; i < num_recv_groups; i++)
               {
                  buf = CopyGroupFromBuffer(buf, ldata, grp_list[i], layout);
               }
            }
         }
         break;
      }
   }

   comm_lock = 0; // 0 - no lock
   num_requests = 0;
}
template <class T>
void GroupCommunicator::ReduceBegin(const T *ldata)
{
   MFEM_VERIFY(comm_lock == 0, "object is already in use");
   if (group_buf_size == 0) { return; }

   int request_counter = 0;
   group_buf.SetSize(group_buf_size*sizeof(T));
   T *buf = (T *)group_buf.GetData();
   switch (mode)
   {
      case byGroup: // separate messages within each group
      {
         for (int gr = 1; gr < group_ldof.Size(); gr++)
         {
            const int nldofs = group_ldof.RowSize(gr);
            // ignore groups without dofs
            if (nldofs == 0) { continue; }

            if (!gtopo.IAmMaster(gr)) // not the master: send to the master
            {
               const int layout = 0;
               CopyGroupToBuffer(ldata, buf, gr, layout);
               MPI_Isend(buf, nldofs, MPITypeMap<T>::mpi_type,
                         gtopo.GetGroupMasterRank(gr),
                         43822 + gtopo.GetGroupMasterGroup(gr),
                         gtopo.GetComm(), &requests[request_counter]);
               request_marker[request_counter] = -1; // mark as a send request
               request_counter++;
               buf += nldofs;
            }
            else // the master: receive one block from each other member
            {
               const int gs = gtopo.GetGroupSize(gr);
               const int *nbs = gtopo.GetGroup(gr);
               buf_offsets[gr] = buf - (T *)group_buf.GetData();
               for (int i = 0; i < gs; i++)
               {
                  if (nbs[i] != 0)
                  {
                     MPI_Irecv(buf, nldofs, MPITypeMap<T>::mpi_type,
                               gtopo.GetNeighborRank(nbs[i]),
                               43822 + gtopo.GetGroupMasterGroup(gr),
                               gtopo.GetComm(), &requests[request_counter]);
                     request_marker[request_counter] = gr;
                     request_counter++;
                     buf += nldofs;
                  }
               }
            }
         }
         break;
      }
      case byNeighbor: // one combined message per neighbor
      {
         for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
         {
            // In Reduce, the directions of Bcast are reversed: we send along
            // nbr_recv_groups and receive along nbr_send_groups.
            const int num_send_groups = nbr_recv_groups.RowSize(nbr);
            if (num_send_groups > 0)
            {
               T *buf_start = buf;
               const int *grp_list = nbr_recv_groups.GetRow(nbr);
               for (int i = 0; i < num_send_groups; i++)
               {
                  const int layout = 0; // ldata is an array on all ldofs
                  buf = CopyGroupToBuffer(ldata, buf, grp_list[i], layout);
               }
               MPI_Isend(buf_start, buf - buf_start, MPITypeMap<T>::mpi_type,
                         gtopo.GetNeighborRank(nbr), 43822,
                         gtopo.GetComm(), &requests[request_counter]);
               request_marker[request_counter] = -1; // mark as a send request
               request_counter++;
            }

            const int num_recv_groups = nbr_send_groups.RowSize(nbr);
            if (num_recv_groups > 0)
            {
               const int *grp_list = nbr_send_groups.GetRow(nbr);
               int recv_size = 0;
               for (int i = 0; i < num_recv_groups; i++)
               {
                  recv_size += group_ldof.RowSize(grp_list[i]);
               }
               MPI_Irecv(buf, recv_size, MPITypeMap<T>::mpi_type,
                         gtopo.GetNeighborRank(nbr), 43822,
                         gtopo.GetComm(), &requests[request_counter]);
               request_marker[request_counter] = nbr;
               request_counter++;
               buf_offsets[nbr] = buf - (T*)group_buf.GetData();
               buf += recv_size;
            }
         }
         MFEM_ASSERT(buf - (T*)group_buf.GetData() == group_buf_size, "");
         break;
      }
   }

   comm_lock = 2; // 2 - locked for Reduce
   num_requests = request_counter;
}
template <class T>
void GroupCommunicator::ReduceEnd(T *ldata, int layout, void (*Op)(OpData<T>))
{
   if (comm_lock == 0) { return; }
   // The above also handles the case (group_buf_size == 0).
   MFEM_VERIFY(comm_lock == 2, "object is NOT locked for Reduce");

   switch (mode)
   {
      case byGroup:
      {
         OpData<T> opd;
         opd.ldata = ldata;
         Array<int> group_num_req(group_ldof.Size());
         for (int gr = 1; gr < group_ldof.Size(); gr++)
         {
            group_num_req[gr] =
               gtopo.IAmMaster(gr) ? gtopo.GetGroupSize(gr)-1 : 0;
         }
         int idx;
         while (MPI_Waitany(num_requests, requests, &idx, MPI_STATUS_IGNORE),
                idx != MPI_UNDEFINED)
         {
            int gr = request_marker[idx];
            if (gr == -1) { continue; } // skip send requests

            // apply Op only after all receive requests for gr are done
            if ((--group_num_req[gr]) != 0) { continue; }

            opd.nldofs = group_ldof.RowSize(gr);
            opd.buf = (T *)group_buf.GetData() + buf_offsets[gr];
            opd.ldofs = (layout == 0) ?
                        group_ldof.GetRow(gr) : group_ltdof.GetRow(gr);
            opd.nb = gtopo.GetGroupSize(gr)-1;
            Op(opd);
         }
         break;
      }
      case byNeighbor:
      {
         MPI_Waitall(num_requests, requests, MPI_STATUSES_IGNORE);
         for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
         {
            // reductions arrive along the (reversed) send-groups table
            const int num_recv_groups = nbr_send_groups.RowSize(nbr);
            if (num_recv_groups > 0)
            {
               const int *grp_list = nbr_send_groups.GetRow(nbr);
               const T *buf = (T*)group_buf.GetData() + buf_offsets[nbr];
               for (int i = 0; i < num_recv_groups; i++)
               {
                  buf = ReduceGroupFromBuffer(buf, ldata, grp_list[i],
                                              layout, Op);
               }
            }
         }
         break;
      }
   }

   comm_lock = 0; // 0 - no lock
   num_requests = 0;
}
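// Commentary: a reduction mirrors the broadcast and is used the same way,
// e.g. summing every member's contribution into the masters' entries:
//
//    comm.ReduceBegin(data);
//    // ... independent local computation ...
//    comm.ReduceEnd<double>(data, 0, GroupCommunicator::Sum);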
template <class T>
void GroupCommunicator::Sum(OpData<T> opd)
{
   if (opd.nb == 1)
   {
      for (int i = 0; i < opd.nldofs; i++)
      { opd.ldata[opd.ldofs[i]] += opd.buf[i]; }
   }
   else
   {
      // the buffer holds opd.nb blocks of opd.nldofs entries each
      for (int i = 0; i < opd.nldofs; i++)
      {
         T data = opd.ldata[opd.ldofs[i]];
         for (int j = 0; j < opd.nb; j++) { data += opd.buf[j*opd.nldofs+i]; }
         opd.ldata[opd.ldofs[i]] = data;
      }
   }
}

template <class T>
void GroupCommunicator::Min(OpData<T> opd)
{
   for (int i = 0; i < opd.nldofs; i++)
   {
      T data = opd.ldata[opd.ldofs[i]];
      for (int j = 0; j < opd.nb; j++)
      { const T b = opd.buf[j*opd.nldofs+i]; if (data > b) { data = b; } }
      opd.ldata[opd.ldofs[i]] = data;
   }
}

template <class T>
void GroupCommunicator::Max(OpData<T> opd)
{
   for (int i = 0; i < opd.nldofs; i++)
   {
      T data = opd.ldata[opd.ldofs[i]];
      for (int j = 0; j < opd.nb; j++)
      { const T b = opd.buf[j*opd.nldofs+i]; if (data < b) { data = b; } }
      opd.ldata[opd.ldofs[i]] = data;
   }
}

template <class T>
void GroupCommunicator::BitOR(OpData<T> opd)
{
   for (int i = 0; i < opd.nldofs; i++)
   {
      T data = opd.ldata[opd.ldofs[i]];
      for (int j = 0; j < opd.nb; j++) { data |= opd.buf[j*opd.nldofs+i]; }
      opd.ldata[opd.ldofs[i]] = data;
   }
}
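// A user-defined reduction only has to match the Op callback signature used
// by ReduceEnd. Illustrative sketch ("Prod" is not part of MFEM): multiply
// the local entry by the corresponding entry of each received block, using
// the same buffer traversal as Sum above.
//
//    template <class T>
//    static void Prod(GroupCommunicator::OpData<T> opd)
//    {
//       for (int i = 0; i < opd.nldofs; i++)
//       {
//          T data = opd.ldata[opd.ldofs[i]];
//          for (int j = 0; j < opd.nb; j++)
//          { data *= opd.buf[j*opd.nldofs+i]; }
//          opd.ldata[opd.ldofs[i]] = data;
//       }
//    }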
void GroupCommunicator::PrintInfo(std::ostream &out) const
{
   char c = '\0';
   const int tag = 46800;
   const int myid = gtopo.MyRank();

   int num_sends = 0, num_recvs = 0;
   size_t mem_sends = 0, mem_recvs = 0;
   int num_master_groups = 0, num_empty_groups = 0;
   int num_active_neighbors = 0; // used only in 'byNeighbor' mode
   switch (mode)
   {
      case byGroup:
         for (int gr = 1; gr < group_ldof.Size(); gr++)
         {
            const int nldofs = group_ldof.RowSize(gr);
            if (nldofs == 0)
            {
               num_empty_groups++;
               continue;
            }
            if (gtopo.IAmMaster(gr))
            {
               num_sends += (gtopo.GetGroupSize(gr)-1);
               mem_sends += sizeof(double)*nldofs*(gtopo.GetGroupSize(gr)-1);
               num_master_groups++;
            }
            else
            {
               num_recvs++;
               mem_recvs += sizeof(double)*nldofs;
            }
         }
         break;

      case byNeighbor:
         for (int gr = 1; gr < group_ldof.Size(); gr++)
         {
            const int nldofs = group_ldof.RowSize(gr);
            if (nldofs == 0)
            {
               num_empty_groups++;
               continue;
            }
            if (gtopo.IAmMaster(gr))
            {
               num_master_groups++;
            }
         }
         for (int nbr = 1; nbr < nbr_send_groups.Size(); nbr++)
         {
            const int num_send_groups = nbr_send_groups.RowSize(nbr);
            if (num_send_groups > 0)
            {
               const int *grp_list = nbr_send_groups.GetRow(nbr);
               for (int i = 0; i < num_send_groups; i++)
               {
                  mem_sends += sizeof(double)*group_ldof.RowSize(grp_list[i]);
               }
               num_sends++;
            }

            const int num_recv_groups = nbr_recv_groups.RowSize(nbr);
            if (num_recv_groups > 0)
            {
               const int *grp_list = nbr_recv_groups.GetRow(nbr);
               for (int i = 0; i < num_recv_groups; i++)
               {
                  mem_recvs += sizeof(double)*group_ldof.RowSize(grp_list[i]);
               }
               num_recvs++;
            }
            if (num_send_groups > 0 || num_recv_groups > 0)
            {
               num_active_neighbors++;
            }
         }
         break;
   }
   if (myid != 0)
   {
      MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag, gtopo.GetComm(),
               MPI_STATUS_IGNORE);
   }
   else
   {
      out << "\nGroupCommunicator:\n";
   }
   out << "Rank " << myid << ":\n"
       "   mode             = " <<
       (mode == byGroup ? "byGroup" : "byNeighbor") << "\n"
       "   number of sends  = " << num_sends <<
       " (" << mem_sends << " bytes)\n"
       "   number of recvs  = " << num_recvs <<
       " (" << mem_recvs << " bytes)\n";
   out <<
       "   num groups       = " << group_ldof.Size() << " = " <<
       num_master_groups << " + " <<
       group_ldof.Size()-num_master_groups-num_empty_groups << " + " <<
       num_empty_groups << " (master + slave + empty)\n";
   if (mode == byNeighbor)
   {
      out <<
          "   num neighbors    = " << nbr_send_groups.Size() << " = " <<
          num_active_neighbors << " + " <<
          nbr_send_groups.Size()-num_active_neighbors <<
          " (active + inactive)\n";
   }
   if (myid != gtopo.NRanks()-1)
   {
      out << std::flush;
      MPI_Send(&c, 1, MPI_CHAR, myid+1, tag, gtopo.GetComm());
   }
   else
   {
      out << std::endl;
   }
   MPI_Barrier(gtopo.GetComm());
}
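// Commentary: PrintInfo serializes its output with a token ring -- each rank
// blocks on an MPI_Recv from rank-1 before printing and then passes the
// token to rank+1 -- so the per-rank reports appear in rank order even on a
// shared output stream.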
   delete [] buf_offsets;
   delete [] request_marker;
// instantiate GroupCommunicator::Bcast and Reduce for int and double
template void GroupCommunicator::BcastBegin<int>(int *, int);
template void GroupCommunicator::BcastEnd<int>(int *, int);
template void GroupCommunicator::ReduceBegin<int>(const int *);
template void GroupCommunicator::ReduceEnd<int>(
   int *, int, void (*)(OpData<int>));

template void GroupCommunicator::BcastBegin<double>(double *, int);
template void GroupCommunicator::BcastEnd<double>(double *, int);
template void GroupCommunicator::ReduceBegin<double>(const double *);
template void GroupCommunicator::ReduceEnd<double>(
   double *, int, void (*)(OpData<double>));
// instantiate the reduce operations for int and double
template void GroupCommunicator::Sum<int>(OpData<int>);
template void GroupCommunicator::Min<int>(OpData<int>);
template void GroupCommunicator::Max<int>(OpData<int>);
template void GroupCommunicator::BitOR<int>(OpData<int>);

template void GroupCommunicator::Sum<double>(OpData<double>);
template void GroupCommunicator::Min<double>(OpData<double>);
template void GroupCommunicator::Max<double>(OpData<double>);
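// Commentary: the explicit instantiations above let the template definitions
// stay in this .cpp file; only the int and double versions are available to
// code that includes the header.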
static void DebugRankCoords(int** coords, int dim, int size)
{
   for (int i = 0; i < size; i++)
   {
      mfem::out << "Rank " << i << " coords: ";
      for (int j = 0; j < dim; j++)
      {
         mfem::out << coords[i][j] << " ";
      }
      mfem::out << endl;
   }
}

struct CompareCoords
{
   CompareCoords(int coord) : coord(coord) {}
   int coord;

   bool operator()(int* const &a, int* const &b) const
   {
      return a[coord] < b[coord];
   }
};

void KdTreeSort(int** coords, int d, int dim, int size)
{
   if (size > 1)
   {
      bool all_same = true;
      for (int i = 1; i < size && all_same; i++)
      {
         for (int j = 0; j < dim; j++)
         {
            if (coords[i][j] != coords[0][j]) { all_same = false; break; }
         }
      }
      if (all_same) { return; }

      // sort by the d-th coordinate
      std::sort(coords, coords + size, CompareCoords(d));

      // split into two halves and recurse on the next coordinate
      int next = (d + 1) % dim;
      if (coords[0][d] < coords[size-1][d])
      {
         KdTreeSort(coords, next, dim, size/2);
         KdTreeSort(coords + size/2, next, dim, size - size/2);
      }
   }
}
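// Commentary: KdTreeSort recursively bisects the rank list while cycling
// through the torus dimensions, so the final order traces a Z-like
// space-filling curve through the coordinates; ReorderRanksZCurve below uses
// the resulting positions to renumber the ranks.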
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
{
   MPI_Status status;

   int rank, size;
   MPI_Comm_rank(comm, &rank);
   MPI_Comm_size(comm, &size);

   int dim;
   MPIX_Torus_ndims(&dim);

   int* mycoords = new int[dim + 1];
   MPIX_Rank2torus(rank, mycoords);

   MPI_Send(mycoords, dim, MPI_INT, 0, 111, comm);
   delete [] mycoords;

   if (rank == 0)
   {
      int** coords = new int*[size];
      for (int i = 0; i < size; i++)
      {
         coords[i] = new int[dim + 1];
         coords[i][dim] = i; // remember the original rank
         MPI_Recv(coords[i], dim, MPI_INT, i, 111, comm, &status);
      }

      KdTreeSort(coords, 0, dim, size);

      for (int i = 0; i < size; i++)
      {
         MPI_Send(&coords[i][dim], 1, MPI_INT, i, 112, comm);
         delete [] coords[i];
      }
      delete [] coords;
   }

   int new_rank;
   MPI_Recv(&new_rank, 1, MPI_INT, 0, 112, comm, &status);

   MPI_Comm new_comm;
   MPI_Comm_split(comm, 0, new_rank, &new_comm);
   return new_comm;
}
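// Commentary: a sketch of the intended use. MPIX_Torus_ndims and
// MPIX_Rank2torus are IBM Blue Gene/Q MPI extensions, so this path is only
// meaningful on that machine:
//
//    MPI_Comm reordered = mfem::ReorderRanksZCurve(MPI_COMM_WORLD);
//    // ... construct the ParMesh / solvers on 'reordered' instead ...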