12 #include "../config/config.hpp" 39 group_lproc(gt.group_lproc)
41 gt.groupmaster_lproc.
Copy(groupmaster_lproc);
42 gt.lproc_proc.
Copy(lproc_proc);
43 gt.group_mgroup.
Copy(group_mgroup);
46 void GroupTopology::ProcToLProc()
49 MPI_Comm_size(MyComm, &
NRanks);
51 map<int, int> proc_lproc;
56 int lproc_counter = 0;
59 const pair<const int, int>
p(group_lproc.
GetJ()[i], lproc_counter);
60 if (proc_lproc.insert(
p).second)
67 lproc_proc.
SetSize(lproc_counter);
68 for (map<int, int>::iterator it = proc_lproc.begin();
69 it != proc_lproc.end(); ++it)
71 lproc_proc[it->second] = it->first;
76 group_lproc.
GetJ()[i] = proc_lproc[group_lproc.
GetJ()[i]];
79 for (
int i = 0; i <
NGroups(); i++)
81 groupmaster_lproc[i] = proc_lproc[groupmaster_lproc[i]];
89 Table group_mgroupandproc;
92 for (
int i = 0; i <
NGroups(); i++)
94 int j = group_mgroupandproc.
GetI()[i];
95 group_mgroupandproc.
GetI()[i+1] = j + group_lproc.
RowSize(i) + 1;
96 group_mgroupandproc.
GetJ()[j] = i;
98 for (
int k = group_lproc.
GetI()[i];
99 j < group_mgroupandproc.
GetI()[i+1]; j++, k++)
101 group_mgroupandproc.
GetJ()[j] = group_lproc.
GetJ()[k];
109 for (
int i = 0; i <
NGroups(); i++)
127 MFEM_DEBUG_DO(group_mgroup = -1);
128 for (
int g = 0; g <
NGroups(); g++)
130 if (
IAmMaster(g)) { group_mgroup[g] = g; }
138 for (
int g = 1; g <
NGroups(); g++)
144 for (
int i = 0; i < gs; i++)
157 lproc_cgroup_list.
Sort();
158 lproc_cgroup_list.
Unique();
170 for (
int nbr = 1; nbr < lproc_cgroup.
Size(); nbr++)
172 const int send_row = 2*(nbr-1);
173 const int recv_row = send_row+1;
174 const int ng = lproc_cgroup.
RowSize(nbr);
175 const int *g = lproc_cgroup.
GetRow(nbr);
176 for (
int j = 0; j < ng; j++)
178 const int gs = group_lproc.
RowSize(g[j]);
191 for (
int nbr = 1; nbr < lproc_cgroup.
Size(); nbr++)
193 const int send_row = 2*(nbr-1);
194 const int recv_row = send_row+1;
195 const int ng = lproc_cgroup.
RowSize(nbr);
196 const int *g = lproc_cgroup.
GetRow(nbr);
197 for (
int j = 0; j < ng; j++)
199 const int gs = group_lproc.
RowSize(g[j]);
204 send_row, group_mgroupandproc.
GetRow(g[j]), gs+1);
215 send_requests = MPI_REQUEST_NULL;
216 recv_requests = MPI_REQUEST_NULL;
217 for (
int nbr = 1; nbr < lproc_cgroup.
Size(); nbr++)
219 const int send_row = 2*(nbr-1);
220 const int recv_row = send_row+1;
221 const int send_size = buffer.
RowSize(send_row);
222 const int recv_size = buffer.
RowSize(recv_row);
225 MPI_Isend(buffer.
GetRow(send_row), send_size, MPI_INT, lproc_proc[nbr],
226 mpitag, MyComm, &send_requests[nbr-1]);
230 MPI_Irecv(buffer.
GetRow(recv_row), recv_size, MPI_INT, lproc_proc[nbr],
231 mpitag, MyComm, &recv_requests[nbr-1]);
235 if (recv_requests.Size() > 0)
239 while (MPI_Waitany(recv_requests.Size(), recv_requests.GetData(), &idx,
241 idx != MPI_UNDEFINED)
243 const int recv_size = buffer.
RowSize(2*idx+1);
244 const int *recv_buf = buffer.
GetRow(2*idx+1);
245 for (
int s = 0;
s < recv_size;
s += recv_buf[
s]+2)
248 const int g = groups.
Lookup(group);
249 MFEM_ASSERT(group_mgroup[g] == -1,
"communication error");
250 group_mgroup[g] = recv_buf[
s+1];
255 MPI_Waitall(send_requests.
Size(), send_requests.
GetData(),
256 MPI_STATUSES_IGNORE);
263 os <<
"\ncommunication_groups\n";
264 os <<
"number_of_groups " <<
NGroups() <<
"\n\n";
266 os <<
"# number of entities in each group, followed by group ids in group\n";
267 for (
int group_id = 0; group_id <
NGroups(); ++group_id)
270 const int * group_ptr =
GetGroup(group_id);
272 for (
int group_member_index = 0; group_member_index < group_size;
273 ++group_member_index)
291 int number_of_groups = -1;
293 MFEM_VERIFY(ident ==
"number_of_groups",
294 "GroupTopology::Load - expected 'number_of_groups' entry.");
295 in >> number_of_groups;
301 for (
int group_id = 0; group_id < number_of_groups; ++group_id)
314 integer_sets.
Insert(integer_set);
317 Create(integer_sets, 823);
323 group_lproc.
Copy(copy.group_lproc);
324 groupmaster_lproc.
Copy(copy.groupmaster_lproc);
325 lproc_proc.
Copy(copy.lproc_proc);
326 group_mgroup.
Copy(copy.group_mgroup);
333 mfem::Swap(groupmaster_lproc, other.groupmaster_lproc);
358 for (
int i = 0; i < ldof_group.
Size(); i++)
360 int group = ldof_group[i];
368 for (
int i = 0; i < ldof_group.
Size(); i++)
370 int group = ldof_group[i];
383 int request_counter = 0;
402 request_counter += gr_requests;
407 requests =
new MPI_Request[request_counter];
417 if (nldofs == 0) {
continue; }
427 for (
int i = 0; i < grp_size; i++)
429 if (grp_nbr_list[i] != 0)
441 if (nldofs == 0) {
continue; }
451 for (
int i = 0; i < grp_size; i++)
453 if (grp_nbr_list[i] != 0)
472 if (num_recv_groups > 0)
475 group_ids.
SetSize(num_recv_groups);
476 for (
int i = 0; i < num_recv_groups; i++)
479 group_ids[i].two = grp_list[i];
482 for (
int i = 0; i < num_recv_groups; i++)
484 grp_list[i] = group_ids[i].two;
509 for (
int i = 0; i < nldofs; i++)
524 if (num_send_groups > 0)
527 for (
int i = 0; i < num_send_groups; i++)
529 const int group = grp_list[i];
539 if (num_send_groups > 0)
542 for (
int i = 0; i < num_send_groups; i++)
544 const int group = grp_list[i];
560 if (num_recv_groups > 0)
563 for (
int i = 0; i < num_recv_groups; i++)
565 const int group = grp_list[i];
575 if (num_recv_groups > 0)
578 for (
int i = 0; i < num_recv_groups; i++)
580 const int group = grp_list[i];
606 for (
int j = 0; j < nltdofs; j++)
608 buf[j] = ldata[ltdofs[j]];
610 return buf + nltdofs;
616 for (
int j = 0; j < nldofs; j++)
618 buf[j] = ldata[ldofs[j]];
627 int group,
int layout)
const 640 for (
int j = 0; j < nldofs; j++)
642 ldata[ltdofs[j]] = buf[j];
649 for (
int j = 0; j < nldofs; j++)
651 ldata[ldofs[j]] = buf[j];
661 int group,
int layout,
668 opd.
buf =
const_cast<T*
>(buf);
674 MFEM_ABORT(
"layout 1 is not supported");
676 for (
int j = 0; j < opd.
nldofs; j++)
701 MFEM_VERIFY(
comm_lock == 0,
"object is already in use");
705 int request_counter = 0;
716 "'group_ltdof' is not set, use SetLTDofTable()");
728 if (nldofs == 0) {
continue; }
750 for (
int i = 0; i < gs; i++)
778 if (num_send_groups > 0)
785 for (
int i = 0; i < num_send_groups; i++)
801 if (num_recv_groups > 0)
809 for (
int i = 0; i < num_recv_groups; i++)
840 MFEM_VERIFY(
comm_lock == 1,
"object is NOT locked for Bcast");
850 else if (layout == 0)
855 idx != MPI_UNDEFINED)
858 if (gr == -1) {
continue; }
873 idx != MPI_UNDEFINED)
876 if (nbr == -1) {
continue; }
879 if (num_recv_groups > 0)
883 for (
int i = 0; i < num_recv_groups; i++)
900 MFEM_VERIFY(
comm_lock == 0,
"object is already in use");
904 int request_counter = 0;
915 if (nldofs == 0) {
continue; }
919 const int layout = 0;
937 for (
int i = 0; i < gs; i++)
964 if (num_send_groups > 0)
968 for (
int i = 0; i < num_send_groups; i++)
970 const int layout = 0;
986 if (num_recv_groups > 0)
990 for (
int i = 0; i < num_recv_groups; i++)
1022 MFEM_VERIFY(
comm_lock == 2,
"object is NOT locked for Reduce");
1038 idx != MPI_UNDEFINED)
1041 if (gr == -1) {
continue; }
1045 if ((--group_num_req[gr]) != 0) {
continue; }
1051 opd.
ldofs = (layout == 0) ?
1067 if (num_recv_groups > 0)
1071 for (
int i = 0; i < num_recv_groups; i++)
1091 for (
int i = 0; i < opd.
nldofs; i++)
1098 for (
int i = 0; i < opd.
nldofs; i++)
1101 for (
int j = 0; j < opd.
nb; j++)
1113 for (
int i = 0; i < opd.
nldofs; i++)
1116 for (
int j = 0; j < opd.
nb; j++)
1131 for (
int i = 0; i < opd.
nldofs; i++)
1134 for (
int j = 0; j < opd.
nb; j++)
1149 for (
int i = 0; i < opd.
nldofs; i++)
1152 for (
int j = 0; j < opd.
nb; j++)
1163 const int tag = 46800;
1166 int num_sends = 0, num_recvs = 0;
1167 size_t mem_sends = 0, mem_recvs = 0;
1168 int num_master_groups = 0, num_empty_groups = 0;
1169 int num_active_neighbors = 0;
1185 num_master_groups++;
1190 mem_recvs +=
sizeof(double)*nldofs;
1206 num_master_groups++;
1212 if (num_send_groups > 0)
1215 for (
int i = 0; i < num_send_groups; i++)
1223 if (num_recv_groups > 0)
1226 for (
int i = 0; i < num_recv_groups; i++)
1232 if (num_send_groups > 0 || num_recv_groups > 0)
1234 num_active_neighbors++;
1241 MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag,
gtopo.
GetComm(),
1246 os <<
"\nGroupCommunicator:\n";
1248 os <<
"Rank " << myid <<
":\n" 1250 (
mode ==
byGroup ?
"byGroup" :
"byNeighbor") <<
"\n" 1251 " number of sends = " << num_sends <<
1252 " (" << mem_sends <<
" bytes)\n" 1253 " number of recvs = " << num_recvs <<
1254 " (" << mem_recvs <<
" bytes)\n";
1257 num_master_groups <<
" + " <<
1259 num_empty_groups <<
" (master + slave + empty)\n";
1264 num_active_neighbors <<
" + " <<
1266 " (active + inactive)\n";
1271 MPI_Send(&c, 1, MPI_CHAR, myid+1, tag,
gtopo.
GetComm());
1291 template void GroupCommunicator::BcastBegin<int>(
int *, int)
const;
1292 template void GroupCommunicator::BcastEnd<int>(
int *, int)
const;
1293 template void GroupCommunicator::ReduceBegin<int>(
const int *)
const;
1294 template void GroupCommunicator::ReduceEnd<int>(
1295 int *, int, void (*)(OpData<int>))
const;
1297 template void GroupCommunicator::BcastBegin<double>(
double *, int)
const;
1298 template void GroupCommunicator::BcastEnd<double>(
double *, int)
const;
1299 template void GroupCommunicator::ReduceBegin<double>(
const double *)
const;
1300 template void GroupCommunicator::ReduceEnd<double>(
1301 double *, int, void (*)(OpData<double>))
const;
1306 template void GroupCommunicator::Sum<int>(OpData<int>);
1307 template void GroupCommunicator::Min<int>(OpData<int>);
1308 template void GroupCommunicator::Max<int>(OpData<int>);
1309 template void GroupCommunicator::BitOR<int>(OpData<int>);
1311 template void GroupCommunicator::Sum<double>(OpData<double>);
1312 template void GroupCommunicator::Min<double>(OpData<double>);
1313 template void GroupCommunicator::Max<double>(OpData<double>);
1317 static void DebugRankCoords(
int** coords,
int dim,
int size)
1319 for (
int i = 0; i < size; i++)
1321 mfem::out <<
"Rank " << i <<
" coords: ";
1322 for (
int j = 0; j <
dim; j++)
1330 struct CompareCoords
1332 CompareCoords(
int coord) : coord(coord) {}
1335 bool operator()(
int*
const &
a,
int*
const &
b)
const 1336 {
return a[coord] <
b[coord]; }
1343 bool all_same =
true;
1344 for (
int i = 1; i < size && all_same; i++)
1346 for (
int j = 0; j <
dim; j++)
1348 if (coords[i][j] != coords[0][j]) { all_same =
false;
break; }
1351 if (all_same) {
return; }
1354 std::sort(coords, coords + size, CompareCoords(d));
1355 int next = (d + 1) %
dim;
1357 if (coords[0][d] < coords[size-1][d])
1375 MPI_Comm_rank(comm, &rank);
1376 MPI_Comm_size(comm, &size);
1379 MPIX_Torus_ndims(&
dim);
1381 int* mycoords =
new int[
dim + 1];
1382 MPIX_Rank2torus(rank, mycoords);
1384 MPI_Send(mycoords,
dim, MPI_INT, 0, 111, comm);
1389 int** coords =
new int*[size];
1390 for (
int i = 0; i < size; i++)
1392 coords[i] =
new int[
dim + 1];
1394 MPI_Recv(coords[i],
dim, MPI_INT, i, 111, comm, &status);
1401 for (
int i = 0; i < size; i++)
1403 MPI_Send(&coords[i][
dim], 1, MPI_INT, i, 112, comm);
1404 delete [] coords[i];
1410 MPI_Recv(&new_rank, 1, MPI_INT, 0, 112, comm, &status);
1413 MPI_Comm_split(comm, 0, new_rank, &new_comm);
int Lookup(IntegerSet &s)
void Create(ListOfIntegerSets &groups, int mpitag)
Set up the group topology given the list of sets of shared entities.
int NRanks() const
Return the number of MPI ranks within this object's communicator.
int GetGroupMasterRank(int g) const
Return the rank of the group master for group 'g'.
void Recreate(const int n, const int *p)
Create an integer set from C-array 'p' of 'n' integers. Overwrites any existing set data...
void Unique()
Removes duplicities from a sorted array. This requires operator== to be defined for T...
void AsTable(Table &t)
Write the list of sets into table 't'.
Helper struct to convert a C++ type to an MPI type.
GroupCommunicator(const GroupTopology &gt, Mode m=byNeighbor)
Construct a GroupCommunicator object.
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
void AddColumnsInRow(int r, int ncol)
void MakeI(int nrows)
Next 7 methods are used together with the default constructor.
T * CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const
Copy the entries corresponding to the group group from the local array ldata to the buffer buf...
void Create(const Array< int > &ldof_group)
Initialize the communicator from a local-dof to group map. Finalize() is called internally.
void ReduceEnd(T *ldata, int layout, void(*Op)(OpData< T >)) const
Finalize reduction operation started with ReduceBegin().
void SetDims(int rows, int nnz)
T * GetData()
Returns the data.
int GetGroupMasterGroup(int g) const
Return the group number in the master for group 'g'.
int NGroups() const
Return the number of groups.
int GetNeighborRank(int i) const
Return the MPI rank of neighbor 'i'.
int MyRank() const
Return the MPI rank within this object's communicator.
int GetGroupMaster(int g) const
Return the neighbor index of the group master for a given group. Neighbor 0 is the local processor...
void GetNeighborLDofTable(Table &nbr_ldof) const
Dofs to be received from communication neighbors.
void Swap(GroupTopology &other)
Swap the internal data with another GroupTopology object.
void Save(std::ostream &out) const
Save the data in a stream.
void skip_comment_lines(std::istream &is, const char comment_char)
Check if the stream starts with comment_char. If so skip it.
void AddConnections(int r, const int *c, int nc)
int PickElementInSet(int i)
Return the value of the first element of the ith set.
void MakeFromList(int nrows, const Array< Connection > &list)
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
int Append(const T &el)
Append element 'el' to array, resize if necessary.
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
const GroupTopology & gtopo
void ReduceBegin(const T *ldata) const
Begin reduction operation within each group where the master is the root.
void AddConnection(int r, int c)
void BcastBegin(T *ldata, int layout) const
Begin a broadcast within each group where the master is the root.
int Insert(IntegerSet &s)
Check to see if set 's' is in the list. If not append it to the end of the list. Returns the index of...
void Reserve(int capacity)
Ensures that the allocated size is at least the given size.
const T * ReduceGroupFromBuffer(const T *buf, T *ldata, int group, int layout, void(*Op)(OpData< T >)) const
Perform the reduction operation Op to the entries of group group using the values from the buffer buf...
const int * GetGroup(int g) const
Return a pointer to a list of neighbors for a given group. Neighbor 0 is the local processor...
const T * CopyGroupFromBuffer(const T *buf, T *ldata, int group, int layout) const
Copy the entries corresponding to the group group from the buffer buf to the local array ldata...
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
void Sort()
Sorts the array in ascending order. This requires operator< to be defined for T.
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
void Swap(Array< T > &, Array< T > &)
double p(const Vector &x, double t)
void GetNeighborLTDofTable(Table &nbr_ltdof) const
Dofs to be sent to communication neighbors.
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
Communications are performed one group at a time.
void AddAColumnInRow(int r)
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
Helper struct for defining a connectivity table, see Table::MakeFromList.
void SetLTDofTable(const Array< int > &ldof_ltdof)
Initialize the internal group_ltdof Table.
Data structure on which we define reduce operations. The data is associated with (and the operation i...
~GroupCommunicator()
Destroy a GroupCommunicator object, deallocating internal data structures and buffers.
MPI_Comm GetComm() const
Return the MPI communicator.
int GetGroupSize(int g) const
Get the number of processors in a group.
int Size() const
Returns the number of TYPE I elements.
void KdTreeSort(int **coords, int d, int dim, int size)
int index(int i, int j, int nx, int ny)
void Copy(Array &copy) const
Create a copy of the internal array to the provided copy.
int Size_of_connections() const
int Size() const
Return the logical size of the array.
void SetComm(MPI_Comm comm)
Set the MPI communicator to 'comm'.
void Copy(Table &copy) const
void BcastEnd(T *ldata, int layout) const
Finalize a broadcast started with BcastBegin().
void Copy(GroupTopology &copy) const
Copy the internal data to the external 'copy'.
void PrintInfo(std::ostream &out=mfem::out) const
Print information about the GroupCommunicator from all MPI ranks.
void Load(std::istream &in)
Load the data from a stream.
bool IAmMaster(int g) const
Return true if I am master for group 'g'.
int GetNumNeighbors() const
Return the number of neighbors including the local processor.
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.