12 #include "../config/config.hpp"

37      group_lproc(gt.group_lproc)
39    gt.groupmaster_lproc.Copy(groupmaster_lproc);
40    gt.lproc_proc.Copy(lproc_proc);
41    gt.group_mgroup.Copy(group_mgroup);

44 void GroupTopology::ProcToLProc()
47    MPI_Comm_size(MyComm, &NRanks);
49    map<int, int> proc_lproc;
51    int lproc_counter = 0;
54       const pair<const int, int> p(group_lproc.GetJ()[i], lproc_counter);
55       if (proc_lproc.insert(p).second)
62    lproc_proc.SetSize(lproc_counter);
63    for (map<int, int>::iterator it = proc_lproc.begin();
64         it != proc_lproc.end(); ++it)
66       lproc_proc[it->second] = it->first;
71       group_lproc.GetJ()[i] = proc_lproc[group_lproc.GetJ()[i]];
74    for (int i = 0; i < NGroups(); i++)
76       groupmaster_lproc[i] = proc_lproc[groupmaster_lproc[i]];
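The map-based compaction in ProcToLProc turns the set of MPI ranks that actually appear in the groups into contiguous "local processor" indices, with the calling rank mapped to index 0 (which is why later code tests groupmaster_lproc[i] != 0 to detect remote masters). A minimal standalone sketch of the same idea, using illustrative names (CompactRanks, ranks_used) that are not part of MFEM:

#include <map>
#include <vector>

// Sketch only: compact an arbitrary set of global MPI ranks into contiguous
// local indices 0..n-1, reserving index 0 for the calling rank itself.
std::vector<int> CompactRanks(const std::vector<int> &ranks_used, int my_rank)
{
   std::map<int, int> proc_lproc;            // global rank -> local index
   int lproc_counter = 0;
   proc_lproc[my_rank] = lproc_counter++;    // self is always local proc 0
   for (int r : ranks_used)
   {
      if (proc_lproc.insert(std::make_pair(r, lproc_counter)).second)
      {
         lproc_counter++;                    // rank seen for the first time
      }
   }
   std::vector<int> lproc_proc(lproc_counter);  // local index -> global rank
   for (const auto &p : proc_lproc) { lproc_proc[p.second] = p.first; }
   return lproc_proc;
}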
84    Table group_mgroupandproc;
87    for (int i = 0; i < NGroups(); i++)
89       int j = group_mgroupandproc.GetI()[i];
90       group_mgroupandproc.GetI()[i+1] = j + group_lproc.RowSize(i) + 1;
91       group_mgroupandproc.GetJ()[j] = i;
93       for (int k = group_lproc.GetI()[i];
94            j < group_mgroupandproc.GetI()[i+1]; j++, k++)
96          group_mgroupandproc.GetJ()[j] = group_lproc.GetJ()[k];

104   for (int i = 0; i < NGroups(); i++)

120   int send_counter = 0;
121   int recv_counter = 0;
122   for (int i = 1; i < NGroups(); i++)
123      if (groupmaster_lproc[i] != 0)
129         send_counter += group_lproc.RowSize(i)-1;

132   MPI_Request *requests = new MPI_Request[send_counter];
133   MPI_Status  *statuses = new MPI_Status[send_counter];

135   int max_recv_size = 0;
137   for (int i = 1; i < NGroups(); i++)
139      if (groupmaster_lproc[i] == 0)
143         for (int j = group_lproc.GetI()[i];
144              j < group_lproc.GetI()[i+1]; j++)
146            if (group_lproc.GetJ()[j] != 0)
148               MPI_Isend(group_mgroupandproc.GetRow(i),
149                         group_mgroupandproc.RowSize(i),
151                         lproc_proc[group_lproc.GetJ()[j]],
154                         &requests[send_counter]);
160         if (max_recv_size < group_lproc.RowSize(i))
162            max_recv_size = group_lproc.RowSize(i);

168   if (recv_counter > 0)
172      int *recv_buf = new int[max_recv_size];
173      for ( ; recv_counter > 0; recv_counter--)
175         MPI_Recv(recv_buf, max_recv_size, MPI_INT,
176                  MPI_ANY_SOURCE, mpitag, MyComm, &status);
178         MPI_Get_count(&status, MPI_INT, &count);
180         group.Recreate(count-1, recv_buf+1);
181         int g = groups.Lookup(group);
182         group_mgroup[g] = recv_buf[0];
184         if (lproc_proc[groupmaster_lproc[g]] != status.MPI_SOURCE)
186            cerr << "\n\n\nGroupTopology::GroupTopology: "
187                 << MyRank() << ": ERROR\n\n\n" << endl;

194   MPI_Waitall(send_counter, requests, statuses);
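In the exchange above, each rank that masters a group posts non-blocking sends of its group_mgroupandproc row (the group's number on the master followed by the ranks in the group) to the other members, while every rank receives the rows it is missing from MPI_ANY_SOURCE, recovers the actual length with MPI_Get_count, looks the rank set up in groups, and records the master's group number in group_mgroup. A self-contained sketch of that send/receive pattern, with hypothetical names (ExchangeGroupIds, members, send_buf) that are not part of MFEM:

#include <mpi.h>
#include <vector>

// Illustrative pattern only: a master posts non-blocking sends to the other
// group members; receivers accept from any source and use MPI_Get_count to
// discover the actual message length.
void ExchangeGroupIds(MPI_Comm comm, bool i_am_master,
                      const std::vector<int> &members,  // ranks to notify
                      std::vector<int> &send_buf,       // [group id, ranks...]
                      int max_recv_size, int mpitag)
{
   if (i_am_master)
   {
      std::vector<MPI_Request> requests(members.size());
      for (std::size_t j = 0; j < members.size(); j++)
      {
         MPI_Isend(send_buf.data(), (int)send_buf.size(), MPI_INT,
                   members[j], mpitag, comm, &requests[j]);
      }
      MPI_Waitall((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);
   }
   else
   {
      std::vector<int> recv_buf(max_recv_size);
      MPI_Status status;
      MPI_Recv(recv_buf.data(), max_recv_size, MPI_INT,
               MPI_ANY_SOURCE, mpitag, comm, &status);
      int count;
      MPI_Get_count(&status, MPI_INT, &count);  // ints actually received
      // recv_buf[0] is the sender's group id; recv_buf[1..count-1] are ranks.
   }
}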
202   out << "\ncommunication_groups\n";
203   out << "number_of_groups " << NGroups() << "\n\n";

205   out << "# number of entities in each group, followed by group ids in group\n";
206   for (int group_id = 0; group_id < NGroups(); ++group_id)
209      const int * group_ptr = GetGroup(group_id);
211      for (int group_member_index = 0; group_member_index < group_size;
212           ++group_member_index)

230   int number_of_groups = -1;
232   MFEM_VERIFY(ident == "number_of_groups",
233               "GroupTopology::Load - expected 'number_of_groups' entry.");
234   in >> number_of_groups;

240   for (int group_id = 0; group_id < number_of_groups; ++group_id)
247      for (int index = 0; index < group_size; ++index)
253      integer_sets.Insert(integer_set);

256   Create(integer_sets, 823);
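Based only on the Save/Load statements above, the section written to the stream has this shape; the two groups below are a made-up illustration, not output from a real run:

communication_groups
number_of_groups 2

# number of entities in each group, followed by group ids in group
1 0
2 0 1

Load reads the same section back, rebuilds each group as an IntegerSet, inserts it into a ListOfIntegerSets, and finally calls Create(integer_sets, 823).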
276   for (int i = 0; i < ldof_group.Size(); i++)
278      int group = ldof_group[i];
286   for (int i = 0; i < ldof_group.Size(); i++)
288      int group = ldof_group[i];

301   int request_counter = 0;
303   for (int gr = 1; gr < group_ldof.Size(); gr++)
304      if (group_ldof.RowSize(gr) != 0)
316         request_counter += gr_requests;
317         group_buf_size += gr_requests * group_ldof.RowSize(gr);

320   requests = new MPI_Request[request_counter];
321   statuses = new MPI_Status[request_counter];

327   if (group_buf_size == 0) { return; }
332   group_buf.SetSize(group_buf_size*sizeof(T));
333   buf = (T *)group_buf.GetData();

340   int i, gr, request_counter = 0;
342   for (gr = 1; gr < group_ldof.Size(); gr++)
344      const int nldofs = group_ldof.RowSize(gr);
347      if (nldofs == 0) { continue; }
357            &requests[request_counter]);
365         const int *ldofs = group_ldof.GetRow(gr);
366         for (i = 0; i < nldofs; i++)
368            buf[i] = ldata[ldofs[i]];
373         const int *nbs = gtopo.GetGroup(gr);
374         for (i = 0; i < gs; i++)
384               &requests[request_counter]);

392   MPI_Waitall(request_counter, requests, statuses);

397   buf = (T *)group_buf.GetData();
398   for (gr = 1; gr < group_ldof.Size(); gr++)
400      const int nldofs = group_ldof.RowSize(gr);
403      if (nldofs == 0) { continue; }
407      const int *ldofs = group_ldof.GetRow(gr);
408      for (i = 0; i < nldofs; i++)
410         ldata[ldofs[i]] = buf[i];
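The loop structure above is the heart of Bcast: group masters pack their ldofs into group_buf and post sends to the other members, non-masters post receives, and after MPI_Waitall the received values are copied back into ldata. A minimal usage sketch (gtopo, ldof_group and x are illustrative placeholders, with gtopo an already constructed GroupTopology, e.g. from a ParMesh):

// Illustrative usage sketch, not taken from the listing above.
GroupCommunicator gcomm(gtopo);      // gtopo: an existing GroupTopology
gcomm.Create(ldof_group);            // ldof_group: group id of each local dof

double *x = new double[ldof_group.Size()];
// ... fill at least the entries whose groups this rank masters ...
gcomm.Bcast(x);                      // masters overwrite the other copies
delete [] x;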
421   if (group_buf_size == 0) { return; }

423   int i, gr, request_counter = 0;
426   group_buf.SetSize(group_buf_size*sizeof(T));
429   for (gr = 1; gr < group_ldof.Size(); gr++)
434      if (opd.nldofs == 0) { continue; }
440         for (i = 0; i < opd.nldofs; i++)
451            &requests[request_counter]);
458         const int *nbs = gtopo.GetGroup(gr);
459         for (i = 0; i < gs; i++)
469               &requests[request_counter]);

477   MPI_Waitall(request_counter, requests, statuses);

481   for (gr = 1; gr < group_ldof.Size(); gr++)
486      if (opd.nldofs == 0) { continue; }

505   for (int i = 0; i < opd.nldofs; i++)
508      for (int j = 0; j < opd.nb; j++)

519   for (int i = 0; i < opd.nldofs; i++)
522      for (int j = 0; j < opd.nb; j++)

537   for (int i = 0; i < opd.nldofs; i++)
540      for (int j = 0; j < opd.nb; j++)

555   for (int i = 0; i < opd.nldofs; i++)
558      for (int j = 0; j < opd.nb; j++)
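Reduce mirrors Bcast with the direction reversed: non-masters send their slices, the master receives one slice per neighbor, and the four i/j loop nests above are the per-entry accumulation loops of the Sum, Min, Max and BitOR operations instantiated below. A typical call pairs a reduction with a broadcast so all copies end up identical (gcomm and x as in the sketch above; the call idiom is an illustration, not copied from this file):

gcomm.Reduce<double>(x, GroupCommunicator::Sum);  // masters accumulate the copies
gcomm.Bcast(x);                                   // masters redistribute the result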
575   template void GroupCommunicator::Bcast<int>(int *);
576   template void GroupCommunicator::Bcast<int>(int *, int);
577   template void GroupCommunicator::Reduce<int>(int *, void (*)(OpData<int>));

579   template void GroupCommunicator::Bcast<double>(double *);
580   template void GroupCommunicator::Bcast<double>(double *, int);
581   template void GroupCommunicator::Reduce<double>(
582      double *, void (*)(OpData<double>));

587   template void GroupCommunicator::Sum<int>(OpData<int>);
588   template void GroupCommunicator::Min<int>(OpData<int>);
589   template void GroupCommunicator::Max<int>(OpData<int>);
590   template void GroupCommunicator::BitOR<int>(OpData<int>);

592   template void GroupCommunicator::Sum<double>(OpData<double>);
593   template void GroupCommunicator::Min<double>(OpData<double>);
594   template void GroupCommunicator::Max<double>(OpData<double>);
598 static void DebugRankCoords(int** coords, int dim, int size)
600    for (int i = 0; i < size; i++)
602       std::cout << "Rank " << i << " coords: ";
603       for (int j = 0; j < dim; j++)
605          std::cout << coords[i][j] << " ";

613    CompareCoords(int coord) : coord(coord) {}
616    bool operator()(int* const &a, int* const &b) const
617    { return a[coord] < b[coord]; }

624    bool all_same = true;
625    for (int i = 1; i < size && all_same; i++)
627       for (int j = 0; j < dim; j++)
629          if (coords[i][j] != coords[0][j]) { all_same = false; break; }
632    if (all_same) { return; }

635    std::sort(coords, coords + size, CompareCoords(d));
636    int next = (d + 1) % dim;

638    if (coords[0][d] < coords[size-1][d])
641       KdTreeSort(coords + size/2, next, dim, size - size/2);

656    MPI_Comm_rank(comm, &rank);
657    MPI_Comm_size(comm, &size);
660    MPIX_Torus_ndims(&dim);

662    int* mycoords = new int[dim + 1];
663    MPIX_Rank2torus(rank, mycoords);
665    MPI_Send(mycoords, dim, MPI_INT, 0, 111, comm);

670    int** coords = new int*[size];
671    for (int i = 0; i < size; i++)
673       coords[i] = new int[dim + 1];
675       MPI_Recv(coords[i], dim, MPI_INT, i, 111, comm, &status);

682    for (int i = 0; i < size; i++)
684       MPI_Send(&coords[i][dim], 1, MPI_INT, i, 112, comm);

691    MPI_Recv(&new_rank, 1, MPI_INT, 0, 112, comm, &status);
694    MPI_Comm_split(comm, 0, new_rank, &new_comm);
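The fragments above implement the rank reordering: every rank sends its torus coordinates (obtained through the MPIX extensions) to rank 0, rank 0 kd-tree sorts the coordinate array with KdTreeSort and sends each rank its new position, and MPI_Comm_split with a single color builds the reordered communicator. A usage sketch follows, assuming ReorderRanksZCurve is available in your build; the header path and the mfem namespace are assumptions based on the usual MFEM layout:

#include <mpi.h>
#include "general/communication.hpp"   // assumed MFEM header path

int main(int argc, char *argv[])
{
   MPI_Init(&argc, &argv);
   // Reorder MPI_COMM_WORLD so that rank order follows the machine's torus
   // coordinates; on systems without the MPIX torus extensions this routine
   // may not be compiled in, so treat this as a sketch rather than portable code.
   MPI_Comm cart_comm = mfem::ReorderRanksZCurve(MPI_COMM_WORLD);
   // ... construct the ParMesh and solvers on cart_comm instead of
   //     MPI_COMM_WORLD ...
   MPI_Comm_free(&cart_comm);
   MPI_Finalize();
   return 0;
}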
int Lookup(IntegerSet &s)
int GetGroupMasterRank(int g) const
void Create(ListOfIntegerSets &groups, int mpitag)
int Size() const
Logical size of the array.
void Recreate(const int n, const int *p)
int GetGroupMasterGroup(int g) const
GroupCommunicator(GroupTopology &gt)
Helper struct to convert a C++ type to an MPI type.
MPI_Comm ReorderRanksZCurve(MPI_Comm comm)
void MakeI(int nrows)
Next 7 methods are used together with the default constructor.
int GetGroupSize(int g) const
void SetDims(int rows, int nnz)
const int * GetGroup(int g) const
void Copy(Array &copy) const
Create a copy of the current array.
T * GetData()
Returns the data.
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
int Size_of_connections() const
void skip_comment_lines(std::istream &is, const char comment_char)
int PickElementInSet(int i)
bool IAmMaster(int g) const
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
int Append(const T &el)
Append element to array, resize if necessary.
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
void AddConnection(int r, int c)
int Insert(IntegerSet &s)
void Reserve(int capacity)
Ensures that the allocated size is at least the given size.
static void Min(OpData< T >)
Reduce operation Min, instantiated for int and double.
int Size() const
Returns the number of TYPE I elements.
void Save(std::ostream &out) const
Save the data in a stream.
int GetNeighborRank(int i) const
static void Max(OpData< T >)
Reduce operation Max, instantiated for int and double.
void AddAColumnInRow(int r)
void mfem_error(const char *msg)
void SetSize(int nsize)
Change logical size of the array, keep existing entries.
Data structure on which we define reduce operations.
void Reduce(T *ldata, void(*Op)(OpData< T >))
Reduce within each group where the master is the root.
void KdTreeSort(int **coords, int d, int dim, int size)
void Create(Array< int > &ldof_group)
void Bcast(T *data, int layout)
Broadcast within each group where the master is the root. The data layout can be: ...
void Load(std::istream &in)
Load the data from a stream.
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.