#include "../config/config.hpp"
#include "../general/sort_pairs.hpp"
#include "../mesh/mesh_headers.hpp"
#include "../general/binaryio.hpp"

   ParInit(pmesh ? pmesh : orig.pmesh);

                        f ? f : global_fes->FEColl(),
                        global_fes->GetVDim(), global_fes->GetOrdering())

   int dim, int ordering)

   if (globNURBSext == NULL) { return NULL; }
   const ParNURBSExtension *pNURBSext =
      dynamic_cast<const ParNURBSExtension*>(parNURBSext);
   MFEM_ASSERT(pNURBSext, "need a ParNURBSExtension");
   NURBSExtension *tmp_globNURBSext = new NURBSExtension(*globNURBSext);
   return new ParNURBSExtension(tmp_globNURBSext, pNURBSext);

void ParFiniteElementSpace::ParInit(ParMesh *pm)

   pncmesh = pm->pncmesh;

   MFEM_ASSERT(own_ext, "internal error");
   ParNURBSExtension *pNe = new ParNURBSExtension(

void ParFiniteElementSpace::Construct()

   ConstructTrueNURBSDofs();
   GenerateGlobalOffsets();

   GenerateGlobalOffsets();

   ngedofs = ngfdofs = 0;

   ngdofs = ngvdofs + ngedofs + ngfdofs;

   ltdof_size = BuildParallelConformingInterpolation(
                   &P, &R, dof_offsets, tdof_offsets, &ldof_ltdof, false);

   long ltdofs = ltdof_size;
   long min_ltdofs, max_ltdofs, sum_ltdofs;

   MPI_Reduce(&ltdofs, &min_ltdofs, 1, MPI_LONG, MPI_MIN, 0, MyComm);
   MPI_Reduce(&ltdofs, &max_ltdofs, 1, MPI_LONG, MPI_MAX, 0, MyComm);
   MPI_Reduce(&ltdofs, &sum_ltdofs, 1, MPI_LONG, MPI_SUM, 0, MyComm);

   double avg = double(sum_ltdofs) / NRanks;
   mfem::out << "True DOF partitioning: min " << min_ltdofs
             << ", avg " << std::fixed << std::setprecision(1) << avg
             << ", max " << max_ltdofs
             << ", (max-avg)/avg " << 100.0*(max_ltdofs - avg)/avg

      mfem::out << "True DOFs by rank: " << ltdofs;
      for (int i = 1; i < NRanks; i++)

         MPI_Recv(&ltdofs, 1, MPI_LONG, i, 123, MyComm, &status);

      MPI_Send(&ltdofs, 1, MPI_LONG, 0, 123, MyComm);
void ParFiniteElementSpace::GetGroupComm(

   int nvd, ned, ntd = 0, nqd = 0;

   int group_ldof_counter;

   group_ldof_counter = 0;
   for (gr = 1; gr < ng; gr++)

      group_ldof_counter += ned * pmesh->GroupNEdges(gr);

   group_ldof_counter *= vdim;

   group_ldof.SetDims(ng, group_ldof_counter);

   group_ldof_counter = 0;
   group_ldof.GetI()[0] = group_ldof.GetI()[1] = 0;
   for (gr = 1; gr < ng; gr++)

      int j, k, l, m, o, nv, ne, nt, nq;

      for (j = 0; j < nv; j++)
         for (l = 0; l < nvd; l++, m++)

      for (l = 0; l < dofs.Size(); l++)
         group_ldof.GetJ()[group_ldof_counter++] = dofs[l];

      for (j = 0; j < ne; j++)
         for (l = 0; l < ned; l++)
               dofs[l] = m + (-1-ind[l]);
                  (*ldof_sign)[dofs[l]] = -1;
               dofs[l] = m + ind[l];
         for (l = 0; l < dofs.Size(); l++)
            group_ldof.GetJ()[group_ldof_counter++] = dofs[l];

      for (j = 0; j < nt; j++)
         for (l = 0; l < ntd; l++)
               dofs[l] = m + (-1-ind[l]);
                  (*ldof_sign)[dofs[l]] = -1;
               dofs[l] = m + ind[l];
         for (l = 0; l < dofs.Size(); l++)
            group_ldof.GetJ()[group_ldof_counter++] = dofs[l];

      for (j = 0; j < nq; j++)
         for (l = 0; l < nqd; l++)
               dofs[l] = m + (-1-ind[l]);
                  (*ldof_sign)[dofs[l]] = -1;
               dofs[l] = m + ind[l];
         for (l = 0; l < dofs.Size(); l++)
            group_ldof.GetJ()[group_ldof_counter++] = dofs[l];

      group_ldof.GetI()[gr+1] = group_ldof_counter;
void ParFiniteElementSpace::ApplyLDofSigns(Array<int> &dofs) const

   for (int i = 0; i < dofs.Size(); i++)

         if (ldof_sign[-1-dofs[i]] < 0)
            dofs[i] = -1-dofs[i];

         if (ldof_sign[dofs[i]] < 0)
            dofs[i] = -1-dofs[i];

void ParFiniteElementSpace::ApplyLDofSigns(Table &el_dof) const

   Array<int> all_dofs(el_dof.GetJ(), el_dof.Size_of_connections());
   ApplyLDofSigns(all_dofs);

   ApplyLDofSigns(dofs);

   ApplyLDofSigns(dofs);

   ApplyLDofSigns(dofs);

   MFEM_ASSERT(0 <= ei && ei < pmesh->GroupNEdges(group), "invalid edge index");
   pmesh->GroupEdge(group, ei, l_edge, ori);

   for (int i = 0; i < dofs.Size(); i++)
      const int di = dofs[i];
      dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];

   MFEM_ASSERT(0 <= fi && fi < pmesh->GroupNTriangles(group),
               "invalid triangular face index");

   for (int i = 0; i < dofs.Size(); i++)
      const int di = dofs[i];
      dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];

   MFEM_ASSERT(0 <= fi && fi < pmesh->GroupNQuadrilaterals(group),
               "invalid quadrilateral face index");

   for (int i = 0; i < dofs.Size(); i++)
      const int di = dofs[i];
      dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];
void ParFiniteElementSpace::GenerateGlobalOffsets() const

   if (HYPRE_AssumedPartitionCheck())

      GroupTopology &gt = GetGroupTopo();
      int nsize = gt.GetNumNeighbors()-1;
      MPI_Request *requests = new MPI_Request[2*nsize];
      MPI_Status *statuses = new MPI_Status[2*nsize];
      tdof_nb_offsets.SetSize(nsize+1);
      tdof_nb_offsets[0] = tdof_offsets[0];

      int request_counter = 0;
      for (int i = 1; i <= nsize; i++)

         MPI_Irecv(&tdof_nb_offsets[i], 1, HYPRE_MPI_INT,
                   gt.GetNeighborRank(i), 5365, MyComm,
                   &requests[request_counter++]);

      for (int i = 1; i <= nsize; i++)

         MPI_Isend(&tdof_nb_offsets[0], 1, HYPRE_MPI_INT,
                   gt.GetNeighborRank(i), 5365, MyComm,
                   &requests[request_counter++]);

      MPI_Waitall(request_counter, requests, statuses);
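      // Note on offset layouts (a summary of the hypre convention used above
      // and in GetMyDofOffset()/GetMyTDofOffset() below; sizes assumed from the
      // usual hypre assumed-partition behavior): with HYPRE_AssumedPartitionCheck()
      // each rank stores only { my_start, my_end, global_size } in dof_offsets/
      // tdof_offsets, so the local offset is entry [0] and neighbor offsets must
      // be exchanged explicitly, as done here; otherwise the arrays hold all
      // NRanks+1 offsets and the local offset is entry [MyRank]. For example,
      // three ranks owning 4, 5 and 3 true dofs give tdof_offsets = {0,4,9,12}
      // in the full layout, while rank 1 under assumed partition stores {4,9,12}.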
void ParFiniteElementSpace::Build_Dof_TrueDof_Matrix() const

   HYPRE_Int *i_diag = new HYPRE_Int[ldof+1];
   HYPRE_Int *j_diag = new HYPRE_Int[ltdof];

   HYPRE_Int *i_offd = new HYPRE_Int[ldof+1];
   HYPRE_Int *j_offd = new HYPRE_Int[ldof-ltdof];

   HYPRE_Int *cmap = new HYPRE_Int[ldof-ltdof];

   Array<Pair<HYPRE_Int, int> > cmap_j_offd(ldof-ltdof);

   i_diag[0] = i_offd[0] = 0;
   diag_counter = offd_counter = 0;
   for (int i = 0; i < ldof; i++)

         j_diag[diag_counter++] = ltdof;

         cmap_j_offd[offd_counter].two = offd_counter;

      i_diag[i+1] = diag_counter;
      i_offd[i+1] = offd_counter;

   SortPairs<HYPRE_Int, int>(cmap_j_offd, offd_counter);

   for (int i = 0; i < offd_counter; i++)

      cmap[i] = cmap_j_offd[i].one;
      j_offd[cmap_j_offd[i].two] = i;

   P = new HypreParMatrix(MyComm, MyRank, NRanks, row_starts, col_starts,
                          i_diag, j_diag, i_offd, j_offd, cmap, offd_counter);
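   // Usage sketch (variable names illustrative, not from this function): user
   // code normally reaches the matrix assembled above through
   // Dof_TrueDof_Matrix(), which builds it on demand:
   //
   //    HypreParMatrix *P = pfes.Dof_TrueDof_Matrix();
   //    Vector X(pfes.GetTrueVSize()), x(pfes.GetVSize());
   //    X = 1.0;
   //    P->Mult(X, x);   // prolongate true dofs to local (shared) dofs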
   BuildParallelConformingInterpolation(&P_pc, NULL, P_pc_row_starts,
                                        P_pc_col_starts, NULL, true);

   for (int i = 0; i < ldof_group.Size(); i++)

      if (ldof_ltdof[i] >= 0)

      gc->Create(pNURBSext()->ldof_group);

      GetGroupComm(*gc, 0);

   MFEM_VERIFY(ldof_marker.Size() == GetVSize(), "invalid in/out array");

   gcomm->Bcast(ldof_marker);

   const int *ess_dofs_data = ess_dofs.HostRead();
   Pt->BooleanMult(1, ess_dofs_data, 0, true_ess_dofs2);

   const int *ted = true_ess_dofs.HostRead();
   for (int i = 0; i < true_ess_dofs.Size(); i++)

      if (bool(ted[i]) != bool(true_ess_dofs2[i])) { counter++; }

   MFEM_VERIFY(counter == 0, "internal MFEM error: counter = " << counter);
   return ldof_ltdof[ldof];

   if (GetGroupTopo().IAmMaster(ldof_group[ldof]))

      return ldof_ltdof[ldof];

   MFEM_VERIFY(ldof_ltdof[ldof] >= 0, "ldof " << ldof << " not a true DOF.");

   if (HYPRE_AssumedPartitionCheck())

      return ldof_ltdof[ldof] +

   return ldof_ltdof[ldof] +

      MFEM_ABORT("Not implemented for NC mesh.");

   if (HYPRE_AssumedPartitionCheck())

         return ldof_ltdof[sldof] +
                   ldof_group[sldof])] / vdim;

         return (ldof_ltdof[sldof*vdim] +
                 tdof_nb_offsets[GetGroupTopo().GetGroupMaster(

      return ldof_ltdof[sldof] +
                ldof_group[sldof])] / vdim;

      return (ldof_ltdof[sldof*vdim] +
              tdof_offsets[GetGroupTopo().GetGroupMasterRank(

   return HYPRE_AssumedPartitionCheck() ? dof_offsets[0] : dof_offsets[MyRank];

   return HYPRE_AssumedPartitionCheck() ? tdof_offsets[0] : tdof_offsets[MyRank];
   if (num_face_nbrs == 0)

   MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
   MPI_Request *send_requests = requests;
   MPI_Request *recv_requests = requests + num_face_nbrs;
   MPI_Status *statuses = new MPI_Status[num_face_nbrs];

   Table send_nbr_elem_dof;

   for (int fn = 0; fn < num_face_nbrs; fn++)

      for (int i = 0; i < num_my_elems; i++)

         for (int j = 0; j < ldofs.Size(); j++)

            int ldof = (ldofs[j] >= 0 ? ldofs[j] : -1-ldofs[j]);

            if (ldof_marker[ldof] != fn)

               ldof_marker[ldof] = fn;

                MyComm, &send_requests[fn]);

                MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   MPI_Waitall(num_face_nbrs, send_requests, statuses);

   int *send_I = send_nbr_elem_dof.GetI();

   for (int fn = 0; fn < num_face_nbrs; fn++)

      MPI_Isend(send_I + send_el_off[fn], send_el_off[fn+1] - send_el_off[fn],
                MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);

      MPI_Irecv(recv_I + recv_el_off[fn], recv_el_off[fn+1] - recv_el_off[fn],
                MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, send_requests, statuses);
   send_nbr_elem_dof.MakeJ();

   for (int fn = 0; fn < num_face_nbrs; fn++)

      for (int i = 0; i < num_my_elems; i++)

         for (int j = 0; j < ldofs.Size(); j++)

            int ldof = (ldofs[j] >= 0 ? ldofs[j] : -1-ldofs[j]);

            if (ldof_marker[ldof] != fn)

               ldof_marker[ldof] = fn;

            send_el_off[fn] + i, ldofs, ldofs.Size());
   int *send_J = send_nbr_elem_dof.GetJ();
   for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)

      int j_end = send_I[send_el_off[fn+1]];

      for (int i = 0; i < num_ldofs; i++)

         int ldof = (ldofs[i] >= 0 ? ldofs[i] : -1-ldofs[i]);
         ldof_marker[ldof] = i;

      for ( ; j < j_end; j++)

         int ldof = (send_J[j] >= 0 ? send_J[j] : -1-send_J[j]);
         send_J[j] = (send_J[j] >= 0 ? ldof_marker[ldof] : -1-ldof_marker[ldof]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   for (int fn = 0; fn < num_face_nbrs; fn++)

      MPI_Isend(send_J + send_I[send_el_off[fn]],
                send_I[send_el_off[fn+1]] - send_I[send_el_off[fn]],
                MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);

      MPI_Irecv(recv_J + recv_I[recv_el_off[fn]],
                recv_I[recv_el_off[fn+1]] - recv_I[recv_el_off[fn]],
                MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)

      int j_end = recv_I[recv_el_off[fn+1]];

      for ( ; j < j_end; j++)

   MPI_Waitall(num_face_nbrs, send_requests, statuses);

   for (int fn = 0; fn < num_face_nbrs; fn++)

                MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);

                MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);
   MPI_Waitall(num_face_nbrs, send_requests, statuses);

   for (int fn = 0; fn < num_face_nbrs; fn++)

      MPI_Isend(&my_dof_offset, 1, HYPRE_MPI_INT, nbr_rank, tag,
                MyComm, &send_requests[fn]);

      MPI_Irecv(&dof_face_nbr_offsets[fn], 1, HYPRE_MPI_INT, nbr_rank, tag,
                MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)

   MPI_Waitall(num_face_nbrs, send_requests, statuses);
   int el1, el2, inf1, inf2;

   Ordering::DofsToVDofs<Ordering::byNODES>(nd/vdim, vdim, vdofs);

   for (int j = 0; j < vdofs.Size(); j++)

      const int ldof = vdofs[j];
      vdofs[j] = (ldof >= 0) ? vol_vdofs[ldof] : -1-vol_vdofs[-1-ldof];

      mfem_error("ParFiniteElementSpace::GetFaceNbrFE"
                 " does not support NURBS!");

   hypre_ParCSRMatrix *csrP = (hypre_ParCSRMatrix*)(*P);
   hypre_ParCSRMatrixOwnsRowStarts(csrP) = 1;
   hypre_ParCSRMatrixOwnsColStarts(csrP) = 1;
void ParFiniteElementSpace::ConstructTrueDofs()

   GetGroupComm(*gcomm, 1, &ldof_sign);

   for (gr = 1; gr < group_ldof.Size(); gr++)

      const int *ldofs = group_ldof.GetRow(gr);
      const int nldofs = group_ldof.RowSize(gr);
      for (i = 0; i < nldofs; i++)

         ldof_group[ldofs[i]] = gr;

         for (i = 0; i < nldofs; i++)

            ldof_ltdof[ldofs[i]] = -2;

   for (i = 0; i < n; i++)

      if (ldof_ltdof[i] == -1)

         ldof_ltdof[i] = ltdof_size++;

   gcomm->Bcast(ldof_ltdof);

void ParFiniteElementSpace::ConstructTrueNURBSDofs()

   GroupTopology &gt = pNURBSext()->gtopo;
   gcomm = new GroupCommunicator(gt);

      ldof_group.MakeRef(pNURBSext()->ldof_group);

      const int *scalar_ldof_group = pNURBSext()->ldof_group;

      for (int i = 0; i < n; i++)

         ldof_group[i] = scalar_ldof_group[VDofToDof(i)];

   gcomm->Create(ldof_group);

   for (int i = 0; i < n; i++)

      if (gt.IAmMaster(ldof_group[i]))

         ldof_ltdof[i] = ltdof_size;

   gcomm->Bcast(ldof_ltdof);
void ParFiniteElementSpace::GetGhostVertexDofs(const MeshId &id,
                                               Array<int> &dofs) const

   for (int j = 0; j < nv; j++)

      dofs[j] = ndofs + nv*id.index + j;

void ParFiniteElementSpace::GetGhostEdgeDofs(const MeshId &edge_id,
                                             Array<int> &dofs) const

   dofs.SetSize(2*nv + ne);

   for (int i = 0; i < 2; i++)

      int k = (V[i] < ghost) ? V[i]*nv : (ndofs + (V[i] - ghost)*nv);
      for (int j = 0; j < nv; j++)

         dofs[i*nv + j] = k++;

   int k = ndofs + ngvdofs + (edge_id.index - pncmesh->GetNEdges())*ne;
   for (int j = 0; j < ne; j++)

      dofs[2*nv + j] = k++;

void ParFiniteElementSpace::GetGhostFaceDofs(const MeshId &face_id,
                                             Array<int> &dofs) const

   const int ghost_face_index = face_id.index - pncmesh->GetNFaces();

   dofs.SetSize(4*nv + 4*ne + nf);

   int V[4], E[4], Eo[4];

   for (int i = 0; i < 4; i++)

      int first = (V[i] < ghost) ? V[i]*nv : (ndofs + (V[i] - ghost)*nv);
      for (int j = 0; j < nv; j++)

         dofs[offset++] = first + j;

   for (int i = 0; i < 4; i++)

      int first = (E[i] < ghost) ? nvdofs + E[i]*ne
                                 : ndofs + ngvdofs + (E[i] - ghost)*ne;

      for (int j = 0; j < ne; j++)

         dofs[offset++] = (ind[j] >= 0) ? (first + ind[j])
                                        : (-1 - (first + (-1 - ind[j])));

   int first = ndofs + ngvdofs + ngedofs + ghost_face_index*nf;
   for (int j = 0; j < nf; j++)

      dofs[offset++] = first + j;
void ParFiniteElementSpace::GetGhostDofs(int entity, const MeshId &id,
                                         Array<int> &dofs) const

      case 0: GetGhostVertexDofs(id, dofs); break;
      case 1: GetGhostEdgeDofs(id, dofs); break;
      case 2: GetGhostFaceDofs(id, dofs); break;

void ParFiniteElementSpace::GetBareDofs(int entity, int index,
                                        Array<int> &dofs) const

   int ned, ghost, first;

         first = (index < ghost)
                 : ndofs + (index - ghost)*ned;

         first = (index < ghost)
                 : ndofs + ngvdofs + (index - ghost)*ned;

         first = (index < ghost)
                 : ndofs + ngvdofs + ngedofs + (index - ghost)*ned;

   for (int i = 0; i < ned; i++)

      dofs[i] = first + i;

int ParFiniteElementSpace::PackDof(int entity, int index, int edof) const

         return (index < ghost)
                : ndofs + (index - ghost)*ned + edof;

         return (index < ghost)
                ? nvdofs + index*ned + edof
                : ndofs + ngvdofs + (index - ghost)*ned + edof;

         return (index < ghost)
                : ndofs + ngvdofs + ngedofs + (index - ghost)*ned + edof;
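// Packed scalar-DOF numbering used by PackDof()/UnpackDof() above (worked
// example, derived from the branches shown): regular (owned) dofs keep their
// usual order,
//    [ vertex dofs | edge dofs | face dofs ]          -> 0 .. ndofs-1
// and ghost-entity dofs follow,
//    [ ghost vertex | ghost edge | ghost face dofs ]  -> ndofs .. ndofs+ngdofs-1.
// For example, with nv dofs per vertex, dof `edof` of ghost vertex `gv` packs
// to ndofs + (gv - pncmesh->GetNVertices())*nv + edof, matching the vertex
// branch above.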
void ParFiniteElementSpace::UnpackDof(int dof,
                                      int &entity, int &index, int &edof) const

   MFEM_VERIFY(dof >= 0, "");

         entity = 0, index = dof / nv, edof = dof % nv;

         entity = 1, index = dof / ne, edof = dof % ne;

         entity = 2, index = dof / nf, edof = dof % nf;

      MFEM_ABORT("Cannot unpack internal DOF");

         entity = 0, index = pncmesh->GetNVertices() + dof / nv, edof = dof % nv;

         entity = 1, index = pncmesh->GetNEdges() + dof / ne, edof = dof % ne;

         entity = 2, index = pncmesh->GetNFaces() + dof / nf, edof = dof % nf;

   MFEM_ABORT("Out of range DOF.");

struct PMatrixElement

   HYPRE_Int column, stride;

   PMatrixElement(HYPRE_Int col = 0, HYPRE_Int str = 0, double val = 0)
      : column(col), stride(str), value(val) {}

   bool operator<(const PMatrixElement &other) const
   { return column < other.column; }

   typedef std::vector<PMatrixElement> List;
   PMatrixElement::List elems;

   void AddRow(const PMatrixRow &other, double coef)

      elems.reserve(elems.size() + other.elems.size());
      for (unsigned i = 0; i < other.elems.size(); i++)

         const PMatrixElement &oei = other.elems[i];
            PMatrixElement(oei.column, oei.stride, coef * oei.value));

      if (!elems.size()) { return; }
      std::sort(elems.begin(), elems.end());

      for (unsigned i = 1; i < elems.size(); i++)

         if (elems[j].column == elems[i].column)

            elems[j].value += elems[i].value;

            elems[++j] = elems[i];

   void write(std::ostream &os, double sign) const

      bin_io::write<int>(os, elems.size());
      for (unsigned i = 0; i < elems.size(); i++)

         const PMatrixElement &e = elems[i];
         bin_io::write<HYPRE_Int>(os, e.column);
         bin_io::write<int>(os, e.stride);
         bin_io::write<double>(os, e.value * sign);

   void read(std::istream &is, double sign)

      elems.resize(bin_io::read<int>(is));
      for (unsigned i = 0; i < elems.size(); i++)

         PMatrixElement &e = elems[i];
         e.column = bin_io::read<HYPRE_Int>(is);
         e.stride = bin_io::read<int>(is);
         e.value = bin_io::read<double>(is) * sign;
class NeighborRowMessage : public VarMessage<314>

   typedef NCMesh::MeshId MeshId;

      int entity, index, edof;

      RowInfo(int ent, int idx, int edof, GroupId grp, const PMatrixRow &row)
         : entity(ent), index(idx), edof(edof), group(grp), row(row) {}

      RowInfo(int ent, int idx, int edof, GroupId grp)
         : entity(ent), index(idx), edof(edof), group(grp) {}

      typedef std::vector<RowInfo> List;

   NeighborRowMessage() : pncmesh(NULL) {}

   void AddRow(int entity, int index, int edof, GroupId group,
               const PMatrixRow &row)

      rows.push_back(RowInfo(entity, index, edof, group, row));

   const RowInfo::List& GetRows() const { return rows; }

   void SetNCMesh(ParNCMesh* pnc) { pncmesh = pnc; }
   void SetFEC(const FiniteElementCollection* fec) { this->fec = fec; }

   typedef std::map<int, NeighborRowMessage> Map;

   const FiniteElementCollection* fec;

   virtual void Encode(int rank);
   virtual void Decode(int);
void NeighborRowMessage::Encode(int rank)

   std::ostringstream stream;

   Array<MeshId> ent_ids[3];
   Array<GroupId> group_ids[3];
   Array<int> row_idx[3];

   for (unsigned i = 0; i < rows.size(); i++)

      const RowInfo &ri = rows[i];
      const MeshId &id = pncmesh->GetNCList(ri.entity).LookUp(ri.index);
      ent_ids[ri.entity].Append(id);
      row_idx[ri.entity].Append(i);
      group_ids[ri.entity].Append(ri.group);

   Array<GroupId> all_group_ids;
   all_group_ids.Reserve(rows.size());
   for (int i = 0; i < 3; i++)

      all_group_ids.Append(group_ids[i]);

   pncmesh->AdjustMeshIds(ent_ids, rank);
   pncmesh->EncodeMeshIds(stream, ent_ids);
   pncmesh->EncodeGroups(stream, all_group_ids);

   for (int ent = 0; ent < 3; ent++)

      const Array<MeshId> &ids = ent_ids[ent];
      for (int i = 0; i < ids.Size(); i++)

         const MeshId &id = ids[i];
         const RowInfo &ri = rows[row_idx[ent][i]];
         MFEM_ASSERT(ent == ri.entity, "");

#ifdef MFEM_DEBUG_PMATRIX
         mfem::out << "Rank " << pncmesh->MyRank << " sending to " << rank
                   << ": ent " << ri.entity << ", index " << ri.index
                   << ", edof " << ri.edof << " (id " << id.element << "/"
                   << id.local << ")" << std::endl;

            int eo = pncmesh->GetEdgeNCOrientation(id);

            if ((edof = ind[edof]) < 0)

         bin_io::write<int>(stream, edof);
         ri.row.write(stream, s);

   stream.str().swap(data);
void NeighborRowMessage::Decode(int rank)

   std::istringstream stream(data);

   Array<MeshId> ent_ids[3];
   Array<GroupId> group_ids;

   pncmesh->DecodeMeshIds(stream, ent_ids);
   pncmesh->DecodeGroups(stream, group_ids);

   int nrows = ent_ids[0].Size() + ent_ids[1].Size() + ent_ids[2].Size();
   MFEM_ASSERT(nrows == group_ids.Size(), "");

   rows.reserve(nrows);

   for (int ent = 0, gi = 0; ent < 3; ent++)

      const Array<MeshId> &ids = ent_ids[ent];
      for (int i = 0; i < ids.Size(); i++)

         const MeshId &id = ids[i];
         int edof = bin_io::read<int>(stream);

         const int *ind = NULL;

            int eo = pncmesh->GetEdgeNCOrientation(id);

            int fo = pncmesh->GetFaceOrientation(id.index);
            ind = fec->DofOrderForOrientation(fgeom, fo);

         if (ind && (edof = ind[edof]) < 0)

         rows.push_back(RowInfo(ent, id.index, edof, group_ids[gi++]));
         rows.back().row.read(stream, s);

#ifdef MFEM_DEBUG_PMATRIX
         mfem::out << "Rank " << pncmesh->MyRank << " receiving from " << rank
                   << ": ent " << rows.back().entity << ", index "
                   << rows.back().index << ", edof " << rows.back().edof
ParFiniteElementSpace::ScheduleSendRow(const PMatrixRow &row, int dof,
                                       NeighborRowMessage::Map &send_msg) const

   UnpackDof(dof, ent, idx, edof);

   for (unsigned i = 0; i < group.size(); i++)

      int rank = group[i];

         NeighborRowMessage &msg = send_msg[rank];
         msg.AddRow(ent, idx, edof, group_id, row);
         msg.SetNCMesh(pncmesh);
#ifdef MFEM_PMATRIX_STATS

void ParFiniteElementSpace::ForwardRow(const PMatrixRow &row, int dof,
                                       GroupId group_sent_id, GroupId group_id,
                                       NeighborRowMessage::Map &send_msg) const

   UnpackDof(dof, ent, idx, edof);

   for (unsigned i = 0; i < group.size(); i++)

      int rank = group[i];
      if (rank != MyRank && !pncmesh->GroupContains(group_sent_id, rank))

         NeighborRowMessage &msg = send_msg[rank];
         GroupId invalid = -1;
         msg.AddRow(ent, idx, edof, invalid, row);
         msg.SetNCMesh(pncmesh);
#ifdef MFEM_PMATRIX_STATS

#ifdef MFEM_DEBUG_PMATRIX
                   << rank << ": ent " << ent << ", index" << idx
                   << ", edof " << edof << std::endl;
#ifdef MFEM_DEBUG_PMATRIX
void ParFiniteElementSpace
::DebugDumpDOFs(std::ostream &os,
                const SparseMatrix &deps,
                const Array<GroupId> &dof_group,
                const Array<GroupId> &dof_owner,
                const Array<bool> &finalized) const

   for (int i = 0; i < dof_group.Size(); i++)

         UnpackDof(i, ent, idx, edof);

         os << edof << " @ ";
         if (i > ndofs) { os << "ghost "; }

            case 0: os << "vertex "; break;
            case 1: os << "edge "; break;
            default: os << "face "; break;

         if (i < deps.Height() && deps.RowSize(i))

            os << "depends on ";
            for (int j = 0; j < deps.RowSize(i); j++)

               os << deps.GetRowColumns(i)[j] << " ("
                  << deps.GetRowEntries(i)[j] << ")";
               if (j < deps.RowSize(i)-1) { os << ", "; }

         os << "group " << dof_group[i] << " (";

         for (unsigned j = 0; j < g.size(); j++)

            if (j) { os << ", "; }

         os << "), owner " << dof_owner[i] << " (rank "
            << pncmesh->GetGroup(dof_owner[i])[0] << "); "
            << (finalized[i] ? "finalized" : "NOT finalized");
int ParFiniteElementSpace
::BuildParallelConformingInterpolation(HypreParMatrix **P, SparseMatrix **R,
                                       Array<HYPRE_Int> &dof_offs,
                                       Array<HYPRE_Int> &tdof_offs,
                                       Array<int> *dof_tdof,

#ifdef MFEM_PMATRIX_STATS
   n_msgs_sent = n_msgs_recv = 0;
   n_rows_sent = n_rows_recv = n_rows_fwd = 0;

   int total_dofs = ndofs + ngdofs;
   SparseMatrix deps(ndofs, total_dofs);

   if (!dg && !partial)

      Array<int> master_dofs, slave_dofs;

      for (int entity = 0; entity <= 2; entity++)

         const NCMesh::NCList &list = pncmesh->GetNCList(entity);
         if (!list.masters.size()) { continue; }

         IsoparametricTransformation T;

         if (!fe) { continue; }

         DenseMatrix I(fe->GetDof());

         for (unsigned mi = 0; mi < list.masters.size(); mi++)

            const NCMesh::Master &mf = list.masters[mi];

            pncmesh->IsGhost(entity, mf.index)
            ? GetGhostDofs(entity, mf, master_dofs)

            if (!master_dofs.Size()) { continue; }

            for (int si = mf.slaves_begin; si < mf.slaves_end; si++)

               const NCMesh::Slave &sf = list.slaves[si];
               if (pncmesh->IsGhost(entity, sf.index)) { continue; }

               if (!slave_dofs.Size()) { continue; }

               sf.OrientedPointMatrix(T.GetPointMat());
               T.FinalizeTransformation();
               fe->GetLocalInterpolation(T, I);

   Array<GroupId> dof_group(total_dofs);
   Array<GroupId> dof_owner(total_dofs);

   for (int entity = 0; entity <= 2; entity++)

      const NCMesh::NCList &list = pncmesh->GetNCList(entity);

      std::size_t lsize[3] =
      { list.conforming.size(), list.masters.size(), list.slaves.size() };

      for (int l = 0; l < 3; l++)

         for (std::size_t i = 0; i < lsize[l]; i++)

               (l == 0) ? list.conforming[i] :
               (l == 1) ? (const MeshId&) list.masters[i]
                        : (const MeshId&) list.slaves[i];

            GetBareDofs(entity, id.index, dofs);

            for (int j = 0; j < dofs.Size(); j++)

               dof_owner[dof] = owner;
               dof_group[dof] = group;
   Array<bool> finalized(total_dofs);

   int num_true_dofs = 0;
   for (int i = 0; i < ndofs; i++)

      if (dof_owner[i] == 0 && deps.RowSize(i) == 0)

         finalized[i] = true;

   HYPRE_Int loc_sizes[2] = { ndofs*vdim, num_true_dofs*vdim };
   Array<HYPRE_Int>* offsets[2] = { &dof_offs, &tdof_offs };

   HYPRE_Int my_tdof_offset =
      tdof_offs[HYPRE_AssumedPartitionCheck() ? 0 : MyRank];

      *R = new SparseMatrix(num_true_dofs*vdim, ndofs*vdim);

      dof_tdof->SetSize(ndofs*vdim);

   std::vector<PMatrixRow> pmatrix(total_dofs);

   int vdim_factor = bynodes ? 1 : vdim;
   int dof_stride = bynodes ? ndofs : 1;
   int tdof_stride = bynodes ? num_true_dofs : 1;

   std::list<NeighborRowMessage::Map> send_msg;
   send_msg.push_back(NeighborRowMessage::Map());

   for (int dof = 0, tdof = 0; dof < ndofs; dof++)

         pmatrix[dof].elems.push_back(
            PMatrixElement(my_tdof_offset + vdim_factor*tdof, tdof_stride, 1.));

         if (dof_group[dof] != 0)

            ScheduleSendRow(pmatrix[dof], dof, dof_group[dof], send_msg.back());

         for (int vd = 0; vd < vdim; vd++)

            int vdof = dof*vdim_factor + vd*dof_stride;
            int vtdof = tdof*vdim_factor + vd*tdof_stride;

            if (R) { (*R)->Add(vtdof, vdof, 1.0); }
            if (dof_tdof) { (*dof_tdof)[vdof] = vtdof; }

   NeighborRowMessage::IsendAll(send_msg.back(), MyComm);
#ifdef MFEM_PMATRIX_STATS
   n_msgs_sent += send_msg.back().size();

   if (R) { (*R)->Finalize(); }

   NeighborRowMessage recv_msg;
   recv_msg.SetNCMesh(pncmesh);
   recv_msg.SetFEC(fec);

   int num_finalized = num_true_dofs;

   buffer.elems.reserve(1024);

   while (num_finalized < ndofs)

      if (send_msg.back().size())

         send_msg.push_back(NeighborRowMessage::Map());

      while (NeighborRowMessage::IProbe(rank, size, MyComm))

         recv_msg.Recv(rank, size, MyComm);
#ifdef MFEM_PMATRIX_STATS
         n_rows_recv += recv_msg.GetRows().size();

         const NeighborRowMessage::RowInfo::List &rows = recv_msg.GetRows();
         for (unsigned i = 0; i < rows.size(); i++)

            const NeighborRowMessage::RowInfo &ri = rows[i];
            int dof = PackDof(ri.entity, ri.index, ri.edof);
            pmatrix[dof] = ri.row;

            if (dof < ndofs && !finalized[dof]) { num_finalized++; }
            finalized[dof] = true;

            if (ri.group >= 0 && dof_group[dof] != ri.group)

               ForwardRow(ri.row, dof, ri.group, dof_group[dof], send_msg.back());
      for (int dof = 0; dof < ndofs; dof++)

         if (finalized[dof]) { continue; }

         bool owned = (dof_owner[dof] == 0);
         bool shared = (dof_group[dof] != 0);

            const int* dep_col = deps.GetRowColumns(dof);
            const double* dep_coef = deps.GetRowEntries(dof);
            int num_dep = deps.RowSize(dof);

            buffer.elems.clear();
            for (int j = 0; j < num_dep; j++)

               buffer.AddRow(pmatrix[dep_col[j]], dep_coef[j]);

            pmatrix[dof] = buffer;

            finalized[dof] = true;

               ScheduleSendRow(pmatrix[dof], dof, dof_group[dof],

#ifdef MFEM_DEBUG_PMATRIX

      NeighborRowMessage::IsendAll(send_msg.back(), MyComm);
#ifdef MFEM_PMATRIX_STATS
      n_msgs_sent += send_msg.back().size();

   *P = MakeVDimHypreMatrix(pmatrix, ndofs, num_true_dofs,
                            dof_offs, tdof_offs);

   while (NeighborRowMessage::IProbe(rank, size, MyComm))

      recv_msg.RecvDrop(rank, size, MyComm);

   for (std::list<NeighborRowMessage::Map>::iterator
        it = send_msg.begin(); it != send_msg.end(); ++it)

      NeighborRowMessage::WaitAllSent(*it);

#ifdef MFEM_PMATRIX_STATS
   int n_rounds = send_msg.size();
   int glob_rounds, glob_msgs_sent, glob_msgs_recv;
   int glob_rows_sent, glob_rows_recv, glob_rows_fwd;

   MPI_Reduce(&n_rounds, &glob_rounds, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_msgs_sent, &glob_msgs_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_msgs_recv, &glob_msgs_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_rows_sent, &glob_rows_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_rows_recv, &glob_rows_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_rows_fwd, &glob_rows_fwd, 1, MPI_INT, MPI_SUM, 0, MyComm);

      mfem::out << "P matrix stats (avg per rank): "
                << double(glob_rounds)/NRanks << " rounds, "
                << double(glob_msgs_sent)/NRanks << " msgs sent, "
                << double(glob_msgs_recv)/NRanks << " msgs recv, "
                << double(glob_rows_sent)/NRanks << " rows sent, "
                << double(glob_rows_recv)/NRanks << " rows recv, "
                << double(glob_rows_fwd)/NRanks << " rows forwarded."

   return num_true_dofs*vdim;
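// Usage sketch (assumes a nonconforming ParFiniteElementSpace `pfes`; names
// illustrative): the P and R assembled above are exposed through the usual
// FiniteElementSpace interface, with x = P*X mapping true dofs to local
// (conforming) dofs and X = R*x restricting back:
//
//    const Operator *P = pfes.GetProlongationMatrix();
//    const SparseMatrix *R = pfes.GetRestrictionMatrix();
//    Vector X(pfes.GetTrueVSize()), x(pfes.GetVSize());
//    X = 1.0;
//    P->Mult(X, x);   // prolongate: true dofs -> local dofs
//    R->Mult(x, X);   // restrict:   local dofs -> true dofs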
HypreParMatrix* ParFiniteElementSpace
::MakeVDimHypreMatrix(const std::vector<PMatrixRow> &rows,
                      int local_rows, int local_cols,
                      Array<HYPRE_Int> &row_starts,
                      Array<HYPRE_Int> &col_starts) const

   bool assumed = HYPRE_AssumedPartitionCheck();

   HYPRE_Int first_col = col_starts[assumed ? 0 : MyRank];
   HYPRE_Int next_col = col_starts[assumed ? 1 : MyRank+1];

   HYPRE_Int nnz_diag = 0, nnz_offd = 0;
   std::map<HYPRE_Int, int> col_map;
   for (int i = 0; i < local_rows; i++)

      for (unsigned j = 0; j < rows[i].elems.size(); j++)

         const PMatrixElement &elem = rows[i].elems[j];
         HYPRE_Int col = elem.column;
         if (col >= first_col && col < next_col)

            for (int vd = 0; vd < vdim; vd++)

   HYPRE_Int *cmap = new HYPRE_Int[col_map.size()];

   for (std::map<HYPRE_Int, int>::iterator
        it = col_map.begin(); it != col_map.end(); ++it)

      cmap[offd_col] = it->first;
      it->second = offd_col++;

   HYPRE_Int *I_diag = new HYPRE_Int[vdim*local_rows + 1];
   HYPRE_Int *I_offd = new HYPRE_Int[vdim*local_rows + 1];

   HYPRE_Int *J_diag = new HYPRE_Int[nnz_diag];
   HYPRE_Int *J_offd = new HYPRE_Int[nnz_offd];

   double *A_diag = new double[nnz_diag];
   double *A_offd = new double[nnz_offd];

   int vdim1 = bynodes ? vdim : 1;
   int vdim2 = bynodes ? 1 : vdim;
   int vdim_offset = bynodes ? local_cols : 1;

   nnz_diag = nnz_offd = 0;

   for (int vd1 = 0; vd1 < vdim1; vd1++)

      for (int i = 0; i < local_rows; i++)

         for (int vd2 = 0; vd2 < vdim2; vd2++)

            I_diag[vrow] = nnz_diag;
            I_offd[vrow++] = nnz_offd;

            int vd = bynodes ? vd1 : vd2;
            for (unsigned j = 0; j < rows[i].elems.size(); j++)

               const PMatrixElement &elem = rows[i].elems[j];
               if (elem.column >= first_col && elem.column < next_col)

                  J_diag[nnz_diag] = elem.column + vd*vdim_offset - first_col;
                  A_diag[nnz_diag++] = elem.value;

                  J_offd[nnz_offd] = col_map[elem.column + vd*elem.stride];
                  A_offd[nnz_offd++] = elem.value;

   MFEM_ASSERT(vrow == vdim*local_rows, "");
   I_diag[vrow] = nnz_diag;
   I_offd[vrow] = nnz_offd;

   return new HypreParMatrix(MyComm,
                             row_starts.Last(), col_starts.Last(),
                             row_starts.GetData(), col_starts.GetData(),
                             I_diag, J_diag, A_diag,
                             I_offd, J_offd, A_offd,
                             col_map.size(), cmap);
static HYPRE_Int* make_i_array(int nrows)

   HYPRE_Int *I = new HYPRE_Int[nrows+1];
   for (int i = 0; i <= nrows; i++) { I[i] = -1; }

static HYPRE_Int* make_j_array(HYPRE_Int* I, int nrows)

   for (int i = 0; i < nrows; i++)

      if (I[i] >= 0) { nnz++; }

   HYPRE_Int *J = new HYPRE_Int[nnz];

   for (int i = 0, k = 0; i <= nrows; i++)

      HYPRE_Int col = I[i];

      if (col >= 0) { J[k++] = col; }
ParFiniteElementSpace::RebalanceMatrix(int old_ndofs,
                                       const Table* old_elem_dof)

   MFEM_VERIFY(Nonconforming(), "Only supported for nonconforming meshes.");
   MFEM_VERIFY(old_dof_offsets.Size(), "ParFiniteElementSpace::Update needs to "
               "be called before ParFiniteElementSpace::RebalanceMatrix");

   HYPRE_Int old_offset = HYPRE_AssumedPartitionCheck()
                          ? old_dof_offsets[0] : old_dof_offsets[MyRank];

   ParNCMesh* pncmesh = pmesh->pncmesh;

   const Array<int> &old_index = pncmesh->GetRebalanceOldIndex();
   MFEM_VERIFY(old_index.Size() == pmesh->GetNE(),
               "Mesh::Rebalance was not called before "
               "ParFiniteElementSpace::RebalanceMatrix");

   HYPRE_Int* i_diag = make_i_array(vsize);
   for (int i = 0; i < pmesh->GetNE(); i++)

      if (old_index[i] >= 0)

         const int* old_dofs = old_elem_dof->GetRow(old_index[i]);

         for (int vd = 0; vd < vdim; vd++)

            for (int j = 0; j < dofs.Size(); j++)

               if (row < 0) { row = -1 - row; }

               int col = DofToVDof(old_dofs[j], vd, old_ndofs);
               if (col < 0) { col = -1 - col; }

   HYPRE_Int* j_diag = make_j_array(i_diag, vsize);

   Array<int> new_elements;
   Array<long> old_remote_dofs;
   pncmesh->RecvRebalanceDofs(new_elements, old_remote_dofs);

   HYPRE_Int* i_offd = make_i_array(vsize);
   for (int i = 0; i < new_elements.Size(); i++)

      const long* old_dofs = &old_remote_dofs[i * dofs.Size() * vdim];

      for (int vd = 0; vd < vdim; vd++)

         for (int j = 0; j < dofs.Size(); j++)

            if (row < 0) { row = -1 - row; }

            if (i_diag[row] == i_diag[row+1])

               i_offd[row] = old_dofs[j + vd * dofs.Size()];

   HYPRE_Int* j_offd = make_j_array(i_offd, vsize);

   int offd_cols = i_offd[vsize];
   Array<Pair<HYPRE_Int, int> > cmap_offd(offd_cols);
   for (int i = 0; i < offd_cols; i++)

      cmap_offd[i].one = j_offd[i];
      cmap_offd[i].two = i;

   SortPairs<HYPRE_Int, int>(cmap_offd, offd_cols);

   HYPRE_Int* cmap = new HYPRE_Int[offd_cols];
   for (int i = 0; i < offd_cols; i++)

      cmap[i] = cmap_offd[i].one;
      j_offd[cmap_offd[i].two] = i;

   M = new HypreParMatrix(MyComm, MyRank, NRanks, dof_offsets, old_dof_offsets,
                          i_diag, j_diag, i_offd, j_offd, cmap, offd_cols);
struct DerefDofMessage

   std::vector<HYPRE_Int> dofs;
   MPI_Request request;

ParFiniteElementSpace::ParallelDerefinementMatrix(int old_ndofs,
                                                  const Table* old_elem_dof)

   int nrk = HYPRE_AssumedPartitionCheck() ? 2 : NRanks;

   MFEM_VERIFY(Nonconforming(), "Not implemented for conforming meshes.");
   MFEM_VERIFY(old_dof_offsets[nrk], "Missing previous (finer) space.");
   MFEM_VERIFY(dof_offsets[nrk] <= old_dof_offsets[nrk],
               "Previous space is not finer.");

   Array<int> dofs, old_dofs, old_vdofs;

   ParNCMesh* pncmesh = pmesh->pncmesh;

   const CoarseFineTransformations &dtrans = pncmesh->GetDerefinementTransforms();
   const Array<int> &old_ranks = pncmesh->GetDerefineOldRanks();

   std::map<int, DerefDofMessage> messages;

   HYPRE_Int old_offset = HYPRE_AssumedPartitionCheck()
                          ? old_dof_offsets[0] : old_dof_offsets[MyRank];

   for (int k = 0; k < dtrans.embeddings.Size(); k++)

      const Embedding &emb = dtrans.embeddings[k];

      int fine_rank = old_ranks[k];
      int coarse_rank = (emb.parent < 0) ? (-1 - emb.parent)
                        : pncmesh->ElementRank(emb.parent);

      if (coarse_rank != MyRank && fine_rank == MyRank)

         old_elem_dof->GetRow(k, dofs);

         DerefDofMessage &msg = messages[k];
         msg.dofs.resize(dofs.Size());
         for (int i = 0; i < dofs.Size(); i++)

            msg.dofs[i] = old_offset + dofs[i];

         MPI_Isend(&msg.dofs[0], msg.dofs.size(), HYPRE_MPI_INT,
                   coarse_rank, 291, MyComm, &msg.request);

      else if (coarse_rank == MyRank && fine_rank != MyRank)

         DerefDofMessage &msg = messages[k];
         msg.dofs.resize(ldof*vdim);

         MPI_Irecv(&msg.dofs[0], ldof*vdim, HYPRE_MPI_INT,
                   fine_rank, 291, MyComm, &msg.request);

   SparseMatrix *diag = new SparseMatrix(ndofs*vdim, old_ndofs*vdim);

   Array<char> mark(diag->Height());

   for (int k = 0; k < dtrans.embeddings.Size(); k++)

      const Embedding &emb = dtrans.embeddings[k];
      if (emb.parent < 0) { continue; }

      int coarse_rank = pncmesh->ElementRank(emb.parent);
      int fine_rank = old_ranks[k];

      if (coarse_rank == MyRank && fine_rank == MyRank)

         DenseMatrix &lR = localR(emb.matrix);

         old_elem_dof->GetRow(k, old_dofs);

         for (int vd = 0; vd < vdim; vd++)

            old_dofs.Copy(old_vdofs);

            for (int i = 0; i < lR.Height(); i++)

               if (lR(i, 0) == infinity()) { continue; }

               int m = (r >= 0) ? r : (-1 - r);

               diag->SetRow(r, old_vdofs, row);

   for (std::map<int, DerefDofMessage>::iterator
        it = messages.begin(); it != messages.end(); ++it)

      MPI_Wait(&it->second.request, MPI_STATUS_IGNORE);
   SparseMatrix *offd = new SparseMatrix(ndofs*vdim, 1);

   std::map<HYPRE_Int, int> col_map;
   for (int k = 0; k < dtrans.embeddings.Size(); k++)

      const Embedding &emb = dtrans.embeddings[k];
      if (emb.parent < 0) { continue; }

      int coarse_rank = pncmesh->ElementRank(emb.parent);
      int fine_rank = old_ranks[k];

      if (coarse_rank == MyRank && fine_rank != MyRank)

         DenseMatrix &lR = localR(emb.matrix);

         DerefDofMessage &msg = messages[k];
         MFEM_ASSERT(msg.dofs.size(), "");

         for (int vd = 0; vd < vdim; vd++)

            HYPRE_Int* remote_dofs = &msg.dofs[vd*ldof];

            for (int i = 0; i < lR.Height(); i++)

               if (lR(i, 0) == infinity()) { continue; }

               int m = (r >= 0) ? r : (-1 - r);

               for (int j = 0; j < ldof; j++)

                  if (row[j] == 0.0) { continue; }
                  int &lcol = col_map[remote_dofs[j]];
                  if (!lcol) { lcol = col_map.size(); }
                  offd->_Set_(m, lcol-1, row[j]);

   offd->SetWidth(col_map.size());

   HYPRE_Int *cmap = new HYPRE_Int[offd->Width()];
   for (std::map<HYPRE_Int, int>::iterator
        it = col_map.begin(); it != col_map.end(); ++it)

      cmap[it->second-1] = it->first;

      int width = offd->Width();
      Array<Pair<int, int> > reorder(width);
      for (int i = 0; i < width; i++)

         reorder[i].one = cmap[i];

      Array<int> reindex(width);
      for (int i = 0; i < width; i++)

         reindex[reorder[i].two] = i;
         cmap[i] = reorder[i].one;

      int *J = offd->GetJ();
      for (int i = 0; i < offd->NumNonZeroElems(); i++)

         J[i] = reindex[J[i]];

      offd->SortColumnIndices();

   R = new HypreParMatrix(MyComm, dof_offsets[nrk], old_dof_offsets[nrk],
                          dof_offsets, old_dof_offsets, diag, offd, cmap);

#ifndef HYPRE_BIGINT
   diag->SetDataOwner(false);
   offd->SetDataOwner(false);

   R->SetOwnerFlags(3, 3, 1);
void ParFiniteElementSpace::Destroy()

   delete Pconf; Pconf = NULL;

   delete gcomm; gcomm = NULL;

   MFEM_ASSERT(c_pfes != NULL, "coarse_fes must be a parallel space");

                          false, Tgf.OwnsOperator(), false));
   Tgf.SetOperatorOwner(false);

      MFEM_ABORT("Error in update sequence. Space needs to be updated after "
                 "each mesh modification.");

   Table* old_elem_dof = NULL;

   Swap(dof_offsets, old_dof_offsets);

      old_elem_dof = NULL;

         Th.Reset(ParallelDerefinementMatrix(old_ndofs, old_elem_dof));

                                     false, false, true));

         Th.Reset(RebalanceMatrix(old_ndofs, old_elem_dof));

   delete old_elem_dof;
   : Operator(pfes.GetVSize(), pfes.GetTrueVSize()),
     gc(pfes.GroupComm())

   for (int gr = 1; gr < group_ldof.Size(); gr++)

      for ( ; j < end; j++)

   for ( ; j < Height(); j++)

   const double *xdata = x.HostRead();

   const int in_layout = 2;
   gc.BcastBegin(const_cast<double*>(xdata), in_layout);

   for (int i = 0; i < m; i++)

      std::copy(xdata+j-i, xdata+end-i, ydata+j);

   std::copy(xdata+j-m, xdata+Width(), ydata+j);

   const int out_layout = 0;

   const double *xdata = x.HostRead();

   for (int i = 0; i < m; i++)

      std::copy(xdata+j, xdata+end, ydata+j-i);

   std::copy(xdata+j, xdata+Height(), ydata+j-m);

   const int out_layout = 2;