#include "../config/config.hpp"

#include "../general/forall.hpp"
#include "../general/sort_pairs.hpp"
#include "../mesh/mesh_headers.hpp"
#include "../general/binaryio.hpp"
   ParInit(pmesh ? pmesh : orig.pmesh);

     f ? f : global_fes->FEColl(),
     global_fes->GetVDim(), global_fes->GetOrdering())
   int dim, int ordering)
   if (globNURBSext == NULL) { return NULL; }
   const ParNURBSExtension *pNURBSext =
      dynamic_cast<const ParNURBSExtension*>(parNURBSext);
   MFEM_ASSERT(pNURBSext, "need a ParNURBSExtension");
   NURBSExtension *tmp_globNURBSext = new NURBSExtension(*globNURBSext);
   return new ParNURBSExtension(tmp_globNURBSext, pNURBSext);
void ParFiniteElementSpace::ParInit(ParMesh *pm)
   pncmesh = pm->pncmesh;

   MFEM_ASSERT(own_ext, "internal error");
   ParNURBSExtension *pNe = new ParNURBSExtension(
void ParFiniteElementSpace::Construct()
      ConstructTrueNURBSDofs();
      GenerateGlobalOffsets();

      GenerateGlobalOffsets();

      ngedofs = ngfdofs = 0;
      ngdofs = ngvdofs + ngedofs + ngfdofs;

      ltdof_size = BuildParallelConformingInterpolation(
                      &P, &R, dof_offsets, tdof_offsets, &ldof_ltdof, false);
   long ltdofs = ltdof_size;
   long min_ltdofs, max_ltdofs, sum_ltdofs;

   MPI_Reduce(&ltdofs, &min_ltdofs, 1, MPI_LONG, MPI_MIN, 0, MyComm);
   MPI_Reduce(&ltdofs, &max_ltdofs, 1, MPI_LONG, MPI_MAX, 0, MyComm);
   MPI_Reduce(&ltdofs, &sum_ltdofs, 1, MPI_LONG, MPI_SUM, 0, MyComm);

      double avg = double(sum_ltdofs) / NRanks;
      mfem::out << "True DOF partitioning: min " << min_ltdofs
                << ", avg " << std::fixed << std::setprecision(1) << avg
                << ", max " << max_ltdofs
                << ", (max-avg)/avg " << 100.0*(max_ltdofs - avg)/avg << "%";

         mfem::out << "True DOFs by rank: " << ltdofs;
         for (int i = 1; i < NRanks; i++)
            MPI_Recv(&ltdofs, 1, MPI_LONG, i, 123, MyComm, &status);

      MPI_Send(&ltdofs, 1, MPI_LONG, 0, 123, MyComm);
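// Illustrative note (not in the original source): the code above gathers the
// per-rank true-DOF counts on rank 0 in rank order. A minimal standalone
// sketch of the same pattern (rank/nranks/comm are placeholder names):
//
//    long mine = ltdof_size;
//    if (rank == 0)
//    {
//       mfem::out << mine;
//       for (int i = 1; i < nranks; i++)
//       {
//          MPI_Recv(&mine, 1, MPI_LONG, i, 123, comm, MPI_STATUS_IGNORE);
//          mfem::out << ' ' << mine;
//       }
//    }
//    else { MPI_Send(&mine, 1, MPI_LONG, 0, 123, comm); }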
void ParFiniteElementSpace::GetGroupComm(
   int nvd, ned, ntd = 0, nqd = 0;
   int group_ldof_counter;

   group_ldof_counter = 0;
   for (gr = 1; gr < ng; gr++)
      group_ldof_counter += ned * pmesh->GroupNEdges(gr);

   group_ldof_counter *= vdim;

   group_ldof.SetDims(ng, group_ldof_counter);

   group_ldof_counter = 0;
   group_ldof.GetI()[0] = group_ldof.GetI()[1] = 0;
   for (gr = 1; gr < ng; gr++)
      int j, k, l, m, o, nv, ne, nt, nq;

      for (j = 0; j < nv; j++)
            for (l = 0; l < nvd; l++, m++)

         for (l = 0; l < dofs.Size(); l++)
            group_ldof.GetJ()[group_ldof_counter++] = dofs[l];

      for (j = 0; j < ne; j++)
            for (l = 0; l < ned; l++)
                  dofs[l] = m + (-1-ind[l]);
                     (*ldof_sign)[dofs[l]] = -1;
                  dofs[l] = m + ind[l];

         for (l = 0; l < dofs.Size(); l++)
            group_ldof.GetJ()[group_ldof_counter++] = dofs[l];

      for (j = 0; j < nt; j++)
            for (l = 0; l < ntd; l++)
                  dofs[l] = m + (-1-ind[l]);
                     (*ldof_sign)[dofs[l]] = -1;
                  dofs[l] = m + ind[l];

         for (l = 0; l < dofs.Size(); l++)
            group_ldof.GetJ()[group_ldof_counter++] = dofs[l];

      for (j = 0; j < nq; j++)
            for (l = 0; l < nqd; l++)
                  dofs[l] = m + (-1-ind[l]);
                     (*ldof_sign)[dofs[l]] = -1;
                  dofs[l] = m + ind[l];

         for (l = 0; l < dofs.Size(); l++)
            group_ldof.GetJ()[group_ldof_counter++] = dofs[l];

      group_ldof.GetI()[gr+1] = group_ldof_counter;
void ParFiniteElementSpace::ApplyLDofSigns(Array<int> &dofs) const
   for (int i = 0; i < dofs.Size(); i++)
         if (ldof_sign[-1-dofs[i]] < 0)
            dofs[i] = -1-dofs[i];

         if (ldof_sign[dofs[i]] < 0)
            dofs[i] = -1-dofs[i];
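// Illustrative note (not in the original source): a negative dof encodes the
// same dof with flipped sign via the involution d <-> -1-d (so 5 <-> -6).
// ApplyLDofSigns toggles that encoding exactly for the dofs whose ldof_sign
// entry is -1, keeping shared edge/face dof orientations consistent across
// ranks.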
void ParFiniteElementSpace::ApplyLDofSigns(Table &el_dof) const
   Array<int> all_dofs(el_dof.GetJ(), el_dof.Size_of_connections());
   ApplyLDofSigns(all_dofs);

      ApplyLDofSigns(dofs);

      ApplyLDofSigns(dofs);

      ApplyLDofSigns(dofs);
   auto key = std::make_tuple(is_dg_space, e_ordering, type, m);
   auto itr = L2F.find(key);
   if (itr != L2F.end())
   MFEM_ASSERT(0 <= ei && ei < pmesh->GroupNEdges(group), "invalid edge index");
   pmesh->GroupEdge(group, ei, l_edge, ori);

      for (int i = 0; i < dofs.Size(); i++)
         const int di = dofs[i];
         dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];

   MFEM_ASSERT(0 <= fi && fi < pmesh->GroupNTriangles(group),
               "invalid triangular face index");

      for (int i = 0; i < dofs.Size(); i++)
         const int di = dofs[i];
         dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];

   MFEM_ASSERT(0 <= fi && fi < pmesh->GroupNQuadrilaterals(group),
               "invalid quadrilateral face index");

      for (int i = 0; i < dofs.Size(); i++)
         const int di = dofs[i];
         dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];
void ParFiniteElementSpace::GenerateGlobalOffsets() const
   if (HYPRE_AssumedPartitionCheck())
      GroupTopology &gt = GetGroupTopo();
      int nsize = gt.GetNumNeighbors()-1;
      MPI_Request *requests = new MPI_Request[2*nsize];
      MPI_Status *statuses = new MPI_Status[2*nsize];
      tdof_nb_offsets.SetSize(nsize+1);
      tdof_nb_offsets[0] = tdof_offsets[0];

      int request_counter = 0;
      for (int i = 1; i <= nsize; i++)
         MPI_Irecv(&tdof_nb_offsets[i], 1, HYPRE_MPI_INT,
                   gt.GetNeighborRank(i), 5365, MyComm,
                   &requests[request_counter++]);
      for (int i = 1; i <= nsize; i++)
         MPI_Isend(&tdof_nb_offsets[0], 1, HYPRE_MPI_INT,
                   gt.GetNeighborRank(i), 5365, MyComm,
                   &requests[request_counter++]);
      MPI_Waitall(request_counter, requests, statuses);
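// Illustrative note (not in the original source): with HYPRE's assumed
// partition mode only the local [tdof_offsets[0], tdof_offsets[1]) range is
// known on each rank, so the true-dof offsets of the communication neighbors
// must be exchanged explicitly: every rank sends its own first offset to all
// neighbors and stores theirs in tdof_nb_offsets[1..nsize], which is what the
// symmetric Irecv/Isend pairs above do.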
void ParFiniteElementSpace::Build_Dof_TrueDof_Matrix() const
   HYPRE_Int *i_diag = Memory<HYPRE_Int>(ldof+1);
   HYPRE_Int *j_diag = Memory<HYPRE_Int>(ltdof);

   HYPRE_Int *i_offd = Memory<HYPRE_Int>(ldof+1);
   HYPRE_Int *j_offd = Memory<HYPRE_Int>(ldof-ltdof);

   HYPRE_Int *cmap = Memory<HYPRE_Int>(ldof-ltdof);

   Array<Pair<HYPRE_Int, int> > cmap_j_offd(ldof-ltdof);

   i_diag[0] = i_offd[0] = 0;
   diag_counter = offd_counter = 0;
   for (int i = 0; i < ldof; i++)
         j_diag[diag_counter++] = ltdof;
         cmap_j_offd[offd_counter].two = offd_counter;
      i_diag[i+1] = diag_counter;
      i_offd[i+1] = offd_counter;

   SortPairs<HYPRE_Int, int>(cmap_j_offd, offd_counter);

   for (int i = 0; i < offd_counter; i++)
      cmap[i] = cmap_j_offd[i].one;
      j_offd[cmap_j_offd[i].two] = i;

   P = new HypreParMatrix(MyComm, MyRank, NRanks, row_starts, col_starts,
                          i_diag, j_diag, i_offd, j_offd, cmap, offd_counter);
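// Illustrative note (not in the original source): hypre's ParCSR format
// splits the local rows of P into a "diag" block (columns owned by this rank)
// and an "offd" block (columns owned by other ranks). The offd block stores
// compressed local column indices, and cmap translates them back to global
// columns; sorting cmap_j_offd by global column first makes cmap ascending,
// as hypre requires, and j_offd is then rewritten with each entry's position
// in that sorted order.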
      BuildParallelConformingInterpolation(&P_pc, NULL, P_pc_row_starts,
                                           P_pc_col_starts, NULL, true);
   for (int i = 0; i < ldof_group.Size(); i++)
      if (ldof_ltdof[i] >= 0)

      gc->Create(pNURBSext()->ldof_group);

      GetGroupComm(*gc, 0);

   MFEM_VERIFY(ldof_marker.Size() == GetVSize(), "invalid in/out array");

   gcomm->Bcast(ldof_marker);
      const int *ess_dofs_data = ess_dofs.HostRead();
      Pt->BooleanMult(1, ess_dofs_data, 0, true_ess_dofs2);

      const int *ted = true_ess_dofs.HostRead();
      for (int i = 0; i < true_ess_dofs.Size(); i++)
         if (bool(ted[i]) != bool(true_ess_dofs2[i])) { counter++; }
      MFEM_VERIFY(counter == 0, "internal MFEM error: counter = " << counter);
      return ldof_ltdof[ldof];

   if (GetGroupTopo().IAmMaster(ldof_group[ldof]))
      return ldof_ltdof[ldof];

   MFEM_VERIFY(ldof_ltdof[ldof] >= 0, "ldof " << ldof << " not a true DOF.");

   if (HYPRE_AssumedPartitionCheck())
      return ldof_ltdof[ldof] +
      return ldof_ltdof[ldof] +

      MFEM_ABORT("Not implemented for NC mesh.");

   if (HYPRE_AssumedPartitionCheck())
         return ldof_ltdof[sldof] +
                   ldof_group[sldof])] / vdim;
      return (ldof_ltdof[sldof*vdim] +
              tdof_nb_offsets[GetGroupTopo().GetGroupMaster(

         return ldof_ltdof[sldof] +
                   ldof_group[sldof])] / vdim;
      return (ldof_ltdof[sldof*vdim] +
              tdof_offsets[GetGroupTopo().GetGroupMasterRank(

   return HYPRE_AssumedPartitionCheck() ? dof_offsets[0] : dof_offsets[MyRank];

   return HYPRE_AssumedPartitionCheck() ? tdof_offsets[0] : tdof_offsets[MyRank];
   if (Pconf) { return Pconf; }
   if (num_face_nbrs == 0)

   MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
   MPI_Request *send_requests = requests;
   MPI_Request *recv_requests = requests + num_face_nbrs;
   MPI_Status *statuses = new MPI_Status[num_face_nbrs];

   Table send_nbr_elem_dof;

   for (int fn = 0; fn < num_face_nbrs; fn++)
      for (int i = 0; i < num_my_elems; i++)
         for (int j = 0; j < ldofs.Size(); j++)
            int ldof = (ldofs[j] >= 0 ? ldofs[j] : -1-ldofs[j]);

            if (ldof_marker[ldof] != fn)
               ldof_marker[ldof] = fn;

                MyComm, &send_requests[fn]);
                MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);
   MPI_Waitall(num_face_nbrs, send_requests, statuses);

   int *send_I = send_nbr_elem_dof.GetI();
   for (int fn = 0; fn < num_face_nbrs; fn++)
      MPI_Isend(send_I + send_el_off[fn], send_el_off[fn+1] - send_el_off[fn],
                MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
      MPI_Irecv(recv_I + recv_el_off[fn], recv_el_off[fn+1] - recv_el_off[fn],
                MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, send_requests, statuses);
   send_nbr_elem_dof.MakeJ();

   for (int fn = 0; fn < num_face_nbrs; fn++)
      for (int i = 0; i < num_my_elems; i++)
         for (int j = 0; j < ldofs.Size(); j++)
            int ldof = (ldofs[j] >= 0 ? ldofs[j] : -1-ldofs[j]);

            if (ldof_marker[ldof] != fn)
               ldof_marker[ldof] = fn;

            send_el_off[fn] + i, ldofs, ldofs.Size());

   int *send_J = send_nbr_elem_dof.GetJ();
   for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)
      int j_end = send_I[send_el_off[fn+1]];

      for (int i = 0; i < num_ldofs; i++)
         int ldof = (ldofs[i] >= 0 ? ldofs[i] : -1-ldofs[i]);
         ldof_marker[ldof] = i;

      for ( ; j < j_end; j++)
         int ldof = (send_J[j] >= 0 ? send_J[j] : -1-send_J[j]);
         send_J[j] = (send_J[j] >= 0 ? ldof_marker[ldof] : -1-ldof_marker[ldof]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   for (int fn = 0; fn < num_face_nbrs; fn++)
      MPI_Isend(send_J + send_I[send_el_off[fn]],
                send_I[send_el_off[fn+1]] - send_I[send_el_off[fn]],
                MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
      MPI_Irecv(recv_J + recv_I[recv_el_off[fn]],
                recv_I[recv_el_off[fn+1]] - recv_I[recv_el_off[fn]],
                MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)
      int j_end = recv_I[recv_el_off[fn+1]];
      for ( ; j < j_end; j++)

   MPI_Waitall(num_face_nbrs, send_requests, statuses);

   for (int fn = 0; fn < num_face_nbrs; fn++)
                MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
                MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);
   MPI_Waitall(num_face_nbrs, send_requests, statuses);

   for (int fn = 0; fn < num_face_nbrs; fn++)
      MPI_Isend(&my_dof_offset, 1, HYPRE_MPI_INT, nbr_rank, tag,
                MyComm, &send_requests[fn]);
      MPI_Irecv(&dof_face_nbr_offsets[fn], 1, HYPRE_MPI_INT, nbr_rank, tag,
                MyComm, &recv_requests[fn]);

   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)

   MPI_Waitall(num_face_nbrs, send_requests, statuses);
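// Illustrative note (not in the original source): every payload exchanged
// above (element counts, dof-table row sizes, dof indices, global offsets)
// follows the same nonblocking pattern: post one MPI_Isend and one MPI_Irecv
// per face neighbor, MPI_Waitall on the receive requests before reading the
// incoming data, and MPI_Waitall on the send requests before reusing the
// send buffers for the next payload.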
   int el1, el2, inf1, inf2;

      Ordering::DofsToVDofs<Ordering::byNODES>(nd/vdim, vdim, vdofs);

      for (int j = 0; j < vdofs.Size(); j++)
         const int ldof = vdofs[j];
         vdofs[j] = (ldof >= 0) ? vol_vdofs[ldof] : -1-vol_vdofs[-1-ldof];

      mfem_error("ParFiniteElementSpace::GetFaceNbrFE"
                 " does not support NURBS!");

   hypre_ParCSRMatrix *csrP = (hypre_ParCSRMatrix*)(*P);
   hypre_ParCSRMatrixOwnsRowStarts(csrP) = 1;
   hypre_ParCSRMatrixOwnsColStarts(csrP) = 1;
void ParFiniteElementSpace::ConstructTrueDofs()
   GetGroupComm(*gcomm, 1, &ldof_sign);

   for (gr = 1; gr < group_ldof.Size(); gr++)
      const int *ldofs = group_ldof.GetRow(gr);
      const int nldofs = group_ldof.RowSize(gr);
      for (i = 0; i < nldofs; i++)
         ldof_group[ldofs[i]] = gr;

         for (i = 0; i < nldofs; i++)
            ldof_ltdof[ldofs[i]] = -2;

   for (i = 0; i < n; i++)
      if (ldof_ltdof[i] == -1)
         ldof_ltdof[i] = ltdof_size++;

   gcomm->Bcast(ldof_ltdof);
void ParFiniteElementSpace::ConstructTrueNURBSDofs()
   GroupTopology &gt = pNURBSext()->gtopo;
   gcomm = new GroupCommunicator(gt);

      ldof_group.MakeRef(pNURBSext()->ldof_group);

      const int *scalar_ldof_group = pNURBSext()->ldof_group;
      for (int i = 0; i < n; i++)
         ldof_group[i] = scalar_ldof_group[VDofToDof(i)];

   gcomm->Create(ldof_group);

   for (int i = 0; i < n; i++)
      if (gt.IAmMaster(ldof_group[i]))
         ldof_ltdof[i] = ltdof_size;

   gcomm->Bcast(ldof_ltdof);
void ParFiniteElementSpace::GetGhostVertexDofs(const MeshId &id,
                                               Array<int> &dofs) const
   for (int j = 0; j < nv; j++)
      dofs[j] = ndofs + nv*id.index + j;
void ParFiniteElementSpace::GetGhostEdgeDofs(const MeshId &edge_id,
                                             Array<int> &dofs) const
   dofs.SetSize(2*nv + ne);

   for (int i = 0; i < 2; i++)
      int k = (V[i] < ghost) ? V[i]*nv : (ndofs + (V[i] - ghost)*nv);
      for (int j = 0; j < nv; j++)
         dofs[i*nv + j] = k++;

   int k = ndofs + ngvdofs + (edge_id.index - pncmesh->GetNEdges())*ne;
   for (int j = 0; j < ne; j++)
      dofs[2*nv + j] = k++;
void ParFiniteElementSpace::GetGhostFaceDofs(const MeshId &face_id,
                                             Array<int> &dofs) const
   int nfv, V[4], E[4], Eo[4];

   int nf = (nfv == 3) ? nf_tri : nf_quad;

   dofs.SetSize(nfv*(nv + ne) + nf);

   for (int i = 0; i < nfv; i++)
      int first = (V[i] < ghost) ? V[i]*nv : (ndofs + (V[i] - ghost)*nv);
      for (int j = 0; j < nv; j++)
         dofs[offset++] = first + j;

   for (int i = 0; i < nfv; i++)
      int first = (E[i] < ghost) ? nvdofs + E[i]*ne
                                 : ndofs + ngvdofs + (E[i] - ghost)*ne;
      for (int j = 0; j < ne; j++)
         dofs[offset++] = (ind[j] >= 0) ? (first + ind[j])
                                        : (-1 - (first + (-1 - ind[j])));

   const int ghost_face_index = face_id.index - pncmesh->GetNFaces();
   int first = ndofs + ngvdofs + ngedofs + nf_quad*ghost_face_index;

   for (int j = 0; j < nf; j++)
      dofs[offset++] = first + j;
void ParFiniteElementSpace::GetGhostDofs(int entity, const MeshId &id,
                                         Array<int> &dofs) const
      case 0: GetGhostVertexDofs(id, dofs); break;
      case 1: GetGhostEdgeDofs(id, dofs); break;
      case 2: GetGhostFaceDofs(id, dofs); break;
void ParFiniteElementSpace::GetBareDofs(int entity, int index,
                                        Array<int> &dofs) const
   int ned, ghost, first;

         first = (index < ghost)
               : ndofs + (index - ghost)*ned;

         first = (index < ghost)
               : ndofs + ngvdofs + (index - ghost)*ned;

         first = ndofs + ngvdofs + ngedofs + index*stride;

   for (int i = 0; i < ned; i++)
      dofs[i] = first + i;
int ParFiniteElementSpace::PackDof(int entity, int index, int edof) const
         return (index < ghost)
              : ndofs + (index - ghost)*ned + edof;

         return (index < ghost)
              ? nvdofs + index*ned + edof
              : ndofs + ngvdofs + (index - ghost)*ned + edof;

         return ndofs + ngvdofs + ngedofs + index*stride + edof;
static int bisect(int* array, int size, int value)
   int* end = array + size;
   int* pos = std::upper_bound(array, end, value);
   MFEM_VERIFY(pos != end, "value not found");
void ParFiniteElementSpace::UnpackDof(int dof,
                                      int &entity, int &index, int &edof) const
   MFEM_VERIFY(dof >= 0, "");

         entity = 0, index = dof / nv, edof = dof % nv;

         entity = 1, index = dof / ne, edof = dof % ne;

            edof = dof - fdofs[index];

            index = dof / nf, edof = dof % nf;

         MFEM_ABORT("Cannot unpack internal DOF");

         entity = 0, index = pncmesh->GetNVertices() + dof / nv, edof = dof % nv;

         entity = 1, index = pncmesh->GetNEdges() + dof / ne, edof = dof % ne;

         index = pncmesh->GetNFaces() + dof / stride, edof = dof % stride;

      MFEM_ABORT("Out of range DOF.");
struct PMatrixElement
   HYPRE_Int column, stride;

   PMatrixElement(HYPRE_Int col = 0, HYPRE_Int str = 0, double val = 0)
      : column(col), stride(str), value(val) {}

   bool operator<(const PMatrixElement &other) const
   { return column < other.column; }

   typedef std::vector<PMatrixElement> List;
   PMatrixElement::List elems;

   void AddRow(const PMatrixRow &other, double coef)
      elems.reserve(elems.size() + other.elems.size());
      for (unsigned i = 0; i < other.elems.size(); i++)
         const PMatrixElement &oei = other.elems[i];
            PMatrixElement(oei.column, oei.stride, coef * oei.value));

      if (!elems.size()) { return; }
      std::sort(elems.begin(), elems.end());

      for (unsigned i = 1; i < elems.size(); i++)
         if (elems[j].column == elems[i].column)
            elems[j].value += elems[i].value;
            elems[++j] = elems[i];

   void write(std::ostream &os, double sign) const
      bin_io::write<int>(os, elems.size());
      for (unsigned i = 0; i < elems.size(); i++)
         const PMatrixElement &e = elems[i];
         bin_io::write<HYPRE_Int>(os, e.column);
         bin_io::write<int>(os, e.stride);
         bin_io::write<double>(os, e.value * sign);

   void read(std::istream &is, double sign)
      elems.resize(bin_io::read<int>(is));
      for (unsigned i = 0; i < elems.size(); i++)
         PMatrixElement &e = elems[i];
         e.column = bin_io::read<HYPRE_Int>(is);
         e.stride = bin_io::read<int>(is);
         e.value = bin_io::read<double>(is) * sign;
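// Illustrative note (not in the original source): after AddRow concatenates
// scaled copies of other rows, the sort-and-merge loop above collapses
// duplicate columns, e.g.
//
//    {(col 3, 0.5), (col 1, 1.0), (col 3, 0.25)}
//    -> sorted {(1, 1.0), (3, 0.5), (3, 0.25)} -> merged {(1, 1.0), (3, 0.75)}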
class NeighborRowMessage : public VarMessage<314>
   typedef NCMesh::MeshId MeshId;

      int entity, index, edof;

      RowInfo(int ent, int idx, int edof, GroupId grp, const PMatrixRow &row)
         : entity(ent), index(idx), edof(edof), group(grp), row(row) {}

      RowInfo(int ent, int idx, int edof, GroupId grp)
         : entity(ent), index(idx), edof(edof), group(grp) {}

      typedef std::vector<RowInfo> List;

   NeighborRowMessage() : pncmesh(NULL) {}

   void AddRow(int entity, int index, int edof, GroupId group,
               const PMatrixRow &row)
      rows.push_back(RowInfo(entity, index, edof, group, row));

   const RowInfo::List& GetRows() const { return rows; }

   void SetNCMesh(ParNCMesh* pnc) { pncmesh = pnc; }
   void SetFEC(const FiniteElementCollection* fec) { this->fec = fec; }

   typedef std::map<int, NeighborRowMessage> Map;

   const FiniteElementCollection* fec;

   virtual void Encode(int rank);
   virtual void Decode(int);
void NeighborRowMessage::Encode(int rank)
   std::ostringstream stream;

   Array<MeshId> ent_ids[3];
   Array<GroupId> group_ids[3];
   Array<int> row_idx[3];

   for (unsigned i = 0; i < rows.size(); i++)
      const RowInfo &ri = rows[i];
      const MeshId &id = pncmesh->GetNCList(ri.entity).LookUp(ri.index);
      ent_ids[ri.entity].Append(id);
      row_idx[ri.entity].Append(i);
      group_ids[ri.entity].Append(ri.group);

   Array<GroupId> all_group_ids;
   all_group_ids.Reserve(rows.size());
   for (int i = 0; i < 3; i++)
      all_group_ids.Append(group_ids[i]);

   pncmesh->AdjustMeshIds(ent_ids, rank);
   pncmesh->EncodeMeshIds(stream, ent_ids);
   pncmesh->EncodeGroups(stream, all_group_ids);

   for (int ent = 0; ent < 3; ent++)
      const Array<MeshId> &ids = ent_ids[ent];
      for (int i = 0; i < ids.Size(); i++)
         const MeshId &id = ids[i];
         const RowInfo &ri = rows[row_idx[ent][i]];
         MFEM_ASSERT(ent == ri.entity, "");

#ifdef MFEM_DEBUG_PMATRIX
         mfem::out << "Rank " << pncmesh->MyRank << " sending to " << rank
                   << ": ent " << ri.entity << ", index " << ri.index
                   << ", edof " << ri.edof << " (id " << id.element << "/"
                   << int(id.local) << ")" << std::endl;
#endif

            int eo = pncmesh->GetEdgeNCOrientation(id);
            if ((edof = ind[edof]) < 0)

         bin_io::write<int>(stream, edof);
         ri.row.write(stream, s);

   stream.str().swap(data);
void NeighborRowMessage::Decode(int rank)
   std::istringstream stream(data);

   Array<MeshId> ent_ids[3];
   Array<GroupId> group_ids;

   pncmesh->DecodeMeshIds(stream, ent_ids);
   pncmesh->DecodeGroups(stream, group_ids);

   int nrows = ent_ids[0].Size() + ent_ids[1].Size() + ent_ids[2].Size();
   MFEM_ASSERT(nrows == group_ids.Size(), "");

   rows.reserve(nrows);

   for (int ent = 0, gi = 0; ent < 3; ent++)
      const Array<MeshId> &ids = ent_ids[ent];
      for (int i = 0; i < ids.Size(); i++)
         const MeshId &id = ids[i];
         int edof = bin_io::read<int>(stream);

         const int *ind = NULL;
            int eo = pncmesh->GetEdgeNCOrientation(id);
            int fo = pncmesh->GetFaceOrientation(id.index);
            ind = fec->DofOrderForOrientation(geom, fo);

         if (ind && (edof = ind[edof]) < 0)

         rows.push_back(RowInfo(ent, id.index, edof, group_ids[gi++]));
         rows.back().row.read(stream, s);

#ifdef MFEM_DEBUG_PMATRIX
         mfem::out << "Rank " << pncmesh->MyRank << " receiving from " << rank
                   << ": ent " << rows.back().entity << ", index "
                   << rows.back().index << ", edof " << rows.back().edof
                   << std::endl;
#endif
ParFiniteElementSpace::ScheduleSendRow(const PMatrixRow &row, int dof,
                                       NeighborRowMessage::Map &send_msg) const
   UnpackDof(dof, ent, idx, edof);

   for (unsigned i = 0; i < group.size(); i++)
      int rank = group[i];
         NeighborRowMessage &msg = send_msg[rank];
         msg.AddRow(ent, idx, edof, group_id, row);
         msg.SetNCMesh(pncmesh);
#ifdef MFEM_PMATRIX_STATS
#endif
void ParFiniteElementSpace::ForwardRow(const PMatrixRow &row, int dof,
                                       GroupId group_sent_id, GroupId group_id,
                                       NeighborRowMessage::Map &send_msg) const
   UnpackDof(dof, ent, idx, edof);

   for (unsigned i = 0; i < group.size(); i++)
      int rank = group[i];
      if (rank != MyRank && !pncmesh->GroupContains(group_sent_id, rank))
         NeighborRowMessage &msg = send_msg[rank];
         GroupId invalid = -1; // prevent the row from being forwarded again
         msg.AddRow(ent, idx, edof, invalid, row);
         msg.SetNCMesh(pncmesh);
#ifdef MFEM_PMATRIX_STATS
#endif
#ifdef MFEM_DEBUG_PMATRIX
                   << rank << ": ent " << ent << ", index " << idx
                   << ", edof " << edof << std::endl;
#endif
#ifdef MFEM_DEBUG_PMATRIX
void ParFiniteElementSpace
::DebugDumpDOFs(std::ostream &os,
                const SparseMatrix &deps,
                const Array<GroupId> &dof_group,
                const Array<GroupId> &dof_owner,
                const Array<bool> &finalized) const
   for (int i = 0; i < dof_group.Size(); i++)
         UnpackDof(i, ent, idx, edof);

         os << edof << " @ ";
         if (i >= ndofs) { os << "ghost "; }
            case 0: os << "vertex "; break;
            case 1: os << "edge "; break;
            default: os << "face "; break;

         if (i < deps.Height() && deps.RowSize(i))
            os << "depends on ";
            for (int j = 0; j < deps.RowSize(i); j++)
               os << deps.GetRowColumns(i)[j] << " ("
                  << deps.GetRowEntries(i)[j] << ")";
               if (j < deps.RowSize(i)-1) { os << ", "; }

         os << "group " << dof_group[i] << " (";
         for (unsigned j = 0; j < g.size(); j++)
            if (j) { os << ", "; }

         os << "), owner " << dof_owner[i] << " (rank "
            << pncmesh->GetGroup(dof_owner[i])[0] << "); "
            << (finalized[i] ? "finalized" : "NOT finalized");
#endif
int ParFiniteElementSpace
::BuildParallelConformingInterpolation(HypreParMatrix **P, SparseMatrix **R,
                                       Array<HYPRE_Int> &dof_offs,
                                       Array<HYPRE_Int> &tdof_offs,
                                       Array<int> *dof_tdof,

#ifdef MFEM_PMATRIX_STATS
   n_msgs_sent = n_msgs_recv = 0;
   n_rows_sent = n_rows_recv = n_rows_fwd = 0;
#endif

   int total_dofs = ndofs + ngdofs;
   SparseMatrix deps(ndofs, total_dofs);

   if (!dg && !partial)
      Array<int> master_dofs, slave_dofs;

      for (int entity = 0; entity <= 2; entity++)
         const NCMesh::NCList &list = pncmesh->GetNCList(entity);
         if (!list.masters.Size()) { continue; }

         IsoparametricTransformation T;

         for (int mi = 0; mi < list.masters.Size(); mi++)
            const NCMesh::Master &mf = list.masters[mi];

            pncmesh->IsGhost(entity, mf.index)
            ? GetGhostDofs(entity, mf, master_dofs)

            if (!master_dofs.Size()) { continue; }

            if (!fe) { continue; }

               default: MFEM_ABORT("unsupported geometry");

            for (int si = mf.slaves_begin; si < mf.slaves_end; si++)
               const NCMesh::Slave &sf = list.slaves[si];
               if (pncmesh->IsGhost(entity, sf.index)) { continue; }

               if (!slave_dofs.Size()) { continue; }

               list.OrientedPointMatrix(sf, T.GetPointMat());
               fe->GetLocalInterpolation(T, I);

   Array<GroupId> dof_group(total_dofs);
   Array<GroupId> dof_owner(total_dofs);

   for (int entity = 0; entity <= 2; entity++)
      const NCMesh::NCList &list = pncmesh->GetNCList(entity);
      int lsize[3] =
      { list.conforming.Size(), list.masters.Size(), list.slaves.Size() };

      for (int l = 0; l < 3; l++)
         for (int i = 0; i < lsize[l]; i++)
            const MeshId &id =
               (l == 0) ? list.conforming[i] :
               (l == 1) ? (const MeshId&) list.masters[i]
                        : (const MeshId&) list.slaves[i];

            if (id.index < 0) { continue; }

            GetBareDofs(entity, id.index, dofs);

            for (int j = 0; j < dofs.Size(); j++)
               dof_owner[dof] = owner;
               dof_group[dof] = group;

   Array<bool> finalized(total_dofs);

   int num_true_dofs = 0;
   for (int i = 0; i < ndofs; i++)
      if (dof_owner[i] == 0 && deps.RowSize(i) == 0)
         finalized[i] = true;

   HYPRE_Int loc_sizes[2] = { ndofs*vdim, num_true_dofs*vdim };
   Array<HYPRE_Int>* offsets[2] = { &dof_offs, &tdof_offs };

   HYPRE_Int my_tdof_offset =
      tdof_offs[HYPRE_AssumedPartitionCheck() ? 0 : MyRank];

      *R = new SparseMatrix(num_true_dofs*vdim, ndofs*vdim);

      dof_tdof->SetSize(ndofs*vdim);

   std::vector<PMatrixRow> pmatrix(total_dofs);

   int vdim_factor = bynodes ? 1 : vdim;
   int dof_stride = bynodes ? ndofs : 1;
   int tdof_stride = bynodes ? num_true_dofs : 1;
   std::list<NeighborRowMessage::Map> send_msg;
   send_msg.push_back(NeighborRowMessage::Map());

   for (int dof = 0, tdof = 0; dof < ndofs; dof++)
      pmatrix[dof].elems.push_back(
         PMatrixElement(my_tdof_offset + vdim_factor*tdof, tdof_stride, 1.));

      if (dof_group[dof] != 0)
         ScheduleSendRow(pmatrix[dof], dof, dof_group[dof], send_msg.back());

      for (int vd = 0; vd < vdim; vd++)
         int vdof = dof*vdim_factor + vd*dof_stride;
         int vtdof = tdof*vdim_factor + vd*tdof_stride;

         if (R) { (*R)->Add(vtdof, vdof, 1.0); }
         if (dof_tdof) { (*dof_tdof)[vdof] = vtdof; }

   NeighborRowMessage::IsendAll(send_msg.back(), MyComm);
#ifdef MFEM_PMATRIX_STATS
   n_msgs_sent += send_msg.back().size();
#endif

   if (R) { (*R)->Finalize(); }

   NeighborRowMessage recv_msg;
   recv_msg.SetNCMesh(pncmesh);
   recv_msg.SetFEC(fec);

   int num_finalized = num_true_dofs;
   buffer.elems.reserve(1024);

   while (num_finalized < ndofs)
      if (send_msg.back().size())
         send_msg.push_back(NeighborRowMessage::Map());

      while (NeighborRowMessage::IProbe(rank, size, MyComm))
         recv_msg.Recv(rank, size, MyComm);
#ifdef MFEM_PMATRIX_STATS
         n_rows_recv += recv_msg.GetRows().size();
#endif

         const NeighborRowMessage::RowInfo::List &rows = recv_msg.GetRows();
         for (unsigned i = 0; i < rows.size(); i++)
            const NeighborRowMessage::RowInfo &ri = rows[i];
            int dof = PackDof(ri.entity, ri.index, ri.edof);
            pmatrix[dof] = ri.row;

            if (dof < ndofs && !finalized[dof]) { num_finalized++; }
            finalized[dof] = true;

            if (ri.group >= 0 && dof_group[dof] != ri.group)
               ForwardRow(ri.row, dof, ri.group, dof_group[dof], send_msg.back());

      for (int dof = 0; dof < ndofs; dof++)
         if (finalized[dof]) { continue; }

         bool owned = (dof_owner[dof] == 0);
         bool shared = (dof_group[dof] != 0);

            const int* dep_col = deps.GetRowColumns(dof);
            const double* dep_coef = deps.GetRowEntries(dof);
            int num_dep = deps.RowSize(dof);

            buffer.elems.clear();
            for (int j = 0; j < num_dep; j++)
               buffer.AddRow(pmatrix[dep_col[j]], dep_coef[j]);
            pmatrix[dof] = buffer;

            finalized[dof] = true;

               ScheduleSendRow(pmatrix[dof], dof, dof_group[dof],

#ifdef MFEM_DEBUG_PMATRIX
#endif

      NeighborRowMessage::IsendAll(send_msg.back(), MyComm);
#ifdef MFEM_PMATRIX_STATS
      n_msgs_sent += send_msg.back().size();
#endif

   *P = MakeVDimHypreMatrix(pmatrix, ndofs, num_true_dofs,
                            dof_offs, tdof_offs);

   while (NeighborRowMessage::IProbe(rank, size, MyComm))
      recv_msg.RecvDrop(rank, size, MyComm);

   for (std::list<NeighborRowMessage::Map>::iterator
        it = send_msg.begin(); it != send_msg.end(); ++it)
      NeighborRowMessage::WaitAllSent(*it);

#ifdef MFEM_PMATRIX_STATS
   int n_rounds = send_msg.size();
   int glob_rounds, glob_msgs_sent, glob_msgs_recv;
   int glob_rows_sent, glob_rows_recv, glob_rows_fwd;

   MPI_Reduce(&n_rounds, &glob_rounds, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_msgs_sent, &glob_msgs_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_msgs_recv, &glob_msgs_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_rows_sent, &glob_rows_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_rows_recv, &glob_rows_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
   MPI_Reduce(&n_rows_fwd, &glob_rows_fwd, 1, MPI_INT, MPI_SUM, 0, MyComm);

      mfem::out << "P matrix stats (avg per rank): "
                << double(glob_rounds)/NRanks << " rounds, "
                << double(glob_msgs_sent)/NRanks << " msgs sent, "
                << double(glob_msgs_recv)/NRanks << " msgs recv, "
                << double(glob_rows_sent)/NRanks << " rows sent, "
                << double(glob_rows_recv)/NRanks << " rows recv, "
                << double(glob_rows_fwd)/NRanks << " rows forwarded."
                << std::endl;
#endif

   return num_true_dofs*vdim;
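// Illustrative note (not in the original source): the loop structure above is
// a wavefront algorithm. Round 0 finalizes all owned, dependency-free dofs
// (the true dofs, which get identity rows) and sends their rows to sharing
// ranks; each later round receives finished rows, substitutes them into the
// local dependency rows (buffer.AddRow over the 'deps' matrix), finalizes
// whatever became computable, and forwards newly finished rows to ranks that
// have not seen them. It terminates once every local dof row is expressed as
// a linear combination of true-dof columns.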
HypreParMatrix* ParFiniteElementSpace
::MakeVDimHypreMatrix(const std::vector<PMatrixRow> &rows,
                      int local_rows, int local_cols,
                      Array<HYPRE_Int> &row_starts,
                      Array<HYPRE_Int> &col_starts) const
   bool assumed = HYPRE_AssumedPartitionCheck();

   HYPRE_Int first_col = col_starts[assumed ? 0 : MyRank];
   HYPRE_Int next_col = col_starts[assumed ? 1 : MyRank+1];

   HYPRE_Int nnz_diag = 0, nnz_offd = 0;
   std::map<HYPRE_Int, int> col_map;
   for (int i = 0; i < local_rows; i++)
      for (unsigned j = 0; j < rows[i].elems.size(); j++)
         const PMatrixElement &elem = rows[i].elems[j];
         HYPRE_Int col = elem.column;
         if (col >= first_col && col < next_col)
            for (int vd = 0; vd < vdim; vd++)

   HYPRE_Int *cmap = Memory<HYPRE_Int>(col_map.size());
   for (std::map<HYPRE_Int, int>::iterator
        it = col_map.begin(); it != col_map.end(); ++it)
      cmap[offd_col] = it->first;
      it->second = offd_col++;

   HYPRE_Int *I_diag = Memory<HYPRE_Int>(vdim*local_rows + 1);
   HYPRE_Int *I_offd = Memory<HYPRE_Int>(vdim*local_rows + 1);

   HYPRE_Int *J_diag = Memory<HYPRE_Int>(nnz_diag);
   HYPRE_Int *J_offd = Memory<HYPRE_Int>(nnz_offd);

   double *A_diag = Memory<double>(nnz_diag);
   double *A_offd = Memory<double>(nnz_offd);

   int vdim1 = bynodes ? vdim : 1;
   int vdim2 = bynodes ? 1 : vdim;
   int vdim_offset = bynodes ? local_cols : 1;

   nnz_diag = nnz_offd = 0;
   for (int vd1 = 0; vd1 < vdim1; vd1++)
      for (int i = 0; i < local_rows; i++)
         for (int vd2 = 0; vd2 < vdim2; vd2++)
            I_diag[vrow] = nnz_diag;
            I_offd[vrow++] = nnz_offd;

            int vd = bynodes ? vd1 : vd2;
            for (unsigned j = 0; j < rows[i].elems.size(); j++)
               const PMatrixElement &elem = rows[i].elems[j];
               if (elem.column >= first_col && elem.column < next_col)
                  J_diag[nnz_diag] = elem.column + vd*vdim_offset - first_col;
                  A_diag[nnz_diag++] = elem.value;

                  J_offd[nnz_offd] = col_map[elem.column + vd*elem.stride];
                  A_offd[nnz_offd++] = elem.value;

   MFEM_ASSERT(vrow == vdim*local_rows, "");
   I_diag[vrow] = nnz_diag;
   I_offd[vrow] = nnz_offd;

   return new HypreParMatrix(MyComm,
                             row_starts.Last(), col_starts.Last(),
                             row_starts.GetData(), col_starts.GetData(),
                             I_diag, J_diag, A_diag,
                             I_offd, J_offd, A_offd,
                             col_map.size(), cmap);
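// Illustrative note (not in the original source): each scalar PMatrixRow is
// emitted vdim times. With Ordering::byNODES the outer loop runs over vector
// components (vdim1 = vdim, vdim2 = 1) so the copies of one scalar row are
// local_rows apart; with byVDIM the inner loop runs over components
// (vdim1 = 1, vdim2 = vdim) and the copies are adjacent. On the column side,
// the stride stored per element plays the analogous role, shifting a scalar
// column to its vd-th copy.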
static HYPRE_Int* make_i_array(int nrows)
   HYPRE_Int *I = Memory<HYPRE_Int>(nrows+1);
   for (int i = 0; i <= nrows; i++) { I[i] = -1; }

static HYPRE_Int* make_j_array(HYPRE_Int* I, int nrows)
   for (int i = 0; i < nrows; i++)
      if (I[i] >= 0) { nnz++; }
   HYPRE_Int *J = Memory<HYPRE_Int>(nnz);

   for (int i = 0, k = 0; i <= nrows; i++)
      HYPRE_Int col = I[i];
      if (col >= 0) { J[k++] = col; }
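// Illustrative note (not in the original source): these helpers build a CSR
// structure with at most one nonzero per row. make_i_array fills I with -1
// ("no entry"), and callers then store the column index of row i directly in
// I[i]. make_j_array compacts those columns into J; I is turned into the
// usual prefix row offsets (the in-place rewrite happens in lines elided
// above). Roughly:
//
//    I = {7, -1, 3, -1}  ->  I = {0, 1, 1, 2, 2},  J = {7, 3}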
ParFiniteElementSpace::RebalanceMatrix(int old_ndofs,
                                       const Table* old_elem_dof)
   MFEM_VERIFY(Nonconforming(), "Only supported for nonconforming meshes.");
   MFEM_VERIFY(old_dof_offsets.Size(), "ParFiniteElementSpace::Update needs to "
               "be called before ParFiniteElementSpace::RebalanceMatrix");

   HYPRE_Int old_offset = HYPRE_AssumedPartitionCheck()
                          ? old_dof_offsets[0] : old_dof_offsets[MyRank];

   ParNCMesh* pncmesh = pmesh->pncmesh;

   const Array<int> &old_index = pncmesh->GetRebalanceOldIndex();
   MFEM_VERIFY(old_index.Size() == pmesh->GetNE(),
               "Mesh::Rebalance was not called before "
               "ParFiniteElementSpace::RebalanceMatrix");

   HYPRE_Int* i_diag = make_i_array(vsize);
   for (int i = 0; i < pmesh->GetNE(); i++)
      if (old_index[i] >= 0)
         const int* old_dofs = old_elem_dof->GetRow(old_index[i]);

         for (int vd = 0; vd < vdim; vd++)
            for (int j = 0; j < dofs.Size(); j++)
               if (row < 0) { row = -1 - row; }

               int col = DofToVDof(old_dofs[j], vd, old_ndofs);
               if (col < 0) { col = -1 - col; }

   HYPRE_Int* j_diag = make_j_array(i_diag, vsize);

   Array<int> new_elements;
   Array<long> old_remote_dofs;
   pncmesh->RecvRebalanceDofs(new_elements, old_remote_dofs);

   HYPRE_Int* i_offd = make_i_array(vsize);
   for (int i = 0, pos = 0; i < new_elements.Size(); i++)
      const long* old_dofs = &old_remote_dofs[pos];
      pos += dofs.Size() * vdim;

      for (int vd = 0; vd < vdim; vd++)
         for (int j = 0; j < dofs.Size(); j++)
            if (row < 0) { row = -1 - row; }

            if (i_diag[row] == i_diag[row+1]) // diag row empty?
               i_offd[row] = old_dofs[j + vd * dofs.Size()];

   HYPRE_Int* j_offd = make_j_array(i_offd, vsize);

   int offd_cols = i_offd[vsize];
   Array<Pair<HYPRE_Int, int> > cmap_offd(offd_cols);
   for (int i = 0; i < offd_cols; i++)
      cmap_offd[i].one = j_offd[i];
      cmap_offd[i].two = i;

   SortPairs<HYPRE_Int, int>(cmap_offd, offd_cols);

   HYPRE_Int* cmap = Memory<HYPRE_Int>(offd_cols);
   for (int i = 0; i < offd_cols; i++)
      cmap[i] = cmap_offd[i].one;
      j_offd[cmap_offd[i].two] = i;

   M = new HypreParMatrix(MyComm, MyRank, NRanks, dof_offsets, old_dof_offsets,
                          i_diag, j_diag, i_offd, j_offd, cmap, offd_cols);
struct DerefDofMessage
   std::vector<HYPRE_Int> dofs;
   MPI_Request request;
ParFiniteElementSpace::ParallelDerefinementMatrix(int old_ndofs,
                                                  const Table* old_elem_dof)
   int nrk = HYPRE_AssumedPartitionCheck() ? 2 : NRanks;

   MFEM_VERIFY(Nonconforming(), "Not implemented for conforming meshes.");
   MFEM_VERIFY(old_dof_offsets[nrk], "Missing previous (finer) space.");

#if 0 // check no longer seems to work with NC tet refinement
   MFEM_VERIFY(dof_offsets[nrk] <= old_dof_offsets[nrk],
               "Previous space is not finer.");
#endif

   Mesh::GeometryList elem_geoms(*mesh);

   Array<int> dofs, old_dofs, old_vdofs;

   ParNCMesh* pncmesh = pmesh->pncmesh;

   for (int i = 0; i < elem_geoms.Size(); i++)

   const CoarseFineTransformations &dtrans = pncmesh->GetDerefinementTransforms();
   const Array<int> &old_ranks = pncmesh->GetDerefineOldRanks();

   std::map<int, DerefDofMessage> messages;

   HYPRE_Int old_offset = HYPRE_AssumedPartitionCheck()
                          ? old_dof_offsets[0] : old_dof_offsets[MyRank];

   for (int k = 0; k < dtrans.embeddings.Size(); k++)
      const Embedding &emb = dtrans.embeddings[k];

      int fine_rank = old_ranks[k];
      int coarse_rank = (emb.parent < 0) ? (-1 - emb.parent)
                        : pncmesh->ElementRank(emb.parent);

      if (coarse_rank != MyRank && fine_rank == MyRank)
         old_elem_dof->GetRow(k, dofs);

         DerefDofMessage &msg = messages[k];
         msg.dofs.resize(dofs.Size());
         for (int i = 0; i < dofs.Size(); i++)
            msg.dofs[i] = old_offset + dofs[i];

         MPI_Isend(&msg.dofs[0], msg.dofs.size(), HYPRE_MPI_INT,
                   coarse_rank, 291, MyComm, &msg.request);
      else if (coarse_rank == MyRank && fine_rank != MyRank)
         MFEM_ASSERT(emb.parent >= 0, "");

         DerefDofMessage &msg = messages[k];
         msg.dofs.resize(ldof[geom]*vdim);

         MPI_Irecv(&msg.dofs[0], ldof[geom]*vdim, HYPRE_MPI_INT,
                   fine_rank, 291, MyComm, &msg.request);

   for (int i = 0; i < elem_geoms.Size(); i++)

   SparseMatrix *diag = new SparseMatrix(ndofs*vdim, old_ndofs*vdim);

   Array<char> mark(diag->Height());

   for (int k = 0; k < dtrans.embeddings.Size(); k++)
      const Embedding &emb = dtrans.embeddings[k];
      if (emb.parent < 0) { continue; }

      int coarse_rank = pncmesh->ElementRank(emb.parent);
      int fine_rank = old_ranks[k];

      if (coarse_rank == MyRank && fine_rank == MyRank)
         DenseMatrix &lR = localR[geom](emb.matrix);

         old_elem_dof->GetRow(k, old_dofs);

         for (int vd = 0; vd < vdim; vd++)
            old_dofs.Copy(old_vdofs);

            for (int i = 0; i < lR.Height(); i++)
               if (!std::isfinite(lR(i, 0))) { continue; }

               int m = (r >= 0) ? r : (-1 - r);
                  diag->SetRow(r, old_vdofs, row);

   for (auto it = messages.begin(); it != messages.end(); ++it)
      MPI_Wait(&it->second.request, MPI_STATUS_IGNORE);

   SparseMatrix *offd = new SparseMatrix(ndofs*vdim, 1);

   std::map<HYPRE_Int, int> col_map;
   for (int k = 0; k < dtrans.embeddings.Size(); k++)
      const Embedding &emb = dtrans.embeddings[k];
      if (emb.parent < 0) { continue; }

      int coarse_rank = pncmesh->ElementRank(emb.parent);
      int fine_rank = old_ranks[k];

      if (coarse_rank == MyRank && fine_rank != MyRank)
         DenseMatrix &lR = localR[geom](emb.matrix);

         DerefDofMessage &msg = messages[k];
         MFEM_ASSERT(msg.dofs.size(), "");

         for (int vd = 0; vd < vdim; vd++)
            MFEM_ASSERT(ldof[geom], "");
            HYPRE_Int* remote_dofs = &msg.dofs[vd*ldof[geom]];

            for (int i = 0; i < lR.Height(); i++)
               if (!std::isfinite(lR(i, 0))) { continue; }

               int m = (r >= 0) ? r : (-1 - r);
                  MFEM_ASSERT(ldof[geom] == row.Size(), "");
                  for (int j = 0; j < ldof[geom]; j++)
                     if (row[j] == 0.0) { continue; }
                     int &lcol = col_map[remote_dofs[j]];
                     if (!lcol) { lcol = col_map.size(); }
                     offd->_Set_(m, lcol-1, row[j]);

   offd->SetWidth(col_map.size());

   HYPRE_Int *cmap = Memory<HYPRE_Int>(offd->Width());
   for (std::map<HYPRE_Int, int>::iterator
        it = col_map.begin(); it != col_map.end(); ++it)
      cmap[it->second-1] = it->first;

      int width = offd->Width();
      Array<Pair<HYPRE_Int, int> > reorder(width);
      for (int i = 0; i < width; i++)
         reorder[i].one = cmap[i];

      Array<int> reindex(width);
      for (int i = 0; i < width; i++)
         reindex[reorder[i].two] = i;
         cmap[i] = reorder[i].one;

      int *J = offd->GetJ();
      for (int i = 0; i < offd->NumNonZeroElems(); i++)
         J[i] = reindex[J[i]];
      offd->SortColumnIndices();

   R = new HypreParMatrix(MyComm, dof_offsets[nrk], old_dof_offsets[nrk],
                          dof_offsets, old_dof_offsets, diag, offd, cmap);

#ifndef HYPRE_BIGINT
   diag->SetDataOwner(false);
   offd->SetDataOwner(false);
#endif

   R->SetOwnerFlags(3, 3, 1);
void ParFiniteElementSpace::Destroy()
   delete Pconf; Pconf = NULL;
   delete gcomm; gcomm = NULL;

   MFEM_ASSERT(c_pfes != NULL, "coarse_fes must be a parallel space");
                           false, Tgf.OwnsOperator(), false));
   Tgf.SetOperatorOwner(false);

      MFEM_ABORT("Error in update sequence. Space needs to be updated after "
                 "each mesh modification.");
   Table* old_elem_dof = NULL;

   Swap(dof_offsets, old_dof_offsets);

   old_elem_dof = NULL;

         Th.Reset(ParallelDerefinementMatrix(old_ndofs, old_elem_dof));
                        false, false, true));
         Th.Reset(RebalanceMatrix(old_ndofs, old_elem_dof));

   delete old_elem_dof;
   : Operator(pfes.GetVSize(), pfes.GetTrueVSize()),
     gc(pfes.GroupComm())
   for (int gr = 1; gr < group_ldof.Size(); gr++)

   for ( ; j < end; j++)
   for ( ; j < Height(); j++)

   const double *xdata = x.HostRead();

   const int in_layout = 2; // GroupCommunicator layout 2: ltdof vector
   gc.BcastBegin(const_cast<double*>(xdata), in_layout);

   for (int i = 0; i < m; i++)
      std::copy(xdata+j-i, xdata+end-i, ydata+j);
   std::copy(xdata+j-m, xdata+Width(), ydata+j);

   const int out_layout = 0; // GroupCommunicator layout 0: ldof vector

   const double *xdata = x.HostRead();

   for (int i = 0; i < m; i++)
      std::copy(xdata+j, xdata+end, ydata+j-i);
   std::copy(xdata+j, xdata+Height(), ydata+j-m);

   const int out_layout = 2; // GroupCommunicator layout 2: ltdof vector
     mpi_gpu_aware(Device::GetGPUAwareMPI())
   MFEM_ASSERT(pfes.Conforming(), "internal error");
   MFEM_ASSERT(R->Finalized(), "");
   const int tdofs = R->Height();
   MFEM_ASSERT(tdofs == R->HostReadI()[tdofs], "");
   ltdof_ldof.UseDevice();

   unique_ltdof.Sort();
      MFEM_ASSERT(shr_ltdof[i] != -1, "internal error");

   int req_counter = 0;
      if (send_size > 0) { req_counter++; }
      if (recv_size > 0) { req_counter++; }
   requests = new MPI_Request[req_counter];

static void ExtractSubVector(const int N,
   auto y = out.Write();
   const auto x = in.Read();
   const auto I = indices.Read();
   MFEM_FORALL(i, N, y[i] = x[I[i]];);

static void SetSubVector(const int N,
   auto y = out.Write();
   const auto x = in.Read();
   const auto I = indices.Read();
   MFEM_FORALL(i, N, y[I[i]] = x[i];);

   int req_counter = 0;
      MPI_Isend(send_buf + send_offset, send_size, MPI_DOUBLE,
                gtopo.GetComm(), &requests[req_counter++]);
      MPI_Irecv(recv_buf + recv_offset, recv_size, MPI_DOUBLE,
                gtopo.GetComm(), &requests[req_counter++]);
   MPI_Waitall(req_counter, requests, MPI_STATUSES_IGNORE);

static void AddSubVector(const int num_unique_dst_indices,
   auto y = dst.Write();
   const auto x = src.Read();
   const auto DST_I = unique_dst_indices.Read();
   const auto SRC_O = unique_to_src_offsets.Read();
   const auto SRC_I = unique_to_src_indices.Read();
   MFEM_FORALL(i, num_unique_dst_indices,
   {
      const int dst_idx = DST_I[i];
      double sum = y[dst_idx];
      const int end = SRC_O[i+1];
      for (int j = SRC_O[i]; j != end; ++j) { sum += x[SRC_I[j]]; }
      y[dst_idx] = sum;
   });

   if (unq_ltdof_size == 0) { return; }

   int req_counter = 0;
      MPI_Isend(send_buf + send_offset, send_size, MPI_DOUBLE,
                gtopo.GetComm(), &requests[req_counter++]);
      MPI_Irecv(recv_buf + recv_offset, recv_size, MPI_DOUBLE,
                gtopo.GetComm(), &requests[req_counter++]);
   MPI_Waitall(req_counter, requests, MPI_STATUSES_IGNORE);
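// Illustrative note (not in the original source): the device prolongation
// path is built from three MFEM_FORALL gather/scatter kernels: ExtractSubVector
// packs shared true-dof values into contiguous send buffers, SetSubVector
// unpacks received neighbor values into place, and AddSubVector accumulates
// duplicated contributions during the transpose (restriction) operation. The
// surrounding MPI_Isend/MPI_Irecv + MPI_Waitall pairs move the packed buffers
// between neighbor ranks, directly from device memory when GPU-aware MPI is
// enabled (mpi_gpu_aware).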