MFEM  v4.4.0
Finite element discretization library
pfespace.cpp
1 // Copyright (c) 2010-2022, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #include "../config/config.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 #include "pfespace.hpp"
17 #include "prestriction.hpp"
18 #include "../general/forall.hpp"
19 #include "../general/sort_pairs.hpp"
20 #include "../mesh/mesh_headers.hpp"
21 #include "../general/binaryio.hpp"
22 
23 #include <limits>
24 #include <list>
25 
26 namespace mfem
27 {
28 
29 ParFiniteElementSpace::ParFiniteElementSpace(
30  const ParFiniteElementSpace &orig, ParMesh *pmesh,
31  const FiniteElementCollection *fec)
32  : FiniteElementSpace(orig, pmesh, fec)
33 {
34  ParInit(pmesh ? pmesh : orig.pmesh);
35 }
36 
37 ParFiniteElementSpace::ParFiniteElementSpace(
38  const FiniteElementSpace &orig, ParMesh &pmesh,
39  const FiniteElementCollection *fec)
40  : FiniteElementSpace(orig, &pmesh, fec)
41 {
42  ParInit(&pmesh);
43 }
44 
45 ParFiniteElementSpace::ParFiniteElementSpace(
46  ParMesh *pm, const FiniteElementSpace *global_fes, const int *partitioning,
47  const FiniteElementCollection *f)
48  : FiniteElementSpace(pm, MakeLocalNURBSext(global_fes->GetNURBSext(),
49  pm->NURBSext),
50  f ? f : global_fes->FEColl(),
51  global_fes->GetVDim(), global_fes->GetOrdering())
52 {
53  ParInit(pm);
54  // For NURBS spaces, the variable-order data is contained in the
55  // NURBSExtension of 'global_fes' and inside the ParNURBSExtension of 'pm'.
56 
57  // TODO: when general variable-order support is added, copy the local portion
58  // of the variable-order data from 'global_fes' to 'this'.
59 }
60 
61 ParFiniteElementSpace::ParFiniteElementSpace(
62  ParMesh *pm, const FiniteElementCollection *f, int dim, int ordering)
63  : FiniteElementSpace(pm, f, dim, ordering)
64 {
65  ParInit(pm);
66 }
67 
68 ParFiniteElementSpace::ParFiniteElementSpace(ParMesh *pm, NURBSExtension *ext,
69  const FiniteElementCollection *f,
70  int dim, int ordering)
71  : FiniteElementSpace(pm, ext, f, dim, ordering)
72 {
73  ParInit(pm);
74 }
75 
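// A minimal usage sketch of the constructors above (names such as 'pmesh',
// 'fec', 'fespace' and 'order' are illustrative, not defined in this file):
//
//    H1_FECollection fec(order, pmesh.Dimension());
//    ParFiniteElementSpace fespace(&pmesh, &fec);
//    HYPRE_BigInt global_tdofs = fespace.GlobalTrueVSize();
//
// where 'order' is the polynomial degree chosen by the caller.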
76 // static method
77 ParNURBSExtension *ParFiniteElementSpace::MakeLocalNURBSext(
78  const NURBSExtension *globNURBSext, const NURBSExtension *parNURBSext)
79 {
80  if (globNURBSext == NULL) { return NULL; }
81  const ParNURBSExtension *pNURBSext =
82  dynamic_cast<const ParNURBSExtension*>(parNURBSext);
83  MFEM_ASSERT(pNURBSext, "need a ParNURBSExtension");
84  // make a copy of globNURBSext:
85  NURBSExtension *tmp_globNURBSext = new NURBSExtension(*globNURBSext);
86  // tmp_globNURBSext will be deleted by the following ParNURBSExtension ctor:
87  return new ParNURBSExtension(tmp_globNURBSext, pNURBSext);
88 }
89 
90 void ParFiniteElementSpace::ParInit(ParMesh *pm)
91 {
92  pmesh = pm;
93  pncmesh = NULL;
94 
95  MyComm = pmesh->GetComm();
96  NRanks = pmesh->GetNRanks();
97  MyRank = pmesh->GetMyRank();
98 
99  gcomm = NULL;
100 
101  P = NULL;
102  Pconf = NULL;
103  nonconf_P = false;
104  Rconf = NULL;
105  R_transpose = NULL;
106  R = NULL;
107 
108  num_face_nbr_dofs = -1;
109 
110  if (NURBSext && !pNURBSext())
111  {
112  // This is necessary in some cases: e.g. when the FiniteElementSpace
113  // constructor creates a serial NURBSExtension of higher order than the
114  // mesh NURBSExtension.
115  MFEM_ASSERT(own_ext, "internal error");
116 
117  ParNURBSExtension *pNe = new ParNURBSExtension(
118  NURBSext, dynamic_cast<ParNURBSExtension *>(pmesh->NURBSext));
119  // serial NURBSext is destroyed by the above constructor
120  NURBSext = pNe;
121  UpdateNURBS();
122  }
123 
124  Construct(); // parallel version of Construct().
125 
126  // Apply the ldof_signs to the elem_dof Table
127  if (Conforming() && !NURBSext)
128  {
129  ApplyLDofSigns(*elem_dof);
130  }
131 
132  // Check for shared triangular faces with interior Nedelec DoFs
133  CheckNDSTriaDofs();
134 }
135 
136 void ParFiniteElementSpace::Construct()
137 {
138  MFEM_VERIFY(!IsVariableOrder(), "variable orders are not implemented"
139  " for ParFiniteElementSpace yet.");
140 
141  if (NURBSext)
142  {
143  ConstructTrueNURBSDofs();
144  GenerateGlobalOffsets();
145  }
146  else if (Conforming())
147  {
148  ConstructTrueDofs();
149  GenerateGlobalOffsets();
150  }
151  else // Nonconforming()
152  {
153  pncmesh = pmesh->pncmesh;
154 
155  // Initialize 'gcomm' for the cut (aka "partially conforming") space.
156  // In the process, the array 'ldof_ltdof' is also initialized (for the cut
157  // space) and used; however, it will be overwritten below with the real
158  // true dofs. Also, 'ldof_sign' and 'ldof_group' are constructed for the
159  // cut space.
160  ConstructTrueDofs();
161 
162  ngedofs = ngfdofs = 0;
163 
164  // calculate number of ghost DOFs
165  ngvdofs = pncmesh->GetNGhostVertices()
166  * fec->DofForGeometry(Geometry::POINT);
167 
168  if (pmesh->Dimension() > 1)
169  {
170  ngedofs = pncmesh->GetNGhostEdges()
171  * fec->DofForGeometry(Geometry::SEGMENT);
172  }
173 
174  if (pmesh->Dimension() > 2)
175  {
176  int stride = fec->DofForGeometry(Geometry::SQUARE);
177  ngfdofs = pncmesh->GetNGhostFaces() * stride;
178  }
179 
180  // total number of ghost DOFs. Ghost DOFs start at index 'ndofs', i.e.,
181  // after all regular DOFs
182  ngdofs = ngvdofs + ngedofs + ngfdofs;
183 
184  // get P and R matrices, initialize DOF offsets, etc. NOTE: in the NC
185  // case this needs to be done here to get the number of true DOFs
186  ltdof_size = BuildParallelConformingInterpolation(
187  &P, &R, dof_offsets, tdof_offsets, &ldof_ltdof, false);
188 
189  // TODO future: split BuildParallelConformingInterpolation into two parts
190  // to overlap its communication with processing between this constructor
191  // and the point where the P matrix is actually needed.
192  }
193 }
194 
195 void ParFiniteElementSpace::PrintPartitionStats()
196 {
197  long ltdofs = ltdof_size;
198  long min_ltdofs, max_ltdofs, sum_ltdofs;
199 
200  MPI_Reduce(&ltdofs, &min_ltdofs, 1, MPI_LONG, MPI_MIN, 0, MyComm);
201  MPI_Reduce(&ltdofs, &max_ltdofs, 1, MPI_LONG, MPI_MAX, 0, MyComm);
202  MPI_Reduce(&ltdofs, &sum_ltdofs, 1, MPI_LONG, MPI_SUM, 0, MyComm);
203 
204  if (MyRank == 0)
205  {
206  double avg = double(sum_ltdofs) / NRanks;
207  mfem::out << "True DOF partitioning: min " << min_ltdofs
208  << ", avg " << std::fixed << std::setprecision(1) << avg
209  << ", max " << max_ltdofs
210  << ", (max-avg)/avg " << 100.0*(max_ltdofs - avg)/avg
211  << "%" << std::endl;
212  }
213 
214  if (NRanks <= 32)
215  {
216  if (MyRank == 0)
217  {
218  mfem::out << "True DOFs by rank: " << ltdofs;
219  for (int i = 1; i < NRanks; i++)
220  {
221  MPI_Status status;
222  MPI_Recv(&ltdofs, 1, MPI_LONG, i, 123, MyComm, &status);
223  mfem::out << " " << ltdofs;
224  }
225  mfem::out << "\n";
226  }
227  else
228  {
229  MPI_Send(&ltdofs, 1, MPI_LONG, 0, 123, MyComm);
230  }
231  }
232 }
233 
234 void ParFiniteElementSpace::GetGroupComm(
235  GroupCommunicator &gc, int ldof_type, Array<int> *g_ldof_sign)
236 {
237  int gr;
238  int ng = pmesh->GetNGroups();
239  int nvd, ned, ntd = 0, nqd = 0;
240  Array<int> dofs;
241 
242  int group_ldof_counter;
243  Table &group_ldof = gc.GroupLDofTable();
244 
245  nvd = fec->DofForGeometry(Geometry::POINT);
246  ned = fec->DofForGeometry(Geometry::SEGMENT);
247 
248  if (mesh->Dimension() >= 3)
249  {
250  if (mesh->HasGeometry(Geometry::TRIANGLE))
251  {
252  ntd = fec->DofForGeometry(Geometry::TRIANGLE);
253  }
254  if (mesh->HasGeometry(Geometry::SQUARE))
255  {
256  nqd = fec->DofForGeometry(Geometry::SQUARE);
257  }
258  }
259 
260  if (g_ldof_sign)
261  {
262  g_ldof_sign->SetSize(GetNDofs());
263  *g_ldof_sign = 1;
264  }
265 
266  // count the number of ldofs in all groups (excluding the local group 0)
267  group_ldof_counter = 0;
268  for (gr = 1; gr < ng; gr++)
269  {
270  group_ldof_counter += nvd * pmesh->GroupNVertices(gr);
271  group_ldof_counter += ned * pmesh->GroupNEdges(gr);
272  group_ldof_counter += ntd * pmesh->GroupNTriangles(gr);
273  group_ldof_counter += nqd * pmesh->GroupNQuadrilaterals(gr);
274  }
275  if (ldof_type)
276  {
277  group_ldof_counter *= vdim;
278  }
279  // allocate the I and J arrays in group_ldof
280  group_ldof.SetDims(ng, group_ldof_counter);
281 
282  // build the full group_ldof table
283  group_ldof_counter = 0;
284  group_ldof.GetI()[0] = group_ldof.GetI()[1] = 0;
285  for (gr = 1; gr < ng; gr++)
286  {
287  int j, k, l, m, o, nv, ne, nt, nq;
288  const int *ind;
289 
290  nv = pmesh->GroupNVertices(gr);
291  ne = pmesh->GroupNEdges(gr);
292  nt = pmesh->GroupNTriangles(gr);
293  nq = pmesh->GroupNQuadrilaterals(gr);
294 
295  // vertices
296  if (nvd > 0)
297  {
298  for (j = 0; j < nv; j++)
299  {
300  k = pmesh->GroupVertex(gr, j);
301 
302  dofs.SetSize(nvd);
303  m = nvd * k;
304  for (l = 0; l < nvd; l++, m++)
305  {
306  dofs[l] = m;
307  }
308 
309  if (ldof_type)
310  {
311  DofsToVDofs(dofs);
312  }
313 
314  for (l = 0; l < dofs.Size(); l++)
315  {
316  group_ldof.GetJ()[group_ldof_counter++] = dofs[l];
317  }
318  }
319  }
320 
321  // edges
322  if (ned > 0)
323  {
324  for (j = 0; j < ne; j++)
325  {
326  pmesh->GroupEdge(gr, j, k, o);
327 
328  dofs.SetSize(ned);
329  m = nvdofs+k*ned;
330  ind = fec->DofOrderForOrientation(Geometry::SEGMENT, o);
331  for (l = 0; l < ned; l++)
332  {
333  if (ind[l] < 0)
334  {
335  dofs[l] = m + (-1-ind[l]);
336  if (g_ldof_sign)
337  {
338  (*g_ldof_sign)[dofs[l]] = -1;
339  }
340  }
341  else
342  {
343  dofs[l] = m + ind[l];
344  }
345  }
346 
347  if (ldof_type)
348  {
349  DofsToVDofs(dofs);
350  }
351 
352  for (l = 0; l < dofs.Size(); l++)
353  {
354  group_ldof.GetJ()[group_ldof_counter++] = dofs[l];
355  }
356  }
357  }
358 
359  // triangles
360  if (ntd > 0)
361  {
362  for (j = 0; j < nt; j++)
363  {
364  pmesh->GroupTriangle(gr, j, k, o);
365 
366  dofs.SetSize(ntd);
367  m = nvdofs + nedofs + FirstFaceDof(k);
368  ind = fec->DofOrderForOrientation(Geometry::TRIANGLE, o);
369  for (l = 0; l < ntd; l++)
370  {
371  if (ind[l] < 0)
372  {
373  dofs[l] = m + (-1-ind[l]);
374  if (g_ldof_sign)
375  {
376  (*g_ldof_sign)[dofs[l]] = -1;
377  }
378  }
379  else
380  {
381  dofs[l] = m + ind[l];
382  }
383  }
384 
385  if (ldof_type)
386  {
387  DofsToVDofs(dofs);
388  }
389 
390  for (l = 0; l < dofs.Size(); l++)
391  {
392  group_ldof.GetJ()[group_ldof_counter++] = dofs[l];
393  }
394  }
395  }
396 
397  // quadrilaterals
398  if (nqd > 0)
399  {
400  for (j = 0; j < nq; j++)
401  {
402  pmesh->GroupQuadrilateral(gr, j, k, o);
403 
404  dofs.SetSize(nqd);
405  m = nvdofs + nedofs + FirstFaceDof(k);
406  ind = fec->DofOrderForOrientation(Geometry::SQUARE, o);
407  for (l = 0; l < nqd; l++)
408  {
409  if (ind[l] < 0)
410  {
411  dofs[l] = m + (-1-ind[l]);
412  if (g_ldof_sign)
413  {
414  (*g_ldof_sign)[dofs[l]] = -1;
415  }
416  }
417  else
418  {
419  dofs[l] = m + ind[l];
420  }
421  }
422 
423  if (ldof_type)
424  {
425  DofsToVDofs(dofs);
426  }
427 
428  for (l = 0; l < dofs.Size(); l++)
429  {
430  group_ldof.GetJ()[group_ldof_counter++] = dofs[l];
431  }
432  }
433  }
434 
435  group_ldof.GetI()[gr+1] = group_ldof_counter;
436  }
437 
438  gc.Finalize();
439 }
440 
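// Sign convention used by the DOF arrays in this file: a non-negative entry d
// denotes local DOF d taken with a plus sign, while a negative entry encodes
// the same DOF with a flipped sign as -1-d (so -1 is DOF 0 negated, -5 is
// DOF 4 negated). ApplyLDofSigns() below folds the orientation signs stored
// in 'ldof_sign' for shared entities into this encoding.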
441 void ParFiniteElementSpace::ApplyLDofSigns(Array<int> &dofs) const
442 {
443  MFEM_ASSERT(Conforming(), "wrong code path");
444 
445  for (int i = 0; i < dofs.Size(); i++)
446  {
447  if (dofs[i] < 0)
448  {
449  if (ldof_sign[-1-dofs[i]] < 0)
450  {
451  dofs[i] = -1-dofs[i];
452  }
453  }
454  else
455  {
456  if (ldof_sign[dofs[i]] < 0)
457  {
458  dofs[i] = -1-dofs[i];
459  }
460  }
461  }
462 }
463 
464 void ParFiniteElementSpace::ApplyLDofSigns(Table &el_dof) const
465 {
466  Array<int> all_dofs(el_dof.GetJ(), el_dof.Size_of_connections());
467  ApplyLDofSigns(all_dofs);
468 }
469 
470 DofTransformation *
471 ParFiniteElementSpace::GetElementDofs(int i, Array<int> &dofs) const
472 {
473  if (elem_dof)
474  {
475  elem_dof->GetRow(i, dofs);
476 
477  if (DoFTrans[mesh->GetElementBaseGeometry(i)])
478  {
479  Array<int> Fo;
480  elem_fos->GetRow(i, Fo);
481  DoFTrans[mesh->GetElementBaseGeometry(i)]->SetFaceOrientations(Fo);
482  return DoFTrans[mesh->GetElementBaseGeometry(i)];
483  }
484  return NULL;
485  }
486  DofTransformation * doftrans = FiniteElementSpace::GetElementDofs(i, dofs);
487  if (Conforming())
488  {
489  ApplyLDofSigns(dofs);
490  }
491  return doftrans;
492 }
493 
494 DofTransformation *
495 ParFiniteElementSpace::GetBdrElementDofs(int i, Array<int> &dofs) const
496 {
497  if (bdr_elem_dof)
498  {
499  bdr_elem_dof->GetRow(i, dofs);
500 
501  if (DoFTrans[mesh->GetBdrElementBaseGeometry(i)])
502  {
503  Array<int> Fo;
504  bdr_elem_fos -> GetRow (i, Fo);
505  DoFTrans[mesh->GetBdrElementBaseGeometry(i)]->SetFaceOrientations(Fo);
506  return DoFTrans[mesh->GetBdrElementBaseGeometry(i)];
507  }
508  return NULL;
509  }
510  DofTransformation * doftrans =
511  FiniteElementSpace::GetBdrElementDofs(i, dofs);
512  if (Conforming())
513  {
514  ApplyLDofSigns(dofs);
515  }
516  return doftrans;
517 }
518 
519 int ParFiniteElementSpace::GetFaceDofs(int i, Array<int> &dofs,
520  int variant) const
521 {
522  if (face_dof && variant == 0)
523  {
524  face_dof->GetRow(i, dofs);
525  return fec->GetOrder();
526  }
527  int p = FiniteElementSpace::GetFaceDofs(i, dofs, variant);
528  if (Conforming())
529  {
530  ApplyLDofSigns(dofs);
531  }
532  return p;
533 }
534 
535 const FiniteElement *ParFiniteElementSpace::GetFE(int i) const
536 {
537  int ne = mesh->GetNE();
538  if (i >= ne) { return GetFaceNbrFE(i - ne); }
539  else { return FiniteElementSpace::GetFE(i); }
540 }
541 
542 const FaceRestriction *ParFiniteElementSpace::GetFaceRestriction(
543  ElementDofOrdering e_ordering, FaceType type, L2FaceValues mul) const
544 {
545  const bool is_dg_space = IsDGSpace();
546  const L2FaceValues m = (is_dg_space && mul==L2FaceValues::DoubleValued) ?
547  L2FaceValues::DoubleValued : L2FaceValues::SingleValued;
548  auto key = std::make_tuple(is_dg_space, e_ordering, type, m);
549  auto itr = L2F.find(key);
550  if (itr != L2F.end())
551  {
552  return itr->second;
553  }
554  else
555  {
556  FaceRestriction *res;
557  if (is_dg_space)
558  {
559  if (Conforming())
560  {
561  res = new ParL2FaceRestriction(*this, e_ordering, type, m);
562  }
563  else
564  {
565  res = new ParNCL2FaceRestriction(*this, e_ordering, type, m);
566  }
567  }
568  else
569  {
570  if (Conforming())
571  {
572  res = new H1FaceRestriction(*this, e_ordering, type);
573  }
574  else
575  {
576  res = new ParNCH1FaceRestriction(*this, e_ordering, type);
577  }
578  }
579  L2F[key] = res;
580  return res;
581  }
582 }
583 
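// A usage sketch for the cached face restrictions built above (the variable
// names are illustrative):
//
//    const FaceRestriction *face_restr =
//       pfes.GetFaceRestriction(ElementDofOrdering::LEXICOGRAPHIC,
//                               FaceType::Interior);
//
// Repeated calls with the same (DG flag, ordering, face type, multiplicity)
// key return the object stored in the 'L2F' map rather than a new one.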
584 void ParFiniteElementSpace::GetSharedEdgeDofs(
585  int group, int ei, Array<int> &dofs) const
586 {
587  int l_edge, ori;
588  MFEM_ASSERT(0 <= ei && ei < pmesh->GroupNEdges(group), "invalid edge index");
589  pmesh->GroupEdge(group, ei, l_edge, ori);
590  if (ori > 0) // ori = +1 or -1
591  {
592  GetEdgeDofs(l_edge, dofs);
593  }
594  else
595  {
596  Array<int> rdofs;
597  fec->SubDofOrder(Geometry::SEGMENT, 1, 1, dofs);
598  GetEdgeDofs(l_edge, rdofs);
599  for (int i = 0; i < dofs.Size(); i++)
600  {
601  const int di = dofs[i];
602  dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];
603  }
604  }
605 }
606 
607 void ParFiniteElementSpace::GetSharedTriangleDofs(
608  int group, int fi, Array<int> &dofs) const
609 {
610  int l_face, ori;
611  MFEM_ASSERT(0 <= fi && fi < pmesh->GroupNTriangles(group),
612  "invalid triangular face index");
613  pmesh->GroupTriangle(group, fi, l_face, ori);
614  if (ori == 0)
615  {
616  GetFaceDofs(l_face, dofs);
617  }
618  else
619  {
620  Array<int> rdofs;
621  fec->SubDofOrder(Geometry::TRIANGLE, 2, ori, dofs);
622  GetFaceDofs(l_face, rdofs);
623  for (int i = 0; i < dofs.Size(); i++)
624  {
625  const int di = dofs[i];
626  dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];
627  }
628  }
629 }
630 
631 void ParFiniteElementSpace::GetSharedQuadrilateralDofs(
632  int group, int fi, Array<int> &dofs) const
633 {
634  int l_face, ori;
635  MFEM_ASSERT(0 <= fi && fi < pmesh->GroupNQuadrilaterals(group),
636  "invalid quadrilateral face index");
637  pmesh->GroupQuadrilateral(group, fi, l_face, ori);
638  if (ori == 0)
639  {
640  GetFaceDofs(l_face, dofs);
641  }
642  else
643  {
644  Array<int> rdofs;
645  fec->SubDofOrder(Geometry::SQUARE, 2, ori, dofs);
646  GetFaceDofs(l_face, rdofs);
647  for (int i = 0; i < dofs.Size(); i++)
648  {
649  const int di = dofs[i];
650  dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];
651  }
652  }
653 }
654 
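// In the three GetShared*Dofs() methods above, fec->SubDofOrder() fills 'dofs'
// with the permutation (sign flips encoded as -1-dof) that maps this rank's
// view of the shared entity to the group's reference orientation; the loop
// then routes the locally numbered DOFs in 'rdofs' through that permutation.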
655 void ParFiniteElementSpace::GenerateGlobalOffsets() const
656 {
657  MFEM_ASSERT(Conforming(), "wrong code path");
658 
659  HYPRE_BigInt ldof[2];
660  Array<HYPRE_BigInt> *offsets[2] = { &dof_offsets, &tdof_offsets };
661 
662  ldof[0] = GetVSize();
663  ldof[1] = TrueVSize();
664 
665  pmesh->GenerateOffsets(2, ldof, offsets);
666 
667  if (HYPRE_AssumedPartitionCheck())
668  {
669  // communicate the neighbor offsets in tdof_nb_offsets
670  GroupTopology &gt = GetGroupTopo();
671  int nsize = gt.GetNumNeighbors()-1;
672  MPI_Request *requests = new MPI_Request[2*nsize];
673  MPI_Status *statuses = new MPI_Status[2*nsize];
674  tdof_nb_offsets.SetSize(nsize+1);
675  tdof_nb_offsets[0] = tdof_offsets[0];
676 
677  // send and receive neighbors' local tdof offsets
678  int request_counter = 0;
679  for (int i = 1; i <= nsize; i++)
680  {
681  MPI_Irecv(&tdof_nb_offsets[i], 1, HYPRE_MPI_BIG_INT,
682  gt.GetNeighborRank(i), 5365, MyComm,
683  &requests[request_counter++]);
684  }
685  for (int i = 1; i <= nsize; i++)
686  {
687  MPI_Isend(&tdof_nb_offsets[0], 1, HYPRE_MPI_BIG_INT,
688  gt.GetNeighborRank(i), 5365, MyComm,
689  &requests[request_counter++]);
690  }
691  MPI_Waitall(request_counter, requests, statuses);
692 
693  delete [] statuses;
694  delete [] requests;
695  }
696 }
697 
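// Note on the offset arrays filled above: in HYPRE's assumed-partition mode
// (the usual case) 'dof_offsets' and 'tdof_offsets' hold only this rank's
// [begin, end) range (e.g. rank 1 of three ranks owning 10/12/11 true DOFs
// sees tdof_offsets = {10, 22}), while without assumed partition they hold
// the full NRanks+1 prefix-sum arrays. GetMyDofOffset() and GetMyTDofOffset()
// below account for both layouts.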
698 void ParFiniteElementSpace::CheckNDSTriaDofs()
699 {
700  // Check for Nedelec basis
701  bool nd_basis = dynamic_cast<const ND_FECollection*>(fec);
702  if (!nd_basis)
703  {
704  nd_strias = false;
705  return;
706  }
707 
708  // Check for interior face dofs on triangles (the use of TETRAHEDRON
709  // is not an error)
710  bool nd_fdof = fec->HasFaceDofs(Geometry::TETRAHEDRON,
711  fec->GetOrder());
712  if (!nd_fdof)
713  {
714  nd_strias = false;
715  return;
716  }
717 
718  // Check for shared triangle faces
719  bool strias = false;
720  {
721  int ngrps = pmesh->GetNGroups();
722  for (int g = 1; g < ngrps; g++)
723  {
724  strias |= pmesh->GroupNTriangles(g);
725  }
726  }
727 
728  // Combine results
729  int loc_nd_strias = strias ? 1 : 0;
730  int glb_nd_strias = 0;
731  MPI_Allreduce(&loc_nd_strias, &glb_nd_strias, 1,
732  MPI_INTEGER, MPI_SUM, MyComm);
733  nd_strias = glb_nd_strias > 0;
734 }
735 
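// When nd_strias is true (a Nedelec space with interior DOFs on shared
// triangular faces), the pair of face-interior DOFs seen by a non-master rank
// is related to the master rank's pair through the 2x2 transform returned by
// ND_DofTransformation::GetFaceTransform(), so the corresponding rows of the
// P matrix built below carry two off-diagonal entries instead of a single 1.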
736 void ParFiniteElementSpace::Build_Dof_TrueDof_Matrix() const // matrix P
737 {
738  MFEM_ASSERT(Conforming(), "wrong code path");
739 
740  if (P) { return; }
741 
742  if (!nd_strias)
743  {
744  // Safe to assume 1-1 correspondence between shared dofs
745  int ldof = GetVSize();
746  int ltdof = TrueVSize();
747 
748  HYPRE_Int *i_diag = Memory<HYPRE_Int>(ldof+1);
749  HYPRE_Int *j_diag = Memory<HYPRE_Int>(ltdof);
750  int diag_counter;
751 
752  HYPRE_Int *i_offd = Memory<HYPRE_Int>(ldof+1);
753  HYPRE_Int *j_offd = Memory<HYPRE_Int>(ldof-ltdof);
754  int offd_counter;
755 
756  HYPRE_BigInt *cmap = Memory<HYPRE_BigInt>(ldof-ltdof);
757 
758  HYPRE_BigInt *col_starts = GetTrueDofOffsets();
759  HYPRE_BigInt *row_starts = GetDofOffsets();
760 
761  Array<Pair<HYPRE_BigInt, int> > cmap_j_offd(ldof-ltdof);
762 
763  i_diag[0] = i_offd[0] = 0;
764  diag_counter = offd_counter = 0;
765  for (int i = 0; i < ldof; i++)
766  {
767  int ltdof_i = GetLocalTDofNumber(i);
768  if (ltdof_i >= 0)
769  {
770  j_diag[diag_counter++] = ltdof_i;
771  }
772  else
773  {
774  cmap_j_offd[offd_counter].one = GetGlobalTDofNumber(i);
775  cmap_j_offd[offd_counter].two = offd_counter;
776  offd_counter++;
777  }
778  i_diag[i+1] = diag_counter;
779  i_offd[i+1] = offd_counter;
780  }
781 
782  SortPairs<HYPRE_BigInt, int>(cmap_j_offd, offd_counter);
783 
784  for (int i = 0; i < offd_counter; i++)
785  {
786  cmap[i] = cmap_j_offd[i].one;
787  j_offd[cmap_j_offd[i].two] = i;
788  }
789 
790  P = new HypreParMatrix(MyComm, MyRank, NRanks, row_starts, col_starts,
791  i_diag, j_diag, i_offd, j_offd,
792  cmap, offd_counter);
793  }
794  else
795  {
796  // Some shared dofs will be linear combinations of others
797  HYPRE_BigInt ldof = GetVSize();
798  HYPRE_BigInt ltdof = TrueVSize();
799 
800  HYPRE_BigInt gdof = -1;
801  HYPRE_BigInt gtdof = -1;
802 
803  MPI_Allreduce(&ldof, &gdof, 1, HYPRE_MPI_BIG_INT, MPI_SUM, MyComm);
804  MPI_Allreduce(&ltdof, &gtdof, 1, HYPRE_MPI_BIG_INT, MPI_SUM, MyComm);
805 
806  // Ensure face orientations have been communicated
807  pmesh->ExchangeFaceNbrData();
808 
809  // Locate and count non-zeros in off-diagonal portion of P
810  int nnz_offd = 0;
811  Array<int> ldsize(ldof); ldsize = 0;
812  Array<int> ltori(ldof); ltori = 0; // Local triangle orientations
813  {
814  int ngrps = pmesh->GetNGroups();
815  int nedofs = fec->DofForGeometry(Geometry::SEGMENT);
816  Array<int> sdofs;
817  for (int g = 1; g < ngrps; g++)
818  {
819  if (pmesh->gtopo.IAmMaster(g))
820  {
821  continue;
822  }
823  for (int ei=0; ei<pmesh->GroupNEdges(g); ei++)
824  {
825  this->GetSharedEdgeDofs(g, ei, sdofs);
826  for (int i=0; i<sdofs.Size(); i++)
827  {
828  int ind = (sdofs[i]>=0) ? sdofs[i] : (-sdofs[i]-1);
829  if (ldsize[ind] == 0) { nnz_offd++; }
830  ldsize[ind] = 1;
831  }
832  }
833  for (int fi=0; fi<pmesh->GroupNTriangles(g); fi++)
834  {
835  int face, ori, info1, info2;
836  pmesh->GroupTriangle(g, fi, face, ori);
837  pmesh->GetFaceInfos(face, &info1, &info2);
838  this->GetSharedTriangleDofs(g, fi, sdofs);
839  for (int i=0; i<3*nedofs; i++)
840  {
841  int ind = (sdofs[i]>=0) ? sdofs[i] : (-sdofs[i]-1);
842  if (ldsize[ind] == 0) { nnz_offd++; }
843  ldsize[ind] = 1;
844  }
845  for (int i=3*nedofs; i<sdofs.Size(); i++)
846  {
847  if (ldsize[sdofs[i]] == 0) { nnz_offd += 2; }
848  ldsize[sdofs[i]] = 2;
849  ltori[sdofs[i]] = info2 % 64;
850  }
851  }
852  for (int fi=0; fi<pmesh->GroupNQuadrilaterals(g); fi++)
853  {
854  this->GetSharedQuadrilateralDofs(g, fi, sdofs);
855  for (int i=0; i<sdofs.Size(); i++)
856  {
857  int ind = (sdofs[i]>=0) ? sdofs[i] : (-sdofs[i]-1);
858  if (ldsize[ind] == 0) { nnz_offd++; }
859  ldsize[ind] = 1;
860  }
861  }
862  }
863  }
864 
865  HYPRE_Int *i_diag = new HYPRE_Int[ldof+1];
866  HYPRE_Int *j_diag = new HYPRE_Int[ltdof];
867  double *d_diag = new double[ltdof];
868  int diag_counter;
869 
870  HYPRE_Int *i_offd = new HYPRE_Int[ldof+1];
871  HYPRE_Int *j_offd = new HYPRE_Int[nnz_offd];
872  double *d_offd = new double[nnz_offd];
873  int offd_counter;
874 
875  HYPRE_BigInt *cmap = new HYPRE_BigInt[ldof-ltdof];
876 
877  HYPRE_BigInt *col_starts = GetTrueDofOffsets();
878  HYPRE_BigInt *row_starts = GetDofOffsets();
879 
880  Array<Pair<HYPRE_BigInt, int> > cmap_j_offd(ldof-ltdof);
881 
882  i_diag[0] = i_offd[0] = 0;
883  diag_counter = offd_counter = 0;
884  int offd_col_counter = 0;
885  for (int i = 0; i < ldof; i++)
886  {
887  int ltdofi = GetLocalTDofNumber(i);
888  if (ltdofi >= 0)
889  {
890  j_diag[diag_counter] = ltdofi;
891  d_diag[diag_counter++] = 1.0;
892  }
893  else
894  {
895  if (ldsize[i] == 1)
896  {
897  cmap_j_offd[offd_col_counter].one = GetGlobalTDofNumber(i);
898  cmap_j_offd[offd_col_counter].two = offd_counter;
899  offd_counter++;
900  offd_col_counter++;
901  }
902  else
903  {
904  cmap_j_offd[offd_col_counter].one = GetGlobalTDofNumber(i);
905  cmap_j_offd[offd_col_counter].two = offd_counter;
906  offd_counter += 2;
907  offd_col_counter++;
908  i_diag[i+1] = diag_counter;
909  i_offd[i+1] = offd_counter;
910  i++;
911  cmap_j_offd[offd_col_counter].one = GetGlobalTDofNumber(i);
912  cmap_j_offd[offd_col_counter].two = offd_counter;
913  offd_counter += 2;
914  offd_col_counter++;
915  }
916  }
917  i_diag[i+1] = diag_counter;
918  i_offd[i+1] = offd_counter;
919  }
920 
921  SortPairs<HYPRE_BigInt, int>(cmap_j_offd, offd_col_counter);
922 
923  for (int i = 0; i < nnz_offd; i++)
924  {
925  j_offd[i] = -1;
926  d_offd[i] = 0.0;
927  }
928 
929  for (int i = 0; i < offd_col_counter; i++)
930  {
931  cmap[i] = cmap_j_offd[i].one;
932  j_offd[cmap_j_offd[i].two] = i;
933  }
934 
935  for (int i = 0; i < ldof; i++)
936  {
937  if (i_offd[i+1] == i_offd[i] + 1)
938  {
939  d_offd[i_offd[i]] = 1.0;
940  }
941  else if (i_offd[i+1] == i_offd[i] + 2)
942  {
943  const double * T = ND_DofTransformation
944  ::GetFaceTransform(ltori[i]).GetData();
945  j_offd[i_offd[i] + 1] = j_offd[i_offd[i]] + 1;
946  d_offd[i_offd[i]] = T[0]; d_offd[i_offd[i] + 1] = T[2];
947  i++;
948  j_offd[i_offd[i] + 1] = j_offd[i_offd[i]];
949  j_offd[i_offd[i]] = j_offd[i_offd[i] + 1] - 1;
950  d_offd[i_offd[i]] = T[1]; d_offd[i_offd[i] + 1] = T[3];
951  }
952  }
953 
954  P = new HypreParMatrix(MyComm, gdof, gtdof, row_starts, col_starts,
955  i_diag, j_diag, d_diag, i_offd, j_offd, d_offd,
956  offd_col_counter, cmap);
957  }
958 
959  SparseMatrix Pdiag;
960  P->GetDiag(Pdiag);
961  R = Transpose(Pdiag);
962 }
963 
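// A sketch of how the conforming prolongation built above is typically
// consumed (illustrative, mirroring what ParBilinearForm::ParallelAssemble()
// does):
//
//    HypreParMatrix *P = pfes.Dof_TrueDof_Matrix();
//    // A_true = P^T A_local P maps the locally assembled matrix to true DOFs
//
// while R, the transpose of P's diagonal block, restricts local vectors to
// the true DOFs owned by this rank.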
964 HypreParMatrix *ParFiniteElementSpace::GetPartialConformingInterpolation()
965 {
966  HypreParMatrix *P_pc;
967  Array<HYPRE_BigInt> P_pc_row_starts, P_pc_col_starts;
968  BuildParallelConformingInterpolation(&P_pc, NULL, P_pc_row_starts,
969  P_pc_col_starts, NULL, true);
970  P_pc->CopyRowStarts();
971  P_pc->CopyColStarts();
972  return P_pc;
973 }
974 
975 void ParFiniteElementSpace::DivideByGroupSize(double *vec)
976 {
977  GroupTopology &gt = GetGroupTopo();
978  for (int i = 0; i < ldof_group.Size(); i++)
979  {
980  if (gt.IAmMaster(ldof_group[i])) // we are the master
981  {
982  if (ldof_ltdof[i] >= 0) // see note below
983  {
984  vec[ldof_ltdof[i]] /= gt.GetGroupSize(ldof_group[i]);
985  }
986  // NOTE: in NC meshes, ldof_ltdof generated for the gtopo
987  // groups by ConstructTrueDofs gets overwritten by
988  // BuildParallelConformingInterpolation. Some DOFs that are
989  // seen as true by the conforming code are actually slaves and
990  // end up with a -1 in ldof_ltdof.
991  }
992  }
993 }
994 
995 GroupCommunicator *ParFiniteElementSpace::ScalarGroupComm()
996 {
997  GroupCommunicator *gc = new GroupCommunicator(GetGroupTopo());
998  if (NURBSext)
999  {
1000  gc->Create(pNURBSext()->ldof_group);
1001  }
1002  else
1003  {
1004  GetGroupComm(*gc, 0);
1005  }
1006  return gc;
1007 }
1008 
1009 void ParFiniteElementSpace::Synchronize(Array<int> &ldof_marker) const
1010 {
1011  // For non-conforming mesh, synchronization is performed on the cut (aka
1012  // "partially conforming") space.
1013 
1014  MFEM_VERIFY(ldof_marker.Size() == GetVSize(), "invalid in/out array");
1015 
1016  // implement allreduce(|) as reduce(|) + broadcast
1017  gcomm->Reduce<int>(ldof_marker, GroupCommunicator::BitOR);
1018  gcomm->Bcast(ldof_marker);
1019 }
1020 
1021 void ParFiniteElementSpace::GetEssentialVDofs(const Array<int> &bdr_attr_is_ess,
1022  Array<int> &ess_dofs,
1023  int component) const
1024 {
1025  FiniteElementSpace::GetEssentialVDofs(bdr_attr_is_ess, ess_dofs, component);
1026 
1027  // Make sure that processors without boundary elements mark
1028  // their boundary dofs (if they have any).
1029  Synchronize(ess_dofs);
1030 }
1031 
1032 void ParFiniteElementSpace::GetEssentialTrueDofs(const Array<int>
1033  &bdr_attr_is_ess,
1034  Array<int> &ess_tdof_list,
1035  int component)
1036 {
1037  Array<int> ess_dofs, true_ess_dofs;
1038 
1039  GetEssentialVDofs(bdr_attr_is_ess, ess_dofs, component);
1040  GetRestrictionMatrix()->BooleanMult(ess_dofs, true_ess_dofs);
1041 
1042 #ifdef MFEM_DEBUG
1043  // Verify that in boolean arithmetic: P^T ess_dofs = R ess_dofs.
1044  Array<int> true_ess_dofs2(true_ess_dofs.Size());
1045  HypreParMatrix *Pt = Dof_TrueDof_Matrix()->Transpose();
1046  const int *ess_dofs_data = ess_dofs.HostRead();
1047  Pt->BooleanMult(1, ess_dofs_data, 0, true_ess_dofs2);
1048  delete Pt;
1049  int counter = 0;
1050  const int *ted = true_ess_dofs.HostRead();
1051  for (int i = 0; i < true_ess_dofs.Size(); i++)
1052  {
1053  if (bool(ted[i]) != bool(true_ess_dofs2[i])) { counter++; }
1054  }
1055  MFEM_VERIFY(counter == 0, "internal MFEM error: counter = " << counter
1056  << ", rank = " << MyRank);
1057 #endif
1058 
1059  MarkerToList(true_ess_dofs, ess_tdof_list);
1060 }
1061 
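// Caller-side sketch (names are illustrative): to eliminate all Dirichlet
// boundary attributes one typically writes
//
//    Array<int> ess_bdr(pmesh.bdr_attributes.Max()), ess_tdof_list;
//    ess_bdr = 1;
//    pfes.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
//
// and passes 'ess_tdof_list' to FormLinearSystem() or similar elimination
// routines; the Synchronize() call above guarantees that ranks without
// boundary elements still mark their shared boundary DOFs.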
1062 int ParFiniteElementSpace::GetLocalTDofNumber(int ldof) const
1063 {
1064  if (Nonconforming())
1065  {
1066  Dof_TrueDof_Matrix(); // make sure P has been built
1067 
1068  return ldof_ltdof[ldof]; // NOTE: contains -1 for slaves/DOFs we don't own
1069  }
1070  else
1071  {
1072  if (GetGroupTopo().IAmMaster(ldof_group[ldof]))
1073  {
1074  return ldof_ltdof[ldof];
1075  }
1076  else
1077  {
1078  return -1;
1079  }
1080  }
1081 }
1082 
1083 HYPRE_BigInt ParFiniteElementSpace::GetGlobalTDofNumber(int ldof) const
1084 {
1085  if (Nonconforming())
1086  {
1087  MFEM_VERIFY(ldof_ltdof[ldof] >= 0, "ldof " << ldof << " not a true DOF.");
1088 
1089  return GetMyTDofOffset() + ldof_ltdof[ldof];
1090  }
1091  else
1092  {
1093  if (HYPRE_AssumedPartitionCheck())
1094  {
1095  return ldof_ltdof[ldof] +
1096  tdof_nb_offsets[GetGroupTopo().GetGroupMaster(ldof_group[ldof])];
1097  }
1098  else
1099  {
1100  return ldof_ltdof[ldof] +
1101  tdof_offsets[GetGroupTopo().GetGroupMasterRank(ldof_group[ldof])];
1102  }
1103  }
1104 }
1105 
1106 HYPRE_BigInt ParFiniteElementSpace::GetGlobalScalarTDofNumber(int sldof)
1107 {
1108  if (Nonconforming())
1109  {
1110  MFEM_ABORT("Not implemented for NC mesh.");
1111  }
1112 
1113  if (HYPRE_AssumedPartitionCheck())
1114  {
1115  if (ordering == Ordering::byNODES)
1116  {
1117  return ldof_ltdof[sldof] +
1118  tdof_nb_offsets[GetGroupTopo().GetGroupMaster(
1119  ldof_group[sldof])] / vdim;
1120  }
1121  else
1122  {
1123  return (ldof_ltdof[sldof*vdim] +
1124  tdof_nb_offsets[GetGroupTopo().GetGroupMaster(
1125  ldof_group[sldof*vdim])]) / vdim;
1126  }
1127  }
1128 
1129  if (ordering == Ordering::byNODES)
1130  {
1131  return ldof_ltdof[sldof] +
1132  tdof_offsets[GetGroupTopo().GetGroupMasterRank(
1133  ldof_group[sldof])] / vdim;
1134  }
1135  else
1136  {
1137  return (ldof_ltdof[sldof*vdim] +
1138  tdof_offsets[GetGroupTopo().GetGroupMasterRank(
1139  ldof_group[sldof*vdim])]) / vdim;
1140  }
1141 }
1142 
1143 HYPRE_BigInt ParFiniteElementSpace::GetMyDofOffset() const
1144 {
1145  return HYPRE_AssumedPartitionCheck() ? dof_offsets[0] : dof_offsets[MyRank];
1146 }
1147 
1148 HYPRE_BigInt ParFiniteElementSpace::GetMyTDofOffset() const
1149 {
1150  return HYPRE_AssumedPartitionCheck()? tdof_offsets[0] : tdof_offsets[MyRank];
1151 }
1152 
1153 const Operator *ParFiniteElementSpace::GetProlongationMatrix() const
1154 {
1155  if (Conforming())
1156  {
1157  if (Pconf) { return Pconf; }
1158 
1159  if (nd_strias) { return Dof_TrueDof_Matrix(); }
1160 
1161  if (NRanks == 1)
1162  {
1163  Pconf = new IdentityOperator(GetTrueVSize());
1164  }
1165  else
1166  {
1167  if (!Device::Allows(Backend::DEVICE_MASK))
1168  {
1169  Pconf = new ConformingProlongationOperator(*this);
1170  }
1171  else
1172  {
1173  Pconf = new DeviceConformingProlongationOperator(*this);
1174  }
1175  }
1176  return Pconf;
1177  }
1178  else
1179  {
1180  return Dof_TrueDof_Matrix();
1181  }
1182 }
1183 
1184 const Operator *ParFiniteElementSpace::GetRestrictionOperator() const
1185 {
1186  if (Conforming())
1187  {
1188  if (Rconf) { return Rconf; }
1189 
1190  if (NRanks == 1)
1191  {
1192  R_transpose = new IdentityOperator(GetTrueVSize());
1193  }
1194  else
1195  {
1196  if (!Device::Allows(Backend::DEVICE_MASK))
1197  {
1198  R_transpose = new ConformingProlongationOperator(*this, true);
1199  }
1200  else
1201  {
1202  R_transpose =
1203  new DeviceConformingProlongationOperator(*this, true);
1204  }
1205  }
1206  Rconf = new TransposeOperator(R_transpose);
1207  return Rconf;
1208  }
1209  else
1210  {
1212  R_transpose = new TransposeOperator(R);
1213  return R;
1214  }
1215 }
1216 
1217 const Operator *ParFiniteElementSpace::GetRestrictionTransposeOperator() const
1218 {
1219  GetRestrictionOperator(); // Ensure that R_transpose is constructed
1220  return R_transpose;
1221 }
1222 
1223 void ParFiniteElementSpace::ExchangeFaceNbrData()
1224 {
1225  if (num_face_nbr_dofs >= 0) { return; }
1226 
1227  pmesh->ExchangeFaceNbrData();
1228 
1229  int num_face_nbrs = pmesh->GetNFaceNeighbors();
1230 
1231  if (num_face_nbrs == 0)
1232  {
1233  num_face_nbr_dofs = 0;
1234  return;
1235  }
1236 
1237  MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
1238  MPI_Request *send_requests = requests;
1239  MPI_Request *recv_requests = requests + num_face_nbrs;
1240  MPI_Status *statuses = new MPI_Status[num_face_nbrs];
1241 
1242  Array<int> ldofs;
1243  Array<int> ldof_marker(GetVSize());
1244  ldof_marker = -1;
1245 
1246  Table send_nbr_elem_dof;
1247 
1248  send_nbr_elem_dof.MakeI(pmesh->send_face_nbr_elements.Size_of_connections());
1249  send_face_nbr_ldof.MakeI(num_face_nbrs);
1250  face_nbr_ldof.MakeI(num_face_nbrs);
1251  int *send_el_off = pmesh->send_face_nbr_elements.GetI();
1252  int *recv_el_off = pmesh->face_nbr_elements_offset;
1253  for (int fn = 0; fn < num_face_nbrs; fn++)
1254  {
1255  int *my_elems = pmesh->send_face_nbr_elements.GetRow(fn);
1256  int num_my_elems = pmesh->send_face_nbr_elements.RowSize(fn);
1257 
1258  for (int i = 0; i < num_my_elems; i++)
1259  {
1260  GetElementVDofs(my_elems[i], ldofs);
1261  for (int j = 0; j < ldofs.Size(); j++)
1262  {
1263  int ldof = (ldofs[j] >= 0 ? ldofs[j] : -1-ldofs[j]);
1264 
1265  if (ldof_marker[ldof] != fn)
1266  {
1267  ldof_marker[ldof] = fn;
1268  send_face_nbr_ldof.AddAColumnInRow(fn);
1269  }
1270  }
1271  send_nbr_elem_dof.AddColumnsInRow(send_el_off[fn] + i, ldofs.Size());
1272  }
1273 
1274  int nbr_rank = pmesh->GetFaceNbrRank(fn);
1275  int tag = 0;
1276  MPI_Isend(&send_face_nbr_ldof.GetI()[fn], 1, MPI_INT, nbr_rank, tag,
1277  MyComm, &send_requests[fn]);
1278 
1279  MPI_Irecv(&face_nbr_ldof.GetI()[fn], 1, MPI_INT, nbr_rank, tag,
1280  MyComm, &recv_requests[fn]);
1281  }
1282 
1283  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1284  face_nbr_ldof.MakeJ();
1285 
1285 
1286  num_face_nbr_dofs = face_nbr_ldof.Size_of_connections();
1287 
1288  MPI_Waitall(num_face_nbrs, send_requests, statuses);
1289  send_face_nbr_ldof.MakeJ();
1290 
1291  // send/receive the I arrays of send_nbr_elem_dof/face_nbr_element_dof,
1292  // respectively (they contain the number of dofs for each face-neighbor
1293  // element)
1294  face_nbr_element_dof.MakeI(recv_el_off[num_face_nbrs]);
1295 
1296  int *send_I = send_nbr_elem_dof.GetI();
1297  int *recv_I = face_nbr_element_dof.GetI();
1298  for (int fn = 0; fn < num_face_nbrs; fn++)
1299  {
1300  int nbr_rank = pmesh->GetFaceNbrRank(fn);
1301  int tag = 0;
1302  MPI_Isend(send_I + send_el_off[fn], send_el_off[fn+1] - send_el_off[fn],
1303  MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
1304 
1305  MPI_Irecv(recv_I + recv_el_off[fn], recv_el_off[fn+1] - recv_el_off[fn],
1306  MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);
1307  }
1308 
1309  MPI_Waitall(num_face_nbrs, send_requests, statuses);
1310  send_nbr_elem_dof.MakeJ();
1311 
1312  ldof_marker = -1;
1313 
1314  for (int fn = 0; fn < num_face_nbrs; fn++)
1315  {
1316  int *my_elems = pmesh->send_face_nbr_elements.GetRow(fn);
1317  int num_my_elems = pmesh->send_face_nbr_elements.RowSize(fn);
1318 
1319  for (int i = 0; i < num_my_elems; i++)
1320  {
1321  GetElementVDofs(my_elems[i], ldofs);
1322  for (int j = 0; j < ldofs.Size(); j++)
1323  {
1324  int ldof = (ldofs[j] >= 0 ? ldofs[j] : -1-ldofs[j]);
1325 
1326  if (ldof_marker[ldof] != fn)
1327  {
1328  ldof_marker[ldof] = fn;
1329  send_face_nbr_ldof.AddConnection(fn, ldofs[j]);
1330  }
1331  }
1332  send_nbr_elem_dof.AddConnections(
1333  send_el_off[fn] + i, ldofs, ldofs.Size());
1334  }
1335  }
1336  send_face_nbr_ldof.ShiftUpI();
1337  send_nbr_elem_dof.ShiftUpI();
1338 
1339  // convert the ldof indices in send_nbr_elem_dof
1340  int *send_J = send_nbr_elem_dof.GetJ();
1341  for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)
1342  {
1343  int num_ldofs = send_face_nbr_ldof.RowSize(fn);
1344  int *ldofs_fn = send_face_nbr_ldof.GetRow(fn);
1345  int j_end = send_I[send_el_off[fn+1]];
1346 
1347  for (int i = 0; i < num_ldofs; i++)
1348  {
1349  int ldof = (ldofs_fn[i] >= 0 ? ldofs_fn[i] : -1-ldofs_fn[i]);
1350  ldof_marker[ldof] = i;
1351  }
1352 
1353  for ( ; j < j_end; j++)
1354  {
1355  int ldof = (send_J[j] >= 0 ? send_J[j] : -1-send_J[j]);
1356  send_J[j] = (send_J[j] >= 0 ? ldof_marker[ldof] : -1-ldof_marker[ldof]);
1357  }
1358  }
1359 
1360  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1361  face_nbr_element_dof.MakeJ();
1362 
1363  // send/receive the J arrays of send_nbr_elem_dof/face_nbr_element_dof,
1364  // respectively (they contain the element dofs in enumeration local for
1365  // the face-neighbor pair)
1366  int *recv_J = face_nbr_element_dof.GetJ();
1367  for (int fn = 0; fn < num_face_nbrs; fn++)
1368  {
1369  int nbr_rank = pmesh->GetFaceNbrRank(fn);
1370  int tag = 0;
1371 
1372  MPI_Isend(send_J + send_I[send_el_off[fn]],
1373  send_I[send_el_off[fn+1]] - send_I[send_el_off[fn]],
1374  MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
1375 
1376  MPI_Irecv(recv_J + recv_I[recv_el_off[fn]],
1377  recv_I[recv_el_off[fn+1]] - recv_I[recv_el_off[fn]],
1378  MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);
1379  }
1380 
1381  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1382 
1383  // shift the J array of face_nbr_element_dof
1384  for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)
1385  {
1386  int shift = face_nbr_ldof.GetI()[fn];
1387  int j_end = recv_I[recv_el_off[fn+1]];
1388 
1389  for ( ; j < j_end; j++)
1390  {
1391  if (recv_J[j] >= 0)
1392  {
1393  recv_J[j] += shift;
1394  }
1395  else
1396  {
1397  recv_J[j] -= shift;
1398  }
1399  }
1400  }
1401 
1402  MPI_Waitall(num_face_nbrs, send_requests, statuses);
1403 
1404  // send/receive the J arrays of send_face_nbr_ldof/face_nbr_ldof,
1405  // respectively
1406  for (int fn = 0; fn < num_face_nbrs; fn++)
1407  {
1408  int nbr_rank = pmesh->GetFaceNbrRank(fn);
1409  int tag = 0;
1410 
1411  MPI_Isend(send_face_nbr_ldof.GetRow(fn),
1412  send_face_nbr_ldof.RowSize(fn),
1413  MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
1414 
1415  MPI_Irecv(face_nbr_ldof.GetRow(fn),
1416  face_nbr_ldof.RowSize(fn),
1417  MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);
1418  }
1419 
1420  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1421  MPI_Waitall(num_face_nbrs, send_requests, statuses);
1422 
1423  // send my_dof_offset (i.e. my_ldof_offset) to face neighbors and receive
1424  // their offset in dof_face_nbr_offsets, used to define face_nbr_glob_dof_map
1425  face_nbr_glob_dof_map.SetSize(num_face_nbr_dofs);
1426  Array<HYPRE_BigInt> dof_face_nbr_offsets(num_face_nbrs);
1427  HYPRE_BigInt my_dof_offset = GetMyDofOffset();
1428  for (int fn = 0; fn < num_face_nbrs; fn++)
1429  {
1430  int nbr_rank = pmesh->GetFaceNbrRank(fn);
1431  int tag = 0;
1432 
1433  MPI_Isend(&my_dof_offset, 1, HYPRE_MPI_BIG_INT, nbr_rank, tag,
1434  MyComm, &send_requests[fn]);
1435 
1436  MPI_Irecv(&dof_face_nbr_offsets[fn], 1, HYPRE_MPI_BIG_INT, nbr_rank, tag,
1437  MyComm, &recv_requests[fn]);
1438  }
1439 
1440  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1441 
1442  // set the array face_nbr_glob_dof_map which holds the global ldof indices of
1443  // the face-neighbor dofs
1444  for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)
1445  {
1446  for (int j_end = face_nbr_ldof.GetI()[fn+1]; j < j_end; j++)
1447  {
1448  int ldof = face_nbr_ldof.GetJ()[j];
1449  if (ldof < 0)
1450  {
1451  ldof = -1-ldof;
1452  }
1453 
1454  face_nbr_glob_dof_map[j] = dof_face_nbr_offsets[fn] + ldof;
1455  }
1456  }
1457 
1458  MPI_Waitall(num_face_nbrs, send_requests, statuses);
1459 
1460  delete [] statuses;
1461  delete [] requests;
1462 }
1463 
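// The exchange above is usually triggered indirectly: calling
// ParGridFunction::ExchangeFaceNbrData() on a grid function of this space
// ends up here, after which GetFaceNbrElementVDofs() and GetFaceNbrFE() below
// can evaluate the neighboring ranks' elements, e.g. in parallel DG face
// integrators.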
1464 DofTransformation *ParFiniteElementSpace::GetFaceNbrElementVDofs(
1465  int i, Array<int> &vdofs) const
1466 {
1467  face_nbr_element_dof.GetRow(i, vdofs);
1468 
1469  DofTransformation *doftrans = NULL;
1470  Geometry::Type geom = pmesh->face_nbr_elements[i]->GetGeometryType();
1471  if (DoFTrans[geom])
1472  {
1473  Array<int> F, Fo;
1474  pmesh->GetFaceNbrElementFaces(pmesh->GetNE() + i, F, Fo);
1475  doftrans = DoFTrans[geom];
1476  doftrans->SetFaceOrientations(Fo);
1477  }
1478  if (vdim == 1 || doftrans == NULL)
1479  {
1480  return doftrans;
1481  }
1482  else
1483  {
1484  VDoFTrans.SetDofTransformation(*doftrans);
1485  return &VDoFTrans;
1486  }
1487 }
1488 
1489 void ParFiniteElementSpace::GetFaceNbrFaceVDofs(int i, Array<int> &vdofs) const
1490 {
1491  // Works for NC mesh where 'i' is an index returned by
1492  // ParMesh::GetSharedFace() such that i >= Mesh::GetNumFaces(), i.e. 'i' is
1493  // the index of a ghost face.
1494  MFEM_ASSERT(Nonconforming() && i >= pmesh->GetNumFaces(), "");
1495  int el1, el2, inf1, inf2;
1496  pmesh->GetFaceElements(i, &el1, &el2);
1497  el2 = -1 - el2;
1498  pmesh->GetFaceInfos(i, &inf1, &inf2);
1499  MFEM_ASSERT(0 <= el2 && el2 < face_nbr_element_dof.Size(), "");
1500  const int nd = face_nbr_element_dof.RowSize(el2);
1501  const int *vol_vdofs = face_nbr_element_dof.GetRow(el2);
1502  const Element *face_nbr_el = pmesh->face_nbr_elements[el2];
1503  Geometry::Type geom = face_nbr_el->GetGeometryType();
1504  const int face_dim = Geometry::Dimension[geom]-1;
1505 
1506  fec->SubDofOrder(geom, face_dim, inf2, vdofs);
1507  // Convert local dofs to local vdofs.
1508  Ordering::DofsToVDofs<Ordering::byNODES>(nd/vdim, vdim, vdofs);
1509  // Convert local vdofs to global vdofs.
1510  for (int j = 0; j < vdofs.Size(); j++)
1511  {
1512  const int ldof = vdofs[j];
1513  vdofs[j] = (ldof >= 0) ? vol_vdofs[ldof] : -1-vol_vdofs[-1-ldof];
1514  }
1515 }
1516 
1517 const FiniteElement *ParFiniteElementSpace::GetFaceNbrFE(int i) const
1518 {
1519  const FiniteElement *FE =
1520  fec->FiniteElementForGeometry(
1521  pmesh->face_nbr_elements[i]->GetGeometryType());
1522 
1523  if (NURBSext)
1524  {
1525  mfem_error("ParFiniteElementSpace::GetFaceNbrFE"
1526  " does not support NURBS!");
1527  }
1528  return FE;
1529 }
1530 
1531 const FiniteElement *ParFiniteElementSpace::GetFaceNbrFaceFE(int i) const
1532 {
1533  // Works for NC mesh where 'i' is an index returned by
1534  // ParMesh::GetSharedFace() such that i >= Mesh::GetNumFaces(), i.e. 'i' is
1535  // the index of a ghost face.
1536  // Works in tandem with GetFaceNbrFaceVDofs() defined above.
1537 
1538  MFEM_ASSERT(Nonconforming() && !NURBSext, "");
1539  Geometry::Type face_geom = pmesh->GetFaceGeometryType(i);
1540  return fec->FiniteElementForGeometry(face_geom);
1541 }
1542 
1543 void ParFiniteElementSpace::Lose_Dof_TrueDof_Matrix()
1544 {
1545  P -> StealData();
1546 #if MFEM_HYPRE_VERSION <= 22200
1547  hypre_ParCSRMatrix *csrP = (hypre_ParCSRMatrix*)(*P);
1548  hypre_ParCSRMatrixOwnsRowStarts(csrP) = 1;
1549  hypre_ParCSRMatrixOwnsColStarts(csrP) = 1;
1550  dof_offsets.LoseData();
1551  tdof_offsets.LoseData();
1552 #else
1553  dof_offsets.DeleteAll();
1554  tdof_offsets.DeleteAll();
1555 #endif
1556 }
1557 
1558 void ParFiniteElementSpace::ConstructTrueDofs()
1559 {
1560  int i, gr, n = GetVSize();
1561  GroupTopology &gt = pmesh->gtopo;
1562  gcomm = new GroupCommunicator(gt);
1563  Table &group_ldof = gcomm->GroupLDofTable();
1564 
1565  GetGroupComm(*gcomm, 1, &ldof_sign);
1566 
1567  // Define ldof_group and mark ldof_ltdof with
1568  // -1 for ldof that is ours
1569  // -2 for ldof that is in a group with another master
1570  ldof_group.SetSize(n);
1571  ldof_ltdof.SetSize(n);
1572  ldof_group = 0;
1573  ldof_ltdof = -1;
1574 
1575  for (gr = 1; gr < group_ldof.Size(); gr++)
1576  {
1577  const int *ldofs = group_ldof.GetRow(gr);
1578  const int nldofs = group_ldof.RowSize(gr);
1579  for (i = 0; i < nldofs; i++)
1580  {
1581  ldof_group[ldofs[i]] = gr;
1582  }
1583 
1584  if (!gt.IAmMaster(gr)) // we are not the master
1585  {
1586  for (i = 0; i < nldofs; i++)
1587  {
1588  ldof_ltdof[ldofs[i]] = -2;
1589  }
1590  }
1591  }
1592 
1593  // count ltdof_size
1594  ltdof_size = 0;
1595  for (i = 0; i < n; i++)
1596  {
1597  if (ldof_ltdof[i] == -1)
1598  {
1599  ldof_ltdof[i] = ltdof_size++;
1600  }
1601  }
1602  gcomm->SetLTDofTable(ldof_ltdof);
1603 
1604  // have the group masters broadcast their ltdofs to the rest of the group
1605  gcomm->Bcast(ldof_ltdof);
1606 }
1607 
1608 void ParFiniteElementSpace::ConstructTrueNURBSDofs()
1609 {
1610  int n = GetVSize();
1611  GroupTopology &gt = pNURBSext()->gtopo;
1612  gcomm = new GroupCommunicator(gt);
1613 
1614  // pNURBSext()->ldof_group is for scalar space!
1615  if (vdim == 1)
1616  {
1617  ldof_group.MakeRef(pNURBSext()->ldof_group);
1618  }
1619  else
1620  {
1621  const int *scalar_ldof_group = pNURBSext()->ldof_group;
1622  ldof_group.SetSize(n);
1623  for (int i = 0; i < n; i++)
1624  {
1625  ldof_group[i] = scalar_ldof_group[VDofToDof(i)];
1626  }
1627  }
1628 
1629  gcomm->Create(ldof_group);
1630 
1631  // ldof_sign.SetSize(n);
1632  // ldof_sign = 1;
1633  ldof_sign.DeleteAll();
1634 
1635  ltdof_size = 0;
1636  ldof_ltdof.SetSize(n);
1637  for (int i = 0; i < n; i++)
1638  {
1639  if (gt.IAmMaster(ldof_group[i]))
1640  {
1641  ldof_ltdof[i] = ltdof_size;
1642  ltdof_size++;
1643  }
1644  else
1645  {
1646  ldof_ltdof[i] = -2;
1647  }
1648  }
1649  gcomm->SetLTDofTable(ldof_ltdof);
1650 
1651  // have the group masters broadcast their ltdofs to the rest of the group
1652  gcomm->Bcast(ldof_ltdof);
1653 }
1654 
1655 void ParFiniteElementSpace::GetGhostVertexDofs(const MeshId &id,
1656  Array<int> &dofs) const
1657 {
1658  int nv = fec->DofForGeometry(Geometry::POINT);
1659  dofs.SetSize(nv);
1660  for (int j = 0; j < nv; j++)
1661  {
1662  dofs[j] = ndofs + nv*id.index + j;
1663  }
1664 }
1665 
1666 void ParFiniteElementSpace::GetGhostEdgeDofs(const MeshId &edge_id,
1667  Array<int> &dofs) const
1668 {
1669  int nv = fec->DofForGeometry(Geometry::POINT);
1670  int ne = fec->DofForGeometry(Geometry::SEGMENT);
1671  dofs.SetSize(2*nv + ne);
1672 
1673  int V[2], ghost = pncmesh->GetNVertices();
1674  pmesh->pncmesh->GetEdgeVertices(edge_id, V);
1675 
1676  for (int i = 0; i < 2; i++)
1677  {
1678  int k = (V[i] < ghost) ? V[i]*nv : (ndofs + (V[i] - ghost)*nv);
1679  for (int j = 0; j < nv; j++)
1680  {
1681  dofs[i*nv + j] = k++;
1682  }
1683  }
1684 
1685  int k = ndofs + ngvdofs + (edge_id.index - pncmesh->GetNEdges())*ne;
1686  for (int j = 0; j < ne; j++)
1687  {
1688  dofs[2*nv + j] = k++;
1689  }
1690 }
1691 
1692 void ParFiniteElementSpace::GetGhostFaceDofs(const MeshId &face_id,
1693  Array<int> &dofs) const
1694 {
1695  int nfv, V[4], E[4], Eo[4];
1696  nfv = pmesh->pncmesh->GetFaceVerticesEdges(face_id, V, E, Eo);
1697 
1698  int nv = fec->DofForGeometry(Geometry::POINT);
1699  int ne = fec->DofForGeometry(Geometry::SEGMENT);
1700  int nf_tri = fec->DofForGeometry(Geometry::TRIANGLE);
1701  int nf_quad = fec->DofForGeometry(Geometry::SQUARE);
1702  int nf = (nfv == 3) ? nf_tri : nf_quad;
1703 
1704  dofs.SetSize(nfv*(nv + ne) + nf);
1705 
1706  int offset = 0;
1707  for (int i = 0; i < nfv; i++)
1708  {
1709  int ghost = pncmesh->GetNVertices();
1710  int first = (V[i] < ghost) ? V[i]*nv : (ndofs + (V[i] - ghost)*nv);
1711  for (int j = 0; j < nv; j++)
1712  {
1713  dofs[offset++] = first + j;
1714  }
1715  }
1716 
1717  for (int i = 0; i < nfv; i++)
1718  {
1719  int ghost = pncmesh->GetNEdges();
1720  int first = (E[i] < ghost) ? nvdofs + E[i]*ne
1721  /* */ : ndofs + ngvdofs + (E[i] - ghost)*ne;
1722  const int *ind = fec->DofOrderForOrientation(Geometry::SEGMENT, Eo[i]);
1723  for (int j = 0; j < ne; j++)
1724  {
1725  dofs[offset++] = (ind[j] >= 0) ? (first + ind[j])
1726  /* */ : (-1 - (first + (-1 - ind[j])));
1727  }
1728  }
1729 
1730  const int ghost_face_index = face_id.index - pncmesh->GetNFaces();
1731  int first = ndofs + ngvdofs + ngedofs + nf_quad*ghost_face_index;
1732 
1733  for (int j = 0; j < nf; j++)
1734  {
1735  dofs[offset++] = first + j;
1736  }
1737 }
1738 
1739 void ParFiniteElementSpace::GetGhostDofs(int entity, const MeshId &id,
1740  Array<int> &dofs) const
1741 {
1742  // helper to get ghost vertex, ghost edge or ghost face DOFs
1743  switch (entity)
1744  {
1745  case 0: GetGhostVertexDofs(id, dofs); break;
1746  case 1: GetGhostEdgeDofs(id, dofs); break;
1747  case 2: GetGhostFaceDofs(id, dofs); break;
1748  }
1749 }
1750 
1751 void ParFiniteElementSpace::GetBareDofs(int entity, int index,
1752  Array<int> &dofs) const
1753 {
1754  int ned, ghost, first;
1755  switch (entity)
1756  {
1757  case 0:
1758  ned = fec->DofForGeometry(Geometry::POINT);
1759  ghost = pncmesh->GetNVertices();
1760  first = (index < ghost)
1761  ? index*ned // regular vertex
1762  : ndofs + (index - ghost)*ned; // ghost vertex
1763  break;
1764 
1765  case 1:
1766  ned = fec->DofForGeometry(Geometry::SEGMENT);
1767  ghost = pncmesh->GetNEdges();
1768  first = (index < ghost)
1769  ? nvdofs + index*ned // regular edge
1770  : ndofs + ngvdofs + (index - ghost)*ned; // ghost edge
1771  break;
1772 
1773  default:
1774  Geometry::Type geom = pncmesh->GetFaceGeometry(index);
1775  MFEM_ASSERT(geom == Geometry::SQUARE ||
1776  geom == Geometry::TRIANGLE, "");
1777 
1778  ned = fec->DofForGeometry(geom);
1779  ghost = pncmesh->GetNFaces();
1780 
1781  if (index < ghost) // regular face
1782  {
1783  first = nvdofs + nedofs + FirstFaceDof(index);
1784  }
1785  else // ghost face
1786  {
1787  index -= ghost;
1788  int stride = fec->DofForGeometry(Geometry::SQUARE);
1789  first = ndofs + ngvdofs + ngedofs + index*stride;
1790  }
1791  break;
1792  }
1793 
1794  dofs.SetSize(ned);
1795  for (int i = 0; i < ned; i++)
1796  {
1797  dofs[i] = first + i;
1798  }
1799 }
1800 
1801 int ParFiniteElementSpace::PackDof(int entity, int index, int edof) const
1802 {
1803  // DOFs are ordered as follows:
1804  // vertices | edges | faces | internal | ghost vert. | g. edges | g. faces
1805 
1806  int ghost, ned;
1807  switch (entity)
1808  {
1809  case 0:
1810  ghost = pncmesh->GetNVertices();
1811  ned = fec->DofForGeometry(Geometry::POINT);
1812 
1813  return (index < ghost)
1814  ? index*ned + edof // regular vertex
1815  : ndofs + (index - ghost)*ned + edof; // ghost vertex
1816 
1817  case 1:
1818  ghost = pncmesh->GetNEdges();
1819  ned = fec->DofForGeometry(Geometry::SEGMENT);
1820 
1821  return (index < ghost)
1822  ? nvdofs + index*ned + edof // regular edge
1823  : ndofs + ngvdofs + (index - ghost)*ned + edof; // ghost edge
1824 
1825  default:
1826  ghost = pncmesh->GetNFaces();
1827  ned = fec->DofForGeometry(pncmesh->GetFaceGeometry(index));
1828 
1829  if (index < ghost) // regular face
1830  {
1831  return nvdofs + nedofs + FirstFaceDof(index) + edof;
1832  }
1833  else // ghost face
1834  {
1835  index -= ghost;
1836  int stride = fec->DofForGeometry(Geometry::SQUARE);
1837  return ndofs + ngvdofs + ngedofs + index*stride + edof;
1838  }
1839  }
1840 }
1841 
1842 static int bisect(const int* array, int size, int value)
1843 {
1844  const int* end = array + size;
1845  const int* pos = std::lower_bound(array, end, value);
1846  MFEM_VERIFY(pos != end, "value not found");
1847  return pos - array;
1848 }
1849 
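// Example of the helper above: for array = {0, 4, 9, 15} and value = 9,
// bisect() returns 2, the position of the first entry not less than 'value'.
// UnpackDof() below uses it on the offsets in 'var_face_dofs' to find which
// face a given face DOF number belongs to.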
1850 /** Dissect a DOF number to obtain the entity type (0=vertex, 1=edge, 2=face),
1851  * entity index and the DOF number within the entity.
1852  */
1853 void ParFiniteElementSpace::UnpackDof(int dof,
1854  int &entity, int &index, int &edof) const
1855 {
1856  MFEM_VERIFY(dof >= 0, "");
1857  if (dof < ndofs)
1858  {
1859  if (dof < nvdofs) // regular vertex
1860  {
1861  int nv = fec->DofForGeometry(Geometry::POINT);
1862  entity = 0, index = dof / nv, edof = dof % nv;
1863  return;
1864  }
1865  dof -= nvdofs;
1866  if (dof < nedofs) // regular edge
1867  {
1868  int ne = fec->DofForGeometry(Geometry::SEGMENT);
1869  entity = 1, index = dof / ne, edof = dof % ne;
1870  return;
1871  }
1872  dof -= nedofs;
1873  if (dof < nfdofs) // regular face
1874  {
1875  if (uni_fdof >= 0) // uniform faces
1876  {
1877  int nf = fec->DofForGeometry(pncmesh->GetFaceGeometry(0));
1878  index = dof / nf, edof = dof % nf;
1879  }
1880  else // mixed faces or var-order space
1881  {
1882  const Table &table = var_face_dofs;
1883  MFEM_ASSERT(table.Size(), "");
1884  int jpos = bisect(table.GetJ(), table.Size_of_connections(), dof);
1885  index = bisect(table.GetI(), table.Size(), jpos);
1886  edof = dof - table.GetRow(index)[0];
1887  }
1888  entity = 2;
1889  return;
1890  }
1891  MFEM_ABORT("Cannot unpack internal DOF");
1892  }
1893  else
1894  {
1895  dof -= ndofs;
1896  if (dof < ngvdofs) // ghost vertex
1897  {
1898  int nv = fec->DofForGeometry(Geometry::POINT);
1899  entity = 0, index = pncmesh->GetNVertices() + dof / nv, edof = dof % nv;
1900  return;
1901  }
1902  dof -= ngvdofs;
1903  if (dof < ngedofs) // ghost edge
1904  {
1905  int ne = fec->DofForGeometry(Geometry::SEGMENT);
1906  entity = 1, index = pncmesh->GetNEdges() + dof / ne, edof = dof % ne;
1907  return;
1908  }
1909  dof -= ngedofs;
1910  if (dof < ngfdofs) // ghost face
1911  {
1912  int stride = fec->DofForGeometry(Geometry::SQUARE);
1913  index = pncmesh->GetNFaces() + dof / stride, edof = dof % stride;
1914  entity = 2;
1915  return;
1916  }
1917  MFEM_ABORT("Out of range DOF.");
1918  }
1919 }
1920 
1921 /** Represents an element of the P matrix. The column number is global and
1922  * corresponds to vector dimension 0. The other dimension columns are offset
1923  * by 'stride'.
1924  */
1925 struct PMatrixElement
1926 {
1927  HYPRE_BigInt column;
1928  int stride;
1929  double value;
1930 
1931  PMatrixElement(HYPRE_BigInt col = 0, int str = 0, double val = 0)
1932  : column(col), stride(str), value(val) {}
1933 
1934  bool operator<(const PMatrixElement &other) const
1935  { return column < other.column; }
1936 
1937  typedef std::vector<PMatrixElement> List;
1938 };
1939 
1940 /** Represents one row of the P matrix, for the construction code below.
1941  * The row is complete: diagonal and offdiagonal elements are not distinguished.
1942  */
1943 struct PMatrixRow
1944 {
1945  PMatrixElement::List elems;
1946 
1947  /// Add other row, times 'coef'.
1948  void AddRow(const PMatrixRow &other, double coef)
1949  {
1950  elems.reserve(elems.size() + other.elems.size());
1951  for (unsigned i = 0; i < other.elems.size(); i++)
1952  {
1953  const PMatrixElement &oei = other.elems[i];
1954  elems.push_back(
1955  PMatrixElement(oei.column, oei.stride, coef * oei.value));
1956  }
1957  }
1958 
1959  /// Remove duplicate columns and sum their values.
1960  void Collapse()
1961  {
1962  if (!elems.size()) { return; }
1963  std::sort(elems.begin(), elems.end());
1964 
1965  int j = 0;
1966  for (unsigned i = 1; i < elems.size(); i++)
1967  {
1968  if (elems[j].column == elems[i].column)
1969  {
1970  elems[j].value += elems[i].value;
1971  }
1972  else
1973  {
1974  elems[++j] = elems[i];
1975  }
1976  }
1977  elems.resize(j+1);
1978  }
1979 
1980  void write(std::ostream &os, double sign) const
1981  {
1982  bin_io::write<int>(os, elems.size());
1983  for (unsigned i = 0; i < elems.size(); i++)
1984  {
1985  const PMatrixElement &e = elems[i];
1986  bin_io::write<HYPRE_BigInt>(os, e.column);
1987  bin_io::write<int>(os, e.stride);
1988  bin_io::write<double>(os, e.value * sign);
1989  }
1990  }
1991 
1992  void read(std::istream &is, double sign)
1993  {
1994  elems.resize(bin_io::read<int>(is));
1995  for (unsigned i = 0; i < elems.size(); i++)
1996  {
1997  PMatrixElement &e = elems[i];
1998  e.column = bin_io::read<HYPRE_BigInt>(is);
1999  e.stride = bin_io::read<int>(is);
2000  e.value = bin_io::read<double>(is) * sign;
2001  }
2002  }
2003 };
2004 
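// Example of PMatrixRow::Collapse() (values illustrative): a row holding
// {(col 7, 0.5), (col 3, 0.25), (col 7, 0.5)} is sorted by column and merged
// to {(col 3, 0.25), (col 7, 1.0)}. AddRow() followed by Collapse() is the
// basic operation used below to accumulate a slave DOF's interpolation
// weights from its master rows.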
2005 /** Represents a message to another processor containing P matrix rows.
2006  * Used by ParFiniteElementSpace::ParallelConformingInterpolation.
2007  */
2008 class NeighborRowMessage : public VarMessage<314>
2009 {
2010 public:
2011  typedef NCMesh::MeshId MeshId;
2012  typedef ParNCMesh::GroupId GroupId;
2013 
2014  struct RowInfo
2015  {
2016  int entity, index, edof;
2017  GroupId group;
2018  PMatrixRow row;
2019 
2020  RowInfo(int ent, int idx, int edof, GroupId grp, const PMatrixRow &row)
2021  : entity(ent), index(idx), edof(edof), group(grp), row(row) {}
2022 
2023  RowInfo(int ent, int idx, int edof, GroupId grp)
2024  : entity(ent), index(idx), edof(edof), group(grp) {}
2025 
2026  typedef std::vector<RowInfo> List;
2027  };
2028 
2029  NeighborRowMessage() : pncmesh(NULL) {}
2030 
2031  void AddRow(int entity, int index, int edof, GroupId group,
2032  const PMatrixRow &row)
2033  {
2034  rows.push_back(RowInfo(entity, index, edof, group, row));
2035  }
2036 
2037  const RowInfo::List& GetRows() const { return rows; }
2038 
2039  void SetNCMesh(ParNCMesh* pnc) { pncmesh = pnc; }
2040  void SetFEC(const FiniteElementCollection* fec_) { this->fec = fec_; }
2041 
2042  typedef std::map<int, NeighborRowMessage> Map;
2043 
2044 protected:
2045  RowInfo::List rows;
2046 
2047  ParNCMesh *pncmesh;
2048  const FiniteElementCollection* fec;
2049 
2050  virtual void Encode(int rank);
2051  virtual void Decode(int);
2052 };
2053 
2054 
2055 void NeighborRowMessage::Encode(int rank)
2056 {
2057  std::ostringstream stream;
2058 
2059  Array<MeshId> ent_ids[3];
2060  Array<GroupId> group_ids[3];
2061  Array<int> row_idx[3];
2062 
2063  // encode MeshIds and groups
2064  for (unsigned i = 0; i < rows.size(); i++)
2065  {
2066  const RowInfo &ri = rows[i];
2067  const MeshId &id = pncmesh->GetNCList(ri.entity).LookUp(ri.index);
2068  ent_ids[ri.entity].Append(id);
2069  row_idx[ri.entity].Append(i);
2070  group_ids[ri.entity].Append(ri.group);
2071  }
2072 
2073  Array<GroupId> all_group_ids;
2074  all_group_ids.Reserve(rows.size());
2075  for (int i = 0; i < 3; i++)
2076  {
2077  all_group_ids.Append(group_ids[i]);
2078  }
2079 
2080  pncmesh->AdjustMeshIds(ent_ids, rank);
2081  pncmesh->EncodeMeshIds(stream, ent_ids);
2082  pncmesh->EncodeGroups(stream, all_group_ids);
2083 
2084  // write all rows to the stream
2085  for (int ent = 0; ent < 3; ent++)
2086  {
2087  const Array<MeshId> &ids = ent_ids[ent];
2088  for (int i = 0; i < ids.Size(); i++)
2089  {
2090  const MeshId &id = ids[i];
2091  const RowInfo &ri = rows[row_idx[ent][i]];
2092  MFEM_ASSERT(ent == ri.entity, "");
2093 
2094 #ifdef MFEM_DEBUG_PMATRIX
2095  mfem::out << "Rank " << pncmesh->MyRank << " sending to " << rank
2096  << ": ent " << ri.entity << ", index " << ri.index
2097  << ", edof " << ri.edof << " (id " << id.element << "/"
2098  << int(id.local) << ")" << std::endl;
2099 #endif
2100 
2101  // handle orientation and sign change
2102  int edof = ri.edof;
2103  double s = 1.0;
2104  if (ent == 1)
2105  {
2106  int eo = pncmesh->GetEdgeNCOrientation(id);
2107  const int* ind = fec->DofOrderForOrientation(Geometry::SEGMENT, eo);
2108  if ((edof = ind[edof]) < 0)
2109  {
2110  edof = -1 - edof;
2111  s = -1;
2112  }
2113  }
2114 
2115  bin_io::write<int>(stream, edof);
2116  ri.row.write(stream, s);
2117  }
2118  }
2119 
2120  rows.clear();
2121  stream.str().swap(data);
2122 }
2123 
2124 void NeighborRowMessage::Decode(int rank)
2125 {
2126  std::istringstream stream(data);
2127 
2128  Array<MeshId> ent_ids[3];
2129  Array<GroupId> group_ids;
2130 
2131  // decode vertex/edge/face IDs and groups
2132  pncmesh->DecodeMeshIds(stream, ent_ids);
2133  pncmesh->DecodeGroups(stream, group_ids);
2134 
2135  int nrows = ent_ids[0].Size() + ent_ids[1].Size() + ent_ids[2].Size();
2136  MFEM_ASSERT(nrows == group_ids.Size(), "");
2137 
2138  rows.clear();
2139  rows.reserve(nrows);
2140 
2141  // read rows
2142  for (int ent = 0, gi = 0; ent < 3; ent++)
2143  {
2144  const Array<MeshId> &ids = ent_ids[ent];
2145  for (int i = 0; i < ids.Size(); i++)
2146  {
2147  const MeshId &id = ids[i];
2148  int edof = bin_io::read<int>(stream);
2149 
2150  // handle orientation and sign change
2151  const int *ind = NULL;
2152  if (ent == 1)
2153  {
2154  int eo = pncmesh->GetEdgeNCOrientation(id);
2155  ind = fec->DofOrderForOrientation(Geometry::SEGMENT, eo);
2156  }
2157  else if (ent == 2)
2158  {
2159  Geometry::Type geom = pncmesh->GetFaceGeometry(id.index);
2160  int fo = pncmesh->GetFaceOrientation(id.index);
2161  ind = fec->DofOrderForOrientation(geom, fo);
2162  }
2163 
2164  double s = 1.0;
2165  if (ind && (edof = ind[edof]) < 0)
2166  {
2167  edof = -1 - edof;
2168  s = -1.0;
2169  }
2170 
2171  rows.push_back(RowInfo(ent, id.index, edof, group_ids[gi++]));
2172  rows.back().row.read(stream, s);
2173 
2174 #ifdef MFEM_DEBUG_PMATRIX
2175  mfem::out << "Rank " << pncmesh->MyRank << " receiving from " << rank
2176  << ": ent " << rows.back().entity << ", index "
2177  << rows.back().index << ", edof " << rows.back().edof
2178  << std::endl;
2179 #endif
2180  }
2181  }
2182 }
2183 
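The edge/face DOF handling above relies on a sign-packing convention: when DofOrderForOrientation() returns a negative index, the true index is recovered as -1 - edof and the DOF value is negated. A minimal standalone sketch of that round trip (the helper name below is hypothetical and not part of MFEM):

#include <cassert>

// Hypothetical helper illustrating the "-1 - edof" packing used in
// Encode()/Decode(): a negative entry means "this DOF, with flipped sign".
static int pack_flipped(int edof) { return -1 - edof; }

int main()
{
   int packed = pack_flipped(3);                  // packed == -4
   int edof = packed;
   double s = 1.0;
   if (edof < 0) { edof = -1 - edof; s = -1.0; }  // same unpacking as above
   assert(edof == 3 && s == -1.0);                // index and sign recovered
   return 0;
}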
2184 void
2185 ParFiniteElementSpace::ScheduleSendRow(const PMatrixRow &row, int dof,
2186  GroupId group_id,
2187  NeighborRowMessage::Map &send_msg) const
2188 {
2189  int ent, idx, edof;
2190  UnpackDof(dof, ent, idx, edof);
2191 
2192  const ParNCMesh::CommGroup &group = pncmesh->GetGroup(group_id);
2193  for (unsigned i = 0; i < group.size(); i++)
2194  {
2195  int rank = group[i];
2196  if (rank != MyRank)
2197  {
2198  NeighborRowMessage &msg = send_msg[rank];
2199  msg.AddRow(ent, idx, edof, group_id, row);
2200  msg.SetNCMesh(pncmesh);
2201  msg.SetFEC(fec);
2202 #ifdef MFEM_PMATRIX_STATS
2203  n_rows_sent++;
2204 #endif
2205  }
2206  }
2207 }
2208 
2209 void ParFiniteElementSpace::ForwardRow(const PMatrixRow &row, int dof,
2210  GroupId group_sent_id, GroupId group_id,
2211  NeighborRowMessage::Map &send_msg) const
2212 {
2213  int ent, idx, edof;
2214  UnpackDof(dof, ent, idx, edof);
2215 
2216  const ParNCMesh::CommGroup &group = pncmesh->GetGroup(group_id);
2217  for (unsigned i = 0; i < group.size(); i++)
2218  {
2219  int rank = group[i];
2220  if (rank != MyRank && !pncmesh->GroupContains(group_sent_id, rank))
2221  {
2222  NeighborRowMessage &msg = send_msg[rank];
2223  GroupId invalid = -1; // to prevent forwarding again
2224  msg.AddRow(ent, idx, edof, invalid, row);
2225  msg.SetNCMesh(pncmesh);
2226  msg.SetFEC(fec);
2227 #ifdef MFEM_PMATRIX_STATS
2228  n_rows_fwd++;
2229 #endif
2230 #ifdef MFEM_DEBUG_PMATRIX
2231  mfem::out << "Rank " << pncmesh->GetMyRank() << " forwarding to "
2232  << rank << ": ent " << ent << ", index " << idx
2233  << ", edof " << edof << std::endl;
2234 #endif
2235  }
2236  }
2237 }
2238 
2239 #ifdef MFEM_DEBUG_PMATRIX
2240 void ParFiniteElementSpace
2241 ::DebugDumpDOFs(std::ostream &os,
2242  const SparseMatrix &deps,
2243  const Array<GroupId> &dof_group,
2244  const Array<GroupId> &dof_owner,
2245  const Array<bool> &finalized) const
2246 {
2247  for (int i = 0; i < dof_group.Size(); i++)
2248  {
2249  os << i << ": ";
2250  if (i < (nvdofs + nedofs + nfdofs) || i >= ndofs)
2251  {
2252  int ent, idx, edof;
2253  UnpackDof(i, ent, idx, edof);
2254 
2255  os << edof << " @ ";
2256  if (i > ndofs) { os << "ghost "; }
2257  switch (ent)
2258  {
2259  case 0: os << "vertex "; break;
2260  case 1: os << "edge "; break;
2261  default: os << "face "; break;
2262  }
2263  os << idx << "; ";
2264 
2265  if (i < deps.Height() && deps.RowSize(i))
2266  {
2267  os << "depends on ";
2268  for (int j = 0; j < deps.RowSize(i); j++)
2269  {
2270  os << deps.GetRowColumns(i)[j] << " ("
2271  << deps.GetRowEntries(i)[j] << ")";
2272  if (j < deps.RowSize(i)-1) { os << ", "; }
2273  }
2274  os << "; ";
2275  }
2276  else
2277  {
2278  os << "no deps; ";
2279  }
2280 
2281  os << "group " << dof_group[i] << " (";
2282  const ParNCMesh::CommGroup &g = pncmesh->GetGroup(dof_group[i]);
2283  for (unsigned j = 0; j < g.size(); j++)
2284  {
2285  if (j) { os << ", "; }
2286  os << g[j];
2287  }
2288 
2289  os << "), owner " << dof_owner[i] << " (rank "
2290  << pncmesh->GetGroup(dof_owner[i])[0] << "); "
2291  << (finalized[i] ? "finalized" : "NOT finalized");
2292  }
2293  else
2294  {
2295  os << "internal";
2296  }
2297  os << "\n";
2298  }
2299 }
2300 #endif
2301 
2302 int ParFiniteElementSpace
2303 ::BuildParallelConformingInterpolation(HypreParMatrix **P_, SparseMatrix **R_,
2304  Array<HYPRE_BigInt> &dof_offs,
2305  Array<HYPRE_BigInt> &tdof_offs,
2306  Array<int> *dof_tdof,
2307  bool partial) const
2308 {
2309  // TODO: general face DOF transformations in NeighborRowMessage::Decode()
2310  MFEM_VERIFY(!(fec->GetOrder() >= 2
2313  "Nedelec NC tets of order >= 2 are not supported yet.");
2314 
2315  bool dg = (nvdofs == 0 && nedofs == 0 && nfdofs == 0);
2316 
2317 #ifdef MFEM_PMATRIX_STATS
2318  n_msgs_sent = n_msgs_recv = 0;
2319  n_rows_sent = n_rows_recv = n_rows_fwd = 0;
2320 #endif
2321 
2322  // *** STEP 1: build master-slave dependency lists ***
2323 
2324  int total_dofs = ndofs + ngdofs;
2325  SparseMatrix deps(ndofs, total_dofs);
2326 
2327  if (!dg && !partial)
2328  {
2329  Array<int> master_dofs, slave_dofs;
2330 
2331  // loop through *all* master edges/faces, constrain their slaves
2332  for (int entity = 0; entity <= 2; entity++)
2333  {
2334  const NCMesh::NCList &list = pncmesh->GetNCList(entity);
2335  if (!list.masters.Size()) { continue; }
2336 
2337  IsoparametricTransformation T;
2338  DenseMatrix I;
2339 
2340  // process masters that we own or that affect our edges/faces
2341  for (int mi = 0; mi < list.masters.Size(); mi++)
2342  {
2343  const NCMesh::Master &mf = list.masters[mi];
2344 
2345  // get master DOFs
2346  if (pncmesh->IsGhost(entity, mf.index))
2347  {
2348  GetGhostDofs(entity, mf, master_dofs);
2349  }
2350  else
2351  {
2352  GetEntityDofs(entity, mf.index, master_dofs, mf.Geom());
2353  }
2354 
2355  if (!master_dofs.Size()) { continue; }
2356 
2357  const FiniteElement* fe = fec->FiniteElementForGeometry(mf.Geom());
2358  if (!fe) { continue; }
2359 
2360  switch (mf.Geom())
2361  {
2362  case Geometry::SQUARE: T.SetFE(&QuadrilateralFE); break;
2363  case Geometry::TRIANGLE: T.SetFE(&TriangleFE); break;
2364  case Geometry::SEGMENT: T.SetFE(&SegmentFE); break;
2365  default: MFEM_ABORT("unsupported geometry");
2366  }
2367 
2368  // constrain slaves that exist in our mesh
2369  for (int si = mf.slaves_begin; si < mf.slaves_end; si++)
2370  {
2371  const NCMesh::Slave &sf = list.slaves[si];
2372  if (pncmesh->IsGhost(entity, sf.index)) { continue; }
2373 
2374  const int variant = 0; // TODO parallel var-order
2375  GetEntityDofs(entity, sf.index, slave_dofs, mf.Geom(), variant);
2376  if (!slave_dofs.Size()) { continue; }
2377 
2378  list.OrientedPointMatrix(sf, T.GetPointMat());
2379  fe->GetLocalInterpolation(T, I);
2380 
2381  // make each slave DOF dependent on all master DOFs
2382  AddDependencies(deps, master_dofs, slave_dofs, I);
2383  }
2384  }
2385  }
2386 
2387  deps.Finalize();
2388  }
2389 
2390  // *** STEP 2: initialize group and owner ID for each DOF ***
2391 
2392  Array<GroupId> dof_group(total_dofs);
2393  Array<GroupId> dof_owner(total_dofs);
2394  dof_group = 0;
2395  dof_owner = 0;
2396 
2397  if (!dg)
2398  {
2399  Array<int> dofs;
2400 
2401  // initialize dof_group[], dof_owner[]
2402  for (int entity = 0; entity <= 2; entity++)
2403  {
2404  const NCMesh::NCList &list = pncmesh->GetNCList(entity);
2405 
2406  int lsize[3] =
2407  { list.conforming.Size(), list.masters.Size(), list.slaves.Size() };
2408 
2409  for (int l = 0; l < 3; l++)
2410  {
2411  for (int i = 0; i < lsize[l]; i++)
2412  {
2413  const MeshId &id =
2414  (l == 0) ? list.conforming[i] :
2415  (l == 1) ? (const MeshId&) list.masters[i]
2416  /* */ : (const MeshId&) list.slaves[i];
2417 
2418  if (id.index < 0) { continue; }
2419 
2420  GroupId owner = pncmesh->GetEntityOwnerId(entity, id.index);
2421  GroupId group = pncmesh->GetEntityGroupId(entity, id.index);
2422 
2423  GetBareDofs(entity, id.index, dofs);
2424 
2425  for (int j = 0; j < dofs.Size(); j++)
2426  {
2427  int dof = dofs[j];
2428  dof_owner[dof] = owner;
2429  dof_group[dof] = group;
2430  }
2431  }
2432  }
2433  }
2434  }
2435 
2436  // *** STEP 3: count true DOFs and calculate P row/column partitions ***
2437 
2438  Array<bool> finalized(total_dofs);
2439  finalized = false;
2440 
2441  // DOFs that stayed independent and are ours are true DOFs
2442  int num_true_dofs = 0;
2443  for (int i = 0; i < ndofs; i++)
2444  {
2445  if (dof_owner[i] == 0 && deps.RowSize(i) == 0)
2446  {
2447  num_true_dofs++;
2448  finalized[i] = true;
2449  }
2450  }
2451 
2452  // calculate global offsets
2453  HYPRE_BigInt loc_sizes[2] = { ndofs*vdim, num_true_dofs*vdim };
2454  Array<HYPRE_BigInt>* offsets[2] = { &dof_offs, &tdof_offs };
2455  pmesh->GenerateOffsets(2, loc_sizes, offsets); // calls MPI_Scan, MPI_Bcast
2456 
2457  HYPRE_BigInt my_tdof_offset =
2458  tdof_offs[HYPRE_AssumedPartitionCheck() ? 0 : MyRank];
2459 
2460  if (R_)
2461  {
2462  // initialize the restriction matrix (also parallel but block-diagonal)
2463  *R_ = new SparseMatrix(num_true_dofs*vdim, ndofs*vdim);
2464  }
2465  if (dof_tdof)
2466  {
2467  dof_tdof->SetSize(ndofs*vdim);
2468  *dof_tdof = -1;
2469  }
2470 
2471  std::vector<PMatrixRow> pmatrix(total_dofs);
2472 
2473  bool bynodes = (ordering == Ordering::byNODES);
2474  int vdim_factor = bynodes ? 1 : vdim;
2475  int dof_stride = bynodes ? ndofs : 1;
2476  int tdof_stride = bynodes ? num_true_dofs : 1;
2477 
2478  // big container for all messages we send (the list is for iterations)
2479  std::list<NeighborRowMessage::Map> send_msg;
2480  send_msg.push_back(NeighborRowMessage::Map());
2481 
2482  // put identity in P and R for true DOFs, set ldof_ltdof
2483  for (int dof = 0, tdof = 0; dof < ndofs; dof++)
2484  {
2485  if (finalized[dof])
2486  {
2487  pmatrix[dof].elems.push_back(
2488  PMatrixElement(my_tdof_offset + vdim_factor*tdof, tdof_stride, 1.));
2489 
2490  // prepare messages to neighbors with identity rows
2491  if (dof_group[dof] != 0)
2492  {
2493  ScheduleSendRow(pmatrix[dof], dof, dof_group[dof], send_msg.back());
2494  }
2495 
2496  for (int vd = 0; vd < vdim; vd++)
2497  {
2498  int vdof = dof*vdim_factor + vd*dof_stride;
2499  int vtdof = tdof*vdim_factor + vd*tdof_stride;
2500 
2501  if (R_) { (*R_)->Add(vtdof, vdof, 1.0); }
2502  if (dof_tdof) { (*dof_tdof)[vdof] = vtdof; }
2503  }
2504  tdof++;
2505  }
2506  }
2507 
2508  // send identity rows
2509  NeighborRowMessage::IsendAll(send_msg.back(), MyComm);
2510 #ifdef MFEM_PMATRIX_STATS
2511  n_msgs_sent += send_msg.back().size();
2512 #endif
2513 
2514  if (R_) { (*R_)->Finalize(); }
2515 
2516  // *** STEP 4: main loop ***
2517 
2518  // a single instance (recv_msg) is reused for all incoming messages
2519  NeighborRowMessage recv_msg;
2520  recv_msg.SetNCMesh(pncmesh);
2521  recv_msg.SetFEC(fec);
2522 
2523  int num_finalized = num_true_dofs;
2524  PMatrixRow buffer;
2525  buffer.elems.reserve(1024);
2526 
2527  while (num_finalized < ndofs)
2528  {
2529  // prepare a new round of send buffers
2530  if (send_msg.back().size())
2531  {
2532  send_msg.push_back(NeighborRowMessage::Map());
2533  }
2534 
2535  // check for incoming messages, receive PMatrixRows
2536  int rank, size;
2537  while (NeighborRowMessage::IProbe(rank, size, MyComm))
2538  {
2539  recv_msg.Recv(rank, size, MyComm);
2540 #ifdef MFEM_PMATRIX_STATS
2541  n_msgs_recv++;
2542  n_rows_recv += recv_msg.GetRows().size();
2543 #endif
2544 
2545  const NeighborRowMessage::RowInfo::List &rows = recv_msg.GetRows();
2546  for (unsigned i = 0; i < rows.size(); i++)
2547  {
2548  const NeighborRowMessage::RowInfo &ri = rows[i];
2549  int dof = PackDof(ri.entity, ri.index, ri.edof);
2550  pmatrix[dof] = ri.row;
2551 
2552  if (dof < ndofs && !finalized[dof]) { num_finalized++; }
2553  finalized[dof] = true;
2554 
2555  if (ri.group >= 0 && dof_group[dof] != ri.group)
2556  {
2557  // the sender didn't see the complete group, forward the message
2558  ForwardRow(ri.row, dof, ri.group, dof_group[dof], send_msg.back());
2559  }
2560  }
2561  }
2562 
2563  // finalize all rows that can currently be finalized
2564  bool done = false;
2565  while (!done)
2566  {
2567  done = true;
2568  for (int dof = 0; dof < ndofs; dof++)
2569  {
2570  if (finalized[dof]) { continue; }
2571 
2572  bool owned = (dof_owner[dof] == 0);
2573  bool shared = (dof_group[dof] != 0);
2574 
2575  if (owned && DofFinalizable(dof, finalized, deps))
2576  {
2577  const int* dep_col = deps.GetRowColumns(dof);
2578  const double* dep_coef = deps.GetRowEntries(dof);
2579  int num_dep = deps.RowSize(dof);
2580 
2581  // form linear combination of rows
2582  buffer.elems.clear();
2583  for (int j = 0; j < num_dep; j++)
2584  {
2585  buffer.AddRow(pmatrix[dep_col[j]], dep_coef[j]);
2586  }
2587  buffer.Collapse();
2588  pmatrix[dof] = buffer;
2589 
2590  finalized[dof] = true;
2591  num_finalized++;
2592  done = false;
2593 
2594  // send row to neighbors who need it
2595  if (shared)
2596  {
2597  ScheduleSendRow(pmatrix[dof], dof, dof_group[dof],
2598  send_msg.back());
2599  }
2600  }
2601  }
2602  }
2603 
2604 #ifdef MFEM_DEBUG_PMATRIX
2605  /*static int dump = 0;
2606  if (dump < 10)
2607  {
2608  char fname[100];
2609  sprintf(fname, "dofs%02d.txt", MyRank);
2610  std::ofstream f(fname);
2611  DebugDumpDOFs(f, deps, dof_group, dof_owner, finalized);
2612  dump++;
2613  }*/
2614 #endif
2615 
2616  // send current batch of messages
2617  NeighborRowMessage::IsendAll(send_msg.back(), MyComm);
2618 #ifdef MFEM_PMATRIX_STATS
2619  n_msgs_sent += send_msg.back().size();
2620 #endif
2621  }
2622 
2623  if (P_)
2624  {
2625  *P_ = MakeVDimHypreMatrix(pmatrix, ndofs, num_true_dofs,
2626  dof_offs, tdof_offs);
2627  }
2628 
2629  // clean up possible remaining messages in the queue to avoid receiving
2630  // them erroneously in the next run
2631  int rank, size;
2632  while (NeighborRowMessage::IProbe(rank, size, MyComm))
2633  {
2634  recv_msg.RecvDrop(rank, size, MyComm);
2635  }
2636 
2637  // make sure we can discard all send buffers
2638  for (std::list<NeighborRowMessage::Map>::iterator
2639  it = send_msg.begin(); it != send_msg.end(); ++it)
2640  {
2641  NeighborRowMessage::WaitAllSent(*it);
2642  }
2643 
2644 #ifdef MFEM_PMATRIX_STATS
2645  int n_rounds = send_msg.size();
2646  int glob_rounds, glob_msgs_sent, glob_msgs_recv;
2647  int glob_rows_sent, glob_rows_recv, glob_rows_fwd;
2648 
2649  MPI_Reduce(&n_rounds, &glob_rounds, 1, MPI_INT, MPI_SUM, 0, MyComm);
2650  MPI_Reduce(&n_msgs_sent, &glob_msgs_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
2651  MPI_Reduce(&n_msgs_recv, &glob_msgs_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
2652  MPI_Reduce(&n_rows_sent, &glob_rows_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
2653  MPI_Reduce(&n_rows_recv, &glob_rows_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
2654  MPI_Reduce(&n_rows_fwd, &glob_rows_fwd, 1, MPI_INT, MPI_SUM, 0, MyComm);
2655 
2656  if (MyRank == 0)
2657  {
2658  mfem::out << "P matrix stats (avg per rank): "
2659  << double(glob_rounds)/NRanks << " rounds, "
2660  << double(glob_msgs_sent)/NRanks << " msgs sent, "
2661  << double(glob_msgs_recv)/NRanks << " msgs recv, "
2662  << double(glob_rows_sent)/NRanks << " rows sent, "
2663  << double(glob_rows_recv)/NRanks << " rows recv, "
2664  << double(glob_rows_fwd)/NRanks << " rows forwarded."
2665  << std::endl;
2666  }
2667 #endif
2668 
2669  return num_true_dofs*vdim;
2670 }
2671 
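The P and R objects assembled by BuildParallelConformingInterpolation() are reached through the public ParFiniteElementSpace interface. A minimal usage sketch, assuming a ParMesh 'pmesh' and a polynomial order are already available (the wrapper function name is illustrative only):

#include "mfem.hpp"
using namespace mfem;

void inspect_true_dofs(ParMesh &pmesh, int order)
{
   H1_FECollection fec(order, pmesh.Dimension());
   ParFiniteElementSpace fes(&pmesh, &fec);

   HypreParMatrix *P = fes.Dof_TrueDof_Matrix();        // conforming prolongation
   const SparseMatrix *R = fes.GetRestrictionMatrix();  // true-DOF restriction
   mfem::out << "local ldofs: " << fes.GetVSize()
             << ", local tdofs: " << fes.GetTrueVSize()
             << ", global tdofs: " << fes.GlobalTrueVSize() << std::endl;
   (void)P; (void)R;
}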
2672 
2673 HypreParMatrix* ParFiniteElementSpace
2674 ::MakeVDimHypreMatrix(const std::vector<PMatrixRow> &rows,
2675  int local_rows, int local_cols,
2676  Array<HYPRE_BigInt> &row_starts,
2677  Array<HYPRE_BigInt> &col_starts) const
2678 {
2679  bool assumed = HYPRE_AssumedPartitionCheck();
2680  bool bynodes = (ordering == Ordering::byNODES);
2681 
2682  HYPRE_BigInt first_col = col_starts[assumed ? 0 : MyRank];
2683  HYPRE_BigInt next_col = col_starts[assumed ? 1 : MyRank+1];
2684 
2685  // count nonzeros in diagonal/offdiagonal parts
2686  HYPRE_Int nnz_diag = 0, nnz_offd = 0;
2687  std::map<HYPRE_BigInt, int> col_map;
2688  for (int i = 0; i < local_rows; i++)
2689  {
2690  for (unsigned j = 0; j < rows[i].elems.size(); j++)
2691  {
2692  const PMatrixElement &elem = rows[i].elems[j];
2693  HYPRE_BigInt col = elem.column;
2694  if (col >= first_col && col < next_col)
2695  {
2696  nnz_diag += vdim;
2697  }
2698  else
2699  {
2700  nnz_offd += vdim;
2701  for (int vd = 0; vd < vdim; vd++)
2702  {
2703  col_map[col] = -1;
2704  col += elem.stride;
2705  }
2706  }
2707  }
2708  }
2709 
2710  // create offd column mapping
2711  HYPRE_BigInt *cmap = Memory<HYPRE_BigInt>(col_map.size());
2712  int offd_col = 0;
2713  for (auto it = col_map.begin(); it != col_map.end(); ++it)
2714  {
2715  cmap[offd_col] = it->first;
2716  it->second = offd_col++;
2717  }
2718 
2719  HYPRE_Int *I_diag = Memory<HYPRE_Int>(vdim*local_rows + 1);
2720  HYPRE_Int *I_offd = Memory<HYPRE_Int>(vdim*local_rows + 1);
2721 
2722  HYPRE_Int *J_diag = Memory<HYPRE_Int>(nnz_diag);
2723  HYPRE_Int *J_offd = Memory<HYPRE_Int>(nnz_offd);
2724 
2725  double *A_diag = Memory<double>(nnz_diag);
2726  double *A_offd = Memory<double>(nnz_offd);
2727 
2728  int vdim1 = bynodes ? vdim : 1;
2729  int vdim2 = bynodes ? 1 : vdim;
2730  int vdim_offset = bynodes ? local_cols : 1;
2731 
2732  // copy the diag/offd elements
2733  nnz_diag = nnz_offd = 0;
2734  int vrow = 0;
2735  for (int vd1 = 0; vd1 < vdim1; vd1++)
2736  {
2737  for (int i = 0; i < local_rows; i++)
2738  {
2739  for (int vd2 = 0; vd2 < vdim2; vd2++)
2740  {
2741  I_diag[vrow] = nnz_diag;
2742  I_offd[vrow++] = nnz_offd;
2743 
2744  int vd = bynodes ? vd1 : vd2;
2745  for (unsigned j = 0; j < rows[i].elems.size(); j++)
2746  {
2747  const PMatrixElement &elem = rows[i].elems[j];
2748  if (elem.column >= first_col && elem.column < next_col)
2749  {
2750  J_diag[nnz_diag] = elem.column + vd*vdim_offset - first_col;
2751  A_diag[nnz_diag++] = elem.value;
2752  }
2753  else
2754  {
2755  J_offd[nnz_offd] = col_map[elem.column + vd*elem.stride];
2756  A_offd[nnz_offd++] = elem.value;
2757  }
2758  }
2759  }
2760  }
2761  }
2762  MFEM_ASSERT(vrow == vdim*local_rows, "");
2763  I_diag[vrow] = nnz_diag;
2764  I_offd[vrow] = nnz_offd;
2765 
2766  return new HypreParMatrix(MyComm,
2767  row_starts.Last(), col_starts.Last(),
2768  row_starts.GetData(), col_starts.GetData(),
2769  I_diag, J_diag, A_diag,
2770  I_offd, J_offd, A_offd,
2771  col_map.size(), cmap);
2772 }
2773 
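For readers unfamiliar with the hypre CSR layout used in MakeVDimHypreMatrix(): columns owned by the current rank go into the diagonal block, all other columns into the off-diagonal block and are renumbered through a compressed column map. A standalone sketch with made-up numbers:

#include <cstdio>
#include <map>
#include <vector>

int main()
{
   long first_col = 100, next_col = 200;          // this rank owns columns [100, 200)
   std::vector<long> cols = {5, 120, 150, 420};   // global column indices of one row
   std::map<long, int> col_map;                   // global column -> compact offd index
   int nnz_diag = 0, nnz_offd = 0;
   for (long c : cols)
   {
      if (c >= first_col && c < next_col) { nnz_diag++; }
      else { col_map.emplace(c, (int)col_map.size()); nnz_offd++; }
   }
   std::printf("diag nnz = %d, offd nnz = %d, offd cols = %zu\n",
               nnz_diag, nnz_offd, col_map.size());
   return 0;
}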
2774 template <typename int_type>
2775 static int_type* make_i_array(int nrows)
2776 {
2777  int_type *I = Memory<int_type>(nrows+1);
2778  for (int i = 0; i <= nrows; i++) { I[i] = -1; }
2779  return I;
2780 }
2781 
2782 template <typename int_type>
2783 static int_type* make_j_array(int_type* I, int nrows)
2784 {
2785  int nnz = 0;
2786  for (int i = 0; i < nrows; i++)
2787  {
2788  if (I[i] >= 0) { nnz++; }
2789  }
2790  int_type *J = Memory<int_type>(nnz);
2791 
2792  I[nrows] = -1;
2793  for (int i = 0, k = 0; i <= nrows; i++)
2794  {
2795  int_type col = I[i];
2796  I[i] = k;
2797  if (col >= 0) { J[k++] = col; }
2798  }
2799  return J;
2800 }
2801 
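The two helpers above build a CSR structure in which every row has at most one entry: make_i_array() first stores "the column of row i, or -1" directly in I, and make_j_array() then rewrites I in place into row offsets while collecting the columns into J. A worked standalone example with hypothetical data:

#include <cassert>

int main()
{
   // nrows = 4; rows 0 and 2 have one entry (columns 7 and 3), rows 1 and 3 are empty
   int I[5] = {7, -1, 3, -1, -1};    // I[nrows] plays the role of the sentinel
   int J[2];
   for (int i = 0, k = 0; i <= 4; i++)
   {
      int col = I[i];
      I[i] = k;                      // running count becomes the CSR row offset
      if (col >= 0) { J[k++] = col; }
   }
   // Resulting CSR: offsets {0, 1, 1, 2, 2}, columns {7, 3}
   assert(I[4] == 2 && J[0] == 7 && J[1] == 3);
   return 0;
}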
2802 HypreParMatrix*
2803 ParFiniteElementSpace::RebalanceMatrix(int old_ndofs,
2804  const Table* old_elem_dof,
2805  const Table* old_elem_fos)
2806 {
2807  MFEM_VERIFY(Nonconforming(), "Only supported for nonconforming meshes.");
2808  MFEM_VERIFY(old_dof_offsets.Size(), "ParFiniteElementSpace::Update needs to "
2809  "be called before ParFiniteElementSpace::RebalanceMatrix");
2810 
2811  HYPRE_BigInt old_offset = HYPRE_AssumedPartitionCheck()
2812  ? old_dof_offsets[0] : old_dof_offsets[MyRank];
2813 
2814  // send old DOFs of elements we used to own
2815  ParNCMesh* old_pncmesh = pmesh->pncmesh;
2816  old_pncmesh->SendRebalanceDofs(old_ndofs, *old_elem_dof, old_offset, this);
2817 
2818  Array<int> dofs;
2819  int vsize = GetVSize();
2820 
2821  const Array<int> &old_index = old_pncmesh->GetRebalanceOldIndex();
2822  MFEM_VERIFY(old_index.Size() == pmesh->GetNE(),
2823  "Mesh::Rebalance was not called before "
2824  "ParFiniteElementSpace::RebalanceMatrix");
2825 
2826  // prepare the local (diagonal) part of the matrix
2827  HYPRE_Int* i_diag = make_i_array<HYPRE_Int>(vsize);
2828  for (int i = 0; i < pmesh->GetNE(); i++)
2829  {
2830  if (old_index[i] >= 0) // we had this element before
2831  {
2832  const int* old_dofs = old_elem_dof->GetRow(old_index[i]);
2833  GetElementDofs(i, dofs);
2834 
2835  for (int vd = 0; vd < vdim; vd++)
2836  {
2837  for (int j = 0; j < dofs.Size(); j++)
2838  {
2839  int row = DofToVDof(dofs[j], vd);
2840  if (row < 0) { row = -1 - row; }
2841 
2842  int col = DofToVDof(old_dofs[j], vd, old_ndofs);
2843  if (col < 0) { col = -1 - col; }
2844 
2845  i_diag[row] = col;
2846  }
2847  }
2848  }
2849  }
2850  HYPRE_Int* j_diag = make_j_array(i_diag, vsize);
2851 
2852  // receive old DOFs for elements we obtained from others in Rebalance
2853  Array<int> new_elements;
2854  Array<long> old_remote_dofs;
2855  old_pncmesh->RecvRebalanceDofs(new_elements, old_remote_dofs);
2856 
2857  // create the offdiagonal part of the matrix
2858  HYPRE_BigInt* i_offd = make_i_array<HYPRE_BigInt>(vsize);
2859  for (int i = 0, pos = 0; i < new_elements.Size(); i++)
2860  {
2861  GetElementDofs(new_elements[i], dofs);
2862  const long* old_dofs = &old_remote_dofs[pos];
2863  pos += dofs.Size() * vdim;
2864 
2865  for (int vd = 0; vd < vdim; vd++)
2866  {
2867  for (int j = 0; j < dofs.Size(); j++)
2868  {
2869  int row = DofToVDof(dofs[j], vd);
2870  if (row < 0) { row = -1 - row; }
2871 
2872  if (i_diag[row] == i_diag[row+1]) // diag row empty?
2873  {
2874  i_offd[row] = old_dofs[j + vd * dofs.Size()];
2875  }
2876  }
2877  }
2878  }
2879  HYPRE_BigInt* j_offd = make_j_array(i_offd, vsize);
2880 
2881 #ifndef HYPRE_MIXEDINT
2882  HYPRE_Int *i_offd_hi = i_offd;
2883 #else
2884  // Copy of i_offd array as array of HYPRE_Int
2885  HYPRE_Int *i_offd_hi = Memory<HYPRE_Int>(vsize + 1);
2886  std::copy(i_offd, i_offd + vsize + 1, i_offd_hi);
2887  Memory<HYPRE_BigInt>(i_offd, vsize + 1, true).Delete();
2888 #endif
2889 
2890  // create the offd column map
2891  int offd_cols = i_offd_hi[vsize];
2892  Array<Pair<HYPRE_BigInt, int> > cmap_offd(offd_cols);
2893  for (int i = 0; i < offd_cols; i++)
2894  {
2895  cmap_offd[i].one = j_offd[i];
2896  cmap_offd[i].two = i;
2897  }
2898 
2899 #ifndef HYPRE_MIXEDINT
2900  HYPRE_Int *j_offd_hi = j_offd;
2901 #else
2902  HYPRE_Int *j_offd_hi = Memory<HYPRE_Int>(offd_cols);
2903  Memory<HYPRE_BigInt>(j_offd, offd_cols, true).Delete();
2904 #endif
2905 
2906  SortPairs<HYPRE_BigInt, int>(cmap_offd, offd_cols);
2907 
2908  HYPRE_BigInt* cmap = Memory<HYPRE_BigInt>(offd_cols);
2909  for (int i = 0; i < offd_cols; i++)
2910  {
2911  cmap[i] = cmap_offd[i].one;
2912  j_offd_hi[cmap_offd[i].two] = i;
2913  }
2914 
2915  HypreParMatrix *M;
2916  M = new HypreParMatrix(MyComm, MyRank, NRanks, dof_offsets, old_dof_offsets,
2917  i_diag, j_diag, i_offd_hi, j_offd_hi, cmap, offd_cols);
2918  return M;
2919 }
2920 
2921 
2922 struct DerefDofMessage
2923 {
2924  std::vector<HYPRE_BigInt> dofs;
2925  MPI_Request request;
2926 };
2927 
2928 HypreParMatrix*
2929 ParFiniteElementSpace::ParallelDerefinementMatrix(int old_ndofs,
2930  const Table* old_elem_dof,
2931  const Table *old_elem_fos)
2932 {
2933  int nrk = HYPRE_AssumedPartitionCheck() ? 2 : NRanks;
2934 
2935  MFEM_VERIFY(Nonconforming(), "Not implemented for conforming meshes.");
2936  MFEM_VERIFY(old_dof_offsets[nrk], "Missing previous (finer) space.");
2937 
2938 #if 0 // check no longer seems to work with NC tet refinement
2939  MFEM_VERIFY(dof_offsets[nrk] <= old_dof_offsets[nrk],
2940  "Previous space is not finer.");
2941 #endif
2942 
2943  // Note to the reader: please make sure you first read
2944  // FiniteElementSpace::RefinementMatrix, then
2945  // FiniteElementSpace::DerefinementMatrix, and only then this function.
2946  // You have been warned! :-)
2947 
2948  Mesh::GeometryList elem_geoms(*mesh);
2949 
2950  Array<int> dofs, old_dofs, old_vdofs;
2951  Vector row;
2952 
2953  ParNCMesh* old_pncmesh = pmesh->pncmesh;
2954 
2955  int ldof[Geometry::NumGeom];
2956  for (int i = 0; i < Geometry::NumGeom; i++)
2957  {
2958  ldof[i] = 0;
2959  }
2960  for (int i = 0; i < elem_geoms.Size(); i++)
2961  {
2962  Geometry::Type geom = elem_geoms[i];
2963  ldof[geom] = fec->FiniteElementForGeometry(geom)->GetDof();
2964  }
2965 
2966  const CoarseFineTransformations &dtrans =
2967  old_pncmesh->GetDerefinementTransforms();
2968  const Array<int> &old_ranks = old_pncmesh->GetDerefineOldRanks();
2969 
2970  std::map<int, DerefDofMessage> messages;
2971 
2972  HYPRE_BigInt old_offset = HYPRE_AssumedPartitionCheck()
2973  ? old_dof_offsets[0] : old_dof_offsets[MyRank];
2974 
2975  // communicate DOFs for derefinements that straddle processor boundaries,
2976  // note that this is infrequent due to the way elements are ordered
2977  for (int k = 0; k < dtrans.embeddings.Size(); k++)
2978  {
2979  const Embedding &emb = dtrans.embeddings[k];
2980 
2981  int fine_rank = old_ranks[k];
2982  int coarse_rank = (emb.parent < 0) ? (-1 - emb.parent)
2983  : old_pncmesh->ElementRank(emb.parent);
2984 
2985  if (coarse_rank != MyRank && fine_rank == MyRank)
2986  {
2987  old_elem_dof->GetRow(k, dofs);
2988  DofsToVDofs(dofs, old_ndofs);
2989 
2990  DerefDofMessage &msg = messages[k];
2991  msg.dofs.resize(dofs.Size());
2992  for (int i = 0; i < dofs.Size(); i++)
2993  {
2994  msg.dofs[i] = old_offset + dofs[i];
2995  }
2996 
2997  MPI_Isend(&msg.dofs[0], msg.dofs.size(), HYPRE_MPI_BIG_INT,
2998  coarse_rank, 291, MyComm, &msg.request);
2999  }
3000  else if (coarse_rank == MyRank && fine_rank != MyRank)
3001  {
3002  MFEM_ASSERT(emb.parent >= 0, "");
3003  Geometry::Type geom = mesh->GetElementBaseGeometry(emb.parent);
3004 
3005  DerefDofMessage &msg = messages[k];
3006  msg.dofs.resize(ldof[geom]*vdim);
3007 
3008  MPI_Irecv(&msg.dofs[0], ldof[geom]*vdim, HYPRE_MPI_BIG_INT,
3009  fine_rank, 291, MyComm, &msg.request);
3010  }
3011  // TODO: coalesce Isends/Irecvs to the same rank. Typically, on uniform
3012  // derefinement, there should be just one send to MyRank-1 and one recv
3013  // from MyRank+1
3014  }
3015 
3016  DenseTensor localR[Geometry::NumGeom];
3017  for (int i = 0; i < elem_geoms.Size(); i++)
3018  {
3019  GetLocalDerefinementMatrices(elem_geoms[i], localR[elem_geoms[i]]);
3020  }
3021 
3022  // create the diagonal part of the derefinement matrix
3023  SparseMatrix *diag = new SparseMatrix(ndofs*vdim, old_ndofs*vdim);
3024 
3025  Array<char> mark(diag->Height());
3026  mark = 0;
3027 
3028  for (int k = 0; k < dtrans.embeddings.Size(); k++)
3029  {
3030  const Embedding &emb = dtrans.embeddings[k];
3031  if (emb.parent < 0) { continue; }
3032 
3033  int coarse_rank = old_pncmesh->ElementRank(emb.parent);
3034  int fine_rank = old_ranks[k];
3035 
3036  if (coarse_rank == MyRank && fine_rank == MyRank)
3037  {
3038  Geometry::Type geom = mesh->GetElementBaseGeometry(emb.parent);
3039  DenseMatrix &lR = localR[geom](emb.matrix);
3040 
3041  elem_dof->GetRow(emb.parent, dofs);
3042  old_elem_dof->GetRow(k, old_dofs);
3043 
3044  for (int vd = 0; vd < vdim; vd++)
3045  {
3046  old_dofs.Copy(old_vdofs);
3047  DofsToVDofs(vd, old_vdofs, old_ndofs);
3048 
3049  for (int i = 0; i < lR.Height(); i++)
3050  {
3051  if (!std::isfinite(lR(i, 0))) { continue; }
3052 
3053  int r = DofToVDof(dofs[i], vd);
3054  int m = (r >= 0) ? r : (-1 - r);
3055 
3056  if (!mark[m])
3057  {
3058  lR.GetRow(i, row);
3059  diag->SetRow(r, old_vdofs, row);
3060  mark[m] = 1;
3061  }
3062  }
3063  }
3064  }
3065  }
3066  diag->Finalize();
3067 
3068  // wait for all sends/receives to complete
3069  for (auto it = messages.begin(); it != messages.end(); ++it)
3070  {
3071  MPI_Wait(&it->second.request, MPI_STATUS_IGNORE);
3072  }
3073 
3074  // create the offdiagonal part of the derefinement matrix
3075  SparseMatrix *offd = new SparseMatrix(ndofs*vdim, 1);
3076 
3077  std::map<HYPRE_BigInt, int> col_map;
3078  for (int k = 0; k < dtrans.embeddings.Size(); k++)
3079  {
3080  const Embedding &emb = dtrans.embeddings[k];
3081  if (emb.parent < 0) { continue; }
3082 
3083  int coarse_rank = old_pncmesh->ElementRank(emb.parent);
3084  int fine_rank = old_ranks[k];
3085 
3086  if (coarse_rank == MyRank && fine_rank != MyRank)
3087  {
3088  Geometry::Type geom = mesh->GetElementBaseGeometry(emb.parent);
3089  DenseMatrix &lR = localR[geom](emb.matrix);
3090 
3091  elem_dof->GetRow(emb.parent, dofs);
3092 
3093  DerefDofMessage &msg = messages[k];
3094  MFEM_ASSERT(msg.dofs.size(), "");
3095 
3096  for (int vd = 0; vd < vdim; vd++)
3097  {
3098  MFEM_ASSERT(ldof[geom], "");
3099  HYPRE_BigInt* remote_dofs = &msg.dofs[vd*ldof[geom]];
3100 
3101  for (int i = 0; i < lR.Height(); i++)
3102  {
3103  if (!std::isfinite(lR(i, 0))) { continue; }
3104 
3105  int r = DofToVDof(dofs[i], vd);
3106  int m = (r >= 0) ? r : (-1 - r);
3107 
3108  if (!mark[m])
3109  {
3110  lR.GetRow(i, row);
3111  MFEM_ASSERT(ldof[geom] == row.Size(), "");
3112  for (int j = 0; j < ldof[geom]; j++)
3113  {
3114  if (row[j] == 0.0) { continue; } // NOTE: lR thresholded
3115  int &lcol = col_map[remote_dofs[j]];
3116  if (!lcol) { lcol = col_map.size(); }
3117  offd->_Set_(m, lcol-1, row[j]);
3118  }
3119  mark[m] = 1;
3120  }
3121  }
3122  }
3123  }
3124  }
3125  messages.clear();
3126  offd->Finalize(0);
3127  offd->SetWidth(col_map.size());
3128 
3129  // create offd column mapping for use by hypre
3130  HYPRE_BigInt *cmap = Memory<HYPRE_BigInt>(offd->Width());
3131  for (auto it = col_map.begin(); it != col_map.end(); ++it)
3132  {
3133  cmap[it->second-1] = it->first;
3134  }
3135 
3136  // reorder offd columns so that 'cmap' is monotonic
3137  // NOTE: this is easier and probably faster (offd is small) than making
3138  // sure cmap is determined and sorted before the offd matrix is created
3139  {
3140  int width = offd->Width();
3141  Array<Pair<HYPRE_BigInt, int> > reorder(width);
3142  for (int i = 0; i < width; i++)
3143  {
3144  reorder[i].one = cmap[i];
3145  reorder[i].two = i;
3146  }
3147  reorder.Sort();
3148 
3149  Array<int> reindex(width);
3150  for (int i = 0; i < width; i++)
3151  {
3152  reindex[reorder[i].two] = i;
3153  cmap[i] = reorder[i].one;
3154  }
3155 
3156  int *J = offd->GetJ();
3157  for (int i = 0; i < offd->NumNonZeroElems(); i++)
3158  {
3159  J[i] = reindex[J[i]];
3160  }
3161  offd->SortColumnIndices();
3162  }
3163 
3164  HypreParMatrix* new_R;
3165  new_R = new HypreParMatrix(MyComm, dof_offsets[nrk], old_dof_offsets[nrk],
3166  dof_offsets, old_dof_offsets, diag, offd, cmap,
3167  true);
3168 
3169  new_R->SetOwnerFlags(new_R->OwnsDiag(), new_R->OwnsOffd(), 1);
3170 
3171  return new_R;
3172 }
3173 
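ParallelDerefinementMatrix() above numbers its off-diagonal columns with a small idiom: std::map::operator[] value-initializes new entries to 0, so 0 can mean "not seen yet" and the stored index is kept 1-based. A standalone illustration with made-up column indices:

#include <cassert>
#include <map>

int main()
{
   std::map<long, int> col_map;
   long remote_cols[] = {42, 7, 42};
   int local[3];
   for (int j = 0; j < 3; j++)
   {
      int &lcol = col_map[remote_cols[j]];        // 0 if this column is new
      if (!lcol) { lcol = (int)col_map.size(); }  // assign the next 1-based index
      local[j] = lcol - 1;                        // 0-based index used in 'offd'
   }
   assert(local[0] == 0 && local[1] == 1 && local[2] == 0);
   return 0;
}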
3174 void ParFiniteElementSpace::Destroy()
3175 {
3176  ldof_group.DeleteAll();
3177  ldof_ltdof.DeleteAll();
3178  dof_offsets.DeleteAll();
3179  tdof_offsets.DeleteAll();
3180  tdof_nb_offsets.DeleteAll();
3181  // preserve old_dof_offsets
3182  ldof_sign.DeleteAll();
3183 
3184  delete P; P = NULL;
3185  delete Pconf; Pconf = NULL;
3186  delete Rconf; Rconf = NULL;
3187  delete R_transpose; R_transpose = NULL;
3188  delete R; R = NULL;
3189 
3190  delete gcomm; gcomm = NULL;
3191 
3192  num_face_nbr_dofs = -1;
3194  face_nbr_ldof.Clear();
3197 }
3198 
3199 void ParFiniteElementSpace::CopyProlongationAndRestriction(
3200  const FiniteElementSpace &fes, const Array<int> *perm)
3201 {
3202  const ParFiniteElementSpace *pfes
3203  = dynamic_cast<const ParFiniteElementSpace*>(&fes);
3204  MFEM_VERIFY(pfes != NULL, "");
3205  MFEM_VERIFY(P == NULL, "");
3206  MFEM_VERIFY(R == NULL, "");
3207 
3208  SparseMatrix *perm_mat = NULL, *perm_mat_tr = NULL;
3209  if (perm)
3210  {
3211  // Note: although n and fes.GetVSize() are typically equal, in
3212  // variable-order spaces they may differ, since nonconforming edges/faces
3213  // may have fictitious DOFs.
3214  int n = perm->Size();
3215  perm_mat = new SparseMatrix(n, fes.GetVSize());
3216  for (int i=0; i<n; ++i)
3217  {
3218  double s;
3219  int j = DecodeDof((*perm)[i], s);
3220  perm_mat->Set(i, j, s);
3221  }
3222  perm_mat->Finalize();
3223  perm_mat_tr = Transpose(*perm_mat);
3224  }
3225 
3226  if (pfes->P != NULL)
3227  {
3228  if (perm) { P = pfes->P->LeftDiagMult(*perm_mat); }
3229  else { P = new HypreParMatrix(*pfes->P); }
3230  nonconf_P = true;
3231  }
3232  else if (perm != NULL)
3233  {
3234  HYPRE_BigInt glob_nrows = GlobalVSize();
3235  HYPRE_BigInt glob_ncols = GlobalTrueVSize();
3236  HYPRE_BigInt *col_starts = GetTrueDofOffsets();
3237  HYPRE_BigInt *row_starts = GetDofOffsets();
3238  P = new HypreParMatrix(MyComm, glob_nrows, glob_ncols, row_starts,
3239  col_starts, perm_mat);
3240  nonconf_P = true;
3241  }
3242  if (pfes->R != NULL)
3243  {
3244  if (perm) { R = Mult(*pfes->R, *perm_mat_tr); }
3245  else { R = new SparseMatrix(*pfes->R); }
3246  }
3247  else if (perm != NULL)
3248  {
3249  R = perm_mat_tr;
3250  perm_mat_tr = NULL;
3251  }
3252 
3253  delete perm_mat;
3254  delete perm_mat_tr;
3255 }
3256 
3257 void ParFiniteElementSpace::GetTrueTransferOperator(
3258  const FiniteElementSpace &coarse_fes, OperatorHandle &T) const
3259 {
3260  OperatorHandle Tgf(T.Type() == Operator::Hypre_ParCSR ?
3261  Operator::MFEM_SPARSEMAT : Operator::ANY_TYPE);
3262  GetTransferOperator(coarse_fes, Tgf);
3263  Dof_TrueDof_Matrix(); // Make sure R is built - we need R in all cases.
3264  if (T.Type() == Operator::Hypre_ParCSR)
3265  {
3266  const ParFiniteElementSpace *c_pfes =
3267  dynamic_cast<const ParFiniteElementSpace *>(&coarse_fes);
3268  MFEM_ASSERT(c_pfes != NULL, "coarse_fes must be a parallel space");
3269  SparseMatrix *RA = mfem::Mult(*R, *Tgf.As<SparseMatrix>());
3270  Tgf.Clear();
3271  T.Reset(c_pfes->Dof_TrueDof_Matrix()->
3272  LeftDiagMult(*RA, GetTrueDofOffsets()));
3273  delete RA;
3274  }
3275  else
3276  {
3277  T.Reset(new TripleProductOperator(R, Tgf.Ptr(),
3278  coarse_fes.GetProlongationMatrix(),
3279  false, Tgf.OwnsOperator(), false));
3280  Tgf.SetOperatorOwner(false);
3281  }
3282 }
3283 
3284 void ParFiniteElementSpace::Update(bool want_transform)
3285 {
3286  MFEM_VERIFY(!IsVariableOrder(),
3287  "Parallel variable order space not supported yet.");
3288 
3289  if (mesh->GetSequence() == mesh_sequence)
3290  {
3291  return; // no need to update, no-op
3292  }
3293  if (want_transform && mesh->GetSequence() != mesh_sequence + 1)
3294  {
3295  MFEM_ABORT("Error in update sequence. Space needs to be updated after "
3296  "each mesh modification.");
3297  }
3298 
3299  if (NURBSext)
3300  {
3301  UpdateNURBS();
3302  return;
3303  }
3304 
3305  Table* old_elem_dof = NULL;
3306  Table* old_elem_fos = NULL;
3307  int old_ndofs;
3308 
3309  // save old DOF table
3310  if (want_transform)
3311  {
3312  old_elem_dof = elem_dof;
3313  old_elem_fos = elem_fos;
3314  elem_dof = NULL;
3315  elem_fos = NULL;
3316  old_ndofs = ndofs;
3317  Swap(dof_offsets, old_dof_offsets);
3318  }
3319 
3320  Destroy();
3321  FiniteElementSpace::Destroy(); // calls Th.Clear()
3322 
3323  FiniteElementSpace::Construct();
3324  Construct();
3325 
3326  FiniteElementSpace::BuildElementToDofTable();
3327 
3328  if (want_transform)
3329  {
3330  // calculate appropriate GridFunction transformation
3331  switch (mesh->GetLastOperation())
3332  {
3333  case Mesh::REFINE:
3334  {
3335  if (Th.Type() != Operator::MFEM_SPARSEMAT)
3336  {
3337  Th.Reset(new RefinementOperator(this, old_elem_dof,
3338  old_elem_fos, old_ndofs));
3339  // The RefinementOperator takes ownership of 'old_elem_dofs', so
3340  // we no longer own it:
3341  old_elem_dof = NULL;
3342  old_elem_fos = NULL;
3343  }
3344  else
3345  {
3346  // calculate fully assembled matrix
3347  Th.Reset(RefinementMatrix(old_ndofs, old_elem_dof, old_elem_fos));
3348  }
3349  break;
3350  }
3351 
3352  case Mesh::DEREFINE:
3353  {
3354  Th.Reset(ParallelDerefinementMatrix(old_ndofs, old_elem_dof,
3355  old_elem_fos));
3356  if (Nonconforming())
3357  {
3358  Th.SetOperatorOwner(false);
3359  Th.Reset(new TripleProductOperator(P, R, Th.Ptr(),
3360  false, false, true));
3361  }
3362  break;
3363  }
3364 
3365  case Mesh::REBALANCE:
3366  {
3367  Th.Reset(RebalanceMatrix(old_ndofs, old_elem_dof, old_elem_fos));
3368  break;
3369  }
3370 
3371  default:
3372  break;
3373  }
3374 
3375  delete old_elem_dof;
3376  delete old_elem_fos;
3377  }
3378 }
3379 
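Update() is meant to be called exactly once after every mesh modification. A minimal sketch of the expected call order, assuming an existing ParMesh, ParFiniteElementSpace and ParGridFunction (the wrapper function is illustrative only):

#include "mfem.hpp"
using namespace mfem;

void refine_and_update(ParMesh &pmesh, ParFiniteElementSpace &fes,
                       ParGridFunction &x)
{
   pmesh.UniformRefinement();  // or GeneralRefinement / DerefineByError / Rebalance
   fes.Update();               // rebuild DOFs and set up the GridFunction transform
   x.Update();                 // interpolate 'x' onto the updated space
   fes.UpdatesFinished();      // optional: free the internal transformation matrix
}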
3380 void ParFiniteElementSpace::UpdateMeshPointer(Mesh *new_mesh)
3381 {
3382  ParMesh *new_pmesh = dynamic_cast<ParMesh*>(new_mesh);
3383  MFEM_VERIFY(new_pmesh != NULL,
3384  "ParFiniteElementSpace::UpdateMeshPointer(...) must be a ParMesh");
3385  mesh = new_mesh;
3386  pmesh = new_pmesh;
3387 }
3388 
3389 ConformingProlongationOperator::ConformingProlongationOperator(
3390  int lsize, const GroupCommunicator &gc_, bool local_)
3391  : gc(gc_), local(local_)
3392 {
3393  const Table &group_ldof = gc.GroupLDofTable();
3394 
3395  int n_external = 0;
3396  for (int g=1; g<group_ldof.Size(); ++g)
3397  {
3398  if (!gc.GetGroupTopology().IAmMaster(g))
3399  {
3400  n_external += group_ldof.RowSize(g);
3401  }
3402  }
3403  int tsize = lsize - n_external;
3404 
3405  height = lsize;
3406  width = tsize;
3407 
3408  external_ldofs.Reserve(n_external);
3409  for (int gr = 1; gr < group_ldof.Size(); gr++)
3410  {
3411  if (!gc.GetGroupTopology().IAmMaster(gr))
3412  {
3413  external_ldofs.Append(group_ldof.GetRow(gr), group_ldof.RowSize(gr));
3414  }
3415  }
3416  external_ldofs.Sort();
3417 }
3418 
3419 const GroupCommunicator &ConformingProlongationOperator::GetGroupCommunicator()
3420 const
3421 {
3422  return gc;
3423 }
3424 
3425 ConformingProlongationOperator::ConformingProlongationOperator(
3426  const ParFiniteElementSpace &pfes, bool local_)
3427  : Operator(pfes.GetVSize(), pfes.GetTrueVSize()),
3428  external_ldofs(),
3429  gc(pfes.GroupComm()),
3430  local(local_)
3431 {
3432  MFEM_VERIFY(pfes.Conforming(), "");
3433  const Table &group_ldof = gc.GroupLDofTable();
3435  for (int gr = 1; gr < group_ldof.Size(); gr++)
3436  {
3437  if (!gc.GetGroupTopology().IAmMaster(gr))
3438  {
3439  external_ldofs.Append(group_ldof.GetRow(gr), group_ldof.RowSize(gr));
3440  }
3441  }
3442  external_ldofs.Sort();
3443  MFEM_ASSERT(external_ldofs.Size() == Height()-Width(), "");
3444 #ifdef MFEM_DEBUG
3445  for (int j = 1; j < external_ldofs.Size(); j++)
3446  {
3447  // Check for repeated ldofs.
3448  MFEM_VERIFY(external_ldofs[j-1] < external_ldofs[j], "");
3449  }
3450  int j = 0;
3451  for (int i = 0; i < external_ldofs.Size(); i++)
3452  {
3453  const int end = external_ldofs[i];
3454  for ( ; j < end; j++)
3455  {
3456  MFEM_VERIFY(j-i == pfes.GetLocalTDofNumber(j), "");
3457  }
3458  j = end+1;
3459  }
3460  for ( ; j < Height(); j++)
3461  {
3462  MFEM_VERIFY(j-external_ldofs.Size() == pfes.GetLocalTDofNumber(j), "");
3463  }
3464  // gc.PrintInfo();
3465  // pfes.Dof_TrueDof_Matrix()->PrintCommPkg();
3466 #endif
3467 }
3468 
3469 void ConformingProlongationOperator::Mult(const Vector &x, Vector &y) const
3470 {
3471  MFEM_ASSERT(x.Size() == Width(), "");
3472  MFEM_ASSERT(y.Size() == Height(), "");
3473 
3474  const double *xdata = x.HostRead();
3475  double *ydata = y.HostWrite();
3476  const int m = external_ldofs.Size();
3477 
3478  const int in_layout = 2; // 2 - input is ltdofs array
3479  if (local)
3480  {
3481  y = 0.0;
3482  }
3483  else
3484  {
3485  gc.BcastBegin(const_cast<double*>(xdata), in_layout);
3486  }
3487 
3488  int j = 0;
3489  for (int i = 0; i < m; i++)
3490  {
3491  const int end = external_ldofs[i];
3492  std::copy(xdata+j-i, xdata+end-i, ydata+j);
3493  j = end+1;
3494  }
3495  std::copy(xdata+j-m, xdata+Width(), ydata+j);
3496 
3497  const int out_layout = 0; // 0 - output is ldofs array
3498  if (!local)
3499  {
3500  gc.BcastEnd(ydata, out_layout);
3501  }
3502 }
3503 
3504 void ConformingProlongationOperator::MultTranspose(
3505  const Vector &x, Vector &y) const
3506 {
3507  MFEM_ASSERT(x.Size() == Height(), "");
3508  MFEM_ASSERT(y.Size() == Width(), "");
3509 
3510  const double *xdata = x.HostRead();
3511  double *ydata = y.HostWrite();
3512  const int m = external_ldofs.Size();
3513 
3514  if (!local)
3515  {
3516  gc.ReduceBegin(xdata);
3517  }
3518 
3519  int j = 0;
3520  for (int i = 0; i < m; i++)
3521  {
3522  const int end = external_ldofs[i];
3523  std::copy(xdata+j, xdata+end, ydata+j-i);
3524  j = end+1;
3525  }
3526  std::copy(xdata+j, xdata+Height(), ydata+j-m);
3527 
3528  const int out_layout = 2; // 2 - output is an array on all ltdofs
3529  if (!local)
3530  {
3531  gc.ReduceEnd<double>(ydata, out_layout, GroupCommunicator::Sum);
3532  }
3533 }
3534 
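The two Mult routines above apply the conforming prolongation and its transpose without assembling a matrix; for a conforming space this operator is typically what GetProlongationMatrix() returns. A short sketch, assuming an existing ParFiniteElementSpace (the function name is illustrative):

#include "mfem.hpp"
using namespace mfem;

void prolongate_and_reduce(ParFiniteElementSpace &fes)
{
   const Operator *P = fes.GetProlongationMatrix();
   Vector xt(fes.GetTrueVSize()), xl(fes.GetVSize());
   xt = 1.0;
   P->Mult(xt, xl);            // true DOFs -> local DOFs (shared values duplicated)
   P->MultTranspose(xl, xt);   // local DOFs -> true DOFs (shared values summed)
}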
3535 DeviceConformingProlongationOperator::DeviceConformingProlongationOperator(
3536  const GroupCommunicator &gc_, const SparseMatrix *R, bool local_)
3537  : ConformingProlongationOperator(R->Width(), gc_, local_),
3538  mpi_gpu_aware(Device::GetGPUAwareMPI())
3539 {
3540  MFEM_ASSERT(R->Finalized(), "");
3541  const int tdofs = R->Height();
3542  MFEM_ASSERT(tdofs == R->HostReadI()[tdofs], "");
3543  ltdof_ldof = Array<int>(const_cast<int*>(R->HostReadJ()), tdofs);
3544  {
3545  Table nbr_ltdof;
3546  gc.GetNeighborLTDofTable(nbr_ltdof);
3547  const int nb_connections = nbr_ltdof.Size_of_connections();
3548  shr_ltdof.SetSize(nb_connections);
3549  shr_ltdof.CopyFrom(nbr_ltdof.GetJ());
3550  shr_buf.SetSize(nb_connections);
3551  shr_buf.UseDevice(true);
3552  shr_buf_offsets = nbr_ltdof.GetIMemory();
3553  {
3554  Array<int> shared_ltdof(nbr_ltdof.GetJ(), nb_connections);
3555  Array<int> unique_ltdof(shared_ltdof);
3556  unique_ltdof.Sort();
3557  unique_ltdof.Unique();
3558  // Note: the next loop modifies the J array of nbr_ltdof
3559  for (int i = 0; i < shared_ltdof.Size(); i++)
3560  {
3561  shared_ltdof[i] = unique_ltdof.FindSorted(shared_ltdof[i]);
3562  MFEM_ASSERT(shared_ltdof[i] != -1, "internal error");
3563  }
3564  Table unique_shr;
3565  Transpose(shared_ltdof, unique_shr, unique_ltdof.Size());
3566  unq_ltdof = Array<int>(unique_ltdof, unique_ltdof.Size());
3567  unq_shr_i = Array<int>(unique_shr.GetI(), unique_shr.Size()+1);
3568  unq_shr_j = Array<int>(unique_shr.GetJ(), unique_shr.Size_of_connections());
3569  }
3570  nbr_ltdof.GetJMemory().Delete();
3571  nbr_ltdof.LoseData();
3572  }
3573  {
3574  Table nbr_ldof;
3575  gc.GetNeighborLDofTable(nbr_ldof);
3576  const int nb_connections = nbr_ldof.Size_of_connections();
3577  ext_ldof.SetSize(nb_connections);
3578  ext_ldof.CopyFrom(nbr_ldof.GetJ());
3579  ext_ldof.GetMemory().UseDevice(true);
3580  ext_buf.SetSize(nb_connections);
3581  ext_buf.UseDevice(true);
3582  ext_buf_offsets = nbr_ldof.GetIMemory();
3583  nbr_ldof.GetJMemory().Delete();
3584  nbr_ldof.LoseData();
3585  }
3586  const GroupTopology &gtopo = gc.GetGroupTopology();
3587  int req_counter = 0;
3588  for (int nbr = 1; nbr < gtopo.GetNumNeighbors(); nbr++)
3589  {
3590  const int send_offset = shr_buf_offsets[nbr];
3591  const int send_size = shr_buf_offsets[nbr+1] - send_offset;
3592  if (send_size > 0) { req_counter++; }
3593 
3594  const int recv_offset = ext_buf_offsets[nbr];
3595  const int recv_size = ext_buf_offsets[nbr+1] - recv_offset;
3596  if (recv_size > 0) { req_counter++; }
3597  }
3598  requests = new MPI_Request[req_counter];
3599 }
3600 
3601 DeviceConformingProlongationOperator::DeviceConformingProlongationOperator(
3602  const ParFiniteElementSpace &pfes, bool local_)
3603  : DeviceConformingProlongationOperator(pfes.GroupComm(),
3604  pfes.GetRestrictionMatrix(),
3605  local_)
3606 {
3607  MFEM_ASSERT(pfes.Conforming(), "internal error");
3608  MFEM_ASSERT(pfes.GetRestrictionMatrix()->Height() == pfes.GetTrueVSize(), "");
3609 }
3610 
3611 static void ExtractSubVector(const Array<int> &indices,
3612  const Vector &vin, Vector &vout)
3613 {
3614  MFEM_ASSERT(indices.Size() == vout.Size(), "incompatible sizes!");
3615  auto y = vout.Write();
3616  const auto x = vin.Read();
3617  const auto I = indices.Read();
3618  MFEM_FORALL(i, indices.Size(), y[i] = x[I[i]];); // indices can be repeated
3619 }
3620 
3621 void DeviceConformingProlongationOperator::BcastBeginCopy(
3622  const Vector &x) const
3623 {
3624  // shr_buf[i] = src[shr_ltdof[i]]
3625  if (shr_ltdof.Size() == 0) { return; }
3626  ExtractSubVector(shr_ltdof, x, shr_buf);
3627  // If the above kernel is executed asynchronously, we should wait for it to
3628  // complete
3629  if (mpi_gpu_aware) { MFEM_STREAM_SYNC; }
3630 }
3631 
3632 static void SetSubVector(const Array<int> &indices,
3633  const Vector &vin, Vector &vout)
3634 {
3635  MFEM_ASSERT(indices.Size() == vin.Size(), "incompatible sizes!");
3636  // Use ReadWrite() since we modify only a subset of the indices:
3637  auto y = vout.ReadWrite();
3638  const auto x = vin.Read();
3639  const auto I = indices.Read();
3640  MFEM_FORALL(i, indices.Size(), y[I[i]] = x[i];);
3641 }
3642 
3643 void DeviceConformingProlongationOperator::BcastLocalCopy(
3644  const Vector &x, Vector &y) const
3645 {
3646  // dst[ltdof_ldof[i]] = src[i]
3647  if (ltdof_ldof.Size() == 0) { return; }
3648  SetSubVector(ltdof_ldof, x, y);
3649 }
3650 
3651 void DeviceConformingProlongationOperator::BcastEndCopy(
3652  Vector &y) const
3653 {
3654  // dst[ext_ldof[i]] = ext_buf[i]
3655  if (ext_ldof.Size() == 0) { return; }
3656  SetSubVector(ext_ldof, ext_buf, y);
3657 }
3658 
3659 void DeviceConformingProlongationOperator::Mult(const Vector &x,
3660  Vector &y) const
3661 {
3662  const GroupTopology &gtopo = gc.GetGroupTopology();
3663  int req_counter = 0;
3664  // Make sure 'y' is marked as valid on device and for use on device.
3665  // This ensures that there is no unnecessary host to device copy when the
3666  // input 'y' is valid on host (in 'y.SetSubVector(ext_ldof, 0.0)' when local
3667  // is true) or BcastLocalCopy (when local is false).
3668  y.Write();
3669  if (local)
3670  {
3671  // done on device since we've marked ext_ldof for use on device:
3672  y.SetSubVector(ext_ldof, 0.0);
3673  }
3674  else
3675  {
3676  BcastBeginCopy(x); // copy to 'shr_buf'
3677  for (int nbr = 1; nbr < gtopo.GetNumNeighbors(); nbr++)
3678  {
3679  const int send_offset = shr_buf_offsets[nbr];
3680  const int send_size = shr_buf_offsets[nbr+1] - send_offset;
3681  if (send_size > 0)
3682  {
3683  auto send_buf = mpi_gpu_aware ? shr_buf.Read() : shr_buf.HostRead();
3684  MPI_Isend(send_buf + send_offset, send_size, MPI_DOUBLE,
3685  gtopo.GetNeighborRank(nbr), 41822,
3686  gtopo.GetComm(), &requests[req_counter++]);
3687  }
3688  const int recv_offset = ext_buf_offsets[nbr];
3689  const int recv_size = ext_buf_offsets[nbr+1] - recv_offset;
3690  if (recv_size > 0)
3691  {
3692  auto recv_buf = mpi_gpu_aware ? ext_buf.Write() : ext_buf.HostWrite();
3693  MPI_Irecv(recv_buf + recv_offset, recv_size, MPI_DOUBLE,
3694  gtopo.GetNeighborRank(nbr), 41822,
3695  gtopo.GetComm(), &requests[req_counter++]);
3696  }
3697  }
3698  }
3699  BcastLocalCopy(x, y);
3700  if (!local)
3701  {
3702  MPI_Waitall(req_counter, requests, MPI_STATUSES_IGNORE);
3703  BcastEndCopy(y); // copy from 'ext_buf'
3704  }
3705 }
3706 
3707 DeviceConformingProlongationOperator::~DeviceConformingProlongationOperator()
3708 {
3709  delete [] requests;
3712 }
3713 
3714 void DeviceConformingProlongationOperator::ReduceBeginCopy(
3715  const Vector &x) const
3716 {
3717  // ext_buf[i] = src[ext_ldof[i]]
3718  if (ext_ldof.Size() == 0) { return; }
3719  ExtractSubVector(ext_ldof, x, ext_buf);
3720  // If the above kernel is executed asynchronously, we should wait for it to
3721  // complete
3722  if (mpi_gpu_aware) { MFEM_STREAM_SYNC; }
3723 }
3724 
3725 void DeviceConformingProlongationOperator::ReduceLocalCopy(
3726  const Vector &x, Vector &y) const
3727 {
3728  // dst[i] = src[ltdof_ldof[i]]
3729  if (ltdof_ldof.Size() == 0) { return; }
3730  ExtractSubVector(ltdof_ldof, x, y);
3731 }
3732 
3733 static void AddSubVector(const Array<int> &unique_dst_indices,
3734  const Array<int> &unique_to_src_offsets,
3735  const Array<int> &unique_to_src_indices,
3736  const Vector &src,
3737  Vector &dst)
3738 {
3739  auto y = dst.ReadWrite();
3740  const auto x = src.Read();
3741  const auto DST_I = unique_dst_indices.Read();
3742  const auto SRC_O = unique_to_src_offsets.Read();
3743  const auto SRC_I = unique_to_src_indices.Read();
3744  MFEM_FORALL(i, unique_dst_indices.Size(),
3745  {
3746  const int dst_idx = DST_I[i];
3747  double sum = y[dst_idx];
3748  const int end = SRC_O[i+1];
3749  for (int j = SRC_O[i]; j != end; ++j) { sum += x[SRC_I[j]]; }
3750  y[dst_idx] = sum;
3751  });
3752 }
3753 
3754 void DeviceConformingProlongationOperator::ReduceEndAssemble(Vector &y) const
3755 {
3756  // dst[shr_ltdof[i]] += shr_buf[i]
3757  if (unq_ltdof.Size() == 0) { return; }
3758  AddSubVector(unq_ltdof, unq_shr_i, unq_shr_j, shr_buf, y);
3759 }
3760 
3761 void DeviceConformingProlongationOperator::MultTranspose(const Vector &x,
3762  Vector &y) const
3763 {
3764  const GroupTopology &gtopo = gc.GetGroupTopology();
3765  int req_counter = 0;
3766  if (!local)
3767  {
3768  ReduceBeginCopy(x); // copy to 'ext_buf'
3769  for (int nbr = 1; nbr < gtopo.GetNumNeighbors(); nbr++)
3770  {
3771  const int send_offset = ext_buf_offsets[nbr];
3772  const int send_size = ext_buf_offsets[nbr+1] - send_offset;
3773  if (send_size > 0)
3774  {
3775  auto send_buf = mpi_gpu_aware ? ext_buf.Read() : ext_buf.HostRead();
3776  MPI_Isend(send_buf + send_offset, send_size, MPI_DOUBLE,
3777  gtopo.GetNeighborRank(nbr), 41823,
3778  gtopo.GetComm(), &requests[req_counter++]);
3779  }
3780  const int recv_offset = shr_buf_offsets[nbr];
3781  const int recv_size = shr_buf_offsets[nbr+1] - recv_offset;
3782  if (recv_size > 0)
3783  {
3784  auto recv_buf = mpi_gpu_aware ? shr_buf.Write() : shr_buf.HostWrite();
3785  MPI_Irecv(recv_buf + recv_offset, recv_size, MPI_DOUBLE,
3786  gtopo.GetNeighborRank(nbr), 41823,
3787  gtopo.GetComm(), &requests[req_counter++]);
3788  }
3789  }
3790  }
3791  ReduceLocalCopy(x, y);
3792  if (!local)
3793  {
3794  MPI_Waitall(req_counter, requests, MPI_STATUSES_IGNORE);
3795  ReduceEndAssemble(y); // assemble from 'shr_buf'
3796  }
3797 }
3798 
3799 } // namespace mfem
3800 
3801 #endif
SparseMatrix * RefinementMatrix(int old_ndofs, const Table *old_elem_dof, const Table *old_elem_fos)
Definition: fespace.cpp:1476
Abstract class for all finite elements.
Definition: fe_base.hpp:235
VDofTransformation VDoFTrans
Definition: fespace.hpp:144
Geometry::Type GetGeometryType() const
Definition: element.hpp:52
int GetNFaceNeighbors() const
Definition: pmesh.hpp:351
int GetGroupMasterRank(int g) const
Return the rank of the group master for group &#39;g&#39;.
void SendRebalanceDofs(int old_ndofs, const Table &old_element_dofs, long old_global_offset, FiniteElementSpace *space)
Use the communication pattern from last Rebalance() to send element DOFs.
Definition: pncmesh.cpp:2019
void ReduceEnd(T *ldata, int layout, void(*Op)(OpData< T >)) const
Finalize reduction operation started with ReduceBegin().
int Size() const
Return the logical size of the array.
Definition: array.hpp:138
void SetSubVector(const Array< int > &dofs, const double value)
Set the entries listed in dofs to the given value.
Definition: vector.cpp:560
void Reduce(T *ldata, void(*Op)(OpData< T >)) const
Reduce within each group where the master is the root.
void GetNeighborLDofTable(Table &nbr_ldof) const
Dofs to be received from communication neighbors.
int GetVSize() const
Return the number of vector dofs, i.e. GetNDofs() x GetVDim().
Definition: fespace.hpp:563
virtual void GetEssentialTrueDofs(const Array< int > &bdr_attr_is_ess, Array< int > &ess_tdof_list, int component=-1)
Definition: pfespace.cpp:1032
HYPRE_BigInt GetMyTDofOffset() const
Definition: pfespace.cpp:1148
int GetNDofs() const
Returns number of degrees of freedom.
Definition: fespace.hpp:560
int * GetJ()
Definition: table.hpp:114
int ndofs
Number of degrees of freedom. Number of unknowns is ndofs * vdim.
Definition: fespace.hpp:111
std::vector< int > CommGroup
Definition: pncmesh.hpp:145
HYPRE_BigInt GetGlobalScalarTDofNumber(int sldof)
Definition: pfespace.cpp:1106
Operator that extracts Face degrees of freedom in parallel.
int DofToVDof(int dof, int vd, int ndofs=-1) const
Definition: fespace.cpp:251
int GetFaceVerticesEdges(const MeshId &face_id, int vert_index[4], int edge_index[4], int edge_orientation[4]) const
Definition: ncmesh.cpp:4814
Memory< T > & GetMemory()
Return a reference to the Memory object used by the Array.
Definition: array.hpp:117
void Unique()
Removes duplicities from a sorted array. This requires operator== to be defined for T...
Definition: array.hpp:256
void Bcast(T *ldata, int layout) const
Broadcast within each group where the master is the root.
static void AddDependencies(SparseMatrix &deps, Array< int > &master_dofs, Array< int > &slave_dofs, DenseMatrix &I, int skipfirst=0)
Definition: fespace.cpp:758
const CommGroup & GetGroup(GroupId id) const
Return a list of ranks contained in the group of the given ID.
Definition: pncmesh.hpp:174
bool IsVariableOrder() const
Returns true if the space contains elements of varying polynomial orders.
Definition: fespace.hpp:455
void GetNeighborLTDofTable(Table &nbr_ltdof) const
Dofs to be sent to communication neighbors.
Operator that extracts Face degrees of freedom for H1 FiniteElementSpaces.
Array< int > ldof_group
Definition: nurbs.hpp:425
void BuildElementToDofTable() const
Definition: fespace.cpp:342
void AddColumnsInRow(int r, int ncol)
Definition: table.hpp:78
virtual void GetEssentialVDofs(const Array< int > &bdr_attr_is_ess, Array< int > &ess_dofs, int component=-1) const
Determine the boundary degrees of freedom.
Definition: pfespace.cpp:1021
void MakeI(int nrows)
Next 7 methods are used together with the default constructor.
Definition: table.cpp:81
static const int NumGeom
Definition: geom.hpp:42
virtual int GetContType() const =0
int GetNGroups() const
Definition: pmesh.hpp:327
virtual const Operator * GetProlongationMatrix() const
The returned Operator is owned by the FiniteElementSpace.
Definition: pfespace.cpp:1153
void ReduceEndAssemble(Vector &dst) const
Definition: pfespace.cpp:3754
Auxiliary class used by ParFiniteElementSpace.
Definition: pfespace.hpp:439
Tangential components of vector field.
Definition: fe_coll.hpp:46
void SetSize(int s)
Resize the vector to size s.
Definition: vector.hpp:521
Ordering::Type ordering
Definition: fespace.hpp:108
Array< Element * > face_nbr_elements
Definition: pmesh.hpp:319
void Delete()
Delete the owned pointers and reset the Memory object.
void Create(const Array< int > &ldof_group)
Initialize the communicator from a local-dof to group map. Finalize() is called internally.
int TrueVSize() const
Obsolete, kept for backward compatibility.
Definition: pfespace.hpp:434
virtual DofTransformation * GetBdrElementDofs(int bel, Array< int > &dofs) const
Returns indices of degrees of freedom for boundary element &#39;bel&#39;.
Definition: fespace.cpp:2814
static int DecodeDof(int dof)
Helpers to remove encoded sign from a DOF.
Definition: fespace.hpp:254
const T * HostRead() const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), false).
Definition: array.hpp:308
void BooleanMult(const Array< int > &x, Array< int > &y) const
y = A * x, treating all entries as booleans (zero=false, nonzero=true).
Definition: sparsemat.cpp:933
void Mult(const Table &A, const Table &B, Table &C)
C = A * B (as boolean matrices)
Definition: table.cpp:472
int Width() const
Get the width (size of input) of the Operator. Synonym with NumCols().
Definition: operator.hpp:72
int GetGroupSize(int g) const
Get the number of processors in a group.
virtual void Update(bool want_transform=true)
Definition: pfespace.cpp:3284
Pointer to an Operator of a specified type.
Definition: handle.hpp:33
DeviceConformingProlongationOperator(const GroupCommunicator &gc_, const SparseMatrix *R, bool local_=false)
Definition: pfespace.cpp:3535
void SetDims(int rows, int nnz)
Definition: table.cpp:140
Operator::Type Type() const
Get the currently set operator type id.
Definition: handle.hpp:99
int VDofToDof(int vdof) const
Definition: fespace.hpp:687
virtual const FiniteElement * FiniteElementForGeometry(Geometry::Type GeomType) const =0
HYPRE_BigInt GlobalTrueVSize() const
Definition: pfespace.hpp:285
void ExchangeFaceNbrData(Table *gr_sface, int *s2l_face)
Definition: pmesh.cpp:2105
int GetNVertices() const
Return the number of vertices in the NCMesh.
Definition: ncmesh.hpp:149
virtual DofTransformation * GetBdrElementDofs(int i, Array< int > &dofs) const
Returns indexes of degrees of freedom for i&#39;th boundary element.
Definition: pfespace.cpp:495
virtual void GetEssentialVDofs(const Array< int > &bdr_attr_is_ess, Array< int > &ess_vdofs, int component=-1) const
Mark degrees of freedom associated with boundary elements with the specified boundary attributes (mar...
Definition: fespace.cpp:495
virtual double * HostWrite()
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), false).
Definition: vector.hpp:450
int vdim
Vector dimension (number of unknowns per degree of freedom).
Definition: fespace.hpp:103
int Size() const
Returns the size of the vector.
Definition: vector.hpp:199
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
Definition: table.cpp:187
virtual void GetTrueTransferOperator(const FiniteElementSpace &coarse_fes, OperatorHandle &T) const
Construct and return an Operator that can be used to transfer true-dof data from coarse_fes, defined on a coarse mesh, to this FE space, defined on a refined mesh.
Definition: pfespace.cpp:3257
ConformingProlongationOperator(int lsize, const GroupCommunicator &gc_, bool local_=false)
Definition: pfespace.cpp:3389
int GetNE() const
Returns number of elements.
Definition: mesh.hpp:928
int GetEdgeDofs(int edge, Array< int > &dofs, int variant=0) const
Returns the indices of the degrees of freedom for the specified edge, including the DOFs for the vert...
Definition: fespace.cpp:2987
void DivideByGroupSize(double *vec)
Scale a vector of true dofs.
Definition: pfespace.cpp:975
void LoseData()
Call this if data has been stolen.
Definition: table.hpp:154
void BcastEndCopy(Vector &dst) const
Definition: pfespace.cpp:3651
void BcastBeginCopy(const Vector &src) const
Definition: pfespace.cpp:3621
Abstract parallel finite element space.
Definition: pfespace.hpp:28
bool operator<(const Pair< A, B > &p, const Pair< A, B > &q)
Comparison operator for class Pair, based on the first element only.
Definition: sort_pairs.hpp:36
void Synchronize(Array< int > &ldof_marker) const
Given an integer array on the local degrees of freedom, perform a bitwise OR between the shared dofs...
Definition: pfespace.cpp:1009
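A hedged sketch (assumes a ParFiniteElementSpace 'pfes'): OR-synchronize a locally set integer marker across shared dofs.

Array<int> marker(pfes.GetVSize());
marker = 0;
// ... mark some locally known dofs with nonzero values ...
pfes.Synchronize(marker);            // bitwise OR over shared dofs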
bool GroupContains(GroupId id, int rank) const
Return true if group 'id' contains the given rank.
Definition: pncmesh.cpp:416
int GetLocalTDofNumber(int ldof) const
Definition: pfespace.cpp:1062
HypreParMatrix * GetPartialConformingInterpolation()
For a non-conforming mesh, construct and return the interpolation matrix from the partially conformin...
Definition: pfespace.cpp:964
double * GetData() const
Returns the matrix data array.
Definition: densemat.hpp:115
const FiniteElementCollection * fec
Associated FE collection (not owned).
Definition: fespace.hpp:100
OperatorHandle Th
Transformation to apply to GridFunctions after space Update().
Definition: fespace.hpp:157
virtual int GetFaceDofs(int face, Array< int > &dofs, int variant=0) const
Returns the indices of the degrees of freedom for the specified face, including the DOFs for the edge...
Definition: fespace.cpp:2906
int GroupNQuadrilaterals(int group)
Definition: pmesh.hpp:333
const GroupCommunicator & GetGroupCommunicator() const
Definition: pfespace.cpp:3419
Geometry::Type GetElementBaseGeometry(int i) const
Definition: mesh.hpp:1062
virtual void UseDevice(bool use_dev) const
Enable execution of Vector operations using the mfem::Device.
Definition: vector.hpp:117
int Size_of_connections() const
Definition: table.hpp:98
void GetLocalDerefinementMatrices(Geometry::Type geom, DenseTensor &localR) const
Definition: fespace.cpp:2016
bool HasGeometry(Geometry::Type geom) const
Return true iff the given geom is encountered in the mesh. Geometries of dimensions lower than Dimens...
Definition: mesh.hpp:1070
void DeleteAll()
Delete the whole array.
Definition: array.hpp:846
int GetOrder() const
Return the order (polynomial degree) of the FE collection, corresponding to the order/degree returned...
Definition: fe_coll.hpp:195
void AddConnections(int r, const int *c, int nc)
Definition: table.cpp:104
void GroupTriangle(int group, int i, int &face, int &o)
Definition: pmesh.cpp:1614
static void MarkerToList(const Array< int > &marker, Array< int > &list)
Convert a Boolean marker array to a list containing all marked indices.
Definition: fespace.cpp:598
void BcastEnd(T *ldata, int layout) const
Finalize a broadcast started with BcastBegin().
bool IAmMaster(int g) const
Return true if I am master for group 'g'.
void GetSharedEdgeDofs(int group, int ei, Array< int > &dofs) const
Definition: pfespace.cpp:584
ParNCMesh * pncmesh
Definition: pmesh.hpp:325
virtual int DofForGeometry(Geometry::Type GeomType) const =0
virtual int GetTrueVSize() const
Return the number of local vector true dofs.
Definition: pfespace.hpp:289
int GetNRanks() const
Definition: pmesh.hpp:289
int GroupVertex(int group, int i)
Definition: pmesh.hpp:335
int GetNumFaces() const
Return the number of faces (3D), edges (2D) or vertices (1D).
Definition: mesh.cpp:5345
int uni_fdof
Number of single face DOFs if all faces are uniform; -1 otherwise.
Definition: fespace.hpp:118
Geometry::Type GetGeomType() const
Returns the Geometry::Type of the reference element.
Definition: fe_base.hpp:320
ID for class SparseMatrix.
Definition: operator.hpp:258
long GetSequence() const
Definition: mesh.hpp:1639
HYPRE_BigInt GlobalVSize() const
Definition: pfespace.hpp:283
FaceType
Definition: mesh.hpp:45
Communicator performing operations within groups defined by a GroupTopology with arbitrary-size data ...
int HasFaceDofs(Geometry::Type geom, int p) const
Definition: fe_coll.cpp:25
const FiniteElement * GetFaceNbrFE(int i) const
Definition: pfespace.cpp:1517
ID for the base class Operator, i.e. any type.
Definition: operator.hpp:257
DofTransformation * GetElementVDofs(int i, Array< int > &vdofs) const
Returns indexes of degrees of freedom in array dofs for the i'th element.
Definition: fespace.cpp:281
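A hedged sketch (assumes a FiniteElementSpace 'fes'): loop over elements and gather their v-dofs; the returned DofTransformation may be non-NULL for some H(curl)/H(div) bases.

Array<int> vdofs;
for (int e = 0; e < fes.GetNE(); e++)
{
   DofTransformation *dt = fes.GetElementVDofs(e, vdofs);
   // ... use vdofs (and dt, if non-NULL) for element-wise gather/scatter ...
}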
Array< int > face_nbr_elements_offset
Definition: pmesh.hpp:317
double f(const Vector &xvec)
Definition: lor_mms.hpp:32
void write(std::ostream &os, T value)
Write 'value' to stream.
Definition: binaryio.hpp:30
Array< DofTransformation * > DoFTrans
Definition: fespace.hpp:143
virtual const FiniteElement * GetFE(int i) const
Definition: pfespace.cpp:535
static const int Dimension[NumGeom]
Definition: geom.hpp:47
static void Sum(OpData< T >)
Reduce operation Sum, instantiated for int and double.
Data type sparse matrix.
Definition: sparsemat.hpp:46
int Height() const
Get the height (size of output) of the Operator. Synonym with NumRows().
Definition: operator.hpp:66
int Append(const T &el)
Append element &#39;el&#39; to array, resize if necessary.
Definition: array.hpp:751
GroupId GetEntityGroupId(int entity, int index)
Definition: pncmesh.hpp:162
const GroupCommunicator & gc
Definition: pfespace.hpp:443
virtual double * Write(bool on_dev=true)
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), on_dev).
Definition: vector.hpp:446
int GetNumNeighbors() const
Return the number of neighbors including the local processor.
Memory< int > & GetJMemory()
Definition: table.hpp:119
void Clear()
Definition: table.cpp:377
void mfem_error(const char *msg)
Function called when an error is encountered. Used by the macros MFEM_ABORT, MFEM_ASSERT, MFEM_VERIFY.
Definition: error.cpp:154
virtual void MultTranspose(const Vector &x, Vector &y) const
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
Definition: pfespace.cpp:3761
void SetDofTransformation(DofTransformation &doftrans)
Set or change the nested DofTransformation object.
Definition: doftrans.hpp:175
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
MPI_Comm GetComm() const
Return the MPI communicator.
Operator that extracts Face degrees of freedom for NCMesh in parallel.
int GetNGhostEdges() const
Definition: pncmesh.hpp:117
void AddConnection(int r, int c)
Definition: table.hpp:80
void LoseData()
NULL-ifies the data.
Definition: array.hpp:132
void SetFaceOrientations(const Array< int > &face_orientation)
Configure the transformation using face orientations for the current element.
Definition: doftrans.hpp:77
HypreParMatrix * Dof_TrueDof_Matrix() const
The true dof-to-dof interpolation matrix.
Definition: pfespace.hpp:321
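A hedged sketch (assumes a ParFiniteElementSpace 'pfes'): use the dof-to-true-dof matrix P to prolongate a true-dof vector to a local vdof vector.

HypreParMatrix *P = pfes.Dof_TrueDof_Matrix();
Vector x_true(pfes.GetTrueVSize()), x_ldof(pfes.GetVSize());
x_true = 1.0;                        // illustrative values
P->Mult(x_true, x_ldof);             // true dofs -> local (shared) dofs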
const int * HostReadJ() const
Definition: sparsemat.hpp:248
virtual const Operator * GetProlongationMatrix() const
The returned Operator is owned by the FiniteElementSpace.
Definition: fespace.hpp:471
void ReduceLocalCopy(const Vector &src, Vector &dst) const
Definition: pfespace.cpp:3725
void Reserve(int capacity)
Ensures that the allocated size is at least the given size.
Definition: array.hpp:156
void SubDofOrder(Geometry::Type Geom, int SDim, int Info, Array< int > &dofs) const
Get the local dofs for a given sub-manifold.
Definition: fe_coll.cpp:397
void GetSharedQuadrilateralDofs(int group, int fi, Array< int > &dofs) const
Definition: pfespace.cpp:631
void CopyFrom(const U *src)
Copy from src into this array. Copies enough entries to fill the Capacity size of this array...
Definition: array.hpp:285
int GetMaxElementOrder() const
Return the maximum polynomial order.
Definition: fespace.hpp:451
const T * Read(bool on_dev=true) const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), on_dev).
Definition: array.hpp:304
bool IsDGSpace() const
Return whether or not the space is discontinuous (L2)
Definition: fespace.hpp:901
HypreParMatrix * Transpose() const
Returns the transpose of *this.
Definition: hypre.cpp:1607
T read(std::istream &is)
Read a value from the stream and return it.
Definition: binaryio.hpp:37
HYPRE_BigInt GetMyDofOffset() const
Definition: pfespace.cpp:1143
void GetDiag(Vector &diag) const
Get the local diagonal of the matrix.
Definition: hypre.cpp:1480
void Sort()
Sorts the array in ascending order. This requires operator< to be defined for T.
Definition: array.hpp:248
int GroupNVertices(int group)
Definition: pmesh.hpp:330
void GetSharedTriangleDofs(int group, int fi, Array< int > &dofs) const
Definition: pfespace.cpp:607
int FindSorted(const T &el) const
Do bisection search for 'el' in a sorted array; return -1 if not found.
Definition: array.hpp:820
int Size() const
Returns the number of TYPE I elements.
Definition: table.hpp:92
GroupTopology & GetGroupTopology()
Get a reference to the associated GroupTopology object.
HYPRE_BigInt * GetDofOffsets() const
Definition: pfespace.hpp:281
int Dimension() const
Definition: mesh.hpp:999
int GetNFaces() const
Return the number of (2D) faces in the NCMesh.
Definition: ncmesh.hpp:153
virtual const double * HostRead() const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), false).
Definition: vector.hpp:442
Geometry::Type GetFaceGeometry(int index) const
Return face geometry type. index is the Mesh face number.
Definition: ncmesh.hpp:362
virtual DofTransformation * GetElementDofs(int i, Array< int > &dofs) const
Returns indexes of degrees of freedom in array dofs for i&#39;th element.
Definition: pfespace.cpp:471
Operator * Ptr() const
Access the underlying Operator pointer.
Definition: handle.hpp:87
int GetEntityDofs(int entity, int index, Array< int > &dofs, Geometry::Type master_geom=Geometry::INVALID, int variant=0) const
Helper to get vertex, edge or face DOFs (entity=0,1,2 resp.).
Definition: fespace.cpp:903
int FirstFaceDof(int face, int variant=0) const
Definition: fespace.hpp:243
Auxiliary device class used by ParFiniteElementSpace.
Definition: pfespace.hpp:461
virtual DofTransformation * GetElementDofs(int elem, Array< int > &dofs) const
Returns indices of degrees of freedom of element 'elem'.
Definition: fespace.cpp:2680
int GetGroupMaster(int g) const
Return the neighbor index of the group master for a given group. Neighbor 0 is the local processor...
void GetTransferOperator(const FiniteElementSpace &coarse_fes, OperatorHandle &T) const
Construct and return an Operator that can be used to transfer GridFunction data from coarse_fes...
Definition: fespace.cpp:3246
bool Finalized() const
Returns whether or not CSR format has been finalized.
Definition: sparsemat.hpp:521
void GroupQuadrilateral(int group, int i, int &face, int &o)
Definition: pmesh.cpp:1625
HYPRE_BigInt * GetTrueDofOffsets() const
Definition: pfespace.hpp:282
int GroupNTriangles(int group)
Definition: pmesh.hpp:332
HYPRE_BigInt GetGlobalTDofNumber(int ldof) const
Returns the global tdof number of the given local degree of freedom.
Definition: pfespace.cpp:1083
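A hedged sketch (assumes a ParFiniteElementSpace 'pfes' and a local dof index 'ldof'): query local and global true-dof numbering.

int ltdof = pfes.GetLocalTDofNumber(ldof);            // -1 if not owned here
HYPRE_BigInt gtdof = pfes.GetGlobalTDofNumber(ldof);  // valid on every rank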
int GetNGhostFaces() const
Definition: pncmesh.hpp:118
void GetFaceElements(int Face, int *Elem1, int *Elem2) const
Definition: mesh.cpp:1393
virtual const int * DofOrderForOrientation(Geometry::Type GeomType, int Or) const =0
Returns an array, say p, that maps a local permuted index i to a local base index: base_i = p[i]...
void Swap(Array< T > &, Array< T > &)
Definition: array.hpp:630
int GetMyRank() const
Definition: pmesh.hpp:290
Memory< int > & GetIMemory()
Definition: table.hpp:118
void GetFaceInfos(int Face, int *Inf1, int *Inf2) const
Definition: mesh.cpp:1399
MPI_Comm GetComm() const
Definition: pmesh.hpp:288
Class FiniteElementSpace - responsible for providing FEM view of the mesh, mainly managing the set of...
Definition: fespace.hpp:88
int GetDof() const
Returns the number of degrees of freedom in the finite element.
Definition: fe_base.hpp:323
virtual void MultTranspose(const Vector &x, Vector &y) const
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
Definition: pfespace.cpp:3504
int GetNeighborRank(int i) const
Return the MPI rank of neighbor 'i'.
Collection of finite elements from the same family in multiple dimensions. This class is used to matc...
Definition: fe_coll.hpp:26
void AddAColumnInRow(int r)
Definition: table.hpp:77
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
Definition: array.hpp:679
HYPRE_Int HYPRE_BigInt
void Transpose(const Table &A, Table &At, int ncols_A_)
Transpose a Table.
Definition: table.cpp:410
GroupId GetEntityOwnerId(int entity, int index)
Return vertex/edge/face ('entity' == 0/1/2, resp.) owner.
Definition: pncmesh.hpp:148
ParFiniteElementSpace(const ParFiniteElementSpace &orig, ParMesh *pmesh=NULL, const FiniteElementCollection *fec=NULL)
Copy constructor: deep copy all data from orig except the ParMesh, the FiniteElementCollection, and some derived data.
Definition: pfespace.cpp:29
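A hedged sketch of the most common construction path (a different overload than the copy constructor above; the order-2 H1 collection is an illustrative choice and 'pmesh' is an existing ParMesh):

#include "mfem.hpp"
using namespace mfem;
// ...
H1_FECollection fec(2, pmesh->Dimension());
ParFiniteElementSpace fespace(pmesh, &fec);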
void GetFaceNbrElementFaces(int i, Array< int > &fcs, Array< int > &cor) const
Definition: pmesh.cpp:2773
void SetLTDofTable(const Array< int > &ldof_ltdof)
Initialize the internal group_ltdof Table.
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Definition: device.hpp:258
The transpose of a given operator. Switches the roles of the methods Mult() and MultTranspose().
Definition: operator.hpp:718
Table & GroupLDofTable()
Fill-in the returned Table reference to initialize the GroupCommunicator then call Finalize()...
int GetNEdges() const
Return the number of edges in the NCMesh.
Definition: ncmesh.hpp:151
void ShiftUpI()
Definition: table.cpp:115
Linear2DFiniteElement TriangleFE
Definition: fe.cpp:32
void BcastLocalCopy(const Vector &src, Vector &dst) const
Definition: pfespace.cpp:3643
int height
Dimension of the output / number of rows in the matrix.
Definition: operator.hpp:27
void BcastBegin(T *ldata, int layout) const
Begin a broadcast within each group where the master is the root.
Operation GetLastOperation() const
Return type of last modification of the mesh.
Definition: mesh.hpp:1633
bool Nonconforming() const
Definition: pfespace.hpp:408
Array< HYPRE_BigInt > face_nbr_glob_dof_map
Definition: pfespace.hpp:218
NURBSExtension * NURBSext
Optional NURBS mesh extension.
Definition: mesh.hpp:272
int GetMyRank() const
Definition: pncmesh.hpp:210
virtual const Operator * GetRestrictionOperator() const
Definition: pfespace.cpp:1184
virtual double * ReadWrite(bool on_dev=true)
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), on_dev).
Definition: vector.hpp:454
void SetOperatorOwner(bool own=true)
Set the ownership flag for the held Operator.
Definition: handle.hpp:120
Mesh * mesh
The mesh that FE space lives on (not owned).
Definition: fespace.hpp:97
GridFunction interpolation operator applicable after mesh refinement.
Definition: fespace.hpp:290
const NCList & GetNCList(int entity)
Return vertex/edge/face list (entity = 0/1/2, respectively).
Definition: ncmesh.hpp:275
virtual const SparseMatrix * GetRestrictionMatrix() const
Get the R matrix which restricts a local dof vector to true dof vector.
Definition: pfespace.hpp:389
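A hedged sketch (assumes a ParFiniteElementSpace 'pfes'): apply the restriction matrix R to map a local dof vector to a true-dof vector.

const SparseMatrix *R = pfes.GetRestrictionMatrix();
Vector x_ldof(pfes.GetVSize()), x_tdof(pfes.GetTrueVSize());
x_ldof = 0.0;                        // illustrative values
R->Mult(x_ldof, x_tdof);             // local dofs -> true dofs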
Table var_face_dofs
NOTE: also used for spaces with mixed faces.
Definition: fespace.hpp:124
Array< Master > masters
Definition: ncmesh.hpp:230
virtual int GetFaceDofs(int i, Array< int > &dofs, int variant=0) const
Definition: pfespace.cpp:519
ElementDofOrdering
Constants describing the possible orderings of the DOFs in one element.
Definition: fespace.hpp:66
General triple product operator x -> A*B*C*x, with ownership of the factors.
Definition: operator.hpp:807
void MakeJ()
Definition: table.cpp:91
int dim
Definition: ex24.cpp:53
Array< MeshId > conforming
Definition: ncmesh.hpp:229
virtual const FaceRestriction * GetFaceRestriction(ElementDofOrdering e_ordering, FaceType type, L2FaceValues mul=L2FaceValues::DoubleValued) const
Definition: pfespace.cpp:542
virtual const FiniteElement * GetFE(int i) const
Returns pointer to the FiniteElement in the FiniteElementCollection associated with i'th element in t...
Definition: fespace.cpp:2783
int index(int i, int j, int nx, int ny)
Definition: life.cpp:237
bool IsGhost(int entity, int index) const
Return true if the specified vertex/edge/face is a ghost.
Definition: pncmesh.hpp:184
virtual void Mult(const Vector &x, Vector &y) const
Operator application: y=A(x).
Definition: pfespace.cpp:3469
const FiniteElement * GetFaceNbrFaceFE(int i) const
Definition: pfespace.cpp:1531
Operator that extracts Face degrees of freedom for NCMesh in parallel.
Geometry::Type GetBdrElementBaseGeometry(int i) const
Definition: mesh.hpp:1065
void ReduceBegin(const T *ldata) const
Begin reduction operation within each group where the master is the root.
virtual const Operator * GetRestrictionTransposeOperator() const
Return logical transpose of restriction matrix, but in non-assembled optimized matrix-free form...
Definition: pfespace.cpp:1217
void GetFaceNbrFaceVDofs(int i, Array< int > &vdofs) const
Definition: pfespace.cpp:1489
Geometry::Type GetFaceGeometryType(int Face) const
Definition: mesh.cpp:1412
void GetEdgeVertices(const MeshId &edge_id, int vert_index[2], bool oriented=true) const
Return Mesh vertex indices of an edge identified by 'edge_id'.
Definition: ncmesh.cpp:4783
void MakeRef(T *, int)
Make this Array a reference to a pointer.
Definition: array.hpp:864
void GroupEdge(int group, int i, int &edge, int &o)
Definition: pmesh.cpp:1606
static const DenseMatrix & GetFaceTransform(int ori)
Definition: doftrans.hpp:224
BiLinear2DFiniteElement QuadrilateralFE
Vector data type.
Definition: vector.hpp:60
const int * HostReadI() const
Definition: sparsemat.hpp:232
Identity Operator I: x -> x.
Definition: operator.hpp:674
ID for class HypreParMatrix.
Definition: operator.hpp:259
int * GetI()
Definition: table.hpp:113
bool UseDevice() const
Read the internal device flag.
RefCoord s[3]
void ReduceBeginCopy(const Vector &src) const
Definition: pfespace.cpp:3714
static bool DofFinalizable(int dof, const Array< bool > &finalized, const SparseMatrix &deps)
Definition: fespace.cpp:838
HypreParMatrix * LeftDiagMult(const SparseMatrix &D, HYPRE_BigInt *row_starts=NULL) const
Multiply the HypreParMatrix on the left by a block-diagonal parallel matrix D and return the result a...
Definition: hypre.cpp:1879
int RowSize(int i) const
Definition: table.hpp:108
Bitwise OR of all device backends.
Definition: device.hpp:96
NURBSExtension * NURBSext
Definition: fespace.hpp:139
int GetNGhostVertices() const
Definition: pncmesh.hpp:116
Table send_face_nbr_elements
Definition: pmesh.hpp:322
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
Definition: globals.hpp:66
The MFEM Device class abstracts hardware devices such as GPUs, as well as programming models such as ...
Definition: device.hpp:121
GroupTopology gtopo
Definition: pmesh.hpp:312
Base class for operators that extracts Face degrees of freedom.
Abstract operator.
Definition: operator.hpp:24
Wrapper for hypre's ParCSR matrix class.
Definition: hypre.hpp:337
GroupCommunicator * ScalarGroupComm()
Return a new GroupCommunicator on scalar dofs, i.e. for VDim = 1.
Definition: pfespace.cpp:995
Class for parallel meshes.
Definition: pmesh.hpp:32
Abstract data type element.
Definition: element.hpp:28
GroupTopology gtopo
Definition: nurbs.hpp:423
virtual void Mult(const Vector &x, Vector &y) const
Operator application: y=A(x).
Definition: pfespace.cpp:3659
int GetFaceNbrRank(int fn) const
Definition: pmesh.cpp:2755
Linear1DFiniteElement SegmentFE
Definition: segment.cpp:49
virtual const double * Read(bool on_dev=true) const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), on_dev).
Definition: vector.hpp:438
void GenerateOffsets(int N, HYPRE_BigInt loc_sizes[], Array< HYPRE_BigInt > *offsets[]) const
Definition: pmesh.cpp:1850
void Reset(OpType *A, bool own_A=true)
Reset the OperatorHandle to the given OpType pointer, A.
Definition: handle.hpp:145
DofTransformation * GetFaceNbrElementVDofs(int i, Array< int > &vdofs) const
Definition: pfespace.cpp:1464
int GroupNEdges(int group)
Definition: pmesh.hpp:331
static void BitOR(OpData< T >)
Reduce operation bitwise OR, instantiated for int only.
void DofsToVDofs(Array< int > &dofs, int ndofs=-1) const
Definition: fespace.cpp:215