MFEM v4.0
Finite element discretization library
pfespace.cpp
1 // Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
3 // reserved. See file COPYRIGHT for details.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability see http://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the GNU Lesser General Public License (as published by the Free
10 // Software Foundation) version 2.1 dated February 1999.
11 
12 #include "../config/config.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 #include "pfespace.hpp"
17 #include "../general/sort_pairs.hpp"
18 #include "../mesh/mesh_headers.hpp"
19 #include "../general/binaryio.hpp"
20 
21 #include <climits> // INT_MAX
22 #include <limits>
23 #include <list>
24 
25 namespace mfem
26 {
27 
28 ParFiniteElementSpace::ParFiniteElementSpace(
29  const ParFiniteElementSpace &orig, ParMesh *pmesh,
30  const FiniteElementCollection *fec)
31  : FiniteElementSpace(orig, pmesh, fec)
32 {
33  ParInit(pmesh ? pmesh : orig.pmesh);
34 }
35 
36 ParFiniteElementSpace::ParFiniteElementSpace(
37  const FiniteElementSpace &orig, ParMesh &pmesh,
38  const FiniteElementCollection *fec)
39  : FiniteElementSpace(orig, &pmesh, fec)
40 {
41  ParInit(&pmesh);
42 }
43 
44 ParFiniteElementSpace::ParFiniteElementSpace(
45  ParMesh *pm, const FiniteElementSpace *global_fes, const int *partitioning,
46  const FiniteElementCollection *f)
47  : FiniteElementSpace(pm, MakeLocalNURBSext(global_fes->GetNURBSext(),
48  pm->NURBSext),
49  f ? f : global_fes->FEColl(),
50  global_fes->GetVDim(), global_fes->GetOrdering())
51 {
52  ParInit(pm);
53  // For NURBS spaces, the variable-order data is contained in the
54  // NURBSExtension of 'global_fes' and inside the ParNURBSExtension of 'pm'.
55 
56  // TODO: when general variable-order support is added, copy the local portion
57  // of the variable-order data from 'global_fes' to 'this'.
58 }
59 
60 ParFiniteElementSpace::ParFiniteElementSpace(
61  ParMesh *pm, const FiniteElementCollection *f, int dim, int ordering)
62  : FiniteElementSpace(pm, f, dim, ordering)
63 {
64  ParInit(pm);
65 }
66 
67 ParFiniteElementSpace::ParFiniteElementSpace(
68  ParMesh *pm, NURBSExtension *ext, const FiniteElementCollection *f,
69  int dim, int ordering)
70  : FiniteElementSpace(pm, ext, f, dim, ordering)
71 {
72  ParInit(pm);
73 }
74 
75 // static method
76 ParNURBSExtension *ParFiniteElementSpace::MakeLocalNURBSext(
77  const NURBSExtension *globNURBSext, const NURBSExtension *parNURBSext)
78 {
79  if (globNURBSext == NULL) { return NULL; }
80  const ParNURBSExtension *pNURBSext =
81  dynamic_cast<const ParNURBSExtension*>(parNURBSext);
82  MFEM_ASSERT(pNURBSext, "need a ParNURBSExtension");
83  // make a copy of globNURBSext:
84  NURBSExtension *tmp_globNURBSext = new NURBSExtension(*globNURBSext);
85  // tmp_globNURBSext will be deleted by the following ParNURBSExtension ctor:
86  return new ParNURBSExtension(tmp_globNURBSext, pNURBSext);
87 }
88 
89 void ParFiniteElementSpace::ParInit(ParMesh *pm)
90 {
91  pmesh = pm;
92  pncmesh = pm->pncmesh;
93 
94  MyComm = pmesh->GetComm();
95  NRanks = pmesh->GetNRanks();
96  MyRank = pmesh->GetMyRank();
97 
98  gcomm = NULL;
99 
100  P = NULL;
101  Pconf = NULL;
102  R = NULL;
103 
104  num_face_nbr_dofs = -1;
105 
106  if (NURBSext && !pNURBSext())
107  {
108  // This is necessary in some cases: e.g. when the FiniteElementSpace
109  // constructor creates a serial NURBSExtension of higher order than the
110  // mesh NURBSExtension.
111  MFEM_ASSERT(own_ext, "internal error");
112 
113  ParNURBSExtension *pNe = new ParNURBSExtension(
114  NURBSext, dynamic_cast<ParNURBSExtension *>(pmesh->NURBSext));
115  // serial NURBSext is destroyed by the above constructor
116  NURBSext = pNe;
117  UpdateNURBS();
118  }
119 
120  Construct(); // parallel version of Construct().
121 
122  // Apply the ldof_signs to the elem_dof Table
123  if (Conforming() && !NURBSext)
124  {
125  ApplyLDofSigns(*elem_dof);
126  }
127 }
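// Illustrative usage sketch (not part of the original file): the public
// constructors above are the usual entry point. Assuming the standard MFEM
// API, a typical parallel setup looks like:
//
//    ParMesh pmesh(MPI_COMM_WORLD, serial_mesh);
//    H1_FECollection fec(order, pmesh.Dimension());
//    ParFiniteElementSpace fespace(&pmesh, &fec);
//    HYPRE_Int size = fespace.GlobalTrueVSize(); // global true-DOF count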
128 
129 void ParFiniteElementSpace::Construct()
130 {
131  if (NURBSext)
132  {
133  ConstructTrueNURBSDofs();
134  GenerateGlobalOffsets();
135  }
136  else if (Conforming())
137  {
138  ConstructTrueDofs();
139  GenerateGlobalOffsets();
140  }
141  else // Nonconforming()
142  {
143  // Initialize 'gcomm' for the cut (aka "partially conforming") space.
144  // In the process, the array 'ldof_ltdof' is also initialized (for the cut
145  // space) and used; however, it will be overwritten below with the real
146  // true dofs. Also, 'ldof_sign' and 'ldof_group' are constructed for the
147  // cut space.
148  ConstructTrueDofs();
149 
150  // calculate number of ghost DOFs
151  ngvdofs = pncmesh->GetNGhostVertices()
152  * fec->DofForGeometry(Geometry::POINT);
153 
154  ngedofs = ngfdofs = 0;
155  if (pmesh->Dimension() > 1)
156  {
157  ngedofs = pncmesh->GetNGhostEdges()
158  * fec->DofForGeometry(Geometry::SEGMENT);
159  }
160  if (pmesh->Dimension() > 2)
161  {
162  ngfdofs = pncmesh->GetNGhostFaces()
163  * fec->DofForGeometry(pncmesh->GetGhostFaceGeometry(0));
164  }
165 
166  // total number of ghost DOFs. Ghost DOFs start at index 'ndofs', i.e.,
167  // after all regular DOFs
168  ngdofs = ngvdofs + ngedofs + ngfdofs;
169 
170  // get P and R matrices, initialize DOF offsets, etc. NOTE: in the NC
171  // case this needs to be done here to get the number of true DOFs
172  ltdof_size = BuildParallelConformingInterpolation(
173  &P, &R, dof_offsets, tdof_offsets, &ldof_ltdof, false);
174 
175  // TODO future: split BuildParallelConformingInterpolation into two parts
176  // to overlap its communication with processing between this constructor
177  // and the point where the P matrix is actually needed.
178  }
179 }
180 
181 void ParFiniteElementSpace::PrintPartitionStats()
182 {
183  long ltdofs = ltdof_size;
184  long min_ltdofs, max_ltdofs, sum_ltdofs;
185 
186  MPI_Reduce(&ltdofs, &min_ltdofs, 1, MPI_LONG, MPI_MIN, 0, MyComm);
187  MPI_Reduce(&ltdofs, &max_ltdofs, 1, MPI_LONG, MPI_MAX, 0, MyComm);
188  MPI_Reduce(&ltdofs, &sum_ltdofs, 1, MPI_LONG, MPI_SUM, 0, MyComm);
189 
190  if (MyRank == 0)
191  {
192  double avg = double(sum_ltdofs) / NRanks;
193  mfem::out << "True DOF partitioning: min " << min_ltdofs
194  << ", avg " << std::fixed << std::setprecision(1) << avg
195  << ", max " << max_ltdofs
196  << ", (max-avg)/avg " << 100.0*(max_ltdofs - avg)/avg
197  << "%" << std::endl;
198  }
199 
200  if (NRanks <= 32)
201  {
202  if (MyRank == 0)
203  {
204  mfem::out << "True DOFs by rank: " << ltdofs;
205  for (int i = 1; i < NRanks; i++)
206  {
207  MPI_Status status;
208  MPI_Recv(&ltdofs, 1, MPI_LONG, i, 123, MyComm, &status);
209  mfem::out << " " << ltdofs;
210  }
211  mfem::out << "\n";
212  }
213  else
214  {
215  MPI_Send(&ltdofs, 1, MPI_LONG, 0, 123, MyComm);
216  }
217  }
218 }
219 
220 void ParFiniteElementSpace::GetGroupComm(
221  GroupCommunicator &gc, int ldof_type, Array<int> *ldof_sign)
222 {
223  int gr;
224  int ng = pmesh->GetNGroups();
225  int nvd, ned, ntd = 0, nqd = 0;
226  Array<int> dofs;
227 
228  int group_ldof_counter;
229  Table &group_ldof = gc.GroupLDofTable();
230 
230  nvd = fec->DofForGeometry(Geometry::POINT);
231  ned = fec->DofForGeometry(Geometry::SEGMENT);
232 
233 
234  if (fdofs)
235  {
236  if (pmesh->HasGeometry(Geometry::TRIANGLE))
237  {
238  ntd = fec->DofForGeometry(Geometry::TRIANGLE);
239  }
240  if (pmesh->HasGeometry(Geometry::SQUARE))
241  {
242  nqd = fec->DofForGeometry(Geometry::SQUARE);
243  }
244  }
245 
246  if (ldof_sign)
247  {
248  ldof_sign->SetSize(GetNDofs());
249  *ldof_sign = 1;
250  }
251 
252  // count the number of ldofs in all groups (excluding the local group 0)
253  group_ldof_counter = 0;
254  for (gr = 1; gr < ng; gr++)
255  {
256  group_ldof_counter += nvd * pmesh->GroupNVertices(gr);
257  group_ldof_counter += ned * pmesh->GroupNEdges(gr);
258  group_ldof_counter += ntd * pmesh->GroupNTriangles(gr);
259  group_ldof_counter += nqd * pmesh->GroupNQuadrilaterals(gr);
260  }
261  if (ldof_type)
262  {
263  group_ldof_counter *= vdim;
264  }
265  // allocate the I and J arrays in group_ldof
266  group_ldof.SetDims(ng, group_ldof_counter);
267 
268  // build the full group_ldof table
269  group_ldof_counter = 0;
270  group_ldof.GetI()[0] = group_ldof.GetI()[1] = 0;
271  for (gr = 1; gr < ng; gr++)
272  {
273  int j, k, l, m, o, nv, ne, nt, nq;
274  const int *ind;
275 
276  nv = pmesh->GroupNVertices(gr);
277  ne = pmesh->GroupNEdges(gr);
278  nt = pmesh->GroupNTriangles(gr);
279  nq = pmesh->GroupNQuadrilaterals(gr);
280 
281  // vertices
282  if (nvd > 0)
283  {
284  for (j = 0; j < nv; j++)
285  {
286  k = pmesh->GroupVertex(gr, j);
287 
288  dofs.SetSize(nvd);
289  m = nvd * k;
290  for (l = 0; l < nvd; l++, m++)
291  {
292  dofs[l] = m;
293  }
294 
295  if (ldof_type)
296  {
297  DofsToVDofs(dofs);
298  }
299 
300  for (l = 0; l < dofs.Size(); l++)
301  {
302  group_ldof.GetJ()[group_ldof_counter++] = dofs[l];
303  }
304  }
305  }
306 
307  // edges
308  if (ned > 0)
309  {
310  for (j = 0; j < ne; j++)
311  {
312  pmesh->GroupEdge(gr, j, k, o);
313 
314  dofs.SetSize(ned);
315  m = nvdofs+k*ned;
316  ind = fec->DofOrderForOrientation(Geometry::SEGMENT, o);
317  for (l = 0; l < ned; l++)
318  {
319  if (ind[l] < 0)
320  {
321  dofs[l] = m + (-1-ind[l]);
322  if (ldof_sign)
323  {
324  (*ldof_sign)[dofs[l]] = -1;
325  }
326  }
327  else
328  {
329  dofs[l] = m + ind[l];
330  }
331  }
332 
333  if (ldof_type)
334  {
335  DofsToVDofs(dofs);
336  }
337 
338  for (l = 0; l < dofs.Size(); l++)
339  {
340  group_ldof.GetJ()[group_ldof_counter++] = dofs[l];
341  }
342  }
343  }
344 
345  // triangles
346  if (ntd > 0)
347  {
348  for (j = 0; j < nt; j++)
349  {
350  pmesh->GroupTriangle(gr, j, k, o);
351 
352  dofs.SetSize(ntd);
353  m = nvdofs+nedofs+fdofs[k];
354  ind = fec->DofOrderForOrientation(Geometry::TRIANGLE, o);
355  for (l = 0; l < ntd; l++)
356  {
357  if (ind[l] < 0)
358  {
359  dofs[l] = m + (-1-ind[l]);
360  if (ldof_sign)
361  {
362  (*ldof_sign)[dofs[l]] = -1;
363  }
364  }
365  else
366  {
367  dofs[l] = m + ind[l];
368  }
369  }
370 
371  if (ldof_type)
372  {
373  DofsToVDofs(dofs);
374  }
375 
376  for (l = 0; l < dofs.Size(); l++)
377  {
378  group_ldof.GetJ()[group_ldof_counter++] = dofs[l];
379  }
380  }
381  }
382 
383  // quadrilaterals
384  if (nqd > 0)
385  {
386  for (j = 0; j < nq; j++)
387  {
388  pmesh->GroupQuadrilateral(gr, j, k, o);
389 
390  dofs.SetSize(nqd);
391  m = nvdofs+nedofs+fdofs[k];
392  ind = fec->DofOrderForOrientation(Geometry::SQUARE, o);
393  for (l = 0; l < nqd; l++)
394  {
395  if (ind[l] < 0)
396  {
397  dofs[l] = m + (-1-ind[l]);
398  if (ldof_sign)
399  {
400  (*ldof_sign)[dofs[l]] = -1;
401  }
402  }
403  else
404  {
405  dofs[l] = m + ind[l];
406  }
407  }
408 
409  if (ldof_type)
410  {
411  DofsToVDofs(dofs);
412  }
413 
414  for (l = 0; l < dofs.Size(); l++)
415  {
416  group_ldof.GetJ()[group_ldof_counter++] = dofs[l];
417  }
418  }
419  }
420 
421  group_ldof.GetI()[gr+1] = group_ldof_counter;
422  }
423 
424  gc.Finalize();
425 }
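// Illustrative usage sketch (not part of the original file): the
// GroupCommunicator configured by GetGroupComm() supports the reduce plus
// broadcast pattern used throughout this file. Group masters accumulate
// contributions, then push the result back to the other group members:
//
//    Array<int> data(fespace.GetNDofs());
//    // ... fill 'data' locally ...
//    gc.Reduce<int>(data, GroupCommunicator::Sum); // masters accumulate
//    gc.Bcast(data);                               // masters redistribute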
426 
427 void ParFiniteElementSpace::ApplyLDofSigns(Array<int> &dofs) const
428 {
429  MFEM_ASSERT(Conforming(), "wrong code path");
430 
431  for (int i = 0; i < dofs.Size(); i++)
432  {
433  if (dofs[i] < 0)
434  {
435  if (ldof_sign[-1-dofs[i]] < 0)
436  {
437  dofs[i] = -1-dofs[i];
438  }
439  }
440  else
441  {
442  if (ldof_sign[dofs[i]] < 0)
443  {
444  dofs[i] = -1-dofs[i];
445  }
446  }
447  }
448 }
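// Worked example (not part of the original file): negative indices encode a
// sign flip, mapping d >= 0 to -1-d; since -1-(-1-d) == d, the encoding is an
// involution. With ldof_sign = { +1, -1 }:
//
//    dofs = { 0, 1 }   --ApplyLDofSigns-->  { 0, -2 }  // -2 encodes -(dof 1)
//    dofs = { 0, -2 }  --ApplyLDofSigns-->  { 0, 1 }   // a second pass undoes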
449 
450 void ParFiniteElementSpace::ApplyLDofSigns(Table &el_dof) const
451 {
452  Array<int> all_dofs(el_dof.GetJ(), el_dof.Size_of_connections());
453  ApplyLDofSigns(all_dofs);
454 }
455 
456 void ParFiniteElementSpace::GetElementDofs(int i, Array<int> &dofs) const
457 {
458  if (elem_dof)
459  {
460  elem_dof->GetRow(i, dofs);
461  return;
462  }
463  FiniteElementSpace::GetElementDofs(i, dofs);
464  if (Conforming())
465  {
466  ApplyLDofSigns(dofs);
467  }
468 }
469 
470 void ParFiniteElementSpace::GetBdrElementDofs(int i, Array<int> &dofs) const
471 {
472  if (bdrElem_dof)
473  {
474  bdrElem_dof->GetRow(i, dofs);
475  return;
476  }
477  FiniteElementSpace::GetBdrElementDofs(i, dofs);
478  if (Conforming())
479  {
480  ApplyLDofSigns(dofs);
481  }
482 }
483 
484 void ParFiniteElementSpace::GetFaceDofs(int i, Array<int> &dofs) const
485 {
486  FiniteElementSpace::GetFaceDofs(i, dofs);
487  if (Conforming())
488  {
489  ApplyLDofSigns(dofs);
490  }
491 }
492 
493 void ParFiniteElementSpace::GetSharedEdgeDofs(
494  int group, int ei, Array<int> &dofs) const
495 {
496  int l_edge, ori;
497  MFEM_ASSERT(0 <= ei && ei < pmesh->GroupNEdges(group), "invalid edge index");
498  pmesh->GroupEdge(group, ei, l_edge, ori);
499  if (ori > 0) // ori = +1 or -1
500  {
501  GetEdgeDofs(l_edge, dofs);
502  }
503  else
504  {
505  Array<int> rdofs;
506  fec->SubDofOrder(Geometry::SEGMENT, 1, 1, dofs);
507  GetEdgeDofs(l_edge, rdofs);
508  for (int i = 0; i < dofs.Size(); i++)
509  {
510  const int di = dofs[i];
511  dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];
512  }
513  }
514 }
515 
516 void ParFiniteElementSpace::GetSharedTriangleDofs(
517  int group, int fi, Array<int> &dofs) const
518 {
519  int l_face, ori;
520  MFEM_ASSERT(0 <= fi && fi < pmesh->GroupNTriangles(group),
521  "invalid triangular face index");
522  pmesh->GroupTriangle(group, fi, l_face, ori);
523  if (ori == 0)
524  {
525  GetFaceDofs(l_face, dofs);
526  }
527  else
528  {
529  Array<int> rdofs;
530  fec->SubDofOrder(Geometry::TRIANGLE, 2, ori, dofs);
531  GetFaceDofs(l_face, rdofs);
532  for (int i = 0; i < dofs.Size(); i++)
533  {
534  const int di = dofs[i];
535  dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];
536  }
537  }
538 }
539 
540 void ParFiniteElementSpace::GetSharedQuadrilateralDofs(
541  int group, int fi, Array<int> &dofs) const
542 {
543  int l_face, ori;
544  MFEM_ASSERT(0 <= fi && fi < pmesh->GroupNQuadrilaterals(group),
545  "invalid quadrilateral face index");
546  pmesh->GroupQuadrilateral(group, fi, l_face, ori);
547  if (ori == 0)
548  {
549  GetFaceDofs(l_face, dofs);
550  }
551  else
552  {
553  Array<int> rdofs;
554  fec->SubDofOrder(Geometry::SQUARE, 2, ori, dofs);
555  GetFaceDofs(l_face, rdofs);
556  for (int i = 0; i < dofs.Size(); i++)
557  {
558  const int di = dofs[i];
559  dofs[i] = (di >= 0) ? rdofs[di] : -1-rdofs[-1-di];
560  }
561  }
562 }
563 
564 void ParFiniteElementSpace::GenerateGlobalOffsets() const
565 {
566  MFEM_ASSERT(Conforming(), "wrong code path");
567 
568  HYPRE_Int ldof[2];
569  Array<HYPRE_Int> *offsets[2] = { &dof_offsets, &tdof_offsets };
570 
571  ldof[0] = GetVSize();
572  ldof[1] = TrueVSize();
573 
574  pmesh->GenerateOffsets(2, ldof, offsets);
575 
576  if (HYPRE_AssumedPartitionCheck())
577  {
578  // communicate the neighbor offsets in tdof_nb_offsets
579  GroupTopology &gt = GetGroupTopo();
580  int nsize = gt.GetNumNeighbors()-1;
581  MPI_Request *requests = new MPI_Request[2*nsize];
582  MPI_Status *statuses = new MPI_Status[2*nsize];
583  tdof_nb_offsets.SetSize(nsize+1);
584  tdof_nb_offsets[0] = tdof_offsets[0];
585 
586  // send and receive neighbors' local tdof offsets
587  int request_counter = 0;
588  for (int i = 1; i <= nsize; i++)
589  {
590  MPI_Irecv(&tdof_nb_offsets[i], 1, HYPRE_MPI_INT,
591  gt.GetNeighborRank(i), 5365, MyComm,
592  &requests[request_counter++]);
593  }
594  for (int i = 1; i <= nsize; i++)
595  {
596  MPI_Isend(&tdof_nb_offsets[0], 1, HYPRE_MPI_INT,
597  gt.GetNeighborRank(i), 5365, MyComm,
598  &requests[request_counter++]);
599  }
600  MPI_Waitall(request_counter, requests, statuses);
601 
602  delete [] statuses;
603  delete [] requests;
604  }
605 }
606 
607 void ParFiniteElementSpace::Build_Dof_TrueDof_Matrix() const // matrix P
608 {
609  MFEM_ASSERT(Conforming(), "wrong code path");
610 
611  if (P) { return; }
612 
613  int ldof = GetVSize();
614  int ltdof = TrueVSize();
615 
616  HYPRE_Int *i_diag = new HYPRE_Int[ldof+1];
617  HYPRE_Int *j_diag = new HYPRE_Int[ltdof];
618  int diag_counter;
619 
620  HYPRE_Int *i_offd = new HYPRE_Int[ldof+1];
621  HYPRE_Int *j_offd = new HYPRE_Int[ldof-ltdof];
622  int offd_counter;
623 
624  HYPRE_Int *cmap = new HYPRE_Int[ldof-ltdof];
625 
626  HYPRE_Int *col_starts = GetTrueDofOffsets();
627  HYPRE_Int *row_starts = GetDofOffsets();
628 
629  Array<Pair<HYPRE_Int, int> > cmap_j_offd(ldof-ltdof);
630 
631  i_diag[0] = i_offd[0] = 0;
632  diag_counter = offd_counter = 0;
633  for (int i = 0; i < ldof; i++)
634  {
635  int ltdof = GetLocalTDofNumber(i);
636  if (ltdof >= 0)
637  {
638  j_diag[diag_counter++] = ltdof;
639  }
640  else
641  {
642  cmap_j_offd[offd_counter].one = GetGlobalTDofNumber(i);
643  cmap_j_offd[offd_counter].two = offd_counter;
644  offd_counter++;
645  }
646  i_diag[i+1] = diag_counter;
647  i_offd[i+1] = offd_counter;
648  }
649 
650  SortPairs<HYPRE_Int, int>(cmap_j_offd, offd_counter);
651 
652  for (int i = 0; i < offd_counter; i++)
653  {
654  cmap[i] = cmap_j_offd[i].one;
655  j_offd[cmap_j_offd[i].two] = i;
656  }
657 
658  P = new HypreParMatrix(MyComm, MyRank, NRanks, row_starts, col_starts,
659  i_diag, j_diag, i_offd, j_offd, cmap, offd_counter);
660 
661  SparseMatrix Pdiag;
662  P->GetDiag(Pdiag);
663  R = Transpose(Pdiag);
664 }
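// Illustrative note (not part of the original file): each row of the
// assembled P has exactly one entry equal to 1, placed in the local "diag"
// block when the ldof is a true DOF owned by this rank, or in the "offd"
// block (with the owner's global column recorded in 'cmap') otherwise.
// Typical use mirrors ParGridFunction::Distribute():
//
//    Vector x_true(fespace.TrueVSize()), x(fespace.GetVSize());
//    fespace.Dof_TrueDof_Matrix()->Mult(x_true, x); // true DOFs -> all ldofs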
665 
666 HypreParMatrix *ParFiniteElementSpace::GetPartialConformingInterpolation()
667 {
668  HypreParMatrix *P_pc;
669  Array<HYPRE_Int> P_pc_row_starts, P_pc_col_starts;
670  BuildParallelConformingInterpolation(&P_pc, NULL, P_pc_row_starts,
671  P_pc_col_starts, NULL, true);
672  P_pc->CopyRowStarts();
673  P_pc->CopyColStarts();
674  return P_pc;
675 }
676 
677 void ParFiniteElementSpace::DivideByGroupSize(double *vec)
678 {
679  GroupTopology &gt = GetGroupTopo();
680  for (int i = 0; i < ldof_group.Size(); i++)
681  {
682  if (gt.IAmMaster(ldof_group[i])) // we are the master
683  {
684  if (ldof_ltdof[i] >= 0) // see note below
685  {
686  vec[ldof_ltdof[i]] /= gt.GetGroupSize(ldof_group[i]);
687  }
688  // NOTE: in NC meshes, ldof_ltdof generated for the gtopo
689  // groups by ConstructTrueDofs gets overwritten by
690  // BuildParallelConformingInterpolation. Some DOFs that are
691  // seen as true by the conforming code are actually slaves and
692  // end up with a -1 in ldof_ltdof.
693  }
694  }
695 }
696 
697 GroupCommunicator *ParFiniteElementSpace::ScalarGroupComm()
698 {
699  GroupCommunicator *gc = new GroupCommunicator(GetGroupTopo());
700  if (NURBSext)
701  {
702  gc->Create(pNURBSext()->ldof_group);
703  }
704  else
705  {
706  GetGroupComm(*gc, 0);
707  }
708  return gc;
709 }
710 
711 void ParFiniteElementSpace::Synchronize(Array<int> &ldof_marker) const
712 {
713  // For non-conforming mesh, synchronization is performed on the cut (aka
714  // "partially conforming") space.
715 
716  MFEM_VERIFY(ldof_marker.Size() == GetVSize(), "invalid in/out array");
717 
718  // implement allreduce(|) as reduce(|) + broadcast
719  gcomm->Reduce<int>(ldof_marker, GroupCommunicator::BitOR);
720  gcomm->Bcast(ldof_marker);
721 }
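// Illustrative usage sketch (not part of the original file): Synchronize()
// makes a bitwise-OR marker consistent on shared DOFs, so a flag set by any
// rank becomes visible on every rank that shares the DOF:
//
//    Array<int> marker(fespace.GetVSize());
//    marker = 0;
//    // ... mark locally known DOFs with nonzero values ...
//    fespace.Synchronize(marker); // now consistent across all ranks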
722 
723 void ParFiniteElementSpace::GetEssentialVDofs(const Array<int> &bdr_attr_is_ess,
724  Array<int> &ess_dofs,
725  int component) const
726 {
727  FiniteElementSpace::GetEssentialVDofs(bdr_attr_is_ess, ess_dofs, component);
728 
729  if (Conforming())
730  {
731  // Make sure that processors without boundary elements mark
732  // their boundary dofs (if they have any).
733  Synchronize(ess_dofs);
734  }
735 }
736 
737 void ParFiniteElementSpace::GetEssentialTrueDofs(const Array<int>
738  &bdr_attr_is_ess,
739  Array<int> &ess_tdof_list,
740  int component)
741 {
742  Array<int> ess_dofs, true_ess_dofs;
743 
744  GetEssentialVDofs(bdr_attr_is_ess, ess_dofs, component);
745  GetRestrictionMatrix()->BooleanMult(ess_dofs, true_ess_dofs);
746 #ifdef MFEM_DEBUG
747  // Verify that in boolean arithmetic: P^T ess_dofs = R ess_dofs.
748  Array<int> true_ess_dofs2(true_ess_dofs.Size());
749  HypreParMatrix *Pt = Dof_TrueDof_Matrix()->Transpose();
750  const int *ess_dofs_data = ess_dofs.HostRead();
751  Pt->BooleanMult(1, ess_dofs_data, 0, true_ess_dofs2);
752  delete Pt;
753  int counter = 0;
754  const int *ted = true_ess_dofs.HostRead();
755  for (int i = 0; i < true_ess_dofs.Size(); i++)
756  {
757  if (bool(ted[i]) != bool(true_ess_dofs2[i])) { counter++; }
758  }
759  MFEM_VERIFY(counter == 0, "internal MFEM error: counter = " << counter);
760 #endif
761  MarkerToList(true_ess_dofs, ess_tdof_list);
762 }
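// Illustrative usage sketch (not part of the original file), following the
// pattern of the MFEM examples (e.g. ex1p): mark every boundary attribute as
// essential and retrieve the corresponding true-DOF list:
//
//    Array<int> ess_tdof_list;
//    if (pmesh.bdr_attributes.Size())
//    {
//       Array<int> ess_bdr(pmesh.bdr_attributes.Max());
//       ess_bdr = 1;
//       fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
//    }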
763 
764 int ParFiniteElementSpace::GetLocalTDofNumber(int ldof) const
765 {
766  if (Nonconforming())
767  {
768  Dof_TrueDof_Matrix(); // inline method
769 
770  return ldof_ltdof[ldof]; // NOTE: contains -1 for slaves/DOFs we don't own
771  }
772  else
773  {
774  if (GetGroupTopo().IAmMaster(ldof_group[ldof]))
775  {
776  return ldof_ltdof[ldof];
777  }
778  else
779  {
780  return -1;
781  }
782  }
783 }
784 
785 HYPRE_Int ParFiniteElementSpace::GetGlobalTDofNumber(int ldof) const
786 {
787  if (Nonconforming())
788  {
789  MFEM_VERIFY(ldof_ltdof[ldof] >= 0, "ldof " << ldof << " not a true DOF.");
790 
791  return GetMyTDofOffset() + ldof_ltdof[ldof];
792  }
793  else
794  {
795  if (HYPRE_AssumedPartitionCheck())
796  {
797  return ldof_ltdof[ldof] +
798  tdof_nb_offsets[GetGroupTopo().GetGroupMaster(ldof_group[ldof])];
799  }
800  else
801  {
802  return ldof_ltdof[ldof] +
803  tdof_offsets[GetGroupTopo().GetGroupMasterRank(ldof_group[ldof])];
804  }
805  }
806 }
807 
808 HYPRE_Int ParFiniteElementSpace::GetGlobalScalarTDofNumber(int sldof)
809 {
810  if (Nonconforming())
811  {
812  MFEM_ABORT("Not implemented for NC mesh.");
813  }
814 
815  if (HYPRE_AssumedPartitionCheck())
816  {
817  if (ordering == Ordering::byNODES)
818  {
819  return ldof_ltdof[sldof] +
820  tdof_nb_offsets[GetGroupTopo().GetGroupMaster(
821  ldof_group[sldof])] / vdim;
822  }
823  else
824  {
825  return (ldof_ltdof[sldof*vdim] +
826  tdof_nb_offsets[GetGroupTopo().GetGroupMaster(
827  ldof_group[sldof*vdim])]) / vdim;
828  }
829  }
830 
831  if (ordering == Ordering::byNODES)
832  {
833  return ldof_ltdof[sldof] +
834  tdof_offsets[GetGroupTopo().GetGroupMasterRank(
835  ldof_group[sldof])] / vdim;
836  }
837  else
838  {
839  return (ldof_ltdof[sldof*vdim] +
840  tdof_offsets[GetGroupTopo().GetGroupMasterRank(
841  ldof_group[sldof*vdim])]) / vdim;
842  }
843 }
844 
845 HYPRE_Int ParFiniteElementSpace::GetMyDofOffset() const
846 {
847  return HYPRE_AssumedPartitionCheck() ? dof_offsets[0] : dof_offsets[MyRank];
848 }
849 
850 HYPRE_Int ParFiniteElementSpace::GetMyTDofOffset() const
851 {
852  return HYPRE_AssumedPartitionCheck()? tdof_offsets[0] : tdof_offsets[MyRank];
853 }
854 
855 const Operator *ParFiniteElementSpace::GetProlongationMatrix() const
856 {
857  if (Conforming())
858  {
859  if (!Pconf) { Pconf = new ConformingProlongationOperator(*this); }
860  return Pconf;
861  }
862  else
863  {
864  return Dof_TrueDof_Matrix();
865  }
866 }
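// Illustrative sketch (not part of the original file): the prolongation P and
// restriction R convert between true-DOF and local (ldof) vectors, with
// R * P acting as the identity on true DOFs:
//
//    Vector tv(fespace.TrueVSize()), lv(fespace.GetVSize());
//    fespace.GetProlongationMatrix()->Mult(tv, lv); // tdofs -> ldofs
//    fespace.GetRestrictionMatrix()->Mult(lv, tv);  // ldofs -> tdofs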
867 
868 void ParFiniteElementSpace::ExchangeFaceNbrData()
869 {
870  if (num_face_nbr_dofs >= 0) { return; }
871 
872  pmesh->ExchangeFaceNbrData();
873 
874  int num_face_nbrs = pmesh->GetNFaceNeighbors();
875 
876  if (num_face_nbrs == 0)
877  {
878  num_face_nbr_dofs = 0;
879  return;
880  }
881 
882  MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
883  MPI_Request *send_requests = requests;
884  MPI_Request *recv_requests = requests + num_face_nbrs;
885  MPI_Status *statuses = new MPI_Status[num_face_nbrs];
886 
887  Array<int> ldofs;
888  Array<int> ldof_marker(GetVSize());
889  ldof_marker = -1;
890 
891  Table send_nbr_elem_dof;
892 
893  send_nbr_elem_dof.MakeI(pmesh->send_face_nbr_elements.Size_of_connections());
894  send_face_nbr_ldof.MakeI(num_face_nbrs);
895  face_nbr_ldof.MakeI(num_face_nbrs);
896  int *send_el_off = pmesh->send_face_nbr_elements.GetI();
897  int *recv_el_off = pmesh->face_nbr_elements_offset;
898  for (int fn = 0; fn < num_face_nbrs; fn++)
899  {
900  int *my_elems = pmesh->send_face_nbr_elements.GetRow(fn);
901  int num_my_elems = pmesh->send_face_nbr_elements.RowSize(fn);
902 
903  for (int i = 0; i < num_my_elems; i++)
904  {
905  GetElementVDofs(my_elems[i], ldofs);
906  for (int j = 0; j < ldofs.Size(); j++)
907  {
908  int ldof = (ldofs[j] >= 0 ? ldofs[j] : -1-ldofs[j]);
909 
910  if (ldof_marker[ldof] != fn)
911  {
912  ldof_marker[ldof] = fn;
913  send_face_nbr_ldof.AddAColumnInRow(fn);
914  }
915  }
916  send_nbr_elem_dof.AddColumnsInRow(send_el_off[fn] + i, ldofs.Size());
917  }
918 
919  int nbr_rank = pmesh->GetFaceNbrRank(fn);
920  int tag = 0;
921  MPI_Isend(&send_face_nbr_ldof.GetI()[fn], 1, MPI_INT, nbr_rank, tag,
922  MyComm, &send_requests[fn]);
923 
924  MPI_Irecv(&face_nbr_ldof.GetI()[fn], 1, MPI_INT, nbr_rank, tag,
925  MyComm, &recv_requests[fn]);
926  }
927 
928  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
929  face_nbr_ldof.MakeJ();
930 
931  num_face_nbr_dofs = face_nbr_ldof.Size_of_connections();
932 
933  MPI_Waitall(num_face_nbrs, send_requests, statuses);
934  send_face_nbr_ldof.MakeJ();
935 
936  // send/receive the I arrays of send_nbr_elem_dof/face_nbr_element_dof,
937  // respectively (they contain the number of dofs for each face-neighbor
938  // element)
939  face_nbr_element_dof.MakeI(recv_el_off[num_face_nbrs]);
940 
941  int *send_I = send_nbr_elem_dof.GetI();
942  int *recv_I = face_nbr_element_dof.GetI();
943  for (int fn = 0; fn < num_face_nbrs; fn++)
944  {
945  int nbr_rank = pmesh->GetFaceNbrRank(fn);
946  int tag = 0;
947  MPI_Isend(send_I + send_el_off[fn], send_el_off[fn+1] - send_el_off[fn],
948  MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
949 
950  MPI_Irecv(recv_I + recv_el_off[fn], recv_el_off[fn+1] - recv_el_off[fn],
951  MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);
952  }
953 
954  MPI_Waitall(num_face_nbrs, send_requests, statuses);
955  send_nbr_elem_dof.MakeJ();
956 
957  ldof_marker = -1;
958 
959  for (int fn = 0; fn < num_face_nbrs; fn++)
960  {
961  int *my_elems = pmesh->send_face_nbr_elements.GetRow(fn);
962  int num_my_elems = pmesh->send_face_nbr_elements.RowSize(fn);
963 
964  for (int i = 0; i < num_my_elems; i++)
965  {
966  GetElementVDofs(my_elems[i], ldofs);
967  for (int j = 0; j < ldofs.Size(); j++)
968  {
969  int ldof = (ldofs[j] >= 0 ? ldofs[j] : -1-ldofs[j]);
970 
971  if (ldof_marker[ldof] != fn)
972  {
973  ldof_marker[ldof] = fn;
974  send_face_nbr_ldof.AddConnection(fn, ldofs[j]);
975  }
976  }
977  send_nbr_elem_dof.AddConnections(
978  send_el_off[fn] + i, ldofs, ldofs.Size());
979  }
980  }
981  send_face_nbr_ldof.ShiftUpI();
982  send_nbr_elem_dof.ShiftUpI();
983 
984  // convert the ldof indices in send_nbr_elem_dof
985  int *send_J = send_nbr_elem_dof.GetJ();
986  for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)
987  {
988  int num_ldofs = send_face_nbr_ldof.RowSize(fn);
989  int *ldofs = send_face_nbr_ldof.GetRow(fn);
990  int j_end = send_I[send_el_off[fn+1]];
991 
992  for (int i = 0; i < num_ldofs; i++)
993  {
994  int ldof = (ldofs[i] >= 0 ? ldofs[i] : -1-ldofs[i]);
995  ldof_marker[ldof] = i;
996  }
997 
998  for ( ; j < j_end; j++)
999  {
1000  int ldof = (send_J[j] >= 0 ? send_J[j] : -1-send_J[j]);
1001  send_J[j] = (send_J[j] >= 0 ? ldof_marker[ldof] : -1-ldof_marker[ldof]);
1002  }
1003  }
1004 
1005  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1006  face_nbr_element_dof.MakeJ();
1007 
1008  // send/receive the J arrays of send_nbr_elem_dof/face_nbr_element_dof,
1009  // respectively (they contain the element dofs in enumeration local for
1010  // the face-neighbor pair)
1011  int *recv_J = face_nbr_element_dof.GetJ();
1012  for (int fn = 0; fn < num_face_nbrs; fn++)
1013  {
1014  int nbr_rank = pmesh->GetFaceNbrRank(fn);
1015  int tag = 0;
1016 
1017  MPI_Isend(send_J + send_I[send_el_off[fn]],
1018  send_I[send_el_off[fn+1]] - send_I[send_el_off[fn]],
1019  MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
1020 
1021  MPI_Irecv(recv_J + recv_I[recv_el_off[fn]],
1022  recv_I[recv_el_off[fn+1]] - recv_I[recv_el_off[fn]],
1023  MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);
1024  }
1025 
1026  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1027 
1028  // shift the J array of face_nbr_element_dof
1029  for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)
1030  {
1031  int shift = face_nbr_ldof.GetI()[fn];
1032  int j_end = recv_I[recv_el_off[fn+1]];
1033 
1034  for ( ; j < j_end; j++)
1035  {
1036  if (recv_J[j] >= 0)
1037  {
1038  recv_J[j] += shift;
1039  }
1040  else
1041  {
1042  recv_J[j] -= shift;
1043  }
1044  }
1045  }
1046 
1047  MPI_Waitall(num_face_nbrs, send_requests, statuses);
1048 
1049  // send/receive the J arrays of send_face_nbr_ldof/face_nbr_ldof,
1050  // respectively
1051  for (int fn = 0; fn < num_face_nbrs; fn++)
1052  {
1053  int nbr_rank = pmesh->GetFaceNbrRank(fn);
1054  int tag = 0;
1055 
1056  MPI_Isend(send_face_nbr_ldof.GetRow(fn),
1057  send_face_nbr_ldof.RowSize(fn),
1058  MPI_INT, nbr_rank, tag, MyComm, &send_requests[fn]);
1059 
1060  MPI_Irecv(face_nbr_ldof.GetRow(fn),
1061  face_nbr_ldof.RowSize(fn),
1062  MPI_INT, nbr_rank, tag, MyComm, &recv_requests[fn]);
1063  }
1064 
1065  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1066  MPI_Waitall(num_face_nbrs, send_requests, statuses);
1067 
1068  // send my_dof_offset (i.e. my_ldof_offset) to face neighbors and receive
1069  // their offset in dof_face_nbr_offsets, used to define face_nbr_glob_dof_map
1070  face_nbr_glob_dof_map.SetSize(num_face_nbr_dofs);
1071  Array<HYPRE_Int> dof_face_nbr_offsets(num_face_nbrs);
1072  HYPRE_Int my_dof_offset = GetMyDofOffset();
1073  for (int fn = 0; fn < num_face_nbrs; fn++)
1074  {
1075  int nbr_rank = pmesh->GetFaceNbrRank(fn);
1076  int tag = 0;
1077 
1078  MPI_Isend(&my_dof_offset, 1, HYPRE_MPI_INT, nbr_rank, tag,
1079  MyComm, &send_requests[fn]);
1080 
1081  MPI_Irecv(&dof_face_nbr_offsets[fn], 1, HYPRE_MPI_INT, nbr_rank, tag,
1082  MyComm, &recv_requests[fn]);
1083  }
1084 
1085  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
1086 
1087  // set the array face_nbr_glob_dof_map which holds the global ldof indices of
1088  // the face-neighbor dofs
1089  for (int fn = 0, j = 0; fn < num_face_nbrs; fn++)
1090  {
1091  for (int j_end = face_nbr_ldof.GetI()[fn+1]; j < j_end; j++)
1092  {
1093  int ldof = face_nbr_ldof.GetJ()[j];
1094  if (ldof < 0)
1095  {
1096  ldof = -1-ldof;
1097  }
1098 
1099  face_nbr_glob_dof_map[j] = dof_face_nbr_offsets[fn] + ldof;
1100  }
1101  }
1102 
1103  MPI_Waitall(num_face_nbrs, send_requests, statuses);
1104 
1105  delete [] statuses;
1106  delete [] requests;
1107 }
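// Illustrative usage sketch (not part of the original file): the
// face-neighbor tables built above support DG-style assembly across
// processor boundaries. The usual calling sequence from user code is:
//
//    pmesh.ExchangeFaceNbrData();  // mesh-level neighbor elements
//    x.ExchangeFaceNbrData();      // ParGridFunction values for the
//                                  // neighbor DOFs enumerated here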
1108 
1109 void ParFiniteElementSpace::GetFaceNbrElementVDofs(
1110  int i, Array<int> &vdofs) const
1111 {
1112  face_nbr_element_dof.GetRow(i, vdofs);
1113 }
1114 
1115 void ParFiniteElementSpace::GetFaceNbrFaceVDofs(int i, Array<int> &vdofs) const
1116 {
1117  // Works for NC mesh where 'i' is an index returned by
1118  // ParMesh::GetSharedFace() such that i >= Mesh::GetNumFaces(), i.e. 'i' is
1119  // the index of a ghost.
1120  MFEM_ASSERT(Nonconforming() && i >= pmesh->GetNumFaces(), "");
1121  int el1, el2, inf1, inf2;
1122  pmesh->GetFaceElements(i, &el1, &el2);
1123  el2 = -1 - el2;
1124  pmesh->GetFaceInfos(i, &inf1, &inf2);
1125  MFEM_ASSERT(0 <= el2 && el2 < face_nbr_element_dof.Size(), "");
1126  const int nd = face_nbr_element_dof.RowSize(el2);
1127  const int *vol_vdofs = face_nbr_element_dof.GetRow(el2);
1128  const Element *face_nbr_el = pmesh->face_nbr_elements[el2];
1129  Geometry::Type geom = face_nbr_el->GetGeometryType();
1130  const int face_dim = Geometry::Dimension[geom]-1;
1131 
1132  fec->SubDofOrder(geom, face_dim, inf2, vdofs);
1133  // Convert local dofs to local vdofs.
1134  Ordering::DofsToVDofs<Ordering::byNODES>(nd/vdim, vdim, vdofs);
1135  // Convert local vdofs to global vdofs.
1136  for (int j = 0; j < vdofs.Size(); j++)
1137  {
1138  const int ldof = vdofs[j];
1139  vdofs[j] = (ldof >= 0) ? vol_vdofs[ldof] : -1-vol_vdofs[-1-ldof];
1140  }
1141 }
1142 
1143 const FiniteElement *ParFiniteElementSpace::GetFaceNbrFE(int i) const
1144 {
1145  const FiniteElement *FE =
1146  fec->FiniteElementForGeometry(
1147  pmesh->face_nbr_elements[i]->GetGeometryType());
1148 
1149  if (NURBSext)
1150  {
1151  mfem_error("ParFiniteElementSpace::GetFaceNbrFE"
1152  " does not support NURBS!");
1153  }
1154  return FE;
1155 }
1156 
1157 const FiniteElement *ParFiniteElementSpace::GetFaceNbrFaceFE(int i) const
1158 {
1159  // Works in tandem with GetFaceNbrFaceVDofs() defined above.
1160  MFEM_ASSERT(Nonconforming() && !NURBSext, "");
1161  Geometry::Type geom = (pmesh->Dimension() == 2) ?
1162  Geometry::SEGMENT : Geometry::SQUARE;
1163  return fec->FiniteElementForGeometry(geom);
1164 }
1165 
1166 void ParFiniteElementSpace::Lose_Dof_TrueDof_Matrix()
1167 {
1168  hypre_ParCSRMatrix *csrP = (hypre_ParCSRMatrix*)(*P);
1169  hypre_ParCSRMatrixOwnsRowStarts(csrP) = 1;
1170  hypre_ParCSRMatrixOwnsColStarts(csrP) = 1;
1171  P -> StealData();
1172  dof_offsets.LoseData();
1173  tdof_offsets.LoseData();
1174 }
1175 
1176 void ParFiniteElementSpace::ConstructTrueDofs()
1177 {
1178  int i, gr, n = GetVSize();
1179  GroupTopology &gt = pmesh->gtopo;
1180  gcomm = new GroupCommunicator(gt);
1181  Table &group_ldof = gcomm->GroupLDofTable();
1182 
1183  GetGroupComm(*gcomm, 1, &ldof_sign);
1184 
1185  // Define ldof_group and mark ldof_ltdof with
1186  // -1 for ldof that is ours
1187  // -2 for ldof that is in a group with another master
1188  ldof_group.SetSize(n);
1189  ldof_ltdof.SetSize(n);
1190  ldof_group = 0;
1191  ldof_ltdof = -1;
1192 
1193  for (gr = 1; gr < group_ldof.Size(); gr++)
1194  {
1195  const int *ldofs = group_ldof.GetRow(gr);
1196  const int nldofs = group_ldof.RowSize(gr);
1197  for (i = 0; i < nldofs; i++)
1198  {
1199  ldof_group[ldofs[i]] = gr;
1200  }
1201 
1202  if (!gt.IAmMaster(gr)) // we are not the master
1203  {
1204  for (i = 0; i < nldofs; i++)
1205  {
1206  ldof_ltdof[ldofs[i]] = -2;
1207  }
1208  }
1209  }
1210 
1211  // count ltdof_size
1212  ltdof_size = 0;
1213  for (i = 0; i < n; i++)
1214  {
1215  if (ldof_ltdof[i] == -1)
1216  {
1217  ldof_ltdof[i] = ltdof_size++;
1218  }
1219  }
1220  gcomm->SetLTDofTable(ldof_ltdof);
1221 
1222  // have the group masters broadcast their ltdofs to the rest of the group
1223  gcomm->Bcast(ldof_ltdof);
1224 }
1225 
1226 void ParFiniteElementSpace::ConstructTrueNURBSDofs()
1227 {
1228  int n = GetVSize();
1229  GroupTopology &gt = pNURBSext()->gtopo;
1230  gcomm = new GroupCommunicator(gt);
1231 
1232  // pNURBSext()->ldof_group is for scalar space!
1233  if (vdim == 1)
1234  {
1235  ldof_group.MakeRef(pNURBSext()->ldof_group);
1236  }
1237  else
1238  {
1239  const int *scalar_ldof_group = pNURBSext()->ldof_group;
1240  ldof_group.SetSize(n);
1241  for (int i = 0; i < n; i++)
1242  {
1243  ldof_group[i] = scalar_ldof_group[VDofToDof(i)];
1244  }
1245  }
1246 
1247  gcomm->Create(ldof_group);
1248 
1249  // ldof_sign.SetSize(n);
1250  // ldof_sign = 1;
1251  ldof_sign.DeleteAll();
1252 
1253  ltdof_size = 0;
1254  ldof_ltdof.SetSize(n);
1255  for (int i = 0; i < n; i++)
1256  {
1257  if (gt.IAmMaster(ldof_group[i]))
1258  {
1259  ldof_ltdof[i] = ltdof_size;
1260  ltdof_size++;
1261  }
1262  else
1263  {
1264  ldof_ltdof[i] = -2;
1265  }
1266  }
1267  gcomm->SetLTDofTable(ldof_ltdof);
1268 
1269  // have the group masters broadcast their ltdofs to the rest of the group
1270  gcomm->Bcast(ldof_ltdof);
1271 }
1272 
1273 void ParFiniteElementSpace::GetGhostVertexDofs(const MeshId &id,
1274  Array<int> &dofs) const
1275 {
1276  int nv = fec->DofForGeometry(Geometry::POINT);
1277  dofs.SetSize(nv);
1278  for (int j = 0; j < nv; j++)
1279  {
1280  dofs[j] = ndofs + nv*id.index + j;
1281  }
1282 }
1283 
1284 void ParFiniteElementSpace::GetGhostEdgeDofs(const MeshId &edge_id,
1285  Array<int> &dofs) const
1286 {
1287  int nv = fec->DofForGeometry(Geometry::POINT);
1288  int ne = fec->DofForGeometry(Geometry::SEGMENT);
1289  dofs.SetSize(2*nv + ne);
1290 
1291  int V[2], ghost = pncmesh->GetNVertices();
1292  pmesh->pncmesh->GetEdgeVertices(edge_id, V);
1293 
1294  for (int i = 0; i < 2; i++)
1295  {
1296  int k = (V[i] < ghost) ? V[i]*nv : (ndofs + (V[i] - ghost)*nv);
1297  for (int j = 0; j < nv; j++)
1298  {
1299  dofs[i*nv + j] = k++;
1300  }
1301  }
1302 
1303  int k = ndofs + ngvdofs + (edge_id.index - pncmesh->GetNEdges())*ne;
1304  for (int j = 0; j < ne; j++)
1305  {
1306  dofs[2*nv + j] = k++;
1307  }
1308 }
1309 
1310 void ParFiniteElementSpace::GetGhostFaceDofs(const MeshId &face_id,
1311  Array<int> &dofs) const
1312 {
1313  const int ghost_face_index = face_id.index - pncmesh->GetNFaces();
1314  MFEM_ASSERT(pncmesh->GetGhostFaceGeometry(ghost_face_index)
1315  == Geometry::SQUARE, "");
1316 
1317  int nv = fec->DofForGeometry(Geometry::POINT);
1318  int ne = fec->DofForGeometry(Geometry::SEGMENT);
1319  int nf = fec->DofForGeometry(Geometry::SQUARE);
1320  dofs.SetSize(4*nv + 4*ne + nf);
1321 
1322  int V[4], E[4], Eo[4];
1323  pmesh->pncmesh->GetFaceVerticesEdges(face_id, V, E, Eo);
1324 
1325  int offset = 0;
1326  for (int i = 0; i < 4; i++)
1327  {
1328  int ghost = pncmesh->GetNVertices();
1329  int first = (V[i] < ghost) ? V[i]*nv : (ndofs + (V[i] - ghost)*nv);
1330  for (int j = 0; j < nv; j++)
1331  {
1332  dofs[offset++] = first + j;
1333  }
1334  }
1335 
1336  for (int i = 0; i < 4; i++)
1337  {
1338  int ghost = pncmesh->GetNEdges();
1339  int first = (E[i] < ghost) ? nvdofs + E[i]*ne
1340  /* */ : ndofs + ngvdofs + (E[i] - ghost)*ne;
1341  const int *ind = fec->DofOrderForOrientation(Geometry::SEGMENT, Eo[i]);
1342  for (int j = 0; j < ne; j++)
1343  {
1344  dofs[offset++] = (ind[j] >= 0) ? (first + ind[j])
1345  /* */ : (-1 - (first + (-1 - ind[j])));
1346  }
1347  }
1348 
1349  // Assuming all ghost faces have the same number of dofs:
1350  int first = ndofs + ngvdofs + ngedofs + ghost_face_index*nf;
1351  for (int j = 0; j < nf; j++)
1352  {
1353  dofs[offset++] = first + j;
1354  }
1355 }
1356 
1357 void ParFiniteElementSpace::GetGhostDofs(int entity, const MeshId &id,
1358  Array<int> &dofs) const
1359 {
1360  // helper to get ghost vertex, ghost edge or ghost face DOFs
1361  switch (entity)
1362  {
1363  case 0: GetGhostVertexDofs(id, dofs); break;
1364  case 1: GetGhostEdgeDofs(id, dofs); break;
1365  case 2: GetGhostFaceDofs(id, dofs); break;
1366  }
1367 }
1368 
1369 void ParFiniteElementSpace::GetBareDofs(int entity, int index,
1370  Array<int> &dofs) const
1371 {
1372  int ned, ghost, first;
1373  switch (entity)
1374  {
1375  case 0:
1376  ned = fec->DofForGeometry(Geometry::POINT);
1377  ghost = pncmesh->GetNVertices();
1378  first = (index < ghost)
1379  ? index*ned // regular vertex
1380  : ndofs + (index - ghost)*ned; // ghost vertex
1381  break;
1382 
1383  case 1:
1384  ned = fec->DofForGeometry(Geometry::SEGMENT);
1385  ghost = pncmesh->GetNEdges();
1386  first = (index < ghost)
1387  ? nvdofs + index*ned // regular edge
1388  : ndofs + ngvdofs + (index - ghost)*ned; // ghost edge
1389  break;
1390 
1391  default:
1392  MFEM_ASSERT(!pmesh->HasGeometry(Geometry::TRIANGLE), "");
1393  ned = fec->DofForGeometry(Geometry::SQUARE);
1394  ghost = pncmesh->GetNFaces();
1395  first = (index < ghost)
1396  ? nvdofs + nedofs + index*ned // regular face
1397  : ndofs + ngvdofs + ngedofs + (index - ghost)*ned; // ghost
1398  break;
1399  }
1400 
1401  dofs.SetSize(ned);
1402  for (int i = 0; i < ned; i++)
1403  {
1404  dofs[i] = first + i;
1405  }
1406 }
1407 
1408 int ParFiniteElementSpace::PackDof(int entity, int index, int edof) const
1409 {
1410  // DOFs are ordered as follows:
1411  // vertices | edges | faces | internal | ghost vert. | g. edges | g. faces
1412 
1413  int ghost, ned;
1414  switch (entity)
1415  {
1416  case 0:
1417  ghost = pncmesh->GetNVertices();
1418  ned = fec->DofForGeometry(Geometry::POINT);
1419 
1420  return (index < ghost)
1421  ? index*ned + edof // regular vertex
1422  : ndofs + (index - ghost)*ned + edof; // ghost vertex
1423 
1424  case 1:
1425  ghost = pncmesh->GetNEdges();
1426  ned = fec->DofForGeometry(Geometry::SEGMENT);
1427 
1428  return (index < ghost)
1429  ? nvdofs + index*ned + edof // regular edge
1430  : ndofs + ngvdofs + (index - ghost)*ned + edof; // ghost edge
1431 
1432  default:
1433  MFEM_ASSERT(!pmesh->HasGeometry(Geometry::TRIANGLE), "");
1434  ghost = pncmesh->GetNFaces();
1435  ned = fec->DofForGeometry(Geometry::SQUARE);
1436 
1437  return (index < ghost)
1438  ? nvdofs + nedofs + index*ned + edof // regular face
1439  : ndofs + ngvdofs + ngedofs + (index - ghost)*ned + edof; //ghost
1440  }
1441 }
1442 
1443 /** Dissect a DOF number to obtain the entity type (0=vertex, 1=edge, 2=face),
1444  * entity index and the DOF number within the entity.
1445  */
1446 void ParFiniteElementSpace::UnpackDof(int dof,
1447  int &entity, int &index, int &edof) const
1448 {
1449  MFEM_VERIFY(dof >= 0, "");
1450  if (dof < ndofs)
1451  {
1452  if (dof < nvdofs) // regular vertex
1453  {
1454  int nv = fec->DofForGeometry(Geometry::POINT);
1455  entity = 0, index = dof / nv, edof = dof % nv;
1456  return;
1457  }
1458  dof -= nvdofs;
1459  if (dof < nedofs) // regular edge
1460  {
1461  int ne = fec->DofForGeometry(Geometry::SEGMENT);
1462  entity = 1, index = dof / ne, edof = dof % ne;
1463  return;
1464  }
1465  dof -= nedofs;
1466  if (dof < nfdofs) // regular face
1467  {
1468  MFEM_ASSERT(!pmesh->HasGeometry(Geometry::TRIANGLE), "");
1469  int nf = fec->DofForGeometry(Geometry::SQUARE);
1470  entity = 2, index = dof / nf, edof = dof % nf;
1471  return;
1472  }
1473  MFEM_ABORT("Cannot unpack internal DOF");
1474  }
1475  else
1476  {
1477  dof -= ndofs;
1478  if (dof < ngvdofs) // ghost vertex
1479  {
1480  int nv = fec->DofForGeometry(Geometry::POINT);
1481  entity = 0, index = pncmesh->GetNVertices() + dof / nv, edof = dof % nv;
1482  return;
1483  }
1484  dof -= ngvdofs;
1485  if (dof < ngedofs) // ghost edge
1486  {
1487  int ne = fec->DofForGeometry(Geometry::SEGMENT);
1488  entity = 1, index = pncmesh->GetNEdges() + dof / ne, edof = dof % ne;
1489  return;
1490  }
1491  dof -= ngedofs;
1492  if (dof < ngfdofs) // ghost face
1493  {
1494  int nf = fec->DofForGeometry(pncmesh->GetGhostFaceGeometry(0));
1495  entity = 2, index = pncmesh->GetNFaces() + dof / nf, edof = dof % nf;
1496  return;
1497  }
1498  MFEM_ABORT("Out of range DOF.");
1499  }
1500 }
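// Worked example (not part of the original file): for a scalar space with
// 1 DOF per vertex, ne = 2 DOFs per edge and nvdofs = 8 vertex DOFs,
// packing edge index 3, edof 1 gives
//
//    PackDof(1, 3, 1) == nvdofs + 3*ne + 1 == 8 + 6 + 1 == 15
//
// and UnpackDof(15) recovers entity = 1, index = (15-8)/2 == 3,
// edof = (15-8)%2 == 1, i.e. UnpackDof() inverts PackDof() exactly
// (for non-ghost DOFs below 'ndofs').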
1501 
1502 /** Represents an element of the P matrix. The column number is global and
1503  * corresponds to vector dimension 0. The other dimension columns are offset
1504  * by 'stride'.
1505  */
1506 struct PMatrixElement
1507 {
1508  HYPRE_Int column, stride;
1509  double value;
1510 
1511  PMatrixElement(HYPRE_Int col = 0, HYPRE_Int str = 0, double val = 0)
1512  : column(col), stride(str), value(val) {}
1513 
1514  bool operator<(const PMatrixElement &other) const
1515  { return column < other.column; }
1516 
1517  typedef std::vector<PMatrixElement> List;
1518 };
1519 
1520 /** Represents one row of the P matrix, for the construction code below.
1521  * The row is complete: diagonal and offdiagonal elements are not distinguished.
1522  */
1523 struct PMatrixRow
1524 {
1525  PMatrixElement::List elems;
1526 
1527  /// Add other row, times 'coef'.
1528  void AddRow(const PMatrixRow &other, double coef)
1529  {
1530  elems.reserve(elems.size() + other.elems.size());
1531  for (unsigned i = 0; i < other.elems.size(); i++)
1532  {
1533  const PMatrixElement &oei = other.elems[i];
1534  elems.push_back(
1535  PMatrixElement(oei.column, oei.stride, coef * oei.value));
1536  }
1537  }
1538 
1539  /// Remove duplicate columns and sum their values.
1540  void Collapse()
1541  {
1542  if (!elems.size()) { return; }
1543  std::sort(elems.begin(), elems.end());
1544 
1545  int j = 0;
1546  for (unsigned i = 1; i < elems.size(); i++)
1547  {
1548  if (elems[j].column == elems[i].column)
1549  {
1550  elems[j].value += elems[i].value;
1551  }
1552  else
1553  {
1554  elems[++j] = elems[i];
1555  }
1556  }
1557  elems.resize(j+1);
1558  }
1559 
1560  void write(std::ostream &os, double sign) const
1561  {
1562  bin_io::write<int>(os, elems.size());
1563  for (unsigned i = 0; i < elems.size(); i++)
1564  {
1565  const PMatrixElement &e = elems[i];
1566  bin_io::write<HYPRE_Int>(os, e.column);
1567  bin_io::write<int>(os, e.stride); // truncate HYPRE_Int -> int
1568  bin_io::write<double>(os, e.value * sign);
1569  }
1570  }
1571 
1572  void read(std::istream &is, double sign)
1573  {
1574  elems.resize(bin_io::read<int>(is));
1575  for (unsigned i = 0; i < elems.size(); i++)
1576  {
1577  PMatrixElement &e = elems[i];
1578  e.column = bin_io::read<HYPRE_Int>(is);
1579  e.stride = bin_io::read<int>(is);
1580  e.value = bin_io::read<double>(is) * sign;
1581  }
1582  }
1583 };
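// Worked example (not part of the original file): Collapse() sorts by column
// and merges duplicates, which makes repeated AddRow() calls safe:
//
//    elems = { (col 5, 0.25), (col 3, 0.5), (col 5, 0.25) }
//    row.Collapse();  // -> { (col 3, 0.5), (col 5, 0.5) }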
1584 
1585 /** Represents a message to another processor containing P matrix rows.
1586  * Used by ParFiniteElementSpace::ParallelConformingInterpolation.
1587  */
1588 class NeighborRowMessage : public VarMessage<314>
1589 {
1590 public:
1591  typedef NCMesh::MeshId MeshId;
1592  typedef ParNCMesh::GroupId GroupId;
1593 
1594  struct RowInfo
1595  {
1596  int entity, index, edof;
1597  GroupId group;
1598  PMatrixRow row;
1599 
1600  RowInfo(int ent, int idx, int edof, GroupId grp, const PMatrixRow &row)
1601  : entity(ent), index(idx), edof(edof), group(grp), row(row) {}
1602 
1603  RowInfo(int ent, int idx, int edof, GroupId grp)
1604  : entity(ent), index(idx), edof(edof), group(grp) {}
1605 
1606  typedef std::vector<RowInfo> List;
1607  };
1608 
1609  NeighborRowMessage() : pncmesh(NULL) {}
1610 
1611  void AddRow(int entity, int index, int edof, GroupId group,
1612  const PMatrixRow &row)
1613  {
1614  rows.push_back(RowInfo(entity, index, edof, group, row));
1615  }
1616 
1617  const RowInfo::List& GetRows() const { return rows; }
1618 
1619  void SetNCMesh(ParNCMesh* pnc) { pncmesh = pnc; }
1620  void SetFEC(const FiniteElementCollection* fec) { this->fec = fec; }
1621 
1622  typedef std::map<int, NeighborRowMessage> Map;
1623 
1624 protected:
1625  RowInfo::List rows;
1626 
1627  ParNCMesh *pncmesh;
1628  const FiniteElementCollection* fec;
1629 
1630  virtual void Encode(int rank);
1631  virtual void Decode(int);
1632 };
1633 
1634 
1635 void NeighborRowMessage::Encode(int rank)
1636 {
1637  std::ostringstream stream;
1638 
1639  Array<MeshId> ent_ids[3];
1640  Array<GroupId> group_ids[3];
1641  Array<int> row_idx[3];
1642 
1643  // encode MeshIds and groups
1644  for (unsigned i = 0; i < rows.size(); i++)
1645  {
1646  const RowInfo &ri = rows[i];
1647  const MeshId &id = pncmesh->GetNCList(ri.entity).LookUp(ri.index);
1648  ent_ids[ri.entity].Append(id);
1649  row_idx[ri.entity].Append(i);
1650  group_ids[ri.entity].Append(ri.group);
1651  }
1652 
1653  Array<GroupId> all_group_ids;
1654  all_group_ids.Reserve(rows.size());
1655  for (int i = 0; i < 3; i++)
1656  {
1657  all_group_ids.Append(group_ids[i]);
1658  }
1659 
1660  pncmesh->AdjustMeshIds(ent_ids, rank);
1661  pncmesh->EncodeMeshIds(stream, ent_ids);
1662  pncmesh->EncodeGroups(stream, all_group_ids);
1663 
1664  // write all rows to the stream
1665  for (int ent = 0; ent < 3; ent++)
1666  {
1667  const Array<MeshId> &ids = ent_ids[ent];
1668  for (int i = 0; i < ids.Size(); i++)
1669  {
1670  const MeshId &id = ids[i];
1671  const RowInfo &ri = rows[row_idx[ent][i]];
1672  MFEM_ASSERT(ent == ri.entity, "");
1673 
1674 #ifdef MFEM_DEBUG_PMATRIX
1675  mfem::out << "Rank " << pncmesh->MyRank << " sending to " << rank
1676  << ": ent " << ri.entity << ", index " << ri.index
1677  << ", edof " << ri.edof << " (id " << id.element << "/"
1678  << id.local << ")" << std::endl;
1679 #endif
1680 
1681  // handle orientation and sign change
1682  int edof = ri.edof;
1683  double s = 1.0;
1684  if (ent == 1)
1685  {
1686  int eo = pncmesh->GetEdgeNCOrientation(id);
1687  const int* ind = fec->DofOrderForOrientation(Geometry::SEGMENT, eo);
1688  if ((edof = ind[edof]) < 0)
1689  {
1690  edof = -1 - edof;
1691  s = -1;
1692  }
1693  }
1694 
1695  bin_io::write<int>(stream, edof);
1696  ri.row.write(stream, s);
1697  }
1698  }
1699 
1700  rows.clear();
1701  stream.str().swap(data);
1702 }
1703 
1704 void NeighborRowMessage::Decode(int rank)
1705 {
1706  std::istringstream stream(data);
1707 
1708  Array<MeshId> ent_ids[3];
1709  Array<GroupId> group_ids;
1710 
1711  // decode vertex/edge/face IDs and groups
1712  pncmesh->DecodeMeshIds(stream, ent_ids);
1713  pncmesh->DecodeGroups(stream, group_ids);
1714 
1715  int nrows = ent_ids[0].Size() + ent_ids[1].Size() + ent_ids[2].Size();
1716  MFEM_ASSERT(nrows == group_ids.Size(), "");
1717 
1718  rows.clear();
1719  rows.reserve(nrows);
1720 
1721  Geometry::Type fgeom = pncmesh->GetFaceGeometry();
1722 
1723  // read rows
1724  for (int ent = 0, gi = 0; ent < 3; ent++)
1725  {
1726  const Array<MeshId> &ids = ent_ids[ent];
1727  for (int i = 0; i < ids.Size(); i++)
1728  {
1729  const MeshId &id = ids[i];
1730  int edof = bin_io::read<int>(stream);
1731 
1732  // handle orientation and sign change
1733  const int *ind = NULL;
1734  if (ent == 1)
1735  {
1736  int eo = pncmesh->GetEdgeNCOrientation(id);
1737  ind = fec->DofOrderForOrientation(Geometry::SEGMENT, eo);
1738  }
1739  else if (ent == 2)
1740  {
1741  int fo = pncmesh->GetFaceOrientation(id.index);
1742  ind = fec->DofOrderForOrientation(fgeom, fo);
1743  }
1744 
1745  double s = 1.0;
1746  if (ind && (edof = ind[edof]) < 0)
1747  {
1748  edof = -1 - edof;
1749  s = -1.0;
1750  }
1751 
1752  rows.push_back(RowInfo(ent, id.index, edof, group_ids[gi++]));
1753  rows.back().row.read(stream, s);
1754 
1755 #ifdef MFEM_DEBUG_PMATRIX
1756  mfem::out << "Rank " << pncmesh->MyRank << " receiving from " << rank
1757  << ": ent " << rows.back().entity << ", index "
1758  << rows.back().index << ", edof " << rows.back().edof
1759  << std::endl;
1760 #endif
1761  }
1762  }
1763 }
1764 
1765 void
1766 ParFiniteElementSpace::ScheduleSendRow(const PMatrixRow &row, int dof,
1767  GroupId group_id,
1768  NeighborRowMessage::Map &send_msg) const
1769 {
1770  int ent, idx, edof;
1771  UnpackDof(dof, ent, idx, edof);
1772 
1773  const ParNCMesh::CommGroup &group = pncmesh->GetGroup(group_id);
1774  for (unsigned i = 0; i < group.size(); i++)
1775  {
1776  int rank = group[i];
1777  if (rank != MyRank)
1778  {
1779  NeighborRowMessage &msg = send_msg[rank];
1780  msg.AddRow(ent, idx, edof, group_id, row);
1781  msg.SetNCMesh(pncmesh);
1782  msg.SetFEC(fec);
1783 #ifdef MFEM_PMATRIX_STATS
1784  n_rows_sent++;
1785 #endif
1786  }
1787  }
1788 }
1789 
1790 void ParFiniteElementSpace::ForwardRow(const PMatrixRow &row, int dof,
1791  GroupId group_sent_id, GroupId group_id,
1792  NeighborRowMessage::Map &send_msg) const
1793 {
1794  int ent, idx, edof;
1795  UnpackDof(dof, ent, idx, edof);
1796 
1797  const ParNCMesh::CommGroup &group = pncmesh->GetGroup(group_id);
1798  for (unsigned i = 0; i < group.size(); i++)
1799  {
1800  int rank = group[i];
1801  if (rank != MyRank && !pncmesh->GroupContains(group_sent_id, rank))
1802  {
1803  NeighborRowMessage &msg = send_msg[rank];
1804  GroupId invalid = -1; // to prevent forwarding again
1805  msg.AddRow(ent, idx, edof, invalid, row);
1806  msg.SetNCMesh(pncmesh);
1807  msg.SetFEC(fec);
1808 #ifdef MFEM_PMATRIX_STATS
1809  n_rows_fwd++;
1810 #endif
1811 #ifdef MFEM_DEBUG_PMATRIX
1812  mfem::out << "Rank " << pncmesh->GetMyRank() << " forwarding to "
1813  << rank << ": ent " << ent << ", index" << idx
1814  << ", edof " << edof << std::endl;
1815 #endif
1816  }
1817  }
1818 }
1819 
1820 #ifdef MFEM_DEBUG_PMATRIX
1821 void ParFiniteElementSpace
1822 ::DebugDumpDOFs(std::ostream &os,
1823  const SparseMatrix &deps,
1824  const Array<GroupId> &dof_group,
1825  const Array<GroupId> &dof_owner,
1826  const Array<bool> &finalized) const
1827 {
1828  for (int i = 0; i < dof_group.Size(); i++)
1829  {
1830  os << i << ": ";
1831  if (i < (nvdofs + nedofs + nfdofs) || i > ndofs)
1832  {
1833  int ent, idx, edof;
1834  UnpackDof(i, ent, idx, edof);
1835 
1836  os << edof << " @ ";
1837  if (i > ndofs) { os << "ghost "; }
1838  switch (ent)
1839  {
1840  case 0: os << "vertex "; break;
1841  case 1: os << "edge "; break;
1842  default: os << "face "; break;
1843  }
1844  os << idx << "; ";
1845 
1846  if (i < deps.Height() && deps.RowSize(i))
1847  {
1848  os << "depends on ";
1849  for (int j = 0; j < deps.RowSize(i); j++)
1850  {
1851  os << deps.GetRowColumns(i)[j] << " ("
1852  << deps.GetRowEntries(i)[j] << ")";
1853  if (j < deps.RowSize(i)-1) { os << ", "; }
1854  }
1855  os << "; ";
1856  }
1857  else
1858  {
1859  os << "no deps; ";
1860  }
1861 
1862  os << "group " << dof_group[i] << " (";
1863  const ParNCMesh::CommGroup &g = pncmesh->GetGroup(dof_group[i]);
1864  for (unsigned j = 0; j < g.size(); j++)
1865  {
1866  if (j) { os << ", "; }
1867  os << g[j];
1868  }
1869 
1870  os << "), owner " << dof_owner[i] << " (rank "
1871  << pncmesh->GetGroup(dof_owner[i])[0] << "); "
1872  << (finalized[i] ? "finalized" : "NOT finalized");
1873  }
1874  else
1875  {
1876  os << "internal";
1877  }
1878  os << "\n";
1879  }
1880 }
1881 #endif
1882 
1883 int ParFiniteElementSpace
1884 ::BuildParallelConformingInterpolation(HypreParMatrix **P, SparseMatrix **R,
1885  Array<HYPRE_Int> &dof_offs,
1886  Array<HYPRE_Int> &tdof_offs,
1887  Array<int> *dof_tdof,
1888  bool partial) const
1889 {
1890  bool dg = (nvdofs == 0 && nedofs == 0 && nfdofs == 0);
1891 
1892 #ifdef MFEM_PMATRIX_STATS
1893  n_msgs_sent = n_msgs_recv = 0;
1894  n_rows_sent = n_rows_recv = n_rows_fwd = 0;
1895 #endif
1896 
1897  // *** STEP 1: build master-slave dependency lists ***
1898 
1899  int total_dofs = ndofs + ngdofs;
1900  SparseMatrix deps(ndofs, total_dofs);
1901 
1902  if (!dg && !partial)
1903  {
1904  Array<int> master_dofs, slave_dofs;
1905 
1906  // loop through *all* master edges/faces, constrain their slaves
1907  for (int entity = 0; entity <= 2; entity++)
1908  {
1909  const NCMesh::NCList &list = pncmesh->GetNCList(entity);
1910  if (!list.masters.size()) { continue; }
1911 
1912  IsoparametricTransformation T;
1913  if (entity > 1) { T.SetFE(&QuadrilateralFE); }
1914  else { T.SetFE(&SegmentFE); }
1915 
1916  Geometry::Type geom = (entity > 1) ?
1917  Geometry::SQUARE : Geometry::SEGMENT;
1918  const FiniteElement* fe = fec->FiniteElementForGeometry(geom);
1919  if (!fe) { continue; }
1920 
1921  DenseMatrix I(fe->GetDof());
1922 
1923  // process masters that we own or that affect our edges/faces
1924  for (unsigned mi = 0; mi < list.masters.size(); mi++)
1925  {
1926  const NCMesh::Master &mf = list.masters[mi];
1927 
1928  // get master DOFs
1929  pncmesh->IsGhost(entity, mf.index)
1930  ? GetGhostDofs(entity, mf, master_dofs)
1931  : GetEntityDofs(entity, mf.index, master_dofs);
1932 
1933  if (!master_dofs.Size()) { continue; }
1934 
1935  // constrain slaves that exist in our mesh
1936  for (int si = mf.slaves_begin; si < mf.slaves_end; si++)
1937  {
1938  const NCMesh::Slave &sf = list.slaves[si];
1939  if (pncmesh->IsGhost(entity, sf.index)) { continue; }
1940 
1941  GetEntityDofs(entity, sf.index, slave_dofs);
1942  if (!slave_dofs.Size()) { continue; }
1943 
1944  sf.OrientedPointMatrix(T.GetPointMat());
1945  T.FinalizeTransformation();
1946  fe->GetLocalInterpolation(T, I);
1947 
1948  // make each slave DOF dependent on all master DOFs
1949  AddDependencies(deps, master_dofs, slave_dofs, I);
1950  }
1951  }
1952  }
1953 
1954  deps.Finalize();
1955  }
1956 
1957  // *** STEP 2: initialize group and owner ID for each DOF ***
1958 
1959  Array<GroupId> dof_group(total_dofs);
1960  Array<GroupId> dof_owner(total_dofs);
1961  dof_group = 0;
1962  dof_owner = 0;
1963 
1964  if (!dg)
1965  {
1966  Array<int> dofs;
1967 
1968  // initialize dof_group[], dof_owner[]
1969  for (int entity = 0; entity <= 2; entity++)
1970  {
1971  const NCMesh::NCList &list = pncmesh->GetNCList(entity);
1972 
1973  std::size_t lsize[3] =
1974  { list.conforming.size(), list.masters.size(), list.slaves.size() };
1975 
1976  for (int l = 0; l < 3; l++)
1977  {
1978  for (std::size_t i = 0; i < lsize[l]; i++)
1979  {
1980  const MeshId &id =
1981  (l == 0) ? list.conforming[i] :
1982  (l == 1) ? (const MeshId&) list.masters[i]
1983  /* */ : (const MeshId&) list.slaves[i];
1984 
1985  GroupId owner = pncmesh->GetEntityOwnerId(entity, id.index);
1986  GroupId group = pncmesh->GetEntityGroupId(entity, id.index);
1987 
1988  GetBareDofs(entity, id.index, dofs);
1989 
1990  for (int j = 0; j < dofs.Size(); j++)
1991  {
1992  int dof = dofs[j];
1993  dof_owner[dof] = owner;
1994  dof_group[dof] = group;
1995  }
1996  }
1997  }
1998  }
1999  }
2000 
2001  // *** STEP 3: count true DOFs and calculate P row/column partitions ***
2002 
2003  Array<bool> finalized(total_dofs);
2004  finalized = false;
2005 
2006  // DOFs that stayed independent and are ours are true DOFs
2007  int num_true_dofs = 0;
2008  for (int i = 0; i < ndofs; i++)
2009  {
2010  if (dof_owner[i] == 0 && deps.RowSize(i) == 0)
2011  {
2012  num_true_dofs++;
2013  finalized[i] = true;
2014  }
2015  }
2016 
2017  // calculate global offsets
2018  HYPRE_Int loc_sizes[2] = { ndofs*vdim, num_true_dofs*vdim };
2019  Array<HYPRE_Int>* offsets[2] = { &dof_offs, &tdof_offs };
2020  pmesh->GenerateOffsets(2, loc_sizes, offsets); // calls MPI_Scan, MPI_Bcast
2021 
2022  HYPRE_Int my_tdof_offset =
2023  tdof_offs[HYPRE_AssumedPartitionCheck() ? 0 : MyRank];
2024 
2025  if (R)
2026  {
2027  // initialize the restriction matrix (also parallel but block-diagonal)
2028  *R = new SparseMatrix(num_true_dofs*vdim, ndofs*vdim);
2029  }
2030  if (dof_tdof)
2031  {
2032  dof_tdof->SetSize(ndofs*vdim);
2033  *dof_tdof = -1;
2034  }
2035 
2036  std::vector<PMatrixRow> pmatrix(total_dofs);
2037 
2038  bool bynodes = (ordering == Ordering::byNODES);
2039  int vdim_factor = bynodes ? 1 : vdim;
2040  int dof_stride = bynodes ? ndofs : 1;
2041  int tdof_stride = bynodes ? num_true_dofs : 1;
2042 
2043  // big container for all messages we send (the list is for iterations)
2044  std::list<NeighborRowMessage::Map> send_msg;
2045  send_msg.push_back(NeighborRowMessage::Map());
2046 
2047  // put identity in P and R for true DOFs, set ldof_ltdof
2048  for (int dof = 0, tdof = 0; dof < ndofs; dof++)
2049  {
2050  if (finalized[dof])
2051  {
2052  pmatrix[dof].elems.push_back(
2053  PMatrixElement(my_tdof_offset + vdim_factor*tdof, tdof_stride, 1.));
2054 
2055  // prepare messages to neighbors with identity rows
2056  if (dof_group[dof] != 0)
2057  {
2058  ScheduleSendRow(pmatrix[dof], dof, dof_group[dof], send_msg.back());
2059  }
2060 
2061  for (int vd = 0; vd < vdim; vd++)
2062  {
2063  int vdof = dof*vdim_factor + vd*dof_stride;
2064  int vtdof = tdof*vdim_factor + vd*tdof_stride;
2065 
2066  if (R) { (*R)->Add(vtdof, vdof, 1.0); }
2067  if (dof_tdof) { (*dof_tdof)[vdof] = vtdof; }
2068  }
2069  tdof++;
2070  }
2071  }
2072 
2073  // send identity rows
2074  NeighborRowMessage::IsendAll(send_msg.back(), MyComm);
2075 #ifdef MFEM_PMATRIX_STATS
2076  n_msgs_sent += send_msg.back().size();
2077 #endif
2078 
2079  if (R) { (*R)->Finalize(); }
2080 
2081  // *** STEP 4: main loop ***
2082 
2083  // a single instance (recv_msg) is reused for all incoming messages
2084  NeighborRowMessage recv_msg;
2085  recv_msg.SetNCMesh(pncmesh);
2086  recv_msg.SetFEC(fec);
2087 
2088  int num_finalized = num_true_dofs;
2089  PMatrixRow buffer;
2090  buffer.elems.reserve(1024);
2091 
2092  while (num_finalized < ndofs)
2093  {
2094  // prepare a new round of send buffers
2095  if (send_msg.back().size())
2096  {
2097  send_msg.push_back(NeighborRowMessage::Map());
2098  }
2099 
2100  // check for incoming messages, receive PMatrixRows
2101  int rank, size;
2102  while (NeighborRowMessage::IProbe(rank, size, MyComm))
2103  {
2104  recv_msg.Recv(rank, size, MyComm);
2105 #ifdef MFEM_PMATRIX_STATS
2106  n_msgs_recv++;
2107  n_rows_recv += recv_msg.GetRows().size();
2108 #endif
2109 
2110  const NeighborRowMessage::RowInfo::List &rows = recv_msg.GetRows();
2111  for (unsigned i = 0; i < rows.size(); i++)
2112  {
2113  const NeighborRowMessage::RowInfo &ri = rows[i];
2114  int dof = PackDof(ri.entity, ri.index, ri.edof);
2115  pmatrix[dof] = ri.row;
2116 
2117  if (dof < ndofs && !finalized[dof]) { num_finalized++; }
2118  finalized[dof] = true;
2119 
2120  if (ri.group >= 0 && dof_group[dof] != ri.group)
2121  {
2122  // the sender didn't see the complete group, forward the message
2123  ForwardRow(ri.row, dof, ri.group, dof_group[dof], send_msg.back());
2124  }
2125  }
2126  }
2127 
2128  // finalize all rows that can currently be finalized
2129  bool done = false;
2130  while (!done)
2131  {
2132  done = true;
2133  for (int dof = 0; dof < ndofs; dof++)
2134  {
2135  if (finalized[dof]) { continue; }
2136 
2137  bool owned = (dof_owner[dof] == 0);
2138  bool shared = (dof_group[dof] != 0);
2139 
2140  if (owned && DofFinalizable(dof, finalized, deps))
2141  {
2142  const int* dep_col = deps.GetRowColumns(dof);
2143  const double* dep_coef = deps.GetRowEntries(dof);
2144  int num_dep = deps.RowSize(dof);
2145 
2146  // form linear combination of rows
2147  buffer.elems.clear();
2148  for (int j = 0; j < num_dep; j++)
2149  {
2150  buffer.AddRow(pmatrix[dep_col[j]], dep_coef[j]);
2151  }
2152  buffer.Collapse();
2153  pmatrix[dof] = buffer;
2154 
2155  finalized[dof] = true;
2156  num_finalized++;
2157  done = false;
2158 
2159  // send row to neighbors who need it
2160  if (shared)
2161  {
2162  ScheduleSendRow(pmatrix[dof], dof, dof_group[dof],
2163  send_msg.back());
2164  }
2165  }
2166  }
2167  }
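       // NOTE: the sweep above is repeated because finalizing one row may
       // make other rows finalizable (dependencies can chain); we stop only
       // when a full pass makes no progress, then wait for more remote rows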
2168 
2169 #ifdef MFEM_DEBUG_PMATRIX
2170  /*static int dump = 0;
2171  if (dump < 10)
2172  {
2173  char fname[100];
2174  sprintf(fname, "dofs%02d.txt", MyRank);
2175  std::ofstream f(fname);
2176  DebugDumpDOFs(f, deps, dof_group, dof_owner, finalized);
2177  dump++;
2178  }*/
2179 #endif
2180 
2181  // send current batch of messages
2182  NeighborRowMessage::IsendAll(send_msg.back(), MyComm);
2183 #ifdef MFEM_PMATRIX_STATS
2184  n_msgs_sent += send_msg.back().size();
2185 #endif
2186  }
2187 
2188  if (P)
2189  {
2190  *P = MakeVDimHypreMatrix(pmatrix, ndofs, num_true_dofs,
2191  dof_offs, tdof_offs);
2192  }
2193 
2194  // clean up possible remaining messages in the queue to avoid receiving
2195  // them erroneously in the next run
2196  int rank, size;
2197  while (NeighborRowMessage::IProbe(rank, size, MyComm))
2198  {
2199  recv_msg.RecvDrop(rank, size, MyComm);
2200  }
2201 
2202  // make sure we can discard all send buffers
2203  for (std::list<NeighborRowMessage::Map>::iterator
2204  it = send_msg.begin(); it != send_msg.end(); ++it)
2205  {
2206  NeighborRowMessage::WaitAllSent(*it);
2207  }
2208 
2209 #ifdef MFEM_PMATRIX_STATS
2210  int n_rounds = send_msg.size();
2211  int glob_rounds, glob_msgs_sent, glob_msgs_recv;
2212  int glob_rows_sent, glob_rows_recv, glob_rows_fwd;
2213 
2214  MPI_Reduce(&n_rounds, &glob_rounds, 1, MPI_INT, MPI_SUM, 0, MyComm);
2215  MPI_Reduce(&n_msgs_sent, &glob_msgs_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
2216  MPI_Reduce(&n_msgs_recv, &glob_msgs_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
2217  MPI_Reduce(&n_rows_sent, &glob_rows_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
2218  MPI_Reduce(&n_rows_recv, &glob_rows_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
2219  MPI_Reduce(&n_rows_fwd, &glob_rows_fwd, 1, MPI_INT, MPI_SUM, 0, MyComm);
2220 
2221  if (MyRank == 0)
2222  {
2223  mfem::out << "P matrix stats (avg per rank): "
2224  << double(glob_rounds)/NRanks << " rounds, "
2225  << double(glob_msgs_sent)/NRanks << " msgs sent, "
2226  << double(glob_msgs_recv)/NRanks << " msgs recv, "
2227  << double(glob_rows_sent)/NRanks << " rows sent, "
2228  << double(glob_rows_recv)/NRanks << " rows recv, "
2229  << double(glob_rows_fwd)/NRanks << " rows forwarded."
2230  << std::endl;
2231  }
2232 #endif
2233 
2234  return num_true_dofs*vdim;
2235 }
2236 
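// Example (sketch): typical use of the interpolation built above, assuming a
// ParFiniteElementSpace 'fes' on a nonconforming ParMesh:
//
//    HypreParMatrix *P = fes.Dof_TrueDof_Matrix();
//    Vector x_true(fes.GetTrueVSize()), x_ldof(fes.GetVSize());
//    P->Mult(x_true, x_ldof); // prolongate true DOFs to local DOFs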
2237 
2238 HypreParMatrix* ParFiniteElementSpace
2239 ::MakeVDimHypreMatrix(const std::vector<PMatrixRow> &rows,
2240  int local_rows, int local_cols,
2241  Array<HYPRE_Int> &row_starts,
2242  Array<HYPRE_Int> &col_starts) const
2243 {
2244  bool assumed = HYPRE_AssumedPartitionCheck();
2245  bool bynodes = (ordering == Ordering::byNODES);
2246 
2247  HYPRE_Int first_col = col_starts[assumed ? 0 : MyRank];
2248  HYPRE_Int next_col = col_starts[assumed ? 1 : MyRank+1];
2249 
2250  // count nonzeros in diagonal/offdiagonal parts
2251  HYPRE_Int nnz_diag = 0, nnz_offd = 0;
2252  std::map<HYPRE_Int, int> col_map;
2253  for (int i = 0; i < local_rows; i++)
2254  {
2255  for (unsigned j = 0; j < rows[i].elems.size(); j++)
2256  {
2257  const PMatrixElement &elem = rows[i].elems[j];
2258  HYPRE_Int col = elem.column;
2259  if (col >= first_col && col < next_col)
2260  {
2261  nnz_diag += vdim;
2262  }
2263  else
2264  {
2265  nnz_offd += vdim;
2266  for (int vd = 0; vd < vdim; vd++)
2267  {
2268  col_map[col] = -1;
2269  col += elem.stride;
2270  }
2271  }
2272  }
2273  }
2274 
2275  // create offd column mapping
2276  HYPRE_Int *cmap = new HYPRE_Int[col_map.size()];
2277  int offd_col = 0;
2278  for (std::map<HYPRE_Int, int>::iterator
2279  it = col_map.begin(); it != col_map.end(); ++it)
2280  {
2281  cmap[offd_col] = it->first;
2282  it->second = offd_col++;
2283  }
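    // NOTE: std::map iterates in increasing key order, so 'cmap' comes out
    // sorted, which is what hypre expects of the off-diagonal column map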
2284 
2285  HYPRE_Int *I_diag = new HYPRE_Int[vdim*local_rows + 1];
2286  HYPRE_Int *I_offd = new HYPRE_Int[vdim*local_rows + 1];
2287 
2288  HYPRE_Int *J_diag = new HYPRE_Int[nnz_diag];
2289  HYPRE_Int *J_offd = new HYPRE_Int[nnz_offd];
2290 
2291  double *A_diag = new double[nnz_diag];
2292  double *A_offd = new double[nnz_offd];
2293 
2294  int vdim1 = bynodes ? vdim : 1;
2295  int vdim2 = bynodes ? 1 : vdim;
2296  int vdim_offset = bynodes ? local_cols : 1;
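    // each scalar row is emitted vdim times: byNODES groups all rows of one
    // component together (outer vd1 loop), while byVDIM interleaves the
    // components of each DOF (inner vd2 loop)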
2297 
2298  // copy the diag/offd elements
2299  nnz_diag = nnz_offd = 0;
2300  int vrow = 0;
2301  for (int vd1 = 0; vd1 < vdim1; vd1++)
2302  {
2303  for (int i = 0; i < local_rows; i++)
2304  {
2305  for (int vd2 = 0; vd2 < vdim2; vd2++)
2306  {
2307  I_diag[vrow] = nnz_diag;
2308  I_offd[vrow++] = nnz_offd;
2309 
2310  int vd = bynodes ? vd1 : vd2;
2311  for (unsigned j = 0; j < rows[i].elems.size(); j++)
2312  {
2313  const PMatrixElement &elem = rows[i].elems[j];
2314  if (elem.column >= first_col && elem.column < next_col)
2315  {
2316  J_diag[nnz_diag] = elem.column + vd*vdim_offset - first_col;
2317  A_diag[nnz_diag++] = elem.value;
2318  }
2319  else
2320  {
2321  J_offd[nnz_offd] = col_map[elem.column + vd*elem.stride];
2322  A_offd[nnz_offd++] = elem.value;
2323  }
2324  }
2325  }
2326  }
2327  }
2328  MFEM_ASSERT(vrow == vdim*local_rows, "");
2329  I_diag[vrow] = nnz_diag;
2330  I_offd[vrow] = nnz_offd;
2331 
2332  return new HypreParMatrix(MyComm,
2333  row_starts.Last(), col_starts.Last(),
2334  row_starts.GetData(), col_starts.GetData(),
2335  I_diag, J_diag, A_diag,
2336  I_offd, J_offd, A_offd,
2337  col_map.size(), cmap);
2338 }
2339 
2340 
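// Helpers for building boolean CSR arrays: make_i_array creates an I array in
// which each row temporarily holds its single column index (or -1 if empty);
// make_j_array then compacts those entries into J and rewrites I as the usual
// CSR row offsets.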
2341 static HYPRE_Int* make_i_array(int nrows)
2342 {
2343  HYPRE_Int *I = new HYPRE_Int[nrows+1];
2344  for (int i = 0; i <= nrows; i++) { I[i] = -1; }
2345  return I;
2346 }
2347 
2348 static HYPRE_Int* make_j_array(HYPRE_Int* I, int nrows)
2349 {
2350  int nnz = 0;
2351  for (int i = 0; i < nrows; i++)
2352  {
2353  if (I[i] >= 0) { nnz++; }
2354  }
2355  HYPRE_Int *J = new HYPRE_Int[nnz];
2356 
2357  I[nrows] = -1;
2358  for (int i = 0, k = 0; i <= nrows; i++)
2359  {
2360  HYPRE_Int col = I[i];
2361  I[i] = k;
2362  if (col >= 0) { J[k++] = col; }
2363  }
2364  return J;
2365 }
2366 
2367 HypreParMatrix*
2368 ParFiniteElementSpace::RebalanceMatrix(int old_ndofs,
2369  const Table* old_elem_dof)
2370 {
2371  MFEM_VERIFY(Nonconforming(), "Only supported for nonconforming meshes.");
2372  MFEM_VERIFY(old_dof_offsets.Size(), "ParFiniteElementSpace::Update needs to "
2373  "be called before ParFiniteElementSpace::RebalanceMatrix");
2374 
2375  HYPRE_Int old_offset = HYPRE_AssumedPartitionCheck()
2376  ? old_dof_offsets[0] : old_dof_offsets[MyRank];
2377 
2378  // send old DOFs of elements we used to own
2379  ParNCMesh* pncmesh = pmesh->pncmesh;
2380  pncmesh->SendRebalanceDofs(old_ndofs, *old_elem_dof, old_offset, this);
2381 
2382  Array<int> dofs;
2383  int vsize = GetVSize();
2384 
2385  const Array<int> &old_index = pncmesh->GetRebalanceOldIndex();
2386  MFEM_VERIFY(old_index.Size() == pmesh->GetNE(),
2387  "Mesh::Rebalance was not called before "
2388  "ParFiniteElementSpace::RebalanceMatrix");
2389 
2390  // prepare the local (diagonal) part of the matrix
2391  HYPRE_Int* i_diag = make_i_array(vsize);
2392  for (int i = 0; i < pmesh->GetNE(); i++)
2393  {
2394  if (old_index[i] >= 0) // we had this element before
2395  {
2396  const int* old_dofs = old_elem_dof->GetRow(old_index[i]);
2397  GetElementDofs(i, dofs);
2398 
2399  for (int vd = 0; vd < vdim; vd++)
2400  {
2401  for (int j = 0; j < dofs.Size(); j++)
2402  {
2403  int row = DofToVDof(dofs[j], vd);
2404  if (row < 0) { row = -1 - row; }
2405 
2406  int col = DofToVDof(old_dofs[j], vd, old_ndofs);
2407  if (col < 0) { col = -1 - col; }
2408 
2409  i_diag[row] = col;
2410  }
2411  }
2412  }
2413  }
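    // each row of i_diag now holds at most one column (the old DOF that the
    // new DOF maps to); the boolean HypreParMatrix constructor used below
    // supplies the implicit unit values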
2414  HYPRE_Int* j_diag = make_j_array(i_diag, vsize);
2415 
2416  // receive old DOFs for elements we obtained from others in Rebalance
2417  Array<int> new_elements;
2418  Array<long> old_remote_dofs;
2419  pncmesh->RecvRebalanceDofs(new_elements, old_remote_dofs);
2420 
2421  // create the offdiagonal part of the matrix
2422  HYPRE_Int* i_offd = make_i_array(vsize);
2423  for (int i = 0; i < new_elements.Size(); i++)
2424  {
2425  GetElementDofs(new_elements[i], dofs);
2426  const long* old_dofs = &old_remote_dofs[i * dofs.Size() * vdim];
2427 
2428  for (int vd = 0; vd < vdim; vd++)
2429  {
2430  for (int j = 0; j < dofs.Size(); j++)
2431  {
2432  int row = DofToVDof(dofs[j], vd);
2433  if (row < 0) { row = -1 - row; }
2434 
2435  if (i_diag[row] == i_diag[row+1]) // diag row empty?
2436  {
2437  i_offd[row] = old_dofs[j + vd * dofs.Size()];
2438  }
2439  }
2440  }
2441  }
2442  HYPRE_Int* j_offd = make_j_array(i_offd, vsize);
2443 
2444  // create the offd column map
2445  int offd_cols = i_offd[vsize];
2446  Array<Pair<HYPRE_Int, int> > cmap_offd(offd_cols);
2447  for (int i = 0; i < offd_cols; i++)
2448  {
2449  cmap_offd[i].one = j_offd[i];
2450  cmap_offd[i].two = i;
2451  }
2452  SortPairs<HYPRE_Int, int>(cmap_offd, offd_cols);
2453 
2454  HYPRE_Int* cmap = new HYPRE_Int[offd_cols];
2455  for (int i = 0; i < offd_cols; i++)
2456  {
2457  cmap[i] = cmap_offd[i].one;
2458  j_offd[cmap_offd[i].two] = i;
2459  }
2460 
2461  HypreParMatrix *M;
2462  M = new HypreParMatrix(MyComm, MyRank, NRanks, dof_offsets, old_dof_offsets,
2463  i_diag, j_diag, i_offd, j_offd, cmap, offd_cols);
2464  return M;
2465 }
2466 
2467 
2468 struct DerefDofMessage
2469 {
2470  std::vector<HYPRE_Int> dofs;
2471  MPI_Request request;
2472 };
2473 
2474 HypreParMatrix*
2475 ParFiniteElementSpace::ParallelDerefinementMatrix(int old_ndofs,
2476  const Table* old_elem_dof)
2477 {
2478  int nrk = HYPRE_AssumedPartitionCheck() ? 2 : NRanks;
2479 
2480  MFEM_VERIFY(Nonconforming(), "Not implemented for conforming meshes.");
2481  MFEM_VERIFY(old_dof_offsets[nrk], "Missing previous (finer) space.");
2482  MFEM_VERIFY(dof_offsets[nrk] <= old_dof_offsets[nrk],
2483  "Previous space is not finer.");
2484 
2485  // Note to the reader: please make sure you first read
2486  // FiniteElementSpace::RefinementMatrix, then
2487  // FiniteElementSpace::DerefinementMatrix, and only then this function.
2488  // You have been warned! :-)
2489 
2490  Array<int> dofs, old_dofs, old_vdofs;
2491  Vector row;
2492 
2493  ParNCMesh* pncmesh = pmesh->pncmesh;
2494  Geometry::Type geom = pncmesh->GetElementGeometry();
2495  int ldof = fec->FiniteElementForGeometry(geom)->GetDof();
2496 
2497  const CoarseFineTransformations &dtrans = pncmesh->GetDerefinementTransforms();
2498  const Array<int> &old_ranks = pncmesh->GetDerefineOldRanks();
2499 
2500  std::map<int, DerefDofMessage> messages;
2501 
2502  HYPRE_Int old_offset = HYPRE_AssumedPartitionCheck()
2503  ? old_dof_offsets[0] : old_dof_offsets[MyRank];
2504 
2505  // communicate DOFs for derefinements that straddle processor boundaries;
2506  // note that this is infrequent due to the way elements are ordered
2507  for (int k = 0; k < dtrans.embeddings.Size(); k++)
2508  {
2509  const Embedding &emb = dtrans.embeddings[k];
2510 
2511  int fine_rank = old_ranks[k];
2512  int coarse_rank = (emb.parent < 0) ? (-1 - emb.parent)
2513  : pncmesh->ElementRank(emb.parent);
2514 
2515  if (coarse_rank != MyRank && fine_rank == MyRank)
2516  {
2517  old_elem_dof->GetRow(k, dofs);
2518  DofsToVDofs(dofs, old_ndofs);
2519 
2520  DerefDofMessage &msg = messages[k];
2521  msg.dofs.resize(dofs.Size());
2522  for (int i = 0; i < dofs.Size(); i++)
2523  {
2524  msg.dofs[i] = old_offset + dofs[i];
2525  }
2526 
2527  MPI_Isend(&msg.dofs[0], msg.dofs.size(), HYPRE_MPI_INT,
2528  coarse_rank, 291, MyComm, &msg.request);
2529  }
2530  else if (coarse_rank == MyRank && fine_rank != MyRank)
2531  {
2532  DerefDofMessage &msg = messages[k];
2533  msg.dofs.resize(ldof*vdim);
2534 
2535  MPI_Irecv(&msg.dofs[0], ldof*vdim, HYPRE_MPI_INT,
2536  fine_rank, 291, MyComm, &msg.request);
2537  }
2538  // TODO: coalesce Isends/Irecvs to the same rank. Typically, on uniform
2539  // derefinement, there should be just one send to MyRank-1 and one recv
2540  // from MyRank+1
2541  }
2542 
2543  DenseTensor localR;
2544  GetLocalDerefinementMatrices(geom, localR);
2545 
2546  // create the diagonal part of the derefinement matrix
2547  SparseMatrix *diag = new SparseMatrix(ndofs*vdim, old_ndofs*vdim);
2548 
2549  Array<char> mark(diag->Height());
2550  mark = 0;
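    // 'mark' guards against setting a coarse DOF row twice: a DOF on the
    // interface between coarse elements is visited from each of them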
2551  for (int k = 0; k < dtrans.embeddings.Size(); k++)
2552  {
2553  const Embedding &emb = dtrans.embeddings[k];
2554  if (emb.parent < 0) { continue; }
2555 
2556  int coarse_rank = pncmesh->ElementRank(emb.parent);
2557  int fine_rank = old_ranks[k];
2558 
2559  if (coarse_rank == MyRank && fine_rank == MyRank)
2560  {
2561  DenseMatrix &lR = localR(emb.matrix);
2562 
2563  elem_dof->GetRow(emb.parent, dofs);
2564  old_elem_dof->GetRow(k, old_dofs);
2565 
2566  for (int vd = 0; vd < vdim; vd++)
2567  {
2568  old_dofs.Copy(old_vdofs);
2569  DofsToVDofs(vd, old_vdofs, old_ndofs);
2570 
2571  for (int i = 0; i < lR.Height(); i++)
2572  {
2573  if (lR(i, 0) == infinity()) { continue; }
2574 
2575  int r = DofToVDof(dofs[i], vd);
2576  int m = (r >= 0) ? r : (-1 - r);
2577 
2578  if (!mark[m])
2579  {
2580  lR.GetRow(i, row);
2581  diag->SetRow(r, old_vdofs, row);
2582  mark[m] = 1;
2583  }
2584  }
2585  }
2586  }
2587  }
2588  diag->Finalize();
2589 
2590  // wait for all sends/receives to complete
2591  for (std::map<int, DerefDofMessage>::iterator
2592  it = messages.begin(); it != messages.end(); ++it)
2593  {
2594  MPI_Wait(&it->second.request, MPI_STATUS_IGNORE);
2595  }
2596 
2597  // create the offdiagonal part of the derefinement matrix
2598  SparseMatrix *offd = new SparseMatrix(ndofs*vdim, 1);
2599 
2600  std::map<HYPRE_Int, int> col_map;
2601  for (int k = 0; k < dtrans.embeddings.Size(); k++)
2602  {
2603  const Embedding &emb = dtrans.embeddings[k];
2604  if (emb.parent < 0) { continue; }
2605 
2606  int coarse_rank = pncmesh->ElementRank(emb.parent);
2607  int fine_rank = old_ranks[k];
2608 
2609  if (coarse_rank == MyRank && fine_rank != MyRank)
2610  {
2611  DenseMatrix &lR = localR(emb.matrix);
2612 
2613  elem_dof->GetRow(emb.parent, dofs);
2614 
2615  DerefDofMessage &msg = messages[k];
2616  MFEM_ASSERT(msg.dofs.size(), "");
2617 
2618  for (int vd = 0; vd < vdim; vd++)
2619  {
2620  HYPRE_Int* remote_dofs = &msg.dofs[vd*ldof];
2621 
2622  for (int i = 0; i < lR.Height(); i++)
2623  {
2624  if (lR(i, 0) == infinity()) { continue; }
2625 
2626  int r = DofToVDof(dofs[i], vd);
2627  int m = (r >= 0) ? r : (-1 - r);
2628 
2629  if (!mark[m])
2630  {
2631  lR.GetRow(i, row);
2632  for (int j = 0; j < ldof; j++)
2633  {
2634  if (row[j] == 0.0) { continue; } // NOTE: lR thresholded
2635  int &lcol = col_map[remote_dofs[j]];
2636  if (!lcol) { lcol = col_map.size(); }
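                  // new columns get a 1-based id equal to the current map
                  // size (operator[] default-inserts 0), hence 'lcol-1' below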
2637  offd->_Set_(m, lcol-1, row[j]);
2638  }
2639  mark[m] = 1;
2640  }
2641  }
2642  }
2643  }
2644  }
2645  messages.clear();
2646  offd->Finalize(0);
2647  offd->SetWidth(col_map.size());
2648 
2649  // create offd column mapping for use by hypre
2650  HYPRE_Int *cmap = new HYPRE_Int[offd->Width()];
2651  for (std::map<HYPRE_Int, int>::iterator
2652  it = col_map.begin(); it != col_map.end(); ++it)
2653  {
2654  cmap[it->second-1] = it->first;
2655  }
2656 
2657  // reorder offd columns so that 'cmap' is monotonic
2658  // NOTE: this is easier and probably faster (offd is small) than making
2659  // sure cmap is determined and sorted before the offd matrix is created
2660  {
2661  int width = offd->Width();
2662  Array<Pair<int, int> > reorder(width);
2663  for (int i = 0; i < width; i++)
2664  {
2665  reorder[i].one = cmap[i];
2666  reorder[i].two = i;
2667  }
2668  reorder.Sort();
2669 
2670  Array<int> reindex(width);
2671  for (int i = 0; i < width; i++)
2672  {
2673  reindex[reorder[i].two] = i;
2674  cmap[i] = reorder[i].one;
2675  }
2676 
2677  int *J = offd->GetJ();
2678  for (int i = 0; i < offd->NumNonZeroElems(); i++)
2679  {
2680  J[i] = reindex[J[i]];
2681  }
2682  offd->SortColumnIndices();
2683  }
2684 
2685  HypreParMatrix* R;
2686  R = new HypreParMatrix(MyComm, dof_offsets[nrk], old_dof_offsets[nrk],
2687  dof_offsets, old_dof_offsets, diag, offd, cmap);
2688 
2689 #ifndef HYPRE_BIGINT
2690  diag->LoseData();
2691  offd->LoseData();
2692 #else
2693  diag->SetDataOwner(false);
2694  offd->SetDataOwner(false);
2695 #endif
2696  delete diag;
2697  delete offd;
2698 
2699  R->SetOwnerFlags(3, 3, 1);
2700 
2701  return R;
2702 }
2703 
2704 void ParFiniteElementSpace::Destroy()
2705 {
2706  ldof_group.DeleteAll();
2707  ldof_ltdof.DeleteAll();
2708  dof_offsets.DeleteAll();
2709  tdof_offsets.DeleteAll();
2710  tdof_nb_offsets.DeleteAll();
2711  // preserve old_dof_offsets
2712  ldof_sign.DeleteAll();
2713 
2714  delete P; P = NULL;
2715  delete Pconf; Pconf = NULL;
2716  delete R; R = NULL;
2717 
2718  delete gcomm; gcomm = NULL;
2719 
2720  num_face_nbr_dofs = -1;
2721  face_nbr_element_dof.Clear();
2722  face_nbr_ldof.Clear();
2723  face_nbr_glob_dof_map.DeleteAll();
2724  send_face_nbr_ldof.Clear();
2725 }
2726 
2727 void ParFiniteElementSpace::GetTrueTransferOperator(
2728  const FiniteElementSpace &coarse_fes, OperatorHandle &T) const
2729 {
2730  OperatorHandle Tgf(T.Type() == Operator::Hypre_ParCSR ?
2731  Operator::MFEM_SPARSEMAT : Operator::ANY_TYPE);
2732  GetTransferOperator(coarse_fes, Tgf);
2733  Dof_TrueDof_Matrix(); // Make sure R is built - we need R in all cases.
2734  if (T.Type() == Operator::Hypre_ParCSR)
2735  {
2736  const ParFiniteElementSpace *c_pfes =
2737  dynamic_cast<const ParFiniteElementSpace *>(&coarse_fes);
2738  MFEM_ASSERT(c_pfes != NULL, "coarse_fes must be a parallel space");
2739  SparseMatrix *RA = mfem::Mult(*R, *Tgf.As<SparseMatrix>());
2740  Tgf.Clear();
2741  T.Reset(c_pfes->Dof_TrueDof_Matrix()->
2742  LeftDiagMult(*RA, GetTrueDofOffsets()));
2743  delete RA;
2744  }
2745  else
2746  {
2747  T.Reset(new TripleProductOperator(R, Tgf.Ptr(),
2748  coarse_fes.GetProlongationMatrix(),
2749  false, Tgf.OwnsOperator(), false));
2750  Tgf.SetOperatorOwner(false);
2751  }
2752 }
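// Example (sketch): requesting the true-dof transfer operator as a hypre
// matrix, assuming 'coarse_fes' is the space saved before refinement:
//
//    OperatorHandle T(Operator::Hypre_ParCSR);
//    fine_fes.GetTrueTransferOperator(coarse_fes, T);
//    T.As<HypreParMatrix>()->Mult(x_coarse_true, x_fine_true);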
2753 
2754 void ParFiniteElementSpace::Update(bool want_transform)
2755 {
2756  if (mesh->GetSequence() == sequence)
2757  {
2758  return; // no need to update, no-op
2759  }
2760  if (want_transform && mesh->GetSequence() != sequence + 1)
2761  {
2762  MFEM_ABORT("Error in update sequence. Space needs to be updated after "
2763  "each mesh modification.");
2764  }
2765  sequence = mesh->GetSequence();
2766 
2767  if (NURBSext)
2768  {
2769  UpdateNURBS();
2770  return;
2771  }
2772 
2773  Table* old_elem_dof = NULL;
2774  int old_ndofs;
2775 
2776  // save old DOF table
2777  if (want_transform)
2778  {
2779  old_elem_dof = elem_dof;
2780  elem_dof = NULL;
2781  old_ndofs = ndofs;
2782  Swap(dof_offsets, old_dof_offsets);
2783  }
2784 
2785  Destroy();
2786  FiniteElementSpace::Destroy(); // calls Th.Clear()
2787 
2789  Construct();
2790 
2792 
2793  if (want_transform)
2794  {
2795  // calculate appropriate GridFunction transformation
2796  switch (mesh->GetLastOperation())
2797  {
2798  case Mesh::REFINE:
2799  {
2800  if (Th.Type() != Operator::MFEM_SPARSEMAT)
2801  {
2802  Th.Reset(new RefinementOperator(this, old_elem_dof, old_ndofs));
2803  // The RefinementOperator takes ownership of 'old_elem_dofs', so
2804  // we no longer own it:
2805  old_elem_dof = NULL;
2806  }
2807  else
2808  {
2809  // calculate fully assembled matrix
2810  Th.Reset(RefinementMatrix(old_ndofs, old_elem_dof));
2811  }
2812  break;
2813  }
2814 
2815  case Mesh::DEREFINE:
2816  {
2817  Th.Reset(ParallelDerefinementMatrix(old_ndofs, old_elem_dof));
2818  if (Nonconforming())
2819  {
2820  Th.SetOperatorOwner(false);
2821  Th.Reset(new TripleProductOperator(P, R, Th.Ptr(),
2822  false, false, true));
2823  }
2824  break;
2825  }
2826 
2827  case Mesh::REBALANCE:
2828  {
2829  Th.Reset(RebalanceMatrix(old_ndofs, old_elem_dof));
2830  break;
2831  }
2832 
2833  default:
2834  break;
2835  }
2836 
2837  delete old_elem_dof;
2838  }
2839 }
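// Example (sketch): the expected call sequence around Update(), assuming a
// ParMesh 'pmesh' and a ParGridFunction 'x' living on this space:
//
//    pmesh.UniformRefinement(); // or GeneralRefinement(), Rebalance(), ...
//    fespace.Update();          // builds the transformation operator Th
//    x.Update();                // interpolates x through Th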
2840 
2841 
2842 ConformingProlongationOperator::ConformingProlongationOperator(
2843  const ParFiniteElementSpace &pfes)
2844  : Operator(pfes.GetVSize(), pfes.GetTrueVSize()),
2845  external_ldofs(),
2846  gc(pfes.GroupComm())
2847 {
2848  MFEM_VERIFY(pfes.Conforming(), "");
2849  const Table &group_ldof = gc.GroupLDofTable();
2850  external_ldofs.Reserve(Height()-Width());
2851  for (int gr = 1; gr < group_ldof.Size(); gr++)
2852  {
2853  if (!gc.GetGroupTopology().IAmMaster(gr))
2854  {
2855  external_ldofs.Append(group_ldof.GetRow(gr), group_ldof.RowSize(gr));
2856  }
2857  }
2858  external_ldofs.Sort();
2859  MFEM_ASSERT(external_ldofs.Size() == Height()-Width(), "");
2860 #ifdef MFEM_DEBUG
2861  for (int j = 1; j < external_ldofs.Size(); j++)
2862  {
2863  // Check for repeated ldofs.
2864  MFEM_VERIFY(external_ldofs[j-1] < external_ldofs[j], "");
2865  }
2866  int j = 0;
2867  for (int i = 0; i < external_ldofs.Size(); i++)
2868  {
2869  const int end = external_ldofs[i];
2870  for ( ; j < end; j++)
2871  {
2872  MFEM_VERIFY(j-i == pfes.GetLocalTDofNumber(j), "");
2873  }
2874  j = end+1;
2875  }
2876  for ( ; j < Height(); j++)
2877  {
2878  MFEM_VERIFY(j-external_ldofs.Size() == pfes.GetLocalTDofNumber(j), "");
2879  }
2880  // gc.PrintInfo();
2881  // pfes.Dof_TrueDof_Matrix()->PrintCommPkg();
2882 #endif
2883 }
2884 
2885 void ConformingProlongationOperator::Mult(const Vector &x, Vector &y) const
2886 {
2887  MFEM_ASSERT(x.Size() == Width(), "");
2888  MFEM_ASSERT(y.Size() == Height(), "");
2889 
2890  const double *xdata = x.HostRead();
2891  double *ydata = y.HostWrite();
2892  const int m = external_ldofs.Size();
2893 
2894  const int in_layout = 2; // 2 - input is ltdofs array
2895  gc.BcastBegin(const_cast<double*>(xdata), in_layout);
2896 
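    // copy the true-dof entries of x into the non-shared slots of y; after
    // skipping i external ldofs, the source index lags the destination by i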
2897  int j = 0;
2898  for (int i = 0; i < m; i++)
2899  {
2900  const int end = external_ldofs[i];
2901  std::copy(xdata+j-i, xdata+end-i, ydata+j);
2902  j = end+1;
2903  }
2904  std::copy(xdata+j-m, xdata+Width(), ydata+j);
2905 
2906  const int out_layout = 0; // 0 - output is ldofs array
2907  gc.BcastEnd(ydata, out_layout);
2908 }
2909 
2910 void ConformingProlongationOperator::MultTranspose(
2911  const Vector &x, Vector &y) const
2912 {
2913  MFEM_ASSERT(x.Size() == Height(), "");
2914  MFEM_ASSERT(y.Size() == Width(), "");
2915 
2916  const double *xdata = x.HostRead();
2917  double *ydata = y.HostWrite();
2918  const int m = external_ldofs.Size();
2919 
2920  gc.ReduceBegin(xdata);
2921 
2922  int j = 0;
2923  for (int i = 0; i < m; i++)
2924  {
2925  const int end = external_ldofs[i];
2926  std::copy(xdata+j, xdata+end, ydata+j-i);
2927  j = end+1;
2928  }
2929  std::copy(xdata+j, xdata+Height(), ydata+j-m);
2930 
2931  const int out_layout = 2; // 2 - output is an array on all ltdofs
2932  gc.ReduceEnd<double>(ydata, out_layout, GroupCommunicator::Sum);
2933 }
2934 
2935 } // namespace mfem
2936 
2937 #endif