MFEM  v3.4
Finite element discretization library
ex6p.cpp
// MFEM Example 6 - Parallel Version
// PETSc Modification
//
// Compile with: make ex6p
//
// Sample runs:
//    mpirun -np 4 ex6p -m ../../data/amr-quad.mesh
//    mpirun -np 4 ex6p -m ../../data/amr-quad.mesh -nonoverlapping
//
// Description: This is a version of Example 1 with a simple adaptive mesh
//    refinement loop. The problem being solved is again the Laplace
//    equation -Delta u = 1 with homogeneous Dirichlet boundary
//    conditions. The problem is solved on a sequence of meshes which
//    are locally refined in a conforming (triangles, tetrahedra) or
//    non-conforming (quadrilaterals, hexahedra) manner according to
//    a simple ZZ error estimator.
//
//    The example demonstrates MFEM's capability to work with both
//    conforming and nonconforming refinements, in 2D and 3D, on
//    linear, curved and surface meshes. Interpolation of functions
//    from coarse to fine meshes, as well as persistent GLVis
//    visualization, are also illustrated.
//
//    PETSc assembly timings can be benchmarked if requested on the
//    command line.
//
//    We recommend viewing Example 1 before viewing this example.

#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifndef MFEM_USE_PETSC
#error This example requires that MFEM is built with MFEM_USE_PETSC=YES
#endif

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/star.mesh";
   int order = 1;
   bool visualization = true;
   int max_dofs = 100000;
   bool use_petsc = true;
   const char *petscrc_file = "";
   bool use_nonoverlapping = false;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&max_dofs, "-md", "--max_dofs",
                  "Maximum number of dofs.");
   args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc",
                  "--no-petsc",
                  "Whether or not to use PETSc to solve the linear system.");
   args.AddOption(&petscrc_file, "-petscopts", "--petscopts",
                  "PetscOptions file to use.");
   args.AddOption(&use_nonoverlapping, "-nonoverlapping", "--nonoverlapping",
                  "-no-nonoverlapping", "--no-nonoverlapping",
                  "Whether or not to use PETSc's block-diagonal matrix format "
                  "for non-overlapping domain decomposition.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }
   // 2b. Initialize PETSc, reading extra PETSc options from the file passed
   //     with -petscopts (if any).
   if (use_petsc) { PetscInitialize(NULL,NULL,petscrc_file,NULL); }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();
   int sdim = mesh->SpaceDimension();

   // 4. Refine the serial mesh on all processors to increase the resolution.
   //    Also project a NURBS mesh to a piecewise-quadratic curved mesh. Make
   //    sure that the mesh is non-conforming.
   if (mesh->NURBSext)
   {
      mesh->UniformRefinement();
      mesh->SetCurvature(2);
   }
   mesh->EnsureNCMesh();

   // 5. Define a parallel mesh by partitioning the serial mesh.
   //    Once the parallel mesh is defined, the serial mesh can be deleted.
   ParMesh pmesh(MPI_COMM_WORLD, *mesh);
   delete mesh;

   MFEM_VERIFY(pmesh.bdr_attributes.Size() > 0,
               "Boundary attributes required in the mesh.");
   Array<int> ess_bdr(pmesh.bdr_attributes.Max());
   ess_bdr = 1;
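   // All boundary attributes are marked as essential (Dirichlet); since the
   // solution x is initialized to zero below, this imposes the homogeneous
   // Dirichlet condition u = 0 on the whole boundary.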

   // 6. Define a finite element space on the mesh. The polynomial order is
   //    one (linear) by default, but this can be changed on the command line.
   H1_FECollection fec(order, dim);
   ParFiniteElementSpace fespace(&pmesh, &fec);

   // 7. As in Example 1p, we set up bilinear and linear forms corresponding to
   //    the Laplace problem -\Delta u = 1. We don't assemble the discrete
   //    problem yet; this will be done in the main loop.
   ParBilinearForm a(&fespace);
   ParLinearForm b(&fespace);

   ConstantCoefficient one(1.0);

   BilinearFormIntegrator *integ = new DiffusionIntegrator(one);
   a.AddDomainIntegrator(integ);
   b.AddDomainIntegrator(new DomainLFIntegrator(one));

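   // Together these two integrators discretize the weak form of the problem:
   // find u_h in the H1 space above, with u_h = 0 on the boundary, such that
   // (grad u_h, grad v_h) = (1, v_h) for all test functions v_h (which also
   // vanish on the boundary). On each AMR level this yields a sparse linear
   // system A X = B, assembled and solved below.
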
   // 8. The solution vector x and the associated finite element grid function
   //    will be maintained over the AMR iterations. We initialize it to zero.
   ParGridFunction x(&fespace);
   x = 0;

   // 9. Connect to GLVis.
   char vishost[] = "localhost";
   int visport = 19916;

   socketstream sout;
   if (visualization)
   {
      sout.open(vishost, visport);
      if (!sout)
      {
         if (myid == 0)
         {
            cout << "Unable to connect to GLVis server at "
                 << vishost << ':' << visport << endl;
            cout << "GLVis visualization disabled.\n";
         }
         visualization = false;
      }

      sout.precision(8);
   }

   // 10. Set up an error estimator. Here we use the Zienkiewicz-Zhu estimator
   //     with L2 projection in the smoothing step to better handle hanging
   //     nodes and parallel partitioning. We need to supply a space for the
   //     discontinuous flux (L2) and a space for the smoothed flux (H(div) is
   //     used here).
   L2_FECollection flux_fec(order, dim);
   ParFiniteElementSpace flux_fes(&pmesh, &flux_fec, sdim);
   RT_FECollection smooth_flux_fec(order-1, dim);
   ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec);
   // Another possible option for the smoothed flux space:
   // H1_FECollection smooth_flux_fec(order, dim);
   // ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec, dim);
   L2ZienkiewiczZhuEstimator estimator(*integ, x, flux_fes, smooth_flux_fes);

   // 11. A refiner selects and refines elements based on a refinement strategy.
   //     The strategy here is to refine elements with errors larger than a
   //     fraction of the maximum element error. Other strategies are possible.
   //     The refiner will call the given error estimator.
   ThresholdRefiner refiner(estimator);
   refiner.SetTotalErrorFraction(0.7);

   // 12. The main AMR loop. In each iteration we solve the problem on the
   //     current mesh, visualize the solution, and refine the mesh.
   for (int it = 0; ; it++)
   {
      HYPRE_Int global_dofs = fespace.GlobalTrueVSize();
      if (myid == 0)
      {
         cout << "\nAMR iteration " << it << endl;
         cout << "Number of unknowns: " << global_dofs << endl;
      }

      // 13. Assemble the stiffness matrix and the right-hand side. Note that
      //     MFEM doesn't care at this point that the mesh is nonconforming
      //     and parallel. The FE space is considered 'cut' along hanging
      //     edges/faces, and also across processor boundaries.
      a.Assemble();
      b.Assemble();

      // 14. Create the parallel linear system: eliminate boundary conditions,
      //     constrain hanging nodes and nodes across processor boundaries.
      //     The system will be solved for true (unconstrained/unique) DOFs only.
      Array<int> ess_tdof_list;
      fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
      double time;
      const int copy_interior = 1;
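      // With copy_interior = 1, FormLinearSystem keeps the current interior
      // (non-essential) values of x in the reduced true-DOF vector X instead
      // of zeroing them; the essential boundary values are copied either way.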

      if (use_petsc)
      {
         a.SetOperatorType(use_nonoverlapping ?
                           Operator::PETSC_MATIS : Operator::PETSC_MATAIJ);
         PetscParMatrix pA;
         Vector pX, pB;
         MPI_Barrier(MPI_COMM_WORLD);
         time = -MPI_Wtime();
         a.FormLinearSystem(ess_tdof_list, x, b, pA, pX, pB, copy_interior);
         MPI_Barrier(MPI_COMM_WORLD);
         time += MPI_Wtime();
         if (myid == 0) { cout << "PETSc assembly timing : " << time << endl; }
      }
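      // Note: the PETSc matrix pA is assembled here only to benchmark the
      // assembly time (see the description at the top of the file); the solve
      // below always uses the hypre matrix A. The timing idiom accumulates
      // wall-clock time from two MPI_Wtime() calls, and the barriers make all
      // ranks start and stop together, so the slowest process determines the
      // reported time.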

      a.Assemble();
      b.Assemble();
      a.SetOperatorType(Operator::Hypre_ParCSR);
      HypreParMatrix A;
      Vector B, X;
      MPI_Barrier(MPI_COMM_WORLD);
      time = -MPI_Wtime();
      a.FormLinearSystem(ess_tdof_list, x, b, A, X, B, copy_interior);
      MPI_Barrier(MPI_COMM_WORLD);
      time += MPI_Wtime();
      if (myid == 0) { cout << "HYPRE assembly timing : " << time << endl; }

      // 15. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
      //     preconditioner from hypre.
      HypreBoomerAMG amg;
      amg.SetPrintLevel(0);
      CGSolver pcg(A.GetComm());
      pcg.SetPreconditioner(amg);
      pcg.SetOperator(A);
      pcg.SetRelTol(1e-6);
      pcg.SetMaxIter(200);
      pcg.SetPrintLevel(3); // print the first and the last iterations only
      pcg.Mult(B, X);

      // 16. Extract the parallel grid function corresponding to the finite
      //     element approximation X. This is the local solution on each
      //     processor.
      a.RecoverFEMSolution(X, b, x);
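      // RecoverFEMSolution prolongates the true-DOF solution X back to the
      // full grid function x, so constrained DOFs (hanging nodes and DOFs
      // shared between processors) receive values consistent with the
      // conforming solution.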

      // 17. Send the solution by socket to a GLVis server.
      if (visualization)
      {
         sout << "parallel " << num_procs << " " << myid << "\n";
         sout << "solution\n" << pmesh << x << flush;
      }

      if (global_dofs > max_dofs)
      {
         if (myid == 0)
         {
            cout << "Reached the maximum number of dofs. Stop." << endl;
         }
         // We need to call Update here to delete any internal PETSc objects
         // that have been created by the ParBilinearForm; otherwise, these
         // objects would be destroyed at the end of the main scope, after
         // PETSc has already been finalized.
         a.Update();
         b.Update();
         break;
      }

      // 18. Call the refiner to modify the mesh. The refiner calls the error
      //     estimator to obtain element errors, then it selects elements to be
      //     refined and finally it modifies the mesh. The Stop() method can be
      //     used to determine if a stopping criterion was met.
      refiner.Apply(pmesh);
      if (refiner.Stop())
      {
         if (myid == 0)
         {
            cout << "Stopping criterion satisfied. Stop." << endl;
         }
         a.Update();
         b.Update();
         break;
      }

      // 19. Update the finite element space (recalculate the number of DOFs,
      //     etc.) and create a grid function update matrix. Apply the matrix
      //     to any GridFunctions over the space. In this case, the update
      //     matrix is an interpolation matrix so the updated GridFunction will
      //     still represent the same function as before refinement.
      fespace.Update();
      x.Update();

      // 20. Load balance the mesh, and update the space and solution. Currently
      //     available only for nonconforming meshes.
      if (pmesh.Nonconforming())
      {
         pmesh.Rebalance();

         // Update the space and the GridFunction. This time the update matrix
         // redistributes the GridFunction among the processors.
         fespace.Update();
         x.Update();
      }

      // 21. Also inform the bilinear and linear forms that the space has
      //     changed.
      a.Update();
      b.Update();
   }

   // Finalize PETSc (only if it was initialized above).
   if (use_petsc) { PetscFinalize(); }

   MPI_Finalize();
   return 0;
}
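A note on -petscopts: PetscInitialize above loads the named file into PETSc's options database, so the file can also be used to turn on PETSc's own diagnostics for the assembly benchmark. The sketch below is illustrative only; the file name is the user's choice, and the option names assume a reasonably recent PETSc (older releases spelled the first one -log_summary):

    # Hypothetical options file, passed as: mpirun -np 4 ex6p -petscopts <file>
    # -log_view prints PETSc's performance/logging summary at PetscFinalize
    # -options_left warns about options that were set but never used
    -log_view
    -options_left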