ex1p.cpp
// MFEM Example 1 - Parallel Version
//                  PETSc Modification
//
// Compile with: make ex1p
//
// Sample runs:  mpirun -np 4 ex1p -m ../../data/amr-quad.mesh
//               mpirun -np 4 ex1p -m ../../data/amr-quad.mesh --petscopts rc_ex1p
//
// Description:  This example code demonstrates the use of MFEM to define a
//               simple finite element discretization of the Laplace problem
//               -Delta u = 1 with homogeneous Dirichlet boundary conditions.
//               Specifically, we discretize using a FE space of the specified
//               order, or if order < 1 using an isoparametric/isogeometric
//               space (i.e. quadratic for quadratic curvilinear mesh, NURBS for
//               NURBS mesh, etc.)
//
//               The example highlights the use of mesh refinement, finite
//               element grid functions, as well as linear and bilinear forms
//               corresponding to the left-hand side and right-hand side of the
//               discrete linear system. We also cover the explicit elimination
//               of essential boundary conditions, static condensation, and the
//               optional connection to the GLVis tool for visualization.
//               The example also shows how PETSc Krylov solvers can be used by
//               wrapping a HypreParMatrix (or not) and a Solver, together with
//               customization using an options file (see rc_ex1p). We also
//               provide an example on how to visualize the iterative solution
//               inside a PETSc solver.
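//
//               Note on the options file: a PETSc "rc" file is plain text with
//               one command-line option per line. The contents of rc_ex1p are
//               not reproduced here; purely as an illustrative sketch, a
//               minimal file selecting a CG Krylov method with a relative
//               tolerance and residual monitoring might contain
//
//                  -ksp_type cg
//                  -ksp_rtol 1e-8
//                  -ksp_monitor
//
//               Any option understood by PETSc's KSP/PC runtime interface can
//               be placed in such a file or passed on the command line.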

#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifndef MFEM_USE_PETSC
#error This example requires that MFEM is built with MFEM_USE_PETSC=YES
#endif

using namespace std;
using namespace mfem;

class UserMonitor : public PetscSolverMonitor
{
private:
   ParBilinearForm *_a;
   ParLinearForm   *_b;

public:
   UserMonitor(ParBilinearForm *a, ParLinearForm *b)
      : PetscSolverMonitor(true,false), _a(a), _b(b) {}

   void MonitorSolution(PetscInt it, PetscReal norm, const Vector &X)
   {
      // we plot the first 5 iterates
      if (!it || it > 5) { return; }
      ParFiniteElementSpace *fespace = _a->ParFESpace();
      ParMesh *mesh = fespace->GetParMesh();
      ParGridFunction _x(fespace);
      _a->RecoverFEMSolution(X, *_b, _x);

      char vishost[] = "localhost";
      int  visport   = 19916;
      int  num_procs, myid;

      MPI_Comm_size(mesh->GetComm(),&num_procs);
      MPI_Comm_rank(mesh->GetComm(),&myid);
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *mesh << _x
               << "window_title 'Iteration no " << it << "'" << flush;
   }
};
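
// The PetscSolverMonitor base class used above also provides a residual
// callback, enabled by passing monitor_res=true to its constructor. As a
// minimal sketch (the MonitorResidual hook is assumed here to mirror the
// MonitorSolution signature above), a monitor that simply reports the residual
// norm handed back by PETSc at each iteration could look like this:
class ResidualReportMonitor : public PetscSolverMonitor
{
public:
   ResidualReportMonitor() : PetscSolverMonitor(false,true) {}

   void MonitorResidual(PetscInt it, PetscReal norm, const Vector &r)
   {
      int myid;
      MPI_Comm_rank(MPI_COMM_WORLD, &myid);
      // Print only on the root rank to avoid duplicated output.
      if (myid == 0)
      {
         cout << "PETSc iteration " << it << ": residual norm " << norm << endl;
      }
   }
};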

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/star.mesh";
   int order = 1;
   bool static_cond = false;
   bool visualization = false;
   bool use_petsc = true;
   const char *petscrc_file = "";
   bool petscmonitor = false;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc",
                  "--no-petsc",
                  "Whether or not to use PETSc to solve the linear system.");
   args.AddOption(&petscrc_file, "-petscopts", "--petscopts",
                  "PetscOptions file to use.");
   args.AddOption(&petscmonitor, "-petscmonitor", "--petscmonitor",
                  "-no-petscmonitor", "--no-petscmonitor",
                  "Enable or disable GLVis visualization of residual.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 2b. Initialize PETSc, passing the (possibly empty) options file.
   MFEMInitializePetsc(NULL,NULL,petscrc_file,NULL);

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 10,000 elements.
   {
      int ref_levels =
         (int)floor(log(10000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }
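
   // As a concrete (hypothetical) illustration of the formula above: for a 2D
   // mesh with 100 elements, log2(10000/100) = 6.64 and dividing by dim = 2
   // gives 3.32, so ref_levels = 3; three uniform refinements multiply the
   // element count by 4^3 = 64, for 6400 <= 10000 elements, while a fourth
   // level would exceed the bound.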

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
   }
   else if (pmesh->GetNodes())
   {
      fec = pmesh->GetNodes()->OwnFEC();
      if (myid == 0)
      {
         cout << "Using isoparametric FEs: " << fec->Name() << endl;
      }
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
   }
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 7. Determine the list of true (i.e. parallel conforming) essential
   //    boundary dofs. In this example, the boundary conditions are defined
   //    by marking all the boundary attributes from the mesh as essential
   //    (Dirichlet) and converting them to a list of true dofs.
   Array<int> ess_tdof_list;
   if (pmesh->bdr_attributes.Size())
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
      fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
   }
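
   // In this example every boundary attribute is marked essential. For a
   // hypothetical mesh where only boundary attribute 1 should carry the
   // Dirichlet condition, the marker array in the block above would instead
   // be filled selectively, e.g.
   //
   //    ess_bdr = 0;     // clear all markers
   //    ess_bdr[0] = 1;  // entry i-1 of ess_bdr corresponds to attribute i
   //
   // before calling GetEssentialTrueDofs.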

   // 8. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (1,phi_i) where phi_i are the basis functions in fespace.
   ParLinearForm *b = new ParLinearForm(fespace);
   ConstantCoefficient one(1.0);
   b->AddDomainIntegrator(new DomainLFIntegrator(one));
   b->Assemble();

   // 9. Define the solution vector x as a parallel finite element grid function
   //    corresponding to fespace. Initialize x with initial guess of zero,
   //    which satisfies the boundary conditions.
   ParGridFunction x(fespace);
   x = 0.0;

   // 10. Set up the parallel bilinear form a(.,.) on the finite element space
   //     corresponding to the Laplacian operator -Delta, by adding the
   //     Diffusion domain integrator.
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DiffusionIntegrator(one));

   // 11. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
   if (static_cond) { a->EnableStaticCondensation(); }
   a->Assemble();

   HypreParMatrix A;
   Vector B, X;
   a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B);
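   // Here X and B are vectors of true (parallel conforming) dofs and the
   // essential boundary conditions have been eliminated from A; step 13 below
   // uses RecoverFEMSolution to map the solved X back onto the grid function x.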

   if (myid == 0)
   {
      cout << "Size of linear system: " << A.GetGlobalNumRows() << endl;
   }

   // 12. Define and apply a parallel PCG solver for A X = B with the BoomerAMG
   //     preconditioner from hypre.
   HypreSolver *amg = new HypreBoomerAMG(A);

   if (!use_petsc)
   {
      HyprePCG *pcg = new HyprePCG(A);
      pcg->SetPreconditioner(*amg);
      pcg->SetTol(1e-12);
      pcg->SetMaxIter(200);
      pcg->SetPrintLevel(2);
      pcg->Mult(B, X);
      delete pcg;
   }
   else
   {
      // If petscrc_file has been given, we convert the HypreParMatrix to a
      // PetscParMatrix; the user can then experiment with PETSc command line
      // options.
      bool wrap = !strlen(petscrc_file);
      PetscPCGSolver *pcg = new PetscPCGSolver(A, wrap);
      if (wrap)
      {
         pcg->SetPreconditioner(*amg);
      }
      pcg->SetTol(1e-12);
      pcg->SetAbsTol(1e-12);
      pcg->SetMaxIter(200);
      pcg->SetPrintLevel(2);

      UserMonitor mymon(a,b);
      if (visualization && petscmonitor)
      {
         pcg->SetMonitor(&mymon);
         pcg->iterative_mode = true;
         X.Randomize();
      }
      pcg->Mult(B, X);
      delete pcg;
   }
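
   // Note that when an options file is supplied (wrap == false above), the
   // matrix is converted to a true PetscParMatrix rather than wrapped, and no
   // preconditioner is attached here, so one can be chosen at runtime from the
   // options file instead; for instance (assuming PETSc was configured with
   // hypre) the lines
   //
   //    -pc_type hypre
   //    -pc_hypre_type boomeramg
   //
   // would select BoomerAMG, while "-pc_type gamg" would use PETSc's native
   // algebraic multigrid.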

   // 13. Recover the parallel grid function corresponding to X. This is the
   //     local finite element solution on each processor.
   a->RecoverFEMSolution(X, *b, x);

   // 14. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 15. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *pmesh << x << flush;
   }

   // 16. Free the used memory.
   delete amg;
   delete a;
   delete b;
   delete fespace;
   if (order > 0) { delete fec; }
   delete pmesh;

   // We finalize PETSc.
   MFEMFinalizePetsc();

   MPI_Finalize();

   return 0;
}