MFEM  v3.4
Finite element discretization library
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Pages
ex3p.cpp
Go to the documentation of this file.
1 // MFEM Example 3 - Parallel Version
2 // PETSc Modification
3 //
4 // Compile with: make ex3p
5 //
6 // Sample runs:
7 // mpirun -np 4 ex3p -m ../../data/klein-bottle.mesh -o 2 -f 0.1 --petscopts rc_ex3p
8 // mpirun -np 4 ex3p -m ../../data/klein-bottle.mesh -o 2 -f 0.1 --petscopts rc_ex3p_bddc --nonoverlapping
9 //
10 // Description: This example code solves a simple electromagnetic diffusion
11 // problem corresponding to the second order definite Maxwell
12 // equation curl curl E + E = f with boundary condition
13 // E x n = <given tangential field>. Here, we use a given exact
14 // solution E and compute the corresponding r.h.s. f.
15 // We discretize with Nedelec finite elements in 2D or 3D.
16 //
17 // The example demonstrates the use of H(curl) finite element
18 // spaces with the curl-curl and the (vector finite element) mass
19 // bilinear form, as well as the computation of discretization
20 // error when the exact solution is known. Static condensation is
21 // also illustrated.
22 //
23 // The example also show how to use the non-overlapping feature of
24 // the ParBilinearForm class to obtain the linear operator in
25 // a format suitable for the BDDC preconditioner in PETSc.
26 //
27 // We recommend viewing examples 1-2 before viewing this example.
28 
29 #include "mfem.hpp"
30 #include <fstream>
31 #include <iostream>
32 
33 #ifndef MFEM_USE_PETSC
34 #error This example requires that MFEM is built with MFEM_USE_PETSC=YES
35 #endif
36 
37 using namespace std;
38 using namespace mfem;
39 
40 // Exact solution, E, and r.h.s., f. See below for implementation.
41 void E_exact(const Vector &, Vector &);
42 void f_exact(const Vector &, Vector &);
43 double freq = 1.0, kappa; // kappa = freq * M_PI; kappa is set in main() after option parsing
44 int dim; // mesh dimension (2 or 3); set in main() after the mesh is read
45 
46 int main(int argc, char *argv[])
47 {
48  // 1. Initialize MPI.
49  int num_procs, myid;
50  MPI_Init(&argc, &argv);
51  MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
52  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
53 
54  // 2. Parse command-line options.
55  const char *mesh_file = "../../data/beam-tet.mesh";
56  int order = 1;
57  bool static_cond = false;
58  bool visualization = 1;
59  bool use_petsc = true;
60  const char *petscrc_file = "";
61  bool use_nonoverlapping = false;
62 
63  OptionsParser args(argc, argv);
64  args.AddOption(&mesh_file, "-m", "--mesh",
65  "Mesh file to use.");
66  args.AddOption(&order, "-o", "--order",
67  "Finite element order (polynomial degree).");
68  args.AddOption(&freq, "-f", "--frequency", "Set the frequency for the exact"
69  " solution.");
70  args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
71  "--no-static-condensation", "Enable static condensation.");
72  args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
73  "--no-visualization",
74  "Enable or disable GLVis visualization.");
75  args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc",
76  "--no-petsc",
77  "Use or not PETSc to solve the linear system.");
78  args.AddOption(&petscrc_file, "-petscopts", "--petscopts",
79  "PetscOptions file to use.");
80  args.AddOption(&use_nonoverlapping, "-nonoverlapping", "--nonoverlapping",
81  "-no-nonoverlapping", "--no-nonoverlapping",
82  "Use or not the block diagonal PETSc's matrix format "
83  "for non-overlapping domain decomposition.");
84  args.Parse();
85  if (!args.Good())
86  {
87  if (myid == 0)
88  {
89  args.PrintUsage(cout);
90  }
91  MPI_Finalize();
92  return 1;
93  }
94  if (myid == 0)
95  {
96  args.PrintOptions(cout);
97  }
98  // 2b. We initialize PETSc
99  if (use_petsc) { PetscInitialize(NULL,NULL,petscrc_file,NULL); }
100  kappa = freq * M_PI;
101 
102  // 3. Read the (serial) mesh from the given mesh file on all processors. We
103  // can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
104  // and volume meshes with the same code.
105  Mesh *mesh = new Mesh(mesh_file, 1, 1);
106  dim = mesh->Dimension();
107  int sdim = mesh->SpaceDimension();
108 
109  // 4. Refine the serial mesh on all processors to increase the resolution. In
110  // this example we do 'ref_levels' of uniform refinement. We choose
111  // 'ref_levels' to be the largest number that gives a final mesh with no
112  // more than 1,000 elements.
113  {
114  int ref_levels =
115  (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
116  for (int l = 0; l < ref_levels; l++)
117  {
118  mesh->UniformRefinement();
119  }
120  }
121 
122  // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
123  // this mesh further in parallel to increase the resolution. Once the
124  // parallel mesh is defined, the serial mesh can be deleted. Tetrahedral
125  // meshes need to be reoriented before we can define high-order Nedelec
126  // spaces on them.
127  ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
128  delete mesh;
129  {
130  int par_ref_levels = 2;
131  for (int l = 0; l < par_ref_levels; l++)
132  {
133  pmesh->UniformRefinement();
134  }
135  }
136  pmesh->ReorientTetMesh();
137 
138  // 6. Define a parallel finite element space on the parallel mesh. Here we
139  // use the Nedelec finite elements of the specified order.
140  FiniteElementCollection *fec = new ND_FECollection(order, dim);
141  ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
142  HYPRE_Int size = fespace->GlobalTrueVSize();
143  if (myid == 0)
144  {
145  cout << "Number of finite element unknowns: " << size << endl;
146  }
147 
148  // 7. Determine the list of true (i.e. parallel conforming) essential
149  // boundary dofs. In this example, the boundary conditions are defined
150  // by marking all the boundary attributes from the mesh as essential
151  // (Dirichlet) and converting them to a list of true dofs.
152  Array<int> ess_tdof_list;
153  if (pmesh->bdr_attributes.Size())
154  {
155  Array<int> ess_bdr(pmesh->bdr_attributes.Max());
156  ess_bdr = 1;
157  fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
158  }
159 
160  // 8. Set up the parallel linear form b(.) which corresponds to the
161  // right-hand side of the FEM linear system, which in this case is
162  // (f,phi_i) where f is given by the function f_exact and phi_i are the
163  // basis functions in the finite element fespace.
165  ParLinearForm *b = new ParLinearForm(fespace);
167  b->Assemble();
168 
169  // 9. Define the solution vector x as a parallel finite element grid function
170  // corresponding to fespace. Initialize x by projecting the exact
171  // solution. Note that only values from the boundary edges will be used
172  // when eliminating the non-homogeneous boundary condition to modify the
173  // r.h.s. vector b.
174  ParGridFunction x(fespace);
176  x.ProjectCoefficient(E);
177 
178  // 10. Set up the parallel bilinear form corresponding to the EM diffusion
179  // operator curl muinv curl + sigma I, by adding the curl-curl and the
180  // mass domain integrators.
181  Coefficient *muinv = new ConstantCoefficient(1.0);
183  ParBilinearForm *a = new ParBilinearForm(fespace);
184  a->AddDomainIntegrator(new CurlCurlIntegrator(*muinv));
186 
187  // 11. Assemble the parallel bilinear form and the corresponding linear
188  // system, applying any necessary transformations such as: parallel
189  // assembly, eliminating boundary conditions, applying conforming
190  // constraints for non-conforming AMR, static condensation, etc.
191  if (static_cond) { a->EnableStaticCondensation(); }
192  a->Assemble();
193 
194  Vector B, X;
195  if (!use_petsc)
196  {
197  HypreParMatrix A;
198  a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B);
199 
200  if (myid == 0)
201  {
202  cout << "Size of linear system: " << A.GetGlobalNumRows() << endl;
203  }
204 
205  // 12. Define and apply a parallel PCG solver for AX=B with the AMS
206  // preconditioner from hypre.
207  ParFiniteElementSpace *prec_fespace =
208  (a->StaticCondensationIsEnabled() ? a->SCParFESpace() : fespace);
209  HypreSolver *ams = new HypreAMS(A, prec_fespace);
210  HyprePCG *pcg = new HyprePCG(A);
211  pcg->SetTol(1e-10);
212  pcg->SetMaxIter(500);
213  pcg->SetPrintLevel(2);
214  pcg->SetPreconditioner(*ams);
215  pcg->Mult(B, X);
216  delete pcg;
217  delete ams;
218  }
219  else
220  {
221  PetscParMatrix A;
222  a->SetOperatorType(use_nonoverlapping ?
223  Operator::PETSC_MATIS : Operator::PETSC_MATAIJ);
224  a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B);
225 
226  if (myid == 0)
227  {
228  cout << "Size of linear system: " << A.M() << endl;
229  }
230 
231  // 12. Define and apply a parallel PCG solver.
232  ParFiniteElementSpace *prec_fespace =
233  (a->StaticCondensationIsEnabled() ? a->SCParFESpace() : fespace);
234  PetscPCGSolver *pcg = new PetscPCGSolver(A);
235  PetscPreconditioner *prec = NULL;
236  pcg->SetTol(1e-10);
237  pcg->SetMaxIter(500);
238  pcg->SetPrintLevel(2);
239  if (use_nonoverlapping)
240  {
241  // Auxiliary class for BDDC customization
243  // Inform the solver about the finite element space
244  opts.SetSpace(prec_fespace);
245  // Inform the solver about essential dofs
246  opts.SetEssBdrDofs(&ess_tdof_list);
247  // Create a BDDC solver with parameters
248  prec = new PetscBDDCSolver(A,opts);
249  }
250  else
251  {
252  // Create an empty preconditioner object that can
253  // be customized at runtime
254  prec = new PetscPreconditioner(A,"solver_");
255  }
256  pcg->SetPreconditioner(*prec);
257  pcg->Mult(B, X);
258  delete pcg;
259  delete prec;
260  }
261 
262  // 13. Recover the parallel grid function corresponding to X. This is the
263  // local finite element solution on each processor.
264  a->RecoverFEMSolution(X, *b, x);
265 
266  // 14. Compute and print the L^2 norm of the error.
267  {
268  double err = x.ComputeL2Error(E);
269  if (myid == 0)
270  {
271  cout << "\n|| E_h - E ||_{L^2} = " << err << '\n' << endl;
272  }
273  }
274 
275  // 15. Save the refined mesh and the solution in parallel. This output can
276  // be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
277  {
278  ostringstream mesh_name, sol_name;
279  mesh_name << "mesh." << setfill('0') << setw(6) << myid;
280  sol_name << "sol." << setfill('0') << setw(6) << myid;
281 
282  ofstream mesh_ofs(mesh_name.str().c_str());
283  mesh_ofs.precision(8);
284  pmesh->Print(mesh_ofs);
285 
286  ofstream sol_ofs(sol_name.str().c_str());
287  sol_ofs.precision(8);
288  x.Save(sol_ofs);
289  }
290 
291  // 16. Send the solution by socket to a GLVis server.
292  if (visualization)
293  {
294  char vishost[] = "localhost";
295  int visport = 19916;
296  socketstream sol_sock(vishost, visport);
297  sol_sock << "parallel " << num_procs << " " << myid << "\n";
298  sol_sock.precision(8);
299  sol_sock << "solution\n" << *pmesh << x << flush;
300  }
301 
302  // 17. Free the used memory.
303  delete a;
304  delete sigma;
305  delete muinv;
306  delete b;
307  delete fespace;
308  delete fec;
309  delete pmesh;
310 
311  // We finalize PETSc
312  if (use_petsc) { PetscFinalize(); }
313 
314  MPI_Finalize();
315 
316  return 0;
317 }
318 
319 
320 void E_exact(const Vector &x, Vector &E)
321 {
322  if (dim == 3)
323  {
324  E(0) = sin(kappa * x(1));
325  E(1) = sin(kappa * x(2));
326  E(2) = sin(kappa * x(0));
327  }
328  else
329  {
330  E(0) = sin(kappa * x(1));
331  E(1) = sin(kappa * x(0));
332  if (x.Size() == 3) { E(2) = 0.0; }
333  }
334 }
335 
336 void f_exact(const Vector &x, Vector &f)
337 {
338  if (dim == 3)
339  {
340  f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
341  f(1) = (1. + kappa * kappa) * sin(kappa * x(2));
342  f(2) = (1. + kappa * kappa) * sin(kappa * x(0));
343  }
344  else
345  {
346  f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
347  f(1) = (1. + kappa * kappa) * sin(kappa * x(0));
348  if (x.Size() == 3) { f(2) = 0.0; }
349  }
350 }
void SetTol(double tol)
Definition: hypre.cpp:2170
int Size() const
Logical size of the array.
Definition: array.hpp:133
void SetPreconditioner(Solver &precond)
Sets the solver to perform preconditioning.
Definition: petsc.cpp:2003
void SetPrintLevel(int plev)
Definition: petsc.cpp:1407
virtual void GetEssentialTrueDofs(const Array< int > &bdr_attr_is_ess, Array< int > &ess_tdof_list, int component=-1)
Definition: pfespace.cpp:626
ParFiniteElementSpace * SCParFESpace() const
Return the parallel trace FE space associated with static condensation.
The Auxiliary-space Maxwell Solver in hypre.
Definition: hypre.hpp:859
Abstract class for PETSc's preconditioners.
Definition: petsc.hpp:523
Subclass constant coefficient.
Definition: coefficient.hpp:57
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B, int copy_interior=0)
Wrapper for PETSc's matrix class.
Definition: petsc.hpp:133
virtual void ReorientTetMesh()
See the remarks for the serial version in mesh.hpp.
Definition: pmesh.cpp:2071
Integrator for (curl u, curl v) for Nedelec elements.
void Assemble()
Assembles the linear form i.e. sums over all domain/bdr integrators.
Definition: linearform.cpp:51
int Size() const
Returns the size of the vector.
Definition: vector.hpp:120
int GetNE() const
Returns number of elements.
Definition: mesh.hpp:618
virtual void Save(std::ostream &out) const
Definition: pgridfunc.cpp:398
Abstract parallel finite element space.
Definition: pfespace.hpp:28
virtual void ProjectCoefficient(Coefficient &coeff)
Definition: pgridfunc.cpp:290
void SetMaxIter(int max_iter)
Definition: petsc.cpp:1380
int main(int argc, char *argv[])
Definition: ex1.cpp:45
void SetPrintLevel(int print_lvl)
Definition: hypre.cpp:2185
PetscInt M() const
Returns the global number of rows.
Definition: petsc.cpp:373
void SetOperatorType(Operator::Type tid)
Set the operator type id for the parallel matrix/operator.
Class for parallel linear form.
Definition: plinearform.hpp:26
HYPRE_Int GetGlobalNumRows() const
Definition: hypre.hpp:379
int dim
Definition: ex3.cpp:47
void UniformRefinement(int i, const DSTable &, int *, int *, int *)
Definition: mesh.cpp:6741
Auxiliary class for BDDC customization.
Definition: petsc.hpp:545
T Max() const
Find the maximal element in the array, using the comparison operator < for class T.
Definition: array.cpp:109
HYPRE_Int GlobalTrueVSize() const
Definition: pfespace.hpp:247
void Assemble(int skip_zeros=1)
Assemble the local matrix.
virtual void Print(std::ostream &out=mfem::out) const
Definition: pmesh.cpp:3428
int Dimension() const
Definition: mesh.hpp:645
void PrintUsage(std::ostream &out) const
Definition: optparser.cpp:434
void SetMaxIter(int max_iter)
Definition: hypre.cpp:2175
int SpaceDimension() const
Definition: mesh.hpp:646
void AddDomainIntegrator(LinearFormIntegrator *lfi)
Adds new Domain Integrator.
Definition: linearform.cpp:19
bool StaticCondensationIsEnabled() const
Array< int > bdr_attributes
A list of all unique boundary attributes used by the Mesh.
Definition: mesh.hpp:174
PCG solver in hypre.
Definition: hypre.hpp:656
OutStream err(std::cerr)
Global stream used by the library for standard error output. Initially it uses the same std::streambuf as std::cerr.
Definition: globals.hpp:69
Base class Coefficient that may optionally depend on time.
Definition: coefficient.hpp:31
void AddOption(bool *var, const char *enable_short_name, const char *enable_long_name, const char *disable_short_name, const char *disable_long_name, const char *description, bool required=false)
Definition: optparser.hpp:74
void f_exact(const Vector &, Vector &)
Definition: ex3.cpp:227
virtual void RecoverFEMSolution(const Vector &X, const Vector &b, Vector &x)
virtual double ComputeL2Error(Coefficient *exsol[], const IntegrationRule *irs[]=NULL) const
Definition: pgridfunc.hpp:215
void SetSpace(ParFiniteElementSpace *fe)
Definition: petsc.hpp:559
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
Definition: hypre.cpp:2190
void AddDomainIntegrator(BilinearFormIntegrator *bfi)
Adds new Domain Integrator.
void PrintOptions(std::ostream &out) const
Definition: optparser.cpp:304
void SetTol(double tol)
Definition: petsc.cpp:1325
void E_exact(const Vector &, Vector &)
Definition: ex3.cpp:211
Class for parallel bilinear form.
Abstract class for hypre's solvers and preconditioners.
Definition: hypre.hpp:620
for VectorFiniteElements (Nedelec, Raviart-Thomas)
Definition: lininteg.hpp:231
Arbitrary order H(curl)-conforming Nedelec finite elements.
Definition: fe_coll.hpp:230
double kappa
Definition: ex3.cpp:46
Vector data type.
Definition: vector.hpp:48
Class for parallel grid function.
Definition: pgridfunc.hpp:32
Wrapper for hypre's ParCSR matrix class.
Definition: hypre.hpp:175
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve Ax=b with hypre's PCG.
Definition: hypre.cpp:2211
virtual void Mult(const Vector &b, Vector &x) const
Application of the solver.
Definition: petsc.cpp:2043
double freq
Definition: ex3.cpp:46
Class for parallel meshes.
Definition: pmesh.hpp:32
void SetEssBdrDofs(const Array< int > *essdofs, bool loc=false)
Specify dofs on the essential boundary.
Definition: petsc.hpp:564
Integrator for (Q u, v) for VectorFiniteElements.
void EnableStaticCondensation()
double sigma(const Vector &x)
Definition: maxwell.cpp:102
bool Good() const
Definition: optparser.hpp:120