MFEM v3.1
Finite element discretization library
ex3p.cpp
//                       MFEM Example 3 - Parallel Version
//
// Compile with: make ex3p
//
// Sample runs:  mpirun -np 4 ex3p -m ../data/star.mesh
//               mpirun -np 4 ex3p -m ../data/square-disc.mesh -o 2
//               mpirun -np 4 ex3p -m ../data/beam-tet.mesh
//               mpirun -np 4 ex3p -m ../data/beam-hex.mesh
//               mpirun -np 4 ex3p -m ../data/escher.mesh
//               mpirun -np 4 ex3p -m ../data/fichera.mesh
//               mpirun -np 4 ex3p -m ../data/fichera-q2.vtk
//               mpirun -np 4 ex3p -m ../data/fichera-q3.mesh
//               mpirun -np 4 ex3p -m ../data/square-disc-nurbs.mesh
//               mpirun -np 4 ex3p -m ../data/beam-hex-nurbs.mesh
//               mpirun -np 4 ex3p -m ../data/amr-quad.mesh -o 2
//               mpirun -np 4 ex3p -m ../data/amr-hex.mesh
//               mpirun -np 4 ex3p -m ../data/star-surf.mesh -o 2
//               mpirun -np 4 ex3p -m ../data/mobius-strip.mesh -o 2 -f 0.1
//               mpirun -np 4 ex3p -m ../data/klein-bottle.mesh -o 2 -f 0.1
//
// Description:  This example code solves a simple electromagnetic diffusion
//               problem corresponding to the second order definite Maxwell
//               equation curl curl E + E = f with boundary condition
//               E x n = <given tangential field>. Here, we use a given exact
//               solution E and compute the corresponding r.h.s. f.
//               We discretize with Nedelec finite elements in 2D or 3D.
//
//               The example demonstrates the use of H(curl) finite element
//               spaces with the curl-curl and the (vector finite element) mass
//               bilinear form, as well as the computation of discretization
//               error when the exact solution is known. Static condensation is
//               also illustrated.
//
//               We recommend viewing examples 1-2 before viewing this example.
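//
//               For reference, the forms assembled in steps 8 and 10 below
//               correspond to the weak formulation (sketched here with
//               mu = sigma = 1, as used in this example): find E in H(curl)
//               with the given tangential trace E x n such that
//
//                  (1/mu curl E, curl w) + (sigma E, w) = (f, w)
//
//               for all test functions w in H(curl) with w x n = 0 on the
//               boundary. The CurlCurlIntegrator and VectorFEMassIntegrator
//               supply the two terms on the left, and the
//               VectorFEDomainLFIntegrator supplies the right-hand side.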

#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

// Exact solution, E, and r.h.s., f. See below for implementation.
void E_exact(const Vector &, Vector &);
void f_exact(const Vector &, Vector &);
double freq = 1.0, kappa;
int dim;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/beam-tet.mesh";
   int order = 1;
   bool static_cond = false;
   bool visualization = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&freq, "-f", "--frequency", "Set the frequency for the exact"
                  " solution.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }
   kappa = freq * M_PI;

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh;
   ifstream imesh(mesh_file);
   if (!imesh)
   {
      if (myid == 0)
      {
         cerr << "\nCan not open mesh file: " << mesh_file << '\n' << endl;
      }
      MPI_Finalize();
      return 2;
   }
   mesh = new Mesh(imesh, 1, 1);
   imesh.close();
   dim = mesh->Dimension();
   int sdim = mesh->SpaceDimension();

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
   {
      int ref_levels =
         (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
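      // Each uniform refinement multiplies the element count by 2^dim, so the
      // formula above picks the largest integer number of levels for which
      // mesh->GetNE() * 2^(dim*ref_levels) stays at or below 1000.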
      for (int l = 0; l < ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted. Tetrahedral
   //    meshes need to be reoriented before we can define high-order Nedelec
   //    spaces on them.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }
   pmesh->ReorientTetMesh();

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Nedelec finite elements of the specified order.
   FiniteElementCollection *fec = new ND_FECollection(order, dim);
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 7. Determine the list of true (i.e. parallel conforming) essential
   //    boundary dofs. In this example, the boundary conditions are defined
   //    by marking all the boundary attributes from the mesh as essential
   //    (Dirichlet) and converting them to a list of true dofs.
   Array<int> ess_tdof_list;
   if (pmesh->bdr_attributes.Size())
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
      fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
   }

   // 8. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (f,phi_i) where f is given by the function f_exact and phi_i are the
   //    basis functions in the finite element fespace.
   VectorFunctionCoefficient f(sdim, f_exact);
   ParLinearForm *b = new ParLinearForm(fespace);
   b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
   b->Assemble();

   // 9. Define the solution vector x as a parallel finite element grid function
   //    corresponding to fespace. Initialize x by projecting the exact
   //    solution. Note that only values from the boundary edges will be used
   //    when eliminating the non-homogeneous boundary condition to modify the
   //    r.h.s. vector b.
   ParGridFunction x(fespace);
   VectorFunctionCoefficient E(sdim, E_exact);
   x.ProjectCoefficient(E);

   // 10. Set up the parallel bilinear form corresponding to the EM diffusion
   //     operator curl muinv curl + sigma I, by adding the curl-curl and the
   //     mass domain integrators.
   Coefficient *muinv = new ConstantCoefficient(1.0);
   Coefficient *sigma = new ConstantCoefficient(1.0);
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new CurlCurlIntegrator(*muinv));
   a->AddDomainIntegrator(new VectorFEMassIntegrator(*sigma));

   // 11. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
   if (static_cond) { a->EnableStaticCondensation(); }
   a->Assemble();

   HypreParMatrix A;
   Vector B, X;
   a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B);

   if (myid == 0)
   {
      cout << "Size of linear system: " << A.GetGlobalNumRows() << endl;
   }

   // 12. Define and apply a parallel PCG solver for AX=B with the AMS
   //     preconditioner from hypre.
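   //     (AMS is hypre's Auxiliary-space Maxwell Solver, an algebraic
   //     multigrid-based preconditioner designed for H(curl) problems.)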
   ParFiniteElementSpace *prec_fespace =
      (a->StaticCondensationIsEnabled() ? a->SCParFESpace() : fespace);
   HypreSolver *ams = new HypreAMS(A, prec_fespace);
   HyprePCG *pcg = new HyprePCG(A);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(500);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*ams);
   pcg->Mult(B, X);

   // 13. Recover the parallel grid function corresponding to X. This is the
   //     local finite element solution on each processor.
   a->RecoverFEMSolution(X, *b, x);

   // 14. Compute and print the L^2 norm of the error.
   {
      double err = x.ComputeL2Error(E);
      if (myid == 0)
      {
         cout << "\n|| E_h - E ||_{L^2} = " << err << '\n' << endl;
      }
   }

   // 15. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 16. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *pmesh << x << flush;
   }

   // 17. Free the used memory.
   delete pcg;
   delete ams;
   delete a;
   delete sigma;
   delete muinv;
   delete b;
   delete fespace;
   delete fec;
   delete pmesh;

   MPI_Finalize();

   return 0;
}


void E_exact(const Vector &x, Vector &E)
{
   if (dim == 3)
   {
      E(0) = sin(kappa * x(1));
      E(1) = sin(kappa * x(2));
      E(2) = sin(kappa * x(0));
   }
   else
   {
      E(0) = sin(kappa * x(1));
      E(1) = sin(kappa * x(0));
      if (x.Size() == 3) { E(2) = 0.0; }
   }
}

void f_exact(const Vector &x, Vector &f)
{
   if (dim == 3)
   {
      f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
      f(1) = (1. + kappa * kappa) * sin(kappa * x(2));
      f(2) = (1. + kappa * kappa) * sin(kappa * x(0));
   }
   else
   {
      f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
      f(1) = (1. + kappa * kappa) * sin(kappa * x(0));
      if (x.Size() == 3) { f(2) = 0.0; }
   }
}
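
For reference, f_exact above is consistent with E_exact: in 3D,

   E           = (sin(kappa*y), sin(kappa*z), sin(kappa*x))
   curl E      = -kappa * (cos(kappa*z), cos(kappa*x), cos(kappa*y))
   curl curl E = kappa^2 * (sin(kappa*y), sin(kappa*z), sin(kappa*x))

so curl curl E + E = (1 + kappa^2) * E, which is exactly the
(1. + kappa * kappa) factor used in f_exact. The 2D branch works out the
same way using the scalar curl.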