MFEM v4.1.0
Finite element discretization library
ex3p.cpp
// MFEM Example 3 - Parallel Version
//
// Compile with: make ex3p
//
// Sample runs:  mpirun -np 4 ex3p -m ../data/star.mesh
//               mpirun -np 4 ex3p -m ../data/square-disc.mesh -o 2
//               mpirun -np 4 ex3p -m ../data/beam-tet.mesh
//               mpirun -np 4 ex3p -m ../data/beam-hex.mesh
//               mpirun -np 4 ex3p -m ../data/beam-hex.mesh -o 2 -pa
//               mpirun -np 4 ex3p -m ../data/escher.mesh
//               mpirun -np 4 ex3p -m ../data/escher.mesh -o 2
//               mpirun -np 4 ex3p -m ../data/fichera.mesh
//               mpirun -np 4 ex3p -m ../data/fichera-q2.vtk
//               mpirun -np 4 ex3p -m ../data/fichera-q3.mesh
//               mpirun -np 4 ex3p -m ../data/square-disc-nurbs.mesh
//               mpirun -np 4 ex3p -m ../data/beam-hex-nurbs.mesh
//               mpirun -np 4 ex3p -m ../data/amr-quad.mesh -o 2
//               mpirun -np 4 ex3p -m ../data/amr-hex.mesh
//               mpirun -np 4 ex3p -m ../data/star-surf.mesh -o 2
//               mpirun -np 4 ex3p -m ../data/mobius-strip.mesh -o 2 -f 0.1
//               mpirun -np 4 ex3p -m ../data/klein-bottle.mesh -o 2 -f 0.1
//
// Device sample runs:
//               mpirun -np 4 ex3p -m ../data/star.mesh -pa -d cuda
//               mpirun -np 4 ex3p -m ../data/star.mesh -pa -d raja-cuda
//               mpirun -np 4 ex3p -m ../data/star.mesh -pa -d raja-omp
//               mpirun -np 4 ex3p -m ../data/beam-hex.mesh -pa -d cuda
//
// Description:  This example code solves a simple electromagnetic diffusion
//               problem corresponding to the second order definite Maxwell
//               equation curl curl E + E = f with boundary condition
//               E x n = <given tangential field>. Here, we use a given exact
//               solution E and compute the corresponding r.h.s. f.
//               We discretize with Nedelec finite elements in 2D or 3D.
//
//               The example demonstrates the use of H(curl) finite element
//               spaces with the curl-curl and the (vector finite element) mass
//               bilinear form, as well as the computation of discretization
//               error when the exact solution is known. Static condensation is
//               also illustrated.
//
//               We recommend viewing examples 1-2 before viewing this example.
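//
// Note: in variational (weak) form, the problem above reads: find E in the
// Nedelec (H(curl)-conforming) space, with E x n prescribed on the boundary,
// such that
//
//    (1/mu curl E, curl v) + (sigma E, v) = (f, v)
//
// holds for all test functions v with zero tangential trace. With mu = sigma
// = 1, the two terms on the left are exactly the curl-curl and vector FE mass
// integrators assembled below.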

#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

// Exact solution, E, and r.h.s., f. See below for implementation.
void E_exact(const Vector &, Vector &);
void f_exact(const Vector &, Vector &);
double freq = 1.0, kappa;
int dim;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/beam-tet.mesh";
   int order = 1;
   bool static_cond = false;
   bool pa = false;
   const char *device_config = "cpu";
   bool visualization = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&freq, "-f", "--frequency", "Set the frequency for the exact"
                  " solution.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&pa, "-pa", "--partial-assembly", "-no-pa",
                  "--no-partial-assembly", "Enable Partial Assembly.");
   args.AddOption(&device_config, "-d", "--device",
                  "Device configuration string, see Device::Configure().");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");

   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }
   kappa = freq * M_PI;

   // 3. Enable hardware devices such as GPUs, and programming models such as
   //    CUDA, OCCA, RAJA and OpenMP based on command line options.
   Device device(device_config);
   if (myid == 0) { device.Print(); }

   // 4. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   dim = mesh->Dimension();
   int sdim = mesh->SpaceDimension();

   // 5. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
   {
      int ref_levels = (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }
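   // For example, a 3D mesh that starts with 64 elements gets
   // ref_levels = floor(log2(1000/64)/3) = 1, i.e. one refinement to
   // 64 * 2^3 = 512 elements; a second level (4096 elements) would exceed
   // the 1,000 element target.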

   // 6. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted. Tetrahedral
   //    meshes need to be reoriented before we can define high-order Nedelec
   //    spaces on them.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }
   pmesh->ReorientTetMesh();

   // 7. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Nedelec finite elements of the specified order.
   FiniteElementCollection *fec = new ND_FECollection(order, dim);
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 8. Determine the list of true (i.e. parallel conforming) essential
   //    boundary dofs. In this example, the boundary conditions are defined
   //    by marking all the boundary attributes from the mesh as essential
   //    (Dirichlet) and converting them to a list of true dofs.
   Array<int> ess_tdof_list;
   if (pmesh->bdr_attributes.Size())
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
      fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
   }

   // 9. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (f,phi_i) where f is given by the function f_exact and phi_i are the
   //    basis functions in the finite element fespace.
   VectorFunctionCoefficient f(sdim, f_exact);
   ParLinearForm *b = new ParLinearForm(fespace);
   b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
   b->Assemble();

   // 10. Define the solution vector x as a parallel finite element grid function
   //     corresponding to fespace. Initialize x by projecting the exact
   //     solution. Note that only values from the boundary edges will be used
   //     when eliminating the non-homogeneous boundary condition to modify the
   //     r.h.s. vector b.
   ParGridFunction x(fespace);
   VectorFunctionCoefficient E(sdim, E_exact);
   x.ProjectCoefficient(E);

   // 11. Set up the parallel bilinear form corresponding to the EM diffusion
   //     operator curl muinv curl + sigma I, by adding the curl-curl and the
   //     mass domain integrators.
   Coefficient *muinv = new ConstantCoefficient(1.0);
   Coefficient *sigma = new ConstantCoefficient(1.0);
   ParBilinearForm *a = new ParBilinearForm(fespace);
   if (pa) { a->SetAssemblyLevel(AssemblyLevel::PARTIAL); }
   a->AddDomainIntegrator(new CurlCurlIntegrator(*muinv));
   a->AddDomainIntegrator(new VectorFEMassIntegrator(*sigma));

   // 12. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
   if (static_cond) { a->EnableStaticCondensation(); }
   a->Assemble();

   OperatorPtr A;
   Vector B, X;
   a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B);

   // 13. Solve the system AX=B using PCG with the AMS preconditioner from hypre
   //     (in the full assembly case) or CG with Jacobi preconditioner (in the
   //     partial assembly case).

   if (pa) // Jacobi preconditioning in partial assembly mode
   {
      OperatorJacobiSmoother Jacobi(*a, ess_tdof_list);

      CGSolver cg(MPI_COMM_WORLD);
      cg.SetRelTol(1e-12);
      cg.SetMaxIter(1000);
      cg.SetPrintLevel(1);
      cg.SetOperator(*A);
      cg.SetPreconditioner(Jacobi);
      cg.Mult(B, X);
   }
   else
   {
      if (myid == 0)
      {
         cout << "Size of linear system: "
              << A.As<HypreParMatrix>()->GetGlobalNumRows() << endl;
      }

      ParFiniteElementSpace *prec_fespace =
         (a->StaticCondensationIsEnabled() ? a->SCParFESpace() : fespace);
      HypreAMS ams(*A.As<HypreParMatrix>(), prec_fespace);
      HyprePCG pcg(*A.As<HypreParMatrix>());
      pcg.SetTol(1e-12);
      pcg.SetMaxIter(500);
      pcg.SetPrintLevel(2);
      pcg.SetPreconditioner(ams);
      pcg.Mult(B, X);
   }

   // 14. Recover the parallel grid function corresponding to X. This is the
   //     local finite element solution on each processor.
   a->RecoverFEMSolution(X, *b, x);

   // 15. Compute and print the L^2 norm of the error.
   {
      double err = x.ComputeL2Error(E);
      if (myid == 0)
      {
         cout << "\n|| E_h - E ||_{L^2} = " << err << '\n' << endl;
      }
   }

   // 16. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 17. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *pmesh << x << flush;
   }

   // 18. Free the used memory.
   delete a;
   delete sigma;
   delete muinv;
   delete b;
   delete fespace;
   delete fec;
   delete pmesh;

   MPI_Finalize();

   return 0;
}


void E_exact(const Vector &x, Vector &E)
{
   if (dim == 3)
   {
      E(0) = sin(kappa * x(1));
      E(1) = sin(kappa * x(2));
      E(2) = sin(kappa * x(0));
   }
   else
   {
      E(0) = sin(kappa * x(1));
      E(1) = sin(kappa * x(0));
      if (x.Size() == 3) { E(2) = 0.0; }
   }
}

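// Note: a direct calculation for the field returned by E_exact shows that
// curl curl E = kappa^2 E (each component is a single sine mode), so the
// right-hand side of curl curl E + E = f is f = (1 + kappa^2) E, which is
// what f_exact returns.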
void f_exact(const Vector &x, Vector &f)
{
   if (dim == 3)
   {
      f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
      f(1) = (1. + kappa * kappa) * sin(kappa * x(2));
      f(2) = (1. + kappa * kappa) * sin(kappa * x(0));
   }
   else
   {
      f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
      f(1) = (1. + kappa * kappa) * sin(kappa * x(0));
      if (x.Size() == 3) { f(2) = 0.0; }
   }
}