MFEM v3.1
Finite element discretization library
ex11p.cpp
1 // MFEM Example 11 - Parallel Version
2 //
3 // Compile with: make ex11p
4 //
5 // Sample runs: mpirun -np 4 ex11p -m ../data/square-disc.mesh
6 // mpirun -np 4 ex11p -m ../data/star.mesh
7 // mpirun -np 4 ex11p -m ../data/escher.mesh
8 // mpirun -np 4 ex11p -m ../data/fichera.mesh
9 // mpirun -np 4 ex11p -m ../data/square-disc-p2.vtk -o 2
10 // mpirun -np 4 ex11p -m ../data/square-disc-p3.mesh -o 3
11 // mpirun -np 4 ex11p -m ../data/square-disc-nurbs.mesh -o -1
12 // mpirun -np 4 ex11p -m ../data/disc-nurbs.mesh -o -1 -n 20
13 // mpirun -np 4 ex11p -m ../data/pipe-nurbs.mesh -o -1
14 // mpirun -np 4 ex11p -m ../data/ball-nurbs.mesh -o 2
15 // mpirun -np 4 ex11p -m ../data/star-surf.mesh
16 // mpirun -np 4 ex11p -m ../data/square-disc-surf.mesh
17 // mpirun -np 4 ex11p -m ../data/inline-segment.mesh
18 // mpirun -np 4 ex11p -m ../data/amr-quad.mesh
19 // mpirun -np 4 ex11p -m ../data/amr-hex.mesh
20 // mpirun -np 4 ex11p -m ../data/mobius-strip.mesh -n 8
21 // mpirun -np 4 ex11p -m ../data/klein-bottle.mesh -n 10
22 //
23 // Description: This example code demonstrates the use of MFEM to solve the
24 // eigenvalue problem -Delta u = lambda u with homogeneous
25 // Dirichlet boundary conditions.
26 //
27 // We compute a number of the lowest eigenmodes by discretizing
28 // the Laplacian and Mass operators using a FE space of the
29 // specified order, or an isoparametric/isogeometric space if
30 // order < 1 (quadratic for quadratic curvilinear mesh, NURBS for
31 // NURBS mesh, etc.)
32 //
33 // The example highlights the use of the LOBPCG eigenvalue solver
34 // together with the BoomerAMG preconditioner in HYPRE. Reusing a
35 // single GLVis visualization window for multiple eigenfunctions
36 // is also illustrated.
37 //
38 // We recommend viewing Example 1 before viewing this example.
39 
40 #include "mfem.hpp"
41 #include <fstream>
42 #include <iostream>
43 
44 using namespace std;
45 using namespace mfem;
46 
47 int main(int argc, char *argv[])
48 {
49  // 1. Initialize MPI.
50  int num_procs, myid;
51  MPI_Init(&argc, &argv);
52  MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
53  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
54 
55  // 2. Parse command-line options.
56  const char *mesh_file = "../data/star.mesh";
57  int ser_ref_levels = 2;
58  int par_ref_levels = 1;
59  int order = 1;
60  int nev = 5;
61  bool visualization = 1;
62 
63  OptionsParser args(argc, argv);
64  args.AddOption(&mesh_file, "-m", "--mesh",
65  "Mesh file to use.");
66  args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
67  "Number of times to refine the mesh uniformly in serial.");
68  args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
69  "Number of times to refine the mesh uniformly in parallel.");
70  args.AddOption(&order, "-o", "--order",
71  "Finite element order (polynomial degree) or -1 for"
72  " isoparametric space.");
73  args.AddOption(&nev, "-n", "--num-eigs",
74  "Number of desired eigenmodes.");
75  args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
76  "--no-visualization",
77  "Enable or disable GLVis visualization.");
78  args.Parse();
79  if (!args.Good())
80  {
81  if (myid == 0)
82  {
83  args.PrintUsage(cout);
84  }
85  MPI_Finalize();
86  return 1;
87  }
88  if (myid == 0)
89  {
90  args.PrintOptions(cout);
91  }
92 
93  // 3. Read the (serial) mesh from the given mesh file on all processors. We
94  // can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
95  // and volume meshes with the same code.
96  Mesh *mesh;
97  ifstream imesh(mesh_file);
98  if (!imesh)
99  {
100  if (myid == 0)
101  {
102  cerr << "\nCan not open mesh file: " << mesh_file << '\n' << endl;
103  }
104  MPI_Finalize();
105  return 2;
106  }
107  mesh = new Mesh(imesh, 1, 1);
108  imesh.close();
109  int dim = mesh->Dimension();
110 
111  // 4. Refine the serial mesh on all processors to increase the resolution. In
112  // this example we do 'ser_ref_levels' of uniform refinement (2 by default, or
113  // specified on the command line with -rs).
114  for (int lev = 0; lev < ser_ref_levels; lev++)
115  {
116  mesh->UniformRefinement();
117  }
118 
119  // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
120  // this mesh further in parallel to increase the resolution (1 time by
121  // default, or specified on the command line with -rp). Once the parallel
122  // mesh is defined, the serial mesh can be deleted.
123  ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
124  delete mesh;
125  for (int lev = 0; lev < par_ref_levels; lev++)
126  {
127  pmesh->UniformRefinement();
128  }
129 
130  // 6. Define a parallel finite element space on the parallel mesh. Here we
131  // use continuous Lagrange finite elements of the specified order. If
132  // order < 1, we instead use an isoparametric/isogeometric space.
133  FiniteElementCollection *fec;
134  if (order > 0)
135  {
136  fec = new H1_FECollection(order, dim);
137  }
138  else if (pmesh->GetNodes())
139  {
140  fec = pmesh->GetNodes()->OwnFEC();
141  }
142  else
143  {
144  fec = new H1_FECollection(order = 1, dim);
145  }
146  ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
147  HYPRE_Int size = fespace->GlobalTrueVSize();
148  if (myid == 0)
149  {
150  cout << "Number of unknowns: " << size << endl;
151  }
152 
153  // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
154  // element space. The first corresponds to the Laplacian operator -Delta,
155  // while the second is a simple mass matrix needed on the right hand side
156  // of the generalized eigenvalue problem below. The boundary conditions
157  // are implemented by elimination with special values on the diagonal to
158  // shift the Dirichlet eigenvalues out of the computational range. After
159  // serial and parallel assembly we extract the corresponding parallel
160  // matrices A and M.
161  ConstantCoefficient one(1.0);
162  Array<int> ess_bdr;
163  if (pmesh->bdr_attributes.Size())
164  {
165  ess_bdr.SetSize(pmesh->bdr_attributes.Max());
166  ess_bdr = 1;
167  }
168 
169  ParBilinearForm *a = new ParBilinearForm(fespace);
170  a->AddDomainIntegrator(new DiffusionIntegrator(one));
171  if (pmesh->bdr_attributes.Size() == 0)
172  {
173  // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
174  // closed surface.
175  a->AddDomainIntegrator(new MassIntegrator(one));
176  }
177  a->Assemble();
178  a->EliminateEssentialBCDiag(ess_bdr, 1.0);
179  a->Finalize();
180 
181  ParBilinearForm *m = new ParBilinearForm(fespace);
182  m->AddDomainIntegrator(new MassIntegrator(one));
183  m->Assemble();
184  // shift the eigenvalue corresponding to eliminated dofs to a large value
185  m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
186  m->Finalize();
187 
188  HypreParMatrix *A = a->ParallelAssemble();
189  HypreParMatrix *M = m->ParallelAssemble();
190 
191  delete a;
192  delete m;
193 
194  // 8. Define and configure the LOBPCG eigensolver and the BoomerAMG
195  // preconditioner for A to be used within the solver. Set the matrices
196  // which define the generalized eigenproblem A x = lambda M x.
197  HypreBoomerAMG * amg = new HypreBoomerAMG(*A);
198  amg->SetPrintLevel(0);
199 
200  HypreLOBPCG * lobpcg = new HypreLOBPCG(MPI_COMM_WORLD);
201  lobpcg->SetNumModes(nev);
202  lobpcg->SetPreconditioner(*amg);
203  lobpcg->SetMaxIter(100);
204  lobpcg->SetTol(1e-8);
205  lobpcg->SetPrecondUsageMode(1);
206  lobpcg->SetPrintLevel(1);
207  lobpcg->SetMassMatrix(*M);
208  lobpcg->SetOperator(*A);
209 
210  // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
211  // parallel grid function to represent each of the eigenmodes returned by
212  // the solver.
213  Array<double> eigenvalues;
214  lobpcg->Solve();
215  lobpcg->GetEigenvalues(eigenvalues);
216  ParGridFunction x(fespace);
217 
218  // 10. Save the refined mesh and the modes in parallel. This output can be
219  // viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
220  {
221  ostringstream mesh_name, mode_name;
222  mesh_name << "mesh." << setfill('0') << setw(6) << myid;
223 
224  ofstream mesh_ofs(mesh_name.str().c_str());
225  mesh_ofs.precision(8);
226  pmesh->Print(mesh_ofs);
227 
228  for (int i=0; i<nev; i++)
229  {
230  // convert eigenvector from HypreParVector to ParGridFunction
231  x = lobpcg->GetEigenvector(i);
232 
233  mode_name << "mode_" << setfill('0') << setw(2) << i << "."
234  << setfill('0') << setw(6) << myid;
235 
236  ofstream mode_ofs(mode_name.str().c_str());
237  mode_ofs.precision(8);
238  x.Save(mode_ofs);
239  mode_name.str("");
240  }
241  }
242 
243  // 11. Send the solution by socket to a GLVis server.
244  if (visualization)
245  {
246  char vishost[] = "localhost";
247  int visport = 19916;
248  socketstream mode_sock(vishost, visport);
249  mode_sock.precision(8);
250 
251  for (int i=0; i<nev; i++)
252  {
253  if ( myid == 0 )
254  {
255  cout << "Eigenmode " << i+1 << '/' << nev
256  << ", Lambda = " << eigenvalues[i] << endl;
257  }
258 
259  // convert eigenvector from HypreParVector to ParGridFunction
260  x = lobpcg->GetEigenvector(i);
261 
262  mode_sock << "parallel " << num_procs << " " << myid << "\n"
263  << "solution\n" << *pmesh << x << flush
264  << "window_title 'Eigenmode " << i+1 << '/' << nev
265  << ", Lambda = " << eigenvalues[i] << "'" << endl;
266 
267  char c;
268  if (myid == 0)
269  {
270  cout << "press (q)uit or (c)ontinue --> " << flush;
271  cin >> c;
272  }
273  MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
274 
275  if (c != 'c')
276  {
277  break;
278  }
279  }
280  mode_sock.close();
281  }
282 
283  // 12. Free the used memory.
284  delete lobpcg;
285  delete amg;
286  delete M;
287  delete A;
288 
289  delete fespace;
290  if (order > 0)
291  {
292  delete fec;
293  }
294  delete pmesh;
295 
296  MPI_Finalize();
297 
298  return 0;
299 }
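For reference, the bilinear forms a(.,.) and m(.,.) assembled in step 7 are the standard stiffness and mass matrices of the H1 space, so the LOBPCG solver configured in step 8 operates on the discrete generalized eigenproblem (restated here in the notation of the comments above, with \varphi_i denoting the finite element basis functions):

    A_{ij} = \int_\Omega \nabla\varphi_j \cdot \nabla\varphi_i \, dx,
    M_{ij} = \int_\Omega \varphi_j \, \varphi_i \, dx,
    A x = \lambda M x.

For meshes without a boundary (periodic meshes or closed surfaces), step 7 also adds the mass term to A, i.e. it solves (A + M) x = \lambda M x, so each reported eigenvalue is the corresponding Laplacian eigenvalue plus one and A remains positive definite.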
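The diagonal values passed to EliminateEssentialBCDiag in step 7 are what shift the Dirichlet eigenvalues out of the computational range: an eliminated boundary dof keeps a_ii = 1.0 in A and m_ii = numeric_limits<double>::min() in M, so its generalized eigenvalue is

    \lambda = a_ii / m_ii = 1.0 / 2.2250738585072014e-308, roughly 4.5e+307,

far above the nev lowest modes that the solver is asked to compute.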