MFEM v3.3
Finite element discretization library
ex11p.cpp
//                       MFEM Example 11 - Parallel Version
//
// Compile with: make ex11p
//
// Sample runs:  mpirun -np 4 ex11p -m ../data/square-disc.mesh
//               mpirun -np 4 ex11p -m ../data/star.mesh
//               mpirun -np 4 ex11p -m ../data/escher.mesh
//               mpirun -np 4 ex11p -m ../data/fichera.mesh
//               mpirun -np 4 ex11p -m ../data/square-disc-p2.vtk -o 2
//               mpirun -np 4 ex11p -m ../data/square-disc-p3.mesh -o 3
//               mpirun -np 4 ex11p -m ../data/square-disc-nurbs.mesh -o -1
//               mpirun -np 4 ex11p -m ../data/disc-nurbs.mesh -o -1 -n 20
//               mpirun -np 4 ex11p -m ../data/pipe-nurbs.mesh -o -1
//               mpirun -np 4 ex11p -m ../data/ball-nurbs.mesh -o 2
//               mpirun -np 4 ex11p -m ../data/star-surf.mesh
//               mpirun -np 4 ex11p -m ../data/square-disc-surf.mesh
//               mpirun -np 4 ex11p -m ../data/inline-segment.mesh
//               mpirun -np 4 ex11p -m ../data/amr-quad.mesh
//               mpirun -np 4 ex11p -m ../data/amr-hex.mesh
//               mpirun -np 4 ex11p -m ../data/mobius-strip.mesh -n 8
//               mpirun -np 4 ex11p -m ../data/klein-bottle.mesh -n 10
//
// Description:  This example code demonstrates the use of MFEM to solve the
//               eigenvalue problem -Delta u = lambda u with homogeneous
//               Dirichlet boundary conditions.
//
//               We compute a number of the lowest eigenmodes by discretizing
//               the Laplacian and Mass operators using a FE space of the
//               specified order, or an isoparametric/isogeometric space if
//               order < 1 (quadratic for quadratic curvilinear mesh, NURBS for
//               NURBS mesh, etc.)
//
//               The example highlights the use of the LOBPCG eigenvalue solver
//               together with the BoomerAMG preconditioner in HYPRE, as well as
//               optionally the SuperLU parallel direct solver. Reusing a single
//               GLVis visualization window for multiple eigenfunctions is also
//               illustrated.
//
//               We recommend viewing Example 1 before viewing this example.

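// Weak formulation used below: find u in the FE space and lambda such that
//
//    (grad u, grad v) = lambda (u, v)   for all test functions v,
//
// which after assembly becomes the generalized matrix eigenvalue problem
// A x = lambda M x solved by LOBPCG, with A the stiffness (Laplacian) matrix
// and M the mass matrix.
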
#include "mfem.hpp"
#include <fstream>
#include <iostream>
#include <limits> // for numeric_limits, used when shifting eliminated dofs

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/star.mesh";
   int ser_ref_levels = 2;
   int par_ref_levels = 1;
   int order = 1;
   int nev = 5;
   bool slu_solver = false;
   bool visualization = true;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
                  "Number of times to refine the mesh uniformly in serial.");
   args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
                  "Number of times to refine the mesh uniformly in parallel.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&nev, "-n", "--num-eigs",
                  "Number of desired eigenmodes.");
#ifdef MFEM_USE_SUPERLU
   args.AddOption(&slu_solver, "-slu", "--superlu", "-no-slu",
                  "--no-superlu", "Use the SuperLU Solver.");
#endif
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement (2 by default, or
   //    specified on the command line with -rs).
   for (int lev = 0; lev < ser_ref_levels; lev++)
   {
      mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution (1 time by
   //    default, or specified on the command line with -rp). Once the parallel
   //    mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   for (int lev = 0; lev < par_ref_levels; lev++)
   {
      pmesh->UniformRefinement();
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
   }
   else if (pmesh->GetNodes())
   {
      fec = pmesh->GetNodes()->OwnFEC();
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
   }
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of unknowns: " << size << endl;
   }

   // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
   //    element space. The first corresponds to the Laplacian operator -Delta,
   //    while the second is a simple mass matrix needed on the right hand side
   //    of the generalized eigenvalue problem below. The boundary conditions
   //    are implemented by elimination with special values on the diagonal to
   //    shift the Dirichlet eigenvalues out of the computational range. After
   //    serial and parallel assembly we extract the corresponding parallel
   //    matrices A and M.
   ConstantCoefficient one(1.0);
   Array<int> ess_bdr;
   if (pmesh->bdr_attributes.Size())
   {
      ess_bdr.SetSize(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
   }

   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DiffusionIntegrator(one));
   if (pmesh->bdr_attributes.Size() == 0)
   {
      // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
      // closed surface.
      a->AddDomainIntegrator(new MassIntegrator(one));
   }
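   // (Note: without a boundary the pure Laplacian has a zero eigenvalue for
   // the constant mode, so the stiffness matrix alone is only positive
   // semi-definite; adding the mass term keeps A SPD and simply shifts every
   // computed eigenvalue up by one.)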
   a->Assemble();
   a->EliminateEssentialBCDiag(ess_bdr, 1.0);
   a->Finalize();

   ParBilinearForm *m = new ParBilinearForm(fespace);
   m->AddDomainIntegrator(new MassIntegrator(one));
   m->Assemble();
   // shift the eigenvalue corresponding to eliminated dofs to a large value
   m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
   m->Finalize();
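   // (For an eliminated Dirichlet dof i this leaves A_ii = 1 and M_ii equal to
   // DBL_MIN, so its generalized eigenvalue is roughly 1/DBL_MIN ~ 4.5e+307,
   // far above the 'nev' lowest modes requested from LOBPCG.)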

   HypreParMatrix *A = a->ParallelAssemble();
   HypreParMatrix *M = m->ParallelAssemble();

#ifdef MFEM_USE_SUPERLU
   Operator * Arow = NULL;
   if (slu_solver)
   {
      Arow = new SuperLURowLocMatrix(*A);
   }
#endif

   delete a;
   delete m;

   // 8. Define and configure the LOBPCG eigensolver and the BoomerAMG
   //    preconditioner for A to be used within the solver. Set the matrices
   //    which define the generalized eigenproblem A x = lambda M x.
   Solver * precond = NULL;
   if (!slu_solver)
   {
      HypreBoomerAMG * amg = new HypreBoomerAMG(*A);
      amg->SetPrintLevel(0);
      precond = amg;
   }
#ifdef MFEM_USE_SUPERLU
   else
   {
      SuperLUSolver * superlu = new SuperLUSolver(MPI_COMM_WORLD);
      superlu->SetPrintStatistics(false);
      superlu->SetSymmetricPattern(true);
      superlu->SetColumnPermutation(superlu::PARMETIS);
      superlu->SetOperator(*Arow);
      precond = superlu;
   }
#endif
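   // (When MFEM is built with SuperLU support, the direct solver can be
   // selected at run time, e.g.: mpirun -np 4 ex11p -m ../data/star.mesh -slu)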

   HypreLOBPCG * lobpcg = new HypreLOBPCG(MPI_COMM_WORLD);
   lobpcg->SetNumModes(nev);
   lobpcg->SetPreconditioner(*precond);
   lobpcg->SetMaxIter(200);
   lobpcg->SetTol(1e-8);
   lobpcg->SetPrecondUsageMode(1);
   lobpcg->SetPrintLevel(1);
   lobpcg->SetMassMatrix(*M);
   lobpcg->SetOperator(*A);

   // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
   //    parallel grid function to represent each of the eigenmodes returned by
   //    the solver.
   Array<double> eigenvalues;
   lobpcg->Solve();
   lobpcg->GetEigenvalues(eigenvalues);
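   // (Sanity check: with homogeneous Dirichlet conditions on a 2D unit-square
   // domain the exact eigenvalues are pi^2*(m^2 + n^2), m,n = 1,2,..., i.e.
   // approximately 19.7, 49.3, 49.3, 78.9 and 98.7 for the five lowest modes,
   // which the discrete values should approach under refinement.)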
   ParGridFunction x(fespace);

   // 10. Save the refined mesh and the modes in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
   {
      ostringstream mesh_name, mode_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      for (int i=0; i<nev; i++)
      {
         // convert eigenvector from HypreParVector to ParGridFunction
         x = lobpcg->GetEigenvector(i);

         mode_name << "mode_" << setfill('0') << setw(2) << i << "."
                   << setfill('0') << setw(6) << myid;

         ofstream mode_ofs(mode_name.str().c_str());
         mode_ofs.precision(8);
         x.Save(mode_ofs);
         mode_name.str("");
      }
   }
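   // (For example, with 4 MPI ranks the first mode saved above can be viewed
   // with: glvis -np 4 -m mesh -g mode_00)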

   // 11. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      socketstream mode_sock(vishost, visport);
      mode_sock.precision(8);

      for (int i=0; i<nev; i++)
      {
         if ( myid == 0 )
         {
            cout << "Eigenmode " << i+1 << '/' << nev
                 << ", Lambda = " << eigenvalues[i] << endl;
         }

         // convert eigenvector from HypreParVector to ParGridFunction
         x = lobpcg->GetEigenvector(i);

         mode_sock << "parallel " << num_procs << " " << myid << "\n"
                   << "solution\n" << *pmesh << x << flush
                   << "window_title 'Eigenmode " << i+1 << '/' << nev
                   << ", Lambda = " << eigenvalues[i] << "'" << endl;

         char c;
         if (myid == 0)
         {
            cout << "press (q)uit or (c)ontinue --> " << flush;
            cin >> c;
         }
         MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

         if (c != 'c')
         {
            break;
         }
      }
      mode_sock.close();
   }

   // 12. Free the used memory.
   delete lobpcg;
   delete precond;
   delete M;
   delete A;
#ifdef MFEM_USE_SUPERLU
   delete Arow;
#endif

   delete fespace;
   if (order > 0)
   {
      delete fec;
   }
   delete pmesh;

   MPI_Finalize();

   return 0;
}