MFEM  v4.0
Finite element discretization library
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Pages
ex11p.cpp
Go to the documentation of this file.
1 // MFEM Example 11 - Parallel Version
2 //
3 // Compile with: make ex11p
4 //
5 // Sample runs: mpirun -np 4 ex11p -m ../data/square-disc.mesh
6 // mpirun -np 4 ex11p -m ../data/star.mesh
7 // mpirun -np 4 ex11p -m ../data/star-mixed.mesh
8 // mpirun -np 4 ex11p -m ../data/escher.mesh
9 // mpirun -np 4 ex11p -m ../data/fichera.mesh
10 // mpirun -np 4 ex11p -m ../data/fichera-mixed.mesh
11 // mpirun -np 4 ex11p -m ../data/toroid-wedge.mesh -o 2
12 // mpirun -np 4 ex11p -m ../data/square-disc-p2.vtk -o 2
13 // mpirun -np 4 ex11p -m ../data/square-disc-p3.mesh -o 3
14 // mpirun -np 4 ex11p -m ../data/square-disc-nurbs.mesh -o -1
15 // mpirun -np 4 ex11p -m ../data/disc-nurbs.mesh -o -1 -n 20
16 // mpirun -np 4 ex11p -m ../data/pipe-nurbs.mesh -o -1
17 // mpirun -np 4 ex11p -m ../data/ball-nurbs.mesh -o 2
18 // mpirun -np 4 ex11p -m ../data/star-surf.mesh
19 // mpirun -np 4 ex11p -m ../data/square-disc-surf.mesh
20 // mpirun -np 4 ex11p -m ../data/inline-segment.mesh
21 // mpirun -np 4 ex11p -m ../data/inline-quad.mesh
22 // mpirun -np 4 ex11p -m ../data/inline-tri.mesh
23 // mpirun -np 4 ex11p -m ../data/inline-hex.mesh
24 // mpirun -np 4 ex11p -m ../data/inline-tet.mesh
25 // mpirun -np 4 ex11p -m ../data/inline-wedge.mesh -s 83
26 // mpirun -np 4 ex11p -m ../data/amr-quad.mesh
27 // mpirun -np 4 ex11p -m ../data/amr-hex.mesh
28 // mpirun -np 4 ex11p -m ../data/mobius-strip.mesh -n 8
29 // mpirun -np 4 ex11p -m ../data/klein-bottle.mesh -n 10
30 //
31 // Description: This example code demonstrates the use of MFEM to solve the
32 // eigenvalue problem -Delta u = lambda u with homogeneous
33 // Dirichlet boundary conditions.
34 //
35 // We compute a number of the lowest eigenmodes by discretizing
36 // the Laplacian and Mass operators using a FE space of the
37 // specified order, or an isoparametric/isogeometric space if
38 // order < 1 (quadratic for quadratic curvilinear mesh, NURBS for
39 // NURBS mesh, etc.)
40 //
41 // The example highlights the use of the LOBPCG eigenvalue solver
42 // together with the BoomerAMG preconditioner in HYPRE, as well as
43 // optionally the SuperLU or STRUMPACK parallel direct solvers.
44 // Reusing a single GLVis visualization window for multiple
45 // eigenfunctions is also illustrated.
46 //
47 // We recommend viewing Example 1 before viewing this example.
48 
49 #include "mfem.hpp"
50 #include <fstream>
51 #include <iostream>
52 
53 using namespace std;
54 using namespace mfem;
55 
56 int main(int argc, char *argv[])
57 {
58  // 1. Initialize MPI.
59  int num_procs, myid;
60  MPI_Init(&argc, &argv);
61  MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
62  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
63 
64  // 2. Parse command-line options.
65  const char *mesh_file = "../data/star.mesh";
66  int ser_ref_levels = 2;
67  int par_ref_levels = 1;
68  int order = 1;
69  int nev = 5;
70  int seed = 75;
71  bool slu_solver = false;
72  bool sp_solver = false;
73  bool visualization = 1;
74 
75  OptionsParser args(argc, argv);
76  args.AddOption(&mesh_file, "-m", "--mesh",
77  "Mesh file to use.");
78  args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
79  "Number of times to refine the mesh uniformly in serial.");
80  args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
81  "Number of times to refine the mesh uniformly in parallel.");
82  args.AddOption(&order, "-o", "--order",
83  "Finite element order (polynomial degree) or -1 for"
84  " isoparametric space.");
85  args.AddOption(&nev, "-n", "--num-eigs",
86  "Number of desired eigenmodes.");
87  args.AddOption(&seed, "-s", "--seed",
88  "Random seed used to initialize LOBPCG.");
89 #ifdef MFEM_USE_SUPERLU
90  args.AddOption(&slu_solver, "-slu", "--superlu", "-no-slu",
91  "--no-superlu", "Use the SuperLU Solver.");
92 #endif
93 #ifdef MFEM_USE_STRUMPACK
94  args.AddOption(&sp_solver, "-sp", "--strumpack", "-no-sp",
95  "--no-strumpack", "Use the STRUMPACK Solver.");
96 #endif
97  args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
98  "--no-visualization",
99  "Enable or disable GLVis visualization.");
100  args.Parse();
101  if (slu_solver && sp_solver)
102  {
103  if (myid == 0)
104  cout << "WARNING: Both SuperLU and STRUMPACK have been selected,"
105  << " please choose either one." << endl
106  << " Defaulting to SuperLU." << endl;
107  sp_solver = false;
108  }
109  // The command line options are also passed to the STRUMPACK
110  // solver. So do not exit if some options are not recognized.
111  if (!sp_solver)
112  {
113  if (!args.Good())
114  {
115  if (myid == 0)
116  {
117  args.PrintUsage(cout);
118  }
119  MPI_Finalize();
120  return 1;
121  }
122  }
123  if (myid == 0)
124  {
125  args.PrintOptions(cout);
126  }
127 
128  // 3. Read the (serial) mesh from the given mesh file on all processors. We
129  // can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
130  // and volume meshes with the same code.
131  Mesh *mesh = new Mesh(mesh_file, 1, 1);
132  int dim = mesh->Dimension();
133 
134  // 4. Refine the serial mesh on all processors to increase the resolution. In
135  // this example we do 'ref_levels' of uniform refinement (2 by default, or
136  // specified on the command line with -rs).
137  for (int lev = 0; lev < ser_ref_levels; lev++)
138  {
139  mesh->UniformRefinement();
140  }
141 
142  // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
143  // this mesh further in parallel to increase the resolution (1 time by
144  // default, or specified on the command line with -rp). Once the parallel
145  // mesh is defined, the serial mesh can be deleted.
146  ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
147  delete mesh;
148  for (int lev = 0; lev < par_ref_levels; lev++)
149  {
150  pmesh->UniformRefinement();
151  }
152 
153  // 6. Define a parallel finite element space on the parallel mesh. Here we
154  // use continuous Lagrange finite elements of the specified order. If
155  // order < 1, we instead use an isoparametric/isogeometric space.
157  if (order > 0)
158  {
159  fec = new H1_FECollection(order, dim);
160  }
161  else if (pmesh->GetNodes())
162  {
163  fec = pmesh->GetNodes()->OwnFEC();
164  }
165  else
166  {
167  fec = new H1_FECollection(order = 1, dim);
168  }
169  ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
170  HYPRE_Int size = fespace->GlobalTrueVSize();
171  if (myid == 0)
172  {
173  cout << "Number of unknowns: " << size << endl;
174  }
175 
176  // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
177  // element space. The first corresponds to the Laplacian operator -Delta,
178  // while the second is a simple mass matrix needed on the right hand side
179  // of the generalized eigenvalue problem below. The boundary conditions
180  // are implemented by elimination with special values on the diagonal to
181  // shift the Dirichlet eigenvalues out of the computational range. After
182  // serial and parallel assembly we extract the corresponding parallel
183  // matrices A and M.
184  ConstantCoefficient one(1.0);
185  Array<int> ess_bdr;
186  if (pmesh->bdr_attributes.Size())
187  {
188  ess_bdr.SetSize(pmesh->bdr_attributes.Max());
189  ess_bdr = 1;
190  }
191 
192  ParBilinearForm *a = new ParBilinearForm(fespace);
194  if (pmesh->bdr_attributes.Size() == 0)
195  {
196  // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
197  // closed surface.
198  a->AddDomainIntegrator(new MassIntegrator(one));
199  }
200  a->Assemble();
201  a->EliminateEssentialBCDiag(ess_bdr, 1.0);
202  a->Finalize();
203 
204  ParBilinearForm *m = new ParBilinearForm(fespace);
205  m->AddDomainIntegrator(new MassIntegrator(one));
206  m->Assemble();
207  // shift the eigenvalue corresponding to eliminated dofs to a large value
208  m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
209  m->Finalize();
210 
213 
214 #if defined(MFEM_USE_SUPERLU) || defined(MFEM_USE_STRUMPACK)
215  Operator * Arow = NULL;
216 #ifdef MFEM_USE_SUPERLU
217  if (slu_solver)
218  {
219  Arow = new SuperLURowLocMatrix(*A);
220  }
221 #endif
222 #ifdef MFEM_USE_STRUMPACK
223  if (sp_solver)
224  {
225  Arow = new STRUMPACKRowLocMatrix(*A);
226  }
227 #endif
228 #endif
229 
230  delete a;
231  delete m;
232 
233  // 8. Define and configure the LOBPCG eigensolver and the BoomerAMG
234  // preconditioner for A to be used within the solver. Set the matrices
235  // which define the generalized eigenproblem A x = lambda M x.
236  Solver * precond = NULL;
237  if (!slu_solver && !sp_solver)
238  {
239  HypreBoomerAMG * amg = new HypreBoomerAMG(*A);
240  amg->SetPrintLevel(0);
241  precond = amg;
242  }
243  else
244  {
245 #ifdef MFEM_USE_SUPERLU
246  if (slu_solver)
247  {
248  SuperLUSolver * superlu = new SuperLUSolver(MPI_COMM_WORLD);
249  superlu->SetPrintStatistics(false);
250  superlu->SetSymmetricPattern(true);
252  superlu->SetOperator(*Arow);
253  precond = superlu;
254  }
255 #endif
256 #ifdef MFEM_USE_STRUMPACK
257  if (sp_solver)
258  {
259  STRUMPACKSolver * strumpack = new STRUMPACKSolver(argc, argv, MPI_COMM_WORLD);
260  strumpack->SetPrintFactorStatistics(true);
261  strumpack->SetPrintSolveStatistics(false);
262  strumpack->SetKrylovSolver(strumpack::KrylovSolver::DIRECT);
263  strumpack->SetReorderingStrategy(strumpack::ReorderingStrategy::METIS);
264  strumpack->DisableMatching();
265  strumpack->SetOperator(*Arow);
266  strumpack->SetFromCommandLine();
267  precond = strumpack;
268  }
269 #endif
270  }
271 
272 
273  HypreLOBPCG * lobpcg = new HypreLOBPCG(MPI_COMM_WORLD);
274  lobpcg->SetNumModes(nev);
275  lobpcg->SetRandomSeed(seed);
276  lobpcg->SetPreconditioner(*precond);
277  lobpcg->SetMaxIter(200);
278  lobpcg->SetTol(1e-8);
279  lobpcg->SetPrecondUsageMode(1);
280  lobpcg->SetPrintLevel(1);
281  lobpcg->SetMassMatrix(*M);
282  lobpcg->SetOperator(*A);
283 
284  // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
285  // parallel grid function to represent each of the eigenmodes returned by
286  // the solver.
287  Array<double> eigenvalues;
288  lobpcg->Solve();
289  lobpcg->GetEigenvalues(eigenvalues);
290  ParGridFunction x(fespace);
291 
292  // 10. Save the refined mesh and the modes in parallel. This output can be
293  // viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
294  {
295  ostringstream mesh_name, mode_name;
296  mesh_name << "mesh." << setfill('0') << setw(6) << myid;
297 
298  ofstream mesh_ofs(mesh_name.str().c_str());
299  mesh_ofs.precision(8);
300  pmesh->Print(mesh_ofs);
301 
302  for (int i=0; i<nev; i++)
303  {
304  // convert eigenvector from HypreParVector to ParGridFunction
305  x = lobpcg->GetEigenvector(i);
306 
307  mode_name << "mode_" << setfill('0') << setw(2) << i << "."
308  << setfill('0') << setw(6) << myid;
309 
310  ofstream mode_ofs(mode_name.str().c_str());
311  mode_ofs.precision(8);
312  x.Save(mode_ofs);
313  mode_name.str("");
314  }
315  }
316 
317  // 11. Send the solution by socket to a GLVis server.
318  if (visualization)
319  {
320  char vishost[] = "localhost";
321  int visport = 19916;
322  socketstream mode_sock(vishost, visport);
323  mode_sock.precision(8);
324 
325  for (int i=0; i<nev; i++)
326  {
327  if ( myid == 0 )
328  {
329  cout << "Eigenmode " << i+1 << '/' << nev
330  << ", Lambda = " << eigenvalues[i] << endl;
331  }
332 
333  // convert eigenvector from HypreParVector to ParGridFunction
334  x = lobpcg->GetEigenvector(i);
335 
336  mode_sock << "parallel " << num_procs << " " << myid << "\n"
337  << "solution\n" << *pmesh << x << flush
338  << "window_title 'Eigenmode " << i+1 << '/' << nev
339  << ", Lambda = " << eigenvalues[i] << "'" << endl;
340 
341  char c;
342  if (myid == 0)
343  {
344  cout << "press (q)uit or (c)ontinue --> " << flush;
345  cin >> c;
346  }
347  MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
348 
349  if (c != 'c')
350  {
351  break;
352  }
353  }
354  mode_sock.close();
355  }
356 
357  // 12. Free the used memory.
358  delete lobpcg;
359  delete precond;
360  delete M;
361  delete A;
362 #if defined(MFEM_USE_SUPERLU) || defined(MFEM_USE_STRUMPACK)
363  delete Arow;
364 #endif
365 
366  delete fespace;
367  if (order > 0)
368  {
369  delete fec;
370  }
371  delete pmesh;
372 
373  MPI_Finalize();
374 
375  return 0;
376 }
int Size() const
Logical size of the array.
Definition: array.hpp:118
HypreParVector & GetEigenvector(unsigned int i)
Extract a single eigenvector.
Definition: hypre.cpp:3412
Subclass constant coefficient.
Definition: coefficient.hpp:67
virtual void Save(std::ostream &out) const
Definition: pgridfunc.cpp:488
void SetPreconditioner(Solver &precond)
Definition: hypre.cpp:3335
void SetMassMatrix(Operator &M)
Definition: hypre.cpp:3390
Abstract parallel finite element space.
Definition: pfespace.hpp:28
void SetPrintLevel(int logging)
Definition: hypre.cpp:3320
int main(int argc, char *argv[])
Definition: ex1.cpp:58
void SetSymmetricPattern(bool sym)
Definition: superlu.cpp:395
HypreParMatrix * ParallelAssemble()
Returns the matrix assembled on the true dofs, i.e. P^t A P.
void SetTol(double tol)
Definition: hypre.cpp:3298
void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: superlu.cpp:529
The BoomerAMG solver in hypre.
Definition: hypre.hpp:873
int dim
Definition: ex3.cpp:48
void SetPrintFactorStatistics(bool print_stat)
Definition: strumpack.cpp:127
void SetMaxIter(int max_iter)
Definition: hypre.cpp:3314
void SetColumnPermutation(superlu::ColPerm col_perm)
Definition: superlu.cpp:315
void UniformRefinement(int i, const DSTable &, int *, int *, int *)
Definition: mesh.cpp:7591
void SetPrintLevel(int print_level)
Definition: hypre.hpp:914
T Max() const
Find the maximal element in the array, using the comparison operator < for class T.
Definition: array.cpp:68
HYPRE_Int GlobalTrueVSize() const
Definition: pfespace.hpp:251
void Assemble(int skip_zeros=1)
Assemble the local matrix.
void EliminateEssentialBCDiag(const Array< int > &bdr_attr_is_ess, double value)
Perform elimination and set the diagonal entry to the given value.
virtual void Print(std::ostream &out=mfem::out) const
Definition: pmesh.cpp:3863
void SetPrintSolveStatistics(bool print_stat)
Definition: strumpack.cpp:132
void SetPrintStatistics(bool print_stat)
Definition: superlu.cpp:297
int Dimension() const
Definition: mesh.hpp:713
void PrintUsage(std::ostream &out) const
Definition: optparser.cpp:434
void SetReorderingStrategy(strumpack::ReorderingStrategy method)
Definition: strumpack.cpp:142
Array< int > bdr_attributes
A list of all unique boundary attributes used by the Mesh.
Definition: mesh.hpp:179
void GetEigenvalues(Array< double > &eigenvalues)
Collect the converged eigenvalues.
Definition: hypre.cpp:3400
void AddOption(bool *var, const char *enable_short_name, const char *enable_long_name, const char *disable_short_name, const char *disable_long_name, const char *description, bool required=false)
Definition: optparser.hpp:76
void SetSize(int nsize)
Change logical size of the array, keep existing entries.
Definition: array.hpp:618
void SetRandomSeed(int s)
Definition: hypre.hpp:1118
virtual void Finalize(int skip_zeros=1)
Finalizes the matrix initialization.
void SetKrylovSolver(strumpack::KrylovSolver method)
Definition: strumpack.cpp:137
void SetOperator(Operator &A)
Definition: hypre.cpp:3344
void AddDomainIntegrator(BilinearFormIntegrator *bfi)
Adds new Domain Integrator. Assumes ownership of bfi.
void PrintOptions(std::ostream &out) const
Definition: optparser.cpp:304
Class for parallel bilinear form.
void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: strumpack.cpp:221
void GetNodes(Vector &node_coord) const
Definition: mesh.cpp:5837
void SetPrecondUsageMode(int pcg_mode)
Definition: hypre.cpp:3329
Arbitrary order H1-conforming (continuous) finite elements.
Definition: fe_coll.hpp:83
Base class for solvers.
Definition: operator.hpp:279
Class for parallel grid function.
Definition: pgridfunc.hpp:32
Abstract operator.
Definition: operator.hpp:21
Wrapper for hypre's ParCSR matrix class.
Definition: hypre.hpp:187
void SetNumModes(int num_eigs)
Definition: hypre.hpp:1116
Class for parallel meshes.
Definition: pmesh.hpp:32
void Solve()
Solve the eigenproblem.
Definition: hypre.cpp:3457
bool Good() const
Definition: optparser.hpp:122