MFEM  v4.3.0
Finite element discretization library
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Pages
ex11p.cpp
Go to the documentation of this file.
1 // MFEM Example 11 - Parallel Version
2 // PETSc Modification
3 //
4 // Compile with: make ex11p
5 //
6 // Sample runs: mpirun -np 4 ex11p -m ../../data/star.mesh
7 // mpirun -np 4 ex11p -m ../../data/star.mesh --slepcopts rc_ex11p_lobpcg
8 // mpirun -np 4 ex11p -m ../../data/star.mesh --slepcopts rc_ex11p_gd
9 //
10 // Description: This example code demonstrates the use of MFEM to solve the
11 // eigenvalue problem -Delta u = lambda u with homogeneous
12 // Dirichlet boundary conditions.
13 //
14 // We compute a number of the lowest eigenmodes by discretizing
15 // the Laplacian and Mass operators using a FE space of the
16 // specified order, or an isoparametric/isogeometric space if
17 // order < 1 (quadratic for quadratic curvilinear mesh, NURBS for
18 // NURBS mesh, etc.)
19 //
20 // The example demonstrates the use of the SLEPc eigensolver as an
21 // alternative to the LOBPCG eigenvalue solver. The shift and
22 // invert spectral transformation is used to help the convergence
23 // to the smaller eigenvalues. Alternative solver parameters can
24 // be passed in a file with "-slepcopts".
25 //
26 // Reusing a single GLVis visualization window for multiple
27 // eigenfunctions is also illustrated.
28 //
29 // We recommend viewing Example 1 before viewing this example.
30 
31 #include "mfem.hpp"
32 #include <fstream>
33 #include <iostream>
34 
35 #ifndef MFEM_USE_SLEPC
36 #error This examples requires that MFEM is build with MFEM_USE_SLEPC=YES
37 #endif
38 
39 using namespace std;
40 using namespace mfem;
41 
42 int main(int argc, char *argv[])
43 {
44  // 1. Initialize MPI.
45  int num_procs, myid;
46  MPI_Init(&argc, &argv);
47  MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
48  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
49 
50  // 2. Parse command-line options.
51  const char *mesh_file = "../../data/star.mesh";
52  int ser_ref_levels = 2;
53  int par_ref_levels = 1;
54  int order = 1;
55  int nev = 5;
56  int seed = 75;
57  bool slu_solver = false;
58  bool sp_solver = false;
59  bool visualization = 1;
60  bool use_slepc = true;
61  const char *slepcrc_file = "";
62  const char *device_config = "cpu";
63 
64  OptionsParser args(argc, argv);
65  args.AddOption(&mesh_file, "-m", "--mesh",
66  "Mesh file to use.");
67  args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
68  "Number of times to refine the mesh uniformly in serial.");
69  args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
70  "Number of times to refine the mesh uniformly in parallel.");
71  args.AddOption(&order, "-o", "--order",
72  "Finite element order (polynomial degree) or -1 for"
73  " isoparametric space.");
74  args.AddOption(&nev, "-n", "--num-eigs",
75  "Number of desired eigenmodes.");
76  args.AddOption(&seed, "-s", "--seed",
77  "Random seed used to initialize LOBPCG.");
78 #ifdef MFEM_USE_SUPERLU
79  args.AddOption(&slu_solver, "-slu", "--superlu", "-no-slu",
80  "--no-superlu", "Use the SuperLU Solver.");
81 #endif
82 #ifdef MFEM_USE_STRUMPACK
83  args.AddOption(&sp_solver, "-sp", "--strumpack", "-no-sp",
84  "--no-strumpack", "Use the STRUMPACK Solver.");
85 #endif
86  args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
87  "--no-visualization",
88  "Enable or disable GLVis visualization.");
89  args.AddOption(&use_slepc, "-useslepc","--useslepc","-no-slepc",
90  "--no-slepc","Use or not SLEPc to solve the eigenvalue problem");
91  args.AddOption(&slepcrc_file, "-slepcopts", "--slepcopts",
92  "SlepcOptions file to use.");
93  args.AddOption(&device_config, "-d", "--device",
94  "Device configuration string, see Device::Configure().");
95  args.Parse();
96  if (slu_solver && sp_solver)
97  {
98  if (myid == 0)
99  cout << "WARNING: Both SuperLU and STRUMPACK have been selected,"
100  << " please choose either one." << endl
101  << " Defaulting to SuperLU." << endl;
102  sp_solver = false;
103  }
104  // The command line options are also passed to the STRUMPACK
105  // solver. So do not exit if some options are not recognized.
106  if (!sp_solver)
107  {
108  if (!args.Good())
109  {
110  if (myid == 0)
111  {
112  args.PrintUsage(cout);
113  }
114  MPI_Finalize();
115  return 1;
116  }
117  }
118  if (myid == 0)
119  {
120  args.PrintOptions(cout);
121  }
122 
123  // 2b. Enable hardware devices such as GPUs, and programming models such as
124  // CUDA, OCCA, RAJA and OpenMP based on command line options.
125  Device device(device_config);
126  if (myid == 0) { device.Print(); }
127 
128  // 2c. We initialize SLEPc. This internally initializes PETSc as well.
129  MFEMInitializeSlepc(NULL,NULL,slepcrc_file,NULL);
130 
131  // 3. Read the (serial) mesh from the given mesh file on all processors. We
132  // can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
133  // and volume meshes with the same code.
134  Mesh *mesh = new Mesh(mesh_file, 1, 1);
135  int dim = mesh->Dimension();
136 
137  // 4. Refine the serial mesh on all processors to increase the resolution. In
138  // this example we do 'ref_levels' of uniform refinement (2 by default, or
139  // specified on the command line with -rs).
140  for (int lev = 0; lev < ser_ref_levels; lev++)
141  {
142  mesh->UniformRefinement();
143  }
144 
145  // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
146  // this mesh further in parallel to increase the resolution (1 time by
147  // default, or specified on the command line with -rp). Once the parallel
148  // mesh is defined, the serial mesh can be deleted.
149  ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
150  delete mesh;
151  for (int lev = 0; lev < par_ref_levels; lev++)
152  {
153  pmesh->UniformRefinement();
154  }
155 
156  // 6. Define a parallel finite element space on the parallel mesh. Here we
157  // use continuous Lagrange finite elements of the specified order. If
158  // order < 1, we instead use an isoparametric/isogeometric space.
160  if (order > 0)
161  {
162  fec = new H1_FECollection(order, dim);
163  }
164  else if (pmesh->GetNodes())
165  {
166  fec = pmesh->GetNodes()->OwnFEC();
167  }
168  else
169  {
170  fec = new H1_FECollection(order = 1, dim);
171  }
172  ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
173  HYPRE_BigInt size = fespace->GlobalTrueVSize();
174  if (myid == 0)
175  {
176  cout << "Number of unknowns: " << size << endl;
177  }
178 
179  // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
180  // element space. The first corresponds to the Laplacian operator -Delta,
181  // while the second is a simple mass matrix needed on the right hand side
182  // of the generalized eigenvalue problem below. The boundary conditions
183  // are implemented by elimination with special values on the diagonal to
184  // shift the Dirichlet eigenvalues out of the computational range. After
185  // serial and parallel assembly we extract the corresponding parallel
186  // matrices A and M.
187  ConstantCoefficient one(1.0);
188  Array<int> ess_bdr;
189  if (pmesh->bdr_attributes.Size())
190  {
191  ess_bdr.SetSize(pmesh->bdr_attributes.Max());
192  ess_bdr = 1;
193  }
194 
195  ParBilinearForm *a = new ParBilinearForm(fespace);
197  if (pmesh->bdr_attributes.Size() == 0)
198  {
199  // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
200  // closed surface.
201  a->AddDomainIntegrator(new MassIntegrator(one));
202  }
203  a->Assemble();
204  a->EliminateEssentialBCDiag(ess_bdr, 1.0);
205  a->Finalize();
206 
207  ParBilinearForm *m = new ParBilinearForm(fespace);
208  m->AddDomainIntegrator(new MassIntegrator(one));
209  m->Assemble();
210  // shift the eigenvalue corresponding to eliminated dofs to a large value
211  m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
212  m->Finalize();
213 
214  PetscParMatrix *pA = NULL, *pM = NULL;
215  HypreParMatrix *A = NULL, *M = NULL;
216  Operator::Type tid =
217  !use_slepc ? Operator::Hypre_ParCSR : Operator::PETSC_MATAIJ;
218  OperatorHandle Ah(tid), Mh(tid);
219 
220  a->ParallelAssemble(Ah);
221  if (!use_slepc) { Ah.Get(A); }
222  else { Ah.Get(pA); }
223  Ah.SetOperatorOwner(false);
224 
225  m->ParallelAssemble(Mh);
226  if (!use_slepc) {Mh.Get(M); }
227  else {Mh.Get(pM); }
228  Mh.SetOperatorOwner(false);
229 
230 #if defined(MFEM_USE_SUPERLU) || defined(MFEM_USE_STRUMPACK)
231  Operator * Arow = NULL;
232 #ifdef MFEM_USE_SUPERLU
233  if (slu_solver)
234  {
235  Arow = new SuperLURowLocMatrix(*A);
236  }
237 #endif
238 #ifdef MFEM_USE_STRUMPACK
239  if (sp_solver)
240  {
241  Arow = new STRUMPACKRowLocMatrix(*A);
242  }
243 #endif
244 #endif
245 
246  delete a;
247  delete m;
248 
249  // 8. Define and configure the LOBPCG eigensolver and the BoomerAMG
250  // preconditioner for A to be used within the solver. Set the matrices
251  // which define the generalized eigenproblem A x = lambda M x.
252  Solver * precond = NULL;
253  if (!use_slepc)
254  {
255  if (!slu_solver && !sp_solver)
256  {
257  HypreBoomerAMG * amg = new HypreBoomerAMG(*A);
258  amg->SetPrintLevel(0);
259  precond = amg;
260  }
261  else
262  {
263 #ifdef MFEM_USE_SUPERLU
264  if (slu_solver)
265  {
266  SuperLUSolver * superlu = new SuperLUSolver(MPI_COMM_WORLD);
267  superlu->SetPrintStatistics(false);
268  superlu->SetSymmetricPattern(true);
270  superlu->SetOperator(*Arow);
271  precond = superlu;
272  }
273 #endif
274 #ifdef MFEM_USE_STRUMPACK
275  if (sp_solver)
276  {
277  STRUMPACKSolver * strumpack = new STRUMPACKSolver(argc, argv, MPI_COMM_WORLD);
278  strumpack->SetPrintFactorStatistics(true);
279  strumpack->SetPrintSolveStatistics(false);
280  strumpack->SetKrylovSolver(strumpack::KrylovSolver::DIRECT);
281  strumpack->SetReorderingStrategy(strumpack::ReorderingStrategy::METIS);
282  strumpack->DisableMatching();
283  strumpack->SetOperator(*Arow);
284  strumpack->SetFromCommandLine();
285  precond = strumpack;
286  }
287 #endif
288  }
289  }
290 
291  HypreLOBPCG * lobpcg = NULL;
292  SlepcEigenSolver * slepc = NULL;
293  if (!use_slepc)
294  {
295 
296  lobpcg = new HypreLOBPCG(MPI_COMM_WORLD);
297  lobpcg->SetNumModes(nev);
298  lobpcg->SetRandomSeed(seed);
299  lobpcg->SetPreconditioner(*precond);
300  lobpcg->SetMaxIter(200);
301  lobpcg->SetTol(1e-8);
302  lobpcg->SetPrecondUsageMode(1);
303  lobpcg->SetPrintLevel(1);
304  lobpcg->SetMassMatrix(*M);
305  lobpcg->SetOperator(*A);
306  }
307  else
308  {
309  slepc = new SlepcEigenSolver(MPI_COMM_WORLD);
310  slepc->SetNumModes(nev);
311  slepc->SetWhichEigenpairs(SlepcEigenSolver::TARGET_REAL);
312  slepc->SetTarget(0.0);
313  slepc->SetSpectralTransformation(SlepcEigenSolver::SHIFT_INVERT);
314  slepc->SetOperators(*pA,*pM);
315  }
316 
317  // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
318  // parallel grid function to represent each of the eigenmodes returned by
319  // the solver.
320  Array<double> eigenvalues;
321  if (!use_slepc)
322  {
323  lobpcg->Solve();
324  lobpcg->GetEigenvalues(eigenvalues);
325  }
326  else
327  {
328  slepc->Solve();
329  eigenvalues.SetSize(nev);
330  for (int i=0; i<nev; i++)
331  {
332  slepc->GetEigenvalue(i,eigenvalues[i]);
333  }
334  }
335  Vector temp(fespace->GetTrueVSize());
336  ParGridFunction x(fespace);
337 
338  // 10. Save the refined mesh and the modes in parallel. This output can be
339  // viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
340  {
341  ostringstream mesh_name, mode_name;
342  mesh_name << "mesh." << setfill('0') << setw(6) << myid;
343 
344  ofstream mesh_ofs(mesh_name.str().c_str());
345  mesh_ofs.precision(8);
346  pmesh->Print(mesh_ofs);
347 
348  for (int i=0; i<nev; i++)
349  {
350  // convert eigenvector from HypreParVector to ParGridFunction
351  if (!use_slepc)
352  {
353  x = lobpcg->GetEigenvector(i);
354  }
355  else
356  {
357  slepc->GetEigenvector(i,temp);
358  x.Distribute(temp);
359  }
360 
361  mode_name << "mode_" << setfill('0') << setw(2) << i << "."
362  << setfill('0') << setw(6) << myid;
363 
364  ofstream mode_ofs(mode_name.str().c_str());
365  mode_ofs.precision(8);
366  x.Save(mode_ofs);
367  mode_name.str("");
368  }
369  }
370 
371  // 11. Send the solution by socket to a GLVis server.
372  if (visualization)
373  {
374  char vishost[] = "localhost";
375  int visport = 19916;
376  socketstream mode_sock(vishost, visport);
377  mode_sock.precision(8);
378 
379  for (int i=0; i<nev; i++)
380  {
381  if ( myid == 0 )
382  {
383  cout << "Eigenmode " << i+1 << '/' << nev
384  << ", Lambda = " << eigenvalues[i] << endl;
385  }
386 
387  // convert eigenvector from HypreParVector to ParGridFunction
388  if (!use_slepc)
389  {
390  x = lobpcg->GetEigenvector(i);
391  }
392  else
393  {
394  slepc->GetEigenvector(i,temp);
395  x.Distribute(temp);
396  }
397 
398  mode_sock << "parallel " << num_procs << " " << myid << "\n"
399  << "solution\n" << *pmesh << x << flush
400  << "window_title 'Eigenmode " << i+1 << '/' << nev
401  << ", Lambda = " << eigenvalues[i] << "'" << endl;
402 
403  char c;
404  if (myid == 0)
405  {
406  cout << "press (q)uit or (c)ontinue --> " << flush;
407  cin >> c;
408  }
409  MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
410 
411  if (c != 'c')
412  {
413  break;
414  }
415  }
416  mode_sock.close();
417  }
418 
419  // 12. Free the used memory.
420  delete lobpcg;
421  delete slepc;
422  delete precond;
423  delete M;
424  delete A;
425  delete pA;
426  delete pM;
427 #if defined(MFEM_USE_SUPERLU) || defined(MFEM_USE_STRUMPACK)
428  delete Arow;
429 #endif
430  delete fespace;
431  if (order > 0)
432  {
433  delete fec;
434  }
435  delete pmesh;
436 
437  // We finalize SLEPc
439  MPI_Finalize();
440 
441  return 0;
442 }
int Size() const
Return the logical size of the array.
Definition: array.hpp:134
A coefficient that is constant across space and time.
Definition: coefficient.hpp:78
void MFEMInitializeSlepc()
Definition: slepc.cpp:29
Wrapper for PETSc's matrix class.
Definition: petsc.hpp:307
Pointer to an Operator of a specified type.
Definition: handle.hpp:33
void GetEigenvalue(unsigned int i, double &lr) const
Get the corresponding eigenvalue.
Definition: slepc.cpp:147
HYPRE_BigInt GlobalTrueVSize() const
Definition: pfespace.hpp:275
void SetPreconditioner(Solver &precond)
Definition: hypre.cpp:5415
void Print(std::ostream &out=mfem::out)
Print the configuration of the MFEM virtual device object.
Definition: device.cpp:279
void SetMassMatrix(Operator &M)
Definition: hypre.cpp:5471
Abstract parallel finite element space.
Definition: pfespace.hpp:28
void SetPrintLevel(int logging)
Definition: hypre.cpp:5400
void SetTarget(double target)
Definition: slepc.cpp:229
void SetSymmetricPattern(bool sym)
Definition: superlu.cpp:423
void GetEigenvalues(Array< double > &eigenvalues) const
Collect the converged eigenvalues.
Definition: hypre.cpp:5481
HypreParMatrix * ParallelAssemble()
Returns the matrix assembled on the true dofs, i.e. P^t A P.
void SetTol(double tol)
Definition: hypre.cpp:5378
int close()
Close the socketstream.
virtual int GetTrueVSize() const
Return the number of local vector true dofs.
Definition: pfespace.hpp:279
void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: superlu.cpp:580
The BoomerAMG solver in hypre.
Definition: hypre.hpp:1387
void SetSpectralTransformation(SpectralTransformation transformation)
Definition: slepc.cpp:234
void SetPrintFactorStatistics(bool print_stat)
Definition: strumpack.cpp:134
void SetMaxIter(int max_iter)
Definition: hypre.cpp:5394
void Parse()
Parse the command-line options. Note that this function expects all the options provided through the ...
Definition: optparser.cpp:150
constexpr char vishost[]
void SetColumnPermutation(superlu::ColPerm col_perm)
Definition: superlu.cpp:343
void UniformRefinement(int i, const DSTable &, int *, int *, int *)
Definition: mesh.cpp:9143
void SetPrintLevel(int print_level)
Definition: hypre.hpp:1467
constexpr int visport
T Max() const
Find the maximal element in the array, using the comparison operator < for class T.
Definition: array.cpp:68
void Assemble(int skip_zeros=1)
Assemble the local matrix.
void SetNumModes(int num_eigs)
Set the number of required eigenmodes.
Definition: slepc.cpp:124
void EliminateEssentialBCDiag(const Array< int > &bdr_attr_is_ess, double value)
Perform elimination and set the diagonal entry to the given value.
virtual void Print(std::ostream &out=mfem::out) const
Definition: pmesh.cpp:4382
void SetPrintSolveStatistics(bool print_stat)
Definition: strumpack.cpp:139
void SetPrintStatistics(bool print_stat)
Definition: superlu.cpp:325
void Solve()
Solve the eigenvalue problem for the specified number of eigenvalues.
Definition: slepc.cpp:130
int Dimension() const
Definition: mesh.hpp:911
void PrintUsage(std::ostream &out) const
Print the usage message.
Definition: optparser.cpp:457
void MFEMFinalizeSlepc()
Definition: slepc.cpp:53
void SetReorderingStrategy(strumpack::ReorderingStrategy method)
Definition: strumpack.cpp:149
Type
Enumeration defining IDs for some classes derived from Operator.
Definition: operator.hpp:252
Array< int > bdr_attributes
A list of all unique boundary attributes used by the Mesh.
Definition: mesh.hpp:204
Collection of finite elements from the same family in multiple dimensions. This class is used to matc...
Definition: fe_coll.hpp:26
void AddOption(bool *var, const char *enable_short_name, const char *enable_long_name, const char *disable_short_name, const char *disable_long_name, const char *description, bool required=false)
Add a boolean option and set 'var' to receive the value. Enable/disable tags are used to set the bool...
Definition: optparser.hpp:82
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
Definition: array.hpp:674
HYPRE_Int HYPRE_BigInt
void SetRandomSeed(int s)
Definition: hypre.hpp:1736
double a
Definition: lissajous.cpp:41
void SetOperatorOwner(bool own=true)
Set the ownership flag for the held Operator.
Definition: handle.hpp:115
virtual void Finalize(int skip_zeros=1)
Finalizes the matrix initialization.
void SetWhichEigenpairs(Which which)
Definition: slepc.cpp:195
int dim
Definition: ex24.cpp:53
void SetKrylovSolver(strumpack::KrylovSolver method)
Definition: strumpack.cpp:144
void SetOperator(Operator &A)
Definition: hypre.cpp:5424
void AddDomainIntegrator(BilinearFormIntegrator *bfi)
Adds new Domain Integrator. Assumes ownership of bfi.
void PrintOptions(std::ostream &out) const
Print the options.
Definition: optparser.cpp:327
Class for parallel bilinear form.
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
Definition: hypre.cpp:5493
void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Definition: strumpack.cpp:228
void Get(OpType *&A) const
Return the Operator pointer statically cast to a given OpType.
Definition: handle.hpp:109
Vector data type.
Definition: vector.hpp:60
void GetNodes(Vector &node_coord) const
Definition: mesh.cpp:7343
void SetPrecondUsageMode(int pcg_mode)
Definition: hypre.cpp:5409
Arbitrary order H1-conforming (continuous) finite elements.
Definition: fe_coll.hpp:216
Base class for solvers.
Definition: operator.hpp:648
Class for parallel grid function.
Definition: pgridfunc.hpp:32
void GetEigenvector(unsigned int i, Vector &vr) const
Get the corresponding eigenvector.
Definition: slepc.cpp:158
The MFEM Device class abstracts hardware devices such as GPUs, as well as programming models such as ...
Definition: device.hpp:121
Abstract operator.
Definition: operator.hpp:24
Wrapper for hypre's ParCSR matrix class.
Definition: hypre.hpp:277
void SetNumModes(int num_eigs)
Definition: hypre.hpp:1734
Class for parallel meshes.
Definition: pmesh.hpp:32
void Solve()
Solve the eigenproblem.
Definition: hypre.cpp:5538
int main()
void SetOperators(const PetscParMatrix &op, const PetscParMatrix &opB)
Set operator for generalized eigenvalue problem.
Definition: slepc.cpp:93
bool Good() const
Return true if the command line options were parsed successfully.
Definition: optparser.hpp:150