MFEM v4.3.0
Finite element discretization library
ex11p.cpp
//                       MFEM Example 11 - Parallel Version
//
// Compile with: make ex11p
//
// Sample runs:  mpirun -np 4 ex11p -m ../data/square-disc.mesh
//               mpirun -np 4 ex11p -m ../data/star.mesh
//               mpirun -np 4 ex11p -m ../data/star-mixed.mesh
//               mpirun -np 4 ex11p -m ../data/escher.mesh
//               mpirun -np 4 ex11p -m ../data/fichera.mesh
//               mpirun -np 4 ex11p -m ../data/fichera-mixed.mesh
//               mpirun -np 4 ex11p -m ../data/periodic-annulus-sector.msh
//               mpirun -np 4 ex11p -m ../data/periodic-torus-sector.msh -rs 1
//               mpirun -np 4 ex11p -m ../data/toroid-wedge.mesh -o 2
//               mpirun -np 4 ex11p -m ../data/square-disc-p2.vtk -o 2
//               mpirun -np 4 ex11p -m ../data/square-disc-p3.mesh -o 3
//               mpirun -np 4 ex11p -m ../data/square-disc-nurbs.mesh -o -1
//               mpirun -np 4 ex11p -m ../data/disc-nurbs.mesh -o -1 -n 20
//               mpirun -np 4 ex11p -m ../data/pipe-nurbs.mesh -o -1
//               mpirun -np 4 ex11p -m ../data/ball-nurbs.mesh -o 2
//               mpirun -np 4 ex11p -m ../data/star-surf.mesh
//               mpirun -np 4 ex11p -m ../data/square-disc-surf.mesh
//               mpirun -np 4 ex11p -m ../data/inline-segment.mesh
//               mpirun -np 4 ex11p -m ../data/inline-quad.mesh
//               mpirun -np 4 ex11p -m ../data/inline-tri.mesh
//               mpirun -np 4 ex11p -m ../data/inline-hex.mesh
//               mpirun -np 4 ex11p -m ../data/inline-tet.mesh
//               mpirun -np 4 ex11p -m ../data/inline-wedge.mesh -s 83
//               mpirun -np 4 ex11p -m ../data/amr-quad.mesh
//               mpirun -np 4 ex11p -m ../data/amr-hex.mesh
//               mpirun -np 4 ex11p -m ../data/mobius-strip.mesh -n 8
//               mpirun -np 4 ex11p -m ../data/klein-bottle.mesh -n 10
//
// Description:  This example code demonstrates the use of MFEM to solve the
//               eigenvalue problem -Delta u = lambda u with homogeneous
//               Dirichlet boundary conditions.
//
//               We compute a number of the lowest eigenmodes by discretizing
//               the Laplacian and Mass operators using a FE space of the
//               specified order, or an isoparametric/isogeometric space if
//               order < 1 (quadratic for quadratic curvilinear mesh, NURBS for
//               NURBS mesh, etc.)
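//
//               In matrix form this yields the generalized eigenvalue problem
//               A x = lambda M x, where A is the stiffness matrix arising
//               from the (grad u, grad v) bilinear form, M is the mass matrix
//               arising from (u, v), and x holds the expansion coefficients
//               of an eigenmode in the FE basis (see steps 7-8 below).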
//
//               The example highlights the use of the LOBPCG eigenvalue solver
//               together with the BoomerAMG preconditioner in HYPRE, as well
//               as optionally the SuperLU or STRUMPACK parallel direct
//               solvers. Reusing a single GLVis visualization window for
//               multiple eigenfunctions is also illustrated.
//
//               We recommend viewing Example 1 before viewing this example.

#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/star.mesh";
   int ser_ref_levels = 2;
   int par_ref_levels = 1;
   int order = 1;
   int nev = 5;
   int seed = 75;
   bool slu_solver = false;
   bool sp_solver = false;
   bool cpardiso_solver = false;
   bool visualization = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
                  "Number of times to refine the mesh uniformly in serial.");
   args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
                  "Number of times to refine the mesh uniformly in parallel.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&nev, "-n", "--num-eigs",
                  "Number of desired eigenmodes.");
   args.AddOption(&seed, "-s", "--seed",
                  "Random seed used to initialize LOBPCG.");
#ifdef MFEM_USE_SUPERLU
   args.AddOption(&slu_solver, "-slu", "--superlu", "-no-slu",
                  "--no-superlu", "Use the SuperLU Solver.");
#endif
#ifdef MFEM_USE_STRUMPACK
   args.AddOption(&sp_solver, "-sp", "--strumpack", "-no-sp",
                  "--no-strumpack", "Use the STRUMPACK Solver.");
#endif
#ifdef MFEM_USE_MKL_CPARDISO
   args.AddOption(&cpardiso_solver, "-cpardiso", "--cpardiso", "-no-cpardiso",
                  "--no-cpardiso", "Use the MKL CPardiso Solver.");
#endif
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (slu_solver && sp_solver)
   {
      if (myid == 0)
         cout << "WARNING: Both SuperLU and STRUMPACK have been selected,"
              << " please choose either one." << endl
              << "         Defaulting to SuperLU." << endl;
      sp_solver = false;
   }
   // The command line options are also passed to the STRUMPACK
   // solver. So do not exit if some options are not recognized.
   if (!sp_solver)
   {
      if (!args.Good())
      {
         if (myid == 0)
         {
            args.PrintUsage(cout);
         }
         MPI_Finalize();
         return 1;
      }
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement (2 by default, or
   //    specified on the command line with -rs).
   for (int lev = 0; lev < ser_ref_levels; lev++)
   {
      mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution (1 time by
   //    default, or specified on the command line with -rp). Once the parallel
   //    mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   for (int lev = 0; lev < par_ref_levels; lev++)
   {
      pmesh->UniformRefinement();
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
   }
   else if (pmesh->GetNodes())
   {
      fec = pmesh->GetNodes()->OwnFEC();
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
   }
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_BigInt size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of unknowns: " << size << endl;
   }

   // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
   //    element space. The first corresponds to the Laplacian operator -Delta,
   //    while the second is a simple mass matrix needed on the right hand side
   //    of the generalized eigenvalue problem below. The boundary conditions
   //    are implemented by elimination with special values on the diagonal to
   //    shift the Dirichlet eigenvalues out of the computational range. After
   //    serial and parallel assembly we extract the corresponding parallel
   //    matrices A and M.
   ConstantCoefficient one(1.0);
   Array<int> ess_bdr;
   if (pmesh->bdr_attributes.Size())
   {
      ess_bdr.SetSize(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
   }

   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DiffusionIntegrator(one));
   if (pmesh->bdr_attributes.Size() == 0)
   {
      // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
      // closed surface.
      a->AddDomainIntegrator(new MassIntegrator(one));
   }
   a->Assemble();
   a->EliminateEssentialBCDiag(ess_bdr, 1.0);
   a->Finalize();

   ParBilinearForm *m = new ParBilinearForm(fespace);
   m->AddDomainIntegrator(new MassIntegrator(one));
   m->Assemble();
   // shift the eigenvalue corresponding to eliminated dofs to a large value
   m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
   m->Finalize();
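   // Note: after the eliminations above, the essential (Dirichlet) dofs have
   // diagonal entries A_ii = 1 and M_ii = numeric_limits<double>::min(), so
   // their spurious eigenvalues are shifted to roughly 1/DBL_MIN, far above
   // the lowest physical eigenvalues computed by LOBPCG below.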

   HypreParMatrix *A = a->ParallelAssemble();
   HypreParMatrix *M = m->ParallelAssemble();

#if defined(MFEM_USE_SUPERLU) || defined(MFEM_USE_STRUMPACK)
   Operator * Arow = NULL;
#ifdef MFEM_USE_SUPERLU
   if (slu_solver)
   {
      Arow = new SuperLURowLocMatrix(*A);
   }
#endif
#ifdef MFEM_USE_STRUMPACK
   if (sp_solver)
   {
      Arow = new STRUMPACKRowLocMatrix(*A);
   }
#endif
#endif

   delete a;
   delete m;

   // 8. Define and configure the LOBPCG eigensolver and the BoomerAMG
   //    preconditioner for A to be used within the solver. Set the matrices
   //    which define the generalized eigenproblem A x = lambda M x.
   Solver * precond = NULL;
   if (!slu_solver && !sp_solver && !cpardiso_solver)
   {
      HypreBoomerAMG * amg = new HypreBoomerAMG(*A);
      amg->SetPrintLevel(0);
      precond = amg;
   }
   else
   {
#ifdef MFEM_USE_SUPERLU
      if (slu_solver)
      {
         SuperLUSolver * superlu = new SuperLUSolver(MPI_COMM_WORLD);
         superlu->SetPrintStatistics(false);
         superlu->SetSymmetricPattern(true);
         superlu->SetColumnPermutation(superlu::PARMETIS);
         superlu->SetOperator(*Arow);
         precond = superlu;
      }
#endif
#ifdef MFEM_USE_STRUMPACK
      if (sp_solver)
      {
         STRUMPACKSolver * strumpack = new STRUMPACKSolver(argc, argv, MPI_COMM_WORLD);
         strumpack->SetPrintFactorStatistics(true);
         strumpack->SetPrintSolveStatistics(false);
         strumpack->SetKrylovSolver(strumpack::KrylovSolver::DIRECT);
         strumpack->SetReorderingStrategy(strumpack::ReorderingStrategy::METIS);
         strumpack->DisableMatching();
         strumpack->SetOperator(*Arow);
         strumpack->SetFromCommandLine();
         precond = strumpack;
      }
#endif
#ifdef MFEM_USE_MKL_CPARDISO
      if (cpardiso_solver)
      {
         auto cpardiso = new CPardisoSolver(A->GetComm());
         cpardiso->SetMatrixType(CPardisoSolver::MatType::REAL_STRUCTURE_SYMMETRIC);
         cpardiso->SetPrintLevel(1);
         cpardiso->SetOperator(*A);
         precond = cpardiso;
      }
#endif
   }
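   // Note: when one of the parallel direct solvers is selected above, the
   // LOBPCG "preconditioner" performs an (essentially exact) solve with A on
   // each application, instead of the approximate BoomerAMG cycle used in the
   // default branch.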

   HypreLOBPCG * lobpcg = new HypreLOBPCG(MPI_COMM_WORLD);
   lobpcg->SetNumModes(nev);
   lobpcg->SetRandomSeed(seed);
   lobpcg->SetPreconditioner(*precond);
   lobpcg->SetMaxIter(200);
   lobpcg->SetTol(1e-8);
   lobpcg->SetPrecondUsageMode(1);
   lobpcg->SetPrintLevel(1);
   lobpcg->SetMassMatrix(*M);
   lobpcg->SetOperator(*A);

   // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
   //    parallel grid function to represent each of the eigenmodes returned by
   //    the solver.
   Array<double> eigenvalues;
   lobpcg->Solve();
   lobpcg->GetEigenvalues(eigenvalues);
   ParGridFunction x(fespace);

   // 10. Save the refined mesh and the modes in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
   {
      ostringstream mesh_name, mode_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      for (int i=0; i<nev; i++)
      {
         // convert eigenvector from HypreParVector to ParGridFunction
         x = lobpcg->GetEigenvector(i);

         mode_name << "mode_" << setfill('0') << setw(2) << i << "."
                   << setfill('0') << setw(6) << myid;

         ofstream mode_ofs(mode_name.str().c_str());
         mode_ofs.precision(8);
         x.Save(mode_ofs);
         mode_name.str("");
      }
   }

   // 11. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      socketstream mode_sock(vishost, visport);
      mode_sock.precision(8);

      for (int i=0; i<nev; i++)
      {
         if ( myid == 0 )
         {
            cout << "Eigenmode " << i+1 << '/' << nev
                 << ", Lambda = " << eigenvalues[i] << endl;
         }

         // convert eigenvector from HypreParVector to ParGridFunction
         x = lobpcg->GetEigenvector(i);

         mode_sock << "parallel " << num_procs << " " << myid << "\n"
                   << "solution\n" << *pmesh << x << flush
                   << "window_title 'Eigenmode " << i+1 << '/' << nev
                   << ", Lambda = " << eigenvalues[i] << "'" << endl;

         char c;
         if (myid == 0)
         {
            cout << "press (q)uit or (c)ontinue --> " << flush;
            cin >> c;
         }
         MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

         if (c != 'c')
         {
            break;
         }
      }
      mode_sock.close();
   }

   // 12. Free the used memory.
   delete lobpcg;
   delete precond;
   delete M;
   delete A;
#if defined(MFEM_USE_SUPERLU) || defined(MFEM_USE_STRUMPACK)
   delete Arow;
#endif

   delete fespace;
   if (order > 0)
   {
      delete fec;
   }
   delete pmesh;

   MPI_Finalize();

   return 0;
}