MFEM  v3.1 Finite element discretization library
ex13p.cpp
Go to the documentation of this file.
1 // MFEM Example 13 - Parallel Version
2 //
3 // Compile with: make ex13p
4 //
5 // Sample runs: mpirun -np 4 ex13p -m ../data/star.mesh
6 // mpirun -np 4 ex13p -m ../data/square-disc.mesh -o 2
7 // mpirun -np 4 ex13p -m ../data/beam-tet.mesh
8 // mpirun -np 4 ex13p -m ../data/beam-hex.mesh
9 // mpirun -np 4 ex13p -m ../data/escher.mesh
10 // mpirun -np 4 ex13p -m ../data/fichera.mesh
11 // mpirun -np 4 ex13p -m ../data/fichera-q2.vtk
12 // mpirun -np 4 ex13p -m ../data/fichera-q3.mesh
13 // mpirun -np 4 ex13p -m ../data/square-disc-nurbs.mesh
14 // mpirun -np 4 ex13p -m ../data/beam-hex-nurbs.mesh
15 // mpirun -np 4 ex13p -m ../data/amr-quad.mesh -o 2
16 // mpirun -np 4 ex13p -m ../data/amr-hex.mesh
17 // mpirun -np 4 ex13p -m ../data/mobius-strip.mesh -n 8 -o 2
18 // mpirun -np 4 ex13p -m ../data/klein-bottle.mesh -n 10 -o 2
19 //
20 // Description: This example code solves the Maxwell (electromagnetic)
21 // eigenvalue problem curl curl E = lambda E with homogeneous
22 // Dirichlet boundary conditions E x n = 0.
23 //
24 // We compute a number of the lowest nonzero eigenmodes by
25 // discretizing the curl curl operator using a Nedelec FE space of
26 // the specified order in 2D or 3D.
27 //
28 // The example highlights the use of the AME subspace eigenvalue
29 // solver from HYPRE, which uses LOBPCG and AMS internally.
30 // Reusing a single GLVis visualization window for multiple
31 // eigenfunctions is also illustrated.
32 //
33 // We recommend viewing examples 3 and 11 before viewing this
34 // example.
35
#include "mfem.hpp"
#include <fstream>
#include <iostream>
#include <limits>
39
40 using namespace std;
41 using namespace mfem;
42
43 int main(int argc, char *argv[])
44 {
45  // 1. Initialize MPI.
46  int num_procs, myid;
47  MPI_Init(&argc, &argv);
48  MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
49  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
50
51  // 2. Parse command-line options.
52  const char *mesh_file = "../data/beam-tet.mesh";
53  int ser_ref_levels = 2;
54  int par_ref_levels = 1;
55  int order = 1;
56  int nev = 5;
57  bool visualization = 1;
58
59  OptionsParser args(argc, argv);
61  "Mesh file to use.");
63  "Number of times to refine the mesh uniformly in serial.");
65  "Number of times to refine the mesh uniformly in parallel.");
67  "Finite element order (polynomial degree) or -1 for"
68  " isoparametric space.");
70  "Number of desired eigenmodes.");
72  "--no-visualization",
73  "Enable or disable GLVis visualization.");
74  args.Parse();
75  if (!args.Good())
76  {
77  if (myid == 0)
78  {
79  args.PrintUsage(cout);
80  }
81  MPI_Finalize();
82  return 1;
83  }
84  if (myid == 0)
85  {
86  args.PrintOptions(cout);
87  }
88
89  // 3. Read the (serial) mesh from the given mesh file on all processors. We
90  // can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
91  // and volume meshes with the same code.
92  Mesh *mesh;
93  ifstream imesh(mesh_file);
94  if (!imesh)
95  {
96  if (myid == 0)
97  {
98  cerr << "\nCan not open mesh file: " << mesh_file << '\n' << endl;
99  }
100  MPI_Finalize();
101  return 2;
102  }
103  mesh = new Mesh(imesh, 1, 1);
104  imesh.close();
105  int dim = mesh->Dimension();
106
107  // 4. Refine the serial mesh on all processors to increase the resolution. In
108  // this example we do 'ref_levels' of uniform refinement (2 by default, or
109  // specified on the command line with -rs).
110  for (int lev = 0; lev < ser_ref_levels; lev++)
111  {
112  mesh->UniformRefinement();
113  }
114
115  // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
116  // this mesh further in parallel to increase the resolution (1 time by
117  // default, or specified on the command line with -rp). Once the parallel
118  // mesh is defined, the serial mesh can be deleted.
119  ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
120  delete mesh;
121  for (int lev = 0; lev < par_ref_levels; lev++)
122  {
123  pmesh->UniformRefinement();
124  }
125
126  // 6. Define a parallel finite element space on the parallel mesh. Here we
127  // use the Nedelec finite elements of the specified order.
128  FiniteElementCollection *fec = new ND_FECollection(order, dim);
129  ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
130  HYPRE_Int size = fespace->GlobalTrueVSize();
131  if (myid == 0)
132  {
133  cout << "Number of unknowns: " << size << endl;
134  }
135
136  // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
137  // element space. The first corresponds to the curl curl, while the second
138  // is a simple mass matrix needed on the right hand side of the
139  // generalized eigenvalue problem below. The boundary conditions are
140  // implemented by marking all the boundary attributes from the mesh as
141  // essential. The corresponding degrees of freedom are eliminated with
142  // special values on the diagonal to shift the Dirichlet eigenvalues out
143  // of the computational range. After serial and parallel assembly we
144  // extract the corresponding parallel matrices A and M.
145  ConstantCoefficient one(1.0);
146  Array<int> ess_bdr;
147  if (pmesh->bdr_attributes.Size())
148  {
149  ess_bdr.SetSize(pmesh->bdr_attributes.Max());
150  ess_bdr = 1;
151  }
152
153  ParBilinearForm *a = new ParBilinearForm(fespace);
155  if (pmesh->bdr_attributes.Size() == 0)
156  {
157  // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
158  // closed surface.
160  }
161  a->Assemble();
162  a->EliminateEssentialBCDiag(ess_bdr, 1.0);
163  a->Finalize();
164
165  ParBilinearForm *m = new ParBilinearForm(fespace);
167  m->Assemble();
168  // shift the eigenvalue corresponding to eliminated dofs to a large value
169  m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
170  m->Finalize();
171
174
175  delete a;
176  delete m;
177
178  // 8. Define and configure the AME eigensolver and the AMS preconditioner for
179  // A to be used within the solver. Set the matrices which define the
180  // generalized eigenproblem A x = lambda M x.
181  HypreAMS *ams = new HypreAMS(*A,fespace);
182  ams->SetPrintLevel(0);
183  ams->SetSingularProblem();
184
185  HypreAME *ame = new HypreAME(MPI_COMM_WORLD);
186  ame->SetNumModes(nev);
187  ame->SetPreconditioner(*ams);
188  ame->SetMaxIter(100);
189  ame->SetTol(1e-8);
190  ame->SetPrintLevel(1);
191  ame->SetMassMatrix(*M);
192  ame->SetOperator(*A);
193
194  // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
195  // parallel grid function to represent each of the eigenmodes returned by
196  // the solver.
197  Array<double> eigenvalues;
198  ame->Solve();
199  ame->GetEigenvalues(eigenvalues);
200  ParGridFunction x(fespace);
201
202  // 10. Save the refined mesh and the modes in parallel. This output can be
203  // viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
204  {
205  ostringstream mesh_name, mode_name;
206  mesh_name << "mesh." << setfill('0') << setw(6) << myid;
207
208  ofstream mesh_ofs(mesh_name.str().c_str());
209  mesh_ofs.precision(8);
210  pmesh->Print(mesh_ofs);
211
212  for (int i=0; i<nev; i++)
213  {
214  // convert eigenvector from HypreParVector to ParGridFunction
215  x = ame->GetEigenvector(i);
216
217  mode_name << "mode_" << setfill('0') << setw(2) << i << "."
218  << setfill('0') << setw(6) << myid;
219
220  ofstream mode_ofs(mode_name.str().c_str());
221  mode_ofs.precision(8);
222  x.Save(mode_ofs);
223  mode_name.str("");
224  }
225  }
226
227  // 11. Send the solution by socket to a GLVis server.
228  if (visualization)
229  {
230  char vishost[] = "localhost";
231  int visport = 19916;
232  socketstream mode_sock(vishost, visport);
233  mode_sock.precision(8);
234
235  for (int i=0; i<nev; i++)
236  {
237  if ( myid == 0 )
238  {
239  cout << "Eigenmode " << i+1 << '/' << nev
240  << ", Lambda = " << eigenvalues[i] << endl;
241  }
242
243  // convert eigenvector from HypreParVector to ParGridFunction
244  x = ame->GetEigenvector(i);
245
246  mode_sock << "parallel " << num_procs << " " << myid << "\n"
247  << "solution\n" << *pmesh << x << flush
248  << "window_title 'Eigenmode " << i+1 << '/' << nev
249  << ", Lambda = " << eigenvalues[i] << "'" << endl;
250
251  char c;
252  if (myid == 0)
253  {
254  cout << "press (q)uit or (c)ontinue --> " << flush;
255  cin >> c;
256  }
257  MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);
258
259  if (c != 'c')
260  {
261  break;
262  }
263  }
264  mode_sock.close();
265  }
266
267  // 12. Free the used memory.
268  delete ame;
269  delete ams;
270  delete M;
271  delete A;
272
273  delete fespace;
274  delete fec;
275  delete pmesh;
276
277  MPI_Finalize();
278
279  return 0;
280 }
int Size() const
Logical size of the array.
Definition: array.hpp:109
The Auxiliary-space Maxwell Solver in hypre.
Definition: hypre.hpp:761
Subclass constant coefficient.
Definition: coefficient.hpp:57
Integrator for (curl u, curl v) for Nedelec elements.
Definition: bilininteg.hpp:401
void GetEigenvalues(Array< double > &eigenvalues)
Collect the converged eigenvalues.
Definition: hypre.cpp:3307
void SetPreconditioner(HypreSolver &precond)
Definition: hypre.cpp:3273
void SetTol(double tol)
Definition: hypre.cpp:3252
virtual void Save(std::ostream &out) const
Definition: pgridfunc.cpp:306
Abstract parallel finite element space.
Definition: pfespace.hpp:28
void SetOperator(HypreParMatrix &A)
Definition: hypre.cpp:3279
HypreParMatrix * ParallelAssemble()
Returns the matrix assembled on the true dofs, i.e. P^t A P.
int dim
Definition: ex3.cpp:48
void UniformRefinement(int i, const DSTable &, int *, int *, int *)
Definition: mesh.cpp:7316
T Max() const
Definition: array.cpp:90
void Assemble(int skip_zeros=1)
Assemble the local matrix.
int Dimension() const
Definition: mesh.hpp:475
void PrintUsage(std::ostream &out) const
Definition: optparser.cpp:434
void EliminateEssentialBCDiag(Array< int > &bdr_attr_is_ess, double value)
Perform elimination and set the diagonal entry to the given value.
Array< int > bdr_attributes
Definition: mesh.hpp:140
int main(int argc, char *argv[])
Definition: ex1.cpp:45
void SetPrintLevel(int print_lvl)
Definition: hypre.cpp:2649
void AddOption(bool *var, const char *enable_short_name, const char *enable_long_name, const char *disable_short_name, const char *disable_long_name, const char *description, bool required=false)
Definition: optparser.hpp:74
void SetSize(int nsize)
Change logical size of the array, keep existing entries.
Definition: array.hpp:323
void Solve()
Solve the eigenproblem.
Definition: hypre.cpp:3301
void SetNumModes(int num_eigs)
Definition: hypre.cpp:3244
void SetMaxIter(int max_iter)
Definition: hypre.cpp:3258
virtual void Finalize(int skip_zeros=1)
Finalizes the matrix initialization.
void SetMassMatrix(HypreParMatrix &M)
Definition: hypre.cpp:3294
void PrintOptions(std::ostream &out) const
Definition: optparser.cpp:304
void SetPrintLevel(int logging)
Definition: hypre.cpp:3264
Class for parallel bilinear form.
Arbitrary order H(curl)-conforming Nedelec finite elements.
Definition: fe_coll.hpp:185
HypreParVector & GetEigenvector(unsigned int i)
Extract a single eigenvector.
Definition: hypre.cpp:3343
Class for parallel grid function.
Definition: pgridfunc.hpp:31
Wrapper for hypre's ParCSR matrix class.
Definition: hypre.hpp:143
Class for parallel meshes.
Definition: pmesh.hpp:28
void SetSingularProblem()
Set this option when solving a curl-curl problem with zero mass term.
Definition: hypre.hpp:779
Integrator for (Q u, v) for VectorFiniteElements.
Definition: bilininteg.hpp:458
virtual void Print(std::ostream &out=std::cout) const
Definition: pmesh.cpp:2850
bool Good() const
Definition: optparser.hpp:120