MFEM v4.5.1
Finite element discretization library
ex32p.cpp
// MFEM Example 32 - Parallel Version
//
// Compile with: make ex32p
//
// Sample runs:  mpirun -np 4 ex32p -m ../data/hexagon.mesh -o 2
//               mpirun -np 4 ex32p -m ../data/star.mesh
//               mpirun -np 4 ex32p -m ../data/square-disc.mesh -o 2 -n 4 -rs 1
//               mpirun -np 4 ex32p -m ../data/square-disc-nurbs.mesh -rs 3 -o 3
//               mpirun -np 4 ex32p -m ../data/amr-quad.mesh -o 2 -rs 1
//               mpirun -np 4 ex32p -m ../data/amr-hex.mesh -rs 1
//               mpirun -np 4 ex32p -m ../data/fichera.mesh -rs 1
//
// Description:  This example code solves the Maxwell (electromagnetic)
//               eigenvalue problem curl curl E = lambda epsilon E with an
//               anisotropic dielectric tensor, epsilon, and homogeneous
//               Dirichlet boundary conditions E x n = 0.
//
//               We compute a number of the lowest nonzero eigenmodes by
//               discretizing the curl curl operator using a Nedelec FE space
//               of the specified order in 1D, 2D, or 3D.
//
//               The example highlights the use of restricted H(curl) finite
//               element spaces with the AME subspace eigenvalue solver from
//               HYPRE, which uses LOBPCG and AMS internally. Reusing a single
//               GLVis visualization window for multiple eigenfunctions is
//               also illustrated.
//
//               We recommend viewing examples 31 and 13 before viewing this
//               example.

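// In weak form, the problem above reads: find E in H(curl) with E x n = 0 on
// the boundary, and lambda != 0, such that
//
//    (curl E, curl W) = lambda (epsilon E, W)   for all W in H(curl),
//
// which after discretization becomes the generalized eigenvalue problem
// A x = lambda M x assembled in step 7 below.
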
#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

double GetVectorMax(int vdim, const ParGridFunction &x);
double GetScalarMax(const ParGridFunction &x);

int main(int argc, char *argv[])
{
   // 1. Initialize MPI and HYPRE.
   Mpi::Init(argc, argv);
   int num_procs = Mpi::WorldSize();
   int myid = Mpi::WorldRank();
   Hypre::Init();

   // 2. Parse command-line options.
   const char *mesh_file = "../data/inline-quad.mesh";
   int ser_ref_levels = 2;
   int par_ref_levels = 1;
   int order = 1;
   int nev = 5;
   bool visualization = true;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
                  "Number of times to refine the mesh uniformly in serial.");
   args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
                  "Number of times to refine the mesh uniformly in parallel.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&nev, "-n", "--num-eigs",
                  "Number of desired eigenmodes.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.ParseCheck();

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   Vector bbMin(dim);
   Vector bbMax(dim);
   mesh->GetBoundingBox(bbMin, bbMax);
   // 4. Refine the serial mesh on all processors to increase the resolution.
   //    In this example we do 'ser_ref_levels' of uniform refinement (2 by
   //    default, or specified on the command line with -rs).
   for (int lev = 0; lev < ser_ref_levels; lev++)
   {
      mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution (1 time by
   //    default, or specified on the command line with -rp). Once the parallel
   //    mesh is defined, the serial mesh can be deleted.
   ParMesh pmesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   for (int lev = 0; lev < par_ref_levels; lev++)
   {
      pmesh.UniformRefinement();
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Nedelec finite elements of the specified order.
   FiniteElementCollection *fec_nd = NULL;
   FiniteElementCollection *fec_rt = NULL;
   if (dim == 1)
   {
      fec_nd = new ND_R1D_FECollection(order, dim);
      fec_rt = new RT_R1D_FECollection(order-1, dim);
   }
   else if (dim == 2)
   {
      fec_nd = new ND_R2D_FECollection(order, dim);
      fec_rt = new RT_R2D_FECollection(order-1, dim);
   }
   else
   {
      fec_nd = new ND_FECollection(order, dim);
      fec_rt = new RT_FECollection(order-1, dim);
   }
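   // Note: the ND_R1D/RT_R1D and ND_R2D/RT_R2D collections define fields with
   // all three vector components on 1D and 2D meshes, which is what allows
   // this example to treat 1D, 2D, and 3D domains with the same code.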
   ParFiniteElementSpace fespace_nd(&pmesh, fec_nd);
   ParFiniteElementSpace fespace_rt(&pmesh, fec_rt);
   HYPRE_Int size_nd = fespace_nd.GlobalTrueVSize();
   HYPRE_Int size_rt = fespace_rt.GlobalTrueVSize();
   if (Mpi::Root())
   {
      cout << "Number of H(Curl) unknowns: " << size_nd << endl;
      cout << "Number of H(Div) unknowns: " << size_rt << endl;
   }

   // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
   //    element space. The first corresponds to the curl curl, while the
   //    second is a simple mass matrix needed on the right hand side of the
   //    generalized eigenvalue problem below. The boundary conditions are
   //    implemented by marking all the boundary attributes from the mesh as
   //    essential. The corresponding degrees of freedom are eliminated with
   //    special values on the diagonal to shift the Dirichlet eigenvalues out
   //    of the computational range. After serial and parallel assembly we
   //    extract the corresponding parallel matrices A and M.
   HypreParMatrix *A = NULL;
   HypreParMatrix *M = NULL;
   double shift = 0.0;
   {
      DenseMatrix epsilonMat(3);
      epsilonMat(0,0) = 2.0; epsilonMat(1,1) = 2.0; epsilonMat(2,2) = 2.0;
      epsilonMat(0,2) = 0.0; epsilonMat(2,0) = 0.0;
      epsilonMat(0,1) = M_SQRT1_2; epsilonMat(1,0) = M_SQRT1_2;
      epsilonMat(1,2) = M_SQRT1_2; epsilonMat(2,1) = M_SQRT1_2;
      MatrixConstantCoefficient epsilon(epsilonMat);

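      // With s = M_SQRT1_2 = 1/sqrt(2), the dielectric tensor built above is
      //
      //               [ 2  s  0 ]
      //     epsilon = [ s  2  s ],
      //               [ 0  s  2 ]
      //
      // a symmetric positive definite matrix with eigenvalues {1, 2, 3}.
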
      ConstantCoefficient one(1.0);
      Array<int> ess_bdr;
      if (pmesh.bdr_attributes.Size())
      {
         ess_bdr.SetSize(pmesh.bdr_attributes.Max());
         ess_bdr = 1;
      }

      ParBilinearForm a(&fespace_nd);
      a.AddDomainIntegrator(new CurlCurlIntegrator(one));
      if (pmesh.bdr_attributes.Size() == 0 || dim == 1)
      {
         // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
         // closed surface.
         a.AddDomainIntegrator(new VectorFEMassIntegrator(epsilon));
         shift = 1.0;
         if (Mpi::Root())
         {
            cout << "Computing eigenvalues shifted by " << shift << endl;
         }
      }
      a.Assemble();
      a.EliminateEssentialBCDiag(ess_bdr, 1.0);
      a.Finalize();

      ParBilinearForm m(&fespace_nd);
      m.AddDomainIntegrator(new VectorFEMassIntegrator(epsilon));
      m.Assemble();
      // shift the eigenvalue corresponding to eliminated dofs to a large value
      m.EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
      m.Finalize();

      A = a.ParallelAssemble();
      M = m.ParallelAssemble();
   }

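   // Note on the eliminations above: each eliminated Dirichlet dof gets a
   // diagonal of 1.0 in A and numeric_limits<double>::min() (~2.2e-308) in M,
   // so it becomes a generalized eigenpair with eigenvalue ~4.5e+307, far
   // above the physical part of the spectrum.
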
   // 8. Define and configure the AME eigensolver and the AMS preconditioner
   //    for A to be used within the solver. Set the matrices which define the
   //    generalized eigenproblem A x = lambda M x.
   HypreAMS *ams = new HypreAMS(*A, &fespace_nd);
   ams->SetPrintLevel(0);
   ams->SetSingularProblem();
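   // (SetSingularProblem tells AMS that A comes from a curl-curl bilinear
   // form with no mass term, i.e. it is singular on the gradient subspace.)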

   HypreAME *ame = new HypreAME(MPI_COMM_WORLD);
   ame->SetNumModes(nev);
   ame->SetPreconditioner(*ams);
   ame->SetMaxIter(100);
   ame->SetTol(1e-8);
   ame->SetPrintLevel(1);
   ame->SetMassMatrix(*M);
   ame->SetOperator(*A);

   // 9. Compute the eigenmodes and extract the array of eigenvalues. Define
   //    parallel grid functions to represent each of the eigenmodes returned
   //    by the solver and their derivatives.
   Array<double> eigenvalues;
   ame->Solve();
   ame->GetEigenvalues(eigenvalues);
   ParGridFunction x(&fespace_nd);
   ParGridFunction dx(&fespace_rt);

   ParDiscreteLinearOperator curl(&fespace_nd, &fespace_rt);
   curl.AddDomainInterpolator(new CurlInterpolator);
   curl.Assemble();
   curl.Finalize();

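   // (The assembled 'curl' operator is the rectangular discrete curl matrix
   // mapping H(curl) dofs to H(div) dofs, so curl.Mult(x, dx) below computes
   // dx = curl x for each eigenmode x.)
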
   // 10. Save the refined mesh and the modes in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
   {
      ostringstream mesh_name, mode_name, mode_deriv_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh.Print(mesh_ofs);

      for (int i=0; i<nev; i++)
      {
         // convert eigenvector from HypreParVector to ParGridFunction
         x = ame->GetEigenvector(i);
         curl.Mult(x, dx);

         mode_name << "mode_" << setfill('0') << setw(2) << i << "."
                   << setfill('0') << setw(6) << myid;
         mode_deriv_name << "mode_deriv_" << setfill('0') << setw(2) << i << "."
                         << setfill('0') << setw(6) << myid;

         ofstream mode_ofs(mode_name.str().c_str());
         mode_ofs.precision(8);
         x.Save(mode_ofs);
         mode_name.str("");

         ofstream mode_deriv_ofs(mode_deriv_name.str().c_str());
         mode_deriv_ofs.precision(8);
         dx.Save(mode_deriv_ofs);
         mode_deriv_name.str("");
      }
   }

   // 11. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      if (dim == 1)
      {
         socketstream mode_x_sock(vishost, visport);
         socketstream mode_y_sock(vishost, visport);
         socketstream mode_z_sock(vishost, visport);
         socketstream mode_dy_sock(vishost, visport);
         socketstream mode_dz_sock(vishost, visport);
         mode_x_sock.precision(8);
         mode_y_sock.precision(8);
         mode_z_sock.precision(8);
         mode_dy_sock.precision(8);
         mode_dz_sock.precision(8);

         Vector xVec(3); xVec = 0.0; xVec(0) = 1;
         Vector yVec(3); yVec = 0.0; yVec(1) = 1;
         Vector zVec(3); zVec = 0.0; zVec(2) = 1;
         VectorConstantCoefficient xVecCoef(xVec);
         VectorConstantCoefficient yVecCoef(yVec);
         VectorConstantCoefficient zVecCoef(zVec);

         H1_FECollection fec_h1(order, dim);
         L2_FECollection fec_l2(order-1, dim);

         ParFiniteElementSpace fes_h1(&pmesh, &fec_h1);
         ParFiniteElementSpace fes_l2(&pmesh, &fec_l2);

         ParGridFunction xComp(&fes_l2);
         ParGridFunction yComp(&fes_h1);
         ParGridFunction zComp(&fes_h1);

         ParGridFunction dyComp(&fes_l2);
         ParGridFunction dzComp(&fes_l2);

         for (int i=0; i<nev; i++)
         {
            if (Mpi::Root())
            {
               cout << "Eigenmode " << i+1 << '/' << nev
                    << ", Lambda = " << eigenvalues[i] - shift << endl;
            }

            // convert eigenvector from HypreParVector to ParGridFunction
            x = ame->GetEigenvector(i);
            curl.Mult(x, dx);

            {
               VectorGridFunctionCoefficient modeCoef(&x);
               InnerProductCoefficient xCoef(xVecCoef, modeCoef);
               InnerProductCoefficient yCoef(yVecCoef, modeCoef);
               InnerProductCoefficient zCoef(zVecCoef, modeCoef);

               xComp.ProjectCoefficient(xCoef);
               yComp.ProjectCoefficient(yCoef);
               zComp.ProjectCoefficient(zCoef);

               double max_x = GetScalarMax(xComp);
               double max_y = GetScalarMax(yComp);
               double max_z = GetScalarMax(zComp);
               double max_r = std::max(max_x, std::max(max_y, max_z));

               ostringstream x_cmd;
               x_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                     << " X, Lambda = " << eigenvalues[i] - shift << "'"
                     << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  x_cmd << " keys aa"
                        << " window_geometry 0 0 400 350";
               }

               mode_x_sock << "parallel " << num_procs << " " << myid << "\n"
                           << "solution\n" << pmesh << xComp << flush
                           << x_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream y_cmd;
               y_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                     << " Y, Lambda = " << eigenvalues[i] - shift << "'"
                     << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  y_cmd << " keys aa"
                        << " window_geometry 403 0 400 350";
               }

               mode_y_sock << "parallel " << num_procs << " " << myid << "\n"
                           << "solution\n" << pmesh << yComp << flush
                           << y_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream z_cmd;
               z_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                     << " Z, Lambda = " << eigenvalues[i] - shift << "'"
                     << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  z_cmd << " keys aa"
                        << " window_geometry 806 0 400 350";
               }

               mode_z_sock << "parallel " << num_procs << " " << myid << "\n"
                           << "solution\n" << pmesh << zComp << flush
                           << z_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               VectorGridFunctionCoefficient dmodeCoef(&dx);
               InnerProductCoefficient dyCoef(yVecCoef, dmodeCoef);
               InnerProductCoefficient dzCoef(zVecCoef, dmodeCoef);

               dyComp.ProjectCoefficient(dyCoef);
               dzComp.ProjectCoefficient(dzCoef);

               double min_d = max_r / (bbMax[0] - bbMin[0]);

               max_y = GetScalarMax(dyComp);
               max_z = GetScalarMax(dzComp);
               max_r = std::max(std::max(max_y, max_z), min_d);

               ostringstream dy_cmd;
               dy_cmd << " window_title 'Curl Eigenmode "
                      << i+1 << '/' << nev
                      << " Y, Lambda = " << eigenvalues[i] - shift << "'"
                      << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  dy_cmd << " keys aa"
                         << " window_geometry 403 375 400 350";
               }

               mode_dy_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << dyComp << flush
                            << dy_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream dz_cmd;
               dz_cmd << " window_title 'Curl Eigenmode "
                      << i+1 << '/' << nev
                      << " Z, Lambda = " << eigenvalues[i] - shift << "'"
                      << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  dz_cmd << " keys aa"
                         << " window_geometry 806 375 400 350";
               }

               mode_dz_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << dzComp << flush
                            << dz_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);
            }
            char c;
            if (Mpi::Root())
            {
               cout << "press (q)uit or (c)ontinue --> " << flush;
               cin >> c;
            }
            MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

            if (c != 'c')
            {
               break;
            }
         }
         mode_x_sock.close();
         mode_y_sock.close();
         mode_z_sock.close();
         mode_dy_sock.close();
         mode_dz_sock.close();
      }
      else if (dim == 2)
      {
         socketstream mode_xy_sock(vishost, visport);
         socketstream mode_z_sock(vishost, visport);
         socketstream mode_dxy_sock(vishost, visport);
         socketstream mode_dz_sock(vishost, visport);
         mode_xy_sock.precision(8);
         mode_z_sock.precision(8);
         mode_dxy_sock.precision(8);
         mode_dz_sock.precision(8);

         DenseMatrix xyMat(2,3); xyMat = 0.0;
         xyMat(0,0) = 1.0; xyMat(1,1) = 1.0;
         MatrixConstantCoefficient xyMatCoef(xyMat);
         Vector zVec(3); zVec = 0.0; zVec(2) = 1;
         VectorConstantCoefficient zVecCoef(zVec);

         H1_FECollection fec_h1(order, dim);
         ND_FECollection fec_nd_xy(order, dim);
         RT_FECollection fec_rt_xy(order-1, dim);
         L2_FECollection fec_l2(order-1, dim);

         ParFiniteElementSpace fes_h1(&pmesh, &fec_h1);
         ParFiniteElementSpace fes_nd(&pmesh, &fec_nd_xy);
         ParFiniteElementSpace fes_rt(&pmesh, &fec_rt_xy);
         ParFiniteElementSpace fes_l2(&pmesh, &fec_l2);

         ParGridFunction xyComp(&fes_nd);
         ParGridFunction zComp(&fes_h1);

         ParGridFunction dxyComp(&fes_rt);
         ParGridFunction dzComp(&fes_l2);

         for (int i=0; i<nev; i++)
         {
            if (Mpi::Root())
            {
               cout << "Eigenmode " << i+1 << '/' << nev
                    << ", Lambda = " << eigenvalues[i] - shift << endl;
            }

            // convert eigenvector from HypreParVector to ParGridFunction
            x = ame->GetEigenvector(i);
            curl.Mult(x, dx);

            {
               VectorGridFunctionCoefficient modeCoef(&x);
               MatrixVectorProductCoefficient xyCoef(xyMatCoef, modeCoef);
               InnerProductCoefficient zCoef(zVecCoef, modeCoef);

               xyComp.ProjectCoefficient(xyCoef);
               zComp.ProjectCoefficient(zCoef);

               double max_v = GetVectorMax(2, xyComp);
               double max_s = GetScalarMax(zComp);
               double max_r = std::max(max_v, max_s);

               ostringstream xy_cmd;
               xy_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                      << " XY, Lambda = " << eigenvalues[i] - shift << "'"
                      << " valuerange 0.0 " << max_r;
               if (i == 0)
               {
                  xy_cmd << " keys aavvv"
                         << " window_geometry 0 0 400 350";
               }

               mode_xy_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << xyComp << flush
                            << xy_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream z_cmd;
               z_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                     << " Z, Lambda = " << eigenvalues[i] - shift << "'"
                     << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  z_cmd << " keys aa"
                        << " window_geometry 403 0 400 350";
               }

               mode_z_sock << "parallel " << num_procs << " " << myid << "\n"
                           << "solution\n" << pmesh << zComp << flush
                           << z_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               VectorGridFunctionCoefficient dmodeCoef(&dx);
               MatrixVectorProductCoefficient dxyCoef(xyMatCoef, dmodeCoef);
               InnerProductCoefficient dzCoef(zVecCoef, dmodeCoef);

               dxyComp.ProjectCoefficient(dxyCoef);
               dzComp.ProjectCoefficient(dzCoef);

               double min_d = max_r / std::min(bbMax[0] - bbMin[0],
                                               bbMax[1] - bbMin[1]);

               max_v = GetVectorMax(2, dxyComp);
               max_s = GetScalarMax(dzComp);
               max_r = std::max(std::max(max_v, max_s), min_d);

               ostringstream dxy_cmd;
               dxy_cmd << " window_title 'Curl Eigenmode "
                       << i+1 << '/' << nev
                       << " XY, Lambda = " << eigenvalues[i] - shift << "'"
                       << " valuerange 0.0 " << max_r << '\n';
               if (i == 0)
               {
                  dxy_cmd << " keys aavvv"
                          << " window_geometry 0 375 400 350";
               }

               mode_dxy_sock << "parallel " << num_procs << " " << myid << "\n"
                             << "solution\n" << pmesh << dxyComp << flush
                             << dxy_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream dz_cmd;
               dz_cmd << " window_title 'Curl Eigenmode "
                      << i+1 << '/' << nev
                      << " Z, Lambda = " << eigenvalues[i] - shift << "'"
                      << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  dz_cmd << " keys aa"
                         << " window_geometry 403 375 400 350";
               }

               mode_dz_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << dzComp << flush
                            << dz_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);
            }
            char c;
            if (Mpi::Root())
            {
               cout << "press (q)uit or (c)ontinue --> " << flush;
               cin >> c;
            }
            MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

            if (c != 'c')
            {
               break;
            }
         }
         mode_xy_sock.close();
         mode_z_sock.close();
         mode_dxy_sock.close();
         mode_dz_sock.close();
      }
      else
      {
         socketstream mode_sock(vishost, visport);
         socketstream mode_deriv_sock(vishost, visport);
         mode_sock.precision(8);
         mode_deriv_sock.precision(8);

         for (int i=0; i<nev; i++)
         {
            if (Mpi::Root())
            {
               cout << "Eigenmode " << i+1 << '/' << nev
                    << ", Lambda = " << eigenvalues[i] - shift << endl;
            }

            // convert eigenvector from HypreParVector to ParGridFunction
            x = ame->GetEigenvector(i);
            curl.Mult(x, dx);

            mode_sock << "parallel " << num_procs << " " << myid << "\n"
                      << "solution\n" << pmesh << x << flush
                      << "window_title 'Eigenmode " << i+1 << '/' << nev
                      << ", Lambda = " << eigenvalues[i] - shift
                      << "'" << endl;

            MPI_Barrier(MPI_COMM_WORLD);

            mode_deriv_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << dx << flush
                            << "window_geometry 0 375 400 350 "
                            << "window_title 'Curl Eigenmode "
                            << i+1 << '/' << nev
                            << ", Lambda = " << eigenvalues[i] - shift
                            << "'" << endl;

            MPI_Barrier(MPI_COMM_WORLD);

            char c;
            if (Mpi::Root())
            {
               cout << "press (q)uit or (c)ontinue --> " << flush;
               cin >> c;
            }
            MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

            if (c != 'c')
            {
               break;
            }
         }
         mode_sock.close();
      }
   }

   // 12. Free the used memory.
   delete ame;
   delete ams;
   delete M;
   delete A;

   delete fec_nd;
   delete fec_rt;

   return 0;
}

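// Helpers returning the global max norm of a grid function, computed as its
// maximum "error" against an identically zero coefficient.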
double GetVectorMax(int vdim, const ParGridFunction &x)
{
   Vector zeroVec(vdim); zeroVec = 0.0;
   VectorConstantCoefficient zero(zeroVec);
   double nrm = x.ComputeMaxError(zero);
   return nrm;
}

double GetScalarMax(const ParGridFunction &x)
{
   ConstantCoefficient zero(0.0);
   double nrm = x.ComputeMaxError(zero);
   return nrm;
}