MFEM v4.4.0
Finite element discretization library

ex32p.cpp
// MFEM Example 32 - Parallel Version
//
// Compile with: make ex32p
//
// Sample runs:  mpirun -np 4 ex32p -m ../data/hexagon.mesh -o 2
//               mpirun -np 4 ex32p -m ../data/star.mesh
//               mpirun -np 4 ex32p -m ../data/square-disc.mesh -o 2 -n 4 -rs 1
//               mpirun -np 4 ex32p -m ../data/square-disc-nurbs.mesh -rs 3 -o 3
//               mpirun -np 4 ex32p -m ../data/amr-quad.mesh -o 2 -rs 1
//               mpirun -np 4 ex32p -m ../data/amr-hex.mesh -rs 1
//               mpirun -np 4 ex32p -m ../data/fichera.mesh -rs 1
//
// Description:  This example code solves the Maxwell (electromagnetic)
//               eigenvalue problem curl curl E = lambda epsilon E with an
//               anisotropic dielectric tensor, epsilon, and homogeneous
//               Dirichlet boundary conditions E x n = 0.
//
//               We compute a number of the lowest nonzero eigenmodes by
//               discretizing the curl curl operator using a Nedelec FE space
//               of the specified order in 1D, 2D, or 3D.
//
//               The example highlights the use of restricted H(curl) finite
//               element spaces with the AME subspace eigenvalue solver from
//               HYPRE, which uses LOBPCG and AMS internally. Reusing a single
//               GLVis visualization window for multiple eigenfunctions is
//               also illustrated.
//
//               We recommend viewing examples 31 and 13 before viewing this
//               example.

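// Note: discretizing with the Nedelec elements below leads to the
// generalized algebraic eigenvalue problem A x = lambda M x, where A is the
// curl-curl stiffness matrix (augmented by an epsilon-weighted mass term,
// i.e. a spectral shift, when the mesh has no boundary) and M is the
// epsilon-weighted mass matrix; see steps 7 and 8 for the assembly.
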
#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

double GetVectorMax(int vdim, const ParGridFunction &x);
double GetScalarMax(const ParGridFunction &x);

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   MPI_Session mpi;
   int num_procs = mpi.WorldSize();
   int myid = mpi.WorldRank();

   // 2. Parse command-line options.
   const char *mesh_file = "../data/inline-quad.mesh";
   int ser_ref_levels = 2;
   int par_ref_levels = 1;
   int order = 1;
   int nev = 5;
   bool visualization = true;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
                  "Number of times to refine the mesh uniformly in serial.");
   args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
                  "Number of times to refine the mesh uniformly in parallel.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&nev, "-n", "--num-eigs",
                  "Number of desired eigenmodes.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.ParseCheck();

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   Vector bbMin(dim);
   Vector bbMax(dim);
   mesh->GetBoundingBox(bbMin, bbMax);

   // 4. Refine the serial mesh on all processors to increase the resolution.
   //    In this example we do 'ser_ref_levels' of uniform refinement (2 by
   //    default, or as specified on the command line with -rs).
   for (int lev = 0; lev < ser_ref_levels; lev++)
   {
      mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution (1 time by
   //    default, or specified on the command line with -rp). Once the parallel
   //    mesh is defined, the serial mesh can be deleted.
   ParMesh pmesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   for (int lev = 0; lev < par_ref_levels; lev++)
   {
      pmesh.UniformRefinement();
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Nedelec finite elements of the specified order.
   FiniteElementCollection *fec_nd = NULL;
   FiniteElementCollection *fec_rt = NULL;
   if (dim == 1)
   {
      fec_nd = new ND_R1D_FECollection(order, dim);
      fec_rt = new RT_R1D_FECollection(order-1, dim);
   }
   else if (dim == 2)
   {
      fec_nd = new ND_R2D_FECollection(order, dim);
      fec_rt = new RT_R2D_FECollection(order-1, dim);
   }
   else
   {
      fec_nd = new ND_FECollection(order, dim);
      fec_rt = new RT_FECollection(order-1, dim);
   }
   ParFiniteElementSpace fespace_nd(&pmesh, fec_nd);
   ParFiniteElementSpace fespace_rt(&pmesh, fec_rt);
   HYPRE_BigInt size_nd = fespace_nd.GlobalTrueVSize();
   HYPRE_BigInt size_rt = fespace_rt.GlobalTrueVSize();
   if (mpi.Root())
   {
      cout << "Number of H(Curl) unknowns: " << size_nd << endl;
      cout << "Number of H(Div) unknowns: " << size_rt << endl;
   }

   // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
   //    element space. The first corresponds to the curl curl, while the
   //    second is a simple mass matrix needed on the right hand side of the
   //    generalized eigenvalue problem below. The boundary conditions are
   //    implemented by marking all the boundary attributes from the mesh as
   //    essential. The corresponding degrees of freedom are eliminated with
   //    special values on the diagonal to shift the Dirichlet eigenvalues out
   //    of the computational range. After serial and parallel assembly we
   //    extract the corresponding parallel matrices A and M.
   HypreParMatrix *A = NULL;
   HypreParMatrix *M = NULL;
   double shift = 0.0;
   {
      DenseMatrix epsilonMat(3);
      epsilonMat(0,0) = 2.0; epsilonMat(1,1) = 2.0; epsilonMat(2,2) = 2.0;
      epsilonMat(0,2) = 0.0; epsilonMat(2,0) = 0.0;
      epsilonMat(0,1) = M_SQRT1_2; epsilonMat(1,0) = M_SQRT1_2;
      epsilonMat(1,2) = M_SQRT1_2; epsilonMat(2,1) = M_SQRT1_2;
      MatrixConstantCoefficient epsilon(epsilonMat);
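      // Written out, the dielectric tensor assembled above is the symmetric
      // positive definite matrix
      //
      //              [    2      1/sqrt(2)      0     ]
      //    epsilon = [ 1/sqrt(2)     2      1/sqrt(2) ]
      //              [    0      1/sqrt(2)      2     ]
      //
      // whose eigenvalues are 1, 2 and 3.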

      ConstantCoefficient one(1.0);
      Array<int> ess_bdr;
      if (pmesh.bdr_attributes.Size())
      {
         ess_bdr.SetSize(pmesh.bdr_attributes.Max());
         ess_bdr = 1;
      }

      ParBilinearForm a(&fespace_nd);
      a.AddDomainIntegrator(new CurlCurlIntegrator(one));
      if (pmesh.bdr_attributes.Size() == 0 || dim == 1)
      {
         // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
         // closed surface.
         a.AddDomainIntegrator(new VectorFEMassIntegrator(epsilon));
         shift = 1.0;
         if (mpi.Root())
         {
            cout << "Computing eigenvalues shifted by " << shift << endl;
         }
      }
      a.Assemble();
      a.EliminateEssentialBCDiag(ess_bdr, 1.0);
      a.Finalize();

      ParBilinearForm m(&fespace_nd);
      m.AddDomainIntegrator(new VectorFEMassIntegrator(epsilon));
      m.Assemble();
      // shift the eigenvalue corresponding to eliminated dofs to a large value
      m.EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
      m.Finalize();

      A = a.ParallelAssemble();
      M = m.ParallelAssemble();
   }
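
   // Note: after the elimination above, each essential dof carries
   // A_ii = 1.0 and M_ii = numeric_limits<double>::min() (the smallest
   // positive normalized double, about 2.2e-308), so its generalized
   // eigenvalue is roughly 1/M_ii, far above the physical part of the
   // spectrum computed below.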

   // 8. Define and configure the AME eigensolver and the AMS preconditioner
   //    for A to be used within the solver. Set the matrices which define the
   //    generalized eigenproblem A x = lambda M x.
   HypreAMS *ams = new HypreAMS(*A, &fespace_nd);
   ams->SetPrintLevel(0);
   ams->SetSingularProblem();
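   // SetSingularProblem() tells AMS that A comes from a curl-curl operator
   // with zero mass term, i.e. that A is singular on the large null space of
   // the curl operator (the gradient fields).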

   HypreAME *ame = new HypreAME(MPI_COMM_WORLD);
   ame->SetNumModes(nev);
   ame->SetPreconditioner(*ams);
   ame->SetMaxIter(100);
   ame->SetTol(1e-8);
   ame->SetPrintLevel(1);
   ame->SetMassMatrix(*M);
   ame->SetOperator(*A);

   // 9. Compute the eigenmodes and extract the array of eigenvalues. Define
   //    parallel grid functions to represent each of the eigenmodes returned
   //    by the solver and their derivatives.
   Array<double> eigenvalues;
   ame->Solve();
   ame->GetEigenvalues(eigenvalues);
   ParGridFunction x(&fespace_nd);
   ParGridFunction dx(&fespace_rt);

   ParDiscreteLinearOperator curl(&fespace_nd, &fespace_rt);
   curl.AddDomainInterpolator(new CurlInterpolator);
   curl.Assemble();
   curl.Finalize();
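   // The discrete 'curl' operator maps an H(curl) eigenvector x to its curl
   // dx in the H(div) space, so each mode's derivative can be saved and
   // visualized alongside the mode itself.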

   // 10. Save the refined mesh and the modes in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
   {
      ostringstream mesh_name, mode_name, mode_deriv_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh.Print(mesh_ofs);

      for (int i=0; i<nev; i++)
      {
         // convert eigenvector from HypreParVector to ParGridFunction
         x = ame->GetEigenvector(i);
         curl.Mult(x, dx);

         mode_name << "mode_" << setfill('0') << setw(2) << i << "."
                   << setfill('0') << setw(6) << myid;
         mode_deriv_name << "mode_deriv_" << setfill('0') << setw(2) << i << "."
                         << setfill('0') << setw(6) << myid;

         ofstream mode_ofs(mode_name.str().c_str());
         mode_ofs.precision(8);
         x.Save(mode_ofs);
         mode_name.str("");

         ofstream mode_deriv_ofs(mode_deriv_name.str().c_str());
         mode_deriv_ofs.precision(8);
         dx.Save(mode_deriv_ofs);
         mode_deriv_name.str("");
      }
   }

   // 11. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      if (dim == 1)
      {
         socketstream mode_x_sock(vishost, visport);
         socketstream mode_y_sock(vishost, visport);
         socketstream mode_z_sock(vishost, visport);
         socketstream mode_dy_sock(vishost, visport);
         socketstream mode_dz_sock(vishost, visport);
         mode_x_sock.precision(8);
         mode_y_sock.precision(8);
         mode_z_sock.precision(8);
         mode_dy_sock.precision(8);
         mode_dz_sock.precision(8);

         Vector xVec(3); xVec = 0.0; xVec(0) = 1;
         Vector yVec(3); yVec = 0.0; yVec(1) = 1;
         Vector zVec(3); zVec = 0.0; zVec(2) = 1;
         VectorConstantCoefficient xVecCoef(xVec);
         VectorConstantCoefficient yVecCoef(yVec);
         VectorConstantCoefficient zVecCoef(zVec);
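
         // The ND_R1D space stores full three-component fields on the 1D
         // mesh; taking inner products with the Cartesian unit vectors above
         // extracts the individual components as scalars for visualization.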

         H1_FECollection fec_h1(order, dim);
         L2_FECollection fec_l2(order-1, dim);

         ParFiniteElementSpace fes_h1(&pmesh, &fec_h1);
         ParFiniteElementSpace fes_l2(&pmesh, &fec_l2);

         ParGridFunction xComp(&fes_l2);
         ParGridFunction yComp(&fes_h1);
         ParGridFunction zComp(&fes_h1);

         ParGridFunction dyComp(&fes_l2);
         ParGridFunction dzComp(&fes_l2);

         for (int i=0; i<nev; i++)
         {
            if (mpi.Root())
            {
               cout << "Eigenmode " << i+1 << '/' << nev
                    << ", Lambda = " << eigenvalues[i] - shift << endl;
            }

            // convert eigenvector from HypreParVector to ParGridFunction
            x = ame->GetEigenvector(i);
            curl.Mult(x, dx);

            {
               VectorGridFunctionCoefficient modeCoef(&x);
               InnerProductCoefficient xCoef(xVecCoef, modeCoef);
               InnerProductCoefficient yCoef(yVecCoef, modeCoef);
               InnerProductCoefficient zCoef(zVecCoef, modeCoef);

               xComp.ProjectCoefficient(xCoef);
               yComp.ProjectCoefficient(yCoef);
               zComp.ProjectCoefficient(zCoef);

               double max_x = GetScalarMax(xComp);
               double max_y = GetScalarMax(yComp);
               double max_z = GetScalarMax(zComp);
               double max_r = std::max(max_x, std::max(max_y, max_z));

               ostringstream x_cmd;
               x_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                     << " X, Lambda = " << eigenvalues[i] - shift << "'"
                     << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  x_cmd << " keys aa"
                        << " window_geometry 0 0 400 350";
               }

               mode_x_sock << "parallel " << num_procs << " " << myid << "\n"
                           << "solution\n" << pmesh << xComp << flush
                           << x_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream y_cmd;
               y_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                     << " Y, Lambda = " << eigenvalues[i] - shift << "'"
                     << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  y_cmd << " keys aa "
                        << " window_geometry 403 0 400 350";
               }

               mode_y_sock << "parallel " << num_procs << " " << myid << "\n"
                           << "solution\n" << pmesh << yComp << flush
                           << y_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream z_cmd;
               z_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                     << " Z, Lambda = " << eigenvalues[i] - shift << "'"
                     << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  z_cmd << " keys aa "
                        << " window_geometry 806 0 400 350";
               }

               mode_z_sock << "parallel " << num_procs << " " << myid << "\n"
                           << "solution\n" << pmesh << zComp << flush
                           << z_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               VectorGridFunctionCoefficient dmodeCoef(&dx);
               InnerProductCoefficient dyCoef(yVecCoef, dmodeCoef);
               InnerProductCoefficient dzCoef(zVecCoef, dmodeCoef);

               dyComp.ProjectCoefficient(dyCoef);
               dzComp.ProjectCoefficient(dzCoef);

               double min_d = max_r / (bbMax[0] - bbMin[0]);

               max_y = GetScalarMax(dyComp);
               max_z = GetScalarMax(dzComp);
               max_r = std::max(std::max(max_y, max_z), min_d);

               ostringstream dy_cmd;
               dy_cmd << " window_title 'Curl Eigenmode "
                      << i+1 << '/' << nev
                      << " Y, Lambda = " << eigenvalues[i] - shift << "'"
                      << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  dy_cmd << " keys aa"
                         << " window_geometry 403 375 400 350";
               }

               mode_dy_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << dyComp << flush
                            << dy_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream dz_cmd;
               dz_cmd << " window_title 'Curl Eigenmode "
                      << i+1 << '/' << nev
                      << " Z, Lambda = " << eigenvalues[i] - shift << "'"
                      << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  dz_cmd << " keys aa"
                         << " window_geometry 806 375 400 350";
               }

               mode_dz_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << dzComp << flush
                            << dz_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);
            }
            char c;
            if (mpi.Root())
            {
               cout << "press (q)uit or (c)ontinue --> " << flush;
               cin >> c;
            }
            MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

            if (c != 'c')
            {
               break;
            }
         }
         mode_x_sock.close();
         mode_y_sock.close();
         mode_z_sock.close();
         mode_dy_sock.close();
         mode_dz_sock.close();
      }
      else if (dim == 2)
      {
         socketstream mode_xy_sock(vishost, visport);
         socketstream mode_z_sock(vishost, visport);
         socketstream mode_dxy_sock(vishost, visport);
         socketstream mode_dz_sock(vishost, visport);
         mode_xy_sock.precision(8);
         mode_z_sock.precision(8);
         mode_dxy_sock.precision(8);
         mode_dz_sock.precision(8);

         DenseMatrix xyMat(2,3); xyMat = 0.0;
         xyMat(0,0) = 1.0; xyMat(1,1) = 1.0;
         MatrixConstantCoefficient xyMatCoef(xyMat);
         Vector zVec(3); zVec = 0.0; zVec(2) = 1;
         VectorConstantCoefficient zVecCoef(zVec);
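
         // The 2x3 matrix xyMat selects the in-plane (x,y) part of the
         // three-component ND_R2D field, while zVec extracts the
         // out-of-plane z component.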

         H1_FECollection fec_h1(order, dim);
         ND_FECollection fec_nd_xy(order, dim);
         RT_FECollection fec_rt_xy(order-1, dim);
         L2_FECollection fec_l2(order-1, dim);

         ParFiniteElementSpace fes_h1(&pmesh, &fec_h1);
         ParFiniteElementSpace fes_nd(&pmesh, &fec_nd_xy);
         ParFiniteElementSpace fes_rt(&pmesh, &fec_rt_xy);
         ParFiniteElementSpace fes_l2(&pmesh, &fec_l2);

         ParGridFunction xyComp(&fes_nd);
         ParGridFunction zComp(&fes_h1);

         ParGridFunction dxyComp(&fes_rt);
         ParGridFunction dzComp(&fes_l2);

         for (int i=0; i<nev; i++)
         {
            if (mpi.Root())
            {
               cout << "Eigenmode " << i+1 << '/' << nev
                    << ", Lambda = " << eigenvalues[i] - shift << endl;
            }

            // convert eigenvector from HypreParVector to ParGridFunction
            x = ame->GetEigenvector(i);
            curl.Mult(x, dx);

            {
               VectorGridFunctionCoefficient modeCoef(&x);
               MatrixVectorProductCoefficient xyCoef(xyMatCoef, modeCoef);
               InnerProductCoefficient zCoef(zVecCoef, modeCoef);

               xyComp.ProjectCoefficient(xyCoef);
               zComp.ProjectCoefficient(zCoef);

               double max_v = GetVectorMax(2, xyComp);
               double max_s = GetScalarMax(zComp);
               double max_r = std::max(max_v, max_s);

               ostringstream xy_cmd;
               xy_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                      << " XY, Lambda = " << eigenvalues[i] - shift << "'"
                      << " valuerange 0.0 " << max_r;
               if (i == 0)
               {
                  xy_cmd << " keys aavvv"
                         << " window_geometry 0 0 400 350";
               }

               mode_xy_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << xyComp << flush
                            << xy_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream z_cmd;
               z_cmd << " window_title 'Eigenmode " << i+1 << '/' << nev
                     << " Z, Lambda = " << eigenvalues[i] - shift << "'"
                     << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  z_cmd << " keys aa"
                        << " window_geometry 403 0 400 350";
               }

               mode_z_sock << "parallel " << num_procs << " " << myid << "\n"
                           << "solution\n" << pmesh << zComp << flush
                           << z_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               VectorGridFunctionCoefficient dmodeCoef(&dx);
               MatrixVectorProductCoefficient dxyCoef(xyMatCoef, dmodeCoef);
               InnerProductCoefficient dzCoef(zVecCoef, dmodeCoef);

               dxyComp.ProjectCoefficient(dxyCoef);
               dzComp.ProjectCoefficient(dzCoef);

               double min_d = max_r / std::min(bbMax[0] - bbMin[0],
                                               bbMax[1] - bbMin[1]);

               max_v = GetVectorMax(2, dxyComp);
               max_s = GetScalarMax(dzComp);
               max_r = std::max(std::max(max_v, max_s), min_d);

               ostringstream dxy_cmd;
               dxy_cmd << " window_title 'Curl Eigenmode "
                       << i+1 << '/' << nev
                       << " XY, Lambda = " << eigenvalues[i] - shift << "'"
                       << " valuerange 0.0 " << max_r << '\n';
               if (i == 0)
               {
                  dxy_cmd << " keys aavvv "
                          << " window_geometry 0 375 400 350";
               }

               mode_dxy_sock << "parallel " << num_procs << " " << myid << "\n"
                             << "solution\n" << pmesh << dxyComp << flush
                             << dxy_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);

               ostringstream dz_cmd;
               dz_cmd << " window_title 'Curl Eigenmode "
                      << i+1 << '/' << nev
                      << " Z, Lambda = " << eigenvalues[i] - shift << "'"
                      << " valuerange -" << max_r << ' ' << max_r;
               if (i == 0)
               {
                  dz_cmd << " keys aa"
                         << " window_geometry 403 375 400 350";
               }

               mode_dz_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << dzComp << flush
                            << dz_cmd.str() << endl << flush;

               MPI_Barrier(MPI_COMM_WORLD);
            }
            char c;
            if (mpi.Root())
            {
               cout << "press (q)uit or (c)ontinue --> " << flush;
               cin >> c;
            }
            MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

            if (c != 'c')
            {
               break;
            }
         }
         mode_xy_sock.close();
         mode_z_sock.close();
         mode_dxy_sock.close();
         mode_dz_sock.close();
      }
      else
      {
         socketstream mode_sock(vishost, visport);
         socketstream mode_deriv_sock(vishost, visport);
         mode_sock.precision(8);
         mode_deriv_sock.precision(8);

         for (int i=0; i<nev; i++)
         {
            if (mpi.Root())
            {
               cout << "Eigenmode " << i+1 << '/' << nev
                    << ", Lambda = " << eigenvalues[i] - shift << endl;
            }

            // convert eigenvector from HypreParVector to ParGridFunction
            x = ame->GetEigenvector(i);
            curl.Mult(x, dx);

            mode_sock << "parallel " << num_procs << " " << myid << "\n"
                      << "solution\n" << pmesh << x << flush
                      << "window_title 'Eigenmode " << i+1 << '/' << nev
                      << ", Lambda = " << eigenvalues[i] - shift
                      << "'" << endl;

            MPI_Barrier(MPI_COMM_WORLD);

            mode_deriv_sock << "parallel " << num_procs << " " << myid << "\n"
                            << "solution\n" << pmesh << dx << flush
                            << "window_geometry 0 375 400 350 "
                            << "window_title 'Curl Eigenmode "
                            << i+1 << '/' << nev
                            << ", Lambda = " << eigenvalues[i] - shift
                            << "'" << endl;

            MPI_Barrier(MPI_COMM_WORLD);

            char c;
            if (mpi.Root())
            {
               cout << "press (q)uit or (c)ontinue --> " << flush;
               cin >> c;
            }
            MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

            if (c != 'c')
            {
               break;
            }
         }
         mode_sock.close();
      }
   }

   // 12. Free the used memory.
   delete ame;
   delete ams;
   delete M;
   delete A;

   delete fec_nd;
   delete fec_rt;

   return 0;
}

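// The helpers below compute the global maximum pointwise magnitude of a grid
// function by measuring its maximum "error" against an identically zero
// coefficient; the result is used above to set consistent GLVis value ranges.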
double GetVectorMax(int vdim, const ParGridFunction &x)
{
   Vector zeroVec(vdim); zeroVec = 0.0;
   VectorConstantCoefficient zero(zeroVec);
   double nrm = x.ComputeMaxError(zero);
   return nrm;
}

double GetScalarMax(const ParGridFunction &x)
{
   ConstantCoefficient zero(0.0);
   double nrm = x.ComputeMaxError(zero);
   return nrm;
}