MFEM v3.3
Finite element discretization library
petsc/ex9p.cpp
1 // MFEM Example 9 - Parallel Version
2 // PETSc Modification
3 //
4 // Compile with: make ex9p
5 //
6 // Sample runs:
7 // mpirun -np 4 ex9p -m ../../data/periodic-hexagon.mesh --petscopts rc_ex9p_expl
8 // mpirun -np 4 ex9p -m ../../data/periodic-hexagon.mesh --petscopts rc_ex9p_impl -implicit
9 //
10 // Description: This example code solves the time-dependent advection equation
11 // du/dt + v.grad(u) = 0, where v is a given fluid velocity, and
12 // u0(x)=u(0,x) is a given initial condition.
13 //
14 // The example demonstrates the use of Discontinuous Galerkin (DG)
15 // bilinear forms in MFEM (face integrators), the use of explicit
16 // ODE time integrators, the definition of periodic boundary
17 // conditions through periodic meshes, as well as the use of GLVis
18 // for persistent visualization of a time-evolving solution. The
19 // saving of time-dependent data files for external visualization
20 // with VisIt (visit.llnl.gov) is also illustrated.
21 //
22 // The example also demonstrates how to use PETSc ODE solvers and
23 // customize them by command line (see rc_ex9p_expl and
24 // rc_ex9p_impl). The split into left-hand side and right-hand side
25 // of the TimeDependentOperator makes it amenable to IMEX methods.
26 // When using fully implicit methods, only the left-hand side of
27 // the operator needs to be provided, for efficiency when
28 // assembling the Jacobians. Here, we provide two Jacobian
29 // routines just to illustrate the capabilities of the
30 // PetscODESolver class. We also show how to monitor the
31 // time-dependent solution inside a call to PetscODESolver::Mult.
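//              (Concretely, the semi-discrete system is written as
//              F(u,du/dt,t) = G(u,t): the implicit/IMEX setup uses
//              F = M du/dt and G = K u + b, while the fully explicit setup
//              uses F = du/dt and G = M^{-1} (K u + b), with M, K and b the
//              DG mass matrix, advection matrix and inflow boundary vector.)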
32 
33 #include "mfem.hpp"
34 #include <fstream>
35 #include <iostream>
36 
37 #ifndef MFEM_USE_PETSC
38 #error This example requires that MFEM is built with MFEM_USE_PETSC=YES
39 #endif
40 
41 using namespace std;
42 using namespace mfem;
43 
44 // Choice for the problem setup. The fluid velocity, initial condition and
45 // inflow boundary condition are chosen based on this parameter.
46 int problem;
47 
48 // Velocity coefficient
49 void velocity_function(const Vector &x, Vector &v);
50 
51 // Initial condition
52 double u0_function(const Vector &x);
53 
54 // Inflow boundary condition
55 double inflow_function(const Vector &x);
56 
57 // Mesh bounding box
58 Vector bb_min, bb_max;
59 
60 
61 /** A time-dependent operator for the ODE in the form F(u,du/dt,t) = G(u,t).
62  The DG weak form of du/dt = -v.grad(u) is M du/dt = K u + b, where M and
63  K are the mass and advection matrices, and b describes the flow on the
64  boundary. This can also be written as a general ODE with only a
65  right-hand side, du/dt = M^{-1} (K u + b).
66  This class is used to evaluate both the right-hand and the left-hand side. */
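// ImplicitMult() evaluates the left-hand side F(u,du/dt,t), ExplicitMult()
// the right-hand side G(u,t), and GetImplicitGradient()/GetExplicitGradient()
// return the corresponding Jacobians for use by implicit PETSc solvers. In
// the explicit case, Mult() applies M^{-1} (K u + b), inverting the mass
// matrix with CG preconditioned by a Jacobi smoother.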
67 class FE_Evolution : public TimeDependentOperator
68 {
69 private:
70  HypreParMatrix &M, &K;
71  const Vector &b;
72  HypreSmoother M_prec;
73  CGSolver M_solver;
74 
75  mutable Vector z;
76  mutable PetscParMatrix* iJacobian;
77  mutable PetscParMatrix* rJacobian;
78 
79 public:
80  FE_Evolution(HypreParMatrix &_M, HypreParMatrix &_K, const Vector &_b,
81  bool M_in_lhs);
82 
83  virtual void ExplicitMult(const Vector &x, Vector &y) const;
84  virtual void ImplicitMult(const Vector &x, const Vector &xp, Vector &y) const;
85  virtual void Mult(const Vector &x, Vector &y) const;
86  virtual Operator& GetExplicitGradient(const Vector &x) const;
87  virtual Operator& GetImplicitGradient(const Vector &x, const Vector &xp,
88  double shift) const;
89  virtual ~FE_Evolution() { delete iJacobian; delete rJacobian; }
90 };
91 
92 
93 // Monitor the solution at time step "step", explicitly in the time loop
94 class UserMonitor : public PetscSolverMonitor
95 {
96 private:
97  socketstream& sout;
98  ParMesh* pmesh;
99  ParGridFunction* u;
100  int vt;
101  bool pause;
102 
103 public:
104  UserMonitor(socketstream& _s, ParMesh* _m, ParGridFunction* _u, int _vt) :
105  PetscSolverMonitor(true,false), sout(_s), pmesh(_m), u(_u), vt(_vt),
106  pause(true) {}
107 
108  void MonitorSolution(PetscInt step, PetscReal norm, const Vector &X)
109  {
110  if (step % vt == 0)
111  {
112  int num_procs, myid;
113 
114  *u = X;
115  MPI_Comm_size(pmesh->GetComm(),&num_procs);
116  MPI_Comm_rank(pmesh->GetComm(),&myid);
117  sout << "parallel " << num_procs << " " << myid << "\n";
118  sout << "solution\n" << *pmesh << *u;
119  if (pause) { sout << "pause\n"; }
120  sout << flush;
121  if (pause)
122  {
123  pause = false;
124  if (myid == 0)
125  {
126  cout << "GLVis visualization paused."
127  << " Press space (in the GLVis window) to resume it.\n";
128  }
129  }
130  }
131  }
132 };
133 
134 
135 int main(int argc, char *argv[])
136 {
137  // 1. Initialize MPI.
138  int num_procs, myid;
139  MPI_Init(&argc, &argv);
140  MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
141  MPI_Comm_rank(MPI_COMM_WORLD, &myid);
142 
143  // 2. Parse command-line options.
144  problem = 0;
145  const char *mesh_file = "../../data/periodic-hexagon.mesh";
146  int ser_ref_levels = 2;
147  int par_ref_levels = 0;
148  int order = 3;
149  int ode_solver_type = 4;
150  double t_final = 10.0;
151  double dt = 0.01;
152  bool visualization = true;
153  bool visit = false;
154  bool binary = false;
155  int vis_steps = 5;
156  bool use_petsc = true;
157  bool implicit = false;
158  bool use_step = true;
159  const char *petscrc_file = "";
160 
161  int precision = 8;
162  cout.precision(precision);
163 
164  OptionsParser args(argc, argv);
165  args.AddOption(&mesh_file, "-m", "--mesh",
166  "Mesh file to use.");
167  args.AddOption(&problem, "-p", "--problem",
168  "Problem setup to use. See options in velocity_function().");
169  args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
170  "Number of times to refine the mesh uniformly in serial.");
171  args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
172  "Number of times to refine the mesh uniformly in parallel.");
173  args.AddOption(&order, "-o", "--order",
174  "Order (degree) of the finite elements.");
175  args.AddOption(&ode_solver_type, "-s", "--ode-solver",
176  "ODE solver: 1 - Forward Euler,\n\t"
177  " 2 - RK2 SSP, 3 - RK3 SSP, 4 - RK4, 6 - RK6.");
178  args.AddOption(&t_final, "-tf", "--t-final",
179  "Final time; start time is 0.");
180  args.AddOption(&dt, "-dt", "--time-step",
181  "Time step.");
182  args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
183  "--no-visualization",
184  "Enable or disable GLVis visualization.");
185  args.AddOption(&visit, "-visit", "--visit-datafiles", "-no-visit",
186  "--no-visit-datafiles",
187  "Save data files for VisIt (visit.llnl.gov) visualization.");
188  args.AddOption(&binary, "-binary", "--binary-datafiles", "-ascii",
189  "--ascii-datafiles",
190  "Use binary (Sidre) or ascii format for VisIt data files.");
191  args.AddOption(&vis_steps, "-vs", "--visualization-steps",
192  "Visualize every n-th timestep.");
193  args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc",
194  "--no-petsc",
195  "Use or not PETSc to solve the ODE system.");
196  args.AddOption(&petscrc_file, "-petscopts", "--petscopts",
197  "PetscOptions file to use.");
198  args.AddOption(&use_step, "-usestep", "--usestep", "-no-step",
199  "--no-step",
200  "Use the Step() or Run() method to solve the ODE system.");
201  args.AddOption(&implicit, "-implicit", "--implicit", "-no-implicit",
202  "--no-implicit",
203  "Use or not an implicit method in PETSc to solve the ODE system.");
204  args.Parse();
205  if (!args.Good())
206  {
207  if (myid == 0)
208  {
209  args.PrintUsage(cout);
210  }
211  MPI_Finalize();
212  return 1;
213  }
214  if (myid == 0)
215  {
216  args.PrintOptions(cout);
217  }
218 
219  // 3. Read the serial mesh from the given mesh file on all processors. We can
220  // handle geometrically periodic meshes in this code.
221  Mesh *mesh = new Mesh(mesh_file, 1, 1);
222  int dim = mesh->Dimension();
223 
224  // 4. Define the ODE solver used for time integration. Several explicit
225  // Runge-Kutta methods are available.
226  ODESolver *ode_solver = NULL;
227  PetscODESolver *pode_solver = NULL;
228  UserMonitor *pmon = NULL;
229  if (!use_petsc)
230  {
231  switch (ode_solver_type)
232  {
233  case 1: ode_solver = new ForwardEulerSolver; break;
234  case 2: ode_solver = new RK2Solver(1.0); break;
235  case 3: ode_solver = new RK3SSPSolver; break;
236  case 4: ode_solver = new RK4Solver; break;
237  case 6: ode_solver = new RK6Solver; break;
238  default:
239  if (myid == 0)
240  {
241  cout << "Unknown ODE solver type: " << ode_solver_type << '\n';
242  }
243  MPI_Finalize();
244  return 3;
245  }
246  }
247  else
248  {
249  // When using PETSc, we just create the ODE solver. We use command line
250  // customization to select a specific solver.
251  PetscInitialize(NULL, NULL, petscrc_file, NULL);
252  ode_solver = pode_solver = new PetscODESolver(MPI_COMM_WORLD);
253  }
254 
255  // 5. Refine the mesh in serial to increase the resolution. In this example
256  // we do 'ser_ref_levels' of uniform refinement, where 'ser_ref_levels' is
257  // a command-line parameter. If the mesh is of NURBS type, we convert it
258  // to a (piecewise-polynomial) high-order mesh.
259  for (int lev = 0; lev < ser_ref_levels; lev++)
260  {
261  mesh->UniformRefinement();
262  }
263  if (mesh->NURBSext)
264  {
265  mesh->SetCurvature(max(order, 1));
266  }
267  mesh->GetBoundingBox(bb_min, bb_max, max(order, 1));
268 
269  // 6. Define the parallel mesh by a partitioning of the serial mesh. Refine
270  // this mesh further in parallel to increase the resolution. Once the
271  // parallel mesh is defined, the serial mesh can be deleted.
272  ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
273  delete mesh;
274  for (int lev = 0; lev < par_ref_levels; lev++)
275  {
276  pmesh->UniformRefinement();
277  }
278 
279  // 7. Define the parallel discontinuous DG finite element space on the
280  // parallel refined mesh of the given polynomial order.
281  DG_FECollection fec(order, dim);
282  ParFiniteElementSpace *fes = new ParFiniteElementSpace(pmesh, &fec);
283 
284  HYPRE_Int global_vSize = fes->GlobalTrueVSize();
285  if (myid == 0)
286  {
287  cout << "Number of unknowns: " << global_vSize << endl;
288  }
289 
290  // 8. Set up and assemble the parallel bilinear and linear forms (and the
291  // parallel hypre matrices) corresponding to the DG discretization. The
292  // DGTraceIntegrator involves integrals over mesh interior faces.
293  VectorFunctionCoefficient velocity(dim, velocity_function);
294  FunctionCoefficient inflow(inflow_function);
295  FunctionCoefficient u0(u0_function);
296 
297  ParBilinearForm *m = new ParBilinearForm(fes);
298  m->AddDomainIntegrator(new MassIntegrator);
299  ParBilinearForm *k = new ParBilinearForm(fes);
300  k->AddDomainIntegrator(new ConvectionIntegrator(velocity, -1.0));
301  k->AddInteriorFaceIntegrator(
302  new TransposeIntegrator(new DGTraceIntegrator(velocity, 1.0, -0.5)));
303  k->AddBdrFaceIntegrator(
304  new TransposeIntegrator(new DGTraceIntegrator(velocity, 1.0, -0.5)));
305 
306  ParLinearForm *b = new ParLinearForm(fes);
307  b->AddBdrFaceIntegrator(
308  new BoundaryFlowIntegrator(inflow, velocity, -1.0, -0.5));
309 
310  m->Assemble();
311  m->Finalize();
312  int skip_zeros = 0;
313  k->Assemble(skip_zeros);
314  k->Finalize(skip_zeros);
315  b->Assemble();
316 
317  HypreParMatrix *M = m->ParallelAssemble();
318  HypreParMatrix *K = k->ParallelAssemble();
319  HypreParVector *B = b->ParallelAssemble();
320 
321  // 9. Define the initial conditions, save the corresponding grid function to
322  // a file and (optionally) save data in the VisIt format and initialize
323  // GLVis visualization.
324  ParGridFunction *u = new ParGridFunction(fes);
325  u->ProjectCoefficient(u0);
326  HypreParVector *U = u->GetTrueDofs();
327 
328  {
329  ostringstream mesh_name, sol_name;
330  mesh_name << "ex9-mesh." << setfill('0') << setw(6) << myid;
331  sol_name << "ex9-init." << setfill('0') << setw(6) << myid;
332  ofstream omesh(mesh_name.str().c_str());
333  omesh.precision(precision);
334  pmesh->Print(omesh);
335  ofstream osol(sol_name.str().c_str());
336  osol.precision(precision);
337  u->Save(osol);
338  }
339 
340  // Create data collection for solution output: either VisItDataCollection for
341  // ascii data files, or SidreDataCollection for binary data files.
342  DataCollection *dc = NULL;
343  if (visit)
344  {
345  if (binary)
346  {
347 #ifdef MFEM_USE_SIDRE
348  dc = new SidreDataCollection("Example9-Parallel", pmesh);
349 #else
350  MFEM_ABORT("Must build with MFEM_USE_SIDRE=YES for binary output.");
351 #endif
352  }
353  else
354  {
355  dc = new VisItDataCollection("Example9-Parallel", pmesh);
356  dc->SetPrecision(precision);
357  }
358  dc->RegisterField("solution", u);
359  dc->SetCycle(0);
360  dc->SetTime(0.0);
361  dc->Save();
362  }
363 
364  socketstream sout;
365  if (visualization)
366  {
367  char vishost[] = "localhost";
368  int visport = 19916;
369  sout.open(vishost, visport);
370  if (!sout)
371  {
372  if (myid == 0)
373  cout << "Unable to connect to GLVis server at "
374  << vishost << ':' << visport << endl;
375  visualization = false;
376  if (myid == 0)
377  {
378  cout << "GLVis visualization disabled.\n";
379  }
380  }
381  else if (use_step)
382  {
383  sout << "parallel " << num_procs << " " << myid << "\n";
384  sout.precision(precision);
385  sout << "solution\n" << *pmesh << *u;
386  sout << "pause\n";
387  sout << flush;
388  if (myid == 0)
389  cout << "GLVis visualization paused."
390  << " Press space (in the GLVis window) to resume it.\n";
391  }
392  else if (use_petsc)
393  {
394  // Set the monitoring routine for the PetscODESolver.
395  sout.precision(precision);
396  pmon = new UserMonitor(sout,pmesh,u,vis_steps);
397  pode_solver->SetMonitor(pmon);
398  }
399  }
400 
401  // 10. Define the time-dependent evolution operator describing the ODE
402  FE_Evolution *adv = new FE_Evolution(*M, *K, *B, implicit);
403 
404  double t = 0.0;
405  adv->SetTime(t);
406  ode_solver->Init(*adv);
407 
408  // Explicitly perform time-integration (looping over the time iterations, ti,
409  // with a time-step dt), or use the Run method of the ODE solver class.
410  if (use_step)
411  {
412  bool done = false;
413  for (int ti = 0; !done; )
414  {
415  double dt_real = min(dt, t_final - t);
416  ode_solver->Step(*U, t, dt_real);
417  ti++;
418 
419  done = (t >= t_final - 1e-8*dt);
420 
421  if (done || ti % vis_steps == 0)
422  {
423  if (myid == 0)
424  {
425  cout << "time step: " << ti << ", time: " << t << endl;
426  }
427 
428  // 11. Extract the parallel grid function corresponding to the finite
429  // element approximation U (the local solution on each processor).
430  *u = *U;
431 
432  if (visualization)
433  {
434  sout << "parallel " << num_procs << " " << myid << "\n";
435  sout << "solution\n" << *pmesh << *u << flush;
436  }
437 
438  if (visit)
439  {
440  dc->SetCycle(ti);
441  dc->SetTime(t);
442  dc->Save();
443  }
444  }
445  }
446  }
447  else { ode_solver->Run(*U, t, dt, t_final); }
448 
449  // 12. Save the final solution in parallel. This output can be viewed later
450  // using GLVis: "glvis -np <np> -m ex9-mesh -g ex9-final".
451  {
452  *u = *U;
453  ostringstream sol_name;
454  sol_name << "ex9-final." << setfill('0') << setw(6) << myid;
455  ofstream osol(sol_name.str().c_str());
456  osol.precision(precision);
457  u->Save(osol);
458  }
459 
460  // 13. Free the used memory.
461  delete U;
462  delete u;
463  delete B;
464  delete b;
465  delete K;
466  delete k;
467  delete M;
468  delete m;
469  delete fes;
470  delete pmesh;
471  delete ode_solver;
472  delete dc;
473  delete adv;
474 
475  delete pmon;
476 
477  // We finalize PETSc
478  if (use_petsc) { PetscFinalize(); }
479 
480  MPI_Finalize();
481  return 0;
482 }
483 
484 
485 // Implementation of class FE_Evolution
486 FE_Evolution::FE_Evolution(HypreParMatrix &_M, HypreParMatrix &_K,
487  const Vector &_b,bool M_in_lhs)
488  : TimeDependentOperator(_M.Height(), 0.0,
489  M_in_lhs ? TimeDependentOperator::IMPLICIT
490  : TimeDependentOperator::EXPLICIT),
491  M(_M), K(_K), b(_b), M_solver(M.GetComm()), z(_M.Height()),
492  iJacobian(NULL), rJacobian(NULL)
493 {
494  if (isExplicit())
495  {
496  M_prec.SetType(HypreSmoother::Jacobi);
497  M_solver.SetPreconditioner(M_prec);
498  M_solver.SetOperator(M);
499 
500  M_solver.iterative_mode = false;
501  M_solver.SetRelTol(1e-9);
502  M_solver.SetAbsTol(0.0);
503  M_solver.SetMaxIter(100);
504  M_solver.SetPrintLevel(0);
505  }
506 }
507 
508 // RHS evaluation
509 void FE_Evolution::ExplicitMult(const Vector &x, Vector &y) const
510 {
511  if (isExplicit())
512  {
513  // y = M^{-1} (K x + b)
514  K.Mult(x, z);
515  z += b;
516  M_solver.Mult(z, y);
517  }
518  else
519  {
520  // y = K x + b
521  K.Mult(x, y);
522  y += b;
523  }
524 }
525 
526 // LHS evaluation
527 void FE_Evolution::ImplicitMult(const Vector &x, const Vector &xp,
528  Vector &y) const
529 {
530  if (isImplicit())
531  {
532  M.Mult(xp, y);
533  }
534  else
535  {
536  y = xp;
537  }
538 }
539 
540 void FE_Evolution::Mult(const Vector &x, Vector &y) const
541 {
542  // y = M^{-1} (K x + b)
543  K.Mult(x, z);
544  z += b;
545  M_solver.Mult(z, y);
546 }
547 
548 // RHS Jacobian
549 Operator& FE_Evolution::GetExplicitGradient(const Vector &x) const
550 {
551  delete rJacobian;
552  if (isImplicit())
553  {
554  rJacobian = new PetscParMatrix(&K, Operator::PETSC_MATAIJ);
555  }
556  else
557  {
558  mfem_error("FE_Evolution::GetExplicitGradient(x): Capability not coded!");
559  }
560  return *rJacobian;
561 }
562 
563 // LHS Jacobian, evaluated as shift*F_du/dt + F_u
564 Operator& FE_Evolution::GetImplicitGradient(const Vector &x, const Vector &xp,
565  double shift) const
566 {
567  delete iJacobian;
568  if (isImplicit())
569  {
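 // Since F(u,du/dt,t) = M du/dt, dF/du = 0 and dF/d(du/dt) = M, so the
 // implicit Jacobian shift*dF/d(du/dt) + dF/du is simply shift*M.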
570  iJacobian = new PetscParMatrix(&M, Operator::PETSC_MATAIJ);
571  *iJacobian *= shift;
572  }
573  else
574  {
575  mfem_error("FE_Evolution::GetImplicitGradient(x,xp,shift):"
576  " Capability not coded!");
577  }
578  return *iJacobian;
579 }
580 
581 // Velocity coefficient
582 void velocity_function(const Vector &x, Vector &v)
583 {
584  int dim = x.Size();
585 
586  // map to the reference [-1,1] domain
587  Vector X(dim);
588  for (int i = 0; i < dim; i++)
589  {
590  double center = (bb_min[i] + bb_max[i]) * 0.5;
591  X(i) = 2 * (x(i) - center) / (bb_max[i] - bb_min[i]);
592  }
593 
594  switch (problem)
595  {
596  case 0:
597  {
598  // Translations in 1D, 2D, and 3D
599  switch (dim)
600  {
601  case 1: v(0) = 1.0; break;
602  case 2: v(0) = sqrt(2./3.); v(1) = sqrt(1./3.); break;
603  case 3: v(0) = sqrt(3./6.); v(1) = sqrt(2./6.); v(2) = sqrt(1./6.);
604  break;
605  }
606  break;
607  }
608  case 1:
609  case 2:
610  {
611  // Clockwise rotation in 2D around the origin
612  const double w = M_PI/2;
613  switch (dim)
614  {
615  case 1: v(0) = 1.0; break;
616  case 2: v(0) = w*X(1); v(1) = -w*X(0); break;
617  case 3: v(0) = w*X(1); v(1) = -w*X(0); v(2) = 0.0; break;
618  }
619  break;
620  }
621  case 3:
622  {
623  // Clockwise twisting rotation in 2D around the origin
624  const double w = M_PI/2;
625  double d = max((X(0)+1.)*(1.-X(0)),0.) * max((X(1)+1.)*(1.-X(1)),0.);
626  d = d*d;
627  switch (dim)
628  {
629  case 1: v(0) = 1.0; break;
630  case 2: v(0) = d*w*X(1); v(1) = -d*w*X(0); break;
631  case 3: v(0) = d*w*X(1); v(1) = -d*w*X(0); v(2) = 0.0; break;
632  }
633  break;
634  }
635  }
636 }
637 
638 // Initial condition
639 double u0_function(const Vector &x)
640 {
641  int dim = x.Size();
642 
643  // map to the reference [-1,1] domain
644  Vector X(dim);
645  for (int i = 0; i < dim; i++)
646  {
647  double center = (bb_min[i] + bb_max[i]) * 0.5;
648  X(i) = 2 * (x(i) - center) / (bb_max[i] - bb_min[i]);
649  }
650 
651  switch (problem)
652  {
653  case 0:
654  case 1:
655  {
656  switch (dim)
657  {
658  case 1:
659  return exp(-40.*pow(X(0)-0.5,2));
660  case 2:
661  case 3:
662  {
663  double rx = 0.45, ry = 0.25, cx = 0., cy = -0.2, w = 10.;
664  if (dim == 3)
665  {
666  const double s = (1. + 0.25*cos(2*M_PI*X(2)));
667  rx *= s;
668  ry *= s;
669  }
670  return ( erfc(w*(X(0)-cx-rx))*erfc(-w*(X(0)-cx+rx)) *
671  erfc(w*(X(1)-cy-ry))*erfc(-w*(X(1)-cy+ry)) )/16;
672  }
673  }
674  }
675  case 2:
676  {
677  double x_ = X(0), y_ = X(1), rho, phi;
678  rho = hypot(x_, y_);
679  phi = atan2(y_, x_);
680  return pow(sin(M_PI*rho),2)*sin(3*phi);
681  }
682  case 3:
683  {
684  const double f = M_PI;
685  return sin(f*X(0))*sin(f*X(1));
686  }
687  }
688  return 0.0;
689 }
690 
691 // Inflow boundary condition (zero for the problems considered in this example)
692 double inflow_function(const Vector &x)
693 {
694  switch (problem)
695  {
696  case 0:
697  case 1:
698  case 2:
699  case 3: return 0.0;
700  }
701  return 0.0;
702 }