MFEM v3.3.2
Finite element discretization library
petsc/ex5p.cpp
//                       MFEM Example 5 - Parallel Version
//                              PETSc Modification
//
// Compile with: make ex5p
//
// Sample runs:
//    mpirun -np 4 ex5p -m ../../data/beam-tet.mesh --petscopts rc_ex5p_fieldsplit
//    mpirun -np 4 ex5p -m ../../data/star.mesh --petscopts rc_ex5p_bddc --nonoverlapping
//
// Description:  This example code solves a simple 2D/3D mixed Darcy problem
//               corresponding to the saddle point system
//                                 k*u + grad p = f
//                                 - div u      = g
//               with natural boundary condition -p = <given pressure>.
//               Here, we use a given exact solution (u,p) and compute the
//               corresponding r.h.s. (f,g). We discretize with Raviart-Thomas
//               finite elements (velocity u) and piecewise discontinuous
//               polynomials (pressure p).
//
//               The example demonstrates the use of the BlockMatrix class, as
//               well as the collective saving of several grid functions in a
//               VisIt (visit.llnl.gov) visualization format.
//
//               Two types of PETSc solvers can be used: BDDC or fieldsplit.
//               When using BDDC, the nonoverlapping assembly feature should be
//               used. This specific example needs PETSc compiled with support
//               for SuiteSparse and/or MUMPS for using BDDC.
//
//               We recommend viewing examples 1-4 before viewing this example.

#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifndef MFEM_USE_PETSC
#error This example requires that MFEM is built with MFEM_USE_PETSC=YES
#endif

using namespace std;
using namespace mfem;

// Define the analytical solution and forcing terms / boundary conditions
void uFun_ex(const Vector & x, Vector & u);
double pFun_ex(const Vector & x);
void fFun(const Vector & x, Vector & f);
double gFun(const Vector & x);
double f_natural(const Vector & x);

int main(int argc, char *argv[])
{
   StopWatch chrono;

   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);
   bool verbose = (myid == 0);

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/star.mesh";
   int order = 1;
   bool visualization = true;
   bool use_petsc = true;
   bool use_nonoverlapping = false;
   const char *petscrc_file = "";

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc",
                  "--no-petsc",
                  "Whether to use PETSc to solve the linear system.");
   args.AddOption(&petscrc_file, "-petscopts", "--petscopts",
                  "PetscOptions file to use.");
   args.AddOption(&use_nonoverlapping, "-nonoverlapping", "--nonoverlapping",
                  "-no-nonoverlapping", "--no-nonoverlapping",
                  "Whether to use PETSc's block-diagonal matrix format "
                  "for non-overlapping domain decomposition.");
   args.Parse();
   if (!args.Good())
   {
      if (verbose)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (verbose)
   {
      args.PrintOptions(cout);
   }
   // 2b. Initialize PETSc, reading solver options from the given file (if any).
   if (use_petsc) { PetscInitialize(NULL,NULL,petscrc_file,NULL); }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 10,000 elements.
   {
      int ref_levels =
         (int)floor(log(10000./mesh->GetNE())/log(2.)/dim);
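      // Each uniform refinement multiplies the element count by 2^dim, so
      // ref_levels = floor(log2(10000/NE)/dim) caps the refined mesh at
      // roughly 10,000 elements.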
      for (int l = 0; l < ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Raviart-Thomas finite elements of the specified order.
   FiniteElementCollection *hdiv_coll(new RT_FECollection(order, dim));
   FiniteElementCollection *l2_coll(new L2_FECollection(order, dim));

   ParFiniteElementSpace *R_space = new ParFiniteElementSpace(pmesh, hdiv_coll);
   ParFiniteElementSpace *W_space = new ParFiniteElementSpace(pmesh, l2_coll);

   HYPRE_Int dimR = R_space->GlobalTrueVSize();
   HYPRE_Int dimW = W_space->GlobalTrueVSize();

   if (verbose)
   {
      std::cout << "***********************************************************\n";
      std::cout << "dim(R) = " << dimR << "\n";
      std::cout << "dim(W) = " << dimW << "\n";
      std::cout << "dim(R+W) = " << dimR + dimW << "\n";
      std::cout << "***********************************************************\n";
   }

   // 7. Define the two block structures of the problem. block_offsets is used
   //    for Vectors based on dofs (like ParGridFunction or ParLinearForm),
   //    block_trueOffsets is used for Vectors based on true dofs (HypreParVector
   //    for the rhs and solution of the linear system). The offsets computed
   //    here are local to the processor.
   Array<int> block_offsets(3); // number of variables + 1
   block_offsets[0] = 0;
   block_offsets[1] = R_space->GetVSize();
   block_offsets[2] = W_space->GetVSize();
   block_offsets.PartialSum();

   Array<int> block_trueOffsets(3); // number of variables + 1
   block_trueOffsets[0] = 0;
   block_trueOffsets[1] = R_space->TrueVSize();
   block_trueOffsets[2] = W_space->TrueVSize();
   block_trueOffsets.PartialSum();
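   // After PartialSum() the arrays hold offsets instead of sizes, e.g.
   // block_trueOffsets = [0, nR, nR+nW]; block i of a BlockVector then
   // occupies entries [offsets[i], offsets[i+1]).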

   // 8. Define the coefficients, analytical solution, and rhs of the PDE.
   ConstantCoefficient k(1.0);

   VectorFunctionCoefficient fcoeff(dim, fFun);
   FunctionCoefficient fnatcoeff(f_natural);
   FunctionCoefficient gcoeff(gFun);

   VectorFunctionCoefficient ucoeff(dim, uFun_ex);
   FunctionCoefficient pcoeff(pFun_ex);

   // 9. Define the parallel grid function and parallel linear forms, solution
   //    vector and rhs.
   BlockVector x(block_offsets), rhs(block_offsets);
   BlockVector trueX(block_trueOffsets), trueRhs(block_trueOffsets);

   ParLinearForm *fform(new ParLinearForm);
   fform->Update(R_space, rhs.GetBlock(0), 0);
   fform->AddDomainIntegrator(new VectorFEDomainLFIntegrator(fcoeff));
   fform->AddBoundaryIntegrator(new VectorFEBoundaryFluxLFIntegrator(fnatcoeff));
   fform->Assemble();
   fform->ParallelAssemble(trueRhs.GetBlock(0));

   ParLinearForm *gform(new ParLinearForm);
   gform->Update(W_space, rhs.GetBlock(1), 0);
   gform->AddDomainIntegrator(new DomainLFIntegrator(gcoeff));
   gform->Assemble();
   gform->ParallelAssemble(trueRhs.GetBlock(1));
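   // ParallelAssemble() assembles each linear form on the true dofs, i.e. it
   // stores P^t b in the corresponding block of trueRhs, where P is the
   // parallel prolongation of the space.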

   // 10. Assemble the finite element matrices for the Darcy operator
   //
   //                            D = [ M  B^T ]
   //                                [ B   0  ]
   //     where:
   //
   //     M = \int_\Omega k u_h \cdot v_h d\Omega    u_h, v_h \in R_h
   //     B = -\int_\Omega \div u_h q_h d\Omega      u_h \in R_h, q_h \in W_h
   ParBilinearForm *mVarf(new ParBilinearForm(R_space));
   ParMixedBilinearForm *bVarf(new ParMixedBilinearForm(R_space, W_space));

   PetscParMatrix *pM = NULL, *pB = NULL, *pBT = NULL;
   HypreParMatrix *M = NULL, *B = NULL, *BT = NULL;
   Operator::Type tid =
      !use_petsc ? Operator::Hypre_ParCSR :
      (use_nonoverlapping ? Operator::PETSC_MATIS : Operator::PETSC_MATAIJ);
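   // MATIS keeps the unassembled local matrices that BDDC needs, while MATAIJ
   // is PETSc's standard assembled parallel CSR format.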
   OperatorHandle Mh(tid), Bh(tid);

   mVarf->AddDomainIntegrator(new VectorFEMassIntegrator(k));
   mVarf->Assemble();
   mVarf->Finalize();
   mVarf->ParallelAssemble(Mh);
   if (!use_petsc) { Mh.Get(M); }
   else { Mh.Get(pM); }
   Mh.SetOperatorOwner(false);
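   // Release ownership so the handle does not delete the matrix; it is freed
   // manually in step 17 instead.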

   bVarf->AddDomainIntegrator(new VectorFEDivergenceIntegrator);
   bVarf->Assemble();
   bVarf->Finalize();
   if (!use_petsc)
   {
      B = bVarf->ParallelAssemble();
      (*B) *= -1;
   }
   else
   {
      bVarf->ParallelAssemble(Bh);
      Bh.Get(pB);
      Bh.SetOperatorOwner(false);
      (*pB) *= -1;
   }

   if (!use_petsc) { BT = B->Transpose(); }
   else { pBT = pB->Transpose(); }

   Operator *darcyOp = NULL;
   if (!use_petsc)
   {
      BlockOperator *tdarcyOp = new BlockOperator(block_trueOffsets);
      tdarcyOp->SetBlock(0,0,M);
      tdarcyOp->SetBlock(0,1,BT);
      tdarcyOp->SetBlock(1,0,B);
      darcyOp = tdarcyOp;
   }
   else
   {
      // We construct the BlockOperator and then convert it to a
      // PetscParMatrix to avoid any conversion in the construction of the
      // preconditioners.
      BlockOperator *tdarcyOp = new BlockOperator(block_trueOffsets);
      tdarcyOp->SetBlock(0,0,pM);
      tdarcyOp->SetBlock(0,1,pBT);
      tdarcyOp->SetBlock(1,0,pB);
      darcyOp = new PetscParMatrix(pM->GetComm(),tdarcyOp,
                                   use_nonoverlapping ? Operator::PETSC_MATIS :
                                   Operator::PETSC_MATAIJ);
      delete tdarcyOp;
   }

   // 11. Construct the operators for the preconditioner
   //
   //                 P = [ diag(M)         0        ]
   //                     [   0     B diag(M)^-1 B^T ]
   //
   //     Here we use BoomerAMG to approximate the inverse of the pressure
   //     Schur complement, and diagonal (Jacobi) scaling for M.
   PetscPreconditioner *pdarcyPr = NULL;
   BlockDiagonalPreconditioner *darcyPr = NULL;
   HypreSolver *invM = NULL, *invS = NULL;
   HypreParMatrix *S = NULL;
   HypreParMatrix *MinvBt = NULL;
   HypreParVector *Md = NULL;
   if (!use_petsc)
   {
      MinvBt = B->Transpose();
      Md = new HypreParVector(MPI_COMM_WORLD, M->GetGlobalNumRows(),
                              M->GetRowStarts());
      M->GetDiag(*Md);

      MinvBt->InvScaleRows(*Md);
      S = ParMult(B, MinvBt);
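      // Because the rows of MinvBt were scaled by 1/diag(M), this product is
      // the approximate Schur complement S = B diag(M)^{-1} B^T.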

      invM = new HypreDiagScale(*M);
      invS = new HypreBoomerAMG(*S);

      invM->iterative_mode = false;
      invS->iterative_mode = false;

      darcyPr = new BlockDiagonalPreconditioner(block_trueOffsets);
      darcyPr->SetDiagonalBlock(0, invM);
      darcyPr->SetDiagonalBlock(1, invS);
   }
   else
   {
      if (use_nonoverlapping)
      {
         // For saddle point problems, we need to provide BDDC with the list
         // of boundary dofs, either essential or natural.
         // Since R_space is the only space that may have boundary dofs, and
         // it is ordered first (before W_space), we don't need any local
         // offset when specifying the dofs.
         Array<int> bdr_tdof_list;
         bool local = false;
         if (pmesh->bdr_attributes.Size())
         {
            Array<int> bdr(pmesh->bdr_attributes.Max());
            bdr = 1;

            R_space->GetEssentialTrueDofs(bdr, bdr_tdof_list);
            local = false;
            // Alternatively, you can also provide the list of dofs in local
            // ordering:
            // R_space->GetEssentialVDofs(bdr, bdr_tdof_list);
            // bdr_tdof_list.SetSize(R_space->GetVSize()+W_space->GetVSize(),0);
            // local = true;
         }
         else
         {
            MFEM_ABORT("Need to know the boundary dofs");
         }

         PetscBDDCSolverParams opts;
         opts.SetNatBdrDofs(&bdr_tdof_list,local);
         // See also the command line options in rc_ex5p_bddc
         pdarcyPr = new PetscBDDCSolver(MPI_COMM_WORLD,*darcyOp,opts,"prec_");
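         // The trailing "prec_" argument is a PETSc options prefix: options
         // in the rc file such as PETSc's -pc_bddc_* family apply to this
         // solver when written as -prec_pc_bddc_* (prefix mechanics assumed
         // from standard PETSc conventions).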
      }
      else
      {
         // With PETSc, we can construct the (same) block-diagonal solver with
         // command line options (see rc_ex5p_fieldsplit)
         pdarcyPr = new PetscFieldSplitSolver(MPI_COMM_WORLD,*darcyOp,"prec_");
      }
   }

   // 12. Solve the linear system with MINRES.
   //     Check the norm of the unpreconditioned residual.

   int maxIter(500);
   double rtol(1.e-6);
   double atol(1.e-10);

   chrono.Clear();
   chrono.Start();

   trueX = 0.0;
   if (!use_petsc)
   {
      MINRESSolver solver(MPI_COMM_WORLD);
      solver.SetAbsTol(atol);
      solver.SetRelTol(rtol);
      solver.SetMaxIter(maxIter);
      solver.SetOperator(*darcyOp);
      solver.SetPreconditioner(*darcyPr);
      solver.SetPrintLevel(1);
      solver.Mult(trueRhs, trueX);
      if (verbose)
      {
         if (solver.GetConverged())
         {
            std::cout << "MINRES converged in " << solver.GetNumIterations()
                      << " iterations with a residual norm of "
                      << solver.GetFinalNorm() << ".\n";
         }
         else
         {
            std::cout << "MINRES did not converge in "
                      << solver.GetNumIterations()
                      << " iterations. Residual norm is "
                      << solver.GetFinalNorm() << ".\n";
         }
         std::cout << "MINRES solver took " << chrono.RealTime() << "s.\n";
      }

   }
   else
   {
      std::string solvertype;
      PetscLinearSolver *solver;
      if (use_nonoverlapping)
      {
         // We can use conjugate gradients to solve the problem
         solver = new PetscPCGSolver(MPI_COMM_WORLD);
         solvertype = "PCG";
      }
      else
      {
         solver = new PetscLinearSolver(MPI_COMM_WORLD);
         solvertype = "MINRES";
      }
      solver->SetOperator(*darcyOp);
      solver->SetPreconditioner(*pdarcyPr);
      solver->SetAbsTol(atol);
      solver->SetRelTol(rtol);
      solver->SetMaxIter(maxIter);
      solver->SetPrintLevel(2);
      solver->Mult(trueRhs, trueX);
      if (verbose)
      {
         if (solver->GetConverged())
         {
            std::cout << solvertype << " converged in "
                      << solver->GetNumIterations()
                      << " iterations with a residual norm of "
                      << solver->GetFinalNorm() << ".\n";
         }
         else
         {
            std::cout << solvertype << " did not converge in "
                      << solver->GetNumIterations()
                      << " iterations. Residual norm is "
                      << solver->GetFinalNorm() << ".\n";
         }
         std::cout << solvertype << " solver took "
                   << chrono.RealTime() << "s.\n";
      }
      delete solver;
   }
   chrono.Stop();

   // 13. Extract the parallel grid function corresponding to the finite element
   //     approximation X. This is the local solution on each processor. Compute
   //     L2 error norms.
   ParGridFunction *u(new ParGridFunction);
   ParGridFunction *p(new ParGridFunction);
   u->MakeRef(R_space, x.GetBlock(0), 0);
   p->MakeRef(W_space, x.GetBlock(1), 0);
   u->Distribute(&(trueX.GetBlock(0)));
   p->Distribute(&(trueX.GetBlock(1)));
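   // Distribute() fills each grid function from the corresponding block of
   // the true-dof solution vector, recovering the local (vdof) values.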

   int order_quad = max(2, 2*order+1);
   const IntegrationRule *irs[Geometry::NumGeom];
   for (int i=0; i < Geometry::NumGeom; ++i)
   {
      irs[i] = &(IntRules.Get(i, order_quad));
   }
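   // An integration rule of order 2*order+1 integrates products of two
   // degree-'order' polynomials exactly, so the error norms below are not
   // polluted by quadrature error.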

   double err_u = u->ComputeL2Error(ucoeff, irs);
   double norm_u = ComputeGlobalLpNorm(2, ucoeff, *pmesh, irs);
   double err_p = p->ComputeL2Error(pcoeff, irs);
   double norm_p = ComputeGlobalLpNorm(2, pcoeff, *pmesh, irs);

   if (verbose)
   {
      std::cout << "|| u_h - u_ex || / || u_ex || = " << err_u / norm_u << "\n";
      std::cout << "|| p_h - p_ex || / || p_ex || = " << err_p / norm_p << "\n";
   }

   // 14. Save the refined mesh and the solution in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g sol_*".
   {
      ostringstream mesh_name, u_name, p_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      u_name << "sol_u." << setfill('0') << setw(6) << myid;
      p_name << "sol_p." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream u_ofs(u_name.str().c_str());
      u_ofs.precision(8);
      u->Save(u_ofs);

      ofstream p_ofs(p_name.str().c_str());
      p_ofs.precision(8);
      p->Save(p_ofs);
   }

   // 15. Save data in the VisIt format
   VisItDataCollection visit_dc("Example5-Parallel", pmesh);
   visit_dc.RegisterField("velocity", u);
   visit_dc.RegisterField("pressure", p);
   visit_dc.Save();
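   // Save() writes a VisIt root file along with the per-rank mesh and field
   // data registered above.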

   // 16. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      socketstream u_sock(vishost, visport);
      u_sock << "parallel " << num_procs << " " << myid << "\n";
      u_sock.precision(8);
      u_sock << "solution\n" << *pmesh << *u << "window_title 'Velocity'"
             << endl;
      // Make sure all ranks have sent their 'u' solution before initiating
      // another set of GLVis connections (one from each rank):
      MPI_Barrier(pmesh->GetComm());
      socketstream p_sock(vishost, visport);
      p_sock << "parallel " << num_procs << " " << myid << "\n";
      p_sock.precision(8);
      p_sock << "solution\n" << *pmesh << *p << "window_title 'Pressure'"
             << endl;
   }

   // 17. Free the used memory.
   delete fform;
   delete gform;
   delete u;
   delete p;
   delete darcyOp;
   delete darcyPr;
   delete pdarcyPr;
   delete invM;
   delete invS;
   delete S;
   delete BT;
   delete B;
   delete M;
   delete pBT;
   delete pB;
   delete pM;
   delete MinvBt;
   delete Md;
   delete mVarf;
   delete bVarf;
   delete W_space;
   delete R_space;
   delete l2_coll;
   delete hdiv_coll;
   delete pmesh;

   // We finalize PETSc
   if (use_petsc) { PetscFinalize(); }

   MPI_Finalize();

   return 0;
}


void uFun_ex(const Vector & x, Vector & u)
{
   double xi(x(0));
   double yi(x(1));
   double zi(0.0);
   if (x.Size() == 3)
   {
      zi = x(2);
   }

   u(0) = - exp(xi)*sin(yi)*cos(zi);
   u(1) = - exp(xi)*cos(yi)*cos(zi);

   if (x.Size() == 3)
   {
      u(2) = exp(xi)*sin(yi)*sin(zi);
   }
}

// Change if needed
double pFun_ex(const Vector & x)
{
   double xi(x(0));
   double yi(x(1));
   double zi(0.0);

   if (x.Size() == 3)
   {
      zi = x(2);
   }

   return exp(xi)*sin(yi)*cos(zi);
}

void fFun(const Vector & x, Vector & f)
{
   f = 0.0;
}

double gFun(const Vector & x)
{
   if (x.Size() == 3)
   {
      return -pFun_ex(x);
   }
   else
   {
      return 0;
   }
}

double f_natural(const Vector & x)
{
   return (-pFun_ex(x));
}