MFEM  v3.2
Finite element discretization library
ex5p.cpp
// MFEM Example 5 - Parallel Version
//
// Compile with: make ex5p
//
// Sample runs:  mpirun -np 4 ex5p -m ../data/square-disc.mesh
//               mpirun -np 4 ex5p -m ../data/star.mesh
//               mpirun -np 4 ex5p -m ../data/beam-tet.mesh
//               mpirun -np 4 ex5p -m ../data/beam-hex.mesh
//               mpirun -np 4 ex5p -m ../data/escher.mesh
//               mpirun -np 4 ex5p -m ../data/fichera.mesh
//
// Description:  This example code solves a simple 2D/3D mixed Darcy problem
//               corresponding to the saddle point system
//                                 k*u + grad p = f
//                                 - div u      = g
//               with natural boundary condition -p = <given pressure>.
//               Here, we use a given exact solution (u,p) and compute the
//               corresponding r.h.s. (f,g). We discretize with Raviart-Thomas
//               finite elements (velocity u) and piecewise discontinuous
//               polynomials (pressure p).
//
//               The example demonstrates the use of the BlockMatrix class, as
//               well as the collective saving of several grid functions in a
//               VisIt (visit.llnl.gov) visualization format.
//
//               We recommend viewing examples 1-4 before viewing this example.
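//
//               The exact solution used below is p = exp(x)*sin(y)*cos(z) and
//               u = -grad p, so with k = 1 the data reduce to f = 0 and
//               g = -div u (equal to -p in 3D and to 0 in 2D).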

#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

// Define the analytical solution and forcing terms / boundary conditions
void uFun_ex(const Vector & x, Vector & u);
double pFun_ex(const Vector & x);
void fFun(const Vector & x, Vector & f);
double gFun(const Vector & x);
double f_natural(const Vector & x);

int main(int argc, char *argv[])
{
   StopWatch chrono;

   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);
   bool verbose = (myid == 0);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/star.mesh";
   int order = 1;
   bool visualization = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (verbose)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (verbose)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 10,000 elements.
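   //    (Each uniform refinement step multiplies the number of elements by
   //    2^dim, which is what the formula below accounts for.)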
   {
      int ref_levels =
         (int)floor(log(10000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Raviart-Thomas finite elements of the specified order.
   FiniteElementCollection *hdiv_coll(new RT_FECollection(order, dim));
   FiniteElementCollection *l2_coll(new L2_FECollection(order, dim));

   ParFiniteElementSpace *R_space = new ParFiniteElementSpace(pmesh, hdiv_coll);
   ParFiniteElementSpace *W_space = new ParFiniteElementSpace(pmesh, l2_coll);

   HYPRE_Int dimR = R_space->GlobalTrueVSize();
   HYPRE_Int dimW = W_space->GlobalTrueVSize();

   if (verbose)
   {
      std::cout << "***********************************************************\n";
      std::cout << "dim(R) = " << dimR << "\n";
      std::cout << "dim(W) = " << dimW << "\n";
      std::cout << "dim(R+W) = " << dimR + dimW << "\n";
      std::cout << "***********************************************************\n";
   }

   // 7. Define the two block structures of the problem. block_offsets is used
   //    for Vectors based on dofs (like ParGridFunction or ParLinearForm),
   //    block_trueOffsets is used for Vectors based on true dofs (HypreParVector
   //    for the rhs and solution of the linear system). The offsets computed
   //    here are local to the processor.
   Array<int> block_offsets(3); // number of variables + 1
   block_offsets[0] = 0;
   block_offsets[1] = R_space->GetVSize();
   block_offsets[2] = W_space->GetVSize();
   block_offsets.PartialSum();

   Array<int> block_trueOffsets(3); // number of variables + 1
   block_trueOffsets[0] = 0;
   block_trueOffsets[1] = R_space->TrueVSize();
   block_trueOffsets[2] = W_space->TrueVSize();
   block_trueOffsets.PartialSum();
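   // After PartialSum() each array holds prefix sums, e.g. block_offsets =
   // [0, size(R), size(R)+size(W)], so GetBlock(i) can view the corresponding
   // sub-vector of a BlockVector.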

   // 8. Define the coefficients, analytical solution, and rhs of the PDE.
   ConstantCoefficient k(1.0);

   VectorFunctionCoefficient fcoeff(dim, fFun);
   FunctionCoefficient fnatcoeff(f_natural);
   FunctionCoefficient gcoeff(gFun);

   VectorFunctionCoefficient ucoeff(dim, uFun_ex);
   FunctionCoefficient pcoeff(pFun_ex);

   // 9. Define the parallel grid function and parallel linear forms, solution
   //    vector and rhs.
   BlockVector x(block_offsets), rhs(block_offsets);
   BlockVector trueX(block_trueOffsets), trueRhs(block_trueOffsets);

   ParLinearForm *fform(new ParLinearForm);
   fform->Update(R_space, rhs.GetBlock(0), 0);
   fform->AddDomainIntegrator(new VectorFEDomainLFIntegrator(fcoeff));
   fform->AddBoundaryIntegrator(new VectorFEBoundaryFluxLFIntegrator(fnatcoeff));
   fform->Assemble();
   fform->ParallelAssemble(trueRhs.GetBlock(0));

   ParLinearForm *gform(new ParLinearForm);
   gform->Update(W_space, rhs.GetBlock(1), 0);
   gform->AddDomainIntegrator(new DomainLFIntegrator(gcoeff));
   gform->Assemble();
   gform->ParallelAssemble(trueRhs.GetBlock(1));

   // 10. Assemble the finite element matrices for the Darcy operator
   //
   //                            D = [ M  B^T ]
   //                                [ B   0  ]
   //     where:
   //
   //     M = \int_\Omega k u_h \cdot v_h d\Omega    u_h, v_h \in R_h
   //     B = -\int_\Omega \div u_h q_h d\Omega      u_h \in R_h, q_h \in W_h
   ParBilinearForm *mVarf(new ParBilinearForm(R_space));
   ParMixedBilinearForm *bVarf(new ParMixedBilinearForm(R_space, W_space));

   HypreParMatrix *M, *B;

   mVarf->AddDomainIntegrator(new VectorFEMassIntegrator(k));
   mVarf->Assemble();
   mVarf->Finalize();
   M = mVarf->ParallelAssemble();

   bVarf->AddDomainIntegrator(new VectorFEDivergenceIntegrator);
   bVarf->Assemble();
   bVarf->Finalize();
   B = bVarf->ParallelAssemble();
   (*B) *= -1;
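   // The divergence integrator assembles +(div u_h, q_h); the sign flip above
   // makes B the discrete version of -div, matching the block form of D.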

   HypreParMatrix *BT = B->Transpose();

   BlockOperator *darcyOp = new BlockOperator(block_trueOffsets);
   darcyOp->SetBlock(0,0, M);
   darcyOp->SetBlock(0,1, BT);
   darcyOp->SetBlock(1,0, B);
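   // The unset (1,1) block of darcyOp acts as the zero block of D.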

   // 11. Construct the operators for the preconditioner
   //
   //                 P = [ diag(M)         0         ]
   //                     [  0       B diag(M)^-1 B^T ]
   //
   //     Here we use BoomerAMG to approximate the inverse of the pressure
   //     Schur complement.
   HypreParMatrix *MinvBt = B->Transpose();
   HypreParVector *Md = new HypreParVector(MPI_COMM_WORLD, M->GetGlobalNumRows(),
                                           M->GetRowStarts());
   M->GetDiag(*Md);

   MinvBt->InvScaleRows(*Md);
   HypreParMatrix *S = ParMult(B, MinvBt);
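   // S = B diag(M)^{-1} B^T is a sparse, inexpensive approximation of the
   // exact pressure Schur complement B M^{-1} B^T.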

   HypreSolver *invM, *invS;
   invM = new HypreDiagScale(*M);
   invS = new HypreBoomerAMG(*S);

   invM->iterative_mode = false;
   invS->iterative_mode = false;

   BlockDiagonalPreconditioner *darcyPr = new BlockDiagonalPreconditioner(
      block_trueOffsets);
   darcyPr->SetDiagonalBlock(0, invM);
   darcyPr->SetDiagonalBlock(1, invS);
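   // The Darcy system is symmetric but indefinite, which is why MINRES (below)
   // is paired with this symmetric positive definite block-diagonal
   // preconditioner.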

   // 12. Solve the linear system with MINRES.
   //     Check the norm of the unpreconditioned residual.

   int maxIter(500);
   double rtol(1.e-6);
   double atol(1.e-10);

   chrono.Clear();
   chrono.Start();
   MINRESSolver solver(MPI_COMM_WORLD);
   solver.SetAbsTol(atol);
   solver.SetRelTol(rtol);
   solver.SetMaxIter(maxIter);
   solver.SetOperator(*darcyOp);
   solver.SetPreconditioner(*darcyPr);
   solver.SetPrintLevel(verbose);
   trueX = 0.0;
   solver.Mult(trueRhs, trueX);
   chrono.Stop();

   if (verbose)
   {
      if (solver.GetConverged())
         std::cout << "MINRES converged in " << solver.GetNumIterations()
                   << " iterations with a residual norm of "
                   << solver.GetFinalNorm() << ".\n";
      else
         std::cout << "MINRES did not converge in " << solver.GetNumIterations()
                   << " iterations. Residual norm is "
                   << solver.GetFinalNorm() << ".\n";
      std::cout << "MINRES solver took " << chrono.RealTime() << "s. \n";
   }

   // 13. Extract the parallel grid function corresponding to the finite element
   //     approximation X. This is the local solution on each processor. Compute
   //     L2 error norms.
   ParGridFunction *u(new ParGridFunction);
   ParGridFunction *p(new ParGridFunction);
   u->MakeRef(R_space, x.GetBlock(0), 0);
   p->MakeRef(W_space, x.GetBlock(1), 0);
   u->Distribute(&(trueX.GetBlock(0)));
   p->Distribute(&(trueX.GetBlock(1)));

   int order_quad = max(2, 2*order+1);
   const IntegrationRule *irs[Geometry::NumGeom];
   for (int i=0; i < Geometry::NumGeom; ++i)
   {
      irs[i] = &(IntRules.Get(i, order_quad));
   }

   double err_u  = u->ComputeL2Error(ucoeff, irs);
   double norm_u = ComputeGlobalLpNorm(2, ucoeff, *pmesh, irs);
   double err_p  = p->ComputeL2Error(pcoeff, irs);
   double norm_p = ComputeGlobalLpNorm(2, pcoeff, *pmesh, irs);

   if (verbose)
   {
      std::cout << "|| u_h - u_ex || / || u_ex || = " << err_u / norm_u << "\n";
      std::cout << "|| p_h - p_ex || / || p_ex || = " << err_p / norm_p << "\n";
   }

   // 14. Save the refined mesh and the solution in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g sol_*".
   {
      ostringstream mesh_name, u_name, p_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      u_name << "sol_u." << setfill('0') << setw(6) << myid;
      p_name << "sol_p." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream u_ofs(u_name.str().c_str());
      u_ofs.precision(8);
      u->Save(u_ofs);

      ofstream p_ofs(p_name.str().c_str());
      p_ofs.precision(8);
      p->Save(p_ofs);
   }

   // 15. Save data in the VisIt format.
   VisItDataCollection visit_dc("Example5-Parallel", pmesh);
   visit_dc.RegisterField("velocity", u);
   visit_dc.RegisterField("pressure", p);
   visit_dc.Save();

   // 16. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream u_sock(vishost, visport);
      u_sock << "parallel " << num_procs << " " << myid << "\n";
      u_sock.precision(8);
      u_sock << "solution\n" << *pmesh << *u << "window_title 'Velocity'"
             << endl;
      // Make sure all ranks have sent their 'u' solution before initiating
      // another set of GLVis connections (one from each rank):
      MPI_Barrier(pmesh->GetComm());
      socketstream p_sock(vishost, visport);
      p_sock << "parallel " << num_procs << " " << myid << "\n";
      p_sock.precision(8);
      p_sock << "solution\n" << *pmesh << *p << "window_title 'Pressure'"
             << endl;
   }

   // 17. Free the used memory.
   delete fform;
   delete gform;
   delete u;
   delete p;
   delete darcyOp;
   delete darcyPr;
   delete invM;
   delete invS;
   delete S;
   delete Md;
   delete MinvBt;
   delete BT;
   delete B;
   delete M;
   delete mVarf;
   delete bVarf;
   delete W_space;
   delete R_space;
   delete l2_coll;
   delete hdiv_coll;
   delete pmesh;

   MPI_Finalize();

   return 0;
}

void uFun_ex(const Vector & x, Vector & u)
{
   double xi(x(0));
   double yi(x(1));
   double zi(0.0);
   if (x.Size() == 3)
   {
      zi = x(2);
   }

   u(0) = - exp(xi)*sin(yi)*cos(zi);
   u(1) = - exp(xi)*cos(yi)*cos(zi);

   if (x.Size() == 3)
   {
      u(2) = exp(xi)*sin(yi)*sin(zi);
   }
}

// Change if needed
double pFun_ex(const Vector & x)
{
   double xi(x(0));
   double yi(x(1));
   double zi(0.0);

   if (x.Size() == 3)
   {
      zi = x(2);
   }

   return exp(xi)*sin(yi)*cos(zi);
}

void fFun(const Vector & x, Vector & f)
{
   f = 0.0;
}

double gFun(const Vector & x)
{
   if (x.Size() == 3)
   {
      return -pFun_ex(x);
   }
   else
   {
      return 0;
   }
}

double f_natural(const Vector & x)
{
   return (-pFun_ex(x));
}