MFEM v4.6.0
Finite element discretization library
ex1p.cpp
// MFEM Example 1 - Parallel Version
//
// Compile with: make ex1p
//
// Sample runs:  mpirun -np 4 ex1p -m ../data/square-disc.mesh
//               mpirun -np 4 ex1p -m ../data/star.mesh
//               mpirun -np 4 ex1p -m ../data/star-mixed.mesh
//               mpirun -np 4 ex1p -m ../data/escher.mesh
//               mpirun -np 4 ex1p -m ../data/fichera.mesh
//               mpirun -np 4 ex1p -m ../data/fichera-mixed.mesh
//               mpirun -np 4 ex1p -m ../data/toroid-wedge.mesh
//               mpirun -np 4 ex1p -m ../data/octahedron.mesh -o 1
//               mpirun -np 4 ex1p -m ../data/periodic-annulus-sector.msh
//               mpirun -np 4 ex1p -m ../data/periodic-torus-sector.msh
//               mpirun -np 4 ex1p -m ../data/square-disc-p2.vtk -o 2
//               mpirun -np 4 ex1p -m ../data/square-disc-p3.mesh -o 3
//               mpirun -np 4 ex1p -m ../data/square-disc-nurbs.mesh -o -1
//               mpirun -np 4 ex1p -m ../data/star-mixed-p2.mesh -o 2
//               mpirun -np 4 ex1p -m ../data/disc-nurbs.mesh -o -1
//               mpirun -np 4 ex1p -m ../data/pipe-nurbs.mesh -o -1
//               mpirun -np 4 ex1p -m ../data/ball-nurbs.mesh -o 2
//               mpirun -np 4 ex1p -m ../data/fichera-mixed-p2.mesh -o 2
//               mpirun -np 4 ex1p -m ../data/star-surf.mesh
//               mpirun -np 4 ex1p -m ../data/square-disc-surf.mesh
//               mpirun -np 4 ex1p -m ../data/inline-segment.mesh
//               mpirun -np 4 ex1p -m ../data/amr-quad.mesh
//               mpirun -np 4 ex1p -m ../data/amr-hex.mesh
//               mpirun -np 4 ex1p -m ../data/mobius-strip.mesh
//               mpirun -np 4 ex1p -m ../data/mobius-strip.mesh -o -1 -sc
//
// Device sample runs:
//               mpirun -np 4 ex1p -pa -d cuda
//               mpirun -np 4 ex1p -fa -d cuda
//               mpirun -np 4 ex1p -pa -d occa-cuda
//               mpirun -np 4 ex1p -pa -d raja-omp
//               mpirun -np 4 ex1p -pa -d ceed-cpu
//               mpirun -np 4 ex1p -pa -d ceed-cpu -o 4 -a
//               mpirun -np 4 ex1p -pa -d ceed-cpu -m ../data/square-mixed.mesh
//               mpirun -np 4 ex1p -pa -d ceed-cpu -m ../data/fichera-mixed.mesh
//             * mpirun -np 4 ex1p -pa -d ceed-cuda
//             * mpirun -np 4 ex1p -pa -d ceed-hip
//               mpirun -np 4 ex1p -pa -d ceed-cuda:/gpu/cuda/shared
//               mpirun -np 4 ex1p -pa -d ceed-cuda:/gpu/cuda/shared -m ../data/square-mixed.mesh
//               mpirun -np 4 ex1p -pa -d ceed-cuda:/gpu/cuda/shared -m ../data/fichera-mixed.mesh
//               mpirun -np 4 ex1p -m ../data/beam-tet.mesh -pa -d ceed-cpu
//
// Description:  This example code demonstrates the use of MFEM to define a
//               simple finite element discretization of the Laplace problem
//               -Delta u = 1 with homogeneous Dirichlet boundary conditions.
//               Specifically, we discretize using a FE space of the specified
//               order, or if order < 1 using an isoparametric/isogeometric
//               space (i.e. quadratic for quadratic curvilinear mesh, NURBS for
//               NURBS mesh, etc.)
//
//               The example highlights the use of mesh refinement, finite
//               element grid functions, as well as linear and bilinear forms
//               corresponding to the left-hand side and right-hand side of the
//               discrete linear system. We also cover the explicit elimination
//               of essential boundary conditions, static condensation, and the
//               optional connection to the GLVis tool for visualization.
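//
//               In weak form, the problem reads: find u in the H1 space with
//               zero boundary trace such that (grad u, grad v) = (1, v) for
//               all test functions v in the same space. Below, the left-hand
//               side is assembled with a DiffusionIntegrator and the
//               right-hand side with a DomainLFIntegrator.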

#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI and HYPRE.
   Mpi::Init();
   int num_procs = Mpi::WorldSize();
   int myid = Mpi::WorldRank();
   Hypre::Init();

   // 2. Parse command-line options.
   const char *mesh_file = "../data/star.mesh";
   int order = 1;
   bool static_cond = false;
   bool pa = false;
   bool fa = false;
   const char *device_config = "cpu";
   bool visualization = true;
   bool algebraic_ceed = false;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&pa, "-pa", "--partial-assembly", "-no-pa",
                  "--no-partial-assembly", "Enable Partial Assembly.");
   args.AddOption(&fa, "-fa", "--full-assembly", "-no-fa",
                  "--no-full-assembly", "Enable Full Assembly.");
   args.AddOption(&device_config, "-d", "--device",
                  "Device configuration string, see Device::Configure().");
#ifdef MFEM_USE_CEED
   args.AddOption(&algebraic_ceed, "-a", "--algebraic",
                  "-no-a", "--no-algebraic",
                  "Use algebraic Ceed solver");
#endif
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Enable hardware devices such as GPUs, and programming models such as
   //    CUDA, OCCA, RAJA and OpenMP based on command line options.
   Device device(device_config);
   if (myid == 0) { device.Print(); }

   // 4. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh mesh(mesh_file, 1, 1);
   int dim = mesh.Dimension();

   // 5. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 10,000 elements.
   {
      int ref_levels =
         (int)floor(log(10000./mesh.GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
      {
         mesh.UniformRefinement();
      }
   }

   // 6. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh pmesh(MPI_COMM_WORLD, mesh);
   mesh.Clear();
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh.UniformRefinement();
      }
   }

   // 7. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   bool delete_fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
      delete_fec = true;
   }
   else if (pmesh.GetNodes())
   {
      fec = pmesh.GetNodes()->OwnFEC();
      delete_fec = false;
      if (myid == 0)
      {
         cout << "Using isoparametric FEs: " << fec->Name() << endl;
      }
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
      delete_fec = true;
   }
   ParFiniteElementSpace fespace(&pmesh, fec);
   HYPRE_BigInt size = fespace.GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 8. Determine the list of true (i.e. parallel conforming) essential
   //    boundary dofs. In this example, the boundary conditions are defined
   //    by marking all the boundary attributes from the mesh as essential
   //    (Dirichlet) and converting them to a list of true dofs.
   Array<int> ess_tdof_list;
   if (pmesh.bdr_attributes.Size())
   {
      Array<int> ess_bdr(pmesh.bdr_attributes.Max());
      ess_bdr = 1;
      fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
   }

   // 9. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (1,phi_i) where phi_i are the basis functions in fespace.
   ParLinearForm b(&fespace);
   ConstantCoefficient one(1.0);
   b.AddDomainIntegrator(new DomainLFIntegrator(one));
   b.Assemble();

   // 10. Define the solution vector x as a parallel finite element grid
   //     function corresponding to fespace. Initialize x with initial guess of
   //     zero, which satisfies the boundary conditions.
   ParGridFunction x(&fespace);
   x = 0.0;

   // 11. Set up the parallel bilinear form a(.,.) on the finite element space
   //     corresponding to the Laplacian operator -Delta, by adding the
   //     Diffusion domain integrator.
   ParBilinearForm a(&fespace);
   if (pa) { a.SetAssemblyLevel(AssemblyLevel::PARTIAL); }
   if (fa)
   {
      a.SetAssemblyLevel(AssemblyLevel::FULL);
      // Sort the matrix column indices when running on GPU or with OpenMP (i.e.
      // when Device::IsEnabled() returns true). This makes the results
      // bit-for-bit deterministic at the cost of somewhat longer run time.
      a.EnableSparseMatrixSorting(Device::IsEnabled());
   }
   a.AddDomainIntegrator(new DiffusionIntegrator(one));

   // 12. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
   if (static_cond) { a.EnableStaticCondensation(); }
   a.Assemble();

   OperatorPtr A;
   Vector B, X;
   a.FormLinearSystem(ess_tdof_list, x, b, A, X, B);

   // 13. Solve the linear system A X = B.
   //     * With full assembly, use the BoomerAMG preconditioner from hypre.
   //     * With partial assembly, use Jacobi smoothing, for now.
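   //     * With partial assembly on non-tensor (e.g. simplicial) elements, no
   //       preconditioner is constructed below and CG runs unpreconditioned.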
   Solver *prec = NULL;
   if (pa)
   {
      if (UsesTensorBasis(fespace))
      {
         if (algebraic_ceed)
         {
            prec = new ceed::AlgebraicSolver(a, ess_tdof_list);
         }
         else
         {
            prec = new OperatorJacobiSmoother(a, ess_tdof_list);
         }
      }
   }
   else
   {
      prec = new HypreBoomerAMG;
   }
   CGSolver cg(MPI_COMM_WORLD);
   cg.SetRelTol(1e-12);
   cg.SetMaxIter(2000);
   cg.SetPrintLevel(1);
   if (prec) { cg.SetPreconditioner(*prec); }
   cg.SetOperator(*A);
   cg.Mult(B, X);
   delete prec;

   // 14. Recover the parallel grid function corresponding to X. This is the
   //     local finite element solution on each processor.
   a.RecoverFEMSolution(X, b, x);

   // 15. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh.Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 16. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << pmesh << x << flush;
   }

   // 17. Free the used memory.
   if (delete_fec)
   {
      delete fec;
   }

   return 0;
}