MFEM  v4.1.0
Finite element discretization library
ex6p.cpp
// MFEM Example 6 - Parallel Version
// PUMI Modification
//
// Compile with: make ex6p
//
// Sample runs:  mpirun -np 8 ex6p
//
// Description:  This is a version of Example 1 with a simple adaptive mesh
//               refinement loop. The problem being solved is again the Laplace
//               equation -Delta u = 1 with homogeneous Dirichlet boundary
//               conditions. The problem is solved on a sequence of meshes which
//               are adapted in a conforming (tetrahedral) manner according
//               to a simple SPR ZZ error estimator.
//
//               This PUMI variation also performs a "uniform" refinement,
//               similar to the MFEM examples, for coarse meshes. However, the
//               refinement is performed using the PUMI API. A new option "-ar"
//               is added to modify the "adapt_ratio", which is the fraction of
//               allowable error that scales the output size field of the error
//               estimator.

#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifdef MFEM_USE_SIMMETRIX
#include <SimUtil.h>
#include <gmi_sim.h>
#endif
#include <apfMDS.h>
#include <gmi_null.h>
#include <PCU.h>
#include <spr.h>
#include <apfConvert.h>
#include <gmi_mesh.h>
#include <crv.h>

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/pumi/parallel/Kova/Kova100k_8.smb";
#ifdef MFEM_USE_SIMMETRIX
   const char *model_file = "../../data/pumi/geom/Kova.x_t";
   const char *smd_file = NULL;
#else
   const char *model_file = "../../data/pumi/geom/Kova.dmg";
#endif
   int order = 1;
   bool static_cond = false;
   bool visualization = true;
   int geom_order = 1;
   double adapt_ratio = 0.05;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&model_file, "-p", "--model",
                  "Parasolid or .dmg model to use.");
#ifdef MFEM_USE_SIMMETRIX
   args.AddOption(&smd_file, "-sm", "--smd_model",
                  "smd model file to use.");
#endif
   args.AddOption(&geom_order, "-go", "--geometry_order",
                  "Geometric order of the model.");
   args.AddOption(&adapt_ratio, "-ar", "--adapt_ratio",
                  "Adaptation factor used in MeshAdapt.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the SCOREC Mesh.
   PCU_Comm_Init();
#ifdef MFEM_USE_SIMMETRIX
   Sim_readLicenseFile(0);
   gmi_sim_start();
   gmi_register_sim();
#endif
   gmi_register_mesh();

   apf::Mesh2* pumi_mesh;
#ifdef MFEM_USE_SIMMETRIX
   if (smd_file)
   {
      gmi_model *mixed_model = gmi_sim_load(model_file, smd_file);
      pumi_mesh = apf::loadMdsMesh(mixed_model, mesh_file);
   }
   else
#endif
   {
      pumi_mesh = apf::loadMdsMesh(model_file, mesh_file);
   }

120 
121  // 4. Increase the geometry order and refine the mesh if necessary. Parallel
122  // uniform refinement is performed if the total number of elements is less
123  // than 100,000.
124  int dim = pumi_mesh->getDimension();
125  int nEle = pumi_mesh->count(dim);
126  int ref_levels = (int)floor(log(100000./nEle)/log(2.)/dim);
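   // Each uniform refinement splits every element into 2^dim children, so the
   // element count grows by roughly a factor of 2^dim per level. The formula
   // above therefore estimates how many levels keep the total near 100,000
   // elements, i.e. ref_levels ~ log2(100000/nEle) / dim.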

   if (geom_order > 1)
   {
      crv::BezierCurver bc(pumi_mesh, geom_order, 2);
      bc.run();
   }

   // Perform uniform refinement.
   if (myid == 1)
   {
      std::cout << " ref level : " << ref_levels << std::endl;
   }

   if (ref_levels > 1)
   {
      ma::Input* uniInput = ma::configureUniformRefine(pumi_mesh, ref_levels);

      if (geom_order > 1)
      {
         crv::adapt(uniInput);
      }
      else
      {
         ma::adapt(uniInput);
      }
   }

   pumi_mesh->verify();

   // 5. Create the parallel MFEM mesh object from the parallel PUMI mesh. We
   //    can handle triangular and tetrahedral meshes. Note that the mesh
   //    resolution is performed on the PUMI mesh.
   ParMesh *pmesh = new ParPumiMesh(MPI_COMM_WORLD, pumi_mesh);

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
   }
   else if (pmesh->GetNodes())
   {
      fec = pmesh->GetNodes()->OwnFEC();
      if (myid == 1)
      {
         cout << "Using isoparametric FEs: " << fec->Name() << endl;
      }
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
   }
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 1)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 7. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (1,phi_i) where phi_i are the basis functions in fespace.
   ParLinearForm *b = new ParLinearForm(fespace);
   ConstantCoefficient one(1.0);
   b->AddDomainIntegrator(new DomainLFIntegrator(one));

   // 8. Define the solution vector x as a parallel finite element grid function
   //    corresponding to fespace. Initialize x with initial guess of zero,
   //    which satisfies the boundary conditions.
   ParGridFunction x(fespace);
   x = 0.0;

   // 9. Connect to GLVis.
   char vishost[] = "localhost";
   int visport = 19916;

   socketstream sout;
   if (visualization)
   {
      sout.open(vishost, visport);
      if (!sout)
      {
         if (myid == 0)
         {
            cout << "Unable to connect to GLVis server at "
                 << vishost << ':' << visport << endl;
            cout << "GLVis visualization disabled.\n";
         }
         visualization = false;
      }

      sout.precision(8);
   }


   // 10. Set up the parallel bilinear form a(.,.) on the finite element space
   //     corresponding to the Laplacian operator -Delta, by adding the
   //     Diffusion domain integrator.
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DiffusionIntegrator(one));

   // 11. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
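   // Static condensation (enabled with -sc) eliminates the element-interior
   // degrees of freedom from the global system before assembly, which reduces
   // the size of the linear system for higher-order elements.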
   if (static_cond) { a->EnableStaticCondensation(); }

   // 12. The main AMR loop. In each iteration we solve the problem on the
   //     current mesh, visualize the solution, and adapt the mesh.
   apf::Field* Tmag_field = 0;
   apf::Field* temp_field = 0;
   apf::Field* ipfield = 0;
   apf::Field* sizefield = 0;
   int max_iter = 3;

   for (int Itr = 0; Itr < max_iter; Itr++)
   {
      HYPRE_Int global_dofs = fespace->GlobalTrueVSize();
      if (myid == 1)
      {
         cout << "\nAMR iteration " << Itr << endl;
         cout << "Number of unknowns: " << global_dofs << endl;
      }

      // Assemble.
      a->Assemble();
      b->Assemble();

      // Essential boundary condition.
      Array<int> ess_tdof_list;
      if (pmesh->bdr_attributes.Size())
      {
         Array<int> ess_bdr(pmesh->bdr_attributes.Max());
         ess_bdr = 1;
         fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
      }

      // Form linear system.
      HypreParMatrix A;
      Vector B, X;
      const int copy_interior = 1;
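      // With copy_interior = 1, FormLinearSystem keeps the interior
      // (non-essential) entries of x in the true-dof vector X instead of
      // zeroing them, so the current contents of x (zero on the first pass,
      // the solution transferred back from PUMI on later passes, see step 19)
      // serve as the initial guess for the PCG solve below.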
      a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B, copy_interior);

      // 13. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
      //     preconditioner from hypre.
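      // BoomerAMG is hypre's algebraic multigrid preconditioner; for this
      // Poisson problem it typically keeps the number of PCG iterations
      // nearly independent of the mesh size as the AMR loop refines the mesh.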
      HypreBoomerAMG amg;
      amg.SetPrintLevel(0);
      CGSolver pcg(A.GetComm());
      pcg.SetPreconditioner(amg);
      pcg.SetOperator(A);
      pcg.SetRelTol(1e-6);
      pcg.SetMaxIter(200);
      pcg.SetPrintLevel(3); // print the first and the last iterations only
      pcg.Mult(B, X);

      // 14. Recover the parallel grid function corresponding to X. This is the
      //     local finite element solution on each processor.
      a->RecoverFEMSolution(X, *b, x);

      // 15. Save the refined mesh and the solution in parallel. This output can
      //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
      {
         ostringstream mesh_name, sol_name;
         mesh_name << "mesh." << setfill('0') << setw(6) << myid;
         sol_name << "sol." << setfill('0') << setw(6) << myid;

         ofstream mesh_ofs(mesh_name.str().c_str());
         mesh_ofs.precision(8);
         pmesh->Print(mesh_ofs);

         ofstream sol_ofs(sol_name.str().c_str());
         sol_ofs.precision(8);
         x.Save(sol_ofs);
      }

      // 16. Send the solution by socket to a GLVis server.
      if (visualization)
      {
         sout << "parallel " << num_procs << " " << myid << "\n";
         sout << "solution\n" << *pmesh << x << flush;
      }

      // 17. Field transfer. Scalar solution field and magnitude field for error
      //     estimation are created on the PUMI mesh.
      if (order > geom_order)
      {
         Tmag_field = apf::createField(pumi_mesh, "field_mag",
                                       apf::SCALAR, apf::getLagrange(order));
         temp_field = apf::createField(pumi_mesh, "T_field",
                                       apf::SCALAR, apf::getLagrange(order));
      }
      else
      {
         Tmag_field = apf::createFieldOn(pumi_mesh, "field_mag", apf::SCALAR);
         temp_field = apf::createFieldOn(pumi_mesh, "T_field", apf::SCALAR);
      }

      ParPumiMesh* pPPmesh = dynamic_cast<ParPumiMesh*>(pmesh);
      pPPmesh->FieldMFEMtoPUMI(pumi_mesh, &x, temp_field, Tmag_field);

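      // SPR (superconvergent patch recovery) error estimation: recover the
      // solution gradient at integration points, then build an isotropic mesh
      // size field from the estimated error; adapt_ratio is the fraction of
      // the allowable error used to scale the requested element sizes.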
      ipfield = spr::getGradIPField(Tmag_field, "MFEM_gradip", 2);
      sizefield = spr::getSPRSizeField(ipfield, adapt_ratio);

      apf::destroyField(Tmag_field);
      apf::destroyField(ipfield);
      apf::destroyNumbering(pumi_mesh->findNumbering("LocalVertexNumbering"));

      // 18. Perform MeshAdapt.
      ma::Input* erinput = ma::configure(pumi_mesh, sizefield);
      erinput->shouldFixShape = true;
      erinput->maximumIterations = 2;
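      // shouldFixShape asks MeshAdapt to correct poorly shaped elements after
      // refinement/coarsening, and maximumIterations caps the number of
      // adaptation passes per AMR step (both are fields of ma::Input).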
      if (geom_order > 1)
      {
         crv::adapt(erinput);
      }
      else
      {
         ma::adapt(erinput);
      }

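      // Build a new MFEM mesh from the adapted PUMI mesh and use it to update
      // the existing pmesh in place; the temporary Adapmesh can then be
      // deleted while fespace and x (defined on pmesh) are updated below.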
      ParMesh* Adapmesh = new ParPumiMesh(MPI_COMM_WORLD, pumi_mesh);
      pPPmesh->UpdateMesh(Adapmesh);
      delete Adapmesh;

      // 19. Update the FiniteElementSpace, GridFunction, and bilinear form.
      fespace->Update();
      x.Update();
      x = 0.0;

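      // Transfer the solution stored on the PUMI mesh back to the updated MFEM
      // grid function; together with copy_interior above, this provides the
      // initial guess for the solve on the adapted mesh.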
      pPPmesh->FieldPUMItoMFEM(pumi_mesh, temp_field, &x);
      a->Update();
      b->Update();

      // Destroy fields.
      apf::destroyField(temp_field);
      apf::destroyField(sizefield);
   }

   // 20. Free the used memory.
   delete a;
   delete b;
   delete fespace;
   if (order > 0) { delete fec; }
   delete pmesh;

   pumi_mesh->destroyNative();
   apf::destroyMesh(pumi_mesh);
   PCU_Comm_Free();

#ifdef MFEM_USE_SIMMETRIX
   gmi_sim_stop();
   Sim_unregisterAllKeys();
#endif

   MPI_Finalize();

   return 0;
}