MFEM v4.8.0
Finite element discretization library
ex5p.cpp
// MFEM Example 5 - Parallel Version
// PETSc Modification
//
// Compile with: make ex5p
//
// Sample runs:
//    mpirun -np 4 ex5p -m ../../data/beam-tet.mesh --petscopts rc_ex5p_fieldsplit
//    mpirun -np 4 ex5p -m ../../data/star.mesh --petscopts rc_ex5p_bddc --nonoverlapping
//
// Description:  This example code solves a simple 2D/3D mixed Darcy problem
//               corresponding to the saddle point system
//                   k*u + grad p = f
//                   - div u      = g
//               with natural boundary condition -p = <given pressure>.
//               Here, we use a given exact solution (u,p) and compute the
//               corresponding r.h.s. (f,g). We discretize with Raviart-Thomas
//               finite elements (velocity u) and piecewise discontinuous
//               polynomials (pressure p).
//
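//               For reference, here is a sketch of the discrete weak
//               formulation assembled below (the notation follows the
//               integrators used in the code): find (u_h,p_h) in R_h x W_h
//               such that
//                   (k u_h, v_h) - (p_h, div v_h) = (f, v_h) - <p_given, v_h.n>
//                   -(div u_h, q_h)               = (g, q_h)
//               for all (v_h,q_h) in R_h x W_h.
//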
//               The example demonstrates the use of the BlockOperator class, as
//               well as the collective saving of several grid functions in a
//               VisIt (visit.llnl.gov) visualization format.
//
//               Two types of PETSc solvers can be used: BDDC or fieldsplit.
//               When using BDDC, the nonoverlapping assembly feature should be
//               used. This specific example needs PETSc compiled with support
//               for SuiteSparse and/or MUMPS in order to use BDDC.
//
//               We recommend viewing examples 1-4 before viewing this example.

#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifndef MFEM_USE_PETSC
#error This example requires that MFEM is built with MFEM_USE_PETSC=YES
#endif
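
// (Note: a typical build configuration is something along the lines of
// "make config MFEM_USE_PETSC=YES PETSC_DIR=<path> PETSC_ARCH=<arch>"; the
// exact variables depend on how PETSc was installed.)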

using namespace std;
using namespace mfem;

// Define the analytical solution and forcing terms / boundary conditions
void uFun_ex(const Vector & x, Vector & u);
real_t pFun_ex(const Vector & x);
void fFun(const Vector & x, Vector & f);
real_t gFun(const Vector & x);
real_t f_natural(const Vector & x);

int main(int argc, char *argv[])
{
   StopWatch chrono;

   // 1. Initialize MPI and HYPRE.
   Mpi::Init(argc, argv);
   int num_procs = Mpi::WorldSize();
   int myid = Mpi::WorldRank();
   Hypre::Init();
   bool verbose = (myid == 0);

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/star.mesh";
   int ser_ref_levels = -1;
   int par_ref_levels = 2;
   int order = 1;
   bool par_format = false;
   bool visualization = true;
   bool use_petsc = true;
   bool use_nonoverlapping = false;
   bool local_bdr_spec = false;
   const char *petscrc_file = "";
   const char *device_config = "cpu";

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
                  "Number of times to refine the mesh uniformly in serial.");
   args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
                  "Number of times to refine the mesh uniformly in parallel.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&par_format, "-pf", "--parallel-format", "-sf",
                  "--serial-format",
                  "Format to use when saving the results for VisIt.");
   args.AddOption(&device_config, "-d", "--device",
                  "Device configuration string, see Device::Configure().");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc",
                  "--no-petsc",
                  "Whether to use PETSc to solve the linear system.");
   args.AddOption(&petscrc_file, "-petscopts", "--petscopts",
                  "PetscOptions file to use.");
   args.AddOption(&use_nonoverlapping, "-nonoverlapping", "--nonoverlapping",
                  "-no-nonoverlapping", "--no-nonoverlapping",
                  "Whether to use PETSc's block-diagonal matrix format "
                  "for non-overlapping domain decomposition.");
   args.AddOption(&local_bdr_spec, "-local-bdr", "--local-bdr", "-no-local-bdr",
                  "--no-local-bdr",
                  "Specify boundary dofs in local (Vdofs) ordering.");
   args.Parse();
   if (!args.Good())
   {
      if (verbose)
      {
         args.PrintUsage(cout);
      }
      return 1;
   }
   if (verbose)
   {
      args.PrintOptions(cout);
   }

   // 2b. Enable hardware devices such as GPUs, and programming models such as
   //     CUDA, OCCA, RAJA and OpenMP based on command line options.
   Device device(device_config);
   if (myid == 0) { device.Print(); }

   // 2c. Initialize PETSc (passing the options file given on the command line).
   if (use_petsc) { MFEMInitializePetsc(NULL, NULL, petscrc_file, NULL); }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ser_ref_levels' of uniform refinement. When not
   //    given on the command line, 'ser_ref_levels' is chosen as the largest
   //    number that gives a final mesh with no more than 10,000 elements.
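   //    (For example, a 2D mesh with 20 elements yields
   //    floor(log(10000/20)/log(2)/2) = floor(4.48) = 4 levels, i.e.
   //    20*4^4 = 5120 <= 10,000 elements; the numbers are only illustrative.)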
   {
      if (ser_ref_levels < 0)
      {
         ser_ref_levels = (int)floor(log(10000./mesh->GetNE())/log(2.)/dim);
      }
      for (int l = 0; l < ser_ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Raviart-Thomas finite elements of the specified order.
   FiniteElementCollection *hdiv_coll(new RT_FECollection(order, dim));
   FiniteElementCollection *l2_coll(new L2_FECollection(order, dim));

   ParFiniteElementSpace *R_space = new ParFiniteElementSpace(pmesh, hdiv_coll);
   ParFiniteElementSpace *W_space = new ParFiniteElementSpace(pmesh, l2_coll);

   HYPRE_BigInt dimR = R_space->GlobalTrueVSize();
   HYPRE_BigInt dimW = W_space->GlobalTrueVSize();

   if (verbose)
   {
      std::cout << "***********************************************************\n";
      std::cout << "dim(R) = " << dimR << "\n";
      std::cout << "dim(W) = " << dimW << "\n";
      std::cout << "dim(R+W) = " << dimR + dimW << "\n";
      std::cout << "***********************************************************\n";
   }

   // 7. Define the two block structures of the problem. block_offsets is used
   //    for Vectors based on dofs (like ParGridFunction or ParLinearForm),
   //    block_trueOffsets is used for Vectors based on true dofs (HypreParVector
   //    for the rhs and solution of the linear system). The offsets computed
   //    here are local to the processor.
   Array<int> block_offsets(3); // number of variables + 1
   block_offsets[0] = 0;
   block_offsets[1] = R_space->GetVSize();
   block_offsets[2] = W_space->GetVSize();
   block_offsets.PartialSum();

   Array<int> block_trueOffsets(3); // number of variables + 1
   block_trueOffsets[0] = 0;
   block_trueOffsets[1] = R_space->TrueVSize();
   block_trueOffsets[2] = W_space->TrueVSize();
   block_trueOffsets.PartialSum();
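   // (PartialSum() turns the block sizes into cumulative offsets: with
   //  illustrative local sizes 320 and 192, {0, 320, 192} becomes
   //  {0, 320, 512}, so block 0 spans [0,320) and block 1 spans [320,512).)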

   // 8. Define the coefficients, analytical solution, and rhs of the PDE.
   ConstantCoefficient k(1.0);

   VectorFunctionCoefficient fcoeff(dim, fFun);
   FunctionCoefficient fnatcoeff(f_natural);
   FunctionCoefficient gcoeff(gFun);

   VectorFunctionCoefficient ucoeff(dim, uFun_ex);
   FunctionCoefficient pcoeff(pFun_ex);

   // 9. Define the parallel grid functions and parallel linear forms, the
   //    solution vector and the rhs.
   MemoryType mt = device.GetMemoryType();
   BlockVector x(block_offsets, mt), rhs(block_offsets, mt);
   BlockVector trueX(block_trueOffsets, mt), trueRhs(block_trueOffsets, mt);

   ParLinearForm *fform(new ParLinearForm);
   fform->Update(R_space, rhs.GetBlock(0), 0);
   fform->AddDomainIntegrator(new VectorFEDomainLFIntegrator(fcoeff));
   fform->AddBoundaryIntegrator(new VectorFEBoundaryFluxLFIntegrator(fnatcoeff));
   fform->Assemble();
   fform->SyncAliasMemory(rhs);
   fform->ParallelAssemble(trueRhs.GetBlock(0));
   trueRhs.GetBlock(0).SyncAliasMemory(trueRhs);

   ParLinearForm *gform(new ParLinearForm);
   gform->Update(W_space, rhs.GetBlock(1), 0);
   gform->AddDomainIntegrator(new DomainLFIntegrator(gcoeff));
   gform->Assemble();
   gform->SyncAliasMemory(rhs);
   gform->ParallelAssemble(trueRhs.GetBlock(1));
   trueRhs.GetBlock(1).SyncAliasMemory(trueRhs);

   // 10. Assemble the finite element matrices for the Darcy operator
   //
   //                            D = [ M  B^T ]
   //                                [ B   0  ]
   //     where:
   //
   //     M = \int_\Omega k u_h \cdot v_h d\Omega    u_h, v_h \in R_h
   //     B = -\int_\Omega \div u_h q_h d\Omega      u_h \in R_h, q_h \in W_h
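   //
   //     (Note: VectorFEDivergenceIntegrator assembles +\int_\Omega \div u_h q_h,
   //     so the minus sign in B is obtained by scaling the assembled matrix
   //     by -1 below.)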
   ParBilinearForm *mVarf(new ParBilinearForm(R_space));
   ParMixedBilinearForm *bVarf(new ParMixedBilinearForm(R_space, W_space));

   PetscParMatrix *pM = NULL, *pB = NULL, *pBT = NULL;
   HypreParMatrix *M = NULL, *B = NULL, *BT = NULL;
   Operator::Type tid =
      !use_petsc ? Operator::Hypre_ParCSR :
      (use_nonoverlapping ? Operator::PETSC_MATIS : Operator::PETSC_MATAIJ);
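   // (MATIS keeps the matrix in unassembled, subdomain-local form, which is
   //  what PETSc's BDDC preconditioner operates on; MATAIJ is the standard
   //  assembled parallel CSR format.)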
   OperatorHandle Mh(tid), Bh(tid);

   mVarf->AddDomainIntegrator(new VectorFEMassIntegrator(k));
   mVarf->Assemble();
   mVarf->Finalize();
   mVarf->ParallelAssemble(Mh);
   if (!use_petsc) { Mh.Get(M); }
   else { Mh.Get(pM); }
   Mh.SetOperatorOwner(false);

   bVarf->AddDomainIntegrator(new VectorFEDivergenceIntegrator);
   bVarf->Assemble();
   bVarf->Finalize();
   if (!use_petsc)
   {
      B = bVarf->ParallelAssemble();
      (*B) *= -1;
   }
   else
   {
      bVarf->ParallelAssemble(Bh);
      Bh.Get(pB);
      Bh.SetOperatorOwner(false);
      (*pB) *= -1;
   }

   if (!use_petsc) { BT = B->Transpose(); }
   else { pBT = pB->Transpose(); }

   Operator *darcyOp = NULL;
   if (!use_petsc)
   {
      BlockOperator *tdarcyOp = new BlockOperator(block_trueOffsets);
      tdarcyOp->SetBlock(0,0,M);
      tdarcyOp->SetBlock(0,1,BT);
      tdarcyOp->SetBlock(1,0,B);
      darcyOp = tdarcyOp;
   }
   else
   {
      // We construct the BlockOperator and then convert it to a
      // PetscParMatrix to avoid any conversion in the construction of the
      // preconditioners.
      BlockOperator *tdarcyOp = new BlockOperator(block_trueOffsets);
      tdarcyOp->SetBlock(0,0,pM);
      tdarcyOp->SetBlock(0,1,pBT);
      tdarcyOp->SetBlock(1,0,pB);
      darcyOp = new PetscParMatrix(pM->GetComm(), tdarcyOp,
                                   use_nonoverlapping ? Operator::PETSC_MATIS :
                                   Operator::PETSC_MATAIJ);
      delete tdarcyOp;
   }

   // 11. Construct the operators for the preconditioner
   //
   //                 P = [ diag(M)         0         ]
   //                     [  0       B diag(M)^-1 B^T ]
   //
   //     Here we use HypreDiagScale for the velocity block and BoomerAMG to
   //     approximate the inverse of the pressure Schur complement. When PETSc
   //     is enabled, a BDDC or fieldsplit preconditioner is used instead.
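   //     (diag(M) is used in place of M since FE mass matrices are spectrally
   //     equivalent to their diagonal, and it makes the approximate Schur
   //     complement B diag(M)^-1 B^T cheap to assemble explicitly.)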
   PetscPreconditioner *pdarcyPr = NULL;
   BlockDiagonalPreconditioner *darcyPr = NULL;
   HypreSolver *invM = NULL, *invS = NULL;
   HypreParMatrix *S = NULL;
   HypreParMatrix *MinvBt = NULL;
   HypreParVector *Md = NULL;
   if (!use_petsc)
   {
      MinvBt = B->Transpose();
      Md = new HypreParVector(MPI_COMM_WORLD, M->GetGlobalNumRows(),
                              M->GetRowStarts());
      M->GetDiag(*Md);

      MinvBt->InvScaleRows(*Md);
      S = ParMult(B, MinvBt);

      invM = new HypreDiagScale(*M);
      invS = new HypreBoomerAMG(*S);

      invM->iterative_mode = false;
      invS->iterative_mode = false;

      darcyPr = new BlockDiagonalPreconditioner(block_trueOffsets);
      darcyPr->SetDiagonalBlock(0, invM);
      darcyPr->SetDiagonalBlock(1, invS);
   }
   else
   {
      if (use_nonoverlapping)
      {
         PetscBDDCSolverParams opts;

         // For saddle point problems, we need to provide BDDC with the list
         // of boundary dofs, either essential or natural.
         // Since R_space is the only space that may have boundary dofs and
         // it is ordered first, before W_space, we don't need any local
         // offset when specifying the dofs.
         Array<int> bdr_tdof_list;
         if (pmesh->bdr_attributes.Size())
         {
            Array<int> bdr(pmesh->bdr_attributes.Max());
            bdr = 1;

            if (!local_bdr_spec)
            {
               // Essential dofs in global ordering
               R_space->GetEssentialTrueDofs(bdr, bdr_tdof_list);
            }
            else
            {
               // Alternatively, you can also provide the list of dofs in
               // local ordering
               R_space->GetEssentialVDofs(bdr, bdr_tdof_list);
               bdr_tdof_list.SetSize(R_space->GetVSize()+W_space->GetVSize(),0);
            }
            opts.SetNatBdrDofs(&bdr_tdof_list, local_bdr_spec);
         }
         else
         {
            MFEM_WARNING("Missing boundary dofs. This may cause solver failures.");
         }

         // See also the command line options file rc_ex5p_bddc
         pdarcyPr = new PetscBDDCSolver(MPI_COMM_WORLD, *darcyOp, opts, "prec_");
      }
      else
      {
         // With PETSc, we can construct the (same) block-diagonal solver with
         // command line options (see rc_ex5p_fieldsplit)
         pdarcyPr = new PetscFieldSplitSolver(MPI_COMM_WORLD, *darcyOp, "prec_");
      }
   }

   // 12. Solve the linear system with MINRES (or PCG when using BDDC).
   //     Check the norm of the unpreconditioned residual.
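   //     (MINRES requires only symmetry of the operator, which the indefinite
   //     saddle-point matrix D satisfies; the preconditioner, however, must be
   //     symmetric positive definite, which is why P is built from SPD blocks.)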

   int maxIter(500);
   real_t rtol(1.e-6);
   real_t atol(1.e-10);

   chrono.Clear();
   chrono.Start();

   trueX = 0.0;
   if (!use_petsc)
   {
      MINRESSolver solver(MPI_COMM_WORLD);
      solver.SetAbsTol(atol);
      solver.SetRelTol(rtol);
      solver.SetMaxIter(maxIter);
      solver.SetOperator(*darcyOp);
      solver.SetPreconditioner(*darcyPr);
      solver.SetPrintLevel(1);
      solver.Mult(trueRhs, trueX);
      if (verbose)
      {
         if (solver.GetConverged())
         {
            std::cout << "MINRES converged in " << solver.GetNumIterations()
                      << " iterations with a residual norm of "
                      << solver.GetFinalNorm() << ".\n";
         }
         else
         {
            std::cout << "MINRES did not converge in "
                      << solver.GetNumIterations()
                      << " iterations. Residual norm is "
                      << solver.GetFinalNorm() << ".\n";
         }
         std::cout << "MINRES solver took " << chrono.RealTime() << "s.\n";
      }
   }
   else
   {
      std::string solvertype;
      PetscLinearSolver *solver;
      if (use_nonoverlapping)
      {
         // We can use conjugate gradients to solve the problem
         solver = new PetscPCGSolver(MPI_COMM_WORLD);
         solvertype = "PCG";
      }
      else
      {
         solver = new PetscLinearSolver(MPI_COMM_WORLD);
         solvertype = "MINRES";
      }
      solver->SetOperator(*darcyOp);
      solver->SetPreconditioner(*pdarcyPr);
      solver->SetAbsTol(atol);
      solver->SetRelTol(rtol);
      solver->SetMaxIter(maxIter);
      solver->SetPrintLevel(2);
      solver->Mult(trueRhs, trueX);
      if (verbose)
      {
         if (solver->GetConverged())
         {
            std::cout << solvertype << " converged in "
                      << solver->GetNumIterations()
                      << " iterations with a residual norm of "
                      << solver->GetFinalNorm() << ".\n";
         }
         else
         {
            std::cout << solvertype << " did not converge in "
                      << solver->GetNumIterations()
                      << " iterations. Residual norm is "
                      << solver->GetFinalNorm() << ".\n";
         }
         std::cout << solvertype << " solver took "
                   << chrono.RealTime() << "s.\n";
      }
      delete solver;
   }
   chrono.Stop();

   // 13. Extract the parallel grid functions corresponding to the finite
   //     element approximation X. This is the local solution on each
   //     processor. Compute the L2 error norms.
   ParGridFunction *u(new ParGridFunction);
   ParGridFunction *p(new ParGridFunction);
   u->MakeRef(R_space, x.GetBlock(0), 0);
   p->MakeRef(W_space, x.GetBlock(1), 0);
   u->Distribute(&(trueX.GetBlock(0)));
   p->Distribute(&(trueX.GetBlock(1)));
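   // (Distribute() fills the local (vdof) values of the grid functions from
   //  the true-dof solution vector, including dofs shared across processors.)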

   int order_quad = max(2, 2*order+1);
   const IntegrationRule *irs[Geometry::NumGeom];
   for (int i=0; i < Geometry::NumGeom; ++i)
   {
      irs[i] = &(IntRules.Get(i, order_quad));
   }

   real_t err_u  = u->ComputeL2Error(ucoeff, irs);
   real_t norm_u = ComputeGlobalLpNorm(2, ucoeff, *pmesh, irs);
   real_t err_p  = p->ComputeL2Error(pcoeff, irs);
   real_t norm_p = ComputeGlobalLpNorm(2, pcoeff, *pmesh, irs);

   if (verbose)
   {
      std::cout << "|| u_h - u_ex || / || u_ex || = " << err_u / norm_u << "\n";
      std::cout << "|| p_h - p_ex || / || p_ex || = " << err_p / norm_p << "\n";
   }

   // 14. Save the refined mesh and the solution in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g sol_*".
   {
      ostringstream mesh_name, u_name, p_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      u_name << "sol_u." << setfill('0') << setw(6) << myid;
      p_name << "sol_p." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream u_ofs(u_name.str().c_str());
      u_ofs.precision(8);
      u->Save(u_ofs);

      ofstream p_ofs(p_name.str().c_str());
      p_ofs.precision(8);
      p->Save(p_ofs);
   }

   // 15. Save data in the VisIt format
   VisItDataCollection visit_dc("Example5-Parallel", pmesh);
   visit_dc.RegisterField("velocity", u);
   visit_dc.RegisterField("pressure", p);
   visit_dc.SetFormat(!par_format ?
                      DataCollection::SERIAL_FORMAT :
                      DataCollection::PARALLEL_FORMAT);
   visit_dc.Save();

   // 16. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream u_sock(vishost, visport);
      u_sock << "parallel " << num_procs << " " << myid << "\n";
      u_sock.precision(8);
      u_sock << "solution\n" << *pmesh << *u << "window_title 'Velocity'"
             << endl;
      // Make sure all ranks have sent their 'u' solution before initiating
      // another set of GLVis connections (one from each rank):
      MPI_Barrier(pmesh->GetComm());
      socketstream p_sock(vishost, visport);
      p_sock << "parallel " << num_procs << " " << myid << "\n";
      p_sock.precision(8);
      p_sock << "solution\n" << *pmesh << *p << "window_title 'Pressure'"
             << endl;
   }

   // 17. Free the used memory.
   delete fform;
   delete gform;
   delete u;
   delete p;
   delete darcyOp;
   delete darcyPr;
   delete pdarcyPr;
   delete invM;
   delete invS;
   delete S;
   delete BT;
   delete B;
   delete M;
   delete pBT;
   delete pB;
   delete pM;
   delete MinvBt;
   delete Md;
   delete mVarf;
   delete bVarf;
   delete W_space;
   delete R_space;
   delete l2_coll;
   delete hdiv_coll;
   delete pmesh;

   // Finalize PETSc
   if (use_petsc) { MFEMFinalizePetsc(); }

   return 0;
}

void uFun_ex(const Vector & x, Vector & u)
{
   real_t xi(x(0));
   real_t yi(x(1));
   real_t zi(0.0);
   if (x.Size() == 3)
   {
      zi = x(2);
   }

   u(0) = - exp(xi)*sin(yi)*cos(zi);
   u(1) = - exp(xi)*cos(yi)*cos(zi);

   if (x.Size() == 3)
   {
      u(2) = exp(xi)*sin(yi)*sin(zi);
   }
}

// Change if needed
real_t pFun_ex(const Vector & x)
{
   real_t xi(x(0));
   real_t yi(x(1));
   real_t zi(0.0);

   if (x.Size() == 3)
   {
      zi = x(2);
   }

   return exp(xi)*sin(yi)*cos(zi);
}

void fFun(const Vector & x, Vector & f)
{
   f = 0.0;
}

real_t gFun(const Vector & x)
{
   if (x.Size() == 3)
   {
      return -pFun_ex(x);
   }
   else
   {
      return 0;
   }
}

real_t f_natural(const Vector & x)
{
   return (-pFun_ex(x));
}
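
// Consistency check of the exact solution (for reference): with
// p = exp(x)*sin(y)*cos(z) we have grad p = -u, so k*u + grad p = 0 = f,
// and div u = exp(x)*sin(y)*cos(z) = p, so g = -div u = -p in 3D. In 2D
// (z = 0) the divergence vanishes, which is why gFun returns 0 there.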