MFEM v2.0
ex1p.cpp
//                       MFEM Example 1 - Parallel Version
//
// Compile with: make ex1p
//
// Sample runs:  mpirun -np 4 ex1p ../data/square-disc.mesh
//               mpirun -np 4 ex1p ../data/star.mesh
//               mpirun -np 4 ex1p ../data/escher.mesh
//               mpirun -np 4 ex1p ../data/fichera.mesh
//               mpirun -np 4 ex1p ../data/square-disc-p2.vtk
//               mpirun -np 4 ex1p ../data/square-disc-p3.mesh
//               mpirun -np 4 ex1p ../data/square-disc-nurbs.mesh
//               mpirun -np 4 ex1p ../data/disc-nurbs.mesh
//               mpirun -np 4 ex1p ../data/pipe-nurbs.mesh
//               mpirun -np 4 ex1p ../data/ball-nurbs.mesh
//
// Description:  This example code demonstrates the use of MFEM to define a
//               simple isoparametric finite element discretization of the
//               Laplace problem -Delta u = 1 with homogeneous Dirichlet
//               boundary conditions. Specifically, we discretize with the
//               FE space coming from the mesh (linear by default, quadratic
//               for quadratic curvilinear mesh, NURBS for NURBS mesh, etc.)
//
//               The example highlights the use of mesh refinement, finite
//               element grid functions, as well as bilinear and linear forms
//               corresponding to the left-hand side and right-hand side of
//               the discrete linear system. We also cover the explicit
//               elimination of boundary conditions on all boundary edges,
//               and the optional connection to the GLVis tool for
//               visualization.
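//
//               In weak form the problem reads: find u in H^1_0 such that
//               (grad u, grad v) = (1, v) for all v in H^1_0. With a basis
//               {phi_i} of the discrete space this becomes the linear system
//               A x = b, where A_ij = (grad phi_j, grad phi_i) and
//               b_i = (1, phi_i); these are the bilinear and linear forms
//               assembled below.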

#include <fstream>
#include "mfem.hpp"

int main (int argc, char *argv[])
{
   int num_procs, myid;

   // 1. Initialize MPI
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   Mesh *mesh;

   if (argc == 1)
   {
      if (myid == 0)
         cout << "\nUsage: mpirun -np <np> ex1p <mesh_file>\n" << endl;
      MPI_Finalize();
      return 1;
   }

   // 2. Read the (serial) mesh from the given mesh file on all processors.
   //    We can handle triangular, quadrilateral, tetrahedral or hexahedral
   //    elements with the same code.
   ifstream imesh(argv[1]);
   if (!imesh)
   {
      if (myid == 0)
         cerr << "\nCan not open mesh file: " << argv[1] << '\n' << endl;
      MPI_Finalize();
      return 2;
   }
   mesh = new Mesh(imesh, 1, 1);
   imesh.close();

   // 3. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 10,000 elements.
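   //    (Each uniform refinement splits every element into 2^dim children, so
   //    'ref_levels' is the largest integer for which the element count
   //    GetNE()*2^(dim*ref_levels) stays at or below 10,000.)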
   {
      int ref_levels =
         (int)floor(log(10000./mesh->GetNE())/log(2.)/mesh->Dimension());
      for (int l = 0; l < ref_levels; l++)
         mesh->UniformRefinement();
   }

   // 4. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
         pmesh->UniformRefinement();
   }

   // 5. Define a parallel finite element space on the parallel mesh. Here we
   //    use isoparametric finite elements coming from the mesh nodes (linear
   //    by default).
   FiniteElementCollection *fec;
   if (pmesh->GetNodes())
      fec = pmesh->GetNodes()->OwnFEC();
   else
      fec = new LinearFECollection;
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   int size = fespace->GlobalTrueVSize();
   if (myid == 0)
      cout << "Number of unknowns: " << size << endl;

   // 6. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (1,phi_i) where phi_i are the basis functions in fespace.
   ParLinearForm *b = new ParLinearForm(fespace);
   ConstantCoefficient one(1.0);
   b->AddDomainIntegrator(new DomainLFIntegrator(one));
   b->Assemble();

   // 7. Define the solution vector x as a parallel finite element grid function
   //    corresponding to fespace. Initialize x with an initial guess of zero,
   //    which satisfies the boundary conditions.
   ParGridFunction x(fespace);
   x = 0.0;

   // 8. Set up the parallel bilinear form a(.,.) on the finite element space
   //    corresponding to the Laplacian operator -Delta, by adding the Diffusion
   //    domain integrator and imposing homogeneous Dirichlet boundary
   //    conditions. The boundary conditions are implemented by marking all the
   //    boundary attributes from the mesh as essential. After serial and
   //    parallel assembly we extract the corresponding parallel matrix A.
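   //    (Roughly speaking, eliminating an essential dof removes its row and
   //    column from the assembled matrix and moves the known boundary value,
   //    taken from x and equal to zero here, into the right-hand side b.)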
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DiffusionIntegrator(one));
   a->Assemble();
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
      Array<int> ess_dofs;
      fespace->GetEssentialVDofs(ess_bdr, ess_dofs);
      a->EliminateEssentialBCFromDofs(ess_dofs, x, *b);
   }
   a->Finalize();

   // 9. Define the parallel (hypre) matrix and vectors representing a(.,.),
   //    b(.) and the finite element approximation.
   HypreParMatrix *A = a->ParallelAssemble();
   HypreParVector *B = b->ParallelAssemble();
   HypreParVector *X = x.ParallelAverage();
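   // (X holds the true dofs of x, with values on dofs shared between
   //  processors averaged; since x is zero, X serves as the zero initial
   //  guess for the solver below.)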

   delete a;
   delete b;

   // 10. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
   //     preconditioner from hypre.
   HypreSolver *amg = new HypreBoomerAMG(*A);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(200);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(*B, *X);
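   // (Mult runs the preconditioned CG iteration on A X = B, overwriting X with
   //  the computed solution; the print level makes hypre report convergence
   //  information during the solve.)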

   // 11. Extract the parallel grid function corresponding to the finite element
   //     approximation X. This is the local solution on each processor.
   x = *X;

   // 12. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 13. (Optional) Send the solution by socket to a GLVis server.
   char vishost[] = "localhost";
   int  visport   = 19916;
   osockstream sol_sock(visport, vishost);
   sol_sock << "parallel " << num_procs << " " << myid << "\n";
   sol_sock << "solution\n";
   sol_sock.precision(8);
   pmesh->Print(sol_sock);
   x.Save(sol_sock);
   sol_sock.send();

   // 14. Free the used memory.
   delete pcg;
   delete amg;
   delete X;
   delete B;
   delete A;

   delete fespace;
   if (!pmesh->GetNodes())
      delete fec;
   delete pmesh;

   MPI_Finalize();

   return 0;
}