MFEM v2.0
ex3p.cpp
//                       MFEM Example 3 - Parallel Version
//
// Compile with: make ex3p
//
// Sample runs:  mpirun -np 4 ex3p ../data/beam-tet.mesh
//               mpirun -np 4 ex3p ../data/beam-hex.mesh
//               mpirun -np 4 ex3p ../data/escher.mesh
//               mpirun -np 4 ex3p ../data/fichera.mesh
//               mpirun -np 4 ex3p ../data/fichera-q2.vtk
//               mpirun -np 4 ex3p ../data/fichera-q3.mesh
//               mpirun -np 4 ex3p ../data/beam-hex-nurbs.mesh
//
// Description:  This example code solves a simple 3D electromagnetic diffusion
//               problem corresponding to the second order definite Maxwell
//               equation curl curl E + E = f with boundary condition
//               E x n = <given tangential field>. Here, we use a given exact
//               solution E and compute the corresponding r.h.s. f.
//               We discretize with Nedelec finite elements.
//
//               The example demonstrates the use of H(curl) finite element
//               spaces with the curl-curl and the (vector finite element) mass
//               bilinear form, as well as the computation of discretization
//               error when the exact solution is known.
//
//               We recommend viewing examples 1-2 before viewing this example.

#include <fstream>
#include <iostream>
#include <sstream>
#include <iomanip>
#include <cmath>
#include "mfem.hpp"

using namespace std;

// Exact solution, E, and r.h.s., f. See below for implementation.
void E_exact(const Vector &, Vector &);
void f_exact(const Vector &, Vector &);

int main (int argc, char *argv[])
{
   int num_procs, myid;

   // 1. Initialize MPI.
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   Mesh *mesh;

   if (argc == 1)
   {
      if (myid == 0)
         cout << "\nUsage: mpirun -np <np> ex3p <mesh_file>\n" << endl;
      MPI_Finalize();
      return 1;
   }

   // 2. Read the (serial) mesh from the given mesh file on all processors.
   //    In this 3D example, we can handle tetrahedral or hexahedral meshes
   //    with the same code.
   ifstream imesh(argv[1]);
   if (!imesh)
   {
      if (myid == 0)
         cerr << "\nCannot open mesh file: " << argv[1] << '\n' << endl;
      MPI_Finalize();
      return 2;
   }
   mesh = new Mesh(imesh, 1, 1);
   imesh.close();
   if (mesh->Dimension() != 3)
   {
      if (myid == 0)
         cerr << "\nThis example requires a 3D mesh\n" << endl;
      MPI_Finalize();
      return 3;
   }

   // 3. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
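   //    Each uniform refinement multiplies the number of elements by 2^dim
   //    (a factor of 8 in 3D), which is what the logarithmic formula below
   //    accounts for.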
   {
      int ref_levels =
         (int)floor(log(1000./mesh->GetNE())/log(2.)/mesh->Dimension());
      for (int l = 0; l < ref_levels; l++)
         mesh->UniformRefinement();
   }

   // 4. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted. Tetrahedral
   //    meshes need to be reoriented before we can define high-order Nedelec
   //    spaces on them.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
         pmesh->UniformRefinement();
   }
   pmesh->ReorientTetMesh();

   // 5. Define a parallel finite element space on the parallel mesh. Here we
   //    use the lowest order Nedelec finite elements, but we can easily switch
   //    to higher-order spaces by changing the value of p.
   int p = 1;
   FiniteElementCollection *fec = new ND_FECollection(p, pmesh->Dimension());
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   int size = fespace->GlobalTrueVSize();
   if (myid == 0)
      cout << "Number of unknowns: " << size << endl;

   // 6. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (f,phi_i) where f is given by the function f_exact and phi_i are the
   //    basis functions in the finite element fespace.
   VectorFunctionCoefficient f(3, f_exact);
   ParLinearForm *b = new ParLinearForm(fespace);
   b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
   b->Assemble();

   // 7. Define the solution vector x as a parallel finite element grid function
   //    corresponding to fespace. Initialize x by projecting the exact
   //    solution. Note that only values from the boundary edges will be used
   //    when eliminating the non-homogeneous boundary condition to modify the
   //    r.h.s. vector b.
   ParGridFunction x(fespace);
   VectorFunctionCoefficient E(3, E_exact);
   x.ProjectCoefficient(E);

   // 8. Set up the parallel bilinear form corresponding to the EM diffusion
   //    operator curl muinv curl + sigma I, by adding the curl-curl and the
   //    mass domain integrators and finally imposing non-homogeneous Dirichlet
   //    boundary conditions. The boundary conditions are implemented by
   //    marking all the boundary attributes from the mesh as essential
   //    (Dirichlet). After serial and parallel assembly we extract the
   //    parallel matrix A.
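   //    The elimination below removes the rows and columns of the essential
   //    (boundary) degrees of freedom and uses the boundary values already
   //    stored in x (from the projection in step 7) to modify the r.h.s. b.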
   Coefficient *muinv = new ConstantCoefficient(1.0);
   Coefficient *sigma = new ConstantCoefficient(1.0);
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new CurlCurlIntegrator(*muinv));
   a->AddDomainIntegrator(new VectorFEMassIntegrator(*sigma));
   a->Assemble();
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
      Array<int> ess_dofs;
      fespace->GetEssentialVDofs(ess_bdr, ess_dofs);
      a->EliminateEssentialBCFromDofs(ess_dofs, x, *b);
   }
   a->Finalize();

   // 9. Define the parallel (hypre) matrix and vectors representing a(.,.),
   //    b(.) and the finite element approximation.
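   //    The true-dof vector X is zeroed below, so PCG starts from a zero
   //    initial guess; the essential boundary values are already accounted
   //    for in A and B through the elimination in step 8.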
   HypreParMatrix *A = a->ParallelAssemble();
   HypreParVector *B = b->ParallelAssemble();
   HypreParVector *X = x.ParallelAverage();
   *X = 0.0;

   delete a;
   delete sigma;
   delete muinv;
   delete b;

   // 10. Define and apply a parallel PCG solver for AX=B with the AMS
   //     preconditioner from hypre.
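   //     AMS (the Auxiliary-space Maxwell Solver) is hypre's preconditioner
   //     for H(curl) problems; it is constructed from the edge finite element
   //     space, which it uses to build its auxiliary nodal spaces.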
   HypreSolver *ams = new HypreAMS(*A, fespace);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(500);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*ams);
   pcg->Mult(*B, *X);

   // 11. Extract the parallel grid function corresponding to the finite element
   //     approximation X. This is the local solution on each processor.
   x = *X;

   // 12. Compute and print the L^2 norm of the error.
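   //     ComputeL2Error integrates the difference between x and the exact
   //     solution coefficient E over the local elements and combines the
   //     result across processors, so rank 0 prints the global error.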
   {
      double err = x.ComputeL2Error(E);
      if (myid == 0)
         cout << "\n|| E_h - E ||_{L^2} = " << err << '\n' << endl;
   }

   // 13. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 14. (Optional) Send the solution by socket to a GLVis server.
   char vishost[] = "localhost";
   int  visport   = 19916;
   osockstream sol_sock(visport, vishost);
   sol_sock << "parallel " << num_procs << " " << myid << "\n";
   sol_sock << "solution\n";
   sol_sock.precision(8);
   pmesh->Print(sol_sock);
   x.Save(sol_sock);
   sol_sock.send();

   // 15. Free the used memory.
   delete pcg;
   delete ams;
   delete X;
   delete B;
   delete A;
   delete fespace;
   delete fec;
   delete pmesh;

   MPI_Finalize();

   return 0;
}

// A parameter for the exact solution.
const double kappa = M_PI;

void E_exact(const Vector &x, Vector &E)
{
   E(0) = sin(kappa * x(1));
   E(1) = sin(kappa * x(2));
   E(2) = sin(kappa * x(0));
}

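// With E as above, a direct computation gives curl curl E = kappa^2 * E, so
// the right-hand side of curl curl E + E = f is f = (1 + kappa^2) * E,
// matching the components below.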
void f_exact(const Vector &x, Vector &f)
{
   f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
   f(1) = (1. + kappa * kappa) * sin(kappa * x(2));
   f(2) = (1. + kappa * kappa) * sin(kappa * x(0));
}