MFEM v2.0
// MFEM Example 4
//
// Compile with: make ex4p
//
// Sample runs:  mpirun -np 4 ex4p ../data/square-disc.mesh
//               mpirun -np 4 ex4p ../data/star.mesh
//               mpirun -np 4 ex4p ../data/beam-tet.mesh
//               mpirun -np 4 ex4p ../data/beam-hex.mesh
//               mpirun -np 4 ex4p ../data/escher.mesh
//               mpirun -np 4 ex4p ../data/fichera.mesh
//               mpirun -np 4 ex4p ../data/fichera-q2.vtk
//               mpirun -np 4 ex4p ../data/fichera-q3.mesh
//               mpirun -np 4 ex4p ../data/square-disc-nurbs.mesh
//               mpirun -np 4 ex4p ../data/beam-hex-nurbs.mesh
//
// Description:  This example code solves a simple 2D/3D H(div) diffusion
//               problem corresponding to the second order definite equation
//               -grad(alpha div F) + beta F = f with boundary condition F dot n
//               = <given normal field>. Here, we use a given exact solution F
//               and compute the corresponding r.h.s. f. We discretize with
//               Raviart-Thomas finite elements.
//
//               The example demonstrates the use of H(div) finite element
//               spaces with the grad-div and H(div) vector finite element mass
//               bilinear form, as well as the computation of discretization
//               error when the exact solution is known.
//
//               We recommend viewing examples 1-3 before viewing this example.

#include <fstream>
#include "mfem.hpp"

// Exact solution, F, and r.h.s., f. See below for implementation.
void F_exact(const Vector &, Vector &);
void f_exact(const Vector &, Vector &);

int main (int argc, char *argv[])
{
   int num_procs, myid;

   // 1. Initialize MPI
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   Mesh *mesh;

   if (argc == 1)
   {
      cout << "\nUsage: mpirun -np <np> ex4p <mesh_file>\n" << endl;
      MPI_Finalize();
      return 1;
   }

   // 2. Read the (serial) mesh from the given mesh file on all processors.
   //    In this 2D/3D example, we can handle triangular, quadrilateral,
   //    tetrahedral or hexahedral meshes with the same code.
   ifstream imesh(argv[1]);
   if (!imesh)
   {
      if (myid == 0)
         cerr << "\nCan not open mesh file: " << argv[1] << '\n' << endl;
      MPI_Finalize();
      return 2;
   }
   mesh = new Mesh(imesh, 1, 1);
   imesh.close();

   const int dim = mesh->Dimension();

   // 3. Refine the serial mesh on all processors to increase the resolution.
   //    In this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
   {
      int ref_levels =
         (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
         mesh->UniformRefinement();
   }

   // 4. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted. Tetrahedral
   //    meshes need to be reoriented before we can define high-order Nedelec
   //    spaces on them (this is needed in the ADS solver below).
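   //    (The ADS preconditioner constructs auxiliary Nedelec and nodal H1
   //    spaces, with the corresponding discrete curl and gradient matrices,
   //    on top of the Raviart-Thomas space, hence the orientation requirement.)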
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
         pmesh->UniformRefinement();
   }
   pmesh->ReorientTetMesh();

   // 5. Define a parallel finite element space on the parallel mesh. Here we
   //    use the lowest order Raviart-Thomas finite elements, but we can easily
   //    switch to higher-order spaces by changing the value of p.
   int p = 1;
   FiniteElementCollection *fec = new RT_FECollection(p-1, pmesh->Dimension());
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   int size = fespace->GlobalTrueVSize();
   if (myid == 0)
      cout << "Number of unknowns: " << size << endl;

   // 6. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (f,phi_i) where f is given by the function f_exact and phi_i are the
   //    basis functions in the finite element fespace.
   VectorFunctionCoefficient f(dim, f_exact);
   ParLinearForm *b = new ParLinearForm(fespace);
   b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
   b->Assemble();

   // 7. Define the solution vector x as a parallel finite element grid
   //    function corresponding to fespace. Initialize x by projecting the
   //    exact solution. Note that only values from the boundary faces will be
   //    used when eliminating the non-homogeneous boundary condition to modify
   //    the r.h.s. vector b.
   ParGridFunction x(fespace);
   VectorFunctionCoefficient F(dim, F_exact);
   x.ProjectCoefficient(F);

   // 8. Set up the parallel bilinear form corresponding to the H(div)
   //    diffusion operator -grad alpha div + beta I, by adding the div-div and
   //    the mass domain integrators and finally imposing non-homogeneous
   //    Dirichlet boundary conditions. The boundary conditions are implemented
   //    by marking all the boundary attributes from the mesh as essential
   //    (Dirichlet). After serial and parallel assembly we extract the
   //    parallel matrix A.
   Coefficient *alpha = new ConstantCoefficient(1.0);
   Coefficient *beta  = new ConstantCoefficient(1.0);
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DivDivIntegrator(*alpha));
   a->AddDomainIntegrator(new VectorFEMassIntegrator(*beta));
   a->Assemble();
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
      Array<int> ess_dofs;
      fespace->GetEssentialVDofs(ess_bdr, ess_dofs);
      a->EliminateEssentialBCFromDofs(ess_dofs, x, *b);
   }
   a->Finalize();

   // 9. Define the parallel (hypre) matrix and vectors representing a(.,.),
   //    b(.) and the finite element approximation.
   HypreParMatrix *A = a->ParallelAssemble();
   HypreParVector *B = b->ParallelAssemble();
   HypreParVector *X = x.ParallelAverage();
   *X = 0.0;

   delete a;
   delete alpha;
   delete beta;
   delete b;

   // 10. Define and apply a parallel PCG solver for AX=B with the 2D AMS or
   //     the 3D ADS preconditioners from hypre.
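   //     (In 2D the Raviart-Thomas space is a 90-degree rotation of the
   //     Nedelec space, so the H(div) problem can be preconditioned with the
   //     H(curl) AMS solver; the ADS solver handles the genuinely 3D H(div)
   //     case.)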
   HypreSolver *prec;
   if (dim == 2)
      prec = new HypreAMS(*A, fespace);
   else
      prec = new HypreADS(*A, fespace);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(1e-10);
   pcg->SetMaxIter(500);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*prec);
   pcg->Mult(*B, *X);

   // 11. Extract the parallel grid function corresponding to the finite
   //     element approximation X. This is the local solution on each
   //     processor.
   x = *X;

   // 12. Compute and print the L^2 norm of the error.
   {
      double err = x.ComputeL2Error(F);
      if (myid == 0)
         cout << "\n|| F_h - F ||_{L^2} = " << err << '\n' << endl;
   }

   // 13. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 14. (Optional) Send the solution by socket to a GLVis server.
   char vishost[] = "localhost";
   int  visport   = 19916;
   osockstream sol_sock(visport, vishost);
   sol_sock << "parallel " << num_procs << " " << myid << "\n";
   sol_sock << "solution\n";
   sol_sock.precision(8);
   pmesh->Print(sol_sock);
   x.Save(sol_sock);
   sol_sock.send();

   // 15. Free the used memory.
   delete pcg;
   delete prec;
   delete X;
   delete B;
   delete A;
   delete fespace;
   delete fec;
   delete pmesh;

   MPI_Finalize();

   return 0;
}


// The exact solution
void F_exact(const Vector &p, Vector &F)
{
   double x, y, z;

   int dim = p.Size();

   x = p(0);
   y = p(1);
   if (dim == 3)
      z = p(2);

   F(0) = cos(M_PI*x)*sin(M_PI*y);
   F(1) = cos(M_PI*y)*sin(M_PI*x);
   if (dim == 3)
      F(2) = 0.0;
}

// The right hand side
void f_exact(const Vector &p, Vector &f)
{
   double x, y, z;

   int dim = p.Size();

   x = p(0);
   y = p(1);
   if (dim == 3)
      z = p(2);

   double temp = 1 + 2*M_PI*M_PI;

   f(0) = temp*cos(M_PI*x)*sin(M_PI*y);
   f(1) = temp*cos(M_PI*y)*sin(M_PI*x);
   if (dim == 3)
      f(2) = 0;
}
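
// A quick consistency check of the data above: with alpha = beta = 1 and
// F = (cos(pi*x)*sin(pi*y), cos(pi*y)*sin(pi*x)) we have
//
//    div F        = -2*pi*sin(pi*x)*sin(pi*y),
//    -grad(div F) = ( 2*pi*pi*cos(pi*x)*sin(pi*y),
//                     2*pi*pi*cos(pi*y)*sin(pi*x) ) = 2*pi*pi * F,
//
// so -grad(alpha div F) + beta F = (1 + 2*pi*pi) F, which is the right-hand
// side computed in f_exact (the factor 'temp'). In 3D the first two components
// are independent of z and the third is zero, so the same identity holds.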