MFEM v2.0
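This example solves the linear elasticity problem described in the header comment of the listing below. For reference, the weak formulation that the code discretizes is the standard one (written out here for convenience): find the displacement u, with u = 0 on the attribute-1 (fixed) part of the boundary, such that for all test functions v

   \int_\Omega [ \lambda (\nabla\cdot u)(\nabla\cdot v)
               + 2\mu\, \varepsilon(u) : \varepsilon(v) ] \, dx
      = \int_{\Gamma_2} f \cdot v \, ds,

where \varepsilon(u) = (\nabla u + (\nabla u)^T)/2 is the symmetric gradient, so that \sigma(u) = \lambda (\nabla\cdot u) I + 2\mu\, \varepsilon(u). This is the bilinear form assembled via the ElasticityIntegrator in step 9 of the code.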
// MFEM Example 2 - Parallel Version
//
// Compile with: make ex2p
//
// Sample runs:  mpirun -np 4 ex2p ../data/beam-tri.mesh
//               mpirun -np 4 ex2p ../data/beam-quad.mesh
//               mpirun -np 4 ex2p ../data/beam-tet.mesh
//               mpirun -np 4 ex2p ../data/beam-hex.mesh
//               mpirun -np 4 ex2p ../data/beam-quad-nurbs.mesh
//               mpirun -np 4 ex2p ../data/beam-hex-nurbs.mesh
//
// Description:  This example code solves a simple linear elasticity problem
//               describing a multi-material cantilever beam.
//
//               Specifically, we approximate the weak form of -div(sigma(u))=0
//               where sigma(u) = lambda*div(u)*I + mu*(grad(u)+grad(u)^T) is
//               the stress tensor corresponding to the displacement field u,
//               and lambda and mu are the material Lame constants. The
//               boundary conditions are u=0 on the fixed part of the boundary
//               with attribute 1, and sigma(u).n=f on the remainder, with f
//               being a constant pull-down vector on boundary elements with
//               attribute 2, and zero otherwise. The geometry of the domain is
//               assumed to be as follows:
//
//                                 +----------+----------+
//                    boundary --->| material | material |<--- boundary
//                    attribute 1  |    1     |    2     |     attribute 2
//                    (fixed)      +----------+----------+     (pull down)
//
//               The example demonstrates the use of high-order and NURBS
//               vector finite element spaces with the linear elasticity
//               bilinear form, meshes with curved elements, and the definition
//               of piecewise constant and vector coefficient objects.
//
//               We recommend viewing Example 1 before viewing this example.

#include <iostream>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <cmath>
#include "mfem.hpp"

using namespace std;

int main(int argc, char *argv[])
{
   int num_procs, myid;

   // 1. Initialize MPI.
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   Mesh *mesh;

   if (argc == 1)
   {
      if (myid == 0)
         cout << "\nUsage: mpirun -np <np> ex2p <mesh_file>\n" << endl;
      MPI_Finalize();
      return 1;
   }

   // 2. Read the (serial) mesh from the given mesh file on all processors.
   //    We can handle triangular, quadrilateral, tetrahedral or hexahedral
   //    elements with the same code.
   ifstream imesh(argv[1]);
   if (!imesh)
   {
      if (myid == 0)
         cerr << "\nCannot open mesh file: " << argv[1] << '\n' << endl;
      MPI_Finalize();
      return 2;
   }
   mesh = new Mesh(imesh, 1, 1);
   imesh.close();

   if (mesh->attributes.Max() < 2 || mesh->bdr_attributes.Max() < 2)
   {
      if (myid == 0)
         cerr << "\nInput mesh should have at least two materials and "
              << "two boundary attributes! (See schematic in ex2.cpp.)\n"
              << endl;
      MPI_Finalize();
      return 3;
   }
   int dim = mesh->Dimension();

   // 3. Select the order of the finite element discretization space. For
   //    NURBS meshes, we increase the order by degree elevation.
   int p;
   if (myid == 0)
   {
      cout << "Enter finite element space order --> " << flush;
      cin >> p;
   }
   MPI_Bcast(&p, 1, MPI_INT, 0, MPI_COMM_WORLD);

   if (mesh->NURBSext && p > mesh->NURBSext->GetOrder())
      mesh->DegreeElevate(p - mesh->NURBSext->GetOrder());

   // 4. Refine the serial mesh on all processors to increase the resolution.
   //    In this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
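   //    Since each uniform refinement multiplies the number of elements by
   //    2^dim, this is the largest integer L with GetNE() * 2^(dim*L) <= 1000,
   //    i.e. L = floor( log(1000/GetNE()) / (dim*log(2)) ), which is exactly
   //    what the formula below computes.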
   {
      int ref_levels =
         (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
         mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 1;
      for (int l = 0; l < par_ref_levels; l++)
         pmesh->UniformRefinement();
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use vector finite elements, i.e. dim copies of a scalar finite
   //    element space. We use the ordering by vector dimension (the last
   //    argument of the FiniteElementSpace constructor), which is expected by
   //    the systems version of the BoomerAMG preconditioner. For NURBS
   //    meshes, we use the (degree elevated) NURBS space associated with the
   //    mesh nodes.
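   //    (With Ordering::byVDIM, the degrees of freedom are grouped node by
   //    node, i.e. (u_x,u_y,u_z) for the first node, then for the second, and
   //    so on -- as opposed to Ordering::byNODES, which stores all the
   //    x-components first, then all the y-components, etc.)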
   FiniteElementCollection *fec;
   ParFiniteElementSpace *fespace;
   if (pmesh->NURBSext)
   {
      fec = NULL;
      fespace = (ParFiniteElementSpace *)pmesh->GetNodes()->FESpace();
   }
   else
   {
      fec = new H1_FECollection(p, dim);
      fespace = new ParFiniteElementSpace(pmesh, fec, dim, Ordering::byVDIM);
   }
   int size = fespace->GlobalTrueVSize();
   if (myid == 0)
      cout << "Number of unknowns: " << size << endl
           << "Assembling: " << flush;

   // 7. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system. In this case, b_i equals
   //    the boundary integral of f*phi_i, where f represents a "pull down"
   //    force on the Neumann part of the boundary and phi_i are the basis
   //    functions in the finite element fespace. The force is defined by the
   //    object f, which is a vector of Coefficient objects. The fact that f
   //    is non-zero only on boundary attribute 2 is indicated by the use of a
   //    piecewise constant coefficient for its last component.
   VectorArrayCoefficient f(dim);
   for (int i = 0; i < dim-1; i++)
      f.Set(i, new ConstantCoefficient(0.0));
   {
      Vector pull_force(pmesh->bdr_attributes.Max());
      pull_force = 0.0;
      pull_force(1) = -1.0e-2;
      f.Set(dim-1, new PWConstCoefficient(pull_force));
   }

   ParLinearForm *b = new ParLinearForm(fespace);
   b->AddBoundaryIntegrator(new VectorBoundaryLFIntegrator(f));
   if (myid == 0)
      cout << "r.h.s. ... " << flush;
   b->Assemble();

   // 8. Define the solution vector x as a parallel finite element grid
   //    function corresponding to fespace. Initialize x with an initial
   //    guess of zero, which satisfies the boundary conditions.
   ParGridFunction x(fespace);
   x = 0.0;

   // 9. Set up the parallel bilinear form a(.,.) on the finite element space
   //    corresponding to the linear elasticity integrator with piecewise
   //    constant coefficients lambda and mu. The boundary conditions are
   //    implemented by marking only boundary attribute 1 as essential. After
   //    serial/parallel assembly we extract the corresponding parallel
   //    matrix.
   Vector lambda(pmesh->attributes.Max());
   lambda = 1.0;
   lambda(0) = lambda(1)*50;
   PWConstCoefficient lambda_func(lambda);
   Vector mu(pmesh->attributes.Max());
   mu = 1.0;
   mu(0) = mu(1)*50;
   PWConstCoefficient mu_func(mu);

   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new ElasticityIntegrator(lambda_func, mu_func));
   if (myid == 0)
      cout << "matrix ... " << flush;
   a->Assemble();
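   //    The block below imposes the homogeneous Dirichlet condition u = 0 on
   //    boundary attribute 1: the corresponding essential dofs are eliminated
   //    from the matrix, using their (zero) values from x to adjust the
   //    right-hand side b.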
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 0;
      ess_bdr[0] = 1;
      Array<int> ess_dofs;
      fespace->GetEssentialVDofs(ess_bdr, ess_dofs);
      a->EliminateEssentialBCFromDofs(ess_dofs, x, *b);
   }
   a->Finalize();
   if (myid == 0)
      cout << "done." << endl;

   // 10. Define the parallel (hypre) matrix and vectors representing a(.,.),
   //     b(.) and the finite element approximation.
   HypreParMatrix *A = a->ParallelAssemble();
   HypreParVector *B = b->ParallelAssemble();
   HypreParVector *X = x.ParallelAverage();

   delete a;
   delete b;

   // 11. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
   //     preconditioner from hypre.
   HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
   amg->SetSystemsOptions(dim);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(1e-8);
   pcg->SetMaxIter(500);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(*B, *X);

   // 12. Extract the parallel grid function corresponding to the finite
   //     element approximation X. This is the local solution on each
   //     processor.
   x = *X;

   // 13. For non-NURBS meshes, make the mesh curved based on the finite
   //     element space. This means that we define the mesh elements through a
   //     fespace-based transformation of the reference element. This allows
   //     us to save the displaced mesh as a curved mesh when using a
   //     high-order finite element displacement field. We assume that the
   //     initial mesh (read from the file) is not a higher-order curved mesh
   //     compared to the chosen FE space.
   if (!pmesh->NURBSext)
      pmesh->SetNodalFESpace(fespace);

   // 14. Save in parallel the displaced mesh and the inverted solution (which
   //     gives the backward displacements to the original grid). This output
   //     can be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      GridFunction *nodes = pmesh->GetNodes();
      *nodes += x;
      x *= -1;

      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 15. (Optional) Send the above data by socket to a GLVis server. Use the
   //     "n" and "b" keys in GLVis to visualize the displacements.
   char vishost[] = "localhost";
   int  visport   = 19916;
   osockstream sol_sock(visport, vishost);
   sol_sock << "parallel " << num_procs << " " << myid << "\n";
   sol_sock << "solution\n";
   sol_sock.precision(8);
   pmesh->Print(sol_sock);
   x.Save(sol_sock);
   sol_sock.send();

   // 16. Free the used memory.
   delete pcg;
   delete amg;
   delete X;
   delete B;
   delete A;

   if (fec)
   {
      delete fespace;
      delete fec;
   }
   delete pmesh;

   MPI_Finalize();

   return 0;
}