MFEM v2.0
pgridfunc.cpp
// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.googlecode.com.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#ifdef MFEM_USE_MPI

#include "fem.hpp"

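// Note: this constructor makes the ParGridFunction a view of the data owned
// by 'gf'; SetDataAndSize() aliases gf's array rather than copying it, so
// 'gf' is expected to remain valid while this object is in use.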
ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, GridFunction *gf)
{
   fes = pfes = pf;
   SetDataAndSize(gf->GetData(), gf->Size());
}

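// Note: in contrast to the aliasing constructor above, this one allocates its
// own storage through the GridFunction(pf) base constructor and then copies
// the values in from the true-dof vector 'tv' via Distribute().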
ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, HypreParVector *tv)
   : GridFunction(pf), pfes(pf)
{
   Distribute(tv);
}

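// Note: this constructor builds a new parallel space on 'pmesh' that mirrors
// the space of 'gf' (same collection, vdim and ordering); only the vector is
// sized here, the values of 'gf' are not copied.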
ParGridFunction::ParGridFunction(ParMesh *pmesh, GridFunction *gf)
{
   // duplicate the FiniteElementCollection from 'gf'
   fec = FiniteElementCollection::New(gf->FESpace()->FEColl()->Name());
   fes = pfes = new ParFiniteElementSpace(pmesh, fec, gf->FESpace()->GetVDim(),
                                          gf->FESpace()->GetOrdering());
   SetSize(pfes->GetVSize());
}

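// Note: both Update() overloads forward to the corresponding serial
// GridFunction::Update() and then record the new space in 'pfes', keeping the
// parallel pointer consistent with the base-class 'fes'.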
void ParGridFunction::Update(ParFiniteElementSpace *f)
{
   GridFunction::Update(f);
   pfes = f;
}

void ParGridFunction::Update(ParFiniteElementSpace *f, Vector &v, int v_offset)
{
   GridFunction::Update(f, v, v_offset);
   pfes = f;
}

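// Note: Distribute() fills the local dof values from a vector of true
// (globally unique) dofs by applying the dof-to-true-dof matrix, x = P * tv.
//
// Illustrative round trip (a sketch of assumed application code, not part of
// this file):
//
//    HypreParVector *X = x.ParallelAverage();   // x is a ParGridFunction
//    // ... modify the true-dof vector X, e.g. inside a linear solver ...
//    x.Distribute(X);                           // copy the result back to x
//    delete X;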
void ParGridFunction::Distribute(HypreParVector *tv)
{
   pfes->Dof_TrueDof_Matrix()->Mult(*tv, *this);
}

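// Note: ParallelAverage() accumulates the local dof values into the true-dof
// vector 'tv' (transpose of the dof-to-true-dof matrix) and then divides each
// true dof by the size of the group sharing it, i.e. shared dofs are averaged
// across ranks.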
void ParGridFunction::ParallelAverage(HypreParVector &tv)
{
   pfes->Dof_TrueDof_Matrix()->MultTranspose(*this, tv);
   pfes->DivideByGroupSize(tv);
}

HypreParVector *ParGridFunction::ParallelAverage()
{
   HypreParVector *tv = new HypreParVector(pfes->GlobalTrueVSize(),
                                           pfes->GetTrueDofOffsets());
   ParallelAverage(*tv);
   return tv;
}

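// Illustrative usage of the allocating overload (a sketch of assumed
// application code; 'pfespace' stands for some ParFiniteElementSpace*):
//
//    ParGridFunction x(pfespace);
//    x = 1.0;
//    HypreParVector *X = x.ParallelAverage();   // the caller owns X
//    // ... use X ...
//    delete X;
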
double ParGridFunction::ComputeL1Error(Coefficient *exsol[],
                                       const IntegrationRule *irs[]) const
{
   double lerr, gerr;

   lerr = GridFunction::ComputeW11Error(*exsol, NULL, 1, NULL, irs);

   MPI_Allreduce(&lerr, &gerr, 1, MPI_DOUBLE, MPI_SUM, pfes->GetComm());

   return gerr;
}

double ParGridFunction::ComputeL1Error(VectorCoefficient &exsol,
                                       const IntegrationRule *irs[]) const
{
   double lerr, gerr;

   lerr = GridFunction::ComputeL1Error(exsol, irs);

   MPI_Allreduce(&lerr, &gerr, 1, MPI_DOUBLE, MPI_SUM, pfes->GetComm());

   return gerr;
}

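// Note: for the L2 norm the local errors are squared before the MPI_SUM
// reduction and the square root of the global sum is returned, using
//
//    ||e||_{L2}^2 = sum over ranks p of ||e_p||_{L2}^2 .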
double ParGridFunction::ComputeL2Error(Coefficient *exsol[],
                                       const IntegrationRule *irs[]) const
{
   double lerr, gerr;

   lerr = GridFunction::ComputeL2Error(exsol, irs);
   lerr *= lerr;

   MPI_Allreduce(&lerr, &gerr, 1, MPI_DOUBLE, MPI_SUM, pfes->GetComm());

   return sqrt(gerr);
}

double ParGridFunction::ComputeL2Error(VectorCoefficient &exsol,
                                       const IntegrationRule *irs[],
                                       Array<int> *elems) const
{
   double lerr, gerr;

   lerr = GridFunction::ComputeL2Error(exsol, irs, elems);
   lerr *= lerr;

   MPI_Allreduce(&lerr, &gerr, 1, MPI_DOUBLE, MPI_SUM, pfes->GetComm());

   return sqrt(gerr);
}

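// Note: the max-norm errors are combined with MPI_MAX rather than MPI_SUM,
// since the global maximum error is the maximum of the per-rank maxima.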
double ParGridFunction::ComputeMaxError(Coefficient *exsol[],
                                        const IntegrationRule *irs[]) const
{
   double lerr, gerr;

   lerr = GridFunction::ComputeMaxError(exsol, irs);

   MPI_Allreduce(&lerr, &gerr, 1, MPI_DOUBLE, MPI_MAX, pfes->GetComm());

   return gerr;
}

double ParGridFunction::ComputeMaxError(VectorCoefficient &exsol,
                                        const IntegrationRule *irs[]) const
{
   double lerr, gerr;

   lerr = GridFunction::ComputeMaxError(exsol, irs);

   MPI_Allreduce(&lerr, &gerr, 1, MPI_DOUBLE, MPI_MAX, pfes->GetComm());

   return gerr;
}

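// Note: dofs reported with a negative sign by the parallel space are negated
// before writing and negated back afterwards, so the saved values follow the
// serial GridFunction sign convention while this object is left unchanged.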
void ParGridFunction::Save(ostream &out)
{
   for (int i = 0; i < size; i++)
      if (pfes->GetDofSign(i) < 0)
         data[i] = -data[i];

   GridFunction::Save(out);

   for (int i = 0; i < size; i++)
      if (pfes->GetDofSign(i) < 0)
         data[i] = -data[i];
}

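// Note: SaveAsOne() writes the grid function as a single serial GridFunction.
// Rank 0 gathers the dof counts (vertex, edge, face and remaining interior
// dofs) and the value arrays from all other ranks, then interleaves them in
// the output according to the ordering of the space (byNODES or byVDIM).
// Every rank must call this function; only rank 0 writes to 'out'.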
void ParGridFunction::SaveAsOne(ostream &out)
{
   int i, p;

   MPI_Comm MyComm;
   MPI_Status status;
   int MyRank, NRanks;

   MyComm = pfes -> GetComm();

   MPI_Comm_size(MyComm, &NRanks);
   MPI_Comm_rank(MyComm, &MyRank);

   double **values = new double*[NRanks];
   int *nv = new int[NRanks];
   int *nvdofs = new int[NRanks];
   int *nedofs = new int[NRanks];
   int *nfdofs = new int[NRanks];
   int *nrdofs = new int[NRanks];

   values[0] = data;
   nv[0]     = pfes -> GetVSize();
   nvdofs[0] = pfes -> GetNVDofs();
   nedofs[0] = pfes -> GetNEDofs();
   nfdofs[0] = pfes -> GetNFDofs();

   if (MyRank == 0)
   {
      pfes -> Save(out);
      out << '\n';

      for (p = 1; p < NRanks; p++)
      {
         MPI_Recv(&nv[p], 1, MPI_INT, p, 455, MyComm, &status);
         MPI_Recv(&nvdofs[p], 1, MPI_INT, p, 456, MyComm, &status);
         MPI_Recv(&nedofs[p], 1, MPI_INT, p, 457, MyComm, &status);
         MPI_Recv(&nfdofs[p], 1, MPI_INT, p, 458, MyComm, &status);
         values[p] = new double[nv[p]];
         MPI_Recv(values[p], nv[p], MPI_DOUBLE, p, 460, MyComm, &status);
      }

      int vdim = pfes -> GetVDim();

      for (p = 0; p < NRanks; p++)
         nrdofs[p] = nv[p]/vdim - nvdofs[p] - nedofs[p] - nfdofs[p];

      if (pfes->GetOrdering() == Ordering::byNODES)
      {
         for (int d = 0; d < vdim; d++)
         {
            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nvdofs[p]; i++)
                  out << *values[p]++ << '\n';

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nedofs[p]; i++)
                  out << *values[p]++ << '\n';

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nfdofs[p]; i++)
                  out << *values[p]++ << '\n';

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nrdofs[p]; i++)
                  out << *values[p]++ << '\n';
         }
      }
      else
      {
         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nvdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
                  out << *values[p]++ << '\n';

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nedofs[p]; i++)
               for (int d = 0; d < vdim; d++)
                  out << *values[p]++ << '\n';

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nfdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
                  out << *values[p]++ << '\n';

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nrdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
                  out << *values[p]++ << '\n';
      }

      for (p = 1; p < NRanks; p++)
      {
         values[p] -= nv[p];
         delete [] values[p];
      }
   }
   else
   {
      MPI_Send(&nv[0], 1, MPI_INT, 0, 455, MyComm);
      MPI_Send(&nvdofs[0], 1, MPI_INT, 0, 456, MyComm);
      MPI_Send(&nedofs[0], 1, MPI_INT, 0, 457, MyComm);
      MPI_Send(&nfdofs[0], 1, MPI_INT, 0, 458, MyComm);
      MPI_Send(data, nv[0], MPI_DOUBLE, 0, 460, MyComm);
   }

   delete [] values;
   delete [] nv;
   delete [] nvdofs;
   delete [] nedofs;
   delete [] nfdofs;
   delete [] nrdofs;
}

#endif