communication.cpp
// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.googlecode.com.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#ifdef MFEM_USE_MPI

#include <iostream> // for cerr/endl in the error check in Create() below

#include <mpi.h>

#include "array.hpp"
#include "table.hpp"
#include "sets.hpp"
#include "communication.hpp"

// Replace the global MPI ranks stored in 'group_lproc' with compact local
// processor (lproc) numbers and build the inverse map 'lproc_proc'.
void GroupTopology::ProcToLProc()
{
   int NRanks;
   MPI_Comm_size(MyComm, &NRanks);

   Array<int> proc_lproc(NRanks); // array of size number of processors!
   proc_lproc = -1;

   // assign lproc numbers in order of first appearance in group_lproc
   int lproc_counter = 0;
   for (int i = 0; i < group_lproc.Size_of_connections(); i++)
      if (proc_lproc[group_lproc.GetJ()[i]] < 0)
         proc_lproc[group_lproc.GetJ()[i]] = lproc_counter++;
   // Note: group_lproc.GetJ()[0] == MyRank --> proc_lproc[MyRank] == 0

   // invert the map: lproc number -> global rank
   lproc_proc.SetSize(lproc_counter);
   for (int i = 0; i < NRanks; i++)
      if (proc_lproc[i] >= 0)
         lproc_proc[proc_lproc[i]] = i;

   // renumber the group-to-processor table in place
   for (int i = 0; i < group_lproc.Size_of_connections(); i++)
      group_lproc.GetJ()[i] = proc_lproc[group_lproc.GetJ()[i]];

   // renumber the group masters
   for (int i = 0; i < NGroups(); i++)
      groupmaster_lproc[i] = proc_lproc[groupmaster_lproc[i]];
}
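
// Illustration (ranks assumed for exposition): on MyRank == 7, if the
// connections in group_lproc list the ranks 7, 3, 9, 3 in order of first
// appearance, ProcToLProc() assigns 7 -> 0, 3 -> 1, 9 -> 2, sets
// lproc_proc = {7, 3, 9}, and rewrites group_lproc and groupmaster_lproc
// in terms of these local numbers.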

void GroupTopology::Create(ListOfIntegerSets &groups, int mpitag)
{
   groups.AsTable(group_lproc); // group_lproc = group_proc

   // Row i of group_mgroupandproc holds [i, ranks in group i]; these rows
   // are the messages the group masters send out below.
   Table group_mgroupandproc;
   group_mgroupandproc.SetDims(NGroups(),
                               group_lproc.Size_of_connections() + NGroups());
   for (int i = 0; i < NGroups(); i++)
   {
      int j = group_mgroupandproc.GetI()[i];
      group_mgroupandproc.GetI()[i+1] = j + group_lproc.RowSize(i) + 1;
      group_mgroupandproc.GetJ()[j] = i;
      j++;
      for (int k = group_lproc.GetI()[i];
           j < group_mgroupandproc.GetI()[i+1]; j++, k++)
         group_mgroupandproc.GetJ()[j] = group_lproc.GetJ()[k];
   }

   // build groupmaster_lproc with lproc = proc
   groupmaster_lproc.SetSize(NGroups());

   // simplest choice of the group owner
   for (int i = 0; i < NGroups(); i++)
      groupmaster_lproc[i] = groups.PickElementInSet(i);

   // load-balanced choice of the group owner, which however can lead to
   // isolated dofs
   // for (i = 0; i < NGroups(); i++)
   //    groupmaster_lproc[i] = groups.PickRandomElementInSet(i);

   ProcToLProc();

   // build group_mgroup
   group_mgroup.SetSize(NGroups());

   // count the messages: a master sends the group description to every other
   // group member; a non-master receives one message per group it belongs to
   int send_counter = 0;
   int recv_counter = 0;
   for (int i = 1; i < NGroups(); i++)
      if (groupmaster_lproc[i] != 0) // we are not the master
         recv_counter++;
      else
         send_counter += group_lproc.RowSize(i)-1;

   MPI_Request *requests = new MPI_Request[send_counter];
   MPI_Status  *statuses = new MPI_Status[send_counter];

   int max_recv_size = 0;
   send_counter = 0;
   for (int i = 1; i < NGroups(); i++)
   {
      if (groupmaster_lproc[i] == 0) // we are the master
      {
         group_mgroup[i] = i;

         // send the row [group id, ranks] to every other group member
         for (int j = group_lproc.GetI()[i];
              j < group_lproc.GetI()[i+1]; j++)
         {
            if (group_lproc.GetJ()[j] != 0)
            {
               MPI_Isend(group_mgroupandproc.GetRow(i),
                         group_mgroupandproc.RowSize(i),
                         MPI_INT,
                         lproc_proc[group_lproc.GetJ()[j]],
                         mpitag,
                         MyComm,
                         &requests[send_counter]);
               send_counter++;
            }
         }
      }
      else // we are not the master
         if (max_recv_size < group_lproc.RowSize(i))
            max_recv_size = group_lproc.RowSize(i);
   }
   max_recv_size++; // +1 for the group id that precedes the ranks

   IntegerSet group;
   if (recv_counter > 0)
   {
      int count;
      MPI_Status status;
      int *recv_buf = new int[max_recv_size];
      for ( ; recv_counter > 0; recv_counter--)
      {
         MPI_Recv(recv_buf, max_recv_size, MPI_INT,
                  MPI_ANY_SOURCE, mpitag, MyComm, &status);

         MPI_Get_count(&status, MPI_INT, &count);

         // recv_buf[0] is the master's group id; the remaining entries are
         // the ranks, which identify the group locally via a set lookup
         group.Recreate(count-1, recv_buf+1);
         int g = groups.Lookup(group);
         group_mgroup[g] = recv_buf[0];

         // consistency check: the message must come from the group's master
         if (lproc_proc[groupmaster_lproc[g]] != status.MPI_SOURCE)
         {
            cerr << "\n\n\nGroupTopology::Create: "
                 << MyRank() << ": ERROR\n\n\n" << endl;
            mfem_error();
         }
      }
      delete [] recv_buf;
   }

   MPI_Waitall(send_counter, requests, statuses);

   delete [] statuses;
   delete [] requests;
}
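
// Usage sketch (illustrative only; the constructor and the exact set
// interface used here are assumptions beyond what this file shows):
//
//    GroupTopology gtopo(MPI_COMM_WORLD);
//    ListOfIntegerSets groups;
//    IntegerSet s;
//    s.Recreate(1, &my_rank);    // group 0: the local group {MyRank}
//    groups.Insert(s);
//    // ... insert one set per shared entity group, with MyRank a member ...
//    gtopo.Create(groups, 822);  // 822: an arbitrary MPI tag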


GroupCommunicator::GroupCommunicator(GroupTopology &gt)
   : gtopo(gt)
{
   requests = NULL;
   statuses = NULL;
}

// Build the group -> local-dof table 'group_ldof' from the dof -> group map
// 'ldof_group', using the usual two-pass Table construction: first count the
// entries per row, then fill them in.
void GroupCommunicator::Create(Array<int> &ldof_group)
{
   group_ldof.MakeI(gtopo.NGroups());
   for (int i = 0; i < ldof_group.Size(); i++)
   {
      int group = ldof_group[i];
      if (group != 0) // group 0 is the local group; its dofs are not shared
         group_ldof.AddAColumnInRow(group);
   }
   group_ldof.MakeJ();

   for (int i = 0; i < ldof_group.Size(); i++)
   {
      int group = ldof_group[i];
      if (group != 0)
         group_ldof.AddConnection(group, i);
   }
   group_ldof.ShiftUpI();

   Finalize();
}
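
// For example (values assumed for exposition): ldof_group = {0, 2, 1, 2}
// yields the rows group 1 -> {2} and group 2 -> {1, 3}, i.e. each row of
// group_ldof lists the local dofs that belong to that communication group.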

void GroupCommunicator::Finalize()
{
   int request_counter = 0;
   int reduce_buf_size = 0;

   // count the requests and buffer entries needed by Reduce(): a non-master
   // posts one send per group, a master posts one receive per other member
   for (int gr = 1; gr < group_ldof.Size(); gr++)
      if (group_ldof.RowSize(gr) != 0)
      {
         int gr_requests;
         if (!gtopo.IAmMaster(gr)) // we are not the master
            gr_requests = 1;
         else
            gr_requests = gtopo.GetGroupSize(gr)-1;

         request_counter += gr_requests;
         reduce_buf_size += gr_requests * group_ldof.RowSize(gr);
      }

   requests = new MPI_Request[request_counter];
   statuses = new MPI_Status[request_counter];

   // sized for Reduce(); Bcast() needs at most one row per group, which
   // this also covers
   group_buf.SetSize(reduce_buf_size);
}

// Broadcast the master's values for each group to the other group members;
// on return, the entries of 'ldata' in every shared group agree with the
// group master's values.
void GroupCommunicator::Bcast(Array<int> &ldata)
{
   if (group_buf.Size() == 0)
      return;

   int i, gr, request_counter = 0;

   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      // ignore groups without dofs
      if (group_ldof.RowSize(gr) == 0)
         continue;

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         MPI_Irecv(&group_buf[group_ldof.GetI()[gr]],
                   group_ldof.RowSize(gr),
                   MPI_INT,
                   gtopo.GetGroupMasterRank(gr),
                   40822 + gtopo.GetGroupMasterGroup(gr),
                   gtopo.GetComm(),
                   &requests[request_counter]);
         request_counter++;
      }
      else // we are the master
      {
         // fill send buffer
         for (i = group_ldof.GetI()[gr]; i < group_ldof.GetI()[gr+1]; i++)
            group_buf[i] = ldata[group_ldof.GetJ()[i]];

         const int  gs  = gtopo.GetGroupSize(gr);
         const int *nbs = gtopo.GetGroup(gr);
         for (i = 0; i < gs; i++)
         {
            if (nbs[i] != 0) // skip ourselves (lproc 0)
            {
               MPI_Isend(&group_buf[group_ldof.GetI()[gr]],
                         group_ldof.RowSize(gr),
                         MPI_INT,
                         gtopo.GetNeighborRank(nbs[i]),
                         40822 + gtopo.GetGroupMasterGroup(gr),
                         gtopo.GetComm(),
                         &requests[request_counter]);
               request_counter++;
            }
         }
      }
   }

   MPI_Waitall(request_counter, requests, statuses);

   // copy the received data from the buffer to ldata
   for (gr = 1; gr < group_ldof.Size(); gr++)
      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         for (i = group_ldof.GetI()[gr]; i < group_ldof.GetI()[gr+1]; i++)
            ldata[group_ldof.GetJ()[i]] = group_buf[i];
      }
}

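// Gather each group's values at the group master and combine them there with
// the master's own values using bitwise OR (the reverse of Bcast); only the
// master's entries of 'ldata' are updated.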
void GroupCommunicator::Reduce(Array<int> &ldata)
{
   if (group_buf.Size() == 0)
      return;

   int i, gr, request_counter = 0, buf_offset = 0;

   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      // ignore groups without dofs
      if (group_ldof.RowSize(gr) == 0)
         continue;

      const int *ldofs = group_ldof.GetRow(gr);
      const int nldofs = group_ldof.RowSize(gr);

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         // send our values for this group to its master
         for (i = 0; i < nldofs; i++)
            group_buf[buf_offset+i] = ldata[ldofs[i]];

         MPI_Isend(&group_buf[buf_offset],
                   nldofs,
                   MPI_INT,
                   gtopo.GetGroupMasterRank(gr),
                   43822 + gtopo.GetGroupMasterGroup(gr),
                   gtopo.GetComm(),
                   &requests[request_counter]);
         request_counter++;
         buf_offset += nldofs;
      }
      else // we are the master
      {
         // receive one block of values from every other group member
         const int  gs  = gtopo.GetGroupSize(gr);
         const int *nbs = gtopo.GetGroup(gr);
         for (i = 0; i < gs; i++)
         {
            if (nbs[i] != 0)
            {
               MPI_Irecv(&group_buf[buf_offset],
                         nldofs,
                         MPI_INT,
                         gtopo.GetNeighborRank(nbs[i]),
                         43822 + gtopo.GetGroupMasterGroup(gr),
                         gtopo.GetComm(),
                         &requests[request_counter]);
               request_counter++;
               buf_offset += nldofs;
            }
         }
      }
   }

   MPI_Waitall(request_counter, requests, statuses);

   // perform the reduce operation (here: bitwise OR at the master)
   buf_offset = 0;
   for (gr = 1; gr < group_ldof.Size(); gr++)
   {
      // ignore groups without dofs
      if (group_ldof.RowSize(gr) == 0)
         continue;

      const int nldofs = group_ldof.RowSize(gr);

      if (!gtopo.IAmMaster(gr)) // we are not the master
      {
         buf_offset += nldofs; // skip the block we sent
      }
      else // we are the master
      {
         const int *ldofs = group_ldof.GetRow(gr);
         const int nb = gtopo.GetGroupSize(gr)-1;
         for (i = 0; i < nldofs; i++)
         {
            int data = ldata[ldofs[i]];
            for (int j = 0; j < nb; j++)
               data |= group_buf[buf_offset+j*nldofs+i];
            ldata[ldofs[i]] = data;
         }
         buf_offset += nb * nldofs;
      }
   }
}
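
// Usage sketch (illustrative; 'ldof_group' and the 0/1 marker data are
// assumptions for the example, not prescribed by this file):
//
//    GroupCommunicator gcomm(gtopo);
//    gcomm.Create(ldof_group);   // ldof_group[i] = group of local dof i
//
//    Array<int> marker(ldof_group.Size());
//    // ... set marker[i] to 0 or 1 locally ...
//    gcomm.Reduce(marker);       // OR the markers at each group master
//    gcomm.Bcast(marker);        // send the combined result back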

GroupCommunicator::~GroupCommunicator()
{
   delete [] statuses;
   delete [] requests;
}

#endif