MFEM v3.1
Finite element discretization library

pgridfunc.cpp
// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#include "../config/config.hpp"

#ifdef MFEM_USE_MPI

#include "fem.hpp"
#include <iostream>
#include <limits>
using namespace std;

namespace mfem
{

ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, GridFunction *gf)
{
   fes = pfes = pf;
   SetDataAndSize(gf->GetData(), gf->Size());
}

ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, HypreParVector *tv)
   : GridFunction(pf), pfes(pf)
{
   Distribute(tv);
}

ParGridFunction::ParGridFunction(ParMesh *pmesh, GridFunction *gf,
                                 int * partitioning)
{
   // duplicate the FiniteElementCollection from 'gf'
   fec = FiniteElementCollection::New(gf->FESpace()->FEColl()->Name());
   fes = pfes = new ParFiniteElementSpace(pmesh, fec, gf->FESpace()->GetVDim(),
                                          gf->FESpace()->GetOrdering());
   SetSize(pfes->GetVSize());

   if (partitioning)
   {
      Array<int> gvdofs, lvdofs;
      Vector lnodes;
      int element_counter = 0;
      Mesh &mesh(*gf->FESpace()->GetMesh());
      int MyRank;
      MPI_Comm_rank(pfes->GetComm(), &MyRank);
      for (int i = 0; i < mesh.GetNE(); i++)
         if (partitioning[i] == MyRank)
         {
            pfes->GetElementVDofs(element_counter, lvdofs);
            gf->FESpace()->GetElementVDofs(i, gvdofs);
            gf->GetSubVector(gvdofs, lnodes);
            SetSubVector(lvdofs, lnodes);
            element_counter++;
         }
   }
}
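
// A typical use of the constructor above is promoting a GridFunction defined
// on the global serial mesh to the corresponding ParMesh, e.g. (sketch with
// hypothetical names; 'partitioning' as produced by Mesh::GeneratePartitioning
// or loaded from a file):
//
//    ParGridFunction x(&pmesh, &serial_gf, partitioning);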

void ParGridFunction::Update(ParFiniteElementSpace *f)
{
   face_nbr_data.Destroy();
   GridFunction::Update(f);
   pfes = f;
}

void ParGridFunction::Update(ParFiniteElementSpace *f, Vector &v, int v_offset)
{
   face_nbr_data.Destroy();
   GridFunction::Update(f, v, v_offset);
   pfes = f;
}

void ParGridFunction::Distribute(const Vector *tv)
{
   pfes->Dof_TrueDof_Matrix()->Mult(*tv, *this);
}

void ParGridFunction::AddDistribute(double a, const Vector *tv)
{
   pfes->Dof_TrueDof_Matrix()->Mult(a, *tv, 1.0, *this);
}

void ParGridFunction::GetTrueDofs(Vector &tv) const
{
   pfes->GetRestrictionMatrix()->Mult(*this, tv);
}

HypreParVector *ParGridFunction::GetTrueDofs() const
{
   HypreParVector *tv = pfes->NewTrueDofVector();
   GetTrueDofs(*tv);
   return tv;
}

void ParGridFunction::ParallelAverage(Vector &tv) const
{
   pfes->Dof_TrueDof_Matrix()->MultTranspose(*this, tv);
   pfes->DivideByGroupSize(tv);
}

void ParGridFunction::ParallelAverage(HypreParVector &tv) const
{
   pfes->Dof_TrueDof_Matrix()->MultTranspose(*this, tv);
   pfes->DivideByGroupSize(tv);
}

HypreParVector *ParGridFunction::ParallelAverage() const
{
   HypreParVector *tv = pfes->NewTrueDofVector();
   ParallelAverage(*tv);
   return tv;
}

void ParGridFunction::ParallelProject(Vector &tv) const
{
   pfes->GetRestrictionMatrix()->Mult(*this, tv);
}

void ParGridFunction::ParallelProject(HypreParVector &tv) const
{
   pfes->GetRestrictionMatrix()->Mult(*this, tv);
}

HypreParVector *ParGridFunction::ParallelProject() const
{
   HypreParVector *tv = pfes->NewTrueDofVector();
   ParallelProject(*tv);
   return tv;
}

void ParGridFunction::ParallelAssemble(Vector &tv) const
{
   pfes->Dof_TrueDof_Matrix()->MultTranspose(*this, tv);
}

void ParGridFunction::ParallelAssemble(HypreParVector &tv) const
{
   pfes->Dof_TrueDof_Matrix()->MultTranspose(*this, tv);
}

HypreParVector *ParGridFunction::ParallelAssemble() const
{
   HypreParVector *tv = pfes->NewTrueDofVector();
   ParallelAssemble(*tv);
   return tv;
}

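// The methods above transfer data between the local dofs (which may be
// duplicated between processors) and the true dofs (each owned by exactly one
// processor). In terms of the parallel prolongation matrix P
// (Dof_TrueDof_Matrix) and the restriction matrix R:
//
//    Distribute(X)         x = P X     (true dofs -> local dofs)
//    GetTrueDofs(X),
//    ParallelProject(X)    X = R x     (pick the owned local values)
//    ParallelAssemble(X)   X = P^t x   (sum contributions over processors)
//    ParallelAverage(X)    X = P^t x, then divided by the dof group sizes
//
// A common round trip, sketched with hypothetical names:
//
//    HypreParVector *X = x.ParallelProject(); // one entry per true dof
//    x.Distribute(X);                         // make shared dofs consistent
//    delete X;
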
void ParGridFunction::ExchangeFaceNbrData()
{
   pfes->ExchangeFaceNbrData();

   if (pfes->GetFaceNbrVSize() <= 0)
   {
      return;
   }

   ParMesh *pmesh = pfes->GetParMesh();

   face_nbr_data.SetSize(pfes->GetFaceNbrVSize());
   Vector send_data(pfes->send_face_nbr_ldof.Size_of_connections());

   int *send_offset = pfes->send_face_nbr_ldof.GetI();
   int *send_ldof = pfes->send_face_nbr_ldof.GetJ();
   int *recv_offset = pfes->face_nbr_ldof.GetI();
   MPI_Comm MyComm = pfes->GetComm();

   int num_face_nbrs = pmesh->GetNFaceNeighbors();
   MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
   MPI_Request *send_requests = requests;
   MPI_Request *recv_requests = requests + num_face_nbrs;
   MPI_Status *statuses = new MPI_Status[num_face_nbrs];

   for (int i = 0; i < send_data.Size(); i++)
   {
      send_data[i] = data[send_ldof[i]];
   }

   for (int fn = 0; fn < num_face_nbrs; fn++)
   {
      int nbr_rank = pmesh->GetFaceNbrRank(fn);
      int tag = 0;

      MPI_Isend(&send_data(send_offset[fn]),
                send_offset[fn+1] - send_offset[fn],
                MPI_DOUBLE, nbr_rank, tag, MyComm, &send_requests[fn]);

      MPI_Irecv(&face_nbr_data(recv_offset[fn]),
                recv_offset[fn+1] - recv_offset[fn],
                MPI_DOUBLE, nbr_rank, tag, MyComm, &recv_requests[fn]);
   }

   MPI_Waitall(num_face_nbrs, send_requests, statuses);
   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   delete [] statuses;
   delete [] requests;
}

double ParGridFunction::GetValue(int i, const IntegrationPoint &ip, int vdim)
const
{
   Array<int> dofs;
   Vector DofVal, LocVec;
   int nbr_el_no = i - pfes->GetParMesh()->GetNE();
   if (nbr_el_no >= 0)
   {
      int fes_vdim = pfes->GetVDim();
      pfes->GetFaceNbrElementVDofs(nbr_el_no, dofs);
      if (fes_vdim > 1)
      {
         int s = dofs.Size()/fes_vdim;
         Array<int> _dofs(&dofs[(vdim-1)*s], s);
         face_nbr_data.GetSubVector(_dofs, LocVec);
         DofVal.SetSize(s);
      }
      else
      {
         face_nbr_data.GetSubVector(dofs, LocVec);
         DofVal.SetSize(dofs.Size());
      }
      pfes->GetFaceNbrFE(nbr_el_no)->CalcShape(ip, DofVal);
   }
   else
   {
      fes->GetElementDofs(i, dofs);
      fes->DofsToVDofs(vdim-1, dofs);
      DofVal.SetSize(dofs.Size());
      fes->GetFE(i)->CalcShape(ip, DofVal);
      GetSubVector(dofs, LocVec);
   }

   return (DofVal * LocVec);
}
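
// Note: for i >= GetParMesh()->GetNE() the value is evaluated in face-neighbor
// element i - GetNE() using face_nbr_data, so ExchangeFaceNbrData() must have
// been called before evaluating in a neighboring processor's element.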

void ParGridFunction::ProjectCoefficient(Coefficient &coeff)
{
   DeltaCoefficient *delta_c = dynamic_cast<DeltaCoefficient *>(&coeff);

   if (delta_c == NULL)
   {
      GridFunction::ProjectCoefficient(coeff);
   }
   else
   {
      double loc_integral, glob_integral;

      ProjectDeltaCoefficient(*delta_c, loc_integral);

      MPI_Allreduce(&loc_integral, &glob_integral, 1, MPI_DOUBLE, MPI_SUM,
                    pfes->GetComm());

      (*this) *= (delta_c->Scale() / glob_integral);
   }
}
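
// For a DeltaCoefficient, the projection above builds the local approximation
// together with its local integral and then rescales by the global integral,
// so that the result integrates to delta_c->Scale() over the whole parallel
// mesh. A minimal sketch with hypothetical names:
//
//    DeltaCoefficient delta(0.5, 0.5, 1.0); // 2D point source of strength 1
//    ParGridFunction x(&pfes);
//    x.ProjectCoefficient(delta);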

void ParGridFunction::ProjectDiscCoefficient(VectorCoefficient &coeff)
{
   // local maximal element attribute for each dof
   Array<int> ldof_attr;

   // local projection
   GridFunction::ProjectDiscCoefficient(coeff, ldof_attr);

   // global maximal element attribute for each dof
   Array<int> gdof_attr;
   ldof_attr.Copy(gdof_attr);
   GroupCommunicator &gcomm = pfes->GroupComm();
   gcomm.Reduce<int>(gdof_attr, GroupCommunicator::Max);
   gcomm.Bcast(gdof_attr);

   // set local value to zero if global maximal element attribute is larger than
   // the local one, and mark (in gdof_attr) if we have the correct value
   for (int i = 0; i < pfes->GetVSize(); i++)
   {
      if (gdof_attr[i] > ldof_attr[i])
      {
         (*this)(i) = 0.0;
         gdof_attr[i] = 0;
      }
      else
      {
         gdof_attr[i] = 1;
      }
   }

   // parallel averaging plus interpolation to determine final values
   HypreParVector *tv = this->ParallelAverage();
   gcomm.Reduce<int>(gdof_attr, GroupCommunicator::Sum);
   gcomm.Bcast(gdof_attr);
   for (int i = 0; i < fes->GetVSize(); i++)
   {
      (*this)(i) /= gdof_attr[i];
   }
   this->ParallelAssemble(*tv);
   this->Distribute(tv);
   delete tv;
}
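
// Example of the rule above: a dof shared by an element of attribute 1 on this
// processor and an element of attribute 2 on a neighboring processor receives
// its value from the attribute-2 side; where several elements share the same
// maximal attribute, their values are averaged.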


void ParGridFunction::Save(std::ostream &out) const
{
   for (int i = 0; i < size; i++)
   {
      if (pfes->GetDofSign(i) < 0) { data[i] = -data[i]; }
   }

   GridFunction::Save(out);

   for (int i = 0; i < size; i++)
   {
      if (pfes->GetDofSign(i) < 0) { data[i] = -data[i]; }
   }
}
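
// The sign flips above temporarily convert dofs stored with a negative sign
// (see ParFiniteElementSpace::GetDofSign) to the convention used by the serial
// GridFunction output, and the data is restored afterwards. Each processor
// writes only its own piece; SaveAsOne() below merges all pieces into a single
// stream on rank 0.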

void ParGridFunction::SaveAsOne(std::ostream &out)
{
   int i, p;

   MPI_Comm MyComm;
   MPI_Status status;
   int MyRank, NRanks;

   MyComm = pfes -> GetComm();

   MPI_Comm_size(MyComm, &NRanks);
   MPI_Comm_rank(MyComm, &MyRank);

   double **values = new double*[NRanks];
   int *nv = new int[NRanks];
   int *nvdofs = new int[NRanks];
   int *nedofs = new int[NRanks];
   int *nfdofs = new int[NRanks];
   int *nrdofs = new int[NRanks];

   values[0] = data;
   nv[0] = pfes -> GetVSize();
   nvdofs[0] = pfes -> GetNVDofs();
   nedofs[0] = pfes -> GetNEDofs();
   nfdofs[0] = pfes -> GetNFDofs();

   if (MyRank == 0)
   {
      pfes -> Save(out);
      out << '\n';

      for (p = 1; p < NRanks; p++)
      {
         MPI_Recv(&nv[p], 1, MPI_INT, p, 455, MyComm, &status);
         MPI_Recv(&nvdofs[p], 1, MPI_INT, p, 456, MyComm, &status);
         MPI_Recv(&nedofs[p], 1, MPI_INT, p, 457, MyComm, &status);
         MPI_Recv(&nfdofs[p], 1, MPI_INT, p, 458, MyComm, &status);
         values[p] = new double[nv[p]];
         MPI_Recv(values[p], nv[p], MPI_DOUBLE, p, 460, MyComm, &status);
      }

      int vdim = pfes -> GetVDim();

      for (p = 0; p < NRanks; p++)
      {
         nrdofs[p] = nv[p]/vdim - nvdofs[p] - nedofs[p] - nfdofs[p];
      }

      if (pfes->GetOrdering() == Ordering::byNODES)
      {
         for (int d = 0; d < vdim; d++)
         {
            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nvdofs[p]; i++)
               {
                  out << *values[p]++ << '\n';
               }

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nedofs[p]; i++)
               {
                  out << *values[p]++ << '\n';
               }

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nfdofs[p]; i++)
               {
                  out << *values[p]++ << '\n';
               }

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nrdofs[p]; i++)
               {
                  out << *values[p]++ << '\n';
               }
         }
      }
      else
      {
         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nvdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
               {
                  out << *values[p]++ << '\n';
               }

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nedofs[p]; i++)
               for (int d = 0; d < vdim; d++)
               {
                  out << *values[p]++ << '\n';
               }

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nfdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
               {
                  out << *values[p]++ << '\n';
               }

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nrdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
               {
                  out << *values[p]++ << '\n';
               }
      }

      for (p = 1; p < NRanks; p++)
      {
         values[p] -= nv[p];
         delete [] values[p];
      }
   }
   else
   {
      MPI_Send(&nv[0], 1, MPI_INT, 0, 455, MyComm);
      MPI_Send(&nvdofs[0], 1, MPI_INT, 0, 456, MyComm);
      MPI_Send(&nedofs[0], 1, MPI_INT, 0, 457, MyComm);
      MPI_Send(&nfdofs[0], 1, MPI_INT, 0, 458, MyComm);
      MPI_Send(data, nv[0], MPI_DOUBLE, 0, 460, MyComm);
   }

   delete [] values;
   delete [] nv;
   delete [] nvdofs;
   delete [] nedofs;
   delete [] nfdofs;
   delete [] nrdofs;
}

double GlobalLpNorm(const double p, double loc_norm, MPI_Comm comm)
{
   double glob_norm;

   if (p < numeric_limits<double>::infinity())
   {
      // negative quadrature weights may cause the error to be negative
      if (loc_norm < 0.0)
      {
         loc_norm = -pow(-loc_norm, p);
      }
      else
      {
         loc_norm = pow(loc_norm, p);
      }

      MPI_Allreduce(&loc_norm, &glob_norm, 1, MPI_DOUBLE, MPI_SUM, comm);

      if (glob_norm < 0.0)
      {
         glob_norm = -pow(-glob_norm, 1.0/p);
      }
      else
      {
         glob_norm = pow(glob_norm, 1.0/p);
      }
   }
   else
   {
      MPI_Allreduce(&loc_norm, &glob_norm, 1, MPI_DOUBLE, MPI_MAX, comm);
   }

   return glob_norm;
}
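
// In other words, for finite p the local norms are combined as
//
//    glob_norm = ( sum_i loc_norm_i^p )^(1/p),
//
// where the sum runs over all processors, while p = infinity reduces to the
// maximum of the local norms. For example, local L2 norms of 3 and 4 on two
// processors give a global L2 norm of 5.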


void ParGridFunction::ComputeFlux(BilinearFormIntegrator &blfi,
                                  GridFunction &flux_, int wcoef, int subdomain)
{
   // In this context we know that flux should be a ParGridFunction
   ParGridFunction& flux = dynamic_cast<ParGridFunction&>(flux_);

   ParFiniteElementSpace *ffes = flux.ParFESpace();

   Array<int> count(flux.Size());
   SumFluxAndCount(blfi, flux, count, 0, subdomain);

   if (ffes->Conforming()) // FIXME: nonconforming
   {
      // Accumulate flux and counts in parallel

      ffes->GroupComm().Reduce<double>(flux, GroupCommunicator::Sum);
      ffes->GroupComm().Bcast<double>(flux);

      ffes->GroupComm().Reduce<int>(count, GroupCommunicator::Sum);
      ffes->GroupComm().Bcast<int>(count);
   }
   else
   {
      MFEM_WARNING("Averaging on processor boundaries not implemented for "
                   "NC meshes yet.");
   }

   // complete averaging
   for (int i = 0; i < count.Size(); i++)
   {
      if (count[i] != 0) { flux(i) /= count[i]; }
   }

   if (ffes->Nonconforming())
   {
      // On a partially conforming flux space, project on the conforming space.
      // Using this code may lead to worse refinements in ex6, so we do not use
      // it by default.

      // Vector conf_flux;
      // flux.ConformingProject(conf_flux);
      // flux.ConformingProlongate(conf_flux);
   }
}


void L2ZZErrorEstimator(BilinearFormIntegrator &flux_integrator,
                        ParGridFunction &x,
                        ParFiniteElementSpace &smooth_flux_fes,
                        ParFiniteElementSpace &flux_fes,
                        Vector &errors,
                        int norm_p, double solver_tol, int solver_max_it)
{
   // Compute fluxes in discontinuous space
   GridFunction flux(&flux_fes);
   flux = 0.0;

   FiniteElementSpace *xfes = x.FESpace();
   Array<int> xdofs, fdofs;
   Vector el_x, el_f;

   for (int i = 0; i < xfes->GetNE(); i++)
   {
      xfes->GetElementVDofs(i, xdofs);
      x.GetSubVector(xdofs, el_x);

      ElementTransformation *Transf = xfes->GetElementTransformation(i);
      flux_integrator.ComputeElementFlux(*xfes->GetFE(i), *Transf, el_x,
                                         *flux_fes.GetFE(i), el_f, false);

      flux_fes.GetElementVDofs(i, fdofs);
      flux.AddElementVector(fdofs, el_f);
   }

   // Assemble the linear system for L2 projection into the "smooth" space
   ParBilinearForm *a = new ParBilinearForm(&smooth_flux_fes);
   ParLinearForm *b = new ParLinearForm(&smooth_flux_fes);
   VectorGridFunctionCoefficient f(&flux);

   if (smooth_flux_fes.GetFE(0)->GetRangeType() == FiniteElement::SCALAR)
   {
      a->AddDomainIntegrator(new VectorMassIntegrator);
      b->AddDomainIntegrator(new VectorDomainLFIntegrator(f));
   }
   else
   {
      a->AddDomainIntegrator(new VectorFEMassIntegrator);
      b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
   }

   b->Assemble();
   a->Assemble();
   a->Finalize();

   // The destination of the projected discontinuous flux
   ParGridFunction smooth_flux(&smooth_flux_fes);
   smooth_flux = 0.0;

   HypreParMatrix* A = a->ParallelAssemble();
   HypreParVector* B = b->ParallelAssemble();
   HypreParVector* X = smooth_flux.ParallelProject();

   delete a;
   delete b;

   // Define and apply a parallel PCG solver for AX=B with the BoomerAMG
   // preconditioner from hypre.
   HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
   amg->SetPrintLevel(0);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(solver_tol);
   pcg->SetMaxIter(solver_max_it);
   pcg->SetPrintLevel(0);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(*B, *X);

   // Extract the parallel grid function corresponding to the finite element
   // approximation X. This is the local solution on each processor.
   smooth_flux = *X;

   // Proceed through the elements one by one, and find the Lp norm differences
   // between the flux as computed per element and the flux projected onto the
   // smooth_flux_fes space.
   for (int i = 0; i < xfes->GetNE(); i++)
   {
      errors(i) = ComputeElementLpDistance(norm_p, i, smooth_flux, flux);
   }

   delete A;
   delete B;
   delete X;
   delete amg;
   delete pcg;
}
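
// A typical call of the estimator above, sketched with hypothetical names for
// a diffusion problem (the discontinuous flux lives in a vector L2 space; the
// smoothed flux may live in a vector H1 space or, via the VectorFE branch
// above, in an RT space):
//
//    DiffusionIntegrator flux_integrator(coeff);
//    L2_FECollection flux_fec(order, dim);
//    ParFiniteElementSpace flux_fes(&pmesh, &flux_fec, dim);
//    H1_FECollection smooth_flux_fec(order, dim);
//    ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec, dim);
//    Vector errors(pmesh.GetNE());
//    L2ZZErrorEstimator(flux_integrator, x, smooth_flux_fes, flux_fes, errors,
//                       2, 1e-12, 200);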

}

#endif