pgridfunc.cpp

// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#include "../config/config.hpp"

#ifdef MFEM_USE_MPI

#include "fem.hpp"
#include <iostream>
#include <limits>
using namespace std;

namespace mfem
{

ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, GridFunction *gf)
{
   fes = pfes = pf;
   SetDataAndSize(gf->GetData(), gf->Size());
}

ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, HypreParVector *tv)
   : GridFunction(pf), pfes(pf)
{
   Distribute(tv);
}

ParGridFunction::ParGridFunction(ParMesh *pmesh, GridFunction *gf,
                                 int * partitioning)
{
   // duplicate the FiniteElementCollection from 'gf'
   fec = FiniteElementCollection::New(gf->FESpace()->FEColl()->Name());
   fes = pfes = new ParFiniteElementSpace(pmesh, fec, gf->FESpace()->GetVDim(),
                                          gf->FESpace()->GetOrdering());
   SetSize(pfes->GetVSize());

   if (partitioning)
   {
      Array<int> gvdofs, lvdofs;
      Vector lnodes;
      int element_counter = 0;
      Mesh & mesh(*gf->FESpace()->GetMesh());
      int MyRank;
      MPI_Comm_rank(pfes->GetComm(), &MyRank);
      for (int i = 0; i < mesh.GetNE(); i++)
         if (partitioning[i] == MyRank)
         {
            pfes->GetElementVDofs(element_counter, lvdofs);
            gf->FESpace()->GetElementVDofs(i, gvdofs);
            gf->GetSubVector(gvdofs, lnodes);
            SetSubVector(lvdofs, lnodes);
            element_counter++;
         }
   }
}
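
// Usage sketch (added; not from the original file, 'mesh', 'gf' and
// 'num_procs' are illustrative): distribute a serial GridFunction with the
// same partitioning array that was used to build the ParMesh:
//
//    int *part = mesh.GeneratePartitioning(num_procs);
//    ParMesh pmesh(MPI_COMM_WORLD, mesh, part);
//    ParGridFunction x(&pmesh, &gf, part);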

void ParGridFunction::Update()
{
   face_nbr_data.Destroy();
   GridFunction::Update();
}

void ParGridFunction::SetSpace(FiniteElementSpace *f)
{
   face_nbr_data.Destroy();
   GridFunction::SetSpace(f);
   pfes = dynamic_cast<ParFiniteElementSpace*>(f);
   MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
}

void ParGridFunction::SetSpace(ParFiniteElementSpace *f)
{
   face_nbr_data.Destroy();
   GridFunction::SetSpace(f);
   pfes = f;
}

void ParGridFunction::MakeRef(FiniteElementSpace *f, double *v)
{
   face_nbr_data.Destroy();
   GridFunction::MakeRef(f, v);
   pfes = dynamic_cast<ParFiniteElementSpace*>(f);
   MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
}

void ParGridFunction::MakeRef(ParFiniteElementSpace *f, double *v)
{
   face_nbr_data.Destroy();
   GridFunction::MakeRef(f, v);
   pfes = f;
}

void ParGridFunction::MakeRef(FiniteElementSpace *f, Vector &v, int v_offset)
{
   face_nbr_data.Destroy();
   GridFunction::MakeRef(f, v, v_offset);
   pfes = dynamic_cast<ParFiniteElementSpace*>(f);
   MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
}

void ParGridFunction::MakeRef(ParFiniteElementSpace *f, Vector &v, int v_offset)
{
   face_nbr_data.Destroy();
   GridFunction::MakeRef(f, v, v_offset);
   pfes = f;
}

void ParGridFunction::Distribute(const Vector *tv)
{
   pfes->GetProlongationMatrix()->Mult(*tv, *this);
}

void ParGridFunction::AddDistribute(double a, const Vector *tv)
{
   pfes->Dof_TrueDof_Matrix()->Mult(a, *tv, 1.0, *this);
}

HypreParVector *ParGridFunction::GetTrueDofs() const
{
   HypreParVector *tv = pfes->NewTrueDofVector();
   GetTrueDofs(*tv);
   return tv;
}

void ParGridFunction::ParallelAverage(Vector &tv) const
{
   pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
   pfes->DivideByGroupSize(tv);
}

void ParGridFunction::ParallelAverage(HypreParVector &tv) const
{
   pfes->Dof_TrueDof_Matrix()->MultTranspose(*this, tv);
   pfes->DivideByGroupSize(tv);
}

HypreParVector *ParGridFunction::ParallelAverage() const
{
   HypreParVector *tv = pfes->NewTrueDofVector();
   ParallelAverage(*tv);
   return tv;
}

void ParGridFunction::ParallelProject(Vector &tv) const
{
   pfes->GetRestrictionMatrix()->Mult(*this, tv);
}

void ParGridFunction::ParallelProject(HypreParVector &tv) const
{
   pfes->GetRestrictionMatrix()->Mult(*this, tv);
}

HypreParVector *ParGridFunction::ParallelProject() const
{
   HypreParVector *tv = pfes->NewTrueDofVector();
   ParallelProject(*tv);
   return tv;
}

void ParGridFunction::ParallelAssemble(Vector &tv) const
{
   pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
}

void ParGridFunction::ParallelAssemble(HypreParVector &tv) const
{
   pfes->Dof_TrueDof_Matrix()->MultTranspose(*this, tv);
}

HypreParVector *ParGridFunction::ParallelAssemble() const
{
   HypreParVector *tv = pfes->NewTrueDofVector();
   ParallelAssemble(*tv);
   return tv;
}

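// Note (added): with P the prolongation and R the true-dof restriction,
// ParallelProject computes R*x, ParallelAssemble computes P^t*x (summing
// contributions of dofs shared between ranks), and ParallelAverage divides
// the P^t*x entries by their group sizes. A minimal round trip for a
// ParGridFunction x:
//
//    HypreParVector *X = x.ParallelProject(); // X = R x
//    x.Distribute(X);                         // x = P X
//    delete X;
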
void ParGridFunction::ExchangeFaceNbrData()
{
   pfes->ExchangeFaceNbrData();

   if (pfes->GetFaceNbrVSize() <= 0)
   {
      return;
   }

   ParMesh *pmesh = pfes->GetParMesh();

   face_nbr_data.SetSize(pfes->GetFaceNbrVSize());
   Vector send_data(pfes->send_face_nbr_ldof.Size_of_connections());

   int *send_offset = pfes->send_face_nbr_ldof.GetI();
   int *send_ldof = pfes->send_face_nbr_ldof.GetJ();
   int *recv_offset = pfes->face_nbr_ldof.GetI();
   MPI_Comm MyComm = pfes->GetComm();

   int num_face_nbrs = pmesh->GetNFaceNeighbors();
   MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
   MPI_Request *send_requests = requests;
   MPI_Request *recv_requests = requests + num_face_nbrs;
   MPI_Status *statuses = new MPI_Status[num_face_nbrs];

   for (int i = 0; i < send_data.Size(); i++)
   {
      send_data[i] = data[send_ldof[i]];
   }

   for (int fn = 0; fn < num_face_nbrs; fn++)
   {
      int nbr_rank = pmesh->GetFaceNbrRank(fn);
      int tag = 0;

      MPI_Isend(&send_data(send_offset[fn]),
                send_offset[fn+1] - send_offset[fn],
                MPI_DOUBLE, nbr_rank, tag, MyComm, &send_requests[fn]);

      MPI_Irecv(&face_nbr_data(recv_offset[fn]),
                recv_offset[fn+1] - recv_offset[fn],
                MPI_DOUBLE, nbr_rank, tag, MyComm, &recv_requests[fn]);
   }

   MPI_Waitall(num_face_nbrs, send_requests, statuses);
   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   delete [] statuses;
   delete [] requests;
}
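
// Usage sketch (added): in parallel DG computations, refresh the
// face-neighbor data before reading values from elements owned by other
// ranks:
//
//    x.ExchangeFaceNbrData();
//    // ... assemble face terms that access neighbor-element values ...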

double ParGridFunction::GetValue(int i, const IntegrationPoint &ip, int vdim)
const
{
   Array<int> dofs;
   Vector DofVal, LocVec;
   int nbr_el_no = i - pfes->GetParMesh()->GetNE();
   if (nbr_el_no >= 0)
   {
      int fes_vdim = pfes->GetVDim();
      pfes->GetFaceNbrElementVDofs(nbr_el_no, dofs);
      if (fes_vdim > 1)
      {
         int s = dofs.Size()/fes_vdim;
         Array<int> _dofs(&dofs[(vdim-1)*s], s);
         face_nbr_data.GetSubVector(_dofs, LocVec);
         DofVal.SetSize(s);
      }
      else
      {
         face_nbr_data.GetSubVector(dofs, LocVec);
         DofVal.SetSize(dofs.Size());
      }
      pfes->GetFaceNbrFE(nbr_el_no)->CalcShape(ip, DofVal);
   }
   else
   {
      fes->GetElementDofs(i, dofs);
      fes->DofsToVDofs(vdim-1, dofs);
      DofVal.SetSize(dofs.Size());
      const FiniteElement *fe = fes->GetFE(i);
      MFEM_ASSERT(fe->GetMapType() == FiniteElement::VALUE, "invalid FE map type");
      fe->CalcShape(ip, DofVal);
      GetSubVector(dofs, LocVec);
   }

   return (DofVal * LocVec);
}
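
// Note (added): indices i >= GetNE() address face-neighbor elements, whose
// dof values are read from face_nbr_data and therefore require a prior call
// to ExchangeFaceNbrData(). A typical local evaluation is
//
//    double v = x.GetValue(i, ip); // vdim defaults to 1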

void ParGridFunction::ProjectCoefficient(Coefficient &coeff)
{
   DeltaCoefficient *delta_c = dynamic_cast<DeltaCoefficient *>(&coeff);

   if (delta_c == NULL)
   {
      GridFunction::ProjectCoefficient(coeff);
   }
   else
   {
      double loc_integral, glob_integral;

      ProjectDeltaCoefficient(*delta_c, loc_integral);

      MPI_Allreduce(&loc_integral, &glob_integral, 1, MPI_DOUBLE, MPI_SUM,
                    pfes->GetComm());

      (*this) *= (delta_c->Scale() / glob_integral);
   }
}
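
// Usage sketch (added; point and scale are illustrative): a projected
// DeltaCoefficient is normalized by its global integral over all ranks:
//
//    DeltaCoefficient delta(0.5, 0.5, 1.0); // 2D point source with scale 1
//    x.ProjectCoefficient(delta);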

void ParGridFunction::ProjectDiscCoefficient(VectorCoefficient &coeff)
{
   // local maximal element attribute for each dof
   Array<int> ldof_attr;

   // local projection
   GridFunction::ProjectDiscCoefficient(coeff, ldof_attr);

   // global maximal element attribute for each dof
   Array<int> gdof_attr;
   ldof_attr.Copy(gdof_attr);
   GroupCommunicator &gcomm = pfes->GroupComm();
   gcomm.Reduce<int>(gdof_attr, GroupCommunicator::Max);
   gcomm.Bcast(gdof_attr);

   // set local value to zero if global maximal element attribute is larger
   // than the local one, and mark (in gdof_attr) if we have the correct value
   for (int i = 0; i < pfes->GetVSize(); i++)
   {
      if (gdof_attr[i] > ldof_attr[i])
      {
         (*this)(i) = 0.0;
         gdof_attr[i] = 0;
      }
      else
      {
         gdof_attr[i] = 1;
      }
   }

   // parallel averaging plus interpolation to determine final values
   HypreParVector *tv = pfes->NewTrueDofVector();
   gcomm.Reduce<int>(gdof_attr, GroupCommunicator::Sum);
   gcomm.Bcast(gdof_attr);
   for (int i = 0; i < fes->GetVSize(); i++)
   {
      (*this)(i) /= gdof_attr[i];
   }
   this->ParallelAssemble(*tv);
   this->Distribute(tv);
   delete tv;
}

void ParGridFunction::ProjectDiscCoefficient(Coefficient &coeff, AvgType type)
{
   // Harmonic  (x1 ... xn) = [ (1/x1 + ... + 1/xn) / n ]^-1.
   // Arithmetic(x1 ... xn) = (x1 + ... + xn) / n.

   // Number of zones that contain a given dof.
   Array<int> zones_per_vdof;
   AccumulateAndCountZones(coeff, type, zones_per_vdof);

   // Count the zones globally.
   GroupCommunicator &gcomm = pfes->GroupComm();
   gcomm.Reduce<int>(zones_per_vdof, GroupCommunicator::Sum);
   gcomm.Bcast(zones_per_vdof);
   // Accumulate for all tdofs.
   HypreParVector *tv = this->ParallelAssemble();
   this->Distribute(tv);
   delete tv;

   ComputeMeans(type, zones_per_vdof);
}
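
// Usage sketch (added): project a discontinuous coefficient by averaging
// the values accumulated at shared dofs, here with the arithmetic mean:
//
//    x.ProjectDiscCoefficient(coeff, GridFunction::ARITHMETIC);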

void ParGridFunction::Save(std::ostream &out) const
{
   for (int i = 0; i < size; i++)
   {
      if (pfes->GetDofSign(i) < 0) { data[i] = -data[i]; }
   }

   GridFunction::Save(out);

   for (int i = 0; i < size; i++)
   {
      if (pfes->GetDofSign(i) < 0) { data[i] = -data[i]; }
   }
}
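
// Usage sketch (added; mirrors the parallel MFEM examples): every rank
// saves its local piece to its own file:
//
//    ostringstream name;
//    name << "sol." << setfill('0') << setw(6) << myid;
//    ofstream ofs(name.str().c_str());
//    ofs.precision(8);
//    x.Save(ofs);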

void ParGridFunction::SaveAsOne(std::ostream &out)
{
   int i, p;

   MPI_Comm MyComm;
   MPI_Status status;
   int MyRank, NRanks;

   MyComm = pfes -> GetComm();

   MPI_Comm_size(MyComm, &NRanks);
   MPI_Comm_rank(MyComm, &MyRank);

   double **values = new double*[NRanks];
   int *nv = new int[NRanks];
   int *nvdofs = new int[NRanks];
   int *nedofs = new int[NRanks];
   int *nfdofs = new int[NRanks];
   int *nrdofs = new int[NRanks];

   values[0] = data;
   nv[0] = pfes -> GetVSize();
   nvdofs[0] = pfes -> GetNVDofs();
   nedofs[0] = pfes -> GetNEDofs();
   nfdofs[0] = pfes -> GetNFDofs();

   if (MyRank == 0)
   {
      pfes -> Save(out);
      out << '\n';

      for (p = 1; p < NRanks; p++)
      {
         MPI_Recv(&nv[p], 1, MPI_INT, p, 455, MyComm, &status);
         MPI_Recv(&nvdofs[p], 1, MPI_INT, p, 456, MyComm, &status);
         MPI_Recv(&nedofs[p], 1, MPI_INT, p, 457, MyComm, &status);
         MPI_Recv(&nfdofs[p], 1, MPI_INT, p, 458, MyComm, &status);
         values[p] = new double[nv[p]];
         MPI_Recv(values[p], nv[p], MPI_DOUBLE, p, 460, MyComm, &status);
      }

      int vdim = pfes -> GetVDim();

      for (p = 0; p < NRanks; p++)
      {
         nrdofs[p] = nv[p]/vdim - nvdofs[p] - nedofs[p] - nfdofs[p];
      }

      if (pfes->GetOrdering() == Ordering::byNODES)
      {
         for (int d = 0; d < vdim; d++)
         {
            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nvdofs[p]; i++)
               {
                  out << *values[p]++ << '\n';
               }

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nedofs[p]; i++)
               {
                  out << *values[p]++ << '\n';
               }

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nfdofs[p]; i++)
               {
                  out << *values[p]++ << '\n';
               }

            for (p = 0; p < NRanks; p++)
               for (i = 0; i < nrdofs[p]; i++)
               {
                  out << *values[p]++ << '\n';
               }
         }
      }
      else
      {
         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nvdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
               {
                  out << *values[p]++ << '\n';
               }

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nedofs[p]; i++)
               for (int d = 0; d < vdim; d++)
               {
                  out << *values[p]++ << '\n';
               }

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nfdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
               {
                  out << *values[p]++ << '\n';
               }

         for (p = 0; p < NRanks; p++)
            for (i = 0; i < nrdofs[p]; i++)
               for (int d = 0; d < vdim; d++)
               {
                  out << *values[p]++ << '\n';
               }
      }

      for (p = 1; p < NRanks; p++)
      {
         values[p] -= nv[p];
         delete [] values[p];
      }
      out.flush();
   }
   else
   {
      MPI_Send(&nv[0], 1, MPI_INT, 0, 455, MyComm);
      MPI_Send(&nvdofs[0], 1, MPI_INT, 0, 456, MyComm);
      MPI_Send(&nedofs[0], 1, MPI_INT, 0, 457, MyComm);
      MPI_Send(&nfdofs[0], 1, MPI_INT, 0, 458, MyComm);
      MPI_Send(data, nv[0], MPI_DOUBLE, 0, 460, MyComm);
   }

   delete [] values;
   delete [] nv;
   delete [] nvdofs;
   delete [] nedofs;
   delete [] nfdofs;
   delete [] nrdofs;
}
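
// Usage sketch (added): SaveAsOne must be called on all ranks; only rank 0
// writes to the stream, so the file is opened there alone:
//
//    ofstream ofs;
//    if (myid == 0) { ofs.open("sol.gf"); ofs.precision(8); }
//    x.SaveAsOne(ofs);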

double GlobalLpNorm(const double p, double loc_norm, MPI_Comm comm)
{
   double glob_norm;

   if (p < numeric_limits<double>::infinity())
   {
      // negative quadrature weights may cause the error to be negative
      if (loc_norm < 0.0)
      {
         loc_norm = -pow(-loc_norm, p);
      }
      else
      {
         loc_norm = pow(loc_norm, p);
      }

      MPI_Allreduce(&loc_norm, &glob_norm, 1, MPI_DOUBLE, MPI_SUM, comm);

      if (glob_norm < 0.0)
      {
         glob_norm = -pow(-glob_norm, 1.0/p);
      }
      else
      {
         glob_norm = pow(glob_norm, 1.0/p);
      }
   }
   else
   {
      MPI_Allreduce(&loc_norm, &glob_norm, 1, MPI_DOUBLE, MPI_MAX, comm);
   }

   return glob_norm;
}
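
// Usage sketch (added): combine the per-rank norms of a piecewise quantity,
// e.g. local L2 errors, into the global norm (here p = 2):
//
//    double glob_err = GlobalLpNorm(2.0, loc_err, MPI_COMM_WORLD);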

void ParGridFunction::ComputeFlux(BilinearFormIntegrator &blfi,
                                  GridFunction &flux, int wcoef, int subdomain)
{
   ParFiniteElementSpace *ffes =
      dynamic_cast<ParFiniteElementSpace*>(flux.FESpace());
   MFEM_VERIFY(ffes, "the flux FE space must be ParFiniteElementSpace");

   Array<int> count(flux.Size());
   SumFluxAndCount(blfi, flux, count, wcoef, subdomain);

   if (ffes->Conforming()) // FIXME: nonconforming
   {
      // Accumulate flux and counts in parallel

      ffes->GroupComm().Reduce<double>(flux, GroupCommunicator::Sum);
      ffes->GroupComm().Bcast<double>(flux);

      ffes->GroupComm().Reduce<int>(count, GroupCommunicator::Sum);
      ffes->GroupComm().Bcast<int>(count);
   }
   else
   {
      MFEM_ABORT("Averaging on processor boundaries not implemented for "
                 "NC meshes yet.\n"
                 "Use L2ZZErrorEstimator() instead of ZZErrorEstimator().");
   }

   // complete averaging
   for (int i = 0; i < count.Size(); i++)
   {
      if (count[i] != 0) { flux(i) /= count[i]; }
   }

   if (ffes->Nonconforming())
   {
      // On a partially conforming flux space, project on the conforming space.
      // Using this code may lead to worse refinements in ex6, so we do not use
      // it by default.

      // Vector conf_flux;
      // flux.ConformingProject(conf_flux);
      // flux.ConformingProlongate(conf_flux);
   }
}
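
// Usage sketch (added; names are illustrative): average the diffusion flux
// of a solution 'x' on a conforming parallel mesh:
//
//    DiffusionIntegrator blfi(diff_coeff);
//    ParFiniteElementSpace flux_fes(&pmesh, &fec, pmesh.SpaceDimension());
//    ParGridFunction flux(&flux_fes);
//    x.ComputeFlux(blfi, flux); // defaults: wcoef = 1, subdomain = -1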

double L2ZZErrorEstimator(BilinearFormIntegrator &flux_integrator,
                          const ParGridFunction &x,
                          ParFiniteElementSpace &smooth_flux_fes,
                          ParFiniteElementSpace &flux_fes,
                          Vector &errors,
                          int norm_p, double solver_tol, int solver_max_it)
{
   // Compute fluxes in discontinuous space
   GridFunction flux(&flux_fes);
   flux = 0.0;

   ParFiniteElementSpace *xfes = x.ParFESpace();
   Array<int> xdofs, fdofs;
   Vector el_x, el_f;

   for (int i = 0; i < xfes->GetNE(); i++)
   {
      xfes->GetElementVDofs(i, xdofs);
      x.GetSubVector(xdofs, el_x);

      ElementTransformation *Transf = xfes->GetElementTransformation(i);
      flux_integrator.ComputeElementFlux(*xfes->GetFE(i), *Transf, el_x,
                                         *flux_fes.GetFE(i), el_f, false);

      flux_fes.GetElementVDofs(i, fdofs);
      flux.AddElementVector(fdofs, el_f);
   }

   // Assemble the linear system for L2 projection into the "smooth" space
   ParBilinearForm *a = new ParBilinearForm(&smooth_flux_fes);
   ParLinearForm *b = new ParLinearForm(&smooth_flux_fes);
   VectorGridFunctionCoefficient f(&flux);

   if (smooth_flux_fes.GetFE(0)->GetRangeType() == FiniteElement::SCALAR)
   {
      a->AddDomainIntegrator(new VectorMassIntegrator);
      b->AddDomainIntegrator(new VectorDomainLFIntegrator(f));
   }
   else
   {
      a->AddDomainIntegrator(new VectorFEMassIntegrator);
      b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
   }

   b->Assemble();
   a->Assemble();
   a->Finalize();

   // The destination of the projected discontinuous flux
   ParGridFunction smooth_flux(&smooth_flux_fes);
   smooth_flux = 0.0;

   HypreParMatrix* A = a->ParallelAssemble();
   HypreParVector* B = b->ParallelAssemble();
   HypreParVector* X = smooth_flux.ParallelProject();

   delete a;
   delete b;

   // Define and apply a parallel PCG solver for AX=B with the BoomerAMG
   // preconditioner from hypre.
   HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
   amg->SetPrintLevel(0);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(solver_tol);
   pcg->SetMaxIter(solver_max_it);
   pcg->SetPrintLevel(0);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(*B, *X);

   // Extract the parallel grid function corresponding to the finite element
   // approximation X. This is the local solution on each processor.
   smooth_flux = *X;

   delete A;
   delete B;
   delete X;
   delete amg;
   delete pcg;

   // Proceed through the elements one by one, and find the Lp norm differences
   // between the flux as computed per element and the flux projected onto the
   // smooth_flux_fes space.
   double total_error = 0.0;
   errors.SetSize(xfes->GetNE());
   for (int i = 0; i < xfes->GetNE(); i++)
   {
      errors(i) = ComputeElementLpDistance(norm_p, i, smooth_flux, flux);
      total_error += pow(errors(i), norm_p);
   }

   double glob_error;
   MPI_Allreduce(&total_error, &glob_error, 1, MPI_DOUBLE, MPI_SUM,
                 xfes->GetComm());

   return pow(glob_error, 1.0/norm_p);
}
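
// Usage sketch (added; space choices follow the MFEM examples and are only
// one possibility): estimate per-element errors for a diffusion solution:
//
//    L2_FECollection flux_fec(order, dim);
//    ParFiniteElementSpace flux_fes(&pmesh, &flux_fec, dim);
//    RT_FECollection smooth_flux_fec(order-1, dim);
//    ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec);
//    DiffusionIntegrator flux_integrator(coeff);
//    Vector errors;
//    double err = L2ZZErrorEstimator(flux_integrator, x, smooth_flux_fes,
//                                    flux_fes, errors, 2);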

}

#endif // MFEM_USE_MPI