MFEM v4.0
Finite element discretization library
pgridfunc.cpp
1 // Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
2 // the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
3 // reserved. See file COPYRIGHT for details.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability see http://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the GNU Lesser General Public License (as published by the Free
10 // Software Foundation) version 2.1 dated February 1999.
11 
12 #include "../config/config.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 #include "fem.hpp"
17 #include <iostream>
18 #include <limits>
19 using namespace std;
20 
21 namespace mfem
22 {
23 
24 ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, GridFunction *gf)
25 {
26  fes = pfes = pf;
27  SetDataAndSize(gf->GetData(), gf->Size());
28 }
29 
30 ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, HypreParVector *tv)
31  : GridFunction(pf), pfes(pf)
32 {
33  Distribute(tv);
34 }
35 
36 ParGridFunction::ParGridFunction(ParMesh *pmesh, const GridFunction *gf,
37  const int *partitioning)
38 {
39  const FiniteElementSpace *glob_fes = gf->FESpace();
40  // duplicate the FiniteElementCollection from 'gf'
41  fec = FiniteElementCollection::New(glob_fes->FEColl()->Name());
42  // create a local ParFiniteElementSpace from the global one:
43  fes = pfes = new ParFiniteElementSpace(pmesh, glob_fes, partitioning, fec);
44  SetSize(pfes->GetVSize());
45 
46  if (partitioning)
47  {
48  // Assumption: the map "local element id" -> "global element id" is
49  // increasing, i.e. the local numbering preserves the element order from
50  // the global numbering.
51  Array<int> gvdofs, lvdofs;
52  Vector lnodes;
53  int element_counter = 0;
54  const int MyRank = pfes->GetMyRank();
55  const int glob_ne = glob_fes->GetNE();
56  for (int i = 0; i < glob_ne; i++)
57  {
58  if (partitioning[i] == MyRank)
59  {
60  pfes->GetElementVDofs(element_counter, lvdofs);
61  glob_fes->GetElementVDofs(i, gvdofs);
62  gf->GetSubVector(gvdofs, lnodes);
63  SetSubVector(lvdofs, lnodes);
64  element_counter++;
65  }
66  }
67  }
68 }
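// Example usage (illustrative sketch; 'serial_mesh', 'x_serial' and 'num_ranks'
// are assumed names): distribute a serial GridFunction to this processor,
// reusing the partitioning array that was used to build the ParMesh:
//
//    int *partitioning = serial_mesh.GeneratePartitioning(num_ranks);
//    ParMesh pmesh(MPI_COMM_WORLD, serial_mesh, partitioning);
//    ParGridFunction x(&pmesh, &x_serial, partitioning);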
69 
70 ParGridFunction::ParGridFunction(ParMesh *pmesh, std::istream &input)
71  : GridFunction(pmesh, input)
72 {
73  // Convert the FiniteElementSpace, fes, to a ParFiniteElementSpace:
74  pfes = new ParFiniteElementSpace(pmesh, fec, fes->GetVDim(),
75  fes->GetOrdering());
76  delete fes;
77  fes = pfes;
78 }
79 
80 void ParGridFunction::Update()
81 {
82  face_nbr_data.Destroy();
83  GridFunction::Update();
84 }
85 
86 void ParGridFunction::SetSpace(FiniteElementSpace *f)
87 {
88  face_nbr_data.Destroy();
89  GridFunction::SetSpace(f);
90  pfes = dynamic_cast<ParFiniteElementSpace*>(f);
91  MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
92 }
93 
94 void ParGridFunction::SetSpace(ParFiniteElementSpace *f)
95 {
96  face_nbr_data.Destroy();
97  GridFunction::SetSpace(f);
98  pfes = f;
99 }
100 
101 void ParGridFunction::MakeRef(FiniteElementSpace *f, double *v)
102 {
103  face_nbr_data.Destroy();
104  GridFunction::MakeRef(f, v);
105  pfes = dynamic_cast<ParFiniteElementSpace*>(f);
106  MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
107 }
108 
109 void ParGridFunction::MakeRef(ParFiniteElementSpace *f, double *v)
110 {
111  face_nbr_data.Destroy();
112  GridFunction::MakeRef(f, v);
113  pfes = f;
114 }
115 
116 void ParGridFunction::MakeRef(FiniteElementSpace *f, Vector &v, int v_offset)
117 {
118  face_nbr_data.Destroy();
119  GridFunction::MakeRef(f, v, v_offset);
120  pfes = dynamic_cast<ParFiniteElementSpace*>(f);
121  MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
122 }
123 
124 void ParGridFunction::MakeRef(ParFiniteElementSpace *f, Vector &v, int v_offset)
125 {
126  face_nbr_data.Destroy();
127  GridFunction::MakeRef(f, v, v_offset);
128  pfes = f;
129 }
130 
131 void ParGridFunction::Distribute(const Vector *tv)
132 {
133  pfes->GetProlongationMatrix()->Mult(*tv, *this);
134 }
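// Illustrative sketch (assuming a ParGridFunction 'x' on a conforming space):
// round trip between the true-dof and local-dof representations,
//
//    HypreParVector *tv = x.GetTrueDofs(); // tv = R x  (restriction)
//    x.Distribute(tv);                     // x  = P tv (prolongation)
//    delete tv;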
135 
136 void ParGridFunction::AddDistribute(double a, const Vector *tv)
137 {
138  pfes->Dof_TrueDof_Matrix()->Mult(a, *tv, 1.0, *this);
139 }
140 
141 HypreParVector *ParGridFunction::GetTrueDofs() const
142 {
143  HypreParVector *tv = pfes->NewTrueDofVector();
144  GetTrueDofs(*tv);
145  return tv;
146 }
147 
148 void ParGridFunction::ParallelAverage(Vector &tv) const
149 {
150  MFEM_VERIFY(pfes->Conforming(), "not implemented for NC meshes");
151  pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
152  pfes->DivideByGroupSize(tv);
153 }
154 
155 void ParGridFunction::ParallelAverage(HypreParVector &tv) const
156 {
157  MFEM_VERIFY(pfes->Conforming(), "not implemented for NC meshes");
158  pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
159  pfes->DivideByGroupSize(tv);
160 }
161 
162 HypreParVector *ParGridFunction::ParallelAverage() const
163 {
164  HypreParVector *tv = pfes->NewTrueDofVector();
165  ParallelAverage(*tv);
166  return tv;
167 }
168 
169 void ParGridFunction::ParallelProject(Vector &tv) const
170 {
171  pfes->GetRestrictionMatrix()->Mult(*this, tv);
172 }
173 
174 void ParGridFunction::ParallelProject(HypreParVector &tv) const
175 {
176  pfes->GetRestrictionMatrix()->Mult(*this, tv);
177 }
178 
179 HypreParVector *ParGridFunction::ParallelProject() const
180 {
181  HypreParVector *tv = pfes->NewTrueDofVector();
182  ParallelProject(*tv);
183  return tv;
184 }
185 
186 void ParGridFunction::ParallelAssemble(Vector &tv) const
187 {
188  pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
189 }
190 
191 void ParGridFunction::ParallelAssemble(HypreParVector &tv) const
192 {
193  pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
194 }
195 
196 HypreParVector *ParGridFunction::ParallelAssemble() const
197 {
198  HypreParVector *tv = pfes->NewTrueDofVector();
199  ParallelAssemble(*tv);
200  return tv;
201 }
202 
203 void ParGridFunction::ExchangeFaceNbrData()
204 {
205  pfes->ExchangeFaceNbrData();
206 
207  if (pfes->GetFaceNbrVSize() <= 0)
208  {
209  return;
210  }
211 
212  ParMesh *pmesh = pfes->GetParMesh();
213 
216 
217  int *send_offset = pfes->send_face_nbr_ldof.GetI();
218  int *send_ldof = pfes->send_face_nbr_ldof.GetJ();
219  int *recv_offset = pfes->face_nbr_ldof.GetI();
220  MPI_Comm MyComm = pfes->GetComm();
221 
222  int num_face_nbrs = pmesh->GetNFaceNeighbors();
223  MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
224  MPI_Request *send_requests = requests;
225  MPI_Request *recv_requests = requests + num_face_nbrs;
226  MPI_Status *statuses = new MPI_Status[num_face_nbrs];
227 
228  for (int i = 0; i < send_data.Size(); i++)
229  {
230  send_data[i] = data[send_ldof[i]];
231  }
232 
233  for (int fn = 0; fn < num_face_nbrs; fn++)
234  {
235  int nbr_rank = pmesh->GetFaceNbrRank(fn);
236  int tag = 0;
237 
238  MPI_Isend(&send_data(send_offset[fn]),
239  send_offset[fn+1] - send_offset[fn],
240  MPI_DOUBLE, nbr_rank, tag, MyComm, &send_requests[fn]);
241 
242  MPI_Irecv(&face_nbr_data(recv_offset[fn]),
243  recv_offset[fn+1] - recv_offset[fn],
244  MPI_DOUBLE, nbr_rank, tag, MyComm, &recv_requests[fn]);
245  }
246 
247  MPI_Waitall(num_face_nbrs, send_requests, statuses);
248  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
249 
250  delete [] statuses;
251  delete [] requests;
252 }
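// Note: ExchangeFaceNbrData() performs point-to-point MPI communication with
// all face-neighbor ranks, so it should be called on every rank before
// face-neighbor values are accessed, e.g. through GetValue() with an element
// index i >= pfes->GetParMesh()->GetNE() below.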
253 
254 double ParGridFunction::GetValue(int i, const IntegrationPoint &ip, int vdim)
255 const
256 {
257  Array<int> dofs;
258  Vector DofVal, LocVec;
259  int nbr_el_no = i - pfes->GetParMesh()->GetNE();
260  if (nbr_el_no >= 0)
261  {
262  int fes_vdim = pfes->GetVDim();
263  pfes->GetFaceNbrElementVDofs(nbr_el_no, dofs);
264  if (fes_vdim > 1)
265  {
266  int s = dofs.Size()/fes_vdim;
267  Array<int> _dofs(&dofs[(vdim-1)*s], s);
268  face_nbr_data.GetSubVector(_dofs, LocVec);
269  DofVal.SetSize(s);
270  }
271  else
272  {
273  face_nbr_data.GetSubVector(dofs, LocVec);
274  DofVal.SetSize(dofs.Size());
275  }
276  pfes->GetFaceNbrFE(nbr_el_no)->CalcShape(ip, DofVal);
277  }
278  else
279  {
280  fes->GetElementDofs(i, dofs);
281  fes->DofsToVDofs(vdim-1, dofs);
282  DofVal.SetSize(dofs.Size());
283  const FiniteElement *fe = fes->GetFE(i);
284  MFEM_ASSERT(fe->GetMapType() == FiniteElement::VALUE, "invalid FE map type");
285  fe->CalcShape(ip, DofVal);
286  GetSubVector(dofs, LocVec);
287  }
288 
289  return (DofVal * LocVec);
290 }
291 
292 void ParGridFunction::ProjectCoefficient(Coefficient &coeff)
293 {
294  DeltaCoefficient *delta_c = dynamic_cast<DeltaCoefficient *>(&coeff);
295 
296  if (delta_c == NULL)
297  {
298  GridFunction::ProjectCoefficient(coeff);
299  }
300  else
301  {
302  double loc_integral, glob_integral;
303 
304  ProjectDeltaCoefficient(*delta_c, loc_integral);
305 
306  MPI_Allreduce(&loc_integral, &glob_integral, 1, MPI_DOUBLE, MPI_SUM,
307  pfes->GetComm());
308 
309  (*this) *= (delta_c->Scale() / glob_integral);
310  }
311 }
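// Note: for a DeltaCoefficient the local projections are rescaled so that the
// global integral of the projected function equals delta_c->Scale(), which is
// why the local integrals are summed with MPI_Allreduce above.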
312 
313 void ParGridFunction::ProjectDiscCoefficient(VectorCoefficient &coeff)
314 {
315  // local maximal element attribute for each dof
316  Array<int> ldof_attr;
317 
318  // local projection
319  GridFunction::ProjectDiscCoefficient(coeff, ldof_attr);
320 
321  // global maximal element attribute for each dof
322  Array<int> gdof_attr;
323  ldof_attr.Copy(gdof_attr);
324  GroupCommunicator &gcomm = pfes->GroupComm();
325  gcomm.Reduce<int>(gdof_attr, GroupCommunicator::Max);
326  gcomm.Bcast(gdof_attr);
327 
328  // set local value to zero if global maximal element attribute is larger than
329  // the local one, and mark (in gdof_attr) if we have the correct value
330  for (int i = 0; i < pfes->GetVSize(); i++)
331  {
332  if (gdof_attr[i] > ldof_attr[i])
333  {
334  (*this)(i) = 0.0;
335  gdof_attr[i] = 0;
336  }
337  else
338  {
339  gdof_attr[i] = 1;
340  }
341  }
342 
343  // parallel averaging plus interpolation to determine final values
344  HypreParVector *tv = pfes->NewTrueDofVector();
345  gcomm.Reduce<int>(gdof_attr, GroupCommunicator::Sum);
346  gcomm.Bcast(gdof_attr);
347  for (int i = 0; i < fes->GetVSize(); i++)
348  {
349  (*this)(i) /= gdof_attr[i];
350  }
351  this->ParallelAssemble(*tv);
352  this->Distribute(tv);
353  delete tv;
354 }
355 
356 
357 void ParGridFunction::ProjectDiscCoefficient(Coefficient &coeff, AvgType type)
358 {
359  // Harmonic (x1 ... xn) = [ (1/x1 + ... + 1/xn) / n ]^-1.
360  // Arithmetic(x1 ... xn) = (x1 + ... + xn) / n.
361 
362  // Number of zones that contain a given dof.
363  Array<int> zones_per_vdof;
364  AccumulateAndCountZones(coeff, type, zones_per_vdof);
365 
366  // Count the zones globally.
367  GroupCommunicator &gcomm = pfes->GroupComm();
368  gcomm.Reduce<int>(zones_per_vdof, GroupCommunicator::Sum);
369  gcomm.Bcast(zones_per_vdof);
370 
371  // Accumulate for all vdofs.
372  gcomm.Reduce<double>(data, GroupCommunicator::Sum);
373  gcomm.Bcast<double>(data);
374 
375  ComputeMeans(type, zones_per_vdof);
376 }
377 
378 void ParGridFunction::ProjectDiscCoefficient(VectorCoefficient &vcoeff,
379  AvgType type)
380 {
381  // Harmonic (x1 ... xn) = [ (1/x1 + ... + 1/xn) / n ]^-1.
382  // Arithmetic(x1 ... xn) = (x1 + ... + xn) / n.
383 
384  // Number of zones that contain a given dof.
385  Array<int> zones_per_vdof;
386  AccumulateAndCountZones(vcoeff, type, zones_per_vdof);
387 
388  // Count the zones globally.
389  GroupCommunicator &gcomm = pfes->GroupComm();
390  gcomm.Reduce<int>(zones_per_vdof, GroupCommunicator::Sum);
391  gcomm.Bcast(zones_per_vdof);
392 
393  // Accumulate for all vdofs.
394  gcomm.Reduce<double>(data, GroupCommunicator::Sum);
395  gcomm.Bcast<double>(data);
396 
397  ComputeMeans(type, zones_per_vdof);
398 }
399 
400 void ParGridFunction::ProjectBdrCoefficient(
401  Coefficient *coeff[], VectorCoefficient *vcoeff, Array<int> &attr)
402 {
403  Array<int> values_counter;
404  AccumulateAndCountBdrValues(coeff, vcoeff, attr, values_counter);
405  if (pfes->Conforming())
406  {
407  Vector values(Size());
408  for (int i = 0; i < values.Size(); i++)
409  {
410  values(i) = values_counter[i] ? (*this)(i) : 0.0;
411  }
412  // Count the values globally.
413  GroupCommunicator &gcomm = pfes->GroupComm();
414  gcomm.Reduce<int>(values_counter, GroupCommunicator::Sum);
415  // Accumulate the values globally.
416  gcomm.Reduce<double>(values, GroupCommunicator::Sum);
417  // Only the values in the master are guaranteed to be correct!
418  for (int i = 0; i < values.Size(); i++)
419  {
420  if (values_counter[i])
421  {
422  (*this)(i) = values(i)/values_counter[i];
423  }
424  }
425  }
426  else
427  {
428  // TODO: is this the same as the conforming case (after the merge of
429  // cut-mesh-groups-dev)?
430  ComputeMeans(ARITHMETIC, values_counter);
431  }
432 #ifdef MFEM_DEBUG
433  Array<int> ess_vdofs_marker;
434  pfes->GetEssentialVDofs(attr, ess_vdofs_marker);
435  for (int i = 0; i < values_counter.Size(); i++)
436  {
437  MFEM_ASSERT(pfes->GetLocalTDofNumber(i) == -1 ||
438  bool(values_counter[i]) == bool(ess_vdofs_marker[i]),
439  "internal error");
440  }
441 #endif
442 }
443 
444 void ParGridFunction::ProjectBdrCoefficientTangent(VectorCoefficient &vcoeff,
445  Array<int> &bdr_attr)
446 {
447  Array<int> values_counter;
448  AccumulateAndCountBdrTangentValues(vcoeff, bdr_attr, values_counter);
449  if (pfes->Conforming())
450  {
451  Vector values(Size());
452  for (int i = 0; i < values.Size(); i++)
453  {
454  values(i) = values_counter[i] ? (*this)(i) : 0.0;
455  }
456  // Count the values globally.
457  GroupCommunicator &gcomm = pfes->GroupComm();
458  gcomm.Reduce<int>(values_counter, GroupCommunicator::Sum);
459  // Accumulate the values globally.
460  gcomm.Reduce<double>(values, GroupCommunicator::Sum);
461  // Only the values in the master are guaranteed to be correct!
462  for (int i = 0; i < values.Size(); i++)
463  {
464  if (values_counter[i])
465  {
466  (*this)(i) = values(i)/values_counter[i];
467  }
468  }
469  }
470  else
471  {
472  // TODO: is this the same as the conforming case (after the merge of
473  // cut-mesh-groups-dev)?
474  ComputeMeans(ARITHMETIC, values_counter);
475  }
476 #ifdef MFEM_DEBUG
477  Array<int> ess_vdofs_marker;
478  pfes->GetEssentialVDofs(bdr_attr, ess_vdofs_marker);
479  for (int i = 0; i < values_counter.Size(); i++)
480  {
481  MFEM_ASSERT(pfes->GetLocalTDofNumber(i) == -1 ||
482  bool(values_counter[i]) == bool(ess_vdofs_marker[i]),
483  "internal error");
484  }
485 #endif
486 }
487 
488 void ParGridFunction::Save(std::ostream &out) const
489 {
490  double *data_ = const_cast<double*>(HostRead());
491  for (int i = 0; i < size; i++)
492  {
493  if (pfes->GetDofSign(i) < 0) { data_[i] = -data_[i]; }
494  }
495 
496  GridFunction::Save(out);
497 
498  for (int i = 0; i < size; i++)
499  {
500  if (pfes->GetDofSign(i) < 0) { data_[i] = -data_[i]; }
501  }
502 }
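// Note: the two sign-flip loops above write the dof values to the stream with
// the local dof-sign convention undone, and then restore the in-memory data to
// its original state after GridFunction::Save() returns.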
503 
504 void ParGridFunction::SaveAsOne(std::ostream &out)
505 {
506  int i, p;
507 
508  MPI_Comm MyComm;
509  MPI_Status status;
510  int MyRank, NRanks;
511 
512  MyComm = pfes -> GetComm();
513 
514  MPI_Comm_size(MyComm, &NRanks);
515  MPI_Comm_rank(MyComm, &MyRank);
516 
517  double **values = new double*[NRanks];
518  int *nv = new int[NRanks];
519  int *nvdofs = new int[NRanks];
520  int *nedofs = new int[NRanks];
521  int *nfdofs = new int[NRanks];
522  int *nrdofs = new int[NRanks];
523 
524  values[0] = data;
525  nv[0] = pfes -> GetVSize();
526  nvdofs[0] = pfes -> GetNVDofs();
527  nedofs[0] = pfes -> GetNEDofs();
528  nfdofs[0] = pfes -> GetNFDofs();
529 
530  if (MyRank == 0)
531  {
532  pfes -> Save(out);
533  out << '\n';
534 
535  for (p = 1; p < NRanks; p++)
536  {
537  MPI_Recv(&nv[p], 1, MPI_INT, p, 455, MyComm, &status);
538  MPI_Recv(&nvdofs[p], 1, MPI_INT, p, 456, MyComm, &status);
539  MPI_Recv(&nedofs[p], 1, MPI_INT, p, 457, MyComm, &status);
540  MPI_Recv(&nfdofs[p], 1, MPI_INT, p, 458, MyComm, &status);
541  values[p] = new double[nv[p]];
542  MPI_Recv(values[p], nv[p], MPI_DOUBLE, p, 460, MyComm, &status);
543  }
544 
545  int vdim = pfes -> GetVDim();
546 
547  for (p = 0; p < NRanks; p++)
548  {
549  nrdofs[p] = nv[p]/vdim - nvdofs[p] - nedofs[p] - nfdofs[p];
550  }
551 
552  if (pfes->GetOrdering() == Ordering::byNODES)
553  {
554  for (int d = 0; d < vdim; d++)
555  {
556  for (p = 0; p < NRanks; p++)
557  for (i = 0; i < nvdofs[p]; i++)
558  {
559  out << *values[p]++ << '\n';
560  }
561 
562  for (p = 0; p < NRanks; p++)
563  for (i = 0; i < nedofs[p]; i++)
564  {
565  out << *values[p]++ << '\n';
566  }
567 
568  for (p = 0; p < NRanks; p++)
569  for (i = 0; i < nfdofs[p]; i++)
570  {
571  out << *values[p]++ << '\n';
572  }
573 
574  for (p = 0; p < NRanks; p++)
575  for (i = 0; i < nrdofs[p]; i++)
576  {
577  out << *values[p]++ << '\n';
578  }
579  }
580  }
581  else
582  {
583  for (p = 0; p < NRanks; p++)
584  for (i = 0; i < nvdofs[p]; i++)
585  for (int d = 0; d < vdim; d++)
586  {
587  out << *values[p]++ << '\n';
588  }
589 
590  for (p = 0; p < NRanks; p++)
591  for (i = 0; i < nedofs[p]; i++)
592  for (int d = 0; d < vdim; d++)
593  {
594  out << *values[p]++ << '\n';
595  }
596 
597  for (p = 0; p < NRanks; p++)
598  for (i = 0; i < nfdofs[p]; i++)
599  for (int d = 0; d < vdim; d++)
600  {
601  out << *values[p]++ << '\n';
602  }
603 
604  for (p = 0; p < NRanks; p++)
605  for (i = 0; i < nrdofs[p]; i++)
606  for (int d = 0; d < vdim; d++)
607  {
608  out << *values[p]++ << '\n';
609  }
610  }
611 
612  for (p = 1; p < NRanks; p++)
613  {
614  values[p] -= nv[p];
615  delete [] values[p];
616  }
617  out.flush();
618  }
619  else
620  {
621  MPI_Send(&nv[0], 1, MPI_INT, 0, 455, MyComm);
622  MPI_Send(&nvdofs[0], 1, MPI_INT, 0, 456, MyComm);
623  MPI_Send(&nedofs[0], 1, MPI_INT, 0, 457, MyComm);
624  MPI_Send(&nfdofs[0], 1, MPI_INT, 0, 458, MyComm);
625  MPI_Send(data, nv[0], MPI_DOUBLE, 0, 460, MyComm);
626  }
627 
628  delete [] values;
629  delete [] nv;
630  delete [] nvdofs;
631  delete [] nedofs;
632  delete [] nfdofs;
633  delete [] nrdofs;
634 }
635 
636 double GlobalLpNorm(const double p, double loc_norm, MPI_Comm comm)
637 {
638  double glob_norm;
639 
640  if (p < infinity())
641  {
642  // negative quadrature weights may cause the error to be negative
643  if (loc_norm < 0.0)
644  {
645  loc_norm = -pow(-loc_norm, p);
646  }
647  else
648  {
649  loc_norm = pow(loc_norm, p);
650  }
651 
652  MPI_Allreduce(&loc_norm, &glob_norm, 1, MPI_DOUBLE, MPI_SUM, comm);
653 
654  if (glob_norm < 0.0)
655  {
656  glob_norm = -pow(-glob_norm, 1.0/p);
657  }
658  else
659  {
660  glob_norm = pow(glob_norm, 1.0/p);
661  }
662  }
663  else
664  {
665  MPI_Allreduce(&loc_norm, &glob_norm, 1, MPI_DOUBLE, MPI_MAX, comm);
666  }
667 
668  return glob_norm;
669 }
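// Example usage (illustrative sketch, assuming 'x' is a ParGridFunction and
// 'exsol' a Coefficient for the exact solution): combine per-rank L2 errors
// into the global L2 error; this is roughly what
// ParGridFunction::ComputeL2Error() does internally:
//
//    double loc_err  = x.GridFunction::ComputeL2Error(exsol);
//    double glob_err = GlobalLpNorm(2.0, loc_err, x.ParFESpace()->GetComm());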
670 
671 
672 void ParGridFunction::ComputeFlux(
673  BilinearFormIntegrator &blfi,
674  GridFunction &flux, int wcoef, int subdomain)
675 {
676  ParFiniteElementSpace *ffes =
677  dynamic_cast<ParFiniteElementSpace*>(flux.FESpace());
678  MFEM_VERIFY(ffes, "the flux FE space must be ParFiniteElementSpace");
679 
680  Array<int> count(flux.Size());
681  SumFluxAndCount(blfi, flux, count, wcoef, subdomain);
682 
683  // Accumulate flux and counts in parallel
684  ffes->GroupComm().Reduce<double>(flux, GroupCommunicator::Sum);
685  ffes->GroupComm().Bcast<double>(flux);
686 
687  ffes->GroupComm().Reduce<int>(count, GroupCommunicator::Sum);
688  ffes->GroupComm().Bcast<int>(count);
689 
690  // complete averaging
691  for (int i = 0; i < count.Size(); i++)
692  {
693  if (count[i] != 0) { flux(i) /= count[i]; }
694  }
695 
696  if (ffes->Nonconforming())
697  {
698  // On a partially conforming flux space, project on the conforming space.
699  // Using this code may lead to worse refinements in ex6, so we do not use
700  // it by default.
701 
702  // Vector conf_flux;
703  // flux.ConformingProject(conf_flux);
704  // flux.ConformingProlongate(conf_flux);
705  }
706 }
707 
708 
709 double L2ZZErrorEstimator(BilinearFormIntegrator &flux_integrator,
710  const ParGridFunction &x,
711  ParFiniteElementSpace &smooth_flux_fes,
712  ParFiniteElementSpace &flux_fes,
713  Vector &errors,
714  int norm_p, double solver_tol, int solver_max_it)
715 {
716  // Compute fluxes in discontinuous space
717  GridFunction flux(&flux_fes);
718  flux = 0.0;
719 
720  ParFiniteElementSpace *xfes = x.ParFESpace();
721  Array<int> xdofs, fdofs;
722  Vector el_x, el_f;
723 
724  for (int i = 0; i < xfes->GetNE(); i++)
725  {
726  xfes->GetElementVDofs(i, xdofs);
727  x.GetSubVector(xdofs, el_x);
728 
729  ElementTransformation *Transf = xfes->GetElementTransformation(i);
730  flux_integrator.ComputeElementFlux(*xfes->GetFE(i), *Transf, el_x,
731  *flux_fes.GetFE(i), el_f, false);
732 
733  flux_fes.GetElementVDofs(i, fdofs);
734  flux.AddElementVector(fdofs, el_f);
735  }
736 
737  // Assemble the linear system for L2 projection into the "smooth" space
738  ParBilinearForm *a = new ParBilinearForm(&smooth_flux_fes);
739  ParLinearForm *b = new ParLinearForm(&smooth_flux_fes);
740  VectorGridFunctionCoefficient f(&flux);
741 
742  if (xfes->GetNE())
743  {
744  if (smooth_flux_fes.GetFE(0)->GetRangeType() == FiniteElement::SCALAR)
745  {
746  VectorMassIntegrator *vmass = new VectorMassIntegrator;
747  vmass->SetVDim(smooth_flux_fes.GetVDim());
748  a->AddDomainIntegrator(vmass);
749  b->AddDomainIntegrator(new VectorDomainLFIntegrator(f));
750  }
751  else
752  {
753  a->AddDomainIntegrator(new VectorFEMassIntegrator);
754  b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
755  }
756  }
757 
758  b->Assemble();
759  a->Assemble();
760  a->Finalize();
761 
762  // The destination of the projected discontinuous flux
763  ParGridFunction smooth_flux(&smooth_flux_fes);
764  smooth_flux = 0.0;
765 
766  HypreParMatrix* A = a->ParallelAssemble();
767  HypreParVector* B = b->ParallelAssemble();
768  HypreParVector* X = smooth_flux.ParallelProject();
769 
770  delete a;
771  delete b;
772 
773  // Define and apply a parallel PCG solver for AX=B with the BoomerAMG
774  // preconditioner from hypre.
775  HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
776  amg->SetPrintLevel(0);
777  HyprePCG *pcg = new HyprePCG(*A);
778  pcg->SetTol(solver_tol);
779  pcg->SetMaxIter(solver_max_it);
780  pcg->SetPrintLevel(0);
781  pcg->SetPreconditioner(*amg);
782  pcg->Mult(*B, *X);
783 
784  // Extract the parallel grid function corresponding to the finite element
785  // approximation X. This is the local solution on each processor.
786  smooth_flux = *X;
787 
788  delete A;
789  delete B;
790  delete X;
791  delete amg;
792  delete pcg;
793 
794  // Proceed through the elements one by one, and find the Lp norm differences
795  // between the flux as computed per element and the flux projected onto the
796  // smooth_flux_fes space.
797  double total_error = 0.0;
798  errors.SetSize(xfes->GetNE());
799  for (int i = 0; i < xfes->GetNE(); i++)
800  {
801  errors(i) = ComputeElementLpDistance(norm_p, i, smooth_flux, flux);
802  total_error += pow(errors(i), norm_p);
803  }
804 
805  double glob_error;
806  MPI_Allreduce(&total_error, &glob_error, 1, MPI_DOUBLE, MPI_SUM,
807  xfes->GetComm());
808 
809  return pow(glob_error, 1.0/norm_p);
810 }
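// Example usage (illustrative sketch in the spirit of MFEM's ex6p; the names
// 'one', 'order', 'dim', 'pmesh' and 'x' are assumed): estimate per-element
// errors for a diffusion solution using a Raviart-Thomas smoothed-flux space:
//
//    DiffusionIntegrator flux_integrator(one);
//    RT_FECollection smooth_flux_fec(order - 1, dim);
//    ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec);
//    L2_FECollection flux_fec(order, dim);
//    ParFiniteElementSpace flux_fes(&pmesh, &flux_fec, dim);
//    Vector errors;
//    L2ZZErrorEstimator(flux_integrator, x, smooth_flux_fes, flux_fes,
//                       errors, 2, 1e-12, 200);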
811 
812 }
813 
814 #endif // MFEM_USE_MPI