MFEM v4.1.0
Finite element discretization library
pgridfunc.cpp
1 // Copyright (c) 2010-2020, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #include "../config/config.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 #include "fem.hpp"
17 #include <iostream>
18 #include <limits>
19 #include "../general/forall.hpp"
20 using namespace std;
21 
22 namespace mfem
23 {
24 
25 ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, GridFunction *gf)
26 {
27  fes = pfes = pf;
28  SetDataAndSize(gf->GetData(), gf->Size());
29 }
30 
31 ParGridFunction::ParGridFunction(ParFiniteElementSpace *pf, HypreParVector *tv)
32  : GridFunction(pf), pfes(pf)
33 {
34  Distribute(tv);
35 }
36 
37 ParGridFunction::ParGridFunction(ParMesh *pmesh, const GridFunction *gf,
38  const int *partitioning)
39 {
40  const FiniteElementSpace *glob_fes = gf->FESpace();
41  // duplicate the FiniteElementCollection from 'gf'
42  fec = FiniteElementCollection::New(glob_fes->FEColl()->Name());
43  // create a local ParFiniteElementSpace from the global one:
44  fes = pfes = new ParFiniteElementSpace(pmesh, glob_fes, partitioning, fec);
45  SetSize(pfes->GetVSize());
46 
47  if (partitioning)
48  {
49  // Assumption: the map "local element id" -> "global element id" is
50  // increasing, i.e. the local numbering preserves the element order from
51  // the global numbering.
52  Array<int> gvdofs, lvdofs;
53  Vector lnodes;
54  int element_counter = 0;
55  const int MyRank = pfes->GetMyRank();
56  const int glob_ne = glob_fes->GetNE();
57  for (int i = 0; i < glob_ne; i++)
58  {
59  if (partitioning[i] == MyRank)
60  {
61  pfes->GetElementVDofs(element_counter, lvdofs);
62  glob_fes->GetElementVDofs(i, gvdofs);
63  gf->GetSubVector(gvdofs, lnodes);
64  SetSubVector(lvdofs, lnodes);
65  element_counter++;
66  }
67  }
68  }
69 }
70 
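The constructor above redistributes a serial GridFunction onto a ParMesh, copying each element's dofs to the rank that owns that element. A minimal usage sketch (not part of pgridfunc.cpp), assuming a serial mesh and solution are available on every rank; the function and variable names are placeholders:

#include "mfem.hpp"
using namespace mfem;

// Sketch: redistribute a serial solution to the ranks of 'comm'. Assumes MPI
// is initialized and MFEM was built with METIS (needed by GeneratePartitioning
// for more than one part).
void RedistributeAndPrint(MPI_Comm comm, Mesh &serial_mesh,
                          GridFunction &serial_gf, int num_parts)
{
   int *partitioning = serial_mesh.GeneratePartitioning(num_parts);
   ParMesh pmesh(comm, serial_mesh, partitioning);

   // The constructor above copies, element by element, the dofs assigned to
   // this rank from 'serial_gf' into the new parallel grid function.
   ParGridFunction x(&pmesh, &serial_gf, partitioning);
   delete [] partitioning;

   x.Save(mfem::out); // placeholder output; normally one file per rank
}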
71 ParGridFunction::ParGridFunction(ParMesh *pmesh, std::istream &input)
72  : GridFunction(pmesh, input)
73 {
74  // Convert the FiniteElementSpace, fes, to a ParFiniteElementSpace:
75  pfes = new ParFiniteElementSpace(pmesh, fec, fes->GetVDim(),
76  fes->GetOrdering());
77  delete fes;
78  fes = pfes;
79 }
80 
81 void ParGridFunction::Update()
82 {
83  face_nbr_data.Destroy();
84  GridFunction::Update();
85 }
86 
87 void ParGridFunction::SetSpace(FiniteElementSpace *f)
88 {
89  face_nbr_data.Destroy();
90  GridFunction::SetSpace(f);
91  pfes = dynamic_cast<ParFiniteElementSpace*>(f);
92  MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
93 }
94 
95 void ParGridFunction::SetSpace(ParFiniteElementSpace *f)
96 {
97  face_nbr_data.Destroy();
98  GridFunction::SetSpace(f);
99  pfes = f;
100 }
101 
102 void ParGridFunction::MakeRef(FiniteElementSpace *f, double *v)
103 {
104  face_nbr_data.Destroy();
105  GridFunction::MakeRef(f, v);
106  pfes = dynamic_cast<ParFiniteElementSpace*>(f);
107  MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
108 }
109 
110 void ParGridFunction::MakeRef(ParFiniteElementSpace *f, double *v)
111 {
112  face_nbr_data.Destroy();
113  GridFunction::MakeRef(f, v);
114  pfes = f;
115 }
116 
117 void ParGridFunction::MakeRef(FiniteElementSpace *f, Vector &v, int v_offset)
118 {
119  face_nbr_data.Destroy();
120  GridFunction::MakeRef(f, v, v_offset);
121  pfes = dynamic_cast<ParFiniteElementSpace*>(f);
122  MFEM_ASSERT(pfes != NULL, "not a ParFiniteElementSpace");
123 }
124 
125 void ParGridFunction::MakeRef(ParFiniteElementSpace *f, Vector &v, int v_offset)
126 {
127  face_nbr_data.Destroy();
128  GridFunction::MakeRef(f, v, v_offset);
129  pfes = f;
130 }
131 
132 void ParGridFunction::Distribute(const Vector *tv)
133 {
134  const Operator *prolong = pfes->GetProlongationMatrix();
135  prolong->Mult(*tv, *this);
136 }
137 
138 void ParGridFunction::AddDistribute(double a, const Vector *tv)
139 {
140  pfes->Dof_TrueDof_Matrix()->Mult(a, *tv, 1.0, *this);
141 }
142 
143 HypreParVector *ParGridFunction::GetTrueDofs() const
144 {
145  HypreParVector *tv = pfes->NewTrueDofVector();
146  GetTrueDofs(*tv);
147  return tv;
148 }
149 
150 void ParGridFunction::ParallelAverage(Vector &tv) const
151 {
152  MFEM_VERIFY(pfes->Conforming(), "not implemented for NC meshes");
153  pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
154  pfes->DivideByGroupSize(tv);
155 }
156 
157 void ParGridFunction::ParallelAverage(HypreParVector &tv) const
158 {
159  MFEM_VERIFY(pfes->Conforming(), "not implemented for NC meshes");
160  pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
161  pfes->DivideByGroupSize(tv);
162 }
163 
164 HypreParVector *ParGridFunction::ParallelAverage() const
165 {
166  HypreParVector *tv = pfes->NewTrueDofVector();
167  ParallelAverage(*tv);
168  return tv;
169 }
170 
171 void ParGridFunction::ParallelProject(Vector &tv) const
172 {
173  pfes->GetRestrictionMatrix()->Mult(*this, tv);
174 }
175 
176 void ParGridFunction::ParallelProject(HypreParVector &tv) const
177 {
178  pfes->GetRestrictionMatrix()->Mult(*this, tv);
179 }
180 
181 HypreParVector *ParGridFunction::ParallelProject() const
182 {
183  HypreParVector *tv = pfes->NewTrueDofVector();
184  ParallelProject(*tv);
185  return tv;
186 }
187 
188 void ParGridFunction::ParallelAssemble(Vector &tv) const
189 {
190  pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
191 }
192 
193 void ParGridFunction::ParallelAssemble(HypreParVector &tv) const
194 {
195  pfes->GetProlongationMatrix()->MultTranspose(*this, tv);
196 }
197 
198 HypreParVector *ParGridFunction::ParallelAssemble() const
199 {
200  HypreParVector *tv = pfes->NewTrueDofVector();
201  ParallelAssemble(*tv);
202  return tv;
203 }
204 
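Distribute(), ParallelProject(), ParallelAverage(), and ParallelAssemble() are the four ways this class moves data between the local (vector-)dof vector and a true-dof vector: Distribute applies the prolongation P, ParallelProject the restriction R, ParallelAssemble the transpose P^t, and ParallelAverage additionally divides by the dof group size. A small sketch (not part of pgridfunc.cpp), assuming mfem.hpp / using namespace mfem and an existing ParFiniteElementSpace* pfes:

// Sketch: round-tripping between local dofs and true dofs.
ParGridFunction x(pfes);
x = 1.0;

HypreParVector X(pfes);      // true-dof (t-vector) layout
x.ParallelProject(X);        // X = R x : pick the true-dof values
x.Distribute(&X);            // x = P X : make shared dofs consistent again

Vector d(pfes->GetTrueVSize());
x.ParallelAssemble(d);       // d = P^t x : assemble as a dual vector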
205 void ParGridFunction::ExchangeFaceNbrData()
206 {
207  pfes->ExchangeFaceNbrData();
208 
209  if (pfes->GetFaceNbrVSize() <= 0)
210  {
211  return;
212  }
213 
214  ParMesh *pmesh = pfes->GetParMesh();
215 
216  face_nbr_data.SetSize(pfes->GetFaceNbrVSize());
217  send_data.SetSize(pfes->send_face_nbr_ldof.Size_of_connections());
218 
219  int *send_offset = pfes->send_face_nbr_ldof.GetI();
220  const int *d_send_ldof = mfem::Read(pfes->send_face_nbr_ldof.GetJMemory(),
221  send_data.Size());
222  int *recv_offset = pfes->face_nbr_ldof.GetI();
223  MPI_Comm MyComm = pfes->GetComm();
224 
225  int num_face_nbrs = pmesh->GetNFaceNeighbors();
226  MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
227  MPI_Request *send_requests = requests;
228  MPI_Request *recv_requests = requests + num_face_nbrs;
229  MPI_Status *statuses = new MPI_Status[num_face_nbrs];
230 
231  auto d_data = this->Read();
232  auto d_send_data = send_data.Write();
233  MFEM_FORALL(i, send_data.Size(),
234  {
235  d_send_data[i] = d_data[d_send_ldof[i]];
236  });
237 
238  bool mpi_gpu_aware = Device::GetGPUAwareMPI();
239  auto send_data_ptr = mpi_gpu_aware ? send_data.Read() : send_data.HostRead();
240  auto face_nbr_data_ptr = mpi_gpu_aware ? face_nbr_data.Write() :
241  face_nbr_data.HostWrite();
242  for (int fn = 0; fn < num_face_nbrs; fn++)
243  {
244  int nbr_rank = pmesh->GetFaceNbrRank(fn);
245  int tag = 0;
246 
247  MPI_Isend(&send_data_ptr[send_offset[fn]],
248  send_offset[fn+1] - send_offset[fn],
249  MPI_DOUBLE, nbr_rank, tag, MyComm, &send_requests[fn]);
250 
251  MPI_Irecv(&face_nbr_data_ptr[recv_offset[fn]],
252  recv_offset[fn+1] - recv_offset[fn],
253  MPI_DOUBLE, nbr_rank, tag, MyComm, &recv_requests[fn]);
254  }
255 
256  MPI_Waitall(num_face_nbrs, send_requests, statuses);
257  MPI_Waitall(num_face_nbrs, recv_requests, statuses);
258 
259  delete [] statuses;
260  delete [] requests;
261 }
262 
263 double ParGridFunction::GetValue(int i, const IntegrationPoint &ip, int vdim)
264 const
265 {
266  Array<int> dofs;
267  Vector DofVal, LocVec;
268  int nbr_el_no = i - pfes->GetParMesh()->GetNE();
269  if (nbr_el_no >= 0)
270  {
271  int fes_vdim = pfes->GetVDim();
272  pfes->GetFaceNbrElementVDofs(nbr_el_no, dofs);
273  if (fes_vdim > 1)
274  {
275  int s = dofs.Size()/fes_vdim;
276  Array<int> _dofs(&dofs[(vdim-1)*s], s);
277  face_nbr_data.GetSubVector(_dofs, LocVec);
278  DofVal.SetSize(s);
279  }
280  else
281  {
282  face_nbr_data.GetSubVector(dofs, LocVec);
283  DofVal.SetSize(dofs.Size());
284  }
285  pfes->GetFaceNbrFE(nbr_el_no)->CalcShape(ip, DofVal);
286  }
287  else
288  {
289  fes->GetElementDofs(i, dofs);
290  fes->DofsToVDofs(vdim-1, dofs);
291  DofVal.SetSize(dofs.Size());
292  const FiniteElement *fe = fes->GetFE(i);
293  MFEM_ASSERT(fe->GetMapType() == FiniteElement::VALUE, "invalid FE map type");
294  fe->CalcShape(ip, DofVal);
295  GetSubVector(dofs, LocVec);
296  }
297 
298  return (DofVal * LocVec);
299 }
300 
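GetValue() treats element indices at or above GetNE() as face-neighbor elements and reads from face_nbr_data, so ExchangeFaceNbrData() must have been called first on both the mesh and the grid function. A usage sketch (not part of pgridfunc.cpp), assuming mfem.hpp / using namespace mfem, an existing ParMesh pmesh, a ParGridFunction x on it, and a 2D reference element:

// Sketch: evaluate x inside an element owned by a neighboring rank.
pmesh.ExchangeFaceNbrData();        // set up the shared-face element lists
x.ExchangeFaceNbrData();            // pull the neighbors' dof values

if (x.ParFESpace()->GetFaceNbrVSize() > 0)
{
   IntegrationPoint ip;
   ip.Set2(0.5, 0.5);               // reference coordinates in the element
   int nbr_el = pmesh.GetNE();      // first face-neighbor element index
   double val = x.GetValue(nbr_el, ip);
   (void) val;
}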
301 void ParGridFunction::ProjectCoefficient(Coefficient &coeff)
302 {
303  DeltaCoefficient *delta_c = dynamic_cast<DeltaCoefficient *>(&coeff);
304 
305  if (delta_c == NULL)
306  {
307  GridFunction::ProjectCoefficient(coeff);
308  }
309  else
310  {
311  double loc_integral, glob_integral;
312 
313  ProjectDeltaCoefficient(*delta_c, loc_integral);
314 
315  MPI_Allreduce(&loc_integral, &glob_integral, 1, MPI_DOUBLE, MPI_SUM,
316  pfes->GetComm());
317 
318  (*this) *= (delta_c->Scale() / glob_integral);
319  }
320 }
321 
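For a DeltaCoefficient, the local integral of the projected spike is reduced over all ranks, so the result is scaled by the global integral even when the delta's center lies on another processor. A sketch (not part of pgridfunc.cpp), assuming mfem.hpp / using namespace mfem and an H1 ParFiniteElementSpace* pfes on a 2D mesh; the center coordinates are placeholders:

// Sketch: project a unit point source at (0.5, 0.5).
DeltaCoefficient delta(0.5, 0.5, 1.0);   // (x, y, scale)
ParGridFunction s(pfes);
s.ProjectCoefficient(delta);
// The MPI_Allreduce above normalizes s so that its integral over the whole
// parallel mesh is (approximately) delta.Scale().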
322 void ParGridFunction::ProjectDiscCoefficient(VectorCoefficient &coeff)
323 {
324  // local maximal element attribute for each dof
325  Array<int> ldof_attr;
326 
327  // local projection
328  GridFunction::ProjectDiscCoefficient(coeff, ldof_attr);
329 
330  // global maximal element attribute for each dof
331  Array<int> gdof_attr;
332  ldof_attr.Copy(gdof_attr);
333  GroupCommunicator &gcomm = pfes->GroupComm();
334  gcomm.Reduce<int>(gdof_attr, GroupCommunicator::Max);
335  gcomm.Bcast(gdof_attr);
336 
337  // set local value to zero if global maximal element attribute is larger than
338  // the local one, and mark (in gdof_attr) if we have the correct value
339  for (int i = 0; i < pfes->GetVSize(); i++)
340  {
341  if (gdof_attr[i] > ldof_attr[i])
342  {
343  (*this)(i) = 0.0;
344  gdof_attr[i] = 0;
345  }
346  else
347  {
348  gdof_attr[i] = 1;
349  }
350  }
351 
352  // parallel averaging plus interpolation to determine final values
353  HypreParVector *tv = pfes->NewTrueDofVector();
354  gcomm.Reduce<int>(gdof_attr, GroupCommunicator::Sum);
355  gcomm.Bcast(gdof_attr);
356  for (int i = 0; i < fes->GetVSize(); i++)
357  {
358  (*this)(i) /= gdof_attr[i];
359  }
360  this->ParallelAssemble(*tv);
361  this->Distribute(tv);
362  delete tv;
363 }
364 
365 
366 void ParGridFunction::ProjectDiscCoefficient(Coefficient &coeff, AvgType type)
367 {
368  // Harmonic (x1 ... xn) = [ (1/x1 + ... + 1/xn) / n ]^-1.
369  // Arithmetic(x1 ... xn) = (x1 + ... + xn) / n.
370 
371  // Number of zones that contain a given dof.
372  Array<int> zones_per_vdof;
373  AccumulateAndCountZones(coeff, type, zones_per_vdof);
374 
375  // Count the zones globally.
376  GroupCommunicator &gcomm = pfes->GroupComm();
377  gcomm.Reduce<int>(zones_per_vdof, GroupCommunicator::Sum);
378  gcomm.Bcast(zones_per_vdof);
379 
380  // Accumulate for all vdofs.
381  gcomm.Reduce<double>(data, GroupCommunicator::Sum);
382  gcomm.Bcast<double>(data);
383 
384  ComputeMeans(type, zones_per_vdof);
385 }
386 
387 void ParGridFunction::ProjectDiscCoefficient(VectorCoefficient &vcoeff,
388  AvgType type)
389 {
390  // Harmonic (x1 ... xn) = [ (1/x1 + ... + 1/xn) / n ]^-1.
391  // Arithmetic(x1 ... xn) = (x1 + ... + xn) / n.
392 
393  // Number of zones that contain a given dof.
394  Array<int> zones_per_vdof;
395  AccumulateAndCountZones(vcoeff, type, zones_per_vdof);
396 
397  // Count the zones globally.
398  GroupCommunicator &gcomm = pfes->GroupComm();
399  gcomm.Reduce<int>(zones_per_vdof, GroupCommunicator::Sum);
400  gcomm.Bcast(zones_per_vdof);
401 
402  // Accumulate for all vdofs.
403  gcomm.Reduce<double>(data, GroupCommunicator::Sum);
404  gcomm.Bcast<double>(data);
405 
406  ComputeMeans(type, zones_per_vdof);
407 }
408 
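Both ProjectDiscCoefficient overloads above accumulate contributions at dofs shared between elements (and between ranks, via the GroupCommunicator) and then take the arithmetic or harmonic mean. A sketch (not part of pgridfunc.cpp), assuming mfem.hpp / using namespace mfem and an H1 ParFiniteElementSpace* pfes; rho_exact is a placeholder material coefficient:

// Sketch: average a discontinuous material coefficient into an H1 space.
double rho_exact(const Vector &p) { return (p(0) < 0.5) ? 1.0 : 10.0; }

void ProjectMaterial(ParFiniteElementSpace *pfes)
{
   FunctionCoefficient rho(rho_exact);
   ParGridFunction rho_h(pfes);
   rho_h.ProjectDiscCoefficient(rho, GridFunction::ARITHMETIC);
   // GridFunction::HARMONIC averages 1/rho instead, which is often the better
   // choice for strongly varying coefficients.
}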
409 void ParGridFunction::ProjectBdrCoefficient(
410  Coefficient *coeff[], VectorCoefficient *vcoeff, Array<int> &attr)
411 {
412  Array<int> values_counter;
413  AccumulateAndCountBdrValues(coeff, vcoeff, attr, values_counter);
414 
415  Vector values(Size());
416  for (int i = 0; i < values.Size(); i++)
417  {
418  values(i) = values_counter[i] ? (*this)(i) : 0.0;
419  }
420 
421  // Count the values globally.
422  GroupCommunicator &gcomm = pfes->GroupComm();
423  gcomm.Reduce<int>(values_counter, GroupCommunicator::Sum);
424  // Accumulate the values globally.
425  gcomm.Reduce<double>(values, GroupCommunicator::Sum);
426  // Only the values in the master are guaranteed to be correct!
427  for (int i = 0; i < values.Size(); i++)
428  {
429  if (values_counter[i])
430  {
431  (*this)(i) = values(i)/values_counter[i];
432  }
433  }
434 
435 #ifdef MFEM_DEBUG
436  Array<int> ess_vdofs_marker;
437  pfes->GetEssentialVDofs(attr, ess_vdofs_marker);
438  for (int i = 0; i < values_counter.Size(); i++)
439  {
440  MFEM_ASSERT(pfes->GetLocalTDofNumber(i) == -1 ||
441  bool(values_counter[i]) == bool(ess_vdofs_marker[i]),
442  "internal error");
443  }
444 #endif
445 }
446 
447 void ParGridFunction::ProjectBdrCoefficientTangent(VectorCoefficient &vcoeff,
448  Array<int> &bdr_attr)
449 {
450  Array<int> values_counter;
451  AccumulateAndCountBdrTangentValues(vcoeff, bdr_attr, values_counter);
452 
453  Vector values(Size());
454  for (int i = 0; i < values.Size(); i++)
455  {
456  values(i) = values_counter[i] ? (*this)(i) : 0.0;
457  }
458 
459  // Count the values globally.
460  GroupCommunicator &gcomm = pfes->GroupComm();
461  gcomm.Reduce<int>(values_counter, GroupCommunicator::Sum);
462  // Accumulate the values globally.
463  gcomm.Reduce<double>(values, GroupCommunicator::Sum);
464  // Only the values in the master are guaranteed to be correct!
465  for (int i = 0; i < values.Size(); i++)
466  {
467  if (values_counter[i])
468  {
469  (*this)(i) = values(i)/values_counter[i];
470  }
471  }
472 
473 #ifdef MFEM_DEBUG
474  Array<int> ess_vdofs_marker;
475  pfes->GetEssentialVDofs(bdr_attr, ess_vdofs_marker);
476  for (int i = 0; i < values_counter.Size(); i++)
477  {
478  MFEM_ASSERT(pfes->GetLocalTDofNumber(i) == -1 ||
479  bool(values_counter[i]) == bool(ess_vdofs_marker[i]),
480  "internal error");
481  }
482 #endif
483 }
484 
485 void ParGridFunction::Save(std::ostream &out) const
486 {
487  double *data_ = const_cast<double*>(HostRead());
488  for (int i = 0; i < size; i++)
489  {
490  if (pfes->GetDofSign(i) < 0) { data_[i] = -data_[i]; }
491  }
492 
493  GridFunction::Save(out);
494 
495  for (int i = 0; i < size; i++)
496  {
497  if (pfes->GetDofSign(i) < 0) { data_[i] = -data_[i]; }
498  }
499 }
500 
501 void ParGridFunction::SaveAsOne(std::ostream &out)
502 {
503  int i, p;
504 
505  MPI_Comm MyComm;
506  MPI_Status status;
507  int MyRank, NRanks;
508 
509  MyComm = pfes -> GetComm();
510 
511  MPI_Comm_size(MyComm, &NRanks);
512  MPI_Comm_rank(MyComm, &MyRank);
513 
514  double **values = new double*[NRanks];
515  int *nv = new int[NRanks];
516  int *nvdofs = new int[NRanks];
517  int *nedofs = new int[NRanks];
518  int *nfdofs = new int[NRanks];
519  int *nrdofs = new int[NRanks];
520 
521  values[0] = data;
522  nv[0] = pfes -> GetVSize();
523  nvdofs[0] = pfes -> GetNVDofs();
524  nedofs[0] = pfes -> GetNEDofs();
525  nfdofs[0] = pfes -> GetNFDofs();
526 
527  if (MyRank == 0)
528  {
529  pfes -> Save(out);
530  out << '\n';
531 
532  for (p = 1; p < NRanks; p++)
533  {
534  MPI_Recv(&nv[p], 1, MPI_INT, p, 455, MyComm, &status);
535  MPI_Recv(&nvdofs[p], 1, MPI_INT, p, 456, MyComm, &status);
536  MPI_Recv(&nedofs[p], 1, MPI_INT, p, 457, MyComm, &status);
537  MPI_Recv(&nfdofs[p], 1, MPI_INT, p, 458, MyComm, &status);
538  values[p] = new double[nv[p]];
539  MPI_Recv(values[p], nv[p], MPI_DOUBLE, p, 460, MyComm, &status);
540  }
541 
542  int vdim = pfes -> GetVDim();
543 
544  for (p = 0; p < NRanks; p++)
545  {
546  nrdofs[p] = nv[p]/vdim - nvdofs[p] - nedofs[p] - nfdofs[p];
547  }
548 
549  if (pfes->GetOrdering() == Ordering::byNODES)
550  {
551  for (int d = 0; d < vdim; d++)
552  {
553  for (p = 0; p < NRanks; p++)
554  for (i = 0; i < nvdofs[p]; i++)
555  {
556  out << *values[p]++ << '\n';
557  }
558 
559  for (p = 0; p < NRanks; p++)
560  for (i = 0; i < nedofs[p]; i++)
561  {
562  out << *values[p]++ << '\n';
563  }
564 
565  for (p = 0; p < NRanks; p++)
566  for (i = 0; i < nfdofs[p]; i++)
567  {
568  out << *values[p]++ << '\n';
569  }
570 
571  for (p = 0; p < NRanks; p++)
572  for (i = 0; i < nrdofs[p]; i++)
573  {
574  out << *values[p]++ << '\n';
575  }
576  }
577  }
578  else
579  {
580  for (p = 0; p < NRanks; p++)
581  for (i = 0; i < nvdofs[p]; i++)
582  for (int d = 0; d < vdim; d++)
583  {
584  out << *values[p]++ << '\n';
585  }
586 
587  for (p = 0; p < NRanks; p++)
588  for (i = 0; i < nedofs[p]; i++)
589  for (int d = 0; d < vdim; d++)
590  {
591  out << *values[p]++ << '\n';
592  }
593 
594  for (p = 0; p < NRanks; p++)
595  for (i = 0; i < nfdofs[p]; i++)
596  for (int d = 0; d < vdim; d++)
597  {
598  out << *values[p]++ << '\n';
599  }
600 
601  for (p = 0; p < NRanks; p++)
602  for (i = 0; i < nrdofs[p]; i++)
603  for (int d = 0; d < vdim; d++)
604  {
605  out << *values[p]++ << '\n';
606  }
607  }
608 
609  for (p = 1; p < NRanks; p++)
610  {
611  values[p] -= nv[p];
612  delete [] values[p];
613  }
614  out.flush();
615  }
616  else
617  {
618  MPI_Send(&nv[0], 1, MPI_INT, 0, 455, MyComm);
619  MPI_Send(&nvdofs[0], 1, MPI_INT, 0, 456, MyComm);
620  MPI_Send(&nedofs[0], 1, MPI_INT, 0, 457, MyComm);
621  MPI_Send(&nfdofs[0], 1, MPI_INT, 0, 458, MyComm);
622  MPI_Send(data, nv[0], MPI_DOUBLE, 0, 460, MyComm);
623  }
624 
625  delete [] values;
626  delete [] nv;
627  delete [] nvdofs;
628  delete [] nedofs;
629  delete [] nfdofs;
630  delete [] nrdofs;
631 }
632 
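Save() writes this rank's portion of the data (after flipping negatively signed dofs), while SaveAsOne() is collective and funnels every rank's data to rank 0, which writes a single serial-format grid function. A sketch (not part of pgridfunc.cpp), assuming mfem.hpp plus <fstream>, <sstream>, <iomanip>, a ParGridFunction x, and the MPI rank myid:

// Sketch: the two common output modes.
std::ostringstream sol_name;
sol_name << "sol." << std::setfill('0') << std::setw(6) << myid;
std::ofstream sol_ofs(sol_name.str().c_str());
sol_ofs.precision(8);
x.Save(sol_ofs);                  // one file per rank

// SaveAsOne() must be called on every rank; only rank 0 writes to the stream.
std::ofstream merged_ofs;
if (myid == 0) { merged_ofs.open("sol_merged.gf"); merged_ofs.precision(8); }
x.SaveAsOne(merged_ofs);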
633 double GlobalLpNorm(const double p, double loc_norm, MPI_Comm comm)
634 {
635  double glob_norm;
636 
637  if (p < infinity())
638  {
639  // negative quadrature weights may cause the error to be negative
640  if (loc_norm < 0.0)
641  {
642  loc_norm = -pow(-loc_norm, p);
643  }
644  else
645  {
646  loc_norm = pow(loc_norm, p);
647  }
648 
649  MPI_Allreduce(&loc_norm, &glob_norm, 1, MPI_DOUBLE, MPI_SUM, comm);
650 
651  if (glob_norm < 0.0)
652  {
653  glob_norm = -pow(-glob_norm, 1.0/p);
654  }
655  else
656  {
657  glob_norm = pow(glob_norm, 1.0/p);
658  }
659  }
660  else
661  {
662  MPI_Allreduce(&loc_norm, &glob_norm, 1, MPI_DOUBLE, MPI_MAX, comm);
663  }
664 
665  return glob_norm;
666 }
667 
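GlobalLpNorm() expects each rank's local norm (not its p-th power); it raises it to p, sums across the communicator, and takes the 1/p root, or performs a max-reduction when p is infinity. A sketch (not part of pgridfunc.cpp), assuming mfem.hpp / using namespace mfem and a rank-local Vector r that has already been filled:

// Sketch: combine per-rank norms of a locally stored residual vector.
double loc_l2  = r.Norml2();
double glob_l2 = GlobalLpNorm(2.0, loc_l2, MPI_COMM_WORLD);

double loc_max  = r.Normlinf();
double glob_max = GlobalLpNorm(infinity(), loc_max, MPI_COMM_WORLD);
// Note: ParGridFunction::ComputeL2Error() and friends already call
// GlobalLpNorm() internally, so no extra reduction is needed there.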
668 
669 void ParGridFunction::ComputeFlux(
670  BilinearFormIntegrator &blfi,
671  GridFunction &flux, bool wcoef, int subdomain)
672 {
673  ParFiniteElementSpace *ffes =
674  dynamic_cast<ParFiniteElementSpace*>(flux.FESpace());
675  MFEM_VERIFY(ffes, "the flux FE space must be ParFiniteElementSpace");
676 
677  Array<int> count(flux.Size());
678  SumFluxAndCount(blfi, flux, count, wcoef, subdomain);
679 
680  // Accumulate flux and counts in parallel
681  ffes->GroupComm().Reduce<double>(flux, GroupCommunicator::Sum);
682  ffes->GroupComm().Bcast<double>(flux);
683 
684  ffes->GroupComm().Reduce<int>(count, GroupCommunicator::Sum);
685  ffes->GroupComm().Bcast<int>(count);
686 
687  // complete averaging
688  for (int i = 0; i < count.Size(); i++)
689  {
690  if (count[i] != 0) { flux(i) /= count[i]; }
691  }
692 
693  if (ffes->Nonconforming())
694  {
695  // On a partially conforming flux space, project on the conforming space.
696  // Using this code may lead to worse refinements in ex6, so we do not use
697  // it by default.
698 
699  // Vector conf_flux;
700  // flux.ConformingProject(conf_flux);
701  // flux.ConformingProlongate(conf_flux);
702  }
703 }
704 
705 
706 double L2ZZErrorEstimator(BilinearFormIntegrator &flux_integrator,
707  const ParGridFunction &x,
708  ParFiniteElementSpace &smooth_flux_fes,
709  ParFiniteElementSpace &flux_fes,
710  Vector &errors,
711  int norm_p, double solver_tol, int solver_max_it)
712 {
713  // Compute fluxes in discontinuous space
714  GridFunction flux(&flux_fes);
715  flux = 0.0;
716 
717  ParFiniteElementSpace *xfes = x.ParFESpace();
718  Array<int> xdofs, fdofs;
719  Vector el_x, el_f;
720 
721  for (int i = 0; i < xfes->GetNE(); i++)
722  {
723  xfes->GetElementVDofs(i, xdofs);
724  x.GetSubVector(xdofs, el_x);
725 
726  ElementTransformation *Transf = xfes->GetElementTransformation(i);
727  flux_integrator.ComputeElementFlux(*xfes->GetFE(i), *Transf, el_x,
728  *flux_fes.GetFE(i), el_f, false);
729 
730  flux_fes.GetElementVDofs(i, fdofs);
731  flux.AddElementVector(fdofs, el_f);
732  }
733 
734  // Assemble the linear system for L2 projection into the "smooth" space
735  ParBilinearForm *a = new ParBilinearForm(&smooth_flux_fes);
736  ParLinearForm *b = new ParLinearForm(&smooth_flux_fes);
737  VectorGridFunctionCoefficient f(&flux);
738 
739  if (xfes->GetNE())
740  {
741  if (smooth_flux_fes.GetFE(0)->GetRangeType() == FiniteElement::SCALAR)
742  {
743  VectorMassIntegrator *vmass = new VectorMassIntegrator;
744  vmass->SetVDim(smooth_flux_fes.GetVDim());
745  a->AddDomainIntegrator(vmass);
746  b->AddDomainIntegrator(new VectorDomainLFIntegrator(f));
747  }
748  else
749  {
750  a->AddDomainIntegrator(new VectorFEMassIntegrator);
751  b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
752  }
753  }
754 
755  b->Assemble();
756  a->Assemble();
757  a->Finalize();
758 
759  // The destination of the projected discontinuous flux
760  ParGridFunction smooth_flux(&smooth_flux_fes);
761  smooth_flux = 0.0;
762 
763  HypreParMatrix* A = a->ParallelAssemble();
764  HypreParVector* B = b->ParallelAssemble();
765  HypreParVector* X = smooth_flux.ParallelProject();
766 
767  delete a;
768  delete b;
769 
770  // Define and apply a parallel PCG solver for AX=B with the BoomerAMG
771  // preconditioner from hypre.
772  HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
773  amg->SetPrintLevel(0);
774  HyprePCG *pcg = new HyprePCG(*A);
775  pcg->SetTol(solver_tol);
776  pcg->SetMaxIter(solver_max_it);
777  pcg->SetPrintLevel(0);
778  pcg->SetPreconditioner(*amg);
779  pcg->Mult(*B, *X);
780 
781  // Extract the parallel grid function corresponding to the finite element
782  // approximation X. This is the local solution on each processor.
783  smooth_flux = *X;
784 
785  delete A;
786  delete B;
787  delete X;
788  delete amg;
789  delete pcg;
790 
791  // Proceed through the elements one by one, and find the Lp norm differences
792  // between the flux as computed per element and the flux projected onto the
793  // smooth_flux_fes space.
794  double total_error = 0.0;
795  errors.SetSize(xfes->GetNE());
796  for (int i = 0; i < xfes->GetNE(); i++)
797  {
798  errors(i) = ComputeElementLpDistance(norm_p, i, smooth_flux, flux);
799  total_error += pow(errors(i), norm_p);
800  }
801 
802  double glob_error;
803  MPI_Allreduce(&total_error, &glob_error, 1, MPI_DOUBLE, MPI_SUM,
804  xfes->GetComm());
805 
806  return pow(glob_error, 1.0/norm_p);
807 }
808 
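A typical way to drive L2ZZErrorEstimator() is with a DiffusionIntegrator as the flux integrator, an element-wise (L2) vector space for the raw flux, and an H(div) space for the smoothed flux, which is roughly the setup used for ZZ-style AMR in the parallel examples. A sketch (not part of pgridfunc.cpp), assuming mfem.hpp / using namespace mfem, an existing ParMesh pmesh, a polynomial order, and an H1 solution x; the solver parameters are illustrative:

// Sketch: element error indicators from flux recovery.
int dim  = pmesh.Dimension();
int sdim = pmesh.SpaceDimension();

DiffusionIntegrator flux_integrator;          // flux = grad u (no coefficient)

L2_FECollection flux_fec(order, dim);
ParFiniteElementSpace flux_fes(&pmesh, &flux_fec, sdim);         // broken flux
RT_FECollection smooth_flux_fec(order - 1, dim);
ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec); // smoothed flux

Vector errors;
double total_err = L2ZZErrorEstimator(flux_integrator, x, smooth_flux_fes,
                                      flux_fes, errors, 2, 1e-12, 200);
// errors(i) is the indicator for local element i; total_err is the global norm.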
809 }
810 
811 #endif // MFEM_USE_MPI