algebraic.cpp (MFEM v4.5.1)
1 // Copyright (c) 2010-2022, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #include "algebraic.hpp"
13 
14 #include "../../bilinearform.hpp"
15 #include "../../fespace.hpp"
16 #include "../../pfespace.hpp"
17 #include "../../../general/forall.hpp"
18 #include "solvers-atpmg.hpp"
19 #include "full-assembly.hpp"
20 #include "../interface/restriction.hpp"
21 #include "../interface/ceed.hpp"
22 
23 namespace mfem
24 {
25 
26 namespace ceed
27 {
28 
29 #ifdef MFEM_USE_CEED
30 
31 /** Wraps a CeedOperator in an mfem::Operator, with essential boundary
32  conditions and a prolongation operator for parallel application. */
33 class ConstrainedOperator : public mfem::Operator
34 {
35 public:
36  /// This object takes ownership of oper and will delete it
37  ConstrainedOperator(CeedOperator oper, const Array<int> &ess_tdofs_,
38  const mfem::Operator *P_);
39  ConstrainedOperator(CeedOperator oper, const mfem::Operator *P_);
40  ~ConstrainedOperator();
41  void Mult(const Vector& x, Vector& y) const;
42  CeedOperator GetCeedOperator() const;
43  const Array<int> &GetEssentialTrueDofs() const;
44  const mfem::Operator *GetProlongation() const;
45 private:
46  Array<int> ess_tdofs;
47  const mfem::Operator *P;
48  ceed::Operator *unconstrained_op;
49  mfem::ConstrainedOperator *constrained_op;
50 };
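// Usage sketch (illustrative; `ceed_oper`, `ess_tdofs`, and `fes` are
// placeholders for objects the caller already has): the wrapped CeedOperator
// acts on L-vectors, so the prolongation P of the finite element space is
// used to form a true-dof operator, roughly P^T A P with the given essential
// true dofs eliminated.
//
//   const mfem::Operator *P = fes.GetProlongationMatrix(); // may be NULL
//   ceed::ConstrainedOperator A(ceed_oper, ess_tdofs, P);  // takes ownership of ceed_oper
//   Vector X(A.Width()), Y(A.Height());
//   X.Randomize();
//   A.Mult(X, Y); // y = P^T (Ceed action) P x, constrained on ess_tdofs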
51 
52 ConstrainedOperator::ConstrainedOperator(
53  CeedOperator oper,
54  const Array<int> &ess_tdofs_,
55  const mfem::Operator *P_)
56  : ess_tdofs(ess_tdofs_), P(P_)
57 {
58  unconstrained_op = new ceed::Operator(oper);
59  mfem::Operator *rap = unconstrained_op->SetupRAP(P, P);
60  height = width = rap->Height();
61  bool own_rap = (rap != unconstrained_op);
62  constrained_op = new mfem::ConstrainedOperator(rap, ess_tdofs, own_rap);
63 }
64 
65 ConstrainedOperator::ConstrainedOperator(CeedOperator oper,
66  const mfem::Operator *P_)
67  : ConstrainedOperator(oper, Array<int>(), P_)
68 { }
69 
70 ConstrainedOperator::~ConstrainedOperator()
71 {
72  delete constrained_op;
73  delete unconstrained_op;
74 }
75 
76 void ConstrainedOperator::Mult(const Vector& x, Vector& y) const
77 {
78  constrained_op->Mult(x, y);
79 }
80 
81 CeedOperator ConstrainedOperator::GetCeedOperator() const
82 {
83  return unconstrained_op->GetCeedOperator();
84 }
85 
86 const Array<int> &ConstrainedOperator::GetEssentialTrueDofs() const
87 {
88  return ess_tdofs;
89 }
90 
91 const mfem::Operator *ConstrainedOperator::GetProlongation() const
92 {
93  return P;
94 }
95 
96 /// Assumes a square operator (a rectangular one would require finding
97 /// separate active input and output fields/restrictions).
98 int CeedOperatorGetSize(CeedOperator oper, CeedInt * size)
99 {
100  CeedSize in_len, out_len;
101  int ierr = CeedOperatorGetActiveVectorLengths(oper, &in_len, &out_len);
102  CeedChk(ierr);
103  *size = (CeedInt)in_len;
104  MFEM_VERIFY(in_len == out_len, "not a square CeedOperator");
105  MFEM_VERIFY(in_len == *size, "size overflow");
106  return 0;
107 }
108 
109 Solver *BuildSmootherFromCeed(ConstrainedOperator &op, bool chebyshev)
110 {
111  int ierr;
112  CeedOperator ceed_op = op.GetCeedOperator();
113  const Array<int> &ess_tdofs = op.GetEssentialTrueDofs();
114  const mfem::Operator *P = op.GetProlongation();
115  // Assemble a local diagonal, in the sense of an L-vector
116  CeedVector diagceed;
117  CeedInt length;
118  ierr = CeedOperatorGetSize(ceed_op, &length); PCeedChk(ierr);
119  ierr = CeedVectorCreate(internal::ceed, length, &diagceed); PCeedChk(ierr);
120  CeedMemType mem;
121  ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
122  if (!Device::Allows(Backend::CUDA) || mem != CEED_MEM_DEVICE)
123  {
124  mem = CEED_MEM_HOST;
125  }
126  Vector local_diag(length);
127  CeedScalar *ptr = (mem == CEED_MEM_HOST) ? local_diag.HostWrite() :
128  local_diag.Write(true);
129  ierr = CeedVectorSetArray(diagceed, mem, CEED_USE_POINTER, ptr);
130  PCeedChk(ierr);
131  ierr = CeedOperatorLinearAssembleDiagonal(ceed_op, diagceed,
132  CEED_REQUEST_IMMEDIATE);
133  PCeedChk(ierr);
134  ierr = CeedVectorTakeArray(diagceed, mem, NULL); PCeedChk(ierr);
135 
136  Vector t_diag;
137  if (P)
138  {
139  t_diag.SetSize(P->Width());
140  P->MultTranspose(local_diag, t_diag);
141  }
142  else
143  {
144  t_diag.NewMemoryAndSize(local_diag.GetMemory(), length, false);
145  }
146  Solver *out = NULL;
147  if (chebyshev)
148  {
149  const int cheb_order = 3;
150  out = new OperatorChebyshevSmoother(op, t_diag, ess_tdofs, cheb_order);
151  }
152  else
153  {
154  const double jacobi_scale = 0.65;
155  out = new OperatorJacobiSmoother(t_diag, ess_tdofs, jacobi_scale);
156  }
157  ierr = CeedVectorDestroy(&diagceed); PCeedChk(ierr);
158  return out;
159 }
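// Note on the diagonal used above (descriptive, derived from the code): the
// diagonal is assembled by libCEED on the L-vector level, and applying P^T
// sums the entries duplicated at shared dofs into a true-dof diagonal. That
// t_diag then feeds either OperatorChebyshevSmoother (order 3) or
// OperatorJacobiSmoother (damping 0.65), both of which only need the matrix
// diagonal and the operator action, never an assembled matrix.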
160 
161 #ifdef MFEM_USE_MPI
162 
163 /// Builds and applies assembled AMG to a CeedOperator
164 class AssembledAMG : public Solver
165 {
166 public:
167  AssembledAMG(ConstrainedOperator &oper, HypreParMatrix *P)
168  {
169  MFEM_ASSERT(P != NULL, "Provided HypreParMatrix is invalid!");
170  height = width = oper.Height();
171 
172  int ierr;
173  const Array<int> ess_tdofs = oper.GetEssentialTrueDofs();
174 
175  ierr = CeedOperatorFullAssemble(oper.GetCeedOperator(), &mat_local);
176  PCeedChk(ierr);
177 
178  {
179  HypreParMatrix hypre_local(
180  P->GetComm(), P->GetGlobalNumRows(), P->RowPart(), mat_local);
181  op_assembled = RAP(&hypre_local, P);
182  }
183  HypreParMatrix *mat_e = op_assembled->EliminateRowsCols(ess_tdofs);
184  delete mat_e;
185  amg = new HypreBoomerAMG(*op_assembled);
186  amg->SetPrintLevel(0);
187  }
188  void SetOperator(const mfem::Operator &op) override { }
189  void Mult(const Vector &x, Vector &y) const override { amg->Mult(x, y); }
190  ~AssembledAMG()
191  {
192  delete op_assembled;
193  delete amg;
194  delete mat_local;
195  }
196 private:
197  SparseMatrix *mat_local;
198  HypreParMatrix *op_assembled;
199  HypreBoomerAMG *amg;
200 };
201 
202 #endif // MFEM_USE_MPI
203 
204 void CoarsenEssentialDofs(const mfem::Operator &interp,
205  const Array<int> &ho_ess_tdofs,
206  Array<int> &alg_lo_ess_tdofs)
207 {
208  Vector ho_boundary_ones(interp.Height());
209  ho_boundary_ones = 0.0;
210  const int *ho_ess_tdofs_h = ho_ess_tdofs.HostRead();
211  for (int i=0; i<ho_ess_tdofs.Size(); ++i)
212  {
213  ho_boundary_ones[ho_ess_tdofs_h[i]] = 1.0;
214  }
215  Vector lo_boundary_ones(interp.Width());
216  interp.MultTranspose(ho_boundary_ones, lo_boundary_ones);
217  auto lobo = lo_boundary_ones.HostRead();
218  for (int i = 0; i < lo_boundary_ones.Size(); ++i)
219  {
220  if (lobo[i] > 0.9)
221  {
222  alg_lo_ess_tdofs.Append(i);
223  }
224  }
225 }
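// Descriptive note on the coarsening above: a fine-level indicator vector is
// set to 1.0 at every essential true dof and transported to the coarse level
// with interp^T; any coarse dof receiving a contribution above the 0.9
// threshold is declared essential on the coarse level. The threshold simply
// separates coarse dofs that coincide with (or are dominated by) an essential
// fine dof from nearby dofs that only pick up small fractional weights.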
226 
227 void AddToCompositeOperator(BilinearFormIntegrator *integ, CeedOperator op)
228 {
229  if (integ->SupportsCeed())
230  {
231  CeedCompositeOperatorAddSub(op, integ->GetCeedOp().GetCeedOperator());
232  }
233  else
234  {
235  MFEM_ABORT("This integrator does not support Ceed!");
236  }
237 }
238 
239 CeedOperator CreateCeedCompositeOperatorFromBilinearForm(BilinearForm &form)
240 {
241  int ierr;
242  CeedOperator op;
243  ierr = CeedCompositeOperatorCreate(internal::ceed, &op); PCeedChk(ierr);
244 
245  MFEM_VERIFY(form.GetBBFI()->Size() == 0,
246  "Not implemented for this integrator!");
247  MFEM_VERIFY(form.GetFBFI()->Size() == 0,
248  "Not implemented for this integrator!");
249  MFEM_VERIFY(form.GetBFBFI()->Size() == 0,
250  "Not implemented for this integrator!");
251 
252  // Get the domain bilinear form integrators (DBFIs)
253  Array<BilinearFormIntegrator*> *bffis = form.GetDBFI();
254  for (int i = 0; i < bffis->Size(); ++i)
255  {
256  AddToCompositeOperator((*bffis)[i], op);
257  }
258  return op;
259 }
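// Usage sketch (illustrative; `fes` and `coeff` are placeholders, and the
// integrator must report SupportsCeed(), which holds for the standard
// integrators when partial assembly runs on a Ceed backend):
//
//   BilinearForm a(&fes);
//   a.SetAssemblyLevel(AssemblyLevel::PARTIAL);
//   a.AddDomainIntegrator(new DiffusionIntegrator(coeff));
//   a.Assemble();
//   CeedOperator composite = CreateCeedCompositeOperatorFromBilinearForm(a);
//   // `composite` now holds one sub-operator per domain integrator of `a`.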
260 
261 CeedOperator CoarsenCeedCompositeOperator(
262  CeedOperator op,
263  CeedElemRestriction er,
264  CeedBasis c2f,
265  int order_reduction
266 )
267 {
268  int ierr;
269  bool isComposite;
270  ierr = CeedOperatorIsComposite(op, &isComposite); PCeedChk(ierr);
271  MFEM_ASSERT(isComposite, "");
272 
273  CeedOperator op_coarse;
274  ierr = CeedCompositeOperatorCreate(internal::ceed,
275  &op_coarse); PCeedChk(ierr);
276 
277  int nsub;
278  CeedOperator *subops;
279 #if CEED_VERSION_GE(0, 10, 2)
280  ierr = CeedCompositeOperatorGetNumSub(op, &nsub); PCeedChk(ierr);
281  ierr = CeedCompositeOperatorGetSubList(op, &subops); PCeedChk(ierr);
282 #else
283  ierr = CeedOperatorGetNumSub(op, &nsub); PCeedChk(ierr);
284  ierr = CeedOperatorGetSubList(op, &subops); PCeedChk(ierr);
285 #endif
286  for (int isub=0; isub<nsub; ++isub)
287  {
288  CeedOperator subop = subops[isub];
289  CeedBasis basis_coarse, basis_c2f;
290  CeedOperator subop_coarse;
291  ierr = CeedATPMGOperator(subop, order_reduction, er, &basis_coarse,
292  &basis_c2f, &subop_coarse); PCeedChk(ierr);
293  // destructions below make sense because these objects are
294  // refcounted by existing objects
295  ierr = CeedBasisDestroy(&basis_coarse); PCeedChk(ierr);
296  ierr = CeedBasisDestroy(&basis_c2f); PCeedChk(ierr);
297  ierr = CeedCompositeOperatorAddSub(op_coarse, subop_coarse);
298  PCeedChk(ierr);
299  ierr = CeedOperatorDestroy(&subop_coarse); PCeedChk(ierr);
300  }
301  return op_coarse;
302 }
303 
304 AlgebraicMultigrid::AlgebraicMultigrid(
305  AlgebraicSpaceHierarchy &hierarchy,
306  BilinearForm &form,
307  const Array<int> &ess_tdofs
308 ) : GeometricMultigrid(hierarchy)
309 {
310  int nlevels = fespaces.GetNumLevels();
311  ceed_operators.SetSize(nlevels);
312  essentialTrueDofs.SetSize(nlevels);
313 
314  // Construct finest level
315  ceed_operators[nlevels-1] = CreateCeedCompositeOperatorFromBilinearForm(form);
316  essentialTrueDofs[nlevels-1] = new Array<int>;
317  *essentialTrueDofs[nlevels-1] = ess_tdofs;
318 
319  // Construct operators at all levels of hierarchy by coarsening
320  for (int ilevel=nlevels-2; ilevel>=0; --ilevel)
321  {
322  AlgebraicCoarseSpace &space = hierarchy.GetAlgebraicCoarseSpace(ilevel);
323  ceed_operators[ilevel] = CoarsenCeedCompositeOperator(
324  ceed_operators[ilevel+1], space.GetCeedElemRestriction(),
325  space.GetCeedCoarseToFine(), space.GetOrderReduction());
326  mfem::Operator *P = hierarchy.GetProlongationAtLevel(ilevel);
327  essentialTrueDofs[ilevel] = new Array<int>;
328  CoarsenEssentialDofs(*P, *essentialTrueDofs[ilevel+1],
329  *essentialTrueDofs[ilevel]);
330  }
331 
332  // Add the operators and smoothers to the hierarchy, from coarse to fine
333  for (int ilevel=0; ilevel<nlevels; ++ilevel)
334  {
335  FiniteElementSpace &space = hierarchy.GetFESpaceAtLevel(ilevel);
336  const mfem::Operator *P = space.GetProlongationMatrix();
337  ConstrainedOperator *op = new ConstrainedOperator(
338  ceed_operators[ilevel], *essentialTrueDofs[ilevel], P);
339  Solver *smoother;
340 #ifdef MFEM_USE_MPI
341  if (ilevel == 0 && !Device::Allows(Backend::CUDA))
342  {
343  HypreParMatrix *P_mat = NULL;
344  if (nlevels == 1)
345  {
346  // Only one level -- no coarsening, finest level
347  ParFiniteElementSpace *pfes
348  = dynamic_cast<ParFiniteElementSpace*>(&space);
349  if (pfes) { P_mat = pfes->Dof_TrueDof_Matrix(); }
350  }
351  else
352  {
353  ParAlgebraicCoarseSpace *pspace
354  = dynamic_cast<ParAlgebraicCoarseSpace*>(&space);
355  if (pspace) { P_mat = pspace->GetProlongationHypreParMatrix(); }
356  }
357  if (P_mat) { smoother = new AssembledAMG(*op, P_mat); }
358  else { smoother = BuildSmootherFromCeed(*op, true); }
359  }
360  else
361 #endif
362  {
363  smoother = BuildSmootherFromCeed(*op, true);
364  }
365  AddLevel(op, smoother, true, true);
366  }
367 }
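// Usage sketch: since AlgebraicMultigrid is a GeometricMultigrid, the object
// built above is applied directly as a multigrid preconditioner; `hierarchy`,
// `a`, and `ess_tdofs` below are the same objects AlgebraicSolver passes in.
//
//   AlgebraicMultigrid mg(hierarchy, a, ess_tdofs);
//   mg.Mult(b, x); // one multigrid cycle: AMG or a Chebyshev smoother on the
//                  // coarsest level, Chebyshev-smoothed Ceed operators above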
368 
369 AlgebraicMultigrid::~AlgebraicMultigrid()
370 {
371 }
372 
373 int AlgebraicInterpolation::Initialize(
374  Ceed ceed, CeedBasis basisctof,
375  CeedElemRestriction erestrictu_coarse, CeedElemRestriction erestrictu_fine)
376 {
377  int ierr = 0;
378 
379  CeedSize height, width;
380  ierr = CeedElemRestrictionGetLVectorSize(erestrictu_coarse, &width);
381  CeedChk(ierr);
382  ierr = CeedElemRestrictionGetLVectorSize(erestrictu_fine, &height);
383  CeedChk(ierr);
384 
385  // interpolation qfunction
386  const int bp3_ncompu = 1;
387  CeedQFunction l_qf_restrict, l_qf_prolong;
388  ierr = CeedQFunctionCreateIdentity(ceed, bp3_ncompu, CEED_EVAL_NONE,
389  CEED_EVAL_INTERP, &l_qf_restrict); CeedChk(ierr);
390  ierr = CeedQFunctionCreateIdentity(ceed, bp3_ncompu, CEED_EVAL_INTERP,
391  CEED_EVAL_NONE, &l_qf_prolong); CeedChk(ierr);
392 
393  qf_restrict = l_qf_restrict;
394  qf_prolong = l_qf_prolong;
395 
396  CeedVector c_fine_multiplicity;
397  ierr = CeedVectorCreate(ceed, height, &c_fine_multiplicity); CeedChk(ierr);
398  ierr = CeedVectorSetValue(c_fine_multiplicity, 0.0); CeedChk(ierr);
399 
400  // Create the restriction operator
401  // Restriction - Fine to coarse
402  ierr = CeedOperatorCreate(ceed, qf_restrict, CEED_QFUNCTION_NONE,
403  CEED_QFUNCTION_NONE, &op_restrict); CeedChk(ierr);
404  ierr = CeedOperatorSetField(op_restrict, "input", erestrictu_fine,
405  CEED_BASIS_COLLOCATED, CEED_VECTOR_ACTIVE); CeedChk(ierr);
406  ierr = CeedOperatorSetField(op_restrict, "output", erestrictu_coarse,
407  basisctof, CEED_VECTOR_ACTIVE); CeedChk(ierr);
408 
409  // Interpolation - Coarse to fine
410  // Create the prolongation operator
411  ierr = CeedOperatorCreate(ceed, qf_prolong, CEED_QFUNCTION_NONE,
412  CEED_QFUNCTION_NONE, &op_interp); CeedChk(ierr);
413  ierr = CeedOperatorSetField(op_interp, "input", erestrictu_coarse,
414  basisctof, CEED_VECTOR_ACTIVE); CeedChk(ierr);
415  ierr = CeedOperatorSetField(op_interp, "output", erestrictu_fine,
416  CEED_BASIS_COLLOCATED, CEED_VECTOR_ACTIVE); CeedChk(ierr);
417 
418  ierr = CeedElemRestrictionGetMultiplicity(erestrictu_fine,
419  c_fine_multiplicity); CeedChk(ierr);
420  ierr = CeedVectorCreate(ceed, height, &fine_multiplicity_r); CeedChk(ierr);
421 
422  CeedScalar* fine_r_data;
423  const CeedScalar* fine_data;
424  ierr = CeedVectorGetArrayWrite(fine_multiplicity_r, CEED_MEM_HOST,
425  &fine_r_data); CeedChk(ierr);
426  ierr = CeedVectorGetArrayRead(c_fine_multiplicity, CEED_MEM_HOST,
427  &fine_data); CeedChk(ierr);
428  for (CeedSize i = 0; i < height; ++i)
429  {
430  fine_r_data[i] = 1.0 / fine_data[i];
431  }
432 
433  ierr = CeedVectorRestoreArray(fine_multiplicity_r, &fine_r_data); CeedChk(ierr);
434  ierr = CeedVectorRestoreArrayRead(c_fine_multiplicity, &fine_data);
435  CeedChk(ierr);
436  ierr = CeedVectorDestroy(&c_fine_multiplicity); CeedChk(ierr);
437 
438  ierr = CeedVectorCreate(ceed, height, &fine_work); CeedChk(ierr);
439 
440  ierr = CeedVectorCreate(ceed, height, &v_); CeedChk(ierr);
441  ierr = CeedVectorCreate(ceed, width, &u_); CeedChk(ierr);
442 
443  return 0;
444 }
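// In operator form (descriptive, matching Mult/MultTranspose below): let
// G = E_fine^T B_c2f E_coarse be the coarse-to-fine map realized by op_interp
// on L-vectors, and D = diag(multiplicity of each fine L-dof). Then
//
//   Mult          : y = D^{-1} G x     (prolongation, coarse -> fine)
//   MultTranspose : y = G^T D^{-1} x   (its exact transpose, fine -> coarse)
//
// fine_multiplicity_r holds the entries of D^{-1}; the scaling averages the
// per-element contributions that E_fine^T sums at dofs shared by elements.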
445 
446 int AlgebraicInterpolation::Finalize()
447 {
448  int ierr;
449 
450  ierr = CeedQFunctionDestroy(&qf_restrict); CeedChk(ierr);
451  ierr = CeedQFunctionDestroy(&qf_prolong); CeedChk(ierr);
452  ierr = CeedOperatorDestroy(&op_interp); CeedChk(ierr);
453  ierr = CeedOperatorDestroy(&op_restrict); CeedChk(ierr);
454  ierr = CeedVectorDestroy(&fine_multiplicity_r); CeedChk(ierr);
455  ierr = CeedVectorDestroy(&fine_work); CeedChk(ierr);
456 
457  return 0;
458 }
459 
460 AlgebraicInterpolation::AlgebraicInterpolation(
461  Ceed ceed, CeedBasis basisctof,
462  CeedElemRestriction erestrictu_coarse,
463  CeedElemRestriction erestrictu_fine)
464 {
465  int ierr;
466  CeedSize lo_nldofs, ho_nldofs;
467  ierr = CeedElemRestrictionGetLVectorSize(erestrictu_coarse, &lo_nldofs);
468  PCeedChk(ierr);
469  ierr = CeedElemRestrictionGetLVectorSize(erestrictu_fine,
470  &ho_nldofs); PCeedChk(ierr);
471  height = (int)ho_nldofs;
472  width = (int)lo_nldofs;
473  MFEM_VERIFY(ho_nldofs == height, "height overflow");
474  MFEM_VERIFY(lo_nldofs == width, "width overflow");
475  owns_basis_ = false;
476  ierr = Initialize(ceed, basisctof, erestrictu_coarse, erestrictu_fine);
477  PCeedChk(ierr);
478 }
479 
480 AlgebraicInterpolation::~AlgebraicInterpolation()
481 {
482  int ierr;
483  ierr = CeedVectorDestroy(&v_); PCeedChk(ierr);
484  ierr = CeedVectorDestroy(&u_); PCeedChk(ierr);
485  if (owns_basis_)
486  {
487  ierr = CeedBasisDestroy(&basisctof_); PCeedChk(ierr);
488  }
489  Finalize();
490 }
491 
492 /// a = a (pointwise*) b
493 /// @todo: using MFEM_FORALL in this Ceed-like function is ugly
494 int CeedVectorPointwiseMult(CeedVector a, const CeedVector b)
495 {
496  int ierr;
497  Ceed ceed;
498  CeedVectorGetCeed(a, &ceed);
499 
500  CeedSize length, length2;
501  ierr = CeedVectorGetLength(a, &length); CeedChk(ierr);
502  ierr = CeedVectorGetLength(b, &length2); CeedChk(ierr);
503  if (length != length2)
504  {
505  return CeedError(ceed, 1, "Vector sizes don't match");
506  }
507 
508  CeedMemType mem;
509  if (Device::Allows(Backend::DEVICE_MASK))
510  {
511  mem = CEED_MEM_DEVICE;
512  }
513  else
514  {
515  mem = CEED_MEM_HOST;
516  }
517  CeedScalar *a_data;
518  const CeedScalar *b_data;
519  ierr = CeedVectorGetArray(a, mem, &a_data); CeedChk(ierr);
520  ierr = CeedVectorGetArrayRead(b, mem, &b_data); CeedChk(ierr);
521  MFEM_VERIFY(int(length) == length, "length overflow");
522  MFEM_FORALL(i, length,
523  {a_data[i] *= b_data[i];});
524 
525  ierr = CeedVectorRestoreArray(a, &a_data); CeedChk(ierr);
526  ierr = CeedVectorRestoreArrayRead(b, &b_data); CeedChk(ierr);
527 
528  return 0;
529 }
530 
531 void AlgebraicInterpolation::Mult(const mfem::Vector& x, mfem::Vector& y) const
532 {
533  int ierr = 0;
534  const CeedScalar *in_ptr;
535  CeedScalar *out_ptr;
536  CeedMemType mem;
537  ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
538  if ( Device::Allows(Backend::DEVICE_MASK) && mem==CEED_MEM_DEVICE )
539  {
540  in_ptr = x.Read();
541  out_ptr = y.ReadWrite();
542  }
543  else
544  {
545  in_ptr = x.HostRead();
546  out_ptr = y.HostReadWrite();
547  mem = CEED_MEM_HOST;
548  }
549  ierr = CeedVectorSetArray(u_, mem, CEED_USE_POINTER,
550  const_cast<CeedScalar*>(in_ptr)); PCeedChk(ierr);
551  ierr = CeedVectorSetArray(v_, mem, CEED_USE_POINTER,
552  out_ptr); PCeedChk(ierr);
553 
554  ierr = CeedOperatorApply(op_interp, u_, v_,
555  CEED_REQUEST_IMMEDIATE); PCeedChk(ierr);
556  ierr = CeedVectorPointwiseMult(v_, fine_multiplicity_r); PCeedChk(ierr);
557 
558  ierr = CeedVectorTakeArray(u_, mem, const_cast<CeedScalar**>(&in_ptr));
559  PCeedChk(ierr);
560  ierr = CeedVectorTakeArray(v_, mem, &out_ptr); PCeedChk(ierr);
561 }
562 
563 void AlgebraicInterpolation::MultTranspose(const mfem::Vector& x,
564  mfem::Vector& y) const
565 {
566  int ierr = 0;
567  CeedMemType mem;
568  ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
569  const CeedScalar *in_ptr;
570  CeedScalar *out_ptr;
571  if ( Device::Allows(Backend::DEVICE_MASK) && mem==CEED_MEM_DEVICE )
572  {
573  in_ptr = x.Read();
574  out_ptr = y.ReadWrite();
575  }
576  else
577  {
578  in_ptr = x.HostRead();
579  out_ptr = y.HostReadWrite();
580  mem = CEED_MEM_HOST;
581  }
582  ierr = CeedVectorSetArray(v_, mem, CEED_USE_POINTER,
583  const_cast<CeedScalar*>(in_ptr)); PCeedChk(ierr);
584  ierr = CeedVectorSetArray(u_, mem, CEED_USE_POINTER,
585  out_ptr); PCeedChk(ierr);
586 
587  CeedSize length;
588  ierr = CeedVectorGetLength(v_, &length); PCeedChk(ierr);
589 
590  const CeedScalar *multiplicitydata;
591  CeedScalar *workdata;
592  ierr = CeedVectorGetArrayRead(fine_multiplicity_r, mem,
593  &multiplicitydata); PCeedChk(ierr);
594  ierr = CeedVectorGetArrayWrite(fine_work, mem, &workdata); PCeedChk(ierr);
595  MFEM_VERIFY((int)length == length, "length overflow");
596  MFEM_FORALL(i, length,
597  {workdata[i] = in_ptr[i] * multiplicitydata[i];});
598  ierr = CeedVectorRestoreArrayRead(fine_multiplicity_r,
599  &multiplicitydata); PCeedChk(ierr);
600  ierr = CeedVectorRestoreArray(fine_work, &workdata); PCeedChk(ierr);
601 
602  ierr = CeedOperatorApply(op_restrict, fine_work, u_,
603  CEED_REQUEST_IMMEDIATE); PCeedChk(ierr);
604 
605  ierr = CeedVectorTakeArray(v_, mem, const_cast<CeedScalar**>(&in_ptr));
606  PCeedChk(ierr);
607  ierr = CeedVectorTakeArray(u_, mem, &out_ptr); PCeedChk(ierr);
608 }
609 
610 AlgebraicSpaceHierarchy::AlgebraicSpaceHierarchy(FiniteElementSpace &fes)
611 {
612  int order = fes.GetOrder(0);
613  int nlevels = 0;
614  int current_order = order;
615  while (current_order > 0)
616  {
617  nlevels++;
618  current_order = current_order/2;
619  }
620 
621  meshes.SetSize(nlevels);
622  ownedMeshes.SetSize(nlevels);
623  meshes = fes.GetMesh();
624  ownedMeshes = false;
625 
626  fespaces.SetSize(nlevels);
627  ownedFES.SetSize(nlevels);
628  // Own all FESpaces except for the finest, own all prolongations
629  ownedFES = true;
630  fespaces[nlevels-1] = &fes;
631  ownedFES[nlevels-1] = false;
632 
633  ceed_interpolations.SetSize(nlevels-1);
634  R_tr.SetSize(nlevels-1);
635  prolongations.SetSize(nlevels-1);
636  ownedProlongations.SetSize(nlevels-1);
637 
638  current_order = order;
639 
640  Ceed ceed = internal::ceed;
641  InitRestriction(fes, ceed, &fine_er);
642  CeedElemRestriction er = fine_er;
643 
644  int dim = fes.GetMesh()->Dimension();
645 #ifdef MFEM_USE_MPI
646  GroupCommunicator *gc = NULL;
647  ParFiniteElementSpace *pfes = dynamic_cast<ParFiniteElementSpace*>(&fes);
648  if (pfes)
649  {
650  gc = &pfes->GroupComm();
651  }
652 #endif
653 
654  for (int ilevel=nlevels-2; ilevel>=0; --ilevel)
655  {
656  const int order_reduction = current_order - (current_order/2);
657  AlgebraicCoarseSpace *space;
658 
659 #ifdef MFEM_USE_MPI
660  if (pfes)
661  {
662  ParAlgebraicCoarseSpace *parspace = new ParAlgebraicCoarseSpace(
663  *fespaces[ilevel+1], er, current_order, dim, order_reduction, gc);
664  gc = parspace->GetGroupCommunicator();
665  space = parspace;
666  }
667  else
668 #endif
669  {
670  space = new AlgebraicCoarseSpace(
671  *fespaces[ilevel+1], er, current_order, dim, order_reduction);
672  }
673  current_order = current_order/2;
674  fespaces[ilevel] = space;
675  ceed_interpolations[ilevel] = new AlgebraicInterpolation(
676  ceed,
677  space->GetCeedCoarseToFine(),
678  space->GetCeedElemRestriction(),
679  er
680  );
681  const SparseMatrix *R = fespaces[ilevel+1]->GetRestrictionMatrix();
682  if (R)
683  {
684  R_tr[ilevel] = new TransposeOperator(*R);
685  }
686  else
687  {
688  R_tr[ilevel] = NULL;
689  }
690  prolongations[ilevel] = ceed_interpolations[ilevel]->SetupRAP(
691  space->GetProlongationMatrix(), R_tr[ilevel]);
692  ownedProlongations[ilevel]
693  = prolongations[ilevel] != ceed_interpolations[ilevel];
694 
695  er = space->GetCeedElemRestriction();
696  }
697 }
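// Worked example of the coarsening schedule above: the order is halved by
// integer division until it would reach 0, and nlevels counts the spaces
// produced. For a finest-level order p = 4:
//
//   current_order : 4 -> 2 -> 1   (three passes through the while loop)
//   nlevels       : 3
//   fespaces      : [order 1, order 2, order 4]   (coarsest to finest)
//   order_reduction per step : 4 - 4/2 = 2, then 2 - 2/2 = 1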
698 
699 AlgebraicCoarseSpace::AlgebraicCoarseSpace(
700  FiniteElementSpace &fine_fes,
701  CeedElemRestriction fine_er,
702  int order,
703  int dim,
704  int order_reduction_
705 ) : order_reduction(order_reduction_)
706 {
707  int ierr;
708  order_reduction = order_reduction_;
709 
710  ierr = CeedATPMGElemRestriction(order, order_reduction, fine_er,
711  &ceed_elem_restriction, dof_map);
712  PCeedChk(ierr);
713  ierr = CeedBasisATPMGCoarseToFine(internal::ceed, order+1, dim,
714  order_reduction, &coarse_to_fine);
715  PCeedChk(ierr);
716  CeedSize ndofs_;
717  ierr = CeedElemRestrictionGetLVectorSize(ceed_elem_restriction, &ndofs_);
718  PCeedChk(ierr);
719  ndofs = ndofs_;
720  MFEM_VERIFY(ndofs == ndofs_, "ndofs overflow");
721 
722  mesh = fine_fes.GetMesh();
723 }
724 
725 AlgebraicCoarseSpace::~AlgebraicCoarseSpace()
726 {
727  int ierr;
728  delete [] dof_map;
729  ierr = CeedBasisDestroy(&coarse_to_fine); PCeedChk(ierr);
730  ierr = CeedElemRestrictionDestroy(&ceed_elem_restriction); PCeedChk(ierr);
731 }
732 
733 #ifdef MFEM_USE_MPI
734 
735 ParAlgebraicCoarseSpace::ParAlgebraicCoarseSpace(
736  FiniteElementSpace &fine_fes,
737  CeedElemRestriction fine_er,
738  int order,
739  int dim,
740  int order_reduction_,
741  GroupCommunicator *gc_fine)
742  : AlgebraicCoarseSpace(fine_fes, fine_er, order, dim, order_reduction_)
743 {
744  CeedSize lsize;
745  CeedElemRestrictionGetLVectorSize(ceed_elem_restriction, &lsize);
746  const Table &group_ldof_fine = gc_fine->GroupLDofTable();
747 
748  MFEM_VERIFY((int)lsize == lsize, "size overflow");
749  ldof_group.SetSize(lsize);
750  ldof_group = 0;
751 
752  const GroupTopology &group_topo = gc_fine->GetGroupTopology();
753  gc = new GroupCommunicator(group_topo);
754  Table &group_ldof = gc->GroupLDofTable();
755  group_ldof.MakeI(group_ldof_fine.Size());
756  for (int g=1; g<group_ldof_fine.Size(); ++g)
757  {
758  int nldof_fine_g = group_ldof_fine.RowSize(g);
759  const int *ldof_fine_g = group_ldof_fine.GetRow(g);
760  for (int i=0; i<nldof_fine_g; ++i)
761  {
762  int icoarse = dof_map[ldof_fine_g[i]];
763  if (icoarse >= 0)
764  {
765  group_ldof.AddAColumnInRow(g);
766  ldof_group[icoarse] = g;
767  }
768  }
769  }
770  group_ldof.MakeJ();
771  for (int g=1; g<group_ldof_fine.Size(); ++g)
772  {
773  int nldof_fine_g = group_ldof_fine.RowSize(g);
774  const int *ldof_fine_g = group_ldof_fine.GetRow(g);
775  for (int i=0; i<nldof_fine_g; ++i)
776  {
777  int icoarse = dof_map[ldof_fine_g[i]];
778  if (icoarse >= 0)
779  {
780  group_ldof.AddConnection(g, icoarse);
781  }
782  }
783  }
784  group_ldof.ShiftUpI();
785  gc->Finalize();
786  ldof_ltdof.SetSize(lsize);
787  ldof_ltdof = -2;
788  int ltsize = 0;
789  for (int i=0; i<lsize; ++i)
790  {
791  int g = ldof_group[i];
792  if (group_topo.IAmMaster(g))
793  {
794  ldof_ltdof[i] = ltsize;
795  ++ltsize;
796  }
797  }
798  gc->SetLTDofTable(ldof_ltdof);
799  gc->Bcast(ldof_ltdof);
800 
801  R_mat = new SparseMatrix(ltsize, lsize);
802  for (int j=0; j<lsize; ++j)
803  {
804  if (group_topo.IAmMaster(ldof_group[j]))
805  {
806  int i = ldof_ltdof[j];
807  R_mat->Set(i,j,1.0);
808  }
809  }
810  R_mat->Finalize();
811 
812  if (Device::Allows(Backend::DEVICE_MASK))
813  {
814  P = new DeviceConformingProlongationOperator(*gc, R_mat);
815  }
816  else
817  {
818  P = new ConformingProlongationOperator(lsize, *gc);
819  }
820  P_mat = NULL;
821 }
822 
823 HypreParMatrix *ParAlgebraicCoarseSpace::GetProlongationHypreParMatrix()
824 {
825  if (P_mat) { return P_mat; }
826 
827  ParMesh *pmesh = dynamic_cast<ParMesh*>(mesh);
828  MFEM_VERIFY(pmesh != NULL, "");
829  Array<HYPRE_BigInt> dof_offsets, tdof_offsets, tdof_nb_offsets;
830  Array<HYPRE_BigInt> *offsets[2] = {&dof_offsets, &tdof_offsets};
831  int lsize = P->Height();
832  int ltsize = P->Width();
833  HYPRE_BigInt loc_sizes[2] = {lsize, ltsize};
834  pmesh->GenerateOffsets(2, loc_sizes, offsets);
835 
836  MPI_Comm comm = pmesh->GetComm();
837 
838  const GroupTopology &group_topo = gc->GetGroupTopology();
839 
840  if (HYPRE_AssumedPartitionCheck())
841  {
842  // communicate the neighbor offsets in tdof_nb_offsets
843  int nsize = group_topo.GetNumNeighbors()-1;
844  MPI_Request *requests = new MPI_Request[2*nsize];
845  MPI_Status *statuses = new MPI_Status[2*nsize];
846  tdof_nb_offsets.SetSize(nsize+1);
847  tdof_nb_offsets[0] = tdof_offsets[0];
848 
849  // send and receive neighbors' local tdof offsets
850  int request_counter = 0;
851  for (int i = 1; i <= nsize; i++)
852  {
853  MPI_Irecv(&tdof_nb_offsets[i], 1, HYPRE_MPI_INT,
854  group_topo.GetNeighborRank(i), 5365, comm,
855  &requests[request_counter++]);
856  }
857  for (int i = 1; i <= nsize; i++)
858  {
859  MPI_Isend(&tdof_nb_offsets[0], 1, HYPRE_MPI_INT,
860  group_topo.GetNeighborRank(i), 5365, comm,
861  &requests[request_counter++]);
862  }
863  MPI_Waitall(request_counter, requests, statuses);
864 
865  delete [] statuses;
866  delete [] requests;
867  }
868 
869  HYPRE_Int *i_diag = Memory<HYPRE_Int>(lsize+1);
870  HYPRE_Int *j_diag = Memory<HYPRE_Int>(ltsize);
871  int diag_counter;
872 
873  HYPRE_Int *i_offd = Memory<HYPRE_Int>(lsize+1);
874  HYPRE_Int *j_offd = Memory<HYPRE_Int>(lsize-ltsize);
875  int offd_counter;
876 
877  HYPRE_BigInt *cmap = Memory<HYPRE_BigInt>(lsize-ltsize);
878 
879  HYPRE_BigInt *col_starts = tdof_offsets;
880  HYPRE_BigInt *row_starts = dof_offsets;
881 
882  Array<Pair<HYPRE_BigInt, int> > cmap_j_offd(lsize-ltsize);
883 
884  i_diag[0] = i_offd[0] = 0;
885  diag_counter = offd_counter = 0;
886  for (int i_ldof = 0; i_ldof < lsize; i_ldof++)
887  {
888  int g = ldof_group[i_ldof];
889  int i_ltdof = ldof_ltdof[i_ldof];
890  if (group_topo.IAmMaster(g))
891  {
892  j_diag[diag_counter++] = i_ltdof;
893  }
894  else
895  {
896  HYPRE_BigInt global_tdof_number;
897  if (HYPRE_AssumedPartitionCheck())
898  {
899  global_tdof_number
900  = i_ltdof + tdof_nb_offsets[group_topo.GetGroupMaster(g)];
901  }
902  else
903  {
904  global_tdof_number
905  = i_ltdof + tdof_offsets[group_topo.GetGroupMasterRank(g)];
906  }
907 
908  cmap_j_offd[offd_counter].one = global_tdof_number;
909  cmap_j_offd[offd_counter].two = offd_counter;
910  offd_counter++;
911  }
912  i_diag[i_ldof+1] = diag_counter;
913  i_offd[i_ldof+1] = offd_counter;
914  }
915 
916  SortPairs<HYPRE_BigInt, int>(cmap_j_offd, offd_counter);
917 
918  for (int i = 0; i < offd_counter; i++)
919  {
920  cmap[i] = cmap_j_offd[i].one;
921  j_offd[cmap_j_offd[i].two] = i;
922  }
923 
924  P_mat = new HypreParMatrix(
925  comm, pmesh->GetMyRank(), pmesh->GetNRanks(),
926  row_starts, col_starts,
927  i_diag, j_diag, i_offd, j_offd,
928  cmap, offd_counter
929  );
930 
931  P_mat->CopyRowStarts();
932  P_mat->CopyColStarts();
933 
934  return P_mat;
935 }
936 
937 ParAlgebraicCoarseSpace::~ParAlgebraicCoarseSpace()
938 {
939  delete P;
940  delete R_mat;
941  delete P_mat;
942  delete gc;
943 }
944 
945 #endif // MFEM_USE_MPI
946 
947 #endif // MFEM_USE_CEED
948 
949 AlgebraicSolver::AlgebraicSolver(BilinearForm &form,
950  const Array<int>& ess_tdofs)
951 {
952  MFEM_VERIFY(DeviceCanUseCeed(),
953  "AlgebraicSolver requires a Ceed device");
954  MFEM_VERIFY(form.GetAssemblyLevel() == AssemblyLevel::PARTIAL ||
955  form.GetAssemblyLevel() == AssemblyLevel::NONE,
956  "AlgebraicSolver requires partial assembly or fully matrix-free.");
957  MFEM_VERIFY(UsesTensorBasis(*form.FESpace()),
958  "AlgebraicSolver requires tensor product basis functions.");
959 #ifdef MFEM_USE_CEED
960  fespaces = new AlgebraicSpaceHierarchy(*form.FESpace());
961  multigrid = new AlgebraicMultigrid(*fespaces, form, ess_tdofs);
962 #else
963  MFEM_ABORT("AlgebraicSolver requires Ceed support");
964 #endif
965 }
966 
967 AlgebraicSolver::~AlgebraicSolver()
968 {
969 #ifdef MFEM_USE_CEED
970  delete fespaces;
971  delete multigrid;
972 #endif
973 }
974 
975 void AlgebraicSolver::Mult(const Vector& x, Vector& y) const
976 {
977 #ifdef MFEM_USE_CEED
978  multigrid->Mult(x, y);
979 #endif
980 }
981 
982 void AlgebraicSolver::SetOperator(const mfem::Operator& op)
983 {
984 #ifdef MFEM_USE_CEED
985  multigrid->SetOperator(op);
986 #endif
987 }
988 
989 } // namespace ceed
990 
991 } // namespace mfem
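// Usage sketch for AlgebraicSolver (illustrative; assumes `using namespace
// mfem;`, a Ceed device such as "ceed-cpu" or "ceed-cuda", and placeholder
// objects `fes`, `coeff`, `ess_tdofs`, `x`, `b` set up by the caller). The
// constructor's MFEM_VERIFY checks require partial (or matrix-free) assembly
// and a tensor-product basis:
//
//   BilinearForm a(&fes);
//   a.SetAssemblyLevel(AssemblyLevel::PARTIAL);
//   a.AddDomainIntegrator(new DiffusionIntegrator(coeff));
//   a.Assemble();
//
//   OperatorPtr A;
//   Vector B, X;
//   a.FormLinearSystem(ess_tdofs, x, b, A, X, B);
//
//   ceed::AlgebraicSolver prec(a, ess_tdofs);
//   CGSolver cg;
//   cg.SetRelTol(1e-12);
//   cg.SetMaxIter(500);
//   cg.SetOperator(*A);
//   cg.SetPreconditioner(prec);
//   cg.Mult(B, X);
//
//   a.RecoverFEMSolution(X, b, x);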