MFEM v4.9.0
Finite element discretization library
Loading...
Searching...
No Matches
algebraic.cpp
Go to the documentation of this file.
1// Copyright (c) 2010-2025, Lawrence Livermore National Security, LLC. Produced
2// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3// LICENSE and NOTICE for details. LLNL-CODE-806117.
4//
5// This file is part of the MFEM library. For more information and source code
6// availability visit https://mfem.org.
7//
8// MFEM is free software; you can redistribute it and/or modify it under the
9// terms of the BSD-3 license. We welcome feedback and contributions, see file
10// CONTRIBUTING.md for details.
11
12#include "algebraic.hpp"
13
15#include "../../fespace.hpp"
16#include "../../pfespace.hpp"
18#include "solvers-atpmg.hpp"
19#include "full-assembly.hpp"
21#include "../interface/ceed.hpp"
22
23namespace mfem
24{
25
26namespace ceed
27{
28
29#ifdef MFEM_USE_CEED
30
/** Wraps a CeedOperator in an mfem::Operator, with essential boundary
    conditions and a prolongation operator for parallel application. */
class ConstrainedOperator : public mfem::Operator
{
public:
   /// This object takes ownership of oper and will delete it
   ConstrainedOperator(CeedOperator oper, const Array<int> &ess_tdofs_,
                       const mfem::Operator *P_);
   /// Same as above, with an empty essential-true-dof list.
   ConstrainedOperator(CeedOperator oper, const mfem::Operator *P_);
   ~ConstrainedOperator();
   /// Apply the operator with essential dofs eliminated: y = A x.
   void Mult(const Vector& x, Vector& y) const;
   /// Access the underlying (unconstrained) CeedOperator.
   CeedOperator GetCeedOperator() const;
   /// The essential true dofs eliminated from this operator.
   const Array<int> &GetEssentialTrueDofs() const;
   /// The prolongation used to form the RAP; may be NULL.
   const mfem::Operator *GetProlongation() const;
private:
   Array<int> ess_tdofs;                      // owned copy of the essential tdofs
   const mfem::Operator *P;                   // not owned
   ceed::Operator *unconstrained_op;          // owned wrapper of the CeedOperator
   mfem::ConstrainedOperator *constrained_op; // owned; imposes the BCs
};
51
52ConstrainedOperator::ConstrainedOperator(
53 CeedOperator oper,
54 const Array<int> &ess_tdofs_,
55 const mfem::Operator *P_)
56 : ess_tdofs(ess_tdofs_), P(P_)
57{
58 unconstrained_op = new ceed::Operator(oper);
59 mfem::Operator *rap = unconstrained_op->SetupRAP(P, P);
60 height = width = rap->Height();
61 bool own_rap = (rap != unconstrained_op);
62 constrained_op = new mfem::ConstrainedOperator(rap, ess_tdofs, own_rap);
63}
64
/// Delegate to the main constructor with an empty essential-tdof list.
ConstrainedOperator::ConstrainedOperator(CeedOperator oper,
                                         const mfem::Operator *P_)
   : ConstrainedOperator(oper, Array<int>(), P_)
{ }
69
ConstrainedOperator::~ConstrainedOperator()
{
   // Delete constrained_op first: it may own the RAP operator that was built
   // from unconstrained_op in the constructor.
   delete constrained_op;
   delete unconstrained_op;
}

/// Apply the BC-constrained operator.
void ConstrainedOperator::Mult(const Vector& x, Vector& y) const
{
   constrained_op->Mult(x, y);
}

/// Return the raw CeedOperator held by the unconstrained wrapper.
CeedOperator ConstrainedOperator::GetCeedOperator() const
{
   return unconstrained_op->GetCeedOperator();
}

/// List of essential true dofs this operator eliminates.
const Array<int> &ConstrainedOperator::GetEssentialTrueDofs() const
{
   return ess_tdofs;
}

/// Prolongation used for the RAP reduction; may be NULL.
const mfem::Operator *ConstrainedOperator::GetProlongation() const
{
   return P;
}
95
96/// assumes a square operator (you could do rectangular, you'd have
97/// to find separate active input and output fields/restrictions)
98int CeedOperatorGetSize(CeedOperator oper, CeedInt * size)
99{
100 CeedSize in_len, out_len;
101 int ierr = CeedOperatorGetActiveVectorLengths(oper, &in_len, &out_len);
102 PCeedChk(ierr);
103 *size = (CeedInt)in_len;
104 MFEM_VERIFY(in_len == out_len, "not a square CeedOperator");
105 MFEM_VERIFY(in_len == *size, "size overflow");
106 return 0;
107}
108
/** Build a Chebyshev or Jacobi smoother for @a op from the diagonal of its
    CeedOperator; the diagonal is assembled in L-vector form and reduced to
    true dofs with P^T when a prolongation is present. */
Solver *BuildSmootherFromCeed(ConstrainedOperator &op, bool chebyshev)
{
   int ierr;
   CeedOperator ceed_op = op.GetCeedOperator();
   const Array<int> &ess_tdofs = op.GetEssentialTrueDofs();
   const mfem::Operator *P = op.GetProlongation();
   // Assemble the local diagonal, in the sense of L-vector
   CeedVector diagceed;
   CeedInt length;
   ierr = CeedOperatorGetSize(ceed_op, &length); PCeedChk(ierr);
   ierr = CeedVectorCreate(internal::ceed, length, &diagceed); PCeedChk(ierr);
   CeedMemType mem;
   ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
   // Fall back to host memory unless a CUDA device is actually allowed.
   if (!Device::Allows(Backend::CUDA) || mem != CEED_MEM_DEVICE)
   {
      mem = CEED_MEM_HOST;
   }
   Vector local_diag(length);
   // Let libCEED write straight into the mfem Vector's memory (no copy).
   CeedScalar *ptr = (mem == CEED_MEM_HOST) ? local_diag.HostWrite() :
                     local_diag.Write(true);
   ierr = CeedVectorSetArray(diagceed, mem, CEED_USE_POINTER, ptr);
   PCeedChk(ierr);
   ierr = CeedOperatorLinearAssembleDiagonal(ceed_op, diagceed,
                                             CEED_REQUEST_IMMEDIATE);
   PCeedChk(ierr);
   // Release the borrowed pointer before destroying diagceed.
   ierr = CeedVectorTakeArray(diagceed, mem, NULL); PCeedChk(ierr);

   Vector t_diag;
   if (P)
   {
      // Reduce the L-vector diagonal onto true dofs.
      t_diag.SetSize(P->Width());
      P->MultTranspose(local_diag, t_diag);
   }
   else
   {
      // No prolongation: alias local_diag's memory (no copy, not owned).
      t_diag.NewMemoryAndSize(local_diag.GetMemory(), length, false);
   }
   Solver *out = NULL;
   if (chebyshev)
   {
      const int cheb_order = 3;
      out = new OperatorChebyshevSmoother(op, t_diag, ess_tdofs, cheb_order);
   }
   else
   {
      const double jacobi_scale = 0.65;
      out = new OperatorJacobiSmoother(t_diag, ess_tdofs, jacobi_scale);
   }
   ierr = CeedVectorDestroy(&diagceed); PCeedChk(ierr);
   return out;
}
160
161#ifdef MFEM_USE_MPI
162
163/// Builds and applies assembled AMG to a CeedOperator
164class AssembledAMG : public Solver
165{
166public:
167 AssembledAMG(ConstrainedOperator &oper, HypreParMatrix *P)
168 {
169 MFEM_ASSERT(P != NULL, "Provided HypreParMatrix is invalid!");
170 height = width = oper.Height();
171
172 int ierr;
173 const Array<int> ess_tdofs = oper.GetEssentialTrueDofs();
174
175 ierr = CeedOperatorFullAssemble(oper.GetCeedOperator(), &mat_local);
176 PCeedChk(ierr);
177
178 {
179 HypreParMatrix hypre_local(
180 P->GetComm(), P->GetGlobalNumRows(), P->RowPart(), mat_local);
181 op_assembled = RAP(&hypre_local, P);
182 }
183 HypreParMatrix *mat_e = op_assembled->EliminateRowsCols(ess_tdofs);
184 delete mat_e;
185 amg = new HypreBoomerAMG(*op_assembled);
186 amg->SetPrintLevel(0);
187 }
188 void SetOperator(const mfem::Operator &op) override { }
189 void Mult(const Vector &x, Vector &y) const override { amg->Mult(x, y); }
190 ~AssembledAMG()
191 {
192 delete op_assembled;
193 delete amg;
194 delete mat_local;
195 }
196private:
197 SparseMatrix *mat_local;
198 HypreParMatrix *op_assembled;
199 HypreBoomerAMG *amg;
200};
201
202#endif // MFEM_USE_MPI
203
// NOTE(review): the opening signature line of this function is missing from
// this listing; it also takes the interpolation operator `interp` used
// below — confirm against the upstream source.
   const Array<int> &ho_ess_tdofs,
   Array<int> &alg_lo_ess_tdofs)
{
   // Mark the high-order essential tdofs with ones...
   Vector ho_boundary_ones(interp.Height());
   ho_boundary_ones = 0.0;
   const int *ho_ess_tdofs_h = ho_ess_tdofs.HostRead();
   for (int i=0; i<ho_ess_tdofs.Size(); ++i)
   {
      ho_boundary_ones[ho_ess_tdofs_h[i]] = 1.0;
   }
   // ...transfer the markers to the coarse space through interp^T...
   Vector lo_boundary_ones(interp.Width());
   interp.MultTranspose(ho_boundary_ones, lo_boundary_ones);
   // ...and flag every coarse dof that received significant weight.
   auto lobo = lo_boundary_ones.HostRead();
   for (int i = 0; i < lo_boundary_ones.Size(); ++i)
   {
      if (lobo[i] > 0.9)
      {
         alg_lo_ess_tdofs.Append(i);
      }
   }
}
226
// NOTE(review): the signature line is missing from this listing; the body
// below adds the CeedOperator of the integrator `integ` as a sub-operator
// of the composite operator `op`.
{
   if (integ->SupportsCeed())
   {
      CeedOperatorCompositeAddSub(op, integ->GetCeedOp().GetCeedOperator());
   }
   else
   {
      // Integrators without a libCEED implementation cannot be used here.
      MFEM_ABORT("This integrator does not support Ceed!");
   }
}
238
// NOTE(review): the signature line is missing from this listing; this
// builds a composite CeedOperator from the integrators of the BilinearForm
// `form`.
{
   int ierr;
   CeedOperator op;
   ierr = CeedOperatorCreateComposite(internal::ceed, &op); PCeedChk(ierr);

   // Only domain integrators are supported: boundary, interior-face, and
   // boundary-face integrators must be absent.
   MFEM_VERIFY(form.GetBBFI()->Size() == 0,
               "Not implemented for this integrator!");
   MFEM_VERIFY(form.GetFBFI()->Size() == 0,
               "Not implemented for this integrator!");
   MFEM_VERIFY(form.GetBFBFI()->Size() == 0,
               "Not implemented for this integrator!");

   // Get the domain bilinear form integrators (DBFIs)
   // NOTE(review): the declaration of `bffis` (presumably obtained from
   // form.GetDBFI()) is missing from this listing — confirm upstream.
   for (int i = 0; i < bffis->Size(); ++i)
   {
      AddToCompositeOperator((*bffis)[i], op);
   }
   return op;
}
260
// NOTE(review): the function name line is missing from this listing; this
// returns a coarsened composite CeedOperator built by applying ATPMG order
// reduction to each sub-operator.
   CeedOperator op,
   CeedElemRestriction er,
   CeedBasis c2f,
   int order_reduction
)
{
   int ierr;
   bool isComposite;
   ierr = CeedOperatorIsComposite(op, &isComposite); PCeedChk(ierr);
   MFEM_ASSERT(isComposite, "");

   CeedOperator op_coarse;
   ierr = CeedOperatorCreateComposite(internal::ceed,
                                      &op_coarse); PCeedChk(ierr);

   int nsub;
   CeedOperator *subops;
   ierr = CeedOperatorCompositeGetNumSub(op, &nsub); PCeedChk(ierr);
   ierr = CeedOperatorCompositeGetSubList(op, &subops); PCeedChk(ierr);
   for (int isub=0; isub<nsub; ++isub)
   {
      CeedOperator subop = subops[isub];
      CeedBasis basis_coarse, basis_c2f;
      CeedOperator subop_coarse;
      ierr = CeedATPMGOperator(subop, order_reduction, er, &basis_coarse,
                               &basis_c2f, &subop_coarse); PCeedChk(ierr);
      // destructions below make sense because these objects are
      // refcounted by existing objects
      ierr = CeedBasisDestroy(&basis_coarse); PCeedChk(ierr);
      ierr = CeedBasisDestroy(&basis_c2f); PCeedChk(ierr);
      ierr = CeedOperatorCompositeAddSub(op_coarse, subop_coarse);
      PCeedChk(ierr);
      ierr = CeedOperatorDestroy(&subop_coarse); PCeedChk(ierr);
   }
   return op_coarse;
}
298
/** Build the multigrid hierarchy: assemble the finest-level composite
    CeedOperator from @a form, coarsen it level by level, then create a
    constrained operator and smoother for every level. */
AlgebraicMultigrid::AlgebraicMultigrid(
   AlgebraicSpaceHierarchy &hierarchy,
   BilinearForm &form,
   const Array<int> &ess_tdofs
) : GeometricMultigrid(hierarchy, Array<int>())
{
   int nlevels = fespaces.GetNumLevels();
   ceed_operators.SetSize(nlevels);
   essentialTrueDofs.SetSize(nlevels);

   // Construct finest level
   ceed_operators[nlevels-1] = CreateCeedCompositeOperatorFromBilinearForm(form);
   essentialTrueDofs[nlevels-1] = new Array<int>;
   *essentialTrueDofs[nlevels-1] = ess_tdofs;

   // Construct operators at all levels of hierarchy by coarsening
   for (int ilevel=nlevels-2; ilevel>=0; --ilevel)
   {
      // NOTE(review): the line obtaining `space` (the algebraic coarse
      // space at this level) is missing from this listing.
      ceed_operators[ilevel] = CoarsenCeedCompositeOperator(
         ceed_operators[ilevel+1], space.GetCeedElemRestriction(),
         space.GetCeedCoarseToFine(), space.GetOrderReduction());
      mfem::Operator *P = hierarchy.GetProlongationAtLevel(ilevel);
      essentialTrueDofs[ilevel] = new Array<int>;
      // NOTE(review): the call coarsening the essential dofs through P is
      // truncated in this listing; only its final argument remains below.
      *essentialTrueDofs[ilevel]);
   }

   // Add the operators and smoothers to the hierarchy, from coarse to fine
   for (int ilevel=0; ilevel<nlevels; ++ilevel)
   {
      FiniteElementSpace &space = hierarchy.GetFESpaceAtLevel(ilevel);
      const mfem::Operator *P = space.GetProlongationMatrix();
      ConstrainedOperator *op = new ConstrainedOperator(
         ceed_operators[ilevel], *essentialTrueDofs[ilevel], P);
      Solver *smoother;
#ifdef MFEM_USE_MPI
      // On the coarsest level (host path only), prefer an assembled
      // BoomerAMG solve when a parallel prolongation matrix is available.
      if (ilevel == 0 && !Device::Allows(Backend::CUDA))
      {
         HypreParMatrix *P_mat = NULL;
         if (nlevels == 1)
         {
            // Only one level -- no coarsening, finest level
            // NOTE(review): the declaration of `pfes` is truncated in this
            // listing; only the dynamic_cast continuation remains below.
            = dynamic_cast<ParFiniteElementSpace*>(&space);
            if (pfes) { P_mat = pfes->Dof_TrueDof_Matrix(); }
         }
         else
         {
            // NOTE(review): the declaration of `pspace` is truncated in
            // this listing; only the dynamic_cast continuation remains.
            = dynamic_cast<ParAlgebraicCoarseSpace*>(&space);
            if (pspace) { P_mat = pspace->GetProlongationHypreParMatrix(); }
         }
         if (P_mat) { smoother = new AssembledAMG(*op, P_mat); }
         else { smoother = BuildSmootherFromCeed(*op, true); }
      }
      else
#endif
      {
         smoother = BuildSmootherFromCeed(*op, true);
      }
      // The multigrid takes ownership of both op and smoother.
      AddLevel(op, smoother, true, true);
   }
}
363
367
/** Create the Ceed restriction/prolongation operators between the coarse
    and fine element restrictions, plus the reciprocal-multiplicity vector
    used to average multiply-counted fine dofs, and the work vectors. */
int AlgebraicInterpolation::Initialize(
   Ceed ceed, CeedBasis basisctof,
   CeedElemRestriction erestrictu_coarse, CeedElemRestriction erestrictu_fine)
{
   int ierr = 0;

   // L-vector sizes: fine gives the height, coarse gives the width.
   CeedSize height, width;
   ierr = CeedElemRestrictionGetLVectorSize(erestrictu_coarse, &width);
   PCeedChk(ierr);
   ierr = CeedElemRestrictionGetLVectorSize(erestrictu_fine, &height);
   PCeedChk(ierr);

   // interpolation qfunction
   const int bp3_ncompu = 1;
   CeedQFunction l_qf_restrict, l_qf_prolong;
   ierr = CeedQFunctionCreateIdentity(ceed, bp3_ncompu, CEED_EVAL_NONE,
                                      CEED_EVAL_INTERP, &l_qf_restrict); PCeedChk(ierr);
   ierr = CeedQFunctionCreateIdentity(ceed, bp3_ncompu, CEED_EVAL_INTERP,
                                      CEED_EVAL_NONE, &l_qf_prolong); PCeedChk(ierr);

   qf_restrict = l_qf_restrict;
   qf_prolong = l_qf_prolong;

   CeedVector c_fine_multiplicity;
   ierr = CeedVectorCreate(ceed, height, &c_fine_multiplicity); PCeedChk(ierr);
   ierr = CeedVectorSetValue(c_fine_multiplicity, 0.0); PCeedChk(ierr);

   // Create the restriction operator
   // Restriction - Fine to coarse
   ierr = CeedOperatorCreate(ceed, qf_restrict, CEED_QFUNCTION_NONE,
                             CEED_QFUNCTION_NONE, &op_restrict); PCeedChk(ierr);
   ierr = CeedOperatorSetField(op_restrict, "input", erestrictu_fine,
                               CEED_BASIS_NONE, CEED_VECTOR_ACTIVE); PCeedChk(ierr);
   ierr = CeedOperatorSetField(op_restrict, "output", erestrictu_coarse,
                               basisctof, CEED_VECTOR_ACTIVE); PCeedChk(ierr);

   // Interpolation - Coarse to fine
   // Create the prolongation operator
   ierr = CeedOperatorCreate(ceed, qf_prolong, CEED_QFUNCTION_NONE,
                             CEED_QFUNCTION_NONE, &op_interp); PCeedChk(ierr);
   ierr = CeedOperatorSetField(op_interp, "input", erestrictu_coarse,
                               basisctof, CEED_VECTOR_ACTIVE); PCeedChk(ierr);
   ierr = CeedOperatorSetField(op_interp, "output", erestrictu_fine,
                               CEED_BASIS_NONE, CEED_VECTOR_ACTIVE); PCeedChk(ierr);

   ierr = CeedElemRestrictionGetMultiplicity(erestrictu_fine,
                                             c_fine_multiplicity); PCeedChk(ierr);
   ierr = CeedVectorCreate(ceed, height, &fine_multiplicity_r); PCeedChk(ierr);

   // Store reciprocals of the fine-dof multiplicities (assumes every entry
   // is nonzero, i.e. each L-vector dof appears in at least one element).
   CeedScalar* fine_r_data;
   const CeedScalar* fine_data;
   ierr = CeedVectorGetArrayWrite(fine_multiplicity_r, CEED_MEM_HOST,
                                  &fine_r_data); PCeedChk(ierr);
   ierr = CeedVectorGetArrayRead(c_fine_multiplicity, CEED_MEM_HOST,
                                 &fine_data); PCeedChk(ierr);
   for (CeedSize i = 0; i < height; ++i)
   {
      fine_r_data[i] = 1.0 / fine_data[i];
   }

   ierr = CeedVectorRestoreArray(fine_multiplicity_r, &fine_r_data);
   PCeedChk(ierr);
   ierr = CeedVectorRestoreArrayRead(c_fine_multiplicity, &fine_data);
   PCeedChk(ierr);
   ierr = CeedVectorDestroy(&c_fine_multiplicity); PCeedChk(ierr);

   ierr = CeedVectorCreate(ceed, height, &fine_work); PCeedChk(ierr);

   // I/O wrappers used by Mult/MultTranspose: v_ is fine, u_ is coarse.
   ierr = CeedVectorCreate(ceed, height, &v_); PCeedChk(ierr);
   ierr = CeedVectorCreate(ceed, width, &u_); PCeedChk(ierr);

   return 0;
}
441
/// Destroy the Ceed objects created in Initialize(). Note that u_ and v_
/// are released in the destructor, not here.
int AlgebraicInterpolation::Finalize()
{
   int ierr;

   ierr = CeedQFunctionDestroy(&qf_restrict); PCeedChk(ierr);
   ierr = CeedQFunctionDestroy(&qf_prolong); PCeedChk(ierr);
   ierr = CeedOperatorDestroy(&op_interp); PCeedChk(ierr);
   ierr = CeedOperatorDestroy(&op_restrict); PCeedChk(ierr);
   ierr = CeedVectorDestroy(&fine_multiplicity_r); PCeedChk(ierr);
   ierr = CeedVectorDestroy(&fine_work); PCeedChk(ierr);

   return 0;
}
455
// NOTE(review): the constructor's name line is missing from this listing
// (AlgebraicInterpolation's constructor taking the parameters below).
   Ceed ceed, CeedBasis basisctof,
   CeedElemRestriction erestrictu_coarse,
   CeedElemRestriction erestrictu_fine)
{
   int ierr;
   CeedSize lo_nldofs, ho_nldofs;
   ierr = CeedElemRestrictionGetLVectorSize(erestrictu_coarse, &lo_nldofs);
   PCeedChk(ierr);
   ierr = CeedElemRestrictionGetLVectorSize(erestrictu_fine,
                                            &ho_nldofs); PCeedChk(ierr);
   // Narrow the CeedSize L-vector sizes to int, then verify no loss.
   height = (int)ho_nldofs;
   width = (int)lo_nldofs;
   MFEM_VERIFY(ho_nldofs == height, "height overflow");
   MFEM_VERIFY(lo_nldofs == width, "width overflow");
   // The basis is borrowed here, so the destructor must not free it.
   owns_basis_ = false;
   ierr = Initialize(ceed, basisctof, erestrictu_coarse, erestrictu_fine);
   PCeedChk(ierr);
}
475
// NOTE(review): the destructor's signature line is missing from this
// listing (AlgebraicInterpolation's destructor).
{
   int ierr;
   ierr = CeedVectorDestroy(&v_); PCeedChk(ierr);
   ierr = CeedVectorDestroy(&u_); PCeedChk(ierr);
   // Only destroy the coarse-to-fine basis when this object owns it.
   if (owns_basis_)
   {
      ierr = CeedBasisDestroy(&basisctof_); PCeedChk(ierr);
   }
   Finalize();
}
487
/// a = a (pointwise*) b
/// @todo: using MPI_FORALL in this Ceed-like function is ugly
int CeedVectorPointwiseMult(CeedVector a, const CeedVector b)
{
   int ierr;
   Ceed ceed;
   // NOTE(review): the return code of CeedVectorGetCeed is not checked.
   CeedVectorGetCeed(a, &ceed);

   CeedSize length, length2;
   ierr = CeedVectorGetLength(a, &length); PCeedChk(ierr);
   ierr = CeedVectorGetLength(b, &length2); PCeedChk(ierr);
   if (length != length2)
   {
      return CeedError(ceed, 1, "Vector sizes don't match");
   }

   CeedMemType mem;
   // NOTE(review): the condition selecting device memory is missing from
   // this listing; presumably it tests the device/preferred memory type —
   // confirm against the upstream source.
   {
      mem = CEED_MEM_DEVICE;
   }
   else
   {
      mem = CEED_MEM_HOST;
   }
   CeedScalar *a_data;
   const CeedScalar *b_data;
   ierr = CeedVectorGetArray(a, mem, &a_data); PCeedChk(ierr);
   ierr = CeedVectorGetArrayRead(b, mem, &b_data); PCeedChk(ierr);
   // Guard the narrowing of length to the int index used by mfem::forall.
   MFEM_VERIFY(int(length) == length, "length overflow");
   mfem::forall(length, [=] MFEM_HOST_DEVICE (int i)
   {a_data[i] *= b_data[i];});

   ierr = CeedVectorRestoreArray(a, &a_data); PCeedChk(ierr);
   ierr = CeedVectorRestoreArrayRead(b, &b_data); PCeedChk(ierr);

   return 0;
}
526
// NOTE(review): the signature line is missing from this listing — based on
// the field setup in Initialize(), this applies op_interp, mapping a coarse
// input x to a fine output y.
{
   int ierr = 0;
   const CeedScalar *in_ptr;
   CeedScalar *out_ptr;
   CeedMemType mem;
   ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
   if ( Device::Allows(Backend::DEVICE_MASK) && mem==CEED_MEM_DEVICE )
   {
      in_ptr = x.Read();
      out_ptr = y.ReadWrite();
   }
   else
   {
      in_ptr = x.HostRead();
      out_ptr = y.HostReadWrite();
      mem = CEED_MEM_HOST;
   }
   // Alias the mfem Vectors' memory; the pointers are taken back below.
   ierr = CeedVectorSetArray(u_, mem, CEED_USE_POINTER,
                             const_cast<CeedScalar*>(in_ptr)); PCeedChk(ierr);
   ierr = CeedVectorSetArray(v_, mem, CEED_USE_POINTER,
                             out_ptr); PCeedChk(ierr);

   ierr = CeedOperatorApply(op_interp, u_, v_,
                            CEED_REQUEST_IMMEDIATE); PCeedChk(ierr);
   // Average multiply-counted fine dofs by the reciprocal multiplicity.
   ierr = CeedVectorPointwiseMult(v_, fine_multiplicity_r); PCeedChk(ierr);

   ierr = CeedVectorTakeArray(u_, mem, const_cast<CeedScalar**>(&in_ptr));
   PCeedChk(ierr);
   ierr = CeedVectorTakeArray(v_, mem, &out_ptr); PCeedChk(ierr);
}
558
// NOTE(review): the first signature line is missing from this listing —
// based on the use of op_restrict below, this is the transpose (fine input
// x to coarse output y); only the second parameter line remains.
   mfem::Vector& y) const
{
   int ierr = 0;
   CeedMemType mem;
   ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
   const CeedScalar *in_ptr;
   CeedScalar *out_ptr;
   if ( Device::Allows(Backend::DEVICE_MASK) && mem==CEED_MEM_DEVICE )
   {
      in_ptr = x.Read();
      out_ptr = y.ReadWrite();
   }
   else
   {
      in_ptr = x.HostRead();
      out_ptr = y.HostReadWrite();
      mem = CEED_MEM_HOST;
   }
   // v_ aliases the fine input, u_ the coarse output.
   ierr = CeedVectorSetArray(v_, mem, CEED_USE_POINTER,
                             const_cast<CeedScalar*>(in_ptr)); PCeedChk(ierr);
   ierr = CeedVectorSetArray(u_, mem, CEED_USE_POINTER,
                             out_ptr); PCeedChk(ierr);

   CeedSize length;
   ierr = CeedVectorGetLength(v_, &length); PCeedChk(ierr);

   const CeedScalar *multiplicitydata;
   CeedScalar *workdata;
   ierr = CeedVectorGetArrayRead(fine_multiplicity_r, mem,
                                 &multiplicitydata); PCeedChk(ierr);
   ierr = CeedVectorGetArrayWrite(fine_work, mem, &workdata); PCeedChk(ierr);
   MFEM_VERIFY((int)length == length, "length overflow");
   // Scale the fine input by the reciprocal multiplicity before
   // restricting.
   mfem::forall(length, [=] MFEM_HOST_DEVICE (int i)
   {workdata[i] = in_ptr[i] * multiplicitydata[i];});
   // NOTE(review): the error code of this restore call is never checked —
   // ierr is overwritten on the next line without a PCeedChk.
   ierr = CeedVectorRestoreArrayRead(fine_multiplicity_r,
                                     &multiplicitydata);
   ierr = CeedVectorRestoreArray(fine_work, &workdata); PCeedChk(ierr);

   ierr = CeedOperatorApply(op_restrict, fine_work, u_,
                            CEED_REQUEST_IMMEDIATE); PCeedChk(ierr);

   ierr = CeedVectorTakeArray(v_, mem, const_cast<CeedScalar**>(&in_ptr));
   PCeedChk(ierr);
   ierr = CeedVectorTakeArray(u_, mem, &out_ptr); PCeedChk(ierr);
}
605
// NOTE(review): the constructor's signature line is missing from this
// listing (AlgebraicSpaceHierarchy's constructor taking the finest
// FiniteElementSpace `fes`).
{
   // Number of levels: halve the polynomial order until it reaches zero.
   int order = fes.GetOrder(0);
   int nlevels = 0;
   int current_order = order;
   while (current_order > 0)
   {
      nlevels++;
      current_order = current_order/2;
   }

   // Every level shares the same (not owned) mesh.
   meshes.SetSize(nlevels);
   ownedMeshes.SetSize(nlevels);
   meshes = fes.GetMesh();
   ownedMeshes = false;

   fespaces.SetSize(nlevels);
   ownedFES.SetSize(nlevels);
   // Own all FESpaces except for the finest, own all prolongations
   ownedFES = true;
   fespaces[nlevels-1] = &fes;
   ownedFES[nlevels-1] = false;

   ceed_interpolations.SetSize(nlevels-1);
   R_tr.SetSize(nlevels-1);
   prolongations.SetSize(nlevels-1);
   ownedProlongations.SetSize(nlevels-1);

   current_order = order;

   Ceed ceed = internal::ceed;
   InitRestriction(fes, ceed, &fine_er);
   CeedElemRestriction er = fine_er;

   int dim = fes.GetMesh()->Dimension();
#ifdef MFEM_USE_MPI
   GroupCommunicator *gc = NULL;
   ParFiniteElementSpace *pfes = dynamic_cast<ParFiniteElementSpace*>(&fes);
   if (pfes)
   {
      gc = &pfes->GroupComm();
   }
#endif

   // Build the coarse spaces and interlevel transfers, fine to coarse.
   for (int ilevel=nlevels-2; ilevel>=0; --ilevel)
   {
      const int order_reduction = current_order - (current_order/2);
      // NOTE(review): the declaration of `space` (the coarse space being
      // built at this level) is missing from this listing.

#ifdef MFEM_USE_MPI
      if (pfes)
      {
         // NOTE(review): the `new ParAlgebraicCoarseSpace(` line is missing
         // from this listing; only its argument line remains below.
         *fespaces[ilevel+1], er, current_order, dim, order_reduction, gc);
         gc = parspace->GetGroupCommunicator();
         space = parspace;
      }
      else
#endif
      {
         // NOTE(review): the `new AlgebraicCoarseSpace(` line is missing
         // from this listing; only its argument line remains below.
         *fespaces[ilevel+1], er, current_order, dim, order_reduction);
      }
      current_order = current_order/2;
      fespaces[ilevel] = space;
      ceed_interpolations[ilevel] = new AlgebraicInterpolation(
         ceed,
         space->GetCeedCoarseToFine(),
         space->GetCeedElemRestriction(),
         er
      );
      // Wrap the finer level's restriction matrix (transposed) if present.
      const SparseMatrix *R = fespaces[ilevel+1]->GetRestrictionMatrix();
      if (R)
      {
         R_tr[ilevel] = new TransposeOperator(*R);
      }
      else
      {
         R_tr[ilevel] = NULL;
      }
      prolongations[ilevel] = ceed_interpolations[ilevel]->SetupRAP(
         space->GetProlongationMatrix(), R_tr[ilevel]);
      // SetupRAP may return the interpolation itself; only own a new object.
      ownedProlongations[ilevel]
         = prolongations[ilevel] != ceed_interpolations[ilevel];

      er = space->GetCeedElemRestriction();
   }
}
694
// NOTE(review): the constructor's name line is missing from this listing
// (AlgebraicCoarseSpace's constructor taking the parameters below).
   FiniteElementSpace &fine_fes,
   CeedElemRestriction fine_er,
   int order,
   int dim,
   int order_reduction_
) : order_reduction(order_reduction_)
{
   int ierr;
   // NOTE(review): redundant — order_reduction is already set in the member
   // initializer list above.
   order_reduction = order_reduction_;

   // NOTE(review): the trailing argument lines of the next two calls
   // (output element restriction / dof map, and the coarse-to-fine basis)
   // are missing from this listing.
   ierr = CeedATPMGElemRestriction(order, order_reduction, fine_er,
   PCeedChk(ierr);
   ierr = CeedBasisATPMGCoarseToFine(internal::ceed, order+1, dim,
   PCeedChk(ierr);
   CeedSize ndofs_;
   ierr = CeedElemRestrictionGetLVectorSize(ceed_elem_restriction, &ndofs_);
   PCeedChk(ierr);
   // Narrow to int and verify nothing was lost.
   ndofs = ndofs_;
   MFEM_VERIFY(ndofs == ndofs_, "ndofs overflow");

   mesh = fine_fes.GetMesh();
}
720
// NOTE(review): the destructor's signature line is missing from this
// listing (AlgebraicCoarseSpace's destructor).
{
   int ierr;
   delete [] dof_map;
   ierr = CeedBasisDestroy(&coarse_to_fine); PCeedChk(ierr);
   ierr = CeedElemRestrictionDestroy(&ceed_elem_restriction); PCeedChk(ierr);
}
728
729#ifdef MFEM_USE_MPI
730
// NOTE(review): the constructor's name line is missing from this listing
// (ParAlgebraicCoarseSpace's constructor taking the parameters below).
   FiniteElementSpace &fine_fes,
   CeedElemRestriction fine_er,
   int order,
   int dim,
   int order_reduction_,
   GroupCommunicator *gc_fine)
   : AlgebraicCoarseSpace(fine_fes, fine_er, order, dim, order_reduction_)
{
   CeedSize lsize;
   CeedElemRestrictionGetLVectorSize(ceed_elem_restriction, &lsize);
   const Table &group_ldof_fine = gc_fine->GroupLDofTable();

   MFEM_VERIFY((int)lsize == lsize, "size overflow");
   ldof_group.SetSize(lsize);
   ldof_group = 0;

   // Build the coarse group/ldof table by mapping each fine group dof
   // through dof_map; negative entries have no coarse counterpart.
   const GroupTopology &group_topo = gc_fine->GetGroupTopology();
   gc = new GroupCommunicator(group_topo);
   Table &group_ldof = gc->GroupLDofTable();
   group_ldof.MakeI(group_ldof_fine.Size());
   for (int g=1; g<group_ldof_fine.Size(); ++g)
   {
      int nldof_fine_g = group_ldof_fine.RowSize(g);
      const int *ldof_fine_g = group_ldof_fine.GetRow(g);
      for (int i=0; i<nldof_fine_g; ++i)
      {
         int icoarse = dof_map[ldof_fine_g[i]];
         if (icoarse >= 0)
         {
            group_ldof.AddAColumnInRow(g);
            ldof_group[icoarse] = g;
         }
      }
   }
   group_ldof.MakeJ();
   // Second pass fills in the connections counted above.
   for (int g=1; g<group_ldof_fine.Size(); ++g)
   {
      int nldof_fine_g = group_ldof_fine.RowSize(g);
      const int *ldof_fine_g = group_ldof_fine.GetRow(g);
      for (int i=0; i<nldof_fine_g; ++i)
      {
         int icoarse = dof_map[ldof_fine_g[i]];
         if (icoarse >= 0)
         {
            group_ldof.AddConnection(g, icoarse);
         }
      }
   }
   group_ldof.ShiftUpI();
   gc->Finalize();
   // Number the true dofs this rank masters, then broadcast so every ldof
   // learns its true-dof index.
   ldof_ltdof.SetSize(lsize);
   ldof_ltdof = -2;
   int ltsize = 0;
   for (int i=0; i<lsize; ++i)
   {
      int g = ldof_group[i];
      if (group_topo.IAmMaster(g))
      {
         ldof_ltdof[i] = ltsize;
         ++ltsize;
      }
   }
   gc->SetLTDofTable(ldof_ltdof);
   gc->Bcast(ldof_ltdof);

   // Boolean restriction matrix selecting the mastered ldofs.
   R_mat = new SparseMatrix(ltsize, lsize);
   for (int j=0; j<lsize; ++j)
   {
      if (group_topo.IAmMaster(ldof_group[j]))
      {
         int i = ldof_ltdof[j];
         R_mat->Set(i,j,1.0);
      }
   }
   R_mat->Finalize();

   // NOTE(review): the condition choosing the device prolongation variant
   // is missing from this listing — confirm against the upstream source.
   {
      P = new DeviceConformingProlongationOperator(*gc, R_mat);
   }
   else
   {
      P = new ConformingProlongationOperator(lsize, *gc);
   }
   // Assembled lazily in GetProlongationHypreParMatrix().
   P_mat = NULL;
}
818
// NOTE(review): the signature line is missing from this listing — this
// assembles (once) and caches the prolongation as a HypreParMatrix, P_mat.
{
   if (P_mat) { return P_mat; }

   ParMesh *pmesh = dynamic_cast<ParMesh*>(mesh);
   MFEM_VERIFY(pmesh != NULL, "");
   Array<HYPRE_BigInt> dof_offsets, tdof_offsets, tdof_nb_offsets;
   Array<HYPRE_BigInt> *offsets[2] = {&dof_offsets, &tdof_offsets};
   int lsize = P->Height();
   int ltsize = P->Width();
   HYPRE_BigInt loc_sizes[2] = {lsize, ltsize};
   pmesh->GenerateOffsets(2, loc_sizes, offsets);

   MPI_Comm comm = pmesh->GetComm();

   const GroupTopology &group_topo = gc->GetGroupTopology();

   if (HYPRE_AssumedPartitionCheck())
   {
      // communicate the neighbor offsets in tdof_nb_offsets
      int nsize = group_topo.GetNumNeighbors()-1;
      MPI_Request *requests = new MPI_Request[2*nsize];
      MPI_Status *statuses = new MPI_Status[2*nsize];
      tdof_nb_offsets.SetSize(nsize+1);
      tdof_nb_offsets[0] = tdof_offsets[0];

      // send and receive neighbors' local tdof offsets
      int request_counter = 0;
      for (int i = 1; i <= nsize; i++)
      {
         MPI_Irecv(&tdof_nb_offsets[i], 1, HYPRE_MPI_INT,
                   group_topo.GetNeighborRank(i), 5365, comm,
                   &requests[request_counter++]);
      }
      for (int i = 1; i <= nsize; i++)
      {
         MPI_Isend(&tdof_nb_offsets[0], 1, HYPRE_MPI_INT,
                   group_topo.GetNeighborRank(i), 5365, comm,
                   &requests[request_counter++]);
      }
      MPI_Waitall(request_counter, requests, statuses);

      delete [] statuses;
      delete [] requests;
   }

   // CSR arrays for the diagonal (locally owned tdofs) and off-diagonal
   // (tdofs mastered by other ranks) parts of the boolean prolongation.
   // Presumably ownership passes to the HypreParMatrix below — confirm
   // against the HypreParMatrix constructor documentation.
   HYPRE_Int *i_diag = Memory<HYPRE_Int>(lsize+1);
   HYPRE_Int *j_diag = Memory<HYPRE_Int>(ltsize);
   int diag_counter;

   HYPRE_Int *i_offd = Memory<HYPRE_Int>(lsize+1);
   HYPRE_Int *j_offd = Memory<HYPRE_Int>(lsize-ltsize);
   int offd_counter;

   HYPRE_BigInt *cmap = Memory<HYPRE_BigInt>(lsize-ltsize);

   HYPRE_BigInt *col_starts = tdof_offsets;
   HYPRE_BigInt *row_starts = dof_offsets;

   Array<Pair<HYPRE_BigInt, int> > cmap_j_offd(lsize-ltsize);

   i_diag[0] = i_offd[0] = 0;
   diag_counter = offd_counter = 0;
   for (int i_ldof = 0; i_ldof < lsize; i_ldof++)
   {
      int g = ldof_group[i_ldof];
      int i_ltdof = ldof_ltdof[i_ldof];
      if (group_topo.IAmMaster(g))
      {
         // Owned dof: one entry in the diagonal block.
         j_diag[diag_counter++] = i_ltdof;
      }
      else
      {
         // Shared dof owned elsewhere: compute its global tdof number.
         HYPRE_BigInt global_tdof_number;
         if (HYPRE_AssumedPartitionCheck())
         {
            global_tdof_number
               = i_ltdof + tdof_nb_offsets[group_topo.GetGroupMaster(g)];
         }
         else
         {
            global_tdof_number
               = i_ltdof + tdof_offsets[group_topo.GetGroupMasterRank(g)];
         }

         cmap_j_offd[offd_counter].one = global_tdof_number;
         cmap_j_offd[offd_counter].two = offd_counter;
         offd_counter++;
      }
      i_diag[i_ldof+1] = diag_counter;
      i_offd[i_ldof+1] = offd_counter;
   }

   // Sort the off-diagonal column map by global index, as hypre expects.
   SortPairs<HYPRE_BigInt, int>(cmap_j_offd, offd_counter);

   for (int i = 0; i < offd_counter; i++)
   {
      cmap[i] = cmap_j_offd[i].one;
      j_offd[cmap_j_offd[i].two] = i;
   }

   P_mat = new HypreParMatrix(
      comm, pmesh->GetMyRank(), pmesh->GetNRanks(),
      row_starts, col_starts,
      i_diag, j_diag, i_offd, j_offd,
      cmap, offd_counter
   );

   P_mat->CopyRowStarts();
   P_mat->CopyColStarts();

   return P_mat;
}
932
// NOTE(review): the destructor's signature line is missing from this
// listing (ParAlgebraicCoarseSpace's destructor).
{
   delete P;
   delete R_mat;
   delete P_mat;
   delete gc;
}
940
941#endif // MFEM_USE_MPI
942
943#endif // MFEM_USE_CEED
944
// NOTE(review): the constructor's first signature line is missing from this
// listing (AlgebraicSolver's constructor taking a BilinearForm `form` and
// the essential dofs below).
   const Array<int>& ess_tdofs)
{
   MFEM_VERIFY(DeviceCanUseCeed(),
               "AlgebraicSolver requires a Ceed device");
   // NOTE(review): the second alternative of this assembly-level check is
   // missing from this listing (per the message, presumably the fully
   // matrix-free level) — confirm against the upstream source.
   MFEM_VERIFY(form.GetAssemblyLevel() == AssemblyLevel::PARTIAL ||
               "AlgebraicSolver requires partial assembly or fully matrix-free.");
   MFEM_VERIFY(UsesTensorBasis(*form.FESpace()),
               "AlgebraicSolver requires tensor product basis functions.");
#ifdef MFEM_USE_CEED
   // Build the space hierarchy and the multigrid solver over it (owned).
   fespaces = new AlgebraicSpaceHierarchy(*form.FESpace());
   multigrid = new AlgebraicMultigrid(*fespaces, form, ess_tdofs);
#else
   MFEM_ABORT("AlgebraicSolver requires Ceed support");
#endif
}
962
// NOTE(review): the destructor's signature line is missing from this
// listing (AlgebraicSolver's destructor).
{
#ifdef MFEM_USE_CEED
   delete fespaces;
   delete multigrid;
#endif
}
970
/// Forward the application to the algebraic multigrid preconditioner.
void AlgebraicSolver::Mult(const Vector& x, Vector& y) const
{
#ifdef MFEM_USE_CEED
   multigrid->Mult(x, y);
#endif
}
977
// NOTE(review): the signature line is missing from this listing
// (AlgebraicSolver::SetOperator); it forwards to the multigrid.
{
#ifdef MFEM_USE_CEED
   multigrid->SetOperator(op);
#endif
}
984
985} // namespace ceed
986
987} // namespace mfem
const T * HostRead() const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), false).
Definition array.hpp:385
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
Definition array.hpp:840
int Size() const
Return the logical size of the array.
Definition array.hpp:166
int Append(const T &el)
Append element 'el' to array, resize if necessary.
Definition array.hpp:912
Abstract base class BilinearFormIntegrator.
A "square matrix" operator for the associated FE space and BLFIntegrators The sum of all the BLFInteg...
Array< BilinearFormIntegrator * > * GetFBFI()
Access all integrators added with AddInteriorFaceIntegrator().
Array< BilinearFormIntegrator * > * GetDBFI()
Access all the integrators added with AddDomainIntegrator().
FiniteElementSpace * FESpace()
Return the FE space associated with the BilinearForm.
AssemblyLevel GetAssemblyLevel() const
Returns the assembly level.
Array< BilinearFormIntegrator * > * GetBBFI()
Access all the integrators added with AddBoundaryIntegrator().
Array< BilinearFormIntegrator * > * GetBFBFI()
Access all integrators added with AddBdrFaceIntegrator().
Auxiliary class used by ParFiniteElementSpace.
Definition pfespace.hpp:567
Square Operator for imposing essential boundary conditions using only the action, Mult(),...
Auxiliary device class used by ParFiniteElementSpace.
Definition pfespace.hpp:596
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Definition device.hpp:262
int GetNumLevels() const
Returns the number of levels in the hierarchy.
Operator * GetProlongationAtLevel(int level) const
Returns the prolongation operator from the finite element space at level to the finite element space ...
virtual const FiniteElementSpace & GetFESpaceAtLevel(int level) const
Returns the finite element space at the given level.
Array< FiniteElementSpace * > fespaces
Class FiniteElementSpace - responsible for providing FEM view of the mesh, mainly managing the set of...
Definition fespace.hpp:208
int GetOrder(int i) const
Returns the polynomial degree of the i'th finite element.
Definition fespace.hpp:807
int ndofs
Number of degrees of freedom. Number of unknowns is ndofs * vdim.
Definition fespace.hpp:231
Mesh * mesh
The mesh that FE space lives on (not owned).
Definition fespace.hpp:217
Mesh * GetMesh() const
Returns the mesh.
Definition fespace.hpp:644
Geometric multigrid associated with a hierarchy of finite element spaces.
Array< Array< int > * > essentialTrueDofs
const FiniteElementSpaceHierarchy & fespaces
Communicator performing operations within groups defined by a GroupTopology with arbitrary-size data ...
Table & GroupLDofTable()
Fill-in the returned Table reference to initialize the GroupCommunicator then call Finalize().
const GroupTopology & GetGroupTopology()
Get a reference to the associated GroupTopology object.
void Bcast(T *ldata, int layout) const
Broadcast within each group where the master is the root.
void SetLTDofTable(const Array< int > &ldof_ltdof)
Initialize the internal group_ltdof Table.
void Finalize()
Allocate internal buffers after the GroupLDofTable is defined.
int GetNeighborRank(int i) const
Return the MPI rank of neighbor 'i'.
bool IAmMaster(int g) const
Return true if I am master for group 'g'.
int GetGroupMaster(int g) const
Return the neighbor index of the group master for a given group. Neighbor 0 is the local processor.
int GetGroupMasterRank(int g) const
Return the rank of the group master for group 'g'.
int GetNumNeighbors() const
Return the number of neighbors including the local processor.
Wrapper for hypre's ParCSR matrix class.
Definition hypre.hpp:419
HYPRE_BigInt GetGlobalNumRows() const
Return the global number of rows.
Definition hypre.hpp:710
MPI_Comm GetComm() const
MPI communicator.
Definition hypre.hpp:609
HYPRE_BigInt * RowPart()
Returns the row partitioning.
Definition hypre.hpp:644
Class used by MFEM to store pointers to host and/or device memory.
int Dimension() const
Dimension of the reference space used within the elements.
Definition mesh.hpp:1306
void AddLevel(Operator *op, Solver *smoother, bool ownOperator, bool ownSmoother)
Adds a level to the multigrid operator hierarchy.
Definition multigrid.cpp:87
void Mult(const Vector &x, Vector &y) const override
Application of the multigrid as a preconditioner.
virtual bool SupportsCeed() const
Indicates whether this integrator can use a Ceed backend.
Chebyshev accelerated smoothing with given vector, no matrix necessary.
Definition solvers.hpp:503
Jacobi smoothing for a given bilinear form (no matrix necessary).
Definition solvers.hpp:422
Abstract operator.
Definition operator.hpp:25
int width
Dimension of the input / number of columns in the matrix.
Definition operator.hpp:28
int Height() const
Get the height (size of output) of the Operator. Synonym with NumRows().
Definition operator.hpp:66
int height
Dimension of the output / number of rows in the matrix.
Definition operator.hpp:27
virtual void Mult(const Vector &x, Vector &y) const =0
Operator application: y=A(x).
int Width() const
Get the width (size of input) of the Operator. Synonym with NumCols().
Definition operator.hpp:72
Operator * SetupRAP(const Operator *Pi, const Operator *Po)
Returns RAP Operator of this, using input/output Prolongation matrices Pi corresponds to "P",...
Definition operator.cpp:168
virtual void MultTranspose(const Vector &x, Vector &y) const
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
Definition operator.hpp:100
Abstract parallel finite element space.
Definition pfespace.hpp:31
GroupCommunicator & GroupComm()
Return a reference to the internal GroupCommunicator (on VDofs)
Definition pfespace.hpp:408
HypreParMatrix * Dof_TrueDof_Matrix() const
The true dof-to-dof interpolation matrix.
Definition pfespace.hpp:391
Class for parallel meshes.
Definition pmesh.hpp:34
MPI_Comm GetComm() const
Definition pmesh.hpp:405
int GetMyRank() const
Definition pmesh.hpp:407
int GetNRanks() const
Definition pmesh.hpp:406
void GenerateOffsets(int N, HYPRE_BigInt loc_sizes[], Array< HYPRE_BigInt > *offsets[]) const
Definition pmesh.cpp:1953
Base class for solvers.
Definition operator.hpp:792
virtual void SetOperator(const Operator &op)=0
Set/update the solver for the given operator.
Data type sparse matrix.
Definition sparsemat.hpp:51
void Finalize(int skip_zeros=1) override
Finalize the matrix initialization, switching the storage format from LIL to CSR.
void Set(const int i, const int j, const real_t val)
Table stores the connectivity of elements of TYPE I to elements of TYPE II. For example,...
Definition table.hpp:43
int RowSize(int i) const
Definition table.hpp:122
void ShiftUpI()
Definition table.cpp:163
void GetRow(int i, Array< int > &row) const
Return row i in array row (the Table must be finalized)
Definition table.cpp:233
void AddConnection(int r, int c)
Definition table.hpp:89
void MakeI(int nrows)
Definition table.cpp:130
int Size() const
Returns the number of TYPE I elements.
Definition table.hpp:103
void MakeJ()
Definition table.cpp:140
void AddAColumnInRow(int r)
Definition table.hpp:86
The transpose of a given operator. Switches the roles of the methods Mult() and MultTranspose().
Definition operator.hpp:859
Vector data type.
Definition vector.hpp:82
virtual const real_t * HostRead() const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), false).
Definition vector.hpp:524
virtual const real_t * Read(bool on_dev=true) const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:520
virtual real_t * ReadWrite(bool on_dev=true)
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:536
Memory< real_t > & GetMemory()
Return a reference to the Memory object used by the Vector.
Definition vector.hpp:265
void NewMemoryAndSize(const Memory< real_t > &mem, int s, bool own_mem)
Reset the Vector to use the given external Memory mem and size s.
Definition vector.hpp:645
int Size() const
Returns the size of the vector.
Definition vector.hpp:234
void SetSize(int s)
Resize the vector to size s.
Definition vector.hpp:584
virtual real_t * HostWrite()
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), false).
Definition vector.hpp:532
virtual real_t * HostReadWrite()
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), false).
Definition vector.hpp:540
virtual real_t * Write(bool on_dev=true)
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:528
A way to use algebraic levels in a Multigrid object.
Definition algebraic.hpp:32
AlgebraicCoarseSpace(FiniteElementSpace &fine_fes, CeedElemRestriction fine_er, int order, int dim, int order_reduction_)
CeedElemRestriction ceed_elem_restriction
Definition algebraic.hpp:46
Multigrid interpolation operator in Ceed framework.
Definition algebraic.hpp:89
virtual void Mult(const mfem::Vector &x, mfem::Vector &y) const
Operator application: y=A(x).
AlgebraicInterpolation(Ceed ceed, CeedBasis basisctof, CeedElemRestriction erestrictu_coarse, CeedElemRestriction erestrictu_fine)
virtual void MultTranspose(const mfem::Vector &x, mfem::Vector &y) const
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
Extension of Multigrid object to algebraically generated coarse spaces.
virtual void SetOperator(const mfem::Operator &op) override
Set/update the solver for the given operator.
AlgebraicSolver(BilinearForm &form, const Array< int > &ess_tdofs)
Constructs algebraic multigrid hierarchy and solver.
void SetOperator(const mfem::Operator &op)
Set/update the solver for the given operator.
void Mult(const Vector &x, Vector &y) const
Operator application: y=A(x).
Hierarchy of AlgebraicCoarseSpace objects for use in Multigrid object.
AlgebraicSpaceHierarchy(FiniteElementSpace &fespace)
Construct hierarchy based on finest FiniteElementSpace.
AlgebraicCoarseSpace & GetAlgebraicCoarseSpace(int level)
CeedOperator & GetCeedOperator()
Definition operator.hpp:55
Parallel version of AlgebraicCoarseSpace.
Definition algebraic.hpp:57
ParAlgebraicCoarseSpace(FiniteElementSpace &fine_fes, CeedElemRestriction fine_er, int order, int dim, int order_reduction_, GroupCommunicator *gc_fine)
GroupCommunicator * GetGroupCommunicator() const
Definition algebraic.hpp:69
HypreParMatrix * GetProlongationHypreParMatrix()
int dim
Definition ex24.cpp:53
HYPRE_Int HYPRE_BigInt
real_t b
Definition lissajous.cpp:42
real_t a
Definition lissajous.cpp:41
string space
int CeedBasisATPMGCoarseToFine(Ceed ceed, int P1d, int dim, int order_reduction, CeedBasis *basisc2f)
Create coarse-to-fine basis, given number of input nodes and order reduction.
Solver * BuildSmootherFromCeed(ConstrainedOperator &op, bool chebyshev)
void CoarsenEssentialDofs(const mfem::Operator &interp, const Array< int > &ho_ess_tdofs, Array< int > &alg_lo_ess_tdofs)
void InitRestriction(const FiniteElementSpace &fes, Ceed ceed, CeedElemRestriction *restr)
Initialize a CeedElemRestriction for non-mixed meshes.
int CeedOperatorGetSize(CeedOperator oper, CeedInt *size)
Definition algebraic.cpp:98
CeedOperator CreateCeedCompositeOperatorFromBilinearForm(BilinearForm &form)
int CeedVectorPointwiseMult(CeedVector a, const CeedVector b)
int CeedATPMGOperator(CeedOperator oper, int order_reduction, CeedElemRestriction coarse_er, CeedBasis coarse_basis_in, CeedBasis basis_ctof_in, CeedOperator *out)
CeedOperator CoarsenCeedCompositeOperator(CeedOperator op, CeedElemRestriction er, CeedBasis c2f, int order_reduction)
void AddToCompositeOperator(BilinearFormIntegrator *integ, CeedOperator op)
int CeedATPMGElemRestriction(int order, int order_reduction, CeedElemRestriction er_in, CeedElemRestriction *er_out, CeedInt *&dof_map)
Take given (high-order) CeedElemRestriction and make a new CeedElemRestriction, which corresponds to ...
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
Definition globals.hpp:66
void RAP(const DenseMatrix &A, const DenseMatrix &P, DenseMatrix &RAP)
bool DeviceCanUseCeed()
Function that determines if a CEED kernel should be used, based on the current mfem::Device configura...
Definition util.cpp:33
bool UsesTensorBasis(const FiniteElementSpace &fes)
Return true if the mesh contains only one topology and the elements are tensor elements.
Definition fespace.hpp:1548
void SortPairs(Pair< A, B > *pairs, int size)
Sort an array of Pairs with respect to the first element.
void forall(int N, lambda &&body)
Definition forall.hpp:839
@ CUDA
[device] CUDA backend. Enabled when MFEM_USE_CUDA = YES.
Definition device.hpp:40
@ DEVICE_MASK
Bitwise-OR of all device backends.
Definition device.hpp:99