MFEM v4.7.0
Finite element discretization library
algebraic.cpp
1// Copyright (c) 2010-2024, Lawrence Livermore National Security, LLC. Produced
2// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3// LICENSE and NOTICE for details. LLNL-CODE-806117.
4//
5// This file is part of the MFEM library. For more information and source code
6// availability visit https://mfem.org.
7//
8// MFEM is free software; you can redistribute it and/or modify it under the
9// terms of the BSD-3 license. We welcome feedback and contributions, see file
10// CONTRIBUTING.md for details.
11
12#include "algebraic.hpp"
13
14#include "../../bilinearform.hpp"
15#include "../../fespace.hpp"
16#include "../../pfespace.hpp"
17#include "../../../general/forall.hpp"
18#include "solvers-atpmg.hpp"
19#include "full-assembly.hpp"
20#include "../interface/restriction.hpp"
21#include "../interface/ceed.hpp"
22
23namespace mfem
24{
25
26namespace ceed
27{
28
29#ifdef MFEM_USE_CEED
30
31/** Wraps a CeedOperator in an mfem::Operator, with essential boundary
32 conditions and a prolongation operator for parallel application. */
33class ConstrainedOperator : public mfem::Operator
34{
35public:
36 /// This object takes ownership of oper and will delete it
37 ConstrainedOperator(CeedOperator oper, const Array<int> &ess_tdofs_,
38 const mfem::Operator *P_);
39 ConstrainedOperator(CeedOperator oper, const mfem::Operator *P_);
40 ~ConstrainedOperator();
41 void Mult(const Vector& x, Vector& y) const;
42 CeedOperator GetCeedOperator() const;
43 const Array<int> &GetEssentialTrueDofs() const;
44 const mfem::Operator *GetProlongation() const;
45private:
46 Array<int> ess_tdofs;
47 const mfem::Operator *P;
48 ceed::Operator *unconstrained_op;
49 mfem::ConstrainedOperator *constrained_op;
50};
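
// Illustrative usage sketch (not part of the original file; `ceed_op`, `fes`,
// `x` and `y` are hypothetical): the wrapper takes ownership of the
// CeedOperator and applies its P^T A P action with the essential rows and
// columns eliminated.
//
//    ConstrainedOperator A(ceed_op, ess_tdofs, fes.GetProlongationMatrix());
//    A.Mult(x, y);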
51
52ConstrainedOperator::ConstrainedOperator(
53 CeedOperator oper,
54 const Array<int> &ess_tdofs_,
55 const mfem::Operator *P_)
56 : ess_tdofs(ess_tdofs_), P(P_)
57{
58 unconstrained_op = new ceed::Operator(oper);
59 mfem::Operator *rap = unconstrained_op->SetupRAP(P, P);
60 height = width = rap->Height();
61 bool own_rap = (rap != unconstrained_op);
62 constrained_op = new mfem::ConstrainedOperator(rap, ess_tdofs, own_rap);
63}
64
65ConstrainedOperator::ConstrainedOperator(CeedOperator oper,
66 const mfem::Operator *P_)
67 : ConstrainedOperator(oper, Array<int>(), P_)
68{ }
69
70ConstrainedOperator::~ConstrainedOperator()
71{
72 delete constrained_op;
73 delete unconstrained_op;
74}
75
76void ConstrainedOperator::Mult(const Vector& x, Vector& y) const
77{
78 constrained_op->Mult(x, y);
79}
80
81CeedOperator ConstrainedOperator::GetCeedOperator() const
82{
83 return unconstrained_op->GetCeedOperator();
84}
85
86const Array<int> &ConstrainedOperator::GetEssentialTrueDofs() const
87{
88 return ess_tdofs;
89}
90
91const mfem::Operator *ConstrainedOperator::GetProlongation() const
92{
93 return P;
94}
95
96/// Assumes a square operator (a rectangular version would need separate
97/// active input and output fields/restrictions).
98int CeedOperatorGetSize(CeedOperator oper, CeedInt * size)
99{
100 CeedSize in_len, out_len;
101 int ierr = CeedOperatorGetActiveVectorLengths(oper, &in_len, &out_len);
102 PCeedChk(ierr);
103 *size = (CeedInt)in_len;
104 MFEM_VERIFY(in_len == out_len, "not a square CeedOperator");
105 MFEM_VERIFY(in_len == *size, "size overflow");
106 return 0;
107}
108
109Solver *BuildSmootherFromCeed(ConstrainedOperator &op, bool chebyshev)
110{
111 int ierr;
112 CeedOperator ceed_op = op.GetCeedOperator();
113 const Array<int> &ess_tdofs = op.GetEssentialTrueDofs();
114 const mfem::Operator *P = op.GetProlongation();
115 // Assemble the local diagonal, in the sense of an L-vector
116 CeedVector diagceed;
117 CeedInt length;
118 ierr = CeedOperatorGetSize(ceed_op, &length); PCeedChk(ierr);
119 ierr = CeedVectorCreate(internal::ceed, length, &diagceed); PCeedChk(ierr);
120 CeedMemType mem;
121 ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
122 if (!Device::Allows(Backend::CUDA) || mem != CEED_MEM_DEVICE)
123 {
124 mem = CEED_MEM_HOST;
125 }
126 Vector local_diag(length);
127 CeedScalar *ptr = (mem == CEED_MEM_HOST) ? local_diag.HostWrite() :
128 local_diag.Write(true);
129 ierr = CeedVectorSetArray(diagceed, mem, CEED_USE_POINTER, ptr);
130 PCeedChk(ierr);
131 ierr = CeedOperatorLinearAssembleDiagonal(ceed_op, diagceed,
132 CEED_REQUEST_IMMEDIATE);
133 PCeedChk(ierr);
134 ierr = CeedVectorTakeArray(diagceed, mem, NULL); PCeedChk(ierr);
135
136 Vector t_diag;
137 if (P)
138 {
139 t_diag.SetSize(P->Width());
140 P->MultTranspose(local_diag, t_diag);
141 }
142 else
143 {
144 t_diag.NewMemoryAndSize(local_diag.GetMemory(), length, false);
145 }
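   // Note: t_diag now holds the diagonal on true dofs; applying P^T
   // accumulates the L-vector entries of dofs shared across processors.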
146 Solver *out = NULL;
147 if (chebyshev)
148 {
149 const int cheb_order = 3;
150 out = new OperatorChebyshevSmoother(op, t_diag, ess_tdofs, cheb_order);
151 }
152 else
153 {
154 const double jacobi_scale = 0.65;
155 out = new OperatorJacobiSmoother(t_diag, ess_tdofs, jacobi_scale);
156 }
157 ierr = CeedVectorDestroy(&diagceed); PCeedChk(ierr);
158 return out;
159}
160
161#ifdef MFEM_USE_MPI
162
163/// Builds and applies assembled AMG to a CeedOperator
164class AssembledAMG : public Solver
165{
166public:
167 AssembledAMG(ConstrainedOperator &oper, HypreParMatrix *P)
168 {
169 MFEM_ASSERT(P != NULL, "Provided HypreParMatrix is invalid!");
170 height = width = oper.Height();
171
172 int ierr;
173 const Array<int> ess_tdofs = oper.GetEssentialTrueDofs();
174
175 ierr = CeedOperatorFullAssemble(oper.GetCeedOperator(), &mat_local);
176 PCeedChk(ierr);
177
178 {
179 HypreParMatrix hypre_local(
180 P->GetComm(), P->GetGlobalNumRows(), P->RowPart(), mat_local);
181 op_assembled = RAP(&hypre_local, P);
182 }
183 HypreParMatrix *mat_e = op_assembled->EliminateRowsCols(ess_tdofs);
184 delete mat_e;
185 amg = new HypreBoomerAMG(*op_assembled);
186 amg->SetPrintLevel(0);
187 }
188 void SetOperator(const mfem::Operator &op) override { }
189 void Mult(const Vector &x, Vector &y) const override { amg->Mult(x, y); }
190 ~AssembledAMG()
191 {
192 delete op_assembled;
193 delete amg;
194 delete mat_local;
195 }
196private:
197 SparseMatrix *mat_local;
198 HypreParMatrix *op_assembled;
199 HypreBoomerAMG *amg;
200};
201
202#endif // MFEM_USE_MPI
203
204void CoarsenEssentialDofs(const mfem::Operator &interp,
205 const Array<int> &ho_ess_tdofs,
206 Array<int> &alg_lo_ess_tdofs)
207{
208 Vector ho_boundary_ones(interp.Height());
209 ho_boundary_ones = 0.0;
210 const int *ho_ess_tdofs_h = ho_ess_tdofs.HostRead();
211 for (int i=0; i<ho_ess_tdofs.Size(); ++i)
212 {
213 ho_boundary_ones[ho_ess_tdofs_h[i]] = 1.0;
214 }
215 Vector lo_boundary_ones(interp.Width());
216 interp.MultTranspose(ho_boundary_ones, lo_boundary_ones);
217 auto lobo = lo_boundary_ones.HostRead();
218 for (int i = 0; i < lo_boundary_ones.Size(); ++i)
219 {
220 if (lobo[i] > 0.9)
221 {
222 alg_lo_ess_tdofs.Append(i);
223 }
224 }
225}
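
// Illustrative example (not from the original source): if fine true dof k is
// essential, ho_boundary_ones[k] = 1.0; interp.MultTranspose() then deposits
// a positive value into every coarse dof whose interpolation row touches k,
// and the 0.9 threshold marks exactly those coarse dofs as essential.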
226
227void AddToCompositeOperator(BilinearFormIntegrator *integ, CeedOperator op)
228{
229 if (integ->SupportsCeed())
230 {
231 CeedCompositeOperatorAddSub(op, integ->GetCeedOp().GetCeedOperator());
232 }
233 else
234 {
235 MFEM_ABORT("This integrator does not support Ceed!");
236 }
237}
238
239CeedOperator CreateCeedCompositeOperatorFromBilinearForm(BilinearForm &form)
240{
241 int ierr;
242 CeedOperator op;
243 ierr = CeedCompositeOperatorCreate(internal::ceed, &op); PCeedChk(ierr);
244
245 MFEM_VERIFY(form.GetBBFI()->Size() == 0,
246 "Not implemented for this integrator!");
247 MFEM_VERIFY(form.GetFBFI()->Size() == 0,
248 "Not implemented for this integrator!");
249 MFEM_VERIFY(form.GetBFBFI()->Size() == 0,
250 "Not implemented for this integrator!");
251
252 // Get the domain bilinear form integrators (DBFIs)
253 Array<BilinearFormIntegrator*> *bffis = form.GetDBFI();
254 for (int i = 0; i < bffis->Size(); ++i)
255 {
256 AddToCompositeOperator((*bffis)[i], op);
257 }
258 return op;
259}
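
// Illustrative usage sketch (hypothetical driver code; `fes` and `coeff` are
// assumed): for a BilinearForm whose domain integrators all support libCEED,
//
//    BilinearForm a(&fes);
//    a.AddDomainIntegrator(new DiffusionIntegrator(coeff));
//    a.SetAssemblyLevel(AssemblyLevel::PARTIAL);
//    a.Assemble();
//    CeedOperator op = CreateCeedCompositeOperatorFromBilinearForm(a);
//
// the returned composite operator applies the sum of the domain integrators.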
260
261CeedOperator CoarsenCeedCompositeOperator(
262 CeedOperator op,
263 CeedElemRestriction er,
264 CeedBasis c2f,
265 int order_reduction
266)
267{
268 int ierr;
269 bool isComposite;
270 ierr = CeedOperatorIsComposite(op, &isComposite); PCeedChk(ierr);
271 MFEM_ASSERT(isComposite, "");
272
273 CeedOperator op_coarse;
274 ierr = CeedCompositeOperatorCreate(internal::ceed,
275 &op_coarse); PCeedChk(ierr);
276
277 int nsub;
278 CeedOperator *subops;
279#if CEED_VERSION_GE(0, 10, 2)
280 ierr = CeedCompositeOperatorGetNumSub(op, &nsub); PCeedChk(ierr);
281 ierr = CeedCompositeOperatorGetSubList(op, &subops); PCeedChk(ierr);
282#else
283 ierr = CeedOperatorGetNumSub(op, &nsub); PCeedChk(ierr);
284 ierr = CeedOperatorGetSubList(op, &subops); PCeedChk(ierr);
285#endif
286 for (int isub=0; isub<nsub; ++isub)
287 {
288 CeedOperator subop = subops[isub];
289 CeedBasis basis_coarse, basis_c2f;
290 CeedOperator subop_coarse;
291 ierr = CeedATPMGOperator(subop, order_reduction, er, &basis_coarse,
292 &basis_c2f, &subop_coarse); PCeedChk(ierr);
293 // destructions below make sense because these objects are
294 // refcounted by existing objects
295 ierr = CeedBasisDestroy(&basis_coarse); PCeedChk(ierr);
296 ierr = CeedBasisDestroy(&basis_c2f); PCeedChk(ierr);
297 ierr = CeedCompositeOperatorAddSub(op_coarse, subop_coarse);
298 PCeedChk(ierr);
299 ierr = CeedOperatorDestroy(&subop_coarse); PCeedChk(ierr);
300 }
301 return op_coarse;
302}
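
// Note (summary, not from the original source): each suboperator is coarsened
// by CeedATPMGOperator, which, roughly speaking, swaps in an order-reduced
// active basis while keeping the suboperator's quadrature data, so the coarse
// levels need no re-assembly of coefficients; see solvers-atpmg.hpp.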
303
304AlgebraicMultigrid::AlgebraicMultigrid(
305 AlgebraicSpaceHierarchy &hierarchy,
306 BilinearForm &form,
307 const Array<int> &ess_tdofs
308) : GeometricMultigrid(hierarchy, Array<int>())
309{
310 int nlevels = fespaces.GetNumLevels();
311 ceed_operators.SetSize(nlevels);
312 essentialTrueDofs.SetSize(nlevels);
313
314 // Construct finest level
315 ceed_operators[nlevels-1] = CreateCeedCompositeOperatorFromBilinearForm(form);
316 essentialTrueDofs[nlevels-1] = new Array<int>;
317 *essentialTrueDofs[nlevels-1] = ess_tdofs;
318
319 // Construct operators at all levels of hierarchy by coarsening
320 for (int ilevel=nlevels-2; ilevel>=0; --ilevel)
321 {
322 AlgebraicCoarseSpace &space = hierarchy.GetAlgebraicCoarseSpace(ilevel);
323 ceed_operators[ilevel] = CoarsenCeedCompositeOperator(
324 ceed_operators[ilevel+1], space.GetCeedElemRestriction(),
325 space.GetCeedCoarseToFine(), space.GetOrderReduction());
326 mfem::Operator *P = hierarchy.GetProlongationAtLevel(ilevel);
327 essentialTrueDofs[ilevel] = new Array<int>;
328 CoarsenEssentialDofs(*P, *essentialTrueDofs[ilevel+1],
329 *essentialTrueDofs[ilevel]);
330 }
331
332 // Add the operators and smoothers to the hierarchy, from coarse to fine
333 for (int ilevel=0; ilevel<nlevels; ++ilevel)
334 {
335 FiniteElementSpace &space = hierarchy.GetFESpaceAtLevel(ilevel);
336 const mfem::Operator *P = space.GetProlongationMatrix();
337 ConstrainedOperator *op = new ConstrainedOperator(
338 ceed_operators[ilevel], *essentialTrueDofs[ilevel], P);
339 Solver *smoother;
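      // Coarsest level and not on CUDA: assemble the operator and use
      // BoomerAMG when a parallel prolongation matrix is available;
      // otherwise fall back to a matrix-free Chebyshev smoother built from
      // the CEED-assembled diagonal.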
340#ifdef MFEM_USE_MPI
341 if (ilevel == 0 && !Device::Allows(Backend::CUDA))
342 {
343 HypreParMatrix *P_mat = NULL;
344 if (nlevels == 1)
345 {
346 // Only one level -- no coarsening, finest level
347 ParFiniteElementSpace *pfes
348 = dynamic_cast<ParFiniteElementSpace*>(&space);
349 if (pfes) { P_mat = pfes->Dof_TrueDof_Matrix(); }
350 }
351 else
352 {
353 ParAlgebraicCoarseSpace *pspace
354 = dynamic_cast<ParAlgebraicCoarseSpace*>(&space);
355 if (pspace) { P_mat = pspace->GetProlongationHypreParMatrix(); }
356 }
357 if (P_mat) { smoother = new AssembledAMG(*op, P_mat); }
358 else { smoother = BuildSmootherFromCeed(*op, true); }
359 }
360 else
361#endif
362 {
363 smoother = BuildSmootherFromCeed(*op, true);
364 }
365 AddLevel(op, smoother, true, true);
366 }
367}
368
372
373int AlgebraicInterpolation::Initialize(
374 Ceed ceed, CeedBasis basisctof,
375 CeedElemRestriction erestrictu_coarse, CeedElemRestriction erestrictu_fine)
376{
377 int ierr = 0;
378
379 CeedSize height, width;
380 ierr = CeedElemRestrictionGetLVectorSize(erestrictu_coarse, &width);
381 PCeedChk(ierr);
382 ierr = CeedElemRestrictionGetLVectorSize(erestrictu_fine, &height);
383 PCeedChk(ierr);
384
385 // interpolation qfunction
386 const int bp3_ncompu = 1;
387 CeedQFunction l_qf_restrict, l_qf_prolong;
388 ierr = CeedQFunctionCreateIdentity(ceed, bp3_ncompu, CEED_EVAL_NONE,
389 CEED_EVAL_INTERP, &l_qf_restrict); PCeedChk(ierr);
390 ierr = CeedQFunctionCreateIdentity(ceed, bp3_ncompu, CEED_EVAL_INTERP,
391 CEED_EVAL_NONE, &l_qf_prolong); PCeedChk(ierr);
392
393 qf_restrict = l_qf_restrict;
394 qf_prolong = l_qf_prolong;
395
396 CeedVector c_fine_multiplicity;
397 ierr = CeedVectorCreate(ceed, height, &c_fine_multiplicity); PCeedChk(ierr);
398 ierr = CeedVectorSetValue(c_fine_multiplicity, 0.0); PCeedChk(ierr);
399
400 // Create the restriction operator
401 // Restriction - Fine to coarse
402 ierr = CeedOperatorCreate(ceed, qf_restrict, CEED_QFUNCTION_NONE,
403 CEED_QFUNCTION_NONE, &op_restrict); PCeedChk(ierr);
404 ierr = CeedOperatorSetField(op_restrict, "input", erestrictu_fine,
405 CEED_BASIS_NONE, CEED_VECTOR_ACTIVE); PCeedChk(ierr);
406 ierr = CeedOperatorSetField(op_restrict, "output", erestrictu_coarse,
407 basisctof, CEED_VECTOR_ACTIVE); PCeedChk(ierr);
408
409 // Interpolation - Coarse to fine
410 // Create the prolongation operator
411 ierr = CeedOperatorCreate(ceed, qf_prolong, CEED_QFUNCTION_NONE,
412 CEED_QFUNCTION_NONE, &op_interp); PCeedChk(ierr);
413 ierr = CeedOperatorSetField(op_interp, "input", erestrictu_coarse,
414 basisctof, CEED_VECTOR_ACTIVE); PCeedChk(ierr);
415 ierr = CeedOperatorSetField(op_interp, "output", erestrictu_fine,
416 CEED_BASIS_NONE, CEED_VECTOR_ACTIVE); PCeedChk(ierr);
417
418 ierr = CeedElemRestrictionGetMultiplicity(erestrictu_fine,
419 c_fine_multiplicity); PCeedChk(ierr);
420 ierr = CeedVectorCreate(ceed, height, &fine_multiplicity_r); PCeedChk(ierr);
421
422 CeedScalar* fine_r_data;
423 const CeedScalar* fine_data;
424 ierr = CeedVectorGetArrayWrite(fine_multiplicity_r, CEED_MEM_HOST,
425 &fine_r_data); PCeedChk(ierr);
426 ierr = CeedVectorGetArrayRead(c_fine_multiplicity, CEED_MEM_HOST,
427 &fine_data); PCeedChk(ierr);
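   // Store 1/multiplicity for each fine L-dof; Mult() scales the
   // interpolated values by this vector so dofs shared by several elements
   // are averaged rather than summed.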
428 for (CeedSize i = 0; i < height; ++i)
429 {
430 fine_r_data[i] = 1.0 / fine_data[i];
431 }
432
433 ierr = CeedVectorRestoreArray(fine_multiplicity_r, &fine_r_data);
434 PCeedChk(ierr);
435 ierr = CeedVectorRestoreArrayRead(c_fine_multiplicity, &fine_data);
436 PCeedChk(ierr);
437 ierr = CeedVectorDestroy(&c_fine_multiplicity); PCeedChk(ierr);
438
439 ierr = CeedVectorCreate(ceed, height, &fine_work); PCeedChk(ierr);
440
441 ierr = CeedVectorCreate(ceed, height, &v_); PCeedChk(ierr);
442 ierr = CeedVectorCreate(ceed, width, &u_); PCeedChk(ierr);
443
444 return 0;
445}
446
447int AlgebraicInterpolation::Finalize()
448{
449 int ierr;
450
451 ierr = CeedQFunctionDestroy(&qf_restrict); PCeedChk(ierr);
452 ierr = CeedQFunctionDestroy(&qf_prolong); PCeedChk(ierr);
453 ierr = CeedOperatorDestroy(&op_interp); PCeedChk(ierr);
454 ierr = CeedOperatorDestroy(&op_restrict); PCeedChk(ierr);
455 ierr = CeedVectorDestroy(&fine_multiplicity_r); PCeedChk(ierr);
456 ierr = CeedVectorDestroy(&fine_work); PCeedChk(ierr);
457
458 return 0;
459}
460
461AlgebraicInterpolation::AlgebraicInterpolation(
462 Ceed ceed, CeedBasis basisctof,
463 CeedElemRestriction erestrictu_coarse,
464 CeedElemRestriction erestrictu_fine)
465{
466 int ierr;
467 CeedSize lo_nldofs, ho_nldofs;
468 ierr = CeedElemRestrictionGetLVectorSize(erestrictu_coarse, &lo_nldofs);
469 PCeedChk(ierr);
470 ierr = CeedElemRestrictionGetLVectorSize(erestrictu_fine,
471 &ho_nldofs); PCeedChk(ierr);
472 height = (int)ho_nldofs;
473 width = (int)lo_nldofs;
474 MFEM_VERIFY(ho_nldofs == height, "height overflow");
475 MFEM_VERIFY(lo_nldofs == width, "width overflow");
476 owns_basis_ = false;
477 ierr = Initialize(ceed, basisctof, erestrictu_coarse, erestrictu_fine);
478 PCeedChk(ierr);
479}
480
481AlgebraicInterpolation::~AlgebraicInterpolation()
482{
483 int ierr;
484 ierr = CeedVectorDestroy(&v_); PCeedChk(ierr);
485 ierr = CeedVectorDestroy(&u_); PCeedChk(ierr);
486 if (owns_basis_)
487 {
488 ierr = CeedBasisDestroy(&basisctof_); PCeedChk(ierr);
489 }
490 Finalize();
491}
492
493/// a = a (pointwise*) b
494/// @todo: using mfem::forall in this Ceed-like function is ugly
495int CeedVectorPointwiseMult(CeedVector a, const CeedVector b)
496{
497 int ierr;
498 Ceed ceed;
499 CeedVectorGetCeed(a, &ceed);
500
501 CeedSize length, length2;
502 ierr = CeedVectorGetLength(a, &length); PCeedChk(ierr);
503 ierr = CeedVectorGetLength(b, &length2); PCeedChk(ierr);
504 if (length != length2)
505 {
506 return CeedError(ceed, 1, "Vector sizes don't match");
507 }
508
509 CeedMemType mem;
510 if (Device::Allows(Backend::DEVICE_MASK))
511 {
512 mem = CEED_MEM_DEVICE;
513 }
514 else
515 {
516 mem = CEED_MEM_HOST;
517 }
518 CeedScalar *a_data;
519 const CeedScalar *b_data;
520 ierr = CeedVectorGetArray(a, mem, &a_data); PCeedChk(ierr);
521 ierr = CeedVectorGetArrayRead(b, mem, &b_data); PCeedChk(ierr);
522 MFEM_VERIFY(int(length) == length, "length overflow");
523 mfem::forall(length, [=] MFEM_HOST_DEVICE (int i)
524 {a_data[i] *= b_data[i];});
525
526 ierr = CeedVectorRestoreArray(a, &a_data); PCeedChk(ierr);
527 ierr = CeedVectorRestoreArrayRead(b, &b_data); PCeedChk(ierr);
528
529 return 0;
530}
531
532void AlgebraicInterpolation::Mult(const mfem::Vector& x, mfem::Vector& y) const
533{
534 int ierr = 0;
535 const CeedScalar *in_ptr;
536 CeedScalar *out_ptr;
537 CeedMemType mem;
538 ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
539 if ( Device::Allows(Backend::DEVICE_MASK) && mem==CEED_MEM_DEVICE )
540 {
541 in_ptr = x.Read();
542 out_ptr = y.ReadWrite();
543 }
544 else
545 {
546 in_ptr = x.HostRead();
547 out_ptr = y.HostReadWrite();
548 mem = CEED_MEM_HOST;
549 }
550 ierr = CeedVectorSetArray(u_, mem, CEED_USE_POINTER,
551 const_cast<CeedScalar*>(in_ptr)); PCeedChk(ierr);
552 ierr = CeedVectorSetArray(v_, mem, CEED_USE_POINTER,
553 out_ptr); PCeedChk(ierr);
554
555 ierr = CeedOperatorApply(op_interp, u_, v_,
556 CEED_REQUEST_IMMEDIATE); PCeedChk(ierr);
557 ierr = CeedVectorPointwiseMult(v_, fine_multiplicity_r); PCeedChk(ierr);
558
559 ierr = CeedVectorTakeArray(u_, mem, const_cast<CeedScalar**>(&in_ptr));
560 PCeedChk(ierr);
561 ierr = CeedVectorTakeArray(v_, mem, &out_ptr); PCeedChk(ierr);
562}
563
564void AlgebraicInterpolation::MultTranspose(const mfem::Vector& x,
565 mfem::Vector& y) const
566{
567 int ierr = 0;
568 CeedMemType mem;
569 ierr = CeedGetPreferredMemType(internal::ceed, &mem); PCeedChk(ierr);
570 const CeedScalar *in_ptr;
571 CeedScalar *out_ptr;
572 if ( Device::Allows(Backend::DEVICE_MASK) && mem==CEED_MEM_DEVICE )
573 {
574 in_ptr = x.Read();
575 out_ptr = y.ReadWrite();
576 }
577 else
578 {
579 in_ptr = x.HostRead();
580 out_ptr = y.HostReadWrite();
581 mem = CEED_MEM_HOST;
582 }
583 ierr = CeedVectorSetArray(v_, mem, CEED_USE_POINTER,
584 const_cast<CeedScalar*>(in_ptr)); PCeedChk(ierr);
585 ierr = CeedVectorSetArray(u_, mem, CEED_USE_POINTER,
586 out_ptr); PCeedChk(ierr);
587
588 CeedSize length;
589 ierr = CeedVectorGetLength(v_, &length); PCeedChk(ierr);
590
591 const CeedScalar *multiplicitydata;
592 CeedScalar *workdata;
593 ierr = CeedVectorGetArrayRead(fine_multiplicity_r, mem,
594 &multiplicitydata); PCeedChk(ierr);
595 ierr = CeedVectorGetArrayWrite(fine_work, mem, &workdata); PCeedChk(ierr);
596 MFEM_VERIFY((int)length == length, "length overflow");
597 mfem::forall(length, [=] MFEM_HOST_DEVICE (int i)
598 {workdata[i] = in_ptr[i] * multiplicitydata[i];});
599 ierr = CeedVectorRestoreArrayRead(fine_multiplicity_r,
600 &multiplicitydata);
601 ierr = CeedVectorRestoreArray(fine_work, &workdata); PCeedChk(ierr);
602
603 ierr = CeedOperatorApply(op_restrict, fine_work, u_,
604 CEED_REQUEST_IMMEDIATE); PCeedChk(ierr);
605
606 ierr = CeedVectorTakeArray(v_, mem, const_cast<CeedScalar**>(&in_ptr));
607 PCeedChk(ierr);
608 ierr = CeedVectorTakeArray(u_, mem, &out_ptr); PCeedChk(ierr);
609}
610
611AlgebraicSpaceHierarchy::AlgebraicSpaceHierarchy(FiniteElementSpace &fes)
612{
613 int order = fes.GetOrder(0);
614 int nlevels = 0;
615 int current_order = order;
616 while (current_order > 0)
617 {
618 nlevels++;
619 current_order = current_order/2;
620 }
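   // Example: starting from order 4 the loop visits orders 4, 2, 1, giving
   // nlevels = 3; the coarsest algebraic space is always order 1.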
621
622 meshes.SetSize(nlevels);
623 ownedMeshes.SetSize(nlevels);
624 meshes = fes.GetMesh();
625 ownedMeshes = false;
626
627 fespaces.SetSize(nlevels);
628 ownedFES.SetSize(nlevels);
629 // Own all FESpaces except for the finest, own all prolongations
630 ownedFES = true;
631 fespaces[nlevels-1] = &fes;
632 ownedFES[nlevels-1] = false;
633
634 ceed_interpolations.SetSize(nlevels-1);
635 R_tr.SetSize(nlevels-1);
636 prolongations.SetSize(nlevels-1);
637 ownedProlongations.SetSize(nlevels-1);
638
639 current_order = order;
640
641 Ceed ceed = internal::ceed;
642 InitRestriction(fes, ceed, &fine_er);
643 CeedElemRestriction er = fine_er;
644
645 int dim = fes.GetMesh()->Dimension();
646#ifdef MFEM_USE_MPI
647 GroupCommunicator *gc = NULL;
648 ParFiniteElementSpace *pfes = dynamic_cast<ParFiniteElementSpace*>(&fes);
649 if (pfes)
650 {
651 gc = &pfes->GroupComm();
652 }
653#endif
654
655 for (int ilevel=nlevels-2; ilevel>=0; --ilevel)
656 {
657 const int order_reduction = current_order - (current_order/2);
658 AlgebraicCoarseSpace *space;
659
660#ifdef MFEM_USE_MPI
661 if (pfes)
662 {
663 ParAlgebraicCoarseSpace *parspace = new ParAlgebraicCoarseSpace(
664 *fespaces[ilevel+1], er, current_order, dim, order_reduction, gc);
665 gc = parspace->GetGroupCommunicator();
666 space = parspace;
667 }
668 else
669#endif
670 {
671 space = new AlgebraicCoarseSpace(
672 *fespaces[ilevel+1], er, current_order, dim, order_reduction);
673 }
674 current_order = current_order/2;
675 fespaces[ilevel] = space;
676 ceed_interpolations[ilevel] = new AlgebraicInterpolation(
677 ceed,
678 space->GetCeedCoarseToFine(),
679 space->GetCeedElemRestriction(),
680 er
681 );
682 const SparseMatrix *R = fespaces[ilevel+1]->GetRestrictionMatrix();
683 if (R)
684 {
685 R_tr[ilevel] = new TransposeOperator(*R);
686 }
687 else
688 {
689 R_tr[ilevel] = NULL;
690 }
691 prolongations[ilevel] = ceed_interpolations[ilevel]->SetupRAP(
692 space->GetProlongationMatrix(), R_tr[ilevel]);
693 ownedProlongations[ilevel]
694 = prolongations[ilevel] != ceed_interpolations[ilevel];
695
696 er = space->GetCeedElemRestriction();
697 }
698}
699
700AlgebraicCoarseSpace::AlgebraicCoarseSpace(
701 FiniteElementSpace &fine_fes,
702 CeedElemRestriction fine_er,
703 int order,
704 int dim,
705 int order_reduction_
706) : order_reduction(order_reduction_)
707{
708 int ierr;
709 order_reduction = order_reduction_;
710
711 ierr = CeedATPMGElemRestriction(order, order_reduction, fine_er,
712 &ceed_elem_restriction, dof_map);
713 PCeedChk(ierr);
714 ierr = CeedBasisATPMGCoarseToFine(internal::ceed, order+1, dim,
715 order_reduction, &coarse_to_fine);
716 PCeedChk(ierr);
717 CeedSize ndofs_;
718 ierr = CeedElemRestrictionGetLVectorSize(ceed_elem_restriction, &ndofs_);
719 PCeedChk(ierr);
720 ndofs = ndofs_;
721 MFEM_VERIFY(ndofs == ndofs_, "ndofs overflow");
722
723 mesh = fine_fes.GetMesh();
724}
725
726AlgebraicCoarseSpace::~AlgebraicCoarseSpace()
727{
728 int ierr;
729 delete [] dof_map;
730 ierr = CeedBasisDestroy(&coarse_to_fine); PCeedChk(ierr);
731 ierr = CeedElemRestrictionDestroy(&ceed_elem_restriction); PCeedChk(ierr);
732}
733
734#ifdef MFEM_USE_MPI
735
736ParAlgebraicCoarseSpace::ParAlgebraicCoarseSpace(
737 FiniteElementSpace &fine_fes,
738 CeedElemRestriction fine_er,
739 int order,
740 int dim,
741 int order_reduction_,
742 GroupCommunicator *gc_fine)
743 : AlgebraicCoarseSpace(fine_fes, fine_er, order, dim, order_reduction_)
744{
745 CeedSize lsize;
746 CeedElemRestrictionGetLVectorSize(ceed_elem_restriction, &lsize);
747 const Table &group_ldof_fine = gc_fine->GroupLDofTable();
748
749 MFEM_VERIFY((int)lsize == lsize, "size overflow");
750 ldof_group.SetSize(lsize);
751 ldof_group = 0;
752
753 const GroupTopology &group_topo = gc_fine->GetGroupTopology();
754 gc = new GroupCommunicator(group_topo);
755 Table &group_ldof = gc->GroupLDofTable();
756 group_ldof.MakeI(group_ldof_fine.Size());
757 for (int g=1; g<group_ldof_fine.Size(); ++g)
758 {
759 int nldof_fine_g = group_ldof_fine.RowSize(g);
760 const int *ldof_fine_g = group_ldof_fine.GetRow(g);
761 for (int i=0; i<nldof_fine_g; ++i)
762 {
763 int icoarse = dof_map[ldof_fine_g[i]];
764 if (icoarse >= 0)
765 {
766 group_ldof.AddAColumnInRow(g);
767 ldof_group[icoarse] = g;
768 }
769 }
770 }
771 group_ldof.MakeJ();
772 for (int g=1; g<group_ldof_fine.Size(); ++g)
773 {
774 int nldof_fine_g = group_ldof_fine.RowSize(g);
775 const int *ldof_fine_g = group_ldof_fine.GetRow(g);
776 for (int i=0; i<nldof_fine_g; ++i)
777 {
778 int icoarse = dof_map[ldof_fine_g[i]];
779 if (icoarse >= 0)
780 {
781 group_ldof.AddConnection(g, icoarse);
782 }
783 }
784 }
785 group_ldof.ShiftUpI();
786 gc->Finalize();
787 ldof_ltdof.SetSize(lsize);
788 ldof_ltdof = -2;
789 int ltsize = 0;
790 for (int i=0; i<lsize; ++i)
791 {
792 int g = ldof_group[i];
793 if (group_topo.IAmMaster(g))
794 {
795 ldof_ltdof[i] = ltsize;
796 ++ltsize;
797 }
798 }
799 gc->SetLTDofTable(ldof_ltdof);
800 gc->Bcast(ldof_ltdof);
801
802 R_mat = new SparseMatrix(ltsize, lsize);
803 for (int j=0; j<lsize; ++j)
804 {
805 if (group_topo.IAmMaster(ldof_group[j]))
806 {
807 int i = ldof_ltdof[j];
808 R_mat->Set(i,j,1.0);
809 }
810 }
811 R_mat->Finalize();
812
813 if (Device::Allows(Backend::DEVICE_MASK))
814 {
815 P = new DeviceConformingProlongationOperator(*gc, R_mat);
816 }
817 else
818 {
819 P = new ConformingProlongationOperator(lsize, *gc);
820 }
821 P_mat = NULL;
822}
823
824HypreParMatrix *ParAlgebraicCoarseSpace::GetProlongationHypreParMatrix()
825{
826 if (P_mat) { return P_mat; }
827
828 ParMesh *pmesh = dynamic_cast<ParMesh*>(mesh);
829 MFEM_VERIFY(pmesh != NULL, "");
830 Array<HYPRE_BigInt> dof_offsets, tdof_offsets, tdof_nb_offsets;
831 Array<HYPRE_BigInt> *offsets[2] = {&dof_offsets, &tdof_offsets};
832 int lsize = P->Height();
833 int ltsize = P->Width();
834 HYPRE_BigInt loc_sizes[2] = {lsize, ltsize};
835 pmesh->GenerateOffsets(2, loc_sizes, offsets);
836
837 MPI_Comm comm = pmesh->GetComm();
838
839 const GroupTopology &group_topo = gc->GetGroupTopology();
840
841 if (HYPRE_AssumedPartitionCheck())
842 {
843 // communicate the neighbor offsets in tdof_nb_offsets
844 int nsize = group_topo.GetNumNeighbors()-1;
845 MPI_Request *requests = new MPI_Request[2*nsize];
846 MPI_Status *statuses = new MPI_Status[2*nsize];
847 tdof_nb_offsets.SetSize(nsize+1);
848 tdof_nb_offsets[0] = tdof_offsets[0];
849
850 // send and receive neighbors' local tdof offsets
851 int request_counter = 0;
852 for (int i = 1; i <= nsize; i++)
853 {
854 MPI_Irecv(&tdof_nb_offsets[i], 1, HYPRE_MPI_INT,
855 group_topo.GetNeighborRank(i), 5365, comm,
856 &requests[request_counter++]);
857 }
858 for (int i = 1; i <= nsize; i++)
859 {
860 MPI_Isend(&tdof_nb_offsets[0], 1, HYPRE_MPI_INT,
861 group_topo.GetNeighborRank(i), 5365, comm,
862 &requests[request_counter++]);
863 }
864 MPI_Waitall(request_counter, requests, statuses);
865
866 delete [] statuses;
867 delete [] requests;
868 }
869
870 HYPRE_Int *i_diag = Memory<HYPRE_Int>(lsize+1);
871 HYPRE_Int *j_diag = Memory<HYPRE_Int>(ltsize);
872 int diag_counter;
873
874 HYPRE_Int *i_offd = Memory<HYPRE_Int>(lsize+1);
875 HYPRE_Int *j_offd = Memory<HYPRE_Int>(lsize-ltsize);
876 int offd_counter;
877
878 HYPRE_BigInt *cmap = Memory<HYPRE_BigInt>(lsize-ltsize);
879
880 HYPRE_BigInt *col_starts = tdof_offsets;
881 HYPRE_BigInt *row_starts = dof_offsets;
882
883 Array<Pair<HYPRE_BigInt, int> > cmap_j_offd(lsize-ltsize);
884
885 i_diag[0] = i_offd[0] = 0;
886 diag_counter = offd_counter = 0;
887 for (int i_ldof = 0; i_ldof < lsize; i_ldof++)
888 {
889 int g = ldof_group[i_ldof];
890 int i_ltdof = ldof_ltdof[i_ldof];
891 if (group_topo.IAmMaster(g))
892 {
893 j_diag[diag_counter++] = i_ltdof;
894 }
895 else
896 {
897 HYPRE_BigInt global_tdof_number;
898 if (HYPRE_AssumedPartitionCheck())
899 {
900 global_tdof_number
901 = i_ltdof + tdof_nb_offsets[group_topo.GetGroupMaster(g)];
902 }
903 else
904 {
905 global_tdof_number
906 = i_ltdof + tdof_offsets[group_topo.GetGroupMasterRank(g)];
907 }
908
909 cmap_j_offd[offd_counter].one = global_tdof_number;
910 cmap_j_offd[offd_counter].two = offd_counter;
911 offd_counter++;
912 }
913 i_diag[i_ldof+1] = diag_counter;
914 i_offd[i_ldof+1] = offd_counter;
915 }
916
917 SortPairs<HYPRE_BigInt, int>(cmap_j_offd, offd_counter);
918
919 for (int i = 0; i < offd_counter; i++)
920 {
921 cmap[i] = cmap_j_offd[i].one;
922 j_offd[cmap_j_offd[i].two] = i;
923 }
924
925 P_mat = new HypreParMatrix(
926 comm, pmesh->GetMyRank(), pmesh->GetNRanks(),
927 row_starts, col_starts,
928 i_diag, j_diag, i_offd, j_offd,
929 cmap, offd_counter
930 );
931
932 P_mat->CopyRowStarts();
933 P_mat->CopyColStarts();
934
935 return P_mat;
936}
937
938ParAlgebraicCoarseSpace::~ParAlgebraicCoarseSpace()
939{
940 delete P;
941 delete R_mat;
942 delete P_mat;
943 delete gc;
944}
945
946#endif // MFEM_USE_MPI
947
948#endif // MFEM_USE_CEED
949
950AlgebraicSolver::AlgebraicSolver(BilinearForm &form,
951 const Array<int>& ess_tdofs)
952{
953 MFEM_VERIFY(DeviceCanUseCeed(),
954 "AlgebraicSolver requires a Ceed device");
955 MFEM_VERIFY(form.GetAssemblyLevel() == AssemblyLevel::PARTIAL ||
957 "AlgebraicSolver requires partial assembly or fully matrix-free.");
958 MFEM_VERIFY(UsesTensorBasis(*form.FESpace()),
959 "AlgebraicSolver requires tensor product basis functions.");
960#ifdef MFEM_USE_CEED
961 fespaces = new AlgebraicSpaceHierarchy(*form.FESpace());
962 multigrid = new AlgebraicMultigrid(*fespaces, form, ess_tdofs);
963#else
964 MFEM_ABORT("AlgebraicSolver requires Ceed support");
965#endif
966}
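
// Illustrative usage sketch (hypothetical driver code; `a`, `A`, `B`, `X` and
// `ess_tdof_list` are assumed to come from the usual FormLinearSystem setup):
//
//    ceed::AlgebraicSolver prec(a, ess_tdof_list);
//    CGSolver cg(MPI_COMM_WORLD);
//    cg.SetOperator(*A);
//    cg.SetPreconditioner(prec);
//    cg.Mult(B, X);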
967
968AlgebraicSolver::~AlgebraicSolver()
969{
970#ifdef MFEM_USE_CEED
971 delete fespaces;
972 delete multigrid;
973#endif
974}
975
976void AlgebraicSolver::Mult(const Vector& x, Vector& y) const
977{
978#ifdef MFEM_USE_CEED
979 multigrid->Mult(x, y);
980#endif
981}
982
983void AlgebraicSolver::SetOperator(const mfem::Operator& op)
984{
985#ifdef MFEM_USE_CEED
986 multigrid->SetOperator(op);
987#endif
988}
989
990} // namespace ceed
991
992} // namespace mfem