MFEM v4.9.0
Finite element discretization library
bilinearform_ext.cpp
Go to the documentation of this file.
1// Copyright (c) 2010-2025, Lawrence Livermore National Security, LLC. Produced
2// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3// LICENSE and NOTICE for details. LLNL-CODE-806117.
4//
5// This file is part of the MFEM library. For more information and source code
6// availability visit https://mfem.org.
7//
8// MFEM is free software; you can redistribute it and/or modify it under the
9// terms of the BSD-3 license. We welcome feedback and contributions, see file
10// CONTRIBUTING.md for details.
11
12// Implementations of classes FABilinearFormExtension, EABilinearFormExtension,
13// PABilinearFormExtension and MFBilinearFormExtension.
14
15#include "../general/forall.hpp"
16#include "bilinearform.hpp"
17#include "pbilinearform.hpp"
18#include "pgridfunc.hpp"
19#include "fe/face_map_utils.hpp"
21
22namespace mfem
23{
24
25BilinearFormExtension::BilinearFormExtension(BilinearForm *form)
26 : Operator(form->Size()), a(form)
27{
28 // empty
29}
30
31const Operator *BilinearFormExtension::GetProlongation() const
32{
33 return a->GetProlongation();
34}
35
36const Operator *BilinearFormExtension::GetRestriction() const
37{
38 return a->GetRestriction();
39}
40
41// Data and methods for matrix-free bilinear forms
42MFBilinearFormExtension::MFBilinearFormExtension(BilinearForm *form)
43 : BilinearFormExtension(form),
44 trial_fes(a->FESpace()),
45 test_fes(a->FESpace())
46{
47 elem_restrict = NULL;
48 int_face_restrict_lex = NULL;
49 bdr_face_restrict_lex = NULL;
50}
51
52void MFBilinearFormExtension::Assemble()
53{
54 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
55 const int integratorCount = integrators.Size();
56 for (int i = 0; i < integratorCount; ++i)
57 {
58 integrators[i]->AssembleMF(*a->FESpace());
59 }
60
61 MFEM_VERIFY(a->GetBBFI()->Size() == 0, "AddBoundaryIntegrator is not "
62 "currently supported in MFBilinearFormExtension");
63}
64
65void MFBilinearFormExtension::AssembleDiagonal(Vector &y) const
66{
67 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
68
69 const int iSz = integrators.Size();
70 if (elem_restrict && !DeviceCanUseCeed())
71 {
72 localY = 0.0;
73 for (int i = 0; i < iSz; ++i)
74 {
75 integrators[i]->AssembleDiagonalMF(localY);
76 }
77 const ElementRestriction* H1elem_restrict =
78 dynamic_cast<const ElementRestriction*>(elem_restrict);
79 if (H1elem_restrict)
80 {
81 H1elem_restrict->AbsMultTranspose(localY, y);
82 }
83 else
84 {
85 elem_restrict->MultTranspose(localY, y);
86 }
87 }
88 else
89 {
90 y.UseDevice(true); // typically this is a large vector, so store on device
91 y = 0.0;
92 for (int i = 0; i < iSz; ++i)
93 {
94 integrators[i]->AssembleDiagonalMF(y);
95 }
96 }
97}
98
99void MFBilinearFormExtension::Update()
100{
101 FiniteElementSpace *fes = a->FESpace();
102 height = width = fes->GetVSize();
103 trial_fes = fes;
104 test_fes = fes;
105
106 elem_restrict = nullptr;
107 int_face_restrict_lex = nullptr;
108 bdr_face_restrict_lex = nullptr;
109}
110
111void MFBilinearFormExtension::FormSystemMatrix(const Array<int> &ess_tdof_list,
112 OperatorHandle &A)
113{
114 Operator *oper;
115 Operator::FormSystemOperator(ess_tdof_list, oper);
116 A.Reset(oper); // A will own oper
117}
118
119void MFBilinearFormExtension::FormLinearSystem(const Array<int> &ess_tdof_list,
120 Vector &x, Vector &b,
121 OperatorHandle &A,
122 Vector &X, Vector &B,
123 int copy_interior)
124{
125 Operator *oper;
126 Operator::FormLinearSystem(ess_tdof_list, x, b, oper, X, B, copy_interior);
127 A.Reset(oper); // A will own oper
128}
129
130void MFBilinearFormExtension::Mult(const Vector &x, Vector &y) const
131{
132 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
133
134 const int iSz = integrators.Size();
136 {
137 y.UseDevice(true); // typically this is a large vector, so store on device
138 y = 0.0;
139 for (int i = 0; i < iSz; ++i)
140 {
141 integrators[i]->AddMultMF(x, y);
142 }
143 }
144 else
145 {
147 localY = 0.0;
148 for (int i = 0; i < iSz; ++i)
149 {
150 integrators[i]->AddMultMF(localX, localY);
151 }
153 }
154
155 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
156 const int iFISz = intFaceIntegrators.Size();
157 if (int_face_restrict_lex && iFISz>0)
158 {
160 if (int_face_X.Size()>0)
161 {
162 int_face_Y = 0.0;
163 for (int i = 0; i < iFISz; ++i)
164 {
165 intFaceIntegrators[i]->AddMultMF(int_face_X, int_face_Y);
166 }
168 }
169 }
170
171 Array<BilinearFormIntegrator*> &bdrFaceIntegrators = *a->GetBFBFI();
172 const int bFISz = bdrFaceIntegrators.Size();
173 if (bdr_face_restrict_lex && bFISz>0)
174 {
176 if (bdr_face_X.Size()>0)
177 {
178 bdr_face_Y = 0.0;
179 for (int i = 0; i < bFISz; ++i)
180 {
181 bdrFaceIntegrators[i]->AddMultMF(bdr_face_X, bdr_face_Y);
182 }
184 }
185 }
186}
187
188void MFBilinearFormExtension::MultTranspose(const Vector &x, Vector &y) const
189{
190 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
191 const int iSz = integrators.Size();
192 if (elem_restrict)
193 {
195 localY = 0.0;
196 for (int i = 0; i < iSz; ++i)
197 {
198 integrators[i]->AddMultTransposeMF(localX, localY);
199 }
201 }
202 else
203 {
204 y.UseDevice(true);
205 y = 0.0;
206 for (int i = 0; i < iSz; ++i)
207 {
208 integrators[i]->AddMultTransposeMF(x, y);
209 }
210 }
211
212 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
213 const int iFISz = intFaceIntegrators.Size();
214 if (int_face_restrict_lex && iFISz>0)
215 {
217 if (int_face_X.Size()>0)
218 {
219 int_face_Y = 0.0;
220 for (int i = 0; i < iFISz; ++i)
221 {
222 intFaceIntegrators[i]->AddMultTransposeMF(int_face_X, int_face_Y);
223 }
225 }
226 }
227
228 Array<BilinearFormIntegrator*> &bdrFaceIntegrators = *a->GetBFBFI();
229 const int bFISz = bdrFaceIntegrators.Size();
230 if (bdr_face_restrict_lex && bFISz>0)
231 {
233 if (bdr_face_X.Size()>0)
234 {
235 bdr_face_Y = 0.0;
236 for (int i = 0; i < bFISz; ++i)
237 {
238 bdrFaceIntegrators[i]->AddMultTransposeMF(bdr_face_X, bdr_face_Y);
239 }
241 }
242 }
243}
244
245// Data and methods for partially-assembled bilinear forms
246PABilinearFormExtension::PABilinearFormExtension(BilinearForm *form)
247 : BilinearFormExtension(form),
248 trial_fes(a->FESpace()),
249 test_fes(a->FESpace())
250{
251 elem_restrict = NULL;
252 int_face_restrict_lex = NULL;
253 bdr_face_restrict_lex = NULL;
254}
255
256void PABilinearFormExtension::SetupRestrictionOperators(const L2FaceValues m)
257{
258 if ( Device::Allows(Backend::CEED_MASK) ) { return; }
261 if (elem_restrict)
262 {
265 localY.UseDevice(true); // ensure 'localY = 0.0' is done on device
266
267 // Gather the attributes on the host from all the elements
268 const Mesh &mesh = *trial_fes->GetMesh();
270 }
271
272 // Construct face restriction operators only if the bilinear form has
273 // interior or boundary face integrators
274 if (int_face_restrict_lex == NULL && a->GetFBFI()->Size() > 0)
275 {
281 int_face_Y.UseDevice(true); // ensure 'int_face_Y = 0.0' is done on device
282
283 bool needs_normal_derivs = false;
284 auto &integs = *a->GetFBFI();
285 for (int i = 0; i < integs.Size(); ++i)
286 {
287 if (integs[i]->RequiresFaceNormalDerivatives())
288 {
289 needs_normal_derivs = true;
290 break;
291 }
292 }
293 if (needs_normal_derivs)
294 {
297 }
298 }
299
300 const bool has_bdr_integs = (a->GetBFBFI()->Size() > 0 ||
301 a->GetBBFI()->Size() > 0);
302 if (bdr_face_restrict_lex == NULL && has_bdr_integs)
303 {
307 m);
310 bdr_face_Y.UseDevice(true); // ensure 'bdr_face_Y = 0.0' is done on device
311
312 bool needs_normal_derivs = false;
313 auto &integs = *a->GetBFBFI();
314 for (int i = 0; i < integs.Size(); ++i)
315 {
316 if (integs[i]->RequiresFaceNormalDerivatives())
317 {
318 needs_normal_derivs = true;
319 break;
320 }
321 }
322 if (needs_normal_derivs)
323 {
326 }
327
329 }
330}
331
332void PABilinearFormExtension::Assemble()
333{
334 SetupRestrictionOperators(L2FaceValues::DoubleValued);
335
336 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
337 for (BilinearFormIntegrator *integ : integrators)
338 {
339 if (integ->Patchwise())
340 {
341 MFEM_VERIFY(a->FESpace()->GetNURBSext(),
342 "Patchwise integration requires a NURBS FE space");
343 integ->AssembleNURBSPA(*a->FESpace());
344 }
345 else
346 {
347 integ->AssemblePA(*a->FESpace());
348 }
349 }
350
351 Array<BilinearFormIntegrator*> &bdr_integrators = *a->GetBBFI();
352 for (BilinearFormIntegrator *integ : bdr_integrators)
353 {
354 integ->AssemblePABoundary(*a->FESpace());
355 }
356
357 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
358 for (BilinearFormIntegrator *integ : intFaceIntegrators)
359 {
360 integ->AssemblePAInteriorFaces(*a->FESpace());
361 }
362
363 Array<BilinearFormIntegrator*> &bdrFaceIntegrators = *a->GetBFBFI();
364 for (BilinearFormIntegrator *integ : bdrFaceIntegrators)
365 {
366 integ->AssemblePABoundaryFaces(*a->FESpace());
367 }
368}
369
370void PABilinearFormExtension::AssembleDiagonal(Vector &y) const
371{
372 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
373
374 auto assemble_diagonal_with_markers = [&](BilinearFormIntegrator &integ,
375 const Array<int> *markers,
376 const Array<int> &attributes,
377 Vector &d)
378 {
379 integ.AssembleDiagonalPA(d);
380 if (markers)
381 {
382 const int ne = attributes.Size();
383 const int nd = d.Size() / ne;
384 const auto d_attr = Reshape(attributes.Read(), ne);
385 const auto d_m = Reshape(markers->Read(), markers->Size());
386 auto d_d = Reshape(d.ReadWrite(), nd, ne);
387 mfem::forall(ne, [=] MFEM_HOST_DEVICE (int e)
388 {
389 const int attr = d_attr[e];
390 if (attr <= 0 || d_m[attr - 1] == 0)
391 {
392 for (int i = 0; i < nd; ++i)
393 {
394 d_d(i, e) = 0.0;
395 }
396 }
397 });
398 }
399 };
400
401 const int iSz = integrators.Size();
402 if (elem_restrict && !DeviceCanUseCeed())
403 {
404 if (iSz > 0)
405 {
406 localY = 0.0;
407 Array<Array<int>*> &elem_markers = *a->GetDBFI_Marker();
408 for (int i = 0; i < iSz; ++i)
409 {
410 assemble_diagonal_with_markers(*integrators[i], elem_markers[i],
411 *elem_attributes, localY);
412 }
413 const ElementRestriction* H1elem_restrict =
414 dynamic_cast<const ElementRestriction*>(elem_restrict);
415 if (H1elem_restrict)
416 {
417 H1elem_restrict->AbsMultTranspose(localY, y);
418 }
419 else
420 {
421 elem_restrict->MultTranspose(localY, y);
422 }
423 }
424 else
425 {
426 y = 0.0;
427 }
428 }
429 else
430 {
431 Array<Array<int>*> &elem_markers = *a->GetDBFI_Marker();
432 y.UseDevice(true); // typically this is a large vector, so store on device
433 y = 0.0;
434 for (int i = 0; i < iSz; ++i)
435 {
436 assemble_diagonal_with_markers(*integrators[i], elem_markers[i],
437 *elem_attributes, y);
438 }
439 }
440
441 Array<BilinearFormIntegrator*> &bdr_integs = *a->GetBBFI();
442 const int n_bdr_integs = bdr_integs.Size();
443 if (bdr_face_restrict_lex && n_bdr_integs > 0)
444 {
445 Array<Array<int>*> &bdr_markers = *a->GetBBFI_Marker();
446 bdr_face_Y = 0.0;
447 for (int i = 0; i < n_bdr_integs; ++i)
448 {
449 assemble_diagonal_with_markers(*bdr_integs[i], bdr_markers[i],
450 *bdr_face_attributes, bdr_face_Y);
451 }
453 }
454}
455
456void PABilinearFormExtension::Update()
457{
458 FiniteElementSpace *fes = a->FESpace();
459 height = width = fes->GetVSize();
460 trial_fes = fes;
461 test_fes = fes;
462
463 elem_restrict = nullptr;
464 int_face_restrict_lex = nullptr;
465 bdr_face_restrict_lex = nullptr;
466}
467
468void PABilinearFormExtension::FormSystemMatrix(const Array<int> &ess_tdof_list,
469 OperatorHandle &A)
470{
471 Operator *oper;
472 Operator::FormSystemOperator(ess_tdof_list, oper);
473 A.Reset(oper); // A will own oper
474}
475
476void PABilinearFormExtension::FormLinearSystem(const Array<int> &ess_tdof_list,
477 Vector &x, Vector &b,
478 OperatorHandle &A,
479 Vector &X, Vector &B,
480 int copy_interior)
481{
482 Operator *oper;
483 Operator::FormLinearSystem(ess_tdof_list, x, b, oper, X, B, copy_interior);
484 A.Reset(oper); // A will own oper
485}
486
488 const bool useAbs) const
489{
490 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
491
492 const int iSz = integrators.Size();
493
494 bool allPatchwise = true;
495 bool somePatchwise = false;
496
497 for (int i = 0; i < iSz; ++i)
498 {
499 if (integrators[i]->Patchwise())
500 {
501 somePatchwise = true;
502 }
503 else
504 {
505 allPatchwise = false;
506 }
507 }
508
509 MFEM_VERIFY(!(somePatchwise && !allPatchwise),
510 "All or none of the integrators should be patchwise");
511
512 if (DeviceCanUseCeed() || !elem_restrict || allPatchwise)
513 {
514 y.UseDevice(true); // typically this is a large vector, so store on device
515 y = 0.0;
516 for (int i = 0; i < iSz; ++i)
517 {
518 if (integrators[i]->Patchwise())
519 {
520 MFEM_ASSERT(!useAbs, "AbsMult not implemented with NURBS!")
521 integrators[i]->AddMultNURBSPA(x, y);
522 }
523 else
524 {
525 if (useAbs) { integrators[i]->AddAbsMultPA(x, y); }
526 else { integrators[i]->AddMultPA(x, y); }
527 }
528 }
529 }
530 else
531 {
532 if (iSz)
533 {
534 Array<Array<int>*> &elem_markers = *a->GetDBFI_Marker();
535 auto H1elem_restrict =
536 dynamic_cast<const ElementRestriction*>(elem_restrict);
537 if (H1elem_restrict && useAbs)
538 {
539 H1elem_restrict->AbsMult(x, localX);
540 }
541 else
542 {
544 }
545 localY = 0.0;
546 for (int i = 0; i < iSz; ++i)
547 {
548 AddMultWithMarkers(*integrators[i], localX, elem_markers[i],
549 *elem_attributes, false, localY, useAbs);
550 }
551 if (H1elem_restrict && useAbs)
552 {
553 H1elem_restrict->AbsMultTranspose(localY, y);
554 }
555 else
556 {
558 }
559 }
560 else
561 {
562 y = 0.0;
563 }
564 }
565
566 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
567 const int iFISz = intFaceIntegrators.Size();
568 if (int_face_restrict_lex && iFISz>0)
569 {
570 MFEM_ASSERT(!useAbs, "AbsMult not implemented for face integrators!")
571 // When assembling interior face integrators for DG spaces, we need to
572 // exchange the face-neighbor information. This happens inside member
573 // functions of the 'int_face_restrict_lex'. To avoid repeated calls to
574 // ParGridFunction::ExchangeFaceNbrData, if we have a parallel space
575 // with interior face integrators, we create a ParGridFunction that
576 // will be used to cache the face-neighbor data. x_dg should be passed
577 // to any restriction operator that may need to use face-neighbor data.
578 const Vector *x_dg = &x;
579#ifdef MFEM_USE_MPI
580 ParGridFunction x_pgf;
581 if (auto *pfes = dynamic_cast<ParFiniteElementSpace*>(a->FESpace()))
582 {
583 x_pgf.MakeRef(pfes, const_cast<Vector&>(x), 0);
584 x_dg = &x_pgf;
585 }
586#endif
587
589 if (int_face_dXdn.Size() > 0)
590 {
592 }
593 if (int_face_X.Size() > 0)
594 {
595 int_face_Y = 0.0;
596
597 // if normal derivatives are needed by at least one integrator...
598 if (int_face_dYdn.Size() > 0)
599 {
600 int_face_dYdn = 0.0;
601 }
602
603 for (int i = 0; i < iFISz; ++i)
604 {
605 if (intFaceIntegrators[i]->RequiresFaceNormalDerivatives())
606 {
607 intFaceIntegrators[i]->AddMultPAFaceNormalDerivatives(
610 }
611 else
612 {
613 intFaceIntegrators[i]->AddMultPA(int_face_X, int_face_Y);
614 }
615 }
617 if (int_face_dYdn.Size() > 0)
618 {
620 int_face_dYdn, y);
621 }
622 }
623 }
624
625 Array<BilinearFormIntegrator*> &bdr_integs = *a->GetBBFI();
626 Array<BilinearFormIntegrator*> &bdr_face_integs = *a->GetBFBFI();
627 const int n_bdr_integs = bdr_integs.Size();
628 const int n_bdr_face_integs = bdr_face_integs.Size();
629 const bool has_bdr_integs = (n_bdr_face_integs > 0 || n_bdr_integs > 0);
630 if (bdr_face_restrict_lex && has_bdr_integs)
631 {
632 MFEM_ASSERT(!useAbs, "AbsMult not implemented for bdr integrators!")
633 Array<Array<int>*> &bdr_markers = *a->GetBBFI_Marker();
634 Array<Array<int>*> &bdr_face_markers = *a->GetBFBFI_Marker();
636 if (bdr_face_dXdn.Size() > 0)
637 {
639 }
640 if (bdr_face_X.Size() > 0)
641 {
642 bdr_face_Y = 0.0;
643
644 // if normal derivatives are needed by at least one integrator...
645 if (bdr_face_dYdn.Size() > 0)
646 {
647 bdr_face_dYdn = 0.0;
648 }
649 for (int i = 0; i < n_bdr_integs; ++i)
650 {
651 AddMultWithMarkers(*bdr_integs[i], bdr_face_X, bdr_markers[i],
653 }
654 for (int i = 0; i < n_bdr_face_integs; ++i)
655 {
656 if (bdr_face_integs[i]->RequiresFaceNormalDerivatives())
657 {
659 *bdr_face_integs[i], bdr_face_X, bdr_face_dXdn,
660 bdr_face_markers[i], *bdr_face_attributes, bdr_face_Y,
662 }
663 else
664 {
665 AddMultWithMarkers(*bdr_face_integs[i], bdr_face_X,
666 bdr_face_markers[i], *bdr_face_attributes, false,
667 bdr_face_Y);
668 }
669 }
671 if (bdr_face_dYdn.Size() > 0)
672 {
674 }
675 }
676 }
677}
678
679void PABilinearFormExtension::MultTranspose(const Vector &x, Vector &y) const
680{
681 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
682 const int iSz = integrators.Size();
683 if (elem_restrict)
684 {
685 Array<Array<int>*> &elem_markers = *a->GetDBFI_Marker();
687 localY = 0.0;
688 for (int i = 0; i < iSz; ++i)
689 {
690 AddMultWithMarkers(*integrators[i], localX, elem_markers[i], *elem_attributes,
691 true, localY);
692 }
694 }
695 else
696 {
697 y.UseDevice(true);
698 y = 0.0;
699 for (int i = 0; i < iSz; ++i)
700 {
701 integrators[i]->AddMultTransposePA(x, y);
702 }
703 }
704
705 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
706 const int iFISz = intFaceIntegrators.Size();
707 if (int_face_restrict_lex && iFISz>0)
708 {
710 if (int_face_X.Size()>0)
711 {
712 int_face_Y = 0.0;
713 for (int i = 0; i < iFISz; ++i)
714 {
715 intFaceIntegrators[i]->AddMultTransposePA(int_face_X, int_face_Y);
716 }
718 }
719 }
720
721 Array<BilinearFormIntegrator*> &bdr_integs = *a->GetBBFI();
722 Array<BilinearFormIntegrator*> &bdr_face_integs = *a->GetBFBFI();
723 const int n_bdr_integs = bdr_integs.Size();
724 const int n_bdr_face_integs = bdr_face_integs.Size();
725 const bool has_bdr_integs = (n_bdr_face_integs > 0 || n_bdr_integs > 0);
726 if (bdr_face_restrict_lex && has_bdr_integs)
727 {
728 Array<Array<int>*> &bdr_markers = *a->GetBBFI_Marker();
729 Array<Array<int>*> &bdr_face_markers = *a->GetBFBFI_Marker();
730
732 if (bdr_face_X.Size() > 0)
733 {
734 bdr_face_Y = 0.0;
735 for (int i = 0; i < n_bdr_integs; ++i)
736 {
737 AddMultWithMarkers(*bdr_integs[i], bdr_face_X, bdr_markers[i],
739 }
740 for (int i = 0; i < n_bdr_face_integs; ++i)
741 {
742 AddMultWithMarkers(*bdr_face_integs[i], bdr_face_X,
743 bdr_face_markers[i], *bdr_face_attributes, true,
744 bdr_face_Y);
745 }
747 }
748 }
749}
750
751// Compute kernel for PABilinearFormExtension::AddMultWithMarkers. It cannot be
752// defined inside a member function with non-public visibility.
753static void AddWithMarkers_(
754 const int ne,
755 const int nd,
756 const Vector &x,
757 const Array<int> &markers,
758 const Array<int> &attributes,
759 Vector &y)
760{
761 const auto d_x = Reshape(x.Read(), nd, ne);
762 const auto d_m = Reshape(markers.Read(), markers.Size());
763 const auto d_attr = Reshape(attributes.Read(), ne);
764 auto d_y = Reshape(y.ReadWrite(), nd, ne);
765 mfem::forall(ne, [=] MFEM_HOST_DEVICE (int e)
766 {
767 const int attr = d_attr[e];
768 if (attr <= 0 || d_m[attr - 1] == 0) { return; }
769 for (int i = 0; i < nd; ++i)
770 {
771 d_y(i, e) += d_x(i, e);
772 }
773 });
774}
775
777 const BilinearFormIntegrator &integ,
778 const Vector &x,
779 const Vector &dxdn,
780 const Array<int> *markers,
781 const Array<int> &attributes,
782 Vector &y,
783 Vector &dydn) const
784{
785 if (markers)
786 {
787 tmp_evec.SetSize(y.Size() + dydn.Size());
788 tmp_evec = 0.0;
789 Vector tmp_y(tmp_evec, 0, y.Size());
790 Vector tmp_dydn(tmp_evec, y.Size(), dydn.Size());
791
792 integ.AddMultPAFaceNormalDerivatives(x, dxdn, tmp_y, tmp_dydn);
793
794 const int ne = attributes.Size();
795 const int nd_1 = x.Size() / ne;
796 const int nd_2 = dxdn.Size() / ne;
797
798 AddWithMarkers_(ne, nd_1, tmp_y, *markers, attributes, y);
799 AddWithMarkers_(ne, nd_2, tmp_dydn, *markers, attributes, dydn);
800 }
801 else
802 {
803 integ.AddMultPAFaceNormalDerivatives(x, dxdn, y, dydn);
804 }
805}
806
807void PABilinearFormExtension::AddMultWithMarkers(
808 const BilinearFormIntegrator &integ,
809 const Vector &x,
810 const Array<int> *markers,
811 const Array<int> &attributes,
812 const bool transpose,
813 Vector &y,
814 const bool useAbs) const
815{
816 if (markers)
817 {
818 tmp_evec.SetSize(y.Size());
819 tmp_evec = 0.0;
820 if (useAbs)
821 {
822 if (transpose) { integ.AddAbsMultTransposePA(x, tmp_evec); }
823 else { integ.AddAbsMultPA(x, tmp_evec); }
824 }
825 else
826 {
827 if (transpose) { integ.AddMultTransposePA(x, tmp_evec); }
828 else { integ.AddMultPA(x, tmp_evec); }
829 }
830 const int ne = attributes.Size();
831 const int nd = x.Size() / ne;
832 AddWithMarkers_(ne, nd, tmp_evec, *markers, attributes, y);
833 }
834 else
835 {
836 if (useAbs)
837 {
838 if (transpose) { integ.AddAbsMultTransposePA(x, y); }
839 else { integ.AddAbsMultPA(x, y); }
840 }
841 else
842 {
843 if (transpose) { integ.AddMultTransposePA(x, y); }
844 else { integ.AddMultPA(x, y); }
845 }
846 }
847}
848
849// Data and methods for element-assembled bilinear forms
850EABilinearFormExtension::EABilinearFormExtension(BilinearForm *form)
851 : PABilinearFormExtension(form),
852 factorize_face_terms(false)
853{
854 if ( form->FESpace()->IsDGSpace() )
855 {
856 factorize_face_terms = true;
857 }
858}
859
860void EABilinearFormExtension::Assemble()
861{
863
864 ne = trial_fes->GetMesh()->GetNE();
866
867 Vector ea_data_tmp;
868
869 auto add_with_markers = [&](const Vector &ea_1, Vector &ea_2, const int ne_,
870 const Array<int> &markers, const Array<int> &attrs,
871 const bool add)
872 {
873 if (ne_ == 0) { return; }
874 const int sz = ea_1.Size() / ne_;
875 const int *d_m = markers.Read();
876 const int *d_a = attrs.Read();
877 const auto d_ea_1 = Reshape(ea_1.Read(), sz, ne_);
878 auto d_ea_2 = Reshape(add ? ea_2.ReadWrite() : ea_2.Write(), sz, ne_);
879
880 mfem::forall(sz*ne_, [=] MFEM_HOST_DEVICE (int idx)
881 {
882 const int i = idx % sz;
883 const int e = idx / sz;
884 const real_t val =
885 d_a[e] > 0 ? (d_m[d_a[e] - 1] ? d_ea_1(i, e) : 0) : 0;
886 if (add)
887 {
888 d_ea_2(i, e) += val;
889 }
890 else
891 {
892 d_ea_2(i, e) = val;
893 }
894 });
895 };
896
897 {
899 ea_data.UseDevice(true);
900 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
901 Array<Array<int>*> &markers_array = *a->GetDBFI_Marker();
902
903 if (integrators.Size() == 0) { ea_data = 0.0; }
904
905 for (int i = 0; i < integrators.Size(); ++i)
906 {
907 const bool add = (i > 0);
908 const Array<int> *markers = markers_array[i];
909 if (markers == nullptr)
910 {
911 integrators[i]->AssembleEA(*a->FESpace(), ea_data, add);
912 }
913 else
914 {
915 ea_data_tmp.SetSize(ea_data.Size());
916 integrators[i]->AssembleEA(*a->FESpace(), ea_data_tmp, false);
917 add_with_markers(ea_data_tmp, ea_data, ne, *markers,
919 }
920 }
921 }
922
924
925 {
926 Array<BilinearFormIntegrator*> &bdr_integs = *a->GetBBFI();
927 Array<Array<int>*> &markers_array = *a->GetBBFI_Marker();
928 const int n_bdr_integs = bdr_integs.Size();
929 if (n_bdr_integs > 0)
930 {
933 }
934 for (int i = 0; i < n_bdr_integs; ++i)
935 {
936 const bool add = (i > 0);
937 const Array<int> *markers = markers_array[i];
938 if (markers == nullptr)
939 {
940 bdr_integs[i]->AssembleEABoundary(*a->FESpace(), ea_data_bdr, add);
941 }
942 else
943 {
944 ea_data_tmp.SetSize(ea_data_bdr.Size());
945 bdr_integs[i]->AssembleEABoundary(*a->FESpace(), ea_data_tmp, add);
946 add_with_markers(ea_data_tmp, ea_data_bdr, nf_bdr, *markers,
948 }
949 }
950 }
951
952 {
953 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
954 const int intFaceIntegratorCount = intFaceIntegrators.Size();
955 if (intFaceIntegratorCount>0)
956 {
960 }
961 for (int i = 0; i < intFaceIntegratorCount; ++i)
962 {
963 const bool add = (i > 0);
964 intFaceIntegrators[i]->AssembleEAInteriorFaces(*a->FESpace(),
967 add);
968 }
969 }
970
971 {
972 Array<BilinearFormIntegrator*> &bdr_face_integs = *a->GetBFBFI();
973 Array<Array<int>*> &markers_array = *a->GetBFBFI_Marker();
974 const int n_bdr_face_integs = bdr_face_integs.Size();
975 if (n_bdr_face_integs > 0)
976 {
979 }
980 for (int i = 0; i < n_bdr_face_integs; ++i)
981 {
982 const bool add = (i > 0);
983 const Array<int> *markers = markers_array[i];
984 if (markers == nullptr)
985 {
986 bdr_face_integs[i]->AssembleEABoundaryFaces(
987 *a->FESpace(), ea_data_bdr, add);
988 }
989 else
990 {
991 ea_data_tmp.SetSize(ea_data_bdr.Size());
992 bdr_face_integs[i]->AssembleEABoundaryFaces(*a->FESpace(),
993 ea_data_tmp,
994 add);
995 add_with_markers(ea_data_tmp, ea_data_bdr, nf_bdr, *markers,
997 }
998 }
999 }
1000
1001 if (factorize_face_terms && int_face_restrict_lex)
1002 {
1003 auto restFint = dynamic_cast<const L2FaceRestriction*>(int_face_restrict_lex);
1004 restFint->AddFaceMatricesToElementMatrices(ea_data_int, ea_data);
1005 }
1006 if (factorize_face_terms && bdr_face_restrict_lex)
1007 {
1008 auto restFbdr = dynamic_cast<const L2FaceRestriction*>(bdr_face_restrict_lex);
1009 restFbdr->AddFaceMatricesToElementMatrices(ea_data_bdr, ea_data);
1010 }
1011}
1012
1013void EABilinearFormExtension::MultInternal(const Vector &x, Vector &y,
1014 const bool useTranspose,
1015 const bool useAbs) const
1016{
1017 auto elemRest = dynamic_cast<const ElementRestriction*>(elem_restrict);
1018 MFEM_ASSERT(useAbs?(elemRest!=nullptr):true,
1019 "elem_restrict is not ElementRestriction*!")
1020 // Apply the Element Restriction
1021 const bool useRestrict = !DeviceCanUseCeed() && elem_restrict;
1022 if (!useRestrict)
1023 {
1024 y.UseDevice(true); // typically this is a large vector, so store on device
1025 y = 0.0;
1026 }
1027 else if (useAbs)
1028 {
1029 elemRest->AbsMult(x, localX);
1030 localY = 0.0;
1031 }
1032 else
1033 {
1035 localY = 0.0;
1036 }
1037 // Apply the Element Matrices
1038 {
1039 Vector abs_ea_data;
1040 if (useAbs)
1041 {
1042 abs_ea_data = ea_data;
1043 abs_ea_data.Abs();
1044 }
1045 const int NDOFS = elemDofs;
1046 auto X = Reshape(useRestrict?localX.Read():x.Read(), NDOFS, ne);
1047 auto Y = Reshape(useRestrict?localY.ReadWrite():y.ReadWrite(), NDOFS, ne);
1048 auto A = Reshape(useAbs?abs_ea_data.Read():ea_data.Read(), NDOFS, NDOFS, ne);
1049 if (!useTranspose)
1050 {
1051 mfem::forall(ne*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1052 {
1053 const int e = glob_j/NDOFS;
1054 const int j = glob_j%NDOFS;
1055 real_t res = 0.0;
1056 for (int i = 0; i < NDOFS; i++)
1057 {
1058 res += A(i, j, e)*X(i, e);
1059 }
1060 Y(j, e) += res;
1061 });
1062 }
1063 else
1064 {
1065 mfem::forall(ne*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1066 {
1067 const int e = glob_j/NDOFS;
1068 const int j = glob_j%NDOFS;
1069 real_t res = 0.0;
1070 for (int i = 0; i < NDOFS; i++)
1071 {
1072 res += A(j, i, e)*X(i, e);
1073 }
1074 Y(j, e) += res;
1075 });
1076 }
1077 // Apply the Element Restriction transposed
1078 if (useRestrict)
1079 {
1080 if (useAbs)
1081 {
1082 elemRest->AbsMultTranspose(localY, y);
1083 }
1084 else
1085 {
1087 }
1088 }
1089 }
1090
1091 // Treatment of interior faces
1092 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
1093 const int iFISz = intFaceIntegrators.Size();
1094 if (int_face_restrict_lex && iFISz>0)
1095 {
1096 MFEM_VERIFY(!useAbs, "AbsMult not implemented with Face integrators!")
1097 // Apply the Interior Face Restriction
1099 if (int_face_X.Size()>0)
1100 {
1101 int_face_Y = 0.0;
1102 // Apply the interior face matrices
1103 const int NDOFS = faceDofs;
1104 auto X = Reshape(int_face_X.Read(), NDOFS, 2, nf_int);
1105 auto Y = Reshape(int_face_Y.ReadWrite(), NDOFS, 2, nf_int);
1107 {
1108 Vector abs_ea_data_int(ea_data_int.Size());
1109 if (useAbs)
1110 {
1111 abs_ea_data_int = ea_data_int;
1112 abs_ea_data_int.Abs();
1113 }
1114 auto A_int = Reshape(useAbs?abs_ea_data_int.Read():ea_data_int.Read(),
1115 NDOFS, NDOFS, 2, nf_int);
1116 if (!useTranspose)
1117 {
1118 mfem::forall(nf_int*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1119 {
1120 const int f = glob_j/NDOFS;
1121 const int j = glob_j%NDOFS;
1122 real_t res = 0.0;
1123 for (int i = 0; i < NDOFS; i++)
1124 {
1125 res += A_int(i, j, 0, f)*X(i, 0, f);
1126 }
1127 Y(j, 0, f) += res;
1128 res = 0.0;
1129 for (int i = 0; i < NDOFS; i++)
1130 {
1131 res += A_int(i, j, 1, f)*X(i, 1, f);
1132 }
1133 Y(j, 1, f) += res;
1134 });
1135 }
1136 else
1137 {
1138 mfem::forall(nf_int*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1139 {
1140 const int f = glob_j/NDOFS;
1141 const int j = glob_j%NDOFS;
1142 real_t res = 0.0;
1143 for (int i = 0; i < NDOFS; i++)
1144 {
1145 res += A_int(j, i, 0, f)*X(i, 0, f);
1146 }
1147 Y(j, 0, f) += res;
1148 res = 0.0;
1149 for (int i = 0; i < NDOFS; i++)
1150 {
1151 res += A_int(j, i, 1, f)*X(i, 1, f);
1152 }
1153 Y(j, 1, f) += res;
1154 });
1155 }
1156 }
1157 Vector abs_ea_data_ext(ea_data_ext.Size());
1158 if (useAbs)
1159 {
1160 abs_ea_data_ext = ea_data_ext;
1161 abs_ea_data_ext.Abs();
1162 }
1163 auto A_ext = Reshape(useAbs?abs_ea_data_ext.Read():ea_data_ext.Read(),
1164 NDOFS, NDOFS, 2, nf_int);
1165 if (!useTranspose)
1166 {
1167 mfem::forall(nf_int*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1168 {
1169 const int f = glob_j/NDOFS;
1170 const int j = glob_j%NDOFS;
1171 real_t res = 0.0;
1172 for (int i = 0; i < NDOFS; i++)
1173 {
1174 res += A_ext(i, j, 0, f)*X(i, 0, f);
1175 }
1176 Y(j, 1, f) += res;
1177 res = 0.0;
1178 for (int i = 0; i < NDOFS; i++)
1179 {
1180 res += A_ext(i, j, 1, f)*X(i, 1, f);
1181 }
1182 Y(j, 0, f) += res;
1183 });
1184 }
1185 else
1186 {
1187 mfem::forall(nf_int*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1188 {
1189 const int f = glob_j/NDOFS;
1190 const int j = glob_j%NDOFS;
1191 real_t res = 0.0;
1192 for (int i = 0; i < NDOFS; i++)
1193 {
1194 res += A_ext(j, i, 1, f)*X(i, 0, f);
1195 }
1196 Y(j, 1, f) += res;
1197 res = 0.0;
1198 for (int i = 0; i < NDOFS; i++)
1199 {
1200 res += A_ext(j, i, 0, f)*X(i, 1, f);
1201 }
1202 Y(j, 0, f) += res;
1203 });
1204 }
1205 // Apply the Interior Face Restriction transposed
1207 }
1208 }
1209
1210 // Treatment of boundary faces
1212 {
1213 MFEM_ASSERT(!useAbs, "AbsMult not implemented with Face integrators!")
1214 // Apply the Boundary Face Restriction
1215 // TODO: AbsMult if needed
1217 bdr_face_Y = 0.0;
1218 // Apply the boundary face matrices
1219 const int NDOFS = faceDofs;
1220 auto X = Reshape(bdr_face_X.Read(), NDOFS, nf_bdr);
1221 auto Y = Reshape(bdr_face_Y.ReadWrite(), NDOFS, nf_bdr);
1222 auto A = Reshape(ea_data_bdr.Read(), NDOFS, NDOFS, nf_bdr);
1223 if (!useTranspose)
1224 {
1225 // TODO: useAbs
1226 mfem::forall(nf_bdr*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1227 {
1228 const int f = glob_j/NDOFS;
1229 const int j = glob_j%NDOFS;
1230 real_t res = 0.0;
1231 for (int i = 0; i < NDOFS; i++)
1232 {
1233 res += A(i, j, f)*X(i, f);
1234 }
1235 Y(j, f) += res;
1236 });
1237 }
1238 else
1239 {
1240 // TODO: useAbs
1241 mfem::forall(nf_bdr*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1242 {
1243 const int f = glob_j/NDOFS;
1244 const int j = glob_j%NDOFS;
1245 real_t res = 0.0;
1246 for (int i = 0; i < NDOFS; i++)
1247 {
1248 res += A(j, i, f)*X(i, f);
1249 }
1250 Y(j, f) += res;
1251 });
1252 }
1253 // Apply the Boundary Face Restriction transposed
1254 // TODO: AbsMultTranspose if needed
1256 }
1257}
1258
1259void EABilinearFormExtension::GetElementMatrices(
1260 DenseTensor &element_matrices, ElementDofOrdering ordering, bool add_bdr)
1261{
1262 // Ensure the EA data is assembled
1263 if (ea_data.Size() == 0) { Assemble(); }
1264
1265 const int ndofs = elemDofs;
1266 element_matrices.SetSize(ndofs, ndofs, ne);
1267 const int N = element_matrices.TotalSize();
1268
1269 const auto d_ea_data = Reshape(ea_data.Read(), ndofs, ndofs, ne);
1270 auto d_element_matrices = Reshape(element_matrices.Write(),
1271 ndofs, ndofs,
1272 ne);
1273
1274 const int *d_dof_map = nullptr;
1275 Array<int> dof_map;
1276 if (ordering == ElementDofOrdering::NATIVE)
1277 {
1278 const TensorBasisElement* tbe =
1279 dynamic_cast<const TensorBasisElement*>(trial_fes->GetFE(0));
1280 if (tbe)
1281 {
1282 // Deep copy to avoid issues with host device (see similar comment in
1283 // HybridizationExtension::ConstructC).
1284 dof_map = tbe->GetDofMap();
1285 d_dof_map = dof_map.Read();
1286 }
1287 }
1288
1289 if (d_dof_map)
1290 {
1291 // Reordering required
1292 mfem::forall(N, [=] MFEM_HOST_DEVICE (int idx)
1293 {
1294 const int e = idx / ndofs / ndofs;
1295 const int i = idx % ndofs;
1296 const int j = (idx / ndofs) % ndofs;
1297 const int ii_s = d_dof_map[i];
1298 const int ii = (ii_s >= 0) ? ii_s : -1 - ii_s;
1299 const int s_i = (ii_s >= 0) ? 1 : -1;
1300 const int jj_s = d_dof_map[j];
1301 const int jj = (jj_s >= 0) ? jj_s : -1 - jj_s;
1302 const int s_j = (jj_s >= 0) ? 1 : -1;
1303 d_element_matrices(ii, jj, e) = s_i*s_j*d_ea_data(j, i, e);
1304 });
1305 }
1306 else
1307 {
1308 // No reordering required
1309 mfem::forall(N, [=] MFEM_HOST_DEVICE (int idx)
1310 {
1311 const int e = idx / ndofs / ndofs;
1312 const int i = idx % ndofs;
1313 const int j = (idx / ndofs) % ndofs;
1314 d_element_matrices(i, j, e) = d_ea_data(j, i, e);
1315 });
1316 }
1317
1318 if (add_bdr && ea_data_bdr.Size() > 0)
1319 {
1320 const int ndof_face = faceDofs;
1321 const auto d_ea_bdr = Reshape(ea_data_bdr.Read(),
1322 ndof_face, ndof_face, nf_bdr);
1323
1324 // Get all the local face maps (mapping from lexicographic face index to
1325 // lexicographic volume index, depending on the local face index).
1326 const Mesh &mesh = *trial_fes->GetMesh();
1327 const int dim = mesh.Dimension();
1328 const int n_faces_per_el = 2*dim; // assuming tensor product
1329 Array<int> face_maps(ndof_face * n_faces_per_el);
1330 for (int lf_i = 0; lf_i < n_faces_per_el; ++lf_i)
1331 {
1332 Array<int> face_map(ndof_face);
1333 trial_fes->GetFE(0)->GetFaceMap(lf_i, face_map);
1334 for (int i = 0; i < ndof_face; ++i)
1335 {
1336 face_maps[i + lf_i*ndof_face] = face_map[i];
1337 }
1338 }
1339
1340 Array<int> face_info(nf_bdr * 2);
1341 {
1342 int fidx = 0;
1343 for (int f = 0; f < mesh.GetNumFaces(); ++f)
1344 {
1345 const Mesh::FaceInformation finfo = mesh.GetFaceInformation(f);
1346 if (!finfo.IsBoundary()) { continue; }
1347 face_info[0 + fidx*2] = finfo.element[0].local_face_id;
1348 face_info[1 + fidx*2] = finfo.element[0].index;
1349 fidx++;
1350 }
1351 }
1352
1353 const auto d_face_maps = Reshape(face_maps.Read(), ndof_face, n_faces_per_el);
1354 const auto d_face_info = Reshape(face_info.Read(), 2, nf_bdr);
1355
1356 const bool reorder = (ordering == ElementDofOrdering::NATIVE);
1357
1358 mfem::forall_2D(nf_bdr, ndof_face, ndof_face, [=] MFEM_HOST_DEVICE (int f)
1359 {
1360 const int lf_i = d_face_info(0, f);
1361 const int e = d_face_info(1, f);
1362 // Loop over face indices in "native ordering"
1363 MFEM_FOREACH_THREAD(i_lex_face, x, ndof_face)
1364 {
1365 // Convert from lexicographic face DOF to volume DOF
1366 const int i_lex = d_face_maps(i_lex_face, lf_i);
1367
1368 const int ii_s = d_dof_map[i_lex];
1369 const int ii = (ii_s >= 0) ? ii_s : -1 - ii_s;
1370
1371 const int i = reorder ? ii : i_lex;
1372 const int s_i = (ii_s < 0 && reorder) ? -1 : 1;
1373
1374 MFEM_FOREACH_THREAD(j_lex_face, y, ndof_face)
1375 {
1376 // Convert from lexicographic face DOF to volume DOF
1377 const int j_lex = d_face_maps(j_lex_face, lf_i);
1378
1379 const int jj_s = d_dof_map[j_lex];
1380 const int jj = (jj_s >= 0) ? jj_s : -1 - jj_s;
1381
1382 const int j = reorder ? jj : j_lex;
1383 const int s_j = (jj_s < 0 && reorder) ? -1 : 1;
1384
1385 AtomicAdd(d_element_matrices(i, j, e),
1386 s_i*s_j*d_ea_bdr(i_lex_face, j_lex_face, f));
1387 }
1388 }
1389 });
1390 }
1391}
1392
1393// Data and methods for fully-assembled bilinear forms
1394FABilinearFormExtension::FABilinearFormExtension(BilinearForm *form)
1395 : EABilinearFormExtension(form),
1396 mat(a->mat)
1397{
1398#ifdef MFEM_USE_MPI
1399 ParFiniteElementSpace *pfes = nullptr;
1400 if ( a->GetFBFI()->Size()>0 &&
1401 (pfes = dynamic_cast<ParFiniteElementSpace*>(form->FESpace())) )
1402 {
1403 pfes->ExchangeFaceNbrData();
1404 }
1405#endif
1406}
1407
1408void FABilinearFormExtension::Assemble()
1409{
1410 EABilinearFormExtension::Assemble();
1411 FiniteElementSpace &fes = *a->FESpace();
1412 int width = fes.GetVSize();
1413 int height = fes.GetVSize();
1414 bool keep_nbr_block = false;
1415#ifdef MFEM_USE_MPI
1416 ParFiniteElementSpace *pfes = nullptr;
1417 if ( a->GetFBFI()->Size()>0 &&
1418 (pfes = dynamic_cast<ParFiniteElementSpace*>(&fes)) )
1419 {
1420 pfes->ExchangeFaceNbrData();
1421 width += pfes->GetFaceNbrVSize();
1422 dg_x.SetSize(width);
1423 ParBilinearForm *pb = nullptr;
1424 if ((pb = dynamic_cast<ParBilinearForm*>(a)) && (pb->keep_nbr_block))
1425 {
1426 height += pfes->GetFaceNbrVSize();
1427 dg_y.SetSize(height);
1428 keep_nbr_block = true;
1429 }
1430 }
1431#endif
1432 if (a->mat) // We reuse the sparse matrix memory
1433 {
1434 if (fes.IsDGSpace())
1435 {
1436 const L2ElementRestriction *restE =
1437 static_cast<const L2ElementRestriction*>(elem_restrict);
1438 const L2FaceRestriction *restF =
1439 static_cast<const L2FaceRestriction*>(int_face_restrict_lex);
1440 MFEM_VERIFY(
1441 fes.Conforming(),
1442 "Full Assembly not yet supported on NCMesh.");
1443 // 1. Fill J and Data
1444 // 1.1 Fill J and Data with Elem ea_data
1445 restE->FillJAndData(ea_data, *mat);
1446 // 1.2 Fill J and Data with Face ea_data_ext
1447 if (restF) { restF->FillJAndData(ea_data_ext, *mat, keep_nbr_block); }
1448 // 1.3 Shift indirections in I back to original
1449 auto I = mat->HostReadWriteI();
1450 for (int i = height; i > 0; i--)
1451 {
1452 I[i] = I[i-1];
1453 }
1454 I[0] = 0;
1455 }
1456 else
1457 {
1458 const ElementRestriction &rest =
1459 static_cast<const ElementRestriction&>(*elem_restrict);
1460 rest.FillJAndData(ea_data, *mat);
1461 }
1462 }
1463 else // We create, compute the sparsity, and fill the sparse matrix
1464 {
1465 mat = new SparseMatrix;
1466 mat->OverrideSize(height, width);
1467 if (fes.IsDGSpace())
1468 {
1469 const L2ElementRestriction *restE =
1470 static_cast<const L2ElementRestriction*>(elem_restrict);
1471 const L2FaceRestriction *restF =
1472 static_cast<const L2FaceRestriction*>(int_face_restrict_lex);
1473 // 1. Fill I
1474 mat->GetMemoryI().New(height+1, mat->GetMemoryI().GetMemoryType());
1475 // 1.1 Increment with restE
1476 restE->FillI(*mat);
1477 // 1.2 Increment with restF
1478 if (restF) { restF->FillI(*mat, keep_nbr_block); }
1479 // 1.3 Sum the non-zeros in I
1480 auto h_I = mat->HostReadWriteI();
1481 int cpt = 0;
1482 for (int i = 0; i < height; i++)
1483 {
1484 const int nnz = h_I[i];
1485 h_I[i] = cpt;
1486 cpt += nnz;
1487 }
1488 const int nnz = cpt;
1489 h_I[height] = nnz;
1490 mat->GetMemoryJ().New(nnz, mat->GetMemoryJ().GetMemoryType());
1491 mat->GetMemoryData().New(nnz, mat->GetMemoryData().GetMemoryType());
1492 // 2. Fill J and Data
1493 // 2.1 Fill J and Data with Elem ea_data
1494 restE->FillJAndData(ea_data, *mat);
1495 // 2.2 Fill J and Data with Face ea_data_ext
1496 if (restF) { restF->FillJAndData(ea_data_ext, *mat, keep_nbr_block); }
1497 // 2.3 Shift indirections in I back to original
1498 auto I = mat->HostReadWriteI();
1499 for (int i = height; i > 0; i--)
1500 {
1501 I[i] = I[i-1];
1502 }
1503 I[0] = 0;
1504 }
1505 else // continuous Galerkin case
1506 {
1507 const ElementRestriction &rest =
1508 static_cast<const ElementRestriction&>(*elem_restrict);
1509 rest.FillSparseMatrix(ea_data, *mat);
1510 }
1511 a->mat = mat;
1512 }
1513 if ( a->sort_sparse_matrix )
1514 {
1516 }
1517}
1518
1519
1520void FABilinearFormExtension::RAP(OperatorHandle &A)
1521{
1522#ifdef MFEM_USE_MPI
1523 if ( auto pa = dynamic_cast<ParBilinearForm*>(a) )
1524 {
1525 pa->ParallelRAP(*pa->mat, A);
1526 }
1527 else
1528#endif
1529 {
1530 a->SerialRAP(A);
1531 }
1532}
1533
1534void FABilinearFormExtension::EliminateBC(const Array<int> &ess_dofs,
1535 OperatorHandle &A)
1536{
1537 MFEM_VERIFY(a->diag_policy == DiagonalPolicy::DIAG_ONE,
1538 "Only DiagonalPolicy::DIAG_ONE supported with"
1539 " FABilinearFormExtension.");
1540#ifdef MFEM_USE_MPI
1541 if ( dynamic_cast<ParBilinearForm*>(a) )
1542 {
1543 A.As<HypreParMatrix>()->EliminateBC(ess_dofs,
1545 }
1546 else
1547#endif
1548 {
1549 A.As<SparseMatrix>()->EliminateBC(ess_dofs,
1551 }
1552}
1553
1554void FABilinearFormExtension::FormSystemMatrix(const Array<int> &ess_tdof_list,
1555 OperatorHandle &A)
1556{
1557 RAP(A);
1558 EliminateBC(ess_dofs, A);
1559}
1560
1561void FABilinearFormExtension::FormLinearSystem(const Array<int> &ess_tdof_list,
1562 Vector &x, Vector &b,
1563 OperatorHandle &A,
1564 Vector &X, Vector &B,
1565 int copy_interior)
1566{
1567 Operator *A_out;
1568 Operator::FormLinearSystem(ess_tdof_list, x, b, A_out, X, B, copy_interior);
1569 delete A_out;
1570 FormSystemMatrix(ess_tdof_list, A);
1571}
1572
1573void FABilinearFormExtension::DGMult(const Vector &x, Vector &y) const
1574{
1575#ifdef MFEM_USE_MPI
1576 const ParFiniteElementSpace *pfes;
1577 if ( (pfes = dynamic_cast<const ParFiniteElementSpace*>(test_fes)) )
1578 {
1579 // DG Prolongation
1580 ParGridFunction x_gf;
1581 x_gf.MakeRef(const_cast<ParFiniteElementSpace*>(pfes),
1582 const_cast<Vector&>(x),0);
1583 x_gf.ExchangeFaceNbrData();
1584 Vector &shared_x = x_gf.FaceNbrData();
1585 const int local_size = a->FESpace()->GetVSize();
1586 auto dg_x_ptr = dg_x.Write();
1587 auto x_ptr = x.Read();
1588 mfem::forall(local_size, [=] MFEM_HOST_DEVICE (int i)
1589 {
1590 dg_x_ptr[i] = x_ptr[i];
1591 });
1592 const int shared_size = shared_x.Size();
1593 auto shared_x_ptr = shared_x.Read();
1594 mfem::forall(shared_size, [=] MFEM_HOST_DEVICE (int i)
1595 {
1596 dg_x_ptr[local_size+i] = shared_x_ptr[i];
1597 });
1598 ParBilinearForm *pform = nullptr;
1599 if ((pform = dynamic_cast<ParBilinearForm*>(a)) && (pform->keep_nbr_block))
1600 {
1601 mat->Mult(dg_x, dg_y);
1602 // DG Restriction
1603 auto dg_y_ptr = dg_y.Read();
1604 auto y_ptr = y.ReadWrite();
1605 mfem::forall(local_size, [=] MFEM_HOST_DEVICE (int i)
1606 {
1607 y_ptr[i] += dg_y_ptr[i];
1608 });
1609 }
1610 else
1611 {
1612 mat->Mult(dg_x, y);
1613 }
1614 }
1615 else
1616#endif
1617 {
1618 mat->Mult(x, y);
1619 }
1620}
1621
1622void FABilinearFormExtension::Mult(const Vector &x, Vector &y) const
1623{
1624 if ( a->GetFBFI()->Size()>0 )
1625 {
1626 DGMult(x, y);
1627 }
1628 else
1629 {
1630 mat->Mult(x, y);
1631 }
1632}
1633
1634void FABilinearFormExtension::DGMultTranspose(const Vector &x, Vector &y) const
1635{
1636#ifdef MFEM_USE_MPI
1637 const ParFiniteElementSpace *pfes;
1638 if ( (pfes = dynamic_cast<const ParFiniteElementSpace*>(test_fes)) )
1639 {
1640 // DG Prolongation
1641 ParGridFunction x_gf;
1642 x_gf.MakeRef(const_cast<ParFiniteElementSpace*>(pfes),
1643 const_cast<Vector&>(x),0);
1644 x_gf.ExchangeFaceNbrData();
1645 Vector &shared_x = x_gf.FaceNbrData();
1646 const int local_size = a->FESpace()->GetVSize();
1647 auto dg_x_ptr = dg_x.Write();
1648 auto x_ptr = x.Read();
1649 mfem::forall(local_size, [=] MFEM_HOST_DEVICE (int i)
1650 {
1651 dg_x_ptr[i] = x_ptr[i];
1652 });
1653 const int shared_size = shared_x.Size();
1654 auto shared_x_ptr = shared_x.Read();
1655 mfem::forall(shared_size, [=] MFEM_HOST_DEVICE (int i)
1656 {
1657 dg_x_ptr[local_size+i] = shared_x_ptr[i];
1658 });
1659 ParBilinearForm *pb = nullptr;
1660 if ((pb = dynamic_cast<ParBilinearForm*>(a)) && (pb->keep_nbr_block))
1661 {
1662 mat->MultTranspose(dg_x, dg_y);
1663 // DG Restriction
1664 auto dg_y_ptr = dg_y.Read();
1665 auto y_ptr = y.ReadWrite();
1666 mfem::forall(local_size, [=] MFEM_HOST_DEVICE (int i)
1667 {
1668 y_ptr[i] += dg_y_ptr[i];
1669 });
1670 }
1671 else
1672 {
1673 mat->MultTranspose(dg_x, y);
1674 }
1675 }
1676 else
1677#endif
1678 {
1679 mat->MultTranspose(x, y);
1680 }
1681}
1682
1683void FABilinearFormExtension::MultTranspose(const Vector &x, Vector &y) const
1684{
1685 if ( a->GetFBFI()->Size()>0 )
1686 {
1687 DGMultTranspose(x, y);
1688 }
1689 else
1690 {
1691 mat->MultTranspose(x, y);
1692 }
1693}
1694
1695
1696MixedBilinearFormExtension::MixedBilinearFormExtension(MixedBilinearForm *form)
1697 : Operator(form->Height(), form->Width()), a(form)
1698{
1699 // empty
1700}
1701
1702const Operator *MixedBilinearFormExtension::GetProlongation() const
1703{
1704 return a->GetProlongation();
1705}
1706
1707const Operator *MixedBilinearFormExtension::GetRestriction() const
1708{
1709 return a->GetRestriction();
1710}
1711
1712const Operator *MixedBilinearFormExtension::GetOutputProlongation() const
1713{
1714 return a->GetOutputProlongation();
1715}
1716
1717const Operator *MixedBilinearFormExtension::GetOutputRestriction() const
1718{
1719 return a->GetOutputRestriction();
1720}
1721
1722// Data and methods for partially-assembled bilinear forms
1723
1724PAMixedBilinearFormExtension::PAMixedBilinearFormExtension(
1725 MixedBilinearForm *form)
1726 : MixedBilinearFormExtension(form),
1727 trial_fes(form->TrialFESpace()),
1728 test_fes(form->TestFESpace()),
1729 elem_restrict_trial(NULL),
1730 elem_restrict_test(NULL)
1731{
1732 Update();
1733}
1734
1735void PAMixedBilinearFormExtension::Assemble()
1736{
1737 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1738 const int integratorCount = integrators.Size();
1739 for (int i = 0; i < integratorCount; ++i)
1740 {
1741 integrators[i]->AssemblePA(*trial_fes, *test_fes);
1742 }
1743 MFEM_VERIFY(a->GetBBFI()->Size() == 0,
1744 "Partial assembly does not support AddBoundaryIntegrator yet.");
1745 MFEM_VERIFY(a->GetTFBFI()->Size() == 0,
1746 "Partial assembly does not support AddTraceFaceIntegrator yet.");
1747 MFEM_VERIFY(a->GetBTFBFI()->Size() == 0,
1748 "Partial assembly does not support AddBdrTraceFaceIntegrator yet.");
1749}
1750
1773
1774void PAMixedBilinearFormExtension::FormRectangularSystemOperator(
1775 const Array<int> &trial_tdof_list,
1776 const Array<int> &test_tdof_list,
1777 OperatorHandle &A)
1778{
1779 Operator * oper;
1780 Operator::FormRectangularSystemOperator(trial_tdof_list, test_tdof_list,
1781 oper);
1782 A.Reset(oper); // A will own oper
1783}
1784
1785void PAMixedBilinearFormExtension::FormRectangularLinearSystem(
1786 const Array<int> &trial_tdof_list,
1787 const Array<int> &test_tdof_list,
1788 Vector &x, Vector &b,
1789 OperatorHandle &A,
1790 Vector &X, Vector &B)
1791{
1792 Operator *oper;
1793 Operator::FormRectangularLinearSystem(trial_tdof_list, test_tdof_list, x, b,
1794 oper, X, B);
1795 A.Reset(oper); // A will own oper
1796}
1797
1798void PAMixedBilinearFormExtension::SetupMultInputs(
1799 const Operator *elem_restrict_x,
1800 const Vector &x,
1801 Vector &localX,
1802 const Operator *elem_restrict_y,
1803 Vector &y,
1804 Vector &localY,
1805 const real_t c) const
1806{
1807 // * G operation: localX = c*local(x)
1808 if (elem_restrict_x)
1809 {
1810 elem_restrict_x->Mult(x, localX);
1811 if (c != 1.0)
1812 {
1813 localX *= c;
1814 }
1815 }
1816 else
1817 {
1818 if (c == 1.0)
1819 {
1820 localX.SyncAliasMemory(x);
1821 }
1822 else
1823 {
1824 localX.Set(c, x);
1825 }
1826 }
1827 if (elem_restrict_y)
1828 {
1829 localY = 0.0;
1830 }
1831 else
1832 {
1833 y.UseDevice(true);
1834 localY.SyncAliasMemory(y);
1835 }
1836}
1837
1838void PAMixedBilinearFormExtension::Mult(const Vector &x, Vector &y) const
1839{
1840 y = 0.0;
1841 AddMult(x, y);
1842}
1843
1844void PAMixedBilinearFormExtension::AddMult(const Vector &x, Vector &y,
1845 const real_t c) const
1846{
1847 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1848 const int iSz = integrators.Size();
1849
1850 // * G operation
1853
1854 // * B^TDB operation
1855 for (int i = 0; i < iSz; ++i)
1856 {
1857 integrators[i]->AddMultPA(localTrial, localTest);
1858 }
1859
1860 // * G^T operation
1862 {
1863 tempY.SetSize(y.Size());
1865 y += tempY;
1866 }
1867}
1868
1869void PAMixedBilinearFormExtension::MultTranspose(const Vector &x,
1870 Vector &y) const
1871{
1872 y = 0.0;
1873 AddMultTranspose(x, y);
1874}
1875
1876void PAMixedBilinearFormExtension::AddMultTranspose(const Vector &x, Vector &y,
1877 const real_t c) const
1878{
1879 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1880 const int iSz = integrators.Size();
1881
1882 // * G operation
1885
1886 // * B^TD^TB operation
1887 for (int i = 0; i < iSz; ++i)
1888 {
1889 integrators[i]->AddMultTransposePA(localTest, localTrial);
1890 }
1891
1892 // * G^T operation
1894 {
1895 tempY.SetSize(y.Size());
1897 y += tempY;
1898 }
1899}
1900
1901void PAMixedBilinearFormExtension::AssembleDiagonal_ADAt(const Vector &D,
1902 Vector &diag) const
1903{
1904 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1905
1906 const int iSz = integrators.Size();
1907
1909 {
1910 const ElementRestriction* H1elem_restrict_trial =
1911 dynamic_cast<const ElementRestriction*>(elem_restrict_trial);
1912 if (H1elem_restrict_trial)
1913 {
1914 H1elem_restrict_trial->AbsMult(D, localTrial);
1915 }
1916 else
1917 {
1919 }
1920 }
1921
1923 {
1924 localTest = 0.0;
1925 for (int i = 0; i < iSz; ++i)
1926 {
1928 {
1929 integrators[i]->AssembleDiagonalPA_ADAt(localTrial, localTest);
1930 }
1931 else
1932 {
1933 integrators[i]->AssembleDiagonalPA_ADAt(D, localTest);
1934 }
1935 }
1936 const ElementRestriction* H1elem_restrict_test =
1937 dynamic_cast<const ElementRestriction*>(elem_restrict_test);
1938 if (H1elem_restrict_test)
1939 {
1940 H1elem_restrict_test->AbsMultTranspose(localTest, diag);
1941 }
1942 else
1943 {
1945 }
1946 }
1947 else
1948 {
1949 diag.UseDevice(true); // typically this is a large vector, so store on device
1950 diag = 0.0;
1951 for (int i = 0; i < iSz; ++i)
1952 {
1954 {
1955 integrators[i]->AssembleDiagonalPA_ADAt(localTrial, diag);
1956 }
1957 else
1958 {
1959 integrators[i]->AssembleDiagonalPA_ADAt(D, diag);
1960 }
1961 }
1962 }
1963}
1964
1970
1971const
1977
1978void PADiscreteLinearOperatorExtension::Assemble()
1979{
1980 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1981 const int integratorCount = integrators.Size();
1982 for (int i = 0; i < integratorCount; ++i)
1983 {
1984 integrators[i]->AssemblePA(*trial_fes, *test_fes);
1985 }
1986
1987 test_multiplicity.UseDevice(true);
1988 test_multiplicity.SetSize(elem_restrict_test->Width()); // l-vector
1989 Vector ones(elem_restrict_test->Height()); // e-vector
1990 ones = 1.0;
1991
1992 const ElementRestriction* elem_restrict =
1993 dynamic_cast<const ElementRestriction*>(elem_restrict_test);
1994 if (elem_restrict)
1995 {
1996 elem_restrict->AbsMultTranspose(ones, test_multiplicity);
1997 }
1998 else
1999 {
2000 mfem_error("A real ElementRestriction is required in this setting!");
2001 }
2002
2003 auto tm = test_multiplicity.ReadWrite();
2004 mfem::forall(test_multiplicity.Size(), [=] MFEM_HOST_DEVICE (int i)
2005 {
2006 tm[i] = 1.0 / tm[i];
2007 });
2008}
2009
2011 const Vector &x, Vector &y, const real_t c) const
2012{
2013 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
2014 const int iSz = integrators.Size();
2015
2016 // * G operation
2019
2020 // * B^TDB operation
2021 for (int i = 0; i < iSz; ++i)
2022 {
2023 integrators[i]->AddMultPA(localTrial, localTest);
2024 }
2025
2026 // do a kind of "set" rather than "add" in the below
2027 // operation as compared to the BilinearForm case
2028 // * G^T operation (kind of...)
2029 const ElementRestriction* elem_restrict =
2030 dynamic_cast<const ElementRestriction*>(elem_restrict_test);
2031 if (elem_restrict)
2032 {
2033 tempY.SetSize(y.Size());
2034 elem_restrict->MultLeftInverse(localTest, tempY);
2035 y += tempY;
2036 }
2037 else
2038 {
2039 mfem_error("In this setting you need a real ElementRestriction!");
2040 }
2041}
2042
2044 const Vector &x, Vector &y, const real_t c) const
2045{
2046 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
2047 const int iSz = integrators.Size();
2048
2049 // do a kind of "set" rather than "add" in the below
2050 // operation as compared to the BilinearForm case
2051 // * G operation (kinda)
2052 Vector xscaled(x);
2053 MFEM_VERIFY(x.Size() == test_multiplicity.Size(), "Input vector of wrong size");
2054 auto xs = xscaled.ReadWrite();
2055 auto tm = test_multiplicity.Read();
2056 mfem::forall(x.Size(), [=] MFEM_HOST_DEVICE (int i)
2057 {
2058 xs[i] *= tm[i];
2059 });
2062
2063 // * B^TD^TB operation
2064 for (int i = 0; i < iSz; ++i)
2065 {
2066 integrators[i]->AddMultTransposePA(localTest, localTrial);
2067 }
2068
2069 // * G^T operation
2071 {
2072 tempY.SetSize(y.Size());
2074 y += tempY;
2075 }
2076 else
2077 {
2078 mfem_error("Trial ElementRestriction not defined");
2079 }
2080}
2081
2082void PADiscreteLinearOperatorExtension::FormRectangularSystemOperator(
2083 const Array<int>& ess1, const Array<int>& ess2, OperatorHandle &A)
2084{
2085 const Operator *Pi = this->GetProlongation();
2086 const Operator *RoT = this->GetOutputRestrictionTranspose();
2087 Operator *rap = SetupRAP(Pi, RoT);
2088
2089 RectangularConstrainedOperator *Arco
2090 = new RectangularConstrainedOperator(rap, ess1, ess2, rap != this);
2091
2092 A.Reset(Arco);
2093}
2094
2095} // namespace mfem
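For orientation, the extension classes implemented above are not instantiated directly by user code; they are selected through BilinearForm::SetAssemblyLevel. The stand-alone driver below is a minimal sketch, modeled on MFEM's ex1 example rather than taken from this file, of how the partial-assembly path (PABilinearFormExtension) is typically exercised. The mesh, polynomial order, and solver settings are illustrative assumptions.

// Usage sketch (not part of bilinearform_ext.cpp): Poisson problem solved with
// the partial-assembly extension implemented in this file.
#include "mfem.hpp"
using namespace mfem;

int main()
{
   Mesh mesh = Mesh::MakeCartesian2D(8, 8, Element::QUADRILATERAL);
   H1_FECollection fec(2, mesh.Dimension());
   FiniteElementSpace fes(&mesh, &fec);

   // Essential (Dirichlet) true dofs on the whole boundary.
   Array<int> ess_tdof_list, ess_bdr(mesh.bdr_attributes.Max());
   ess_bdr = 1;
   fes.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);

   ConstantCoefficient one(1.0);
   LinearForm b(&fes);
   b.AddDomainIntegrator(new DomainLFIntegrator(one));
   b.Assemble();

   GridFunction x(&fes);
   x = 0.0;

   BilinearForm a(&fes);
   a.AddDomainIntegrator(new DiffusionIntegrator(one));
   // PARTIAL routes Assemble()/Mult() through PABilinearFormExtension;
   // ELEMENT, FULL and NONE select the EA, FA and MF extensions instead.
   a.SetAssemblyLevel(AssemblyLevel::PARTIAL);
   a.Assemble();

   OperatorPtr A;
   Vector B, X;
   a.FormLinearSystem(ess_tdof_list, x, b, A, X, B);

   // Jacobi preconditioner built from the PA diagonal (AssembleDiagonal above).
   OperatorJacobiSmoother prec(a, ess_tdof_list);
   CGSolver cg;
   cg.SetRelTol(1e-12);
   cg.SetMaxIter(400);
   cg.SetOperator(*A);
   cg.SetPreconditioner(prec);
   cg.Mult(B, X);

   a.RecoverFEMSolution(X, b, x);
   return 0;
}

Roughly speaking, FormLinearSystem here dispatches to PABilinearFormExtension::FormLinearSystem, which wraps the unassembled operator in a constrained operator instead of building a SparseMatrix; switching the assembly level to FULL would instead produce an assembled matrix via FABilinearFormExtension.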
MFEM_HOST_DEVICE T AtomicAdd(T &add, const T val)
Definition backends.hpp:94
int Size() const
Return the logical size of the array.
Definition array.hpp:166
const T * Read(bool on_dev=true) const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), on_dev).
Definition array.hpp:381
Class extending the BilinearForm class to support different AssemblyLevels.
BilinearFormExtension(BilinearForm *form)
const Operator * GetRestriction() const override
Get the finite element space restriction matrix.
BilinearForm * a
Not owned.
const Operator * GetProlongation() const override
Get the finite element space prolongation matrix.
Abstract base class BilinearFormIntegrator.
virtual void AssembleDiagonalPA(Vector &diag)
Assemble diagonal and add it to Vector diag.
virtual void AddMultTransposePA(const Vector &x, Vector &y) const
Method for partially assembled transposed action.
void AddMultPA(const Vector &x, Vector &y) const override
Method for partially assembled action.
virtual void AddAbsMultPA(const Vector &x, Vector &y) const
virtual void AddMultPAFaceNormalDerivatives(const Vector &x, const Vector &dxdn, Vector &y, Vector &dydn) const
Method for partially assembled action.
virtual void AddAbsMultTransposePA(const Vector &x, Vector &y) const
A "square matrix" operator for the associated FE space and BLFIntegrators The sum of all the BLFInteg...
Array< BilinearFormIntegrator * > * GetFBFI()
Access all integrators added with AddInteriorFaceIntegrator().
Array< BilinearFormIntegrator * > * GetDBFI()
Access all the integrators added with AddDomainIntegrator().
DiagonalPolicy diag_policy
This data member allows one to specify what should be done to the diagonal matrix entries and corresp...
FiniteElementSpace * FESpace()
Return the FE space associated with the BilinearForm.
Array< BilinearFormIntegrator * > * GetBBFI()
Access all the integrators added with AddBoundaryIntegrator().
Array< Array< int > * > * GetBBFI_Marker()
Access all boundary markers added with AddBoundaryIntegrator(). If no marker was specified when the i...
SparseMatrix * mat
Sparse matrix to be associated with the form. Owned.
const Operator * GetRestriction() const override
Get the finite element space restriction operator.
const Operator * GetProlongation() const override
Get the finite element space prolongation operator.
void SerialRAP(OperatorHandle &A)
Compute serial RAP operator and store it in A as a SparseMatrix.
Array< BilinearFormIntegrator * > * GetBFBFI()
Access all integrators added with AddBdrFaceIntegrator().
Array< Array< int > * > * GetBFBFI_Marker()
Access all boundary markers added with AddBdrFaceIntegrator(). If no marker was specified when the in...
Array< Array< int > * > * GetDBFI_Marker()
Access all boundary markers added with AddDomainIntegrator(). If no marker was specified when the int...
Rank 3 tensor (array of matrices)
void SetSize(int i, int j, int k, MemoryType mt_=MemoryType::PRESERVE)
int TotalSize() const
real_t * Write(bool on_dev=true)
Shortcut for mfem::Write(GetMemory(), TotalSize(), on_dev).
static MemoryType GetMemoryType()
(DEPRECATED) Equivalent to GetDeviceMemoryType().
Definition device.hpp:281
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Definition device.hpp:262
static MemoryType GetDeviceMemoryType()
Get the current Device MemoryType. This is the MemoryType used by most MFEM classes when allocating m...
Definition device.hpp:277
Data and methods for element-assembled bilinear forms.
void GetElementMatrices(DenseTensor &element_matrices, ElementDofOrdering ordering, bool add_bdr)
Populates element_matrices with the element matrices.
void MultInternal(const Vector &x, Vector &y, const bool useTranspose, const bool useAbs=false) const
EABilinearFormExtension(BilinearForm *form)
void Assemble() override
Assemble at the level given for the BilinearFormExtension subclass.
Operator that converts FiniteElementSpace L-vectors to E-vectors.
void AbsMult(const Vector &x, Vector &y) const override
Compute Mult without applying signs based on DOF orientations.
void FillSparseMatrix(const Vector &mat_ea, SparseMatrix &mat) const
Fill a Sparse Matrix with Element Matrices.
void FillJAndData(const Vector &ea_data, SparseMatrix &mat) const
void MultLeftInverse(const Vector &x, Vector &y) const
void AbsMultTranspose(const Vector &x, Vector &y) const override
Compute MultTranspose without applying signs based on DOF orientations.
void MultTranspose(const Vector &x, Vector &y) const override
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
void DGMultTranspose(const Vector &x, Vector &y) const
void EliminateBC(const Array< int > &ess_dofs, OperatorHandle &A)
void DGMult(const Vector &x, Vector &y) const
void FormSystemMatrix(const Array< int > &ess_tdof_list, OperatorHandle &A) override
void Mult(const Vector &x, Vector &y) const override
Operator application: y=A(x).
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B, int copy_interior=0) override
void Assemble() override
Assemble at the level given for the BilinearFormExtension subclass.
FABilinearFormExtension(BilinearForm *form)
virtual void AddMultTransposeInPlace(Vector &x, Vector &y) const
Add the face degrees of freedom x to the element degrees of freedom y. Perform the same computation a...
virtual void NormalDerivativeAddMultTranspose(const Vector &x, Vector &y) const
Add the face reference-normal derivative degrees of freedom in x to the element degrees of freedom in...
virtual void NormalDerivativeMult(const Vector &x, Vector &y) const
For each face, sets y to the partial derivative of x with respect to the reference coordinate whose d...
virtual void AddAbsMultTranspose(const Vector &x, Vector &y, const real_t a=1.0) const
Add the face degrees of freedom x to the element degrees of freedom y ignoring the signs from DOF ori...
void Mult(const Vector &x, Vector &y) const override=0
Extract the face degrees of freedom from x into y.
Class FiniteElementSpace - responsible for providing FEM view of the mesh, mainly managing the set of...
Definition fespace.hpp:208
const NURBSExtension * GetNURBSext() const
Definition fespace.hpp:646
bool Conforming() const
Definition fespace.hpp:650
virtual const FiniteElement * GetFE(int i) const
Returns pointer to the FiniteElement in the FiniteElementCollection associated with i'th element in t...
Definition fespace.cpp:3824
const ElementRestrictionOperator * GetElementRestriction(ElementDofOrdering e_ordering) const
Return an Operator that converts L-vectors to E-vectors.
Definition fespace.cpp:1480
const FiniteElement * GetTypicalTraceElement() const
Return a "typical" trace element.
Definition fespace.cpp:3948
int GetNFbyType(FaceType type) const
Returns the number of faces according to the requested type.
Definition fespace.hpp:886
Mesh * GetMesh() const
Returns the mesh.
Definition fespace.hpp:644
int GetVSize() const
Return the number of vector dofs, i.e. GetNDofs() x GetVDim().
Definition fespace.hpp:826
bool IsDGSpace() const
Return whether or not the space is discontinuous (L2)
Definition fespace.hpp:1503
const FiniteElement * GetTypicalFE() const
Return GetFE(0) if the local mesh is not empty; otherwise return a typical FE based on the Geometry t...
Definition fespace.cpp:3860
virtual const FaceRestriction * GetFaceRestriction(ElementDofOrdering f_ordering, FaceType, L2FaceValues mul=L2FaceValues::DoubleValued) const
Return an Operator that converts L-vectors to E-vectors on each face.
Definition fespace.cpp:1513
virtual void GetFaceMap(const int face_id, Array< int > &face_map) const
Return the mapping from lexicographic face DOFs to lexicographic element DOFs for the given local fac...
Definition fe_base.cpp:517
int GetDof() const
Returns the number of degrees of freedom in the finite element.
Definition fe_base.hpp:337
Wrapper for hypre's ParCSR matrix class.
Definition hypre.hpp:419
Operator that converts L2 FiniteElementSpace L-vectors to E-vectors.
void FillJAndData(const Vector &ea_data, SparseMatrix &mat) const
void FillI(SparseMatrix &mat) const
Operator that extracts Face degrees of freedom for L2 spaces.
virtual void FillI(SparseMatrix &mat, const bool keep_nbr_block=false) const
Fill the I array of SparseMatrix corresponding to the sparsity pattern given by this L2FaceRestrictio...
virtual void FillJAndData(const Vector &fea_data, SparseMatrix &mat, const bool keep_nbr_block=false) const
Fill the J and Data arrays of the SparseMatrix corresponding to the sparsity pattern given by this L2...
virtual void AddFaceMatricesToElementMatrices(const Vector &fea_data, Vector &ea_data) const
This method adds the DG face matrices to the element matrices.
void Mult(const Vector &x, Vector &y) const override
Operator application: y=A(x).
const FiniteElementSpace * trial_fes
void AssembleDiagonal(Vector &diag) const override
Computes the diagonal entries into diag. Typically, this operation only makes sense for linear Operat...
const FaceRestriction * bdr_face_restrict_lex
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B, int copy_interior=0) override
void MultTranspose(const Vector &x, Vector &y) const override
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
void Assemble() override
Assemble at the level given for the BilinearFormExtension subclass.
const FiniteElementSpace * test_fes
const FaceRestriction * int_face_restrict_lex
void FormSystemMatrix(const Array< int > &ess_tdof_list, OperatorHandle &A) override
MFBilinearFormExtension(BilinearForm *form)
MemoryType GetMemoryType() const
Return a MemoryType that is currently valid. If both the host and the device pointers are currently v...
void New(int size)
Allocate host memory for size entries with the current host memory type returned by MemoryManager::Ge...
Mesh data type.
Definition mesh.hpp:65
int GetNumFaces() const
Return the number of faces (3D), edges (2D) or vertices (1D).
Definition mesh.cpp:6846
const Array< int > & GetElementAttributes() const
Returns the attributes for all elements in this mesh. The i'th entry of the array is the attribute of...
Definition mesh.cpp:965
int GetNE() const
Returns number of elements.
Definition mesh.hpp:1377
int Dimension() const
Dimension of the reference space used within the elements.
Definition mesh.hpp:1306
FaceInformation GetFaceInformation(int f) const
Definition mesh.cpp:1293
const Array< int > & GetBdrFaceAttributes() const
Returns the attributes for all boundary elements in this mesh.
Definition mesh.cpp:925
Class extending the MixedBilinearForm class to support different AssemblyLevels.
MixedBilinearForm * a
Not owned.
const Operator * GetOutputProlongation() const override
Get the output finite element space prolongation matrix.
const Operator * GetProlongation() const override
Get the finite element space prolongation matrix.
const Operator * GetOutputRestriction() const override
Get the output finite element space restriction matrix.
MixedBilinearFormExtension(MixedBilinearForm *form)
const Operator * GetRestriction() const override
Get the finite element space restriction matrix.
Array< BilinearFormIntegrator * > * GetTFBFI()
Access all integrators added with AddTraceFaceIntegrator().
const Operator * GetOutputProlongation() const override
Get the test finite element space prolongation matrix.
const Operator * GetOutputRestriction() const override
Get the test finite element space restriction matrix.
Array< BilinearFormIntegrator * > * GetBBFI()
Access all integrators added with AddBoundaryIntegrator().
FiniteElementSpace * TestFESpace()
Return the test FE space associated with the MixedBilinearForm.
const Operator * GetRestriction() const override
Get the input finite element space restriction matrix.
const Operator * GetProlongation() const override
Get the input finite element space prolongation matrix.
FiniteElementSpace * TrialFESpace()
Return the trial FE space associated with the MixedBilinearForm.
Array< BilinearFormIntegrator * > * GetDBFI()
Access all integrators added with AddDomainIntegrator().
Array< BilinearFormIntegrator * > * GetBTFBFI()
Access all integrators added with AddBdrTraceFaceIntegrator().
Pointer to an Operator of a specified type.
Definition handle.hpp:34
OpType * As() const
Return the Operator pointer statically cast to a specified OpType. Similar to the method Get().
Definition handle.hpp:104
void Reset(OpType *A, bool own_A=true)
Reset the OperatorHandle to the given OpType pointer, A.
Definition handle.hpp:145
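A small sketch of the ownership semantics; n is an assumed dimension:
OperatorHandle A;
A.Reset(new SparseMatrix(n, n));      // A now owns the matrix (own_A defaults to true)
A.As<SparseMatrix>()->Add(0, 0, 1.0); // access the handle as its concrete type
A.As<SparseMatrix>()->Finalize();
// The SparseMatrix is deleted when A goes out of scope or is Reset again.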
Abstract operator.
Definition operator.hpp:25
void FormRectangularLinearSystem(const Array< int > &trial_tdof_list, const Array< int > &test_tdof_list, Vector &x, Vector &b, Operator *&A, Vector &X, Vector &B)
Form a column-constrained linear system using a matrix-free approach.
Definition operator.cpp:131
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, Operator *&A, Vector &X, Vector &B, int copy_interior=0)
Form a constrained linear system using a matrix-free approach.
Definition operator.cpp:114
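The constrained-system path these extensions plug into is the standard MFEM solve pattern; a sketch assuming fespace, ess_tdof_list, a GridFunction x, and a LinearForm b already exist (names are illustrative):
BilinearForm a(&fespace);
a.SetAssemblyLevel(AssemblyLevel::PARTIAL); // drives PABilinearFormExtension
a.AddDomainIntegrator(new DiffusionIntegrator);
a.Assemble();

OperatorPtr A;
Vector B, X;
a.FormLinearSystem(ess_tdof_list, x, b, A, X, B);

CGSolver cg;
cg.SetRelTol(1e-12);
cg.SetMaxIter(2000);
cg.SetOperator(*A);
cg.Mult(B, X);
a.RecoverFEMSolution(X, b, x);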
int width
Dimension of the input / number of columns in the matrix.
Definition operator.hpp:28
void FormSystemOperator(const Array< int > &ess_tdof_list, Operator *&A)
Return in A a parallel (on truedofs) version of this square operator.
Definition operator.cpp:227
virtual const Operator * GetOutputRestrictionTranspose() const
Transpose of GetOutputRestriction, directly available in this form to facilitate matrix-free RAP-type...
Definition operator.hpp:168
int Height() const
Get the height (size of output) of the Operator. Synonym with NumRows().
Definition operator.hpp:66
int height
Dimension of the output / number of rows in the matrix.
Definition operator.hpp:27
virtual void Mult(const Vector &x, Vector &y) const =0
Operator application: y=A(x).
@ DIAG_ONE
Set the diagonal value to one.
Definition operator.hpp:50
int Width() const
Get the width (size of input) of the Operator. Synonym with NumCols().
Definition operator.hpp:72
void FormRectangularSystemOperator(const Array< int > &trial_tdof_list, const Array< int > &test_tdof_list, Operator *&A)
Return in A a parallel (on truedofs) version of this rectangular operator (including constraints).
Definition operator.cpp:235
Operator * SetupRAP(const Operator *Pi, const Operator *Po)
Returns the RAP Operator of this, using input/output prolongation matrices; Pi corresponds to "P",...
Definition operator.cpp:168
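For reference, the same P^T * A * P action can also be formed explicitly with RAPOperator; a sketch assuming oper is a square Operator acting on the space's vdofs and fes is its FiniteElementSpace:
const Operator *P = fes.GetProlongationMatrix(); // may be NULL (identity) for serial conforming spaces
if (P)
{
   RAPOperator rap(*P, oper, *P); // the first argument plays the role of "Rt"
   Vector X(rap.Width()), Y(rap.Height());
   X = 1.0;
   rap.Mult(X, Y); // applies P^T * oper * P without assembling a matrix
}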
virtual void MultTranspose(const Vector &x, Vector &y) const
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
Definition operator.hpp:100
Data and methods for partially-assembled bilinear forms.
void FormSystemMatrix(const Array< int > &ess_tdof_list, OperatorHandle &A) override
void AssembleDiagonal(Vector &diag) const override
Computes the diagonal entries into diag. Typically, this operation only makes sense for linear Operat...
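The assembled diagonal is what a matrix-free Jacobi preconditioner consumes; continuing the FormLinearSystem sketch above (a, ess_tdof_list, A, B, X are the same assumed names):
OperatorJacobiSmoother jacobi(a, ess_tdof_list); // uses the partially assembled diagonal of a
CGSolver cg;
cg.SetOperator(*A);
cg.SetPreconditioner(jacobi);
cg.Mult(B, X);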
const FiniteElementSpace * trial_fes
const FaceRestriction * int_face_restrict_lex
const FaceRestriction * bdr_face_restrict_lex
void MultInternal(const Vector &x, Vector &y, const bool useAbs=false) const
void AddMultNormalDerivativesWithMarkers(const BilinearFormIntegrator &integ, const Vector &x, const Vector &dxdn, const Array< int > *markers, const Array< int > &attributes, Vector &y, Vector &dydn) const
Performs the same function as AddMultWithMarkers, but takes as input and output face normal derivativ...
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B, int copy_interior=0) override
void MultTranspose(const Vector &x, Vector &y) const override
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
const FiniteElementSpace * test_fes
void Assemble() override
Assemble at the level given for the BilinearFormExtension subclass.
void AddMultWithMarkers(const BilinearFormIntegrator &integ, const Vector &x, const Array< int > *markers, const Array< int > &attributes, const bool transpose, Vector &y, const bool useAbs=false) const
Accumulate the action (or transpose) of the integrator on x into y, taking into account the (possibly...
const Array< int > * elem_attributes
Attributes of all mesh elements.
void SetupRestrictionOperators(const L2FaceValues m)
const Array< int > * bdr_face_attributes
void Assemble() override
Partial assembly of all internal integrators.
PADiscreteLinearOperatorExtension(DiscreteLinearOperator *linop)
void AddMult(const Vector &x, Vector &y, const real_t c=1.0) const override
Operator application: y+=A(x) (default) or y+=a*A(x).
void AddMultTranspose(const Vector &x, Vector &y, const real_t c=1.0) const override
Operator transpose application: y+=A^t(x) (default) or y+=a*A^t(x).
const Operator * GetOutputRestrictionTranspose() const override
Transpose of GetOutputRestriction, directly available in this form to facilitate matrix-free RAP-type...
void FormRectangularSystemOperator(const Array< int > &, const Array< int > &, OperatorHandle &A) override
Data and methods for partially-assembled mixed bilinear forms.
void FormRectangularSystemOperator(const Array< int > &trial_tdof_list, const Array< int > &test_tdof_list, OperatorHandle &A) override
Set up OperatorHandle A to contain the constrained linear operator.
const FiniteElementSpace * test_fes
void Assemble() override
Partial assembly of all internal integrators.
PAMixedBilinearFormExtension(MixedBilinearForm *form)
void MultTranspose(const Vector &x, Vector &y) const override
y = A^T*x
const FiniteElementSpace * trial_fes
void AddMultTranspose(const Vector &x, Vector &y, const real_t c=1.0) const override
y += c*A^T*x
void AssembleDiagonal_ADAt(const Vector &D, Vector &diag) const override
Assemble the diagonal of ADA^T for a diagonal vector D.
void SetupMultInputs(const Operator *elem_restrict_x, const Vector &x, Vector &localX, const Operator *elem_restrict_y, Vector &y, Vector &localY, const real_t c) const
Helper function to set up inputs/outputs for Mult or MultTranspose.
void FormRectangularLinearSystem(const Array< int > &trial_tdof_list, const Array< int > &test_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B) override
void Update() override
Update internals for when a new MixedBilinearForm is given to this class.
void AddMult(const Vector &x, Vector &y, const real_t c=1.0) const override
y += c*A*x
void Mult(const Vector &x, Vector &y) const override
y = A*x
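A hedged sketch of driving a partially assembled mixed form from user code; trial_fes (scalar H1) and test_fes (vector L2 with vdim equal to the mesh dimension) are assumed spaces, and GradientIntegrator is just one integrator with PA support:
MixedBilinearForm g(&trial_fes, &test_fes);
g.SetAssemblyLevel(AssemblyLevel::PARTIAL);
g.AddDomainIntegrator(new GradientIntegrator);
g.Assemble();

Vector u(trial_fes.GetVSize()), v(test_fes.GetVSize());
u = 1.0; v = 0.0;
g.Mult(u, v);                  // v  = G u       (PAMixedBilinearFormExtension::Mult)
g.AddMultTranspose(v, u, 0.5); // u += 0.5 G^T v (PAMixedBilinearFormExtension::AddMultTranspose)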
Class for parallel bilinear form.
Abstract parallel finite element space.
Definition pfespace.hpp:31
Class for parallel grid function.
Definition pgridfunc.hpp:50
void MakeRef(FiniteElementSpace *f, real_t *v) override
Make the ParGridFunction reference external data on a new FiniteElementSpace.
Rectangular Operator for imposing essential boundary conditions on the input space using only the act...
Data type sparse matrix.
Definition sparsemat.hpp:51
void MultTranspose(const Vector &x, Vector &y) const override
Multiply a vector with the transposed matrix. y = At * x.
Memory< int > & GetMemoryI()
void Mult(const Vector &x, Vector &y) const override
Matrix vector multiplication.
void OverrideSize(int height_, int width_)
Sets the height and width of the matrix.
void SortColumnIndices()
Sort the column indices corresponding to each row.
Memory< int > & GetMemoryJ()
Memory< real_t > & GetMemoryData()
const Array< int > & GetDofMap() const
Get an Array<int> that maps lexicographically ordered indices to the indices of the respective nodes/...
Definition fe_base.hpp:1276
Vector data type.
Definition vector.hpp:82
virtual const real_t * Read(bool on_dev=true) const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:520
virtual real_t * ReadWrite(bool on_dev=true)
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:536
void SyncAliasMemory(const Vector &v) const
Update the alias memory location of the vector to match v.
Definition vector.hpp:275
Vector & Set(const real_t a, const Vector &x)
(*this) = a * x
Definition vector.cpp:341
int Size() const
Returns the size of the vector.
Definition vector.hpp:234
void Abs()
(*this)(i) = abs((*this)(i))
Definition vector.cpp:392
virtual void UseDevice(bool use_dev) const
Enable execution of Vector operations using the mfem::Device.
Definition vector.hpp:145
void SetSize(int s)
Resize the vector to size s.
Definition vector.hpp:584
virtual real_t * Write(bool on_dev=true)
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:528
int dim
Definition ex24.cpp:53
real_t b
Definition lissajous.cpp:42
real_t a
Definition lissajous.cpp:41
void mfem_error(const char *msg)
Definition error.cpp:154
void add(const Vector &v1, const Vector &v2, Vector &v)
Definition vector.cpp:414
MFEM_HOST_DEVICE DeviceTensor< sizeof...(Dims), T > Reshape(T *ptr, Dims... dims)
Wrap a pointer as a DeviceTensor with automatically deduced template parameters.
Definition dtensor.hpp:138
bool DeviceCanUseCeed()
Function that determines if a CEED kernel should be used, based on the current mfem::Device configura...
Definition util.cpp:33
void forall_2D(int N, int X, int Y, lambda &&body)
Definition forall.hpp:925
float real_t
Definition config.hpp:46
ElementDofOrdering GetEVectorOrdering(const FiniteElementSpace &fes)
Return LEXICOGRAPHIC if the mesh contains only one topology and the elements are tensor elements,...
Definition fespace.cpp:4586
ElementDofOrdering
Constants describing the possible orderings of the DOFs in one element.
Definition fespace.hpp:47
@ NATIVE
Native ordering as defined by the FiniteElement.
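The extensions choose between the two orderings based on the basis type; a short sketch assuming fes is a FiniteElementSpace:
const ElementDofOrdering ordering = UsesTensorBasis(fes)
   ? ElementDofOrdering::LEXICOGRAPHIC
   : ElementDofOrdering::NATIVE;
const Operator *R = fes.GetElementRestriction(ordering);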
std::function< real_t(const Vector &)> f(real_t mass_coeff)
Definition lor_mms.hpp:30
void forall(int N, lambda &&body)
Definition forall.hpp:839
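A minimal device-kernel sketch combining mfem::forall with the Vector memory interface; x and y are assumed Vectors of equal size:
const int n = x.Size();
const real_t *d_x = x.Read();  // device-readable pointer
real_t *d_y = y.ReadWrite();   // device read-write pointer
mfem::forall(n, [=] MFEM_HOST_DEVICE (int i)
{
   d_y[i] += 2.0 * d_x[i];     // y += 2*x, executed on host or device per the Device config
});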
@ CEED_MASK
Bitwise-OR of all CEED backends.
Definition device.hpp:97
This structure is used as a human readable output format that deciphers the information contained in ...
Definition mesh.hpp:2081
bool IsBoundary() const
Return true if the face is a boundary face.
Definition mesh.hpp:2122
struct mfem::Mesh::FaceInformation::@15 element[2]