MFEM v4.8.0
Finite element discretization library
bilinearform_ext.cpp
1// Copyright (c) 2010-2025, Lawrence Livermore National Security, LLC. Produced
2// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3// LICENSE and NOTICE for details. LLNL-CODE-806117.
4//
5// This file is part of the MFEM library. For more information and source code
6// availability visit https://mfem.org.
7//
8// MFEM is free software; you can redistribute it and/or modify it under the
9// terms of the BSD-3 license. We welcome feedback and contributions, see file
10// CONTRIBUTING.md for details.
11
12// Implementations of classes FABilinearFormExtension, EABilinearFormExtension,
13// PABilinearFormExtension and MFBilinearFormExtension.
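// These extensions are not constructed directly by user code: BilinearForm
// creates the matching extension when a non-legacy assembly level is
// requested. Illustrative usage sketch (assumes an existing
// FiniteElementSpace 'fes', GridFunction 'x', LinearForm 'b', and essential
// true-dof list 'ess_tdof_list'):
//
//   BilinearForm a(&fes);
//   a.AddDomainIntegrator(new DiffusionIntegrator);
//   a.SetAssemblyLevel(AssemblyLevel::PARTIAL); // -> PABilinearFormExtension
//   a.Assemble();
//   OperatorPtr A; Vector X, B;
//   a.FormLinearSystem(ess_tdof_list, x, b, A, X, B);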
14
15#include "../general/forall.hpp"
16#include "bilinearform.hpp"
17#include "pbilinearform.hpp"
18#include "pgridfunc.hpp"
19#include "fe/face_map_utils.hpp"
21
22namespace mfem
23{
24
26 : Operator(form->Size()), a(form)
27{
28 // empty
29}
30
35
40
41// Data and methods for matrix-free bilinear forms
44 trial_fes(a->FESpace()),
45 test_fes(a->FESpace())
46{
47 elem_restrict = NULL;
50}
51
53{
54 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
55 const int integratorCount = integrators.Size();
56 for (int i = 0; i < integratorCount; ++i)
57 {
58 integrators[i]->AssembleMF(*a->FESpace());
59 }
60
61 MFEM_VERIFY(a->GetBBFI()->Size() == 0, "AddBoundaryIntegrator is not "
62 "currently supported in MFBilinearFormExtension");
63}
64
66{
67 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
68
69 const int iSz = integrators.Size();
71 {
72 localY = 0.0;
73 for (int i = 0; i < iSz; ++i)
74 {
75 integrators[i]->AssembleDiagonalMF(localY);
76 }
77 const ElementRestriction* H1elem_restrict =
78 dynamic_cast<const ElementRestriction*>(elem_restrict);
79 if (H1elem_restrict)
80 {
81 H1elem_restrict->MultTransposeUnsigned(localY, y);
82 }
83 else
84 {
86 }
87 }
88 else
89 {
90 y.UseDevice(true); // typically this is a large vector, so store on device
91 y = 0.0;
92 for (int i = 0; i < iSz; ++i)
93 {
94 integrators[i]->AssembleDiagonalMF(y);
95 }
96 }
97}
98
100{
101 FiniteElementSpace *fes = a->FESpace();
102 height = width = fes->GetVSize();
103 trial_fes = fes;
104 test_fes = fes;
105
106 elem_restrict = nullptr;
107 int_face_restrict_lex = nullptr;
108 bdr_face_restrict_lex = nullptr;
109}
110
113{
114 Operator *oper;
115 Operator::FormSystemOperator(ess_tdof_list, oper);
116 A.Reset(oper); // A will own oper
117}
118
120 Vector &x, Vector &b,
122 Vector &X, Vector &B,
123 int copy_interior)
124{
125 Operator *oper;
126 Operator::FormLinearSystem(ess_tdof_list, x, b, oper, X, B, copy_interior);
127 A.Reset(oper); // A will own oper
128}
129
131{
132 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
133
134 const int iSz = integrators.Size();
136 {
137 y.UseDevice(true); // typically this is a large vector, so store on device
138 y = 0.0;
139 for (int i = 0; i < iSz; ++i)
140 {
141 integrators[i]->AddMultMF(x, y);
142 }
143 }
144 else
145 {
147 localY = 0.0;
148 for (int i = 0; i < iSz; ++i)
149 {
150 integrators[i]->AddMultMF(localX, localY);
151 }
153 }
154
155 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
156 const int iFISz = intFaceIntegrators.Size();
157 if (int_face_restrict_lex && iFISz>0)
158 {
160 if (int_face_X.Size()>0)
161 {
162 int_face_Y = 0.0;
163 for (int i = 0; i < iFISz; ++i)
164 {
165 intFaceIntegrators[i]->AddMultMF(int_face_X, int_face_Y);
166 }
168 }
169 }
170
171 Array<BilinearFormIntegrator*> &bdrFaceIntegrators = *a->GetBFBFI();
172 const int bFISz = bdrFaceIntegrators.Size();
173 if (bdr_face_restrict_lex && bFISz>0)
174 {
176 if (bdr_face_X.Size()>0)
177 {
178 bdr_face_Y = 0.0;
179 for (int i = 0; i < bFISz; ++i)
180 {
181 bdrFaceIntegrators[i]->AddMultMF(bdr_face_X, bdr_face_Y);
182 }
184 }
185 }
186}
187
189{
190 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
191 const int iSz = integrators.Size();
192 if (elem_restrict)
193 {
195 localY = 0.0;
196 for (int i = 0; i < iSz; ++i)
197 {
198 integrators[i]->AddMultTransposeMF(localX, localY);
199 }
201 }
202 else
203 {
204 y.UseDevice(true);
205 y = 0.0;
206 for (int i = 0; i < iSz; ++i)
207 {
208 integrators[i]->AddMultTransposeMF(x, y);
209 }
210 }
211
212 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
213 const int iFISz = intFaceIntegrators.Size();
214 if (int_face_restrict_lex && iFISz>0)
215 {
217 if (int_face_X.Size()>0)
218 {
219 int_face_Y = 0.0;
220 for (int i = 0; i < iFISz; ++i)
221 {
222 intFaceIntegrators[i]->AddMultTransposeMF(int_face_X, int_face_Y);
223 }
225 }
226 }
227
228 Array<BilinearFormIntegrator*> &bdrFaceIntegrators = *a->GetBFBFI();
229 const int bFISz = bdrFaceIntegrators.Size();
230 if (bdr_face_restrict_lex && bFISz>0)
231 {
233 if (bdr_face_X.Size()>0)
234 {
235 bdr_face_Y = 0.0;
236 for (int i = 0; i < bFISz; ++i)
237 {
238 bdrFaceIntegrators[i]->AddMultTransposeMF(bdr_face_X, bdr_face_Y);
239 }
241 }
242 }
243}
244
245// Data and methods for partially-assembled bilinear forms
247 : BilinearFormExtension(form),
248 trial_fes(a->FESpace()),
249 test_fes(a->FESpace())
250{
251 elem_restrict = NULL;
254}
255
257{
258 if ( Device::Allows(Backend::CEED_MASK) ) { return; }
261 if (elem_restrict)
262 {
265 localY.UseDevice(true); // ensure 'localY = 0.0' is done on device
266
267 // Gather the attributes on the host from all the elements
268 const Mesh &mesh = *trial_fes->GetMesh();
270 for (int i = 0; i < mesh.GetNE(); ++i)
271 {
272 elem_attributes[i] = mesh.GetAttribute(i);
273 }
274 }
275
276 // Construct face restriction operators only if the bilinear form has
277 // interior or boundary face integrators
278 if (int_face_restrict_lex == NULL && a->GetFBFI()->Size() > 0)
279 {
285 int_face_Y.UseDevice(true); // ensure 'int_face_Y = 0.0' is done on device
286
287 bool needs_normal_derivs = false;
288 auto &integs = *a->GetFBFI();
289 for (int i = 0; i < integs.Size(); ++i)
290 {
291 if (integs[i]->RequiresFaceNormalDerivatives())
292 {
293 needs_normal_derivs = true;
294 break;
295 }
296 }
297 if (needs_normal_derivs)
298 {
301 }
302 }
303
304 const bool has_bdr_integs = (a->GetBFBFI()->Size() > 0 ||
305 a->GetBBFI()->Size() > 0);
306 if (bdr_face_restrict_lex == NULL && has_bdr_integs)
307 {
311 m);
314 bdr_face_Y.UseDevice(true); // ensure 'bdr_face_Y = 0.0' is done on device
315
316 bool needs_normal_derivs = false;
317 auto &integs = *a->GetBFBFI();
318 for (int i = 0; i < integs.Size(); ++i)
319 {
320 if (integs[i]->RequiresFaceNormalDerivatives())
321 {
322 needs_normal_derivs = true;
323 break;
324 }
325 }
326 if (needs_normal_derivs)
327 {
330 }
331
332 const Mesh &mesh = *trial_fes->GetMesh();
333 // See LinearFormExtension::Update for explanation of f_to_be logic.
334 std::unordered_map<int,int> f_to_be;
335 for (int i = 0; i < mesh.GetNBE(); ++i)
336 {
337 const int f = mesh.GetBdrElementFaceIndex(i);
338 f_to_be[f] = i;
339 }
340 const int nf_bdr = trial_fes->GetNFbyType(FaceType::Boundary);
341 bdr_attributes.SetSize(nf_bdr);
342 int f_ind = 0;
343 int missing_bdr_elems = 0;
344 for (int f = 0; f < mesh.GetNumFaces(); ++f)
345 {
347 {
348 continue;
349 }
350 int attribute = 1; // default value
351 if (f_to_be.find(f) != f_to_be.end())
352 {
353 const int be = f_to_be[f];
354 attribute = mesh.GetBdrAttribute(be);
355 }
356 else
357 {
358 // If a boundary face does not correspond to a boundary element,
359 // we assign it the default attribute of 1. We also generate a
360 // warning at runtime with the number of such missing elements.
361 ++missing_bdr_elems;
362 }
363 bdr_attributes[f_ind] = attribute;
364 ++f_ind;
365 }
366 if (missing_bdr_elems)
367 {
368 MFEM_WARNING("Missing " << missing_bdr_elems << " boundary elements "
369 "for boundary faces.");
370 }
371 }
372}
373
375{
377
378 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
379 for (BilinearFormIntegrator *integ : integrators)
380 {
381 if (integ->Patchwise())
382 {
383 MFEM_VERIFY(a->FESpace()->GetNURBSext(),
384 "Patchwise integration requires a NURBS FE space");
385 integ->AssembleNURBSPA(*a->FESpace());
386 }
387 else
388 {
389 integ->AssemblePA(*a->FESpace());
390 }
391 }
392
393 Array<BilinearFormIntegrator*> &bdr_integrators = *a->GetBBFI();
394 for (BilinearFormIntegrator *integ : bdr_integrators)
395 {
396 integ->AssemblePABoundary(*a->FESpace());
397 }
398
399 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
400 for (BilinearFormIntegrator *integ : intFaceIntegrators)
401 {
402 integ->AssemblePAInteriorFaces(*a->FESpace());
403 }
404
405 Array<BilinearFormIntegrator*> &bdrFaceIntegrators = *a->GetBFBFI();
406 for (BilinearFormIntegrator *integ : bdrFaceIntegrators)
407 {
408 integ->AssemblePABoundaryFaces(*a->FESpace());
409 }
410}
411
413{
414 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
415
416 auto assemble_diagonal_with_markers = [&](BilinearFormIntegrator &integ,
417 const Array<int> *markers,
418 const Array<int> &attributes,
419 Vector &d)
420 {
421 integ.AssembleDiagonalPA(d);
422 if (markers)
423 {
424 const int ne = attributes.Size();
425 const int nd = d.Size() / ne;
426 const auto d_attr = Reshape(attributes.Read(), ne);
427 const auto d_m = Reshape(markers->Read(), markers->Size());
428 auto d_d = Reshape(d.ReadWrite(), nd, ne);
429 mfem::forall(ne, [=] MFEM_HOST_DEVICE (int e)
430 {
431 const int attr = d_attr[e];
432 if (d_m[attr - 1] == 0)
433 {
434 for (int i = 0; i < nd; ++i)
435 {
436 d_d(i, e) = 0.0;
437 }
438 }
439 });
440 }
441 };
442
443 const int iSz = integrators.Size();
445 {
446 if (iSz > 0)
447 {
448 localY = 0.0;
449 Array<Array<int>*> &elem_markers = *a->GetDBFI_Marker();
450 for (int i = 0; i < iSz; ++i)
451 {
452 assemble_diagonal_with_markers(*integrators[i], elem_markers[i],
454 }
455 const ElementRestriction* H1elem_restrict =
456 dynamic_cast<const ElementRestriction*>(elem_restrict);
457 if (H1elem_restrict)
458 {
459 H1elem_restrict->MultTransposeUnsigned(localY, y);
460 }
461 else
462 {
464 }
465 }
466 else
467 {
468 y = 0.0;
469 }
470 }
471 else
472 {
473 Array<Array<int>*> &elem_markers = *a->GetDBFI_Marker();
474 y.UseDevice(true); // typically this is a large vector, so store on device
475 y = 0.0;
476 for (int i = 0; i < iSz; ++i)
477 {
478 assemble_diagonal_with_markers(*integrators[i], elem_markers[i],
479 elem_attributes, y);
480 }
481 }
482
483 Array<BilinearFormIntegrator*> &bdr_integs = *a->GetBBFI();
484 const int n_bdr_integs = bdr_integs.Size();
485 if (bdr_face_restrict_lex && n_bdr_integs > 0)
486 {
487 Array<Array<int>*> &bdr_markers = *a->GetBBFI_Marker();
488 bdr_face_Y = 0.0;
489 for (int i = 0; i < n_bdr_integs; ++i)
490 {
491 assemble_diagonal_with_markers(*bdr_integs[i], bdr_markers[i],
493 }
495 }
496}
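// The diagonal assembled above backs BilinearForm::AssembleDiagonal at the
// PARTIAL assembly level. A typical, illustrative consumer is a Jacobi
// preconditioner for a matrix-free solve (assumes a PA-assembled form 'a',
// its constrained operator 'A', and vectors 'B', 'X' from FormLinearSystem):
//
//   OperatorJacobiSmoother prec(a, ess_tdof_list);
//   CGSolver cg;
//   cg.SetPreconditioner(prec);
//   cg.SetOperator(*A);
//   cg.Mult(B, X);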
497
499{
500 FiniteElementSpace *fes = a->FESpace();
501 height = width = fes->GetVSize();
502 trial_fes = fes;
503 test_fes = fes;
504
505 elem_restrict = nullptr;
506 int_face_restrict_lex = nullptr;
507 bdr_face_restrict_lex = nullptr;
508}
509
512{
513 Operator *oper;
514 Operator::FormSystemOperator(ess_tdof_list, oper);
515 A.Reset(oper); // A will own oper
516}
517
519 Vector &x, Vector &b,
521 Vector &X, Vector &B,
522 int copy_interior)
523{
524 Operator *oper;
525 Operator::FormLinearSystem(ess_tdof_list, x, b, oper, X, B, copy_interior);
526 A.Reset(oper); // A will own oper
527}
528
530{
531 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
532
533 const int iSz = integrators.Size();
534
535 bool allPatchwise = true;
536 bool somePatchwise = false;
537
538 for (int i = 0; i < iSz; ++i)
539 {
540 if (integrators[i]->Patchwise())
541 {
542 somePatchwise = true;
543 }
544 else
545 {
546 allPatchwise = false;
547 }
548 }
549
550 MFEM_VERIFY(!(somePatchwise && !allPatchwise),
551 "All or none of the integrators should be patchwise");
552
553 if (DeviceCanUseCeed() || !elem_restrict || allPatchwise)
554 {
555 y.UseDevice(true); // typically this is a large vector, so store on device
556 y = 0.0;
557 for (int i = 0; i < iSz; ++i)
558 {
559 if (integrators[i]->Patchwise())
560 {
561 integrators[i]->AddMultNURBSPA(x, y);
562 }
563 else
564 {
565 integrators[i]->AddMultPA(x, y);
566 }
567 }
568 }
569 else
570 {
571 if (iSz)
572 {
573 Array<Array<int>*> &elem_markers = *a->GetDBFI_Marker();
575 localY = 0.0;
576 for (int i = 0; i < iSz; ++i)
577 {
578 AddMultWithMarkers(*integrators[i], localX, elem_markers[i],
579 elem_attributes, false, localY);
580 }
582 }
583 else
584 {
585 y = 0.0;
586 }
587 }
588
589 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
590 const int iFISz = intFaceIntegrators.Size();
591 if (int_face_restrict_lex && iFISz>0)
592 {
593 // When assembling interior face integrators for DG spaces, we need to
594 // exchange the face-neighbor information. This happens inside member
595 // functions of the 'int_face_restrict_lex'. To avoid repeated calls to
596 // ParGridFunction::ExchangeFaceNbrData, if we have a parallel space
597 // with interior face integrators, we create a ParGridFunction that
598 // will be used to cache the face-neighbor data. x_dg should be passed
599 // to any restriction operator that may need to use face-neighbor data.
600 const Vector *x_dg = &x;
601#ifdef MFEM_USE_MPI
602 ParGridFunction x_pgf;
603 if (auto *pfes = dynamic_cast<ParFiniteElementSpace*>(a->FESpace()))
604 {
605 x_pgf.MakeRef(pfes, const_cast<Vector&>(x), 0);
606 x_dg = &x_pgf;
607 }
608#endif
609
611 if (int_face_dXdn.Size() > 0)
612 {
614 }
615 if (int_face_X.Size() > 0)
616 {
617 int_face_Y = 0.0;
618
619 // if normal derivatives are needed by at least one integrator...
620 if (int_face_dYdn.Size() > 0)
621 {
622 int_face_dYdn = 0.0;
623 }
624
625 for (int i = 0; i < iFISz; ++i)
626 {
627 if (intFaceIntegrators[i]->RequiresFaceNormalDerivatives())
628 {
629 intFaceIntegrators[i]->AddMultPAFaceNormalDerivatives(
632 }
633 else
634 {
635 intFaceIntegrators[i]->AddMultPA(int_face_X, int_face_Y);
636 }
637 }
639 if (int_face_dYdn.Size() > 0)
640 {
642 int_face_dYdn, y);
643 }
644 }
645 }
646
647 Array<BilinearFormIntegrator*> &bdr_integs = *a->GetBBFI();
648 Array<BilinearFormIntegrator*> &bdr_face_integs = *a->GetBFBFI();
649 const int n_bdr_integs = bdr_integs.Size();
650 const int n_bdr_face_integs = bdr_face_integs.Size();
651 const bool has_bdr_integs = (n_bdr_face_integs > 0 || n_bdr_integs > 0);
652 if (bdr_face_restrict_lex && has_bdr_integs)
653 {
654 Array<Array<int>*> &bdr_markers = *a->GetBBFI_Marker();
655 Array<Array<int>*> &bdr_face_markers = *a->GetBFBFI_Marker();
657 if (bdr_face_dXdn.Size() > 0)
658 {
660 }
661 if (bdr_face_X.Size() > 0)
662 {
663 bdr_face_Y = 0.0;
664
665 // if normal derivatives are needed by at least one integrator...
666 if (bdr_face_dYdn.Size() > 0)
667 {
668 bdr_face_dYdn = 0.0;
669 }
670 for (int i = 0; i < n_bdr_integs; ++i)
671 {
672 AddMultWithMarkers(*bdr_integs[i], bdr_face_X, bdr_markers[i], bdr_attributes,
673 false, bdr_face_Y);
674 }
675 for (int i = 0; i < n_bdr_face_integs; ++i)
676 {
677 if (bdr_face_integs[i]->RequiresFaceNormalDerivatives())
678 {
680 *bdr_face_integs[i], bdr_face_X, bdr_face_dXdn,
681 bdr_face_markers[i], bdr_attributes, bdr_face_Y, bdr_face_dYdn);
682 }
683 else
684 {
685 AddMultWithMarkers(*bdr_face_integs[i], bdr_face_X, bdr_face_markers[i],
686 bdr_attributes, false, bdr_face_Y);
687 }
688 }
690 if (bdr_face_dYdn.Size() > 0)
691 {
693 }
694 }
695 }
696}
697
699{
700 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
701 const int iSz = integrators.Size();
702 if (elem_restrict)
703 {
704 Array<Array<int>*> &elem_markers = *a->GetDBFI_Marker();
706 localY = 0.0;
707 for (int i = 0; i < iSz; ++i)
708 {
709 AddMultWithMarkers(*integrators[i], localX, elem_markers[i], elem_attributes,
710 true, localY);
711 }
713 }
714 else
715 {
716 y.UseDevice(true);
717 y = 0.0;
718 for (int i = 0; i < iSz; ++i)
719 {
720 integrators[i]->AddMultTransposePA(x, y);
721 }
722 }
723
724 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
725 const int iFISz = intFaceIntegrators.Size();
726 if (int_face_restrict_lex && iFISz>0)
727 {
729 if (int_face_X.Size()>0)
730 {
731 int_face_Y = 0.0;
732 for (int i = 0; i < iFISz; ++i)
733 {
734 intFaceIntegrators[i]->AddMultTransposePA(int_face_X, int_face_Y);
735 }
737 }
738 }
739
740 Array<BilinearFormIntegrator*> &bdr_integs = *a->GetBBFI();
741 Array<BilinearFormIntegrator*> &bdr_face_integs = *a->GetBFBFI();
742 const int n_bdr_integs = bdr_integs.Size();
743 const int n_bdr_face_integs = bdr_face_integs.Size();
744 const bool has_bdr_integs = (n_bdr_face_integs > 0 || n_bdr_integs > 0);
745 if (bdr_face_restrict_lex && has_bdr_integs)
746 {
747 Array<Array<int>*> &bdr_markers = *a->GetBBFI_Marker();
748 Array<Array<int>*> &bdr_face_markers = *a->GetBFBFI_Marker();
749
751 if (bdr_face_X.Size() > 0)
752 {
753 bdr_face_Y = 0.0;
754 for (int i = 0; i < n_bdr_integs; ++i)
755 {
756 AddMultWithMarkers(*bdr_integs[i], bdr_face_X, bdr_markers[i], bdr_attributes,
757 true, bdr_face_Y);
758 }
759 for (int i = 0; i < n_bdr_face_integs; ++i)
760 {
761 AddMultWithMarkers(*bdr_face_integs[i], bdr_face_X, bdr_face_markers[i],
763 }
765 }
766 }
767}
768
769// Compute kernels for PABilinearFormExtension::AddMultWithMarkers.
770// Cannot be defined inside a member function with non-public visibility.
771static void AddWithMarkers_(
772 const int ne,
773 const int nd,
774 const Vector &x,
775 const Array<int> &markers,
776 const Array<int> &attributes,
777 Vector &y)
778{
779 const auto d_x = Reshape(x.Read(), nd, ne);
780 const auto d_m = Reshape(markers.Read(), markers.Size());
781 const auto d_attr = Reshape(attributes.Read(), ne);
782 auto d_y = Reshape(y.ReadWrite(), nd, ne);
783 mfem::forall(ne, [=] MFEM_HOST_DEVICE (int e)
784 {
785 const int attr = d_attr[e];
786 if (d_m[attr - 1] == 0) { return; }
787 for (int i = 0; i < nd; ++i)
788 {
789 d_y(i, e) += d_x(i, e);
790 }
791 });
792}
793
795 const BilinearFormIntegrator &integ,
796 const Vector &x,
797 const Vector &dxdn,
798 const Array<int> *markers,
799 const Array<int> &attributes,
800 Vector &y,
801 Vector &dydn) const
802{
803 if (markers)
804 {
805 tmp_evec.SetSize(y.Size() + dydn.Size());
806 tmp_evec = 0.0;
807 Vector tmp_y(tmp_evec, 0, y.Size());
808 Vector tmp_dydn(tmp_evec, y.Size(), dydn.Size());
809
810 integ.AddMultPAFaceNormalDerivatives(x, dxdn, tmp_y, tmp_dydn);
811
812 const int ne = attributes.Size();
813 const int nd_1 = x.Size() / ne;
814 const int nd_2 = dxdn.Size() / ne;
815
816 AddWithMarkers_(ne, nd_1, tmp_y, *markers, attributes, y);
817 AddWithMarkers_(ne, nd_2, tmp_dydn, *markers, attributes, dydn);
818 }
819 else
820 {
821 integ.AddMultPAFaceNormalDerivatives(x, dxdn, y, dydn);
822 }
823}
824
826 const BilinearFormIntegrator &integ,
827 const Vector &x,
828 const Array<int> *markers,
829 const Array<int> &attributes,
830 const bool transpose,
831 Vector &y) const
832{
833 if (markers)
834 {
835 tmp_evec.SetSize(y.Size());
836 tmp_evec = 0.0;
837 if (transpose) { integ.AddMultTransposePA(x, tmp_evec); }
838 else { integ.AddMultPA(x, tmp_evec); }
839 const int ne = attributes.Size();
840 const int nd = x.Size() / ne;
841 AddWithMarkers_(ne, nd, tmp_evec, *markers, attributes, y);
842 }
843 else
844 {
845 if (transpose) { integ.AddMultTransposePA(x, y); }
846 else { integ.AddMultPA(x, y); }
847 }
848}
849
850// Data and methods for element-assembled bilinear forms
853 factorize_face_terms(false)
854{
855 if ( form->FESpace()->IsDGSpace() )
856 {
858 }
859}
860
862{
864
865 ne = trial_fes->GetMesh()->GetNE();
867
868 Vector ea_data_tmp;
869
870 auto add_with_markers = [&](const Vector &ea_1, Vector &ea_2, const int ne_,
871 const Array<int> &markers, const Array<int> &attrs,
872 const bool add)
873 {
874 if (ne_ == 0) { return; }
875 const int sz = ea_1.Size() / ne_;
876 const int *d_m = markers.Read();
877 const int *d_a = attrs.Read();
878 const auto d_ea_1 = Reshape(ea_1.Read(), sz, ne_);
879 auto d_ea_2 = Reshape(add ? ea_2.ReadWrite() : ea_2.Write(), sz, ne_);
880
881 mfem::forall(sz*ne_, [=] MFEM_HOST_DEVICE (int idx)
882 {
883 const int i = idx % sz;
884 const int e = idx / sz;
885 const real_t val = d_m[d_a[e] - 1] ? d_ea_1(i, e) : 0.0;
886 if (add)
887 {
888 d_ea_2(i, e) += val;
889 }
890 else
891 {
892 d_ea_2(i, e) = val;
893 }
894 });
895 };
896
897 {
899 ea_data.UseDevice(true);
900 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
901 Array<Array<int>*> &markers_array = *a->GetDBFI_Marker();
902
903 if (integrators.Size() == 0) { ea_data = 0.0; }
904
905 for (int i = 0; i < integrators.Size(); ++i)
906 {
907 const bool add = (i > 0);
908 const Array<int> *markers = markers_array[i];
909 if (markers == nullptr)
910 {
911 integrators[i]->AssembleEA(*a->FESpace(), ea_data, add);
912 }
913 else
914 {
915 ea_data_tmp.SetSize(ea_data.Size());
916 integrators[i]->AssembleEA(*a->FESpace(), ea_data_tmp, false);
917 add_with_markers(ea_data_tmp, ea_data, ne, *markers,
919 }
920 }
921 }
922
924
925 {
926 Array<BilinearFormIntegrator*> &bdr_integs = *a->GetBBFI();
927 Array<Array<int>*> &markers_array = *a->GetBBFI_Marker();
928 const int n_bdr_integs = bdr_integs.Size();
929 if (n_bdr_integs > 0)
930 {
933 }
934 for (int i = 0; i < n_bdr_integs; ++i)
935 {
936 const bool add = (i > 0);
937 const Array<int> *markers = markers_array[i];
938 if (markers == nullptr)
939 {
940 bdr_integs[i]->AssembleEABoundary(*a->FESpace(), ea_data_bdr, add);
941 }
942 else
943 {
944 ea_data_tmp.SetSize(ea_data_bdr.Size());
945 bdr_integs[i]->AssembleEABoundary(*a->FESpace(), ea_data_tmp, add);
946 add_with_markers(ea_data_tmp, ea_data_bdr, nf_bdr, *markers,
948 }
949 }
950 }
951
952 {
953 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
954 const int intFaceIntegratorCount = intFaceIntegrators.Size();
955 if (intFaceIntegratorCount>0)
956 {
960 }
961 for (int i = 0; i < intFaceIntegratorCount; ++i)
962 {
963 const bool add = (i > 0);
964 intFaceIntegrators[i]->AssembleEAInteriorFaces(*a->FESpace(),
967 add);
968 }
969 }
970
971 {
972 Array<BilinearFormIntegrator*> &bdr_face_integs = *a->GetBFBFI();
973 Array<Array<int>*> &markers_array = *a->GetBFBFI_Marker();
974 const int n_bdr_face_integs = bdr_face_integs.Size();
975 if (n_bdr_face_integs > 0)
976 {
979 }
980 for (int i = 0; i < n_bdr_face_integs; ++i)
981 {
982 const bool add = (i > 0);
983 const Array<int> *markers = markers_array[i];
984 if (markers == nullptr)
985 {
986 bdr_face_integs[i]->AssembleEABoundaryFaces(
987 *a->FESpace(), ea_data_bdr, add);
988 }
989 else
990 {
991 ea_data_tmp.SetSize(ea_data_bdr.Size());
992 bdr_face_integs[i]->AssembleEABoundaryFaces(*a->FESpace(),
993 ea_data_tmp,
994 add);
995 add_with_markers(ea_data_tmp, ea_data_bdr, nf_bdr, *markers,
997 }
998 }
999 }
1000
1002 {
1003 auto restFint = dynamic_cast<const L2FaceRestriction*>(int_face_restrict_lex);
1005 }
1007 {
1008 auto restFbdr = dynamic_cast<const L2FaceRestriction*>(bdr_face_restrict_lex);
1010 }
1011}
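// Element assembly is selected from user code in the same way as the other
// levels; an illustrative sketch (same assumed names as in the earlier
// sketch):
//
//   a.SetAssemblyLevel(AssemblyLevel::ELEMENT); // -> EABilinearFormExtension
//   a.Assemble(); // fills ea_data, ea_data_bdr, ea_data_int, ea_data_ext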
1012
1014{
1015 // Apply the Element Restriction
1016 const bool useRestrict = !DeviceCanUseCeed() && elem_restrict;
1017 if (!useRestrict)
1018 {
1019 y.UseDevice(true); // typically this is a large vector, so store on device
1020 y = 0.0;
1021 }
1022 else
1023 {
1025 localY = 0.0;
1026 }
1027 // Apply the Element Matrices
1028 {
1029 const int NDOFS = elemDofs;
1030 auto X = Reshape(useRestrict?localX.Read():x.Read(), NDOFS, ne);
1031 auto Y = Reshape(useRestrict?localY.ReadWrite():y.ReadWrite(), NDOFS, ne);
1032 auto A = Reshape(ea_data.Read(), NDOFS, NDOFS, ne);
1033 mfem::forall(ne*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1034 {
1035 const int e = glob_j/NDOFS;
1036 const int j = glob_j%NDOFS;
1037 real_t res = 0.0;
1038 for (int i = 0; i < NDOFS; i++)
1039 {
1040 res += A(i, j, e)*X(i, e);
1041 }
1042 Y(j, e) += res;
1043 });
1044 // Apply the Element Restriction transposed
1045 if (useRestrict)
1046 {
1048 }
1049 }
1050
1051 // Treatment of interior faces
1052 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
1053 const int iFISz = intFaceIntegrators.Size();
1054 if (int_face_restrict_lex && iFISz>0)
1055 {
1056 // Apply the Interior Face Restriction
1058 if (int_face_X.Size()>0)
1059 {
1060 int_face_Y = 0.0;
1061 // Apply the interior face matrices
1062 const int NDOFS = faceDofs;
1063 auto X = Reshape(int_face_X.Read(), NDOFS, 2, nf_int);
1064 auto Y = Reshape(int_face_Y.ReadWrite(), NDOFS, 2, nf_int);
1066 {
1067 auto A_int = Reshape(ea_data_int.Read(), NDOFS, NDOFS, 2, nf_int);
1068 mfem::forall(nf_int*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1069 {
1070 const int f = glob_j/NDOFS;
1071 const int j = glob_j%NDOFS;
1072 real_t res = 0.0;
1073 for (int i = 0; i < NDOFS; i++)
1074 {
1075 res += A_int(i, j, 0, f)*X(i, 0, f);
1076 }
1077 Y(j, 0, f) += res;
1078 res = 0.0;
1079 for (int i = 0; i < NDOFS; i++)
1080 {
1081 res += A_int(i, j, 1, f)*X(i, 1, f);
1082 }
1083 Y(j, 1, f) += res;
1084 });
1085 }
1086 auto A_ext = Reshape(ea_data_ext.Read(), NDOFS, NDOFS, 2, nf_int);
1087 mfem::forall(nf_int*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1088 {
1089 const int f = glob_j/NDOFS;
1090 const int j = glob_j%NDOFS;
1091 real_t res = 0.0;
1092 for (int i = 0; i < NDOFS; i++)
1093 {
1094 res += A_ext(i, j, 0, f)*X(i, 0, f);
1095 }
1096 Y(j, 1, f) += res;
1097 res = 0.0;
1098 for (int i = 0; i < NDOFS; i++)
1099 {
1100 res += A_ext(i, j, 1, f)*X(i, 1, f);
1101 }
1102 Y(j, 0, f) += res;
1103 });
1104 // Apply the Interior Face Restriction transposed
1106 }
1107 }
1108
1109 // Treatment of boundary faces
1111 {
1112 // Apply the Boundary Face Restriction
1114 bdr_face_Y = 0.0;
1115 // Apply the boundary face matrices
1116 const int NDOFS = faceDofs;
1117 auto X = Reshape(bdr_face_X.Read(), NDOFS, nf_bdr);
1118 auto Y = Reshape(bdr_face_Y.ReadWrite(), NDOFS, nf_bdr);
1119 auto A = Reshape(ea_data_bdr.Read(), NDOFS, NDOFS, nf_bdr);
1120 mfem::forall(nf_bdr*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1121 {
1122 const int f = glob_j/NDOFS;
1123 const int j = glob_j%NDOFS;
1124 real_t res = 0.0;
1125 for (int i = 0; i < NDOFS; i++)
1126 {
1127 res += A(i, j, f)*X(i, f);
1128 }
1129 Y(j, f) += res;
1130 });
1131 // Apply the Boundary Face Restriction transposed
1133 }
1134}
1135
1137{
1138 // Apply the Element Restriction
1139 const bool useRestrict = !DeviceCanUseCeed() && elem_restrict;
1140 if (!useRestrict)
1141 {
1142 y.UseDevice(true); // typically this is a large vector, so store on device
1143 y = 0.0;
1144 }
1145 else
1146 {
1148 localY = 0.0;
1149 }
1150 // Apply the Element Matrices transposed
1151 {
1152 const int NDOFS = elemDofs;
1153 auto X = Reshape(useRestrict?localX.Read():x.Read(), NDOFS, ne);
1154 auto Y = Reshape(useRestrict?localY.ReadWrite():y.ReadWrite(), NDOFS, ne);
1155 auto A = Reshape(ea_data.Read(), NDOFS, NDOFS, ne);
1156 mfem::forall(ne*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1157 {
1158 const int e = glob_j/NDOFS;
1159 const int j = glob_j%NDOFS;
1160 real_t res = 0.0;
1161 for (int i = 0; i < NDOFS; i++)
1162 {
1163 res += A(j, i, e)*X(i, e);
1164 }
1165 Y(j, e) += res;
1166 });
1167 // Apply the Element Restriction transposed
1168 if (useRestrict)
1169 {
1171 }
1172 }
1173
1174 // Treatment of interior faces
1175 Array<BilinearFormIntegrator*> &intFaceIntegrators = *a->GetFBFI();
1176 const int iFISz = intFaceIntegrators.Size();
1177 if (int_face_restrict_lex && iFISz>0)
1178 {
1179 // Apply the Interior Face Restriction
1181 if (int_face_X.Size()>0)
1182 {
1183 int_face_Y = 0.0;
1184 // Apply the interior face matrices transposed
1185 const int NDOFS = faceDofs;
1186 auto X = Reshape(int_face_X.Read(), NDOFS, 2, nf_int);
1187 auto Y = Reshape(int_face_Y.ReadWrite(), NDOFS, 2, nf_int);
1189 {
1190 auto A_int = Reshape(ea_data_int.Read(), NDOFS, NDOFS, 2, nf_int);
1191 mfem::forall(nf_int*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1192 {
1193 const int f = glob_j/NDOFS;
1194 const int j = glob_j%NDOFS;
1195 real_t res = 0.0;
1196 for (int i = 0; i < NDOFS; i++)
1197 {
1198 res += A_int(j, i, 0, f)*X(i, 0, f);
1199 }
1200 Y(j, 0, f) += res;
1201 res = 0.0;
1202 for (int i = 0; i < NDOFS; i++)
1203 {
1204 res += A_int(j, i, 1, f)*X(i, 1, f);
1205 }
1206 Y(j, 1, f) += res;
1207 });
1208 }
1209 auto A_ext = Reshape(ea_data_ext.Read(), NDOFS, NDOFS, 2, nf_int);
1210 mfem::forall(nf_int*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1211 {
1212 const int f = glob_j/NDOFS;
1213 const int j = glob_j%NDOFS;
1214 real_t res = 0.0;
1215 for (int i = 0; i < NDOFS; i++)
1216 {
1217 res += A_ext(j, i, 1, f)*X(i, 0, f);
1218 }
1219 Y(j, 1, f) += res;
1220 res = 0.0;
1221 for (int i = 0; i < NDOFS; i++)
1222 {
1223 res += A_ext(j, i, 0, f)*X(i, 1, f);
1224 }
1225 Y(j, 0, f) += res;
1226 });
1227 // Apply the Interior Face Restriction transposed
1229 }
1230 }
1231
1232 // Treatment of boundary faces
1234 {
1235 // Apply the Boundary Face Restriction
1237 bdr_face_Y = 0.0;
1238 // Apply the boundary face matrices transposed
1239 const int NDOFS = faceDofs;
1240 auto X = Reshape(bdr_face_X.Read(), NDOFS, nf_bdr);
1241 auto Y = Reshape(bdr_face_Y.ReadWrite(), NDOFS, nf_bdr);
1242 auto A = Reshape(ea_data_bdr.Read(), NDOFS, NDOFS, nf_bdr);
1243 mfem::forall(nf_bdr*NDOFS, [=] MFEM_HOST_DEVICE (int glob_j)
1244 {
1245 const int f = glob_j/NDOFS;
1246 const int j = glob_j%NDOFS;
1247 real_t res = 0.0;
1248 for (int i = 0; i < NDOFS; i++)
1249 {
1250 res += A(j, i, f)*X(i, f);
1251 }
1252 Y(j, f) += res;
1253 });
1254 // Apply the Boundary Face Restriction transposed
1256 }
1257}
1258
1260 DenseTensor &element_matrices, ElementDofOrdering ordering, bool add_bdr)
1261{
1262 // Ensure the EA data is assembled
1263 if (ea_data.Size() == 0) { Assemble(); }
1264
1265 const int ndofs = elemDofs;
1266 element_matrices.SetSize(ndofs, ndofs, ne);
1267 const int N = element_matrices.TotalSize();
1268
1269 const auto d_ea_data = Reshape(ea_data.Read(), ndofs, ndofs, ne);
1270 auto d_element_matrices = Reshape(element_matrices.Write(),
1271 ndofs, ndofs,
1272 ne);
1273
1274 const int *d_dof_map = nullptr;
1275 Array<int> dof_map;
1276 if (ordering == ElementDofOrdering::NATIVE)
1277 {
1278 const TensorBasisElement* tbe =
1279 dynamic_cast<const TensorBasisElement*>(trial_fes->GetFE(0));
1280 if (tbe)
1281 {
1282 // Deep copy to avoid host/device issues (see similar comment in
1283 // HybridizationExtension::ConstructC).
1284 dof_map = tbe->GetDofMap();
1285 d_dof_map = dof_map.Read();
1286 }
1287 }
1288
1289 if (d_dof_map)
1290 {
1291 // Reordering required
1292 mfem::forall(N, [=] MFEM_HOST_DEVICE (int idx)
1293 {
1294 const int e = idx / ndofs / ndofs;
1295 const int i = idx % ndofs;
1296 const int j = (idx / ndofs) % ndofs;
1297 const int ii_s = d_dof_map[i];
1298 const int ii = (ii_s >= 0) ? ii_s : -1 - ii_s;
1299 const int s_i = (ii_s >= 0) ? 1 : -1;
1300 const int jj_s = d_dof_map[j];
1301 const int jj = (jj_s >= 0) ? jj_s : -1 - jj_s;
1302 const int s_j = (jj_s >= 0) ? 1 : -1;
1303 d_element_matrices(ii, jj, e) = s_i*s_j*d_ea_data(j, i, e);
1304 });
1305 }
1306 else
1307 {
1308 // No reordering required
1309 mfem::forall(N, [=] MFEM_HOST_DEVICE (int idx)
1310 {
1311 const int e = idx / ndofs / ndofs;
1312 const int i = idx % ndofs;
1313 const int j = (idx / ndofs) % ndofs;
1314 d_element_matrices(i, j, e) = d_ea_data(j, i, e);
1315 });
1316 }
1317
1318 if (add_bdr && ea_data_bdr.Size() > 0)
1319 {
1320 const int ndof_face = faceDofs;
1321 const auto d_ea_bdr = Reshape(ea_data_bdr.Read(),
1322 ndof_face, ndof_face, nf_bdr);
1323
1324 // Get all the local face maps (mapping from lexicographic face index to
1325 // lexicographic volume index, depending on the local face index).
1326 const Mesh &mesh = *trial_fes->GetMesh();
1327 const int dim = mesh.Dimension();
1328 const int n_faces_per_el = 2*dim; // assuming tensor product
1329 Array<int> face_maps(ndof_face * n_faces_per_el);
1330 for (int lf_i = 0; lf_i < n_faces_per_el; ++lf_i)
1331 {
1332 Array<int> face_map(ndof_face);
1333 trial_fes->GetFE(0)->GetFaceMap(lf_i, face_map);
1334 for (int i = 0; i < ndof_face; ++i)
1335 {
1336 face_maps[i + lf_i*ndof_face] = face_map[i];
1337 }
1338 }
1339
1340 Array<int> face_info(nf_bdr * 2);
1341 {
1342 int fidx = 0;
1343 for (int f = 0; f < mesh.GetNumFaces(); ++f)
1344 {
1346 if (!finfo.IsBoundary()) { continue; }
1347 face_info[0 + fidx*2] = finfo.element[0].local_face_id;
1348 face_info[1 + fidx*2] = finfo.element[0].index;
1349 fidx++;
1350 }
1351 }
1352
1353 const auto d_face_maps = Reshape(face_maps.Read(), ndof_face, n_faces_per_el);
1354 const auto d_face_info = Reshape(face_info.Read(), 2, nf_bdr);
1355
1356 const bool reorder = (ordering == ElementDofOrdering::NATIVE);
1357
1358 mfem::forall_2D(nf_bdr, ndof_face, ndof_face, [=] MFEM_HOST_DEVICE (int f)
1359 {
1360 const int lf_i = d_face_info(0, f);
1361 const int e = d_face_info(1, f);
1362 // Loop over face indices in "native ordering"
1363 MFEM_FOREACH_THREAD(i_lex_face, x, ndof_face)
1364 {
1365 // Convert from lexicographic face DOF to volume DOF
1366 const int i_lex = d_face_maps(i_lex_face, lf_i);
1367
1368 const int ii_s = d_dof_map[i_lex];
1369 const int ii = (ii_s >= 0) ? ii_s : -1 - ii_s;
1370
1371 const int i = reorder ? ii : i_lex;
1372 const int s_i = (ii_s < 0 && reorder) ? -1 : 1;
1373
1374 MFEM_FOREACH_THREAD(j_lex_face, y, ndof_face)
1375 {
1376 // Convert from lexicographic face DOF to volume DOF
1377 const int j_lex = d_face_maps(j_lex_face, lf_i);
1378
1379 const int jj_s = d_dof_map[j_lex];
1380 const int jj = (jj_s >= 0) ? jj_s : -1 - jj_s;
1381
1382 const int j = reorder ? jj : j_lex;
1383 const int s_j = (jj_s < 0 && reorder) ? -1 : 1;
1384
1385 AtomicAdd(d_element_matrices(i, j, e),
1386 s_i*s_j*d_ea_bdr(i_lex_face, j_lex_face, f));
1387 }
1388 }
1389 });
1390 }
1391}
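// Illustrative use of GetElementMatrices (assumes 'ext' is a pointer to an
// EA-assembled form's EABilinearFormExtension):
//
//   DenseTensor elmats;
//   ext->GetElementMatrices(elmats, ElementDofOrdering::NATIVE, /*add_bdr=*/true);
//   // elmats(i, j, e) is entry (i, j) of the e-th element matrix.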
1392
1393// Data and methods for fully-assembled bilinear forms
1396 mat(a->mat)
1397{
1398#ifdef MFEM_USE_MPI
1399 ParFiniteElementSpace *pfes = nullptr;
1400 if ( a->GetFBFI()->Size()>0 &&
1401 (pfes = dynamic_cast<ParFiniteElementSpace*>(form->FESpace())) )
1402 {
1403 pfes->ExchangeFaceNbrData();
1404 }
1405#endif
1406}
1407
1409{
1411 FiniteElementSpace &fes = *a->FESpace();
1412 int width = fes.GetVSize();
1413 int height = fes.GetVSize();
1414 bool keep_nbr_block = false;
1415#ifdef MFEM_USE_MPI
1416 ParFiniteElementSpace *pfes = nullptr;
1417 if ( a->GetFBFI()->Size()>0 &&
1418 (pfes = dynamic_cast<ParFiniteElementSpace*>(&fes)) )
1419 {
1420 pfes->ExchangeFaceNbrData();
1421 width += pfes->GetFaceNbrVSize();
1422 dg_x.SetSize(width);
1423 ParBilinearForm *pb = nullptr;
1424 if ((pb = dynamic_cast<ParBilinearForm*>(a)) && (pb->keep_nbr_block))
1425 {
1426 height += pfes->GetFaceNbrVSize();
1427 dg_y.SetSize(height);
1428 keep_nbr_block = true;
1429 }
1430 }
1431#endif
1432 if (a->mat) // We reuse the sparse matrix memory
1433 {
1434 if (fes.IsDGSpace())
1435 {
1436 const L2ElementRestriction *restE =
1437 static_cast<const L2ElementRestriction*>(elem_restrict);
1438 const L2FaceRestriction *restF =
1439 static_cast<const L2FaceRestriction*>(int_face_restrict_lex);
1440 MFEM_VERIFY(
1441 fes.Conforming(),
1442 "Full Assembly not yet supported on NCMesh.");
1443 // 1. Fill J and Data
1444 // 1.1 Fill J and Data with Elem ea_data
1445 restE->FillJAndData(ea_data, *mat);
1446 // 1.2 Fill J and Data with Face ea_data_ext
1447 if (restF) { restF->FillJAndData(ea_data_ext, *mat, keep_nbr_block); }
1448 // 1.3 Shift indirections in I back to original
1449 auto I = mat->HostReadWriteI();
1450 for (int i = height; i > 0; i--)
1451 {
1452 I[i] = I[i-1];
1453 }
1454 I[0] = 0;
1455 }
1456 else
1457 {
1458 const ElementRestriction &rest =
1459 static_cast<const ElementRestriction&>(*elem_restrict);
1460 rest.FillJAndData(ea_data, *mat);
1461 }
1462 }
1463 else // We create, compute the sparsity, and fill the sparse matrix
1464 {
1465 mat = new SparseMatrix;
1466 mat->OverrideSize(height, width);
1467 if (fes.IsDGSpace())
1468 {
1469 const L2ElementRestriction *restE =
1470 static_cast<const L2ElementRestriction*>(elem_restrict);
1471 const L2FaceRestriction *restF =
1472 static_cast<const L2FaceRestriction*>(int_face_restrict_lex);
1473 // 1. Fill I
1474 mat->GetMemoryI().New(height+1, mat->GetMemoryI().GetMemoryType());
1475 // 1.1 Increment with restE
1476 restE->FillI(*mat);
1477 // 1.2 Increment with restF
1478 if (restF) { restF->FillI(*mat, keep_nbr_block); }
1479 // 1.3 Sum the non-zeros in I
1480 auto h_I = mat->HostReadWriteI();
1481 int cpt = 0;
1482 for (int i = 0; i < height; i++)
1483 {
1484 const int nnz = h_I[i];
1485 h_I[i] = cpt;
1486 cpt += nnz;
1487 }
1488 const int nnz = cpt;
1489 h_I[height] = nnz;
1490 mat->GetMemoryJ().New(nnz, mat->GetMemoryJ().GetMemoryType());
1491 mat->GetMemoryData().New(nnz, mat->GetMemoryData().GetMemoryType());
1492 // 2. Fill J and Data
1493 // 2.1 Fill J and Data with Elem ea_data
1494 restE->FillJAndData(ea_data, *mat);
1495 // 2.2 Fill J and Data with Face ea_data_ext
1496 if (restF) { restF->FillJAndData(ea_data_ext, *mat, keep_nbr_block); }
1497 // 2.3 Shift indirections in I back to original
1498 auto I = mat->HostReadWriteI();
1499 for (int i = height; i > 0; i--)
1500 {
1501 I[i] = I[i-1];
1502 }
1503 I[0] = 0;
1504 }
1505 else // continuous Galerkin case
1506 {
1507 const ElementRestriction &rest =
1508 static_cast<const ElementRestriction&>(*elem_restrict);
1509 rest.FillSparseMatrix(ea_data, *mat);
1510 }
1511 a->mat = mat;
1512 }
1513 if ( a->sort_sparse_matrix )
1514 {
1516 }
1517}
1518
1519
1521{
1522#ifdef MFEM_USE_MPI
1523 if ( auto pa = dynamic_cast<ParBilinearForm*>(a) )
1524 {
1525 pa->ParallelRAP(*pa->mat, A);
1526 }
1527 else
1528#endif
1529 {
1530 a->SerialRAP(A);
1531 }
1532}
1533
1535 OperatorHandle &A)
1536{
1537 MFEM_VERIFY(a->diag_policy == DiagonalPolicy::DIAG_ONE,
1538 "Only DiagonalPolicy::DIAG_ONE supported with"
1539 " FABilinearFormExtension.");
1540#ifdef MFEM_USE_MPI
1541 if ( dynamic_cast<ParBilinearForm*>(a) )
1542 {
1543 A.As<HypreParMatrix>()->EliminateBC(ess_dofs,
1545 }
1546 else
1547#endif
1548 {
1549 A.As<SparseMatrix>()->EliminateBC(ess_dofs,
1551 }
1552}
1553
1555 OperatorHandle &A)
1556{
1557 RAP(A);
1558 EliminateBC(ess_dofs, A);
1559}
1560
1562 Vector &x, Vector &b,
1563 OperatorHandle &A,
1564 Vector &X, Vector &B,
1565 int copy_interior)
1566{
1567 Operator *A_out;
1568 Operator::FormLinearSystem(ess_tdof_list, x, b, A_out, X, B, copy_interior);
1569 delete A_out;
1570 FormSystemMatrix(ess_tdof_list, A);
1571}
1572
1574{
1575#ifdef MFEM_USE_MPI
1576 const ParFiniteElementSpace *pfes;
1577 if ( (pfes = dynamic_cast<const ParFiniteElementSpace*>(test_fes)) )
1578 {
1579 // DG Prolongation
1580 ParGridFunction x_gf;
1581 x_gf.MakeRef(const_cast<ParFiniteElementSpace*>(pfes),
1582 const_cast<Vector&>(x),0);
1583 x_gf.ExchangeFaceNbrData();
1584 Vector &shared_x = x_gf.FaceNbrData();
1585 const int local_size = a->FESpace()->GetVSize();
1586 auto dg_x_ptr = dg_x.Write();
1587 auto x_ptr = x.Read();
1588 mfem::forall(local_size, [=] MFEM_HOST_DEVICE (int i)
1589 {
1590 dg_x_ptr[i] = x_ptr[i];
1591 });
1592 const int shared_size = shared_x.Size();
1593 auto shared_x_ptr = shared_x.Read();
1594 mfem::forall(shared_size, [=] MFEM_HOST_DEVICE (int i)
1595 {
1596 dg_x_ptr[local_size+i] = shared_x_ptr[i];
1597 });
1598 ParBilinearForm *pform = nullptr;
1599 if ((pform = dynamic_cast<ParBilinearForm*>(a)) && (pform->keep_nbr_block))
1600 {
1601 mat->Mult(dg_x, dg_y);
1602 // DG Restriction
1603 auto dg_y_ptr = dg_y.Read();
1604 auto y_ptr = y.ReadWrite();
1605 mfem::forall(local_size, [=] MFEM_HOST_DEVICE (int i)
1606 {
1607 y_ptr[i] += dg_y_ptr[i];
1608 });
1609 }
1610 else
1611 {
1612 mat->Mult(dg_x, y);
1613 }
1614 }
1615 else
1616#endif
1617 {
1618 mat->Mult(x, y);
1619 }
1620}
1621
1623{
1624 if ( a->GetFBFI()->Size()>0 )
1625 {
1626 DGMult(x, y);
1627 }
1628 else
1629 {
1630 mat->Mult(x, y);
1631 }
1632}
1633
1635{
1636#ifdef MFEM_USE_MPI
1637 const ParFiniteElementSpace *pfes;
1638 if ( (pfes = dynamic_cast<const ParFiniteElementSpace*>(test_fes)) )
1639 {
1640 // DG Prolongation
1641 ParGridFunction x_gf;
1642 x_gf.MakeRef(const_cast<ParFiniteElementSpace*>(pfes),
1643 const_cast<Vector&>(x),0);
1644 x_gf.ExchangeFaceNbrData();
1645 Vector &shared_x = x_gf.FaceNbrData();
1646 const int local_size = a->FESpace()->GetVSize();
1647 auto dg_x_ptr = dg_x.Write();
1648 auto x_ptr = x.Read();
1649 mfem::forall(local_size, [=] MFEM_HOST_DEVICE (int i)
1650 {
1651 dg_x_ptr[i] = x_ptr[i];
1652 });
1653 const int shared_size = shared_x.Size();
1654 auto shared_x_ptr = shared_x.Read();
1655 mfem::forall(shared_size, [=] MFEM_HOST_DEVICE (int i)
1656 {
1657 dg_x_ptr[local_size+i] = shared_x_ptr[i];
1658 });
1659 ParBilinearForm *pb = nullptr;
1660 if ((pb = dynamic_cast<ParBilinearForm*>(a)) && (pb->keep_nbr_block))
1661 {
1662 mat->MultTranspose(dg_x, dg_y);
1663 // DG Restriction
1664 auto dg_y_ptr = dg_y.Read();
1665 auto y_ptr = y.ReadWrite();
1666 mfem::forall(local_size, [=] MFEM_HOST_DEVICE (int i)
1667 {
1668 y_ptr[i] += dg_y_ptr[i];
1669 });
1670 }
1671 else
1672 {
1673 mat->MultTranspose(dg_x, y);
1674 }
1675 }
1676 else
1677#endif
1678 {
1679 mat->MultTranspose(x, y);
1680 }
1681}
1682
1684{
1685 if ( a->GetFBFI()->Size()>0 )
1686 {
1687 DGMultTranspose(x, y);
1688 }
1689 else
1690 {
1691 mat->MultTranspose(x, y);
1692 }
1693}
1694
1695
1697 : Operator(form->Height(), form->Width()), a(form)
1698{
1699 // empty
1700}
1701
1706
1708{
1709 return a->GetRestriction();
1710}
1711
1716
1721
1722// Data and methods for partially-assembled bilinear forms
1723
1725 MixedBilinearForm *form)
1727 trial_fes(form->TrialFESpace()),
1728 test_fes(form->TestFESpace()),
1729 elem_restrict_trial(NULL),
1730 elem_restrict_test(NULL)
1731{
1732 Update();
1733}
1734
1736{
1737 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1738 const int integratorCount = integrators.Size();
1739 for (int i = 0; i < integratorCount; ++i)
1740 {
1741 integrators[i]->AssemblePA(*trial_fes, *test_fes);
1742 }
1743 MFEM_VERIFY(a->GetBBFI()->Size() == 0,
1744 "Partial assembly does not support AddBoundaryIntegrator yet.");
1745 MFEM_VERIFY(a->GetTFBFI()->Size() == 0,
1746 "Partial assembly does not support AddTraceFaceIntegrator yet.");
1747 MFEM_VERIFY(a->GetBTFBFI()->Size() == 0,
1748 "Partial assembly does not support AddBdrTraceFaceIntegrator yet.");
1749}
1750
1773
1775 const Array<int> &trial_tdof_list,
1776 const Array<int> &test_tdof_list,
1777 OperatorHandle &A)
1778{
1779 Operator * oper;
1780 Operator::FormRectangularSystemOperator(trial_tdof_list, test_tdof_list,
1781 oper);
1782 A.Reset(oper); // A will own oper
1783}
1784
1786 const Array<int> &trial_tdof_list,
1787 const Array<int> &test_tdof_list,
1788 Vector &x, Vector &b,
1789 OperatorHandle &A,
1790 Vector &X, Vector &B)
1791{
1792 Operator *oper;
1793 Operator::FormRectangularLinearSystem(trial_tdof_list, test_tdof_list, x, b,
1794 oper, X, B);
1795 A.Reset(oper); // A will own oper
1796}
1797
1799 const Operator *elem_restrict_x,
1800 const Vector &x,
1801 Vector &localX,
1802 const Operator *elem_restrict_y,
1803 Vector &y,
1804 Vector &localY,
1805 const real_t c) const
1806{
1807 // * G operation: localX = c*local(x)
1808 if (elem_restrict_x)
1809 {
1810 elem_restrict_x->Mult(x, localX);
1811 if (c != 1.0)
1812 {
1813 localX *= c;
1814 }
1815 }
1816 else
1817 {
1818 if (c == 1.0)
1819 {
1820 localX.SyncAliasMemory(x);
1821 }
1822 else
1823 {
1824 localX.Set(c, x);
1825 }
1826 }
1827 if (elem_restrict_y)
1828 {
1829 localY = 0.0;
1830 }
1831 else
1832 {
1833 y.UseDevice(true);
1834 localY.SyncAliasMemory(y);
1835 }
1836}
1837
1839{
1840 y = 0.0;
1841 AddMult(x, y);
1842}
1843
1845 const real_t c) const
1846{
1847 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1848 const int iSz = integrators.Size();
1849
1850 // * G operation
1853
1854 // * B^TDB operation
1855 for (int i = 0; i < iSz; ++i)
1856 {
1857 integrators[i]->AddMultPA(localTrial, localTest);
1858 }
1859
1860 // * G^T operation
1862 {
1863 tempY.SetSize(y.Size());
1865 y += tempY;
1866 }
1867}
1868
1870 Vector &y) const
1871{
1872 y = 0.0;
1873 AddMultTranspose(x, y);
1874}
1875
1877 const real_t c) const
1878{
1879 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1880 const int iSz = integrators.Size();
1881
1882 // * G operation
1885
1886 // * B^TD^TB operation
1887 for (int i = 0; i < iSz; ++i)
1888 {
1889 integrators[i]->AddMultTransposePA(localTest, localTrial);
1890 }
1891
1892 // * G^T operation
1894 {
1895 tempY.SetSize(y.Size());
1897 y += tempY;
1898 }
1899}
1900
1902 Vector &diag) const
1903{
1904 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1905
1906 const int iSz = integrators.Size();
1907
1909 {
1910 const ElementRestriction* H1elem_restrict_trial =
1911 dynamic_cast<const ElementRestriction*>(elem_restrict_trial);
1912 if (H1elem_restrict_trial)
1913 {
1914 H1elem_restrict_trial->MultUnsigned(D, localTrial);
1915 }
1916 else
1917 {
1919 }
1920 }
1921
1923 {
1924 localTest = 0.0;
1925 for (int i = 0; i < iSz; ++i)
1926 {
1928 {
1929 integrators[i]->AssembleDiagonalPA_ADAt(localTrial, localTest);
1930 }
1931 else
1932 {
1933 integrators[i]->AssembleDiagonalPA_ADAt(D, localTest);
1934 }
1935 }
1936 const ElementRestriction* H1elem_restrict_test =
1937 dynamic_cast<const ElementRestriction*>(elem_restrict_test);
1938 if (H1elem_restrict_test)
1939 {
1940 H1elem_restrict_test->MultTransposeUnsigned(localTest, diag);
1941 }
1942 else
1943 {
1945 }
1946 }
1947 else
1948 {
1949 diag.UseDevice(true); // typically this is a large vector, so store on device
1950 diag = 0.0;
1951 for (int i = 0; i < iSz; ++i)
1952 {
1954 {
1955 integrators[i]->AssembleDiagonalPA_ADAt(localTrial, diag);
1956 }
1957 else
1958 {
1959 integrators[i]->AssembleDiagonalPA_ADAt(D, diag);
1960 }
1961 }
1962 }
1963}
1964
1970
1971const
1977
1979{
1980 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
1981 const int integratorCount = integrators.Size();
1982 for (int i = 0; i < integratorCount; ++i)
1983 {
1984 integrators[i]->AssemblePA(*trial_fes, *test_fes);
1985 }
1986
1987 test_multiplicity.UseDevice(true);
1988 test_multiplicity.SetSize(elem_restrict_test->Width()); // l-vector
1989 Vector ones(elem_restrict_test->Height()); // e-vector
1990 ones = 1.0;
1991
1992 const ElementRestriction* elem_restrict =
1993 dynamic_cast<const ElementRestriction*>(elem_restrict_test);
1994 if (elem_restrict)
1995 {
1996 elem_restrict->MultTransposeUnsigned(ones, test_multiplicity);
1997 }
1998 else
1999 {
2000 mfem_error("A real ElementRestriction is required in this setting!");
2001 }
2002
2003 auto tm = test_multiplicity.ReadWrite();
2004 mfem::forall(test_multiplicity.Size(), [=] MFEM_HOST_DEVICE (int i)
2005 {
2006 tm[i] = 1.0 / tm[i];
2007 });
2008}
2009
2011 const Vector &x, Vector &y, const real_t c) const
2012{
2013 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
2014 const int iSz = integrators.Size();
2015
2016 // * G operation
2019
2020 // * B^TDB operation
2021 for (int i = 0; i < iSz; ++i)
2022 {
2023 integrators[i]->AddMultPA(localTrial, localTest);
2024 }
2025
2026 // do a kind of "set" rather than "add" in the below
2027 // operation as compared to the BilinearForm case
2028 // * G^T operation (kind of...)
2029 const ElementRestriction* elem_restrict =
2030 dynamic_cast<const ElementRestriction*>(elem_restrict_test);
2031 if (elem_restrict)
2032 {
2033 tempY.SetSize(y.Size());
2034 elem_restrict->MultLeftInverse(localTest, tempY);
2035 y += tempY;
2036 }
2037 else
2038 {
2039 mfem_error("In this setting you need a real ElementRestriction!");
2040 }
2041}
2042
2044 const Vector &x, Vector &y, const real_t c) const
2045{
2046 Array<BilinearFormIntegrator*> &integrators = *a->GetDBFI();
2047 const int iSz = integrators.Size();
2048
2049 // do a kind of "set" rather than "add" in the below
2050 // operation as compared to the BilinearForm case
2051 // * G operation (kinda)
2052 Vector xscaled(x);
2053 MFEM_VERIFY(x.Size() == test_multiplicity.Size(), "Input vector of wrong size");
2054 auto xs = xscaled.ReadWrite();
2055 auto tm = test_multiplicity.Read();
2056 mfem::forall(x.Size(), [=] MFEM_HOST_DEVICE (int i)
2057 {
2058 xs[i] *= tm[i];
2059 });
2062
2063 // * B^TD^TB operation
2064 for (int i = 0; i < iSz; ++i)
2065 {
2066 integrators[i]->AddMultTransposePA(localTest, localTrial);
2067 }
2068
2069 // * G^T operation
2071 {
2072 tempY.SetSize(y.Size());
2074 y += tempY;
2075 }
2076 else
2077 {
2078 mfem_error("Trial ElementRestriction not defined");
2079 }
2080}
2081
2083 const Array<int>& ess1, const Array<int>& ess2, OperatorHandle &A)
2084{
2085 const Operator *Pi = this->GetProlongation();
2086 const Operator *RoT = this->GetOutputRestrictionTranspose();
2087 Operator *rap = SetupRAP(Pi, RoT);
2088
2090 = new RectangularConstrainedOperator(rap, ess1, ess2, rap != this);
2091
2092 A.Reset(Arco);
2093}
2094
2095} // namespace mfem
MFEM_HOST_DEVICE T AtomicAdd(T &add, const T val)
Definition backends.hpp:93
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
Definition array.hpp:758
int Size() const
Return the logical size of the array.
Definition array.hpp:147
const T * Read(bool on_dev=true) const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), on_dev).
Definition array.hpp:337
Class extending the BilinearForm class to support different AssemblyLevels.
BilinearFormExtension(BilinearForm *form)
const Operator * GetRestriction() const override
Get the finite element space restriction matrix.
BilinearForm * a
Not owned.
const Operator * GetProlongation() const override
Get the finite element space prolongation matrix.
Abstract base class BilinearFormIntegrator.
virtual void AssembleDiagonalPA(Vector &diag)
Assemble diagonal and add it to Vector diag.
virtual void AddMultTransposePA(const Vector &x, Vector &y) const
Method for partially assembled transposed action.
void AddMultPA(const Vector &x, Vector &y) const override
Method for partially assembled action.
virtual void AddMultPAFaceNormalDerivatives(const Vector &x, const Vector &dxdn, Vector &y, Vector &dydn) const
Method for partially assembled action.
A "square matrix" operator for the associated FE space and BLFIntegrators The sum of all the BLFInteg...
Array< BilinearFormIntegrator * > * GetFBFI()
Access all integrators added with AddInteriorFaceIntegrator().
Array< BilinearFormIntegrator * > * GetDBFI()
Access all the integrators added with AddDomainIntegrator().
DiagonalPolicy diag_policy
This data member allows one to specify what should be done to the diagonal matrix entries and corresp...
FiniteElementSpace * FESpace()
Return the FE space associated with the BilinearForm.
Array< BilinearFormIntegrator * > * GetBBFI()
Access all the integrators added with AddBoundaryIntegrator().
Array< Array< int > * > * GetBBFI_Marker()
Access all boundary markers added with AddBoundaryIntegrator(). If no marker was specified when the i...
SparseMatrix * mat
Sparse matrix to be associated with the form. Owned.
const Operator * GetRestriction() const override
Get the finite element space restriction operator.
const Operator * GetProlongation() const override
Get the finite element space prolongation operator.
void SerialRAP(OperatorHandle &A)
Compute serial RAP operator and store it in A as a SparseMatrix.
Array< BilinearFormIntegrator * > * GetBFBFI()
Access all integrators added with AddBdrFaceIntegrator().
Array< Array< int > * > * GetBFBFI_Marker()
Access all boundary markers added with AddBdrFaceIntegrator(). If no marker was specified when the in...
Array< Array< int > * > * GetDBFI_Marker()
Access all boundary markers added with AddDomainIntegrator(). If no marker was specified when the int...
Rank 3 tensor (array of matrices)
void SetSize(int i, int j, int k, MemoryType mt_=MemoryType::PRESERVE)
int TotalSize() const
real_t * Write(bool on_dev=true)
Shortcut for mfem::Write(GetMemory(), TotalSize(), on_dev).
static MemoryType GetMemoryType()
(DEPRECATED) Equivalent to GetDeviceMemoryType().
Definition device.hpp:278
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Definition device.hpp:259
static MemoryType GetDeviceMemoryType()
Get the current Device MemoryType. This is the MemoryType used by most MFEM classes when allocating m...
Definition device.hpp:274
Data and methods for element-assembled bilinear forms.
void Mult(const Vector &x, Vector &y) const override
Operator application: y=A(x).
void GetElementMatrices(DenseTensor &element_matrices, ElementDofOrdering ordering, bool add_bdr)
Populates element_matrices with the element matrices.
void MultTranspose(const Vector &x, Vector &y) const override
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
EABilinearFormExtension(BilinearForm *form)
void Assemble() override
Assemble at the level given for the BilinearFormExtension subclass.
Operator that converts FiniteElementSpace L-vectors to E-vectors.
void FillSparseMatrix(const Vector &mat_ea, SparseMatrix &mat) const
Fill a Sparse Matrix with Element Matrices.
void FillJAndData(const Vector &ea_data, SparseMatrix &mat) const
void MultTransposeUnsigned(const Vector &x, Vector &y) const
Compute MultTranspose without applying signs based on DOF orientations.
void MultLeftInverse(const Vector &x, Vector &y) const
void MultUnsigned(const Vector &x, Vector &y) const
Compute Mult without applying signs based on DOF orientations.
void MultTranspose(const Vector &x, Vector &y) const override
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
void DGMultTranspose(const Vector &x, Vector &y) const
void EliminateBC(const Array< int > &ess_dofs, OperatorHandle &A)
void DGMult(const Vector &x, Vector &y) const
void FormSystemMatrix(const Array< int > &ess_tdof_list, OperatorHandle &A) override
void Mult(const Vector &x, Vector &y) const override
Operator application: y=A(x).
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B, int copy_interior=0) override
void Assemble() override
Assemble at the level given for the BilinearFormExtension subclass.
FABilinearFormExtension(BilinearForm *form)
virtual void AddMultTransposeInPlace(Vector &x, Vector &y) const
Add the face degrees of freedom x to the element degrees of freedom y. Perform the same computation a...
virtual void NormalDerivativeAddMultTranspose(const Vector &x, Vector &y) const
Add the face reference-normal derivative degrees of freedom in x to the element degrees of freedom in...
virtual void AddMultTransposeUnsigned(const Vector &x, Vector &y, const real_t a=1.0) const
Add the face degrees of freedom x to the element degrees of freedom y ignoring the signs from DOF ori...
virtual void NormalDerivativeMult(const Vector &x, Vector &y) const
For each face, sets y to the partial derivative of x with respect to the reference coordinate whose d...
void Mult(const Vector &x, Vector &y) const override=0
Extract the face degrees of freedom from x into y.
Class FiniteElementSpace - responsible for providing FEM view of the mesh, mainly managing the set of...
Definition fespace.hpp:244
const NURBSExtension * GetNURBSext() const
Definition fespace.hpp:681
bool Conforming() const
Definition fespace.hpp:685
virtual const FiniteElement * GetFE(int i) const
Returns pointer to the FiniteElement in the FiniteElementCollection associated with i'th element in t...
Definition fespace.cpp:3835
const ElementRestrictionOperator * GetElementRestriction(ElementDofOrdering e_ordering) const
Return an Operator that converts L-vectors to E-vectors.
Definition fespace.cpp:1510
const FiniteElement * GetTypicalTraceElement() const
Return a "typical" trace element.
Definition fespace.cpp:3959
int GetNFbyType(FaceType type) const
Returns the number of faces according to the requested type.
Definition fespace.hpp:908
Mesh * GetMesh() const
Returns the mesh.
Definition fespace.hpp:679
int GetVSize() const
Return the number of vector dofs, i.e. GetNDofs() x GetVDim().
Definition fespace.hpp:848
bool IsDGSpace() const
Return whether or not the space is discontinuous (L2)
Definition fespace.hpp:1510
const FiniteElement * GetTypicalFE() const
Return GetFE(0) if the local mesh is not empty; otherwise return a typical FE based on the Geometry t...
Definition fespace.cpp:3871
virtual const FaceRestriction * GetFaceRestriction(ElementDofOrdering f_ordering, FaceType, L2FaceValues mul=L2FaceValues::DoubleValued) const
Return an Operator that converts L-vectors to E-vectors on each face.
Definition fespace.cpp:1543
virtual void GetFaceMap(const int face_id, Array< int > &face_map) const
Return the mapping from lexicographic face DOFs to lexicographic element DOFs for the given local fac...
Definition fe_base.cpp:507
int GetDof() const
Returns the number of degrees of freedom in the finite element.
Definition fe_base.hpp:334
Wrapper for hypre's ParCSR matrix class.
Definition hypre.hpp:408
Operator that converts L2 FiniteElementSpace L-vectors to E-vectors.
void FillJAndData(const Vector &ea_data, SparseMatrix &mat) const
void FillI(SparseMatrix &mat) const
Operator that extracts Face degrees of freedom for L2 spaces.
virtual void FillI(SparseMatrix &mat, const bool keep_nbr_block=false) const
Fill the I array of SparseMatrix corresponding to the sparsity pattern given by this L2FaceRestrictio...
virtual void FillJAndData(const Vector &fea_data, SparseMatrix &mat, const bool keep_nbr_block=false) const
Fill the J and Data arrays of the SparseMatrix corresponding to the sparsity pattern given by this L2...
virtual void AddFaceMatricesToElementMatrices(const Vector &fea_data, Vector &ea_data) const
This method adds the DG face matrices to the element matrices.
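Placeholder removed.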
void Mult(const Vector &x, Vector &y) const override
Operator application: y=A(x).
const FiniteElementSpace * trial_fes
void AssembleDiagonal(Vector &diag) const override
Computes the diagonal entries into diag. Typically, this operation only makes sense for linear Operat...
const FaceRestriction * bdr_face_restrict_lex
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B, int copy_interior=0) override
void MultTranspose(const Vector &x, Vector &y) const override
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
void Assemble() override
Assemble at the level given for the BilinearFormExtension subclass.
const FiniteElementSpace * test_fes
const FaceRestriction * int_face_restrict_lex
void FormSystemMatrix(const Array< int > &ess_tdof_list, OperatorHandle &A) override
MFBilinearFormExtension(BilinearForm *form)
MemoryType GetMemoryType() const
Return a MemoryType that is currently valid. If both the host and the device pointers are currently v...
void New(int size)
Allocate host memory for size entries with the current host memory type returned by MemoryManager::Ge...
Mesh data type.
Definition mesh.hpp:64
int GetNumFaces() const
Return the number of faces (3D), edges (2D) or vertices (1D).
Definition mesh.cpp:6531
int GetAttribute(int i) const
Return the attribute of element i.
Definition mesh.hpp:1389
int GetBdrAttribute(int i) const
Return the attribute of boundary element i.
Definition mesh.hpp:1395
int GetBdrElementFaceIndex(int be_idx) const
Return the local face (codimension-1) index for the given boundary element index.
Definition mesh.hpp:1588
int GetNE() const
Returns number of elements.
Definition mesh.hpp:1282
int Dimension() const
Dimension of the reference space used within the elements.
Definition mesh.hpp:1216
FaceInformation GetFaceInformation(int f) const
Definition mesh.cpp:1193
int GetNBE() const
Returns number of boundary elements.
Definition mesh.hpp:1285
Class extending the MixedBilinearForm class to support different AssemblyLevels.
MixedBilinearForm * a
Not owned.
const Operator * GetOutputProlongation() const override
Get the output finite element space prolongation matrix.
const Operator * GetProlongation() const override
Get the finite element space prolongation matrix.
const Operator * GetOutputRestriction() const override
Get the output finite element space restriction matrix.
MixedBilinearFormExtension(MixedBilinearForm *form)
const Operator * GetRestriction() const override
Get the finite element space restriction matrix.
Array< BilinearFormIntegrator * > * GetTFBFI()
Access all integrators added with AddTraceFaceIntegrator().
const Operator * GetOutputProlongation() const override
Get the test finite element space prolongation matrix.
const Operator * GetOutputRestriction() const override
Get the test finite element space restriction matrix.
Array< BilinearFormIntegrator * > * GetBBFI()
Access all integrators added with AddBoundaryIntegrator().
FiniteElementSpace * TestFESpace()
Return the test FE space associated with the BilinearForm.
const Operator * GetRestriction() const override
Get the input finite element space restriction matrix.
const Operator * GetProlongation() const override
Get the input finite element space prolongation matrix.
FiniteElementSpace * TrialFESpace()
Return the trial FE space associated with the BilinearForm.
Array< BilinearFormIntegrator * > * GetDBFI()
Access all integrators added with AddDomainIntegrator().
Array< BilinearFormIntegrator * > * GetBTFBFI()
Access all integrators added with AddBdrTraceFaceIntegrator().
Pointer to an Operator of a specified type.
Definition handle.hpp:34
OpType * As() const
Return the Operator pointer statically cast to a specified OpType. Similar to the method Get().
Definition handle.hpp:104
void Reset(OpType *A, bool own_A=true)
Reset the OperatorHandle to the given OpType pointer, A.
Definition handle.hpp:145
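A minimal sketch of the Reset/As pattern; the SparseMatrix here is only an illustration of a concrete OpType:
   OperatorHandle A;
   A.Reset(new SparseMatrix(10, 10));    // A takes ownership of the matrix
   SparseMatrix *S = A.As<SparseMatrix>();
   S->Add(0, 0, 1.0);
   S->Finalize();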
Abstract operator.
Definition operator.hpp:25
void FormRectangularLinearSystem(const Array< int > &trial_tdof_list, const Array< int > &test_tdof_list, Vector &x, Vector &b, Operator *&A, Vector &X, Vector &B)
Form a column-constrained linear system using a matrix-free approach.
Definition operator.cpp:131
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, Operator *&A, Vector &X, Vector &B, int copy_interior=0)
Form a constrained linear system using a matrix-free approach.
Definition operator.cpp:114
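A hedged end-to-end sketch of the matrix-free path that ends up in this routine (fespace, ess_tdof_list, a GridFunction x, and a LinearForm b are assumed to be set up elsewhere):
   BilinearForm a(&fespace);
   a.AddDomainIntegrator(new DiffusionIntegrator);
   a.SetAssemblyLevel(AssemblyLevel::PARTIAL);
   a.Assemble();
   OperatorHandle A;
   Vector X, B;
   a.FormLinearSystem(ess_tdof_list, x, b, A, X, B);
   CG(*A, B, X, 1, 400, 1e-12, 0.0);     // any iterative solver works on the constrained Operator
   a.RecoverFEMSolution(X, b, x);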
int width
Dimension of the input / number of columns in the matrix.
Definition operator.hpp:28
void FormSystemOperator(const Array< int > &ess_tdof_list, Operator *&A)
Return in A a parallel (on truedofs) version of this square operator.
Definition operator.cpp:227
virtual const Operator * GetOutputRestrictionTranspose() const
Transpose of GetOutputRestriction, directly available in this form to facilitate matrix-free RAP-type...
Definition operator.hpp:156
int Height() const
Get the height (size of output) of the Operator. Synonym with NumRows().
Definition operator.hpp:66
int height
Dimension of the output / number of rows in the matrix.
Definition operator.hpp:27
virtual void Mult(const Vector &x, Vector &y) const =0
Operator application: y=A(x).
@ DIAG_ONE
Set the diagonal value to one.
Definition operator.hpp:50
int Width() const
Get the width (size of input) of the Operator. Synonym with NumCols().
Definition operator.hpp:72
void FormRectangularSystemOperator(const Array< int > &trial_tdof_list, const Array< int > &test_tdof_list, Operator *&A)
Return in A a parallel (on truedofs) version of this rectangular operator (including constraints).
Definition operator.cpp:235
Operator * SetupRAP(const Operator *Pi, const Operator *Po)
Returns RAP Operator of this, using input/output Prolongation matrices. Pi corresponds to "P",...
Definition operator.cpp:168
virtual void MultTranspose(const Vector &x, Vector &y) const
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
Definition operator.hpp:93
Data and methods for partially-assembled bilinear forms.
void FormSystemMatrix(const Array< int > &ess_tdof_list, OperatorHandle &A) override
Array< int > elem_attributes
Attributes of all mesh elements.
void AssembleDiagonal(Vector &diag) const override
Computes the diagonal entries into diag. Typically, this operation only makes sense for linear Operat...
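One hedged use of the assembled diagonal is Jacobi preconditioning of a partially assembled form (a, ess_tdof_list, A, B, and X as in the FormLinearSystem sketch above):
   OperatorJacobiSmoother jacobi(a, ess_tdof_list);
   CGSolver cg;
   cg.SetOperator(*A);
   cg.SetPreconditioner(jacobi);
   cg.SetRelTol(1e-12);
   cg.SetMaxIter(400);
   cg.Mult(B, X);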
const FiniteElementSpace * trial_fes
const FaceRestriction * int_face_restrict_lex
const FaceRestriction * bdr_face_restrict_lex
void AddMultNormalDerivativesWithMarkers(const BilinearFormIntegrator &integ, const Vector &x, const Vector &dxdn, const Array< int > *markers, const Array< int > &attributes, Vector &y, Vector &dydn) const
Performs the same function as AddMultWithMarkers, but takes as input and output face normal derivativ...
void AddMultWithMarkers(const BilinearFormIntegrator &integ, const Vector &x, const Array< int > *markers, const Array< int > &attributes, const bool transpose, Vector &y) const
Accumulate the action (or transpose) of the integrator on x into y, taking into account the (possibly...
void FormLinearSystem(const Array< int > &ess_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B, int copy_interior=0) override
void MultTranspose(const Vector &x, Vector &y) const override
Action of the transpose operator: y=A^t(x). The default behavior in class Operator is to generate an ...
const FiniteElementSpace * test_fes
void Assemble() override
Assemble at the level given for the BilinearFormExtension subclass.
void Mult(const Vector &x, Vector &y) const override
Operator application: y=A(x).
void SetupRestrictionOperators(const L2FaceValues m)
void Assemble() override
Partial assembly of all internal integrators.
PADiscreteLinearOperatorExtension(DiscreteLinearOperator *linop)
void AddMult(const Vector &x, Vector &y, const real_t c=1.0) const override
Operator application: y+=A(x) (default) or y+=a*A(x).
void AddMultTranspose(const Vector &x, Vector &y, const real_t c=1.0) const override
Operator transpose application: y+=A^t(x) (default) or y+=a*A^t(x).
const Operator * GetOutputRestrictionTranspose() const override
Transpose of GetOutputRestriction, directly available in this form to facilitate matrix-free RAP-type...
void FormRectangularSystemOperator(const Array< int > &, const Array< int > &, OperatorHandle &A) override
Data and methods for partially-assembled mixed bilinear forms.
void FormRectangularSystemOperator(const Array< int > &trial_tdof_list, const Array< int > &test_tdof_list, OperatorHandle &A) override
Setup OperatorHandle A to contain constrained linear operator.
const FiniteElementSpace * test_fes
void Assemble() override
Partial assembly of all internal integrators.
PAMixedBilinearFormExtension(MixedBilinearForm *form)
void MultTranspose(const Vector &x, Vector &y) const override
y = A^T*x
const FiniteElementSpace * trial_fes
void AddMultTranspose(const Vector &x, Vector &y, const real_t c=1.0) const override
y += c*A^T*x
void AssembleDiagonal_ADAt(const Vector &D, Vector &diag) const override
Assemble the diagonal of ADA^T for a diagonal vector D.
void SetupMultInputs(const Operator *elem_restrict_x, const Vector &x, Vector &localX, const Operator *elem_restrict_y, Vector &y, Vector &localY, const real_t c) const
Helper function to set up inputs/outputs for Mult or MultTranspose.
void FormRectangularLinearSystem(const Array< int > &trial_tdof_list, const Array< int > &test_tdof_list, Vector &x, Vector &b, OperatorHandle &A, Vector &X, Vector &B) override
void Update() override
Update internals for when a new MixedBilinearForm is given to this class.
void AddMult(const Vector &x, Vector &y, const real_t c=1.0) const override
y += c*A*x
void Mult(const Vector &x, Vector &y) const override
y = A*x
Class for parallel bilinear form.
Abstract parallel finite element space.
Definition pfespace.hpp:29
Class for parallel grid function.
Definition pgridfunc.hpp:50
void MakeRef(FiniteElementSpace *f, real_t *v) override
Make the ParGridFunction reference external data on a new FiniteElementSpace.
Rectangular Operator for imposing essential boundary conditions on the input space using only the act...
Data type sparse matrix.
Definition sparsemat.hpp:51
void MultTranspose(const Vector &x, Vector &y) const override
Multiply a vector with the transposed matrix. y = At * x.
Memory< int > & GetMemoryI()
void Mult(const Vector &x, Vector &y) const override
Matrix vector multiplication.
void OverrideSize(int height_, int width_)
Sets the height and width of the matrix.
void SortColumnIndices()
Sort the column indices corresponding to each row.
Memory< int > & GetMemoryJ()
Memory< real_t > & GetMemoryData()
const Array< int > & GetDofMap() const
Get an Array<int> that maps lexicographically ordered indices to the indices of the respective nodes/...
Definition fe_base.hpp:1273
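A hedged sketch of querying this map on a tensor-product space (fes is assumed to use tensor basis elements):
   const TensorBasisElement *tbe =
      dynamic_cast<const TensorBasisElement *>(fes.GetTypicalFE());
   if (tbe)
   {
      // lexicographic index -> native dof index; an empty array means the
      // native ordering is already lexicographic
      const Array<int> &dof_map = tbe->GetDofMap();
   }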
Vector data type.
Definition vector.hpp:82
virtual const real_t * Read(bool on_dev=true) const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:494
virtual real_t * ReadWrite(bool on_dev=true)
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:510
void SyncAliasMemory(const Vector &v) const
Update the alias memory location of the vector to match v.
Definition vector.hpp:267
Vector & Set(const real_t a, const Vector &x)
(*this) = a * x
Definition vector.cpp:337
int Size() const
Returns the size of the vector.
Definition vector.hpp:226
virtual void UseDevice(bool use_dev) const
Enable execution of Vector operations using the mfem::Device.
Definition vector.hpp:144
void SetSize(int s)
Resize the vector to size s.
Definition vector.hpp:558
virtual real_t * Write(bool on_dev=true)
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), on_dev).
Definition vector.hpp:502
int dim
Definition ex24.cpp:53
real_t b
Definition lissajous.cpp:42
real_t a
Definition lissajous.cpp:41
void mfem_error(const char *msg)
Definition error.cpp:154
void add(const Vector &v1, const Vector &v2, Vector &v)
Definition vector.cpp:391
MFEM_HOST_DEVICE DeviceTensor< sizeof...(Dims), T > Reshape(T *ptr, Dims... dims)
Wrap a pointer as a DeviceTensor with automatically deduced template parameters.
Definition dtensor.hpp:131
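A hedged sketch of the typical kernel setup (ndofs, vdim, ne and the Vectors x, y are assumed to exist):
   auto d_x = Reshape(x.Read(), ndofs, vdim, ne);       // read-only device view
   auto d_y = Reshape(y.ReadWrite(), ndofs, vdim, ne);  // writable device view
   // Inside a forall kernel one indexes d_y(i, c, e) += d_x(i, c, e);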
bool DeviceCanUseCeed()
Function that determines if a CEED kernel should be used, based on the current mfem::Device configura...
Definition util.cpp:33
void forall_2D(int N, int X, int Y, lambda &&body)
Definition forall.hpp:762
float real_t
Definition config.hpp:43
ElementDofOrdering GetEVectorOrdering(const FiniteElementSpace &fes)
Return LEXICOGRAPHIC if mesh contains only one topology and the elements are tensor elements,...
Definition fespace.cpp:4596
ElementDofOrdering
Constants describing the possible orderings of the DOFs in one element.
Definition fespace.hpp:83
@ NATIVE
Native ordering as defined by the FiniteElement.
std::function< real_t(const Vector &)> f(real_t mass_coeff)
Definition lor_mms.hpp:30
void forall(int N, lambda &&body)
Definition forall.hpp:753
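A hedged sketch of a device-aware loop (v is an existing Vector; the body runs on GPU or CPU according to the configured mfem::Device):
   const int N = v.Size();
   real_t *d_v = v.ReadWrite();
   mfem::forall(N, [=] MFEM_HOST_DEVICE (int i) { d_v[i] *= 2.0; });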
@ CEED_MASK
Bitwise-OR of all CEED backends.
Definition device.hpp:95
This structure is used as a human readable output format that deciphers the information contained in ...
Definition mesh.hpp:1980
bool IsBoundary() const
Return true if the face is a boundary face.
Definition mesh.hpp:2021
bool IsOfFaceType(FaceType type) const
Return true if the face is of the same type as type.
Definition mesh.hpp:2027
struct mfem::Mesh::FaceInformation::@15 element[2]
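A hedged sketch of classifying mesh faces with this structure (mesh is assumed to exist):
   for (int f = 0; f < mesh.GetNumFaces(); ++f)
   {
      Mesh::FaceInformation info = mesh.GetFaceInformation(f);
      if (info.IsBoundary()) { /* boundary face */ }
      if (info.IsOfFaceType(FaceType::Interior)) { /* face shared by two elements */ }
   }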