MFEM  v4.5.2
Finite element discretization library
lor_batched.cpp
// Copyright (c) 2010-2023, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.

#include "lor_batched.hpp"
#include "../../fem/quadinterpolator.hpp"
#include "../../general/forall.hpp"
#include <climits>
#include "../pbilinearform.hpp"

// Specializations
#include "lor_h1.hpp"
#include "lor_nd.hpp"
#include "lor_rt.hpp"

namespace mfem
{

template <typename T1, typename T2>
bool HasIntegrators(BilinearForm &a)
{
   Array<BilinearFormIntegrator*> *integs = a.GetDBFI();
   if (integs == NULL) { return false; }
   if (integs->Size() == 1)
   {
      BilinearFormIntegrator *i0 = (*integs)[0];
      if (dynamic_cast<T1*>(i0) || dynamic_cast<T2*>(i0)) { return true; }
   }
   else if (integs->Size() == 2)
   {
      BilinearFormIntegrator *i0 = (*integs)[0];
      BilinearFormIntegrator *i1 = (*integs)[1];
      if ((dynamic_cast<T1*>(i0) && dynamic_cast<T2*>(i1)) ||
          (dynamic_cast<T2*>(i0) && dynamic_cast<T1*>(i1)))
      {
         return true;
      }
   }
   return false;
}
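
// Illustrative usage (not part of the library): given some FiniteElementSpace
// fes, a form with a single DiffusionIntegrator, or with both a
// DiffusionIntegrator and a MassIntegrator in either order, matches; any
// other combination of domain integrators does not.
//
//    BilinearForm a(&fes);
//    a.AddDomainIntegrator(new DiffusionIntegrator);
//    a.AddDomainIntegrator(new MassIntegrator);
//    HasIntegrators<DiffusionIntegrator, MassIntegrator>(a); // true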

bool BatchedLORAssembly::FormIsSupported(BilinearForm &a)
{
   const FiniteElementCollection *fec = a.FESpace()->FEColl();
   // TODO: check for maximum supported orders

   // Batched LOR requires all tensor elements
   if (!UsesTensorBasis(*a.FESpace())) { return false; }

   if (dynamic_cast<const H1_FECollection*>(fec))
   {
      if (HasIntegrators<DiffusionIntegrator, MassIntegrator>(a)) { return true; }
   }
   else if (dynamic_cast<const ND_FECollection*>(fec))
   {
      if (HasIntegrators<CurlCurlIntegrator, VectorFEMassIntegrator>(a)) { return true; }
   }
   else if (dynamic_cast<const RT_FECollection*>(fec))
   {
      if (HasIntegrators<DivDivIntegrator, VectorFEMassIntegrator>(a)) { return true; }
   }
   return false;
}
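
// Illustrative check (hypothetical caller): query support before choosing
// the batched LOR code path, e.g.
//
//    if (BatchedLORAssembly::FormIsSupported(a)) { /* use batched LOR */ }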

void BatchedLORAssembly::FormLORVertexCoordinates(FiniteElementSpace &fes_ho,
                                                  Vector &X_vert)
{
   Mesh &mesh_ho = *fes_ho.GetMesh();
   mesh_ho.EnsureNodes();

   // Get nodal points at the LOR vertices
   const int dim = mesh_ho.Dimension();
   const int nel_ho = mesh_ho.GetNE();
   const int order = fes_ho.GetMaxElementOrder();
   const int nd1d = order + 1;
   const int ndof_per_el = static_cast<int>(pow(nd1d, dim));

   const GridFunction *nodal_gf = mesh_ho.GetNodes();
   const FiniteElementSpace *nodal_fes = nodal_gf->FESpace();
   const Operator *nodal_restriction =
      nodal_fes->GetElementRestriction(ElementDofOrdering::LEXICOGRAPHIC);

   // Map from nodal L-vector to E-vector
   Vector nodal_evec(nodal_restriction->Height());
   nodal_restriction->Mult(*nodal_gf, nodal_evec);

   IntegrationRule ir = GetCollocatedIntRule(fes_ho);

   // Map from nodal E-vector to Q-vector at the LOR vertex points
   X_vert.SetSize(dim*ndof_per_el*nel_ho);
   const QuadratureInterpolator *quad_interp =
      nodal_fes->GetQuadratureInterpolator(ir);
   quad_interp->SetOutputLayout(QVectorLayout::byVDIM);
   quad_interp->Values(nodal_evec, X_vert);
}
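
// Sketch of the resulting layout (assuming byVDIM output, as set above):
// X_vert can be viewed as a dim x ndof_per_el x nel_ho tensor, e.g.
//
//    const auto X = Reshape(X_vert.Read(), dim, ndof_per_el, nel_ho);
//    // X(d, i, e): d-th coordinate of the i-th LOR vertex of element e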

// The following two functions (GetMinElt and GetAndIncrementNnzIndex) are
// copied from restriction.cpp. Should they be factored out?

// Return the minimal value found in both my_elts and nbr_elts
static MFEM_HOST_DEVICE int GetMinElt(const int *my_elts, const int n_my_elts,
                                      const int *nbr_elts, const int n_nbr_elts)
{
   int min_el = INT_MAX;
   for (int i = 0; i < n_my_elts; i++)
   {
      const int e_i = my_elts[i];
      if (e_i >= min_el) { continue; }
      for (int j = 0; j < n_nbr_elts; j++)
      {
         if (e_i == nbr_elts[j])
         {
            min_el = e_i; // we already know e_i < min_el
            break;
         }
      }
   }
   return min_el;
}

// Return the index where a non-zero entry should be added, and increment the
// number of non-zeros for row i_L.
static MFEM_HOST_DEVICE int GetAndIncrementNnzIndex(const int i_L, int *I)
{
   int ind = AtomicAdd(I[i_L], 1);
   return ind;
}
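
// Illustrative sketch: if I holds the CSR row offsets, e.g. I = {0, 3, 7},
// then two concurrent calls GetAndIncrementNnzIndex(1, I) return 3 and 4 in
// some order, giving each caller a unique slot in row 1 of J and the data
// array (AtomicAdd returns the value before the increment).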

int BatchedLORAssembly::FillI(SparseMatrix &A) const
{
   static constexpr int Max = 16;

   const int nvdof = fes_ho.GetVSize();

   const int ndof_per_el = fes_ho.GetFE(0)->GetDof();
   const int nel_ho = fes_ho.GetNE();
   const int nnz_per_row = sparse_mapping.Size()/ndof_per_el;

   const ElementDofOrdering ordering = ElementDofOrdering::LEXICOGRAPHIC;
   const Operator *op = fes_ho.GetElementRestriction(ordering);
   const ElementRestriction *el_restr =
      dynamic_cast<const ElementRestriction*>(op);
   MFEM_VERIFY(el_restr != nullptr, "Bad element restriction");

   const Array<int> &el_dof_lex_ = el_restr->GatherMap();
   const Array<int> &dof_glob2loc_ = el_restr->Indices();
   const Array<int> &dof_glob2loc_offsets_ = el_restr->Offsets();

   const auto el_dof_lex = Reshape(el_dof_lex_.Read(), ndof_per_el, nel_ho);
   const auto dof_glob2loc = dof_glob2loc_.Read();
   const auto K = dof_glob2loc_offsets_.Read();
   const auto map = Reshape(sparse_mapping.Read(), nnz_per_row, ndof_per_el);

   auto I = A.WriteI();

   MFEM_FORALL(ii, nvdof + 1, I[ii] = 0;);
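   // Count the nonzeros in each row. For DOF pairs shared by several
   // elements, the entry is counted only once, by the smallest element
   // containing both DOFs (via GetMinElt).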
   MFEM_FORALL(i, ndof_per_el*nel_ho,
   {
      const int ii_el = i%ndof_per_el;
      const int iel_ho = i/ndof_per_el;
      const int sii = el_dof_lex(ii_el, iel_ho);
      const int ii = (sii >= 0) ? sii : -1 - sii;
      // Get number and list of elements containing this DOF
      int i_elts[Max];
      const int i_offset = K[ii];
      const int i_next_offset = K[ii+1];
      const int i_ne = i_next_offset - i_offset;
      for (int e_i = 0; e_i < i_ne; ++e_i)
      {
         const int si_E = dof_glob2loc[i_offset+e_i]; // signed
         const int i_E = (si_E >= 0) ? si_E : -1 - si_E;
         i_elts[e_i] = i_E/ndof_per_el;
      }
      for (int j = 0; j < nnz_per_row; ++j)
      {
         int jj_el = map(j, ii_el);
         if (jj_el < 0) { continue; }
         // LDOF index of column
         const int sjj = el_dof_lex(jj_el, iel_ho); // signed
         const int jj = (sjj >= 0) ? sjj : -1 - sjj;
         const int j_offset = K[jj];
         const int j_next_offset = K[jj+1];
         const int j_ne = j_next_offset - j_offset;
         if (i_ne == 1 || j_ne == 1) // no assembly required
         {
            AtomicAdd(I[ii], 1);
         }
         else // assembly required
         {
            int j_elts[Max];
            for (int e_j = 0; e_j < j_ne; ++e_j)
            {
               const int sj_E = dof_glob2loc[j_offset+e_j]; // signed
               const int j_E = (sj_E >= 0) ? sj_E : -1 - sj_E;
               const int elt = j_E/ndof_per_el;
               j_elts[e_j] = elt;
            }
            const int min_e = GetMinElt(i_elts, i_ne, j_elts, j_ne);
            if (iel_ho == min_e) // add the nnz only once
            {
               AtomicAdd(I[ii], 1);
            }
         }
      }
   });
   // TODO: on device, this is a scan operation
   // We need to sum the entries of I; we do it on the CPU since it is
   // inherently sequential.
   auto h_I = A.HostReadWriteI();
   int sum = 0;
   for (int i = 0; i < nvdof; i++)
   {
      const int nnz = h_I[i];
      h_I[i] = sum;
      sum += nnz;
   }
   h_I[nvdof] = sum;

   // Return the number of nnz
   return h_I[nvdof];
}
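
// The TODO above notes that this is a scan; on the host the loop could also
// be written with the standard library (a sketch, not what the library
// does). An in-place exclusive scan over the nvdof + 1 row counters yields
// the same CSR offsets, since the last counter is still zero at that point:
//
//    #include <numeric>
//    std::exclusive_scan(h_I, h_I + nvdof + 1, h_I, 0);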

void BatchedLORAssembly::FillJAndData(SparseMatrix &A) const
{
   const int nvdof = fes_ho.GetVSize();
   const int ndof_per_el = fes_ho.GetFE(0)->GetDof();
   const int nel_ho = fes_ho.GetNE();
   const int nnz_per_row = sparse_mapping.Size()/ndof_per_el;

   const ElementDofOrdering ordering = ElementDofOrdering::LEXICOGRAPHIC;
   const Operator *op = fes_ho.GetElementRestriction(ordering);
   const ElementRestriction *el_restr =
      dynamic_cast<const ElementRestriction*>(op);
   MFEM_VERIFY(el_restr != nullptr, "Bad element restriction");

   const Array<int> &el_dof_lex_ = el_restr->GatherMap();
   const Array<int> &dof_glob2loc_ = el_restr->Indices();
   const Array<int> &dof_glob2loc_offsets_ = el_restr->Offsets();

   const auto el_dof_lex = Reshape(el_dof_lex_.Read(), ndof_per_el, nel_ho);
   const auto dof_glob2loc = dof_glob2loc_.Read();
   const auto K = dof_glob2loc_offsets_.Read();

   const auto V = Reshape(sparse_ij.Read(), nnz_per_row, ndof_per_el, nel_ho);
   const auto map = Reshape(sparse_mapping.Read(), nnz_per_row, ndof_per_el);

   Array<int> I_(nvdof + 1);
   const auto I = I_.Write();
   const auto J = A.WriteJ();
   auto AV = A.WriteData();

   // Copy A.I into I, use it as a temporary buffer
   {
      const auto I2 = A.ReadI();
      MFEM_FORALL(i, nvdof + 1, I[i] = I2[i];);
   }
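   // I now holds a copy of the CSR row offsets computed by FillI;
   // GetAndIncrementNnzIndex advances each row's cursor atomically as
   // entries are written, consuming I as scratch space.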

   static constexpr int Max = 16;

   MFEM_FORALL(i, ndof_per_el*nel_ho,
   {
      const int ii_el = i%ndof_per_el;
      const int iel_ho = i/ndof_per_el;
      // LDOF index of current row
      const int sii = el_dof_lex(ii_el, iel_ho); // signed
      const int ii = (sii >= 0) ? sii : -1 - sii;
      // Get number and list of elements containing this DOF
      int i_elts[Max];
      int i_B[Max];
      const int i_offset = K[ii];
      const int i_next_offset = K[ii+1];
      const int i_ne = i_next_offset - i_offset;
      for (int e_i = 0; e_i < i_ne; ++e_i)
      {
         const int si_E = dof_glob2loc[i_offset+e_i]; // signed
         const bool plus = si_E >= 0;
         const int i_E = plus ? si_E : -1 - si_E;
         i_elts[e_i] = i_E/ndof_per_el;
         const int i_Bi = i_E % ndof_per_el;
         i_B[e_i] = plus ? i_Bi : -1 - i_Bi; // encode with sign
      }
      for (int j = 0; j < nnz_per_row; ++j)
      {
         int jj_el = map(j, ii_el);
         if (jj_el < 0) { continue; }
         // LDOF index of column
         const int sjj = el_dof_lex(jj_el, iel_ho); // signed
         const int jj = (sjj >= 0) ? sjj : -1 - sjj;
         const int sgn = ((sjj >= 0 && sii >= 0) || (sjj < 0 && sii < 0)) ? 1 : -1;
         const int j_offset = K[jj];
         const int j_next_offset = K[jj+1];
         const int j_ne = j_next_offset - j_offset;
         if (i_ne == 1 || j_ne == 1) // no assembly required
         {
            const int nnz = GetAndIncrementNnzIndex(ii, I);
            J[nnz] = jj;
            AV[nnz] = sgn*V(j, ii_el, iel_ho);
         }
         else // assembly required
         {
            int j_elts[Max];
            int j_B[Max];
            for (int e_j = 0; e_j < j_ne; ++e_j)
            {
               const int sj_E = dof_glob2loc[j_offset+e_j]; // signed
               const bool plus = sj_E >= 0;
               const int j_E = plus ? sj_E : -1 - sj_E;
               j_elts[e_j] = j_E/ndof_per_el;
               const int j_Bj = j_E % ndof_per_el;
               j_B[e_j] = plus ? j_Bj : -1 - j_Bj; // encode with sign
            }
            const int min_e = GetMinElt(i_elts, i_ne, j_elts, j_ne);
            if (iel_ho == min_e) // add the nnz only once
            {
               double val = 0.0;
               for (int k = 0; k < i_ne; k++)
               {
                  const int iel_ho_2 = i_elts[k];
                  const int sii_el_2 = i_B[k]; // signed
                  const int ii_el_2 = (sii_el_2 >= 0) ? sii_el_2 : -1 - sii_el_2;
                  for (int l = 0; l < j_ne; l++)
                  {
                     const int jel_ho_2 = j_elts[l];
                     if (iel_ho_2 == jel_ho_2)
                     {
                        const int sjj_el_2 = j_B[l]; // signed
                        const int jj_el_2 = (sjj_el_2 >= 0) ? sjj_el_2 : -1 - sjj_el_2;
                        const int sgn_2 = ((sjj_el_2 >= 0 && sii_el_2 >= 0)
                                           || (sjj_el_2 < 0 && sii_el_2 < 0)) ? 1 : -1;
                        int j2 = -1;
                        // find nonzero in matrix of other element
                        for (int m = 0; m < nnz_per_row; ++m)
                        {
                           if (map(m, ii_el_2) == jj_el_2)
                           {
                              j2 = m;
                              break;
                           }
                        }
                        MFEM_ASSERT_KERNEL(j2 >= 0, "Can't find nonzero");
                        val += sgn_2*V(j2, ii_el_2, iel_ho_2);
                     }
                  }
               }
               const int nnz = GetAndIncrementNnzIndex(ii, I);
               J[nnz] = jj;
               AV[nnz] = val;
            }
         }
      }
   });
}

void BatchedLORAssembly::SparseIJToCSR(OperatorHandle &A) const
{
   const int nvdof = fes_ho.GetVSize();

   // If A contains an existing SparseMatrix, reuse it (and try to reuse its
   // I, J, A arrays if they are big enough)
   SparseMatrix *A_mat = A.Is<SparseMatrix>();
   if (!A_mat)
   {
      A_mat = new SparseMatrix;
      A.Reset(A_mat);
   }

   A_mat->OverrideSize(nvdof, nvdof);

   A_mat->GetMemoryI().New(nvdof+1, Device::GetDeviceMemoryType());
   int nnz = FillI(*A_mat);

   A_mat->GetMemoryJ().New(nnz, Device::GetDeviceMemoryType());
   A_mat->GetMemoryData().New(nnz, Device::GetDeviceMemoryType());
   FillJAndData(*A_mat);
}

template <typename LOR_KERNEL>
void BatchedLORAssembly::AssemblyKernel(BilinearForm &a)
{
   LOR_KERNEL kernel(a, fes_ho, X_vert, sparse_ij, sparse_mapping);

   const int dim = fes_ho.GetMesh()->Dimension();
   const int order = fes_ho.GetMaxElementOrder();

   if (dim == 2)
   {
      switch (order)
      {
         case 1: kernel.template Assemble2D<1>(); break;
         case 2: kernel.template Assemble2D<2>(); break;
         case 3: kernel.template Assemble2D<3>(); break;
         case 4: kernel.template Assemble2D<4>(); break;
         case 5: kernel.template Assemble2D<5>(); break;
         case 6: kernel.template Assemble2D<6>(); break;
         case 7: kernel.template Assemble2D<7>(); break;
         case 8: kernel.template Assemble2D<8>(); break;
         default: MFEM_ABORT("No kernel order " << order << "!");
      }
   }
   else if (dim == 3)
   {
      switch (order)
      {
         case 1: kernel.template Assemble3D<1>(); break;
         case 2: kernel.template Assemble3D<2>(); break;
         case 3: kernel.template Assemble3D<3>(); break;
         case 4: kernel.template Assemble3D<4>(); break;
         case 5: kernel.template Assemble3D<5>(); break;
         case 6: kernel.template Assemble3D<6>(); break;
         case 7: kernel.template Assemble3D<7>(); break;
         case 8: kernel.template Assemble3D<8>(); break;
         default: MFEM_ABORT("No kernel order " << order << "!");
      }
   }
}
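
// Note: the switches above instantiate one specialized kernel per polynomial
// order at compile time; supporting orders above 8 would require adding case
// labels here (and instantiating the corresponding kernels).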

void BatchedLORAssembly::AssembleWithoutBC(BilinearForm &a, OperatorHandle &A)
{
   // Assemble the matrix, depending on what the form is.
   // This fills in the arrays sparse_ij and sparse_mapping.
   const FiniteElementCollection *fec = fes_ho.FEColl();
   if (dynamic_cast<const H1_FECollection*>(fec))
   {
      if (HasIntegrators<DiffusionIntegrator, MassIntegrator>(a))
      {
         AssemblyKernel<BatchedLOR_H1>(a);
      }
   }
   else if (dynamic_cast<const ND_FECollection*>(fec))
   {
      if (HasIntegrators<CurlCurlIntegrator, VectorFEMassIntegrator>(a))
      {
         AssemblyKernel<BatchedLOR_ND>(a);
      }
   }
   else if (dynamic_cast<const RT_FECollection*>(fec))
   {
      if (HasIntegrators<DivDivIntegrator, VectorFEMassIntegrator>(a))
      {
         AssemblyKernel<BatchedLOR_RT>(a);
      }
   }

   return SparseIJToCSR(A);
}

#ifdef MFEM_USE_MPI
void BatchedLORAssembly::ParAssemble(
   BilinearForm &a, const Array<int> &ess_dofs, OperatorHandle &A)
{
   // Assemble the system matrix local to this partition
   OperatorHandle A_local;
   AssembleWithoutBC(a, A_local);

   ParBilinearForm *pa = dynamic_cast<ParBilinearForm*>(&a);
   MFEM_VERIFY(pa != nullptr, "Expected a ParBilinearForm");
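   // Form the parallel system matrix A = P^T A_local P, where P is the
   // prolongation of the high-order space; steal_loc_A = true lets
   // ParallelRAP reuse A_local's arrays instead of copying them.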
   pa->ParallelRAP(*A_local.As<SparseMatrix>(), A, true);

   A.As<HypreParMatrix>()->EliminateBC(ess_dofs,
                                       Operator::DiagonalPolicy::DIAG_ONE);
}
#endif

void BatchedLORAssembly::Assemble(
   BilinearForm &a, const Array<int> ess_dofs, OperatorHandle &A)
{
#ifdef MFEM_USE_MPI
   if (dynamic_cast<ParFiniteElementSpace*>(&fes_ho))
   {
      return ParAssemble(a, ess_dofs, A);
   }
#endif

   AssembleWithoutBC(a, A);
   SparseMatrix *A_mat = A.As<SparseMatrix>();

   A_mat->EliminateBC(ess_dofs, Operator::DiagonalPolicy::DIAG_KEEP);
}
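
// Note the differing diagonal policies: the serial path above keeps the
// assembled diagonal entries (DIAG_KEEP), while the parallel path in
// ParAssemble sets them to one (DIAG_ONE).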

BatchedLORAssembly::BatchedLORAssembly(FiniteElementSpace &fes_ho_)
   : fes_ho(fes_ho_)
{
   FormLORVertexCoordinates(fes_ho, X_vert);
}
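
// Note: a Gauss-Lobatto rule with n points per direction integrates
// polynomials of degree up to 2*n - 3, so requesting order 2*nd1d - 3 below
// yields exactly nd1d Gauss-Lobatto points per direction; these are the LOR
// vertex locations at which FormLORVertexCoordinates samples the mesh nodes.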

IntegrationRule GetCollocatedIntRule(FiniteElementSpace &fes)
{
   IntegrationRules irs(0, Quadrature1D::GaussLobatto);
   const Geometry::Type geom = fes.GetMesh()->GetElementGeometry(0);
   const int nd1d = fes.GetMaxElementOrder() + 1;
   return irs.Get(geom, 2*nd1d - 3);
}

} // namespace mfem