12 #include "../tmop.hpp"
14 #include "../../general/forall.hpp"
15 #include "../../linalg/kernels.hpp"
28 constexpr
int DIM = 2;
30 const int D1D = T_D1D ? T_D1D : d1d;
31 const int Q1D = T_Q1D ? T_Q1D : q1d;
38 MFEM_FORALL_2D(e, NE, Q1D, Q1D, 1,
40 constexpr
int DIM = 2;
41 const int D1D = T_D1D ? T_D1D : d1d;
42 const int Q1D = T_Q1D ? T_Q1D : q1d;
43 constexpr
int MD1 = T_D1D ? T_D1D :
MAX_D1D;
44 constexpr
int MQ1 = T_Q1D ? T_Q1D :
MAX_Q1D;
46 MFEM_SHARED
double qd[MQ1*MD1];
49 for (
int v = 0; v <
DIM; v++)
51 MFEM_FOREACH_THREAD(qx,x,Q1D)
53 MFEM_FOREACH_THREAD(dy,y,D1D)
57 for (
int qy = 0; qy < Q1D; ++qy)
59 const double bb = B(qy,dy) * B(qy,dy);
60 QD(qx,dy) += bb * H0(v,v,qx,qy,e);
65 MFEM_FOREACH_THREAD(dy,y,D1D)
67 MFEM_FOREACH_THREAD(dx,x,D1D)
71 for (
int qx = 0; qx < Q1D; ++qx)
73 const double bb = B(qx,dx) * B(qx,dx);
87 const int D1D =
PA.maps->ndof;
88 const int Q1D =
PA.maps->nqpt;
89 const int id = (D1D << 4 ) | Q1D;
93 MFEM_LAUNCH_TMOP_KERNEL(AssembleDiagonalPA_Kernel_C0_2D,
id,N,B,H0,D);
struct mfem::TMOP_Integrator::@23 PA
void AssembleDiagonalPA_C0_2D(Vector &) const
MFEM_REGISTER_TMOP_KERNELS(void, DatcSize, const int NE, const int ncomp, const int sizeidx, const DenseMatrix &w_, const Array< double > &b_, const Vector &x_, DenseTensor &j_, const int d1d, const int q1d)
const T * Read(bool on_dev=true) const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), on_dev).
A basic generic Tensor class, appropriate for use on the GPU.
virtual double * ReadWrite(bool on_dev=true)
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), on_dev).
virtual const double * Read(bool on_dev=true) const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), on_dev).
MFEM_HOST_DEVICE DeviceTensor< sizeof...(Dims), T > Reshape(T *ptr, Dims... dims)
Wrap a pointer as a DeviceTensor with automatically deduced template parameters.