constexpr int DIM = 2;
constexpr int NBZ = 1;

const int D1D = T_D1D ? T_D1D : d1d;
const int Q1D = T_Q1D ? T_Q1D : q1d;

// ... (setup of the device views b, R, H0, Y and the kernel launch are elided in this listing)

   constexpr int DIM = 2;
   const int D1D = T_D1D ? T_D1D : d1d;
   const int Q1D = T_Q1D ? T_Q1D : q1d;
   constexpr int NBZ = 1;
   constexpr int MQ1 = T_Q1D ? T_Q1D : T_MAX;
   constexpr int MD1 = T_D1D ? T_D1D : T_MAX;

   // Shared memory for the 1D basis and the intermediate x/y contractions.
   MFEM_SHARED real_t B[MQ1*MD1];
   MFEM_SHARED real_t XY[2][NBZ][MD1*MD1];
   MFEM_SHARED real_t DQ[2][NBZ][MD1*MQ1];
   MFEM_SHARED real_t QQ[2][NBZ][MQ1*MQ1];

   // Load the element values and the 1D basis into shared memory.
   kernels::internal::LoadX<MD1,NBZ>(e,D1D,R,XY);
   kernels::internal::LoadB<MD1,MQ1>(D1D,Q1D,b,B);

   // Sum-factorized evaluation at the quadrature points: x direction, then y.
   kernels::internal::EvalX<MD1,MQ1,NBZ>(D1D,Q1D,B,XY,DQ);
   kernels::internal::EvalY<MD1,MQ1,NBZ>(D1D,Q1D,B,DQ,QQ);

   MFEM_FOREACH_THREAD(qy,y,Q1D)
   {
      MFEM_FOREACH_THREAD(qx,x,Q1D)
      {
         // ...
         kernels::internal::PullEval<MQ1,NBZ>(Q1D,qx,qy,QQ,Xh);
         // ...
         // Load the 2x2 block of H0 at this quadrature point.
         for (int i = 0; i < DIM; i++)
         {
            for (int j = 0; j < DIM; j++)
            {
               H(i,j) = H0(i,j,qx,qy,e);
            }
         }
         // ... (apply H to Xh, producing p2)
         kernels::internal::PushEval<MQ1,NBZ>(Q1D,qx,qy,p2,QQ);
      }
   }

   // Transposed evaluation: contract with B^t and accumulate into Y.
   kernels::internal::LoadBt<MD1,MQ1>(D1D,Q1D,b,B);
   kernels::internal::EvalXt<MD1,MQ1,NBZ>(D1D,Q1D,B,QQ,DQ);
   kernels::internal::EvalYt<MD1,MQ1,NBZ>(D1D,Q1D,B,DQ,Y,e);
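At each quadrature point, the elided lines between the H0 load and PushEval apply the 2x2 block to the pulled values; the listing also references kernels::Mult (matrix-vector multiplication y = A x), so the operation is presumably p2 = H * Xh. A minimal standalone sketch of that step follows; the helper apply_H and the array H_data are illustrative names, only Xh and p2 appear in the kernel itself.

#include <cstdio>

// Sketch of the per-quadrature-point operation p2 = H * Xh, where H is the
// 2x2 block H0(:,:,qx,qy,e) stored row-major. apply_H and H_data are
// hypothetical names, not MFEM declarations.
static void apply_H(const double *H_data, const double *Xh, double *p2)
{
   for (int i = 0; i < 2; i++)
   {
      p2[i] = 0.0;
      for (int j = 0; j < 2; j++)
      {
         p2[i] += H_data[i*2 + j] * Xh[j];
      }
   }
}

int main()
{
   const double H_data[4] = { 2.0, 0.5,
                              0.5, 1.0 };
   const double Xh[2] = { 1.0, -1.0 };
   double p2[2];
   apply_H(H_data, Xh, p2);
   std::printf("p2 = (%g, %g)\n", p2[0], p2[1]); // prints p2 = (1.5, -0.5)
   return 0;
}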
 
 
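The EvalX/EvalY and EvalXt/EvalYt calls are the standard sum-factorization pattern for tensor-product elements: the 2D evaluation is split into two 1D contractions with the basis matrix B, and the transposed pair accumulates the quadrature-point values back to the nodes. Below is a small self-contained sketch of the forward evaluation; eval_2d and the flat std::vector storage are illustrative, not MFEM's internal API.

#include <vector>
#include <cstdio>

// Evaluate a 2D tensor-product field at quadrature points by two 1D
// contractions (sum factorization): first along x, then along y.
// B is the (Q1D x D1D) 1D basis matrix, X the (D1D x D1D) nodal values,
// both stored row-major.
static std::vector<double> eval_2d(int D1D, int Q1D,
                                   const std::vector<double> &B,
                                   const std::vector<double> &X)
{
   std::vector<double> DQ(D1D*Q1D, 0.0); // "EvalX": contract the x index
   for (int dy = 0; dy < D1D; dy++)
      for (int qx = 0; qx < Q1D; qx++)
         for (int dx = 0; dx < D1D; dx++)
            DQ[dy*Q1D + qx] += B[qx*D1D + dx] * X[dy*D1D + dx];

   std::vector<double> QQ(Q1D*Q1D, 0.0); // "EvalY": contract the y index
   for (int qy = 0; qy < Q1D; qy++)
      for (int qx = 0; qx < Q1D; qx++)
         for (int dy = 0; dy < D1D; dy++)
            QQ[qy*Q1D + qx] += B[qy*D1D + dy] * DQ[dy*Q1D + qx];

   return QQ;
}

int main()
{
   // Identity basis with D1D = Q1D = 2, so the output reproduces the input.
   const std::vector<double> B = { 1.0, 0.0,
                                   0.0, 1.0 };
   const std::vector<double> X = { 1.0, 2.0,
                                   3.0, 4.0 };
   const std::vector<double> QQ = eval_2d(2, 2, B, X);
   std::printf("QQ = %g %g %g %g\n", QQ[0], QQ[1], QQ[2], QQ[3]);
   return 0;
}

With B set to the identity the quadrature values equal the nodal values, which is a quick way to sanity-check the index ordering of the two contractions.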