#include "../tmop.hpp"
#include "../../general/forall.hpp"
#include "../../linalg/kernels.hpp"

namespace mfem
{

MFEM_REGISTER_TMOP_KERNELS(void, AddMultGradPA_Kernel_2D,
                           const int NE,
                           const Array<double> &b_, const Array<double> &g_,
                           const DenseTensor &j_, const Vector &h_,
                           const Vector &x_, Vector &y_,
                           const int d1d, const int q1d)
{
   constexpr int DIM = 2;
   constexpr int NBZ = 1;

   const int D1D = T_D1D ? T_D1D : d1d;
   const int Q1D = T_Q1D ? T_Q1D : q1d;

   const auto b = Reshape(b_.Read(), Q1D, D1D);
   const auto g = Reshape(g_.Read(), Q1D, D1D);
   const auto J = Reshape(j_.Read(), DIM, DIM, Q1D, Q1D, NE);
   const auto H = Reshape(h_.Read(), DIM, DIM, DIM, DIM, Q1D, Q1D, NE);
   const auto X = Reshape(x_.Read(), D1D, D1D, DIM, NE);
   auto Y = Reshape(y_.ReadWrite(), D1D, D1D, DIM, NE);

   MFEM_FORALL_2D(e, NE, Q1D, Q1D, NBZ,
   {
      const int D1D = T_D1D ? T_D1D : d1d;
      const int Q1D = T_Q1D ? T_Q1D : q1d;
      constexpr int NBZ = 1;
      constexpr int MQ1 = T_Q1D ? T_Q1D : T_MAX;
      constexpr int MD1 = T_D1D ? T_D1D : T_MAX;

      MFEM_SHARED double BG[2][MQ1*MD1];
      MFEM_SHARED double XY[2][NBZ][MD1*MD1];
      MFEM_SHARED double DQ[4][NBZ][MD1*MQ1];
      MFEM_SHARED double QQ[4][NBZ][MQ1*MQ1];
      kernels::internal::LoadX<MD1,NBZ>(e,D1D,X,XY);
      kernels::internal::LoadBG<MD1,MQ1>(D1D,Q1D,b,g,BG);

      kernels::internal::GradX<MD1,MQ1,NBZ>(D1D,Q1D,BG,XY,DQ);
      kernels::internal::GradY<MD1,MQ1,NBZ>(D1D,Q1D,BG,DQ,QQ);
      MFEM_FOREACH_THREAD(qy,y,Q1D)
      {
         MFEM_FOREACH_THREAD(qx,x,Q1D)
         {
            const double *Jtr = &J(0,0,qx,qy,e);

            // Jrt = Jtr^{-1}
            double Jrt[4];
            kernels::CalcInverse<2>(Jtr, Jrt);

            // Jpr = X^T . DSh
            double Jpr[4];
            kernels::internal::PullGrad<MQ1,NBZ>(Q1D,qx,qy,QQ,Jpr);
            // Jpt = X^T . DS = (X^T . DSh) . Jrt = Jpr . Jrt
            double Jpt[4];
            kernels::Mult(2,2,2, Jpr, Jrt, Jpt);

            // B(i,j) = sum_{r,c} H(r,c,i,j) * Jpt(r,c)
            double B[4];
            DeviceMatrix M(B,2,2);
            ConstDeviceMatrix J(Jpt,2,2);
            for (int i = 0; i < DIM; i++)
            {
               for (int j = 0; j < DIM; j++)
               {
                  M(i,j) = 0.0;
                  for (int r = 0; r < DIM; r++)
                  {
                     for (int c = 0; c < DIM; c++)
                     {
                        M(i,j) += H(r,c,i,j,qx,qy,e) * J(r,c);
                     }
                  }
               }
            }

            // C = Jrt . B^t
            double C[4];
            kernels::MultABt(2,2,2, Jrt, B, C);

            // Overwrite the quadrature values QQ with the mapped gradient C
            kernels::internal::PushGrad<MQ1,NBZ>(Q1D,qx,qy, C, QQ);
         }
      }
      MFEM_SYNC_THREAD;
      kernels::internal::LoadBGt<MD1,MQ1>(D1D,Q1D,b,g,BG);
      kernels::internal::GradYt<MD1,MQ1,NBZ>(D1D,Q1D,BG,QQ,DQ);
      kernels::internal::GradXt<MD1,MQ1,NBZ>(D1D,Q1D,BG,DQ,Y,e);
   });
}
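At each quadrature point, the loop above applies the fourth-order Hessian H to the 2x2 matrix Jpt. The same double contraction, written out on plain arrays as a minimal host-side sketch (the function name contractH and its arguments are illustrative, not part of the MFEM API):

// Illustrative only: B(i,j) = sum_{r,c} H(r,c,i,j) * J(r,c) for the 2D case.
void contractH(const double Hq[2][2][2][2], const double Jq[2][2],
               double B[2][2])
{
   for (int i = 0; i < 2; i++)
   {
      for (int j = 0; j < 2; j++)
      {
         B[i][j] = 0.0;
         for (int r = 0; r < 2; r++)
         {
            for (int c = 0; c < 2; c++)
            {
               B[i][j] += Hq[r][c][i][j] * Jq[r][c];
            }
         }
      }
   }
}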
void TMOP_Integrator::AddMultGradPA_2D(const Vector &R, Vector &C) const
{
   const int N = PA.ne;
   const int D1D = PA.maps->ndof;
   const int Q1D = PA.maps->nqpt;
   const int id = (D1D << 4) | Q1D;
   const Array<double> &B = PA.maps->B;
   const Array<double> &G = PA.maps->G;
   const DenseTensor &J = PA.Jtr;
   const Vector &H = PA.H;

   MFEM_LAUNCH_TMOP_KERNEL(AddMultGradPA_Kernel_2D,id,N,B,G,J,H,R,C);
}

} // namespace mfem
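The dispatch id packs the 1D dof count and quadrature count into a single integer; MFEM_LAUNCH_TMOP_KERNEL typically uses it to select a matching compile-time specialization of the registered kernel and falls back to the run-time-sized version otherwise. A small sketch of the packing, assuming both counts fit in 4 bits (PackId and UnpackId are illustrative helpers, not MFEM functions):

// Illustrative packing/unpacking of the dispatch key used above.
#include <cassert>

int PackId(int D1D, int Q1D)
{
   assert(D1D < 16 && Q1D < 16);
   return (D1D << 4) | Q1D;      // e.g. D1D = 3, Q1D = 4  ->  id = 0x34
}

void UnpackId(int id, int &D1D, int &Q1D)
{
   D1D = (id >> 4) & 0xF;
   Q1D = id & 0xF;
}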
Symbols referenced above:

const T *Read(bool on_dev = true) const
   Shortcut for mfem::Read(a.GetMemory(), a.Size(), on_dev).

MFEM_HOST_DEVICE void MultABt(const int Aheight, const int Awidth, const int Bheight, const TA *Adata, const TB *Bdata, TC *ABtdata)
   Multiply a matrix of size Aheight x Awidth and data Adata with the transpose of a matrix of size Bheight x Awidth and data Bdata: A * Bt.

void AddMultGradPA_2D(const Vector &, Vector &) const

PA (anonymous struct member of mfem::TMOP_Integrator)
   Holds the partial-assembly data of the integrator.

virtual const double *Read(bool on_dev = true) const
   Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), on_dev).

const double *Read(bool on_dev = true) const
   Shortcut for mfem::Read(GetMemory(), TotalSize(), on_dev).

MFEM_REGISTER_TMOP_KERNELS(void, DatcSize, const int NE, const int ncomp, const int sizeidx, const DenseMatrix &w_, const Array<double> &b_, const Vector &x_, DenseTensor &j_, const int d1d, const int q1d)

DeviceTensor
   A basic generic Tensor class, appropriate for use on the GPU.

MFEM_HOST_DEVICE void Mult(const int height, const int width, const TA *data, const TX *x, TY *y)
   Matrix vector multiplication: y = A x, where the matrix A is of size height x width with given data.

virtual double *ReadWrite(bool on_dev = true)
   Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), on_dev).

MFEM_HOST_DEVICE DeviceTensor<sizeof...(Dims), T> Reshape(T *ptr, Dims... dims)
   Wrap a pointer as a DeviceTensor with automatically deduced template parameters.

DenseTensor
   Rank 3 tensor (array of matrices).
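The Reshape and Read shortcuts listed above are how the kernel views flat Vector/Array data as indexable tensors. A minimal sketch of that pattern, assuming a Vector x laid out as (D1D, D1D, 2, NE) like the nodal input above (SampleNode is an illustrative name, not part of MFEM):

// Illustrative only: wrap flat data as a tensor and read one entry.
#include "mfem.hpp"
using namespace mfem;

double SampleNode(const Vector &x, int D1D, int NE)
{
   // Host-side read; device kernels use x.Read() to get a device pointer.
   const auto X = Reshape(x.HostRead(), D1D, D1D, 2, NE);
   return X(0, 0, 0, 0);   // x-coordinate of the first node of element 0
}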