12 #include "../tmop.hpp"
14 #include "../../general/forall.hpp"
15 #include "../../linalg/kernels.hpp"
31 constexpr
int DIM = 3;
32 const int D1D = T_D1D ? T_D1D : d1d;
33 const int Q1D = T_Q1D ? T_Q1D : q1d;
42 MFEM_FORALL_3D(e, NE, Q1D, Q1D, Q1D,
44 constexpr
int DIM = 3;
45 const int D1D = T_D1D ? T_D1D : d1d;
46 const int Q1D = T_Q1D ? T_Q1D : q1d;
47 constexpr
int MQ1 = T_Q1D ? T_Q1D : T_MAX;
48 constexpr
int MD1 = T_D1D ? T_D1D : T_MAX;
50 MFEM_SHARED
double BG[2][MQ1*MD1];
51 MFEM_SHARED
double DDD[3][MD1*MD1*MD1];
52 MFEM_SHARED
double DDQ[9][MD1*MD1*MQ1];
53 MFEM_SHARED
double DQQ[9][MD1*MQ1*MQ1];
54 MFEM_SHARED
double QQQ[9][MQ1*MQ1*MQ1];
56 kernels::internal::LoadX<MD1>(e,D1D,X,DDD);
57 kernels::internal::LoadBG<MD1,MQ1>(D1D,Q1D,
b,g,BG);
59 kernels::internal::GradX<MD1,MQ1>(D1D,Q1D,BG,DDD,DDQ);
60 kernels::internal::GradY<MD1,MQ1>(D1D,Q1D,BG,DDQ,DQQ);
61 kernels::internal::GradZ<MD1,MQ1>(D1D,Q1D,BG,DQQ,QQQ);
63 MFEM_FOREACH_THREAD(qz,z,Q1D)
65 MFEM_FOREACH_THREAD(qy,y,Q1D)
67 MFEM_FOREACH_THREAD(qx,x,Q1D)
69 const double *Jtr = &J(0,0,qx,qy,qz,e);
73 kernels::CalcInverse<3>(Jtr, Jrt);
77 kernels::internal::PullGrad<MQ1>(Q1D, qx,qy,qz, QQQ, Jpr);
87 for (
int i = 0; i <
DIM; i++)
89 for (
int j = 0; j <
DIM; j++)
92 for (
int r = 0; r <
DIM; r++)
94 for (
int c = 0; c <
DIM; c++)
96 M(i,j) += H(r,c,i,j,qx,qy,qz,e) * J(r,c);
105 kernels::internal::PushGrad<MQ1>(Q1D, qx,qy,qz, A, QQQ);
110 kernels::internal::LoadBGt<MD1,MQ1>(D1D,Q1D,
b,g,BG);
111 kernels::internal::GradZt<MD1,MQ1>(D1D,Q1D,BG,QQQ,DQQ);
112 kernels::internal::GradYt<MD1,MQ1>(D1D,Q1D,BG,DQQ,DDQ);
113 kernels::internal::GradXt<MD1,MQ1>(D1D,Q1D,BG,DDQ,Y,e);
120 const int D1D =
PA.maps->ndof;
121 const int Q1D =
PA.maps->nqpt;
122 const int id = (D1D << 4 ) | Q1D;
128 MFEM_LAUNCH_TMOP_KERNEL(AddMultGradPA_Kernel_3D,
id,N,B,G,J,H,R,C);
MFEM_HOST_DEVICE void MultABt(const int Aheight, const int Awidth, const int Bheight, const TA *Adata, const TB *Bdata, TC *ABtdata)
Multiply a matrix of size Aheight x Awidth and data Adata with the transpose of a matrix of size Bheight x Awidth and data Bdata: ABt = A * B^t.
struct mfem::TMOP_Integrator::@23 PA
const double * Read(bool on_dev=true) const
Shortcut for mfem::Read( GetMemory(), TotalSize(), on_dev).
MFEM_REGISTER_TMOP_KERNELS(void, DatcSize, const int NE, const int ncomp, const int sizeidx, const DenseMatrix &w_, const Array< double > &b_, const Vector &x_, DenseTensor &j_, const int d1d, const int q1d)
const T * Read(bool on_dev=true) const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), on_dev).
A basic generic Tensor class, appropriate for use on the GPU.
MFEM_HOST_DEVICE void Mult(const int height, const int width, const TA *data, const TX *x, TY *y)
Matrix vector multiplication: y = A x, where the matrix A is of size height x width with the given data.
virtual double * ReadWrite(bool on_dev=true)
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), on_dev).
void AddMultGradPA_3D(const Vector &, Vector &) const
Rank 3 tensor (array of matrices)
virtual const double * Read(bool on_dev=true) const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), on_dev).
MFEM_HOST_DEVICE DeviceTensor< sizeof...(Dims), T > Reshape(T *ptr, Dims...dims)
Wrap a pointer as a DeviceTensor with automatically deduced template parameters.