#include "../tmop.hpp"
#include "tmop_pa.hpp"
#include "../linearform.hpp"
#include "../../general/forall.hpp"
#include "../../linalg/kernels.hpp"

namespace mfem
{

MFEM_REGISTER_TMOP_KERNELS(void, AddMultGradPA_Kernel_C0_3D,
                           const int NE, const Array<double> &b_,
                           const DenseTensor &h0_, const Vector &r_,
                           Vector &c_, const int d1d, const int q1d)
{
   constexpr int DIM = 3;
   const int D1D = T_D1D ? T_D1D : d1d;
   const int Q1D = T_Q1D ? T_Q1D : q1d;

   const auto b = Reshape(b_.Read(), Q1D, D1D);
   const auto H0 = Reshape(h0_.Read(), DIM, DIM, Q1D, Q1D, Q1D, NE);
   const auto R = Reshape(r_.Read(), D1D, D1D, D1D, DIM, NE);
   auto Y = Reshape(c_.ReadWrite(), D1D, D1D, D1D, DIM, NE);

   mfem::forall_3D(NE, Q1D, Q1D, Q1D, [=] MFEM_HOST_DEVICE (int e)
   {
      constexpr int DIM = 3;
      const int D1D = T_D1D ? T_D1D : d1d;
      const int Q1D = T_Q1D ? T_Q1D : q1d;
      constexpr int MQ1 = T_Q1D ? T_Q1D : T_MAX;
      constexpr int MD1 = T_D1D ? T_D1D : T_MAX;

      MFEM_SHARED double B[MQ1*MD1];

      MFEM_SHARED double DDD[3][MD1*MD1*MD1];
      MFEM_SHARED double DDQ[3][MD1*MD1*MQ1];
      MFEM_SHARED double DQQ[3][MD1*MQ1*MQ1];
      MFEM_SHARED double QQQ[3][MQ1*MQ1*MQ1];

      // Load the element dofs and the 1D basis, then interpolate the input
      // to the quadrature points one direction at a time.
      kernels::internal::LoadX<MD1>(e,D1D,R,DDD);
      kernels::internal::LoadB<MD1,MQ1>(D1D,Q1D,b,B);

      kernels::internal::EvalX<MD1,MQ1>(D1D,Q1D,B,DDD,DDQ);
      kernels::internal::EvalY<MD1,MQ1>(D1D,Q1D,B,DDQ,DQQ);
      kernels::internal::EvalZ<MD1,MQ1>(D1D,Q1D,B,DQQ,QQQ);

      MFEM_FOREACH_THREAD(qz,z,Q1D)
      {
         MFEM_FOREACH_THREAD(qy,y,Q1D)
         {
            MFEM_FOREACH_THREAD(qx,x,Q1D)
            {
               double Xh[3];
               kernels::internal::PullEval<MQ1>(Q1D,qx,qy,qz,QQQ,Xh);

               // Apply the precomputed 3x3 Hessian block at this point.
               double H_data[9];
               DeviceMatrix H(H_data,3,3);
               for (int i = 0; i < DIM; i++)
               {
                  for (int j = 0; j < DIM; j++)
                  {
                     H(i,j) = H0(i,j,qx,qy,qz,e);
                  }
               }

               // p2 = H . Xh
               double p2[3];
               kernels::Mult(3,3,H_data,Xh,p2);
               kernels::internal::PushEval<MQ1>(Q1D,qx,qy,qz,p2,QQQ);
            }
         }
      }
      MFEM_SYNC_THREAD;

      // Apply the transposed basis to map back to dofs and accumulate into Y.
      kernels::internal::LoadBt<MD1,MQ1>(D1D,Q1D,b,B);
      kernels::internal::EvalXt<MD1,MQ1>(D1D,Q1D,B,QQQ,DQQ);
      kernels::internal::EvalYt<MD1,MQ1>(D1D,Q1D,B,DQQ,DDQ);
      kernels::internal::EvalZt<MD1,MQ1>(D1D,Q1D,B,DDQ,Y,e);
   });
}

void TMOP_Integrator::AddMultGradPA_C0_3D(const Vector &R, Vector &C) const
{
   const int N = PA.ne;
   const int D1D = PA.maps->ndof;
   const int Q1D = PA.maps->nqpt;
   const int id = (D1D << 4) | Q1D;
   const Array<double> &B = PA.maps->B;
   const DenseTensor &H0 = PA.H0;

   MFEM_LAUNCH_TMOP_KERNEL(AddMultGradPA_Kernel_C0_3D,id,N,B,H0,R,C);
}

} // namespace mfem
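At every quadrature point the kernel applies the precomputed 3x3 block H0(.,.,qx,qy,qz,e) to the interpolated values Xh, which is what the kernels::Mult(3,3,...) call above does. A minimal standalone sketch of that per-point operation (ApplyH0 is a hypothetical illustration helper, not an MFEM function):

#include <cstdio>

// Per-quadrature-point action, sketched outside the kernel: p2 = H * Xh,
// where H stands for the 3x3 block H0(.,.,qx,qy,qz,e) and Xh for the field
// values pulled at that point.
static void ApplyH0(const double H[3][3], const double Xh[3], double p2[3])
{
   for (int i = 0; i < 3; i++)
   {
      p2[i] = 0.0;
      for (int j = 0; j < 3; j++) { p2[i] += H[i][j] * Xh[j]; }
   }
}

int main()
{
   const double H[3][3] = {{2,0,0},{0,2,0},{0,0,2}};
   const double Xh[3] = {1,2,3};
   double p2[3];
   ApplyH0(H, Xh, p2);
   printf("%g %g %g\n", p2[0], p2[1], p2[2]); // prints 2 4 6
   return 0;
}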
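The wrapper packs the pair (D1D, Q1D) into a single kernel id via (D1D << 4) | Q1D, which MFEM_LAUNCH_TMOP_KERNEL uses to select a template specialization compiled for those orders, typically falling back to a generic version otherwise. A small sketch of how such an id round-trips, assuming both values fit in 4 bits as the shift implies (EncodeId, DecodeD1D, and DecodeQ1D are illustrative names, not MFEM API):

#include <cassert>

// Illustrative encode/decode of the (D1D, Q1D) pair used for kernel dispatch.
constexpr int EncodeId(int D1D, int Q1D) { return (D1D << 4) | Q1D; }
constexpr int DecodeD1D(int id) { return id >> 4; }
constexpr int DecodeQ1D(int id) { return id & 0xF; }

int main()
{
   const int id = EncodeId(3, 4); // e.g. 3 dofs and 4 quadrature points per 1D direction
   assert(DecodeD1D(id) == 3 && DecodeQ1D(id) == 4);
   return 0;
}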