#include "../tmop.hpp"
#include "../linearform.hpp"
#include "../../general/forall.hpp"
#include "../../linalg/kernels.hpp"

MFEM_REGISTER_TMOP_KERNELS(void, SetupGradPA_C0_2D,
                           const double lim_normal,
                           const Vector &lim_dist,
                           const Vector &c0_,
                           const int NE,
                           const DenseTensor &j_,
                           const Array<double> &w_,
                           const Array<double> &b_,
                           const Array<double> &bld_,
                           const Vector &x0_,
                           const Vector &x1_,
                           Vector &h0_,
                           const bool exp_lim,
                           const int d1d,
                           const int q1d)
{
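   // Partial-assembly setup of the 2D limiting term's second derivatives:
   // for each element and quadrature point, compute the 2x2 Hessian of the
   // limiter and store it in h0_, scaled by the quadrature weight, det(Jtr),
   // lim_normal and the limiting coefficient c0.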
   constexpr int DIM = 2;
   constexpr int NBZ = 1;
   const int D1D = T_D1D ? T_D1D : d1d;
   const int Q1D = T_Q1D ? T_Q1D : q1d;
   const bool const_c0 = c0_.Size() == 1;
   const auto C0 = const_c0 ?
                   Reshape(c0_.Read(), 1, 1, 1) :
                   Reshape(c0_.Read(), Q1D, Q1D, NE);
   const auto LD = Reshape(lim_dist.Read(), D1D, D1D, NE);
   const auto J = Reshape(j_.Read(), DIM, DIM, Q1D, Q1D, NE);
   const auto W = Reshape(w_.Read(), Q1D, Q1D);
   const auto b = Reshape(b_.Read(), Q1D, D1D);
   const auto bld = Reshape(bld_.Read(), Q1D, D1D);
   const auto X0 = Reshape(x0_.Read(), D1D, D1D, DIM, NE);
   const auto X1 = Reshape(x1_.Read(), D1D, D1D, DIM, NE);
   auto H0 = Reshape(h0_.Write(), DIM, DIM, Q1D, Q1D, NE);
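   // One 2D thread block of Q1D x Q1D threads per mesh element.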
   MFEM_FORALL_2D(e, NE, Q1D, Q1D, NBZ,
   {
      const int D1D = T_D1D ? T_D1D : d1d;
      const int Q1D = T_Q1D ? T_Q1D : q1d;
      constexpr int NBZ = 1;
      constexpr int MQ1 = T_Q1D ? T_Q1D : T_MAX;
      constexpr int MD1 = T_D1D ? T_D1D : T_MAX;
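      // Shared-memory scratch: 1D basis matrices (B for the mesh nodes, BLD
      // for the limiting distance) and per-element values at the nodes (XY*),
      // after the x-contraction (DQ*) and at the quadrature points (QQ*).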
      MFEM_SHARED double B[MQ1*MD1];
      MFEM_SHARED double BLD[MQ1*MD1];

      MFEM_SHARED double XY[NBZ][MD1*MD1];
      MFEM_SHARED double DQ[NBZ][MD1*MQ1];
      MFEM_SHARED double QQ[NBZ][MQ1*MQ1];

      MFEM_SHARED double XY0[2][NBZ][MD1*MD1];
      MFEM_SHARED double DQ0[2][NBZ][MD1*MQ1];
      MFEM_SHARED double QQ0[2][NBZ][MQ1*MQ1];

      MFEM_SHARED double XY1[2][NBZ][MD1*MD1];
      MFEM_SHARED double DQ1[2][NBZ][MD1*MQ1];
      MFEM_SHARED double QQ1[2][NBZ][MQ1*MQ1];
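      // Load the limiting distances and the nodal coordinates of the
      // reference (X0) and current (X1) positions into shared memory.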
      kernels::internal::LoadX<MD1,NBZ>(e,D1D,LD,XY);
      kernels::internal::LoadX<MD1,NBZ>(e,D1D,X0,XY0);
      kernels::internal::LoadX<MD1,NBZ>(e,D1D,X1,XY1);
      kernels::internal::LoadB<MD1,MQ1>(D1D,Q1D,b,B);
      kernels::internal::LoadB<MD1,MQ1>(D1D,Q1D,bld,BLD);
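      // Tensor-product interpolation (x-contraction, then y-contraction) of
      // dist, x0 and x1 from the nodes to the quadrature points.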
      kernels::internal::EvalX<MD1,MQ1,NBZ>(D1D,Q1D,BLD,XY,DQ);
      kernels::internal::EvalY<MD1,MQ1,NBZ>(D1D,Q1D,BLD,DQ,QQ);

      kernels::internal::EvalX<MD1,MQ1,NBZ>(D1D,Q1D,B,XY0,DQ0);
      kernels::internal::EvalY<MD1,MQ1,NBZ>(D1D,Q1D,B,DQ0,QQ0);

      kernels::internal::EvalX<MD1,MQ1,NBZ>(D1D,Q1D,B,XY1,DQ1);
      kernels::internal::EvalY<MD1,MQ1,NBZ>(D1D,Q1D,B,DQ1,QQ1);
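      // Loop over the quadrature points of the element.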
      MFEM_FOREACH_THREAD(qy,y,Q1D)
      {
         MFEM_FOREACH_THREAD(qx,x,Q1D)
         {
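            // Quadrature weight times det(Jtr), then scaled by the limiting
            // normalization and the (possibly constant) coefficient c0.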
            const double *Jtr = &J(0,0,qx,qy,e);
            const double detJtr = kernels::Det<2>(Jtr);
            const double weight = W(qx,qy) * detJtr;
            const double coeff0 = const_c0 ? C0(0,0,0) : C0(qx,qy,e);
            const double weight_m = weight * lim_normal * coeff0;
            double D, p0[2], p1[2];
            kernels::internal::PullEval<MQ1,NBZ>(Q1D,qx,qy,QQ,D);
            kernels::internal::PullEval<MQ1,NBZ>(Q1D,qx,qy,QQ0,p0);
            kernels::internal::PullEval<MQ1,NBZ>(Q1D,qx,qy,QQ1,p1);

            const double dist = D;
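            // Second derivatives of the limiter with respect to (p1 - p0):
            // quadratic limiter   -> (1/dist^2) * I,
            // exponential limiter -> Hessian of exp(10*(|p1-p0|^2/dist^2 - 1)).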
            double grad_grad[4];

            if (!exp_lim)
            {
               const double c = 1.0 / (dist * dist);
               kernels::Diag<2>(c, grad_grad);
            }
            else
            {
               double tmp[2];
               kernels::Subtract<2>(1.0, p1, p0, tmp);
               double dsq = kernels::DistanceSquared<2>(p1,p0);
               double dist_squared = dist*dist;
               double dist_squared_squared = dist_squared*dist_squared;
               double f = exp(10.0*((dsq / dist_squared)-1.0));
               grad_grad[0] = ((400.0*tmp[0]*tmp[0]*f)/dist_squared_squared)+
                              (20.0*f/dist_squared);
               grad_grad[1] = (400.0*tmp[0]*tmp[1]*f)/dist_squared_squared;
               grad_grad[2] = grad_grad[1];
               grad_grad[3] = ((400.0*tmp[1]*tmp[1]*f)/dist_squared_squared)+
                              (20.0*f/dist_squared);
            }
            ConstDeviceMatrix gg(grad_grad, DIM, DIM);
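            // Store the weighted 2x2 limiter Hessian for this quadrature point.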
            for (int i = 0; i < DIM; i++)
            {
               for (int j = 0; j < DIM; j++)
               {
                  H0(i,j,qx,qy,e) = weight_m * gg(i,j);
               }
            }
         }
      }
   });
}
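// Host-side entry point: collects the TMOP partial-assembly data and
// dispatches the (D1D,Q1D)-templated kernel above.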
void TMOP_Integrator::AssembleGradPA_C0_2D(const Vector &X) const
{
   MFEM_CONTRACT_VAR(X);
   const int N = PA.ne;
   const int D1D = PA.maps_lim->ndof;
   const int Q1D = PA.maps_lim->nqpt;
   const int id = (D1D << 4) | Q1D;
   const double ln = lim_normal;
   const Vector &LD = PA.LD;
   const Vector &C0 = PA.C0;
   const DenseTensor &J = PA.Jtr;
   const Array<double> &W = PA.ir->GetWeights();
   const Array<double> &B = PA.maps->B;
   const Array<double> &BLD = PA.maps_lim->B;
   const Vector &X0 = PA.X0;
   Vector &H0 = PA.H0;
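   // The exponential-limiter branch is selected when the integrator's
   // limiting function is a TMOP_ExponentialLimiter.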
   auto el = dynamic_cast<TMOP_ExponentialLimiter *>(lim_func);
   const bool exp_lim = (el) ? true : false;
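   // Launch the kernel instantiation encoded by id = (D1D << 4) | Q1D.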
   MFEM_LAUNCH_TMOP_KERNEL(SetupGradPA_C0_2D,id,ln,LD,C0,N,J,W,B,BLD,
                           X0,X,H0,exp_lim);
}