#include "../tmop.hpp"
#include "../../general/forall.hpp"
#include "../../linalg/kernels.hpp"
#include "../../linalg/dinvariants.hpp"

namespace mfem
{

using Args = kernels::InvariantsEvaluator2D::Buffers;
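
// Metric 1: mu_1 = I1 = |J|^2, hence P_1 = dI1.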
static MFEM_HOST_DEVICE inline
void EvalP_001(const double *Jpt, double *P)
{
   double dI1[4];
   kernels::InvariantsEvaluator2D ie(Args().J(Jpt).dI1(dI1));
   kernels::Set(2,2, 1.0, ie.Get_dI1(), P);
}
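
// Metric 2: mu_2 = 0.5 * I1b - 1, hence P_2 = 0.5 * dI1b.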
static MFEM_HOST_DEVICE inline
void EvalP_002(const double *Jpt, double *P)
{
   double dI1b[4], dI2b[4];
   kernels::InvariantsEvaluator2D ie(Args().J(Jpt).dI1b(dI1b).dI2b(dI2b));
   kernels::Set(2,2, 0.5, ie.Get_dI1b(), P);
}
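
// Metric 7: mu_7 = I1 * (1 + 1/I2) - 4, hence
// P_7 = (1 + 1/I2) * dI1 - (I1/I2^2) * dI2.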
static MFEM_HOST_DEVICE inline
void EvalP_007(const double *Jpt, double *P)
{
   double dI1[4], dI2[4], dI2b[4];
   kernels::InvariantsEvaluator2D ie(Args().J(Jpt).dI1(dI1)
                                     .dI2(dI2).dI2b(dI2b));
   const double I2 = ie.Get_I2();
   kernels::Add(2,2, 1.0 + 1.0 / I2, ie.Get_dI1(),
                -ie.Get_I1() / (I2*I2), ie.Get_dI2(), P);
}
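
// Metric 77: mu_77 = 0.5 * (I2 + 1/I2) - 1, hence
// P_77 = 0.5 * (1 - 1/I2^2) * dI2.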
static MFEM_HOST_DEVICE inline
void EvalP_077(const double *Jpt, double *P)
{
   double dI2[4], dI2b[4];
   kernels::InvariantsEvaluator2D ie(Args().J(Jpt).dI2(dI2).dI2b(dI2b));
   const double I2 = ie.Get_I2();
   kernels::Set(2,2, 0.5 * (1.0 - 1.0 / (I2 * I2)), ie.Get_dI2(), P);
}
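
// Metric 80: mu_80 = (1 - gamma) * mu_2 + gamma * mu_77, hence P_80 is the
// same convex combination of P_2 and P_77.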
static MFEM_HOST_DEVICE inline
void EvalP_080(const double *Jpt, const double gamma, double *P)
{
   double dI1b[4], dI2[4], dI2b[4];
   kernels::InvariantsEvaluator2D ie(Args().J(Jpt)
                                     .dI1b(dI1b).dI2(dI2).dI2b(dI2b));

   kernels::Set(2,2, (1.0 - gamma) * 0.5, ie.Get_dI1b(), P);

   const double I2 = ie.Get_I2();
   kernels::Add(2,2, gamma * 0.5 * (1.0 - 1.0 / (I2 * I2)), ie.Get_dI2(), P);
}
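
// Partial-assembly action of the metric term: for each element, evaluate the
// gradient of X at the quadrature points, form Jpt, compute P = dmu/dJpt for
// the requested metric id, and accumulate the weighted result into Y.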
MFEM_REGISTER_TMOP_KERNELS(void, AddMultPA_Kernel_2D,
                           const double metric_normal,
                           const double metric_param,
                           const int mid,
                           const int NE,
                           const DenseTensor &j_,
                           const Array<double> &w_,
                           const Array<double> &b_,
                           const Array<double> &g_,
                           const Vector &x_,
                           Vector &y_,
                           const int d1d,
                           const int q1d)
{
   MFEM_VERIFY(mid == 1 || mid == 2 || mid == 7 || mid == 77 || mid == 80,
               "Metric not yet implemented!");

   constexpr int DIM = 2;
   constexpr int NBZ = 1;

   const int D1D = T_D1D ? T_D1D : d1d;
   const int Q1D = T_Q1D ? T_Q1D : q1d;

   const auto J = Reshape(j_.Read(), DIM, DIM, Q1D, Q1D, NE);
   const auto W = Reshape(w_.Read(), Q1D, Q1D);
   const auto b = Reshape(b_.Read(), Q1D, D1D);
   const auto g = Reshape(g_.Read(), Q1D, D1D);
   const auto X = Reshape(x_.Read(), D1D, D1D, DIM, NE);
   auto Y = Reshape(y_.ReadWrite(), D1D, D1D, DIM, NE);

   MFEM_FORALL_2D(e, NE, Q1D, Q1D, NBZ,
   {
      constexpr int NBZ = 1;
      constexpr int MQ1 = T_Q1D ? T_Q1D : T_MAX;
      constexpr int MD1 = T_D1D ? T_D1D : T_MAX;
      const int D1D = T_D1D ? T_D1D : d1d;
      const int Q1D = T_Q1D ? T_Q1D : q1d;

      MFEM_SHARED double BG[2][MQ1*MD1];
      MFEM_SHARED double XY[2][NBZ][MD1*MD1];
      MFEM_SHARED double DQ[4][NBZ][MD1*MQ1];
      MFEM_SHARED double QQ[4][NBZ][MQ1*MQ1];

      kernels::internal::LoadX<MD1,NBZ>(e,D1D,X,XY);
      kernels::internal::LoadBG<MD1,MQ1>(D1D,Q1D,b,g,BG);

      kernels::internal::GradX<MD1,MQ1,NBZ>(D1D,Q1D,BG,XY,DQ);
      kernels::internal::GradY<MD1,MQ1,NBZ>(D1D,Q1D,BG,DQ,QQ);
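
      // Quadrature-point loop: evaluate the pointwise action P(Jpt) and store
      // the result back into QQ for the transposed contraction below.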
      MFEM_FOREACH_THREAD(qy,y,Q1D)
      {
         MFEM_FOREACH_THREAD(qx,x,Q1D)
         {
            const double *Jtr = &J(0,0,qx,qy,e);
            const double detJtr = kernels::Det<2>(Jtr);
            const double weight = metric_normal * W(qx,qy) * detJtr;

            // Jrt = Jtr^{-1}
            double Jrt[4];
            kernels::CalcInverse<2>(Jtr, Jrt);

            // Jpr = X^T.DSh
            double Jpr[4];
            kernels::internal::PullGrad<MQ1,NBZ>(Q1D,qx,qy,QQ,Jpr);

            // Jpt = X^T.DS = (X^T.DSh).Jrt = Jpr.Jrt
            double Jpt[4];
            kernels::Mult(2,2,2, Jpr, Jrt, Jpt);

            // P = dmu/dJpt for the selected metric, scaled by the quadrature weight
            double P[4];
            if (mid ==  1) { EvalP_001(Jpt, P); }
            if (mid ==  2) { EvalP_002(Jpt, P); }
            if (mid ==  7) { EvalP_007(Jpt, P); }
            if (mid == 77) { EvalP_077(Jpt, P); }
            if (mid == 80) { EvalP_080(Jpt, metric_param, P); }
            for (int i = 0; i < 4; i++) { P[i] *= weight; }

            // A = Jrt . P^t; the contribution DSh . A is accumulated into Y below
            double A[4];
            kernels::MultABt(2, 2, 2, Jrt, P, A);
            kernels::internal::PushGrad<MQ1,NBZ>(Q1D,qx,qy,A,QQ);
         }
      }
      MFEM_SYNC_THREAD;
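
      // Contract back to the nodal space with the transposed basis and
      // gradient matrices, accumulating the element contribution into Y.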
      kernels::internal::LoadBGt<MD1,MQ1>(D1D,Q1D,b,g,BG);
      kernels::internal::GradYt<MD1,MQ1,NBZ>(D1D,Q1D,BG,QQ,DQ);
      kernels::internal::GradXt<MD1,MQ1,NBZ>(D1D,Q1D,BG,DQ,Y,e);
   });
}
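
// Host-side dispatch: gather the partial-assembly data, encode (D1D,Q1D) into
// 'id', resolve the metric id (and gamma for metric 80), and launch the kernel.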
void TMOP_Integrator::AddMultPA_2D(const Vector &X, Vector &Y) const
{
   const int N = PA.ne;
   const int D1D = PA.maps->ndof;
   const int Q1D = PA.maps->nqpt;
   const int id = (D1D << 4) | Q1D;
   const int M = metric->Id();
   const double mn = metric_normal;
   const DenseTensor &J = PA.Jtr;
   const Array<double> &W = PA.ir->GetWeights();
   const Array<double> &B = PA.maps->B;
   const Array<double> &G = PA.maps->G;

   double mp = 0.0;
   if (auto m = dynamic_cast<TMOP_Metric_080 *>(metric)) { mp = m->GetGamma(); }

   MFEM_LAUNCH_TMOP_KERNEL(AddMultPA_Kernel_2D,id,mn,mp,M,N,J,W,B,G,X,Y);
}

} // namespace mfem