#include "../quadinterpolator.hpp"
#include "../../general/forall.hpp"
#include "../../linalg/dtensor.hpp"
#include "../../fem/kernels.hpp"
#include "../../linalg/kernels.hpp"
namespace quadrature_interpolator
{
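// 1D determinant kernel: for each element e, contract the 1D gradient matrix
// G(q,d) with the nodal coordinates X(d,e) to get dX/dxi at every quadrature
// point, which in 1D is the Jacobian determinant itself.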
static void Det1D(const int NE, const double *g, const double *x, double *y,
                  const int d1d, const int q1d)
{
   const auto G = Reshape(g, q1d, d1d);
   const auto X = Reshape(x, d1d, NE);
   auto Y = Reshape(y, q1d, NE);

   MFEM_FORALL(e, NE,
   {
      for (int q = 0; q < q1d; q++)
      {
         double u = 0.0;
         for (int d = 0; d < d1d; d++) { u += G(q, d) * X(d, e); }
         Y(q, e) = u;
      }
   });
}
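// 2D determinant kernel. The T_D1D/T_Q1D template parameters select a
// compile-time specialization (0 means "use the runtime d1d/q1d"), while
// MAX_D1D/MAX_Q1D bound the shared-memory scratch arrays used by the
// non-specialized fallback.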
template<int T_D1D = 0, int T_Q1D = 0, int MAX_D1D = 0, int MAX_Q1D = 0>
static void Det2D(const int NE,
                  const double *b,
                  const double *g,
                  const double *x,
                  double *y,
                  const int vdim = 0,
                  const int d1d = 0,
                  const int q1d = 0)
{
   constexpr int DIM = 2;
   static constexpr int NBZ = 1;

   const int D1D = T_D1D ? T_D1D : d1d;
   const int Q1D = T_Q1D ? T_Q1D : q1d;

   const auto B = Reshape(b, Q1D, D1D);
   const auto G = Reshape(g, Q1D, D1D);
   const auto X = Reshape(x, D1D, D1D, DIM, NE);
   auto Y = Reshape(y, Q1D, Q1D, NE);
   MFEM_FORALL_2D(e, NE, Q1D, Q1D, NBZ,
   {
      constexpr int MQ1 = T_Q1D ? T_Q1D : MAX_Q1D;
      constexpr int MD1 = T_D1D ? T_D1D : MAX_D1D;
      const int D1D = T_D1D ? T_D1D : d1d;
      const int Q1D = T_Q1D ? T_Q1D : q1d;

      MFEM_SHARED double BG[2][MQ1*MD1];
      MFEM_SHARED double XY[2][NBZ][MD1*MD1];
      MFEM_SHARED double DQ[4][NBZ][MD1*MQ1];
      MFEM_SHARED double QQ[4][NBZ][MQ1*MQ1];

      kernels::internal::LoadX<MD1,NBZ>(e,D1D,X,XY);
      kernels::internal::LoadBG<MD1,MQ1>(D1D,Q1D,B,G,BG);

      kernels::internal::GradX<MD1,MQ1,NBZ>(D1D,Q1D,BG,XY,DQ);
      kernels::internal::GradY<MD1,MQ1,NBZ>(D1D,Q1D,BG,DQ,QQ);

      MFEM_FOREACH_THREAD(qy,y,Q1D)
      {
         MFEM_FOREACH_THREAD(qx,x,Q1D)
         {
            double J[4];
            kernels::internal::PullGrad<MQ1,NBZ>(Q1D,qx,qy,QQ,J);
            Y(qx,qy,e) = kernels::Det<2>(J);
         }
      }
   });
}
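// 3D determinant kernel. The extra SMEM template parameter chooses between
// shared-memory scratch storage (SMEM = true) and a global-memory buffer
// d_buff split across GRID thread blocks (SMEM = false), for orders whose
// scratch tensors no longer fit in shared memory.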
template<int T_D1D = 0, int T_Q1D = 0,
         int MAX_D1D = 0, int MAX_Q1D = 0,
         bool SMEM = true>
static void Det3D(const int NE,
                  const double *b,
                  const double *g,
                  const double *x,
                  double *y,
                  const int vdim = 0,
                  const int d1d = 0,
                  const int q1d = 0,
                  Vector *d_buff = nullptr) // used only when SMEM is false
{
   constexpr int DIM = 3;
   static constexpr int MQ1 = T_Q1D ? T_Q1D : MAX_Q1D;
   static constexpr int MD1 = T_D1D ? T_D1D : MAX_D1D;
   static constexpr int MDQ = MQ1 > MD1 ? MQ1 : MD1;
   static constexpr int MSZ = MDQ * MDQ * MDQ * 9;
   static constexpr int GRID = SMEM ? 0 : 128;

   const int D1D = T_D1D ? T_D1D : d1d;
   const int Q1D = T_Q1D ? T_Q1D : q1d;

   const auto B = Reshape(b, Q1D, D1D);
   const auto G = Reshape(g, Q1D, D1D);
   const auto X = Reshape(x, D1D, D1D, D1D, DIM, NE);
   auto Y = Reshape(y, Q1D, Q1D, Q1D, NE);
   double *GM = nullptr;
   if (!SMEM)
   {
      d_buff->SetSize(2*MSZ*GRID);
      GM = d_buff->Write();
   }
   MFEM_FORALL_3D_GRID(e, NE, Q1D, Q1D, Q1D, GRID,
   {
      const int bid = MFEM_BLOCK_ID(x);
      MFEM_SHARED double BG[2][MQ1*MD1];
      MFEM_SHARED double SM0[SMEM?MSZ:1];
      MFEM_SHARED double SM1[SMEM?MSZ:1];
      double *lm0 = SMEM ? SM0 : GM + MSZ*bid;
      double *lm1 = SMEM ? SM1 : GM + MSZ*(GRID+bid);
      double (*DDD)[MD1*MD1*MD1] = (double (*)[MD1*MD1*MD1]) (lm0);
      double (*DDQ)[MD1*MD1*MQ1] = (double (*)[MD1*MD1*MQ1]) (lm1);
      double (*DQQ)[MD1*MQ1*MQ1] = (double (*)[MD1*MQ1*MQ1]) (lm0);
      double (*QQQ)[MQ1*MQ1*MQ1] = (double (*)[MQ1*MQ1*MQ1]) (lm1);

      kernels::internal::LoadX<MD1>(e,D1D,X,DDD);
      kernels::internal::LoadBG<MD1,MQ1>(D1D,Q1D,B,G,BG);

      kernels::internal::GradX<MD1,MQ1>(D1D,Q1D,BG,DDD,DDQ);
      kernels::internal::GradY<MD1,MQ1>(D1D,Q1D,BG,DDQ,DQQ);
      kernels::internal::GradZ<MD1,MQ1>(D1D,Q1D,BG,DQQ,QQQ);

      MFEM_FOREACH_THREAD(qz,z,Q1D)
      {
         MFEM_FOREACH_THREAD(qy,y,Q1D)
         {
            MFEM_FOREACH_THREAD(qx,x,Q1D)
            {
               double J[9];
               kernels::internal::PullGrad<MQ1>(Q1D, qx,qy,qz, QQQ, J);
               Y(qx,qy,qz,e) = kernels::Det<3>(J);
            }
         }
      }
   });
}
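// Dispatcher: computes the determinants of the Jacobians of an E-vector
// e_vec at the quadrature points described by maps, writing the result into
// q_det. Specialized kernels are selected by (vdim, D1D, Q1D); unmatched
// combinations fall back to the generic instantiations below.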
void TensorDeterminants(const int NE,
                        const int vdim,
                        const DofToQuad &maps,
                        const Vector &e_vec,
                        Vector &q_det,
                        Vector &d_buff) // used only by Det3D
{
   if (NE == 0) { return; }
   const int dim = maps.FE->GetDim();
   const int D1D = maps.ndof;
   const int Q1D = maps.nqpt;
   const double *B = maps.B.Read();
   const double *G = maps.G.Read();
   const double *X = e_vec.Read();
   double *Y = q_det.Write();

   const int id = (vdim<<8) | (D1D<<4) | Q1D;
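   // The key packs (vdim, D1D, Q1D) into one hex value: e.g. id == 0x246
   // means vdim = 2, D1D = 4, Q1D = 6 and selects Det2D<4,6> below.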
   if (dim == 1)
   {
      MFEM_VERIFY(D1D <= MAX_D1D, "Orders higher than " << MAX_D1D-1
                  << " are not supported!");
      MFEM_VERIFY(Q1D <= MAX_Q1D, "Quadrature rules with more than "
                  << MAX_Q1D << " 1D points are not supported!");
      Det1D(NE, G, X, Y, D1D, Q1D);
      return;
   }
   if (dim == 2)
   {
      switch (id)
      {
         case 0x222: return Det2D<2,2>(NE,B,G,X,Y);
         case 0x223: return Det2D<2,3>(NE,B,G,X,Y);
         case 0x224: return Det2D<2,4>(NE,B,G,X,Y);
         case 0x226: return Det2D<2,6>(NE,B,G,X,Y);
         case 0x234: return Det2D<3,4>(NE,B,G,X,Y);
         case 0x236: return Det2D<3,6>(NE,B,G,X,Y);
         case 0x244: return Det2D<4,4>(NE,B,G,X,Y);
         case 0x246: return Det2D<4,6>(NE,B,G,X,Y);
         case 0x256: return Det2D<5,6>(NE,B,G,X,Y);
         default:
         {
            constexpr int MD = MAX_D1D;
            constexpr int MQ = MAX_Q1D;
            MFEM_VERIFY(D1D <= MD, "Orders higher than " << MD-1
                        << " are not supported!");
            MFEM_VERIFY(Q1D <= MQ, "Quadrature rules with more than "
                        << MQ << " 1D points are not supported!");
            Det2D<0,0,MD,MQ>(NE,B,G,X,Y,vdim,D1D,Q1D);
            return;
         }
      }
   }
   if (dim == 3)
   {
      switch (id)
      {
         case 0x324: return Det3D<2,4>(NE,B,G,X,Y);
         case 0x333: return Det3D<3,3>(NE,B,G,X,Y);
         case 0x335: return Det3D<3,5>(NE,B,G,X,Y);
         case 0x336: return Det3D<3,6>(NE,B,G,X,Y);
         default:
         {
            constexpr int MD = 6;
            constexpr int MQ = 6;
            // Orders that still fit the shared-memory scratch arrays
            if (D1D <= MD && Q1D <= MQ)
            { return Det3D<0,0,MD,MQ>(NE,B,G,X,Y,vdim,D1D,Q1D); }
            // Otherwise fall back to the global-memory variant (SMEM = false)
            return Det3D<0,0,MAX_D1D,MAX_Q1D,false>(
                      NE,B,G,X,Y,vdim,D1D,Q1D,&d_buff);
         }
      }
   }
   MFEM_ABORT("Kernel " << std::hex << id << std::dec << " not supported yet");
}

} // namespace quadrature_interpolator