MFEM v4.5.2
Finite element discretization library
tmop_pa_h2m.cpp
// Copyright (c) 2010-2023, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.

#include "../tmop.hpp"
#include "tmop_pa.hpp"
#include "../../general/forall.hpp"
#include "../../linalg/kernels.hpp"

namespace mfem
{

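// AddMultGradPA_Kernel_2D applies the 2D TMOP gradient operator assembled in
// partial-assembly mode: for each element it evaluates the gradient of the
// input E-vector X at the quadrature points, transforms it with the inverse
// target Jacobian Jtr^{-1}, contracts it with the precomputed second-derivative
// data H, applies the transposed transformations, and accumulates into Y.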
MFEM_REGISTER_TMOP_KERNELS(void, AddMultGradPA_Kernel_2D,
                           const int NE,
                           const Array<double> &b_,
                           const Array<double> &g_,
                           const DenseTensor &j_,
                           const Vector &h_,
                           const Vector &x_,
                           Vector &y_,
                           const int d1d,
                           const int q1d)
{
   constexpr int DIM = 2;
   constexpr int NBZ = 1;

   const int D1D = T_D1D ? T_D1D : d1d;
   const int Q1D = T_Q1D ? T_Q1D : q1d;

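   // Device views: b and g are the 1D basis values and derivatives at the 1D
   // quadrature points; J holds the target Jacobians Jtr; X is the input
   // E-vector; H stores the precomputed 2x2x2x2 second-derivative data at each
   // quadrature point; Y is the output E-vector (accumulated into).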
   const auto b = Reshape(b_.Read(), Q1D, D1D);
   const auto g = Reshape(g_.Read(), Q1D, D1D);
   const auto J = Reshape(j_.Read(), DIM, DIM, Q1D, Q1D, NE);
   const auto X = Reshape(x_.Read(), D1D, D1D, DIM, NE);
   const auto H = Reshape(h_.Read(), DIM, DIM, DIM, DIM, Q1D, Q1D, NE);
   auto Y = Reshape(y_.ReadWrite(), D1D, D1D, DIM, NE);

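   // One Q1D x Q1D thread block per mesh element.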
   MFEM_FORALL_2D(e, NE, Q1D, Q1D, NBZ,
   {
      const int D1D = T_D1D ? T_D1D : d1d;
      const int Q1D = T_Q1D ? T_Q1D : q1d;
      constexpr int NBZ = 1;
      constexpr int MQ1 = T_Q1D ? T_Q1D : T_MAX;
      constexpr int MD1 = T_D1D ? T_D1D : T_MAX;

      MFEM_SHARED double BG[2][MQ1*MD1];
      MFEM_SHARED double XY[2][NBZ][MD1*MD1];
      MFEM_SHARED double DQ[4][NBZ][MD1*MQ1];
      MFEM_SHARED double QQ[4][NBZ][MQ1*MQ1];

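      // Load the element DOFs (XY) and the 1D basis/derivative matrices (BG)
      // into shared memory.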
      kernels::internal::LoadX<MD1,NBZ>(e,D1D,X,XY);
      kernels::internal::LoadBG<MD1,MQ1>(D1D,Q1D,b,g,BG);

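      // Sum-factorized evaluation of the reference-space gradient of X at the
      // quadrature points: DQ is the partial contraction, QQ the 2x2 gradient.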
      kernels::internal::GradX<MD1,MQ1,NBZ>(D1D,Q1D,BG,XY,DQ);
      kernels::internal::GradY<MD1,MQ1,NBZ>(D1D,Q1D,BG,DQ,QQ);

      MFEM_FOREACH_THREAD(qy,y,Q1D)
      {
         MFEM_FOREACH_THREAD(qx,x,Q1D)
         {
            const double *Jtr = &J(0,0,qx,qy,e);

            // Jrt = Jtr^{-1}
            double Jrt[4];
            kernels::CalcInverse<2>(Jtr, Jrt);

            // Jpr = X^T.DSh
            double Jpr[4];
            kernels::internal::PullGrad<MQ1,NBZ>(Q1D,qx,qy,QQ,Jpr);

            // Jpt = Jpr . Jrt
            double Jpt[4];
            kernels::Mult(2,2,2, Jpr, Jrt, Jpt);

            // B = Jpt : H
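            // i.e. B(i,j) = sum_{r,c} H(r,c,i,j,qx,qy,e) * Jpt(r,c)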
            double B[4];
            DeviceMatrix M(B,2,2);
            ConstDeviceMatrix J(Jpt,2,2);
            for (int i = 0; i < DIM; i++)
            {
               for (int j = 0; j < DIM; j++)
               {
                  M(i,j) = 0.0;
                  for (int r = 0; r < DIM; r++)
                  {
                     for (int c = 0; c < DIM; c++)
                     {
                        M(i,j) += H(r,c,i,j,qx,qy,e) * J(r,c);
                     }
                  }
               }
            }
            // C = Jrt . B^t
            double C[4];
            kernels::MultABt(2,2,2, Jrt, B, C);

            // Overwrite QQ = Jrt . (Jpt : H)^t
            kernels::internal::PushGrad<MQ1,NBZ>(Q1D,qx,qy, C, QQ);
         }
      }
      MFEM_SYNC_THREAD;
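      // Contract back to the DOFs with the transposed basis/derivative
      // matrices and accumulate the result into Y.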
      kernels::internal::LoadBGt<MD1,MQ1>(D1D,Q1D,b,g,BG);
      kernels::internal::GradYt<MD1,MQ1,NBZ>(D1D,Q1D,BG,QQ,DQ);
      kernels::internal::GradXt<MD1,MQ1,NBZ>(D1D,Q1D,BG,DQ,Y,e);
   });
}

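// Adds the action of the partially assembled TMOP gradient operator to C for
// the input vector R (2D case), dispatching on the encoded (D1D,Q1D) pair.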
void TMOP_Integrator::AddMultGradPA_2D(const Vector &R, Vector &C) const
{
   const int N = PA.ne;
   const int D1D = PA.maps->ndof;
   const int Q1D = PA.maps->nqpt;
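   // Pack D1D and Q1D into a single dispatch id (Q1D is assumed to fit in the
   // low 4 bits).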
   const int id = (D1D << 4 ) | Q1D;
   const DenseTensor &J = PA.Jtr;
   const Array<double> &B = PA.maps->B;
   const Array<double> &G = PA.maps->G;
   const Vector &H = PA.H;

   MFEM_LAUNCH_TMOP_KERNEL(AddMultGradPA_Kernel_2D,id,N,B,G,J,H,R,C);
}

} // namespace mfem