MFEM v4.9.0
Finite element discretization library
qfunction_transform.hpp
// Copyright (c) 2010-2025, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.
#pragma once

#include "util.hpp"
#include "tensor.hpp"

namespace mfem::future
{

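// process_qf_arg unpacks one field's data at a single quadrature point into
// the argument type expected by a quadrature function. In the overloads below,
// u carries values and v (where present) carries the directional-derivative
// seeds stored in the dual-number arguments; argument types without a matching
// overload land in the static_assert fallback and fail at compile time.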
template <typename T0, typename T1, typename T2>
MFEM_HOST_DEVICE
void process_qf_arg(const T0 &, const T1 &, T2 &)
{
   static_assert(dfem::always_false<T0, T1, T2>,
                 "process_qf_arg not implemented for arg type");
}

template <typename T>
MFEM_HOST_DEVICE
void process_qf_arg(
   const DeviceTensor<1, T> &u,
   const DeviceTensor<1, T> &v,
   T &arg)
{
   arg = u(0);
}

template <typename T, int n, int m>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1> &u,
   tensor<dual<T, T>, n, m> &arg)
{
   for (int i = 0; i < m; i++)
   {
      for (int j = 0; j < n; j++)
      {
         arg(j, i).value = u((i * n) + j);
      }
   }
}

template <typename T>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1> &u,
   dual<T, T> &arg)
{
   arg.value = u(0);
}

template <typename T>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1> &u,
   const DeviceTensor<1> &v,
   dual<T, T> &arg)
{
   arg.value = u(0);
   arg.gradient = v(0);
}

template <typename T, int n>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1> &u,
   const DeviceTensor<1> &v,
   tensor<dual<T, T>, n> &arg)
{
   for (int i = 0; i < n; i++)
   {
      arg(i).value = u(i);
      arg(i).gradient = v(i);
   }
}

template <typename T, int n, int m>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1> &u,
   const DeviceTensor<1> &v,
   tensor<dual<T, T>, n, m> &arg)
{
   for (int i = 0; i < m; i++)
   {
      for (int j = 0; j < n; j++)
      {
         arg(j, i).value = u((i * n) + j);
         arg(j, i).gradient = v((i * n) + j);
      }
   }
}

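// process_qf_result copies the value part of a dual-valued quadrature function
// result into a flat 1D output view, flattening matrices column-major.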
template <typename T, int n>
MFEM_HOST_DEVICE inline
void process_qf_result(
   DeviceTensor<1, T> &r,
   const tensor<dual<T, T>, n> &x)
{
   for (size_t i = 0; i < n; i++)
   {
      r(i) = x(i).value;
   }
}

template <typename T, int n, int m>
MFEM_HOST_DEVICE inline
void process_qf_result(
   DeviceTensor<1, T> &r,
   const tensor<dual<T, T>, n, m> &x)
{
   for (size_t i = 0; i < n; i++)
   {
      for (size_t j = 0; j < m; j++)
      {
         r(i + n * j) = x(i, j).value;
      }
   }
}

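// The wrappers below slice the qp-th column out of a (size x num_qp) field
// view and forward it to the 1D overloads above; process_qf_args does this for
// every entry of the quadrature function's argument tuple via for_constexpr.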
template <typename arg_type>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<2> &u,
   const DeviceTensor<2> &v,
   arg_type &arg,
   const int &qp)
{
   const auto u_qp = Reshape(&u(0, qp), u.GetShape()[0]);
   const auto v_qp = Reshape(&v(0, qp), v.GetShape()[0]);
   process_qf_arg(u_qp, v_qp, arg);
}

template <size_t num_fields, typename qf_args>
MFEM_HOST_DEVICE inline
void process_qf_args(
   const std::array<DeviceTensor<2>, num_fields> &u,
   const std::array<DeviceTensor<2>, num_fields> &v,
   qf_args &args,
   const int &qp)
{
   for_constexpr<tuple_size<qf_args>::value>([&](auto i)
   {
      process_qf_arg(u[i], v[i], get<i>(args), qp);
   });
}

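// process_derivative_from_native_dual extracts the gradient part of a
// dual-valued result into a flat 1D view, mirroring process_qf_result above.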
template <typename T, int n, int m>
MFEM_HOST_DEVICE inline
void process_derivative_from_native_dual(
   DeviceTensor<1, T> &r,
   const tensor<dual<T, T>, n, m> &x)
{
   for (size_t i = 0; i < n; i++)
   {
      for (size_t j = 0; j < m; j++)
      {
         r(i + n * j) = x(i, j).gradient;
      }
   }
}

template <typename T, int n>
MFEM_HOST_DEVICE inline
void process_derivative_from_native_dual(
   DeviceTensor<1, T> &r,
   const tensor<dual<T, T>, n> &x)
{
   for (size_t i = 0; i < n; i++)
   {
      r(i) = x(i).gradient;
   }
}

template <typename T>
MFEM_HOST_DEVICE inline
void process_derivative_from_native_dual(
   DeviceTensor<1, T> &r,
   const dual<T, T> &x)
{
   r(0) = x.gradient;
}

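// Value-only process_qf_arg overloads, used when no derivative seeding is
// required. As before, unsupported argument types are rejected at compile time
// by the static_assert fallback.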
template <typename T0, typename T1>
MFEM_HOST_DEVICE inline
void process_qf_arg(const T0 &, T1 &)
{
   static_assert(dfem::always_false<T0, T1>,
                 "process_qf_arg not implemented for arg type");
}

template <typename T>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1, T> &u,
   T &arg)
{
   arg = u(0);
}

template <typename T>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1, T> &u,
   tensor<T> &arg)
{
   arg(0) = u(0);
}

template <typename T, int n>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1> &u,
   tensor<T, n> &arg)
{
   for (int i = 0; i < n; i++)
   {
      arg(i) = u(i);
   }
}

template <typename T, int n, int m>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1> &u,
   tensor<T, n, m> &arg)
{
   for (int i = 0; i < m; i++)
   {
      for (int j = 0; j < n; j++)
      {
         arg(j, i) = u((i * n) + j);
      }
   }
}

template <typename arg_type>
MFEM_HOST_DEVICE inline
void process_qf_arg(const DeviceTensor<2> &u, arg_type &arg, int qp)
{
   const auto u_qp = Reshape(&u(0, qp), u.GetShape()[0]);
   process_qf_arg(u_qp, arg);
}

template <size_t num_fields, typename qf_args>
MFEM_HOST_DEVICE inline
void process_qf_args(
   const std::array<DeviceTensor<2>, num_fields> &u,
   qf_args &args,
   const int &qp)
{
   for_constexpr<tuple_size<qf_args>::value>([&](auto i)
   {
      process_qf_arg(u[i], get<i>(args), qp);
   });
}

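// process_qf_result overloads for plain (non-dual) results: write the
// quadrature function's return value into a flat 1D output view, column-major
// for matrices. Unsupported result types hit the static_assert fallback.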
template <typename T0, typename T1>
MFEM_HOST_DEVICE inline
Vector process_qf_result(const T0 &, const T1 &)
{
   static_assert(dfem::always_false<T0, T1>,
                 "process_qf_result not implemented for result type");
   return Vector{};
}

template <typename T>
MFEM_HOST_DEVICE inline
void process_qf_result(
   DeviceTensor<1, T> &r,
   const T &x)
{
   r(0) = x;
}

template <typename T>
MFEM_HOST_DEVICE inline
void process_qf_result(
   DeviceTensor<1, T> &r,
   const dual<T, T> &x)
{
   r(0) = x.value;
}

template <typename T>
MFEM_HOST_DEVICE inline
void process_qf_result(
   DeviceTensor<1, T> &r,
   const tensor<T> &x)
{
   r(0) = x(0);
}

template <typename T, int n>
MFEM_HOST_DEVICE inline
void process_qf_result(
   DeviceTensor<1, T> &r,
   const tensor<T, n> &x)
{
   for (size_t i = 0; i < n; i++)
   {
      r(i) = x(i);
   }
}

template <typename T, int n, int m>
MFEM_HOST_DEVICE inline
void process_qf_result(
   DeviceTensor<1, T> &r,
   const tensor<T, n, m> &x)
{
   for (size_t i = 0; i < n; i++)
   {
      for (size_t j = 0; j < m; j++)
      {
         r(i + n * j) = x(i, j);
      }
   }
}

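// Three-argument overload for plain tensor arguments: only the value view u is
// read; the direction view v is accepted for interface uniformity with the
// dual-number overloads and otherwise ignored.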
template <typename T, int n, int m>
MFEM_HOST_DEVICE inline
void process_qf_arg(
   const DeviceTensor<1, T> &u,
   const DeviceTensor<1, T> &v,
   tensor<T, n, m> &arg)
{
   for (int i = 0; i < m; i++)
   {
      for (int j = 0; j < n; j++)
      {
         arg(j, i) = u((i * n) + j);
      }
   }
}

} // namespace mfem::future
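
For orientation, here is a minimal sketch (not part of the header) of how these helpers compose inside a quadrature-point kernel. The wrapper name apply_qf_at_qp and the use of apply over the argument tuple are assumptions for illustration; only process_qf_args, process_qf_result, and process_derivative_from_native_dual come from this file.

#include "util.hpp"   // DeviceTensor, for_constexpr (as used in this header)
#include "tensor.hpp" // tensor, dual

using namespace mfem;
using namespace mfem::future;

// Hypothetical wrapper: seed the qfunction arguments at quadrature point qp,
// invoke the qfunction, then flatten value and derivative parts of the result.
template <size_t num_fields, typename qf_t, typename qf_args>
MFEM_HOST_DEVICE
void apply_qf_at_qp(
   qf_t &&qf,                                        // user quadrature function
   const std::array<DeviceTensor<2>, num_fields> &u, // field values per qp
   const std::array<DeviceTensor<2>, num_fields> &v, // derivative seeds per qp
   qf_args &args,                                    // tuple of qf argument types
   DeviceTensor<1, real_t> &r_value,                 // flattened result values
   DeviceTensor<1, real_t> &r_grad,                  // flattened result derivatives
   int qp)
{
   // Fill each entry of args from column qp of the matching field views.
   process_qf_args(u, v, args, qp);

   // Call the qfunction; with dual-valued arguments the result carries both a
   // value and a gradient part. ('apply' over the tuple is an assumption here.)
   auto result = apply(qf, args);

   // Write both parts of the result into the flat output views.
   process_qf_result(r_value, result);
   process_derivative_from_native_dual(r_grad, result);
}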