MFEM  v4.6.0
Finite element discretization library
tensor.hpp
1 // Copyright (c) 2010-2023, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 /**
13  * @file tensor.hpp
14  *
15  * @brief Implementation of the tensor class
16  */
17 
18 #ifndef MFEM_INTERNAL_TENSOR_HPP
19 #define MFEM_INTERNAL_TENSOR_HPP
20 
21 #include "dual.hpp"
22 #include <type_traits> // for std::false_type
23 
24 namespace mfem
25 {
26 namespace internal
27 {
28 
29 #if defined(__CUDACC__)
30 #if __CUDAVER__ >= 75000
31 #define MFEM_SUPPRESS_NVCC_HOSTDEVICE_WARNING #pragma nv_exec_check_disable
32 #else
33 #define MFEM_SUPPRESS_NVCC_HOSTDEVICE_WARNING #pragma hd_warning_disable
34 #endif
35 #else //__CUDACC__
36 #define MFEM_SUPPRESS_NVCC_HOSTDEVICE_WARNING
37 #endif
38 
39 template <typename T, int... n>
40 struct tensor;
41 
42 /// The implementation can be drastically generalized by using concepts from
43 /// the C++20 standard.
44 
45 template < typename T >
46 struct tensor<T>
47 {
48  using type = T;
49  static constexpr int ndim = 1;
50  static constexpr int first_dim = 0;
51  MFEM_HOST_DEVICE T& operator[](int /*unused*/) { return values; }
52  MFEM_HOST_DEVICE const T& operator[](int /*unused*/) const { return values; }
53  MFEM_HOST_DEVICE T& operator()(int /*unused*/) { return values; }
54  MFEM_HOST_DEVICE const T& operator()(int /*unused*/) const { return values; }
55  MFEM_HOST_DEVICE operator T() const { return values; }
56  T values;
57 };
58 
59 template < typename T, int n0 >
60 struct tensor<T, n0>
61 {
62  using type = T;
63  static constexpr int ndim = 1;
64  static constexpr int first_dim = n0;
65  MFEM_HOST_DEVICE T& operator[](int i) { return values[i]; }
66  MFEM_HOST_DEVICE const T& operator[](int i) const { return values[i]; }
67  MFEM_HOST_DEVICE T& operator()(int i) { return values[i]; }
68  MFEM_HOST_DEVICE const T& operator()(int i) const { return values[i]; }
69  T values[n0];
70 };
71 
72 template < typename T, int n0, int n1 >
73 struct tensor<T, n0, n1>
74 {
75  using type = T;
76  static constexpr int ndim = 2;
77  static constexpr int first_dim = n0;
78  MFEM_HOST_DEVICE tensor< T, n1 >& operator[](int i) { return values[i]; }
79  MFEM_HOST_DEVICE const tensor< T, n1 >& operator[](int i) const { return values[i]; }
80  MFEM_HOST_DEVICE tensor< T, n1 >& operator()(int i) { return values[i]; }
81  MFEM_HOST_DEVICE const tensor< T, n1 >& operator()(int i) const { return values[i]; }
82  MFEM_HOST_DEVICE T& operator()(int i, int j) { return values[i][j]; }
83  MFEM_HOST_DEVICE const T& operator()(int i, int j) const { return values[i][j]; }
84  tensor < T, n1 > values[n0];
85 };
86 
87 template < typename T, int n0, int n1, int n2 >
88 struct tensor<T, n0, n1, n2>
89 {
90  using type = T;
91  static constexpr int ndim = 3;
92  static constexpr int first_dim = n0;
93  MFEM_HOST_DEVICE tensor< T, n1, n2 >& operator[](int i) { return values[i]; }
94  MFEM_HOST_DEVICE const tensor< T, n1, n2 >& operator[](int i) const { return values[i]; }
95  MFEM_HOST_DEVICE tensor< T, n1, n2 >& operator()(int i) { return values[i]; }
96  MFEM_HOST_DEVICE const tensor< T, n1, n2 >& operator()(int i) const { return values[i]; }
97  MFEM_HOST_DEVICE tensor< T, n2 >& operator()(int i, int j) { return values[i][j]; }
98  MFEM_HOST_DEVICE const tensor< T, n2 >& operator()(int i, int j) const { return values[i][j]; }
99  MFEM_HOST_DEVICE T& operator()(int i, int j, int k) { return values[i][j][k]; }
100  MFEM_HOST_DEVICE const T& operator()(int i, int j, int k) const { return values[i][j][k]; }
101  tensor < T, n1, n2 > values[n0];
102 };
103 
104 template < typename T, int n0, int n1, int n2, int n3 >
105 struct tensor<T, n0, n1, n2, n3>
106 {
107  using type = T;
108  static constexpr int ndim = 4;
109  static constexpr int first_dim = n0;
110  MFEM_HOST_DEVICE tensor< T, n1, n2, n3 >& operator[](int i) { return values[i]; }
111  MFEM_HOST_DEVICE const tensor< T, n1, n2, n3 >& operator[](int i) const { return values[i]; }
112  MFEM_HOST_DEVICE tensor< T, n1, n2, n3 >& operator()(int i) { return values[i]; }
113  MFEM_HOST_DEVICE const tensor< T, n1, n2, n3 >& operator()(int i) const { return values[i]; }
114  MFEM_HOST_DEVICE tensor< T, n2, n3 >& operator()(int i, int j) { return values[i][j]; }
115  MFEM_HOST_DEVICE const tensor< T, n2, n3 >& operator()(int i, int j) const { return values[i][j]; }
116  MFEM_HOST_DEVICE tensor< T, n3 >& operator()(int i, int j, int k) { return values[i][j][k]; }
117  MFEM_HOST_DEVICE const tensor< T, n3 >& operator()(int i, int j, int k) const { return values[i][j][k]; }
118  MFEM_HOST_DEVICE T& operator()(int i, int j, int k, int l) { return values[i][j][k][l]; }
119  MFEM_HOST_DEVICE const T& operator()(int i, int j, int k, int l) const { return values[i][j][k][l]; }
120  tensor < T, n1, n2, n3 > values[n0];
121 };
122 
123 template < typename T, int n0, int n1, int n2, int n3, int n4 >
124 struct tensor<T, n0, n1, n2, n3, n4>
125 {
126  using type = T;
127  static constexpr int ndim = 5;
128  static constexpr int first_dim = n0;
129  MFEM_HOST_DEVICE tensor< T, n1, n2, n3, n4 >& operator[](int i) { return values[i]; }
130  MFEM_HOST_DEVICE const tensor< T, n1, n2, n3, n4 >& operator[](int i) const { return values[i]; }
131  MFEM_HOST_DEVICE tensor< T, n1, n2, n3, n4 >& operator()(int i) { return values[i]; }
132  MFEM_HOST_DEVICE const tensor< T, n1, n2, n3, n4 >& operator()(int i) const { return values[i]; }
133  MFEM_HOST_DEVICE tensor< T, n2, n3, n4 >& operator()(int i, int j) { return values[i][j]; }
134  MFEM_HOST_DEVICE const tensor< T, n2, n3, n4 >& operator()(int i,
135  int j) const { return values[i][j]; }
136  MFEM_HOST_DEVICE tensor< T, n3, n4>& operator()(int i, int j, int k) { return values[i][j][k]; }
137  MFEM_HOST_DEVICE const tensor< T, n3, n4>& operator()(int i, int j,
138  int k) const { return values[i][j][k]; }
139  MFEM_HOST_DEVICE tensor< T, n4 >& operator()(int i, int j, int k, int l) { return values[i][j][k][l]; }
140  MFEM_HOST_DEVICE const tensor< T, n4 >& operator()(int i, int j, int k,
141  int l) const { return values[i][j][k][l]; }
142  MFEM_HOST_DEVICE T& operator()(int i, int j, int k, int l, int m) { return values[i][j][k][l][m]; }
143  MFEM_HOST_DEVICE const T& operator()(int i, int j, int k, int l, int m) const { return values[i][j][k][l][m]; }
144  tensor < T, n1, n2, n3, n4 > values[n0];
145 };
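// Illustrative usage sketch, not part of the original header: constructing and
// indexing the fixed-size tensor specializations above. The function name
// `tensor_indexing_example` is hypothetical.
inline MFEM_HOST_DEVICE double tensor_indexing_example()
{
   tensor<double, 2, 2> A{};        // zero-initialized 2x2 tensor
   A(0, 0) = 1.0; A(0, 1) = 2.0;    // operator() takes one index per dimension
   A(1, 0) = 3.0; A(1, 1) = 4.0;
   tensor<double, 2> row = A[0];    // operator[] returns the i-th row as a tensor<double, 2>
   return A(1, 0) + row(1);         // 3.0 + 2.0
}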
146 
147 /**
148  * @brief A sentinel struct for eliding no-op tensor operations
149  */
150 struct zero
151 {
152  /** @brief `zero` is implicitly convertible to double with value 0.0 */
153  MFEM_HOST_DEVICE operator double() { return 0.0; }
154 
155  /** @brief `zero` is implicitly convertible to a tensor of any shape */
156  template <typename T, int... n>
157  MFEM_HOST_DEVICE operator tensor<T, n...>()
158  {
159  return tensor<T, n...> {};
160  }
161 
162  /** @brief `zero` can be accessed like a multidimensional array */
163  template <typename... T>
164  MFEM_HOST_DEVICE zero operator()(T...)
165  {
166  return zero{};
167  }
168 
169  /** @brief anything assigned to `zero` does not change its value and returns `zero` */
170  template <typename T>
171  MFEM_HOST_DEVICE zero operator=(T)
172  {
173  return zero{};
174  }
175 };
176 
177 /** @brief checks if a type is `zero` */
178 template <typename T>
179 struct is_zero : std::false_type
180 {
181 };
182 
183 /** @overload */
184 template <>
185 struct is_zero<zero> : std::true_type
186 {
187 };
188 
189 /** @brief the sum of two `zero`s is `zero` */
190 MFEM_HOST_DEVICE constexpr zero operator+(zero, zero) { return zero{}; }
191 
192 /** @brief the sum of `zero` with something non-`zero` just returns the other value */
193 template <typename T>
194 MFEM_HOST_DEVICE constexpr T operator+(zero, T other)
195 {
196  return other;
197 }
198 
199 /** @brief the sum of `zero` with something non-`zero` just returns the other value */
200 template <typename T>
201 MFEM_HOST_DEVICE constexpr T operator+(T other, zero)
202 {
203  return other;
204 }
205 
206 /////////////////////////////////////////////////
207 
208 /** @brief the unary negation of `zero` is `zero` */
209 MFEM_HOST_DEVICE constexpr zero operator-(zero) { return zero{}; }
210 
211 /** @brief the difference of two `zero`s is `zero` */
212 MFEM_HOST_DEVICE constexpr zero operator-(zero, zero) { return zero{}; }
213 
214 /** @brief the difference of `zero` with something else is the unary negation of the other thing */
215 template <typename T>
216 MFEM_HOST_DEVICE constexpr T operator-(zero, T other)
217 {
218  return -other;
219 }
220 
221 /** @brief the difference of something else with `zero` is the other thing itself */
222 template <typename T>
223 MFEM_HOST_DEVICE constexpr T operator-(T other, zero)
224 {
225  return other;
226 }
227 
228 /////////////////////////////////////////////////
229 
230 /** @brief the product of two `zero`s is `zero` */
231 MFEM_HOST_DEVICE constexpr zero operator*(zero, zero) { return zero{}; }
232 
233 /** @brief the product of `zero` with something else is also `zero` */
234 template <typename T>
235 MFEM_HOST_DEVICE constexpr zero operator*(zero, T /*other*/)
236 {
237  return zero{};
238 }
239 
240 /** @brief the product of `zero` with something else is also `zero` */
241 template <typename T>
242 MFEM_HOST_DEVICE constexpr zero operator*(T /*other*/, zero)
243 {
244  return zero{};
245 }
246 
247 /** @brief `zero` divided by something is `zero` */
248 template <typename T>
249 MFEM_HOST_DEVICE constexpr zero operator/(zero, T /*other*/)
250 {
251  return zero{};
252 }
253 
254 /** @brief `zero` plus `zero` is `zero` */
255 MFEM_HOST_DEVICE constexpr zero operator+=(zero, zero) { return zero{}; }
256 
257 /** @brief `zero` minus `zero` is `zero` */
258 MFEM_HOST_DEVICE constexpr zero operator-=(zero, zero) { return zero{}; }
259 
260 /** @brief let `zero` be accessed like a tuple */
261 template <int i>
262 MFEM_HOST_DEVICE zero& get(zero& x)
263 {
264  return x;
265 }
266 
267 /** @brief the dot product of anything with `zero` is `zero` */
268 template <typename T>
269 MFEM_HOST_DEVICE zero dot(const T&, zero)
270 {
271  return zero{};
272 }
273 
274 /** @brief the dot product of anything with `zero` is `zero` */
275 template <typename T>
276 MFEM_HOST_DEVICE zero dot(zero, const T&)
277 {
278  return zero{};
279 }
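// Illustrative sketch, not part of the original header: `zero` propagates
// through arithmetic at compile time, so terms that are identically zero can
// be elided without touching tensor data. `zero_example` is hypothetical.
inline MFEM_HOST_DEVICE double zero_example(const tensor<double, 3>& v)
{
   zero z{};
   double a = z + 2.0;         // sum with `zero` returns the other operand
   auto   b = z * v;           // product with `zero` collapses to `zero`
   double c = b;               // `zero` converts implicitly to 0.0
   return a + c + dot(v, z);   // dot with `zero` is also `zero`; adding it is a no-op
}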
280 
281 /**
282  * @brief Removes 1s from tensor dimensions
283  * For example, a tensor<T, 1, 10> is equivalent to a tensor<T, 10>
284  * @tparam T The scalar type of the tensor
285  * @tparam n1 The first dimension
286  * @tparam n2 The second dimension
287  */
288 template <typename T, int n1, int n2 = 1>
289 using reduced_tensor = typename std::conditional<
290  (n1 == 1 && n2 == 1), T,
291  typename std::conditional<n1 == 1, tensor<T, n2>,
292  typename std::conditional<n2 == 1, tensor<T, n1>, tensor<T, n1, n2>
293  >::type
294  >::type
295  >::type;
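// Compile-time checks, not part of the original header, showing how
// reduced_tensor collapses dimensions equal to 1.
static_assert(std::is_same<reduced_tensor<double, 1, 1>, double>::value,
              "a 1x1 tensor reduces to a scalar");
static_assert(std::is_same<reduced_tensor<double, 1, 3>, tensor<double, 3>>::value,
              "a 1xn tensor reduces to a vector");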
296 
297 /**
298  * @brief Creates a tensor of requested dimension by subsequent calls to a functor
299  * Can be thought of as analogous to @p std::transform in that the set of possible
300  * indices for dimensions @p n are transformed into the values of the tensor by @a f
301  * @tparam lambda_type The type of the functor
302  * @param[in] f The functor to generate the tensor values from
303  *
304  * @note the different cases of 0D, 1D, 2D, 3D, and 4D are implemented separately
305  * to work around a limitation in nvcc involving __host__ __device__ lambdas with `auto` parameters.
306  */
307 MFEM_SUPPRESS_NVCC_HOSTDEVICE_WARNING
308 template <typename lambda_type>
309 MFEM_HOST_DEVICE constexpr auto make_tensor(lambda_type f) ->
310 tensor<decltype(f())>
311 {
312  return {f()};
313 }
314 
315 /**
316  * @brief Creates a tensor of requested dimension by subsequent calls to a functor
317  *
318  * @tparam n1 The dimension of the tensor
319  * @tparam lambda_type The type of the functor
320  * @param[in] f The functor to generate the tensor values from
321  * @pre @a f must accept a single @p int argument, with values in [0, @p n1)
322  *
323  * @note the different cases of 0D, 1D, 2D, 3D, and 4D are implemented separately
324  * to work around a limitation in nvcc involving __host__ __device__ lambdas with `auto` parameters.
325  */
326 MFEM_SUPPRESS_NVCC_HOSTDEVICE_WARNING
327 template <int n1, typename lambda_type>
328 MFEM_HOST_DEVICE auto make_tensor(lambda_type f) ->
329 tensor<decltype(f(n1)), n1>
330 {
331  using T = decltype(f(n1));
332  tensor<T, n1> A{};
333  for (int i = 0; i < n1; i++)
334  {
335  A(i) = f(i);
336  }
337  return A;
338 }
339 
340 /**
341  * @brief Creates a tensor of requested dimension by subsequent calls to a functor
342  *
343  * @tparam n1 The first dimension of the tensor
344  * @tparam n2 The second dimension of the tensor
345  * @tparam lambda_type The type of the functor
346  * @param[in] f The functor to generate the tensor values from
347  * @pre @a f must accept two @p int arguments, with values in [0, @p n1) x [0, @p n2)
348  *
349  * @note the different cases of 0D, 1D, 2D, 3D, and 4D are implemented separately
350  * to work around a limitation in nvcc involving __host__ __device__ lambdas with `auto` parameters.
351  */
352 MFEM_SUPPRESS_NVCC_HOSTDEVICE_WARNING
353 template <int n1, int n2, typename lambda_type>
354 MFEM_HOST_DEVICE auto make_tensor(lambda_type f) ->
355 tensor<decltype(f(n1, n2)), n1, n2>
356 {
357  using T = decltype(f(n1, n2));
358  tensor<T, n1, n2> A{};
359  for (int i = 0; i < n1; i++)
360  {
361  for (int j = 0; j < n2; j++)
362  {
363  A(i, j) = f(i, j);
364  }
365  }
366  return A;
367 }
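// Illustrative sketch, not part of the original header: make_tensor evaluates
// the functor once per multi-index to fill the requested shape. The helper
// `make_tensor_example` is hypothetical.
inline tensor<double, 3, 3> make_tensor_example()
{
   // A(i, j) = i + 10 * j, generated entry by entry
   return make_tensor<3, 3>([](int i, int j) { return i + 10.0 * j; });
}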
368 
369 /**
370  * @brief Creates a tensor of requested dimension by subsequent calls to a functor
371  *
372  * @tparam n1 The first dimension of the tensor
373  * @tparam n2 The second dimension of the tensor
374  * @tparam n3 The third dimension of the tensor
375  * @tparam lambda_type The type of the functor
376  * @param[in] f The functor to generate the tensor values from
377  * @pre @a f must accept three @p int arguments, with values in [0, @p n1) x [0, @p n2) x [0, @p n3)
378  *
379  * @note the different cases of 0D, 1D, 2D, 3D, and 4D are implemented separately
380  * to work around a limitation in nvcc involving __host__ __device__ lambdas with `auto` parameters.
381  */
382 MFEM_SUPPRESS_NVCC_HOSTDEVICE_WARNING
383 template <int n1, int n2, int n3, typename lambda_type>
384 MFEM_HOST_DEVICE auto make_tensor(lambda_type f) ->
385 tensor<decltype(f(n1, n2, n3)), n1, n2, n3>
386 {
387  using T = decltype(f(n1, n2, n3));
388  tensor<T, n1, n2, n3> A{};
389  for (int i = 0; i < n1; i++)
390  {
391  for (int j = 0; j < n2; j++)
392  {
393  for (int k = 0; k < n3; k++)
394  {
395  A(i, j, k) = f(i, j, k);
396  }
397  }
398  }
399  return A;
400 }
401 
402 /**
403  * @brief Creates a tensor of requested dimension by subsequent calls to a functor
404  *
405  * @tparam n1 The first dimension of the tensor
406  * @tparam n2 The second dimension of the tensor
407  * @tparam n3 The third dimension of the tensor
408  * @tparam n4 The fourth dimension of the tensor
409  * @tparam lambda_type The type of the functor
410  * @param[in] f The functor to generate the tensor values from
411  * @pre @a f must accept four @p int arguments, with values in [0, @p n1) x [0, @p n2) x [0, @p n3) x [0, @p n4)
412  *
413  * @note the different cases of 0D, 1D, 2D, 3D, and 4D are implemented separately
414  * to work around a limitation in nvcc involving __host__ __device__ lambdas with `auto` parameters.
415  */
416 MFEM_SUPPRESS_NVCC_HOSTDEVICE_WARNING
417 template <int n1, int n2, int n3, int n4, typename lambda_type>
418 MFEM_HOST_DEVICE auto make_tensor(lambda_type f) ->
419 tensor<decltype(f(n1, n2, n3, n4)), n1, n2, n3, n4>
420 {
421  using T = decltype(f(n1, n2, n3, n4));
422  tensor<T, n1, n2, n3, n4> A{};
423  for (int i = 0; i < n1; i++)
424  {
425  for (int j = 0; j < n2; j++)
426  {
427  for (int k = 0; k < n3; k++)
428  {
429  for (int l = 0; l < n4; l++)
430  {
431  A(i, j, k, l) = f(i, j, k, l);
432  }
433  }
434  }
435  }
436  return A;
437 }
438 
439 /**
440  * @brief return the sum of two tensors
441  * @tparam S the underlying type of the lefthand argument
442  * @tparam T the underlying type of the righthand argument
443  * @tparam n integers describing the tensor shape
444  * @param[in] A The lefthand operand
445  * @param[in] B The righthand operand
446  */
447 template <typename S, typename T, int... n>
448 MFEM_HOST_DEVICE auto operator+(const tensor<S, n...>& A,
449  const tensor<T, n...>& B) ->
450 tensor<decltype(S {} + T{}), n...>
451 {
452  tensor<decltype(S{} + T{}), n...> C{};
453  for (int i = 0; i < tensor<T, n...>::first_dim; i++)
454  {
455  C[i] = A[i] + B[i];
456  }
457  return C;
458 }
459 
460 /**
461  * @brief return the unary negation of a tensor
462  * @tparam T the underlying type of the righthand argument
463  * @tparam n integers describing the tensor shape
464  * @param[in] A The tensor to negate
465  */
466 template <typename T, int... n>
467 MFEM_HOST_DEVICE tensor<T, n...> operator-(const tensor<T, n...>& A)
468 {
469  tensor<T, n...> B{};
470  for (int i = 0; i < tensor<T, n...>::first_dim; i++)
471  {
472  B[i] = -A[i];
473  }
474  return B;
475 }
476 
477 /**
478  * @brief return the difference of two tensors
479  * @tparam S the underlying type of the lefthand argument
480  * @tparam T the underlying type of the righthand argument
481  * @tparam n integers describing the tensor shape
482  * @param[in] A The lefthand operand
483  * @param[in] B The righthand operand
484  */
485 template <typename S, typename T, int... n>
486 MFEM_HOST_DEVICE auto operator-(const tensor<S, n...>& A,
487  const tensor<T, n...>& B) ->
488 tensor<decltype(S {} + T{}), n...>
489 {
490  tensor<decltype(S{} + T{}), n...> C{};
491  for (int i = 0; i < tensor<T, n...>::first_dim; i++)
492  {
493  C[i] = A[i] - B[i];
494  }
495  return C;
496 }
497 
498 /**
499  * @brief multiply a tensor by a scalar value
500  * @tparam S the scalar value type. Must be arithmetic (e.g. float, double, int) or a dual number
501  * @tparam T the underlying type of the tensor (righthand) argument
502  * @tparam n integers describing the tensor shape
503  * @param[in] scale The scaling factor
504  * @param[in] A The tensor to be scaled
505  */
506 template <typename S, typename T, int... n,
507  typename = typename std::enable_if<std::is_arithmetic<S>::value ||
508  is_dual_number<S>::value>::type>
509 MFEM_HOST_DEVICE auto operator*(S scale, const tensor<T, n...>& A) ->
510 tensor<decltype(S {} * T{}), n...>
511 {
512  tensor<decltype(S{} * T{}), n...> C{};
513  for (int i = 0; i < tensor<T, n...>::first_dim; i++)
514  {
515  C[i] = scale * A[i];
516  }
517  return C;
518 }
519 
520 /**
521  * @brief multiply a tensor by a scalar value
522  * @tparam S the scalar value type. Must be arithmetic (e.g. float, double, int) or a dual number
523  * @tparam T the underlying type of the tensor (righthand) argument
524  * @tparam n integers describing the tensor shape
525  * @param[in] A The tensor to be scaled
526  * @param[in] scale The scaling factor
527  */
528 template <typename S, typename T, int... n,
529  typename = typename std::enable_if<std::is_arithmetic<S>::value ||
530  is_dual_number<S>::value>::type>
531 MFEM_HOST_DEVICE auto operator*(const tensor<T, n...>& A, S scale) ->
532 tensor<decltype(T {} * S{}), n...>
533 {
534  tensor<decltype(T{} * S{}), n...> C{};
535  for (int i = 0; i < tensor<T, n...>::first_dim; i++)
536  {
537  C[i] = A[i] * scale;
538  }
539  return C;
540 }
541 
542 /**
543  * @brief divide a scalar by each element in a tensor
544  * @tparam S the scalar value type. Must be arithmetic (e.g. float, double, int) or a dual number
545  * @tparam T the underlying type of the tensor (righthand) argument
546  * @tparam n integers describing the tensor shape
547  * @param[in] scale The numerator
548  * @param[in] A The tensor of denominators
549  */
550 template <typename S, typename T, int... n,
551  typename = typename std::enable_if<std::is_arithmetic<S>::value ||
552  is_dual_number<S>::value>::type>
553 MFEM_HOST_DEVICE auto operator/(S scale, const tensor<T, n...>& A) ->
554 tensor<decltype(S {} * T{}), n...>
555 {
556  tensor<decltype(S{} * T{}), n...> C{};
557  for (int i = 0; i < tensor<T, n...>::first_dim; i++)
558  {
559  C[i] = scale / A[i];
560  }
561  return C;
562 }
563 
564 /**
565  * @brief divide a tensor by a scalar
566  * @tparam S the scalar value type. Must be arithmetic (e.g. float, double, int) or a dual number
567  * @tparam T the underlying type of the tensor (righthand) argument
568  * @tparam n integers describing the tensor shape
569  * @param[in] A The tensor of numerators
570  * @param[in] scale The denominator
571  */
572 template <typename S, typename T, int... n,
573  typename = typename std::enable_if<std::is_arithmetic<S>::value ||
574  is_dual_number<S>::value>::type>
575 MFEM_HOST_DEVICE auto operator/(const tensor<T, n...>& A, S scale) ->
576 tensor<decltype(T {} * S{}), n...>
577 {
578  tensor<decltype(T{} * S{}), n...> C{};
579  for (int i = 0; i < tensor<T, n...>::first_dim; i++)
580  {
581  C[i] = A[i] / scale;
582  }
583  return C;
584 }
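// Illustrative sketch, not part of the original header: the elementwise
// operators above compose as expected. `axpy_example` is hypothetical.
inline MFEM_HOST_DEVICE tensor<double, 3> axpy_example(double a,
                                                       const tensor<double, 3>& x,
                                                       const tensor<double, 3>& y)
{
   return a * x + y / 2.0;   // scale, divide by a scalar, and add, all elementwise
}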
585 
586 /**
587  * @brief compound assignment (+) on tensors
588  * @tparam S the underlying type of the tensor (lefthand) argument
589  * @tparam T the underlying type of the tensor (righthand) argument
590  * @tparam n integers describing the tensor shape
591  * @param[in] A The lefthand tensor
592  * @param[in] B The righthand tensor
593  */
594 template <typename S, typename T, int... n> MFEM_HOST_DEVICE
595 tensor<S, n...>& operator+=(tensor<S, n...>& A,
596  const tensor<T, n...>& B)
597 {
598  for (int i = 0; i < tensor<S, n...>::first_dim; i++)
599  {
600  A[i] += B[i];
601  }
602  return A;
603 }
604 
605 /**
606  * @brief compound assignment (+) on tensors
607  * @tparam T the underlying type of the tensor argument
608  * @param[in] A The lefthand tensor
609  * @param[in] B The righthand tensor
610  */
611 template <typename T> MFEM_HOST_DEVICE
612 tensor<T>& operator+=(tensor<T>& A, const T& B)
613 {
614  return A.values += B;
615 }
616 
617 /**
618  * @brief compound assignment (+) on tensors
619  * @tparam T the underlying type of the tensor argument
620  * @param[in] A The lefthand tensor
621  * @param[in] B The righthand tensor
622  */
623 template <typename T> MFEM_HOST_DEVICE
624 tensor<T, 1>& operator+=(tensor<T, 1>& A, const T& B)
625 {
626  return A.values += B;
627 }
628 
629 /**
630  * @brief compound assignment (+) on tensors
631  * @tparam T the underlying type of the tensor argument
632  * @param[in] A The lefthand tensor
633  * @param[in] B The righthand tensor
634  */
635 template <typename T> MFEM_HOST_DEVICE
636 tensor<T, 1, 1>& operator+=(tensor<T, 1, 1>& A, const T& B)
637 {
638  return A.values += B;
639 }
640 
641 /**
642  * @brief compound assignment (+) between a tensor and zero (no-op)
643  * @tparam T the underlying type of the tensor (righthand) argument
644  * @tparam n integers describing the tensor shape
645  * @param[in] A The lefthand tensor
646  */
647 template <typename T, int... n> MFEM_HOST_DEVICE
648 tensor<T, n...>& operator+=(tensor<T, n...>& A, zero)
649 {
650  return A;
651 }
652 
653 /**
654  * @brief compound assignment (-) on tensors
655  * @tparam S the underlying type of the tensor (lefthand) argument
656  * @tparam T the underlying type of the tensor (righthand) argument
657  * @tparam n integers describing the tensor shape
658  * @param[in] A The lefthand tensor
659  * @param[in] B The righthand tensor
660  */
661 template <typename S, typename T, int... n> MFEM_HOST_DEVICE
662 tensor<S, n...>& operator-=(tensor<S, n...>& A, const tensor<T, n...>& B)
663 {
664  for (int i = 0; i < tensor<S, n...>::first_dim; i++)
665  {
666  A[i] -= B[i];
667  }
668  return A;
669 }
670 
671 /**
672  * @brief compound assignment (-) between a tensor and zero (no-op)
673  * @tparam T the underlying type of the tensor (righthand) argument
674  * @tparam n integers describing the tensor shape
675  * @param[in] A The lefthand tensor
676  */
677 template <typename T, int... n> MFEM_HOST_DEVICE
678 constexpr tensor<T, n...>& operator-=(tensor<T, n...>& A, zero)
679 {
680  return A;
681 }
682 
683 /**
684  * @brief compute the outer product of two tensors
685  * @tparam S the type of the lefthand argument
686  * @tparam T the type of the righthand argument
687  * @param[in] A The lefthand argument
688  * @param[in] B The righthand argument
689  *
690  * @note this overload implements the special case where both arguments are scalars
691  */
692 template <typename S, typename T> MFEM_HOST_DEVICE
693 auto outer(S A, T B) -> decltype(A * B)
694 {
695  static_assert(std::is_arithmetic<S>::value && std::is_arithmetic<T>::value,
696  "outer product types must be tensor or arithmetic_type");
697  return A * B;
698 }
699 
700 /**
701  * @overload
702  * @note this overload implements the case where the left argument is a scalar, and the right argument is a tensor
703  */
704 template <typename S, typename T, int n> MFEM_HOST_DEVICE
705 tensor<decltype(S{} * T{}), n> outer(S A, tensor<T, n> B)
706 {
707  static_assert(std::is_arithmetic<S>::value,
708  "outer product types must be tensor or arithmetic_type");
709  tensor<decltype(S{} * T{}), n> AB{};
710  for (int i = 0; i < n; i++)
711  {
712  AB[i] = A * B[i];
713  }
714  return AB;
715 }
716 
717 /**
718  * @overload
719  * @note this overload implements the case where the left argument is a tensor, and the right argument is a scalar
720  */
721 template <typename S, typename T, int m> MFEM_HOST_DEVICE
722 tensor<decltype(S{} * T{}), m> outer(const tensor<S, m>& A, T B)
723 {
724  static_assert(std::is_arithmetic<T>::value,
725  "outer product types must be tensor or arithmetic_type");
726  tensor<decltype(S{} * T{}), m> AB{};
727  for (int i = 0; i < m; i++)
728  {
729  AB[i] = A[i] * B;
730  }
731  return AB;
732 }
733 
734 /**
735  * @overload
736  * @note this overload implements the case where the left argument is `zero`, and the right argument is a tensor
737  */
738 template <typename T, int n> MFEM_HOST_DEVICE
739 zero outer(zero, const tensor<T, n>&)
740 {
741  return zero{};
742 }
743 
744 /**
745  * @overload
746  * @note this overload implements the case where the left argument is a tensor, and the right argument is `zero`
747  */
748 template <typename T, int n> MFEM_HOST_DEVICE
749 zero outer(const tensor<T, n>&, zero)
750 {
751  return zero{};
752 }
753 
754 /**
755  * @overload
756  * @note this overload implements the case where the left argument is a scalar,
757  * and the right argument is a tensor
758  */
759 template <typename S, typename T, int m, int n> MFEM_HOST_DEVICE
760 tensor<decltype(S{} * T{}), m, n> outer(S A, const tensor<T, m, n>& B)
761 {
762  static_assert(std::is_arithmetic<S>::value,
763  "outer product types must be tensor or arithmetic_type");
764  tensor<decltype(S{} * T{}), m, n> AB{};
765  for (int i = 0; i < m; i++)
766  {
767  for (int j = 0; j < n; j++)
768  {
769  AB[i][j] = A * B[i][j];
770  }
771  }
772  return AB;
773 }
774 
775 /**
776  * @overload
777  * @note this overload implements the case where both arguments are vectors
778  */
779 template <typename S, typename T, int m, int n> MFEM_HOST_DEVICE
780 tensor<decltype(S{} * T{}), m, n> outer(const tensor<S, m>& A,
781  const tensor<T, n>& B)
782 {
783  tensor<decltype(S{} * T{}), m, n> AB{};
784  for (int i = 0; i < m; i++)
785  {
786  for (int j = 0; j < n; j++)
787  {
788  AB[i][j] = A[i] * B[j];
789  }
790  }
791  return AB;
792 }
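// Illustrative sketch, not part of the original header: the outer product of
// two vectors is the rank-2 tensor (u outer v)_ij = u_i * v_j.
// `outer_example` is hypothetical.
inline MFEM_HOST_DEVICE tensor<double, 3, 3> outer_example(const tensor<double, 3>& u,
                                                           const tensor<double, 3>& v)
{
   return outer(u, v);   // 3x3 tensor with entries u[i] * v[j]
}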
793 
794 /**
795  * @overload
796  * @note this overload implements the case where the left argument is a 2nd order tensor, and the right argument is a
797  * scalar
798  */
799 template <typename S, typename T, int m, int n> MFEM_HOST_DEVICE
800 tensor<decltype(S{} * T{}), m, n> outer(const tensor<S, m, n>& A, T B)
801 {
802  static_assert(std::is_arithmetic<T>::value,
803  "outer product types must be tensor or arithmetic_type");
804  tensor<decltype(S{} * T{}), m, n> AB{};
805  for (int i = 0; i < m; i++)
806  {
807  for (int j = 0; j < n; j++)
808  {
809  AB[i][j] = A[i][j] * B;
810  }
811  }
812  return AB;
813 }
814 
815 /**
816  * @overload
817  * @note this overload implements the case where the left argument is a 2nd order tensor, and the right argument is a
818  * first order tensor
819  */
820 template <typename S, typename T, int m, int n, int p> MFEM_HOST_DEVICE
821 tensor<decltype(S{} * T{}), m, n, p> outer(const tensor<S, m, n>& A,
822  const tensor<T, p>& B)
823 {
824  tensor<decltype(S{} * T{}), m, n, p> AB{};
825  for (int i = 0; i < m; i++)
826  {
827  for (int j = 0; j < n; j++)
828  {
829  for (int k = 0; k < p; k++)
830  {
831  AB[i][j][k] = A[i][j] * B[k];
832  }
833  }
834  }
835  return AB;
836 }
837 
838 /**
839  * @overload
840  * @note this overload implements the case where the left argument is a 1st order tensor, and the right argument is a
841  * 2nd order tensor
842  */
843 template <typename S, typename T, int m, int n, int p> MFEM_HOST_DEVICE
844 tensor<decltype(S{} * T{}), m, n, p> outer(const tensor<S, m>& A,
845  const tensor<T, n, p>& B)
846 {
847  tensor<decltype(S{} * T{}), m, n, p> AB{};
848  for (int i = 0; i < m; i++)
849  {
850  for (int j = 0; j < n; j++)
851  {
852  for (int k = 0; k < p; k++)
853  {
854  AB[i][j][k] = A[i] * B[j][k];
855  }
856  }
857  }
858  return AB;
859 }
860 
861 /**
862  * @overload
863  * @note this overload implements the case where both arguments are second order tensors
864  */
865 template <typename S, typename T, int m, int n, int p, int q> MFEM_HOST_DEVICE
866 tensor<decltype(S{} * T{}), m, n, p, q> outer(const tensor<S, m, n>& A,
867  const tensor<T, p, q>& B)
868 {
869  tensor<decltype(S{} * T{}), m, n, p, q> AB{};
870  for (int i = 0; i < m; i++)
871  {
872  for (int j = 0; j < n; j++)
873  {
874  for (int k = 0; k < p; k++)
875  {
876  for (int l = 0; l < q; l++)
877  {
878  AB[i][j][k][l] = A[i][j] * B[k][l];
879  }
880  }
881  }
882  }
883  return AB;
884 }
885 
886 /**
887  * @brief this function contracts over all indices of the two tensor arguments
888  * @tparam S the underlying type of the tensor (lefthand) argument
889  * @tparam T the underlying type of the tensor (righthand) argument
890  * @tparam m the number of rows
891  * @tparam n the number of columns
892  * @param[in] A The lefthand tensor
893  * @param[in] B The righthand tensor
894  */
895 template <typename S, typename T, int m, int n> MFEM_HOST_DEVICE
896 auto inner(const tensor<S, m, n>& A, const tensor<T, m, n>& B) ->
897 decltype(S {} * T{})
898 {
899  decltype(S{} * T{}) sum{};
900  for (int i = 0; i < m; i++)
901  {
902  for (int j = 0; j < n; j++)
903  {
904  sum += A[i][j] * B[i][j];
905  }
906  }
907  return sum;
908 }
909 
910 /**
911  * @brief this function contracts over the "middle" index of the two tensor
912  * arguments. E.g. for matrices it returns tensor C, such that C_ij = sum_k A_ik B_kj.
913  * @tparam S the underlying type of the tensor (lefthand) argument
914  * @tparam T the underlying type of the tensor (righthand) argument
915  * @tparam n integers describing the tensor shape
916  * @param[in] A The lefthand tensor
917  * @param[in] B The righthand tensor
918  */
919 template <typename S, typename T, int m, int n, int p> MFEM_HOST_DEVICE
920 auto dot(const tensor<S, m, n>& A,
921  const tensor<T, n, p>& B) ->
922 tensor<decltype(S {} * T{}), m, p>
923 {
924  tensor<decltype(S{} * T{}), m, p> AB{};
925  for (int i = 0; i < m; i++)
926  {
927  for (int j = 0; j < p; j++)
928  {
929  for (int k = 0; k < n; k++)
930  {
931  AB[i][j] = AB[i][j] + A[i][k] * B[k][j];
932  }
933  }
934  }
935  return AB;
936 }
937 
938 /**
939  * @overload
940  * @note vector . matrix
941  */
942 template <typename S, typename T, int m, int n> MFEM_HOST_DEVICE
943 auto dot(const tensor<S, m>& A, const tensor<T, m, n>& B) ->
944 tensor<decltype(S {} * T{}), n>
945 {
946  tensor<decltype(S{} * T{}), n> AB{};
947  for (int i = 0; i < n; i++)
948  {
949  for (int j = 0; j < m; j++)
950  {
951  AB[i] = AB[i] + A[j] * B[j][i];
952  }
953  }
954  return AB;
955 }
956 
957 /**
958  * @overload
959  * @note matrix . vector
960  */
961 template <typename S, typename T, int m, int n> MFEM_HOST_DEVICE
962 auto dot(const tensor<S, m, n>& A, const tensor<T, n>& B) ->
963 tensor<decltype(S {} * T{}), m>
964 {
965  tensor<decltype(S{} * T{}), m> AB{};
966  for (int i = 0; i < m; i++)
967  {
968  for (int j = 0; j < n; j++)
969  {
970  AB[i] = AB[i] + A[i][j] * B[j];
971  }
972  }
973  return AB;
974 }
975 
976 /**
977  * @overload
978  * @note 3rd-order-tensor . vector
979  */
980 template <typename S, typename T, int m, int n, int p> MFEM_HOST_DEVICE
981 auto dot(const tensor<S, m, n, p>& A, const tensor<T, p>& B) ->
982 tensor<decltype(S {} * T{}), m, n>
983 {
984  tensor<decltype(S{} * T{}), m, n> AB{};
985  for (int i = 0; i < m; i++)
986  {
987  for (int j = 0; j < n; j++)
988  {
989  for (int k = 0; k < p; k++)
990  {
991  AB[i][j] += A[i][j][k] * B[k];
992  }
993  }
994  }
995  return AB;
996 }
997 
998 // /**
999 // * @brief Dot product of a vector . vector and vector . tensor
1000 // *
1001 // * @tparam S the underlying type of the tensor (lefthand) argument
1002 // * @tparam T the underlying type of the tensor (righthand) argument
1003 // * @tparam m the dimension of the first tensor
1004 // * @tparam n the parameter pack of dimensions of the second tensor
1005 // * @param A The lefthand tensor
1006 // * @param B The righthand tensor
1007 // * @return The computed dot product
1008 // */
1009 // template <typename S, typename T, int m, int... n>
1010 // auto dot(const tensor<S, m>& A, const tensor<T, m, n...>& B)
1011 // {
1012 // // this dot product function includes the vector * vector implementation and
1013 // // the vector * tensor one, since clang emits an error about ambiguous
1014 // // overloads if they are separate functions. The `if constexpr` expression avoids
1015 // // using an `else` because that confuses nvcc (11.2) into thinking there's not
1016 // // a return statement
1017 // if constexpr (sizeof...(n) == 0)
1018 // {
1019 // decltype(S{} * T{}) AB{};
1020 // for (int i = 0; i < m; i++)
1021 // {
1022 // AB += A[i] * B[i];
1023 // }
1024 // return AB;
1025 // }
1026 
1027 // if constexpr (sizeof...(n) > 0)
1028 // {
1029 // constexpr int dimensions[] = {n...};
1030 // tensor<decltype(S{} * T{}), n...> AB{};
1031 // for (int i = 0; i < dimensions[0]; i++)
1032 // {
1033 // for (int j = 0; j < m; j++)
1034 // {
1035 // AB[i] = AB[i] + A[j] * B[j][i];
1036 // }
1037 // }
1038 // return AB;
1039 // }
1040 // }
1041 
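/**
 * @overload
 * @note vector . vector (sum of the componentwise products)
 */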
1042 template <typename S, typename T, int m> MFEM_HOST_DEVICE
1043 auto dot(const tensor<S, m>& A, const tensor<T, m>& B) ->
1044 decltype(S {} * T{})
1045 {
1046  decltype(S{} * T{}) AB{};
1047  for (int i = 0; i < m; i++)
1048  {
1049  AB += A[i] * B[i];
1050  }
1051  return AB;
1052 }
1053 
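/**
 * @overload
 * @note vector . tensor, contracting the vector with the first index of the tensor
 */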
1054 template <typename S, typename T, int m, int... n> MFEM_HOST_DEVICE
1055 auto dot(const tensor<S, m>& A, const tensor<T, m, n...>& B) ->
1056 tensor<decltype(S {} * T{}), n...>
1057 {
1058  constexpr int dimensions[] = {n...};
1059  tensor<decltype(S{} * T{}), n...> AB{};
1060  for (int i = 0; i < dimensions[0]; i++)
1061  {
1062  for (int j = 0; j < m; j++)
1063  {
1064  AB[i] = AB[i] + A[j] * B[j][i];
1065  }
1066  }
1067  return AB;
1068 }
1069 
1070 /**
1071  * @overload
1072  * @note vector . matrix . vector
1073  */
1074 template <typename S, typename T, typename U, int m, int n> MFEM_HOST_DEVICE
1075 auto dot(const tensor<S, m>& u, const tensor<T, m, n>& A,
1076  const tensor<U, n>& v) ->
1077 decltype(S {} * T{} * U{})
1078 {
1079  decltype(S{} * T{} * U{}) uAv{};
1080  for (int i = 0; i < m; i++)
1081  {
1082  for (int j = 0; j < n; j++)
1083  {
1084  uAv += u[i] * A[i][j] * v[j];
1085  }
1086  }
1087  return uAv;
1088 }
1089 
1090 /**
1091  * @brief double dot product, contracting over the two "middle" indices
1092  * @tparam S the underlying type of the tensor (lefthand) argument
1093  * @tparam T the underlying type of the tensor (righthand) argument
1094  * @tparam m first dimension of A
1095  * @tparam n second dimension of A
1096  * @tparam p third dimension of A, first dimension of B
1097  * @tparam q fourth dimension of A, second dimension of B
1098  * @param[in] A The lefthand tensor
1099  * @param[in] B The righthand tensor
1100  */
1101 template <typename S, typename T, int m, int n, int p, int q> MFEM_HOST_DEVICE
1102 auto ddot(const tensor<S, m, n, p, q>& A, const tensor<T, p, q>& B) ->
1103 tensor<decltype(S {} * T{}), m, n>
1104 {
1105  tensor<decltype(S{} * T{}), m, n> AB{};
1106  for (int i = 0; i < m; i++)
1107  {
1108  for (int j = 0; j < n; j++)
1109  {
1110  for (int k = 0; k < p; k++)
1111  {
1112  for (int l = 0; l < q; l++)
1113  {
1114  AB[i][j] += A[i][j][k][l] * B[k][l];
1115  }
1116  }
1117  }
1118  }
1119  return AB;
1120 }
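// Illustrative sketch, not part of the original header: double contraction of
// a 4th-order tensor with a 2nd-order tensor, C_ij = A_ijkl B_kl. Here A is
// the 4th-order identity, so the contraction recovers B. `ddot_example` is
// hypothetical.
inline tensor<double, 3, 3> ddot_example(const tensor<double, 3, 3>& B)
{
   auto A = make_tensor<3, 3, 3, 3>([](int i, int j, int k, int l)
   {
      return double((i == k) * (j == l));   // A_ijkl = delta_ik * delta_jl
   });
   return ddot(A, B);                       // equals B
}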
1121 
1122 /**
1123  * @overload
1124  * @note 3rd-order-tensor : 2nd-order-tensor. Returns vector C, such that C_i =
1125  * sum_jk A_ijk B_jk.
1126  */
1127 template <typename S, typename T, int m, int n, int p> MFEM_HOST_DEVICE
1128 auto ddot(const tensor<S, m, n, p>& A, const tensor<T, n, p>& B) ->
1129 tensor<decltype(S {} * T{}), m>
1130 {
1131  tensor<decltype(S{} * T{}), m> AB{};
1132  for (int i = 0; i < m; i++)
1133  {
1134  for (int j = 0; j < n; j++)
1135  {
1136  for (int k = 0; k < p; k++)
1137  {
1138  AB[i] += A[i][j][k] * B[j][k];
1139  }
1140  }
1141  }
1142  return AB;
1143 }
1144 
1145 /**
1146  * @overload
1147  * @note 2nd-order-tensor : 2nd-order-tensor, like inner()
1148  */
1149 template <typename S, typename T, int m, int n> MFEM_HOST_DEVICE
1150 auto ddot(const tensor<S, m, n>& A, const tensor<T, m, n>& B) ->
1151 decltype(S {} * T{})
1152 {
1153  decltype(S{} * T{}) AB{};
1154  for (int i = 0; i < m; i++)
1155  {
1156  for (int j = 0; j < n; j++)
1157  {
1158  AB += A[i][j] * B[i][j];
1159  }
1160  }
1161  return AB;
1162 }
1163 
1164 /**
1165  * @brief this is a shorthand for dot(A, B)
1166  */
1167 template <typename S, typename T, int... m, int... n> MFEM_HOST_DEVICE
1168 auto operator*(const tensor<S, m...>& A, const tensor<T, n...>& B) ->
1169 decltype(dot(A, B))
1170 {
1171  return dot(A, B);
1172 }
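// Illustrative sketch, not part of the original header: common contractions
// written with dot() and the operator* shorthand. `contraction_example` is
// hypothetical.
inline MFEM_HOST_DEVICE double contraction_example(const tensor<double, 2, 2>& A,
                                                   const tensor<double, 2>& x)
{
   tensor<double, 2> Ax = dot(A, x);   // matrix . vector
   double xAx = dot(x, A, x);          // vector . matrix . vector
   return xAx + dot(Ax, x) + (x * x);  // x * x is shorthand for dot(x, x)
}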
1173 
1174 /**
1175  * @brief Returns the squared Frobenius norm of the tensor
1176  * @param[in] A The tensor to obtain the squared norm from
1177  */
1178 template <typename T, int m> MFEM_HOST_DEVICE
1179 T sqnorm(const tensor<T, m>& A)
1180 {
1181  T total{};
1182  for (int i = 0; i < m; i++)
1183  {
1184  total += A[i] * A[i];
1185  }
1186  return total;
1187 }
1188 
1189 /**
1190  * @overload
1191  * @brief Returns the squared Frobenius norm of the tensor
1192  */
1193 template <typename T, int m, int n> MFEM_HOST_DEVICE
1194 T sqnorm(const tensor<T, m, n>& A)
1195 {
1196  T total{};
1197  for (int i = 0; i < m; i++)
1198  {
1199  for (int j = 0; j < n; j++)
1200  {
1201  total += A[i][j] * A[i][j];
1202  }
1203  }
1204  return total;
1205 }
1206 
1207 /**
1208  * @brief Returns the Frobenius norm of the tensor
1209  * @param[in] A The tensor to obtain the norm from
1210  */
1211 template <typename T, int... n> MFEM_HOST_DEVICE
1212 T norm(const tensor<T, n...>& A)
1213 {
1214  return std::sqrt(sqnorm(A));
1215 }
1216 
1217 /**
1218  * @brief Normalizes the tensor
1219  * Each element is divided by the Frobenius norm of the tensor, @see norm
1220  * @param[in] A The tensor to normalize
1221  */
1222 template <typename T, int... n> MFEM_HOST_DEVICE
1223 auto normalize(const tensor<T, n...>& A) ->
1224 decltype(A / norm(A))
1225 {
1226  return A / norm(A);
1227 }
1228 
1229 /**
1230  * @brief Returns the trace of a square matrix
1231  * @param[in] A The matrix to compute the trace of
1232  * @return The sum of the elements on the main diagonal
1233  */
1234 template <typename T, int n> MFEM_HOST_DEVICE
1235 T tr(const tensor<T, n, n>& A)
1236 {
1237  T trA{};
1238  for (int i = 0; i < n; i++)
1239  {
1240  trA = trA + A[i][i];
1241  }
1242  return trA;
1243 }
1244 
1245 /**
1246  * @brief Returns the symmetric part of a square matrix
1247  * @param[in] A The matrix to obtain the symmetric part of
1248  * @return (1/2) * (A + A^T)
1249  */
1250 template <typename T, int n> MFEM_HOST_DEVICE
1251 tensor<T, n, n> sym(const tensor<T, n, n>& A)
1252 {
1253  tensor<T, n, n> symA{};
1254  for (int i = 0; i < n; i++)
1255  {
1256  for (int j = 0; j < n; j++)
1257  {
1258  symA[i][j] = 0.5 * (A[i][j] + A[j][i]);
1259  }
1260  }
1261  return symA;
1262 }
1263 
1264 /**
1265  * @brief Calculates the deviator of a matrix (rank-2 tensor)
1266  * @param[in] A The matrix to calculate the deviator of
1267  * In the context of stress tensors, the deviator is obtained by
1268  * subtracting the mean stress (average of main diagonal elements)
1269  * from each element on the main diagonal
1270  */
1271 template <typename T, int n> MFEM_HOST_DEVICE
1272 tensor<T, n, n> dev(const tensor<T, n, n>& A)
1273 {
1274  auto devA = A;
1275  auto trA = tr(A);
1276  for (int i = 0; i < n; i++)
1277  {
1278  devA[i][i] -= trA / n;
1279  }
1280  return devA;
1281 }
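// Illustrative sketch, not part of the original header: typical use of sym(),
// dev() and sqnorm() for small-strain post-processing. `von_mises_example`
// and the stress tensor `sigma` are hypothetical.
inline MFEM_HOST_DEVICE double von_mises_example(const tensor<double, 3, 3>& sigma)
{
   auto s = dev(sym(sigma));            // deviatoric part of the symmetrized stress
   return std::sqrt(1.5 * sqnorm(s));   // von Mises equivalent stress
}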
1282 
1283 /**
1284  * @brief Obtains the identity matrix of the specified dimension
1285  * @return I_dim
1286  */
1287 template <int dim>
1288 MFEM_HOST_DEVICE tensor<double, dim, dim> Identity()
1289 {
1290  tensor<double, dim, dim> I{};
1291  for (int i = 0; i < dim; i++)
1292  {
1293  for (int j = 0; j < dim; j++)
1294  {
1295  I[i][j] = (i == j);
1296  }
1297  }
1298  return I;
1299 }
1300 
1301 /**
1302  * @brief Returns the transpose of the matrix
1303  * @param[in] A The matrix to obtain the transpose of
1304  */
1305 template <typename T, int m, int n> MFEM_HOST_DEVICE
1306 tensor<T, n, m> transpose(const tensor<T, m, n>& A)
1307 {
1308  tensor<T, n, m> AT{};
1309  for (int i = 0; i < n; i++)
1310  {
1311  for (int j = 0; j < m; j++)
1312  {
1313  AT[i][j] = A[j][i];
1314  }
1315  }
1316  return AT;
1317 }
1318 
1319 /**
1320  * @brief Returns the determinant of a matrix
1321  * @param[in] A The matrix to obtain the determinant of
1322  */
1323 template <typename T> MFEM_HOST_DEVICE
1324 T det(const tensor<T, 2, 2>& A)
1325 {
1326  return A[0][0] * A[1][1] - A[0][1] * A[1][0];
1327 }
1328 /// @overload
1329 template <typename T> MFEM_HOST_DEVICE
1330 T det(const tensor<T, 3, 3>& A)
1331 {
1332  return A[0][0] * A[1][1] * A[2][2] + A[0][1] * A[1][2] * A[2][0] + A[0][2] *
1333  A[1][0] * A[2][1] -
1334  A[0][0] * A[1][2] * A[2][1] - A[0][1] * A[1][0] * A[2][2] - A[0][2] * A[1][1] *
1335  A[2][0];
1336 }
1337 
1338 /**
1339  * @brief Return whether a square rank 2 tensor is symmetric
1340  *
1341  * @tparam n The height of the tensor
1342  * @param A The square rank 2 tensor
1343  * @param abs_tolerance The absolute tolerance to check for symmetry
1344  * @return Whether the square rank 2 tensor (matrix) is symmetric
1345  */
1346 template <int n> MFEM_HOST_DEVICE
1347 bool is_symmetric(tensor<double, n, n> A, double abs_tolerance = 1.0e-8)
1348 {
1349  for (int i = 0; i < n; ++i)
1350  {
1351  for (int j = i + 1; j < n; ++j)
1352  {
1353  if (std::abs(A(i, j) - A(j, i)) > abs_tolerance)
1354  {
1355  return false;
1356  }
1357  }
1358  }
1359  return true;
1360 }
1361 
1362 /**
1363  * @brief Return whether a matrix is symmetric and positive definite
1364  * This check uses Sylvester's criterion, checking that each upper left subtensor has a
1365  * determinant greater than zero.
1366  *
1367  * @param A The matrix to test for positive definiteness
1368  * @return Whether the matrix is positive definite
1369  */
1370 inline MFEM_HOST_DEVICE
1371 bool is_symmetric_and_positive_definite(tensor<double, 2, 2> A)
1372 {
1373  if (!is_symmetric(A))
1374  {
1375  return false;
1376  }
1377  if (A(0, 0) < 0.0)
1378  {
1379  return false;
1380  }
1381  if (det(A) < 0.0)
1382  {
1383  return false;
1384  }
1385  return true;
1386 }
1387 /// @overload
1388 inline MFEM_HOST_DEVICE
1389 bool is_symmetric_and_positive_definite(tensor<double, 3, 3> A)
1390 {
1391  if (!is_symmetric(A))
1392  {
1393  return false;
1394  }
1395  if (det(A) < 0.0)
1396  {
1397  return false;
1398  }
1399  auto subtensor = make_tensor<2, 2>([A](int i, int j) { return A(i, j); });
1400  if (!is_symmetric_and_positive_definite(subtensor))
1401  {
1402  return false;
1403  }
1404  return true;
1405 }
1406 
1407 /**
1408  * @brief Solves Ax = b for x using Gaussian elimination with partial pivoting
1409  * @param[in] A The coefficient matrix A
1410  * @param[in] b The righthand side vector b
1411  * @note @a A and @a b are by-value as they are mutated as part of the elimination
1412  */
1413 template <typename T, int n> MFEM_HOST_DEVICE
1414 tensor<T, n> linear_solve(tensor<T, n, n> A, tensor<T, n> b)
1415 {
1416  auto abs = [](double x) { return (x < 0) ? -x : x; };
1417  auto swap_vector = [](tensor<T, n>& x, tensor<T, n>& y)
1418  {
1419  auto tmp = x;
1420  x = y;
1421  y = tmp;
1422  };
1423  auto swap_scalar = [](T& x, T& y)
1424  {
1425  auto tmp = x;
1426  x = y;
1427  y = tmp;
1428  };
1429 
1430 
1431  tensor<double, n> x{};
1432 
1433  for (int i = 0; i < n; i++)
1434  {
1435  // Search for maximum in this column
1436  double max_val = abs(A[i][i]);
1437 
1438  int max_row = i;
1439  for (int j = i + 1; j < n; j++)
1440  {
1441  if (abs(A[j][i]) > max_val)
1442  {
1443  max_val = abs(A[j][i]);
1444  max_row = j;
1445  }
1446  }
1447 
1448  swap_scalar(b[max_row], b[i]);
1449  swap_vector(A[max_row], A[i]);
1450 
1451  // zero entries below in this column
1452  for (int j = i + 1; j < n; j++)
1453  {
1454  double c = -A[j][i] / A[i][i];
1455  A[j] += c * A[i];
1456  b[j] += c * b[i];
1457  A[j][i] = 0;
1458  }
1459  }
1460 
1461  // Solve equation Ax=b for an upper triangular matrix A
1462  for (int i = n - 1; i >= 0; i--)
1463  {
1464  x[i] = b[i] / A[i][i];
1465  for (int j = i - 1; j >= 0; j--)
1466  {
1467  b[j] -= A[j][i] * x[i];
1468  }
1469  }
1470 
1471  return x;
1472 }
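// Illustrative sketch, not part of the original header: solving a small 2x2
// system with linear_solve. The values and `linear_solve_example` are
// hypothetical.
inline MFEM_HOST_DEVICE tensor<double, 2> linear_solve_example()
{
   tensor<double, 2, 2> A{};
   A(0, 0) = 2.0; A(0, 1) = 1.0;
   A(1, 0) = 1.0; A(1, 1) = 3.0;
   tensor<double, 2> b{};
   b(0) = 3.0; b(1) = 4.0;
   return linear_solve(A, b);   // expected solution: x = {1, 1}
}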
1473 
1474 /**
1475  * @brief Inverts a matrix
1476  * @param[in] A The matrix to invert
1477  * @note Uses a shortcut for inverting a 2-by-2 matrix
1478  */
1479 inline MFEM_HOST_DEVICE tensor<double, 2, 2> inv(const tensor<double, 2, 2>& A)
1480 {
1481  double inv_detA(1.0 / det(A));
1482 
1483  tensor<double, 2, 2> invA{};
1484 
1485  invA[0][0] = A[1][1] * inv_detA;
1486  invA[0][1] = -A[0][1] * inv_detA;
1487  invA[1][0] = -A[1][0] * inv_detA;
1488  invA[1][1] = A[0][0] * inv_detA;
1489 
1490  return invA;
1491 }
1492 
1493 /**
1494  * @overload
1495  * @note Uses a shortcut for inverting a 3-by-3 matrix
1496  */
1497 inline MFEM_HOST_DEVICE tensor<double, 3, 3> inv(const tensor<double, 3, 3>& A)
1498 {
1499  double inv_detA(1.0 / det(A));
1500 
1501  tensor<double, 3, 3> invA{};
1502 
1503  invA[0][0] = (A[1][1] * A[2][2] - A[1][2] * A[2][1]) * inv_detA;
1504  invA[0][1] = (A[0][2] * A[2][1] - A[0][1] * A[2][2]) * inv_detA;
1505  invA[0][2] = (A[0][1] * A[1][2] - A[0][2] * A[1][1]) * inv_detA;
1506  invA[1][0] = (A[1][2] * A[2][0] - A[1][0] * A[2][2]) * inv_detA;
1507  invA[1][1] = (A[0][0] * A[2][2] - A[0][2] * A[2][0]) * inv_detA;
1508  invA[1][2] = (A[0][2] * A[1][0] - A[0][0] * A[1][2]) * inv_detA;
1509  invA[2][0] = (A[1][0] * A[2][1] - A[1][1] * A[2][0]) * inv_detA;
1510  invA[2][1] = (A[0][1] * A[2][0] - A[0][0] * A[2][1]) * inv_detA;
1511  invA[2][2] = (A[0][0] * A[1][1] - A[0][1] * A[1][0]) * inv_detA;
1512 
1513  return invA;
1514 }
1515 /**
1516  * @overload
1517  * @note For N-by-N matrices with N > 3, requires Gaussian elimination
1518  * with partial pivoting
1519  */
1520 template <typename T, int n> MFEM_HOST_DEVICE
1521 tensor<T, n, n> inv(tensor<T, n, n> A)
1522 {
1523  auto abs = [](double x) { return (x < 0) ? -x : x; };
1524  auto swap = [](tensor<T, n>& x, tensor<T, n>& y)
1525  {
1526  auto tmp = x;
1527  x = y;
1528  y = tmp;
1529  };
1530 
1531  tensor<double, n, n> B = Identity<n>();
1532 
1533  for (int i = 0; i < n; i++)
1534  {
1535  // Search for maximum in this column
1536  double max_val = abs(A[i][i]);
1537 
1538  int max_row = i;
1539  for (int j = i + 1; j < n; j++)
1540  {
1541  if (abs(A[j][i]) > max_val)
1542  {
1543  max_val = abs(A[j][i]);
1544  max_row = j;
1545  }
1546  }
1547 
1548  swap(B[max_row], B[i]);
1549  swap(A[max_row], A[i]);
1550 
1551  // zero entries below in this column
1552  for (int j = i + 1; j < n; j++)
1553  {
1554  if (A[j][i] != 0.0)
1555  {
1556  double c = -A[j][i] / A[i][i];
1557  A[j] += c * A[i];
1558  B[j] += c * B[i];
1559  A[j][i] = 0;
1560  }
1561  }
1562  }
1563 
1564  // upper triangular solve
1565  for (int i = n - 1; i >= 0; i--)
1566  {
1567  B[i] = B[i] / A[i][i];
1568  for (int j = i - 1; j >= 0; j--)
1569  {
1570  if (A[j][i] != 0.0)
1571  {
1572  B[j] -= A[j][i] * B[i];
1573  }
1574  }
1575  }
1576 
1577  return B;
1578 }
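// Illustrative sketch, not part of the original header: composing inv() with
// dot() should reproduce the identity up to round-off. `inverse_check_example`
// is hypothetical.
inline MFEM_HOST_DEVICE tensor<double, 3, 3> inverse_check_example(
   const tensor<double, 3, 3>& A)
{
   return dot(A, inv(A)) - Identity<3>();   // approximately the 3x3 zero matrix
}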
1579 
1580 /**
1581  * @overload
1582  * @note when inverting a tensor of dual numbers,
1583  * hardcode the analytic derivative of the
1584  * inverse of a square matrix, rather than
1585  * apply Gauss elimination directly on the dual number types
1586  *
1587  * TODO: compare performance of this hardcoded implementation to just using inv() directly
1588  */
1589 template <typename value_type, typename gradient_type, int n> MFEM_HOST_DEVICE
1590 tensor<dual<value_type, gradient_type>, n, n> inv(
1591  tensor<dual<value_type, gradient_type>, n, n> A)
1592 {
1593  auto invA = inv(get_value(A));
1594  return make_tensor<n, n>([&](int i, int j)
1595  {
1596  auto value = invA[i][j];
1597  gradient_type gradient{};
1598  for (int k = 0; k < n; k++)
1599  {
1600  for (int l = 0; l < n; l++)
1601  {
1602  gradient -= invA[i][k] * A[k][l].gradient * invA[l][j];
1603  }
1604  }
1605  return dual<value_type, gradient_type> {value, gradient};
1606  });
1607 }
1608 
1609 /**
1610  * @brief recursively serialize the entries in a tensor to an output stream.
1611  * Output format uses braces and comma separators to mimic C syntax for multidimensional array
1612  * initialization.
1613  *
1614  * @param[in] os The stream to work with standard output streams
1615  * @param[in] A The tensor to write out
1616  */
1617 template <typename T, int... n>
1618 std::ostream& operator<<(std::ostream& os, const tensor<T, n...>& A)
1619 {
1620  os << '{' << A[0];
1621  for (int i = 1; i < tensor<T, n...>::first_dim; i++)
1622  {
1623  os << ", " << A[i];
1624  }
1625  os << '}';
1626  return os;
1627 }
1628 
1629 /**
1630  * @brief replace all entries in a tensor satisfying |x| < 1.0e-10 by literal zero
1631  * @param[in] A The tensor to "chop"
1632  */
1633 template <int n> MFEM_HOST_DEVICE
1634 tensor<double, n> chop(const tensor<double, n>& A)
1635 {
1636  auto copy = A;
1637  for (int i = 0; i < n; i++)
1638  {
1639  if (copy[i] * copy[i] < 1.0e-20)
1640  {
1641  copy[i] = 0.0;
1642  }
1643  }
1644  return copy;
1645 }
1646 
1647 /// @overload
1648 template <int m, int n> MFEM_HOST_DEVICE
1649 tensor<double, m, n> chop(const tensor<double, m, n>& A)
1650 {
1651  auto copy = A;
1652  for (int i = 0; i < m; i++)
1653  {
1654  for (int j = 0; j < n; j++)
1655  {
1656  if (copy[i][j] * copy[i][j] < 1.0e-20)
1657  {
1658  copy[i][j] = 0.0;
1659  }
1660  }
1661  }
1662  return copy;
1663 }
1664 
1665 /// @cond
1666 namespace detail
1667 {
1668 template <typename T1, typename T2>
1669 struct outer_prod;
1670 
1671 template <int... m, int... n>
1672 struct outer_prod<tensor<double, m...>, tensor<double, n...>>
1673 {
1674  using type = tensor<double, m..., n...>;
1675 };
1676 
1677 template <int... n>
1678 struct outer_prod<double, tensor<double, n...>>
1679 {
1680  using type = tensor<double, n...>;
1681 };
1682 
1683 template <int... n>
1684 struct outer_prod<tensor<double, n...>, double>
1685 {
1686  using type = tensor<double, n...>;
1687 };
1688 
1689 template <>
1690 struct outer_prod<double, double>
1691 {
1692  using type = tensor<double>;
1693 };
1694 
1695 template <typename T>
1696 struct outer_prod<zero, T>
1697 {
1698  using type = zero;
1699 };
1700 
1701 template <typename T>
1702 struct outer_prod<T, zero>
1703 {
1704  using type = zero;
1705 };
1706 
1707 } // namespace detail
1708 /// @endcond
1709 
1710 /**
1711  * @brief a type function that returns the tensor type of an outer product of two tensors
1712  * @tparam T1 the first argument to the outer product
1713  * @tparam T2 the second argument to the outer product
1714  */
1715 template <typename T1, typename T2>
1716 using outer_product_t = typename detail::outer_prod<T1, T2>::type;
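// Compile-time checks, not part of the original header, showing the shapes
// computed by outer_product_t.
static_assert(std::is_same<outer_product_t<tensor<double, 2>, tensor<double, 3>>,
              tensor<double, 2, 3>>::value,
              "vector (x) vector is a matrix");
static_assert(std::is_same<outer_product_t<double, zero>, zero>::value,
              "anything (x) zero collapses to zero");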
1717 
1718 /**
1719  * @brief Retrieves the gradient component of a double (which is nothing)
1720  * @return The sentinel, @see zero
1721  */
1722 inline MFEM_HOST_DEVICE zero get_gradient(double /* arg */) { return zero{}; }
1723 
1724 /**
1725  * @brief get the gradient of type `tensor` (note: since its stored type is not a dual
1726  * number, the derivative term is identically zero)
1727  * @return The sentinel, @see zero
1728  */
1729 template <int... n>
1730 MFEM_HOST_DEVICE zero get_gradient(const tensor<double, n...>& /* arg */)
1731 {
1732  return zero{};
1733 }
1734 
1735 /**
1736  * @brief evaluate the change (to first order) in a function, f, given a small change in the input argument, dx.
1737  */
1738 inline MFEM_HOST_DEVICE zero chain_rule(const zero /* df_dx */,
1739  const zero /* dx */) { return zero{}; }
1740 
1741 /**
1742  * @overload
1743  * @note this overload implements a no-op for the case where the gradient w.r.t. an input argument is identically zero
1744  */
1745 template <typename T>
1746 MFEM_HOST_DEVICE zero chain_rule(const zero /* df_dx */,
1747  const T /* dx */)
1748 {
1749  return zero{};
1750 }
1751 
1752 /**
1753  * @overload
1754  * @note this overload implements a no-op for the case where the small change is identically zero
1755  */
1756 template <typename T>
1757 MFEM_HOST_DEVICE zero chain_rule(const T /* df_dx */,
1758  const zero /* dx */)
1759 {
1760  return zero{};
1761 }
1762 
1763 /**
1764  * @overload
1765  * @note for a scalar-valued function of a scalar, the chain rule is just multiplication
1766  */
1767 inline MFEM_HOST_DEVICE double chain_rule(const double df_dx,
1768  const double dx) { return df_dx * dx; }
1769 
1770 /**
1771  * @overload
1772  * @note for a tensor-valued function of a scalar, the chain rule is just scalar multiplication
1773  */
1774 template <int... n>
1775 MFEM_HOST_DEVICE auto chain_rule(const tensor<double, n...>& df_dx,
1776  const double dx) ->
1777 decltype(df_dx * dx)
1778 {
1779  return df_dx * dx;
1780 }
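// Illustrative sketch, not part of the original header: chain_rule applies a
// stored derivative to a perturbation; for scalars it is plain multiplication,
// and `zero` derivatives short-circuit. `chain_rule_example` is hypothetical.
inline MFEM_HOST_DEVICE double chain_rule_example(const tensor<double, 2>& df_dx,
                                                  double dx)
{
   zero   no_dependence{};
   double a = chain_rule(3.0, dx);            // scalar case: 3.0 * dx
   auto   b = chain_rule(df_dx, dx);          // tensor-valued case: df_dx * dx
   auto   c = chain_rule(no_dependence, dx);  // identically zero, elided
   return a + b(0) + c;                       // adding `zero` is a no-op
}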
1781 
1782 template <int n> struct always_false : std::false_type { };
1783 
1784 template <typename T, int... n> struct isotropic_tensor;
1785 
1786 template <typename T, int n>
1787 struct isotropic_tensor<T, n>
1788 {
1789  static_assert(always_false<n> {},
1790  "error: there is no such thing as a rank-1 isotropic tensor!");
1791 };
1792 
1793 // rank-2 isotropic tensors are just identity matrices
1794 template <typename T, int m>
1795 struct isotropic_tensor<T, m, m>
1796 {
1797  MFEM_HOST_DEVICE constexpr T operator()(int i, int j) const
1798  {
1799  return (i == j) * value;
1800  }
1801  T value;
1802 };
1803 
1804 template <int m>
1805 MFEM_HOST_DEVICE constexpr isotropic_tensor<double, m, m> IsotropicIdentity()
1806 {
1807  return isotropic_tensor<double, m, m> {1.0};
1808 }
1809 
1810 template <typename S, typename T, int m> MFEM_HOST_DEVICE constexpr
1811 auto operator*(S scale,
1812  const isotropic_tensor<T, m, m> & I)
1813 -> isotropic_tensor<decltype(S {} * T{}), m, m>
1814 {
1815  return {I.value * scale};
1816 }
1817 
1818 template <typename S, typename T, int m> MFEM_HOST_DEVICE constexpr
1819 auto operator*(const isotropic_tensor<T, m, m> & I,
1820  const S scale)
1821 -> isotropic_tensor<decltype(S {} * T{}), m, m>
1822 {
1823  return {I.value * scale};
1824 }
1825 
1826 template <typename S, typename T, int m> MFEM_HOST_DEVICE constexpr
1827 auto operator+(const isotropic_tensor<S, m, m>& I1,
1828  const isotropic_tensor<T, m, m>& I2)
1829 -> isotropic_tensor<decltype(S {} + T{}), m, m>
1830 {
1831  return {I1.value + I2.value};
1832 }
1833 
1834 template <typename S, typename T, int m> MFEM_HOST_DEVICE constexpr
1835 auto operator-(const isotropic_tensor<S, m, m>& I1,
1836  const isotropic_tensor<T, m, m>& I2)
1837 -> isotropic_tensor<decltype(S {} - T{}), m, m>
1838 {
1839  return {I1.value - I2.value};
1840 }
1841 
1842 template <typename S, typename T, int m> MFEM_HOST_DEVICE //constexpr
1843 auto operator+(const isotropic_tensor<S, m, m>& I,
1844  const tensor<T, m, m>& A)
1845 -> tensor<decltype(S {} + T{}), m, m>
1846 {
1847  tensor<decltype(S{} + T{}), m, m> output{};
1848  for (int i = 0; i < m; i++)
1849  {
1850  for (int j = 0; j < m; j++)
1851  {
1852  output[i][j] = I.value * (i == j) + A[i][j];
1853  }
1854  }
1855  return output;
1856 }
1857 
1858 template <typename S, typename T, int m> MFEM_HOST_DEVICE //constexpr
1859 auto operator+(const tensor<S, m, m>& A,
1860  const isotropic_tensor<T, m, m>& I)
1861 -> tensor<decltype(S {} + T{}), m, m>
1862 {
1863  tensor<decltype(S{} + T{}), m, m> output{};
1864  for (int i = 0; i < m; i++)
1865  {
1866  for (int j = 0; j < m; j++)
1867  {
1868  output[i][j] = A[i][j] + I.value * (i == j);
1869  }
1870  }
1871  return output;
1872 }
1873 
1874 template <typename S, typename T, int m> MFEM_HOST_DEVICE //constexpr
1875 auto operator-(const isotropic_tensor<S, m, m>& I,
1876  const tensor<T, m, m>& A)
1877 -> tensor<decltype(S {} - T{}), m, m>
1878 {
1879  tensor<decltype(S{} - T{}), m, m> output{};
1880  for (int i = 0; i < m; i++)
1881  {
1882  for (int j = 0; j < m; j++)
1883  {
1884  output[i][j] = I.value * (i == j) - A[i][j];
1885  }
1886  }
1887  return output;
1888 }
1889 
1890 template <typename S, typename T, int m> MFEM_HOST_DEVICE // constexpr
1891 auto operator-(const tensor<S, m, m>& A,
1892  const isotropic_tensor<T, m, m>& I)
1893 -> tensor<decltype(S {} - T{}), m, m>
1894 {
1895  tensor<decltype(S{} - T{}), m, m> output{};
1896  for (int i = 0; i < m; i++)
1897  {
1898  for (int j = 0; j < m; j++)
1899  {
1900  output[i][j] = A[i][j] - I.value * (i == j);
1901  }
1902  }
1903  return output;
1904 }
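
// Usage sketch (editor's illustration, not part of the original header):
// purely isotropic arithmetic stays isotropic (only the stored scalar changes),
// while mixing with a dense tensor expands the isotropic term on the fly,
// touching the diagonal only.
//
//   tensor<double, 2, 2> A{{{1.0, 2.0}, {3.0, 4.0}}};
//   auto B = IsotropicIdentity<2>() + A;          // {{2, 2}, {3, 5}}
//   auto C = A - 2.0 * IsotropicIdentity<2>();    // {{-1, 2}, {3, 2}}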
1905 
1906 template <typename S, typename T, int m, int... n> MFEM_HOST_DEVICE constexpr
1907 auto dot(const isotropic_tensor<S, m, m>& I,
1908  const tensor<T, m, n...>& A)
1909 -> tensor<decltype(S {} * T{}), m, n...>
1910 {
1911  return I.value * A;
1912 }
1913 
1914 template <typename S, typename T, int m, int... n> MFEM_HOST_DEVICE //constexpr
1915 auto dot(const tensor<S, n...>& A,
1916  const isotropic_tensor<T, m, m> & I)
1917 -> tensor<decltype(S {} * T{}), n...>
1918 {
1919  constexpr int dimensions[sizeof...(n)] = {n...};
1920  static_assert(dimensions[sizeof...(n) - 1] == m, "the last dimension of A must equal m");
1921  return A * I.value;
1922 }
1923 
1924 template <typename S, typename T, int m, int... n> MFEM_HOST_DEVICE constexpr
1925 auto ddot(const isotropic_tensor<S, m, m>& I,
1926  const tensor<T, m, m>& A)
1927 -> decltype(S {} * T{})
1928 {
1929  return I.value * tr(A);
1930 }
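
// Usage sketch (editor's illustration, not part of the original header):
// contracting with a rank-2 isotropic tensor never forms the identity matrix;
// dot() reduces to rescaling A, and ddot() reduces to a scaled trace.
//
//   tensor<double, 3, 3> A{{{1., 0., 0.}, {0., 2., 0.}, {0., 0., 3.}}};
//   auto Iso = 2.0 * IsotropicIdentity<3>();
//   auto B   = dot(Iso, A);    // 2 * A
//   double s = ddot(Iso, A);   // 2 * tr(A) = 12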
1931 
1932 template <typename T, int m> MFEM_HOST_DEVICE constexpr
1933 auto sym(const isotropic_tensor<T, m, m>& I) -> isotropic_tensor<T, m, m>
1934 {
1935  return I;
1936 }
1937 
1938 template <typename T, int m> MFEM_HOST_DEVICE constexpr
1939 auto antisym(const isotropic_tensor<T, m, m>&) -> zero
1940 {
1941  return zero{};
1942 }
1943 
1944 template <typename T, int m> MFEM_HOST_DEVICE constexpr
1945 auto tr(const isotropic_tensor<T, m, m>& I) -> decltype(T {} * m)
1946 {
1947  return I.value * m;
1948 }
1949 
1950 template <typename T, int m> MFEM_HOST_DEVICE constexpr
1951 auto transpose(const isotropic_tensor<T, m, m>& I) -> isotropic_tensor<T, m, m>
1952 {
1953  return I;
1954 }
1955 
1956 template <typename T, int m> MFEM_HOST_DEVICE constexpr
1957 auto det(const isotropic_tensor<T, m, m>& I) -> T
1958 {
1959  return std::pow(I.value, m);
1960 }
1961 
1962 template <typename T, int m> MFEM_HOST_DEVICE constexpr
1963 auto norm(const isotropic_tensor<T, m, m>& I) -> T
1964 {
1965  return sqrt(I.value * I.value * m);
1966 }
1967 
1968 template <typename T, int m> MFEM_HOST_DEVICE constexpr
1969 auto sqnorm(const isotropic_tensor<T, m, m>& I) -> T
1970 {
1971  return I.value * I.value * m;
1972 }
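
// Usage sketch (editor's illustration, not part of the original header): for a
// rank-2 isotropic tensor value * I of size m x m these reductions have closed
// forms: tr = m * value, det = value^m, norm = |value| * sqrt(m).
//
//   isotropic_tensor<double, 3, 3> J{2.0};
//   double t = tr(J);     // 6.0
//   double d = det(J);    // 8.0
//   double nrm = norm(J); // 2 * sqrt(3)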
1973 
1974 // rank-3 isotropic tensors are scalar multiples of the alternating (Levi-Civita) symbol
1975 template <typename T>
1976 struct isotropic_tensor<T, 3, 3, 3>
1977 {
1978  MFEM_HOST_DEVICE constexpr T operator()(int i, int j, int k) const
1979  {
1980  return 0.5 * (i - j) * (j - k) * (k - i) * value;
1981  }
1982  T value;
1983 };
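
// Usage sketch (editor's illustration, not part of the original header): with
// value == 1 the rank-3 specialization reproduces the Levi-Civita symbol:
// +1 on even permutations of (0,1,2), -1 on odd permutations, 0 when an index repeats.
//
//   isotropic_tensor<double, 3, 3, 3> eps{1.0};
//   double a = eps(0, 1, 2);   // +1
//   double b = eps(1, 0, 2);   // -1
//   double c = eps(0, 0, 2);   //  0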
1984 
1985 // there are 3 linearly-independent rank-4 isotropic tensors,
1986 // so the general one is a linear combination of them
1987 template <typename T, int m>
1988 struct isotropic_tensor<T, m, m, m, m>
1989 {
1990  T c1, c2, c3;
1991 
1992  MFEM_HOST_DEVICE constexpr T operator()(int i, int j, int k, int l) const
1993  {
1994  return c1 * (i == j) * (k == l)
1995  + c2 * ((i == k) * (j == l) + (i == l) * (j == k)) * 0.5
1996  + c3 * ((i == k) * (j == l) - (i == l) * (j == k)) * 0.5;
1997  }
1998 };
1999 
2000 template <int m> MFEM_HOST_DEVICE constexpr
2001 auto SymmetricIdentity() -> isotropic_tensor<double, m, m, m, m>
2002 {
2003  return {0.0, 1.0, 0.0};
2004 }
2005 
2006 template <int m> MFEM_HOST_DEVICE constexpr
2007 auto AntisymmetricIdentity() -> isotropic_tensor<double, m, m, m, m>
2008 {
2009  return {0.0, 0.0, 1.0};
2010 }
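
// Usage sketch (editor's illustration, not part of the original header): the
// three coefficients weight the delta_ij*delta_kl term (which contracts a
// matrix to tr(A) * I), the symmetric projector, and the antisymmetric
// projector, as implemented in ddot() further below.
//
//   auto Isym = SymmetricIdentity<3>();   // coefficients {0, 1, 0}
//   double v = Isym(0, 1, 0, 1);          // 0.5
//   double w = Isym(0, 1, 1, 0);          // 0.5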
2011 
2012 template <typename S, typename T, int m> MFEM_HOST_DEVICE constexpr
2013 auto operator*(S scale,
2014  isotropic_tensor<T, m, m, m, m> I)
2015 -> isotropic_tensor<decltype(S {} * T{}), m, m, m, m>
2016 {
2017  return {I.c1 * scale, I.c2 * scale, I.c3 * scale};
2018 }
2019 
2020 template <typename S, typename T, int m> MFEM_HOST_DEVICE constexpr
2021 auto operator*(isotropic_tensor<S, m, m, m, m> I,
2022  T scale)
2023 -> isotropic_tensor<decltype(S {} * T{}), m, m, m, m>
2024 {
2025  return {I.c1 * scale, I.c2 * scale, I.c3 * scale};
2026 }
2027 
2028 template <typename S, typename T, int m> MFEM_HOST_DEVICE constexpr
2029 auto operator+(isotropic_tensor<S, m, m, m, m> I1,
2030  isotropic_tensor<T, m, m, m, m> I2)
2031 -> isotropic_tensor<decltype(S {} + T{}), m, m, m, m>
2032 {
2033  return {I1.c1 + I2.c1, I1.c2 + I2.c2, I1.c3 + I2.c3};
2034 }
2035 
2036 template <typename S, typename T, int m> MFEM_HOST_DEVICE constexpr
2037 auto operator-(isotropic_tensor<S, m, m, m, m> I1,
2038  isotropic_tensor<T, m, m, m, m> I2)
2039 -> isotropic_tensor<decltype(S {} - T{}), m, m, m, m>
2040 {
2041  return {I1.c1 - I2.c1, I1.c2 - I2.c2, I1.c3 - I2.c3};
2042 }
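
// Usage sketch (editor's illustration, not part of the original header):
// arithmetic on rank-4 isotropic tensors only touches the three stored
// coefficients, so sums and scalings never allocate m^4 entries.
//
//   auto A4 = 2.0 * SymmetricIdentity<3>();      // coefficients {0, 2, 0}
//   auto B4 = A4 + AntisymmetricIdentity<3>();   // coefficients {0, 2, 1}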
2043 
2044 template <typename S, typename T, int m, int... n> MFEM_HOST_DEVICE constexpr
2045 auto ddot(const isotropic_tensor<S, m, m, m, m>& I,
2046  const tensor<T, m, m>& A)
2047 -> tensor<decltype(S {} * T{}), m, m>
2048 {
2049  return I.c1 * tr(A) * Identity<m>() + I.c2 * sym(A) + I.c3 * antisym(A);
2050 }
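
// Usage sketch (editor's illustration, not part of the original header):
// choosing c1 = lambda and c2 = 2 * mu (with c3 = 0) makes ddot() evaluate the
// isotropic linear-elastic law sigma = lambda * tr(eps) * I + 2 * mu * sym(eps)
// without storing the m^4 stiffness entries. `lambda` and `mu` below are
// hypothetical material constants used only for this example.
//
//   double lambda = 1.0, mu = 0.5;
//   isotropic_tensor<double, 3, 3, 3, 3> C{lambda, 2.0 * mu, 0.0};
//   tensor<double, 3, 3> eps{{{0.01, 0.0, 0.0}, {0.0, 0.02, 0.0}, {0.0, 0.0, 0.03}}};
//   tensor<double, 3, 3> sigma = ddot(C, eps);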
2051 
2052 } // namespace internal
2053 
2054 } // namespace mfem
2055 
2056 #endif