MFEM  v4.2.0
Finite element discretization library
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Pages
ttensor.hpp
Go to the documentation of this file.
1 // Copyright (c) 2010-2020, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #ifndef MFEM_TEMPLATE_TENSOR
13 #define MFEM_TEMPLATE_TENSOR
14 
15 #include "../config/tconfig.hpp"
16 #include "../linalg/simd.hpp"
17 #include "../general/tassign.hpp"
18 #include "../general/backends.hpp"
19 #include "tlayout.hpp"
20 #include "tmatrix.hpp"
21 
22 // Templated tensor implementation (up to order 4)
23 
24 namespace mfem
25 {
26 
27 // Element-wise tensor operations
28 
29 namespace internal
30 {
31 
32 template <int Rank>
33 struct TensorOps;
34 
35 template <>
36 struct TensorOps<1> // rank = 1
37 {
38  // Assign: A {=,+=,*=} scalar_value
39  template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
40  typename scalar_t>
41  static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
42  const scalar_t value)
43  {
44  MFEM_STATIC_ASSERT(A_layout_t::rank == 1, "invalid rank");
45  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
46  {
47  mfem::Assign<Op>(A_data[A_layout.ind(i1)], value);
48  }
49  }
50 
51  // Assign: A {=,+=,*=} scalar_value, host+device version
52  template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
53  typename scalar_t>
54  MFEM_HOST_DEVICE
55  static void AssignHD(const A_layout_t &A_layout, A_data_t &A_data,
56  const scalar_t value)
57  {
58  MFEM_STATIC_ASSERT(A_layout_t::rank == 1, "invalid rank");
59  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
60  {
61  mfem::AssignHD<Op>(A_data[A_layout.ind(i1)], value);
62  }
63  }
64 
65  // Assign: A {=,+=,*=} B
66  template <AssignOp::Type Op,
67  typename A_layout_t, typename A_data_t,
68  typename B_layout_t, typename B_data_t>
69  static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
70  const B_layout_t &B_layout, const B_data_t &B_data)
71  {
72  MFEM_STATIC_ASSERT(A_layout_t::rank == 1 && B_layout_t::rank == 1,
73  "invalid ranks");
74  MFEM_STATIC_ASSERT(A_layout_t::dim_1 == B_layout_t::dim_1,
75  "invalid dimensions");
76  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
77  {
78  mfem::Assign<Op>(A_data[A_layout.ind(i1)], B_data[B_layout.ind(i1)]);
79  }
80  }
81 };
82 
83 template <>
84 struct TensorOps<2> // rank = 2
85 {
86  // Assign: A {=,+=,*=} scalar_value
87  template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
88  typename scalar_t>
89  static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
90  scalar_t value)
91  {
92  MFEM_STATIC_ASSERT(A_layout_t::rank == 2, "invalid rank");
93  for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
94  {
95  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
96  {
97  mfem::Assign<Op>(A_data[A_layout.ind(i1,i2)], value);
98  }
99  }
100  }
101 
102  // Assign: A {=,+=,*=} scalar_value, host+device version
103  template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
104  typename scalar_t>
105  MFEM_HOST_DEVICE
106  static void AssignHD(const A_layout_t &A_layout, A_data_t &A_data,
107  scalar_t value)
108  {
109  MFEM_STATIC_ASSERT(A_layout_t::rank == 2, "invalid rank");
110  for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
111  {
112  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
113  {
114  mfem::AssignHD<Op>(A_data[A_layout.ind(i1,i2)], value);
115  }
116  }
117  }
118 
119  // Assign: A {=,+=,*=} B
120  template <AssignOp::Type Op,
121  typename A_layout_t, typename A_data_t,
122  typename B_layout_t, typename B_data_t>
123  static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
124  const B_layout_t &B_layout, const B_data_t &B_data)
125  {
126  MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 2,
127  "invalid ranks");
128  MFEM_STATIC_ASSERT(A_layout_t::dim_1 == B_layout_t::dim_1 &&
129  A_layout_t::dim_2 == B_layout_t::dim_2,
130  "invalid dimensions");
131  for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
132  {
133  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
134  {
135  mfem::Assign<Op>(A_data[A_layout.ind(i1,i2)],
136  B_data[B_layout.ind(i1,i2)]);
137  }
138  }
139  }
140 };
141 
142 template <>
143 struct TensorOps<3> // rank = 3
144 {
145  // Assign: A {=,+=,*=} scalar_value
146  template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
147  typename scalar_t>
148  static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
149  scalar_t value)
150  {
151  MFEM_STATIC_ASSERT(A_layout_t::rank == 3, "invalid rank");
152  for (int i3 = 0; i3 < A_layout_t::dim_3; i3++)
153  {
154  for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
155  {
156  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
157  {
158  mfem::Assign<Op>(A_data[A_layout.ind(i1,i2,i3)], value);
159  }
160  }
161  }
162  }
163 
164  // Assign: A {=,+=,*=} B
165  template <AssignOp::Type Op,
166  typename A_layout_t, typename A_data_t,
167  typename B_layout_t, typename B_data_t>
168  static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
169  const B_layout_t &B_layout, const B_data_t &B_data)
170  {
171  MFEM_STATIC_ASSERT(A_layout_t::rank == 3 && B_layout_t::rank == 3,
172  "invalid ranks");
173  MFEM_STATIC_ASSERT(A_layout_t::dim_1 == B_layout_t::dim_1 &&
174  A_layout_t::dim_2 == B_layout_t::dim_2 &&
175  A_layout_t::dim_3 == B_layout_t::dim_3,
176  "invalid dimensions");
177  for (int i3 = 0; i3 < A_layout_t::dim_3; i3++)
178  {
179  for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
180  {
181  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
182  {
183  mfem::Assign<Op>(A_data[A_layout.ind(i1,i2,i3)],
184  B_data[B_layout.ind(i1,i2,i3)]);
185  }
186  }
187  }
188  }
189 };
190 
191 template <>
192 struct TensorOps<4> // rank = 4
193 {
194  // Assign: A {=,+=,*=} scalar_value
195  template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
196  typename scalar_t>
197  static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
198  scalar_t value)
199  {
200  MFEM_STATIC_ASSERT(A_layout_t::rank == 4, "invalid rank");
201  for (int i4 = 0; i4 < A_layout_t::dim_4; i4++)
202  {
203  for (int i3 = 0; i3 < A_layout_t::dim_3; i3++)
204  {
205  for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
206  {
207  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
208  {
209  mfem::Assign<Op>(A_data[A_layout.ind(i1,i2,i3,i4)], value);
210  }
211  }
212  }
213  }
214  }
215 
216  // Assign: A {=,+=,*=} B
217  template <AssignOp::Type Op,
218  typename A_layout_t, typename A_data_t,
219  typename B_layout_t, typename B_data_t>
220  static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
221  const B_layout_t &B_layout, const B_data_t &B_data)
222  {
223  MFEM_STATIC_ASSERT(A_layout_t::rank == 4 && B_layout_t::rank == 4,
224  "invalid ranks");
225  MFEM_STATIC_ASSERT(A_layout_t::dim_1 == B_layout_t::dim_1 &&
226  A_layout_t::dim_2 == B_layout_t::dim_2 &&
227  A_layout_t::dim_3 == B_layout_t::dim_3 &&
228  A_layout_t::dim_4 == B_layout_t::dim_4,
229  "invalid dimensions");
230  for (int i4 = 0; i4 < A_layout_t::dim_4; i4++)
231  {
232  for (int i3 = 0; i3 < A_layout_t::dim_3; i3++)
233  {
234  for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
235  {
236  for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
237  {
238  mfem::Assign<Op>(A_data[A_layout.ind(i1,i2,i3,i4)],
239  B_data[B_layout.ind(i1,i2,i3,i4)]);
240  }
241  }
242  }
243  }
244  }
245 };
246 
247 } // namespace mfem::internal
248 
249 // Tensor or sub-tensor assign function: A {=,+=,*=} scalar_value.
250 template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
251  typename scalar_t>
252 inline void TAssign(const A_layout_t &A_layout, A_data_t &A_data,
253  const scalar_t value)
254 {
255  internal::TensorOps<A_layout_t::rank>::
256  template Assign<Op>(A_layout, A_data, value);
257 }
258 
259 // Tensor or sub-tensor assign function: A {=,+=,*=} scalar_value.
260 // Host+device version.
261 template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
262  typename scalar_t>
263 MFEM_HOST_DEVICE
264 inline void TAssignHD(const A_layout_t &A_layout, A_data_t &A_data,
265  const scalar_t value)
266 {
267  internal::TensorOps<A_layout_t::rank>::
268  template AssignHD<Op>(A_layout, A_data, value);
269 }
270 
271 // Tensor assign function: A {=,+=,*=} B that allows different input and output
272 // layouts. With suitable layouts this function can be used to permute
273 // (transpose) tensors, extract sub-tensors, etc.
274 template <AssignOp::Type Op,
275  typename A_layout_t, typename A_data_t,
276  typename B_layout_t, typename B_data_t>
277 inline void TAssign(const A_layout_t &A_layout, A_data_t &A_data,
278  const B_layout_t &B_layout, const B_data_t &B_data)
279 {
280  internal::TensorOps<A_layout_t::rank>::
281  template Assign<Op>(A_layout, A_data, B_layout, B_data);
282 }
283 
284 
285 // classes TVector, TMatrix, TTensor3, TTensor4
286 
287 template <int S, typename data_t = double, bool align = false>
288 struct TVector
289 {
290 public:
291  static const int size = S;
292  static const int aligned_size = align ? MFEM_ALIGN_SIZE(S,data_t) : size;
293  typedef data_t data_type;
295 
297  static const layout_type layout;
298 
299  data_t &operator[](int i) { return data[i]; }
300  const data_t &operator[](int i) const { return data[i]; }
301 
302  template <AssignOp::Type Op>
303  void Assign(const data_t d)
304  {
305  TAssign<Op>(layout, data, d);
306  }
307 
308  template <AssignOp::Type Op, typename src_data_t>
309  void Assign(const src_data_t &src)
310  {
311  TAssign<Op>(layout, data, layout, src);
312  }
313 
314  template <AssignOp::Type Op, typename dest_data_t>
315  void AssignTo(dest_data_t &dest)
316  {
317  TAssign<Op>(layout, dest, layout, data);
318  }
319 
320  void Set(const data_t d)
321  {
322  Assign<AssignOp::Set>(d);
323  }
324 
325  template <typename src_data_t>
326  void Set(const src_data_t &src)
327  {
328  Assign<AssignOp::Set>(src);
329  }
330 
331  template <typename dest_data_t>
332  void Assemble(dest_data_t &dest) const
333  {
334  AssignTo<AssignOp::Add>(dest);
335  }
336 
337  void Scale(const data_t scale)
338  {
339  Assign<AssignOp::Mult>(scale);
340  }
341 };
342 
// Out-of-class definition of the static 'layout' member (one instance per
// TVector instantiation).
template <int S, typename data_t, bool align>
const typename TVector<S,data_t,align>::layout_type
TVector<S,data_t,align>::layout = layout_type();
346 
347 
348 template <int N1, int N2, typename data_t = double, bool align = false>
349 struct TMatrix : public TVector<N1*N2,data_t,align>
350 {
352  using base_class::size;
353  using base_class::data;
354 
356  static const layout_type layout;
357  static inline int ind(int i1, int i2) { return layout.ind(i1,i2); }
358 
359  data_t &operator()(int i, int j) { return data[ind(i,j)]; }
360  const data_t &operator()(int i, int j) const { return data[ind(i,j)]; }
361 
362  inline data_t Det() const
363  {
364  return TDet<data_t>(layout, data);
365  }
366 
367  inline void Adjugate(TMatrix<N1,N2,data_t> &adj) const
368  {
369  TAdjugate<data_t>(layout, data, layout, adj.data);
370  }
371 
372  // Compute the adjugate and the determinant of a (small) matrix.
373  inline data_t AdjDet(TMatrix<N2,N1,data_t> &adj) const
374  {
375  return TAdjDet<data_t>(layout, data, layout, adj.data);
376  }
377 };
378 
// Out-of-class definition of the static 'layout' member (one instance per
// TMatrix instantiation).
template <int N1, int N2, typename data_t, bool align>
const typename TMatrix<N1,N2,data_t,align>::layout_type
TMatrix<N1,N2,data_t,align>::layout = layout_type();
382 
383 
384 template <int N1, int N2, int N3, typename data_t = double, bool align = false>
385 struct TTensor3 : TVector<N1*N2*N3,data_t,align>
386 {
388  using base_class::size;
389  using base_class::data;
390 
392  static const layout_type layout;
393  static inline int ind(int i1, int i2, int i3)
394  { return layout.ind(i1,i2,i3); }
395 
396  data_t &operator()(int i, int j, int k) { return data[ind(i,j,k)]; }
397  const data_t &operator()(int i, int j, int k) const
398  { return data[ind(i,j,k)]; }
399 };
400 
// Out-of-class definition of the static 'layout' member (one instance per
// TTensor3 instantiation).
template <int N1, int N2, int N3, typename data_t, bool align>
const typename TTensor3<N1,N2,N3,data_t,align>::layout_type
TTensor3<N1,N2,N3,data_t,align>::layout = layout_type();
404 
405 template <int N1, int N2, int N3, int N4, typename data_t = double,
406  bool align = false>
407 struct TTensor4 : TVector<N1*N2*N3*N4,data_t,align>
408 {
410  using base_class::size;
411  using base_class::data;
412 
414  static const layout_type layout;
415  static inline int ind(int i1, int i2, int i3, int i4)
416  { return layout.ind(i1,i2,i3,i4); }
417 
418  data_t &operator()(int i, int j, int k, int l)
419  { return data[ind(i,j,k,l)]; }
420  const data_t &operator()(int i, int j, int k, int l) const
421  { return data[ind(i,j,k,l)]; }
422 };
423 
// Out-of-class definition of the static 'layout' member (one instance per
// TTensor4 instantiation).
template <int N1, int N2, int N3, int N4, typename data_t, bool align>
const typename TTensor4<N1,N2,N3,N4,data_t,align>::layout_type
TTensor4<N1,N2,N3,N4,data_t,align>::layout = layout_type();
427 
428 
429 // Tensor products
430 
// Tensor contraction: C_{i,j,k} {=|+=} \sum_s A_{s,j} B_{i,s,k}, i.e. the
// first index of A is contracted with the middle index of B. Implemented as
// one matrix-matrix product per fixed last index k, operating on the rank-2
// slices B(:,:,k) and C(:,:,k). 'Add' selects accumulation (+=) vs.
// assignment (=).
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void Mult_1_2(const A_layout_t &A_layout, const A_data_t &A_data,
              const B_layout_t &B_layout, const B_data_t &B_data,
              const C_layout_t &C_layout, C_data_t &C_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 3 &&
                      C_layout_t::rank == 3, "invalid ranks");
   const int B3 = B_layout_t::dim_3;
   const int C3 = C_layout_t::dim_3;
   MFEM_STATIC_ASSERT(B3 == C3, "invalid dimensions");
   for (int k = 0; k < B3; k++)
   {
      // Note the argument order: the k-slice of B is passed as the left
      // factor and A as the right one, matching the contraction formula
      // above (B's middle index is contracted with A's first index).
      Mult_AB<Add>(B_layout.ind3(k), B_data,
                   A_layout, A_data,
                   C_layout.ind3(k), C_data);
   }
}
453 
// Tensor contraction: C_{i,j,k} {=|+=} \sum_s A_{i,s} B_{s,j,k}, i.e. the
// second index of A is contracted with the first index of B. The (j,k) index
// pair of both B and C is flattened with merge_23(), which reduces the whole
// operation to a single matrix-matrix product. 'Add' selects accumulation
// (+=) vs. assignment (=).
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void Mult_2_1(const A_layout_t &A_layout, const A_data_t &A_data,
              const B_layout_t &B_layout, const B_data_t &B_data,
              const C_layout_t &C_layout, C_data_t &C_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 3 &&
                      C_layout_t::rank == 3, "invalid ranks");
   Mult_AB<Add>(A_layout, A_data,
                B_layout.merge_23(), B_data,
                C_layout.merge_23(), C_data);
}
470 
// C_{i,k,j,l} {=|+=} \sum_s A_{s,i} A_{s,j} B_{k,s,l}
// The rank-2 factor A enters twice; the contraction index s runs over A's
// first dimension. 'Add' selects accumulation (true) vs. assignment (false).
// Two alternative loop orderings are kept below, selected at compile time
// with the preprocessor; both accumulate into C, so C is zeroed first when
// Add is false.
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void TensorAssemble(const A_layout_t &A_layout, const A_data_t &A_data,
                    const B_layout_t &B_layout, const B_data_t &B_data,
                    const C_layout_t &C_layout, C_data_t &C_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 3 &&
                      C_layout_t::rank == 4, "invalid ranks");
   const int A1 = A_layout_t::dim_1;
   const int A2 = A_layout_t::dim_2;
   const int B1 = B_layout_t::dim_1;
   const int B2 = B_layout_t::dim_2;
   const int B3 = B_layout_t::dim_3;
   const int C1 = C_layout_t::dim_1;
   const int C2 = C_layout_t::dim_2;
   const int C3 = C_layout_t::dim_3;
   const int C4 = C_layout_t::dim_4;
   // Dimension compatibility implied by the contraction formula above.
   MFEM_STATIC_ASSERT(A1 == B2 && A2 == C1 && A2 == C3 && B1 == C2 && B3 == C4,
                      "invalid dimensions");

#if 1
   // Impl == 3: outer loops over the two A-indices (j,i), inner loops over
   // the B-indices (l,k) and the contraction index s.
   MFEM_FLOPS_ADD(3*A1*A2*A2*B1*B3);
   if (!Add) { TAssign<AssignOp::Set>(C_layout, C_data, 0.0); }
   for (int j = 0; j < A2; j++)
   {
      for (int i = 0; i < A2; i++)
      {
         for (int l = 0; l < B3; l++)
         {
            for (int k = 0; k < B1; k++)
            {
               for (int s = 0; s < A1; s++)
               {
                  // C(i,k,j,l) += A(s,i) * A(s,j) * B(k,s,l);
                  C_data[C_layout.ind(i,k,j,l)] +=
                     A_data[A_layout.ind(s,i)] *
                     A_data[A_layout.ind(s,j)] *
                     B_data[B_layout.ind(k,s,l)];
               }
            }
         }
      }
   }
#else
   // Impl == 1: contraction index s as the outermost loop.
   if (!Add) { TAssign<AssignOp::Set>(C_layout, C_data, 0.0); }
   for (int s = 0; s < A1; s++)
   {
      for (int i = 0; i < A2; i++)
      {
         for (int k = 0; k < B1; k++)
         {
            for (int j = 0; j < A2; j++)
            {
               for (int l = 0; l < B3; l++)
               {
                  // C(i,k,j,l) += A(s,i) * A(s,j) * B(k,s,l);
                  C_data[C_layout.ind(i,k,j,l)] +=
                     A_data[A_layout.ind(s,i)] *
                     A_data[A_layout.ind(s,j)] *
                     B_data[B_layout.ind(k,s,l)];
               }
            }
         }
      }
   }
#endif
}
544 
// D_{i,k,j,l} {=|+=} \sum_s A_{i,s} B_{s,j} C_{k,s,l}
// Three alternative implementations are kept below, selected at compile time
// with the preprocessor; all factor the triple product through a temporary.
// NOTE(review): the declarations of the temporaries 'H' (and 'F' in the last
// branch) are not visible in this extract -- they appear to have been dropped
// by the documentation generator (the original line numbering skips the lines
// where they would be declared); verify against the original ttensor.hpp.
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t,
          typename D_layout_t, typename D_data_t>
MFEM_ALWAYS_INLINE inline
void TensorAssemble(const A_layout_t &A_layout, const A_data_t &A_data,
                    const B_layout_t &B_layout, const B_data_t &B_data,
                    const C_layout_t &C_layout, const C_data_t &C_data,
                    const D_layout_t &D_layout, D_data_t &D_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 2 &&
                      C_layout_t::rank == 3 && D_layout_t::rank == 4,
                      "invalid ranks");
   const int A1 = A_layout_t::dim_1;
   const int A2 = A_layout_t::dim_2;
   const int B1 = B_layout_t::dim_1;
   const int B2 = B_layout_t::dim_2;
   const int C1 = C_layout_t::dim_1;
   const int C2 = C_layout_t::dim_2;
   const int C3 = C_layout_t::dim_3;
   const int D1 = D_layout_t::dim_1;
   const int D2 = D_layout_t::dim_2;
   const int D3 = D_layout_t::dim_3;
   const int D4 = D_layout_t::dim_4;
   // Dimension compatibility implied by the contraction formula above.
   MFEM_STATIC_ASSERT(A2 == B1 && A2 == C2 && A1 == D1 && B2 == D3 &&
                      C1 == D2 && C3 == D4, "invalid dimensions");

#if 0
   // Variant 1: build the full rank-4 temporary H, then contract with B.
   // H_{i,k,s,l} = A_{i,s} C_{k,s,l}
   for (int l = 0; l < C3; l++)
   {
      for (int s = 0; s < B1; s++)
      {
         for (int k = 0; k < C1; k++)
         {
            for (int i = 0; i < A1; i++)
            {
               H(i,k,s,l) = A_data[A_layout.ind(i,s)]*
                            C_data[C_layout.ind(k,s,l)];
            }
         }
      }
   }
   // D_{(i,k),j,l} = \sum_s B_{s,j} H_{(i,k),s,l}
   Mult_1_2<Add>(B_layout, B_data, H.layout.merge_12(), H,
                 D_layout.merge_12(), D_data);
#elif 1
   // Variant 2 (active): per-l rank-3 temporary H(l), smaller working set.
   MFEM_FLOPS_ADD(A1*B1*C1*C3); // computation of H(l)
   for (int l = 0; l < C3; l++)
   {
      // H(l)_{i,k,s} = A_{i,s} C_{k,s,l}
      for (int s = 0; s < B1; s++)
      {
         for (int k = 0; k < C1; k++)
         {
            for (int i = 0; i < A1; i++)
            {
               H(i,k,s) = A_data[A_layout.ind(i,s)]*
                          C_data[C_layout.ind(k,s,l)];
            }
         }
      }
      // D_{(i,k),j,l} = \sum_s H(l)_{(i,k),s} B_{s,j}
      Mult_AB<Add>(H.layout.merge_12(), H, B_layout, B_data,
                   D_layout.merge_12().ind3(l), D_data);
   }
#else
   // Variant 3: combine B and C into the rank-4 temporary F, contract with A.
   for (int l = 0; l < C3; l++)
   {
      for (int j = 0; j < B2; j++)
      {
         for (int k = 0; k < C1; k++)
         {
            for (int s = 0; s < B1; s++)
            {
               F(s,k,j,l) = B_data[B_layout.ind(s,j)]*
                            C_data[C_layout.ind(k,s,l)];
            }
         }
      }
   }
   Mult_AB<Add>(A_layout, A_data, F.layout.merge_34().merge_23(), F,
                D_layout.merge_34().merge_23(), D_data);
#endif
}
635 
636 
637 // C_{i,j,k,l} {=|+=} A_{i,j,k} B_{j,l}
638 template <AssignOp::Type Op,
639  typename A_layout_t, typename A_data_t,
640  typename B_layout_t, typename B_data_t,
641  typename C_layout_t, typename C_data_t>
642 MFEM_ALWAYS_INLINE inline
643 void TensorProduct(const A_layout_t &a, const A_data_t &A,
644  const B_layout_t &b, const B_data_t &B,
645  const C_layout_t &c, C_data_t &C)
646 {
647  const int A1 = A_layout_t::dim_1;
648  const int A2 = A_layout_t::dim_2;
649  const int A3 = A_layout_t::dim_3;
650  const int B1 = B_layout_t::dim_1;
651  const int B2 = B_layout_t::dim_2;
652  const int C1 = C_layout_t::dim_1;
653  const int C2 = C_layout_t::dim_2;
654  const int C3 = C_layout_t::dim_3;
655  const int C4 = C_layout_t::dim_4;
656  MFEM_STATIC_ASSERT(A1 == C1 && A2 == B1 && A2 == C2 && A3 == C3 && B2 == C4,
657  "invalid dimensions");
658 
659  MFEM_FLOPS_ADD(A1*A2*A3*B2);
660  for (int l = 0; l < B2; l++)
661  {
662  for (int k = 0; k < A3; k++)
663  {
664  for (int j = 0; j < A2; j++)
665  {
666  for (int i = 0; i < A1; i++)
667  {
668  mfem::Assign<Op>(C[c.ind(i,j,k,l)],
669  A[a.ind(i,j,k)]*B[b.ind(j,l)]);
670  }
671  }
672  }
673  }
674 }
675 
676 } // namespace mfem
677 
678 #endif // MFEM_TEMPLATE_TENSOR
void Assemble(dest_data_t &dest) const
Definition: ttensor.hpp:332
ColumnMajorLayout3D< N1, N2, N3 > layout_type
Definition: ttensor.hpp:391
ColumnMajorLayout4D< N1, N2, N3, N4 > layout_type
Definition: ttensor.hpp:413
static const layout_type layout
Definition: ttensor.hpp:356
void Adjugate(TMatrix< N1, N2, data_t > &adj) const
Definition: ttensor.hpp:367
void Assign(const src_data_t &src)
Definition: ttensor.hpp:309
void AssignTo(dest_data_t &dest)
Definition: ttensor.hpp:315
const data_t & operator()(int i, int j, int k) const
Definition: ttensor.hpp:397
void Assign(const data_t d)
Definition: ttensor.hpp:303
static const layout_type layout
Definition: ttensor.hpp:392
static int ind(int i1, int i2, int i3)
Definition: ttensor.hpp:393
const data_t & operator()(int i, int j) const
Definition: ttensor.hpp:360
data_t & operator()(int i, int j)
Definition: ttensor.hpp:359
data_t Det() const
Definition: ttensor.hpp:362
MFEM_ALWAYS_INLINE void TensorAssemble(const A_layout_t &A_layout, const A_data_t &A_data, const B_layout_t &B_layout, const B_data_t &B_data, const C_layout_t &C_layout, C_data_t &C_data)
Definition: ttensor.hpp:477
void Add(const DenseMatrix &A, const DenseMatrix &B, double alpha, DenseMatrix &C)
C = A + alpha*B.
Definition: densemat.cpp:1928
static const layout_type layout
Definition: ttensor.hpp:414
static int ind(int i1, int i2, int i3, int i4)
Definition: tlayout.hpp:396
static int ind(int i1, int i2, int i3, int i4)
Definition: ttensor.hpp:415
double b
Definition: lissajous.cpp:42
MFEM_HOST_DEVICE lvalue_t & AssignHD(lvalue_t &a, const rvalue_t &b)
Definition: tassign.hpp:144
void TAssign(const A_layout_t &A_layout, A_data_t &A_data, const scalar_t value)
Definition: ttensor.hpp:252
data_t & operator[](int i)
Definition: ttensor.hpp:299
const data_t & operator()(int i, int j, int k, int l) const
Definition: ttensor.hpp:420
static int ind(int i1, int i2, int i3)
Definition: tlayout.hpp:249
TVector< N1 *N2, data_t, align > base_class
Definition: ttensor.hpp:351
MFEM_ALWAYS_INLINE void Mult_2_1(const A_layout_t &A_layout, const A_data_t &A_data, const B_layout_t &B_layout, const B_data_t &B_data, const C_layout_t &C_layout, C_data_t &C_data)
Definition: ttensor.hpp:460
ColumnMajorLayout2D< N1, N2 > layout_type
Definition: ttensor.hpp:355
MFEM_ALWAYS_INLINE void TensorProduct(const A_layout_t &a, const A_data_t &A, const B_layout_t &b, const B_data_t &B, const C_layout_t &c, C_data_t &C)
Definition: ttensor.hpp:643
static const layout_type layout
Definition: ttensor.hpp:297
lvalue_t & Assign(lvalue_t &a, const rvalue_t &b)
Definition: tassign.hpp:137
double a
Definition: lissajous.cpp:41
MFEM_HOST_DEVICE void TAssignHD(const A_layout_t &A_layout, A_data_t &A_data, const scalar_t value)
Definition: ttensor.hpp:264
data_t data_type
Definition: ttensor.hpp:293
TVector< N1 *N2 *N3 *N4, data_t, align > base_class
Definition: ttensor.hpp:409
static int ind(int i1, int i2)
Definition: ttensor.hpp:357
data_t AdjDet(TMatrix< N2, N1, data_t > &adj) const
Definition: ttensor.hpp:373
MFEM_ALWAYS_INLINE void Mult_1_2(const A_layout_t &A_layout, const A_data_t &A_data, const B_layout_t &B_layout, const B_data_t &B_data, const C_layout_t &C_layout, C_data_t &C_data)
Definition: ttensor.hpp:437
static const int size
Definition: ttensor.hpp:291
TVector< N1 *N2 *N3, data_t, align > base_class
Definition: ttensor.hpp:387
StridedLayout1D< S, 1 > layout_type
Definition: ttensor.hpp:296
static MFEM_HOST_DEVICE int ind(int i1, int i2)
Definition: tlayout.hpp:107
data_t data[aligned_size >0?aligned_size:1]
Definition: ttensor.hpp:294
void Set(const src_data_t &src)
Definition: ttensor.hpp:326
const data_t & operator[](int i) const
Definition: ttensor.hpp:300
static const int aligned_size
Definition: ttensor.hpp:292
void Set(const data_t d)
Definition: ttensor.hpp:320
void Scale(const data_t scale)
Definition: ttensor.hpp:337
data_t & operator()(int i, int j, int k, int l)
Definition: ttensor.hpp:418
data_t & operator()(int i, int j, int k)
Definition: ttensor.hpp:396