MFEM v3.3.2
Finite element discretization library
ttensor.hpp
// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#ifndef MFEM_TEMPLATE_TENSOR
#define MFEM_TEMPLATE_TENSOR

#include "../config/tconfig.hpp"
#include "../general/tassign.hpp"
#include "tlayout.hpp"
#include "tmatrix.hpp"

// Templated tensor implementation (up to order 4)

namespace mfem
{

// Element-wise tensor operations

namespace internal
{

template <int Rank>
struct TensorOps;

template <>
struct TensorOps<1> // rank = 1
{
   // Assign: A {=,+=,*=} scalar_value
   template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
             typename scalar_t>
   static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
                      scalar_t value)
   {
      MFEM_STATIC_ASSERT(A_layout_t::rank == 1, "invalid rank");
      for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
      {
         mfem::Assign<Op>(A_data[A_layout.ind(i1)], value);
      }
   }

   // Assign: A {=,+=,*=} B
   template <AssignOp::Type Op,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
                      const B_layout_t &B_layout, const B_data_t &B_data)
   {
      MFEM_STATIC_ASSERT(A_layout_t::rank == 1 && B_layout_t::rank == 1,
                         "invalid ranks");
      MFEM_STATIC_ASSERT(A_layout_t::dim_1 == B_layout_t::dim_1,
                         "invalid dimensions");
      for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
      {
         mfem::Assign<Op>(A_data[A_layout.ind(i1)], B_data[B_layout.ind(i1)]);
      }
   }
};

template <>
struct TensorOps<2> // rank = 2
{
   // Assign: A {=,+=,*=} scalar_value
   template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
             typename scalar_t>
   static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
                      scalar_t value)
   {
      MFEM_STATIC_ASSERT(A_layout_t::rank == 2, "invalid rank");
      for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
      {
         for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
         {
            mfem::Assign<Op>(A_data[A_layout.ind(i1,i2)], value);
         }
      }
   }

   // Assign: A {=,+=,*=} B
   template <AssignOp::Type Op,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
                      const B_layout_t &B_layout, const B_data_t &B_data)
   {
      MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 2,
                         "invalid ranks");
      MFEM_STATIC_ASSERT(A_layout_t::dim_1 == B_layout_t::dim_1 &&
                         A_layout_t::dim_2 == B_layout_t::dim_2,
                         "invalid dimensions");
      for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
      {
         for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
         {
            mfem::Assign<Op>(A_data[A_layout.ind(i1,i2)],
                             B_data[B_layout.ind(i1,i2)]);
         }
      }
   }
};

template <>
struct TensorOps<3> // rank = 3
{
   // Assign: A {=,+=,*=} scalar_value
   template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
             typename scalar_t>
   static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
                      scalar_t value)
   {
      MFEM_STATIC_ASSERT(A_layout_t::rank == 3, "invalid rank");
      for (int i3 = 0; i3 < A_layout_t::dim_3; i3++)
      {
         for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
         {
            for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
            {
               mfem::Assign<Op>(A_data[A_layout.ind(i1,i2,i3)], value);
            }
         }
      }
   }

   // Assign: A {=,+=,*=} B
   template <AssignOp::Type Op,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
                      const B_layout_t &B_layout, const B_data_t &B_data)
   {
      MFEM_STATIC_ASSERT(A_layout_t::rank == 3 && B_layout_t::rank == 3,
                         "invalid ranks");
      MFEM_STATIC_ASSERT(A_layout_t::dim_1 == B_layout_t::dim_1 &&
                         A_layout_t::dim_2 == B_layout_t::dim_2 &&
                         A_layout_t::dim_3 == B_layout_t::dim_3,
                         "invalid dimensions");
      for (int i3 = 0; i3 < A_layout_t::dim_3; i3++)
      {
         for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
         {
            for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
            {
               mfem::Assign<Op>(A_data[A_layout.ind(i1,i2,i3)],
                                B_data[B_layout.ind(i1,i2,i3)]);
            }
         }
      }
   }
};

template <>
struct TensorOps<4> // rank = 4
{
   // Assign: A {=,+=,*=} scalar_value
   template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
             typename scalar_t>
   static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
                      scalar_t value)
   {
      MFEM_STATIC_ASSERT(A_layout_t::rank == 4, "invalid rank");
      for (int i4 = 0; i4 < A_layout_t::dim_4; i4++)
      {
         for (int i3 = 0; i3 < A_layout_t::dim_3; i3++)
         {
            for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
            {
               for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
               {
                  mfem::Assign<Op>(A_data[A_layout.ind(i1,i2,i3,i4)], value);
               }
            }
         }
      }
   }

   // Assign: A {=,+=,*=} B
   template <AssignOp::Type Op,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static void Assign(const A_layout_t &A_layout, A_data_t &A_data,
                      const B_layout_t &B_layout, const B_data_t &B_data)
   {
      MFEM_STATIC_ASSERT(A_layout_t::rank == 4 && B_layout_t::rank == 4,
                         "invalid ranks");
      MFEM_STATIC_ASSERT(A_layout_t::dim_1 == B_layout_t::dim_1 &&
                         A_layout_t::dim_2 == B_layout_t::dim_2 &&
                         A_layout_t::dim_3 == B_layout_t::dim_3 &&
                         A_layout_t::dim_4 == B_layout_t::dim_4,
                         "invalid dimensions");
      for (int i4 = 0; i4 < A_layout_t::dim_4; i4++)
      {
         for (int i3 = 0; i3 < A_layout_t::dim_3; i3++)
         {
            for (int i2 = 0; i2 < A_layout_t::dim_2; i2++)
            {
               for (int i1 = 0; i1 < A_layout_t::dim_1; i1++)
               {
                  mfem::Assign<Op>(A_data[A_layout.ind(i1,i2,i3,i4)],
                                   B_data[B_layout.ind(i1,i2,i3,i4)]);
               }
            }
         }
      }
   }
};

} // namespace mfem::internal

// Tensor or sub-tensor assign function: A {=,+=,*=} scalar_value.
template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
          typename scalar_t>
inline void TAssign(const A_layout_t &A_layout, A_data_t &A_data,
                    scalar_t value)
{
   internal::TensorOps<A_layout_t::rank>::
   template Assign<Op>(A_layout, A_data, value);
}

// Tensor assign function: A {=,+=,*=} B that allows different input and output
// layouts. With suitable layouts this function can be used to permute
// (transpose) tensors, extract sub-tensors, etc.
template <AssignOp::Type Op,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t>
inline void TAssign(const A_layout_t &A_layout, A_data_t &A_data,
                    const B_layout_t &B_layout, const B_data_t &B_data)
{
   internal::TensorOps<A_layout_t::rank>::
   template Assign<Op>(A_layout, A_data, B_layout, B_data);
}
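
// Usage sketch (illustrative, not from the MFEM documentation): with the
// classes defined below (e.g. TMatrix), the two TAssign overloads perform
// element-wise scalar and tensor assignment through a layout object, e.g.
//
//    TMatrix<2,3> A, B;
//    TAssign<AssignOp::Set>(B.layout, B.data, 2.0);   // B = 2
//    TAssign<AssignOp::Set>(A.layout, A.data,
//                           B.layout, B.data);        // A = B
//    TAssign<AssignOp::Add>(A.layout, A.data, 1.0);   // A += 1
//    TAssign<AssignOp::Mult>(A.layout, A.data, 0.5);  // A *= 0.5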


// classes TVector, TMatrix, TTensor3, TTensor4

template <int S, typename data_t = double, bool align = false>
struct TVector
{
public:
   static const int size = S;
   static const int aligned_size = align ? MFEM_ALIGN_SIZE(S,data_t) : size;
   typedef data_t data_type;
   data_t data[aligned_size>0?aligned_size:1];

   typedef StridedLayout1D<S,1> layout_type;
   static const layout_type layout;

   data_t &operator[](int i) { return data[i]; }
   const data_t &operator[](int i) const { return data[i]; }

   template <AssignOp::Type Op>
   void Assign(const data_t d)
   {
      TAssign<Op>(layout, data, d);
   }

   template <AssignOp::Type Op, typename src_data_t>
   void Assign(const src_data_t &src)
   {
      TAssign<Op>(layout, data, layout, src);
   }

   template <AssignOp::Type Op, typename dest_data_t>
   void AssignTo(dest_data_t &dest)
   {
      TAssign<Op>(layout, dest, layout, data);
   }

   void Set(const data_t d)
   {
      Assign<AssignOp::Set>(d);
   }

   template <typename src_data_t>
   void Set(const src_data_t &src)
   {
      Assign<AssignOp::Set>(src);
   }

   template <typename dest_data_t>
   void Assemble(dest_data_t &dest) const
   {
      AssignTo<AssignOp::Add>(dest);
   }

   void Scale(const data_t scale)
   {
      Assign<AssignOp::Mult>(scale);
   }
};

template <int S, typename data_t, bool align>
const typename TVector<S,data_t,align>::layout_type
TVector<S,data_t,align>::layout = layout_type();
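
// Usage sketch (illustrative, not from the MFEM documentation): TVector is a
// fixed-size 1D tensor over a static StridedLayout1D; the member helpers wrap
// TAssign calls on its own data, e.g.
//
//    TVector<4> v;
//    v.Set(1.0);                  // v[i] = 1
//    v.Scale(3.0);                // v[i] *= 3
//    double w[4] = { 1, 2, 3, 4 };
//    v.Assign<AssignOp::Add>(w);  // v[i] += w[i]
//    double v0 = v[0];            // element access via operator[]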


template <int N1, int N2, typename data_t = double, bool align = false>
struct TMatrix : public TVector<N1*N2,data_t,align>
{
   typedef TVector<N1*N2,data_t,align> base_class;
   using base_class::size;
   using base_class::data;

   typedef ColumnMajorLayout2D<N1,N2> layout_type;
   static const layout_type layout;
   static inline int ind(int i1, int i2) { return layout.ind(i1,i2); }

   data_t &operator()(int i, int j) { return data[ind(i,j)]; }
   const data_t &operator()(int i, int j) const { return data[ind(i,j)]; }

   inline data_t Det() const
   {
      return TDet<data_t>(layout, data);
   }

   inline void Adjugate(TMatrix<N1,N2,data_t> &adj) const
   {
      TAdjugate<data_t>(layout, data, layout, adj.data);
   }

   // Compute the adjugate and the determinant of a (small) matrix.
   inline data_t AdjDet(TMatrix<N2,N1,data_t> &adj) const
   {
      return TAdjDet<data_t>(layout, data, layout, adj.data);
   }
};

template <int N1, int N2, typename data_t, bool align>
const typename TMatrix<N1,N2,data_t,align>::layout_type
TMatrix<N1,N2,data_t,align>::layout = layout_type();
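
// Usage sketch (illustrative, not from the MFEM documentation): TMatrix adds
// column-major 2D indexing and the small square-matrix helpers Det(),
// Adjugate() and AdjDet() on top of the TVector storage, e.g.
//
//    TMatrix<2,2> M, adj;
//    M(0,0) = 4.0; M(0,1) = 3.0;
//    M(1,0) = 2.0; M(1,1) = 1.0;
//    double det  = M.Det();        // 4*1 - 3*2 = -2
//    double det2 = M.AdjDet(adj);  // adjugate of M, plus its determinant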


template <int N1, int N2, int N3, typename data_t = double, bool align = false>
struct TTensor3 : TVector<N1*N2*N3,data_t,align>
{
   typedef TVector<N1*N2*N3,data_t,align> base_class;
   using base_class::size;
   using base_class::data;

   typedef ColumnMajorLayout3D<N1,N2,N3> layout_type;
   static const layout_type layout;
   static inline int ind(int i1, int i2, int i3)
   { return layout.ind(i1,i2,i3); }

   data_t &operator()(int i, int j, int k) { return data[ind(i,j,k)]; }
   const data_t &operator()(int i, int j, int k) const
   { return data[ind(i,j,k)]; }
};

template <int N1, int N2, int N3, typename data_t, bool align>
const typename TTensor3<N1,N2,N3,data_t,align>::layout_type
TTensor3<N1,N2,N3,data_t,align>::layout = layout_type();

template <int N1, int N2, int N3, int N4, typename data_t = double,
          bool align = false>
struct TTensor4 : TVector<N1*N2*N3*N4,data_t,align>
{
   typedef TVector<N1*N2*N3*N4,data_t,align> base_class;
   using base_class::size;
   using base_class::data;

   typedef ColumnMajorLayout4D<N1,N2,N3,N4> layout_type;
   static const layout_type layout;
   static inline int ind(int i1, int i2, int i3, int i4)
   { return layout.ind(i1,i2,i3,i4); }

   data_t &operator()(int i, int j, int k, int l)
   { return data[ind(i,j,k,l)]; }
   const data_t &operator()(int i, int j, int k, int l) const
   { return data[ind(i,j,k,l)]; }
};

template <int N1, int N2, int N3, int N4, typename data_t, bool align>
const typename TTensor4<N1,N2,N3,N4,data_t,align>::layout_type
TTensor4<N1,N2,N3,N4,data_t,align>::layout = layout_type();
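
// Usage sketch (illustrative, not from the MFEM documentation): TTensor3 and
// TTensor4 are fixed-size rank-3/rank-4 tensors stored column-major (first
// index fastest) and indexed through operator(), e.g.
//
//    TTensor3<2,3,4> T;
//    T.Set(0.0);
//    T(1,2,3) = 5.0;         // entry at linear offset layout.ind(1,2,3)
//
//    TTensor4<2,2,2,2> Q;
//    Q.Set(1.0);
//    double q = Q(0,1,0,1);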


// Tensor products

// C_{i,j,k} {=|+=} \sum_s A_{s,j} B_{i,s,k}
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void Mult_1_2(const A_layout_t &A_layout, const A_data_t &A_data,
              const B_layout_t &B_layout, const B_data_t &B_data,
              const C_layout_t &C_layout, C_data_t &C_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 3 &&
                      C_layout_t::rank == 3, "invalid ranks");
   const int B3 = B_layout_t::dim_3;
   const int C3 = C_layout_t::dim_3;
   MFEM_STATIC_ASSERT(B3 == C3, "invalid dimensions");
   for (int k = 0; k < B3; k++)
   {
      Mult_AB<Add>(B_layout.ind3(k), B_data,
                   A_layout, A_data,
                   C_layout.ind3(k), C_data);
   }
}
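
// Usage sketch (illustrative, not from the MFEM documentation): Mult_1_2
// contracts the first index of A with the middle index of B, one k-slice at
// a time. Shapes: A is (S x J), B is (I x S x K), C is (I x J x K), e.g.
//
//    TMatrix<4,3>    A;   // A(s,j)
//    TTensor3<2,4,5> B;   // B(i,s,k)
//    TTensor3<2,3,5> C;   // C(i,j,k)
//    A.Set(1.0); B.Set(1.0);
//    Mult_1_2<false>(A.layout, A.data, B.layout, B.data, C.layout, C.data);
//    // C(i,j,k) = \sum_s A(s,j) B(i,s,k) = 4 for every entry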

// C_{i,j,k} {=|+=} \sum_s A_{i,s} B_{s,j,k}
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void Mult_2_1(const A_layout_t &A_layout, const A_data_t &A_data,
              const B_layout_t &B_layout, const B_data_t &B_data,
              const C_layout_t &C_layout, C_data_t &C_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 3 &&
                      C_layout_t::rank == 3, "invalid ranks");
   Mult_AB<Add>(A_layout, A_data,
                B_layout.merge_23(), B_data,
                C_layout.merge_23(), C_data);
}
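
// Usage sketch (illustrative, not from the MFEM documentation): Mult_2_1
// contracts the second index of A with the first index of B by viewing B and
// C as matrices with their last two dimensions merged. Shapes: A is (I x S),
// B is (S x J x K), C is (I x J x K), e.g.
//
//    TMatrix<2,4>    A;   // A(i,s)
//    TTensor3<4,3,5> B;   // B(s,j,k)
//    TTensor3<2,3,5> C;   // C(i,j,k)
//    A.Set(1.0); B.Set(2.0);
//    Mult_2_1<false>(A.layout, A.data, B.layout, B.data, C.layout, C.data);
//    // C(i,j,k) = \sum_s A(i,s) B(s,j,k) = 8 for every entry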

// C_{i,k,j,l} {=|+=} \sum_s A_{s,i} A_{s,j} B_{k,s,l}
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void TensorAssemble(const A_layout_t &A_layout, const A_data_t &A_data,
                    const B_layout_t &B_layout, const B_data_t &B_data,
                    const C_layout_t &C_layout, C_data_t &C_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 3 &&
                      C_layout_t::rank == 4, "invalid ranks");
   const int A1 = A_layout_t::dim_1;
   const int A2 = A_layout_t::dim_2;
   const int B1 = B_layout_t::dim_1;
   const int B2 = B_layout_t::dim_2;
   const int B3 = B_layout_t::dim_3;
   const int C1 = C_layout_t::dim_1;
   const int C2 = C_layout_t::dim_2;
   const int C3 = C_layout_t::dim_3;
   const int C4 = C_layout_t::dim_4;
   MFEM_STATIC_ASSERT(A1 == B2 && A2 == C1 && A2 == C3 && B1 == C2 && B3 == C4,
                      "invalid dimensions");

#if 1
   // Impl == 3
   MFEM_FLOPS_ADD(3*A1*A2*A2*B1*B3);
   if (!Add) { TAssign<AssignOp::Set>(C_layout, C_data, 0.0); }
   for (int j = 0; j < A2; j++)
   {
      for (int i = 0; i < A2; i++)
      {
         for (int l = 0; l < B3; l++)
         {
            for (int k = 0; k < B1; k++)
            {
               for (int s = 0; s < A1; s++)
               {
                  // C(i,k,j,l) += A(s,i) * A(s,j) * B(k,s,l);
                  C_data[C_layout.ind(i,k,j,l)] +=
                     A_data[A_layout.ind(s,i)] *
                     A_data[A_layout.ind(s,j)] *
                     B_data[B_layout.ind(k,s,l)];
               }
            }
         }
      }
   }
#else
   // Impl == 1
   if (!Add) { TAssign<AssignOp::Set>(C_layout, C_data, 0.0); }
   for (int s = 0; s < A1; s++)
   {
      for (int i = 0; i < A2; i++)
      {
         for (int k = 0; k < B1; k++)
         {
            for (int j = 0; j < A2; j++)
            {
               for (int l = 0; l < B3; l++)
               {
                  // C(i,k,j,l) += A(s,i) * A(s,j) * B(k,s,l);
                  C_data[C_layout.ind(i,k,j,l)] +=
                     A_data[A_layout.ind(s,i)] *
                     A_data[A_layout.ind(s,j)] *
                     B_data[B_layout.ind(k,s,l)];
               }
            }
         }
      }
   }
#endif
}
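
// Usage sketch (illustrative, not from the MFEM documentation): with A(s,i)
// of shape (Q x D) and B(k,s,l) of shape (K x Q x L), this overload produces
// C(i,k,j,l) of shape (D x K x D x L), e.g.
//
//    TMatrix<4,3>      A;   // A(s,i), Q = 4, D = 3
//    TTensor3<2,4,5>   B;   // B(k,s,l), K = 2, L = 5
//    TTensor4<3,2,3,5> C;   // C(i,k,j,l)
//    A.Set(1.0); B.Set(1.0);
//    TensorAssemble<false>(A.layout, A.data, B.layout, B.data,
//                          C.layout, C.data);
//    // C(i,k,j,l) = \sum_s A(s,i) A(s,j) B(k,s,l) = 4 for every entry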

// D_{i,k,j,l} {=|+=} \sum_s A_{i,s} B_{s,j} C_{k,s,l}
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t,
          typename D_layout_t, typename D_data_t>
MFEM_ALWAYS_INLINE inline
void TensorAssemble(const A_layout_t &A_layout, const A_data_t &A_data,
                    const B_layout_t &B_layout, const B_data_t &B_data,
                    const C_layout_t &C_layout, const C_data_t &C_data,
                    const D_layout_t &D_layout, D_data_t &D_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 2 &&
                      C_layout_t::rank == 3 && D_layout_t::rank == 4,
                      "invalid ranks");
   const int A1 = A_layout_t::dim_1;
   const int A2 = A_layout_t::dim_2;
   const int B1 = B_layout_t::dim_1;
   const int B2 = B_layout_t::dim_2;
   const int C1 = C_layout_t::dim_1;
   const int C2 = C_layout_t::dim_2;
   const int C3 = C_layout_t::dim_3;
   const int D1 = D_layout_t::dim_1;
   const int D2 = D_layout_t::dim_2;
   const int D3 = D_layout_t::dim_3;
   const int D4 = D_layout_t::dim_4;
   MFEM_STATIC_ASSERT(A2 == B1 && A2 == C2 && A1 == D1 && B2 == D3 &&
                      C1 == D2 && C3 == D4, "invalid dimensions");

#if 0
   // work tensor, H(i,k,s,l), reconstructed from its use below
   TTensor4<A1,C1,B1,C3> H;
   // H_{i,k,s,l} = A_{i,s} C_{k,s,l}
   for (int l = 0; l < C3; l++)
   {
      for (int s = 0; s < B1; s++)
      {
         for (int k = 0; k < C1; k++)
         {
            for (int i = 0; i < A1; i++)
            {
               H(i,k,s,l) = A_data[A_layout.ind(i,s)]*
                            C_data[C_layout.ind(k,s,l)];
            }
         }
      }
   }
   // D_{(i,k),j,l} = \sum_s B_{s,j} H_{(i,k),s,l}
   Mult_1_2<Add>(B_layout, B_data, H.layout.merge_12(), H,
                 D_layout.merge_12(), D_data);
#elif 1
   MFEM_FLOPS_ADD(A1*B1*C1*C3); // computation of H(l)
   for (int l = 0; l < C3; l++)
   {
      // per-slice work tensor, H(l)_{i,k,s}, reconstructed from its use below
      TTensor3<A1,C1,B1> H;
      // H(l)_{i,k,s} = A_{i,s} C_{k,s,l}
      for (int s = 0; s < B1; s++)
      {
         for (int k = 0; k < C1; k++)
         {
            for (int i = 0; i < A1; i++)
            {
               H(i,k,s) = A_data[A_layout.ind(i,s)]*
                          C_data[C_layout.ind(k,s,l)];
            }
         }
      }
      // D_{(i,k),j,l} = \sum_s H(l)_{(i,k),s} B_{s,j}
      Mult_AB<Add>(H.layout.merge_12(), H, B_layout, B_data,
                   D_layout.merge_12().ind3(l), D_data);
   }
#else
   // work tensor, F(s,k,j,l), reconstructed from its use below
   TTensor4<B1,C1,B2,C3> F;
   for (int l = 0; l < C3; l++)
   {
      for (int j = 0; j < B2; j++)
      {
         for (int k = 0; k < C1; k++)
         {
            for (int s = 0; s < B1; s++)
            {
               F(s,k,j,l) = B_data[B_layout.ind(s,j)]*
                            C_data[C_layout.ind(k,s,l)];
            }
         }
      }
   }
   Mult_AB<Add>(A_layout, A_data, F.layout.merge_34().merge_23(), F,
                D_layout.merge_34().merge_23(), D_data);
#endif
}
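
// Usage sketch (illustrative, not from the MFEM documentation): the four-
// operand overload contracts the shared index s of A(i,s), B(s,j) and
// C(k,s,l) into D(i,k,j,l), e.g.
//
//    TMatrix<3,4>      A;   // A(i,s)
//    TMatrix<4,2>      B;   // B(s,j)
//    TTensor3<5,4,6>   C;   // C(k,s,l)
//    TTensor4<3,5,2,6> D;   // D(i,k,j,l)
//    A.Set(1.0); B.Set(1.0); C.Set(1.0);
//    TensorAssemble<false>(A.layout, A.data, B.layout, B.data,
//                          C.layout, C.data, D.layout, D.data);
//    // D(i,k,j,l) = \sum_s A(i,s) B(s,j) C(k,s,l) = 4 for every entry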


// C_{i,j,k,l} {=|+=} A_{i,j,k} B_{j,l}
template <AssignOp::Type Op,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void TensorProduct(const A_layout_t &a, const A_data_t &A,
                   const B_layout_t &b, const B_data_t &B,
                   const C_layout_t &c, C_data_t &C)
{
   const int A1 = A_layout_t::dim_1;
   const int A2 = A_layout_t::dim_2;
   const int A3 = A_layout_t::dim_3;
   const int B1 = B_layout_t::dim_1;
   const int B2 = B_layout_t::dim_2;
   const int C1 = C_layout_t::dim_1;
   const int C2 = C_layout_t::dim_2;
   const int C3 = C_layout_t::dim_3;
   const int C4 = C_layout_t::dim_4;
   MFEM_STATIC_ASSERT(A1 == C1 && A2 == B1 && A2 == C2 && A3 == C3 && B2 == C4,
                      "invalid dimensions");

   MFEM_FLOPS_ADD(A1*A2*A3*B2);
   for (int l = 0; l < B2; l++)
   {
      for (int k = 0; k < A3; k++)
      {
         for (int j = 0; j < A2; j++)
         {
            for (int i = 0; i < A1; i++)
            {
               mfem::Assign<Op>(C[c.ind(i,j,k,l)],
                                A[a.ind(i,j,k)]*B[b.ind(j,l)]);
            }
         }
      }
   }
}
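
// Usage sketch (illustrative, not from the MFEM documentation): TensorProduct
// scales each entry A(i,j,k) by B(j,l) with no summation, so the j index is
// shared between A, B and the rank-4 result C, e.g.
//
//    TTensor3<2,3,4>   A;
//    TMatrix<3,5>      B;
//    TTensor4<2,3,4,5> C;
//    A.Set(2.0); B.Set(3.0);
//    TensorProduct<AssignOp::Set>(A.layout, A.data, B.layout, B.data,
//                                 C.layout, C.data);
//    // C(i,j,k,l) = A(i,j,k) B(j,l) = 6 for every entry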

} // namespace mfem

#endif // MFEM_TEMPLATE_TENSOR