MFEM v3.2
Finite element discretization library
tmatrix.hpp
// Copyright (c) 2010, Lawrence Livermore National Security, LLC. Produced at
// the Lawrence Livermore National Laboratory. LLNL-CODE-443211. All Rights
// reserved. See file COPYRIGHT for details.
//
// This file is part of the MFEM library. For more information and source code
// availability see http://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the GNU Lesser General Public License (as published by the Free
// Software Foundation) version 2.1 dated February 1999.

#ifndef MFEM_TEMPLATE_MATRIX
#define MFEM_TEMPLATE_MATRIX

#include "../config/tconfig.hpp"
#include "../general/tassign.hpp"

namespace mfem
{

// Matrix-matrix products

// C {=|+=} A.B -- simple version (no blocks)
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void sMult_AB(const A_layout_t &A_layout, const A_data_t &A_data,
              const B_layout_t &B_layout, const B_data_t &B_data,
              const C_layout_t &C_layout, C_data_t &C_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 2 &&
                      C_layout_t::rank == 2, "invalid ranks");
   const int A1 = A_layout_t::dim_1;
   const int A2 = A_layout_t::dim_2;
   const int B1 = B_layout_t::dim_1;
   const int B2 = B_layout_t::dim_2;
   const int C1 = C_layout_t::dim_1;
   const int C2 = C_layout_t::dim_2;
   MFEM_STATIC_ASSERT(A2 == B1 && A1 == C1 && B2 == C2,
                      "invalid dimensions");

   MFEM_FLOPS_ADD(Add ? 2*A1*A2*B2 : 2*A1*A2*B2-A1*B2);
   for (int b2 = 0; b2 < B2; b2++)
   {
      for (int s = 0; s < A2; s++)
      {
         for (int a1 = 0; a1 < A1; a1++)
         {
            if (!Add && s == 0)
            {
               // C(a1,b2) = A(a1,s) * B(s,b2);
               C_data[C_layout.ind(a1,b2)] =
                  A_data[A_layout.ind(a1,s)] * B_data[B_layout.ind(s,b2)];
            }
            else
            {
               // C(a1,b2) += A(a1,s) * B(s,b2);
               C_data[C_layout.ind(a1,b2)] +=
                  A_data[A_layout.ind(a1,s)] * B_data[B_layout.ind(s,b2)];
            }
         }
      }
   }
}

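// Usage sketch (assuming a minimal layout type like the hypothetical
// ColMajor2x2 below; MFEM's own templated layout classes provide the same
// interface): sMult_AB only requires layout objects exposing compile-time
// `rank`, `dim_1`, `dim_2` constants and an `ind(i,j)` index map, plus data
// objects indexable with operator[].
//
//    struct ColMajor2x2
//    {
//       static const int rank = 2;
//       static const int dim_1 = 2, dim_2 = 2;
//       int ind(int i, int j) const { return i + 2*j; }
//    };
//
//    double A[4] = { 1., 2., 3., 4. };  // 2x2, column-major
//    double B[4] = { 5., 6., 7., 8. };
//    double C[4];
//    sMult_AB<false>(ColMajor2x2(), A, ColMajor2x2(), B, ColMajor2x2(), C);
//    // Add == false: C is overwritten with A.B;
//    // Add == true:  A.B is accumulated into C.
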
// C {=|+=} A.B -- block version
template <int bA1, int bA2, int bB2, // block sizes
          bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void bMult_AB(const A_layout_t &A_layout, const A_data_t &A_data,
              const B_layout_t &B_layout, const B_data_t &B_data,
              const C_layout_t &C_layout, C_data_t &C_data)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 2 &&
                      C_layout_t::rank == 2, "invalid ranks");
   const int A1 = A_layout_t::dim_1;
   const int A2 = A_layout_t::dim_2;
   const int B1 = B_layout_t::dim_1;
   const int B2 = B_layout_t::dim_2;
   const int C1 = C_layout_t::dim_1;
   const int C2 = C_layout_t::dim_2;
   MFEM_STATIC_ASSERT(A2 == B1 && A1 == C1 && B2 == C2,
                      "invalid dimensions");

   const int rA1 = A1%bA1;
   const int rA2 = A2%bA2;
   const int rB2 = B2%bB2;

   for (int b2_b = 0; b2_b < B2/bB2; b2_b++)
   {
      if (A2/bA2 > 0)
      {
         // s_b == 0
         for (int a1_b = 0; a1_b < A1/bA1; a1_b++)
         {
            sMult_AB<Add>(
               A_layout.template sub<bA1,bA2>(a1_b*bA1,0), A_data,
               B_layout.template sub<bA2,bB2>(0,b2_b*bB2), B_data,
               C_layout.template sub<bA1,bB2>(a1_b*bA1,b2_b*bB2), C_data);
         }
         if (rA1)
         {
            sMult_AB<Add>(
               A_layout.template sub<rA1,bA2>(A1-rA1,0), A_data,
               B_layout.template sub<bA2,bB2>(0,b2_b*bB2), B_data,
               C_layout.template sub<rA1,bB2>(A1-rA1,b2_b*bB2), C_data);
         }
         for (int s_b = 1; s_b < A2/bA2; s_b++)
         {
            for (int a1_b = 0; a1_b < A1/bA1; a1_b++)
            {
               sMult_AB<true>(
                  A_layout.template sub<bA1,bA2>(a1_b*bA1,s_b*bA2), A_data,
                  B_layout.template sub<bA2,bB2>(s_b*bA2,b2_b*bB2), B_data,
                  C_layout.template sub<bA1,bB2>(a1_b*bA1,b2_b*bB2), C_data);
            }
            if (rA1)
            {
               sMult_AB<true>(
                  A_layout.template sub<rA1,bA2>(A1-rA1,s_b*bA2), A_data,
                  B_layout.template sub<bA2,bB2>(s_b*bA2,b2_b*bB2), B_data,
                  C_layout.template sub<rA1,bB2>(A1-rA1,b2_b*bB2), C_data);
            }
         }
      }
      if (rA2)
      {
         const bool rAdd = Add || (A2/bA2 > 0);
         for (int a1_b = 0; a1_b < A1/bA1; a1_b++)
         {
            sMult_AB<rAdd>(
               A_layout.template sub<bA1,rA2>(a1_b*bA1,A2-rA2), A_data,
               B_layout.template sub<rA2,bB2>(A2-rA2,b2_b*bB2), B_data,
               C_layout.template sub<bA1,bB2>(a1_b*bA1,b2_b*bB2), C_data);
         }
         if (rA1)
         {
            sMult_AB<rAdd>(
               A_layout.template sub<rA1,rA2>(A1-rA1,A2-rA2), A_data,
               B_layout.template sub<rA2,bB2>(A2-rA2,b2_b*bB2), B_data,
               C_layout.template sub<rA1,bB2>(A1-rA1,b2_b*bB2), C_data);
         }
      }
   }
   if (rB2)
   {
      if (A2/bA2 > 0)
      {
         // s_b == 0
         for (int a1_b = 0; a1_b < A1/bA1; a1_b++)
         {
            sMult_AB<Add>(
               A_layout.template sub<bA1,bA2>(a1_b*bA1,0), A_data,
               B_layout.template sub<bA2,rB2>(0,B2-rB2), B_data,
               C_layout.template sub<bA1,rB2>(a1_b*bA1,B2-rB2), C_data);
         }
         if (rA1)
         {
            sMult_AB<Add>(
               A_layout.template sub<rA1,bA2>(A1-rA1,0), A_data,
               B_layout.template sub<bA2,rB2>(0,B2-rB2), B_data,
               C_layout.template sub<rA1,rB2>(A1-rA1,B2-rB2), C_data);
         }
      }
      if (A2/bA2 > 1)
      {
         for (int s_b = 1; s_b < A2/bA2; s_b++)
         {
            for (int a1_b = 0; a1_b < A1/bA1; a1_b++)
            {
               sMult_AB<true>(
                  A_layout.template sub<bA1,bA2>(a1_b*bA1,s_b*bA2), A_data,
                  B_layout.template sub<bA2,rB2>(s_b*bA2,B2-rB2), B_data,
                  C_layout.template sub<bA1,rB2>(a1_b*bA1,B2-rB2), C_data);
            }
            if (rA1)
            {
               sMult_AB<true>(
                  A_layout.template sub<rA1,bA2>(A1-rA1,s_b*bA2), A_data,
                  B_layout.template sub<bA2,rB2>(s_b*bA2,B2-rB2), B_data,
                  C_layout.template sub<rA1,rB2>(A1-rA1,B2-rB2), C_data);
            }
         }
      }
      if (rA2)
      {
         const bool rAdd = Add || (A2/bA2 > 0);
         for (int a1_b = 0; a1_b < A1/bA1; a1_b++)
         {
            sMult_AB<rAdd>(
               A_layout.template sub<bA1,rA2>(a1_b*bA1,A2-rA2), A_data,
               B_layout.template sub<rA2,rB2>(A2-rA2,B2-rB2), B_data,
               C_layout.template sub<bA1,rB2>(a1_b*bA1,B2-rB2), C_data);
         }
         if (rA1)
         {
            sMult_AB<rAdd>(
               A_layout.template sub<rA1,rA2>(A1-rA1,A2-rA2), A_data,
               B_layout.template sub<rA2,rB2>(A2-rA2,B2-rB2), B_data,
               C_layout.template sub<rA1,rB2>(A1-rA1,B2-rB2), C_data);
         }
      }
   }
}

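// Note on the blocking above: bMult_AB tiles C into bA1 x bB2 blocks and
// splits the contraction (inner) dimension into pieces of size bA2,
// delegating each tile product to sMult_AB on sub-layouts obtained with
// `sub<>()`. The remainders rA1, rA2 and rB2 cover dimensions that are not
// multiples of the block sizes. Along the contraction dimension, the first
// piece uses the caller's Add flag and every later piece accumulates
// (sMult_AB<true>); when only a remainder piece exists (A2/bA2 == 0), rAdd
// falls back to the caller's Add flag.
//
// A hypothetical call with explicit 4x4x4 blocking might look like:
//
//    bMult_AB<4,4,4,false>(A_layout, A_data, B_layout, B_data,
//                          C_layout, C_data);
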
template <bool Add,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t,
          typename C_layout_t, typename C_data_t>
MFEM_ALWAYS_INLINE inline
void Mult_AB(const A_layout_t &A_layout, const A_data_t &A_data,
             const B_layout_t &B_layout, const B_data_t &B_data,
             const C_layout_t &C_layout, C_data_t &C_data)
{
   const int b = MFEM_TEMPLATE_BLOCK_SIZE;
   bMult_AB<b,b,b,Add>(A_layout, A_data, B_layout, B_data, C_layout, C_data);
}


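// Mult_AB simply forwards to bMult_AB with cubic blocking, using the
// compile-time constant MFEM_TEMPLATE_BLOCK_SIZE made available through the
// configuration header included above. Unlike sMult_AB, the blocked path also
// requires the layouts to provide the compile-time sub-matrix view
// `template sub<M,N>(i,j)` used to form the tiles.
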
// Small matrix operations (determinant, adjugate, ...) defined by specialization

namespace internal
{

template <int N1, int N2>
struct MatrixOps { };

template <>
struct MatrixOps<1,1>
{
   // Compute det(A).
   template <typename scalar_t, typename layout_t, typename data_t>
   static inline scalar_t Det(const layout_t &a, const data_t &A)
   {
      return A[a.ind(0,0)];
   }

   // Compute det(A). Batched version: D[i] {=,+=,*=} det(A[i,*,*])
   template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
             typename D_data_t>
   static inline void Det(const A_layout_t &a, const A_data_t &A, D_data_t &D)
   {
      const int M = A_layout_t::dim_1;
      for (int i = 0; i < M; i++)
      {
         Assign<Op>(D[i], A[a.ind(i,0,0)]);
      }
   }

   // Compute B = adj(A).
   template <typename scalar_t,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static inline void Adjugate(const A_layout_t &a, const A_data_t &A,
                               const B_layout_t &b, B_data_t &B)
   {
      B[b.ind(0,0)] = scalar_t(1);
   }

   // Compute adj(A) and det(A).
   template <typename scalar_t,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static inline scalar_t AdjDet(const A_layout_t &a, const A_data_t &A,
                                 const B_layout_t &b, B_data_t &B)
   {
      Adjugate<scalar_t>(a, A, b, B);
      return Det<scalar_t>(a, A);
   }
};

template <>
struct MatrixOps<2,2>
{
   // Compute det(A).
   template <typename scalar_t, typename layout_t, typename data_t>
   static inline scalar_t Det(const layout_t &a, const data_t &A)
   {
      MFEM_FLOPS_ADD(3);
      return (A[a.ind(0,0)]*A[a.ind(1,1)] -
              A[a.ind(1,0)]*A[a.ind(0,1)]);
   }

   // Compute det(A). Batched version: D[i] {=,+=,*=} det(A[i,*,*])
   template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
             typename D_data_t>
   static inline void Det(const A_layout_t &a, const A_data_t &A, D_data_t &D)
   {
      const int M = A_layout_t::dim_1;
      MFEM_FLOPS_ADD(3*M);
      for (int i = 0; i < M; i++)
      {
         Assign<Op>(D[i], (A[a.ind(i,0,0)]*A[a.ind(i,1,1)] -
                           A[a.ind(i,1,0)]*A[a.ind(i,0,1)]));
      }
   }

   // Compute B = adj(A).
   template <typename scalar_t,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static inline void Adjugate(const A_layout_t &a, const A_data_t &A,
                               const B_layout_t &b, B_data_t &B)
   {
      B[b.ind(0,0)] = A[a.ind(1,1)];
      B[b.ind(0,1)] = -A[a.ind(0,1)];
      B[b.ind(1,0)] = -A[a.ind(1,0)];
      B[b.ind(1,1)] = A[a.ind(0,0)];
   }

   // Compute adj(A) and det(A).
   template <typename scalar_t,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static inline scalar_t AdjDet(const A_layout_t &a, const A_data_t &A,
                                 const B_layout_t &b, B_data_t &B)
   {
      Adjugate<scalar_t>(a, A, b, B);
      return Det<scalar_t>(a, A);
   }

   template <bool symm> struct Symm;
};

template <>
struct MatrixOps<2,2>::Symm<true>
{
   template <typename A_layout_t, typename A_data_t, typename scalar_t>
   static inline MFEM_ALWAYS_INLINE
   void Set(const A_layout_t &a, A_data_t &A,
            const scalar_t a11, const scalar_t a21, const scalar_t a22)
   {
      A[a.ind(0)] = a11;
      A[a.ind(1)] = a21;
      A[a.ind(2)] = a22;
   }
};

template <>
struct MatrixOps<2,2>::Symm<false>
{
   template <typename A_layout_t, typename A_data_t, typename scalar_t>
   static inline MFEM_ALWAYS_INLINE
   void Set(const A_layout_t &a, A_data_t &A,
            const scalar_t a11, const scalar_t a21, const scalar_t a22)
   {
      A[a.ind(0,0)] = a11;
      A[a.ind(1,0)] = a21;
      A[a.ind(0,1)] = a21;
      A[a.ind(1,1)] = a22;
   }
};

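// Note: the Symm<true> specialization stores only the distinct entries of a
// symmetric 2x2 matrix through a packed, single-index layout, in the order
// (a11, a21, a22); Symm<false> writes the full 2x2 matrix, mirroring a21
// across the diagonal. The 3x3 analogues below follow the same pattern with
// the packed order (a11, a21, a31, a22, a32, a33).
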
template <>
struct MatrixOps<3,3>
{
   // Compute det(A).
   template <typename scalar_t, typename layout_t, typename data_t>
   static inline scalar_t Det(const layout_t &a, const data_t &A)
   {
      MFEM_FLOPS_ADD(14);
      return (A[a.ind(0,0)]*(A[a.ind(1,1)]*A[a.ind(2,2)] -
                             A[a.ind(2,1)]*A[a.ind(1,2)]) -
              A[a.ind(1,0)]*(A[a.ind(0,1)]*A[a.ind(2,2)] -
                             A[a.ind(2,1)]*A[a.ind(0,2)]) +
              A[a.ind(2,0)]*(A[a.ind(0,1)]*A[a.ind(1,2)] -
                             A[a.ind(1,1)]*A[a.ind(0,2)]));
   }

   // Compute det(A). Batched version: D[i] {=,+=,*=} det(A[i,*,*])
   template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
             typename D_data_t>
   static inline void Det(const A_layout_t &a, const A_data_t &A, D_data_t &D)
   {
      const int M = A_layout_t::dim_1;
      MFEM_FLOPS_ADD(14*M);
      for (int i = 0; i < M; i++)
      {
         Assign<Op>(
            D[i],
            A[a.ind(i,0,0)]*(A[a.ind(i,1,1)]*A[a.ind(i,2,2)] -
                             A[a.ind(i,2,1)]*A[a.ind(i,1,2)]) -
            A[a.ind(i,1,0)]*(A[a.ind(i,0,1)]*A[a.ind(i,2,2)] -
                             A[a.ind(i,2,1)]*A[a.ind(i,0,2)]) +
            A[a.ind(i,2,0)]*(A[a.ind(i,0,1)]*A[a.ind(i,1,2)] -
                             A[a.ind(i,1,1)]*A[a.ind(i,0,2)]));
      }
   }

   // Compute B = adj(A).
   template <typename scalar_t,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static inline void Adjugate(const A_layout_t &a, const A_data_t &A,
                               const B_layout_t &b, B_data_t &B)
   {
      MFEM_FLOPS_ADD(27);
      B[b.ind(0,0)] = A[a.ind(1,1)]*A[a.ind(2,2)] - A[a.ind(1,2)]*A[a.ind(2,1)];
      B[b.ind(0,1)] = A[a.ind(0,2)]*A[a.ind(2,1)] - A[a.ind(0,1)]*A[a.ind(2,2)];
      B[b.ind(0,2)] = A[a.ind(0,1)]*A[a.ind(1,2)] - A[a.ind(0,2)]*A[a.ind(1,1)];
      B[b.ind(1,0)] = A[a.ind(1,2)]*A[a.ind(2,0)] - A[a.ind(1,0)]*A[a.ind(2,2)];
      B[b.ind(1,1)] = A[a.ind(0,0)]*A[a.ind(2,2)] - A[a.ind(0,2)]*A[a.ind(2,0)];
      B[b.ind(1,2)] = A[a.ind(0,2)]*A[a.ind(1,0)] - A[a.ind(0,0)]*A[a.ind(1,2)];
      B[b.ind(2,0)] = A[a.ind(1,0)]*A[a.ind(2,1)] - A[a.ind(1,1)]*A[a.ind(2,0)];
      B[b.ind(2,1)] = A[a.ind(0,1)]*A[a.ind(2,0)] - A[a.ind(0,0)]*A[a.ind(2,1)];
      B[b.ind(2,2)] = A[a.ind(0,0)]*A[a.ind(1,1)] - A[a.ind(0,1)]*A[a.ind(1,0)];
   }

   // Compute adj(A) and det(A).
   template <typename scalar_t,
             typename A_layout_t, typename A_data_t,
             typename B_layout_t, typename B_data_t>
   static inline scalar_t AdjDet(const A_layout_t &a, const A_data_t &A,
                                 const B_layout_t &b, B_data_t &B)
   {
      MFEM_FLOPS_ADD(5);
      Adjugate<scalar_t>(a, A, b, B);
      return (A[a.ind(0,0)]*B[b.ind(0,0)] +
              A[a.ind(1,0)]*B[b.ind(0,1)] +
              A[a.ind(2,0)]*B[b.ind(0,2)]);
   }

   template <bool symm> struct Symm;
};

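// Note: the adjugate satisfies adj(A)*A = A*adj(A) = det(A)*I, so for an
// invertible A the inverse is adj(A)/det(A). In AdjDet above, the returned
// value is the cofactor expansion of det(A) along the first column of A,
// reusing the first row of B = adj(A):
//    det(A) = A(0,0)*B(0,0) + A(1,0)*B(0,1) + A(2,0)*B(0,2).
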
template <>
struct MatrixOps<3,3>::Symm<true>
{
   template <typename A_layout_t, typename A_data_t, typename scalar_t>
   static inline MFEM_ALWAYS_INLINE
   void Set(const A_layout_t &a, A_data_t &A,
            const scalar_t a11, const scalar_t a21, const scalar_t a31,
            const scalar_t a22, const scalar_t a32, const scalar_t a33)
   {
      A[a.ind(0)] = a11;
      A[a.ind(1)] = a21;
      A[a.ind(2)] = a31;
      A[a.ind(3)] = a22;
      A[a.ind(4)] = a32;
      A[a.ind(5)] = a33;
   }
};

template <>
struct MatrixOps<3,3>::Symm<false>
{
   template <typename A_layout_t, typename A_data_t, typename scalar_t>
   static inline MFEM_ALWAYS_INLINE
   void Set(const A_layout_t &a, A_data_t &A,
            const scalar_t a11, const scalar_t a21, const scalar_t a31,
            const scalar_t a22, const scalar_t a32, const scalar_t a33)
   {
      A[a.ind(0,0)] = a11;
      A[a.ind(1,0)] = a21;
      A[a.ind(2,0)] = a31;
      A[a.ind(0,1)] = a21;
      A[a.ind(1,1)] = a22;
      A[a.ind(2,1)] = a32;
      A[a.ind(0,2)] = a31;
      A[a.ind(1,2)] = a32;
      A[a.ind(2,2)] = a33;
   }
};

} // namespace mfem::internal

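// The TDet/TAdjugate/TAdjDet wrappers below dispatch to the MatrixOps<N1,N2>
// specializations defined above. Since the primary MatrixOps template is
// empty, only 1x1, 2x2 and 3x3 layouts are supported; other sizes fail to
// compile.
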
// Compute the determinant of a (small) matrix: det(A).
template <typename scalar_t, typename layout_t, typename data_t>
inline scalar_t TDet(const layout_t &a, const data_t &A)
{
   MFEM_STATIC_ASSERT(layout_t::rank == 2, "invalid rank");
   return internal::MatrixOps<layout_t::dim_1,layout_t::dim_2>::
          template Det<scalar_t>(a, A);
}

// Compute the determinants of a set of (small) matrices: D[i] = det(A[i,*,*]).
// The layout of A is (M x N1 x N2) and the size of D is M.
template <AssignOp::Type Op, typename A_layout_t, typename A_data_t,
          typename D_data_t>
inline void TDet(const A_layout_t &a, const A_data_t &A, D_data_t &D)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 3, "invalid rank");
   internal::MatrixOps<A_layout_t::dim_2,A_layout_t::dim_3>::
      template Det<Op>(a, A, D);
}

// Compute the adjugate matrix of a (small) matrix: B = adj(A).
template <typename scalar_t,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t>
inline void TAdjugate(const A_layout_t &a, const A_data_t &A,
                      const B_layout_t &b, B_data_t &B)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 2,
                      "invalid ranks");
   internal::MatrixOps<A_layout_t::dim_1,A_layout_t::dim_2>::
      template Adjugate<scalar_t>(a, A, b, B);
}

// Compute the adjugate and the determinant of a (small) matrix: B = adj(A),
// return det(A).
template <typename scalar_t,
          typename A_layout_t, typename A_data_t,
          typename B_layout_t, typename B_data_t>
inline scalar_t TAdjDet(const A_layout_t &a, const A_data_t &A,
                        const B_layout_t &b, B_data_t &B)
{
   MFEM_STATIC_ASSERT(A_layout_t::rank == 2 && B_layout_t::rank == 2,
                      "invalid ranks");
   return internal::MatrixOps<A_layout_t::dim_1,A_layout_t::dim_2>::
          template AdjDet<scalar_t>(a, A, b, B);
}

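// Usage sketch (assuming the hypothetical column-major 2x2 layout from the
// sMult_AB example above):
//
//    double J[4] = { 1., 2., 3., 4. };  // J = [1 3; 2 4], column-major
//    double adjJ[4];
//    double detJ = TAdjDet<double>(ColMajor2x2(), J, ColMajor2x2(), adjJ);
//    // detJ == -2; adjJ == { 4., -2., -3., 1. }  (adj(J), column-major)
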
} // namespace mfem

#endif // MFEM_TEMPLATE_MATRIX