MFEM v4.8.0
Finite element discretization library
restriction.cpp
1// Copyright (c) 2010-2025, Lawrence Livermore National Security, LLC. Produced
2// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3// LICENSE and NOTICE for details. LLNL-CODE-806117.
4//
5// This file is part of the MFEM library. For more information and source code
6// availability visit https://mfem.org.
7//
8// MFEM is free software; you can redistribute it and/or modify it under the
9// terms of the BSD-3 license. We welcome feedback and contributions, see file
10// CONTRIBUTING.md for details.
11
12#include "restriction.hpp"
14#include "gridfunc.hpp"
15#include "fespace.hpp"
16#include "pgridfunc.hpp"
17#include "qspace.hpp"
18#include "fe/face_map_utils.hpp"
19#include "../general/forall.hpp"
20
21#include <climits>
22
23namespace mfem
24{
25
26ElementRestriction::ElementRestriction(const FiniteElementSpace &f,
27                                       ElementDofOrdering e_ordering)
28 : fes(f),
29 ne(fes.GetNE()),
30 vdim(fes.GetVDim()),
31 byvdim(fes.GetOrdering() == Ordering::byVDIM),
32 ndofs(fes.GetNDofs()),
33 dof(fes.GetTypicalFE()->GetDof()),
34 nedofs(ne*dof),
35 offsets(ndofs+1),
36 indices(ne*dof),
37 gather_map(ne*dof)
38{
39 // Assuming all finite elements are the same.
40 MFEM_VERIFY(!f.IsVariableOrder(), "Variable-order spaces are not supported");
41
42 height = vdim*ne*dof;
43 width = fes.GetVSize();
44 const bool dof_reorder = (e_ordering == ElementDofOrdering::LEXICOGRAPHIC);
45 const int *dof_map = NULL;
46 if (dof_reorder && ne > 0)
47 {
48 for (int e = 0; e < ne; ++e)
49 {
50 const FiniteElement *fe = fes.GetFE(e);
51 auto el_t = dynamic_cast<const TensorBasisElement*>(fe);
52 auto el_n = dynamic_cast<const NodalFiniteElement*>(fe);
53 if (el_t || el_n) { continue; }
54 MFEM_ABORT("Finite element not suitable for lexicographic ordering");
55 }
56 const FiniteElement *fe = fes.GetTypicalFE();
57 auto el_t = dynamic_cast<const TensorBasisElement*>(fe);
58 auto el_n = dynamic_cast<const NodalFiniteElement*>(fe);
59 const Array<int> &fe_dof_map =
60 (el_t) ? el_t->GetDofMap() : el_n->GetLexicographicOrdering();
61 MFEM_VERIFY(fe_dof_map.Size() > 0, "invalid dof map");
62 dof_map = fe_dof_map.HostRead();
63 }
64 const Table& e2dTable = fes.GetElementToDofTable();
65 const int* element_map = e2dTable.GetJ();
66 // Keep a count of how many local (element) nodes point to each global dof
67 for (int i = 0; i <= ndofs; ++i)
68 {
69 offsets[i] = 0;
70 }
71 for (int e = 0; e < ne; ++e)
72 {
73 for (int d = 0; d < dof; ++d)
74 {
75 const int sgid = element_map[dof*e + d]; // signed
76 const int gid = (sgid >= 0) ? sgid : -1 - sgid;
77 ++offsets[gid + 1];
78 }
79 }
80 // Aggregate to find offsets for each global dof
81 for (int i = 1; i <= ndofs; ++i)
82 {
83 offsets[i] += offsets[i - 1];
84 }
85 // For each global dof, fill in all local nodes that point to it
86 for (int e = 0; e < ne; ++e)
87 {
88 for (int d = 0; d < dof; ++d)
89 {
90 const int sdid = dof_reorder ? dof_map[d] : 0; // signed
91 const int did = (!dof_reorder)?d:(sdid >= 0 ? sdid : -1-sdid);
92 const int sgid = element_map[dof*e + did]; // signed
93 const int gid = (sgid >= 0) ? sgid : -1-sgid;
94 const int lid = dof*e + d;
95 const bool plus = (sgid >= 0 && sdid >= 0) || (sgid < 0 && sdid < 0);
96 gather_map[lid] = plus ? gid : -1-gid;
97 indices[offsets[gid]++] = plus ? lid : -1-lid;
98 }
99 }
100 // We shifted the offsets vector by 1 by using it as a counter.
101 // Now we shift it back.
102 for (int i = ndofs; i > 0; --i)
103 {
104 offsets[i] = offsets[i - 1];
105 }
106 offsets[0] = 0;
107}
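// Summary of the construction above: a first pass counts, for each global dof,
// how many element-local dofs reference it (stored shifted by one in 'offsets');
// a running sum turns those counts into offsets; a second pass fills 'indices'
// and 'gather_map', where a negative value -1-k encodes entry k together with a
// sign flip; the final loop undoes the shift introduced by using 'offsets' as a
// running counter.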
108
109void ElementRestriction::Mult(const Vector& x, Vector& y) const
110{
111 // Assumes all elements have the same number of dofs
112 const int nd = dof;
113 const int vd = vdim;
114 const bool t = byvdim;
115 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
116 auto d_y = Reshape(y.Write(), nd, vd, ne);
117 auto d_gather_map = gather_map.Read();
118 mfem::forall(dof*ne, [=] MFEM_HOST_DEVICE (int i)
119 {
120 const int gid = d_gather_map[i];
121 const bool plus = gid >= 0;
122 const int j = plus ? gid : -1-gid;
123 for (int c = 0; c < vd; ++c)
124 {
125 const real_t dof_value = d_x(t?c:j, t?j:c);
126 d_y(i % nd, c, i / nd) = plus ? dof_value : -dof_value;
127 }
128 });
129}
130
131void ElementRestriction::MultUnsigned(const Vector& x, Vector& y) const
132{
133 // Assumes all elements have the same number of dofs
134 const int nd = dof;
135 const int vd = vdim;
136 const bool t = byvdim;
137 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
138 auto d_y = Reshape(y.Write(), nd, vd, ne);
139 auto d_gather_map = gather_map.Read();
140
141 mfem::forall(dof*ne, [=] MFEM_HOST_DEVICE (int i)
142 {
143 const int gid = d_gather_map[i];
144 const int j = gid >= 0 ? gid : -1-gid;
145 for (int c = 0; c < vd; ++c)
146 {
147 d_y(i % nd, c, i / nd) = d_x(t?c:j, t?j:c);
148 }
149 });
150}
151
152template <bool ADD>
153void ElementRestriction::TAddMultTranspose(const Vector& x, Vector& y) const
154{
155 // Assumes all elements have the same number of dofs
156 const int nd = dof;
157 const int vd = vdim;
158 const bool t = byvdim;
159 auto d_offsets = offsets.Read();
160 auto d_indices = indices.Read();
161 auto d_x = Reshape(x.Read(), nd, vd, ne);
162 auto d_y = Reshape(ADD ? y.ReadWrite() : y.Write(), t?vd:ndofs, t?ndofs:vd);
163 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
164 {
165 const int offset = d_offsets[i];
166 const int next_offset = d_offsets[i + 1];
167 for (int c = 0; c < vd; ++c)
168 {
169 real_t dof_value = 0;
170 for (int j = offset; j < next_offset; ++j)
171 {
172 const int idx_j = (d_indices[j] >= 0) ? d_indices[j] : -1 - d_indices[j];
173 dof_value += ((d_indices[j] >= 0) ? d_x(idx_j % nd, c, idx_j / nd) :
174 -d_x(idx_j % nd, c, idx_j / nd));
175 }
176 if (ADD) { d_y(t?c:i,t?i:c) += dof_value; }
177 else { d_y(t?c:i,t?i:c) = dof_value; }
178 }
179 });
180}
181
182void ElementRestriction::MultTranspose(const Vector& x, Vector& y) const
183{
184 constexpr bool ADD = false;
185 TAddMultTranspose<ADD>(x, y);
186}
187
188void ElementRestriction::AddMultTranspose(const Vector& x, Vector& y,
189                                          const real_t a) const
190{
191 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
192 constexpr bool ADD = true;
193 TAddMultTranspose<ADD>(x, y);
194}
195
196void ElementRestriction::MultTransposeUnsigned(const Vector& x, Vector& y) const
197{
198 // Assumes all elements have the same number of dofs
199 const int nd = dof;
200 const int vd = vdim;
201 const bool t = byvdim;
202 auto d_offsets = offsets.Read();
203 auto d_indices = indices.Read();
204 auto d_x = Reshape(x.Read(), nd, vd, ne);
205 auto d_y = Reshape(y.Write(), t?vd:ndofs, t?ndofs:vd);
206 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
207 {
208 const int offset = d_offsets[i];
209 const int next_offset = d_offsets[i + 1];
210 for (int c = 0; c < vd; ++c)
211 {
212 real_t dof_value = 0;
213 for (int j = offset; j < next_offset; ++j)
214 {
215 const int idx_j = (d_indices[j] >= 0) ? d_indices[j] : -1 - d_indices[j];
216 dof_value += d_x(idx_j % nd, c, idx_j / nd);
217 }
218 d_y(t?c:i,t?i:c) = dof_value;
219 }
220 });
221}
222
223void ElementRestriction::MultLeftInverse(const Vector& x, Vector& y) const
224{
225 // Assumes all elements have the same number of dofs
226 const int nd = dof;
227 const int vd = vdim;
228 const bool t = byvdim;
229 auto d_offsets = offsets.Read();
230 auto d_indices = indices.Read();
231 auto d_x = Reshape(x.Read(), nd, vd, ne);
232 auto d_y = Reshape(y.Write(), t?vd:ndofs, t?ndofs:vd);
233 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
234 {
235 const int next_offset = d_offsets[i + 1];
236 for (int c = 0; c < vd; ++c)
237 {
238 real_t dof_value = 0;
239 const int j = next_offset - 1;
240 const int idx_j = (d_indices[j] >= 0) ? d_indices[j] : -1 - d_indices[j];
241 dof_value = (d_indices[j] >= 0) ? d_x(idx_j % nd, c, idx_j / nd) :
242 -d_x(idx_j % nd, c, idx_j / nd);
243 d_y(t?c:i,t?i:c) = dof_value;
244 }
245 });
246}
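// Note: unlike MultTranspose, which sums all element-local contributions for a
// given global dof, the kernel above keeps only one representative contribution
// (the last entry listed in 'indices'), so applying it to an E-vector produced
// by Mult() recovers the original L-vector instead of a multiplicity-weighted sum.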
247
248void ElementRestriction::BooleanMask(Vector& y) const
249{
250 // Assumes all elements have the same number of dofs
251 const int nd = dof;
252 const int vd = vdim;
253 const bool t = byvdim;
254
255 Array<char> processed(vd * ndofs);
256 processed = 0;
257
258 auto d_offsets = offsets.HostRead();
259 auto d_indices = indices.HostRead();
260 auto d_x = Reshape(processed.HostReadWrite(), t?vd:ndofs, t?ndofs:vd);
261 auto d_y = Reshape(y.HostWrite(), nd, vd, ne);
262 for (int i = 0; i < ndofs; ++i)
263 {
264 const int offset = d_offsets[i];
265 const int next_offset = d_offsets[i+1];
266 for (int c = 0; c < vd; ++c)
267 {
268 for (int j = offset; j < next_offset; ++j)
269 {
270 const int idx_j = d_indices[j];
271 if (d_x(t?c:i,t?i:c))
272 {
273 d_y(idx_j % nd, c, idx_j / nd) = 0.0;
274 }
275 else
276 {
277 d_y(idx_j % nd, c, idx_j / nd) = 1.0;
278 d_x(t?c:i,t?i:c) = 1;
279 }
280 }
281 }
282 }
283}
284
285void ElementRestriction::FillSparseMatrix(const Vector &mat_ea,
286                                          SparseMatrix &mat) const
287{
288 mat.GetMemoryI().New(mat.Height()+1, mat.GetMemoryI().GetMemoryType());
289 const int nnz = FillI(mat);
290 mat.GetMemoryJ().New(nnz, mat.GetMemoryJ().GetMemoryType());
291 mat.GetMemoryData().New(nnz, mat.GetMemoryData().GetMemoryType());
292 FillJAndData(mat_ea, mat);
293}
294
295static MFEM_HOST_DEVICE int GetMinElt(const int *my_elts, const int nbElts,
296 const int *nbr_elts, const int nbrNbElts)
297{
298 // Find the minimal element index found in both my_elts[] and nbr_elts[]
299 int min_el = INT_MAX;
300 for (int i = 0; i < nbElts; i++)
301 {
302 const int e_i = my_elts[i];
303 if (e_i >= min_el) { continue; }
304 for (int j = 0; j < nbrNbElts; j++)
305 {
306 if (e_i==nbr_elts[j])
307 {
308 min_el = e_i; // we already know e_i < min_el
309 break;
310 }
311 }
312 }
313 return min_el;
314}
315
316/** Returns the index where a non-zero entry should be added, and increments
317 the number of non-zeros recorded for row i_L. */
318static MFEM_HOST_DEVICE int GetAndIncrementNnzIndex(const int i_L, int* I)
319{
320 int ind = AtomicAdd(I[i_L],1);
321 return ind;
322}
323
324int ElementRestriction::FillI(SparseMatrix &mat) const
325{
326 const int all_dofs = ndofs;
327 const int vd = vdim;
328 const int elt_dofs = dof;
329 auto I = mat.ReadWriteI();
330 auto d_offsets = offsets.Read();
331 auto d_indices = indices.Read();
332 auto d_gather_map = gather_map.Read();
333
334 Array<int> ij_elts(indices.Size() * 2);
335 auto d_ij_elts = Reshape(ij_elts.Write(), indices.Size(), 2);
336
337 mfem::forall(vd*all_dofs+1, [=] MFEM_HOST_DEVICE (int i_L)
338 {
339 I[i_L] = 0;
340 });
341 mfem::forall(ne*elt_dofs, [=] MFEM_HOST_DEVICE (int l_dof)
342 {
343 const int e = l_dof/elt_dofs;
344 const int i = l_dof%elt_dofs;
345
346 const int i_gm = e*elt_dofs + i;
347 const int i_L = d_gather_map[i_gm];
348 const int i_offset = d_offsets[i_L];
349 const int i_next_offset = d_offsets[i_L+1];
350 const int i_nbElts = i_next_offset - i_offset;
351
352 int *i_elts = &d_ij_elts(i_offset, 0);
353 for (int e_i = 0; e_i < i_nbElts; ++e_i)
354 {
355 const int i_E = d_indices[i_offset+e_i];
356 i_elts[e_i] = i_E/elt_dofs;
357 }
358 for (int j = 0; j < elt_dofs; j++)
359 {
360 const int j_gm = e*elt_dofs + j;
361 const int j_L = d_gather_map[j_gm];
362 const int j_offset = d_offsets[j_L];
363 const int j_next_offset = d_offsets[j_L+1];
364 const int j_nbElts = j_next_offset - j_offset;
365 if (i_nbElts == 1 || j_nbElts == 1) // no assembly required
366 {
367 GetAndIncrementNnzIndex(i_L, I);
368 }
369 else // assembly required
370 {
371 int *j_elts = &d_ij_elts(j_offset, 1);
372 for (int e_j = 0; e_j < j_nbElts; ++e_j)
373 {
374 const int j_E = d_indices[j_offset+e_j];
375 const int elt = j_E/elt_dofs;
376 j_elts[e_j] = elt;
377 }
378 int min_e = GetMinElt(i_elts, i_nbElts, j_elts, j_nbElts);
379 if (e == min_e) // add the nnz only once
380 {
381 GetAndIncrementNnzIndex(i_L, I);
382 }
383 }
384 }
385 });
386 // Accumulate the per-row counts in I into offsets (a prefix sum); we do this
387 // on the CPU as it is inherently sequential.
387 auto h_I = mat.HostReadWriteI();
388 const int nTdofs = vd*all_dofs;
389 int sum = 0;
390 for (int i = 0; i < nTdofs; i++)
391 {
392 const int nnz = h_I[i];
393 h_I[i] = sum;
394 sum+=nnz;
395 }
396 h_I[nTdofs] = sum;
397 // We return the number of nnz
398 return h_I[nTdofs];
399}
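// Note on the counting above: a matrix entry (i_L, j_L) can be produced by every
// element containing both dofs. To count it exactly once, only the element with
// the smallest index among those shared elements (GetMinElt) increments the row
// counter; rows touched by a single element skip the search entirely.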
400
401void ElementRestriction::FillJAndData(const Vector &ea_data,
402                                      SparseMatrix &mat) const
403{
404 const int all_dofs = ndofs;
405 const int vd = vdim;
406 const int elt_dofs = dof;
407 auto I = mat.ReadWriteI();
408 auto J = mat.WriteJ();
409 auto Data = mat.WriteData();
410 auto d_offsets = offsets.Read();
411 auto d_indices = indices.Read();
412 auto d_gather_map = gather_map.Read();
413 auto mat_ea = Reshape(ea_data.Read(), elt_dofs, elt_dofs, ne);
414
415 Array<int> ij_B_el(indices.Size() * 4);
416 auto d_ij_B_el = Reshape(ij_B_el.Write(), indices.Size(), 4);
417
418 mfem::forall(ne*elt_dofs, [=] MFEM_HOST_DEVICE (int l_dof)
419 {
420 const int e = l_dof/elt_dofs;
421 const int i = l_dof%elt_dofs;
422
423 const int i_gm = e*elt_dofs + i;
424 const int i_L = d_gather_map[i_gm];
425 const int i_offset = d_offsets[i_L];
426 const int i_next_offset = d_offsets[i_L+1];
427 const int i_nbElts = i_next_offset - i_offset;
428
429 int *i_elts = &d_ij_B_el(i_offset, 0);
430 int *i_B = &d_ij_B_el(i_offset, 1);
431 for (int e_i = 0; e_i < i_nbElts; ++e_i)
432 {
433 const int i_E = d_indices[i_offset+e_i];
434 i_elts[e_i] = i_E/elt_dofs;
435 i_B[e_i] = i_E%elt_dofs;
436 }
437 for (int j = 0; j < elt_dofs; j++)
438 {
439 const int j_gm = e*elt_dofs + j;
440 const int j_L = d_gather_map[j_gm];
441 const int j_offset = d_offsets[j_L];
442 const int j_next_offset = d_offsets[j_L+1];
443 const int j_nbElts = j_next_offset - j_offset;
444 if (i_nbElts == 1 || j_nbElts == 1) // no assembly required
445 {
446 const int nnz = GetAndIncrementNnzIndex(i_L, I);
447 J[nnz] = j_L;
448 Data[nnz] = mat_ea(j,i,e);
449 }
450 else // assembly required
451 {
452 int *j_elts = &d_ij_B_el(j_offset, 2);
453 int *j_B = &d_ij_B_el(j_offset, 3);
454 for (int e_j = 0; e_j < j_nbElts; ++e_j)
455 {
456 const int j_E = d_indices[j_offset+e_j];
457 const int elt = j_E/elt_dofs;
458 j_elts[e_j] = elt;
459 j_B[e_j] = j_E%elt_dofs;
460 }
461 int min_e = GetMinElt(i_elts, i_nbElts, j_elts, j_nbElts);
462 if (e == min_e) // add the nnz only once
463 {
464 real_t val = 0.0;
465 for (int k = 0; k < i_nbElts; k++)
466 {
467 const int e_i = i_elts[k];
468 const int i_Bloc = i_B[k];
469 for (int l = 0; l < j_nbElts; l++)
470 {
471 const int e_j = j_elts[l];
472 const int j_Bloc = j_B[l];
473 if (e_i == e_j)
474 {
475 val += mat_ea(j_Bloc, i_Bloc, e_i);
476 }
477 }
478 }
479 const int nnz = GetAndIncrementNnzIndex(i_L, I);
480 J[nnz] = j_L;
481 Data[nnz] = val;
482 }
483 }
484 }
485 });
486 // We need to shift the entries of I back by one position (they were advanced
487 // while inserting); this is done on the CPU as it is inherently sequential.
488 auto h_I = mat.HostReadWriteI();
489 const int size = vd*all_dofs;
490 for (int i = 0; i < size; i++)
491 {
492 h_I[size-i] = h_I[size-(i+1)];
493 }
494 h_I[0] = 0;
495}
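// Note: the atomic increments in GetAndIncrementNnzIndex advance I[row] past the
// entries written for that row, so after the kernel each I[row] points to the
// start of the next row; the serial loop above shifts the array by one slot and
// resets I[0] to zero, restoring the standard CSR row-pointer layout.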
496
497L2ElementRestriction::L2ElementRestriction(const FiniteElementSpace &fes)
498 : ne(fes.GetNE()),
499 vdim(fes.GetVDim()),
500 byvdim(fes.GetOrdering() == Ordering::byVDIM),
501 ndof(fes.GetTypicalFE()->GetDof()),
502 ndofs(fes.GetNDofs())
503{
504 height = vdim*ne*ndof;
505 width = vdim*ne*ndof;
506}
507
508void L2ElementRestriction::Mult(const Vector &x, Vector &y) const
509{
510 const int nd = ndof;
511 const int vd = vdim;
512 const bool t = byvdim;
513 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
514 auto d_y = Reshape(y.Write(), nd, vd, ne);
515 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
516 {
517 const int idx = i;
518 const int dof = idx % nd;
519 const int e = idx / nd;
520 for (int c = 0; c < vd; ++c)
521 {
522 d_y(dof, c, e) = d_x(t?c:idx, t?idx:c);
523 }
524 });
525}
526
527template <bool ADD>
528void L2ElementRestriction::TAddMultTranspose(const Vector &x, Vector &y) const
529{
530 const int nd = ndof;
531 const int vd = vdim;
532 const bool t = byvdim;
533 auto d_x = Reshape(x.Read(), nd, vd, ne);
534 auto d_y = Reshape(ADD ? y.ReadWrite() : y.Write(), t?vd:ndofs, t?ndofs:vd);
535 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
536 {
537 const int idx = i;
538 const int dof = idx % nd;
539 const int e = idx / nd;
540 for (int c = 0; c < vd; ++c)
541 {
542 if (ADD) { d_y(t?c:idx,t?idx:c) += d_x(dof, c, e); }
543 else { d_y(t?c:idx,t?idx:c) = d_x(dof, c, e); }
544 }
545 });
546}
547
548void L2ElementRestriction::MultTranspose(const Vector &x, Vector &y) const
549{
550 constexpr bool ADD = false;
551 TAddMultTranspose<ADD>(x, y);
552}
553
554void L2ElementRestriction::AddMultTranspose(const Vector &x, Vector &y,
555                                            const real_t a) const
556{
557 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
558 constexpr bool ADD = true;
559 TAddMultTranspose<ADD>(x, y);
560}
561
562void L2ElementRestriction::FillI(SparseMatrix &mat) const
563{
564 const int elem_dofs = ndof;
565 const int vd = vdim;
566 auto I = mat.WriteI();
567 const int isize = mat.Height() + 1;
568 const int interior_dofs = ne*elem_dofs*vd;
569 mfem::forall(isize, [=] MFEM_HOST_DEVICE (int dof)
570 {
571 I[dof] = dof<interior_dofs ? elem_dofs : 0;
572 });
573}
574
575static MFEM_HOST_DEVICE int AddNnz(const int iE, int *I, const int dofs)
576{
577 int val = AtomicAdd(I[iE],dofs);
578 return val;
579}
580
581void L2ElementRestriction::FillJAndData(const Vector &ea_data,
582                                        SparseMatrix &mat) const
583{
584 const int elem_dofs = ndof;
585 const int vd = vdim;
586 auto I = mat.ReadWriteI();
587 auto J = mat.WriteJ();
588 auto Data = mat.WriteData();
589 auto mat_ea = Reshape(ea_data.Read(), elem_dofs, elem_dofs, ne);
590 mfem::forall(ne*elem_dofs*vd, [=] MFEM_HOST_DEVICE (int iE)
591 {
592 const int offset = AddNnz(iE,I,elem_dofs);
593 const int e = iE/elem_dofs;
594 const int i = iE%elem_dofs;
595 for (int j = 0; j < elem_dofs; j++)
596 {
597 J[offset+j] = e*elem_dofs+j;
598 Data[offset+j] = mat_ea(j,i,e);
599 }
600 });
601}
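// Note: since L2 (DG) dofs are not shared between elements, the assembled matrix
// is block diagonal, with one elem_dofs x elem_dofs block per element (and per
// vector component), which is why no inter-element summation is needed here.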
602
603ConformingFaceRestriction::ConformingFaceRestriction(
604 const FiniteElementSpace &fes,
605 const ElementDofOrdering f_ordering,
606 const FaceType type,
607 bool build)
608 : fes(fes),
609 nf(fes.GetNFbyType(type)),
610 vdim(fes.GetVDim()),
611 byvdim(fes.GetOrdering() == Ordering::byVDIM),
612 face_dofs(nf > 0 ? fes.GetFaceElement(0)->GetDof() : 0),
613 elem_dofs(fes.GetTypicalFE()->GetDof()),
614 nfdofs(nf*face_dofs),
615 ndofs(fes.GetNDofs()),
616 scatter_indices(nf*face_dofs),
617 gather_offsets(ndofs+1),
618 gather_indices(nf*face_dofs),
619 face_map(face_dofs)
620{
621 height = vdim*nfdofs;
622 width = fes.GetVSize();
623 if (nf==0) { return; }
624
625 CheckFESpace(f_ordering);
626
627 // Get the mapping from lexicographic DOF ordering to native ordering.
628 const TensorBasisElement* el =
629 dynamic_cast<const TensorBasisElement*>(fes.GetTypicalFE());
630 const Array<int> &dof_map_ = el->GetDofMap();
631 if (dof_map_.Size() > 0)
632 {
633 vol_dof_map.MakeRef(dof_map_);
634 }
635 else
636 {
637 // For certain types of elements dof_map_ is empty. In this case, that
638 // means the element is already ordered lexicographically, so the
639 // permutation is the identity.
640 vol_dof_map.SetSize(elem_dofs);
641 for (int i = 0; i < elem_dofs; ++i) { vol_dof_map[i] = i; }
642 }
643
644 if (!build) { return; }
645 ComputeScatterIndicesAndOffsets(f_ordering, type);
646 ComputeGatherIndices(f_ordering,type);
647}
648
649ConformingFaceRestriction::ConformingFaceRestriction(
650 const FiniteElementSpace &fes,
651 const ElementDofOrdering f_ordering,
652 const FaceType type)
653 : ConformingFaceRestriction(fes, f_ordering, type, true)
654{ }
655
656void ConformingFaceRestriction::Mult(const Vector& x, Vector& y) const
657{
658 if (nf==0) { return; }
659 // Assumes all elements have the same number of dofs
660 const int nface_dofs = face_dofs;
661 const int vd = vdim;
662 const bool t = byvdim;
663 auto d_indices = scatter_indices.Read();
664 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
665 auto d_y = Reshape(y.Write(), nface_dofs, vd, nf);
666 mfem::forall(nfdofs, [=] MFEM_HOST_DEVICE (int i)
667 {
668 const int s_idx = d_indices[i];
669 const int sgn = (s_idx >= 0) ? 1 : -1;
670 const int idx = (s_idx >= 0) ? s_idx : -1 - s_idx;
671 const int dof = i % nface_dofs;
672 const int face = i / nface_dofs;
673 for (int c = 0; c < vd; ++c)
674 {
675 d_y(dof, c, face) = sgn*d_x(t?c:idx, t?idx:c);
676 }
677 });
678}
679
680static void ConformingFaceRestriction_AddMultTranspose(
681 const int ndofs,
682 const int face_dofs,
683 const int nf,
684 const int vdim,
685 const bool by_vdim,
686 const Array<int> &gather_offsets,
687 const Array<int> &gather_indices,
688 const Vector &x,
689 Vector &y,
690 bool use_signs,
691 const real_t a)
692{
693 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
694 if (nf==0) { return; }
695 // Assumes all elements have the same number of dofs
696 auto d_offsets = gather_offsets.Read();
697 auto d_indices = gather_indices.Read();
698 auto d_x = Reshape(x.Read(), face_dofs, vdim, nf);
699 auto d_y = Reshape(y.ReadWrite(), by_vdim?vdim:ndofs, by_vdim?ndofs:vdim);
700 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
701 {
702 const int offset = d_offsets[i];
703 const int next_offset = d_offsets[i + 1];
704 for (int c = 0; c < vdim; ++c)
705 {
706 real_t dof_value = 0;
707 for (int j = offset; j < next_offset; ++j)
708 {
709 const int s_idx_j = d_indices[j];
710 const real_t sgn = (s_idx_j >= 0 || !use_signs) ? 1.0 : -1.0;
711 const int idx_j = (s_idx_j >= 0) ? s_idx_j : -1 - s_idx_j;
712 dof_value += sgn*d_x(idx_j % face_dofs, c, idx_j / face_dofs);
713 }
714 d_y(by_vdim?c:i,by_vdim?i:c) += dof_value;
715 }
716 });
717}
718
719void ConformingFaceRestriction::AddMultTranspose(
720 const Vector& x, Vector& y, const real_t a) const
721{
722 ConformingFaceRestriction_AddMultTranspose(
723 ndofs, face_dofs, nf, vdim, byvdim, gather_offsets, gather_indices, x, y,
724 true, a);
725}
726
727void ConformingFaceRestriction::AddMultTransposeUnsigned(
728 const Vector& x, Vector& y, const real_t a) const
729{
730 ConformingFaceRestriction_AddMultTranspose(
731 ndofs, face_dofs, nf, vdim, byvdim, gather_offsets, gather_indices, x, y,
732 false, a);
733}
734
735void ConformingFaceRestriction::CheckFESpace(const ElementDofOrdering
736                                             f_ordering)
737{
738#ifdef MFEM_USE_MPI
739
740 // If the underlying finite element space is parallel, ensure the face
741 // neighbor information is generated.
742 if (const ParFiniteElementSpace *pfes
743 = dynamic_cast<const ParFiniteElementSpace*>(&fes))
744 {
745 pfes->GetParMesh()->ExchangeFaceNbrData();
746 }
747
748#endif
749
750#ifdef MFEM_DEBUG
751 const FiniteElement *fe0 = fes.GetTypicalFE();
752 const TensorBasisElement *tfe = dynamic_cast<const TensorBasisElement*>(fe0);
753 MFEM_VERIFY(tfe != NULL,
754 "ConformingFaceRestriction only supports TensorBasisElements");
755 MFEM_VERIFY(tfe->GetBasisType()==BasisType::GaussLobatto ||
756             tfe->GetBasisType()==BasisType::Positive,
757 "ConformingFaceRestriction only supports Gauss-Lobatto and Bernstein bases");
758
759 // Assuming all finite elements are using Gauss-Lobatto.
760 const bool dof_reorder = (f_ordering == ElementDofOrdering::LEXICOGRAPHIC);
761 if (dof_reorder && nf > 0)
762 {
763 for (int f = 0; f < fes.GetNF(); ++f)
764 {
765 const FiniteElement *fe = fes.GetFaceElement(f);
766 const TensorBasisElement* el =
767 dynamic_cast<const TensorBasisElement*>(fe);
768 if (el) { continue; }
769 MFEM_ABORT("Finite element not suitable for lexicographic ordering");
770 }
771 }
772#endif
773}
774
775void ConformingFaceRestriction::ComputeScatterIndicesAndOffsets(
776 const ElementDofOrdering f_ordering,
777 const FaceType type)
778{
779 Mesh &mesh = *fes.GetMesh();
780
781 // Initialization of the offsets
782 for (int i = 0; i <= ndofs; ++i)
783 {
784 gather_offsets[i] = 0;
785 }
786
787 // Computation of scatter indices and offsets
788 int f_ind = 0;
789 for (int f = 0; f < fes.GetNF(); ++f)
790 {
791 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
792 if ( face.IsNonconformingCoarse() )
793 {
794 // We skip nonconforming coarse faces as they are treated
795 // by the corresponding nonconforming fine faces.
796 continue;
797 }
798 else if ( face.IsOfFaceType(type) )
799 {
800 SetFaceDofsScatterIndices(face, f_ind, f_ordering);
801 f_ind++;
802 }
803 }
804 MFEM_VERIFY(f_ind==nf, "Unexpected number of faces.");
805
806 // Summation of the offsets
807 for (int i = 1; i <= ndofs; ++i)
808 {
809 gather_offsets[i] += gather_offsets[i - 1];
810 }
811}
812
813void ConformingFaceRestriction::ComputeGatherIndices(
814 const ElementDofOrdering f_ordering,
815 const FaceType type)
816{
817 Mesh &mesh = *fes.GetMesh();
818
819 // Computation of gather_indices
820 int f_ind = 0;
821 for (int f = 0; f < fes.GetNF(); ++f)
822 {
823 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
824 if ( face.IsNonconformingCoarse() )
825 {
826 // We skip nonconforming coarse faces as they are treated
827 // by the corresponding nonconforming fine faces.
828 continue;
829 }
830 else if ( face.IsOfFaceType(type) )
831 {
832 SetFaceDofsGatherIndices(face, f_ind, f_ordering);
833 f_ind++;
834 }
835 }
836 MFEM_VERIFY(f_ind==nf, "Unexpected number of faces.");
837
838 // Reset offsets to their initial value
839 for (int i = ndofs; i > 0; --i)
840 {
841 gather_offsets[i] = gather_offsets[i - 1];
842 }
843 gather_offsets[0] = 0;
844}
845
846static inline int absdof(int i) { return i < 0 ? -1-i : i; }
847
848void ConformingFaceRestriction::SetFaceDofsScatterIndices(
849 const Mesh::FaceInformation &face,
850 const int face_index,
851 const ElementDofOrdering f_ordering)
852{
853 MFEM_ASSERT(!(face.IsNonconformingCoarse()),
854 "This method should not be used on nonconforming coarse faces.");
855 MFEM_ASSERT(face.element[0].orientation==0,
856 "FaceRestriction used on degenerated mesh.");
857 MFEM_VERIFY(f_ordering == ElementDofOrdering::LEXICOGRAPHIC,
858 "NATIVE ordering is not supported yet");
859
861 fes.GetTypicalFE()->GetFaceMap(face.element[0].local_face_id, face_map);
862 const Table& e2dTable = fes.GetElementToDofTable();
863 const int* elem_map = e2dTable.GetJ();
864 const int elem_index = face.element[0].index;
865
866 for (int face_dof = 0; face_dof < face_dofs; ++face_dof)
867 {
868 const int lex_volume_dof = face_map[face_dof];
869 const int s_volume_dof = AsConst(vol_dof_map)[lex_volume_dof]; // signed
870 const int volume_dof = absdof(s_volume_dof);
871 const int s_global_dof = elem_map[elem_index*elem_dofs + volume_dof];
872 const int global_dof = absdof(s_global_dof);
873 const int restriction_dof = face_dofs*face_index + face_dof;
874 scatter_indices[restriction_dof] = s_global_dof;
875 ++gather_offsets[global_dof + 1];
876 }
877}
878
879void ConformingFaceRestriction::SetFaceDofsGatherIndices(
880 const Mesh::FaceInformation &face,
881 const int face_index,
882 const ElementDofOrdering f_ordering)
883{
884 MFEM_ASSERT(!(face.IsNonconformingCoarse()),
885 "This method should not be used on nonconforming coarse faces.");
886 MFEM_VERIFY(f_ordering == ElementDofOrdering::LEXICOGRAPHIC,
887 "NATIVE ordering is not supported yet");
888
890 fes.GetTypicalFE()->GetFaceMap(face.element[0].local_face_id, face_map);
891 const Table& e2dTable = fes.GetElementToDofTable();
892 const int* elem_map = e2dTable.GetJ();
893 const int elem_index = face.element[0].index;
894
895 for (int face_dof = 0; face_dof < face_dofs; ++face_dof)
896 {
897 const int lex_volume_dof = face_map[face_dof];
898 const int s_volume_dof = AsConst(vol_dof_map)[lex_volume_dof];
899 const int volume_dof = absdof(s_volume_dof);
900 const int s_global_dof = elem_map[elem_index*elem_dofs + volume_dof];
901 const int sgn = (s_global_dof >= 0) ? 1 : -1;
902 const int global_dof = absdof(s_global_dof);
903 const int restriction_dof = face_dofs*face_index + face_dof;
904 const int s_restriction_dof = (sgn >= 0) ? restriction_dof : -1 -
905 restriction_dof;
906 gather_indices[gather_offsets[global_dof]++] = s_restriction_dof;
907 }
908}
909
910// Permute dofs or quads on a face for e2 to match with the ordering of e1
911int PermuteFaceL2(const int dim, const int face_id1,
912 const int face_id2, const int orientation,
913 const int size1d, const int index)
914{
915 switch (dim)
916 {
917 case 1:
918 return 0;
919 case 2:
920 return internal::PermuteFace2D(face_id1, face_id2, orientation, size1d, index);
921 case 3:
922 return internal::PermuteFace3D(face_id1, face_id2, orientation, size1d, index);
923 default:
924 MFEM_ABORT("Unsupported dimension.");
925 return 0;
926 }
927}
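// Note: PermuteFaceL2 maps a lexicographic dof (or quadrature point) index on
// the face as seen from element 2 to the index of the matching point as seen
// from element 1, using the two local face ids and the relative orientation;
// in 1D a face is a single vertex, so the index is always 0.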
928
929L2FaceRestriction::L2FaceRestriction(const FiniteElementSpace &fes,
930 const ElementDofOrdering f_ordering,
931 const FaceType type,
932 const L2FaceValues m,
933 bool build)
934 : fes(fes),
935 ordering(f_ordering),
936 nf(fes.GetNFbyType(type)),
937 ne(fes.GetNE()),
938 vdim(fes.GetVDim()),
939 byvdim(fes.GetOrdering() == Ordering::byVDIM),
940 face_dofs(fes.GetTypicalTraceElement()->GetDof()),
941 elem_dofs(fes.GetTypicalFE()->GetDof()),
942 nfdofs(nf*face_dofs),
943 ndofs(fes.GetNDofs()),
944 type(type),
945 m(m),
946 scatter_indices1(nf*face_dofs),
947 scatter_indices2(m==L2FaceValues::DoubleValued?nf*face_dofs:0),
948 gather_offsets(ndofs+1),
949 gather_indices((m==L2FaceValues::DoubleValued? 2 : 1)*nf*face_dofs),
950 face_map(face_dofs)
951{
952 height = (m==L2FaceValues::DoubleValued? 2 : 1)*vdim*nf*face_dofs;
953 width = fes.GetVSize();
954 if (!build) { return; }
955
956 CheckFESpace();
957 ComputeScatterIndicesAndOffsets();
958 ComputeGatherIndices();
959}
960
961L2FaceRestriction::L2FaceRestriction(const FiniteElementSpace &fes,
962 const ElementDofOrdering f_ordering,
963 const FaceType type,
964 const L2FaceValues m)
965 : L2FaceRestriction(fes, f_ordering, type, m, true)
966{ }
967
968void L2FaceRestriction::SingleValuedConformingMult(const Vector& x,
969                                                   Vector& y) const
970{
971 if (nf == 0) { return; }
972 MFEM_ASSERT(
973 m == L2FaceValues::SingleValued,
974 "This method should be called when m == L2FaceValues::SingleValued.");
975 // Assumes all elements have the same number of dofs
976 const int nface_dofs = face_dofs;
977 const int vd = vdim;
978 const bool t = byvdim;
979 auto d_indices1 = scatter_indices1.Read();
980 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
981 auto d_y = Reshape(y.Write(), nface_dofs, vd, nf);
982 mfem::forall(nfdofs, [=] MFEM_HOST_DEVICE (int i)
983 {
984 const int dof = i % nface_dofs;
985 const int face = i / nface_dofs;
986 const int idx1 = d_indices1[i];
987 for (int c = 0; c < vd; ++c)
988 {
989 d_y(dof, c, face) = d_x(t?c:idx1, t?idx1:c);
990 }
991 });
992}
993
994void L2FaceRestriction::DoubleValuedConformingMult(const Vector& x,
995                                                   Vector& y) const
996{
997 MFEM_ASSERT(
998 m == L2FaceValues::DoubleValued,
999 "This method should be called when m == L2FaceValues::DoubleValued.");
1000 // Assumes all elements have the same number of dofs
1001 const int nface_dofs = face_dofs;
1002 const int vd = vdim;
1003 const bool t = byvdim;
1004 auto d_indices1 = scatter_indices1.Read();
1005 auto d_indices2 = scatter_indices2.Read();
1006 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
1007 auto d_y = Reshape(y.Write(), nface_dofs, vd, 2, nf);
1008 mfem::forall(nfdofs, [=] MFEM_HOST_DEVICE (int i)
1009 {
1010 const int dof = i % nface_dofs;
1011 const int face = i / nface_dofs;
1012 const int idx1 = d_indices1[i];
1013 for (int c = 0; c < vd; ++c)
1014 {
1015 d_y(dof, c, 0, face) = d_x(t?c:idx1, t?idx1:c);
1016 }
1017 const int idx2 = d_indices2[i];
1018 for (int c = 0; c < vd; ++c)
1019 {
1020 d_y(dof, c, 1, face) = idx2==-1 ? 0.0 : d_x(t?c:idx2, t?idx2:c);
1021 }
1022 });
1023}
1024
1025void L2FaceRestriction::Mult(const Vector& x, Vector& y) const
1026{
1027 if (nf==0) { return; }
1028 if (m==L2FaceValues::DoubleValued)
1029 {
1030 DoubleValuedConformingMult(x, y);
1031 }
1032 else
1033 {
1034 SingleValuedConformingMult(x, y);
1035 }
1036}
1037
1038void L2FaceRestriction::SingleValuedConformingAddMultTranspose(
1039 const Vector& x, Vector& y) const
1040{
1041 // Assumes all elements have the same number of dofs
1042 const int nface_dofs = face_dofs;
1043 const int vd = vdim;
1044 const bool t = byvdim;
1045 auto d_offsets = gather_offsets.Read();
1046 auto d_indices = gather_indices.Read();
1047 auto d_x = Reshape(x.Read(), nface_dofs, vd, nf);
1048 auto d_y = Reshape(y.ReadWrite(), t?vd:ndofs, t?ndofs:vd);
1049 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
1050 {
1051 const int offset = d_offsets[i];
1052 const int next_offset = d_offsets[i + 1];
1053 for (int c = 0; c < vd; ++c)
1054 {
1055 real_t dof_value = 0;
1056 for (int j = offset; j < next_offset; ++j)
1057 {
1058 int idx_j = d_indices[j];
1059 dof_value += d_x(idx_j % nface_dofs, c, idx_j / nface_dofs);
1060 }
1061 d_y(t?c:i,t?i:c) += dof_value;
1062 }
1063 });
1064}
1065
1066void L2FaceRestriction::DoubleValuedConformingAddMultTranspose(
1067 const Vector& x, Vector& y) const
1068{
1069 // Assumes all elements have the same number of dofs
1070 const int nface_dofs = face_dofs;
1071 const int vd = vdim;
1072 const bool t = byvdim;
1073 const int dofs = nfdofs;
1074 auto d_offsets = gather_offsets.Read();
1075 auto d_indices = gather_indices.Read();
1076 auto d_x = Reshape(x.Read(), nface_dofs, vd, 2, nf);
1077 auto d_y = Reshape(y.ReadWrite(), t?vd:ndofs, t?ndofs:vd);
1078 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
1079 {
1080 const int offset = d_offsets[i];
1081 const int next_offset = d_offsets[i + 1];
1082 for (int c = 0; c < vd; ++c)
1083 {
1084 real_t dof_value = 0;
1085 for (int j = offset; j < next_offset; ++j)
1086 {
1087 int idx_j = d_indices[j];
1088 bool isE1 = idx_j < dofs;
1089 idx_j = isE1 ? idx_j : idx_j - dofs;
1090 dof_value += isE1 ?
1091 d_x(idx_j % nface_dofs, c, 0, idx_j / nface_dofs)
1092 :d_x(idx_j % nface_dofs, c, 1, idx_j / nface_dofs);
1093 }
1094 d_y(t?c:i,t?i:c) += dof_value;
1095 }
1096 });
1097}
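// Note: in the double-valued case, gather_indices stores contributions coming
// from elem1 as plain indices and contributions coming from elem2 shifted by
// nfdofs; the 'isE1' test above uses that shift to pick the correct side of the
// (dof, vdim, 2, nf) face data.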
1098
1099void L2FaceRestriction::AddMultTranspose(const Vector& x, Vector& y,
1100                                         const real_t a) const
1101{
1102 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
1103 if (nf==0) { return; }
1104 if (m==L2FaceValues::DoubleValued)
1105 {
1106 DoubleValuedConformingAddMultTranspose(x, y);
1107 }
1108 else
1109 {
1110 SingleValuedConformingAddMultTranspose(x, y);
1111 }
1112}
1113
1114void L2FaceRestriction::FillI(SparseMatrix &mat,
1115                              const bool keep_nbr_block) const
1116{
1117 const int nface_dofs = face_dofs;
1118 auto d_indices1 = scatter_indices1.Read();
1119 auto d_indices2 = scatter_indices2.Read();
1120 auto I = mat.ReadWriteI();
1121 mfem::forall(nf*nface_dofs, [=] MFEM_HOST_DEVICE (int fdof)
1122 {
1123 const int iE1 = d_indices1[fdof];
1124 const int iE2 = d_indices2[fdof];
1125 AddNnz(iE1,I,nface_dofs);
1126 AddNnz(iE2,I,nface_dofs);
1127 });
1128}
1129
1130void L2FaceRestriction::FillJAndData(const Vector &fea_data,
1131                                     SparseMatrix &mat,
1132 const bool keep_nbr_block) const
1133{
1134 const int nface_dofs = face_dofs;
1135 auto d_indices1 = scatter_indices1.Read();
1136 auto d_indices2 = scatter_indices2.Read();
1137 auto I = mat.ReadWriteI();
1138 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, 2, nf);
1139 auto J = mat.WriteJ();
1140 auto Data = mat.WriteData();
1141 mfem::forall(nf*nface_dofs, [=] MFEM_HOST_DEVICE (int fdof)
1142 {
1143 const int f = fdof/nface_dofs;
1144 const int iF = fdof%nface_dofs;
1145 const int iE1 = d_indices1[f*nface_dofs+iF];
1146 const int iE2 = d_indices2[f*nface_dofs+iF];
1147 const int offset1 = AddNnz(iE1,I,nface_dofs);
1148 const int offset2 = AddNnz(iE2,I,nface_dofs);
1149 for (int jF = 0; jF < nface_dofs; jF++)
1150 {
1151 const int jE1 = d_indices1[f*nface_dofs+jF];
1152 const int jE2 = d_indices2[f*nface_dofs+jF];
1153 J[offset2+jF] = jE1;
1154 J[offset1+jF] = jE2;
1155 Data[offset2+jF] = mat_fea(jF,iF,0,f);
1156 Data[offset1+jF] = mat_fea(jF,iF,1,f);
1157 }
1158 });
1159}
1160
1161void L2FaceRestriction::AddFaceMatricesToElementMatrices(const Vector &fea_data,
1162                                                         Vector &ea_data) const
1163{
1164 const int nface_dofs = face_dofs;
1165 const int nelem_dofs = elem_dofs;
1166 const int NE = ne;
1167 if (m==L2FaceValues::DoubleValued)
1168 {
1169 auto d_indices1 = scatter_indices1.Read();
1170 auto d_indices2 = scatter_indices2.Read();
1171 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, 2, nf);
1172 auto mat_ea = Reshape(ea_data.ReadWrite(), nelem_dofs, nelem_dofs, ne);
1173 mfem::forall(nf, [=] MFEM_HOST_DEVICE (int f)
1174 {
1175 const int e1 = d_indices1[f*nface_dofs]/nelem_dofs;
1176 const int e2 = d_indices2[f*nface_dofs]/nelem_dofs;
1177 for (int j = 0; j < nface_dofs; j++)
1178 {
1179 const int jB1 = d_indices1[f*nface_dofs+j]%nelem_dofs;
1180 for (int i = 0; i < nface_dofs; i++)
1181 {
1182 const int iB1 = d_indices1[f*nface_dofs+i]%nelem_dofs;
1183 AtomicAdd(mat_ea(iB1,jB1,e1), mat_fea(i,j,0,f));
1184 }
1185 }
1186 if (e2 < NE)
1187 {
1188 for (int j = 0; j < nface_dofs; j++)
1189 {
1190 const int jB2 = d_indices2[f*nface_dofs+j]%nelem_dofs;
1191 for (int i = 0; i < nface_dofs; i++)
1192 {
1193 const int iB2 = d_indices2[f*nface_dofs+i]%nelem_dofs;
1194 AtomicAdd(mat_ea(iB2,jB2,e2), mat_fea(i,j,1,f));
1195 }
1196 }
1197 }
1198 });
1199 }
1200 else
1201 {
1202 auto d_indices = scatter_indices1.Read();
1203 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, nf);
1204 auto mat_ea = Reshape(ea_data.ReadWrite(), nelem_dofs, nelem_dofs, ne);
1205 mfem::forall(nf, [=] MFEM_HOST_DEVICE (int f)
1206 {
1207 const int e = d_indices[f*nface_dofs]/nelem_dofs;
1208 for (int j = 0; j < nface_dofs; j++)
1209 {
1210 const int jE = d_indices[f*nface_dofs+j]%nelem_dofs;
1211 for (int i = 0; i < nface_dofs; i++)
1212 {
1213 const int iE = d_indices[f*nface_dofs+i]%nelem_dofs;
1214 AtomicAdd(mat_ea(iE,jE,e), mat_fea(i,j,f));
1215 }
1216 }
1217 });
1218 }
1219}
1220
1221void L2FaceRestriction::CheckFESpace()
1222{
1223#ifdef MFEM_USE_MPI
1224
1225 // If the underlying finite element space is parallel, ensure the face
1226 // neighbor information is generated.
1227 if (const ParFiniteElementSpace *pfes
1228 = dynamic_cast<const ParFiniteElementSpace*>(&fes))
1229 {
1230 pfes->GetParMesh()->ExchangeFaceNbrData();
1231 }
1232
1233#endif
1234
1235#ifdef MFEM_DEBUG
1236 // If fespace == L2
1237 const FiniteElement *fe0 = fes.GetTypicalFE();
1238 const TensorBasisElement *tfe = dynamic_cast<const TensorBasisElement*>(fe0);
1239 MFEM_VERIFY(tfe != NULL &&
1240             (tfe->GetBasisType()==BasisType::GaussLobatto ||
1241              tfe->GetBasisType()==BasisType::Positive),
1242 "Only Gauss-Lobatto and Bernstein bases are supported in "
1243 "L2FaceRestriction.");
1244 if (nf==0) { return; }
1245 const bool dof_reorder = (ordering == ElementDofOrdering::LEXICOGRAPHIC);
1246 if (!dof_reorder)
1247 {
1248 MFEM_ABORT("Non-Tensor L2FaceRestriction not yet implemented.");
1249 }
1250 if (dof_reorder && nf > 0)
1251 {
1252 for (int f = 0; f < fes.GetNF(); ++f)
1253 {
1254 const FiniteElement *fe = fes.GetFaceElement(f);
1255 const TensorBasisElement* el = dynamic_cast<const TensorBasisElement*>(fe);
1256 if (el) { continue; }
1257 MFEM_ABORT("Finite element not suitable for lexicographic ordering");
1258 }
1259 }
1260#endif
1261}
1262
1263void L2FaceRestriction::ComputeScatterIndicesAndOffsets()
1264{
1265 Mesh &mesh = *fes.GetMesh();
1266 // Initialization of the offsets
1267 for (int i = 0; i <= ndofs; ++i)
1268 {
1269 gather_offsets[i] = 0;
1270 }
1271
1272 // Computation of scatter indices and offsets
1273 int f_ind=0;
1274 for (int f = 0; f < fes.GetNF(); ++f)
1275 {
1276 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
1277 MFEM_ASSERT(!face.IsShared(),
1278 "Unexpected shared face in L2FaceRestriction.");
1279 if ( face.IsOfFaceType(type) )
1280 {
1281 SetFaceDofsScatterIndices1(face,f_ind);
1282 if ( m==L2FaceValues::DoubleValued )
1283 {
1284 if ( type==FaceType::Interior && face.IsInterior() )
1285 {
1286 PermuteAndSetFaceDofsScatterIndices2(face,f_ind);
1287 }
1288 else if ( type==FaceType::Boundary && face.IsBoundary() )
1289 {
1290 SetBoundaryDofsScatterIndices2(face,f_ind);
1291 }
1292 }
1293 f_ind++;
1294 }
1295 }
1296 MFEM_VERIFY(f_ind==nf, "Unexpected number of faces.");
1297
1298 // Summation of the offsets
1299 for (int i = 1; i <= ndofs; ++i)
1300 {
1301 gather_offsets[i] += gather_offsets[i - 1];
1302 }
1303}
1304
1305void L2FaceRestriction::ComputeGatherIndices()
1306{
1307 Mesh &mesh = *fes.GetMesh();
1308 // Computation of gather_indices
1309 int f_ind = 0;
1310 for (int f = 0; f < fes.GetNF(); ++f)
1311 {
1312 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
1313 MFEM_ASSERT(!face.IsShared(),
1314 "Unexpected shared face in L2FaceRestriction.");
1315 if ( face.IsOfFaceType(type) )
1316 {
1317 SetFaceDofsGatherIndices1(face,f_ind);
1318 if ( m==L2FaceValues::DoubleValued &&
1319 type==FaceType::Interior &&
1320 face.IsLocal())
1321 {
1322 PermuteAndSetFaceDofsGatherIndices2(face,f_ind);
1323 }
1324 f_ind++;
1325 }
1326 }
1327 MFEM_VERIFY(f_ind==nf, "Unexpected number of faces.");
1328
1329 // Reset offsets to their correct value
1330 for (int i = ndofs; i > 0; --i)
1331 {
1332 gather_offsets[i] = gather_offsets[i - 1];
1333 }
1334 gather_offsets[0] = 0;
1335}
1336
1337void L2FaceRestriction::SetFaceDofsScatterIndices1(
1338 const Mesh::FaceInformation &face,
1339 const int face_index)
1340{
1341 MFEM_ASSERT(!(face.IsNonconformingCoarse()),
1342 "This method should not be used on nonconforming coarse faces.");
1343 const Table& e2dTable = fes.GetElementToDofTable();
1344 const int* elem_map = e2dTable.GetJ();
1345 const int face_id1 = face.element[0].local_face_id;
1346 const int elem_index = face.element[0].index;
1347 fes.GetTypicalFE()->GetFaceMap(face_id1, face_map);
1348
1349 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1350 {
1351 const int volume_dof_elem1 = face_map[face_dof_elem1];
1352 const int global_dof_elem1 = elem_map[elem_index*elem_dofs + volume_dof_elem1];
1353 const int restriction_dof_elem1 = face_dofs*face_index + face_dof_elem1;
1354 scatter_indices1[restriction_dof_elem1] = global_dof_elem1;
1355 ++gather_offsets[global_dof_elem1 + 1];
1356 }
1357}
1358
1359void L2FaceRestriction::PermuteAndSetFaceDofsScatterIndices2(
1360 const Mesh::FaceInformation &face,
1361 const int face_index)
1362{
1363 MFEM_ASSERT(face.IsLocal(),
1364 "This method should only be used on local faces.");
1365 const Table& e2dTable = fes.GetElementToDofTable();
1366 const int* elem_map = e2dTable.GetJ();
1367 const int elem_index = face.element[1].index;
1368 const int face_id1 = face.element[0].local_face_id;
1369 const int face_id2 = face.element[1].local_face_id;
1370 const int orientation = face.element[1].orientation;
1371 const int dim = fes.GetMesh()->Dimension();
1372 const int dof1d = fes.GetTypicalFE()->GetOrder()+1;
1373 fes.GetTypicalFE()->GetFaceMap(face_id2, face_map);
1374
1375 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1376 {
1377 const int face_dof_elem2 = PermuteFaceL2(dim, face_id1, face_id2,
1378 orientation, dof1d,
1379 face_dof_elem1);
1380 const int volume_dof_elem2 = face_map[face_dof_elem2];
1381 const int global_dof_elem2 = elem_map[elem_index*elem_dofs + volume_dof_elem2];
1382 const int restriction_dof_elem2 = face_dofs*face_index + face_dof_elem1;
1383 scatter_indices2[restriction_dof_elem2] = global_dof_elem2;
1384 ++gather_offsets[global_dof_elem2 + 1];
1385 }
1386}
1387
1388void L2FaceRestriction::PermuteAndSetSharedFaceDofsScatterIndices2(
1389 const Mesh::FaceInformation &face,
1390 const int face_index)
1391{
1392#ifdef MFEM_USE_MPI
1393 MFEM_ASSERT(face.IsShared(),
1394 "This method should only be used on shared faces.");
1395 const int elem_index = face.element[1].index;
1396 const int face_id1 = face.element[0].local_face_id;
1397 const int face_id2 = face.element[1].local_face_id;
1398 const int orientation = face.element[1].orientation;
1399 const int dim = fes.GetMesh()->Dimension();
1400 const int dof1d = fes.GetTypicalFE()->GetOrder()+1;
1401 fes.GetTypicalFE()->GetFaceMap(face_id2, face_map);
1402 Array<int> face_nbr_dofs;
1403 const ParFiniteElementSpace &pfes =
1404 static_cast<const ParFiniteElementSpace&>(this->fes);
1405 pfes.GetFaceNbrElementVDofs(elem_index, face_nbr_dofs);
1406
1407 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1408 {
1409 const int face_dof_elem2 = PermuteFaceL2(dim, face_id1, face_id2,
1410 orientation, dof1d, face_dof_elem1);
1411 const int volume_dof_elem2 = face_map[face_dof_elem2];
1412 const int global_dof_elem2 = face_nbr_dofs[volume_dof_elem2];
1413 const int restriction_dof_elem2 = face_dofs*face_index + face_dof_elem1;
1414 // Trick to differentiate between interior and shared (face-neighbor) dofs
1415 scatter_indices2[restriction_dof_elem2] = ndofs+global_dof_elem2;
1416 }
1417#endif
1418}
1419
1420void L2FaceRestriction::SetBoundaryDofsScatterIndices2(
1421 const Mesh::FaceInformation &face,
1422 const int face_index)
1423{
1424 MFEM_ASSERT(face.IsBoundary(),
1425 "This method should only be used on boundary faces.");
1426
1427 for (int d = 0; d < face_dofs; ++d)
1428 {
1429 const int restriction_dof_elem2 = face_dofs*face_index + d;
1430 scatter_indices2[restriction_dof_elem2] = -1;
1431 }
1432}
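// Note: boundary faces have no second element, so scatter_indices2 is set to -1
// here; DoubleValuedConformingMult checks for idx2==-1 and writes 0.0 for that
// side of the face data.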
1433
1434void L2FaceRestriction::SetFaceDofsGatherIndices1(
1435 const Mesh::FaceInformation &face,
1436 const int face_index)
1437{
1438 MFEM_ASSERT(!(face.IsNonconformingCoarse()),
1439 "This method should not be used on nonconforming coarse faces.");
1440 const Table& e2dTable = fes.GetElementToDofTable();
1441 const int* elem_map = e2dTable.GetJ();
1442 const int face_id1 = face.element[0].local_face_id;
1443 const int elem_index = face.element[0].index;
1444 fes.GetTypicalFE()->GetFaceMap(face_id1, face_map);
1445
1446 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1447 {
1448 const int volume_dof_elem1 = face_map[face_dof_elem1];
1449 const int global_dof_elem1 = elem_map[elem_index*elem_dofs + volume_dof_elem1];
1450 const int restriction_dof_elem1 = face_dofs*face_index + face_dof_elem1;
1451 // We don't shift restriction_dof_elem1 to express that it's elem1 of the face
1452 gather_indices[gather_offsets[global_dof_elem1]++] = restriction_dof_elem1;
1453 }
1454}
1455
1456void L2FaceRestriction::PermuteAndSetFaceDofsGatherIndices2(
1457 const Mesh::FaceInformation &face,
1458 const int face_index)
1459{
1460 MFEM_ASSERT(face.IsLocal(),
1461 "This method should only be used on local faces.");
1462 const Table& e2dTable = fes.GetElementToDofTable();
1463 const int* elem_map = e2dTable.GetJ();
1464 const int elem_index = face.element[1].index;
1465 const int face_id1 = face.element[0].local_face_id;
1466 const int face_id2 = face.element[1].local_face_id;
1467 const int orientation = face.element[1].orientation;
1468 const int dim = fes.GetMesh()->Dimension();
1469 const int dof1d = fes.GetTypicalFE()->GetOrder()+1;
1470 fes.GetTypicalFE()->GetFaceMap(face_id2, face_map);
1471
1472 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1473 {
1474 const int face_dof_elem2 = PermuteFaceL2(dim, face_id1, face_id2,
1475 orientation, dof1d,
1476 face_dof_elem1);
1477 const int volume_dof_elem2 = face_map[face_dof_elem2];
1478 const int global_dof_elem2 = elem_map[elem_index*elem_dofs + volume_dof_elem2];
1479 const int restriction_dof_elem2 = face_dofs*face_index + face_dof_elem1;
1480 // We shift restriction_dof_elem2 to express that it's elem2 of the face
1481 gather_indices[gather_offsets[global_dof_elem2]++] = nfdofs +
1482 restriction_dof_elem2;
1483 }
1484}
1485
1486void L2FaceRestriction::NormalDerivativeMult(const Vector& x, Vector& y) const
1487{
1488 EnsureNormalDerivativeRestriction();
1489 normal_deriv_restr->Mult(x, y);
1490}
1491
1492void L2FaceRestriction::NormalDerivativeAddMultTranspose(const Vector &x,
1493                                                         Vector &y) const
1494{
1495 EnsureNormalDerivativeRestriction();
1496 normal_deriv_restr->AddMultTranspose(x, y);
1497}
1498
1499void L2FaceRestriction::EnsureNormalDerivativeRestriction() const
1500{
1501 if (!normal_deriv_restr)
1502 {
1503 normal_deriv_restr.reset(
1504 new L2NormalDerivativeFaceRestriction(fes, ordering, type));
1505 }
1506}
1507
1508InterpolationManager::InterpolationManager(const FiniteElementSpace &fes,
1509                                           ElementDofOrdering ordering,
1510 FaceType type)
1511 : fes(fes),
1512 ordering(ordering),
1513 interp_config( fes.GetNFbyType(type) ),
1514 nc_cpt(0)
1515{ }
1516
1517void InterpolationManager::RegisterFaceConformingInterpolation(
1518 const Mesh::FaceInformation &face,
1519 int face_index)
1520{
1521 interp_config[face_index] = InterpConfig();
1522}
1523
1524void InterpolationManager::RegisterFaceCoarseToFineInterpolation(
1525 const Mesh::FaceInformation &face,
1526 int face_index)
1527{
1528 MFEM_ASSERT(!face.IsConforming(),
1529 "Registering face as nonconforming even though it is not.");
1530 const DenseMatrix* ptMat = face.point_matrix;
1531 // In the case of a nonconforming slave shared face, the master face is elem1.
1532 const int master_side =
1533 face.element[0].conformity == Mesh::ElementConformity::Superset ? 0 : 1;
1534 const int face_key = (master_side == 0 ? 1000 : 0) +
1535 face.element[0].local_face_id +
1536 6*face.element[1].local_face_id +
1537 36*face.element[1].orientation ;
1538 // Unfortunately, we cannot rely on the uniqueness of ptMat to identify the transformation.
1539 Key key(ptMat, face_key);
1540 auto itr = interp_map.find(key);
1541 if ( itr == interp_map.end() )
1542 {
1543 const DenseMatrix* interpolator =
1544 GetCoarseToFineInterpolation(face,ptMat);
1545 interp_map[key] = {nc_cpt, interpolator};
1546 interp_config[face_index] = {master_side, nc_cpt};
1547 nc_cpt++;
1548 }
1549 else
1550 {
1551 interp_config[face_index] = {master_side, itr->second.first};
1552 }
1553}
1554
1555const DenseMatrix* InterpolationManager::GetCoarseToFineInterpolation(
1556 const Mesh::FaceInformation &face,
1557 const DenseMatrix* ptMat)
1558{
1559 MFEM_VERIFY(ordering == ElementDofOrdering::LEXICOGRAPHIC,
1560 "The following interpolation operator is only implemented for "
1561 "lexicographic ordering.");
1562 MFEM_VERIFY(!face.IsConforming(),
1563 "This method should not be called on conforming faces.")
1564 const int face_id1 = face.element[0].local_face_id;
1565 const int face_id2 = face.element[1].local_face_id;
1566
1567 const bool is_ghost_slave =
1568 face.element[0].conformity == Mesh::ElementConformity::Superset;
1569 const int master_face_id = is_ghost_slave ? face_id1 : face_id2;
1570
1571 // Computation of the interpolation matrix from master
1572 // (coarse) face to slave (fine) face.
1573 // Assumes all trace elements are the same.
1574 const FiniteElement *trace_fe = fes.GetTypicalTraceElement();
1575 const int face_dofs = trace_fe->GetDof();
1576 const TensorBasisElement* el =
1577 dynamic_cast<const TensorBasisElement*>(trace_fe);
1578 const auto dof_map = el->GetDofMap();
1579 DenseMatrix* interpolator = new DenseMatrix(face_dofs,face_dofs);
1580 Vector shape(face_dofs);
1581
1582 IsoparametricTransformation isotr;
1583 isotr.SetIdentityTransformation(trace_fe->GetGeomType());
1584 isotr.SetPointMat(*ptMat);
1585 DenseMatrix& trans_pt_mat = isotr.GetPointMat();
1586 // PointMatrix needs to be flipped in 2D
1587 if ( trace_fe->GetGeomType()==Geometry::SEGMENT && !is_ghost_slave )
1588 {
1589 std::swap(trans_pt_mat(0,0),trans_pt_mat(0,1));
1590 }
1591 DenseMatrix native_interpolator(face_dofs,face_dofs);
1592 trace_fe->GetLocalInterpolation(isotr, native_interpolator);
1593 const int dim = trace_fe->GetDim()+1;
1594 const int dof1d = trace_fe->GetOrder()+1;
1595 const int orientation = face.element[1].orientation;
1596 for (int i = 0; i < face_dofs; i++)
1597 {
1598 const int ni = (dof_map.Size()==0) ? i : dof_map[i];
1599 int li = ToLexOrdering(dim, master_face_id, dof1d, i);
1600 if ( !is_ghost_slave )
1601 {
1602 // master side is elem 2, so we permute to order dofs as elem 1.
1603 li = PermuteFaceL2(dim, face_id2, face_id1,
1604 orientation, dof1d, li);
1605 }
1606 for (int j = 0; j < face_dofs; j++)
1607 {
1608 int lj = ToLexOrdering(dim, master_face_id, dof1d, j);
1609 if ( !is_ghost_slave )
1610 {
1611 // master side is elem 2, so we permute to order dofs as elem 1.
1612 lj = PermuteFaceL2(dim, face_id2, face_id1,
1613 orientation, dof1d, lj);
1614 }
1615 const int nj = (dof_map.Size()==0) ? j : dof_map[j];
1616 (*interpolator)(li,lj) = native_interpolator(ni,nj);
1617 }
1618 }
1619 return interpolator;
1620}
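// Note: the matrix returned above maps dof values on the coarse (master) face to
// dof values on the fine (slave) face. GetLocalInterpolation produces it in the
// native dof ordering; the dof_map, ToLexOrdering, and PermuteFaceL2 calls
// re-index rows and columns so the stored interpolator acts directly on
// lexicographically ordered face data, matched to elem1's point ordering when
// the master side is elem2.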
1621
1622void InterpolationManager::LinearizeInterpolatorMapIntoVector()
1623{
1624 // Assumes all trace elements are the same.
1625 const FiniteElement *trace_fe = fes.GetTypicalTraceElement();
1626 const int face_dofs = trace_fe->GetDof();
1627 const int nc_size = static_cast<int>(interp_map.size());
1628 MFEM_VERIFY(nc_cpt==nc_size, "Unexpected number of interpolators.");
1629 interpolators.SetSize(face_dofs*face_dofs*nc_size);
1630 auto d_interp = Reshape(interpolators.HostWrite(),face_dofs,face_dofs,nc_size);
1631 for (auto val : interp_map)
1632 {
1633 const int idx = val.second.first;
1634 const DenseMatrix &interpolator = *val.second.second;
1635 for (int i = 0; i < face_dofs; i++)
1636 {
1637 for (int j = 0; j < face_dofs; j++)
1638 {
1639 d_interp(i,j,idx) = interpolator(i,j);
1640 }
1641 }
1642 delete val.second.second;
1643 }
1644 interp_map.clear();
1645}
1646
1647void InterpolationManager::InitializeNCInterpConfig()
1648{
1649 // Count nonconforming faces
1650 int num_nc_faces = 0;
1651 for (int i = 0; i < interp_config.Size(); i++)
1652 {
1653 if ( interp_config[i].is_non_conforming )
1654 {
1655 num_nc_faces++;
1656 }
1657 }
1658 // Set nc_interp_config
1659 nc_interp_config.SetSize(num_nc_faces);
1660 int nc_index = 0;
1661 for (int i = 0; i < interp_config.Size(); i++)
1662 {
1663 auto & config = interp_config[i];
1664 if ( config.is_non_conforming )
1665 {
1666 nc_interp_config[nc_index] = NCInterpConfig(i, config);
1667 nc_index++;
1668 }
1669 }
1670}
1671
1672NCL2FaceRestriction::NCL2FaceRestriction(const FiniteElementSpace &fes,
1673                                        const ElementDofOrdering f_ordering,
1674 const FaceType type,
1675 const L2FaceValues m,
1676 bool build)
1677 : L2FaceRestriction(fes, f_ordering, type, m, false),
1678 interpolations(fes, f_ordering, type)
1679{
1680 if (!build) { return; }
1681 x_interp.UseDevice(true);
1682
1683 CheckFESpace();
1684
1685 ComputeScatterIndicesAndOffsets();
1686
1687 ComputeGatherIndices();
1688}
1689
1691 const ElementDofOrdering f_ordering,
1692 const FaceType type,
1693 const L2FaceValues m)
1694 : NCL2FaceRestriction(fes, f_ordering, type, m, true)
1695{ }
1696
1697void NCL2FaceRestriction::DoubleValuedNonconformingMult(
1698 const Vector& x, Vector& y) const
1699{
1700 DoubleValuedConformingMult(x, y);
1701 DoubleValuedNonconformingInterpolation(y);
1702}
1703
1704void NCL2FaceRestriction::DoubleValuedNonconformingInterpolation(
1705 Vector& y) const
1706{
1707 if (nf == 0) { return; }
1708 // Assumes all elements have the same number of dofs
1709 const int nface_dofs = face_dofs;
1710 const int vd = vdim;
1711 auto d_y = Reshape(y.ReadWrite(), nface_dofs, vd, 2, nf);
1712 auto &nc_interp_config = interpolations.GetNCFaceInterpConfig();
1713 const int num_nc_faces = nc_interp_config.Size();
1714 if ( num_nc_faces == 0 ) { return; }
1715 auto interp_config_ptr = nc_interp_config.Read();
1716 const int nc_size = interpolations.GetNumInterpolators();
1717 auto d_interp = Reshape(interpolations.GetInterpolators().Read(),
1718 nface_dofs, nface_dofs, nc_size);
1719 static constexpr int max_nd = 16*16;
1720 MFEM_VERIFY(nface_dofs<=max_nd, "Too many degrees of freedom.");
1721 mfem::forall_2D(num_nc_faces, nface_dofs, 1, [=] MFEM_HOST_DEVICE (int nc_face)
1722 {
1723 MFEM_SHARED real_t dof_values[max_nd];
1724 const NCInterpConfig conf = interp_config_ptr[nc_face];
1725 if ( conf.is_non_conforming )
1726 {
1727 const int master_side = conf.master_side;
1728 const int interp_index = conf.index;
1729 const int face = conf.face_index;
1730 for (int c = 0; c < vd; ++c)
1731 {
1732 MFEM_FOREACH_THREAD(dof,x,nface_dofs)
1733 {
1734 dof_values[dof] = d_y(dof, c, master_side, face);
1735 }
1736 MFEM_SYNC_THREAD;
1737 MFEM_FOREACH_THREAD(dof_out,x,nface_dofs)
1738 {
1739 real_t res = 0.0;
1740 for (int dof_in = 0; dof_in<nface_dofs; dof_in++)
1741 {
1742 res += d_interp(dof_out, dof_in, interp_index)*dof_values[dof_in];
1743 }
1744 d_y(dof_out, c, master_side, face) = res;
1745 }
1746 MFEM_SYNC_THREAD;
1747 }
1748 }
1749 });
1750}
1751
1752void NCL2FaceRestriction::Mult(const Vector& x, Vector& y) const
1753{
1754 if ( type==FaceType::Interior && m==L2FaceValues::DoubleValued )
1755 {
1756 DoubleValuedNonconformingMult(x, y);
1757 }
1758 else if ( type==FaceType::Boundary && m==L2FaceValues::DoubleValued )
1759 {
1760 DoubleValuedConformingMult(x, y);
1761 }
1762 else // Single valued (assumes no nonconforming master on elem1)
1763 {
1764 SingleValuedConformingMult(x, y);
1765 }
1766}
1767
1768void NCL2FaceRestriction::SingleValuedNonconformingTransposeInterpolation(
1769 const Vector& x) const
1770{
1771 MFEM_ASSERT(
1772 m == L2FaceValues::SingleValued,
1773 "This method should be called when m == L2FaceValues::SingleValued.");
1774 if (x_interp.Size()==0)
1775 {
1776 x_interp.SetSize(x.Size());
1777 }
1778 x_interp = x;
1779 SingleValuedNonconformingTransposeInterpolationInPlace(x_interp);
1780}
1781
1782
1783void NCL2FaceRestriction::SingleValuedNonconformingTransposeInterpolationInPlace(
1784 Vector& x) const
1785{
1786 // Assumes all elements have the same number of dofs
1787 const int nface_dofs = face_dofs;
1788 const int vd = vdim;
1789 // Interpolation
1790 auto d_x = Reshape(x.ReadWrite(), nface_dofs, vd, nf);
1791 auto &nc_interp_config = interpolations.GetNCFaceInterpConfig();
1792 const int num_nc_faces = nc_interp_config.Size();
1793 if ( num_nc_faces == 0 ) { return; }
1794 auto interp_config_ptr = nc_interp_config.Read();
1795 auto interpolators = interpolations.GetInterpolators().Read();
1796 const int nc_size = interpolations.GetNumInterpolators();
1797 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
1798 static constexpr int max_nd = 16*16;
1799 MFEM_VERIFY(nface_dofs<=max_nd, "Too many degrees of freedom.");
1800 mfem::forall_2D(num_nc_faces, nface_dofs, 1, [=] MFEM_HOST_DEVICE (int nc_face)
1801 {
1802 MFEM_SHARED real_t dof_values[max_nd];
1803 const NCInterpConfig conf = interp_config_ptr[nc_face];
1804 const int master_side = conf.master_side;
1805 const int interp_index = conf.index;
1806 const int face = conf.face_index;
1807 if ( conf.is_non_conforming && master_side==0 )
1808 {
1809 // Interpolation from fine to coarse
1810 for (int c = 0; c < vd; ++c)
1811 {
1812 MFEM_FOREACH_THREAD(dof,x,nface_dofs)
1813 {
1814 dof_values[dof] = d_x(dof, c, face);
1815 }
1816 MFEM_SYNC_THREAD;
1817 MFEM_FOREACH_THREAD(dof_out,x,nface_dofs)
1818 {
1819 real_t res = 0.0;
1820 for (int dof_in = 0; dof_in<nface_dofs; dof_in++)
1821 {
1822 res += d_interp(dof_in, dof_out, interp_index)*dof_values[dof_in];
1823 }
1824 d_x(dof_out, c, face) = res;
1825 }
1826 MFEM_SYNC_THREAD;
1827 }
1828 }
1829 });
1830}
1831
1832void NCL2FaceRestriction::DoubleValuedNonconformingTransposeInterpolation(
1833 const Vector& x) const
1834{
1835 MFEM_ASSERT(
1836 m == L2FaceValues::DoubleValued,
1837 "This method should be called when m == L2FaceValues::DoubleValued.");
1838 if (x_interp.Size()==0)
1839 {
1840 x_interp.SetSize(x.Size());
1841 }
1842 x_interp = x;
1843 DoubleValuedNonconformingTransposeInterpolationInPlace(x_interp);
1844}
1845
1846void NCL2FaceRestriction::DoubleValuedNonconformingTransposeInterpolationInPlace(
1847 Vector& x) const
1848{
1849 // Assumes all elements have the same number of dofs
1850 const int nface_dofs = face_dofs;
1851 const int vd = vdim;
1852 // Interpolation
1853 auto d_x = Reshape(x.ReadWrite(), nface_dofs, vd, 2, nf);
1854 auto &nc_interp_config = interpolations.GetNCFaceInterpConfig();
1855 const int num_nc_faces = nc_interp_config.Size();
1856 if ( num_nc_faces == 0 ) { return; }
1857 auto interp_config_ptr = nc_interp_config.Read();
1858 auto interpolators = interpolations.GetInterpolators().Read();
1859 const int nc_size = interpolations.GetNumInterpolators();
1860 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
1861 static constexpr int max_nd = 16*16;
1862 MFEM_VERIFY(nface_dofs<=max_nd, "Too many degrees of freedom.");
1863 mfem::forall_2D(num_nc_faces, nface_dofs, 1, [=] MFEM_HOST_DEVICE (int nc_face)
1864 {
1865 MFEM_SHARED real_t dof_values[max_nd];
1866 const NCInterpConfig conf = interp_config_ptr[nc_face];
1867 const int master_side = conf.master_side;
1868 const int interp_index = conf.index;
1869 const int face = conf.face_index;
1870 if ( conf.is_non_conforming )
1871 {
1872 // Interpolation from fine to coarse
1873 for (int c = 0; c < vd; ++c)
1874 {
1875 MFEM_FOREACH_THREAD(dof,x,nface_dofs)
1876 {
1877 dof_values[dof] = d_x(dof, c, master_side, face);
1878 }
1879 MFEM_SYNC_THREAD;
1880 MFEM_FOREACH_THREAD(dof_out,x,nface_dofs)
1881 {
1882 real_t res = 0.0;
1883 for (int dof_in = 0; dof_in<nface_dofs; dof_in++)
1884 {
1885 res += d_interp(dof_in, dof_out, interp_index)*dof_values[dof_in];
1886 }
1887 d_x(dof_out, c, master_side, face) = res;
1888 }
1889 MFEM_SYNC_THREAD;
1890 }
1891 }
1892 });
1893}
1894
1895void NCL2FaceRestriction::AddMultTranspose(const Vector& x, Vector& y,
1896 const real_t a) const
1897{
1898 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
1899 if (nf==0) { return; }
1900 if (type==FaceType::Interior)
1901 {
1902 if ( m==L2FaceValues::DoubleValued )
1903 {
1904 DoubleValuedNonconformingTransposeInterpolation(x);
1905 DoubleValuedConformingAddMultTranspose(x_interp, y);
1906 }
1907 else if ( m==L2FaceValues::SingleValued )
1908 {
1909 SingleValuedNonconformingTransposeInterpolation(x);
1910 SingleValuedConformingAddMultTranspose(x_interp, y);
1911 }
1912 }
1913 else
1914 {
1915 if ( m==L2FaceValues::DoubleValued )
1916 {
1917 DoubleValuedConformingAddMultTranspose(x, y);
1918 }
1919 else if ( m==L2FaceValues::SingleValued )
1920 {
1921 SingleValuedConformingAddMultTranspose(x, y);
1922 }
1923 }
1924}
1925
1926void NCL2FaceRestriction::AddMultTransposeInPlace(Vector& x, Vector& y) const
1927{
1928 if (nf==0) { return; }
1929 if (type==FaceType::Interior)
1930 {
1931 if ( m==L2FaceValues::DoubleValued )
1932 {
1933 DoubleValuedNonconformingTransposeInterpolationInPlace(x);
1934 DoubleValuedConformingAddMultTranspose(x, y);
1935 }
1936 else if ( m==L2FaceValues::SingleValued )
1937 {
1938 SingleValuedNonconformingTransposeInterpolationInPlace(x);
1939 SingleValuedConformingAddMultTranspose(x, y);
1940 }
1941 }
1942 else
1943 {
1944 if ( m==L2FaceValues::DoubleValued )
1945 {
1946 DoubleValuedConformingAddMultTranspose(x, y);
1947 }
1948 else if ( m==L2FaceValues::SingleValued )
1949 {
1950 SingleValuedConformingAddMultTranspose(x, y);
1951 }
1952 }
1953}
1954
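// Count the nonzeros contributed by the face terms: each face dof of one
// element couples to the nface_dofs face dofs of the neighboring element.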
1955void NCL2FaceRestriction::FillI(SparseMatrix &mat,
1956 const bool keep_nbr_block) const
1957{
1958 const int nface_dofs = face_dofs;
1959 auto d_indices1 = scatter_indices1.Read();
1960 auto d_indices2 = scatter_indices2.Read();
1961 auto I = mat.ReadWriteI();
1962 mfem::forall(nf*nface_dofs, [=] MFEM_HOST_DEVICE (int fdof)
1963 {
1964 const int iE1 = d_indices1[fdof];
1965 const int iE2 = d_indices2[fdof];
1966 AddNnz(iE1,I,nface_dofs);
1967 AddNnz(iE2,I,nface_dofs);
1968 });
1969}
1970
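// Fill the J and Data arrays of the sparse matrix with the face matrices. On
// nonconforming faces the face matrices are composed with the face
// interpolation operator (or its transpose) of the master side before being
// written.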
1971void NCL2FaceRestriction::FillJAndData(const Vector &fea_data,
1972 SparseMatrix &mat,
1973 const bool keep_nbr_block) const
1974{
1975 const int nface_dofs = face_dofs;
1976 auto d_indices1 = scatter_indices1.Read();
1977 auto d_indices2 = scatter_indices2.Read();
1978 auto I = mat.ReadWriteI();
1979 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, 2, nf);
1980 auto J = mat.WriteJ();
1981 auto Data = mat.WriteData();
1982 auto interp_config_ptr = interpolations.GetFaceInterpConfig().Read();
1983 auto interpolators = interpolations.GetInterpolators().Read();
1984 const int nc_size = interpolations.GetNumInterpolators();
1985 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
1986 mfem::forall(nf*nface_dofs, [=] MFEM_HOST_DEVICE (int fdof)
1987 {
1988 const int f = fdof/nface_dofs;
1989 const InterpConfig conf = interp_config_ptr[f];
1990 const int master_side = conf.master_side;
1991 const int interp_index = conf.index;
1992 const int iF = fdof%nface_dofs;
1993 const int iE1 = d_indices1[f*nface_dofs+iF];
1994 const int iE2 = d_indices2[f*nface_dofs+iF];
1995 const int offset1 = AddNnz(iE1,I,nface_dofs);
1996 const int offset2 = AddNnz(iE2,I,nface_dofs);
1997 for (int jF = 0; jF < nface_dofs; jF++)
1998 {
1999 const int jE1 = d_indices1[f*nface_dofs+jF];
2000 const int jE2 = d_indices2[f*nface_dofs+jF];
2001 J[offset2+jF] = jE1;
2002 J[offset1+jF] = jE2;
2003 real_t val1 = 0.0;
2004 real_t val2 = 0.0;
2005 if ( conf.is_non_conforming && master_side==0 )
2006 {
2007 for (int kF = 0; kF < nface_dofs; kF++)
2008 {
2009 val1 += mat_fea(kF,iF,0,f) * d_interp(kF, jF, interp_index);
2010 val2 += d_interp(kF, iF, interp_index) * mat_fea(jF,kF,1,f);
2011 }
2012 }
2013 else if ( conf.is_non_conforming && master_side==1 )
2014 {
2015 for (int kF = 0; kF < nface_dofs; kF++)
2016 {
2017 val1 += d_interp(kF, iF, interp_index) * mat_fea(jF,kF,0,f);
2018 val2 += mat_fea(kF,iF,1,f) * d_interp(kF, jF, interp_index);
2019 }
2020 }
2021 else
2022 {
2023 val1 = mat_fea(jF,iF,0,f);
2024 val2 = mat_fea(jF,iF,1,f);
2025 }
2026 Data[offset2+jF] = val1;
2027 Data[offset1+jF] = val2;
2028 }
2029 });
2030}
2031
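// Add the face matrices to the element matrices (EA format). On nonconforming
// faces the contribution of the master side is transformed by the face
// interpolation operator and its transpose before being accumulated.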
2032void NCL2FaceRestriction::AddFaceMatricesToElementMatrices(
2033 const Vector &fea_data,
2034 Vector &ea_data)
2035const
2036{
2037 const int nface_dofs = face_dofs;
2038 const int nelem_dofs = elem_dofs;
2039 const int NE = ne;
2040 if (m==L2FaceValues::DoubleValued)
2041 {
2042 auto d_indices1 = scatter_indices1.Read();
2043 auto d_indices2 = scatter_indices2.Read();
2044 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, 2, nf);
2045 auto mat_ea = Reshape(ea_data.ReadWrite(), nelem_dofs, nelem_dofs, ne);
2046 auto interp_config_ptr = interpolations.GetFaceInterpConfig().Read();
2047 auto interpolators = interpolations.GetInterpolators().Read();
2048 const int nc_size = interpolations.GetNumInterpolators();
2049 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
2050 mfem::forall(nf, [=] MFEM_HOST_DEVICE (int f)
2051 {
2052 const InterpConfig conf = interp_config_ptr[f];
2053 const int master_side = conf.master_side;
2054 const int interp_index = conf.index;
2055 const int e1 = d_indices1[f*nface_dofs]/nelem_dofs;
2056 const int e2 = d_indices2[f*nface_dofs]/nelem_dofs;
2057 for (int j = 0; j < nface_dofs; j++)
2058 {
2059 const int jB1 = d_indices1[f*nface_dofs+j]%nelem_dofs;
2060 for (int i = 0; i < nface_dofs; i++)
2061 {
2062 const int iB1 = d_indices1[f*nface_dofs+i]%nelem_dofs;
2063 real_t val = 0.0;
2064 if ( conf.is_non_conforming && master_side==0 )
2065 {
2066 for (int k = 0; k < nface_dofs; k++)
2067 {
2068 for (int l = 0; l < nface_dofs; l++)
2069 {
2070 val += d_interp(l, j, interp_index)
2071 * mat_fea(k,l,0,f)
2072 * d_interp(k, i, interp_index);
2073 }
2074 }
2075 }
2076 else
2077 {
2078 val = mat_fea(i,j,0,f);
2079 }
2080 AtomicAdd(mat_ea(iB1,jB1,e1), val);
2081 }
2082 }
2083 if (e2 < NE)
2084 {
2085 for (int j = 0; j < nface_dofs; j++)
2086 {
2087 const int jB2 = d_indices2[f*nface_dofs+j]%nelem_dofs;
2088 for (int i = 0; i < nface_dofs; i++)
2089 {
2090 const int iB2 = d_indices2[f*nface_dofs+i]%nelem_dofs;
2091 real_t val = 0.0;
2092 if ( conf.is_non_conforming && master_side==1 )
2093 {
2094 for (int k = 0; k < nface_dofs; k++)
2095 {
2096 for (int l = 0; l < nface_dofs; l++)
2097 {
2098 val += d_interp(l, j, interp_index)
2099 * mat_fea(k,l,1,f)
2100 * d_interp(k, i, interp_index);
2101 }
2102 }
2103 }
2104 else
2105 {
2106 val = mat_fea(i,j,1,f);
2107 }
2108 AtomicAdd(mat_ea(iB2,jB2,e2), val);
2109 }
2110 }
2111 }
2112 });
2113 }
2114 else
2115 {
2116 auto d_indices = scatter_indices1.Read();
2117 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, nf);
2118 auto mat_ea = Reshape(ea_data.ReadWrite(), nelem_dofs, nelem_dofs, ne);
2119 auto interp_config_ptr = interpolations.GetFaceInterpConfig().Read();
2120 auto interpolators = interpolations.GetInterpolators().Read();
2121 const int nc_size = interpolations.GetNumInterpolators();
2122 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
2123 mfem::forall(nf, [=] MFEM_HOST_DEVICE (int f)
2124 {
2125 const InterpConfig conf = interp_config_ptr[f];
2126 const int master_side = conf.master_side;
2127 const int interp_index = conf.index;
2128 const int e = d_indices[f*nface_dofs]/nelem_dofs;
2129 for (int j = 0; j < nface_dofs; j++)
2130 {
2131 const int jE = d_indices[f*nface_dofs+j]%nelem_dofs;
2132 for (int i = 0; i < nface_dofs; i++)
2133 {
2134 const int iE = d_indices[f*nface_dofs+i]%nelem_dofs;
2135 real_t val = 0.0;
2136 if ( conf.is_non_conforming && master_side==0 )
2137 {
2138 for (int k = 0; k < nface_dofs; k++)
2139 {
2140 for (int l = 0; l < nface_dofs; l++)
2141 {
2142 val += d_interp(l, j, interp_index)
2143 * mat_fea(k,l,f)
2144 * d_interp(k, i, interp_index);
2145 }
2146 }
2147 }
2148 else
2149 {
2150 val = mat_fea(i,j,f);
2151 }
2152 AtomicAdd(mat_ea(iE,jE,e), val);
2153 }
2154 }
2155 });
2156 }
2157}
2158
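// Convert a face dof index from native to lexicographic ordering; 1D faces
// have a single dof, so 0 is returned.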
2159int ToLexOrdering(const int dim, const int face_id, const int size1d,
2160 const int index)
2161{
2162 switch (dim)
2163 {
2164 case 1:
2165 return 0;
2166 case 2:
2167 return internal::ToLexOrdering2D(face_id, size1d, index);
2168 case 3:
2169 return internal::ToLexOrdering3D(face_id, size1d, index%size1d, index/size1d);
2170 default:
2171 MFEM_ABORT("Unsupported dimension.");
2172 return 0;
2173 }
2174}
2175
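// Compute the scatter indices and gather offsets for all faces of the
// requested type, and register a conforming or coarse-to-fine interpolation
// for each face. Nonconforming coarse faces are skipped: they are handled
// through the corresponding fine faces.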
2176void NCL2FaceRestriction::ComputeScatterIndicesAndOffsets()
2177{
2178 Mesh &mesh = *fes.GetMesh();
2179
2180 // Initialization of the offsets
2181 for (int i = 0; i <= ndofs; ++i)
2182 {
2183 gather_offsets[i] = 0;
2184 }
2185
2186 // Computation of the scatter indices and offsets
2187 int f_ind=0;
2188 for (int f = 0; f < fes.GetNF(); ++f)
2189 {
2190 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
2191 if ( face.IsNonconformingCoarse() )
2192 {
2193 // We skip nonconforming coarse faces as they are treated
2194 // by the corresponding nonconforming fine faces.
2195 continue;
2196 }
2197 else if ( type==FaceType::Interior && face.IsInterior() )
2198 {
2199 SetFaceDofsScatterIndices1(face,f_ind);
2200 if ( m==L2FaceValues::DoubleValued )
2201 {
2202 PermuteAndSetFaceDofsScatterIndices2(face,f_ind);
2203 }
2204 if ( face.IsConforming() )
2205 {
2206 interpolations.RegisterFaceConformingInterpolation(face,f_ind);
2207 }
2208 else // Non-conforming face
2209 {
2210 interpolations.RegisterFaceCoarseToFineInterpolation(face,f_ind);
2211 }
2212 f_ind++;
2213 }
2214 else if ( type==FaceType::Boundary && face.IsBoundary() )
2215 {
2216 SetFaceDofsScatterIndices1(face,f_ind);
2217 if ( m==L2FaceValues::DoubleValued )
2218 {
2219 SetBoundaryDofsScatterIndices2(face,f_ind);
2220 }
2221 interpolations.RegisterFaceConformingInterpolation(face,f_ind);
2222 f_ind++;
2223 }
2224 }
2225 MFEM_VERIFY(f_ind==nf, "Unexpected number of " <<
2226 (type==FaceType::Interior? "interior" : "boundary") <<
2227 " faces: " << f_ind << " vs " << nf );
2228
2229 // Summation of the offsets
2230 for (int i = 1; i <= ndofs; ++i)
2231 {
2232 gather_offsets[i] += gather_offsets[i - 1];
2233 }
2234
2235 // Transform the interpolation matrix map into a contiguous memory structure.
2236 interpolations.LinearizeInterpolatorMapIntoVector();
2237 interpolations.InitializeNCInterpConfig();
2238}
2239
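// Compute the gather indices used by AddMultTranspose, then shift the gather
// offsets back to their correct values (they were used as counters above).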
2240void NCL2FaceRestriction::ComputeGatherIndices()
2241{
2242 Mesh &mesh = *fes.GetMesh();
2243 // Computation of gather_indices
2244 int f_ind = 0;
2245 for (int f = 0; f < fes.GetNF(); ++f)
2246 {
2247 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
2248 MFEM_ASSERT(!face.IsShared(),
2249 "Unexpected shared face in NCL2FaceRestriction.");
2250 if ( face.IsNonconformingCoarse() )
2251 {
2252 // We skip nonconforming coarse faces as they are treated
2253 // by the corresponding nonconforming fine faces.
2254 continue;
2255 }
2256 else if ( face.IsOfFaceType(type) )
2257 {
2258 SetFaceDofsGatherIndices1(face,f_ind);
2259 if ( m==L2FaceValues::DoubleValued &&
2260 type==FaceType::Interior &&
2261 face.IsInterior() )
2262 {
2263 PermuteAndSetFaceDofsGatherIndices2(face,f_ind);
2264 }
2265 f_ind++;
2266 }
2267 }
2268 MFEM_VERIFY(f_ind==nf, "Unexpected number of " <<
2269 (type==FaceType::Interior? "interior" : "boundary") <<
2270 " faces: " << f_ind << " vs " << nf );
2271
2272 // Shift the offsets back to their correct values
2273 for (int i = ndofs; i > 0; --i)
2274 {
2275 gather_offsets[i] = gather_offsets[i - 1];
2276 }
2277 gather_offsets[0] = 0;
2278}
2279
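// Build the gather map of the face restriction from the face-to-dof table of
// the space, keeping only the faces of the requested type.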
2280L2InterfaceFaceRestriction::L2InterfaceFaceRestriction(
2281 const FiniteElementSpace& fes_,
2282 const ElementDofOrdering ordering_,
2283 const FaceType type_)
2284 : fes(fes_),
2285 ordering(ordering_),
2286 type(type_),
2287 nfaces(fes.GetNFbyType(type)),
2288 vdim(fes.GetVDim()),
2289 byvdim(fes.GetOrdering() == Ordering::byVDIM),
2290 face_dofs(nfaces > 0 ? fes.GetFaceElement(0)->GetDof() : 0),
2291 nfdofs(face_dofs*nfaces),
2292 ndofs(fes.GetNDofs())
2293{
2294 height = nfdofs;
2295 width = ndofs;
2296
2297 const Table &face2dof = fes.GetFaceToDofTable();
2298
2299 const Mesh &mesh = *fes.GetMesh();
2300 int face_idx = 0;
2301 gather_map.SetSize(nfdofs);
2302 for (int f = 0; f < mesh.GetNumFaces(); ++f)
2303 {
2304 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
2305 if (!face.IsOfFaceType(type)) { continue; }
2306 for (int i = 0; i < face_dofs; ++i)
2307 {
2308 gather_map[i + face_idx*face_dofs] = face2dof.GetJ()[i + f*face_dofs];
2309 }
2310 ++face_idx;
2311 }
2312}
2313
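// Scatter the L-vector degrees of freedom into the face E-vector using the
// gather map.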
2315{
2316 const int nd = face_dofs;
2317 const int nf = nfaces;
2318 const int vd = vdim;
2319 const bool t = byvdim;
2320 const int *map = gather_map.Read();
2321
2322 const auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
2323 auto d_y = Reshape(y.Write(), nd, vd, nf);
2324
2325 mfem::forall(nd*nf, [=] MFEM_HOST_DEVICE (int i)
2326 {
2327 const int j = map[i];
2328 for (int c = 0; c < vd; ++c)
2329 {
2330 d_y(i % nd, c, i / nd) = d_x(t?c:j, t?j:c);
2331 }
2332 });
2333}
2334
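// Gather the face E-vector values back into the L-vector: y is zeroed first,
// then overwritten through the gather map (the coefficient a is not used in
// this kernel).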
2335 void L2InterfaceFaceRestriction::AddMultTranspose(
2336 const Vector &x, Vector &y, const real_t a) const
2337{
2338 const int nd = face_dofs;
2339 const int nf = nfaces;
2340 const int vd = vdim;
2341 const bool t = byvdim;
2342 const int *map = gather_map.Read();
2343
2344 const auto d_x = Reshape(x.Read(), nd, vd, nf);
2345 auto d_y = Reshape(y.Write(), t?vd:ndofs, t?ndofs:vd);
2346
2347 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i) { d_y[i] = 0.0; });
2348 mfem::forall(nd*nf, [=] MFEM_HOST_DEVICE (int i)
2349 {
2350 const int j = map[i];
2351 for (int c = 0; c < vd; ++c)
2352 {
2353 d_y(t?c:j, t?j:c) = d_x(i % nd, c, i / nd);
2354 }
2355 });
2356}
2357
2359{
2360 return gather_map;
2361}
2362
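// Return the face-neighbor data of x for parallel interior faces, exchanging
// it through a ParGridFunction if necessary; in serial builds, or for boundary
// faces, an empty Vector is returned.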
2363 Vector GetLVectorFaceNbrData(
2364 const FiniteElementSpace &fes, const Vector &x, FaceType ftype)
2365{
2366#ifdef MFEM_USE_MPI
2367 if (ftype == FaceType::Interior)
2368 {
2369 if (auto *pfes = const_cast<ParFiniteElementSpace*>
2370 (dynamic_cast<const ParFiniteElementSpace*>(&fes)))
2371 {
2372 if (auto *x_gf = const_cast<ParGridFunction*>
2373 (dynamic_cast<const ParGridFunction*>(&x)))
2374 {
2375 Vector &gf_face_nbr = x_gf->FaceNbrData();
2376 if (gf_face_nbr.Size() == 0) { x_gf->ExchangeFaceNbrData(); }
2377 gf_face_nbr.Read();
2378 return Vector(gf_face_nbr, 0, gf_face_nbr.Size());
2379 }
2380 else
2381 {
2382 ParGridFunction gf(pfes, const_cast<Vector&>(x));
2383 gf.ExchangeFaceNbrData();
2384 return std::move(gf.FaceNbrData());
2385 }
2386 }
2387 }
2388#endif
2389 return Vector();
2390}
2391
2392} // namespace mfem
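// Usage sketch (not part of this file): one possible way to obtain a face
// restriction from a FiniteElementSpace and move data between L-vector and
// face E-vector; the variable names (fes, l_vec) are placeholders.
//
//   const FaceRestriction *R =
//       fes.GetFaceRestriction(ElementDofOrdering::LEXICOGRAPHIC,
//                              FaceType::Interior,
//                              L2FaceValues::DoubleValued);
//   Vector face_vals(R->Height());
//   R->Mult(l_vec, face_vals);             // L-vector -> face E-vector
//   R->AddMultTranspose(face_vals, l_vec); // face E-vector -> L-vector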