MFEM v4.9.0 - Finite element discretization library
restriction.cpp
1// Copyright (c) 2010-2025, Lawrence Livermore National Security, LLC. Produced
2// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3// LICENSE and NOTICE for details. LLNL-CODE-806117.
4//
5// This file is part of the MFEM library. For more information and source code
6// availability visit https://mfem.org.
7//
8// MFEM is free software; you can redistribute it and/or modify it under the
9// terms of the BSD-3 license. We welcome feedback and contributions, see file
10// CONTRIBUTING.md for details.
11
12#include "restriction.hpp"
14#include "gridfunc.hpp"
15#include "fespace.hpp"
16#include "pgridfunc.hpp"
17#include "qspace.hpp"
18#include "fe/face_map_utils.hpp"
19#include "../general/forall.hpp"
20
21#include <climits>
22
23namespace mfem
24{
25
26 ElementRestriction::ElementRestriction(const FiniteElementSpace &f,
27 ElementDofOrdering e_ordering)
28 : fes(f),
29 ne(fes.GetNE()),
30 vdim(fes.GetVDim()),
31 byvdim(fes.GetOrdering() == Ordering::byVDIM),
32 ndofs(fes.GetNDofs()),
33 dof(fes.GetTypicalFE()->GetDof()),
34 nedofs(ne*dof),
35 offsets(ndofs+1),
36 indices(ne*dof),
37 gather_map(ne*dof)
38{
39 // Assuming all finite elements are the same.
40 MFEM_VERIFY(!f.IsVariableOrder(), "Variable-order spaces are not supported");
41
42 height = vdim*ne*dof;
43 width = fes.GetVSize();
44 const bool dof_reorder = (e_ordering == ElementDofOrdering::LEXICOGRAPHIC);
45 const int *dof_map = NULL;
46 if (dof_reorder && ne > 0)
47 {
48 for (int e = 0; e < ne; ++e)
49 {
50 const FiniteElement *fe = fes.GetFE(e);
51 auto el_t = dynamic_cast<const TensorBasisElement*>(fe);
52 auto el_n = dynamic_cast<const NodalFiniteElement*>(fe);
53 if (el_t || el_n) { continue; }
54 MFEM_ABORT("Finite element not suitable for lexicographic ordering");
55 }
56 const FiniteElement *fe = fes.GetTypicalFE();
57 auto el_t = dynamic_cast<const TensorBasisElement*>(fe);
58 auto el_n = dynamic_cast<const NodalFiniteElement*>(fe);
59 const Array<int> &fe_dof_map =
60 (el_t) ? el_t->GetDofMap() : el_n->GetLexicographicOrdering();
61 MFEM_VERIFY(fe_dof_map.Size() > 0, "invalid dof map");
62 dof_map = fe_dof_map.HostRead();
63 }
64 const Table& e2dTable = fes.GetElementToDofTable();
65 const int* element_map = e2dTable.GetJ();
66 // Keep a count of how many element-local nodes point to each global dof
67 for (int i = 0; i <= ndofs; ++i)
68 {
69 offsets[i] = 0;
70 }
71 for (int e = 0; e < ne; ++e)
72 {
73 for (int d = 0; d < dof; ++d)
74 {
75 const int sgid = element_map[dof*e + d]; // signed
76 const int gid = (sgid >= 0) ? sgid : -1 - sgid;
77 ++offsets[gid + 1];
78 }
79 }
80 // Aggregate to find offsets for each global dof
81 for (int i = 1; i <= ndofs; ++i)
82 {
83 offsets[i] += offsets[i - 1];
84 }
85 // For each global dof, fill in all local nodes that point to it
86 for (int e = 0; e < ne; ++e)
87 {
88 for (int d = 0; d < dof; ++d)
89 {
90 const int sdid = dof_reorder ? dof_map[d] : 0; // signed
91 const int did = (!dof_reorder)?d:(sdid >= 0 ? sdid : -1-sdid);
92 const int sgid = element_map[dof*e + did]; // signed
93 const int gid = (sgid >= 0) ? sgid : -1-sgid;
94 const int lid = dof*e + d;
95 const bool plus = (sgid >= 0 && sdid >= 0) || (sgid < 0 && sdid < 0);
96 gather_map[lid] = plus ? gid : -1-gid;
97 indices[offsets[gid]++] = plus ? lid : -1-lid;
98 }
99 }
100 // We shifted the offsets vector by 1 by using it as a counter.
101 // Now we shift it back.
102 for (int i = ndofs; i > 0; --i)
103 {
104 offsets[i] = offsets[i - 1];
105 }
106 offsets[0] = 0;
107}
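// Illustrative example (not part of the original source): for two 1D linear
// elements with global dofs {0,1} and {1,2} (ne=2, dof=2, ndofs=3, no dof
// reordering, all signs positive), the loops above produce
//   offsets    = [0, 1, 3, 4]   // global dof -> range into indices
//   indices    = [0, 1, 2, 3]   // element-local ids, lid = e*dof + d
//   gather_map = [0, 1, 1, 2]   // element-local id -> (signed) global dof
// i.e. a CSR-like map from each global dof to all of its element-local copies.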
108
109void ElementRestriction::Mult(const Vector& x, Vector& y) const
110{
111 // Assumes all elements have the same number of dofs
112 const int nd = dof;
113 const int vd = vdim;
114 const bool t = byvdim;
115 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
116 auto d_y = Reshape(y.Write(), nd, vd, ne);
117 auto d_gather_map = gather_map.Read();
118 mfem::forall(dof*ne, [=] MFEM_HOST_DEVICE (int i)
119 {
120 const int gid = d_gather_map[i];
121 const bool plus = gid >= 0;
122 const int j = plus ? gid : -1-gid;
123 for (int c = 0; c < vd; ++c)
124 {
125 const real_t dof_value = d_x(t?c:j, t?j:c);
126 d_y(i % nd, c, i / nd) = plus ? dof_value : -dof_value;
127 }
128 });
129}
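// Usage sketch (hypothetical variable names): x is an L-vector of size
// fes.GetVSize() and y is an E-vector of size vdim*ne*dof == Height();
// negative gather_map entries flip the sign of the copied value, encoding
// dofs shared with opposite orientation (e.g. in Nedelec/RT spaces).
//
//   Vector e_vec(elem_restr->Height());   // elem_restr: an ElementRestriction*
//   elem_restr->Mult(l_vec, e_vec);       // scatter L-dofs to element dofs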
130
131 void ElementRestriction::MultUnsigned(const Vector& x, Vector& y) const
132{
133 // Assumes all elements have the same number of dofs
134 const int nd = dof;
135 const int vd = vdim;
136 const bool t = byvdim;
137 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
138 auto d_y = Reshape(y.Write(), nd, vd, ne);
139 auto d_gather_map = gather_map.Read();
140
141 mfem::forall(dof*ne, [=] MFEM_HOST_DEVICE (int i)
142 {
143 const int gid = d_gather_map[i];
144 const int j = gid >= 0 ? gid : -1-gid;
145 for (int c = 0; c < vd; ++c)
146 {
147 d_y(i % nd, c, i / nd) = d_x(t?c:j, t?j:c);
148 }
149 });
150}
151
152template <bool ADD>
153void ElementRestriction::TAddMultTranspose(const Vector& x, Vector& y) const
154{
155 // Assumes all elements have the same number of dofs
156 const int nd = dof;
157 const int vd = vdim;
158 const bool t = byvdim;
159 auto d_offsets = offsets.Read();
160 auto d_indices = indices.Read();
161 auto d_x = Reshape(x.Read(), nd, vd, ne);
162 auto d_y = Reshape(ADD ? y.ReadWrite() : y.Write(), t?vd:ndofs, t?ndofs:vd);
163 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
164 {
165 const int offset = d_offsets[i];
166 const int next_offset = d_offsets[i + 1];
167 for (int c = 0; c < vd; ++c)
168 {
169 real_t dof_value = 0;
170 for (int j = offset; j < next_offset; ++j)
171 {
172 const int idx_j = (d_indices[j] >= 0) ? d_indices[j] : -1 - d_indices[j];
173 dof_value += ((d_indices[j] >= 0) ? d_x(idx_j % nd, c, idx_j / nd) :
174 -d_x(idx_j % nd, c, idx_j / nd));
175 }
176 if (ADD) { d_y(t?c:i,t?i:c) += dof_value; }
177 else { d_y(t?c:i,t?i:c) = dof_value; }
178 }
179 });
180}
181
182 void ElementRestriction::MultTranspose(const Vector& x, Vector& y) const
183{
184 constexpr bool ADD = false;
185 TAddMultTranspose<ADD>(x, y);
186}
187
188 void ElementRestriction::AddMultTranspose(const Vector& x, Vector& y,
189 const real_t a) const
190{
191 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
192 constexpr bool ADD = true;
193 TAddMultTranspose<ADD>(x, y);
194}
195
196 void ElementRestriction::MultTransposeUnsigned(const Vector& x, Vector& y) const
197{
198 // Assumes all elements have the same number of dofs
199 const int nd = dof;
200 const int vd = vdim;
201 const bool t = byvdim;
202 auto d_offsets = offsets.Read();
203 auto d_indices = indices.Read();
204 auto d_x = Reshape(x.Read(), nd, vd, ne);
205 auto d_y = Reshape(y.Write(), t?vd:ndofs, t?ndofs:vd);
206 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
207 {
208 const int offset = d_offsets[i];
209 const int next_offset = d_offsets[i + 1];
210 for (int c = 0; c < vd; ++c)
211 {
212 real_t dof_value = 0;
213 for (int j = offset; j < next_offset; ++j)
214 {
215 const int idx_j = (d_indices[j] >= 0) ? d_indices[j] : -1 - d_indices[j];
216 dof_value += d_x(idx_j % nd, c, idx_j / nd);
217 }
218 d_y(t?c:i,t?i:c) = dof_value;
219 }
220 });
221}
222
223 void ElementRestriction::MultLeftInverse(const Vector& x, Vector& y) const
224{
225 // Assumes all elements have the same number of dofs
226 const int nd = dof;
227 const int vd = vdim;
228 const bool t = byvdim;
229 auto d_offsets = offsets.Read();
230 auto d_indices = indices.Read();
231 auto d_x = Reshape(x.Read(), nd, vd, ne);
232 auto d_y = Reshape(y.Write(), t?vd:ndofs, t?ndofs:vd);
233 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
234 {
235 const int next_offset = d_offsets[i + 1];
236 for (int c = 0; c < vd; ++c)
237 {
238 real_t dof_value = 0;
239 const int j = next_offset - 1;
240 const int idx_j = (d_indices[j] >= 0) ? d_indices[j] : -1 - d_indices[j];
241 dof_value = (d_indices[j] >= 0) ? d_x(idx_j % nd, c, idx_j / nd) :
242 -d_x(idx_j % nd, c, idx_j / nd);
243 d_y(t?c:i,t?i:c) = dof_value;
244 }
245 });
246}
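// Note: MultLeftInverse uses only the last entry of each offset range, i.e. a
// single element-local copy per global dof, so applying it after Mult recovers
// the original L-vector (a left inverse) when the E-vector data is conforming.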
247
248 void ElementRestriction::BooleanMask(Vector& y) const
249{
250 // Assumes all elements have the same number of dofs
251 const int nd = dof;
252 const int vd = vdim;
253 const bool t = byvdim;
254
255 Array<char> processed(vd * ndofs);
256 processed = 0;
257
258 auto d_offsets = offsets.HostRead();
259 auto d_indices = indices.HostRead();
260 auto d_x = Reshape(processed.HostReadWrite(), t?vd:ndofs, t?ndofs:vd);
261 auto d_y = Reshape(y.HostWrite(), nd, vd, ne);
262 for (int i = 0; i < ndofs; ++i)
263 {
264 const int offset = d_offsets[i];
265 const int next_offset = d_offsets[i+1];
266 for (int c = 0; c < vd; ++c)
267 {
268 for (int j = offset; j < next_offset; ++j)
269 {
270 const int idx_j = d_indices[j];
271 if (d_x(t?c:i,t?i:c))
272 {
273 d_y(idx_j % nd, c, idx_j / nd) = 0.0;
274 }
275 else
276 {
277 d_y(idx_j % nd, c, idx_j / nd) = 1.0;
278 d_x(t?c:i,t?i:c) = 1;
279 }
280 }
281 }
282 }
283}
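// Note: BooleanMask writes 1.0 into exactly one element-local copy of each
// (global dof, component) pair and 0.0 into the remaining duplicates, which
// can be used to avoid double-counting shared dofs in E-vector reductions.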
284
285 void ElementRestriction::FillSparseMatrix(const Vector &mat_ea,
286 SparseMatrix &mat) const
287{
288 mat.GetMemoryI().New(mat.Height()+1, mat.GetMemoryI().GetMemoryType());
289 const int nnz = FillI(mat);
290 mat.GetMemoryJ().New(nnz, mat.GetMemoryJ().GetMemoryType());
291 mat.GetMemoryData().New(nnz, mat.GetMemoryData().GetMemoryType());
292 FillJAndData(mat_ea, mat);
293}
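// Usage sketch (hypothetical names, illustration only): given element matrices
// stored contiguously as mat_ea(dof, dof, ne), e.g. produced by element
// assembly of a bilinear form,
//   SparseMatrix A(fes.GetVSize(), fes.GetVSize());
//   elem_restr->FillSparseMatrix(mat_ea, A);
// allocates and fills the CSR arrays of A, summing the contributions of
// elements that share global dofs.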
294
295static MFEM_HOST_DEVICE int GetMinElt(const int *my_elts, const int nbElts,
296 const int *nbr_elts, const int nbrNbElts)
297{
298 // Find the minimal element index found in both my_elts[] and nbr_elts[]
299 int min_el = INT_MAX;
300 for (int i = 0; i < nbElts; i++)
301 {
302 const int e_i = my_elts[i];
303 if (e_i >= min_el) { continue; }
304 for (int j = 0; j < nbrNbElts; j++)
305 {
306 if (e_i==nbr_elts[j])
307 {
308 min_el = e_i; // we already know e_i < min_el
309 break;
310 }
311 }
312 }
313 return min_el;
314}
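// Example: my_elts = {4, 7}, nbr_elts = {7, 9} gives 7, the smallest element
// shared by the two dofs; below, that element is the one that inserts the
// assembled nonzero, so each (i,j) pair is added only once.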
315
316/** Returns the index where a non-zero entry should be added and increments the
317 number of non-zeros for the row i_L. */
318static MFEM_HOST_DEVICE int GetAndIncrementNnzIndex(const int i_L, int* I)
319{
320 int ind = AtomicAdd(I[i_L],1);
321 return ind;
322}
323
324 int ElementRestriction::FillI(SparseMatrix &mat) const
325{
326 const int all_dofs = ndofs;
327 const int vd = vdim;
328 const int elt_dofs = dof;
329 auto I = mat.ReadWriteI();
330 auto d_offsets = offsets.Read();
331 auto d_indices = indices.Read();
332 auto d_gather_map = gather_map.Read();
333
334 Array<int> ij_elts(indices.Size() * 2);
335 auto d_ij_elts = Reshape(ij_elts.Write(), indices.Size(), 2);
336
337 mfem::forall(vd*all_dofs+1, [=] MFEM_HOST_DEVICE (int i_L)
338 {
339 I[i_L] = 0;
340 });
341 mfem::forall(ne*elt_dofs, [=] MFEM_HOST_DEVICE (int l_dof)
342 {
343 const int e = l_dof/elt_dofs;
344 const int i = l_dof%elt_dofs;
345
346 const int i_gm = e*elt_dofs + i;
347 const int i_L = d_gather_map[i_gm];
348 const int i_offset = d_offsets[i_L];
349 const int i_next_offset = d_offsets[i_L+1];
350 const int i_nbElts = i_next_offset - i_offset;
351
352 int *i_elts = &d_ij_elts(i_offset, 0);
353 for (int e_i = 0; e_i < i_nbElts; ++e_i)
354 {
355 const int i_E = d_indices[i_offset+e_i];
356 i_elts[e_i] = i_E/elt_dofs;
357 }
358 for (int j = 0; j < elt_dofs; j++)
359 {
360 const int j_gm = e*elt_dofs + j;
361 const int j_L = d_gather_map[j_gm];
362 const int j_offset = d_offsets[j_L];
363 const int j_next_offset = d_offsets[j_L+1];
364 const int j_nbElts = j_next_offset - j_offset;
365 if (i_nbElts == 1 || j_nbElts == 1) // no assembly required
366 {
367 GetAndIncrementNnzIndex(i_L, I);
368 }
369 else // assembly required
370 {
371 int *j_elts = &d_ij_elts(j_offset, 1);
372 for (int e_j = 0; e_j < j_nbElts; ++e_j)
373 {
374 const int j_E = d_indices[j_offset+e_j];
375 const int elt = j_E/elt_dofs;
376 j_elts[e_j] = elt;
377 }
378 int min_e = GetMinElt(i_elts, i_nbElts, j_elts, j_nbElts);
379 if (e == min_e) // add the nnz only once
380 {
381 GetAndIncrementNnzIndex(i_L, I);
382 }
383 }
384 }
385 });
386 // Prefix-sum the entries of I; this is done on the CPU since it is inherently sequential.
387 auto h_I = mat.HostReadWriteI();
388 const int nTdofs = vd*all_dofs;
389 int sum = 0;
390 for (int i = 0; i < nTdofs; i++)
391 {
392 const int nnz = h_I[i];
393 h_I[i] = sum;
394 sum+=nnz;
395 }
396 h_I[nTdofs] = sum;
397 // We return the number of nnz
398 return h_I[nTdofs];
399}
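// The assembly follows a standard two-pass CSR construction: FillI() counts
// the nonzeros of each row and scans the counts into row offsets, then
// FillJAndData() reuses I[] as per-row insertion cursors (via AtomicAdd) and
// finally shifts I[] back by one entry to restore the row offsets.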
400
401 void ElementRestriction::FillJAndData(const Vector &ea_data,
402 SparseMatrix &mat) const
403{
404 const int all_dofs = ndofs;
405 const int vd = vdim;
406 const int elt_dofs = dof;
407 auto I = mat.ReadWriteI();
408 auto J = mat.WriteJ();
409 auto Data = mat.WriteData();
410 auto d_offsets = offsets.Read();
411 auto d_indices = indices.Read();
412 auto d_gather_map = gather_map.Read();
413 auto mat_ea = Reshape(ea_data.Read(), elt_dofs, elt_dofs, ne);
414
415 Array<int> ij_B_el(indices.Size() * 4);
416 auto d_ij_B_el = Reshape(ij_B_el.Write(), indices.Size(), 4);
417
418 mfem::forall(ne*elt_dofs, [=] MFEM_HOST_DEVICE (int l_dof)
419 {
420 const int e = l_dof/elt_dofs;
421 const int i = l_dof%elt_dofs;
422
423 const int i_gm = e*elt_dofs + i;
424 const int i_L = d_gather_map[i_gm];
425 const int i_offset = d_offsets[i_L];
426 const int i_next_offset = d_offsets[i_L+1];
427 const int i_nbElts = i_next_offset - i_offset;
428
429 int *i_elts = &d_ij_B_el(i_offset, 0);
430 int *i_B = &d_ij_B_el(i_offset, 1);
431 for (int e_i = 0; e_i < i_nbElts; ++e_i)
432 {
433 const int i_E = d_indices[i_offset+e_i];
434 i_elts[e_i] = i_E/elt_dofs;
435 i_B[e_i] = i_E%elt_dofs;
436 }
437 for (int j = 0; j < elt_dofs; j++)
438 {
439 const int j_gm = e*elt_dofs + j;
440 const int j_L = d_gather_map[j_gm];
441 const int j_offset = d_offsets[j_L];
442 const int j_next_offset = d_offsets[j_L+1];
443 const int j_nbElts = j_next_offset - j_offset;
444 if (i_nbElts == 1 || j_nbElts == 1) // no assembly required
445 {
446 const int nnz = GetAndIncrementNnzIndex(i_L, I);
447 J[nnz] = j_L;
448 Data[nnz] = mat_ea(j,i,e);
449 }
450 else // assembly required
451 {
452 int *j_elts = &d_ij_B_el(j_offset, 2);
453 int *j_B = &d_ij_B_el(j_offset, 3);
454 for (int e_j = 0; e_j < j_nbElts; ++e_j)
455 {
456 const int j_E = d_indices[j_offset+e_j];
457 const int elt = j_E/elt_dofs;
458 j_elts[e_j] = elt;
459 j_B[e_j] = j_E%elt_dofs;
460 }
461 int min_e = GetMinElt(i_elts, i_nbElts, j_elts, j_nbElts);
462 if (e == min_e) // add the nnz only once
463 {
464 real_t val = 0.0;
465 for (int k = 0; k < i_nbElts; k++)
466 {
467 const int e_i = i_elts[k];
468 const int i_Bloc = i_B[k];
469 for (int l = 0; l < j_nbElts; l++)
470 {
471 const int e_j = j_elts[l];
472 const int j_Bloc = j_B[l];
473 if (e_i == e_j)
474 {
475 val += mat_ea(j_Bloc, i_Bloc, e_i);
476 }
477 }
478 }
479 const int nnz = GetAndIncrementNnzIndex(i_L, I);
480 J[nnz] = j_L;
481 Data[nnz] = val;
482 }
483 }
484 }
485 });
486 // The entries of I need to be shifted back by one position; this is done on
487 // the CPU since it is inherently sequential.
488 auto h_I = mat.HostReadWriteI();
489 const int size = vd*all_dofs;
490 for (int i = 0; i < size; i++)
491 {
492 h_I[size-i] = h_I[size-(i+1)];
493 }
494 h_I[0] = 0;
495}
496
497 L2ElementRestriction::L2ElementRestriction(const FiniteElementSpace &fes)
498 : ne(fes.GetNE()),
499 vdim(fes.GetVDim()),
500 byvdim(fes.GetOrdering() == Ordering::byVDIM),
501 ndof(fes.GetTypicalFE()->GetDof()),
502 ndofs(fes.GetNDofs())
503{
504 height = vdim*ne*ndof;
505 width = vdim*ne*ndof;
506}
507
508 void L2ElementRestriction::Mult(const Vector &x, Vector &y) const
509{
510 const int nd = ndof;
511 const int vd = vdim;
512 const bool t = byvdim;
513 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
514 auto d_y = Reshape(y.Write(), nd, vd, ne);
515 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
516 {
517 const int idx = i;
518 const int dof = idx % nd;
519 const int e = idx / nd;
520 for (int c = 0; c < vd; ++c)
521 {
522 d_y(dof, c, e) = d_x(t?c:idx, t?idx:c);
523 }
524 });
525}
526
527template <bool ADD>
528void L2ElementRestriction::TAddMultTranspose(const Vector &x, Vector &y) const
529{
530 const int nd = ndof;
531 const int vd = vdim;
532 const bool t = byvdim;
533 auto d_x = Reshape(x.Read(), nd, vd, ne);
534 auto d_y = Reshape(ADD ? y.ReadWrite() : y.Write(), t?vd:ndofs, t?ndofs:vd);
535 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
536 {
537 const int idx = i;
538 const int dof = idx % nd;
539 const int e = idx / nd;
540 for (int c = 0; c < vd; ++c)
541 {
542 if (ADD) { d_y(t?c:idx,t?idx:c) += d_x(dof, c, e); }
543 else { d_y(t?c:idx,t?idx:c) = d_x(dof, c, e); }
544 }
545 });
546}
547
548 void L2ElementRestriction::MultTranspose(const Vector &x, Vector &y) const
549{
550 constexpr bool ADD = false;
551 TAddMultTranspose<ADD>(x, y);
552}
553
554 void L2ElementRestriction::AddMultTranspose(const Vector &x, Vector &y,
555 const real_t a) const
556{
557 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
558 constexpr bool ADD = true;
559 TAddMultTranspose<ADD>(x, y);
560}
561
562 void L2ElementRestriction::FillI(SparseMatrix &mat) const
563{
564 const int elem_dofs = ndof;
565 const int vd = vdim;
566 auto I = mat.WriteI();
567 const int isize = mat.Height() + 1;
568 const int interior_dofs = ne*elem_dofs*vd;
569 mfem::forall(isize, [=] MFEM_HOST_DEVICE (int dof)
570 {
571 I[dof] = dof<interior_dofs ? elem_dofs : 0;
572 });
573}
574
575static MFEM_HOST_DEVICE int AddNnz(const int iE, int *I, const int dofs)
576{
577 int val = AtomicAdd(I[iE],dofs);
578 return val;
579}
580
581 void L2ElementRestriction::FillJAndData(const Vector &ea_data,
582 SparseMatrix &mat) const
583{
584 const int elem_dofs = ndof;
585 const int vd = vdim;
586 auto I = mat.ReadWriteI();
587 auto J = mat.WriteJ();
588 auto Data = mat.WriteData();
589 auto mat_ea = Reshape(ea_data.Read(), elem_dofs, elem_dofs, ne);
590 mfem::forall(ne*elem_dofs*vd, [=] MFEM_HOST_DEVICE (int iE)
591 {
592 const int offset = AddNnz(iE,I,elem_dofs);
593 const int e = iE/elem_dofs;
594 const int i = iE%elem_dofs;
595 for (int j = 0; j < elem_dofs; j++)
596 {
597 J[offset+j] = e*elem_dofs+j;
598 Data[offset+j] = mat_ea(j,i,e);
599 }
600 });
601}
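// Note: for an L2 (DG) space the assembled matrix is block diagonal, one
// elem_dofs x elem_dofs block per element, so FillI above simply assigns
// elem_dofs nonzeros to every row.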
602
603 ConformingFaceRestriction::ConformingFaceRestriction(
604 const FiniteElementSpace &fes,
605 const ElementDofOrdering f_ordering,
606 const FaceType type,
607 bool build)
608 : fes(fes),
609 nf(fes.GetNFbyType(type)),
610 vdim(fes.GetVDim()),
611 byvdim(fes.GetOrdering() == Ordering::byVDIM),
612 face_dofs(nf > 0 ? fes.GetFaceElement(0)->GetDof() : 0),
613 elem_dofs(fes.GetTypicalFE()->GetDof()),
614 nfdofs(nf*face_dofs),
615 ndofs(fes.GetNDofs()),
616 scatter_indices(nf*face_dofs),
617 gather_offsets(ndofs+1),
618 gather_indices(nf*face_dofs),
619 face_map(face_dofs)
620{
621 height = vdim*nf*face_dofs;
622 width = fes.GetVSize();
623 if (nf==0) { return; }
624
625 CheckFESpace(f_ordering);
626
627 // Get the mapping from lexicographic DOF ordering to native ordering.
628 const TensorBasisElement* el =
629 dynamic_cast<const TensorBasisElement*>(fes.GetTypicalFE());
630 const Array<int> &dof_map_ = el->GetDofMap();
631 if (dof_map_.Size() > 0)
632 {
633 vol_dof_map.MakeRef(dof_map_);
634 }
635 else
636 {
637 // For certain types of elements dof_map_ is empty. In this case, that
638 // means the element is already ordered lexicographically, so the
639 // permutation is the identity.
640 vol_dof_map.SetSize(elem_dofs);
641 for (int i = 0; i < elem_dofs; ++i) { vol_dof_map[i] = i; }
642 }
643
644 if (!build) { return; }
645 ComputeScatterIndicesAndOffsets(f_ordering, type);
646 ComputeGatherIndices(f_ordering,type);
647}
648
649 ConformingFaceRestriction::ConformingFaceRestriction(
650 const FiniteElementSpace &fes,
651 const ElementDofOrdering f_ordering,
652 const FaceType type)
653 : ConformingFaceRestriction(fes, f_ordering, type, true)
654{ }
655
657 const bool useAbs) const
658{
659 if (nf==0) { return; }
660 // Assumes all elements have the same number of dofs
661 const int nface_dofs = face_dofs;
662 const int vd = vdim;
663 const bool t = byvdim;
664 auto d_indices = scatter_indices.Read();
665 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
666 auto d_y = Reshape(y.Write(), nface_dofs, vd, nf);
667 mfem::forall(nfdofs, [=] MFEM_HOST_DEVICE (int i)
668 {
669 const int s_idx = d_indices[i];
670 const int sgn = (useAbs || s_idx >= 0) ? 1 : -1;
671 const int idx = (s_idx >= 0) ? s_idx : -1 - s_idx;
672 const int dof = i % nface_dofs;
673 const int face = i / nface_dofs;
674 for (int c = 0; c < vd; ++c)
675 {
676 d_y(dof, c, face) = sgn*d_x(t?c:idx, t?idx:c);
677 }
678 });
679}
680
681static void ConformingFaceRestriction_AddMultTranspose(
682 const int ndofs,
683 const int face_dofs,
684 const int nf,
685 const int vdim,
686 const bool by_vdim,
687 const Array<int> &gather_offsets,
688 const Array<int> &gather_indices,
689 const Vector &x,
690 Vector &y,
691 bool use_signs,
692 const real_t a)
693{
694 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
695 if (nf==0) { return; }
696 // Assumes all elements have the same number of dofs
697 auto d_offsets = gather_offsets.Read();
698 auto d_indices = gather_indices.Read();
699 auto d_x = Reshape(x.Read(), face_dofs, vdim, nf);
700 auto d_y = Reshape(y.ReadWrite(), by_vdim?vdim:ndofs, by_vdim?ndofs:vdim);
701 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
702 {
703 const int offset = d_offsets[i];
704 const int next_offset = d_offsets[i + 1];
705 for (int c = 0; c < vdim; ++c)
706 {
707 real_t dof_value = 0;
708 for (int j = offset; j < next_offset; ++j)
709 {
710 const int s_idx_j = d_indices[j];
711 const real_t sgn = (s_idx_j >= 0 || !use_signs) ? 1.0 : -1.0;
712 const int idx_j = (s_idx_j >= 0) ? s_idx_j : -1 - s_idx_j;
713 dof_value += sgn*d_x(idx_j % face_dofs, c, idx_j / face_dofs);
714 }
715 d_y(by_vdim?c:i,by_vdim?i:c) += dof_value;
716 }
717 });
718}
719
720 void ConformingFaceRestriction::AddMultTranspose(
721 const Vector& x, Vector& y, const real_t a) const
722{
723 ConformingFaceRestriction_AddMultTranspose(
724 ndofs, face_dofs, nf, vdim, byvdim, gather_offsets, gather_indices, x, y,
725 true, a);
726}
727
728 void ConformingFaceRestriction::AddMultTransposeUnsigned(
729 const Vector& x, Vector& y, const real_t a) const
730{
731 ConformingFaceRestriction_AddMultTranspose(
732 ndofs, face_dofs, nf, vdim, byvdim, gather_offsets, gather_indices, x, y,
733 false, a);
734}
735
736 void ConformingFaceRestriction::CheckFESpace(const ElementDofOrdering
737 f_ordering)
738{
739#ifdef MFEM_USE_MPI
740
741 // If the underlying finite element space is parallel, ensure the face
742 // neighbor information is generated.
743 if (const ParFiniteElementSpace *pfes
744 = dynamic_cast<const ParFiniteElementSpace*>(&fes))
745 {
746 pfes->GetParMesh()->ExchangeFaceNbrData();
747 }
748
749#endif
750
751#ifdef MFEM_DEBUG
752 const FiniteElement *fe0 = fes.GetTypicalFE();
753 const TensorBasisElement *tfe = dynamic_cast<const TensorBasisElement*>(fe0);
754 MFEM_VERIFY(tfe != NULL,
755 "ConformingFaceRestriction only supports TensorBasisElements");
756 MFEM_VERIFY(tfe->GetBasisType()==BasisType::GaussLobatto ||
757 tfe->GetBasisType()==BasisType::Positive,
758 "ConformingFaceRestriction only supports Gauss-Lobatto and Bernstein bases");
759
760 // Assuming all finite elements use a Gauss-Lobatto or Bernstein tensor-product basis.
761 const bool dof_reorder = (f_ordering == ElementDofOrdering::LEXICOGRAPHIC);
762 if (dof_reorder && nf > 0)
763 {
764 for (int f = 0; f < fes.GetNF(); ++f)
765 {
766 const FiniteElement *fe = fes.GetFaceElement(f);
767 const TensorBasisElement* el =
768 dynamic_cast<const TensorBasisElement*>(fe);
769 if (el) { continue; }
770 MFEM_ABORT("Finite element not suitable for lexicographic ordering");
771 }
772 }
773#endif
774}
775
776void ConformingFaceRestriction::ComputeScatterIndicesAndOffsets(
777 const ElementDofOrdering f_ordering,
778 const FaceType type)
779{
780 Mesh &mesh = *fes.GetMesh();
781
782 // Initialization of the offsets
783 for (int i = 0; i <= ndofs; ++i)
784 {
785 gather_offsets[i] = 0;
786 }
787
788 // Computation of scatter indices and offsets
789 int f_ind = 0;
790 for (int f = 0; f < fes.GetNF(); ++f)
791 {
792 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
793 if ( face.IsNonconformingCoarse() )
794 {
795 // We skip nonconforming coarse faces as they are treated
796 // by the corresponding nonconforming fine faces.
797 continue;
798 }
799 else if ( face.IsOfFaceType(type) )
800 {
801 SetFaceDofsScatterIndices(face, f_ind, f_ordering);
802 f_ind++;
803 }
804 }
805 MFEM_VERIFY(f_ind==nf, "Unexpected number of faces.");
806
807 // Summation of the offsets
808 for (int i = 1; i <= ndofs; ++i)
809 {
810 gather_offsets[i] += gather_offsets[i - 1];
811 }
812}
813
814void ConformingFaceRestriction::ComputeGatherIndices(
815 const ElementDofOrdering f_ordering,
816 const FaceType type)
817{
818 Mesh &mesh = *fes.GetMesh();
819
820 // Computation of gather_indices
821 int f_ind = 0;
822 for (int f = 0; f < fes.GetNF(); ++f)
823 {
824 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
825 if ( face.IsNonconformingCoarse() )
826 {
827 // We skip nonconforming coarse faces as they are treated
828 // by the corresponding nonconforming fine faces.
829 continue;
830 }
831 else if ( face.IsOfFaceType(type) )
832 {
833 SetFaceDofsGatherIndices(face, f_ind, f_ordering);
834 f_ind++;
835 }
836 }
837 MFEM_VERIFY(f_ind==nf, "Unexpected number of faces.");
838
839 // Reset offsets to their initial value
840 for (int i = ndofs; i > 0; --i)
841 {
842 gather_offsets[i] = gather_offsets[i - 1];
843 }
844 gather_offsets[0] = 0;
845}
846
847static inline int absdof(int i) { return i < 0 ? -1-i : i; }
848
849 void ConformingFaceRestriction::SetFaceDofsScatterIndices(
850 const Mesh::FaceInformation &face,
851 const int face_index,
852 const ElementDofOrdering f_ordering)
853{
854 MFEM_ASSERT(!(face.IsNonconformingCoarse()),
855 "This method should not be used on nonconforming coarse faces.");
856 MFEM_ASSERT(face.element[0].orientation==0,
857 "FaceRestriction used on degenerated mesh.");
858 MFEM_VERIFY(f_ordering == ElementDofOrdering::LEXICOGRAPHIC,
859 "NATIVE ordering is not supported yet");
860
862
863 const Table& e2dTable = fes.GetElementToDofTable();
864 const int* elem_map = e2dTable.GetJ();
865 const int elem_index = face.element[0].index;
866
867 for (int face_dof = 0; face_dof < face_dofs; ++face_dof)
868 {
869 const int lex_volume_dof = face_map[face_dof];
870 const int s_volume_dof = AsConst(vol_dof_map)[lex_volume_dof]; // signed
871 const int volume_dof = absdof(s_volume_dof);
872 const int s_global_dof = elem_map[elem_index*elem_dofs + volume_dof];
873 const int global_dof = absdof(s_global_dof);
874 const int restriction_dof = face_dofs*face_index + face_dof;
875 scatter_indices[restriction_dof] = s_global_dof;
876 ++gather_offsets[global_dof + 1];
877 }
878}
879
880 void ConformingFaceRestriction::SetFaceDofsGatherIndices(
881 const Mesh::FaceInformation &face,
882 const int face_index,
883 const ElementDofOrdering f_ordering)
884{
885 MFEM_ASSERT(!(face.IsNonconformingCoarse()),
886 "This method should not be used on nonconforming coarse faces.");
887 MFEM_VERIFY(f_ordering == ElementDofOrdering::LEXICOGRAPHIC,
888 "NATIVE ordering is not supported yet");
889
891
892 const Table& e2dTable = fes.GetElementToDofTable();
893 const int* elem_map = e2dTable.GetJ();
894 const int elem_index = face.element[0].index;
895
896 for (int face_dof = 0; face_dof < face_dofs; ++face_dof)
897 {
898 const int lex_volume_dof = face_map[face_dof];
899 const int s_volume_dof = AsConst(vol_dof_map)[lex_volume_dof];
900 const int volume_dof = absdof(s_volume_dof);
901 const int s_global_dof = elem_map[elem_index*elem_dofs + volume_dof];
902 const int sgn = (s_global_dof >= 0) ? 1 : -1;
903 const int global_dof = absdof(s_global_dof);
904 const int restriction_dof = face_dofs*face_index + face_dof;
905 const int s_restriction_dof = (sgn >= 0) ? restriction_dof : -1 -
906 restriction_dof;
907 gather_indices[gather_offsets[global_dof]++] = s_restriction_dof;
908 }
909}
910
911// Permute dofs or quads on a face of e2 to match the ordering of e1
912int PermuteFaceL2(const int dim, const int face_id1,
913 const int face_id2, const int orientation,
914 const int size1d, const int index)
915{
916 switch (dim)
917 {
918 case 1:
919 return 0;
920 case 2:
921 return internal::PermuteFace2D(face_id1, face_id2, orientation, size1d, index);
922 case 3:
923 return internal::PermuteFace3D(face_id1, face_id2, orientation, size1d, index);
924 default:
925 MFEM_ABORT("Unsupported dimension.");
926 return 0;
927 }
928}
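// Illustrative example (2D): two quads sharing an edge with opposite
// orientation traverse that edge in opposite directions, so index i on the e1
// side typically maps to size1d-1-i on the e2 side; PermuteFaceL2 wraps these
// dimension- and orientation-dependent index maps.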
929
930 L2FaceRestriction::L2FaceRestriction(const FiniteElementSpace &fes,
931 const ElementDofOrdering f_ordering,
932 const FaceType type,
933 const L2FaceValues m,
934 bool build)
935 : fes(fes),
936 ordering(f_ordering),
937 nf(fes.GetNFbyType(type)),
938 ne(fes.GetNE()),
939 vdim(fes.GetVDim()),
940 byvdim(fes.GetOrdering() == Ordering::byVDIM),
941 face_dofs(fes.GetTypicalTraceElement()->GetDof()),
942 elem_dofs(fes.GetTypicalFE()->GetDof()),
943 nfdofs(nf*face_dofs),
944 ndofs(fes.GetNDofs()),
945 type(type),
946 m(m),
947 scatter_indices1(nf*face_dofs),
948 scatter_indices2(m==L2FaceValues::DoubleValued?nf*face_dofs:0),
949 gather_offsets(ndofs+1),
950 gather_indices((m==L2FaceValues::DoubleValued? 2 : 1)*nf*face_dofs),
951 face_map(face_dofs)
952{
953 height = (m==L2FaceValues::DoubleValued? 2 : 1)*vdim*nf*face_dofs;
954 width = fes.GetVSize();
955 if (!build) { return; }
956
957 CheckFESpace();
958 ComputeScatterIndicesAndOffsets();
959 ComputeGatherIndices();
960}
961
962 L2FaceRestriction::L2FaceRestriction(const FiniteElementSpace &fes,
963 const ElementDofOrdering f_ordering,
964 const FaceType type,
965 const L2FaceValues m)
966 : L2FaceRestriction(fes, f_ordering, type, m, true)
967{ }
968
969 void L2FaceRestriction::SingleValuedConformingMult(const Vector& x,
970 Vector& y) const
971{
972 if (nf == 0) { return; }
973 MFEM_ASSERT(
974 m == L2FaceValues::SingleValued,
975 "This method should be called when m == L2FaceValues::SingleValued.");
976 // Assumes all elements have the same number of dofs
977 const int nface_dofs = face_dofs;
978 const int vd = vdim;
979 const bool t = byvdim;
980 auto d_indices1 = scatter_indices1.Read();
981 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
982 auto d_y = Reshape(y.Write(), nface_dofs, vd, nf);
983 mfem::forall(nfdofs, [=] MFEM_HOST_DEVICE (int i)
984 {
985 const int dof = i % nface_dofs;
986 const int face = i / nface_dofs;
987 const int idx1 = d_indices1[i];
988 for (int c = 0; c < vd; ++c)
989 {
990 d_y(dof, c, face) = d_x(t?c:idx1, t?idx1:c);
991 }
992 });
993}
994
995 void L2FaceRestriction::DoubleValuedConformingMult(const Vector& x,
996 Vector& y) const
997{
998 MFEM_ASSERT(
999 m == L2FaceValues::DoubleValued,
1000 "This method should be called when m == L2FaceValues::DoubleValued.");
1001 // Assumes all elements have the same number of dofs
1002 const int nface_dofs = face_dofs;
1003 const int vd = vdim;
1004 const bool t = byvdim;
1005 auto d_indices1 = scatter_indices1.Read();
1006 auto d_indices2 = scatter_indices2.Read();
1007 auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
1008 auto d_y = Reshape(y.Write(), nface_dofs, vd, 2, nf);
1009 mfem::forall(nfdofs, [=] MFEM_HOST_DEVICE (int i)
1010 {
1011 const int dof = i % nface_dofs;
1012 const int face = i / nface_dofs;
1013 const int idx1 = d_indices1[i];
1014 for (int c = 0; c < vd; ++c)
1015 {
1016 d_y(dof, c, 0, face) = d_x(t?c:idx1, t?idx1:c);
1017 }
1018 const int idx2 = d_indices2[i];
1019 for (int c = 0; c < vd; ++c)
1020 {
1021 d_y(dof, c, 1, face) = idx2==-1 ? 0.0 : d_x(t?c:idx2, t?idx2:c);
1022 }
1023 });
1024}
1025
1026void L2FaceRestriction::Mult(const Vector& x, Vector& y) const
1027{
1028 if (nf==0) { return; }
1029 if ( m==L2FaceValues::DoubleValued )
1030 {
1031 DoubleValuedConformingMult(x, y);
1032 }
1033 else
1034 {
1035 SingleValuedConformingMult(x, y);
1036 }
1037}
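// Layout of the resulting E-face vector: SingleValued gives y(dof, c, face) of
// size face_dofs*vdim*nf; DoubleValued gives y(dof, c, side, face) with side 0
// holding the elem1 trace and side 1 the elem2 trace (set to 0.0 on boundary
// faces, where scatter_indices2 stores -1).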
1038
1039 void L2FaceRestriction::SingleValuedConformingAddMultTranspose(
1040 const Vector& x, Vector& y) const
1041{
1042 // Assumes all elements have the same number of dofs
1043 const int nface_dofs = face_dofs;
1044 const int vd = vdim;
1045 const bool t = byvdim;
1046 auto d_offsets = gather_offsets.Read();
1047 auto d_indices = gather_indices.Read();
1048 auto d_x = Reshape(x.Read(), nface_dofs, vd, nf);
1049 auto d_y = Reshape(y.ReadWrite(), t?vd:ndofs, t?ndofs:vd);
1050 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
1051 {
1052 const int offset = d_offsets[i];
1053 const int next_offset = d_offsets[i + 1];
1054 for (int c = 0; c < vd; ++c)
1055 {
1056 real_t dof_value = 0;
1057 for (int j = offset; j < next_offset; ++j)
1058 {
1059 int idx_j = d_indices[j];
1060 dof_value += d_x(idx_j % nface_dofs, c, idx_j / nface_dofs);
1061 }
1062 d_y(t?c:i,t?i:c) += dof_value;
1063 }
1064 });
1065}
1066
1067 void L2FaceRestriction::DoubleValuedConformingAddMultTranspose(
1068 const Vector& x, Vector& y) const
1069{
1070 // Assumes all elements have the same number of dofs
1071 const int nface_dofs = face_dofs;
1072 const int vd = vdim;
1073 const bool t = byvdim;
1074 const int dofs = nfdofs;
1075 auto d_offsets = gather_offsets.Read();
1076 auto d_indices = gather_indices.Read();
1077 auto d_x = Reshape(x.Read(), nface_dofs, vd, 2, nf);
1078 auto d_y = Reshape(y.ReadWrite(), t?vd:ndofs, t?ndofs:vd);
1079 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i)
1080 {
1081 const int offset = d_offsets[i];
1082 const int next_offset = d_offsets[i + 1];
1083 for (int c = 0; c < vd; ++c)
1084 {
1085 real_t dof_value = 0;
1086 for (int j = offset; j < next_offset; ++j)
1087 {
1088 int idx_j = d_indices[j];
1089 bool isE1 = idx_j < dofs;
1090 idx_j = isE1 ? idx_j : idx_j - dofs;
1091 dof_value += isE1 ?
1092 d_x(idx_j % nface_dofs, c, 0, idx_j / nface_dofs)
1093 :d_x(idx_j % nface_dofs, c, 1, idx_j / nface_dofs);
1094 }
1095 d_y(t?c:i,t?i:c) += dof_value;
1096 }
1097 });
1098}
1099
1100 void L2FaceRestriction::AddMultTranspose(const Vector& x, Vector& y,
1101 const real_t a) const
1102{
1103 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
1104 if (nf==0) { return; }
1105 if ( m==L2FaceValues::DoubleValued )
1106 {
1107 DoubleValuedConformingAddMultTranspose(x, y);
1108 }
1109 else
1110 {
1111 SingleValuedConformingAddMultTranspose(x, y);
1112 }
1113}
1114
1115 void L2FaceRestriction::FillI(SparseMatrix &mat,
1116 const bool keep_nbr_block) const
1117{
1118 const int nface_dofs = face_dofs;
1119 auto d_indices1 = scatter_indices1.Read();
1120 auto d_indices2 = scatter_indices2.Read();
1121 auto I = mat.ReadWriteI();
1122 mfem::forall(nf*nface_dofs, [=] MFEM_HOST_DEVICE (int fdof)
1123 {
1124 const int iE1 = d_indices1[fdof];
1125 const int iE2 = d_indices2[fdof];
1126 AddNnz(iE1,I,nface_dofs);
1127 AddNnz(iE2,I,nface_dofs);
1128 });
1129}
1130
1131 void L2FaceRestriction::FillJAndData(const Vector &fea_data,
1132 SparseMatrix &mat,
1133 const bool keep_nbr_block) const
1134{
1135 const int nface_dofs = face_dofs;
1136 auto d_indices1 = scatter_indices1.Read();
1137 auto d_indices2 = scatter_indices2.Read();
1138 auto I = mat.ReadWriteI();
1139 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, 2, nf);
1140 auto J = mat.WriteJ();
1141 auto Data = mat.WriteData();
1142 mfem::forall(nf*nface_dofs, [=] MFEM_HOST_DEVICE (int fdof)
1143 {
1144 const int f = fdof/nface_dofs;
1145 const int iF = fdof%nface_dofs;
1146 const int iE1 = d_indices1[f*nface_dofs+iF];
1147 const int iE2 = d_indices2[f*nface_dofs+iF];
1148 const int offset1 = AddNnz(iE1,I,nface_dofs);
1149 const int offset2 = AddNnz(iE2,I,nface_dofs);
1150 for (int jF = 0; jF < nface_dofs; jF++)
1151 {
1152 const int jE1 = d_indices1[f*nface_dofs+jF];
1153 const int jE2 = d_indices2[f*nface_dofs+jF];
1154 J[offset2+jF] = jE1;
1155 J[offset1+jF] = jE2;
1156 Data[offset2+jF] = mat_fea(jF,iF,0,f);
1157 Data[offset1+jF] = mat_fea(jF,iF,1,f);
1158 }
1159 });
1160}
1161
1162 void L2FaceRestriction::AddFaceMatricesToElementMatrices(const Vector &fea_data,
1163 Vector &ea_data) const
1164{
1165 const int nface_dofs = face_dofs;
1166 const int nelem_dofs = elem_dofs;
1167 const int NE = ne;
1168 if ( m==L2FaceValues::DoubleValued )
1169 {
1170 auto d_indices1 = scatter_indices1.Read();
1171 auto d_indices2 = scatter_indices2.Read();
1172 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, 2, nf);
1173 auto mat_ea = Reshape(ea_data.ReadWrite(), nelem_dofs, nelem_dofs, ne);
1174 mfem::forall(nf, [=] MFEM_HOST_DEVICE (int f)
1175 {
1176 const int e1 = d_indices1[f*nface_dofs]/nelem_dofs;
1177 const int e2 = d_indices2[f*nface_dofs]/nelem_dofs;
1178 for (int j = 0; j < nface_dofs; j++)
1179 {
1180 const int jB1 = d_indices1[f*nface_dofs+j]%nelem_dofs;
1181 for (int i = 0; i < nface_dofs; i++)
1182 {
1183 const int iB1 = d_indices1[f*nface_dofs+i]%nelem_dofs;
1184 AtomicAdd(mat_ea(iB1,jB1,e1), mat_fea(i,j,0,f));
1185 }
1186 }
1187 if (e2 < NE)
1188 {
1189 for (int j = 0; j < nface_dofs; j++)
1190 {
1191 const int jB2 = d_indices2[f*nface_dofs+j]%nelem_dofs;
1192 for (int i = 0; i < nface_dofs; i++)
1193 {
1194 const int iB2 = d_indices2[f*nface_dofs+i]%nelem_dofs;
1195 AtomicAdd(mat_ea(iB2,jB2,e2), mat_fea(i,j,1,f));
1196 }
1197 }
1198 }
1199 });
1200 }
1201 else
1202 {
1203 auto d_indices = scatter_indices1.Read();
1204 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, nf);
1205 auto mat_ea = Reshape(ea_data.ReadWrite(), nelem_dofs, nelem_dofs, ne);
1206 mfem::forall(nf, [=] MFEM_HOST_DEVICE (int f)
1207 {
1208 const int e = d_indices[f*nface_dofs]/nelem_dofs;
1209 for (int j = 0; j < nface_dofs; j++)
1210 {
1211 const int jE = d_indices[f*nface_dofs+j]%nelem_dofs;
1212 for (int i = 0; i < nface_dofs; i++)
1213 {
1214 const int iE = d_indices[f*nface_dofs+i]%nelem_dofs;
1215 AtomicAdd(mat_ea(iE,jE,e), mat_fea(i,j,f));
1216 }
1217 }
1218 });
1219 }
1220}
1221
1222 void L2FaceRestriction::CheckFESpace()
1223{
1224#ifdef MFEM_USE_MPI
1225
1226 // If the underlying finite element space is parallel, ensure the face
1227 // neighbor information is generated.
1228 if (const ParFiniteElementSpace *pfes
1229 = dynamic_cast<const ParFiniteElementSpace*>(&fes))
1230 {
1231 pfes->GetParMesh()->ExchangeFaceNbrData();
1232 }
1233
1234#endif
1235
1236#ifdef MFEM_DEBUG
1237 // The space is expected to be an L2 space with a supported tensor-product basis.
1238 const FiniteElement *fe0 = fes.GetTypicalFE();
1239 const TensorBasisElement *tfe = dynamic_cast<const TensorBasisElement*>(fe0);
1240 MFEM_VERIFY(tfe != NULL &&
1241 (tfe->GetBasisType()==BasisType::GaussLobatto ||
1242 tfe->GetBasisType()==BasisType::Positive),
1243 "Only Gauss-Lobatto and Bernstein basis are supported in "
1244 "L2FaceRestriction.");
1245 if (nf==0) { return; }
1246 const bool dof_reorder = (ordering == ElementDofOrdering::LEXICOGRAPHIC);
1247 if (!dof_reorder)
1248 {
1249 MFEM_ABORT("Non-Tensor L2FaceRestriction not yet implemented.");
1250 }
1251 if (dof_reorder && nf > 0)
1252 {
1253 for (int f = 0; f < fes.GetNF(); ++f)
1254 {
1256 const TensorBasisElement* el = dynamic_cast<const TensorBasisElement*>(fe);
1257 if (el) { continue; }
1258 MFEM_ABORT("Finite element not suitable for lexicographic ordering");
1259 }
1260 }
1261#endif
1262}
1263
1264void L2FaceRestriction::ComputeScatterIndicesAndOffsets()
1265{
1266 Mesh &mesh = *fes.GetMesh();
1267 // Initialization of the offsets
1268 for (int i = 0; i <= ndofs; ++i)
1269 {
1270 gather_offsets[i] = 0;
1271 }
1272
1273 // Computation of scatter indices and offsets
1274 int f_ind=0;
1275 for (int f = 0; f < fes.GetNF(); ++f)
1276 {
1277 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
1278 MFEM_ASSERT(!face.IsShared(),
1279 "Unexpected shared face in L2FaceRestriction.");
1280 if ( face.IsOfFaceType(type) )
1281 {
1282 SetFaceDofsScatterIndices1(face,f_ind);
1283 if ( m==L2FaceValues::DoubleValued )
1284 {
1285 if ( type==FaceType::Interior && face.IsInterior() )
1286 {
1287 PermuteAndSetFaceDofsScatterIndices2(face,f_ind);
1288 }
1289 else if ( type==FaceType::Boundary && face.IsBoundary() )
1290 {
1291 SetBoundaryDofsScatterIndices2(face,f_ind);
1292 }
1293 }
1294 f_ind++;
1295 }
1296 }
1297 MFEM_VERIFY(f_ind==nf, "Unexpected number of faces.");
1298
1299 // Summation of the offsets
1300 for (int i = 1; i <= ndofs; ++i)
1301 {
1302 gather_offsets[i] += gather_offsets[i - 1];
1303 }
1304}
1305
1306void L2FaceRestriction::ComputeGatherIndices()
1307{
1308 Mesh &mesh = *fes.GetMesh();
1309 // Computation of gather_indices
1310 int f_ind = 0;
1311 for (int f = 0; f < fes.GetNF(); ++f)
1312 {
1313 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
1314 MFEM_ASSERT(!face.IsShared(),
1315 "Unexpected shared face in L2FaceRestriction.");
1316 if ( face.IsOfFaceType(type) )
1317 {
1318 SetFaceDofsGatherIndices1(face,f_ind);
1319 if ( m==L2FaceValues::DoubleValued &&
1320 type==FaceType::Interior &&
1321 face.IsLocal())
1322 {
1323 PermuteAndSetFaceDofsGatherIndices2(face,f_ind);
1324 }
1325 f_ind++;
1326 }
1327 }
1328 MFEM_VERIFY(f_ind==nf, "Unexpected number of faces.");
1329
1330 // Reset offsets to their correct value
1331 for (int i = ndofs; i > 0; --i)
1332 {
1333 gather_offsets[i] = gather_offsets[i - 1];
1334 }
1335 gather_offsets[0] = 0;
1336}
1337
1338 void L2FaceRestriction::SetFaceDofsScatterIndices1(
1339 const Mesh::FaceInformation &face,
1340 const int face_index)
1341{
1342 MFEM_ASSERT(!(face.IsNonconformingCoarse()),
1343 "This method should not be used on nonconforming coarse faces.");
1344 const Table& e2dTable = fes.GetElementToDofTable();
1345 const int* elem_map = e2dTable.GetJ();
1346 const int face_id1 = face.element[0].local_face_id;
1347 const int elem_index = face.element[0].index;
1348 fes.GetTypicalFE()->GetFaceMap(face_id1, face_map);
1349
1350 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1351 {
1352 const int volume_dof_elem1 = face_map[face_dof_elem1];
1353 const int global_dof_elem1 = elem_map[elem_index*elem_dofs + volume_dof_elem1];
1354 const int restriction_dof_elem1 = face_dofs*face_index + face_dof_elem1;
1355 scatter_indices1[restriction_dof_elem1] = global_dof_elem1;
1356 ++gather_offsets[global_dof_elem1 + 1];
1357 }
1358}
1359
1360 void L2FaceRestriction::PermuteAndSetFaceDofsScatterIndices2(
1361 const Mesh::FaceInformation &face,
1362 const int face_index)
1363{
1364 MFEM_ASSERT(face.IsLocal(),
1365 "This method should only be used on local faces.");
1366 const Table& e2dTable = fes.GetElementToDofTable();
1367 const int* elem_map = e2dTable.GetJ();
1368 const int elem_index = face.element[1].index;
1369 const int face_id1 = face.element[0].local_face_id;
1370 const int face_id2 = face.element[1].local_face_id;
1371 const int orientation = face.element[1].orientation;
1372 const int dim = fes.GetMesh()->Dimension();
1373 const int dof1d = fes.GetTypicalFE()->GetOrder()+1;
1374 fes.GetTypicalFE()->GetFaceMap(face_id2, face_map);
1375
1376 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1377 {
1378 const int face_dof_elem2 = PermuteFaceL2(dim, face_id1, face_id2,
1379 orientation, dof1d,
1380 face_dof_elem1);
1381 const int volume_dof_elem2 = face_map[face_dof_elem2];
1382 const int global_dof_elem2 = elem_map[elem_index*elem_dofs + volume_dof_elem2];
1383 const int restriction_dof_elem2 = face_dofs*face_index + face_dof_elem1;
1384 scatter_indices2[restriction_dof_elem2] = global_dof_elem2;
1385 ++gather_offsets[global_dof_elem2 + 1];
1386 }
1387}
1388
1389 void L2FaceRestriction::PermuteAndSetSharedFaceDofsScatterIndices2(
1390 const Mesh::FaceInformation &face,
1391 const int face_index)
1392{
1393#ifdef MFEM_USE_MPI
1394 MFEM_ASSERT(face.IsShared(),
1395 "This method should only be used on shared faces.");
1396 const int elem_index = face.element[1].index;
1397 const int face_id1 = face.element[0].local_face_id;
1398 const int face_id2 = face.element[1].local_face_id;
1399 const int orientation = face.element[1].orientation;
1400 const int dim = fes.GetMesh()->Dimension();
1401 const int dof1d = fes.GetTypicalFE()->GetOrder()+1;
1402 fes.GetTypicalFE()->GetFaceMap(face_id2, face_map);
1403 Array<int> face_nbr_dofs;
1404 const ParFiniteElementSpace &pfes =
1405 static_cast<const ParFiniteElementSpace&>(this->fes);
1406 pfes.GetFaceNbrElementVDofs(elem_index, face_nbr_dofs);
1407
1408 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1409 {
1410 const int face_dof_elem2 = PermuteFaceL2(dim, face_id1, face_id2,
1411 orientation, dof1d, face_dof_elem1);
1412 const int volume_dof_elem2 = face_map[face_dof_elem2];
1413 const int global_dof_elem2 = face_nbr_dofs[volume_dof_elem2];
1414 const int restriction_dof_elem2 = face_dofs*face_index + face_dof_elem1;
1415 // Trick to differentiate dof locations: interior (local) vs. shared (face-neighbor)
1416 scatter_indices2[restriction_dof_elem2] = ndofs+global_dof_elem2;
1417 }
1418#endif
1419}
1420
1421 void L2FaceRestriction::SetBoundaryDofsScatterIndices2(
1422 const Mesh::FaceInformation &face,
1423 const int face_index)
1424{
1425 MFEM_ASSERT(face.IsBoundary(),
1426 "This method should only be used on boundary faces.");
1427
1428 for (int d = 0; d < face_dofs; ++d)
1429 {
1430 const int restriction_dof_elem2 = face_dofs*face_index + d;
1431 scatter_indices2[restriction_dof_elem2] = -1;
1432 }
1433}
1434
1435 void L2FaceRestriction::SetFaceDofsGatherIndices1(
1436 const Mesh::FaceInformation &face,
1437 const int face_index)
1438{
1439 MFEM_ASSERT(!(face.IsNonconformingCoarse()),
1440 "This method should not be used on nonconforming coarse faces.");
1441 const Table& e2dTable = fes.GetElementToDofTable();
1442 const int* elem_map = e2dTable.GetJ();
1443 const int face_id1 = face.element[0].local_face_id;
1444 const int elem_index = face.element[0].index;
1445 fes.GetTypicalFE()->GetFaceMap(face_id1, face_map);
1446
1447 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1448 {
1449 const int volume_dof_elem1 = face_map[face_dof_elem1];
1450 const int global_dof_elem1 = elem_map[elem_index*elem_dofs + volume_dof_elem1];
1451 const int restriction_dof_elem1 = face_dofs*face_index + face_dof_elem1;
1452 // restriction_dof_elem1 is left unshifted to indicate that it belongs to elem1 of the face
1453 gather_indices[gather_offsets[global_dof_elem1]++] = restriction_dof_elem1;
1454 }
1455}
1456
1457 void L2FaceRestriction::PermuteAndSetFaceDofsGatherIndices2(
1458 const Mesh::FaceInformation &face,
1459 const int face_index)
1460{
1461 MFEM_ASSERT(face.IsLocal(),
1462 "This method should only be used on local faces.");
1463 const Table& e2dTable = fes.GetElementToDofTable();
1464 const int* elem_map = e2dTable.GetJ();
1465 const int elem_index = face.element[1].index;
1466 const int face_id1 = face.element[0].local_face_id;
1467 const int face_id2 = face.element[1].local_face_id;
1468 const int orientation = face.element[1].orientation;
1469 const int dim = fes.GetMesh()->Dimension();
1470 const int dof1d = fes.GetTypicalFE()->GetOrder()+1;
1471 fes.GetTypicalFE()->GetFaceMap(face_id2, face_map);
1472
1473 for (int face_dof_elem1 = 0; face_dof_elem1 < face_dofs; ++face_dof_elem1)
1474 {
1475 const int face_dof_elem2 = PermuteFaceL2(dim, face_id1, face_id2,
1476 orientation, dof1d,
1477 face_dof_elem1);
1478 const int volume_dof_elem2 = face_map[face_dof_elem2];
1479 const int global_dof_elem2 = elem_map[elem_index*elem_dofs + volume_dof_elem2];
1480 const int restriction_dof_elem2 = face_dofs*face_index + face_dof_elem1;
1481 // We shift restriction_dof_elem2 to express that it's elem2 of the face
1482 gather_indices[gather_offsets[global_dof_elem2]++] = nfdofs +
1483 restriction_dof_elem2;
1484 }
1485}
1486
1487 void L2FaceRestriction::NormalDerivativeMult(const Vector &x, Vector &y) const
1488{
1489 EnsureNormalDerivativeRestriction();
1490 normal_deriv_restr->Mult(x, y);
1491}
1492
1493 void L2FaceRestriction::NormalDerivativeAddMultTranspose(const Vector &x,
1494 Vector &y) const
1495{
1496 EnsureNormalDerivativeRestriction();
1497 normal_deriv_restr->AddMultTranspose(x, y);
1498}
1499
1500void L2FaceRestriction::EnsureNormalDerivativeRestriction() const
1501{
1502 if (!normal_deriv_restr)
1503 {
1504 normal_deriv_restr.reset(
1505 new L2NormalDerivativeFaceRestriction(fes, ordering, type));
1506 }
1507}
1508
1509 InterpolationManager::InterpolationManager(const FiniteElementSpace &fes,
1510 ElementDofOrdering ordering,
1511 FaceType type)
1512 : fes(fes),
1513 ordering(ordering),
1514 interp_config( fes.GetNFbyType(type) ),
1515 nc_cpt(0)
1516{ }
1517
1518 void InterpolationManager::RegisterFaceConformingInterpolation(
1519 const Mesh::FaceInformation &face,
1520 int face_index)
1521{
1522 interp_config[face_index] = InterpConfig();
1523}
1524
1525 void InterpolationManager::RegisterFaceCoarseToFineInterpolation(
1526 const Mesh::FaceInformation &face,
1527 int face_index)
1528{
1529 MFEM_ASSERT(!face.IsConforming(),
1530 "Registering face as nonconforming even though it is not.");
1531 const DenseMatrix* ptMat = face.point_matrix;
1532 // In the case of a nonconforming slave shared face, the master face is elem1.
1533 const int master_side =
1534 face.element[0].conformity == Mesh::ElementConformity::Superset ? 0 : 1;
1535 const int face_key = (master_side == 0 ? 1000 : 0) +
1536 face.element[0].local_face_id +
1537 6*face.element[1].local_face_id +
1538 36*face.element[1].orientation ;
1539 // Unfortunately, we cannot rely on the uniqueness of ptMat to identify the transformation.
1540 Key key(ptMat, face_key);
1541 auto itr = interp_map.find(key);
1542 if ( itr == interp_map.end() )
1543 {
1544 const DenseMatrix* interpolator =
1545 GetCoarseToFineInterpolation(face,ptMat);
1546 interp_map[key] = {nc_cpt, interpolator};
1547 interp_config[face_index] = {master_side, nc_cpt};
1548 nc_cpt++;
1549 }
1550 else
1551 {
1552 interp_config[face_index] = {master_side, itr->second.first};
1553 }
1554}
1555
1556const DenseMatrix* InterpolationManager::GetCoarseToFineInterpolation(
1557 const Mesh::FaceInformation &face,
1558 const DenseMatrix* ptMat)
1559{
1560 MFEM_VERIFY(ordering == ElementDofOrdering::LEXICOGRAPHIC,
1561 "The following interpolation operator is only implemented for"
1562 "lexicographic ordering.");
1563 MFEM_VERIFY(!face.IsConforming(),
1564 "This method should not be called on conforming faces.")
1565 const int face_id1 = face.element[0].local_face_id;
1566 const int face_id2 = face.element[1].local_face_id;
1567
1568 const bool is_ghost_slave =
1569 face.element[0].conformity == Mesh::ElementConformity::Superset;
1570 const int master_face_id = is_ghost_slave ? face_id1 : face_id2;
1571
1572 // Computation of the interpolation matrix from master
1573 // (coarse) face to slave (fine) face.
1574 // Assumes all trace elements are the same.
1575 const FiniteElement *trace_fe = fes.GetTypicalTraceElement();
1576 const int face_dofs = trace_fe->GetDof();
1577 const TensorBasisElement* el =
1578 dynamic_cast<const TensorBasisElement*>(trace_fe);
1579 const auto dof_map = el->GetDofMap();
1580 DenseMatrix* interpolator = new DenseMatrix(face_dofs,face_dofs);
1581 Vector shape(face_dofs);
1582
1583 IsoparametricTransformation isotr;
1584 isotr.SetIdentityTransformation(trace_fe->GetGeomType());
1585 isotr.SetPointMat(*ptMat);
1586 DenseMatrix& trans_pt_mat = isotr.GetPointMat();
1587 // PointMatrix needs to be flipped in 2D
1588 if ( trace_fe->GetGeomType()==Geometry::SEGMENT && !is_ghost_slave )
1589 {
1590 std::swap(trans_pt_mat(0,0),trans_pt_mat(0,1));
1591 }
1592 DenseMatrix native_interpolator(face_dofs,face_dofs);
1593 trace_fe->GetLocalInterpolation(isotr, native_interpolator);
1594 const int dim = trace_fe->GetDim()+1;
1595 const int dof1d = trace_fe->GetOrder()+1;
1596 const int orientation = face.element[1].orientation;
1597 for (int i = 0; i < face_dofs; i++)
1598 {
1599 const int ni = (dof_map.Size()==0) ? i : dof_map[i];
1600 int li = ToLexOrdering(dim, master_face_id, dof1d, i);
1601 if ( !is_ghost_slave )
1602 {
1603 // master side is elem 2, so we permute to order dofs as elem 1.
1604 li = PermuteFaceL2(dim, face_id2, face_id1,
1605 orientation, dof1d, li);
1606 }
1607 for (int j = 0; j < face_dofs; j++)
1608 {
1609 int lj = ToLexOrdering(dim, master_face_id, dof1d, j);
1610 if ( !is_ghost_slave )
1611 {
1612 // master side is elem 2, so we permute to order dofs as elem 1.
1613 lj = PermuteFaceL2(dim, face_id2, face_id1,
1614 orientation, dof1d, lj);
1615 }
1616 const int nj = (dof_map.Size()==0) ? j : dof_map[j];
1617 (*interpolator)(li,lj) = native_interpolator(ni,nj);
1618 }
1619 }
1620 return interpolator;
1621}
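// Note: the returned interpolators are cached in interp_map, keyed by the face
// point matrix together with a face/orientation key, so geometrically identical
// nonconforming configurations share a single matrix; they are later packed
// into one contiguous device array by the linearization routine below.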
1622
1623 void InterpolationManager::LinearizeInterpolatorMapIntoVector()
1624{
1625 // Assumes all trace elements are the same.
1626 const FiniteElement *trace_fe = fes.GetTypicalTraceElement();
1627 const int face_dofs = trace_fe->GetDof();
1628 const int nc_size = static_cast<int>(interp_map.size());
1629 MFEM_VERIFY(nc_cpt==nc_size, "Unexpected number of interpolators.");
1630 interpolators.SetSize(face_dofs*face_dofs*nc_size);
1631 auto d_interp = Reshape(interpolators.HostWrite(),face_dofs,face_dofs,nc_size);
1632 for (auto val : interp_map)
1633 {
1634 const int idx = val.second.first;
1635 const DenseMatrix &interpolator = *val.second.second;
1636 for (int i = 0; i < face_dofs; i++)
1637 {
1638 for (int j = 0; j < face_dofs; j++)
1639 {
1640 d_interp(i,j,idx) = interpolator(i,j);
1641 }
1642 }
1643 delete val.second.second;
1644 }
1645 interp_map.clear();
1646}
1647
1648 void InterpolationManager::InitializeNCInterpConfig()
1649{
1650 // Count nonconforming faces
1651 int num_nc_faces = 0;
1652 for (int i = 0; i < interp_config.Size(); i++)
1653 {
1654 if ( interp_config[i].is_non_conforming )
1655 {
1656 num_nc_faces++;
1657 }
1658 }
1659 // Set nc_interp_config
1660 nc_interp_config.SetSize(num_nc_faces);
1661 int nc_index = 0;
1662 for (int i = 0; i < interp_config.Size(); i++)
1663 {
1664 auto & config = interp_config[i];
1665 if ( config.is_non_conforming )
1666 {
1667 nc_interp_config[nc_index] = NCInterpConfig(i, config);
1668 nc_index++;
1669 }
1670 }
1671}
1672
1673 NCL2FaceRestriction::NCL2FaceRestriction(const FiniteElementSpace &fes,
1674 const ElementDofOrdering f_ordering,
1675 const FaceType type,
1676 const L2FaceValues m,
1677 bool build)
1678 : L2FaceRestriction(fes, f_ordering, type, m, false),
1679 interpolations(fes, f_ordering, type)
1680{
1681 if (!build) { return; }
1682 x_interp.UseDevice(true);
1683
1684 CheckFESpace();
1685
1686 ComputeScatterIndicesAndOffsets();
1687
1688 ComputeGatherIndices();
1689}
1690
1691 NCL2FaceRestriction::NCL2FaceRestriction(const FiniteElementSpace &fes,
1692 const ElementDofOrdering f_ordering,
1693 const FaceType type,
1694 const L2FaceValues m)
1695 : NCL2FaceRestriction(fes, f_ordering, type, m, true)
1696{ }
1697
1704
1705 void NCL2FaceRestriction::DoubleValuedNonconformingInterpolation(
1706 Vector& y) const
1707{
1708 if (nf == 0) { return; }
1709 // Assumes all elements have the same number of dofs
1710 const int nface_dofs = face_dofs;
1711 const int vd = vdim;
1712 auto d_y = Reshape(y.ReadWrite(), nface_dofs, vd, 2, nf);
1713 auto &nc_interp_config = interpolations.GetNCFaceInterpConfig();
1714 const int num_nc_faces = nc_interp_config.Size();
1715 if ( num_nc_faces == 0 ) { return; }
1716 auto interp_config_ptr = nc_interp_config.Read();
1717 const int nc_size = interpolations.GetNumInterpolators();
1718 auto d_interp = Reshape(interpolations.GetInterpolators().Read(),
1719 nface_dofs, nface_dofs, nc_size);
1720 static constexpr int max_nd = 16*16;
1721 MFEM_VERIFY(nface_dofs<=max_nd, "Too many degrees of freedom.");
1722 mfem::forall_2D(num_nc_faces, nface_dofs, 1, [=] MFEM_HOST_DEVICE (int nc_face)
1723 {
1724 MFEM_SHARED real_t dof_values[max_nd];
1725 const NCInterpConfig conf = interp_config_ptr[nc_face];
1726 if ( conf.is_non_conforming )
1727 {
1728 const int master_side = conf.master_side;
1729 const int interp_index = conf.index;
1730 const int face = conf.face_index;
1731 for (int c = 0; c < vd; ++c)
1732 {
1733 MFEM_FOREACH_THREAD(dof,x,nface_dofs)
1734 {
1735 dof_values[dof] = d_y(dof, c, master_side, face);
1736 }
1737 MFEM_SYNC_THREAD;
1738 MFEM_FOREACH_THREAD(dof_out,x,nface_dofs)
1739 {
1740 real_t res = 0.0;
1741 for (int dof_in = 0; dof_in<nface_dofs; dof_in++)
1742 {
1743 res += d_interp(dof_out, dof_in, interp_index)*dof_values[dof_in];
1744 }
1745 d_y(dof_out, c, master_side, face) = res;
1746 }
1747 MFEM_SYNC_THREAD;
1748 }
1749 }
1750 });
1751}
1752
1753void NCL2FaceRestriction::Mult(const Vector& x, Vector& y) const
1754{
1755 if ( type==FaceType::Interior && m==L2FaceValues::DoubleValued )
1756 {
1757 DoubleValuedNonconformingMult(x, y);
1758 }
1759 else if ( type==FaceType::Boundary && m==L2FaceValues::DoubleValued )
1760 {
1761 DoubleValuedConformingMult(x, y);
1762 }
1763 else // Single valued (assumes no nonconforming master on elem1)
1764 {
1765 SingleValuedConformingMult(x, y);
1766 }
1767}
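// Summary: on nonconforming interior faces the coarse (master) side trace is
// first gathered like a conforming face and then interpolated in place to the
// fine (slave) face dofs using the cached interpolators, so both sides of the
// double-valued E-face vector are aligned dof-by-dof.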
1768
1769void NCL2FaceRestriction::SingleValuedNonconformingTransposeInterpolation(
1770 const Vector& x) const
1771{
1772 MFEM_ASSERT(
1773 m == L2FaceValues::SingleValued,
1774 "This method should be called when m == L2FaceValues::SingleValued.");
1775 if (x_interp.Size()==0)
1776 {
1777 x_interp.SetSize(x.Size());
1778 }
1779 x_interp = x;
1780 SingleValuedNonconformingTransposeInterpolationInPlace(x_interp);
1781}
1782
1783
1784void NCL2FaceRestriction::SingleValuedNonconformingTransposeInterpolationInPlace(
1785 Vector& x) const
1786{
1787 // Assumes all elements have the same number of dofs
1788 const int nface_dofs = face_dofs;
1789 const int vd = vdim;
1790 // Interpolation
1791 auto d_x = Reshape(x_interp.ReadWrite(), nface_dofs, vd, nf);
1792 auto &nc_interp_config = interpolations.GetNCFaceInterpConfig();
1793 const int num_nc_faces = nc_interp_config.Size();
1794 if ( num_nc_faces == 0 ) { return; }
1795 auto interp_config_ptr = nc_interp_config.Read();
1796 auto interpolators = interpolations.GetInterpolators().Read();
1797 const int nc_size = interpolations.GetNumInterpolators();
1798 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
1799 static constexpr int max_nd = 16*16;
1800 MFEM_VERIFY(nface_dofs<=max_nd, "Too many degrees of freedom.");
1801 mfem::forall_2D(num_nc_faces, nface_dofs, 1, [=] MFEM_HOST_DEVICE (int nc_face)
1802 {
1803 MFEM_SHARED real_t dof_values[max_nd];
1804 const NCInterpConfig conf = interp_config_ptr[nc_face];
1805 const int master_side = conf.master_side;
1806 const int interp_index = conf.index;
1807 const int face = conf.face_index;
1808 if ( conf.is_non_conforming && master_side==0 )
1809 {
1810 // Interpolation from fine to coarse
1811 for (int c = 0; c < vd; ++c)
1812 {
1813 MFEM_FOREACH_THREAD(dof,x,nface_dofs)
1814 {
1815 dof_values[dof] = d_x(dof, c, face);
1816 }
1817 MFEM_SYNC_THREAD;
1818 MFEM_FOREACH_THREAD(dof_out,x,nface_dofs)
1819 {
1820 real_t res = 0.0;
1821 for (int dof_in = 0; dof_in<nface_dofs; dof_in++)
1822 {
1823 res += d_interp(dof_in, dof_out, interp_index)*dof_values[dof_in];
1824 }
1825 d_x(dof_out, c, face) = res;
1826 }
1827 MFEM_SYNC_THREAD;
1828 }
1829 }
1830 });
1831}
1832
1833void NCL2FaceRestriction::DoubleValuedNonconformingTransposeInterpolation(
1834 const Vector& x) const
1835{
1836 MFEM_ASSERT(
1837 m == L2FaceValues::DoubleValued,
1838 "This method should be called when m == L2FaceValues::DoubleValued.");
1839 if (x_interp.Size()==0)
1840 {
1841 x_interp.SetSize(x.Size());
1842 }
1843 x_interp = x;
1844 DoubleValuedNonconformingTransposeInterpolationInPlace(x_interp);
1845}
1846
1847void NCL2FaceRestriction::DoubleValuedNonconformingTransposeInterpolationInPlace(
1848 Vector& x) const
1849{
1850 // Assumes all elements have the same number of dofs
1851 const int nface_dofs = face_dofs;
1852 const int vd = vdim;
1853 // Interpolation
1854 auto d_x = Reshape(x.ReadWrite(), nface_dofs, vd, 2, nf);
1855 auto &nc_interp_config = interpolations.GetNCFaceInterpConfig();
1856 const int num_nc_faces = nc_interp_config.Size();
1857 if ( num_nc_faces == 0 ) { return; }
1858 auto interp_config_ptr = nc_interp_config.Read();
1859 auto interpolators = interpolations.GetInterpolators().Read();
1860 const int nc_size = interpolations.GetNumInterpolators();
1861 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
1862 static constexpr int max_nd = 16*16;
1863 MFEM_VERIFY(nface_dofs<=max_nd, "Too many degrees of freedom.");
1864 mfem::forall_2D(num_nc_faces, nface_dofs, 1, [=] MFEM_HOST_DEVICE (int nc_face)
1865 {
1866 MFEM_SHARED real_t dof_values[max_nd];
1867 const NCInterpConfig conf = interp_config_ptr[nc_face];
1868 const int master_side = conf.master_side;
1869 const int interp_index = conf.index;
1870 const int face = conf.face_index;
1871 if ( conf.is_non_conforming )
1872 {
1873 // Interpolation from fine to coarse
1874 for (int c = 0; c < vd; ++c)
1875 {
1876 MFEM_FOREACH_THREAD(dof,x,nface_dofs)
1877 {
1878 dof_values[dof] = d_x(dof, c, master_side, face);
1879 }
1880 MFEM_SYNC_THREAD;
1881 MFEM_FOREACH_THREAD(dof_out,x,nface_dofs)
1882 {
1883 real_t res = 0.0;
1884 for (int dof_in = 0; dof_in<nface_dofs; dof_in++)
1885 {
1886 res += d_interp(dof_in, dof_out, interp_index)*dof_values[dof_in];
1887 }
1888 d_x(dof_out, c, master_side, face) = res;
1889 }
1890 MFEM_SYNC_THREAD;
1891 }
1892 }
1893 });
1894}
1895
1896void NCL2FaceRestriction::AddMultTranspose(const Vector& x, Vector& y,
1897 const real_t a) const
1898{
1899 MFEM_VERIFY(a == 1.0, "General coefficient case is not yet supported!");
1900 if (nf==0) { return; }
1901 if (type==FaceType::Interior)
1902 {
1903 if ( m==L2FaceValues::DoubleValued )
1904 {
1905 DoubleValuedNonconformingTransposeInterpolation(x);
1906 DoubleValuedConformingAddMultTranspose(x_interp, y);
1907 }
1908 else if ( m==L2FaceValues::SingleValued )
1909 {
1910 SingleValuedNonconformingTransposeInterpolation(x);
1911 SingleValuedConformingAddMultTranspose(x_interp, y);
1912 }
1913 }
1914 else
1915 {
1916 if ( m==L2FaceValues::DoubleValued )
1917 {
1918 DoubleValuedConformingAddMultTranspose(x, y);
1919 }
1920 else if ( m==L2FaceValues::SingleValued )
1921 {
1922 SingleValuedConformingAddMultTranspose(x, y);
1923 }
1924 }
1925}
1926
1927void NCL2FaceRestriction::AddMultTransposeInPlace(Vector& x, Vector& y) const
1928{
1929 if (nf==0) { return; }
1930 if (type==FaceType::Interior)
1931 {
1932 if ( m==L2FaceValues::DoubleValued )
1933 {
1934 DoubleValuedNonconformingTransposeInterpolationInPlace(x);
1935 DoubleValuedConformingAddMultTranspose(x, y);
1936 }
1937 else if ( m==L2FaceValues::SingleValued )
1938 {
1939 SingleValuedNonconformingTransposeInterpolationInPlace(x);
1940 SingleValuedConformingAddMultTranspose(x, y);
1941 }
1942 }
1943 else
1944 {
1945 if ( m==L2FaceValues::DoubleValued )
1946 {
1947 DoubleValuedConformingAddMultTranspose(x, y);
1948 }
1949 else if ( m==L2FaceValues::SingleValued )
1950 {
1951 SingleValuedConformingAddMultTranspose(x, y);
1952 }
1953 }
1954}
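// Note (illustrative sketch, not part of the original file): AddMultTranspose
// above first copies the face E-vector into x_interp and interpolates the
// copy, while AddMultTransposeInPlace modifies the caller's E-vector
// directly. A hedged usage sketch with a hypothetical helper name, assuming
// an already constructed restriction and vectors of compatible sizes:
static void GatherFaceValuesSketch(const NCL2FaceRestriction &R,
                                   const Vector &x_face, // face E-vector, left untouched
                                   Vector &y_lvec)       // L-vector of size R.Width()
{
   y_lvec = 0.0; // AddMultTranspose accumulates into y, so start from zero
   R.AddMultTranspose(x_face, y_lvec, 1.0);
}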
1955
1956void NCL2FaceRestriction::FillI(SparseMatrix &mat,
1957 const bool keep_nbr_block) const
1958{
1959 const int nface_dofs = face_dofs;
1960 auto d_indices1 = scatter_indices1.Read();
1961 auto d_indices2 = scatter_indices2.Read();
1962 auto I = mat.ReadWriteI();
1963 mfem::forall(nf*nface_dofs, [=] MFEM_HOST_DEVICE (int fdof)
1964 {
1965 const int iE1 = d_indices1[fdof];
1966 const int iE2 = d_indices2[fdof];
1967 AddNnz(iE1,I,nface_dofs);
1968 AddNnz(iE2,I,nface_dofs);
1969 });
1970}
1971
1972void NCL2FaceRestriction::FillJAndData(const Vector &fea_data,
1973 SparseMatrix &mat,
1974 const bool keep_nbr_block) const
1975{
1976 const int nface_dofs = face_dofs;
1977 auto d_indices1 = scatter_indices1.Read();
1978 auto d_indices2 = scatter_indices2.Read();
1979 auto I = mat.ReadWriteI();
1980 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, 2, nf);
1981 auto J = mat.WriteJ();
1982 auto Data = mat.WriteData();
1983 auto interp_config_ptr = interpolations.GetFaceInterpConfig().Read();
1984 auto interpolators = interpolations.GetInterpolators().Read();
1985 const int nc_size = interpolations.GetNumInterpolators();
1986 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
1987 mfem::forall(nf*nface_dofs, [=] MFEM_HOST_DEVICE (int fdof)
1988 {
1989 const int f = fdof/nface_dofs;
1990 const InterpConfig conf = interp_config_ptr[f];
1991 const int master_side = conf.master_side;
1992 const int interp_index = conf.index;
1993 const int iF = fdof%nface_dofs;
1994 const int iE1 = d_indices1[f*nface_dofs+iF];
1995 const int iE2 = d_indices2[f*nface_dofs+iF];
1996 const int offset1 = AddNnz(iE1,I,nface_dofs);
1997 const int offset2 = AddNnz(iE2,I,nface_dofs);
1998 for (int jF = 0; jF < nface_dofs; jF++)
1999 {
2000 const int jE1 = d_indices1[f*nface_dofs+jF];
2001 const int jE2 = d_indices2[f*nface_dofs+jF];
2002 J[offset2+jF] = jE1;
2003 J[offset1+jF] = jE2;
2004 real_t val1 = 0.0;
2005 real_t val2 = 0.0;
2006 if ( conf.is_non_conforming && master_side==0 )
2007 {
2008 for (int kF = 0; kF < nface_dofs; kF++)
2009 {
2010 val1 += mat_fea(kF,iF,0,f) * d_interp(kF, jF, interp_index);
2011 val2 += d_interp(kF, iF, interp_index) * mat_fea(jF,kF,1,f);
2012 }
2013 }
2014 else if ( conf.is_non_conforming && master_side==1 )
2015 {
2016 for (int kF = 0; kF < nface_dofs; kF++)
2017 {
2018 val1 += d_interp(kF, iF, interp_index) * mat_fea(jF,kF,0,f);
2019 val2 += mat_fea(kF,iF,1,f) * d_interp(kF, jF, interp_index);
2020 }
2021 }
2022 else
2023 {
2024 val1 = mat_fea(jF,iF,0,f);
2025 val2 = mat_fea(jF,iF,1,f);
2026 }
2027 Data[offset2+jF] = val1;
2028 Data[offset1+jF] = val2;
2029 }
2030 });
2031}
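// Note (illustrative sketch, not part of the original file): in FillJAndData
// above, conforming faces copy the face matrix blocks verbatim, while for a
// non-conforming face the block seen from the coarse (master) side is
// composed with the interpolator. The master_side==0 contraction over kF,
// val1(iF,jF) = sum_k A(k,iF) * interp(k,jF), is spelled out below with dense
// matrices; the helper name is hypothetical.
static void ComposeFaceBlockWithInterpolatorSketch(const DenseMatrix &A,      // nd x nd face block
                                                   const DenseMatrix &interp, // nd x nd interpolator
                                                   DenseMatrix &B)            // result
{
   const int nd = A.Height();
   B.SetSize(nd, nd);
   for (int j = 0; j < nd; ++j)
   {
      for (int i = 0; i < nd; ++i)
      {
         real_t val = 0.0;
         for (int k = 0; k < nd; ++k) { val += A(k, i) * interp(k, j); }
         B(i, j) = val;
      }
   }
}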
2032
2033void NCL2FaceRestriction::AddFaceMatricesToElementMatrices(
2034 const Vector &fea_data,
2035 Vector &ea_data)
2036const
2037{
2038 const int nface_dofs = face_dofs;
2039 const int nelem_dofs = elem_dofs;
2040 const int NE = ne;
2041 if (m==L2FaceValues::DoubleValued)
2042 {
2043 auto d_indices1 = scatter_indices1.Read();
2044 auto d_indices2 = scatter_indices2.Read();
2045 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, 2, nf);
2046 auto mat_ea = Reshape(ea_data.ReadWrite(), nelem_dofs, nelem_dofs, ne);
2047 auto interp_config_ptr = interpolations.GetFaceInterpConfig().Read();
2048 auto interpolators = interpolations.GetInterpolators().Read();
2049 const int nc_size = interpolations.GetNumInterpolators();
2050 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
2051 mfem::forall(nf, [=] MFEM_HOST_DEVICE (int f)
2052 {
2053 const InterpConfig conf = interp_config_ptr[f];
2054 const int master_side = conf.master_side;
2055 const int interp_index = conf.index;
2056 const int e1 = d_indices1[f*nface_dofs]/nelem_dofs;
2057 const int e2 = d_indices2[f*nface_dofs]/nelem_dofs;
2058 for (int j = 0; j < nface_dofs; j++)
2059 {
2060 const int jB1 = d_indices1[f*nface_dofs+j]%nelem_dofs;
2061 for (int i = 0; i < nface_dofs; i++)
2062 {
2063 const int iB1 = d_indices1[f*nface_dofs+i]%nelem_dofs;
2064 real_t val = 0.0;
2065 if ( conf.is_non_conforming && master_side==0 )
2066 {
2067 for (int k = 0; k < nface_dofs; k++)
2068 {
2069 for (int l = 0; l < nface_dofs; l++)
2070 {
2071 val += d_interp(l, j, interp_index)
2072 * mat_fea(k,l,0,f)
2073 * d_interp(k, i, interp_index);
2074 }
2075 }
2076 }
2077 else
2078 {
2079 val = mat_fea(i,j,0,f);
2080 }
2081 AtomicAdd(mat_ea(iB1,jB1,e1), val);
2082 }
2083 }
2084 if (e2 < NE)
2085 {
2086 for (int j = 0; j < nface_dofs; j++)
2087 {
2088 const int jB2 = d_indices2[f*nface_dofs+j]%nelem_dofs;
2089 for (int i = 0; i < nface_dofs; i++)
2090 {
2091 const int iB2 = d_indices2[f*nface_dofs+i]%nelem_dofs;
2092 real_t val = 0.0;
2093 if ( conf.is_non_conforming && master_side==1 )
2094 {
2095 for (int k = 0; k < nface_dofs; k++)
2096 {
2097 for (int l = 0; l < nface_dofs; l++)
2098 {
2099 val += d_interp(l, j, interp_index)
2100 * mat_fea(k,l,1,f)
2101 * d_interp(k, i, interp_index);
2102 }
2103 }
2104 }
2105 else
2106 {
2107 val = mat_fea(i,j,1,f);
2108 }
2109 AtomicAdd(mat_ea(iB2,jB2,e2), val);
2110 }
2111 }
2112 }
2113 });
2114 }
2115 else
2116 {
2117 auto d_indices = scatter_indices1.Read();
2118 auto mat_fea = Reshape(fea_data.Read(), nface_dofs, nface_dofs, nf);
2119 auto mat_ea = Reshape(ea_data.ReadWrite(), nelem_dofs, nelem_dofs, ne);
2120 auto interp_config_ptr = interpolations.GetFaceInterpConfig().Read();
2121 auto interpolators = interpolations.GetInterpolators().Read();
2122 const int nc_size = interpolations.GetNumInterpolators();
2123 auto d_interp = Reshape(interpolators, nface_dofs, nface_dofs, nc_size);
2124 mfem::forall(nf, [=] MFEM_HOST_DEVICE (int f)
2125 {
2126 const InterpConfig conf = interp_config_ptr[f];
2127 const int master_side = conf.master_side;
2128 const int interp_index = conf.index;
2129 const int e = d_indices[f*nface_dofs]/nelem_dofs;
2130 for (int j = 0; j < nface_dofs; j++)
2131 {
2132 const int jE = d_indices[f*nface_dofs+j]%nelem_dofs;
2133 for (int i = 0; i < nface_dofs; i++)
2134 {
2135 const int iE = d_indices[f*nface_dofs+i]%nelem_dofs;
2136 real_t val = 0.0;
2137 if ( conf.is_non_conforming && master_side==0 )
2138 {
2139 for (int k = 0; k < nface_dofs; k++)
2140 {
2141 for (int l = 0; l < nface_dofs; l++)
2142 {
2143 val += d_interp(l, j, interp_index)
2144 * mat_fea(k,l,f)
2145 * d_interp(k, i, interp_index);
2146 }
2147 }
2148 }
2149 else
2150 {
2151 val = mat_fea(i,j,f);
2152 }
2153 AtomicAdd(mat_ea(iE,jE,e), val);
2154 }
2155 }
2156 });
2157 }
2158}
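// Note (illustrative sketch, not part of the original file): when the face is
// non-conforming on the side being assembled, the double (k,l) loop above
// computes val(i,j) = sum_{k,l} interp(k,i) * A(k,l) * interp(l,j), i.e. the
// face matrix is sandwiched as interp^T * A * interp before being scattered
// into the element matrix. The same operation with dense matrices and a
// hypothetical helper name:
static void SandwichFaceMatrixSketch(const DenseMatrix &A,      // nd x nd face matrix
                                     const DenseMatrix &interp, // nd x nd interpolator
                                     DenseMatrix &B)            // interp^T * A * interp
{
   const int nd = A.Height();
   B.SetSize(nd, nd);
   for (int j = 0; j < nd; ++j)
   {
      for (int i = 0; i < nd; ++i)
      {
         real_t val = 0.0;
         for (int k = 0; k < nd; ++k)
         {
            for (int l = 0; l < nd; ++l) { val += interp(k, i) * A(k, l) * interp(l, j); }
         }
         B(i, j) = val;
      }
   }
}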
2159
2160int ToLexOrdering(const int dim, const int face_id, const int size1d,
2161 const int index)
2162{
2163 switch (dim)
2164 {
2165 case 1:
2166 return 0;
2167 case 2:
2168 return internal::ToLexOrdering2D(face_id, size1d, index);
2169 case 3:
2170 return internal::ToLexOrdering3D(face_id, size1d, index%size1d, index/size1d);
2171 default:
2172 MFEM_ABORT("Unsupported dimension.");
2173 return 0;
2174 }
2175}
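// Note (illustrative sketch, not part of the original file): ToLexOrdering
// converts a single native face dof index to its lexicographic counterpart,
// so the native-to-lexicographic permutation of a whole face can be built by
// looping over the face dofs. The helper name is hypothetical and the dof
// counts assume tensor-product (quad/hex) faces.
static void BuildFaceLexPermutationSketch(int dim, int face_id, int size1d,
                                          Array<int> &perm)
{
   const int nface_dofs = (dim == 3) ? size1d*size1d : ((dim == 2) ? size1d : 1);
   perm.SetSize(nface_dofs);
   for (int i = 0; i < nface_dofs; ++i)
   {
      // perm[i] = lexicographic index of the i-th natively ordered face dof
      perm[i] = ToLexOrdering(dim, face_id, size1d, i);
   }
}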
2176
2177void NCL2FaceRestriction::ComputeScatterIndicesAndOffsets()
2178{
2179 Mesh &mesh = *fes.GetMesh();
2180
2181 // Initialization of the offsets
2182 for (int i = 0; i <= ndofs; ++i)
2183 {
2184 gather_offsets[i] = 0;
2185 }
2186
2187 // Computation of scatter indices and offsets
2188 int f_ind=0;
2189 for (int f = 0; f < fes.GetNF(); ++f)
2190 {
2191 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
2192 if ( face.IsNonconformingCoarse() )
2193 {
2194 // We skip nonconforming coarse faces as they are treated
2195 // by the corresponding nonconforming fine faces.
2196 continue;
2197 }
2198 else if ( type==FaceType::Interior && face.IsInterior() )
2199 {
2200 SetFaceDofsScatterIndices1(face,f_ind);
2201 if ( m==L2FaceValues::DoubleValued )
2202 {
2203 PermuteAndSetFaceDofsScatterIndices2(face,f_ind);
2204 }
2205 if ( face.IsConforming() )
2206 {
2207 interpolations.RegisterFaceConformingInterpolation(face,f_ind);
2208 }
2209 else // Non-conforming face
2210 {
2211 interpolations.RegisterFaceCoarseToFineInterpolation(face,f_ind);
2212 }
2213 f_ind++;
2214 }
2215 else if ( type==FaceType::Boundary && face.IsBoundary() )
2216 {
2217 SetFaceDofsScatterIndices1(face,f_ind);
2218 if ( m==L2FaceValues::DoubleValued )
2219 {
2220 SetBoundaryDofsScatterIndices2(face,f_ind);
2221 }
2222 interpolations.RegisterFaceConformingInterpolation(face,f_ind);
2223 f_ind++;
2224 }
2225 }
2226 MFEM_VERIFY(f_ind==nf, "Unexpected number of " <<
2227 (type==FaceType::Interior? "interior" : "boundary") <<
2228 " faces: " << f_ind << " vs " << nf );
2229
2230 // Summation of the offsets
2231 for (int i = 1; i <= ndofs; ++i)
2232 {
2233 gather_offsets[i] += gather_offsets[i - 1];
2234 }
2235
2236 // Transform the interpolation matrix map into a contiguous memory structure.
2237 interpolations.LinearizeInterpolatorMapIntoVector();
2238 interpolations.InitializeNCInterpConfig();
2239}
2240
2241void NCL2FaceRestriction::ComputeGatherIndices()
2242{
2243 Mesh &mesh = *fes.GetMesh();
2244 // Computation of gather_indices
2245 int f_ind = 0;
2246 for (int f = 0; f < fes.GetNF(); ++f)
2247 {
2248 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
2249 MFEM_ASSERT(!face.IsShared(),
2250 "Unexpected shared face in NCL2FaceRestriction.");
2251 if ( face.IsNonconformingCoarse() )
2252 {
2253 // We skip nonconforming coarse faces as they are treated
2254 // by the corresponding nonconforming fine faces.
2255 continue;
2256 }
2257 else if ( face.IsOfFaceType(type) )
2258 {
2259 SetFaceDofsGatherIndices1(face,f_ind);
2260 if ( m==L2FaceValues::DoubleValued &&
2261 type==FaceType::Interior &&
2262 face.IsInterior() )
2263 {
2264 PermuteAndSetFaceDofsGatherIndices2(face,f_ind);
2265 }
2266 f_ind++;
2267 }
2268 }
2269 MFEM_VERIFY(f_ind==nf, "Unexpected number of " <<
2270 (type==FaceType::Interior? "interior" : "boundary") <<
2271 " faces: " << f_ind << " vs " << nf );
2272
2273 // Switch back offsets to their correct value
2274 for (int i = ndofs; i > 0; --i)
2275 {
2276 gather_offsets[i] = gather_offsets[i - 1];
2277 }
2278 gather_offsets[0] = 0;
2279}
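// Note (illustrative sketch, not part of the original file): the two methods
// above, together with the SetFaceDofs*Indices helpers they call, use
// gather_offsets in the classic counting-sort pattern: count the face dofs
// touching each L-dof, turn the counts into a running sum, use the offsets as
// insertion cursors while filling gather_indices (which shifts every entry up
// by one bin), and finally shift them back. The pattern in isolation, with
// hypothetical names:
static void CountingOffsetsPatternSketch(const Array<int> &item_to_bin,
                                         int nbins,
                                         Array<int> &offsets, // size nbins+1 on exit
                                         Array<int> &indices) // items grouped by bin
{
   offsets.SetSize(nbins + 1);
   indices.SetSize(item_to_bin.Size());
   for (int b = 0; b <= nbins; ++b) { offsets[b] = 0; }
   // 1) count: offsets[b+1] = number of items falling in bin b
   for (int i = 0; i < item_to_bin.Size(); ++i) { ++offsets[item_to_bin[i] + 1]; }
   // 2) prefix sum: offsets[b] = start of bin b
   for (int b = 1; b <= nbins; ++b) { offsets[b] += offsets[b - 1]; }
   // 3) fill, using offsets as moving cursors (each ends up one bin too far)
   for (int i = 0; i < item_to_bin.Size(); ++i) { indices[offsets[item_to_bin[i]]++] = i; }
   // 4) shift back so offsets[b] again marks the start of bin b
   for (int b = nbins; b > 0; --b) { offsets[b] = offsets[b - 1]; }
   offsets[0] = 0;
}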
2280
2281L2InterfaceFaceRestriction::L2InterfaceFaceRestriction(
2282 const FiniteElementSpace& fes_,
2283 const ElementDofOrdering ordering_,
2284 const FaceType type_)
2285 : fes(fes_),
2286 ordering(ordering_),
2287 type(type_),
2288 nfaces(fes.GetNFbyType(type)),
2289 vdim(fes.GetVDim()),
2290 byvdim(fes.GetOrdering() == Ordering::byVDIM),
2291 face_dofs(nfaces > 0 ? fes.GetFaceElement(0)->GetDof() : 0),
2292 nfdofs(face_dofs*nfaces),
2293 ndofs(fes.GetNDofs())
2294{
2295 height = nfdofs;
2296 width = ndofs;
2297
2298 const Table &face2dof = fes.GetFaceToDofTable();
2299
2300 const Mesh &mesh = *fes.GetMesh();
2300 int face_idx = 0;
2302 gather_map.SetSize(nfdofs);
2303 for (int f = 0; f < mesh.GetNumFaces(); ++f)
2304 {
2305 Mesh::FaceInformation face = mesh.GetFaceInformation(f);
2306 if (!face.IsOfFaceType(type)) { continue; }
2307 for (int i = 0; i < face_dofs; ++i)
2308 {
2309 gather_map[i + face_idx*face_dofs] = face2dof.GetJ()[i + f*face_dofs];
2310 }
2311 ++face_idx;
2312 }
2313}
2314
2316{
2317 const int nd = face_dofs;
2318 const int nf = nfaces;
2319 const int vd = vdim;
2320 const bool t = byvdim;
2321 const int *map = gather_map.Read();
2322
2323 const auto d_x = Reshape(x.Read(), t?vd:ndofs, t?ndofs:vd);
2324 auto d_y = Reshape(y.Write(), nd, vd, nf);
2325
2326 mfem::forall(nd*nf, [=] MFEM_HOST_DEVICE (int i)
2327 {
2328 const int j = map[i];
2329 for (int c = 0; c < vd; ++c)
2330 {
2331 d_y(i % nd, c, i / nd) = d_x(t?c:j, t?j:c);
2332 }
2333 });
2334}
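// Note (illustrative sketch, not part of the original file): a possible usage
// of L2InterfaceFaceRestriction, scattering an L-vector into the
// face_dofs x vdim x nfaces E-vector layout used by Mult above. Names are
// hypothetical; the sketch assumes a scalar space (vdim == 1), so that
// Height() equals the face E-vector size.
static void InterfaceFaceValuesSketch(const FiniteElementSpace &fes,
                                      const Vector &x_lvec, // size fes.GetVSize()
                                      Vector &x_face)       // face E-vector on exit
{
   L2InterfaceFaceRestriction restr(fes, ElementDofOrdering::LEXICOGRAPHIC,
                                    FaceType::Interior);
   x_face.SetSize(restr.Height());
   restr.Mult(x_lvec, x_face);
}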
2335
2336 void L2InterfaceFaceRestriction::AddMultTranspose(
2337 const Vector &x, Vector &y, const real_t a) const
2338{
2339 const int nd = face_dofs;
2340 const int nf = nfaces;
2341 const int vd = vdim;
2342 const bool t = byvdim;
2343 const int *map = gather_map.Read();
2344
2345 const auto d_x = Reshape(x.Read(), nd, vd, nf);
2346 auto d_y = Reshape(y.Write(), t?vd:ndofs, t?ndofs:vd);
2347
2348 mfem::forall(ndofs, [=] MFEM_HOST_DEVICE (int i) { d_y[i] = 0.0; });
2349 mfem::forall(nd*nf, [=] MFEM_HOST_DEVICE (int i)
2350 {
2351 const int j = map[i];
2352 for (int c = 0; c < vd; ++c)
2353 {
2354 d_y(t?c:j, t?j:c) = d_x(i % nd, c, i / nd);
2355 }
2356 });
2357}
2358
2360{
2361 return gather_map;
2362}
2363
2364 Vector GetLVectorFaceNbrData(
2365 const FiniteElementSpace &fes, const Vector &x, FaceType ftype)
2366{
2367#ifdef MFEM_USE_MPI
2368 if (ftype == FaceType::Interior)
2369 {
2370 if (auto *pfes = const_cast<ParFiniteElementSpace*>
2371 (dynamic_cast<const ParFiniteElementSpace*>(&fes)))
2372 {
2373 if (auto *x_gf = const_cast<ParGridFunction*>
2374 (dynamic_cast<const ParGridFunction*>(&x)))
2375 {
2376 Vector &gf_face_nbr = x_gf->FaceNbrData();
2377 if (gf_face_nbr.Size() == 0) { x_gf->ExchangeFaceNbrData(); }
2378 gf_face_nbr.Read();
2379 return Vector(gf_face_nbr, 0, gf_face_nbr.Size());
2380 }
2381 else
2382 {
2383 ParGridFunction gf(pfes, const_cast<Vector&>(x));
2384 gf.ExchangeFaceNbrData();
2385 return std::move(gf.FaceNbrData());
2386 }
2387 }
2388 }
2389#endif
2390 return Vector();
2391}
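// Note (illustrative sketch, not part of the original file): a hedged usage
// of GetLVectorFaceNbrData with a hypothetical helper name. In parallel runs
// with interior faces it returns (after exchanging the data, if necessary)
// the dofs of the face-neighbor elements; otherwise it returns an empty
// Vector.
static void FaceNbrDataUsageSketch(const FiniteElementSpace &fes,
                                   const Vector &x_lvec)
{
   Vector x_nbr = GetLVectorFaceNbrData(fes, x_lvec, FaceType::Interior);
   if (x_nbr.Size() > 0)
   {
      // x_nbr holds the L-dofs of the elements on the other side of shared faces.
   }
}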
2392
2393} // namespace mfem