MFEM  v4.4.0
Finite element discretization library
pncmesh.cpp
1 // Copyright (c) 2010-2022, Lawrence Livermore National Security, LLC. Produced
2 // at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3 // LICENSE and NOTICE for details. LLNL-CODE-806117.
4 //
5 // This file is part of the MFEM library. For more information and source code
6 // availability visit https://mfem.org.
7 //
8 // MFEM is free software; you can redistribute it and/or modify it under the
9 // terms of the BSD-3 license. We welcome feedback and contributions, see file
10 // CONTRIBUTING.md for details.
11 
12 #include "../config/config.hpp"
13 
14 #ifdef MFEM_USE_MPI
15 
16 #include "mesh_headers.hpp"
17 #include "pncmesh.hpp"
18 #include "../general/binaryio.hpp"
19 
20 #include <map>
21 #include <climits> // INT_MIN, INT_MAX
22 
23 namespace mfem
24 {
25 
26 using namespace bin_io;
27 
28 ParNCMesh::ParNCMesh(MPI_Comm comm, const NCMesh &ncmesh, int *part)
29  : NCMesh(ncmesh)
30 {
31  MyComm = comm;
32  MPI_Comm_size(MyComm, &NRanks);
33  MPI_Comm_rank(MyComm, &MyRank);
34 
35  // assign leaf elements to the processors by simply splitting the
36  // sequence of leaf elements into 'NRanks' parts
37  for (int i = 0; i < leaf_elements.Size(); i++)
38  {
39  elements[leaf_elements[i]].rank = part ? part[i] : InitialPartition(i);
40  }
41 
42  Update();
43 
44  // note that at this point all processors still have all the leaf elements;
45  // we however may now start pruning the refinement tree to get rid of
46  // branches that only contain someone else's leaves (see Prune())
47 }
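// [Editor's illustration, not part of pncmesh.cpp] The InitialPartition(i) call
// above splits the global space-filling-curve sequence of leaves into NRanks
// contiguous parts. A minimal standalone sketch of such an even split, with a
// hypothetical helper name and no claim to match MFEM's exact rounding:

static int initial_partition_sketch(long index, long total_leaves, int nranks)
{
   // leaf 'index' (0-based) in the global leaf sequence goes to the rank that
   // owns its slot when the sequence is cut into 'nranks' near-equal parts
   return static_cast<int>(index * (long) nranks / total_leaves);
}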
48 
49 ParNCMesh::ParNCMesh(MPI_Comm comm, std::istream &input, int version,
50  int &curved, int &is_nc)
51  : NCMesh(input, version, curved, is_nc)
52 {
53  MyComm = comm;
54  MPI_Comm_size(MyComm, &NRanks);
55 
56  int my_rank;
57  MPI_Comm_rank(MyComm, &my_rank);
58 
59  int max_rank = 0;
60  for (int i = 0; i < leaf_elements.Size(); i++)
61  {
62  max_rank = std::max(elements[leaf_elements[i]].rank, max_rank);
63  }
64 
65  MFEM_VERIFY((my_rank == MyRank) && (max_rank < NRanks),
66  "Parallel mesh file doesn't seem to match current MPI setup. "
67  "Loading a parallel NC mesh with a non-matching communicator "
68  "size is not supported.");
69 
70  bool iso = Iso;
71  MPI_Allreduce(&iso, &Iso, 1, MPI_C_BOOL, MPI_LAND, MyComm);
72 
73  Update();
74 }
75 
76 ParNCMesh::ParNCMesh(const ParNCMesh &other)
77 // copy primary data only
78  : NCMesh(other)
79  , MyComm(other.MyComm)
80  , NRanks(other.NRanks)
81 {
82  Update(); // mark all secondary stuff for recalculation
83 }
84 
85 ParNCMesh::~ParNCMesh()
86 {
87  ClearAuxPM();
88 }
89 
90 void ParNCMesh::Update()
91 {
92  NCMesh::Update();
93 
94  groups.clear();
95  group_id.clear();
96 
97  CommGroup self;
98  self.push_back(MyRank);
99  groups.push_back(self);
100  group_id[self] = 0;
101 
102  for (int i = 0; i < 3; i++)
103  {
104  entity_owner[i].DeleteAll();
105  entity_pmat_group[i].DeleteAll();
106  entity_index_rank[i].DeleteAll();
107  }
108 
109  shared_vertices.Clear();
110  shared_edges.Clear();
111  shared_faces.Clear();
112 
113  element_type.SetSize(0);
114  ghost_layer.SetSize(0);
115  boundary_layer.SetSize(0);
116 }
117 
118 void ParNCMesh::ElementSharesFace(int elem, int local, int face)
119 {
120  // Analogous to ElementSharesEdge.
121 
122  Element &el = elements[elem];
123  int f_index = faces[face].index;
124 
125  int &owner = tmp_owner[f_index];
126  owner = std::min(owner, el.rank);
127 
128  char &flag = tmp_shared_flag[f_index];
129  flag |= (el.rank == MyRank) ? 0x1 : 0x2;
130 
131  entity_index_rank[2].Append(Connection(f_index, el.rank));
132 
133  // derive globally consistent face ID from the global element sequence
134  int &el_loc = entity_elem_local[2][f_index];
135  if (el_loc < 0 || leaf_sfc_index[el.index] < leaf_sfc_index[(el_loc >> 4)])
136  {
137  el_loc = (el.index << 4) | local;
138  }
139 }
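// [Editor's illustration, not part of pncmesh.cpp] The 'el_loc' value above
// packs a leaf element index and a local entity number (0-15) into one int;
// later code recovers the two parts with '>> 4' and '& 0xf'. A small sketch of
// the packing and unpacking, using hypothetical helper names:

static int pack_elem_local_sketch(int elem_index, int local) // local in [0,15]
{
   return (elem_index << 4) | local;
}

static void unpack_elem_local_sketch(int el_loc, int &elem_index, int &local)
{
   elem_index = el_loc >> 4;  // element part
   local = el_loc & 0xf;      // low 4 bits: local entity number
}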
140 
141 void ParNCMesh::BuildFaceList()
142 {
143  if (HaveTets()) { GetEdgeList(); } // needed by TraverseTetEdge()
144 
145  // This is an extension of NCMesh::BuildFaceList() which also determines
146  // face ownership and prepares face processor groups.
147 
148  face_list.Clear();
149  shared_faces.Clear();
150  boundary_faces.SetSize(0);
151 
152  if (Dim < 3 || !leaf_elements.Size()) { return; }
153 
154  int nfaces = NFaces + NGhostFaces;
155 
156  tmp_owner.SetSize(nfaces);
157  tmp_owner = INT_MAX;
158 
159  tmp_shared_flag.SetSize(nfaces);
160  tmp_shared_flag = 0;
161 
162  entity_index_rank[2].SetSize(6*leaf_elements.Size() * 3/2);
163  entity_index_rank[2].SetSize(0);
164 
165  entity_elem_local[2].SetSize(nfaces);
166  entity_elem_local[2] = -1;
167 
168  NCMesh::BuildFaceList();
169 
170  InitOwners(nfaces, entity_owner[2]);
171  MakeSharedList(face_list, shared_faces);
172 
173  tmp_owner.DeleteAll();
174  tmp_shared_flag.DeleteAll();
175 
176  // create simple conforming (cut-mesh) groups now
177  CreateGroups(NFaces, entity_index_rank[2], entity_conf_group[2]);
178  // NOTE: entity_index_rank[2] is not deleted until CalculatePMatrixGroups
179 
180  CalcFaceOrientations();
181 }
182 
183 void ParNCMesh::ElementSharesEdge(int elem, int local, int enode)
184 {
185  // Called by NCMesh::BuildEdgeList when an edge is visited in a leaf element.
186  // This allows us to determine edge ownership and whether it is shared
187  // without duplicating all the HashTable lookups in NCMesh::BuildEdgeList().
188 
189  Element &el = elements[elem];
190  int e_index = nodes[enode].edge_index;
191 
192  int &owner = tmp_owner[e_index];
193  owner = std::min(owner, el.rank);
194 
195  char &flag = tmp_shared_flag[e_index];
196  flag |= (el.rank == MyRank) ? 0x1 : 0x2;
197 
198  entity_index_rank[1].Append(Connection(e_index, el.rank));
199 
200  // derive globally consistent edge ID from the global element sequence
201  int &el_loc = entity_elem_local[1][e_index];
202  if (el_loc < 0 || leaf_sfc_index[el.index] < leaf_sfc_index[(el_loc >> 4)])
203  {
204  el_loc = (el.index << 4) | local;
205  }
206 }
207 
208 void ParNCMesh::BuildEdgeList()
209 {
210  // This is an extension of NCMesh::BuildEdgeList() which also determines
211  // edge ownership and prepares edge processor groups.
212 
213  edge_list.Clear();
214  shared_edges.Clear();
215  if (Dim < 3) { boundary_faces.SetSize(0); }
216 
217  if (Dim < 2 || !leaf_elements.Size()) { return; }
218 
219  int nedges = NEdges + NGhostEdges;
220 
221  tmp_owner.SetSize(nedges);
222  tmp_owner = INT_MAX;
223 
224  tmp_shared_flag.SetSize(nedges);
225  tmp_shared_flag = 0;
226 
227  entity_index_rank[1].SetSize(12*leaf_elements.Size() * 3/2);
228  entity_index_rank[1].SetSize(0);
229 
230  entity_elem_local[1].SetSize(nedges);
231  entity_elem_local[1] = -1;
232 
233  NCMesh::BuildEdgeList();
234 
235  InitOwners(nedges, entity_owner[1]);
236  MakeSharedList(edge_list, shared_edges);
237 
238  tmp_owner.DeleteAll();
239  tmp_shared_flag.DeleteAll();
240 
241  // create simple conforming (cut-mesh) groups now
242  CreateGroups(NEdges, entity_index_rank[1], entity_conf_group[1]);
243  // NOTE: entity_index_rank[1] is not deleted until CalculatePMatrixGroups
244 }
245 
246 void ParNCMesh::ElementSharesVertex(int elem, int local, int vnode)
247 {
248  // Analogous to ElementSharesEdge.
249 
250  Element &el = elements[elem];
251  int v_index = nodes[vnode].vert_index;
252 
253  int &owner = tmp_owner[v_index];
254  owner = std::min(owner, el.rank);
255 
256  char &flag = tmp_shared_flag[v_index];
257  flag |= (el.rank == MyRank) ? 0x1 : 0x2;
258 
259  entity_index_rank[0].Append(Connection(v_index, el.rank));
260 
261  // derive globally consistent vertex ID from the global element sequence
262  int &el_loc = entity_elem_local[0][v_index];
263  if (el_loc < 0 || leaf_sfc_index[el.index] < leaf_sfc_index[(el_loc >> 4)])
264  {
265  el_loc = (el.index << 4) | local;
266  }
267 }
268 
269 void ParNCMesh::BuildVertexList()
270 {
271  // This is an extension of NCMesh::BuildVertexList() which also determines
272  // vertex ownership and creates vertex processor groups.
273 
274  int nvertices = NVertices + NGhostVertices;
275 
276  tmp_owner.SetSize(nvertices);
277  tmp_owner = INT_MAX;
278 
279  tmp_shared_flag.SetSize(nvertices);
280  tmp_shared_flag = 0;
281 
282  entity_index_rank[0].SetSize(8*leaf_elements.Size());
283  entity_index_rank[0].SetSize(0);
284 
285  entity_elem_local[0].SetSize(nvertices);
286  entity_elem_local[0] = -1;
287 
288  NCMesh::BuildVertexList();
289 
290  InitOwners(nvertices, entity_owner[0]);
291  MakeSharedList(vertex_list, shared_vertices);
292 
293  tmp_owner.DeleteAll();
294  tmp_shared_flag.DeleteAll();
295 
296  // create simple conforming (cut-mesh) groups now
297  CreateGroups(NVertices, entity_index_rank[0], entity_conf_group[0]);
298  // NOTE: entity_index_rank[0] is not deleted until CalculatePMatrixGroups
299 }
300 
301 void ParNCMesh::InitOwners(int num, Array<GroupId> &entity_owner_)
302 {
303  entity_owner_.SetSize(num);
304  for (int i = 0; i < num; i++)
305  {
306  entity_owner_[i] =
307  (tmp_owner[i] != INT_MAX) ? GetSingletonGroup(tmp_owner[i]) : 0;
308  }
309 }
310 
311 void ParNCMesh::MakeSharedList(const NCList &list, NCList &shared)
312 {
313  MFEM_VERIFY(tmp_shared_flag.Size(), "wrong code path");
314 
315  // combine flags of masters and slaves
316  for (int i = 0; i < list.masters.Size(); i++)
317  {
318  const Master &master = list.masters[i];
319  char &master_flag = tmp_shared_flag[master.index];
320  char master_old_flag = master_flag;
321 
322  for (int j = master.slaves_begin; j < master.slaves_end; j++)
323  {
324  int si = list.slaves[j].index;
325  if (si >= 0)
326  {
327  char &slave_flag = tmp_shared_flag[si];
328  master_flag |= slave_flag;
329  slave_flag |= master_old_flag;
330  }
331  else // special case: prism edge-face constraint
332  {
333  if (entity_owner[1][-1-si] != MyRank)
334  {
335  master_flag |= 0x2;
336  }
337  }
338  }
339  }
340 
341  shared.Clear();
342 
343  for (int i = 0; i < list.conforming.Size(); i++)
344  {
345  if (tmp_shared_flag[list.conforming[i].index] == 0x3)
346  {
347  shared.conforming.Append(list.conforming[i]);
348  }
349  }
350  for (int i = 0; i < list.masters.Size(); i++)
351  {
352  if (tmp_shared_flag[list.masters[i].index] == 0x3)
353  {
354  shared.masters.Append(list.masters[i]);
355  }
356  }
357  for (int i = 0; i < list.slaves.Size(); i++)
358  {
359  int si = list.slaves[i].index;
360  if (si >= 0 && tmp_shared_flag[si] == 0x3)
361  {
362  shared.slaves.Append(list.slaves[i]);
363  }
364  }
365 }
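// [Editor's illustration, not part of pncmesh.cpp] In ElementSharesVertex/Edge/
// Face above, each entity accumulates flag 0x1 when touched by one of this
// rank's elements and 0x2 when touched by a ghost element; only entities whose
// flag reaches 0x3 end up in the shared lists built here. A tiny self-contained
// sketch of that rule (hypothetical helper name):

static bool entity_becomes_shared_sketch(bool touched_by_local,
                                         bool touched_by_ghost)
{
   char flag = 0;
   if (touched_by_local) { flag |= 0x1; }
   if (touched_by_ghost) { flag |= 0x2; }
   return flag == 0x3; // shared = seen from both sides of the rank boundary
}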
366 
368 {
369  if (lhs.size() == rhs.size())
370  {
371  for (unsigned i = 0; i < lhs.size(); i++)
372  {
373  if (lhs[i] < rhs[i]) { return true; }
374  }
375  return false;
376  }
377  return lhs.size() < rhs.size();
378 }
379 
380 #ifdef MFEM_DEBUG
381 static bool group_sorted(const ParNCMesh::CommGroup &group)
382 {
383  for (unsigned i = 1; i < group.size(); i++)
384  {
385  if (group[i] <= group[i-1]) { return false; }
386  }
387  return true;
388 }
389 #endif
390 
391 ParNCMesh::GroupId ParNCMesh::GetGroupId(const CommGroup &group)
392 {
393  if (group.size() == 1 && group[0] == MyRank)
394  {
395  return 0;
396  }
397  MFEM_ASSERT(group_sorted(group), "invalid group");
398  GroupId &id = group_id[group];
399  if (!id)
400  {
401  id = groups.size();
402  groups.push_back(group);
403  }
404  return id;
405 }
406 
407 ParNCMesh::GroupId ParNCMesh::GetSingletonGroup(int rank)
408 {
409  MFEM_ASSERT(rank != INT_MAX, "invalid rank");
410  static std::vector<int> group;
411  group.resize(1);
412  group[0] = rank;
413  return GetGroupId(group);
414 }
415 
416 bool ParNCMesh::GroupContains(GroupId id, int rank) const
417 {
418  // TODO: would std::lower_bound() pay off here? Groups are usually small.
419  const CommGroup &group = groups[id];
420  for (unsigned i = 0; i < group.size(); i++)
421  {
422  if (group[i] == rank) { return true; }
423  }
424  return false;
425 }
426 
427 void ParNCMesh::CreateGroups(int nentities, Array<Connection> &index_rank,
428  Array<GroupId> &entity_group)
429 {
430  index_rank.Sort();
431  index_rank.Unique();
432 
433  entity_group.SetSize(nentities);
434  entity_group = 0;
435 
436  CommGroup group;
437  group.reserve(128);
438 
439  int begin = 0, end = 0;
440  while (begin < index_rank.Size())
441  {
442  int index = index_rank[begin].from;
443  if (index >= nentities)
444  {
445  break; // probably creating entity_conf_group (no ghosts)
446  }
447  while (end < index_rank.Size() && index_rank[end].from == index)
448  {
449  end++;
450  }
451  group.resize(end - begin);
452  for (int i = begin; i < end; i++)
453  {
454  group[i - begin] = index_rank[i].to;
455  }
456  entity_group[index] = GetGroupId(group);
457  begin = end;
458  }
459 }
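// [Editor's illustration, not part of pncmesh.cpp] CreateGroups() above walks a
// sorted, de-duplicated list of (entity, rank) connections and turns each run
// of equal entity indices into one communication group. The same pattern with
// plain STL containers (hypothetical types and name; assumes <vector> and
// <utility> are available in addition to the <map> included above):

static std::map<int, std::vector<int> >
group_ranks_by_entity_sketch(const std::vector<std::pair<int,int> > &index_rank)
{
   // 'index_rank' is assumed sorted by (entity, rank) and de-duplicated,
   // mirroring the index_rank.Sort(); index_rank.Unique(); calls above
   std::map<int, std::vector<int> > groups_by_entity;
   for (std::size_t i = 0; i < index_rank.size(); i++)
   {
      // ranks of one entity arrive consecutively and already sorted,
      // so each resulting group is a sorted list of processors
      groups_by_entity[index_rank[i].first].push_back(index_rank[i].second);
   }
   return groups_by_entity;
}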
460 
461 void ParNCMesh::AddConnections(int entity, int index, const Array<int> &ranks)
462 {
463  for (int i = 0; i < ranks.Size(); i++)
464  {
465  entity_index_rank[entity].Append(Connection(index, ranks[i]));
466  }
467 }
468 
469 void ParNCMesh::CalculatePMatrixGroups()
470 {
471  // make sure all entity_index_rank[i] arrays are filled
472  GetSharedVertices();
473  GetSharedEdges();
474  GetSharedFaces();
475 
476  int v[4], e[4], eo[4];
477 
478  Array<int> ranks;
479  ranks.Reserve(256);
480 
481  // connect slave edges to master edges and their vertices
482  for (int i = 0; i < shared_edges.masters.Size(); i++)
483  {
484  const Master &master_edge = shared_edges.masters[i];
485  ranks.SetSize(0);
486  for (int j = master_edge.slaves_begin; j < master_edge.slaves_end; j++)
487  {
488  int owner = entity_owner[1][edge_list.slaves[j].index];
489  ranks.Append(groups[owner][0]);
490  }
491  ranks.Sort();
492  ranks.Unique();
493 
494  AddConnections(1, master_edge.index, ranks);
495 
496  GetEdgeVertices(master_edge, v);
497  for (int j = 0; j < 2; j++)
498  {
499  AddConnections(0, v[j], ranks);
500  }
501  }
502 
503  // connect slave faces to master faces and their edges and vertices
504  for (int i = 0; i < shared_faces.masters.Size(); i++)
505  {
506  const Master &master_face = shared_faces.masters[i];
507  ranks.SetSize(0);
508  for (int j = master_face.slaves_begin; j < master_face.slaves_end; j++)
509  {
510  int si = face_list.slaves[j].index;
511  int owner = (si >= 0) ? entity_owner[2][si] // standard face dependency
512  /* */ : entity_owner[1][-1 - si]; // prism edge-face dep
513  ranks.Append(groups[owner][0]);
514  }
515  ranks.Sort();
516  ranks.Unique();
517 
518  AddConnections(2, master_face.index, ranks);
519 
520  int nfv = GetFaceVerticesEdges(master_face, v, e, eo);
521  for (int j = 0; j < nfv; j++)
522  {
523  AddConnections(0, v[j], ranks);
524  AddConnections(1, e[j], ranks);
525  }
526  }
527 
528  int nentities[3] =
529  {
530  NVertices + NGhostVertices,
531  NEdges + NGhostEdges,
532  NFaces + NGhostFaces
533  };
534 
535  // compress the index-rank arrays into group representation
536  for (int i = 0; i < 3; i++)
537  {
538  CreateGroups(nentities[i], entity_index_rank[i], entity_pmat_group[i]);
539  entity_index_rank[i].DeleteAll();
540  }
541 }
542 
543 int ParNCMesh::get_face_orientation(Face &face, Element &e1, Element &e2,
544  int local[2])
545 {
546  // Return face orientation in e2, assuming the face has orientation 0 in e1.
547  int ids[2][4];
548  Element* e[2] = { &e1, &e2 };
549  for (int i = 0; i < 2; i++)
550  {
551  // get local face number (remember that p1, p2, p3 are not in order, and
552  // p4 is not stored)
553  int lf = find_local_face(e[i]->Geom(),
554  find_node(*e[i], face.p1),
555  find_node(*e[i], face.p2),
556  find_node(*e[i], face.p3));
557  // optional output
558  if (local) { local[i] = lf; }
559 
560  // get node IDs for the face as seen from e[i]
561  const int* fv = GI[e[i]->Geom()].faces[lf];
562  for (int j = 0; j < 4; j++)
563  {
564  ids[i][j] = e[i]->node[fv[j]];
565  }
566  }
567 
568  return (ids[0][3] >= 0) ? Mesh::GetQuadOrientation(ids[0], ids[1])
569  /* */ : Mesh::GetTriOrientation(ids[0], ids[1]);
570 }
571 
572 void ParNCMesh::CalcFaceOrientations()
573 {
574  if (Dim < 3) { return; }
575 
576  // Calculate orientation of shared conforming faces.
577  // NOTE: face orientation is calculated relative to its lower rank element.
578  // Thanks to the ghost layer this can be done locally, without communication.
579 
580  face_orient.SetSize(NFaces);
581  face_orient = 0;
582 
583  for (auto face = faces.begin(); face != faces.end(); ++face)
584  {
585  if (face->elem[0] >= 0 && face->elem[1] >= 0 && face->index < NFaces)
586  {
587  Element *e1 = &elements[face->elem[0]];
588  Element *e2 = &elements[face->elem[1]];
589 
590  if (e1->rank == e2->rank) { continue; }
591  if (e1->rank > e2->rank) { std::swap(e1, e2); }
592 
593  face_orient[face->index] = get_face_orientation(*face, *e1, *e2);
594  }
595  }
596 }
597 
598 void ParNCMesh::GetBoundaryClosure(const Array<int> &bdr_attr_is_ess,
599  Array<int> &bdr_vertices,
600  Array<int> &bdr_edges)
601 {
602  NCMesh::GetBoundaryClosure(bdr_attr_is_ess, bdr_vertices, bdr_edges);
603 
604  int i, j;
605  // filter out ghost vertices
606  for (i = j = 0; i < bdr_vertices.Size(); i++)
607  {
608  if (bdr_vertices[i] < NVertices) { bdr_vertices[j++] = bdr_vertices[i]; }
609  }
610  bdr_vertices.SetSize(j);
611 
612  // filter out ghost edges
613  for (i = j = 0; i < bdr_edges.Size(); i++)
614  {
615  if (bdr_edges[i] < NEdges) { bdr_edges[j++] = bdr_edges[i]; }
616  }
617  bdr_edges.SetSize(j);
618 }
619 
620 
621 //// Neighbors /////////////////////////////////////////////////////////////////
622 
623 void ParNCMesh::UpdateLayers()
624 {
625  if (element_type.Size()) { return; }
626 
627  int nleaves = leaf_elements.Size();
628 
629  element_type.SetSize(nleaves);
630  for (int i = 0; i < nleaves; i++)
631  {
632  element_type[i] = (elements[leaf_elements[i]].rank == MyRank) ? 1 : 0;
633  }
634 
635  // determine the ghost layer
636  Array<char> ghost_set;
637  FindSetNeighbors(element_type, NULL, &ghost_set);
638 
639  // find the neighbors of the ghost layer
640  Array<char> boundary_set;
641  FindSetNeighbors(ghost_set, NULL, &boundary_set);
642 
643  ghost_layer.SetSize(0);
644  boundary_layer.SetSize(0);
645  for (int i = 0; i < nleaves; i++)
646  {
647  char &etype = element_type[i];
648  if (ghost_set[i])
649  {
650  etype = 2;
651  ghost_layer.Append(leaf_elements[i]);
652  }
653  else if (boundary_set[i] && etype)
654  {
655  etype = 3;
656  boundary_layer.Append(leaf_elements[i]);
657  }
658  }
659 }
660 
661 bool ParNCMesh::CheckElementType(int elem, int type)
662 {
663  Element &el = elements[elem];
664  if (!el.ref_type)
665  {
666  return (element_type[el.index] == type);
667  }
668  else
669  {
670  for (int i = 0; i < 8 && el.child[i] >= 0; i++)
671  {
672  if (!CheckElementType(el.child[i], type)) { return false; }
673  }
674  return true;
675  }
676 }
677 
678 void ParNCMesh::ElementNeighborProcessors(int elem, Array<int> &ranks)
679 {
680  ranks.SetSize(0); // preserve capacity
681 
682  // big shortcut: there are no neighbors if element_type == 1
683  if (CheckElementType(elem, 1)) { return; }
684 
685  // ok, we do need to look for neighbors;
686  // at least we can only search in the ghost layer
687  tmp_neighbors.SetSize(0);
688  FindNeighbors(elem, tmp_neighbors, &ghost_layer);
689 
690  // return a list of processors
691  for (int i = 0; i < tmp_neighbors.Size(); i++)
692  {
693  ranks.Append(elements[tmp_neighbors[i]].rank);
694  }
695  ranks.Sort();
696  ranks.Unique();
697 }
698 
699 template<class T>
700 static void set_to_array(const std::set<T> &set, Array<T> &array)
701 {
702  array.Reserve(set.size());
703  array.SetSize(0);
704  for (std::set<int>::iterator it = set.begin(); it != set.end(); ++it)
705  {
706  array.Append(*it);
707  }
708 }
709 
710 void ParNCMesh::NeighborProcessors(Array<int> &neighbors)
711 {
712  UpdateLayers();
713 
714  // TODO: look at groups instead?
715 
716  std::set<int> ranks;
717  for (int i = 0; i < ghost_layer.Size(); i++)
718  {
719  ranks.insert(elements[ghost_layer[i]].rank);
720  }
721  set_to_array(ranks, neighbors);
722 }
723 
724 
725 //// ParMesh compatibility /////////////////////////////////////////////////////
726 
727 void ParNCMesh::MakeSharedTable(int ngroups, int ent, Array<int> &shared_local,
728  Table &group_shared, Array<char> *entity_geom,
729  char geom)
730 {
731  const Array<GroupId> &conf_group = entity_conf_group[ent];
732 
733  group_shared.MakeI(ngroups-1);
734 
735  // count shared entities
736  int num_shared = 0;
737  for (int i = 0; i < conf_group.Size(); i++)
738  {
739  if (conf_group[i])
740  {
741  if (entity_geom && (*entity_geom)[i] != geom) { continue; }
742 
743  num_shared++;
744  group_shared.AddAColumnInRow(conf_group[i]-1);
745  }
746  }
747 
748  shared_local.SetSize(num_shared);
749  group_shared.MakeJ();
750 
751  // fill shared_local and group_shared
752  for (int i = 0, j = 0; i < conf_group.Size(); i++)
753  {
754  if (conf_group[i])
755  {
756  if (entity_geom && (*entity_geom)[i] != geom) { continue; }
757 
758  shared_local[j] = i;
759  group_shared.AddConnection(conf_group[i]-1, j);
760  j++;
761  }
762  }
763  group_shared.ShiftUpI();
764 
765  // sort the groups consistently across processors
766  for (int i = 0; i < group_shared.Size(); i++)
767  {
768  int size = group_shared.RowSize(i);
769  int *row = group_shared.GetRow(i);
770 
771  Array<int> ref_row(row, size);
772  ref_row.Sort([&](const int a, const int b)
773  {
774  int el_loc_a = entity_elem_local[ent][shared_local[a]];
775  int el_loc_b = entity_elem_local[ent][shared_local[b]];
776 
777  int lsi_a = leaf_sfc_index[el_loc_a >> 4];
778  int lsi_b = leaf_sfc_index[el_loc_b >> 4];
779 
780  if (lsi_a != lsi_b) { return lsi_a < lsi_b; }
781 
782  return (el_loc_a & 0xf) < (el_loc_b & 0xf);
783  });
784  }
785 }
786 
787 void ParNCMesh::GetConformingSharedStructures(ParMesh &pmesh)
788 {
789  if (leaf_elements.Size())
790  {
791  // make sure we have entity_conf_group[x] and the ordering arrays
792  for (int ent = 0; ent < Dim; ent++)
793  {
794  GetSharedList(ent);
795  MFEM_VERIFY(entity_conf_group[ent].Size(), "internal error");
796  MFEM_VERIFY(entity_elem_local[ent].Size(), "internal error");
797  }
798  }
799 
800  // create ParMesh groups, and the map (ncmesh_group -> pmesh_group)
801  Array<int> group_map(groups.size());
802  {
803  group_map = 0;
804  IntegerSet iset;
805  ListOfIntegerSets int_groups;
806  for (unsigned i = 0; i < groups.size(); i++)
807  {
808  if (groups[i].size() > 1 || !i) // skip singleton groups
809  {
810  iset.Recreate(groups[i].size(), groups[i].data());
811  group_map[i] = int_groups.Insert(iset);
812  }
813  }
814  pmesh.gtopo.Create(int_groups, 822);
815  }
816 
817  // renumber groups in entity_conf_group[] (due to missing singletons)
818  for (int ent = 0; ent < 3; ent++)
819  {
820  for (int i = 0; i < entity_conf_group[ent].Size(); i++)
821  {
822  GroupId &ecg = entity_conf_group[ent][i];
823  ecg = group_map[ecg];
824  }
825  }
826 
827  // create shared to local index mappings and group tables
828  int ng = pmesh.gtopo.NGroups();
829  MakeSharedTable(ng, 0, pmesh.svert_lvert, pmesh.group_svert);
830  MakeSharedTable(ng, 1, pmesh.sedge_ledge, pmesh.group_sedge);
831 
832  Array<int> slt, slq;
833  MakeSharedTable(ng, 2, slt, pmesh.group_stria, &face_geom, Geometry::TRIANGLE);
834  MakeSharedTable(ng, 2, slq, pmesh.group_squad, &face_geom, Geometry::SQUARE);
835 
836  pmesh.sface_lface = slt;
837  pmesh.sface_lface.Append(slq);
838 
839  // create shared_edges
840  for (int i = 0; i < pmesh.shared_edges.Size(); i++)
841  {
842  delete pmesh.shared_edges[i];
843  }
844  pmesh.shared_edges.SetSize(pmesh.sedge_ledge.Size());
845  for (int i = 0; i < pmesh.shared_edges.Size(); i++)
846  {
847  int el_loc = entity_elem_local[1][pmesh.sedge_ledge[i]];
848  MeshId edge_id(-1, leaf_elements[(el_loc >> 4)], (el_loc & 0xf));
849 
850  int v[2];
851  GetEdgeVertices(edge_id, v, false);
852  pmesh.shared_edges[i] = new Segment(v, 1);
853  }
854 
855  // create shared_trias
856  pmesh.shared_trias.SetSize(slt.Size());
857  for (int i = 0; i < slt.Size(); i++)
858  {
859  int el_loc = entity_elem_local[2][slt[i]];
860  MeshId face_id(-1, leaf_elements[(el_loc >> 4)], (el_loc & 0xf));
861 
862  int v[4], e[4], eo[4];
863  GetFaceVerticesEdges(face_id, v, e, eo);
864  pmesh.shared_trias[i].Set(v);
865  }
866 
867  // create shared_quads
868  pmesh.shared_quads.SetSize(slq.Size());
869  for (int i = 0; i < slq.Size(); i++)
870  {
871  int el_loc = entity_elem_local[2][slq[i]];
872  MeshId face_id(-1, leaf_elements[(el_loc >> 4)], (el_loc & 0xf));
873 
874  int e[4], eo[4];
875  GetFaceVerticesEdges(face_id, pmesh.shared_quads[i].v, e, eo);
876  }
877 
878  // free the arrays, they're not needed anymore (until next mesh update)
879  for (int ent = 0; ent < Dim; ent++)
880  {
881  entity_conf_group[ent].DeleteAll();
882  entity_elem_local[ent].DeleteAll();
883  }
884 }
885 
886 void ParNCMesh::GetFaceNeighbors(ParMesh &pmesh)
887 {
888  ClearAuxPM();
889 
890  const NCList &shared = (Dim == 3) ? GetSharedFaces() : GetSharedEdges();
891  const NCList &full_list = (Dim == 3) ? GetFaceList() : GetEdgeList();
892 
893  Array<Element*> fnbr;
894  Array<Connection> send_elems;
895 
896  int bound = shared.conforming.Size() + shared.slaves.Size();
897 
898  fnbr.Reserve(bound);
899  send_elems.Reserve(bound);
900 
901  // go over all shared faces and collect face neighbor elements
902  for (int i = 0; i < shared.conforming.Size(); i++)
903  {
904  const MeshId &cf = shared.conforming[i];
905  Face* face = GetFace(elements[cf.element], cf.local);
906  MFEM_ASSERT(face != NULL, "");
907 
908  MFEM_ASSERT(face->elem[0] >= 0 && face->elem[1] >= 0, "");
909  Element* e[2] = { &elements[face->elem[0]], &elements[face->elem[1]] };
910 
911  if (e[0]->rank == MyRank) { std::swap(e[0], e[1]); }
912  MFEM_ASSERT(e[0]->rank != MyRank && e[1]->rank == MyRank, "");
913 
914  fnbr.Append(e[0]);
915  send_elems.Append(Connection(e[0]->rank, e[1]->index));
916  }
917 
918  for (int i = 0; i < shared.masters.Size(); i++)
919  {
920  const Master &mf = shared.masters[i];
921  for (int j = mf.slaves_begin; j < mf.slaves_end; j++)
922  {
923  const Slave &sf = full_list.slaves[j];
924  if (sf.element < 0) { continue; }
925 
926  MFEM_ASSERT(mf.element >= 0, "");
927  Element* e[2] = { &elements[mf.element], &elements[sf.element] };
928 
929  bool loc0 = (e[0]->rank == MyRank);
930  bool loc1 = (e[1]->rank == MyRank);
931  if (loc0 == loc1) { continue; }
932  if (loc0) { std::swap(e[0], e[1]); }
933 
934  fnbr.Append(e[0]);
935  send_elems.Append(Connection(e[0]->rank, e[1]->index));
936  }
937  }
938 
939  MFEM_ASSERT(fnbr.Size() <= bound, "oops, bad upper bound");
940 
941  // remove duplicate face neighbor elements and sort them by rank & index
942  // (note that the send table is sorted the same way and the order is also the
943  // same on different processors, this is important for ExchangeFaceNbrData)
944  fnbr.Sort();
945  fnbr.Unique();
946  fnbr.Sort([](const Element* a, const Element* b)
947  {
948  return (a->rank != b->rank) ? a->rank < b->rank
949  /* */ : a->index < b->index;
950  });
951 
952  // put the ranks into 'face_nbr_group'
953  for (int i = 0; i < fnbr.Size(); i++)
954  {
955  if (!i || fnbr[i]->rank != pmesh.face_nbr_group.Last())
956  {
957  pmesh.face_nbr_group.Append(fnbr[i]->rank);
958  }
959  }
960  int nranks = pmesh.face_nbr_group.Size();
961 
962  // create a new mfem::Element for each face neighbor element
963  pmesh.face_nbr_elements.SetSize(0);
964  pmesh.face_nbr_elements.Reserve(fnbr.Size());
965 
968 
969  Array<int> fnbr_index(NGhostElements);
970  fnbr_index = -1;
971 
972  std::map<int, int> vert_map;
973  for (int i = 0; i < fnbr.Size(); i++)
974  {
975  Element* elem = fnbr[i];
976  mfem::Element* fne = NewMeshElement(elem->geom);
977  fne->SetAttribute(elem->attribute);
978  pmesh.face_nbr_elements.Append(fne);
979 
980  GeomInfo& gi = GI[(int) elem->geom];
981  for (int k = 0; k < gi.nv; k++)
982  {
983  int &v = vert_map[elem->node[k]];
984  if (!v) { v = vert_map.size(); }
985  fne->GetVertices()[k] = v-1;
986  }
987 
988  if (!i || elem->rank != fnbr[i-1]->rank)
989  {
990  pmesh.face_nbr_elements_offset.Append(i);
991  }
992 
993  MFEM_ASSERT(elem->index >= NElements, "not a ghost element");
994  fnbr_index[elem->index - NElements] = i;
995  }
996  pmesh.face_nbr_elements_offset.Append(fnbr.Size());
997 
998  // create vertices in 'face_nbr_vertices'
999  {
1000  pmesh.face_nbr_vertices.SetSize(vert_map.size());
1001  if (coordinates.Size())
1002  {
1003  tmp_vertex = new TmpVertex[nodes.NumIds()]; // TODO: something cheaper?
1004  for (auto it = vert_map.begin(); it != vert_map.end(); ++it)
1005  {
1006  pmesh.face_nbr_vertices[it->second-1].SetCoords(
1007  spaceDim, CalcVertexPos(it->first));
1008  }
1009  delete [] tmp_vertex;
1010  }
1011  }
1012 
1013  // make the 'send_face_nbr_elements' table
1014  send_elems.Sort();
1015  send_elems.Unique();
1016 
1017  for (int i = 0, last_rank = -1; i < send_elems.Size(); i++)
1018  {
1019  Connection &c = send_elems[i];
1020  if (c.from != last_rank)
1021  {
1022  // renumber rank to position in 'face_nbr_group'
1023  last_rank = c.from;
1024  c.from = pmesh.face_nbr_group.Find(c.from);
1025  }
1026  else
1027  {
1028  c.from = send_elems[i-1].from; // avoid search
1029  }
1030  }
1031  pmesh.send_face_nbr_elements.MakeFromList(nranks, send_elems);
1032 
1033  // go over the shared faces again and modify their Mesh::FaceInfo
1034  for (int i = 0; i < shared.conforming.Size(); i++)
1035  {
1036  const MeshId &cf = shared.conforming[i];
1037  Face* face = GetFace(elements[cf.element], cf.local);
1038 
1039  Element* e[2] = { &elements[face->elem[0]], &elements[face->elem[1]] };
1040  if (e[0]->rank == MyRank) { std::swap(e[0], e[1]); }
1041 
1042  Mesh::FaceInfo &fi = pmesh.faces_info[cf.index];
1043  fi.Elem2No = -1 - fnbr_index[e[0]->index - NElements];
1044 
1045  if (Dim == 3)
1046  {
1047  int local[2];
1048  int o = get_face_orientation(*face, *e[1], *e[0], local);
1049  fi.Elem2Inf = 64*local[1] + o;
1050  }
1051  else
1052  {
1053  fi.Elem2Inf = 64*find_element_edge(*e[0], face->p1, face->p3) + 1;
1054  }
1055  }
1056 
1057  if (shared.slaves.Size())
1058  {
1059  int nfaces = NFaces, nghosts = NGhostFaces;
1060  if (Dim <= 2) { nfaces = NEdges, nghosts = NGhostEdges; }
1061 
1062  // enlarge Mesh::faces_info for ghost slaves
1063  MFEM_ASSERT(pmesh.faces_info.Size() == nfaces, "");
1064  MFEM_ASSERT(pmesh.GetNumFaces() == nfaces, "");
1065  pmesh.faces_info.SetSize(nfaces + nghosts);
1066  for (int i = nfaces; i < pmesh.faces_info.Size(); i++)
1067  {
1068  Mesh::FaceInfo &fi = pmesh.faces_info[i];
1069  fi.Elem1No = fi.Elem2No = -1;
1070  fi.Elem1Inf = fi.Elem2Inf = -1;
1071  fi.NCFace = -1;
1072  }
1073  // Note that some of the indices i >= nfaces in pmesh.faces_info will
1074  // remain untouched below and they will have Elem1No == -1, in particular.
1075 
1076  // fill in FaceInfo for shared slave faces
1077  for (int i = 0; i < shared.masters.Size(); i++)
1078  {
1079  const Master &mf = shared.masters[i];
1080  for (int j = mf.slaves_begin; j < mf.slaves_end; j++)
1081  {
1082  const Slave &sf = full_list.slaves[j];
1083  if (sf.element < 0) { continue; }
1084 
1085  MFEM_ASSERT(mf.element >= 0, "");
1086  Element &sfe = elements[sf.element];
1087  Element &mfe = elements[mf.element];
1088 
1089  bool sloc = (sfe.rank == MyRank);
1090  bool mloc = (mfe.rank == MyRank);
1091  if (sloc == mloc) { continue; }
1092 
1093  Mesh::FaceInfo &fi = pmesh.faces_info[sf.index];
1094  fi.Elem1No = sfe.index;
1095  fi.Elem2No = mfe.index;
1096  fi.Elem1Inf = 64 * sf.local;
1097  fi.Elem2Inf = 64 * mf.local;
1098 
1099  if (!sloc)
1100  {
1101  // 'fi' is the info for a ghost slave face with index:
1102  // sf.index >= nfaces
1103  std::swap(fi.Elem1No, fi.Elem2No);
1104  std::swap(fi.Elem1Inf, fi.Elem2Inf);
1105  // After the above swap, Elem1No refers to the local, master-side
1106  // element. In other words, side 1 IS NOT the side that generated
1107  // the face.
1108  }
1109  else
1110  {
1111  // 'fi' is the info for a local slave face with index:
1112  // sf.index < nfaces
1113  // Here, Elem1No refers to the local, slave-side element.
1114  // In other words, side 1 IS the side that generated the face.
1115  }
1116  MFEM_ASSERT(fi.Elem2No >= NElements, "");
1117  fi.Elem2No = -1 - fnbr_index[fi.Elem2No - NElements];
1118 
1119  const DenseMatrix* pm = full_list.point_matrices[sf.geom][sf.matrix];
1120  if (!sloc && Dim == 3)
1121  {
1122  // TODO: does this handle triangle faces correctly?
1123 
1124  // ghost slave in 3D needs flipping orientation
1125  DenseMatrix* pm2 = new DenseMatrix(*pm);
1126  std::swap((*pm2)(0,1), (*pm2)(0,3));
1127  std::swap((*pm2)(1,1), (*pm2)(1,3));
1128  aux_pm_store.Append(pm2);
1129 
1130  fi.Elem2Inf ^= 1;
1131  pm = pm2;
1132 
1133  // The problem is that sf.point_matrix is designed for P matrix
1134  // construction and always has orientation relative to the slave
1135  // face. In ParMesh::GetSharedFaceTransformations the result
1136  // would therefore be the same on both processors, which is not
1137  // how that function works for conforming faces. The orientation
1138  // of Loc1, Loc2 and Face needs to always be relative to Element
1139  // 1, which is the element containing the slave face on one
1140  // processor, but on the other it is the element containing the
1141  // master face. In the latter case we need to flip the pm.
1142  }
1143  else if (!sloc && Dim == 2)
1144  {
1145  fi.Elem2Inf ^= 1; // set orientation to 1
1146  // The point matrix (used to define "side 1" which is the same as
1147  // "parent side" in this case) does not require a flip since it
1148  // is aligned with the parent side, so NO flip is performed in
1149  // Mesh::ApplyLocalSlaveTransformation.
1150  }
1151 
1152  MFEM_ASSERT(fi.NCFace < 0, "");
1153  fi.NCFace = pmesh.nc_faces_info.Size();
1154  pmesh.nc_faces_info.Append(Mesh::NCFaceInfo(true, sf.master, pm));
1155  }
1156  }
1157  }
1158 
1159  // NOTE: this function skips ParMesh::send_face_nbr_vertices and
1160  // ParMesh::face_nbr_vertices_offset, these are not used outside of ParMesh
1161 }
1162 
1163 void ParNCMesh::ClearAuxPM()
1164 {
1165  for (int i = 0; i < aux_pm_store.Size(); i++)
1166  {
1167  delete aux_pm_store[i];
1168  }
1169  aux_pm_store.DeleteAll();
1170 }
1171 
1172 
1173 //// Prune, Refine, Derefine ///////////////////////////////////////////////////
1174 
1175 bool ParNCMesh::PruneTree(int elem)
1176 {
1177  Element &el = elements[elem];
1178  if (el.ref_type)
1179  {
1180  bool remove[8];
1181  bool removeAll = true;
1182 
1183  // determine which subtrees can be removed (and whether it's all of them)
1184  for (int i = 0; i < 8; i++)
1185  {
1186  remove[i] = false;
1187  if (el.child[i] >= 0)
1188  {
1189  remove[i] = PruneTree(el.child[i]);
1190  if (!remove[i]) { removeAll = false; }
1191  }
1192  }
1193 
1194  // all children can be removed, let the (maybe indirect) parent do it
1195  if (removeAll) { return true; }
1196 
1197  // not all children can be removed, but remove those that can be
1198  for (int i = 0; i < 8; i++)
1199  {
1200  if (remove[i]) { DerefineElement(el.child[i]); }
1201  }
1202 
1203  return false; // need to keep this element and up
1204  }
1205  else
1206  {
1207  // return true if this leaf can be removed
1208  return el.rank < 0;
1209  }
1210 }
1211 
1212 void ParNCMesh::Prune()
1213 {
1214  if (!Iso && Dim == 3)
1215  {
1216  if (MyRank == 0)
1217  {
1218  MFEM_WARNING("Can't prune 3D aniso meshes yet.");
1219  }
1220  return;
1221  }
1222 
1223  UpdateLayers();
1224 
1225  for (int i = 0; i < leaf_elements.Size(); i++)
1226  {
1227  // rank of elements beyond the ghost layer is unknown / not updated
1228  if (element_type[i] == 0)
1229  {
1230  elements[leaf_elements[i]].rank = -1;
1231  // NOTE: rank == -1 will make the element disappear from leaf_elements
1232  // on next Update, see NCMesh::CollectLeafElements
1233  }
1234  }
1235 
1236  // derefine subtrees whose leaves are all unneeded
1237  for (int i = 0; i < root_state.Size(); i++)
1238  {
1239  if (PruneTree(i)) { DerefineElement(i); }
1240  }
1241 
1242  Update();
1243 }
1244 
1245 
1246 void ParNCMesh::Refine(const Array<Refinement> &refinements)
1247 {
1248  if (NRanks == 1)
1249  {
1250  NCMesh::Refine(refinements);
1251  return;
1252  }
1253 
1254  for (int i = 0; i < refinements.Size(); i++)
1255  {
1256  const Refinement &ref = refinements[i];
1257  MFEM_VERIFY(ref.ref_type == 7 || Dim < 3,
1258  "anisotropic parallel refinement not supported yet in 3D.");
1259  }
1260  MFEM_VERIFY(Iso || Dim < 3,
1261  "parallel refinement of 3D aniso meshes not supported yet.");
1262 
1263  NeighborRefinementMessage::Map send_ref;
1264 
1265  // create refinement messages to all neighbors (NOTE: some may be empty)
1266  Array<int> neighbors;
1267  NeighborProcessors(neighbors);
1268  for (int i = 0; i < neighbors.Size(); i++)
1269  {
1270  send_ref[neighbors[i]].SetNCMesh(this);
1271  }
1272 
1273  // populate messages: all refinements that occur next to the processor
1274  // boundary need to be sent to the adjoining neighbors so they can keep
1275  // their ghost layer up to date
1276  Array<int> ranks;
1277  ranks.Reserve(64);
1278  for (int i = 0; i < refinements.Size(); i++)
1279  {
1280  const Refinement &ref = refinements[i];
1281  MFEM_ASSERT(ref.index < NElements, "");
1282  int elem = leaf_elements[ref.index];
1283  ElementNeighborProcessors(elem, ranks);
1284  for (int j = 0; j < ranks.Size(); j++)
1285  {
1286  send_ref[ranks[j]].AddRefinement(elem, ref.ref_type);
1287  }
1288  }
1289 
1290  // send the messages (overlap with local refinements)
1291  NeighborRefinementMessage::IsendAll(send_ref, MyComm);
1292 
1293  // do local refinements
1294  for (int i = 0; i < refinements.Size(); i++)
1295  {
1296  const Refinement &ref = refinements[i];
1297  NCMesh::RefineElement(leaf_elements[ref.index], ref.ref_type);
1298  }
1299 
1300  // receive (ghost layer) refinements from all neighbors
1301  for (int j = 0; j < neighbors.Size(); j++)
1302  {
1303  int rank, size;
1304  NeighborRefinementMessage::Probe(rank, size, MyComm);
1305 
1306  NeighborRefinementMessage msg;
1307  msg.SetNCMesh(this);
1308  msg.Recv(rank, size, MyComm);
1309 
1310  // do the ghost refinements
1311  for (int i = 0; i < msg.Size(); i++)
1312  {
1313  NCMesh::RefineElement(msg.elements[i], msg.values[i]);
1314  }
1315  }
1316 
1317  Update();
1318 
1319  // make sure we can delete the send buffers
1320  NeighborRefinementMessage::WaitAllSent(send_ref);
1321 }
1322 
1323 
1324 void ParNCMesh::LimitNCLevel(int max_nc_level)
1325 {
1326  MFEM_VERIFY(max_nc_level >= 1, "'max_nc_level' must be 1 or greater.");
1327 
1328  while (1)
1329  {
1330  Array<Refinement> refinements;
1331  GetLimitRefinements(refinements, max_nc_level);
1332 
1333  long size = refinements.Size(), glob_size;
1334  MPI_Allreduce(&size, &glob_size, 1, MPI_LONG, MPI_SUM, MyComm);
1335 
1336  if (!glob_size) { break; }
1337 
1338  Refine(refinements);
1339  }
1340 }
1341 
1342 void ParNCMesh::GetFineToCoarsePartitioning(const Array<int> &derefs,
1343  Array<int> &new_ranks) const
1344 {
1345  new_ranks.SetSize(leaf_elements.Size()-GetNGhostElements());
1346  for (int i = 0; i < leaf_elements.Size()-GetNGhostElements(); i++)
1347  {
1348  new_ranks[i] = elements[leaf_elements[i]].rank;
1349  }
1350 
1351  for (int i = 0; i < derefs.Size(); i++)
1352  {
1353  int row = derefs[i];
1354  MFEM_VERIFY(row >= 0 && row < derefinements.Size(),
1355  "invalid derefinement number.");
1356 
1357  const int* fine = derefinements.GetRow(row);
1358  int size = derefinements.RowSize(row);
1359 
1360  int coarse_rank = INT_MAX;
1361  for (int j = 0; j < size; j++)
1362  {
1363  int fine_rank = elements[leaf_elements[fine[j]]].rank;
1364  coarse_rank = std::min(coarse_rank, fine_rank);
1365  }
1366  for (int j = 0; j < size; j++)
1367  {
1368  new_ranks[fine[j]] = coarse_rank;
1369  }
1370  }
1371 }
1372 
1373 void ParNCMesh::Derefine(const Array<int> &derefs)
1374 {
1375  MFEM_VERIFY(Dim < 3 || Iso,
1376  "derefinement of 3D anisotropic meshes not implemented yet.");
1377 
1378  InitDerefTransforms();
1379 
1380  // store fine element ranks
1381  old_index_or_rank.SetSize(leaf_elements.Size());
1382  for (int i = 0; i < leaf_elements.Size(); i++)
1383  {
1384  old_index_or_rank[i] = elements[leaf_elements[i]].rank;
1385  }
1386 
1387  // back up the leaf_elements array
1388  Array<int> old_elements;
1389  leaf_elements.Copy(old_elements);
1390 
1391  // *** STEP 1: redistribute elements to avoid complex derefinements ***
1392 
1393  Array<int> new_ranks(leaf_elements.Size());
1394  for (int i = 0; i < leaf_elements.Size(); i++)
1395  {
1396  new_ranks[i] = elements[leaf_elements[i]].rank;
1397  }
1398 
1399  // make the lowest rank get all the fine elements for each derefinement
1400  for (int i = 0; i < derefs.Size(); i++)
1401  {
1402  int row = derefs[i];
1403  MFEM_VERIFY(row >= 0 && row < derefinements.Size(),
1404  "invalid derefinement number.");
1405 
1406  const int* fine = derefinements.GetRow(row);
1407  int size = derefinements.RowSize(row);
1408 
1409  int coarse_rank = INT_MAX;
1410  for (int j = 0; j < size; j++)
1411  {
1412  int fine_rank = elements[leaf_elements[fine[j]]].rank;
1413  coarse_rank = std::min(coarse_rank, fine_rank);
1414  }
1415  for (int j = 0; j < size; j++)
1416  {
1417  new_ranks[fine[j]] = coarse_rank;
1418  }
1419  }
1420 
1421  int target_elements = 0;
1422  for (int i = 0; i < new_ranks.Size(); i++)
1423  {
1424  if (new_ranks[i] == MyRank) { target_elements++; }
1425  }
1426 
1427  // redistribute elements slightly to get rid of complex derefinements
1428  // straddling processor boundaries *and* update the ghost layer
1429  RedistributeElements(new_ranks, target_elements, false);
1430 
1431  // *** STEP 2: derefine now, communication similar to Refine() ***
1432 
1433  NeighborDerefinementMessage::Map send_deref;
1434 
1435  // create derefinement messages to all neighbors (NOTE: some may be empty)
1436  Array<int> neighbors;
1437  NeighborProcessors(neighbors);
1438  for (int i = 0; i < neighbors.Size(); i++)
1439  {
1440  send_deref[neighbors[i]].SetNCMesh(this);
1441  }
1442 
1443  // derefinements that occur next to the processor boundary need to be sent
1444  // to the adjoining neighbors to keep their ghost layers in sync
1445  Array<int> ranks;
1446  ranks.Reserve(64);
1447  for (int i = 0; i < derefs.Size(); i++)
1448  {
1449  const int* fine = derefinements.GetRow(derefs[i]);
1450  int parent = elements[old_elements[fine[0]]].parent;
1451 
1452  // send derefinement to neighbors
1453  ElementNeighborProcessors(parent, ranks);
1454  for (int j = 0; j < ranks.Size(); j++)
1455  {
1456  send_deref[ranks[j]].AddDerefinement(parent, new_ranks[fine[0]]);
1457  }
1458  }
1459  NeighborDerefinementMessage::IsendAll(send_deref, MyComm);
1460 
1461  // restore old (pre-redistribution) element indices, for SetDerefMatrixCodes
1462  for (int i = 0; i < leaf_elements.Size(); i++)
1463  {
1464  elements[leaf_elements[i]].index = -1;
1465  }
1466  for (int i = 0; i < old_elements.Size(); i++)
1467  {
1468  elements[old_elements[i]].index = i;
1469  }
1470 
1471  // do local derefinements
1472  Array<int> coarse;
1473  old_elements.Copy(coarse);
1474  for (int i = 0; i < derefs.Size(); i++)
1475  {
1476  const int* fine = derefinements.GetRow(derefs[i]);
1477  int parent = elements[old_elements[fine[0]]].parent;
1478 
1479  // record the relation of the fine elements to their parent
1480  SetDerefMatrixCodes(parent, coarse);
1481 
1482  NCMesh::DerefineElement(parent);
1483  }
1484 
1485  // receive ghost layer derefinements from all neighbors
1486  for (int j = 0; j < neighbors.Size(); j++)
1487  {
1488  int rank, size;
1489  NeighborDerefinementMessage::Probe(rank, size, MyComm);
1490 
1491  NeighborDerefinementMessage msg;
1492  msg.SetNCMesh(this);
1493  msg.Recv(rank, size, MyComm);
1494 
1495  // do the ghost derefinements
1496  for (int i = 0; i < msg.Size(); i++)
1497  {
1498  int elem = msg.elements[i];
1499  if (elements[elem].ref_type)
1500  {
1501  SetDerefMatrixCodes(elem, coarse);
1502  NCMesh::DerefineElement(elem);
1503  }
1504  elements[elem].rank = msg.values[i];
1505  }
1506  }
1507 
1508  // update leaf_elements, Element::index etc.
1509  Update();
1510 
1511  UpdateLayers();
1512 
1513  // link old fine elements to the new coarse elements
1514  for (int i = 0; i < coarse.Size(); i++)
1515  {
1516  int index = elements[coarse[i]].index;
1517  if (element_type[index] == 0)
1518  {
1519  // this coarse element will get pruned, encode who owns it now
1520  index = -1 - elements[coarse[i]].rank;
1521  }
1522  transforms.embeddings[i].parent = index;
1523  }
1524 
1525  leaf_elements.Copy(old_elements);
1526 
1527  Prune();
1528 
1529  // renumber coarse element indices after pruning
1530  for (int i = 0; i < coarse.Size(); i++)
1531  {
1532  int &index = transforms.embeddings[i].parent;
1533  if (index >= 0)
1534  {
1535  index = elements[old_elements[index]].index;
1536  }
1537  }
1538 
1539  // make sure we can delete all send buffers
1540  NeighborDerefinementMessage::WaitAllSent(send_deref);
1541 }
1542 
1543 
1544 template<typename Type>
1545 void ParNCMesh::SynchronizeDerefinementData(Array<Type> &elem_data,
1546  const Table &deref_table)
1547 {
1548  const MPI_Datatype datatype = MPITypeMap<Type>::mpi_type;
1549 
1550  Array<MPI_Request*> requests;
1551  Array<int> neigh;
1552 
1553  requests.Reserve(64);
1554  neigh.Reserve(8);
1555 
1556  // make room for ghost values (indices beyond NumElements)
1557  elem_data.SetSize(leaf_elements.Size(), 0);
1558 
1559  for (int i = 0; i < deref_table.Size(); i++)
1560  {
1561  const int* fine = deref_table.GetRow(i);
1562  int size = deref_table.RowSize(i);
1563  MFEM_ASSERT(size <= 8, "");
1564 
1565  int ranks[8], min_rank = INT_MAX, max_rank = INT_MIN;
1566  for (int j = 0; j < size; j++)
1567  {
1568  ranks[j] = elements[leaf_elements[fine[j]]].rank;
1569  min_rank = std::min(min_rank, ranks[j]);
1570  max_rank = std::max(max_rank, ranks[j]);
1571  }
1572 
1573  // exchange values for derefinements that straddle processor boundaries
1574  if (min_rank != max_rank)
1575  {
1576  neigh.SetSize(0);
1577  for (int j = 0; j < size; j++)
1578  {
1579  if (ranks[j] != MyRank) { neigh.Append(ranks[j]); }
1580  }
1581  neigh.Sort();
1582  neigh.Unique();
1583 
1584  for (int j = 0; j < size; j++/*pass*/)
1585  {
1586  Type *data = &elem_data[fine[j]];
1587 
1588  int rnk = ranks[j], len = 1; /*j;
1589  do { j++; } while (j < size && ranks[j] == rnk);
1590  len = j - len;*/
1591 
1592  if (rnk == MyRank)
1593  {
1594  for (int k = 0; k < neigh.Size(); k++)
1595  {
1596  MPI_Request* req = new MPI_Request;
1597  MPI_Isend(data, len, datatype, neigh[k], 292, MyComm, req);
1598  requests.Append(req);
1599  }
1600  }
1601  else
1602  {
1603  MPI_Request* req = new MPI_Request;
1604  MPI_Irecv(data, len, datatype, rnk, 292, MyComm, req);
1605  requests.Append(req);
1606  }
1607  }
1608  }
1609  }
1610 
1611  for (int i = 0; i < requests.Size(); i++)
1612  {
1613  MPI_Wait(requests[i], MPI_STATUS_IGNORE);
1614  delete requests[i];
1615  }
1616 }
1617 
1618 // instantiate SynchronizeDerefinementData for int and double
1619 template void
1620 ParNCMesh::SynchronizeDerefinementData<int>(Array<int> &, const Table &);
1621 template void
1622 ParNCMesh::SynchronizeDerefinementData<double>(Array<double> &, const Table &);
1623 
1624 
1625 void ParNCMesh::CheckDerefinementNCLevel(const Table &deref_table,
1626  Array<int> &level_ok, int max_nc_level)
1627 {
1628  Array<int> leaf_ok(leaf_elements.Size());
1629  leaf_ok = 1;
1630 
1631  // check elements that we own
1632  for (int i = 0; i < deref_table.Size(); i++)
1633  {
1634  const int *fine = deref_table.GetRow(i),
1635  size = deref_table.RowSize(i);
1636 
1637  int parent = elements[leaf_elements[fine[0]]].parent;
1638  Element &pa = elements[parent];
1639 
1640  for (int j = 0; j < size; j++)
1641  {
1642  int child = leaf_elements[fine[j]];
1643  if (elements[child].rank == MyRank)
1644  {
1645  int splits[3];
1646  CountSplits(child, splits);
1647 
1648  for (int k = 0; k < Dim; k++)
1649  {
1650  if ((pa.ref_type & (1 << k)) &&
1651  splits[k] >= max_nc_level)
1652  {
1653  leaf_ok[fine[j]] = 0; break;
1654  }
1655  }
1656  }
1657  }
1658  }
1659 
1660  SynchronizeDerefinementData(leaf_ok, deref_table);
1661 
1662  level_ok.SetSize(deref_table.Size());
1663  level_ok = 1;
1664 
1665  for (int i = 0; i < deref_table.Size(); i++)
1666  {
1667  const int* fine = deref_table.GetRow(i),
1668  size = deref_table.RowSize(i);
1669 
1670  for (int j = 0; j < size; j++)
1671  {
1672  if (!leaf_ok[fine[j]])
1673  {
1674  level_ok[i] = 0; break;
1675  }
1676  }
1677  }
1678 }
1679 
1680 
1681 //// Rebalance /////////////////////////////////////////////////////////////////
1682 
1683 void ParNCMesh::Rebalance(const Array<int> *custom_partition)
1684 {
1685  send_rebalance_dofs.clear();
1686  recv_rebalance_dofs.clear();
1687 
1688  Array<int> old_elements;
1689  leaf_elements.GetSubArray(0, NElements, old_elements);
1690 
1691  if (!custom_partition) // SFC based partitioning
1692  {
1693  Array<int> new_ranks(leaf_elements.Size());
1694  new_ranks = -1;
1695 
1696  // figure out new assignments for Element::rank
1697  long local_elems = NElements, total_elems = 0;
1698  MPI_Allreduce(&local_elems, &total_elems, 1, MPI_LONG, MPI_SUM, MyComm);
1699 
1700  long first_elem_global = 0;
1701  MPI_Scan(&local_elems, &first_elem_global, 1, MPI_LONG, MPI_SUM, MyComm);
1702  first_elem_global -= local_elems;
1703 
1704  for (int i = 0, j = 0; i < leaf_elements.Size(); i++)
1705  {
1706  if (elements[leaf_elements[i]].rank == MyRank)
1707  {
1708  new_ranks[i] = Partition(first_elem_global + (j++), total_elems);
1709  }
1710  }
1711 
1712  int target_elements = PartitionFirstIndex(MyRank+1, total_elems)
1713  - PartitionFirstIndex(MyRank, total_elems);
1714 
1715  // assign the new ranks and send elements (plus ghosts) to new owners
1716  RedistributeElements(new_ranks, target_elements, true);
1717  }
1718  else // whatever partitioning the user has passed
1719  {
1720  MFEM_VERIFY(custom_partition->Size() == NElements,
1721  "Size of the partition array must match the number "
1722  "of local mesh elements (ParMesh::GetNE()).");
1723 
1724  Array<int> new_ranks;
1725  custom_partition->Copy(new_ranks);
1726  new_ranks.SetSize(leaf_elements.Size(), -1); // make room for ghosts
1727 
1728  RedistributeElements(new_ranks, -1, true);
1729  }
1730 
1731  // set up the old index array
1732  old_index_or_rank.SetSize(NElements);
1733  old_index_or_rank = -1;
1734  for (int i = 0; i < old_elements.Size(); i++)
1735  {
1736  Element &el = elements[old_elements[i]];
1737  if (el.rank == MyRank) { old_index_or_rank[el.index] = i; }
1738  }
1739 
1740  // get rid of elements beyond the new ghost layer
1741  Prune();
1742 }
1743 
1744 void ParNCMesh::RedistributeElements(Array<int> &new_ranks, int target_elements,
1745  bool record_comm)
1746 {
1747  bool sfc = (target_elements >= 0);
1748 
1749  UpdateLayers();
1750 
1751  // *** STEP 1: communicate new rank assignments for the ghost layer ***
1752 
1753  NeighborElementRankMessage::Map send_ghost_ranks, recv_ghost_ranks;
1754 
1755  ghost_layer.Sort([&](const int a, const int b)
1756  {
1757  return elements[a].rank < elements[b].rank;
1758  });
1759 
1760  {
1761  Array<int> rank_neighbors;
1762 
1763  // loop over neighbor ranks and their elements
1764  int begin = 0, end = 0;
1765  while (end < ghost_layer.Size())
1766  {
1767  // find range of elements belonging to one rank
1768  int rank = elements[ghost_layer[begin]].rank;
1769  while (end < ghost_layer.Size() &&
1770  elements[ghost_layer[end]].rank == rank) { end++; }
1771 
1772  Array<int> rank_elems;
1773  rank_elems.MakeRef(&ghost_layer[begin], end - begin);
1774 
1775  // find elements within boundary_layer that are neighbors to 'rank'
1776  rank_neighbors.SetSize(0);
1777  NeighborExpand(rank_elems, rank_neighbors, &boundary_layer);
1778 
1779  // send a message with new rank assignments within 'rank_neighbors'
1780  NeighborElementRankMessage& msg = send_ghost_ranks[rank];
1781  msg.SetNCMesh(this);
1782 
1783  msg.Reserve(rank_neighbors.Size());
1784  for (int i = 0; i < rank_neighbors.Size(); i++)
1785  {
1786  int elem = rank_neighbors[i];
1787  msg.AddElementRank(elem, new_ranks[elements[elem].index]);
1788  }
1789 
1790  msg.Isend(rank, MyComm);
1791 
1792  // prepare to receive a message from the neighbor too, these will
1793  // be the new rank assignments for our ghost layer
1794  recv_ghost_ranks[rank].SetNCMesh(this);
1795 
1796  begin = end;
1797  }
1798  }
1799 
1800  NeighborElementRankMessage::RecvAll(recv_ghost_ranks, MyComm);
1801 
1802  // read new ranks for the ghost layer from messages received
1803  NeighborElementRankMessage::Map::iterator it;
1804  for (it = recv_ghost_ranks.begin(); it != recv_ghost_ranks.end(); ++it)
1805  {
1806  NeighborElementRankMessage &msg = it->second;
1807  for (int i = 0; i < msg.Size(); i++)
1808  {
1809  int ghost_index = elements[msg.elements[i]].index;
1810  MFEM_ASSERT(element_type[ghost_index] == 2, "");
1811  new_ranks[ghost_index] = msg.values[i];
1812  }
1813  }
1814 
1815  recv_ghost_ranks.clear();
1816 
1817  // *** STEP 2: send elements that no longer belong to us to new assignees ***
1818 
1819  /* The result thus far is just the array 'new_ranks' containing new owners
1820  for elements that we currently own plus new owners for the ghost layer.
1821  Next we keep elements that still belong to us and send ElementSets with
1822  the remaining elements to their new owners. Each batch of elements needs
1823  to be sent together with their neighbors so the receiver also gets a
1824  ghost layer that is up to date (this is why we needed Step 1). */
1825 
1826  int received_elements = 0;
1827  for (int i = 0; i < leaf_elements.Size(); i++)
1828  {
1829  Element &el = elements[leaf_elements[i]];
1830  if (el.rank == MyRank && new_ranks[i] == MyRank)
1831  {
1832  received_elements++; // initialize to number of elements we're keeping
1833  }
1834  el.rank = new_ranks[i];
1835  }
1836 
1837  int nsent = 0, nrecv = 0; // for debug check
1838 
1839  RebalanceMessage::Map send_elems;
1840  {
1841  // sort elements we own by the new rank
1842  Array<int> owned_elements;
1843  owned_elements.MakeRef(leaf_elements.GetData(), NElements);
1844  owned_elements.Sort([&](const int a, const int b)
1845  {
1846  return elements[a].rank < elements[b].rank;
1847  });
1848 
1849  Array<int> batch;
1850  batch.Reserve(1024);
1851 
1852  // send elements to new owners
1853  int begin = 0, end = 0;
1854  while (end < NElements)
1855  {
1856  // find range of elements belonging to one rank
1857  int rank = elements[owned_elements[begin]].rank;
1858  while (end < owned_elements.Size() &&
1859  elements[owned_elements[end]].rank == rank) { end++; }
1860 
1861  if (rank != MyRank)
1862  {
1863  Array<int> rank_elems;
1864  rank_elems.MakeRef(&owned_elements[begin], end - begin);
1865 
1866  // expand the 'rank_elems' set by its neighbor elements (ghosts)
1867  batch.SetSize(0);
1868  NeighborExpand(rank_elems, batch);
1869 
1870  // send the batch
1871  RebalanceMessage &msg = send_elems[rank];
1872  msg.SetNCMesh(this);
1873 
1874  msg.Reserve(batch.Size());
1875  for (int i = 0; i < batch.Size(); i++)
1876  {
1877  int elem = batch[i];
1878  Element &el = elements[elem];
1879 
1880  if ((element_type[el.index] & 1) || el.rank != rank)
1881  {
1882  msg.AddElementRank(elem, el.rank);
1883  }
1884  // NOTE: we skip 'ghosts' that are of the receiver's rank because
1885  // they are not really ghosts and would get sent multiple times,
1886  // disrupting the termination mechanism in Step 4.
1887  }
1888 
1889  if (sfc)
1890  {
1891  msg.Isend(rank, MyComm);
1892  }
1893  else
1894  {
1895  // custom partitioning needs synchronous sends
1896  msg.Issend(rank, MyComm);
1897  }
1898  nsent++;
1899 
1900  // also: record what elements we sent (excluding the ghosts)
1901  // so that SendRebalanceDofs can later send data for them
1902  if (record_comm)
1903  {
1904  send_rebalance_dofs[rank].SetElements(rank_elems, this);
1905  }
1906  }
1907 
1908  begin = end;
1909  }
1910  }
1911 
1912  // *** STEP 3: receive elements from others ***
1913 
1914  RebalanceMessage msg;
1915  msg.SetNCMesh(this);
1916 
1917  if (sfc)
1918  {
1919  /* We don't know from whom we're going to receive, so we need to probe.
1920  However, for the default SFC partitioning, we do know how many elements
1921  we're going to own eventually, so the termination condition is easy. */
1922 
1923  while (received_elements < target_elements)
1924  {
1925  int rank, size;
1926  RebalanceMessage::Probe(rank, size, MyComm);
1927 
1928  // receive message; note: elements are created as the message is decoded
1929  msg.Recv(rank, size, MyComm);
1930  nrecv++;
1931 
1932  for (int i = 0; i < msg.Size(); i++)
1933  {
1934  int elem_rank = msg.values[i];
1935  elements[msg.elements[i]].rank = elem_rank;
1936 
1937  if (elem_rank == MyRank) { received_elements++; }
1938  }
1939 
1940  // save the ranks we received from, for later use in RecvRebalanceDofs
1941  if (record_comm)
1942  {
1943  recv_rebalance_dofs[rank].SetNCMesh(this);
1944  }
1945  }
1946 
1947  Update();
1948 
1949  RebalanceMessage::WaitAllSent(send_elems);
1950  }
1951  else
1952  {
1953  /* The case (target_elements < 0) is used for custom partitioning.
1954  Here we need to employ the "non-blocking consensus" algorithm
1955  (https://scorec.rpi.edu/REPORTS/2015-9.pdf) to determine when the
1956  element exchange is finished. The algorithm uses a non-blocking
1957  barrier. */
1958 
1959  MPI_Request barrier = MPI_REQUEST_NULL;
1960  int done = 0;
1961 
1962  while (!done)
1963  {
1964  int rank, size;
1965  while (RebalanceMessage::IProbe(rank, size, MyComm))
1966  {
1967  // receive message; note: elements are created as the msg is decoded
1968  msg.Recv(rank, size, MyComm);
1969  nrecv++;
1970 
1971  for (int i = 0; i < msg.Size(); i++)
1972  {
1973  elements[msg.elements[i]].rank = msg.values[i];
1974  }
1975 
1976  // save the ranks we received from, for later use in RecvRebalanceDofs
1977  if (record_comm)
1978  {
1979  recv_rebalance_dofs[rank].SetNCMesh(this);
1980  }
1981  }
1982 
1983  if (barrier != MPI_REQUEST_NULL)
1984  {
1985  MPI_Test(&barrier, &done, MPI_STATUS_IGNORE);
1986  }
1987  else
1988  {
1989  if (RebalanceMessage::TestAllSent(send_elems))
1990  {
1991  int mpi_err = MPI_Ibarrier(MyComm, &barrier);
1992 
1993  MFEM_VERIFY(mpi_err == MPI_SUCCESS, "");
1994  MFEM_VERIFY(barrier != MPI_REQUEST_NULL, "");
1995  }
1996  }
1997  }
1998 
1999  Update();
2000  }
2001 
2002  NeighborElementRankMessage::WaitAllSent(send_ghost_ranks);
2003 
2004 #ifdef MFEM_DEBUG
2005  int glob_sent, glob_recv;
2006  MPI_Reduce(&nsent, &glob_sent, 1, MPI_INT, MPI_SUM, 0, MyComm);
2007  MPI_Reduce(&nrecv, &glob_recv, 1, MPI_INT, MPI_SUM, 0, MyComm);
2008 
2009  if (MyRank == 0)
2010  {
2011  MFEM_ASSERT(glob_sent == glob_recv,
2012  "(glob_sent, glob_recv) = ("
2013  << glob_sent << ", " << glob_recv << ")");
2014  }
2015 #endif
2016 }
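// [Editor's illustration, not part of pncmesh.cpp] The custom-partitioning
// branch above uses the "non-blocking consensus" (NBX) termination pattern:
// keep draining incoming messages until all of our own synchronous sends have
// been matched and a subsequently started MPI_Ibarrier has completed. Stripped
// of the mesh specifics, a generic skeleton of that pattern (hypothetical
// helper callables; assumes <mpi.h>) is:

template <typename RecvPendingFn, typename AllSendsDoneFn>
static void nbx_consensus_sketch(MPI_Comm comm, RecvPendingFn receive_pending,
                                 AllSendsDoneFn all_sends_done)
{
   MPI_Request barrier = MPI_REQUEST_NULL;
   int done = 0;
   while (!done)
   {
      receive_pending();         // drain incoming messages (probe + receive)
      if (barrier != MPI_REQUEST_NULL)
      {
         // barrier already started: we are done once everyone has joined it
         MPI_Test(&barrier, &done, MPI_STATUS_IGNORE);
      }
      else if (all_sends_done()) // e.g. testing all of our synchronous sends
      {
         // all of our sends were matched, join the non-blocking barrier
         MPI_Ibarrier(comm, &barrier);
      }
   }
}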
2017 
2018 
2019 void ParNCMesh::SendRebalanceDofs(int old_ndofs,
2020  const Table &old_element_dofs,
2021  long old_global_offset,
2022  const FiniteElementSpace *space)
2023 {
2024  Array<int> dofs;
2025  int vdim = space->GetVDim();
2026 
2027  // fill messages (prepared by Rebalance) with element DOFs
2028  RebalanceDofMessage::Map::iterator it;
2029  for (it = send_rebalance_dofs.begin(); it != send_rebalance_dofs.end(); ++it)
2030  {
2031  RebalanceDofMessage &msg = it->second;
2032  msg.dofs.clear();
2033  int ne = msg.elem_ids.size();
2034  if (ne)
2035  {
2036  msg.dofs.reserve(old_element_dofs.RowSize(msg.elem_ids[0]) * ne * vdim);
2037  }
2038  for (int i = 0; i < ne; i++)
2039  {
2040  old_element_dofs.GetRow(msg.elem_ids[i], dofs);
2041  space->DofsToVDofs(dofs, old_ndofs);
2042  msg.dofs.insert(msg.dofs.end(), dofs.begin(), dofs.end());
2043  }
2044  msg.dof_offset = old_global_offset;
2045  }
2046 
2047  // send the DOFs to element recipients from last Rebalance()
2048  RebalanceDofMessage::IsendAll(send_rebalance_dofs, MyComm);
2049 }
2050 
2051 
2052 void ParNCMesh::RecvRebalanceDofs(Array<int> &elements, Array<long> &dofs)
2053 {
2054  // receive from the same ranks as in last Rebalance()
2055  RebalanceDofMessage::RecvAll(recv_rebalance_dofs, MyComm);
2056 
2057  // count the size of the result
2058  int ne = 0, nd = 0;
2059  RebalanceDofMessage::Map::iterator it;
2060  for (it = recv_rebalance_dofs.begin(); it != recv_rebalance_dofs.end(); ++it)
2061  {
2062  RebalanceDofMessage &msg = it->second;
2063  ne += msg.elem_ids.size();
2064  nd += msg.dofs.size();
2065  }
2066 
2067  elements.SetSize(ne);
2068  dofs.SetSize(nd);
2069 
2070  // copy element indices and their DOFs
2071  ne = nd = 0;
2072  for (it = recv_rebalance_dofs.begin(); it != recv_rebalance_dofs.end(); ++it)
2073  {
2074  RebalanceDofMessage &msg = it->second;
2075  for (unsigned i = 0; i < msg.elem_ids.size(); i++)
2076  {
2077  elements[ne++] = msg.elem_ids[i];
2078  }
2079  for (unsigned i = 0; i < msg.dofs.size(); i++)
2080  {
2081  dofs[nd++] = msg.dof_offset + msg.dofs[i];
2082  }
2083  }
2084 
2085  RebalanceDofMessage::WaitAllSent(send_rebalance_dofs);
2086 }
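// --- Editorial note (not part of pncmesh.cpp) ---------------------------------
// Example of the offset arithmetic above (numbers are made up): if the sending
// rank's old DOFs started at global offset 1200 (msg.dof_offset, set from
// 'old_global_offset' in SendRebalanceDofs) and it packed the local vdofs
// {3, 4, 5} for one element, the receiver reconstructs that element's old
// global DOF numbers as {1203, 1204, 1205}.
// ------------------------------------------------------------------------------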
2087 
2088 
2089 //// ElementSet ////////////////////////////////////////////////////////////////
2090 
2091 ParNCMesh::ElementSet::ElementSet(const ElementSet &other)
2092  : ncmesh(other.ncmesh), include_ref_types(other.include_ref_types)
2093 {
2094  other.data.Copy(data);
2095 }
2096 
2097 void ParNCMesh::ElementSet::WriteInt(int value)
2098 {
2099  // helper to put an int to the data array
2100  data.Append(value & 0xff);
2101  data.Append((value >> 8) & 0xff);
2102  data.Append((value >> 16) & 0xff);
2103  data.Append((value >> 24) & 0xff);
2104 }
2105 
2106 int ParNCMesh::ElementSet::GetInt(int pos) const
2107 {
2108  // helper to get an int from the data array
2109  return (int) data[pos] +
2110  ((int) data[pos+1] << 8) +
2111  ((int) data[pos+2] << 16) +
2112  ((int) data[pos+3] << 24);
2113 }
2114 
2115 void ParNCMesh::ElementSet::FlagElements(const Array<int> &elements, char flag)
2116 {
2117  for (int i = 0; i < elements.Size(); i++)
2118  {
2119  int elem = elements[i];
2120  while (elem >= 0)
2121  {
2122  Element &el = ncmesh->elements[elem];
2123  if (el.flag == flag) { break; }
2124  el.flag = flag;
2125  elem = el.parent;
2126  }
2127  }
2128 }
2129 
2130 void ParNCMesh::ElementSet::EncodeTree(int elem)
2131 {
2132  Element &el = ncmesh->elements[elem];
2133  if (!el.ref_type)
2134  {
2135  // we reached a leaf, mark this as zero child mask
2136  data.Append(0);
2137  }
2138  else
2139  {
2140  // check which subtrees contain marked elements
2141  int mask = 0;
2142  for (int i = 0; i < 8; i++)
2143  {
2144  if (el.child[i] >= 0 && ncmesh->elements[el.child[i]].flag)
2145  {
2146  mask |= 1 << i;
2147  }
2148  }
2149 
2150  // write the bit mask and visit the subtrees
2151  data.Append(mask);
2152  if (include_ref_types)
2153  {
2154  data.Append(el.ref_type);
2155  }
2156 
2157  for (int i = 0; i < 8; i++)
2158  {
2159  if (mask & (1 << i))
2160  {
2161  EncodeTree(el.child[i]);
2162  }
2163  }
2164  }
2165 }
2166 
2167 void ParNCMesh::ElementSet::Encode(const Array<int> &elements)
2168 {
2169  FlagElements(elements, 1);
2170 
2171  // Each refinement tree that contains at least one element from the set
2172  // is encoded as HEADER + TREE, where HEADER is the root element number and
2173  // TREE is the output of EncodeTree().
2174  for (int i = 0; i < ncmesh->root_state.Size(); i++)
2175  {
2176  if (ncmesh->elements[i].flag)
2177  {
2178  WriteInt(i);
2179  EncodeTree(i);
2180  }
2181  }
2182  WriteInt(-1); // mark end of data
2183 
2184  FlagElements(elements, 0);
2185 }
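// --- Editorial note (not part of pncmesh.cpp) ---------------------------------
// Worked example of the format described above (element numbers made up):
// suppose root element 5 is refined, only its children 0 and 3 lie on paths to
// flagged elements, both children are leaves, and include_ref_types == false.
// Encode() then emits
//
//    data = { 0x05, 0x00, 0x00, 0x00,     // WriteInt(5): header, root 5
//             0x09,                       // EncodeTree(5): mask (1<<0)|(1<<3)
//             0x00, 0x00,                 // children 0 and 3 are leaves
//             0xff, 0xff, 0xff, 0xff };   // WriteInt(-1): end of data
//
// and Decode() below consumes it in the same order: GetInt(0) == 5, the tree
// bytes at positions 4-6, then GetInt(7) == -1 terminates the loop.
// ------------------------------------------------------------------------------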
2186 
2187 #ifdef MFEM_DEBUG
2188 std::string ParNCMesh::ElementSet::RefPath() const
2189 {
2190  std::ostringstream oss;
2191  for (int i = 0; i < ref_path.Size(); i++)
2192  {
2193  oss << " elem " << ref_path[i] << " (";
2194  const Element &el = ncmesh->elements[ref_path[i]];
2195  for (int j = 0; j < GI[el.Geom()].nv; j++)
2196  {
2197  if (j) { oss << ", "; }
2198  oss << ncmesh->RetrieveNode(el, j);
2199  }
2200  oss << ")\n";
2201  }
2202  return oss.str();
2203 }
2204 #endif
2205 
2206 void ParNCMesh::ElementSet::DecodeTree(int elem, int &pos,
2207  Array<int> &elements) const
2208 {
2209 #ifdef MFEM_DEBUG
2210  ref_path.Append(elem);
2211 #endif
2212  int mask = data[pos++];
2213  if (!mask)
2214  {
2215  elements.Append(elem);
2216  }
2217  else
2218  {
2219  Element &el = ncmesh->elements[elem];
2220  if (include_ref_types)
2221  {
2222  int ref_type = data[pos++];
2223  if (!el.ref_type)
2224  {
2225  ncmesh->RefineElement(elem, ref_type);
2226  }
2227  else { MFEM_ASSERT(ref_type == el.ref_type, "") }
2228  }
2229  else
2230  {
2231  MFEM_ASSERT(el.ref_type != 0, "Path not found:\n"
2232  << RefPath() << " mask = " << mask);
2233  }
2234 
2235  for (int i = 0; i < 8; i++)
2236  {
2237  if (mask & (1 << i))
2238  {
2239  DecodeTree(el.child[i], pos, elements);
2240  }
2241  }
2242  }
2243 #ifdef MFEM_DEBUG
2244  ref_path.DeleteLast();
2245 #endif
2246 }
2247 
2248 void ParNCMesh::ElementSet::Decode(Array<int> &elements) const
2249 {
2250  int root, pos = 0;
2251  while ((root = GetInt(pos)) >= 0)
2252  {
2253  pos += 4;
2254  DecodeTree(root, pos, elements);
2255  }
2256 }
2257 
2258 void ParNCMesh::ElementSet::Dump(std::ostream &os) const
2259 {
2260  write<int>(os, data.Size());
2261  os.write((const char*) data.GetData(), data.Size());
2262 }
2263 
2264 void ParNCMesh::ElementSet::Load(std::istream &is)
2265 {
2266  data.SetSize(read<int>(is));
2267  is.read((char*) data.GetData(), data.Size());
2268 }
2269 
2270 
2271 //// EncodeMeshIds/DecodeMeshIds ///////////////////////////////////////////////
2272 
2273 void ParNCMesh::AdjustMeshIds(Array<MeshId> ids[], int rank)
2274 {
2275  GetSharedVertices();
2276  GetSharedEdges();
2277  GetSharedFaces();
2278 
2279  if (!shared_edges.masters.Size() &&
2280  !shared_faces.masters.Size()) { return; }
2281 
2282  Array<bool> contains_rank(groups.size());
2283  for (unsigned i = 0; i < groups.size(); i++)
2284  {
2285  contains_rank[i] = GroupContains(i, rank);
2286  }
2287 
2288  Array<Pair<int, int> > find_v(ids[0].Size());
2289  for (int i = 0; i < ids[0].Size(); i++)
2290  {
2291  find_v[i].one = ids[0][i].index;
2292  find_v[i].two = i;
2293  }
2294  find_v.Sort();
2295 
2296  // find vertices of master edges shared with 'rank', and modify their
2297  // MeshIds so their element/local matches the element of the master edge
2298  for (int i = 0; i < shared_edges.masters.Size(); i++)
2299  {
2300  const MeshId &edge_id = shared_edges.masters[i];
2301  if (contains_rank[entity_pmat_group[1][edge_id.index]])
2302  {
2303  int v[2], pos, k;
2304  GetEdgeVertices(edge_id, v);
2305  for (int j = 0; j < 2; j++)
2306  {
2307  if ((pos = find_v.FindSorted(Pair<int, int>(v[j], 0))) != -1)
2308  {
2309  // switch to an element/local that is safe for 'rank'
2310  k = find_v[pos].two;
2311  ChangeVertexMeshIdElement(ids[0][k], edge_id.element);
2312  ChangeRemainingMeshIds(ids[0], pos, find_v);
2313  }
2314  }
2315  }
2316  }
2317 
2318  if (!shared_faces.masters.Size()) { return; }
2319 
2320  Array<Pair<int, int> > find_e(ids[1].Size());
2321  for (int i = 0; i < ids[1].Size(); i++)
2322  {
2323  find_e[i].one = ids[1][i].index;
2324  find_e[i].two = i;
2325  }
2326  find_e.Sort();
2327 
2328  // find vertices/edges of master faces shared with 'rank', and modify their
2329  // MeshIds so their element/local matches the element of the master face
2330  for (int i = 0; i < shared_faces.masters.Size(); i++)
2331  {
2332  const MeshId &face_id = shared_faces.masters[i];
2333  if (contains_rank[entity_pmat_group[2][face_id.index]])
2334  {
2335  int v[4], e[4], eo[4], pos, k;
2336  int nfv = GetFaceVerticesEdges(face_id, v, e, eo);
2337  for (int j = 0; j < nfv; j++)
2338  {
2339  if ((pos = find_v.FindSorted(Pair<int, int>(v[j], 0))) != -1)
2340  {
2341  k = find_v[pos].two;
2342  ChangeVertexMeshIdElement(ids[0][k], face_id.element);
2343  ChangeRemainingMeshIds(ids[0], pos, find_v);
2344  }
2345  if ((pos = find_e.FindSorted(Pair<int, int>(e[j], 0))) != -1)
2346  {
2347  k = find_e[pos].two;
2348  ChangeEdgeMeshIdElement(ids[1][k], face_id.element);
2349  ChangeRemainingMeshIds(ids[1], pos, find_e);
2350  }
2351  }
2352  }
2353  }
2354 }
2355 
2356 void ParNCMesh::ChangeVertexMeshIdElement(NCMesh::MeshId &id, int elem)
2357 {
2358  Element &el = elements[elem];
2359  MFEM_ASSERT(el.ref_type == 0, "");
2360 
2361  GeomInfo& gi = GI[el.Geom()];
2362  for (int i = 0; i < gi.nv; i++)
2363  {
2364  if (nodes[el.node[i]].vert_index == id.index)
2365  {
2366  id.local = i;
2367  id.element = elem;
2368  return;
2369  }
2370  }
2371  MFEM_ABORT("Vertex not found.");
2372 }
2373 
2374 void ParNCMesh::ChangeEdgeMeshIdElement(NCMesh::MeshId &id, int elem)
2375 {
2376  Element &old = elements[id.element];
2377  const int *old_ev = GI[old.Geom()].edges[(int) id.local];
2378  Node* node = nodes.Find(old.node[old_ev[0]], old.node[old_ev[1]]);
2379  MFEM_ASSERT(node != NULL, "Edge not found.");
2380 
2381  Element &el = elements[elem];
2382  MFEM_ASSERT(el.ref_type == 0, "");
2383 
2384  GeomInfo& gi = GI[el.Geom()];
2385  for (int i = 0; i < gi.ne; i++)
2386  {
2387  const int* ev = gi.edges[i];
2388  if ((el.node[ev[0]] == node->p1 && el.node[ev[1]] == node->p2) ||
2389  (el.node[ev[1]] == node->p1 && el.node[ev[0]] == node->p2))
2390  {
2391  id.local = i;
2392  id.element = elem;
2393  return;
2394  }
2395 
2396  }
2397  MFEM_ABORT("Edge not found.");
2398 }
2399 
2400 void ParNCMesh::ChangeRemainingMeshIds(Array<MeshId> &ids, int pos,
2401  const Array<Pair<int, int> > &find)
2402 {
2403  const MeshId &first = ids[find[pos].two];
2404  while (++pos < find.Size() && ids[find[pos].two].index == first.index)
2405  {
2406  MeshId &other = ids[find[pos].two];
2407  other.element = first.element;
2408  other.local = first.local;
2409  }
2410 }
2411 
2412 void ParNCMesh::EncodeMeshIds(std::ostream &os, Array<MeshId> ids[])
2413 {
2414  std::map<int, int> stream_id;
2415 
2416  // get a list of elements involved, dump them to 'os' and create the mapping
2417  // element_id: (Element index -> stream ID)
2418  {
2419  Array<int> elements;
2420  for (int type = 0; type < 3; type++)
2421  {
2422  for (int i = 0; i < ids[type].Size(); i++)
2423  {
2424  elements.Append(ids[type][i].element);
2425  }
2426  }
2427 
2428  ElementSet eset(this);
2429  eset.Encode(elements);
2430  eset.Dump(os);
2431 
2432  Array<int> decoded;
2433  decoded.Reserve(elements.Size());
2434  eset.Decode(decoded);
2435 
2436  for (int i = 0; i < decoded.Size(); i++)
2437  {
2438  stream_id[decoded[i]] = i;
2439  }
2440  }
2441 
2442  // write the IDs as element/local pairs
2443  for (int type = 0; type < 3; type++)
2444  {
2445  write<int>(os, ids[type].Size());
2446  for (int i = 0; i < ids[type].Size(); i++)
2447  {
2448  const MeshId& id = ids[type][i];
2449  write<int>(os, stream_id[id.element]); // TODO: variable 1-4 bytes
2450  write<char>(os, id.local);
2451  }
2452  }
2453 }
2454 
2455 void ParNCMesh::DecodeMeshIds(std::istream &is, Array<MeshId> ids[])
2456 {
2457  // read the list of elements
2458  ElementSet eset(this);
2459  eset.Load(is);
2460 
2461  Array<int> elems;
2462  eset.Decode(elems);
2463 
2464  // read vertex/edge/face IDs
2465  for (int type = 0; type < 3; type++)
2466  {
2467  int ne = read<int>(is);
2468  ids[type].SetSize(ne);
2469 
2470  for (int i = 0; i < ne; i++)
2471  {
2472  int el_num = read<int>(is);
2473  int elem = elems[el_num];
2474  Element &el = elements[elem];
2475 
2476  MFEM_VERIFY(!el.ref_type, "not a leaf element: " << el_num);
2477 
2478  MeshId &id = ids[type][i];
2479  id.element = elem;
2480  id.local = read<char>(is);
2481 
2482  // find vertex/edge/face index
2483  GeomInfo &gi = GI[el.Geom()];
2484  switch (type)
2485  {
2486  case 0:
2487  {
2488  id.index = nodes[el.node[(int) id.local]].vert_index;
2489  break;
2490  }
2491  case 1:
2492  {
2493  const int* ev = gi.edges[(int) id.local];
2494  Node* node = nodes.Find(el.node[ev[0]], el.node[ev[1]]);
2495  MFEM_ASSERT(node && node->HasEdge(), "edge not found.");
2496  id.index = node->edge_index;
2497  break;
2498  }
2499  default:
2500  {
2501  const int* fv = gi.faces[(int) id.local];
2502  Face* face = faces.Find(el.node[fv[0]], el.node[fv[1]],
2503  el.node[fv[2]], el.node[fv[3]]);
2504  MFEM_ASSERT(face, "face not found.");
2505  id.index = face->index;
2506  }
2507  }
2508  }
2509  }
2510 }
2511 
2512 void ParNCMesh::EncodeGroups(std::ostream &os, const Array<GroupId> &ids)
2513 {
2514  // get a list of unique GroupIds
2515  std::map<GroupId, GroupId> stream_id;
2516  for (int i = 0; i < ids.Size(); i++)
2517  {
2518  if (i && ids[i] == ids[i-1]) { continue; }
2519  unsigned size = stream_id.size();
2520  GroupId &sid = stream_id[ids[i]];
2521  if (size != stream_id.size()) { sid = size; }
2522  }
2523 
2524  // write the unique groups
2525  write<short>(os, stream_id.size());
2526  for (std::map<GroupId, GroupId>::iterator
2527  it = stream_id.begin(); it != stream_id.end(); ++it)
2528  {
2529  write<GroupId>(os, it->second);
2530  if (it->first >= 0)
2531  {
2532  const CommGroup &group = groups[it->first];
2533  write<short>(os, group.size());
2534  for (unsigned i = 0; i < group.size(); i++)
2535  {
2536  write<int>(os, group[i]);
2537  }
2538  }
2539  else
2540  {
2541  // special "invalid" group, marks forwarded rows
2542  write<short>(os, -1);
2543  }
2544  }
2545 
2546  // write the list of all GroupIds
2547  write<int>(os, ids.Size());
2548  for (int i = 0; i < ids.Size(); i++)
2549  {
2550  write<GroupId>(os, stream_id[ids[i]]);
2551  }
2552 }
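// --- Editorial illustration (not part of pncmesh.cpp) -------------------------
// The first loop above relies on a compact std::map idiom: operator[] inserts
// the key if it is not present, so comparing the map size before and after the
// access tells whether this is the first occurrence, and only then is the next
// sequential stream ID assigned. The same idiom in isolation (the helper name
// 'intern' is made up for this sketch):

#include <map>
#include <string>

int intern(std::map<std::string, int> &ids, const std::string &key)
{
   const auto before = ids.size();
   int &id = ids[key];                    // value-initialized to 0 if inserted
   if (before != ids.size()) { id = (int) before; } // first time seen
   return id;
}
// ------------------------------------------------------------------------------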
2553 
2554 void ParNCMesh::DecodeGroups(std::istream &is, Array<GroupId> &ids)
2555 {
2556  int ngroups = read<short>(is);
2557  Array<GroupId> sgroups(ngroups);
2558 
2559  // read stream groups, convert to our groups
2560  CommGroup ranks;
2561  ranks.reserve(128);
2562  for (int i = 0; i < ngroups; i++)
2563  {
2564  int id = read<GroupId>(is);
2565  int size = read<short>(is);
2566  if (size >= 0)
2567  {
2568  ranks.resize(size);
2569  for (int ii = 0; ii < size; ii++)
2570  {
2571  ranks[ii] = read<int>(is);
2572  }
2573  sgroups[id] = GetGroupId(ranks);
2574  }
2575  else
2576  {
2577  sgroups[id] = -1; // forwarded
2578  }
2579  }
2580 
2581  // read the list of IDs
2582  ids.SetSize(read<int>(is));
2583  for (int i = 0; i < ids.Size(); i++)
2584  {
2585  ids[i] = sgroups[read<GroupId>(is)];
2586  }
2587 }
2588 
2589 
2590 //// Messages //////////////////////////////////////////////////////////////////
2591 
2592 template<class ValueType, bool RefTypes, int Tag>
2593 void ParNCMesh::ElementValueMessage<ValueType, RefTypes, Tag>::Encode(int)
2594 {
2595  std::ostringstream ostream;
2596 
2597  Array<int> tmp_elements;
2598  tmp_elements.MakeRef(elements.data(), elements.size());
2599 
2600  ElementSet eset(pncmesh, RefTypes);
2601  eset.Encode(tmp_elements);
2602  eset.Dump(ostream);
2603 
2604  // decode the element set to obtain a local numbering of elements
2605  Array<int> decoded;
2606  decoded.Reserve(tmp_elements.Size());
2607  eset.Decode(decoded);
2608 
2609  std::map<int, int> element_index;
2610  for (int i = 0; i < decoded.Size(); i++)
2611  {
2612  element_index[decoded[i]] = i;
2613  }
2614 
2615  write<int>(ostream, values.size());
2616  MFEM_ASSERT(elements.size() == values.size(), "");
2617 
2618  for (unsigned i = 0; i < values.size(); i++)
2619  {
2620  write<int>(ostream, element_index[elements[i]]); // element number
2621  write<ValueType>(ostream, values[i]);
2622  }
2623 
2624  ostream.str().swap(data);
2625 }
2626 
2627 template<class ValueType, bool RefTypes, int Tag>
2628 void ParNCMesh::ElementValueMessage<ValueType, RefTypes, Tag>::Decode(int)
2629 {
2630  std::istringstream istream(data);
2631 
2632  ElementSet eset(pncmesh, RefTypes);
2633  eset.Load(istream);
2634 
2635  Array<int> tmp_elements;
2636  eset.Decode(tmp_elements);
2637 
2638  int* el = tmp_elements.GetData();
2639  elements.assign(el, el + tmp_elements.Size());
2640  values.resize(elements.size());
2641 
2642  int count = read<int>(istream);
2643  for (int i = 0; i < count; i++)
2644  {
2645  int index = read<int>(istream);
2646  MFEM_ASSERT(index >= 0 && (size_t) index < values.size(), "");
2647  values[index] = read<ValueType>(istream);
2648  }
2649 
2650  // no longer need the raw data
2651  data.clear();
2652 }
2653 
2654 void ParNCMesh::RebalanceDofMessage::SetElements(const Array<int> &elems,
2655  NCMesh *ncmesh)
2656 {
2657  eset.SetNCMesh(ncmesh);
2658  eset.Encode(elems);
2659 
2660  Array<int> decoded;
2661  decoded.Reserve(elems.Size());
2662  eset.Decode(decoded);
2663 
2664  elem_ids.resize(decoded.Size());
2665  for (int i = 0; i < decoded.Size(); i++)
2666  {
2667  elem_ids[i] = eset.GetNCMesh()->elements[decoded[i]].index;
2668  }
2669 }
2670 
2671 static void write_dofs(std::ostream &os, const std::vector<int> &dofs)
2672 {
2673  write<int>(os, dofs.size());
2674  // TODO: we should compress the ints, mostly they are contiguous ranges
2675  os.write((const char*) dofs.data(), dofs.size() * sizeof(int));
2676 }
2677 
2678 static void read_dofs(std::istream &is, std::vector<int> &dofs)
2679 {
2680  dofs.resize(read<int>(is));
2681  is.read((char*) dofs.data(), dofs.size() * sizeof(int));
2682 }
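// --- Editorial illustration (not part of pncmesh.cpp) -------------------------
// One possible realization of the TODO in write_dofs() above, shown only as a
// sketch (not MFEM code): store each maximal contiguous run of DOF indices as
// a (start, length) pair instead of writing every index.

#include <cstddef>
#include <utility>
#include <vector>

static std::vector<std::pair<int, int> >
compress_ranges(const std::vector<int> &dofs)
{
   std::vector<std::pair<int, int> > runs;
   for (std::size_t i = 0; i < dofs.size(); )
   {
      std::size_t j = i + 1;
      while (j < dofs.size() && dofs[j] == dofs[j-1] + 1) { j++; }
      runs.push_back(std::make_pair(dofs[i], (int) (j - i))); // start, length
      i = j;
   }
   return runs;
}
// e.g. {10, 11, 12, 40, 41, 7} -> {(10,3), (40,2), (7,1)}
// ------------------------------------------------------------------------------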
2683 
2684 void ParNCMesh::RebalanceDofMessage::Encode(int)
2685 {
2686  std::ostringstream stream;
2687 
2688  eset.Dump(stream);
2689  write<long>(stream, dof_offset);
2690  write_dofs(stream, dofs);
2691 
2692  stream.str().swap(data);
2693 }
2694 
2695 void ParNCMesh::RebalanceDofMessage::Decode(int)
2696 {
2697  std::istringstream stream(data);
2698 
2699  eset.Load(stream);
2700  dof_offset = read<long>(stream);
2701  read_dofs(stream, dofs);
2702 
2703  data.clear();
2704 
2705  Array<int> elems;
2706  eset.Decode(elems);
2707 
2708  elem_ids.resize(elems.Size());
2709  for (int i = 0; i < elems.Size(); i++)
2710  {
2711  elem_ids[i] = eset.GetNCMesh()->elements[elems[i]].index;
2712  }
2713 }
2714 
2715 
2716 //// Utility ///////////////////////////////////////////////////////////////////
2717 
2718 void ParNCMesh::GetDebugMesh(Mesh &debug_mesh) const
2719 {
2720  // create a serial NCMesh containing all our elements (ghosts and all)
2721  NCMesh* copy = new NCMesh(*this);
2722 
2723  Array<int> &cle = copy->leaf_elements;
2724  for (int i = 0; i < cle.Size(); i++)
2725  {
2726  Element &el = copy->elements[cle[i]];
2727  el.attribute = el.rank + 1;
2728  }
2729 
2730  debug_mesh.InitFromNCMesh(*copy);
2731  debug_mesh.SetAttributes();
2732  debug_mesh.ncmesh = copy;
2733 }
2734 
2735 void ParNCMesh::Trim()
2736 {
2737  NCMesh::Trim();
2738 
2739  shared_vertices.Clear();
2740  shared_edges.Clear();
2741  shared_faces.Clear();
2742 
2743  for (int i = 0; i < 3; i++)
2744  {
2745  entity_owner[i].DeleteAll();
2747  entity_index_rank[i].DeleteAll();
2748  }
2749 
2750  send_rebalance_dofs.clear();
2751  recv_rebalance_dofs.clear();
2752 
2754 
2755  ClearAuxPM();
2756 }
2757 
2758 long ParNCMesh::RebalanceDofMessage::MemoryUsage() const
2759 {
2760  return (elem_ids.capacity() + dofs.capacity()) * sizeof(int);
2761 }
2762 
2763 template<typename K, typename V>
2764 static long map_memory_usage(const std::map<K, V> &map)
2765 {
2766  long result = 0;
2767  for (typename std::map<K, V>::const_iterator
2768  it = map.begin(); it != map.end(); ++it)
2769  {
2770  result += it->second.MemoryUsage();
2771  result += sizeof(std::pair<K, V>) + 3*sizeof(void*) + sizeof(bool);
2772  }
2773  return result;
2774 }
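// --- Editorial note (not part of pncmesh.cpp) ---------------------------------
// The per-entry constant above is a rough estimate of a typical std::map
// (red-black tree) node: the stored pair plus three parent/child pointers and
// a color flag; the exact overhead is implementation dependent.
// ------------------------------------------------------------------------------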
2775 
2776 long ParNCMesh::GroupsMemoryUsage() const
2777 {
2778  long groups_size = groups.capacity() * sizeof(CommGroup);
2779  for (unsigned i = 0; i < groups.size(); i++)
2780  {
2781  groups_size += groups[i].capacity() * sizeof(int);
2782  }
2783  const int approx_node_size =
2784  sizeof(std::pair<CommGroup, GroupId>) + 3*sizeof(void*) + sizeof(bool);
2785  return groups_size + group_id.size() * approx_node_size;
2786 }
2787 
2788 template<typename Type, int Size>
2789 static long arrays_memory_usage(const Array<Type> (&arrays)[Size])
2790 {
2791  long total = 0;
2792  for (int i = 0; i < Size; i++)
2793  {
2794  total += arrays[i].MemoryUsage();
2795  }
2796  return total;
2797 }
2798 
2799 long ParNCMesh::MemoryUsage(bool with_base) const
2800 {
2801  long total_groups_owners = 0;
2802  for (int i = 0; i < 3; i++)
2803  {
2804  total_groups_owners += entity_owner[i].MemoryUsage() +
2806  entity_index_rank[i].MemoryUsage();
2807  }
2808 
2809  return (with_base ? NCMesh::MemoryUsage() : 0) +
2810  GroupsMemoryUsage() +
2811  arrays_memory_usage(entity_owner) +
2812  arrays_memory_usage(entity_pmat_group) +
2813  arrays_memory_usage(entity_conf_group) +
2814  arrays_memory_usage(entity_elem_local) +
2815  shared_vertices.MemoryUsage() +
2816  shared_edges.MemoryUsage() +
2817  shared_faces.MemoryUsage() +
2818  face_orient.MemoryUsage() +
2819  element_type.MemoryUsage() +
2820  ghost_layer.MemoryUsage() +
2821  boundary_layer.MemoryUsage() +
2822  tmp_owner.MemoryUsage() +
2823  tmp_shared_flag.MemoryUsage() +
2824  arrays_memory_usage(entity_index_rank) +
2825  tmp_neighbors.MemoryUsage() +
2826  map_memory_usage(send_rebalance_dofs) +
2827  map_memory_usage(recv_rebalance_dofs) +
2828  old_index_or_rank.MemoryUsage() +
2829  aux_pm_store.MemoryUsage() +
2830  sizeof(ParNCMesh) - sizeof(NCMesh);
2831 }
2832 
2833 int ParNCMesh::PrintMemoryDetail(bool with_base) const
2834 {
2835  if (with_base) { NCMesh::PrintMemoryDetail(); }
2836 
2837  mfem::out << GroupsMemoryUsage() << " groups\n"
2838  << arrays_memory_usage(entity_owner) << " entity_owner\n"
2839  << arrays_memory_usage(entity_pmat_group) << " entity_pmat_group\n"
2840  << arrays_memory_usage(entity_conf_group) << " entity_conf_group\n"
2841  << arrays_memory_usage(entity_elem_local) << " entity_elem_local\n"
2842  << shared_vertices.MemoryUsage() << " shared_vertices\n"
2843  << shared_edges.MemoryUsage() << " shared_edges\n"
2844  << shared_faces.MemoryUsage() << " shared_faces\n"
2845  << face_orient.MemoryUsage() << " face_orient\n"
2846  << element_type.MemoryUsage() << " element_type\n"
2847  << ghost_layer.MemoryUsage() << " ghost_layer\n"
2848  << boundary_layer.MemoryUsage() << " boundary_layer\n"
2849  << tmp_owner.MemoryUsage() << " tmp_owner\n"
2850  << tmp_shared_flag.MemoryUsage() << " tmp_shared_flag\n"
2851  << arrays_memory_usage(entity_index_rank) << " entity_index_rank\n"
2852  << tmp_neighbors.MemoryUsage() << " tmp_neighbors\n"
2853  << map_memory_usage(send_rebalance_dofs) << " send_rebalance_dofs\n"
2854  << map_memory_usage(recv_rebalance_dofs) << " recv_rebalance_dofs\n"
2855  << old_index_or_rank.MemoryUsage() << " old_index_or_rank\n"
2856  << aux_pm_store.MemoryUsage() << " aux_pm_store\n"
2857  << sizeof(ParNCMesh) - sizeof(NCMesh) << " ParNCMesh" << std::endl;
2858 
2859  return leaf_elements.Size();
2860 }
2861 
2862 } // namespace mfem
2863 
2864 #endif // MFEM_USE_MPI