MFEM v4.9.0
Finite element discretization library
communication.hpp
Go to the documentation of this file.
1// Copyright (c) 2010-2025, Lawrence Livermore National Security, LLC. Produced
2// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
3// LICENSE and NOTICE for details. LLNL-CODE-806117.
4//
5// This file is part of the MFEM library. For more information and source code
6// availability visit https://mfem.org.
7//
8// MFEM is free software; you can redistribute it and/or modify it under the
9// terms of the BSD-3 license. We welcome feedback and contributions, see file
10// CONTRIBUTING.md for details.
11
12#ifndef MFEM_COMMUNICATION
13#define MFEM_COMMUNICATION
14
15#include "../config/config.hpp"
16
17#ifdef MFEM_USE_MPI
18
19#include "array.hpp"
20#include "table.hpp"
21#include "sets.hpp"
22#include "globals.hpp"
23#include <mpi.h>
24#include <cstdint>
25
26// We can't use MPI_CXX_BOOL directly because Microsoft's MPI implementation
27// does not provide it. Fall back to MPI_C_BOOL when it is unavailable.
28#ifdef MPI_CXX_BOOL
29#define MFEM_MPI_CXX_BOOL MPI_CXX_BOOL
30#else
31#define MFEM_MPI_CXX_BOOL MPI_C_BOOL
32#endif
33
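For illustration only (this snippet is not part of communication.hpp), MFEM_MPI_CXX_BOOL can be passed wherever an MPI datatype for a C++ bool is expected; the variable names below are made up for the sketch:

   // Sketch: logically AND a per-rank flag across MPI_COMM_WORLD.
   bool local_ok = true, all_ok = false;
   MPI_Allreduce(&local_ok, &all_ok, 1, MFEM_MPI_CXX_BOOL, MPI_LAND, MPI_COMM_WORLD);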
34namespace mfem
35{
36
37/** @brief A simple singleton class that calls MPI_Init() at construction and
38 MPI_Finalize() at destruction. It also provides easy access to
39 MPI_COMM_WORLD's rank and size. */
40class Mpi
41{
42public:
43 /// Singleton creation with Mpi::Init(argc, argv).
44 static void Init(int &argc, char **&argv,
45 int required = default_thread_required,
46 int *provided = nullptr)
47 { Init(&argc, &argv, required, provided); }
48 /// Singleton creation with Mpi::Init().
49 static void Init(int *argc = nullptr, char ***argv = nullptr,
50 int required = default_thread_required,
51 int *provided = nullptr)
52 {
53 MFEM_VERIFY(!IsInitialized(), "MPI already initialized!");
54 if (required == MPI_THREAD_SINGLE)
55 {
56 int mpi_err = MPI_Init(argc, argv);
57 MFEM_VERIFY(!mpi_err, "error in MPI_Init()!");
58 if (provided) { *provided = MPI_THREAD_SINGLE; }
59 }
60 else
61 {
62 int mpi_provided;
63 int mpi_err = MPI_Init_thread(argc, argv, required, &mpi_provided);
64 MFEM_VERIFY(!mpi_err, "error in MPI_Init_thread()!");
65 if (provided) { *provided = mpi_provided; }
66 }
67 // The Mpi singleton object below needs to be created after MPI_Init() for
68 // some MPI implementations.
69 Singleton();
70 }
71 /// Finalize MPI (if it has been initialized and not yet already finalized).
72 static void Finalize()
73 {
74 if (IsInitialized() && !IsFinalized()) { MPI_Finalize(); }
75 }
76 /// Return true if MPI has been initialized.
77 static bool IsInitialized()
78 {
79 int mpi_is_initialized;
80 int mpi_err = MPI_Initialized(&mpi_is_initialized);
81 return (mpi_err == MPI_SUCCESS) && mpi_is_initialized;
82 }
83 /// Return true if MPI has been finalized.
84 static bool IsFinalized()
85 {
86 int mpi_is_finalized;
87 int mpi_err = MPI_Finalized(&mpi_is_finalized);
88 return (mpi_err == MPI_SUCCESS) && mpi_is_finalized;
89 }
90 /// Return the MPI rank in MPI_COMM_WORLD.
91 static int WorldRank()
92 {
93 int world_rank;
94 MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
95 return world_rank;
96 }
97 /// Return the size of MPI_COMM_WORLD.
98 static int WorldSize()
99 {
100 int world_size;
101 MPI_Comm_size(MPI_COMM_WORLD, &world_size);
102 return world_size;
103 }
104 /// Return true if the rank in MPI_COMM_WORLD is zero.
105 static bool Root() { return WorldRank() == 0; }
106 /// Default level of thread support for MPI_Init_thread.
107 static MFEM_EXPORT int default_thread_required;
108private:
109 /// Initialize the Mpi singleton.
110 static Mpi &Singleton()
111 {
112 static Mpi mpi;
113 return mpi;
114 }
115 /// Finalize MPI.
116 ~Mpi() { Finalize(); }
117 /// Prevent direct construction of objects of this class.
118 Mpi() {}
119};
120
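As a minimal usage sketch (not part of this header), a typical MFEM driver initializes MPI through the singleton and lets the destructor finalize it at program exit; "mfem.hpp" is the library's umbrella header:

   #include "mfem.hpp"

   int main(int argc, char *argv[])
   {
      mfem::Mpi::Init(argc, argv);   // calls MPI_Init()/MPI_Init_thread() once
      if (mfem::Mpi::Root())
      {
         mfem::out << "Running on " << mfem::Mpi::WorldSize() << " ranks\n";
      }
      // ... parallel code ...
      return 0;                      // the Mpi singleton calls Finalize() on exit
   }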
121/** @brief A simple convenience class based on the Mpi singleton class above.
122 Preserved for backward compatibility. New code should use Mpi::Init() and
123 other Mpi methods instead. */
124class MPI_Session
125{
126public:
127 MPI_Session() { Mpi::Init(); }
128 MPI_Session(int &argc, char **&argv) { Mpi::Init(argc, argv); }
129 /// Return MPI_COMM_WORLD's rank.
130 int WorldRank() const { return Mpi::WorldRank(); }
131 /// Return MPI_COMM_WORLD's size.
132 int WorldSize() const { return Mpi::WorldSize(); }
133 /// Return true if WorldRank() == 0.
134 bool Root() const { return Mpi::Root(); }
135};
136
137
138/** The shared entities (e.g. vertices, faces and edges) are split into groups,
139 each group determined by the set of participating processors. They are
140 numbered locally in lproc. Assumptions:
141 - group 0 is the 'local' group
142 - groupmaster_lproc[0] = 0
143 - lproc_proc[0] = MyRank */
144class GroupTopology
145{
146private:
147 MPI_Comm MyComm;
148
149 /// Neighbor ids (lproc) in each group.
150 Table group_lproc;
151 /// Master neighbor id for each group.
152 Array<int> groupmaster_lproc;
153 /// MPI rank of each neighbor.
154 Array<int> lproc_proc;
155 /// Group --> Group number in the master.
156 Array<int> group_mgroup;
157
158 void ProcToLProc();
159
160public:
161 /// Constructor with the MPI communicator = 0.
162 GroupTopology() : MyComm(0) {}
163
164 /// Constructor given the MPI communicator 'comm'.
165 GroupTopology(MPI_Comm comm) { MyComm = comm; }
166
167 /// Copy constructor
168 GroupTopology(const GroupTopology &gt);
169
170 /// Set the MPI communicator to 'comm'.
171 void SetComm(MPI_Comm comm) { MyComm = comm; }
172
173 /// Return the MPI communicator.
174 MPI_Comm GetComm() const { return MyComm; }
175
176 /// Return the MPI rank within this object's communicator.
177 int MyRank() const { int r; MPI_Comm_rank(MyComm, &r); return r; }
178
179 /// Return the number of MPI ranks within this object's communicator.
180 int NRanks() const { int s; MPI_Comm_size(MyComm, &s); return s; }
181
182 /// Set up the group topology given the list of sets of shared entities.
183 void Create(ListOfIntegerSets &groups, int mpitag);
184
185 /// Return the number of groups.
186 int NGroups() const { return group_lproc.Size(); }
187
188 /// Return the number of neighbors including the local processor.
189 int GetNumNeighbors() const { return lproc_proc.Size(); }
190
191 /// Return the MPI rank of neighbor 'i'.
192 int GetNeighborRank(int i) const { return lproc_proc[i]; }
193
194 /// Return true if I am master for group 'g'.
195 bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }
196
197 /** @brief Return the neighbor index of the group master for a given group.
198 Neighbor 0 is the local processor. */
199 int GetGroupMaster(int g) const { return groupmaster_lproc[g]; }
200
201 /// Return the rank of the group master for group 'g'.
202 int GetGroupMasterRank(int g) const
203 { return lproc_proc[groupmaster_lproc[g]]; }
204
205 /// Return the group number in the master for group 'g'.
206 int GetGroupMasterGroup(int g) const { return group_mgroup[g]; }
207
208 /// Get the number of processors in a group
209 int GetGroupSize(int g) const { return group_lproc.RowSize(g); }
210
211 /** @brief Return a pointer to a list of neighbors for a given group.
212 Neighbor 0 is the local processor */
213 const int *GetGroup(int g) const { return group_lproc.GetRow(g); }
214
215 /// Save the data in a stream.
216 void Save(std::ostream &out) const;
217
218 /// Load the data from a stream.
219 void Load(std::istream &in);
220
221 /// Copy the internal data to the external 'copy'.
222 void Copy(GroupTopology & copy) const;
223
224 /// Swap the internal data with another @a GroupTopology object.
225 void Swap(GroupTopology &other);
226
227 virtual ~GroupTopology() {}
228};
229
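The sketch below (not part of the header) shows how the query methods above might be used on an already constructed GroupTopology; the variable gtopo is assumed to come from elsewhere, e.g. built via Create():

   // Sketch: report the groups this rank is responsible for.
   for (int g = 1; g < gtopo.NGroups(); g++)   // group 0 is the 'local' group
   {
      if (gtopo.IAmMaster(g))
      {
         mfem::out << "rank " << gtopo.MyRank() << " is master of group " << g
                   << " (size " << gtopo.GetGroupSize(g) << ")\n";
      }
   }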
230/** @brief Communicator performing operations within groups defined by a
231 GroupTopology with arbitrary-size data associated with each group. */
232class GroupCommunicator
233{
234public:
235 /// Communication mode.
236 enum Mode
237 {
238 byGroup, ///< Communications are performed one group at a time.
239 byNeighbor /**< Communications are performed one neighbor at a time,
240 aggregating over groups. */
241 };
242
243protected:
244 const GroupTopology &gtopo;
245 Mode mode;
246 Table group_ldof;
247 Table group_ltdof; // only for groups for which this processor is master.
248 int group_buf_size;
249 mutable Array<char> group_buf;
250 MPI_Request *requests;
251 // MPI_Status *statuses;
252 // comm_lock: 0 - no lock, 1 - locked for Bcast, 2 - locked for Reduce
253 mutable int comm_lock;
254 mutable int num_requests;
255 MPI_Request *request_marker;
256 int *buf_offsets; // size = max(number of groups, number of neighbors)
257 Table nbr_send_groups, nbr_recv_groups; // nbr 0 = me
258
259public:
260 /// Construct a GroupCommunicator object.
261 /** The object must be initialized before it can be used to perform any
262 operations. To initialize the object, either
263 - call Create() or
264 - initialize the Table reference returned by GroupLDofTable() and then
265 call Finalize().
266 */
267 GroupCommunicator(const GroupTopology &gt, Mode m = byNeighbor);
268
269 /** @brief Initialize the communicator from a local-dof to group map.
270 Finalize() is called internally. */
271 void Create(const Array<int> &ldof_group);
272
273 /** @brief Fill-in the returned Table reference to initialize the
274 GroupCommunicator then call Finalize(). */
275 Table &GroupLDofTable() { return group_ldof; }
276
277 /// Read-only access to group-ldof Table.
278 const Table &GroupLDofTable() const { return group_ldof; }
279
280 /// Allocate internal buffers after the GroupLDofTable is defined
281 void Finalize();
282
283 /// Initialize the internal group_ltdof Table.
284 /** This method must be called before performing operations that use local
285 data layout 2, see CopyGroupToBuffer() for layout descriptions. */
286 void SetLTDofTable(const Array<int> &ldof_ltdof);
287
288 /// Get a reference to the associated GroupTopology object
289 const GroupTopology &GetGroupTopology() { return gtopo; }
290
291 /// Get a const reference to the associated GroupTopology object
292 const GroupTopology &GetGroupTopology() const { return gtopo; }
293
294 /// Dofs to be sent to communication neighbors
295 void GetNeighborLTDofTable(Table &nbr_ltdof) const;
296
297 /// Dofs to be received from communication neighbors
298 void GetNeighborLDofTable(Table &nbr_ldof) const;
299
300 /** @brief Data structure on which we define reduce operations.
301 The data is associated with (and the operation is performed on) one
302 group at a time. */
303 template <class T> struct OpData
304 {
305 int nldofs, nb;
306 const int *ldofs;
307 T *ldata, *buf;
308 };
309
310 /** @brief Copy the entries corresponding to the group @a group from the
311 local array @a ldata to the buffer @a buf. */
312 /** The @a layout of the local array can be:
313 - 0 - @a ldata is an array on all ldofs: copied indices:
314 `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
315 - 1 - @a ldata is an array on the shared ldofs: copied indices:
316 `{ j : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
317 - 2 - @a ldata is an array on the true ldofs, ltdofs: copied indices:
318 `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ltdof.{I,J}`.
319 @returns The pointer @a buf plus the number of elements in the group. */
320 template <class T>
321 T *CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const;
322
323 /** @brief Copy the entries corresponding to the group @a group from the
324 buffer @a buf to the local array @a ldata. */
325 /** For a description of @a layout, see CopyGroupToBuffer().
326 @returns The pointer @a buf plus the number of elements in the group. */
327 template <class T>
328 const T *CopyGroupFromBuffer(const T *buf, T *ldata, int group,
329 int layout) const;
330
331 /** @brief Perform the reduction operation @a Op to the entries of group
332 @a group using the values from the buffer @a buf and the values from the
333 local array @a ldata, saving the result in the latter. */
334 /** For a description of @a layout, see CopyGroupToBuffer().
335 @returns The pointer @a buf plus the number of elements in the group. */
336 template <class T>
337 const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group,
338 int layout, void (*Op)(OpData<T>)) const;
339
340 /// Begin a broadcast within each group where the master is the root.
341 /** For a description of @a layout, see CopyGroupToBuffer(). */
342 template <class T> void BcastBegin(T *ldata, int layout) const;
343
344 /** @brief Finalize a broadcast started with BcastBegin().
345
346 The output data @a layout can be:
347 - 0 - @a ldata is an array on all ldofs; the input layout should be
348 either 0 or 2
349 - 1 - @a ldata is the same array as given to BcastBegin(); the input
350 layout should be 1.
351
352 For more details about @a layout, see CopyGroupToBuffer(). */
353 template <class T> void BcastEnd(T *ldata, int layout) const;
354
355 /** @brief Broadcast within each group where the master is the root.
356
357 The data @a layout can be either 0 or 1.
358
359 For a description of @a layout, see CopyGroupToBuffer(). */
360 template <class T> void Bcast(T *ldata, int layout) const
361 {
362 BcastBegin(ldata, layout);
363 BcastEnd(ldata, layout);
364 }
365
366 /// Broadcast within each group where the master is the root.
367 template <class T> void Bcast(T *ldata) const { Bcast<T>(ldata, 0); }
368 /// Broadcast within each group where the master is the root.
369 template <class T> void Bcast(Array<T> &ldata) const
370 { Bcast<T>((T *)ldata); }
371
372 /** @brief Begin reduction operation within each group where the master is
373 the root. */
374 /** The input data layout is an array on all ldofs, i.e. layout 0, see
375 CopyGroupToBuffer().
376
377 The reduce operation will be specified when calling ReduceEnd(). This
378 method is instantiated for int and double. */
379 template <class T> void ReduceBegin(const T *ldata) const;
380
381 /** @brief Finalize reduction operation started with ReduceBegin().
382
383 The output data @a layout can be either 0 or 2, see CopyGroupToBuffer().
384
385 The reduce operation is given by the third argument (see below for a
386 list of the supported operations). This method is instantiated for int
387 and double.
388
389 @note If the output data layout is 2, then the data from the @a ldata
390 array passed to this call is used in the reduction operation, instead of
391 the data from the @a ldata array passed to ReduceBegin(). Therefore, the
392 data for master-groups has to be identical in both arrays.
393 */
394 template <class T> void ReduceEnd(T *ldata, int layout,
395 void (*Op)(OpData<T>)) const;
396
397 /** @brief Reduce within each group where the master is the root.
398
399 The reduce operation is given by the second argument (see below for a
400 list of the supported operations). */
401 template <class T> void Reduce(T *ldata, void (*Op)(OpData<T>)) const
402 {
403 ReduceBegin(ldata);
404 ReduceEnd(ldata, 0, Op);
405 }
406
407 /// Reduce within each group where the master is the root.
408 template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>)) const
409 { Reduce<T>((T *)ldata, Op); }
410
411 /// Reduce operation Sum, instantiated for int and double
412 template <class T> static void Sum(OpData<T>);
413 /// Reduce operation Min, instantiated for int and double
414 template <class T> static void Min(OpData<T>);
415 /// Reduce operation Max, instantiated for int and double
416 template <class T> static void Max(OpData<T>);
417 /// Reduce operation bitwise OR, instantiated for int only
418 template <class T> static void BitOR(OpData<T>);
419
420 /// Print information about the GroupCommunicator from all MPI ranks.
421 void PrintInfo(std::ostream &out = mfem::out) const;
422
423 /** @brief Destroy a GroupCommunicator object, deallocating internal data
424 structures and buffers. */
425 ~GroupCommunicator();
426};
427
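As a hedged usage sketch (not part of the header), the common pattern for synchronizing values on shared degrees of freedom is a Reduce onto the group masters followed by a Bcast back; gc is an already initialized GroupCommunicator and num_ldofs is assumed given:

   // Sketch: sum contributions on shared dofs, then make all copies consistent.
   mfem::Array<double> ldata(num_ldofs);
   // ... fill ldata with local contributions ...
   gc.Reduce(ldata, mfem::GroupCommunicator::Sum<double>); // accumulate on masters
   gc.Bcast(ldata);                                        // masters broadcast back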
428/// General MPI message tags used by MFEM
429enum MessageTag
430{
431 DEREFINEMENT_MATRIX_CONSTRUCTION_DATA =
432 291, /// ParFiniteElementSpace ParallelDerefinementMatrix and
433 /// ParDerefineMatrixOp
434};
435
437{
438 NEIGHBOR_ELEMENT_RANK_VM, ///< NeighborElementRankMessage
439 NEIGHBOR_ORDER_VM, ///< NeighborOrderMessage
440 NEIGHBOR_DEREFINEMENT_VM, ///< NeighborDerefinementMessage
441 NEIGHBOR_REFINEMENT_VM, ///< NeighborRefinementMessage
442 NEIGHBOR_PREFINEMENT_VM, ///< NeighborPRefinementMessage
443 NEIGHBOR_ROW_VM, ///< NeighborRowMessage
444 REBALANCE_VM, ///< RebalanceMessage
445 REBALANCE_DOF_VM, ///< RebalanceDofMessage
446};
447
448/// \brief Variable-length MPI message containing unspecific binary data.
449template<int Tag>
450struct VarMessage
451{
452 std::string data;
453 MPI_Request send_request;
454
455 /** @brief Non-blocking send to processor 'rank'.
456 Returns immediately. Completion (as tested by MPI_Wait/Test) does not
457 mean the message was received -- it may be on its way or just buffered
458 locally. */
459 void Isend(int rank, MPI_Comm comm)
460 {
461 Encode(rank);
462 MPI_Isend((void*) data.data(), static_cast<int>(data.length()), MPI_BYTE, rank,
463 Tag, comm, &send_request);
464 }
465
466 /** @brief Non-blocking synchronous send to processor 'rank'.
467 Returns immediately. Completion (MPI_Wait/Test) means that the message
468 was received. */
469 void Issend(int rank, MPI_Comm comm)
470 {
471 Encode(rank);
472 MPI_Issend((void*) data.data(), static_cast<int>(data.length()), MPI_BYTE, rank,
473 Tag, comm, &send_request);
474 }
475
476 /// Helper to send all messages in a rank-to-message map container.
477 template<typename MapT>
478 static void IsendAll(MapT& rank_msg, MPI_Comm comm)
479 {
480 for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
481 {
482 it->second.Isend(it->first, comm);
483 }
484 }
485
486 /// Helper to wait for all messages in a map container to be sent.
487 template<typename MapT>
488 static void WaitAllSent(MapT& rank_msg)
489 {
490 for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
491 {
492 MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
493 it->second.Clear();
494 }
495 }
496
497 /** @brief Return true if all messages in the map container were sent,
498 otherwise return false, without waiting. */
499 template<typename MapT>
500 static bool TestAllSent(MapT& rank_msg)
501 {
502 for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
503 {
504 VarMessage &msg = it->second;
505 if (msg.send_request != MPI_REQUEST_NULL)
506 {
507 int sent;
508 MPI_Test(&msg.send_request, &sent, MPI_STATUS_IGNORE);
509 if (!sent) { return false; }
510 msg.Clear();
511 }
512 }
513 return true;
514 }
515
516 /** @brief Blocking probe for incoming message of this type from any rank.
517 Returns the rank and message size. */
518 static void Probe(int &rank, int &size, MPI_Comm comm)
519 {
520 MPI_Status status;
521 MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
522 rank = status.MPI_SOURCE;
523 MPI_Get_count(&status, MPI_BYTE, &size);
524 }
525
526 /** @brief Non-blocking probe for incoming message of this type from any
527 rank. If there is an incoming message, returns true and sets 'rank' and
528 'size'. Otherwise returns false. */
529 static bool IProbe(int &rank, int &size, MPI_Comm comm)
530 {
531 int flag;
532 MPI_Status status;
533 MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
534 if (!flag) { return false; }
535
536 rank = status.MPI_SOURCE;
537 MPI_Get_count(&status, MPI_BYTE, &size);
538 return true;
539 }
540
541 /// Post-probe receive from processor 'rank' of message size 'size'.
542 void Recv(int rank, int size, MPI_Comm comm)
543 {
544 MFEM_ASSERT(size >= 0, "");
545 data.resize(size);
546 MPI_Status status;
547 MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
548#ifdef MFEM_DEBUG
549 int count;
550 MPI_Get_count(&status, MPI_BYTE, &count);
551 MFEM_VERIFY(count == size, "");
552#endif
553 Decode(rank);
554 }
555
556 /// Like Recv(), but throw away the message.
557 void RecvDrop(int rank, int size, MPI_Comm comm)
558 {
559 data.resize(size);
560 MPI_Status status;
561 MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
562 data.resize(0); // don't decode
563 }
564
565 /// Helper to receive all messages in a rank-to-message map container.
566 template<typename MapT>
567 static void RecvAll(MapT& rank_msg, MPI_Comm comm)
568 {
569 int recv_left = static_cast<int>(rank_msg.size());
570 while (recv_left > 0)
571 {
572 int rank, size;
573 Probe(rank, size, comm);
574 MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
575 " (tag " << Tag << ") from rank " << rank);
576 // NOTE: no guard against receiving two messages from the same rank
577 rank_msg[rank].Recv(rank, size, comm);
578 --recv_left;
579 }
580 }
581
582 VarMessage() : send_request(MPI_REQUEST_NULL) {}
583
584 /// Clear the message and associated request.
585 void Clear() { data.clear(); send_request = MPI_REQUEST_NULL; }
586
587 virtual ~VarMessage()
588 {
589 MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
590 "WaitAllSent was not called after Isend");
591 }
592
593 VarMessage(const VarMessage &other)
594 : data(other.data), send_request(other.send_request)
595 {
596 MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
597 "Cannot copy message with a pending send.");
598 }
599
600protected:
601 virtual void Encode(int rank) = 0;
602 virtual void Decode(int rank) = 0;
603};
604
605
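The sketch below (not part of the header) outlines how a concrete message type might derive from VarMessage; the CountMessage name, the tag value 1001, and the payload format are all assumptions made for illustration (requires <map> and <cstring>):

   // Sketch: a message carrying a single int, serialized into 'data'.
   struct CountMessage : public mfem::VarMessage<1001>
   {
      int count = 0;
   protected:
      void Encode(int /*rank*/) override
      { data.assign(reinterpret_cast<const char *>(&count), sizeof(count)); }
      void Decode(int /*rank*/) override
      { std::memcpy(&count, data.data(), sizeof(count)); }
   };

   // Sketch of a typical exchange: send to each neighbor, receive from each
   // expected sender (RecvAll needs the map pre-populated with those ranks),
   // then wait for the sends to complete.
   std::map<int, CountMessage> to_send, to_recv;
   // ... fill to_send[rank].count and create empty entries in to_recv ...
   CountMessage::IsendAll(to_send, MPI_COMM_WORLD);
   CountMessage::RecvAll(to_recv, MPI_COMM_WORLD);
   CountMessage::WaitAllSent(to_send);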
606/// Helper struct to convert a C++ type to an MPI type
607template <typename Type> struct MPITypeMap;
608
609// Specializations of MPITypeMap; mpi_type initialized in communication.cpp:
610template<> struct MPITypeMap<bool>
611{
612 static MFEM_EXPORT const MPI_Datatype mpi_type;
613};
614template<> struct MPITypeMap<char>
615{
616 static MFEM_EXPORT const MPI_Datatype mpi_type;
617};
618template<> struct MPITypeMap<unsigned char>
619{
620 static MFEM_EXPORT const MPI_Datatype mpi_type;
621};
622template<> struct MPITypeMap<short>
623{
624 static MFEM_EXPORT const MPI_Datatype mpi_type;
625};
626template<> struct MPITypeMap<unsigned short>
627{
628 static MFEM_EXPORT const MPI_Datatype mpi_type;
629};
630template<> struct MPITypeMap<int>
631{
632 static MFEM_EXPORT const MPI_Datatype mpi_type;
633};
634template<> struct MPITypeMap<unsigned int>
635{
636 static MFEM_EXPORT const MPI_Datatype mpi_type;
637};
638template<> struct MPITypeMap<long>
639{
640 static MFEM_EXPORT const MPI_Datatype mpi_type;
641};
642template<> struct MPITypeMap<unsigned long>
643{
644 static MFEM_EXPORT const MPI_Datatype mpi_type;
645};
646template<> struct MPITypeMap<long long>
647{
648 static MFEM_EXPORT const MPI_Datatype mpi_type;
649};
650template<> struct MPITypeMap<unsigned long long>
651{
652 static MFEM_EXPORT const MPI_Datatype mpi_type;
653};
654template<> struct MPITypeMap<double>
655{
656 static MFEM_EXPORT const MPI_Datatype mpi_type;
657};
658template<> struct MPITypeMap<float>
659{
660 static MFEM_EXPORT const MPI_Datatype mpi_type;
661};
662
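For illustration (not part of the header), MPITypeMap lets generic code pick the MPI datatype matching a template parameter; GlobalSum below is a hypothetical helper, not an MFEM function:

   // Sketch: sum a value of any mapped type T over a communicator.
   template <typename T>
   T GlobalSum(T local, MPI_Comm comm)
   {
      T global;
      MPI_Allreduce(&local, &global, 1, mfem::MPITypeMap<T>::mpi_type,
                    MPI_SUM, comm);
      return global;
   }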
663/** Reorder MPI ranks to follow the Z-curve within the physical machine topology
664 (provided that functions to query physical node coordinates are available).
665 Returns a new communicator with reordered ranks. */
666MPI_Comm ReorderRanksZCurve(MPI_Comm comm);
667
668
669} // namespace mfem
670
671#endif
672
673#endif