MFEM v4.7.0
Finite element discretization library
communication.hpp
// Copyright (c) 2010-2024, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.

#ifndef MFEM_COMMUNICATION
#define MFEM_COMMUNICATION

#include "../config/config.hpp"

#ifdef MFEM_USE_MPI

#include "array.hpp"
#include "table.hpp"
#include "sets.hpp"
#include "globals.hpp"
#include <mpi.h>

namespace mfem
{

/** @brief A simple singleton class that calls MPI_Init() at construction and
    MPI_Finalize() at destruction. It also provides easy access to
    MPI_COMM_WORLD's rank and size. */
class Mpi
{
public:
   /// Singleton creation with Mpi::Init(argc, argv).
   static void Init(int &argc, char **&argv,
                    int required = default_thread_required,
                    int *provided = nullptr)
   { Init(&argc, &argv, required, provided); }
   /// Singleton creation with Mpi::Init().
   static void Init(int *argc = nullptr, char ***argv = nullptr,
                    int required = default_thread_required,
                    int *provided = nullptr)
   {
      MFEM_VERIFY(!IsInitialized(), "MPI already initialized!");
      if (required == MPI_THREAD_SINGLE)
      {
         int mpi_err = MPI_Init(argc, argv);
         MFEM_VERIFY(!mpi_err, "error in MPI_Init()!");
         if (provided) { *provided = MPI_THREAD_SINGLE; }
      }
      else
      {
         int mpi_provided;
         int mpi_err = MPI_Init_thread(argc, argv, required, &mpi_provided);
         MFEM_VERIFY(!mpi_err, "error in MPI_Init_thread()!");
         if (provided) { *provided = mpi_provided; }
      }
      // The Mpi singleton object below needs to be created after MPI_Init() for
      // some MPI implementations.
      Singleton();
   }
   /// Finalize MPI (if it has been initialized and not yet finalized).
   static void Finalize()
   {
      if (IsInitialized() && !IsFinalized()) { MPI_Finalize(); }
   }
   /// Return true if MPI has been initialized.
   static bool IsInitialized()
   {
      int mpi_is_initialized;
      int mpi_err = MPI_Initialized(&mpi_is_initialized);
      return (mpi_err == MPI_SUCCESS) && mpi_is_initialized;
   }
   /// Return true if MPI has been finalized.
   static bool IsFinalized()
   {
      int mpi_is_finalized;
      int mpi_err = MPI_Finalized(&mpi_is_finalized);
      return (mpi_err == MPI_SUCCESS) && mpi_is_finalized;
   }
   /// Return the MPI rank in MPI_COMM_WORLD.
   static int WorldRank()
   {
      int world_rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
      return world_rank;
   }
   /// Return the size of MPI_COMM_WORLD.
   static int WorldSize()
   {
      int world_size;
      MPI_Comm_size(MPI_COMM_WORLD, &world_size);
      return world_size;
   }
   /// Return true if the rank in MPI_COMM_WORLD is zero.
   static bool Root() { return WorldRank() == 0; }
   /// Default level of thread support for MPI_Init_thread.
   static MFEM_EXPORT int default_thread_required;
private:
   /// Initialize the Mpi singleton.
   static Mpi &Singleton()
   {
      static Mpi mpi;
      return mpi;
   }
   /// Finalize MPI.
   ~Mpi() { Finalize(); }
   /// Prevent direct construction of objects of this class.
   Mpi() {}
};
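
/* Example (a minimal usage sketch): a typical MPI-enabled program built on the
   Mpi singleton. The include of "mfem.hpp" and the program around the calls
   are illustrative assumptions, not part of this header.

   #include "mfem.hpp"
   #include <iostream>

   int main(int argc, char *argv[])
   {
      // Initialize MPI once; MPI_Finalize() is called automatically at program
      // exit by the Mpi singleton's destructor.
      mfem::Mpi::Init(argc, argv);

      if (mfem::Mpi::Root())
      {
         std::cout << "Running on " << mfem::Mpi::WorldSize()
                   << " MPI ranks." << std::endl;
      }
      return 0;
   }
*/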

/** @brief A simple convenience class based on the Mpi singleton class above.
    Preserved for backward compatibility. New code should use Mpi::Init() and
    other Mpi methods instead. */
class MPI_Session
{
public:
   MPI_Session() { Mpi::Init(); }
   MPI_Session(int &argc, char **&argv) { Mpi::Init(argc, argv); }
   /// Return MPI_COMM_WORLD's rank.
   int WorldRank() const { return Mpi::WorldRank(); }
   /// Return MPI_COMM_WORLD's size.
   int WorldSize() const { return Mpi::WorldSize(); }
   /// Return true if WorldRank() == 0.
   bool Root() const { return Mpi::Root(); }
};


/** The shared entities (e.g. vertices, faces and edges) are split into groups,
    each group determined by the set of participating processors. They are
    numbered locally in lproc. Assumptions:
    - group 0 is the 'local' group
    - groupmaster_lproc[0] = 0
    - lproc_proc[0] = MyRank */
class GroupTopology
{
private:
   MPI_Comm MyComm;

   /// Neighbor ids (lproc) in each group.
   Table group_lproc;
   /// Master neighbor id for each group.
   Array<int> groupmaster_lproc;
   /// MPI rank of each neighbor.
   Array<int> lproc_proc;
   /// Group --> Group number in the master.
   Array<int> group_mgroup;

   void ProcToLProc();

public:
   /// Constructor with the MPI communicator = 0.
   GroupTopology() : MyComm(0) {}

   /// Constructor given the MPI communicator 'comm'.
   GroupTopology(MPI_Comm comm) { MyComm = comm; }

   /// Copy constructor.
   GroupTopology(const GroupTopology &gt);

   /// Set the MPI communicator to 'comm'.
   void SetComm(MPI_Comm comm) { MyComm = comm; }

   /// Return the MPI communicator.
   MPI_Comm GetComm() const { return MyComm; }

   /// Return the MPI rank within this object's communicator.
   int MyRank() const { int r; MPI_Comm_rank(MyComm, &r); return r; }

   /// Return the number of MPI ranks within this object's communicator.
   int NRanks() const { int s; MPI_Comm_size(MyComm, &s); return s; }

   /// Set up the group topology given the list of sets of shared entities.
   void Create(ListOfIntegerSets &groups, int mpitag);

   /// Return the number of groups.
   int NGroups() const { return group_lproc.Size(); }

   /// Return the number of neighbors, including the local processor.
   int GetNumNeighbors() const { return lproc_proc.Size(); }

   /// Return the MPI rank of neighbor 'i'.
   int GetNeighborRank(int i) const { return lproc_proc[i]; }

   /// Return true if I am master for group 'g'.
   bool IAmMaster(int g) const { return (groupmaster_lproc[g] == 0); }

   /** @brief Return the neighbor index of the group master for a given group.
       Neighbor 0 is the local processor. */
   int GetGroupMaster(int g) const { return groupmaster_lproc[g]; }

   /// Return the rank of the group master for group 'g'.
   int GetGroupMasterRank(int g) const
   { return lproc_proc[groupmaster_lproc[g]]; }

   /// Return the group number in the master for group 'g'.
   int GetGroupMasterGroup(int g) const { return group_mgroup[g]; }

   /// Get the number of processors in a group.
   int GetGroupSize(int g) const { return group_lproc.RowSize(g); }

   /** @brief Return a pointer to a list of neighbors for a given group.
       Neighbor 0 is the local processor. */
   const int *GetGroup(int g) const { return group_lproc.GetRow(g); }

   /// Save the data in a stream.
   void Save(std::ostream &out) const;

   /// Load the data from a stream.
   void Load(std::istream &in);

   /// Copy the internal data to the external 'copy'.
   void Copy(GroupTopology &copy) const;

   /// Swap the internal data with another @a GroupTopology object.
   void Swap(GroupTopology &other);

   virtual ~GroupTopology() {}
};
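
/* Example (a minimal sketch): querying an existing GroupTopology object, e.g.
   one set up with Create() or owned by a parallel mesh; the function below is
   illustrative and not part of MFEM.

   void PrintGroupInfo(const mfem::GroupTopology &gt)
   {
      for (int g = 0; g < gt.NGroups(); g++)
      {
         mfem::out << "rank " << gt.MyRank() << ", group " << g
                   << ": size = " << gt.GetGroupSize(g)
                   << ", master rank = " << gt.GetGroupMasterRank(g)
                   << (gt.IAmMaster(g) ? " (this rank)" : "") << '\n';
      }
   }
*/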

/** @brief Communicator performing operations within groups defined by a
    GroupTopology with arbitrary-size data associated with each group. */
class GroupCommunicator
{
public:
   /// Communication mode.
   enum Mode
   {
      byGroup,    ///< Communications are performed one group at a time.
      byNeighbor  /**< Communications are performed one neighbor at a time,
                       aggregating over groups. */
   };

protected:
   const GroupTopology &gtopo;
   Mode mode;
   Table group_ldof;
   Table group_ltdof; // only for groups for which this processor is master.
   int group_buf_size;
   mutable Array<char> group_buf;
   MPI_Request *requests;
   // MPI_Status *statuses;
   // comm_lock: 0 - no lock, 1 - locked for Bcast, 2 - locked for Reduce
   mutable int comm_lock;
   mutable int num_requests;
   int *request_marker;
   int *buf_offsets; // size = max(number of groups, number of neighbors)
   Table nbr_send_groups, nbr_recv_groups; // nbr 0 = me

public:
   /// Construct a GroupCommunicator object.
   /** The object must be initialized before it can be used to perform any
       operations. To initialize the object, either
       - call Create() or
       - initialize the Table reference returned by GroupLDofTable() and then
         call Finalize().
   */
   GroupCommunicator(const GroupTopology &gt, Mode m = byNeighbor);

   /** @brief Initialize the communicator from a local-dof to group map.
       Finalize() is called internally. */
   void Create(const Array<int> &ldof_group);

   /** @brief Fill-in the returned Table reference to initialize the
       GroupCommunicator then call Finalize(). */
   Table &GroupLDofTable() { return group_ldof; }

   /// Read-only access to group-ldof Table.
   const Table &GroupLDofTable() const { return group_ldof; }

   /// Allocate internal buffers after the GroupLDofTable is defined.
   void Finalize();

   /// Initialize the internal group_ltdof Table.
   /** This method must be called before performing operations that use local
       data layout 2, see CopyGroupToBuffer() for layout descriptions. */
   void SetLTDofTable(const Array<int> &ldof_ltdof);

   /// Get a reference to the associated GroupTopology object.
   const GroupTopology &GetGroupTopology() { return gtopo; }

   /// Get a const reference to the associated GroupTopology object.
   const GroupTopology &GetGroupTopology() const { return gtopo; }

   /// Dofs to be sent to communication neighbors.
   void GetNeighborLTDofTable(Table &nbr_ltdof) const;

   /// Dofs to be received from communication neighbors.
   void GetNeighborLDofTable(Table &nbr_ldof) const;

   /** @brief Data structure on which we define reduce operations.
       The data is associated with (and the operation is performed on) one
       group at a time. */
   template <class T> struct OpData
   {
      int nldofs, nb;
      const int *ldofs;
      T *ldata, *buf;
   };

   /** @brief Copy the entries corresponding to the group @a group from the
       local array @a ldata to the buffer @a buf. */
   /** The @a layout of the local array can be:
       - 0 - @a ldata is an array on all ldofs: copied indices:
         `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
       - 1 - @a ldata is an array on the shared ldofs: copied indices:
         `{ j : I[group] <= j < I[group+1] }` where `I,J=group_ldof.{I,J}`
       - 2 - @a ldata is an array on the true ldofs, ltdofs: copied indices:
         `{ J[j] : I[group] <= j < I[group+1] }` where `I,J=group_ltdof.{I,J}`.
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   T *CopyGroupToBuffer(const T *ldata, T *buf, int group, int layout) const;

   /** @brief Copy the entries corresponding to the group @a group from the
       buffer @a buf to the local array @a ldata. */
   /** For a description of @a layout, see CopyGroupToBuffer().
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   const T *CopyGroupFromBuffer(const T *buf, T *ldata, int group,
                                int layout) const;

   /** @brief Perform the reduction operation @a Op to the entries of group
       @a group using the values from the buffer @a buf and the values from the
       local array @a ldata, saving the result in the latter. */
   /** For a description of @a layout, see CopyGroupToBuffer().
       @returns The pointer @a buf plus the number of elements in the group. */
   template <class T>
   const T *ReduceGroupFromBuffer(const T *buf, T *ldata, int group,
                                  int layout, void (*Op)(OpData<T>)) const;

   /// Begin a broadcast within each group where the master is the root.
   /** For a description of @a layout, see CopyGroupToBuffer(). */
   template <class T> void BcastBegin(T *ldata, int layout) const;

   /** @brief Finalize a broadcast started with BcastBegin().

       The output data @a layout can be:
       - 0 - @a ldata is an array on all ldofs; the input layout should be
         either 0 or 2
       - 1 - @a ldata is the same array as given to BcastBegin(); the input
         layout should be 1.

       For more details about @a layout, see CopyGroupToBuffer(). */
   template <class T> void BcastEnd(T *ldata, int layout) const;

   /** @brief Broadcast within each group where the master is the root.

       The data @a layout can be either 0 or 1.

       For a description of @a layout, see CopyGroupToBuffer(). */
   template <class T> void Bcast(T *ldata, int layout) const
   {
      BcastBegin(ldata, layout);
      BcastEnd(ldata, layout);
   }

   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(T *ldata) const { Bcast<T>(ldata, 0); }
   /// Broadcast within each group where the master is the root.
   template <class T> void Bcast(Array<T> &ldata) const
   { Bcast<T>((T *)ldata); }

   /** @brief Begin reduction operation within each group where the master is
       the root. */
   /** The input data layout is an array on all ldofs, i.e. layout 0, see
       CopyGroupToBuffer().

       The reduce operation will be specified when calling ReduceEnd(). This
       method is instantiated for int and double. */
   template <class T> void ReduceBegin(const T *ldata) const;

   /** @brief Finalize reduction operation started with ReduceBegin().

       The output data @a layout can be either 0 or 2, see CopyGroupToBuffer().

       The reduce operation is given by the third argument (see below for a
       list of the supported operations). This method is instantiated for int
       and double.

       @note If the output data layout is 2, then the data from the @a ldata
       array passed to this call is used in the reduction operation, instead of
       the data from the @a ldata array passed to ReduceBegin(). Therefore, the
       data for master-groups has to be identical in both arrays.
   */
   template <class T> void ReduceEnd(T *ldata, int layout,
                                     void (*Op)(OpData<T>)) const;

   /** @brief Reduce within each group where the master is the root.

       The reduce operation is given by the second argument (see below for a
       list of the supported operations). */
   template <class T> void Reduce(T *ldata, void (*Op)(OpData<T>)) const
   {
      ReduceBegin(ldata);
      ReduceEnd(ldata, 0, Op);
   }

   /// Reduce within each group where the master is the root.
   template <class T> void Reduce(Array<T> &ldata, void (*Op)(OpData<T>)) const
   { Reduce<T>((T *)ldata, Op); }

   /// Reduce operation Sum, instantiated for int and double.
   template <class T> static void Sum(OpData<T>);
   /// Reduce operation Min, instantiated for int and double.
   template <class T> static void Min(OpData<T>);
   /// Reduce operation Max, instantiated for int and double.
   template <class T> static void Max(OpData<T>);
   /// Reduce operation bitwise OR, instantiated for int only.
   template <class T> static void BitOR(OpData<T>);

   /// Print information about the GroupCommunicator from all MPI ranks.
   void PrintInfo(std::ostream &out = mfem::out) const;

   /** @brief Destroy a GroupCommunicator object, deallocating internal data
       structures and buffers. */
   ~GroupCommunicator();
};
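
/* Example (a minimal sketch): making shared values consistent across ranks
   with an already-initialized GroupCommunicator. The objects "gc" and "ldata"
   (an array over all ldofs, layout 0) are assumed to exist; in MFEM
   applications such a communicator is typically obtained from a parallel
   finite element space.

   void SynchronizeSum(const mfem::GroupCommunicator &gc,
                       mfem::Array<double> &ldata)
   {
      // Accumulate the contributions from all group members on each group
      // master using the Sum reduce operation ...
      gc.Reduce<double>(ldata, mfem::GroupCommunicator::Sum);
      // ... then broadcast the master's values back to the other members.
      gc.Bcast(ldata);
   }
*/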


/// \brief Variable-length MPI message containing unspecified binary data.
template<int Tag>
struct VarMessage
{
   std::string data;
   MPI_Request send_request;

   /** @brief Non-blocking send to processor 'rank'.
       Returns immediately. Completion (as tested by MPI_Wait/Test) does not
       mean the message was received -- it may be on its way or just buffered
       locally. */
   void Isend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Isend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                &send_request);
   }

   /** @brief Non-blocking synchronous send to processor 'rank'.
       Returns immediately. Completion (MPI_Wait/Test) means that the message
       was received. */
   void Issend(int rank, MPI_Comm comm)
   {
      Encode(rank);
      MPI_Issend((void*) data.data(), data.length(), MPI_BYTE, rank, Tag, comm,
                 &send_request);
   }

   /// Helper to send all messages in a rank-to-message map container.
   template<typename MapT>
   static void IsendAll(MapT& rank_msg, MPI_Comm comm)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         it->second.Isend(it->first, comm);
      }
   }

   /// Helper to wait for all messages in a map container to be sent.
   template<typename MapT>
   static void WaitAllSent(MapT& rank_msg)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         MPI_Wait(&it->second.send_request, MPI_STATUS_IGNORE);
         it->second.Clear();
      }
   }

   /** @brief Return true if all messages in the map container were sent,
       otherwise return false, without waiting. */
   template<typename MapT>
   static bool TestAllSent(MapT& rank_msg)
   {
      for (auto it = rank_msg.begin(); it != rank_msg.end(); ++it)
      {
         VarMessage &msg = it->second;
         if (msg.send_request != MPI_REQUEST_NULL)
         {
            int sent;
            MPI_Test(&msg.send_request, &sent, MPI_STATUS_IGNORE);
            if (!sent) { return false; }
            msg.Clear();
         }
      }
      return true;
   }

   /** @brief Blocking probe for an incoming message of this type from any
       rank. Returns the rank and message size. */
   static void Probe(int &rank, int &size, MPI_Comm comm)
   {
      MPI_Status status;
      MPI_Probe(MPI_ANY_SOURCE, Tag, comm, &status);
      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
   }

   /** @brief Non-blocking probe for an incoming message of this type from any
       rank. If there is an incoming message, returns true and sets 'rank' and
       'size'. Otherwise returns false. */
   static bool IProbe(int &rank, int &size, MPI_Comm comm)
   {
      int flag;
      MPI_Status status;
      MPI_Iprobe(MPI_ANY_SOURCE, Tag, comm, &flag, &status);
      if (!flag) { return false; }

      rank = status.MPI_SOURCE;
      MPI_Get_count(&status, MPI_BYTE, &size);
      return true;
   }

   /// Post-probe receive from processor 'rank' of message size 'size'.
   void Recv(int rank, int size, MPI_Comm comm)
   {
      MFEM_ASSERT(size >= 0, "");
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
#ifdef MFEM_DEBUG
      int count;
      MPI_Get_count(&status, MPI_BYTE, &count);
      MFEM_VERIFY(count == size, "");
#endif
      Decode(rank);
   }

   /// Like Recv(), but throw away the message.
   void RecvDrop(int rank, int size, MPI_Comm comm)
   {
      data.resize(size);
      MPI_Status status;
      MPI_Recv((void*) data.data(), size, MPI_BYTE, rank, Tag, comm, &status);
      data.resize(0); // don't decode
   }

   /// Helper to receive all messages in a rank-to-message map container.
   template<typename MapT>
   static void RecvAll(MapT& rank_msg, MPI_Comm comm)
   {
      int recv_left = rank_msg.size();
      while (recv_left > 0)
      {
         int rank, size;
         Probe(rank, size, comm);
         MFEM_ASSERT(rank_msg.find(rank) != rank_msg.end(), "Unexpected message"
                     " (tag " << Tag << ") from rank " << rank);
         // NOTE: no guard against receiving two messages from the same rank
         rank_msg[rank].Recv(rank, size, comm);
         --recv_left;
      }
   }

   VarMessage() : send_request(MPI_REQUEST_NULL) {}

   /// Clear the message and associated request.
   void Clear() { data.clear(); send_request = MPI_REQUEST_NULL; }

   virtual ~VarMessage()
   {
      MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
                  "WaitAllSent was not called after Isend");
   }

   VarMessage(const VarMessage &other)
      : data(other.data), send_request(other.send_request)
   {
      MFEM_ASSERT(send_request == MPI_REQUEST_NULL,
                  "Cannot copy message with a pending send.");
   }

protected:
   virtual void Encode(int rank) = 0;
   virtual void Decode(int rank) = 0;
};
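
/* Example (a minimal sketch): a concrete message type derived from VarMessage.
   The tag value 289 and the payload format are arbitrary illustrative choices;
   <vector>, <map> and <sstream> are assumed to be included.

   struct IntListMessage : public mfem::VarMessage<289>
   {
      std::vector<int> values; // payload exchanged between ranks

   protected:
      // Serialize 'values' into the inherited 'data' string before sending.
      void Encode(int rank) override
      {
         std::ostringstream os;
         int n = (int) values.size();
         os.write((const char *) &n, sizeof(n));
         os.write((const char *) values.data(), n*sizeof(int));
         data = os.str();
      }
      // Rebuild 'values' from 'data' after receiving.
      void Decode(int rank) override
      {
         std::istringstream is(data);
         int n = 0;
         is.read((char *) &n, sizeof(n));
         values.resize(n);
         is.read((char *) values.data(), n*sizeof(int));
      }
   };

   // Typical exchange with rank -> message maps (the keys of 'recv' must list
   // the expected sender ranks, see RecvAll() above):
   //   std::map<int, IntListMessage> send, recv;
   //   IntListMessage::IsendAll(send, comm);
   //   IntListMessage::RecvAll(recv, comm);
   //   IntListMessage::WaitAllSent(send);
*/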


/// Helper struct to convert a C++ type to an MPI type.
template <typename Type> struct MPITypeMap;

// Specializations of MPITypeMap; mpi_type is initialized in communication.cpp:
template<> struct MPITypeMap<int>
{
   static MFEM_EXPORT const MPI_Datatype mpi_type;
};
template<> struct MPITypeMap<double>
{
   static MFEM_EXPORT const MPI_Datatype mpi_type;
};
template<> struct MPITypeMap<float>
{
   static MFEM_EXPORT const MPI_Datatype mpi_type;
};

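/* Example (a minimal sketch): using MPITypeMap to select the MPI datatype that
   matches a C++ type in generic code; the helper function below is
   illustrative and not part of MFEM.

   template <typename T>
   T GlobalSum(T local, MPI_Comm comm)
   {
      T global;
      MPI_Allreduce(&local, &global, 1, mfem::MPITypeMap<T>::mpi_type,
                    MPI_SUM, comm);
      return global;
   }

   // Usage: double total = GlobalSum(local_value, MPI_COMM_WORLD);
*/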

/** Reorder MPI ranks to follow the Z-curve within the physical machine topology
    (provided that functions to query physical node coordinates are available).
    Returns a new communicator with reordered ranks. */
MPI_Comm ReorderRanksZCurve(MPI_Comm comm);


} // namespace mfem

#endif // MFEM_USE_MPI

#endif // MFEM_COMMUNICATION