MFEM v4.7.0
Finite element discretization library
mumps.cpp
// Copyright (c) 2010-2024, Lawrence Livermore National Security, LLC. Produced
// at the Lawrence Livermore National Laboratory. All Rights reserved. See files
// LICENSE and NOTICE for details. LLNL-CODE-806117.
//
// This file is part of the MFEM library. For more information and source code
// availability visit https://mfem.org.
//
// MFEM is free software; you can redistribute it and/or modify it under the
// terms of the BSD-3 license. We welcome feedback and contributions, see file
// CONTRIBUTING.md for details.

#include "../config/config.hpp"

#ifdef MFEM_USE_MUMPS
#ifdef MFEM_USE_MPI

#include "mumps.hpp"

#include <algorithm>

#if MFEM_MUMPS_VERSION >= 530
#ifdef MUMPS_INTSIZE64
#error "Full 64-bit MUMPS is not yet supported"
#endif
#else
#ifdef INTSIZE64
#error "Full 64-bit MUMPS is not yet supported"
#endif
#endif

// Macros so that indices match the MUMPS documentation
#define MUMPS_ICNTL(I) icntl[(I) -1]
#define MUMPS_CNTL(I) cntl[(I) -1]
#define MUMPS_INFO(I) info[(I) -1]
#define MUMPS_INFOG(I) infog[(I) -1]
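
// For example, MUMPS_ICNTL(14) expands to icntl[13]: the control arrays are
// 0-based in C, while the MUMPS documentation numbers them from 1.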

namespace mfem
{

MUMPSSolver::MUMPSSolver(MPI_Comm comm_)
{
   Init(comm_);
}

MUMPSSolver::MUMPSSolver(const Operator &op)
{
   auto APtr = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(APtr, "Not a compatible matrix type");
   Init(APtr->GetComm());
   SetOperator(op);
}
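
// Example usage (a minimal sketch, not part of the library source; assumes a
// HypreParMatrix A and conforming Vectors B, X; the SPD matrix type applies
// only if A really is symmetric positive definite):
//
//    MUMPSSolver mumps(MPI_COMM_WORLD);
//    mumps.SetMatrixSymType(MUMPSSolver::MatType::SYMMETRIC_POSITIVE_DEFINITE);
//    mumps.SetOperator(A);
//    mumps.Mult(B, X);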

void MUMPSSolver::Init(MPI_Comm comm_)
{
   id = nullptr;
   comm = comm_;
   MPI_Comm_size(comm, &numProcs);
   MPI_Comm_rank(comm, &myid);

   mat_type = MatType::UNSYMMETRIC;
   print_level = 0;
   reorder_method = ReorderingStrategy::AUTOMATIC;
   reorder_reuse = false;
   blr_tol = 0.0;

#if MFEM_MUMPS_VERSION >= 530
   irhs_loc = nullptr;
   rhs_loc = nullptr;
   isol_loc = nullptr;
   sol_loc = nullptr;
#else
   recv_counts = nullptr;
   displs = nullptr;
   rhs_glob = nullptr;
#endif
}

MUMPSSolver::~MUMPSSolver()
{
#if MFEM_MUMPS_VERSION >= 530
   delete [] irhs_loc;
   delete [] rhs_loc;
   delete [] isol_loc;
   delete [] sol_loc;
#else
   delete [] recv_counts;
   delete [] displs;
   delete [] rhs_glob;
#endif
   if (id)
   {
      id->job = -2;
#ifdef MFEM_USE_SINGLE
      smumps_c(id);
#else
      dmumps_c(id);
#endif
      delete id;
   }
}

void MUMPSSolver::SetOperator(const Operator &op)
{
   auto APtr = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(APtr, "Not a compatible matrix type");

   height = op.Height();
   width = op.Width();

   auto parcsr_op = (hypre_ParCSRMatrix *)const_cast<HypreParMatrix &>(*APtr);
   APtr->HostRead();
   hypre_CSRMatrix *csr_op = hypre_MergeDiagAndOffd(parcsr_op);
   APtr->HypreRead();
   HYPRE_Int *Iptr = csr_op->i;
#if MFEM_HYPRE_VERSION >= 21600
   HYPRE_BigInt *Jptr = csr_op->big_j;
#else
   HYPRE_Int *Jptr = csr_op->j;
#endif

   int n_loc = internal::to_int(csr_op->num_rows);
   row_start = internal::to_int(parcsr_op->first_row_index);

   MUMPS_INT8 nnz = 0, k = 0;
   if (mat_type)
   {
      // Count nnz in case of symmetric mode
      for (int i = 0; i < n_loc; i++)
      {
         for (HYPRE_Int j = Iptr[i]; j < Iptr[i + 1]; j++)
         {
            int ii = row_start + i + 1;
#if MFEM_HYPRE_VERSION >= 21600
            HYPRE_BigInt jj = Jptr[k] + 1;
#else
            HYPRE_Int jj = Jptr[k] + 1;
#endif
            if (ii >= jj) { nnz++; }
            k++;
         }
      }
   }
   else
   {
      nnz = csr_op->num_nonzeros;
   }
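
   // Note: when a symmetric matrix type is requested (id->sym != 0), MUMPS
   // expects only one triangle of the matrix; the ii >= jj test above keeps
   // the lower-triangular entries together with the diagonal.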
   int *I = new int[nnz];
   int *J = new int[nnz];

   // Fill in I and J arrays for COO format in 1-based indexing
   k = 0;
   real_t *data;
   if (mat_type)
   {
      MUMPS_INT8 l = 0;
      data = new real_t[nnz];
      for (int i = 0; i < n_loc; i++)
      {
         for (HYPRE_Int j = Iptr[i]; j < Iptr[i + 1]; j++)
         {
            int ii = row_start + i + 1;
#if MFEM_HYPRE_VERSION >= 21600
            HYPRE_BigInt jj = Jptr[k] + 1;
#else
            HYPRE_Int jj = Jptr[k] + 1;
#endif
            if (ii >= jj)
            {
               I[l] = ii;
               J[l] = internal::to_int(jj);
               data[l++] = csr_op->data[k];
            }
            k++;
         }
      }
   }
   else
   {
      for (int i = 0; i < n_loc; i++)
      {
         for (HYPRE_Int j = Iptr[i]; j < Iptr[i + 1]; j++)
         {
            I[k] = row_start + i + 1;
            J[k] = internal::to_int(Jptr[k] + 1);
            k++;
         }
      }
      data = csr_op->data;
   }

   // New MUMPS object or reuse the one from a previous matrix
   if (!id || !reorder_reuse)
   {
      if (id)
      {
         id->job = -2;
#ifdef MFEM_USE_SINGLE
         smumps_c(id);
#else
         dmumps_c(id);
#endif
         delete id;
      }
#ifdef MFEM_USE_SINGLE
      id = new SMUMPS_STRUC_C;
#else
      id = new DMUMPS_STRUC_C;
#endif
      id->sym = mat_type;

      // C to Fortran communicator
      id->comm_fortran = (MUMPS_INT)MPI_Comm_c2f(comm);

      // Host is involved in computation
      id->par = 1;

      // MUMPS init
      id->job = -1;
#ifdef MFEM_USE_SINGLE
      smumps_c(id);
#else
      dmumps_c(id);
#endif

      // Set MUMPS default parameters
      SetParameters();

      id->n = internal::to_int(parcsr_op->global_num_rows);
      id->nnz_loc = nnz;
      id->irn_loc = I;
      id->jcn_loc = J;
      id->a_loc = data;

      // MUMPS analysis
      id->job = 1;
#ifdef MFEM_USE_SINGLE
      smumps_c(id);
#else
      dmumps_c(id);
#endif
   }
   else
   {
      id->irn_loc = I;
      id->jcn_loc = J;
      id->a_loc = data;
   }

   // MUMPS factorization
   id->job = 2;
   {
      const int mem_relax_lim = 200;
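      // Per the MUMPS manual, INFOG(1) = -8 or -9 means the internal integer
      // or real workspace was too small; raising ICNTL(14), the workspace
      // relaxation percentage, and re-running usually resolves this.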
      while (true)
      {
#ifdef MFEM_USE_SINGLE
         smumps_c(id);
#else
         dmumps_c(id);
#endif
         if (id->MUMPS_INFOG(1) < 0)
         {
            if (id->MUMPS_INFOG(1) == -8 || id->MUMPS_INFOG(1) == -9)
            {
               id->MUMPS_ICNTL(14) += 20;
               MFEM_VERIFY(id->MUMPS_ICNTL(14) <= mem_relax_lim,
                           "Memory relaxation limit reached for MUMPS factorization");
               if (myid == 0 && print_level > 0)
               {
                  mfem::out << "Re-running MUMPS factorization with memory relaxation "
                            << id->MUMPS_ICNTL(14) << '\n';
               }
            }
            else
            {
               MFEM_ABORT("Error during MUMPS numerical factorization");
            }
         }
         else { break; }
      }
   }

   hypre_CSRMatrixDestroy(csr_op);
   delete [] I;
   delete [] J;
   if (mat_type) { delete [] data; }

   id->nrhs = -1; // Set up solution storage on first call to Mult
#if MFEM_MUMPS_VERSION >= 530
   delete [] irhs_loc;
   delete [] isol_loc;
   id->nloc_rhs = n_loc;
   id->lrhs_loc = n_loc;
   id->lsol_loc = id->MUMPS_INFO(23);
   irhs_loc = new int[id->lrhs_loc];
   isol_loc = new int[id->lsol_loc];
   for (int i = 0; i < n_loc; i++)
   {
      irhs_loc[i] = row_start + i + 1;
   }
   id->irhs_loc = irhs_loc;
   id->isol_loc = isol_loc;

   row_starts.SetSize(numProcs);
   MPI_Allgather(&row_start, 1, MPI_INT, row_starts, 1, MPI_INT, comm);
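   // row_starts now holds the first global row owned by each rank, and is
   // searched by GetRowRank() to locate the owner of a global row when the
   // solution is redistributed in RedistributeSol().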
#else
   id->lrhs = id->n;
   if (myid == 0)
   {
      delete [] recv_counts;
      delete [] displs;
      recv_counts = new int[numProcs];
      displs = new int[numProcs];
   }
   MPI_Gather(&n_loc, 1, MPI_INT, recv_counts, 1, MPI_INT, 0, comm);
   if (myid == 0)
   {
      displs[0] = 0;
      int s = 0;
      for (int k = 0; k < numProcs-1; k++)
      {
         s += recv_counts[k];
         displs[k+1] = s;
      }
   }
#endif
}

void MUMPSSolver::InitRhsSol(int nrhs) const
{
   if (id->nrhs != nrhs)
   {
#if MFEM_MUMPS_VERSION >= 530
      delete [] rhs_loc;
      delete [] sol_loc;
      rhs_loc = (nrhs > 1) ? new real_t[nrhs * id->lrhs_loc] : nullptr;
      sol_loc = new real_t[nrhs * id->lsol_loc];
      id->rhs_loc = rhs_loc;
      id->sol_loc = sol_loc;
#else
      if (myid == 0)
      {
         delete [] rhs_glob;
         rhs_glob = new real_t[nrhs * id->lrhs];
         id->rhs = rhs_glob;
      }
#endif
   }
   id->nrhs = nrhs;
}

void MUMPSSolver::Mult(const Vector &x, Vector &y) const
{
   Array<const Vector *> X(1);
   Array<Vector *> Y(1);
   X[0] = &x;
   Y[0] = &y;
   ArrayMult(X, Y);
}

void MUMPSSolver::ArrayMult(const Array<const Vector *> &X,
                            Array<Vector *> &Y) const
{
   MFEM_ASSERT(X.Size() == Y.Size(),
               "Number of columns mismatch in MUMPSSolver::Mult!");
   InitRhsSol(X.Size());
#if MFEM_MUMPS_VERSION >= 530
   if (id->nrhs == 1)
   {
      MFEM_ASSERT(X.Size() == 1 && X[0], "Missing Vector in MUMPSSolver::Mult!");
      X[0]->HostRead();
      id->rhs_loc = X[0]->GetData();
   }
   else
   {
      for (int i = 0; i < id->nrhs; i++)
      {
         MFEM_ASSERT(X[i], "Missing Vector in MUMPSSolver::Mult!");
         X[i]->HostRead();
         std::copy(X[i]->GetData(), X[i]->GetData() + X[i]->Size(),
                   id->rhs_loc + i * id->lrhs_loc);
      }
   }

   // MUMPS solve
   id->job = 3;
#ifdef MFEM_USE_SINGLE
   smumps_c(id);
#else
   dmumps_c(id);
#endif

   RedistributeSol(id->isol_loc, id->sol_loc, id->lsol_loc, Y);
#else
   for (int i = 0; i < id->nrhs; i++)
   {
      MFEM_ASSERT(X[i], "Missing Vector in MUMPSSolver::Mult!");
      X[i]->HostRead();
      MPI_Gatherv(X[i]->GetData(), X[i]->Size(), MPITypeMap<real_t>::mpi_type,
                  id->rhs + i * id->lrhs, recv_counts, displs,
                  MPITypeMap<real_t>::mpi_type, 0, comm);
   }

   // MUMPS solve
   id->job = 3;
#ifdef MFEM_USE_SINGLE
   smumps_c(id);
#else
   dmumps_c(id);
#endif

   for (int i = 0; i < id->nrhs; i++)
   {
      MFEM_ASSERT(Y[i], "Missing Vector in MUMPSSolver::Mult!");
      Y[i]->HostWrite();
      MPI_Scatterv(id->rhs + i * id->lrhs, recv_counts, displs,
                   MPITypeMap<real_t>::mpi_type,
                   Y[i]->GetData(), Y[i]->Size(), MPITypeMap<real_t>::mpi_type,
                   0, comm);
   }
#endif
}

void MUMPSSolver::MultTranspose(const Vector &x, Vector &y) const
{
   // Set flag for transpose solve
   id->MUMPS_ICNTL(9) = 0;
   Mult(x, y);

   // Reset the flag
   id->MUMPS_ICNTL(9) = 1;
}

void MUMPSSolver::ArrayMultTranspose(const Array<const Vector *> &X,
                                     Array<Vector *> &Y) const
{
   // Set flag for transpose solve
   id->MUMPS_ICNTL(9) = 0;
   ArrayMult(X, Y);

   // Reset the flag
   id->MUMPS_ICNTL(9) = 1;
}

void MUMPSSolver::SetPrintLevel(int print_lvl)
{
   print_level = print_lvl;
}

void MUMPSSolver::SetMatrixSymType(MatType mtype)
{
   mat_type = mtype;
}

void MUMPSSolver::SetReorderingStrategy(ReorderingStrategy method)
{
   reorder_method = method;
}

void MUMPSSolver::SetReorderingReuse(bool reuse)
{
   reorder_reuse = reuse;
}

#if MFEM_MUMPS_VERSION >= 510
void MUMPSSolver::SetBLRTol(double tol)
{
   blr_tol = tol;
}
#endif

void MUMPSSolver::SetParameters()
{
   // Output stream for error messages
   id->MUMPS_ICNTL(1) = 6;
   // Output stream for diagnostic printing local to each proc
   id->MUMPS_ICNTL(2) = 0;
   // Output stream for global info
   id->MUMPS_ICNTL(3) = 6;
   // Level of error printing
   id->MUMPS_ICNTL(4) = print_level;
   // Input matrix format (assembled)
   id->MUMPS_ICNTL(5) = 0;
   // Use A or A^T
   id->MUMPS_ICNTL(9) = 1;
   // Iterative refinement (disabled)
   id->MUMPS_ICNTL(10) = 0;
   // Error analysis-statistics (disabled)
   id->MUMPS_ICNTL(11) = 0;
   // Use of ScaLAPACK (parallel factorization on root)
   id->MUMPS_ICNTL(13) = 0;
   // Percentage increase of estimated workspace (default = 20%)
   id->MUMPS_ICNTL(14) = 20;
   // Number of OpenMP threads (default)
   id->MUMPS_ICNTL(16) = 0;
   // Matrix input format (distributed)
   id->MUMPS_ICNTL(18) = 3;
   // Schur complement (no Schur complement matrix returned)
   id->MUMPS_ICNTL(19) = 0;
#if MFEM_MUMPS_VERSION >= 530
   // Distributed RHS
   id->MUMPS_ICNTL(20) = 10;
   // Distributed Sol
   id->MUMPS_ICNTL(21) = 1;
#else
   // Centralized RHS
   id->MUMPS_ICNTL(20) = 0;
   // Centralized Sol
   id->MUMPS_ICNTL(21) = 0;
#endif
   // Out of core factorization and solve (disabled)
   id->MUMPS_ICNTL(22) = 0;
   // Max size of working memory (default = based on estimates)
   id->MUMPS_ICNTL(23) = 0;
   // Configure reordering
   switch (reorder_method)
   {
      case ReorderingStrategy::AUTOMATIC:
         id->MUMPS_ICNTL(28) = 0;
         id->MUMPS_ICNTL(7) = 7;
         id->MUMPS_ICNTL(29) = 0;
         break;
      case ReorderingStrategy::AMD:
         id->MUMPS_ICNTL(28) = 1;
         id->MUMPS_ICNTL(7) = 0;
         break;
      case ReorderingStrategy::AMF:
         id->MUMPS_ICNTL(28) = 1;
         id->MUMPS_ICNTL(7) = 2;
         break;
      case ReorderingStrategy::PORD:
         id->MUMPS_ICNTL(28) = 1;
         id->MUMPS_ICNTL(7) = 4;
         break;
      case ReorderingStrategy::METIS:
         id->MUMPS_ICNTL(28) = 1;
         id->MUMPS_ICNTL(7) = 5;
         break;
      case ReorderingStrategy::PARMETIS:
         id->MUMPS_ICNTL(28) = 2;
         id->MUMPS_ICNTL(29) = 2;
         break;
      case ReorderingStrategy::SCOTCH:
         id->MUMPS_ICNTL(28) = 1;
         id->MUMPS_ICNTL(7) = 3;
         break;
      case ReorderingStrategy::PTSCOTCH:
         id->MUMPS_ICNTL(28) = 2;
         id->MUMPS_ICNTL(29) = 1;
         break;
      default:
         break; // This should be unreachable
   }
   // Option to activate BLR factorization
#if MFEM_MUMPS_VERSION >= 510
   if (blr_tol > 0.0)
   {
      id->MUMPS_ICNTL(35) = 1;
      id->MUMPS_CNTL(7) = blr_tol;
   }
#endif
}

#if MFEM_MUMPS_VERSION >= 530
int MUMPSSolver::GetRowRank(int i, const Array<int> &row_starts_) const
{
   if (row_starts_.Size() == 1)
   {
      return 0;
   }
   auto up = std::upper_bound(row_starts_.begin(), row_starts_.end(), i);
   return std::distance(row_starts_.begin(), up) - 1;
}
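
// For example, with row_starts_ = {0, 10, 20} on 3 ranks, GetRowRank(15, ...)
// returns 1: std::upper_bound points at 20, and the preceding interval
// [10, 20) is owned by rank 1.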

void MUMPSSolver::RedistributeSol(const int *rmap, const real_t *x,
                                  const int lx_loc, Array<Vector *> &Y) const
{
   int *send_count = new int[numProcs]();
   for (int i = 0; i < lx_loc; i++)
   {
      int j = rmap[i] - 1;
      int row_rank = GetRowRank(j, row_starts);
      if (myid == row_rank) { continue; }
      send_count[row_rank]++;
   }

   int *recv_count = new int[numProcs];
   MPI_Alltoall(send_count, 1, MPI_INT, recv_count, 1, MPI_INT, comm);

   int *send_displ = new int[numProcs]; send_displ[0] = 0;
   int *recv_displ = new int[numProcs]; recv_displ[0] = 0;
   int sbuff_size = send_count[numProcs-1];
   int rbuff_size = recv_count[numProcs-1];
   for (int k = 0; k < numProcs - 1; k++)
   {
      send_displ[k + 1] = send_displ[k] + send_count[k];
      recv_displ[k + 1] = recv_displ[k] + recv_count[k];
      sbuff_size += send_count[k];
      rbuff_size += recv_count[k];
   }

   int *sendbuf_index = new int[sbuff_size];
   real_t *sendbuf_values = new real_t[sbuff_size];
   int *recvbuf_index = new int[rbuff_size];
   real_t *recvbuf_values = new real_t[rbuff_size];
   int *soffs = new int[numProcs]();

   for (int i = 0; i < lx_loc; i++)
   {
      int j = rmap[i] - 1;
      int row_rank = GetRowRank(j, row_starts);
      if (myid != row_rank)
      {
         int k = send_displ[row_rank] + soffs[row_rank];
         sendbuf_index[k] = j;
         soffs[row_rank]++;
      }
   }

   MPI_Alltoallv(sendbuf_index, send_count, send_displ, MPI_INT,
                 recvbuf_index, recv_count, recv_displ, MPI_INT, comm);

   for (int rhs = 0; rhs < Y.Size(); rhs++)
   {
      MFEM_ASSERT(Y[rhs], "Missing Vector in MUMPSSolver::Mult!");
      Y[rhs]->HostWrite();

      std::fill(soffs, soffs + numProcs, 0);
      for (int i = 0; i < lx_loc; i++)
      {
         int j = rmap[i] - 1;
         int row_rank = GetRowRank(j, row_starts);
         if (myid == row_rank)
         {
            int local_index = j - row_start;
            (*Y[rhs])(local_index) = x[rhs * lx_loc + i];
         }
         else
         {
            int k = send_displ[row_rank] + soffs[row_rank];
            sendbuf_values[k] = x[rhs * lx_loc + i];
            soffs[row_rank]++;
         }
      }

      MPI_Alltoallv(sendbuf_values, send_count, send_displ,
                    MPITypeMap<real_t>::mpi_type,
                    recvbuf_values, recv_count, recv_displ,
                    MPITypeMap<real_t>::mpi_type, comm);

      // Unpack recv buffer
      for (int i = 0; i < rbuff_size; i++)
      {
         int local_index = recvbuf_index[i] - row_start;
         (*Y[rhs])(local_index) = recvbuf_values[i];
      }
   }

   delete [] recvbuf_values;
   delete [] recvbuf_index;
   delete [] soffs;
   delete [] sendbuf_values;
   delete [] sendbuf_index;
   delete [] recv_displ;
   delete [] send_displ;
   delete [] recv_count;
   delete [] send_count;
}
#endif

} // namespace mfem

#endif // MFEM_USE_MPI
#endif // MFEM_USE_MUMPS