12 #include "../config/config.hpp"
17 #include "../fem/fem.hpp"
18 #include "../general/forall.hpp"
25 #ifdef MFEM_USE_SUNDIALS
26 #include <nvector/nvector_parallel.h>
34 template<
typename TargetT,
typename SourceT>
35 static TargetT *DuplicateAs(
const SourceT *array,
int size,
36 bool cplusplus =
true)
38 TargetT *target_array = cplusplus ? (TargetT*) Memory<TargetT>(size)
39 : mfem_hypre_TAlloc_host(TargetT, size);
40 for (
int i = 0; i < size; i++)
42 target_array[i] = array[i];
56 if (src_d_mt == MemoryType::DEFAULT)
58 src_d_mt = MemoryManager::GetDualMemoryType(src_h_mt);
65 inline void HypreParVector::_SetDataAndSize_()
67 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
68 #ifndef HYPRE_USING_CUDA
69 SetDataAndSize(hypre_VectorData(x_loc),
73 MemoryType mt = (hypre_VectorMemoryLocation(x_loc) == HYPRE_MEMORY_HOST
75 if (hypre_VectorData(x_loc) != NULL)
77 data.Wrap(hypre_VectorData(x_loc), size, mt,
false);
86 HypreParVector::HypreParVector(MPI_Comm comm,
HYPRE_BigInt glob_size,
89 x = hypre_ParVectorCreate(comm,glob_size,col);
90 hypre_ParVectorInitialize(x);
91 hypre_ParVectorSetPartitioningOwner(x,0);
93 hypre_ParVectorSetDataOwner(x,1);
94 hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
104 x = hypre_ParVectorCreate(comm,glob_size,col);
105 hypre_ParVectorSetDataOwner(x,1);
106 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
107 hypre_SeqVectorSetDataOwner(x_loc,0);
108 hypre_ParVectorSetPartitioningOwner(x,0);
110 hypre_VectorData(x_loc) = &tmp;
111 #ifdef HYPRE_USING_CUDA
112 hypre_VectorMemoryLocation(x_loc) =
113 is_device_ptr ? HYPRE_MEMORY_DEVICE : HYPRE_MEMORY_HOST;
119 hypre_ParVectorInitialize(x);
121 hypre_VectorData(x_loc) = data_;
128 x = hypre_ParVectorCreate(y.x -> comm, y.x -> global_size,
129 y.x -> partitioning);
130 hypre_ParVectorInitialize(x);
131 hypre_ParVectorSetPartitioningOwner(x,0);
132 hypre_ParVectorSetDataOwner(x,1);
133 hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
143 x = hypre_ParVectorInDomainOf(const_cast<HypreParMatrix&>(A));
147 x = hypre_ParVectorInRangeOf(const_cast<HypreParMatrix&>(A));
155 x = (hypre_ParVector *) y;
164 hypre_ParVectorInitialize(x);
165 hypre_ParVectorSetPartitioningOwner(x,0);
167 hypre_ParVectorSetDataOwner(x,1);
168 hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
175 if (own_ParVector) { hypre_ParVectorDestroy(x); }
179 own_ParVector = owner;
184 hypre_Vector *hv = hypre_ParVectorToVectorAll(*
this);
187 hypre_SeqVectorSetDataOwner(hv,0);
188 hypre_SeqVectorDestroy(hv);
201 if (size != y.
Size())
213 hypre_VectorData(hypre_ParVectorLocalVector(x)) = data_;
219 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
220 hypre_VectorData(x_loc) =
222 #ifdef HYPRE_USING_CUDA
223 hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
229 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
231 #ifdef HYPRE_USING_CUDA
232 hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
238 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
240 #ifdef HYPRE_USING_CUDA
241 hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
251 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
252 hypre_VectorData(x_loc) =
254 #ifdef HYPRE_USING_CUDA
255 hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
266 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
268 #ifdef HYPRE_USING_CUDA
269 hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
280 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
282 #ifdef HYPRE_USING_CUDA
283 hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
290 return hypre_ParVectorSetRandomValues(x,seed);
295 hypre_ParVectorPrint(x,fname);
302 hypre_ParVectorDestroy(x);
306 #ifdef MFEM_USE_SUNDIALS
313 #endif // MFEM_USE_SUNDIALS
318 return hypre_ParVectorInnerProd(*x, *y);
323 return hypre_ParVectorInnerProd(x, y);
332 double loc_norm = vec.
Norml1();
333 MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
337 double loc_norm = vec*vec;
338 MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
344 for (
int i = 0; i < vec.
Size(); i++)
346 sum += pow(fabs(vec(i)), p);
348 MPI_Allreduce(&sum, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
349 norm = pow(norm, 1.0/p);
354 MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_MAX, comm);
376 template <
typename T>
411 template <
typename SrcT,
typename DstT>
419 MFEM_FORALL(i, capacity, dst_p[i] = src_p[i];);
423 void HypreParMatrix::Init()
428 diagOwner = offdOwner = colMapOwner = -1;
440 hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
441 hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
442 const int num_rows =
NumRows();
445 diag->i =
const_cast<HYPRE_Int*
>(mem_diag.
I.
Read(mc, num_rows+1));
446 diag->j =
const_cast<HYPRE_Int*
>(mem_diag.
J.
Read(mc, diag_nnz));
447 diag->data =
const_cast<double*
>(mem_diag.
data.
Read(mc, diag_nnz));
448 offd->i =
const_cast<HYPRE_Int*
>(mem_offd.
I.
Read(mc, num_rows+1));
449 offd->j =
const_cast<HYPRE_Int*
>(mem_offd.
J.
Read(mc, offd_nnz));
450 offd->data =
const_cast<double*
>(mem_offd.
data.
Read(mc, offd_nnz));
451 #if MFEM_HYPRE_VERSION >= 21800
452 decltype(diag->memory_location) ml =
454 diag->memory_location = ml;
455 offd->memory_location = ml;
461 hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
462 hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
463 const int num_rows =
NumRows();
466 diag->i = mem_diag.
I.
ReadWrite(mc, num_rows+1);
469 offd->i = mem_offd.
I.
ReadWrite(mc, num_rows+1);
472 #if MFEM_HYPRE_VERSION >= 21800
473 decltype(diag->memory_location) ml =
475 diag->memory_location = ml;
476 offd->memory_location = ml;
480 void HypreParMatrix::Write(
MemoryClass mc,
bool set_diag,
bool set_offd)
482 hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
483 hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
496 #if MFEM_HYPRE_VERSION >= 21800
497 decltype(diag->memory_location) ml =
499 if (set_diag) { diag->memory_location = ml; }
500 if (set_offd) { offd->memory_location = ml; }
518 #if MFEM_HYPRE_VERSION >= 21800
519 MemoryType diag_mt = (A->diag->memory_location == HYPRE_MEMORY_HOST
521 MemoryType offd_mt = (A->offd->memory_location == HYPRE_MEMORY_HOST
527 diagOwner = HypreCsrToMem(A->diag, diag_mt,
false, mem_diag);
528 offdOwner = HypreCsrToMem(A->offd, offd_mt,
false, mem_offd);
534 hypre_CSRMatrix *hypre_csr,
549 const int num_rows = csr->
Height();
551 hypre_csr->i =
const_cast<HYPRE_Int*
>(mem_csr.
I.
Read(hypre_mc, num_rows+1));
552 hypre_csr->j =
const_cast<HYPRE_Int*
>(mem_csr.
J.
Read(hypre_mc, nnz));
553 hypre_csr->data =
const_cast<double*
>(mem_csr.
data.
Read(hypre_mc, nnz));
556 "invalid state: host ownership for I and J differ!");
561 signed char HypreParMatrix::CopyBoolCSR(Table *bool_csr,
562 MemoryIJData &mem_csr,
563 hypre_CSRMatrix *hypre_csr)
568 CopyMemory(bool_csr->GetIMemory(), mem_csr.I, hypre_mc,
false);
569 CopyMemory(bool_csr->GetJMemory(), mem_csr.J, hypre_mc,
false);
575 const int num_rows = bool_csr->Size();
576 const int nnz = bool_csr->Size_of_connections();
579 for (
int i = 0; i < nnz; i++)
583 hypre_csr->i =
const_cast<HYPRE_Int*
>(mem_csr.I.Read(hypre_mc, num_rows+1));
584 hypre_csr->j =
const_cast<HYPRE_Int*
>(mem_csr.J.Read(hypre_mc, nnz));
585 hypre_csr->data =
const_cast<double*
>(mem_csr.data.Read(hypre_mc, nnz));
587 MFEM_ASSERT(mem_csr.I.OwnsHostPtr() == mem_csr.J.OwnsHostPtr(),
588 "invalid state: host ownership for I and J differ!");
589 return (mem_csr.I.OwnsHostPtr() ? 1 : 0) +
590 (mem_csr.data.OwnsHostPtr() ? 2 : 0);
593 void HypreParMatrix::CopyCSR_J(hypre_CSRMatrix *hypre_csr,
int *J)
595 HYPRE_Int nnz = hypre_CSRMatrixNumNonzeros(hypre_csr);
596 #if MFEM_HYPRE_VERSION >= 21600
597 if (hypre_CSRMatrixBigJ(hypre_csr))
599 for (HYPRE_Int j = 0; j < nnz; j++)
606 for (HYPRE_Int j = 0; j < nnz; j++)
613 signed char HypreParMatrix::HypreCsrToMem(hypre_CSRMatrix *h_mat,
620 mem.I.Wrap(h_mat->i, nr1, h_mat_mt, own_ija);
621 mem.J.Wrap(h_mat->j, nnz, h_mat_mt, own_ija);
622 mem.data.Wrap(h_mat->data, nnz, h_mat_mt, own_ija);
628 h_mem.I.New(nr1, hypre_mt);
629 h_mem.I.CopyFrom(mem.I, nr1);
631 h_mem.J.New(nnz, hypre_mt);
632 h_mem.J.CopyFrom(mem.J, nnz);
634 h_mem.data.New(nnz, hypre_mt);
635 h_mem.data.CopyFrom(mem.data, nnz);
644 #if MFEM_HYPRE_VERSION < 21400
645 hypre_TFree(h_mat->i);
646 #elif MFEM_HYPRE_VERSION < 21800
647 hypre_TFree(h_mat->i, HYPRE_MEMORY_SHARED);
649 hypre_TFree(h_mat->i, h_mat->memory_location);
651 if (h_mat->owns_data)
653 #if MFEM_HYPRE_VERSION < 21400
654 hypre_TFree(h_mat->j);
655 hypre_TFree(h_mat->data);
656 #elif MFEM_HYPRE_VERSION < 21800
657 hypre_TFree(h_mat->j, HYPRE_MEMORY_SHARED);
658 hypre_TFree(h_mat->data, HYPRE_MEMORY_SHARED);
660 hypre_TFree(h_mat->j, h_mat->memory_location);
661 hypre_TFree(h_mat->data, h_mat->memory_location);
665 h_mat->i = mem.I.ReadWrite(hypre_mc, nr1);
666 h_mat->j = mem.J.ReadWrite(hypre_mc, nnz);
667 h_mat->data = mem.data.ReadWrite(hypre_mc, nnz);
668 h_mat->owns_data = 0;
669 #if MFEM_HYPRE_VERSION >= 21800
670 h_mat->memory_location = HYPRE_MEMORY_DEVICE;
680 :
Operator(diag->Height(), diag->Width())
683 A = hypre_ParCSRMatrixCreate(comm, glob_size, glob_size, row_starts,
685 hypre_ParCSRMatrixSetDataOwner(A,1);
686 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
687 hypre_ParCSRMatrixSetColStartsOwner(A,0);
689 hypre_CSRMatrixSetDataOwner(A->diag,0);
690 diagOwner = CopyCSR(diag, mem_diag, A->diag,
false);
691 hypre_CSRMatrixSetRownnz(A->diag);
693 hypre_CSRMatrixSetDataOwner(A->offd,1);
694 hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->
Height()+1);
702 hypre_ParCSRMatrixSetNumNonzeros(A);
705 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
709 CopyCSR_J(A->diag, diag->
GetJ());
712 hypre_MatvecCommPkgCreate(A);
722 :
Operator(diag->Height(), diag->Width())
725 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
726 row_starts, col_starts,
728 hypre_ParCSRMatrixSetDataOwner(A,1);
729 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
730 hypre_ParCSRMatrixSetColStartsOwner(A,0);
732 hypre_CSRMatrixSetDataOwner(A->diag,0);
733 diagOwner = CopyCSR(diag, mem_diag, A->diag,
false);
734 hypre_CSRMatrixSetRownnz(A->diag);
736 hypre_CSRMatrixSetDataOwner(A->offd,1);
737 hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->
Height()+1);
740 hypre_ParCSRMatrixSetNumNonzeros(A);
743 if (row_starts == col_starts)
745 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
748 CopyCSR_J(A->diag, diag->
GetJ());
752 hypre_MatvecCommPkgCreate(A);
765 :
Operator(diag->Height(), diag->Width())
768 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
769 row_starts, col_starts,
772 hypre_ParCSRMatrixSetDataOwner(A,1);
773 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
774 hypre_ParCSRMatrixSetColStartsOwner(A,0);
776 hypre_CSRMatrixSetDataOwner(A->diag,0);
777 diagOwner = CopyCSR(diag, mem_diag, A->diag, own_diag_offd);
778 if (own_diag_offd) {
delete diag; }
779 hypre_CSRMatrixSetRownnz(A->diag);
781 hypre_CSRMatrixSetDataOwner(A->offd,0);
782 offdOwner = CopyCSR(offd, mem_offd, A->offd, own_diag_offd);
783 if (own_diag_offd) {
delete offd; }
784 hypre_CSRMatrixSetRownnz(A->offd);
786 hypre_ParCSRMatrixColMapOffd(A) = cmap;
790 hypre_ParCSRMatrixSetNumNonzeros(A);
793 if (row_starts == col_starts)
795 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
798 CopyCSR_J(A->diag, diag->
GetJ());
802 hypre_MatvecCommPkgCreate(A);
811 HYPRE_Int *diag_i, HYPRE_Int *diag_j,
double *diag_data,
812 HYPRE_Int *offd_i, HYPRE_Int *offd_j,
double *offd_data,
817 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
818 row_starts, col_starts, offd_num_cols, 0, 0);
819 hypre_ParCSRMatrixSetDataOwner(A,1);
820 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
821 hypre_ParCSRMatrixSetColStartsOwner(A,0);
823 HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A->diag);
825 hypre_CSRMatrixSetDataOwner(A->diag, hypre_arrays);
826 hypre_CSRMatrixI(A->diag) = diag_i;
827 hypre_CSRMatrixJ(A->diag) = diag_j;
828 hypre_CSRMatrixData(A->diag) = diag_data;
829 hypre_CSRMatrixNumNonzeros(A->diag) = diag_i[local_num_rows];
830 #ifdef HYPRE_USING_CUDA
831 hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
833 hypre_CSRMatrixSetRownnz(A->diag);
835 hypre_CSRMatrixSetDataOwner(A->offd, hypre_arrays);
836 hypre_CSRMatrixI(A->offd) = offd_i;
837 hypre_CSRMatrixJ(A->offd) = offd_j;
838 hypre_CSRMatrixData(A->offd) = offd_data;
839 hypre_CSRMatrixNumNonzeros(A->offd) = offd_i[local_num_rows];
840 #ifdef HYPRE_USING_CUDA
841 hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
843 hypre_CSRMatrixSetRownnz(A->offd);
845 hypre_ParCSRMatrixColMapOffd(A) = offd_col_map;
847 colMapOwner = hypre_arrays ? -1 : 1;
849 hypre_ParCSRMatrixSetNumNonzeros(A);
852 if (row_starts == col_starts)
854 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
857 hypre_MatvecCommPkgCreate(A);
865 diagOwner = HypreCsrToMem(A->diag, host_mt,
true, mem_diag);
866 offdOwner = HypreCsrToMem(A->offd, host_mt,
true, mem_offd);
871 diagOwner = HypreCsrToMem(A->diag, host_mt,
false, mem_diag);
872 offdOwner = HypreCsrToMem(A->offd, host_mt,
false, mem_offd);
883 MFEM_ASSERT(sm_a != NULL,
"invalid input");
884 MFEM_VERIFY(!HYPRE_AssumedPartitionCheck(),
885 "this method can not be used with assumed partition");
889 hypre_CSRMatrix *csr_a;
890 csr_a = hypre_CSRMatrixCreate(sm_a ->
Height(), sm_a ->
Width(),
891 sm_a -> NumNonZeroElems());
893 hypre_CSRMatrixSetDataOwner(csr_a,0);
895 CopyCSR(sm_a, mem_a, csr_a,
false);
896 hypre_CSRMatrixSetRownnz(csr_a);
900 hypre_ParCSRMatrix *new_A =
901 hypre_CSRMatrixToParCSRMatrix(comm, csr_a, row_starts, col_starts);
907 hypre_CSRMatrixI(csr_a) = NULL;
908 hypre_CSRMatrixDestroy(csr_a);
911 if (row_starts == col_starts)
913 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(new_A));
916 hypre_MatvecCommPkgCreate(A);
931 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
932 row_starts, col_starts, 0, nnz, 0);
933 hypre_ParCSRMatrixSetDataOwner(A,1);
934 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
935 hypre_ParCSRMatrixSetColStartsOwner(A,0);
937 hypre_CSRMatrixSetDataOwner(A->diag,0);
938 diagOwner = CopyBoolCSR(diag, mem_diag, A->diag);
939 hypre_CSRMatrixSetRownnz(A->diag);
941 hypre_CSRMatrixSetDataOwner(A->offd,1);
942 hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->
Size()+1);
945 hypre_ParCSRMatrixSetNumNonzeros(A);
948 if (row_starts == col_starts)
950 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
953 CopyCSR_J(A->diag, diag->
GetJ());
957 hypre_MatvecCommPkgCreate(A);
967 HYPRE_Int *i_diag, HYPRE_Int *j_diag,
968 HYPRE_Int *i_offd, HYPRE_Int *j_offd,
971 HYPRE_Int diag_nnz, offd_nnz;
974 if (HYPRE_AssumedPartitionCheck())
976 diag_nnz = i_diag[row[1]-row[0]];
977 offd_nnz = i_offd[row[1]-row[0]];
979 A = hypre_ParCSRMatrixCreate(comm, row[2], col[2], row, col,
980 cmap_size, diag_nnz, offd_nnz);
984 diag_nnz = i_diag[row[
id+1]-row[id]];
985 offd_nnz = i_offd[row[
id+1]-row[id]];
987 A = hypre_ParCSRMatrixCreate(comm, row[np], col[np], row, col,
988 cmap_size, diag_nnz, offd_nnz);
991 hypre_ParCSRMatrixSetDataOwner(A,1);
992 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
993 hypre_ParCSRMatrixSetColStartsOwner(A,0);
996 for (HYPRE_Int i = 0; i < diag_nnz; i++)
998 mem_diag.
data[i] = 1.0;
1002 for (HYPRE_Int i = 0; i < offd_nnz; i++)
1004 mem_offd.
data[i] = 1.0;
1007 hypre_CSRMatrixSetDataOwner(A->diag,0);
1008 hypre_CSRMatrixI(A->diag) = i_diag;
1009 hypre_CSRMatrixJ(A->diag) = j_diag;
1010 hypre_CSRMatrixData(A->diag) = mem_diag.
data;
1011 #ifdef HYPRE_USING_CUDA
1012 hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
1014 hypre_CSRMatrixSetRownnz(A->diag);
1016 hypre_CSRMatrixSetDataOwner(A->offd,0);
1017 hypre_CSRMatrixI(A->offd) = i_offd;
1018 hypre_CSRMatrixJ(A->offd) = j_offd;
1019 hypre_CSRMatrixData(A->offd) = mem_offd.
data;
1020 #ifdef HYPRE_USING_CUDA
1021 hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
1023 hypre_CSRMatrixSetRownnz(A->offd);
1025 hypre_ParCSRMatrixColMapOffd(A) = cmap;
1029 hypre_ParCSRMatrixSetNumNonzeros(A);
1034 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
1037 hypre_MatvecCommPkgCreate(A);
1043 diagOwner = HypreCsrToMem(A->diag, host_mt,
true, mem_diag);
1044 offdOwner = HypreCsrToMem(A->offd, host_mt,
true, mem_offd);
1064 if (HYPRE_AssumedPartitionCheck())
1067 my_col_start = cols[0];
1068 my_col_end = cols[1];
1073 MPI_Comm_rank(comm, &myid);
1074 MPI_Comm_size(comm, &part_size);
1076 my_col_start = cols[myid];
1077 my_col_end = cols[myid+1];
1084 row_starts = col_starts = mfem_hypre_TAlloc_host(
HYPRE_BigInt, part_size);
1085 for (
int i = 0; i < part_size; i++)
1087 row_starts[i] = rows[i];
1092 row_starts = mfem_hypre_TAlloc_host(
HYPRE_BigInt, part_size);
1093 col_starts = mfem_hypre_TAlloc_host(
HYPRE_BigInt, part_size);
1094 for (
int i = 0; i < part_size; i++)
1096 row_starts[i] = rows[i];
1097 col_starts[i] = cols[i];
1103 HYPRE_Int diag_nnz = 0, offd_nnz = 0, offd_num_cols = 0;
1104 map<HYPRE_BigInt, HYPRE_Int> offd_map;
1105 for (HYPRE_Int j = 0, loc_nnz = I[nrows]; j < loc_nnz; j++)
1108 if (my_col_start <= glob_col && glob_col < my_col_end)
1114 offd_map.insert(pair<const HYPRE_BigInt, HYPRE_Int>(glob_col, -1));
1119 for (
auto it = offd_map.begin(); it != offd_map.end(); ++it)
1121 it->second = offd_num_cols++;
1125 A = hypre_ParCSRMatrixCreate(comm, glob_nrows, glob_ncols,
1126 row_starts, col_starts, offd_num_cols,
1127 diag_nnz, offd_nnz);
1128 hypre_ParCSRMatrixInitialize(A);
1134 HYPRE_Int *diag_i, *diag_j, *offd_i, *offd_j;
1136 double *diag_data, *offd_data;
1137 diag_i = A->diag->i;
1138 diag_j = A->diag->j;
1139 diag_data = A->diag->data;
1140 offd_i = A->offd->i;
1141 offd_j = A->offd->j;
1142 offd_data = A->offd->data;
1143 offd_col_map = A->col_map_offd;
1145 diag_nnz = offd_nnz = 0;
1146 for (HYPRE_Int i = 0, j = 0; i < nrows; i++)
1148 diag_i[i] = diag_nnz;
1149 offd_i[i] = offd_nnz;
1150 for (HYPRE_Int j_end = I[i+1]; j < j_end; j++)
1153 if (my_col_start <= glob_col && glob_col < my_col_end)
1155 diag_j[diag_nnz] = glob_col - my_col_start;
1156 diag_data[diag_nnz] = data[j];
1161 offd_j[offd_nnz] = offd_map[glob_col];
1162 offd_data[offd_nnz] = data[j];
1167 diag_i[nrows] = diag_nnz;
1168 offd_i[nrows] = offd_nnz;
1169 for (
auto it = offd_map.begin(); it != offd_map.end(); ++it)
1171 offd_col_map[it->second] = it->first;
1174 hypre_ParCSRMatrixSetNumNonzeros(A);
1176 if (row_starts == col_starts)
1178 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
1180 hypre_MatvecCommPkgCreate(A);
1190 hypre_ParCSRMatrix *Ph =
static_cast<hypre_ParCSRMatrix *
>(P);
1195 A = hypre_ParCSRMatrixCompleteClone(Ph);
1197 hypre_ParCSRMatrixCopy(Ph, A, 1);
1205 hypre_ParCSRMatrixSetNumNonzeros(A);
1207 hypre_MatvecCommPkgCreate(A);
1236 MFEM_ASSERT(diagOwner < 0 && offdOwner < 0 && colMapOwner == -1,
"");
1237 MFEM_ASSERT(diagOwner == offdOwner,
"");
1238 MFEM_ASSERT(ParCSROwner,
"");
1239 hypre_ParCSRMatrix *R = A;
1240 #ifdef HYPRE_USING_CUDA
1244 ParCSROwner =
false;
1261 colMapOwner = colmap;
1266 if (!A || hypre_ParCSRMatrixOwnsRowStarts(A) ||
1267 (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
1268 hypre_ParCSRMatrixOwnsColStarts(A)))
1273 int row_starts_size;
1274 if (HYPRE_AssumedPartitionCheck())
1276 row_starts_size = 2;
1280 MPI_Comm_size(hypre_ParCSRMatrixComm(A), &row_starts_size);
1284 HYPRE_BigInt *old_row_starts = hypre_ParCSRMatrixRowStarts(A);
1287 for (
int i = 0; i < row_starts_size; i++)
1289 new_row_starts[i] = old_row_starts[i];
1292 hypre_ParCSRMatrixRowStarts(A) = new_row_starts;
1293 hypre_ParCSRMatrixOwnsRowStarts(A) = 1;
1295 if (hypre_ParCSRMatrixColStarts(A) == old_row_starts)
1297 hypre_ParCSRMatrixColStarts(A) = new_row_starts;
1298 hypre_ParCSRMatrixOwnsColStarts(A) = 0;
1304 if (!A || hypre_ParCSRMatrixOwnsColStarts(A) ||
1305 (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
1306 hypre_ParCSRMatrixOwnsRowStarts(A)))
1311 int col_starts_size;
1312 if (HYPRE_AssumedPartitionCheck())
1314 col_starts_size = 2;
1318 MPI_Comm_size(hypre_ParCSRMatrixComm(A), &col_starts_size);
1322 HYPRE_BigInt *old_col_starts = hypre_ParCSRMatrixColStarts(A);
1325 for (
int i = 0; i < col_starts_size; i++)
1327 new_col_starts[i] = old_col_starts[i];
1330 hypre_ParCSRMatrixColStarts(A) = new_col_starts;
1332 if (hypre_ParCSRMatrixRowStarts(A) == old_col_starts)
1334 hypre_ParCSRMatrixRowStarts(A) = new_col_starts;
1335 hypre_ParCSRMatrixOwnsRowStarts(A) = 1;
1336 hypre_ParCSRMatrixOwnsColStarts(A) = 0;
1340 hypre_ParCSRMatrixOwnsColStarts(A) = 1;
1346 const int size =
Height();
1348 #ifdef HYPRE_USING_CUDA
1351 MFEM_ASSERT(A->diag->memory_location == HYPRE_MEMORY_DEVICE,
"");
1352 double *d_diag = diag.
Write();
1353 const HYPRE_Int *A_diag_i = A->diag->i;
1354 const double *A_diag_d = A->diag->data;
1355 MFEM_FORALL(i, size, d_diag[i] = A_diag_d[A_diag_i[i]];);
1362 for (
int j = 0; j < size; j++)
1364 diag(j) = A->diag->data[A->diag->i[j]];
1365 MFEM_ASSERT(A->diag->j[A->diag->i[j]] == j,
1366 "the first entry in each row must be the diagonal one");
1372 static void MakeSparseMatrixWrapper(
int nrows,
int ncols,
1373 HYPRE_Int *I, HYPRE_Int *J,
double *data,
1376 #ifndef HYPRE_BIGINT
1377 SparseMatrix tmp(I, J, data, nrows, ncols,
false,
false,
false);
1380 for (
int i = 0; i <= nrows; i++)
1384 const int nnz = mI[nrows];
1385 int *mJ = Memory<int>(nnz);
1386 for (
int j = 0; j < nnz; j++)
1390 SparseMatrix tmp(mI, mJ, data, nrows, ncols,
true,
false,
false);
1395 static void MakeWrapper(
const hypre_CSRMatrix *mat,
1396 const MemoryIJData &mem,
1397 SparseMatrix &wrapper)
1405 MakeSparseMatrixWrapper(nrows, ncols,
1406 const_cast<HYPRE_Int*>(I),
1407 const_cast<HYPRE_Int*>(J),
1408 const_cast<double*>(data),
1414 MakeWrapper(A->diag, mem_diag, diag);
1419 MakeWrapper(A->offd, mem_offd, offd);
1420 cmap = A->col_map_offd;
1426 hypre_CSRMatrix *hypre_merged = hypre_MergeDiagAndOffd(A);
1430 #if MFEM_HYPRE_VERSION >= 21600
1431 hypre_CSRMatrixBigJtoJ(hypre_merged);
1433 MakeSparseMatrixWrapper(
1442 merged = merged_tmp;
1444 hypre_CSRMatrixDestroy(hypre_merged);
1448 bool interleaved_rows,
1449 bool interleaved_cols)
const
1454 hypre_ParCSRMatrix **hypre_blocks =
new hypre_ParCSRMatrix*[nr * nc];
1456 internal::hypre_ParCSRMatrixSplit(A, nr, nc, hypre_blocks,
1457 interleaved_rows, interleaved_cols);
1460 for (
int i = 0; i < nr; i++)
1462 for (
int j = 0; j < nc; j++)
1468 delete [] hypre_blocks;
1473 hypre_ParCSRMatrix * At;
1474 hypre_ParCSRMatrixTranspose(A, &At, 1);
1475 hypre_ParCSRMatrixSetNumNonzeros(At);
1477 hypre_MatvecCommPkgCreate(At);
1483 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(At));
1489 #if MFEM_HYPRE_VERSION >= 21800
1491 double threshold)
const
1495 hypre_MatvecCommPkgCreate(A);
1498 hypre_ParCSRMatrix *submat;
1501 int local_num_vars = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
1506 for (
int j=0; j<indices.
Size(); j++)
1508 if (indices[j] > local_num_vars)
1510 MFEM_WARNING(
"WARNING : " << indices[j] <<
" > " << local_num_vars);
1512 CF_marker[indices[j]] = -1;
1517 hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
1518 CF_marker, NULL, &cpts_global);
1521 hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_global,
1522 "FF", &submat, threshold);
1524 mfem_hypre_TFree(cpts_global);
1530 double a,
double b)
const
1534 return hypre_ParCSRMatrixMatvec(a, A, x, b, y);
1539 MFEM_ASSERT(x.
Size() ==
Width(),
"invalid x.Size() = " << x.
Size()
1540 <<
", expected size = " <<
Width());
1541 MFEM_ASSERT(y.
Size() ==
Height(),
"invalid y.Size() = " << y.
Size()
1542 <<
", expected size = " <<
Height());
1589 hypre_ParCSRMatrixMatvec(a, A, *X, b, *Y);
1591 if (!yshallow) { y = *Y; }
1597 MFEM_ASSERT(x.
Size() ==
Height(),
"invalid x.Size() = " << x.
Size()
1598 <<
", expected size = " <<
Height());
1599 MFEM_ASSERT(y.
Size() ==
Width(),
"invalid y.Size() = " << y.
Size()
1600 <<
", expected size = " <<
Width());
1651 hypre_ParCSRMatrixMatvecT(a, A, *Y, b, *X);
1653 if (!yshallow) { y = *X; }
1657 double a,
double b)
const
1659 return hypre_ParCSRMatrixMatvec(a, A, (hypre_ParVector *) x, b,
1660 (hypre_ParVector *) y);
1664 double a,
double b)
const
1668 return hypre_ParCSRMatrixMatvecT(a, A, x, b, y);
1674 MFEM_ASSERT(x.
Size() ==
Width(),
"invalid x.Size() = " << x.
Size()
1675 <<
", expected size = " <<
Width());
1676 MFEM_ASSERT(y.
Size() ==
Height(),
"invalid y.Size() = " << y.
Size()
1677 <<
", expected size = " <<
Height());
1683 internal::hypre_ParCSRMatrixAbsMatvec(A, a, const_cast<double*>(x_data),
1691 MFEM_ASSERT(x.
Size() ==
Height(),
"invalid x.Size() = " << x.
Size()
1692 <<
", expected size = " <<
Height());
1693 MFEM_ASSERT(y.
Size() ==
Width(),
"invalid y.Size() = " << y.
Size()
1694 <<
", expected size = " <<
Width());
1700 internal::hypre_ParCSRMatrixAbsMatvecT(A, a, const_cast<double*>(x_data),
1708 const bool assumed_partition = HYPRE_AssumedPartitionCheck();
1709 const bool row_starts_given = (row_starts != NULL);
1710 if (!row_starts_given)
1712 row_starts = hypre_ParCSRMatrixRowStarts(A);
1713 MFEM_VERIFY(D.
Height() == hypre_CSRMatrixNumRows(A->diag),
1714 "the matrix D is NOT compatible with the row starts of"
1715 " this HypreParMatrix, row_starts must be given.");
1720 if (assumed_partition)
1726 MPI_Comm_rank(
GetComm(), &offset);
1728 int local_num_rows = row_starts[offset+1]-row_starts[offset];
1729 MFEM_VERIFY(local_num_rows == D.
Height(),
"the number of rows in D is "
1730 " not compatible with the given row_starts");
1737 if (assumed_partition)
1740 if (row_starts_given)
1742 global_num_rows = row_starts[2];
1749 global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
1754 MPI_Comm_size(
GetComm(), &part_size);
1755 global_num_rows = row_starts[part_size];
1759 HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
1765 GetOffd(A_offd, col_map_offd);
1775 DuplicateAs<HYPRE_BigInt>(row_starts, part_size,
false);
1777 (row_starts == col_starts ? new_row_starts :
1778 DuplicateAs<HYPRE_BigInt>(col_starts, part_size,
false));
1780 DuplicateAs<HYPRE_BigInt>(col_map_offd, A_offd.
Width());
1784 const bool own_diag_offd =
true;
1789 global_num_rows, hypre_ParCSRMatrixGlobalNumCols(A),
1790 new_row_starts, new_col_starts,
1791 DA_diag, DA_offd, new_col_map_offd,
1795 hypre_ParCSRMatrixSetRowStartsOwner(DA->A, 1);
1796 hypre_ParCSRMatrixSetColStartsOwner(DA->A, 1);
1797 DA->colMapOwner = 1;
1804 if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
1809 if (hypre_CSRMatrixNumRows(A->diag) != diag.
Size())
1811 mfem_error(
"Note the Vector diag is not of compatible dimensions with A\n");
1818 double *Adiag_data = hypre_CSRMatrixData(A->diag);
1819 HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
1821 double *Aoffd_data = hypre_CSRMatrixData(A->offd);
1822 HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
1825 for (
int i(0); i < size; ++i)
1828 for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj)
1830 Adiag_data[jj] *= val;
1832 for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj)
1834 Aoffd_data[jj] *= val;
1843 if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
1848 if (hypre_CSRMatrixNumRows(A->diag) != diag.
Size())
1850 mfem_error(
"Note the Vector diag is not of compatible dimensions with A\n");
1857 double *Adiag_data = hypre_CSRMatrixData(A->diag);
1858 HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
1861 double *Aoffd_data = hypre_CSRMatrixData(A->offd);
1862 HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
1865 for (
int i(0); i < size; ++i)
1870 mfem_error(
"HypreParMatrix::InvDiagScale : Division by 0");
1874 for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj)
1876 Adiag_data[jj] *= val;
1878 for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj)
1880 Aoffd_data[jj] *= val;
1889 if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
1896 HYPRE_Int size=hypre_CSRMatrixNumRows(A->diag);
1899 double *Adiag_data = hypre_CSRMatrixData(A->diag);
1900 HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
1901 for (jj = 0; jj < Adiag_i[size]; ++jj)
1903 Adiag_data[jj] *=
s;
1906 double *Aoffd_data = hypre_CSRMatrixData(A->offd);
1907 HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
1908 for (jj = 0; jj < Aoffd_i[size]; ++jj)
1910 Aoffd_data[jj] *=
s;
1916 static void get_sorted_rows_cols(
const Array<int> &rows_cols,
1922 for (
int i = 0; i < rows_cols.
Size(); i++)
1924 hypre_sorted[i] = rows_cols[i];
1925 if (i && rows_cols[i-1] > rows_cols[i]) { sorted =
false; }
1927 if (!sorted) { hypre_sorted.
Sort(); }
1935 hypre_CSRMatrix * csr_A;
1936 hypre_CSRMatrix * csr_A_wo_z;
1937 hypre_ParCSRMatrix * parcsr_A_ptr;
1942 comm = hypre_ParCSRMatrixComm(A);
1944 ierr += hypre_ParCSRMatrixGetLocalRange(A,
1945 &row_start,&row_end,
1946 &col_start,&col_end );
1948 row_starts = hypre_ParCSRMatrixRowStarts(A);
1949 col_starts = hypre_ParCSRMatrixColStarts(A);
1951 bool old_owns_row = hypre_ParCSRMatrixOwnsRowStarts(A);
1952 bool old_owns_col = hypre_ParCSRMatrixOwnsColStarts(A);
1953 HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
1954 HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
1955 parcsr_A_ptr = hypre_ParCSRMatrixCreate(comm, global_num_rows,
1957 row_starts, col_starts,
1959 hypre_ParCSRMatrixOwnsRowStarts(parcsr_A_ptr) = old_owns_row;
1960 hypre_ParCSRMatrixOwnsColStarts(parcsr_A_ptr) = old_owns_col;
1961 hypre_ParCSRMatrixOwnsRowStarts(A) = 0;
1962 hypre_ParCSRMatrixOwnsColStarts(A) = 0;
1964 csr_A = hypre_MergeDiagAndOffd(A);
1970 csr_A_wo_z = hypre_CSRMatrixDeleteZeros(csr_A,threshold);
1974 if (csr_A_wo_z == NULL)
1980 ierr += hypre_CSRMatrixDestroy(csr_A);
1986 ierr += GenerateDiagAndOffd(csr_A_wo_z,parcsr_A_ptr,
1989 ierr += hypre_CSRMatrixDestroy(csr_A_wo_z);
1991 MFEM_VERIFY(ierr == 0,
"");
1995 hypre_ParCSRMatrixSetNumNonzeros(A);
1997 if (row_starts == col_starts)
1999 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
2001 hypre_MatvecCommPkgCreate(A);
2008 HYPRE_Int
err = 0, old_err = hypre_error_flag;
2009 hypre_error_flag = 0;
2011 #if MFEM_HYPRE_VERSION < 21400
2013 double threshold = 0.0;
2016 HYPRE_Int *diag_I = A->diag->i, *offd_I = A->offd->i;
2017 double *diag_d = A->diag->data, *offd_d = A->offd->data;
2018 HYPRE_Int local_num_rows = A->diag->num_rows;
2019 double max_l2_row_norm = 0.0;
2021 for (HYPRE_Int r = 0; r < local_num_rows; r++)
2024 double l2_row_norm = row.
Norml2();
2026 l2_row_norm = std::hypot(l2_row_norm, row.
Norml2());
2027 max_l2_row_norm = std::max(max_l2_row_norm, l2_row_norm);
2029 double loc_max_l2_row_norm = max_l2_row_norm;
2030 MPI_Allreduce(&loc_max_l2_row_norm, &max_l2_row_norm, 1, MPI_DOUBLE,
2032 threshold = tol * max_l2_row_norm;
2037 #elif MFEM_HYPRE_VERSION < 21800
2039 err = hypre_ParCSRMatrixDropSmallEntries(A, tol);
2043 err = hypre_ParCSRMatrixDropSmallEntries(A, tol, 2);
2047 MFEM_VERIFY(!err,
"error encountered: error code = " << err);
2049 hypre_error_flag = old_err;
2057 get_sorted_rows_cols(rows_cols, rc_sorted);
2059 internal::hypre_ParCSRMatrixEliminateAXB(
2066 get_sorted_rows_cols(rows_cols, rc_sorted);
2068 hypre_ParCSRMatrix* Ae;
2070 internal::hypre_ParCSRMatrixEliminateAAe(
2080 get_sorted_rows_cols(cols, rc_sorted);
2082 hypre_ParCSRMatrix* Ae;
2084 internal::hypre_ParCSRMatrixEliminateAAe(
2085 A, &Ae, rc_sorted.
Size(), rc_sorted.
GetData(), 1);
2093 if (rows.
Size() > 0)
2096 get_sorted_rows_cols(rows, r_sorted);
2098 internal::hypre_ParCSRMatrixEliminateRows(A, r_sorted.
Size(),
2109 Ae.
Mult(-1.0, X, 1.0, B);
2113 if (ess_dof_list.
Size() == 0) {
return; }
2116 hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
2117 double *data = hypre_CSRMatrixData(A_diag);
2118 HYPRE_Int *I = hypre_CSRMatrixI(A_diag);
2120 HYPRE_Int *J = hypre_CSRMatrixJ(A_diag);
2121 hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
2122 HYPRE_Int *I_offd = hypre_CSRMatrixI(A_offd);
2123 double *data_offd = hypre_CSRMatrixData(A_offd);
2130 for (
int i = 0; i < ess_dof_list.
Size(); i++)
2132 int r = ess_dof_list[i];
2133 B(r) = data[I[r]] * X(r);
2135 MFEM_ASSERT(I[r] < I[r+1],
"empty row found!");
2141 MFEM_ABORT(
"the diagonal entry must be the first entry in the row!");
2143 for (
int j = I[r]+1; j < I[r+1]; j++)
2147 MFEM_ABORT(
"all off-diagonal entries must be zero!");
2150 for (
int j = I_offd[r]; j < I_offd[r+1]; j++)
2152 if (data_offd[j] != 0.0)
2154 MFEM_ABORT(
"all off-diagonal entries must be zero!");
2163 HYPRE_Int offj)
const
2166 hypre_ParCSRMatrixPrintIJ(A,offi,offj,fname);
2170 void HypreParMatrix::Read(MPI_Comm comm,
const char *fname)
2175 HYPRE_Int base_i, base_j;
2176 hypre_ParCSRMatrixReadIJ(comm, fname, &base_i, &base_j, &A);
2177 hypre_ParCSRMatrixSetNumNonzeros(A);
2179 hypre_MatvecCommPkgCreate(A);
2190 HYPRE_IJMatrix A_ij;
2191 HYPRE_IJMatrixRead(fname, comm, 5555, &A_ij);
2193 HYPRE_ParCSRMatrix A_parcsr;
2194 HYPRE_IJMatrixGetObject(A_ij, (
void**) &A_parcsr);
2196 A = (hypre_ParCSRMatrix*)A_parcsr;
2198 hypre_ParCSRMatrixSetNumNonzeros(A);
2200 hypre_MatvecCommPkgCreate(A);
2208 hypre_ParCSRCommPkg *comm_pkg = A->comm_pkg;
2209 MPI_Comm comm = A->comm;
2211 const int tag = 46801;
2213 MPI_Comm_rank(comm, &myid);
2214 MPI_Comm_size(comm, &nproc);
2218 MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag, comm, MPI_STATUS_IGNORE);
2222 out <<
"\nHypreParMatrix: hypre_ParCSRCommPkg:\n";
2224 out <<
"Rank " << myid <<
":\n"
2225 " number of sends = " << comm_pkg->num_sends <<
2226 " (" <<
sizeof(double)*comm_pkg->send_map_starts[comm_pkg->num_sends] <<
2228 " number of recvs = " << comm_pkg->num_recvs <<
2229 " (" <<
sizeof(
double)*comm_pkg->recv_vec_starts[comm_pkg->num_recvs] <<
2231 if (myid != nproc-1)
2234 MPI_Send(&c, 1, MPI_CHAR, myid+1, tag, comm);
2247 out <<
"global number of rows : " << A->global_num_rows <<
'\n'
2248 <<
"global number of columns : " << A->global_num_cols <<
'\n'
2249 <<
"first row index : " << A->first_row_index <<
'\n'
2250 <<
" last row index : " << A->last_row_index <<
'\n'
2251 <<
"first col diag : " << A->first_col_diag <<
'\n'
2252 <<
" last col diag : " << A->last_col_diag <<
'\n'
2253 <<
"number of nonzeros : " << A->num_nonzeros <<
'\n';
2255 hypre_CSRMatrix *csr = A->diag;
2256 const char *csr_name =
"diag";
2257 for (
int m = 0; m < 2; m++)
2259 auto csr_nnz = csr->i[csr->num_rows];
2260 out << csr_name <<
" num rows : " << csr->num_rows <<
'\n'
2261 << csr_name <<
" num cols : " << csr->num_cols <<
'\n'
2262 << csr_name <<
" num nnz : " << csr->num_nonzeros <<
'\n'
2263 << csr_name <<
" i last : " << csr_nnz
2264 << (csr_nnz == csr->num_nonzeros ?
2265 " [good]" :
" [** BAD **]") <<
'\n';
2267 out << csr_name <<
" i hash : " << hf.
GetHash() <<
'\n';
2268 out << csr_name <<
" j hash : ";
2269 if (csr->j ==
nullptr)
2278 #if MFEM_HYPRE_VERSION >= 21600
2279 out << csr_name <<
" big j hash : ";
2280 if (csr->big_j ==
nullptr)
2290 out << csr_name <<
" data hash : ";
2291 if (csr->data ==
nullptr)
2305 hf.
AppendInts(A->col_map_offd, A->offd->num_cols);
2306 out <<
"col map offd hash : " << hf.
GetHash() <<
'\n';
2311 HYPRE_BigInt *A_col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
2312 int size = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
2316 void HypreParMatrix::Destroy()
2318 if ( X != NULL ) {
delete X; }
2319 if ( Y != NULL ) {
delete Y; }
2323 if (A == NULL) {
return; }
2325 #ifdef HYPRE_USING_CUDA
2326 if (ParCSROwner && (diagOwner < 0 || offdOwner < 0))
2334 MFEM_VERIFY(!(diagOwner < 0 && offdOwner < 0) || diagOwner == offdOwner,
2337 MemoryClass mc = (diagOwner == -1 || offdOwner == -1) ?
2339 Write(mc, diagOwner < 0, offdOwner <0);
2348 hypre_CSRMatrixI(A->diag) = NULL;
2349 hypre_CSRMatrixJ(A->diag) = NULL;
2350 hypre_CSRMatrixData(A->diag) = NULL;
2357 hypre_CSRMatrixI(A->offd) = NULL;
2358 hypre_CSRMatrixJ(A->offd) = NULL;
2359 hypre_CSRMatrixData(A->offd) = NULL;
2361 if (colMapOwner >= 0)
2363 if (colMapOwner & 1)
2367 hypre_ParCSRMatrixColMapOffd(A) = NULL;
2372 hypre_ParCSRMatrixDestroy(A);
2376 #if MFEM_HYPRE_VERSION >= 21800
2385 hypre_ParCSRMatrix *C_hypre;
2386 hypre_ParcsrBdiagInvScal(*A, blocksize, &C_hypre);
2387 hypre_ParCSRMatrixDropSmallEntries(C_hypre, 1e-15, 1);
2397 hypre_ParVector *d_hypre;
2398 hypre_ParvecBdiagInvScal(b_Hypre, blocksize, &d_hypre, *A);
2406 #if MFEM_HYPRE_VERSION < 21400
2411 hypre_ParCSRMatrix *C_hypre =
2412 internal::hypre_ParCSRMatrixAdd(const_cast<HypreParMatrix &>(A),
2413 const_cast<HypreParMatrix &>(B));
2414 MFEM_VERIFY(C_hypre,
"error in hypre_ParCSRMatrixAdd");
2416 hypre_MatvecCommPkgCreate(C_hypre);
2427 hypre_ParCSRMatrix * C = internal::hypre_ParCSRMatrixAdd(*A,*B);
2429 hypre_MatvecCommPkgCreate(C);
2436 HypreParMatrix *
Add(
double alpha,
const HypreParMatrix &A,
2437 double beta,
const HypreParMatrix &B)
2439 hypre_ParCSRMatrix *C;
2440 #if MFEM_HYPRE_VERSION <= 22000
2441 hypre_ParcsrAdd(alpha, A, beta, B, &C);
2443 hypre_ParCSRMatrixAdd(alpha, A, beta, B, &C);
2445 hypre_MatvecCommPkgCreate(C);
2447 return new HypreParMatrix(C);
2450 HypreParMatrix *
ParAdd(
const HypreParMatrix *A,
const HypreParMatrix *B)
2452 hypre_ParCSRMatrix *C;
2453 #if MFEM_HYPRE_VERSION <= 22000
2454 hypre_ParcsrAdd(1.0, *A, 1.0, *B, &C);
2456 hypre_ParCSRMatrixAdd(1.0, *A, 1.0, *B, &C);
2459 return new HypreParMatrix(C);
2467 hypre_ParCSRMatrix * ab;
2468 #ifdef HYPRE_USING_CUDA
2469 ab = hypre_ParCSRMatMat(*A, *B);
2471 ab = hypre_ParMatmul(*A,*B);
2473 hypre_ParCSRMatrixSetNumNonzeros(ab);
2475 hypre_MatvecCommPkgCreate(ab);
2487 hypre_ParCSRMatrix * rap;
2489 #ifdef HYPRE_USING_CUDA
2497 hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A,*P);
2498 const bool keepTranspose =
false;
2499 rap = hypre_ParCSRTMatMatKT(*P,Q,keepTranspose);
2500 hypre_ParCSRMatrixDestroy(Q);
2506 HYPRE_Int P_owns_its_col_starts =
2507 hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
2509 hypre_BoomerAMGBuildCoarseOperator(*P,*A,*P,&rap);
2513 hypre_ParCSRMatrixSetRowStartsOwner(rap,0);
2514 hypre_ParCSRMatrixSetColStartsOwner(rap,0);
2515 if (P_owns_its_col_starts)
2517 hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
2521 hypre_ParCSRMatrixSetNumNonzeros(rap);
2530 hypre_ParCSRMatrix * rap;
2532 #ifdef HYPRE_USING_CUDA
2534 hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A,*P);
2535 rap = hypre_ParCSRTMatMat(*Rt,Q);
2536 hypre_ParCSRMatrixDestroy(Q);
2539 HYPRE_Int P_owns_its_col_starts =
2540 hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
2541 HYPRE_Int Rt_owns_its_col_starts =
2542 hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*Rt));
2544 hypre_BoomerAMGBuildCoarseOperator(*Rt,*A,*P,&rap);
2548 hypre_ParCSRMatrixSetRowStartsOwner(rap,0);
2549 hypre_ParCSRMatrixSetColStartsOwner(rap,0);
2550 if (P_owns_its_col_starts)
2552 hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
2554 if (Rt_owns_its_col_starts)
2556 hypre_ParCSRMatrixSetColStartsOwner(*Rt, 1);
2560 hypre_ParCSRMatrixSetNumNonzeros(rap);
2569 const int num_loc,
const Array<int> &offsets,
2570 std::vector<int> &all_num_loc,
const int numBlocks,
2571 std::vector<std::vector<HYPRE_BigInt>> &blockProcOffsets,
2572 std::vector<HYPRE_BigInt> &procOffsets,
2573 std::vector<std::vector<int>> &procBlockOffsets,
2576 std::vector<std::vector<int>> all_block_num_loc(numBlocks);
2578 MPI_Allgather(&num_loc, 1, MPI_INT, all_num_loc.data(), 1, MPI_INT, comm);
2580 for (
int j = 0; j < numBlocks; ++j)
2582 all_block_num_loc[j].resize(nprocs);
2583 blockProcOffsets[j].resize(nprocs);
2585 const int blockNumRows = offsets[j + 1] - offsets[j];
2586 MPI_Allgather(&blockNumRows, 1, MPI_INT, all_block_num_loc[j].data(), 1,
2588 blockProcOffsets[j][0] = 0;
2589 for (
int i = 0; i < nprocs - 1; ++i)
2591 blockProcOffsets[j][i + 1] = blockProcOffsets[j][i]
2592 + all_block_num_loc[j][i];
2599 for (
int i = 0; i < nprocs; ++i)
2601 globalNum += all_num_loc[i];
2604 MFEM_VERIFY(globalNum >= 0,
"overflow in global size");
2608 firstLocal += all_num_loc[i];
2613 procOffsets[i + 1] = procOffsets[i] + all_num_loc[i];
2616 procBlockOffsets[i].resize(numBlocks);
2617 procBlockOffsets[i][0] = 0;
2618 for (
int j = 1; j < numBlocks; ++j)
2620 procBlockOffsets[i][j] = procBlockOffsets[i][j - 1]
2621 + all_block_num_loc[j - 1][i];
2629 const int numBlockRows = blocks.
NumRows();
2630 const int numBlockCols = blocks.
NumCols();
2632 MFEM_VERIFY(numBlockRows > 0 &&
2633 numBlockCols > 0,
"Invalid input to HypreParMatrixFromBlocks");
2635 if (blockCoeff != NULL)
2637 MFEM_VERIFY(numBlockRows == blockCoeff->
NumRows() &&
2638 numBlockCols == blockCoeff->
NumCols(),
2639 "Invalid input to HypreParMatrixFromBlocks");
2645 int nonNullBlockRow0 = -1;
2646 for (
int j=0; j<numBlockCols; ++j)
2648 if (blocks(0,j) != NULL)
2650 nonNullBlockRow0 = j;
2655 MFEM_VERIFY(nonNullBlockRow0 >= 0,
"Null row of blocks");
2656 MPI_Comm comm = blocks(0,nonNullBlockRow0)->GetComm();
2661 for (
int i=0; i<numBlockRows; ++i)
2663 for (
int j=0; j<numBlockCols; ++j)
2665 if (blocks(i,j) != NULL)
2667 const int nrows = blocks(i,j)->
NumRows();
2668 const int ncols = blocks(i,j)->
NumCols();
2670 MFEM_VERIFY(nrows > 0 &&
2671 ncols > 0,
"Invalid block in HypreParMatrixFromBlocks");
2673 if (rowOffsets[i+1] == 0)
2675 rowOffsets[i+1] = nrows;
2679 MFEM_VERIFY(rowOffsets[i+1] == nrows,
2680 "Inconsistent blocks in HypreParMatrixFromBlocks");
2683 if (colOffsets[j+1] == 0)
2685 colOffsets[j+1] = ncols;
2689 MFEM_VERIFY(colOffsets[j+1] == ncols,
2690 "Inconsistent blocks in HypreParMatrixFromBlocks");
2695 MFEM_VERIFY(rowOffsets[i+1] > 0,
"Invalid input blocks");
2696 rowOffsets[i+1] += rowOffsets[i];
2699 for (
int j=0; j<numBlockCols; ++j)
2701 MFEM_VERIFY(colOffsets[j+1] > 0,
"Invalid input blocks");
2702 colOffsets[j+1] += colOffsets[j];
2705 const int num_loc_rows = rowOffsets[numBlockRows];
2706 const int num_loc_cols = colOffsets[numBlockCols];
2709 MPI_Comm_rank(comm, &rank);
2710 MPI_Comm_size(comm, &nprocs);
2712 std::vector<int> all_num_loc_rows(nprocs);
2713 std::vector<int> all_num_loc_cols(nprocs);
2714 std::vector<HYPRE_BigInt> procRowOffsets(nprocs);
2715 std::vector<HYPRE_BigInt> procColOffsets(nprocs);
2716 std::vector<std::vector<HYPRE_BigInt>> blockRowProcOffsets(numBlockRows);
2717 std::vector<std::vector<HYPRE_BigInt>> blockColProcOffsets(numBlockCols);
2718 std::vector<std::vector<int>> procBlockRowOffsets(nprocs);
2719 std::vector<std::vector<int>> procBlockColOffsets(nprocs);
2721 HYPRE_BigInt first_loc_row, glob_nrows, first_loc_col, glob_ncols;
2723 all_num_loc_rows, numBlockRows, blockRowProcOffsets,
2724 procRowOffsets, procBlockRowOffsets, first_loc_row,
2728 all_num_loc_cols, numBlockCols, blockColProcOffsets,
2729 procColOffsets, procBlockColOffsets, first_loc_col,
2732 std::vector<int> opI(num_loc_rows + 1);
2733 std::vector<int> cnt(num_loc_rows);
2735 for (
int i = 0; i < num_loc_rows; ++i)
2741 opI[num_loc_rows] = 0;
2746 for (
int i = 0; i < numBlockRows; ++i)
2748 for (
int j = 0; j < numBlockCols; ++j)
2750 if (blocks(i, j) == NULL)
2752 csr_blocks(i, j) = NULL;
2756 blocks(i, j)->HostRead();
2757 csr_blocks(i, j) = hypre_MergeDiagAndOffd(*blocks(i, j));
2758 blocks(i, j)->HypreRead();
2760 for (
int k = 0; k < csr_blocks(i, j)->num_rows; ++k)
2762 opI[rowOffsets[i] + k + 1] +=
2763 csr_blocks(i, j)->i[k + 1] - csr_blocks(i, j)->i[k];
2770 for (
int i = 0; i < num_loc_rows; ++i)
2772 opI[i + 1] += opI[i];
2775 const int nnz = opI[num_loc_rows];
2777 std::vector<HYPRE_BigInt> opJ(nnz);
2778 std::vector<double> data(nnz);
2781 for (
int i = 0; i < numBlockRows; ++i)
2783 for (
int j = 0; j < numBlockCols; ++j)
2785 if (csr_blocks(i, j) != NULL)
2787 const int nrows = csr_blocks(i, j)->num_rows;
2788 const double cij = blockCoeff ? (*blockCoeff)(i, j) : 1.0;
2789 #if MFEM_HYPRE_VERSION >= 21600
2790 const bool usingBigJ = (csr_blocks(i, j)->big_j != NULL);
2793 for (
int k = 0; k < nrows; ++k)
2795 const int rowg = rowOffsets[i] + k;
2796 const int nnz_k = csr_blocks(i,j)->i[k+1]-csr_blocks(i,j)->i[k];
2797 const int osk = csr_blocks(i, j)->i[k];
2799 for (
int l = 0; l < nnz_k; ++l)
2802 #if MFEM_HYPRE_VERSION >= 21600
2803 const HYPRE_Int bcol = usingBigJ ?
2804 csr_blocks(i, j)->big_j[osk + l] :
2805 csr_blocks(i, j)->j[osk + l];
2807 const HYPRE_Int bcol = csr_blocks(i, j)->j[osk + l];
2811 const auto &offs = blockColProcOffsets[j];
2812 const int bcolproc =
2813 std::upper_bound(offs.begin() + 1, offs.end(), bcol)
2816 opJ[opI[rowg] + cnt[rowg]] = procColOffsets[bcolproc] +
2817 procBlockColOffsets[bcolproc][j]
2819 - blockColProcOffsets[j][bcolproc];
2820 data[opI[rowg] + cnt[rowg]] = cij * csr_blocks(i, j)->data[osk + l];
2828 for (
int i = 0; i < numBlockRows; ++i)
2830 for (
int j = 0; j < numBlockCols; ++j)
2832 if (csr_blocks(i, j) != NULL)
2834 hypre_CSRMatrixDestroy(csr_blocks(i, j));
2839 std::vector<HYPRE_BigInt> rowStarts2(2);
2840 rowStarts2[0] = first_loc_row;
2841 rowStarts2[1] = first_loc_row + all_num_loc_rows[rank];
2843 std::vector<HYPRE_BigInt> colStarts2(2);
2844 colStarts2[0] = first_loc_col;
2845 colStarts2[1] = first_loc_col + all_num_loc_cols[rank];
2847 MFEM_VERIFY(HYPRE_AssumedPartitionCheck(),
2848 "only 'assumed partition' mode is supported");
2850 return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
2851 opI.data(), opJ.data(),
2877 hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
2878 HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
2880 double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
2881 double *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
2883 for (
int i = 0; i < N; i++)
2886 hypre_ParVectorCopy(f, r);
2887 hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
2890 (0 == (i % 2)) ? coef = lambda : coef =
mu;
2892 for (HYPRE_Int j = 0; j < num_rows; j++)
2894 u_data[j] += coef*r_data[j] / max_eig;
2910 hypre_ParVector *x0,
2911 hypre_ParVector *x1,
2912 hypre_ParVector *x2,
2913 hypre_ParVector *x3)
2916 hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
2917 HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
2919 double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
2921 double *x0_data = hypre_VectorData(hypre_ParVectorLocalVector(x0));
2922 double *x1_data = hypre_VectorData(hypre_ParVectorLocalVector(x1));
2923 double *x2_data = hypre_VectorData(hypre_ParVectorLocalVector(x2));
2924 double *x3_data = hypre_VectorData(hypre_ParVectorLocalVector(x3));
2926 hypre_ParVectorCopy(u, x0);
2929 hypre_ParVectorCopy(f, x1);
2930 hypre_ParCSRMatrixMatvec(-1.0, A, x0, 1.0, x1);
2932 for (HYPRE_Int i = 0; i < num_rows; i++)
2934 x1_data[i] /= -max_eig;
2938 for (HYPRE_Int i = 0; i < num_rows; i++)
2940 x1_data[i] = x0_data[i] -x1_data[i];
2944 for (HYPRE_Int i = 0; i < num_rows; i++)
2946 x3_data[i] = fir_coeffs[0]*x0_data[i] +fir_coeffs[1]*x1_data[i];
2949 for (
int n = 2; n <= poly_order; n++)
2952 hypre_ParVectorCopy(f, x2);
2953 hypre_ParCSRMatrixMatvec(-1.0, A, x1, 1.0, x2);
2955 for (HYPRE_Int i = 0; i < num_rows; i++)
2957 x2_data[i] /= -max_eig;
2965 for (HYPRE_Int i = 0; i < num_rows; i++)
2967 x2_data[i] = (x1_data[i]-x0_data[i]) +(x1_data[i]-2*x2_data[i]);
2968 x3_data[i] += fir_coeffs[n]*x2_data[i];
2969 x0_data[i] = x1_data[i];
2970 x1_data[i] = x2_data[i];
2974 for (HYPRE_Int i = 0; i < num_rows; i++)
2976 u_data[i] = x3_data[i];
2997 B =
X =
V =
Z = NULL;
3005 int relax_times_,
double relax_weight_,
3006 double omega_,
int poly_order_,
3007 double poly_fraction_,
int eig_est_cg_iter_)
3019 B =
X =
V =
Z = NULL;
3030 type =
static_cast<int>(type_);
3041 int eig_est_cg_iter_)
3058 double a = -1,
b, c;
3059 if (!strcmp(name,
"Rectangular")) { a = 1.0,
b = 0.0, c = 0.0; }
3060 if (!strcmp(name,
"Hanning")) { a = 0.5,
b = 0.5, c = 0.0; }
3061 if (!strcmp(name,
"Hamming")) { a = 0.54,
b = 0.46, c = 0.0; }
3062 if (!strcmp(name,
"Blackman")) { a = 0.42,
b = 0.50, c = 0.08; }
3065 mfem_error(
"HypreSmoother::SetWindowByName : name not recognized!");
3083 mfem_error(
"HypreSmoother::SetOperator : not HypreParMatrix!");
3090 if (
B) {
delete B; }
3091 if (
X) {
delete X; }
3092 if (
V) {
delete V; }
3093 if (
Z) {
delete Z; }
3115 A->
Mult(ones, diag);
3123 #ifdef HYPRE_USING_CUDA
3127 d_l1_norms[i] = std::abs(d_l1_norms[i]);
3130 for (
int i = 0; i <
height; i++)
3152 else if (
type == 1001 ||
type == 1002)
3191 double* window_coeffs =
new double[
poly_order+1];
3192 double* cheby_coeffs =
new double[
poly_order+1];
3200 window_coeffs[i] = a + b*cos(t) +c*cos(2*t);
3204 double theta_pb = acos(1.0 -0.5*k_pb);
3206 cheby_coeffs[0] = (theta_pb +
sigma)/M_PI;
3209 double t = i*(theta_pb+
sigma);
3210 cheby_coeffs[i] = 2.0*sin(t)/(i*M_PI);
3215 fir_coeffs[i] = window_coeffs[i]*cheby_coeffs[i];
3218 delete[] window_coeffs;
3219 delete[] cheby_coeffs;
3226 mfem_error(
"HypreSmoother::Mult (...) : HypreParMatrix A is missing");
3239 HYPRE_ParCSRDiagScale(NULL, *
A, b, x);
3246 hypre_ParVectorSetConstantValues(x, 0.0);
3267 else if (
type == 1002)
3281 int hypre_type =
type;
3283 if (
type == 5) { hypre_type = 1; }
3287 hypre_ParCSRRelax(*
A, b, hypre_type,
3294 hypre_ParCSRRelax(*
A, b, hypre_type,
3309 mfem_error(
"HypreSmoother::Mult (...) : HypreParMatrix A is missing");
3316 A -> GetGlobalNumRows(),
3318 A -> GetRowStarts());
3320 A -> GetGlobalNumCols(),
3322 A -> GetColStarts());
3360 if (!xshallow) { x = *
X; }
3370 mfem_error(
"HypreSmoother::MultTranspose (...) : undefined!\n");
3376 if (
B) {
delete B; }
3377 if (
X) {
delete X; }
3378 if (
V) {
delete V; }
3379 if (
Z) {
delete Z; }
3388 if (
X0) {
delete X0; }
3389 if (
X1) {
delete X1; }
3404 :
Solver(A_->Height(), A_->Width())
3419 mfem_error(
"HypreSolver::Mult (...) : HypreParMatrix A is missing");
3426 hypre_ParVectorSetConstantValues(x, 0.0);
3437 if (err) { MFEM_WARNING(
"Error during setup! Error code: " << err); }
3441 MFEM_VERIFY(!err,
"Error during setup! Error code: " << err);
3443 hypre_error_flag = 0;
3450 if (err) { MFEM_WARNING(
"Error during solve! Error code: " << err); }
3454 MFEM_VERIFY(!err,
"Error during solve! Error code: " << err);
3456 hypre_error_flag = 0;
3466 mfem_error(
"HypreSolver::Mult (...) : HypreParMatrix A is missing");
3473 A -> GetGlobalNumRows(),
3475 A -> GetRowStarts());
3477 A -> GetGlobalNumCols(),
3479 A -> GetColStarts());
3517 if (!xshallow) { x = *
X; }
3522 if (
B) {
delete B; }
3523 if (
X) {
delete X; }
3533 HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
3542 HYPRE_ParCSRMatrixGetComm(*
A, &comm);
3544 HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
3550 MFEM_VERIFY(new_A,
"new Operator must be a HypreParMatrix!");
3571 HYPRE_PCGSetTol(pcg_solver, tol);
3576 HYPRE_PCGSetAbsoluteTol(pcg_solver, atol);
3581 HYPRE_PCGSetMaxIter(pcg_solver, max_iter);
3586 HYPRE_PCGSetLogging(pcg_solver, logging);
3591 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_lvl);
3596 precond = &precond_;
3598 HYPRE_ParCSRPCGSetPrecond(pcg_solver,
3606 HYPRE_PCGSetTwoNorm(pcg_solver, 1);
3607 if (res_frequency > 0)
3609 HYPRE_PCGSetRecomputeResidualP(pcg_solver, res_frequency);
3613 HYPRE_PCGSetResidualTol(pcg_solver, rtol);
3620 HYPRE_Int time_index = 0;
3621 HYPRE_Int num_iterations;
3622 double final_res_norm;
3624 HYPRE_Int print_level;
3626 HYPRE_PCGGetPrintLevel(pcg_solver, &print_level);
3627 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level%3);
3629 HYPRE_ParCSRMatrixGetComm(*
A, &comm);
3634 hypre_ParVectorSetConstantValues(x, 0.0);
3642 if (print_level > 0 && print_level < 3)
3644 time_index = hypre_InitializeTiming(
"PCG Setup");
3645 hypre_BeginTiming(time_index);
3648 HYPRE_ParCSRPCGSetup(pcg_solver, *
A, b, x);
3651 if (print_level > 0 && print_level < 3)
3653 hypre_EndTiming(time_index);
3654 hypre_PrintTiming(
"Setup phase times", comm);
3655 hypre_FinalizeTiming(time_index);
3656 hypre_ClearTiming();
3660 if (print_level > 0 && print_level < 3)
3662 time_index = hypre_InitializeTiming(
"PCG Solve");
3663 hypre_BeginTiming(time_index);
3666 HYPRE_ParCSRPCGSolve(pcg_solver, *
A, b, x);
3668 if (print_level > 0)
3670 if (print_level < 3)
3672 hypre_EndTiming(time_index);
3673 hypre_PrintTiming(
"Solve phase times", comm);
3674 hypre_FinalizeTiming(time_index);
3675 hypre_ClearTiming();
3678 HYPRE_ParCSRPCGGetNumIterations(pcg_solver, &num_iterations);
3679 HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(pcg_solver,
3682 MPI_Comm_rank(comm, &myid);
3686 mfem::out <<
"PCG Iterations = " << num_iterations << endl
3687 <<
"Final PCG Relative Residual Norm = " << final_res_norm
3691 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level);
3696 HYPRE_ParCSRPCGDestroy(pcg_solver);
3704 HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
3705 SetDefaultOptions();
3715 HYPRE_ParCSRMatrixGetComm(*
A, &comm);
3717 HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
3718 SetDefaultOptions();
3721 void HypreGMRES::SetDefaultOptions()
3727 HYPRE_ParCSRGMRESSetKDim(gmres_solver, k_dim);
3728 HYPRE_ParCSRGMRESSetMaxIter(gmres_solver, max_iter);
3729 HYPRE_ParCSRGMRESSetTol(gmres_solver, tol);
3735 MFEM_VERIFY(new_A,
"new Operator must be a HypreParMatrix!");
3756 HYPRE_GMRESSetTol(gmres_solver, tol);
3761 HYPRE_GMRESSetAbsoluteTol(gmres_solver, tol);
3766 HYPRE_GMRESSetMaxIter(gmres_solver, max_iter);
3771 HYPRE_GMRESSetKDim(gmres_solver, k_dim);
3776 HYPRE_GMRESSetLogging(gmres_solver, logging);
3781 HYPRE_GMRESSetPrintLevel(gmres_solver, print_lvl);
3786 precond = &precond_;
3788 HYPRE_ParCSRGMRESSetPrecond(gmres_solver,
3797 HYPRE_Int time_index = 0;
3798 HYPRE_Int num_iterations;
3799 double final_res_norm;
3801 HYPRE_Int print_level;
3803 HYPRE_GMRESGetPrintLevel(gmres_solver, &print_level);
3805 HYPRE_ParCSRMatrixGetComm(*
A, &comm);
3810 hypre_ParVectorSetConstantValues(x, 0.0);
3818 if (print_level > 0)
3820 time_index = hypre_InitializeTiming(
"GMRES Setup");
3821 hypre_BeginTiming(time_index);
3824 HYPRE_ParCSRGMRESSetup(gmres_solver, *
A, b, x);
3827 if (print_level > 0)
3829 hypre_EndTiming(time_index);
3830 hypre_PrintTiming(
"Setup phase times", comm);
3831 hypre_FinalizeTiming(time_index);
3832 hypre_ClearTiming();
3836 if (print_level > 0)
3838 time_index = hypre_InitializeTiming(
"GMRES Solve");
3839 hypre_BeginTiming(time_index);
3842 HYPRE_ParCSRGMRESSolve(gmres_solver, *
A, b, x);
3844 if (print_level > 0)
3846 hypre_EndTiming(time_index);
3847 hypre_PrintTiming(
"Solve phase times", comm);
3848 hypre_FinalizeTiming(time_index);
3849 hypre_ClearTiming();
3851 HYPRE_ParCSRGMRESGetNumIterations(gmres_solver, &num_iterations);
3852 HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm(gmres_solver,
3855 MPI_Comm_rank(comm, &myid);
3859 mfem::out <<
"GMRES Iterations = " << num_iterations << endl
3860 <<
"Final GMRES Relative Residual Norm = " << final_res_norm
3868 HYPRE_ParCSRGMRESDestroy(gmres_solver);
3876 HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
3877 SetDefaultOptions();
3887 HYPRE_ParCSRMatrixGetComm(*
A, &comm);
3889 HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
3890 SetDefaultOptions();
3893 void HypreFGMRES::SetDefaultOptions()
3899 HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
3900 HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
3901 HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
3907 MFEM_VERIFY(new_A,
"new Operator must be a HypreParMatrix!");
3928 HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
3933 HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
3938 HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
3943 HYPRE_ParCSRFlexGMRESSetLogging(fgmres_solver, logging);
3948 HYPRE_ParCSRFlexGMRESSetPrintLevel(fgmres_solver, print_lvl);
3953 precond = &precond_;
3954 HYPRE_ParCSRFlexGMRESSetPrecond(fgmres_solver,
3963 HYPRE_Int time_index = 0;
3964 HYPRE_Int num_iterations;
3965 double final_res_norm;
3967 HYPRE_Int print_level;
3969 HYPRE_FlexGMRESGetPrintLevel(fgmres_solver, &print_level);
3971 HYPRE_ParCSRMatrixGetComm(*
A, &comm);
3976 hypre_ParVectorSetConstantValues(x, 0.0);
3984 if (print_level > 0)
3986 time_index = hypre_InitializeTiming(
"FGMRES Setup");
3987 hypre_BeginTiming(time_index);
3990 HYPRE_ParCSRFlexGMRESSetup(fgmres_solver, *
A, b, x);
3993 if (print_level > 0)
3995 hypre_EndTiming(time_index);
3996 hypre_PrintTiming(
"Setup phase times", comm);
3997 hypre_FinalizeTiming(time_index);
3998 hypre_ClearTiming();
4002 if (print_level > 0)
4004 time_index = hypre_InitializeTiming(
"FGMRES Solve");
4005 hypre_BeginTiming(time_index);
4008 HYPRE_ParCSRFlexGMRESSolve(fgmres_solver, *
A, b, x);
4010 if (print_level > 0)
4012 hypre_EndTiming(time_index);
4013 hypre_PrintTiming(
"Solve phase times", comm);
4014 hypre_FinalizeTiming(time_index);
4015 hypre_ClearTiming();
4017 HYPRE_ParCSRFlexGMRESGetNumIterations(fgmres_solver, &num_iterations);
4018 HYPRE_ParCSRFlexGMRESGetFinalRelativeResidualNorm(fgmres_solver,
4021 MPI_Comm_rank(comm, &myid);
4025 mfem::out <<
"FGMRES Iterations = " << num_iterations << endl
4026 <<
"Final FGMRES Relative Residual Norm = " << final_res_norm
4034 HYPRE_ParCSRFlexGMRESDestroy(fgmres_solver);
4041 MFEM_VERIFY(new_A,
"new Operator must be a HypreParMatrix!");
HypreParaSails::HypreParaSails(MPI_Comm comm)
{
   HYPRE_ParaSailsCreate(comm, &sai_precond);
   SetDefaultOptions();
}

HypreParaSails::HypreParaSails(const HypreParMatrix &A) : HypreSolver(&A)
{
   MPI_Comm comm;

   HYPRE_ParCSRMatrixGetComm(A, &comm);

   HYPRE_ParaSailsCreate(comm, &sai_precond);
   SetDefaultOptions();
}

void HypreParaSails::SetDefaultOptions()
{
   int    sai_max_levels = 1;
   double sai_threshold  = 0.1;
   double sai_filter     = 0.1;
   int    sai_sym        = 0;
   double sai_loadbal    = 0.0;
   int    sai_reuse      = 0;
   int    sai_logging    = 1;

   HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
   HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
   HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
   HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
   HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
   HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
}

void HypreParaSails::ResetSAIPrecond(MPI_Comm comm)
{
   HYPRE_Int  sai_max_levels;
   HYPRE_Real sai_threshold;
   HYPRE_Real sai_filter;
   HYPRE_Int  sai_sym;
   HYPRE_Real sai_loadbal;
   HYPRE_Int  sai_reuse;
   HYPRE_Int  sai_logging;

   // Save the customized parameters of the current object
   HYPRE_ParaSailsGetNlevels(sai_precond, &sai_max_levels);
   HYPRE_ParaSailsGetThresh(sai_precond, &sai_threshold);
   HYPRE_ParaSailsGetFilter(sai_precond, &sai_filter);
   HYPRE_ParaSailsGetSym(sai_precond, &sai_sym);
   HYPRE_ParaSailsGetLoadbal(sai_precond, &sai_loadbal);
   HYPRE_ParaSailsGetReuse(sai_precond, &sai_reuse);
   HYPRE_ParaSailsGetLogging(sai_precond, &sai_logging);

   HYPRE_ParaSailsDestroy(sai_precond);
   HYPRE_ParaSailsCreate(comm, &sai_precond);

   // Reapply the saved parameters to the new object
   HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
   HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
   HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
   HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
   HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
   HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
}

void HypreParaSails::SetOperator(const Operator &op)
{
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   if (A)
   {
      MPI_Comm comm;
      HYPRE_ParCSRMatrixGetComm(*A, &comm);
      ResetSAIPrecond(comm);
   }

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width  = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}

void HypreParaSails::SetSymmetry(int sym)
{
   HYPRE_ParaSailsSetSym(sai_precond, sym);
}

HypreParaSails::~HypreParaSails()
{
   HYPRE_ParaSailsDestroy(sai_precond);
}
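// Usage sketch (illustrative, not part of the original source): ParaSails as a
// PCG preconditioner; SetSymmetry(1) matches an SPD matrix. A, B, X are
// hypothetical.
static void ExampleParaSailsPCG(HypreParMatrix &A, HypreParVector &B,
                                HypreParVector &X)
{
   HypreParaSails sai(A);
   sai.SetSymmetry(1);     // SPD matrix, SPD approximate inverse
   HyprePCG pcg(A);
   pcg.SetTol(1e-10);
   pcg.SetMaxIter(200);
   pcg.SetPreconditioner(sai);
   pcg.Mult(B, X);
}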
HypreEuclid::HypreEuclid(MPI_Comm comm)
{
   HYPRE_EuclidCreate(comm, &euc_precond);
   SetDefaultOptions();
}

HypreEuclid::HypreEuclid(const HypreParMatrix &A) : HypreSolver(&A)
{
   MPI_Comm comm;

   HYPRE_ParCSRMatrixGetComm(A, &comm);

   HYPRE_EuclidCreate(comm, &euc_precond);
   SetDefaultOptions();
}

void HypreEuclid::SetDefaultOptions()
{
   int euc_level = 1; // We use ILU(1)
   int euc_stats = 0; // No logging
   int euc_mem   = 0; // No memory logging
   int euc_bj    = 0; // 1: Use Block Jacobi
   int euc_ro_sc = 0; // 1: Use Row scaling

   HYPRE_EuclidSetLevel(euc_precond, euc_level);
   HYPRE_EuclidSetStats(euc_precond, euc_stats);
   HYPRE_EuclidSetMem(euc_precond, euc_mem);
   HYPRE_EuclidSetBJ(euc_precond, euc_bj);
   HYPRE_EuclidSetRowScale(euc_precond, euc_ro_sc);
}

void HypreEuclid::ResetEuclidPrecond(MPI_Comm comm)
{
   // Euclid does not seem to offer access to its current configuration, so we
   // simply reset it to its default options.
   HYPRE_EuclidDestroy(euc_precond);
   HYPRE_EuclidCreate(comm, &euc_precond);

   SetDefaultOptions();
}

void HypreEuclid::SetOperator(const Operator &op)
{
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   if (A)
   {
      MPI_Comm comm;
      HYPRE_ParCSRMatrixGetComm(*new_A, &comm);
      ResetEuclidPrecond(comm);
   }

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width  = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}

HypreEuclid::~HypreEuclid()
{
   HYPRE_EuclidDestroy(euc_precond);
}
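// Usage sketch (illustrative, not part of the original source): Euclid
// (parallel ILU) preconditioning a nonsymmetric system solved with GMRES;
// A, B, X are hypothetical.
static void ExampleEuclidGMRES(HypreParMatrix &A, HypreParVector &B,
                               HypreParVector &X)
{
   HypreEuclid euclid(A);  // ILU(1) by default, see SetDefaultOptions()
   HypreGMRES gmres(A);
   gmres.SetTol(1e-10);
   gmres.SetMaxIter(200);
   gmres.SetPreconditioner(euclid);
   gmres.Mult(B, X);
}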
#if MFEM_HYPRE_VERSION >= 21900

HypreILU::HypreILU()
{
   HYPRE_ILUCreate(&ilu_precond);
   SetDefaultOptions();
}

void HypreILU::SetDefaultOptions()
{
   // The type of incomplete LU used locally and globally
   HYPRE_Int ilu_type = 0; // ILU(k) locally and block Jacobi globally
   HYPRE_ILUSetType(ilu_precond, ilu_type);

   // Maximum iterations; 1 iteration for preconditioning
   HYPRE_Int max_iter = 1;
   HYPRE_ILUSetMaxIter(ilu_precond, max_iter);

   // The tolerance when used as a smoother; set to 0.0 for preconditioner
   HYPRE_Real tol = 0.0;
   HYPRE_ILUSetTol(ilu_precond, tol);

   // Fill level for ILU(k)
   HYPRE_Int lev_fill = 1;
   HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);

   // Local reordering scheme; 0 = no reordering, 1 = reverse Cuthill-McKee
   HYPRE_Int reorder_type = 1;
   HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);

   // Information print level; 0 = none, 1 = setup, 2 = solve, 3 = setup+solve
   HYPRE_Int print_level = 0;
   HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
}

void HypreILU::ResetILUPrecond()
{
   if (ilu_precond)
   {
      HYPRE_ILUDestroy(ilu_precond);
   }
   HYPRE_ILUCreate(&ilu_precond);
   SetDefaultOptions();
}

void HypreILU::SetLevelOfFill(HYPRE_Int lev_fill)
{
   HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);
}

void HypreILU::SetPrintLevel(HYPRE_Int print_level)
{
   HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
}

void HypreILU::SetOperator(const Operator &op)
{
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   if (A) { ResetILUPrecond(); }

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width  = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}

HypreILU::~HypreILU()
{
   HYPRE_ILUDestroy(ilu_precond);
}

#endif // MFEM_HYPRE_VERSION >= 21900
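// Usage sketch (illustrative, not part of the original source): HypreILU with
// a larger fill level; requires hypre >= 2.19.0, as guarded above. A, B, X are
// hypothetical.
#if MFEM_HYPRE_VERSION >= 21900
static void ExampleILUGMRES(HypreParMatrix &A, HypreParVector &B,
                            HypreParVector &X)
{
   HypreILU ilu;
   ilu.SetLevelOfFill(2);  // ILU(2) instead of the default ILU(1)
   HypreGMRES gmres(A);
   gmres.SetPreconditioner(ilu);
   gmres.Mult(B, X);
}
#endif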
HypreBoomerAMG::HypreBoomerAMG()
{
   HYPRE_BoomerAMGCreate(&amg_precond);
   SetDefaultOptions();
}

HypreBoomerAMG::HypreBoomerAMG(const HypreParMatrix &A) : HypreSolver(&A)
{
   HYPRE_BoomerAMGCreate(&amg_precond);
   SetDefaultOptions();
}

void HypreBoomerAMG::SetDefaultOptions()
{
#ifndef HYPRE_USING_CUDA
   // AMG coarsening options:
   int coarsen_type = 10;   // 10 = HMIS, 8 = PMIS, 6 = Falgout, 0 = CLJP
   int agg_levels   = 1;    // number of aggressive coarsening levels
   double theta     = 0.25; // strength threshold: 0.25, 0.5, 0.8

   // AMG interpolation options:
   int interp_type  = 6;    // 6 = extended+i, 0 = classical
   int Pmax         = 4;    // max number of elements per row in P

   // AMG relaxation options:
   int relax_type   = 8;    // 8 = l1-GS, 6 = symm. GS, 3 = GS, 18 = l1-Jacobi
   int relax_sweeps = 1;    // relaxation sweeps on each level

   // Additional options:
   int print_level  = 1;    // print AMG iterations? 1 = no, 2 = yes
   int max_levels   = 25;   // max number of levels in AMG hierarchy
#else
   // AMG coarsening options:
   int coarsen_type = 8;    // 10 = HMIS, 8 = PMIS, 6 = Falgout, 0 = CLJP
   int agg_levels   = 0;    // number of aggressive coarsening levels
   double theta     = 0.25; // strength threshold: 0.25, 0.5, 0.8

   // AMG interpolation options:
   int interp_type  = 6;    // 6 = extended+i, or 18 = extended+e
   int Pmax         = 4;    // max number of elements per row in P

   // AMG relaxation options:
   int relax_type   = 18;   // 18 = l1-Jacobi, or 16 = Chebyshev
   int relax_sweeps = 1;    // relaxation sweeps on each level

   // Additional options:
   int print_level  = 1;    // print AMG iterations? 1 = no, 2 = yes
   int max_levels   = 25;   // max number of levels in AMG hierarchy
#endif

   HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
   HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
   HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
   HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
   HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
   HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
   HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
   HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
   HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);

   // Use as a preconditioner (one V-cycle, zero tolerance)
   HYPRE_BoomerAMGSetMaxIter(amg_precond, 1);
   HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
}

void HypreBoomerAMG::ResetAMGPrecond()
{
   HYPRE_Int coarsen_type;
   HYPRE_Int agg_levels;
   HYPRE_Int relax_type;
   HYPRE_Int relax_sweeps;
   HYPRE_Real theta;
   HYPRE_Int interp_type;
   HYPRE_Int Pmax;
   HYPRE_Int print_level;
   HYPRE_Int max_levels;
   HYPRE_Int dim;
   HYPRE_Int nrbms = rbms.Size();
   HYPRE_Int nodal;
   HYPRE_Int nodal_diag;
   HYPRE_Int relax_coarse;
   HYPRE_Int interp_vec_variant;
   HYPRE_Int q_max;
   HYPRE_Int smooth_interp_vectors;
   HYPRE_Int interp_refine;

   hypre_ParAMGData *amg_data = (hypre_ParAMGData *)amg_precond;

   // read options from the current amg_precond
   HYPRE_BoomerAMGGetCoarsenType(amg_precond, &coarsen_type);
   agg_levels = hypre_ParAMGDataAggNumLevels(amg_data);
   relax_type = hypre_ParAMGDataUserRelaxType(amg_data);
   relax_sweeps = hypre_ParAMGDataUserNumSweeps(amg_data);
   HYPRE_BoomerAMGGetStrongThreshold(amg_precond, &theta);
   hypre_BoomerAMGGetInterpType(amg_precond, &interp_type);
   HYPRE_BoomerAMGGetPMaxElmts(amg_precond, &Pmax);
   HYPRE_BoomerAMGGetPrintLevel(amg_precond, &print_level);
   HYPRE_BoomerAMGGetMaxLevels(amg_precond, &max_levels);
   HYPRE_BoomerAMGGetNumFunctions(amg_precond, &dim);
   if (nrbms) // elasticity solver options
   {
      nodal = hypre_ParAMGDataNodal(amg_data);
      nodal_diag = hypre_ParAMGDataNodalDiag(amg_data);
      HYPRE_BoomerAMGGetCycleRelaxType(amg_precond, &relax_coarse, 3);
      interp_vec_variant = hypre_ParAMGInterpVecVariant(amg_data);
      q_max = hypre_ParAMGInterpVecQMax(amg_data);
      smooth_interp_vectors = hypre_ParAMGSmoothInterpVectors(amg_data);
      interp_refine = hypre_ParAMGInterpRefine(amg_data);
   }

   HYPRE_BoomerAMGDestroy(amg_precond);
   HYPRE_BoomerAMGCreate(&amg_precond);

   HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
   HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
   HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
   HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
   HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);
   HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
   HYPRE_BoomerAMGSetMaxIter(amg_precond, 1); // one V-cycle
   HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
   HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
   HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
   HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
   HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
   if (nrbms)
   {
      HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
      HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
      HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
      HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
      HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
      HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
      HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);

      HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
   }
}

void HypreBoomerAMG::SetOperator(const Operator &op)
{
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   if (A) { ResetAMGPrecond(); }

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width  = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}
void HypreBoomerAMG::SetSystemsOptions(int dim, bool order_bynodes)
{
   HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);

   // The default "system" ordering in hypre is by vector dimension (byVDIM);
   // when the MFEM ordering is byNODES the mapping from dofs to vector
   // components must be given explicitly.
   if (order_bynodes)
   {
      // hypre deletes the mapping pointer in HYPRE_BoomerAMGDestroy
      HYPRE_Int *mapping = mfem_hypre_CTAlloc_host(HYPRE_Int, height);
      MFEM_VERIFY(height % dim == 0, "Ordering does not work as claimed!");
      int h_nnodes = height / dim;
      int k = 0;
      for (int i = 0; i < dim; ++i)
      {
         for (int j = 0; j < h_nnodes; ++j)
         {
            mapping[k++] = i;
         }
      }
      HYPRE_BoomerAMGSetDofFunc(amg_precond, mapping);
   }

   // More robust options with respect to convergence
   HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
   HYPRE_BoomerAMGSetStrongThreshold(amg_precond, 0.5);
}
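// Usage sketch (illustrative, not part of the original source): enabling the
// systems-AMG options for a 2D vector problem whose unknowns are ordered
// byNODES; the matrix A is a hypothetical HypreParMatrix. For height = 6 and
// dim = 2 the loop above produces mapping = {0,0,0,1,1,1}: the first
// height/dim entries belong to the x-component, the rest to the y-component.
static void ExampleSystemsAMG(HypreParMatrix &A)
{
   HypreBoomerAMG amg(A);
   amg.SetSystemsOptions(2, /* order_bynodes = */ true);
}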
// Rigid-body mode functions: rotations in the xy, yz and zx planes
static void func_rxy(const Vector &x, Vector &y)
{
   y = 0.0; y(0) = x(1); y(1) = -x(0);
}

static void func_ryz(const Vector &x, Vector &y)
{
   y = 0.0; y(1) = x(2); y(2) = -x(1);
}

static void func_rzx(const Vector &x, Vector &y)
{
   y = 0.0; y(2) = x(0); y(0) = -x(2);
}
void HypreBoomerAMG::RecomputeRBMs()
{
   int nrbms;
   Array<HypreParVector*> gf_rbms;
   int dim = fespace->GetParMesh()->Dimension();

   for (int i = 0; i < rbms.Size(); i++)
   {
      HYPRE_ParVectorDestroy(rbms[i]);
   }

   if (dim == 2)
   {
      nrbms = 1;

      VectorFunctionCoefficient coeff_rxy(2, func_rxy);

      ParGridFunction rbms_rxy(fespace);
      rbms_rxy.ProjectCoefficient(coeff_rxy);

      rbms.SetSize(nrbms);
      gf_rbms.SetSize(nrbms);
      gf_rbms[0] = rbms_rxy.ParallelAverage();
   }
   else if (dim == 3)
   {
      nrbms = 3;

      VectorFunctionCoefficient coeff_rxy(3, func_rxy);
      VectorFunctionCoefficient coeff_ryz(3, func_ryz);
      VectorFunctionCoefficient coeff_rzx(3, func_rzx);

      ParGridFunction rbms_rxy(fespace);
      ParGridFunction rbms_ryz(fespace);
      ParGridFunction rbms_rzx(fespace);
      rbms_rxy.ProjectCoefficient(coeff_rxy);
      rbms_ryz.ProjectCoefficient(coeff_ryz);
      rbms_rzx.ProjectCoefficient(coeff_rzx);

      rbms.SetSize(nrbms);
      gf_rbms.SetSize(nrbms);
      gf_rbms[0] = rbms_rxy.ParallelAverage();
      gf_rbms[1] = rbms_ryz.ParallelAverage();
      gf_rbms[2] = rbms_rzx.ParallelAverage();
   }
   else
   {
      nrbms = 0;
      rbms.SetSize(nrbms);
   }

   // transfer the vectors from gf_rbms to rbms, and then delete gf_rbms
   for (int i = 0; i < nrbms; i++)
   {
      rbms[i] = gf_rbms[i]->StealParVector();
      delete gf_rbms[i];
   }
}
void HypreBoomerAMG::SetElasticityOptions(ParFiniteElementSpace *fespace)
{
#ifdef HYPRE_USING_CUDA
   MFEM_ABORT("this method is not supported in hypre built with CUDA");
#endif

   // Save the finite element space to support multiple calls to SetOperator()
   this->fespace = fespace;

   // Make sure the systems AMG options are set
   int dim = fespace->GetParMesh()->Dimension();
   SetSystemsOptions(dim);

   // Nodal coarsening options (nodal coarsening is required for this solver)
   int nodal = 4;        // strength reduction norm: 1, 3 or 4
   int nodal_diag = 1;   // diagonal in strength matrix: 0, 1 or 2
   int relax_coarse = 8; // l1-symm. GS / SSOR on coarsest grid

   // Elasticity interpolation options
   int interp_vec_variant = 2;    // GM: 1, LN: 2
   int q_max = 4;                 // max elements per row for each Q
   int smooth_interp_vectors = 1; // smooth the rigid-body modes?

   // Optionally pre-process the interpolation matrix through iterative weight
   // refinement (this is generally applicable for any system)
   int interp_refine = 1;

   HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
   HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
   HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
   HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
   HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
   HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
   HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);

   RecomputeRBMs();
   HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
}
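// Usage sketch (illustrative, not part of the original source): the elasticity
// variant is driven by passing the displacement ParFiniteElementSpace, so that
// the rigid-body modes above can be projected; A and fespace are hypothetical.
static void ExampleElasticityAMG(HypreParMatrix &A,
                                 ParFiniteElementSpace *fespace)
{
   HypreBoomerAMG amg(A);
   amg.SetElasticityOptions(fespace); // nodal coarsening + RBM interpolation
}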
#if MFEM_HYPRE_VERSION >= 21800

void HypreBoomerAMG::SetAdvectiveOptions(int distanceR,
                                         const std::string &prerelax,
                                         const std::string &postrelax)
{
   // Hypre parameters
   int Sabs = 0;
   int interp_type = 100;
   int relax_type = 10;
   int coarsen_type = 6;
   double strength_tolC = 0.1;
   double strength_tolR = 0.01;
   double filter_tolR = 0.0;
   double filterA_tol = 0.0;

   // Set relaxation on specified grid points
   int ns_down, ns_up, ns_coarse;
   if (distanceR > 0)
   {
      ns_down = prerelax.length();
      ns_up = postrelax.length();
      ns_coarse = 1;

      // Array to store relaxation scheme and pass to Hypre
      HYPRE_Int **grid_relax_points = mfem_hypre_TAlloc(HYPRE_Int*, 4);
      grid_relax_points[0] = NULL;
      grid_relax_points[1] = mfem_hypre_TAlloc(HYPRE_Int, ns_down);
      grid_relax_points[2] = mfem_hypre_TAlloc(HYPRE_Int, ns_up);
      grid_relax_points[3] = mfem_hypre_TAlloc(HYPRE_Int, 1);
      grid_relax_points[3][0] = 0;

      // set down relax scheme
      for (int i = 0; i < ns_down; i++)
      {
         if (prerelax[i] == 'F')
         {
            grid_relax_points[1][i] = -1;
         }
         else if (prerelax[i] == 'C')
         {
            grid_relax_points[1][i] = 1;
         }
         else if (prerelax[i] == 'A')
         {
            grid_relax_points[1][i] = 0;
         }
      }

      // set up relax scheme
      for (int i = 0; i < ns_up; i++)
      {
         if (postrelax[i] == 'F')
         {
            grid_relax_points[2][i] = -1;
         }
         else if (postrelax[i] == 'C')
         {
            grid_relax_points[2][i] = 1;
         }
         else if (postrelax[i] == 'A')
         {
            grid_relax_points[2][i] = 0;
         }
      }

      HYPRE_BoomerAMGSetRestriction(amg_precond, distanceR);

      HYPRE_BoomerAMGSetGridRelaxPoints(amg_precond, grid_relax_points);

      HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
   }

   if (Sabs)
   {
      HYPRE_BoomerAMGSetSabs(amg_precond, Sabs);
   }

   HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);

   // does not support aggressive coarsening
   HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);

   HYPRE_BoomerAMGSetStrongThreshold(amg_precond, strength_tolC);

   if (distanceR > 0)
   {
      HYPRE_BoomerAMGSetStrongThresholdR(amg_precond, strength_tolR);
      HYPRE_BoomerAMGSetFilterThresholdR(amg_precond, filter_tolR);
   }

   if (relax_type > -1)
   {
      HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
   }

   if (distanceR > 0)
   {
      HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_coarse, 3);
      HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_down,   1);
      HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_up,     2);

      HYPRE_BoomerAMGSetADropTol(amg_precond, filterA_tol);
      // type = -1: drop based on the row inf-norm
      HYPRE_BoomerAMGSetADropType(amg_precond, -1);
   }
}

#endif // MFEM_HYPRE_VERSION >= 21800
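// Usage sketch (illustrative, not part of the original source): AIR
// (approximate ideal restriction) AMG for an advection-dominated system. The
// default arguments (distance = 15, postrelax = "FFC") are the ones documented
// in the header; A is hypothetical.
#if MFEM_HYPRE_VERSION >= 21800
static void ExampleAIRSetup(HypreParMatrix &A)
{
   HypreBoomerAMG amg(A);
   amg.SetAdvectiveOptions(); // same as SetAdvectiveOptions(15, "", "FFC")
}
#endif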
HypreBoomerAMG::~HypreBoomerAMG()
{
   for (int i = 0; i < rbms.Size(); i++)
   {
      HYPRE_ParVectorDestroy(rbms[i]);
   }

   HYPRE_BoomerAMGDestroy(amg_precond);
}
void HypreAMS::Init(ParFiniteElementSpace *edge_fespace)
{
   int cycle_type       = 13;
   int rlx_sweeps       = 1;
   double rlx_weight    = 1.0;
   double rlx_omega     = 1.0;
#ifndef HYPRE_USING_CUDA
   int amg_coarsen_type = 10;
   int amg_agg_levels   = 1;
   int amg_rlx_type     = 8;
   int rlx_type         = 2;
   double theta         = 0.25;
   int amg_interp_type  = 6;
   int amg_Pmax         = 4;
#else
   int amg_coarsen_type = 8;
   int amg_agg_levels   = 0;
   int amg_rlx_type     = 18;
   int rlx_type         = 1;
   double theta         = 0.25;
   int amg_interp_type  = 6;
   int amg_Pmax         = 4;
#endif

   int dim = edge_fespace->GetMesh()->Dimension();
   int sdim = edge_fespace->GetMesh()->SpaceDimension();
   const FiniteElementCollection *edge_fec = edge_fespace->FEColl();

   bool trace_space, rt_trace_space;
   ND_Trace_FECollection *nd_tr_fec = NULL;
   trace_space = dynamic_cast<const ND_Trace_FECollection *>(edge_fec);
   rt_trace_space = dynamic_cast<const RT_Trace_FECollection *>(edge_fec);
   trace_space = trace_space || rt_trace_space;

   int p = 1;
   if (edge_fespace->GetNE() > 0)
   {
      MFEM_VERIFY(!edge_fespace->IsVariableOrder(), "");
      if (trace_space)
      {
         p = edge_fespace->GetFaceOrder(0);
         if (dim == 2) { p++; }
      }
      else
      {
         p = edge_fespace->GetElementOrder(0);
      }
   }

   ParMesh *pmesh = (ParMesh *) edge_fespace->GetMesh();
   if (rt_trace_space)
   {
      nd_tr_fec = new ND_Trace_FECollection(p, dim);
      edge_fespace = new ParFiniteElementSpace(pmesh, nd_tr_fec);
   }

   HYPRE_AMSCreate(&ams);

   HYPRE_AMSSetDimension(ams, sdim); // 2D H(div) and 3D H(curl) problems
   HYPRE_AMSSetTol(ams, 0.0);
   HYPRE_AMSSetMaxIter(ams, 1); // use as a preconditioner
   HYPRE_AMSSetCycleType(ams, cycle_type);
   HYPRE_AMSSetPrintLevel(ams, 1);

   // define the nodal linear finite element space associated with edge_fespace
   FiniteElementCollection *vert_fec;
   if (trace_space)
   {
      vert_fec = new H1_Trace_FECollection(p, dim);
   }
   else
   {
      vert_fec = new H1_FECollection(p, dim);
   }
   ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh,
                                                                   vert_fec);

   // generate and set the vertex coordinates
   if (p == 1 && pmesh->GetNodes() == NULL)
   {
      ParGridFunction x_coord(vert_fespace);
      ParGridFunction y_coord(vert_fespace);
      ParGridFunction z_coord(vert_fespace);
      double *coord;
      for (int i = 0; i < pmesh->GetNV(); i++)
      {
         coord = pmesh -> GetVertex(i);
         x_coord(i) = coord[0];
         y_coord(i) = coord[1];
         if (sdim == 3) { z_coord(i) = coord[2]; }
      }
      x = x_coord.ParallelProject();
      y = y_coord.ParallelProject();
      if (sdim == 2)
      {
         z = NULL;
         HYPRE_AMSSetCoordinateVectors(ams, *x, *y, NULL);
      }
      else
      {
         z = z_coord.ParallelProject();
         HYPRE_AMSSetCoordinateVectors(ams, *x, *y, *z);
      }
   }
   else
   {
      x = NULL;
      y = NULL;
      z = NULL;
   }

   // generate and set the discrete gradient
   ParDiscreteLinearOperator *grad;
   grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
   if (trace_space)
   {
      grad->AddTraceFaceInterpolator(new GradientInterpolator);
   }
   else
   {
      grad->AddDomainInterpolator(new GradientInterpolator);
   }
   grad->Assemble();
   grad->Finalize();
   G = grad->ParallelAssemble();
   HYPRE_AMSSetDiscreteGradient(ams, *G);
   delete grad;

   // generate and set the Nedelec interpolation matrices
   Pi = Pix = Piy = Piz = NULL;
   if (p > 1 || pmesh->GetNodes() != NULL)
   {
      ParFiniteElementSpace *vert_fespace_d
         = new ParFiniteElementSpace(pmesh, vert_fec, sdim, Ordering::byVDIM);

      ParDiscreteLinearOperator *id_ND;
      id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
      if (trace_space)
      {
         id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
      }
      else
      {
         id_ND->AddDomainInterpolator(new IdentityInterpolator);
      }
      id_ND->Assemble();
      id_ND->Finalize();

      if (cycle_type < 10)
      {
         Pi = id_ND->ParallelAssemble();
      }
      else
      {
         Array2D<HypreParMatrix *> Pi_blocks;
         id_ND->GetParBlocks(Pi_blocks);
         Pix = Pi_blocks(0,0);
         Piy = Pi_blocks(0,1);
         if (sdim == 3) { Piz = Pi_blocks(0,2); }
      }

      delete id_ND;

      HYPRE_ParCSRMatrix HY_Pi  = (Pi)  ? (HYPRE_ParCSRMatrix) *Pi  : NULL;
      HYPRE_ParCSRMatrix HY_Pix = (Pix) ? (HYPRE_ParCSRMatrix) *Pix : NULL;
      HYPRE_ParCSRMatrix HY_Piy = (Piy) ? (HYPRE_ParCSRMatrix) *Piy : NULL;
      HYPRE_ParCSRMatrix HY_Piz = (Piz) ? (HYPRE_ParCSRMatrix) *Piz : NULL;
      HYPRE_AMSSetInterpolations(ams, HY_Pi, HY_Pix, HY_Piy, HY_Piz);

      delete vert_fespace_d;
   }

   delete vert_fespace;
   delete vert_fec;

   if (rt_trace_space)
   {
      delete edge_fespace;
      delete nd_tr_fec;
   }

   // set additional AMS options
   HYPRE_AMSSetSmoothingOptions(ams, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
   HYPRE_AMSSetAlphaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
                               theta, amg_interp_type, amg_Pmax);
   HYPRE_AMSSetBetaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
                              theta, amg_interp_type, amg_Pmax);

   HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, amg_rlx_type);
   HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, amg_rlx_type);
}

void HypreAMS::SetOperator(const Operator &op)
{
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width  = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}

HypreAMS::~HypreAMS()
{
   HYPRE_AMSDestroy(ams);

   delete x;
   delete y;
   delete z;

   delete G;
   delete Pi;
   delete Pix;
   delete Piy;
   delete Piz;
}

void HypreAMS::SetPrintLevel(int print_lvl)
{
   HYPRE_AMSSetPrintLevel(ams, print_lvl);
}
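// Usage sketch (illustrative, not part of the original source): AMS as a PCG
// preconditioner for a definite Maxwell problem on a Nedelec (H(curl)) space;
// A, B, X and edge_fespace are hypothetical.
static void ExampleAMSSolve(HypreParMatrix &A, HypreParVector &B,
                            HypreParVector &X,
                            ParFiniteElementSpace *edge_fespace)
{
   HypreAMS ams(A, edge_fespace);
   HyprePCG pcg(A);
   pcg.SetTol(1e-10);
   pcg.SetMaxIter(200);
   pcg.SetPreconditioner(ams);
   pcg.Mult(B, X);
}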
void HypreADS::Init(ParFiniteElementSpace *face_fespace)
{
   int cycle_type       = 11;
   int rlx_type         = 2;
   int rlx_sweeps       = 1;
   double rlx_weight    = 1.0;
   double rlx_omega     = 1.0;
#ifndef HYPRE_USING_CUDA
   int amg_coarsen_type = 10;
   int amg_agg_levels   = 1;
   int amg_rlx_type     = 8;
   double theta         = 0.25;
   int amg_interp_type  = 6;
   int amg_Pmax         = 4;
#else
   int amg_coarsen_type = 8;
   int amg_agg_levels   = 0;
   int amg_rlx_type     = 18;
   double theta         = 0.25;
   int amg_interp_type  = 6;
   int amg_Pmax         = 4;
#endif
   int ams_cycle_type   = 14;

   const FiniteElementCollection *face_fec = face_fespace->FEColl();
   bool trace_space =
      (dynamic_cast<const RT_Trace_FECollection *>(face_fec) != NULL);

   int p = 1;
   if (face_fespace->GetNE() > 0)
   {
      MFEM_VERIFY(!face_fespace->IsVariableOrder(), "");
      if (trace_space)
      {
         p = face_fespace->GetFaceOrder(0) + 1;
      }
      else
      {
         p = face_fespace->GetElementOrder(0);
      }
   }

   HYPRE_ADSCreate(&ads);

   HYPRE_ADSSetTol(ads, 0.0);
   HYPRE_ADSSetMaxIter(ads, 1); // use as a preconditioner
   HYPRE_ADSSetCycleType(ads, cycle_type);
   HYPRE_ADSSetPrintLevel(ads, 1);

   // define the nodal and edge finite element spaces associated with
   // face_fespace
   ParMesh *pmesh = (ParMesh *) face_fespace->GetMesh();
   FiniteElementCollection *vert_fec, *edge_fec;
   if (trace_space)
   {
      vert_fec = new H1_Trace_FECollection(p, 3);
      edge_fec = new ND_Trace_FECollection(p, 3);
   }
   else
   {
      vert_fec = new H1_FECollection(p, 3);
      edge_fec = new ND_FECollection(p, 3);
   }

   ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh,
                                                                   vert_fec);
   ParFiniteElementSpace *edge_fespace = new ParFiniteElementSpace(pmesh,
                                                                   edge_fec);

   // generate and set the vertex coordinates
   if (p == 1 && pmesh->GetNodes() == NULL)
   {
      ParGridFunction x_coord(vert_fespace);
      ParGridFunction y_coord(vert_fespace);
      ParGridFunction z_coord(vert_fespace);
      double *coord;
      for (int i = 0; i < pmesh->GetNV(); i++)
      {
         coord = pmesh -> GetVertex(i);
         x_coord(i) = coord[0];
         y_coord(i) = coord[1];
         z_coord(i) = coord[2];
      }
      x = x_coord.ParallelProject();
      y = y_coord.ParallelProject();
      z = z_coord.ParallelProject();
      HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
   }
   else
   {
      x = NULL;
      y = NULL;
      z = NULL;
   }

   // generate and set the discrete curl
   ParDiscreteLinearOperator *curl;
   curl = new ParDiscreteLinearOperator(edge_fespace, face_fespace);
   if (trace_space)
   {
      curl->AddTraceFaceInterpolator(new CurlInterpolator);
   }
   else
   {
      curl->AddDomainInterpolator(new CurlInterpolator);
   }
   curl->Assemble();
   curl->Finalize();
   C = curl->ParallelAssemble();
   HYPRE_ADSSetDiscreteCurl(ads, *C);
   delete curl;

   // generate and set the discrete gradient
   ParDiscreteLinearOperator *grad;
   grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
   if (trace_space)
   {
      grad->AddTraceFaceInterpolator(new GradientInterpolator);
   }
   else
   {
      grad->AddDomainInterpolator(new GradientInterpolator);
   }
   grad->Assemble();
   grad->Finalize();
   G = grad->ParallelAssemble();
   HYPRE_ADSSetDiscreteGradient(ads, *G);
   delete grad;

   // generate and set the Nedelec and Raviart-Thomas interpolation matrices
   RT_Pi = RT_Pix = RT_Piy = RT_Piz = NULL;
   ND_Pi = ND_Pix = ND_Piy = ND_Piz = NULL;
   if (p > 1 || pmesh->GetNodes() != NULL)
   {
      ParFiniteElementSpace *vert_fespace_d
         = new ParFiniteElementSpace(pmesh, vert_fec, 3, Ordering::byVDIM);

      ParDiscreteLinearOperator *id_ND;
      id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
      if (trace_space)
      {
         id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
      }
      else
      {
         id_ND->AddDomainInterpolator(new IdentityInterpolator);
      }
      id_ND->Assemble();
      id_ND->Finalize();

      if (ams_cycle_type < 10)
      {
         ND_Pi = id_ND->ParallelAssemble();
      }
      else
      {
         Array2D<HypreParMatrix *> ND_Pi_blocks;
         id_ND->GetParBlocks(ND_Pi_blocks);
         ND_Pix = ND_Pi_blocks(0,0);
         ND_Piy = ND_Pi_blocks(0,1);
         ND_Piz = ND_Pi_blocks(0,2);
      }

      delete id_ND;

      ParDiscreteLinearOperator *id_RT;
      id_RT = new ParDiscreteLinearOperator(vert_fespace_d, face_fespace);
      if (trace_space)
      {
         id_RT->AddTraceFaceInterpolator(new NormalInterpolator);
      }
      else
      {
         id_RT->AddDomainInterpolator(new IdentityInterpolator);
      }
      id_RT->Assemble();
      id_RT->Finalize();

      if (cycle_type < 10)
      {
         RT_Pi = id_RT->ParallelAssemble();
      }
      else
      {
         Array2D<HypreParMatrix *> RT_Pi_blocks;
         id_RT->GetParBlocks(RT_Pi_blocks);
         RT_Pix = RT_Pi_blocks(0,0);
         RT_Piy = RT_Pi_blocks(0,1);
         RT_Piz = RT_Pi_blocks(0,2);
      }

      delete id_RT;

      HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
      HY_RT_Pi  = (RT_Pi)  ? (HYPRE_ParCSRMatrix) *RT_Pi  : NULL;
      HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
      HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
      HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
      HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
      HY_ND_Pi  = (ND_Pi)  ? (HYPRE_ParCSRMatrix) *ND_Pi  : NULL;
      HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
      HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
      HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
      HYPRE_ADSSetInterpolations(ads,
                                 HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
                                 HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);

      delete vert_fespace_d;
   }

   delete vert_fec;
   delete vert_fespace;
   delete edge_fec;
   delete edge_fespace;

   // set additional ADS options
   HYPRE_ADSSetSmoothingOptions(ads, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
   HYPRE_ADSSetAMGOptions(ads, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
                          theta, amg_interp_type, amg_Pmax);
   HYPRE_ADSSetAMSOptions(ads, ams_cycle_type, amg_coarsen_type, amg_agg_levels,
                          amg_rlx_type, theta, amg_interp_type, amg_Pmax);
}

void HypreADS::SetOperator(const Operator &op)
{
   const HypreParMatrix *new_A = dynamic_cast<const HypreParMatrix *>(&op);
   MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

   // update base classes: Operator, Solver, HypreSolver
   height = new_A->Height();
   width  = new_A->Width();
   A = const_cast<HypreParMatrix *>(new_A);
   setup_called = 0;
   delete X;
   delete B;
   B = X = NULL;
   auxB.Delete(); auxB.Reset();
   auxX.Delete(); auxX.Reset();
}

HypreADS::~HypreADS()
{
   HYPRE_ADSDestroy(ads);

   delete x;
   delete y;
   delete z;

   delete G;
   delete C;

   delete RT_Pi;
   delete RT_Pix;
   delete RT_Piy;
   delete RT_Piz;
   delete ND_Pi;
   delete ND_Pix;
   delete ND_Piy;
   delete ND_Piz;
}

void HypreADS::SetPrintLevel(int print_lvl)
{
   HYPRE_ADSSetPrintLevel(ads, print_lvl);
}
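// Usage sketch (illustrative, not part of the original source): ADS as a PCG
// preconditioner for an H(div) problem on a Raviart-Thomas space; A, B, X and
// face_fespace are hypothetical.
static void ExampleADSSolve(HypreParMatrix &A, HypreParVector &B,
                            HypreParVector &X,
                            ParFiniteElementSpace *face_fespace)
{
   HypreADS ads(A, face_fespace);
   HyprePCG pcg(A);
   pcg.SetTol(1e-10);
   pcg.SetMaxIter(200);
   pcg.SetPreconditioner(ads);
   pcg.Mult(B, X);
}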
HypreLOBPCG::HypreMultiVector::HypreMultiVector(int n, HypreParVector & v,
                                                mv_InterfaceInterpreter & interpreter)
   : hpv(NULL),
     nv(n)
{
   mv_ptr = mv_MultiVectorCreateFromSampleVector(&interpreter, nv,
                                                 (HYPRE_ParVector)v);

   HYPRE_ParVector* vecs = NULL;
   {
      mv_TempMultiVector* tmp =
         (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);
      vecs = (HYPRE_ParVector*)(tmp -> vector);
   }

   hpv = new HypreParVector*[nv];
   for (int i=0; i<nv; i++)
   {
      hpv[i] = new HypreParVector(vecs[i]);
   }
}

HypreLOBPCG::HypreMultiVector::~HypreMultiVector()
{
   if ( hpv != NULL )
   {
      for (int i=0; i<nv; i++)
      {
         delete hpv[i];
      }
      delete [] hpv;
   }

   mv_MultiVectorDestroy(mv_ptr);
}

void
HypreLOBPCG::HypreMultiVector::Randomize(HYPRE_Int seed)
{
   mv_MultiVectorSetRandom(mv_ptr, seed);
}

HypreParVector &
HypreLOBPCG::HypreMultiVector::GetVector(unsigned int i)
{
   MFEM_ASSERT((int)i < nv, "index out of range");

   return ( *hpv[i] );
}

HypreParVector **
HypreLOBPCG::HypreMultiVector::StealVectors()
{
   HypreParVector ** hpv_ret = hpv;

   hpv = NULL;

   mv_TempMultiVector * mv_tmp =
      (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);

   mv_tmp->ownsVectors = 0;

   for (int i=0; i<nv; i++)
   {
      hpv_ret[i]->SetOwnership(1);
   }

   return hpv_ret;
}
HypreLOBPCG::HypreLOBPCG(MPI_Comm c)
   : comm(c),
     seed(75805),
     nev(10),
     multi_vec(NULL),
     x(NULL),
     subSpaceProj(NULL)
{
   MPI_Comm_size(comm,&numProcs);
   MPI_Comm_rank(comm,&myid);

   HYPRE_ParCSRSetupInterpreter(&interpreter);
   HYPRE_ParCSRSetupMatvec(&matvec_fn);
   HYPRE_LOBPCGCreate(&interpreter, &matvec_fn, &lobpcg_solver);
}

HypreLOBPCG::~HypreLOBPCG()
{
   delete multi_vec;
   delete x;

   HYPRE_LOBPCGDestroy(lobpcg_solver);
}

void
HypreLOBPCG::SetTol(double tol)
{
   HYPRE_LOBPCGSetTol(lobpcg_solver, tol);
}

void
HypreLOBPCG::SetRelTol(double rel_tol)
{
#if MFEM_HYPRE_VERSION >= 21101
   HYPRE_LOBPCGSetRTol(lobpcg_solver, rel_tol);
#else
   MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
#endif
}

void
HypreLOBPCG::SetMaxIter(int max_iter)
{
   HYPRE_LOBPCGSetMaxIter(lobpcg_solver, max_iter);
}

void
HypreLOBPCG::SetPrintLevel(int logging)
{
   if (myid == 0)
   {
      HYPRE_LOBPCGSetPrintLevel(lobpcg_solver, logging);
   }
}

void
HypreLOBPCG::SetPrecondUsageMode(int pcg_mode)
{
   HYPRE_LOBPCGSetPrecondUsageMode(lobpcg_solver, pcg_mode);
}

void
HypreLOBPCG::SetPreconditioner(Solver & precond_)
{
   precond = &precond_;

   HYPRE_LOBPCGSetPrecond(lobpcg_solver,
                          (HYPRE_PtrToSolverFcn)this->PrecondSolve,
                          (HYPRE_PtrToSolverFcn)this->PrecondSetup,
                          (HYPRE_Solver)&precond_);
}

void
HypreLOBPCG::SetOperator(Operator & A)
{
   HYPRE_BigInt locSize = A.Width();
   HYPRE_BigInt glbSize = 0;
   HYPRE_BigInt * part = NULL;

   if (HYPRE_AssumedPartitionCheck())
   {
      part = mfem_hypre_TAlloc_host(HYPRE_BigInt, 2);

      MPI_Scan(&locSize, &part[1], 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);

      part[0] = part[1] - locSize;

      MPI_Allreduce(&locSize, &glbSize, 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
   }
   else
   {
      part = mfem_hypre_TAlloc_host(HYPRE_BigInt, numProcs+1);

      MPI_Allgather(&locSize, 1, HYPRE_MPI_BIG_INT,
                    &part[1], 1, HYPRE_MPI_BIG_INT, comm);

      part[0] = 0;
      for (int i=0; i<numProcs; i++)
      {
         part[i+1] += part[i];
      }

      glbSize = part[numProcs];
   }

   if ( x != NULL )
   {
      delete x;
   }

   // Create a distributed vector without a data array
   const bool is_device_ptr = true;
   x = new HypreParVector(comm, glbSize, NULL, part, is_device_ptr);

   matvec_fn.MatvecCreate  = this->OperatorMatvecCreate;
   matvec_fn.Matvec        = this->OperatorMatvec;
   matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;

   HYPRE_LOBPCGSetup(lobpcg_solver,(HYPRE_Matrix)&A,NULL,NULL);
}

void
HypreLOBPCG::SetMassMatrix(Operator & M)
{
   matvec_fn.MatvecCreate  = this->OperatorMatvecCreate;
   matvec_fn.Matvec        = this->OperatorMatvec;
   matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;

   HYPRE_LOBPCGSetupB(lobpcg_solver,(HYPRE_Matrix)&M,NULL);
}
void
HypreLOBPCG::GetEigenvalues(Array<double> & eigs) const
{
   // Initialize eigenvalues array with marker values
   eigs.SetSize(nev);

   for (int i=0; i<nev; i++)
   {
      eigs[i] = eigenvalues[i];
   }
}

const HypreParVector &
HypreLOBPCG::GetEigenvector(unsigned int i) const
{
   return multi_vec->GetVector(i);
}

void
HypreLOBPCG::SetInitialVectors(int num_vecs, HypreParVector ** vecs)
{
   // Initialize the HypreMultiVector object if necessary
   if ( multi_vec == NULL )
   {
      MFEM_ASSERT(x != NULL, "In HypreLOBPCG::SetInitialVectors()");

      multi_vec = new HypreMultiVector(nev, *x, interpreter);
   }

   // Copy the vectors provided
   for (int i=0; i < min(num_vecs,nev); i++)
   {
      multi_vec->GetVector(i) = *vecs[i];
   }

   // Randomize any remaining vectors
   for (int i=min(num_vecs,nev); i < nev; i++)
   {
      multi_vec->GetVector(i).Randomize(seed);
   }

   // Ensure all vectors are in the proper subspace
   if ( subSpaceProj != NULL )
   {
      HypreParVector y(*x);
      y = multi_vec->GetVector(0);

      for (int i=1; i<nev; i++)
      {
         subSpaceProj->Mult(multi_vec->GetVector(i),
                            multi_vec->GetVector(i-1));
      }
      subSpaceProj->Mult(y,
                         multi_vec->GetVector(nev-1));
   }
}
void
HypreLOBPCG::Solve()
{
   // Initialize the HypreMultiVector object if necessary
   if ( multi_vec == NULL )
   {
      MFEM_ASSERT(x != NULL, "In HypreLOBPCG::Solve()");

      multi_vec = new HypreMultiVector(nev, *x, interpreter);
      multi_vec->Randomize(seed);

      if ( subSpaceProj != NULL )
      {
         HypreParVector y(*x);
         y = multi_vec->GetVector(0);

         for (int i=1; i<nev; i++)
         {
            subSpaceProj->Mult(multi_vec->GetVector(i),
                               multi_vec->GetVector(i-1));
         }
         subSpaceProj->Mult(y, multi_vec->GetVector(nev-1));
      }
   }

   // Perform the eigenmode calculation; the eigenvalues are computed in
   // ascending order
   HYPRE_LOBPCGSolve(lobpcg_solver, NULL, *multi_vec, eigenvalues);
}
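// Usage sketch (illustrative, not part of the original source): computing the
// smallest eigenpairs of a hypothetical SPD HypreParMatrix A with LOBPCG,
// preconditioned by BoomerAMG.
static void ExampleLOBPCG(HypreParMatrix &A)
{
   HypreBoomerAMG amg(A);
   amg.SetPrintLevel(0);

   HypreLOBPCG lobpcg(A.GetComm());
   lobpcg.SetNumModes(5);          // number of eigenpairs requested
   lobpcg.SetPreconditioner(amg);
   lobpcg.SetMaxIter(200);
   lobpcg.SetTol(1e-8);
   lobpcg.SetPrecondUsageMode(1);
   lobpcg.SetOperator(A);
   lobpcg.Solve();

   Array<double> eigenvalues;
   lobpcg.GetEigenvalues(eigenvalues);
}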
void *
HypreLOBPCG::OperatorMatvecCreate( void *A,
                                   void *x )
{
   void *matvec_data;

   matvec_data = NULL;

   return ( matvec_data );
}

HYPRE_Int
HypreLOBPCG::OperatorMatvec( void *matvec_data,
                             HYPRE_Complex alpha,
                             void *A,
                             void *x,
                             HYPRE_Complex beta,
                             void *y )
{
   MFEM_VERIFY(alpha == 1.0 && beta == 0.0, "values not supported");

   Operator *Aop = (Operator*)A;

   int width = Aop->Width();

   hypre_ParVector * xPar = (hypre_ParVector *)x;
   hypre_ParVector * yPar = (hypre_ParVector *)y;

   Vector xVec(xPar->local_vector->data, width);
   Vector yVec(yPar->local_vector->data, width);

   Aop->Mult( xVec, yVec );

   return 0;
}

HYPRE_Int
HypreLOBPCG::OperatorMatvecDestroy( void *matvec_data )
{
   return 0;
}

HYPRE_Int
HypreLOBPCG::PrecondSolve(void *solver,
                          void *A,
                          void *b,
                          void *x)
{
   Solver   *PC = (Solver*)solver;
   Operator *OP = (Operator*)A;

   int width = OP->Width();

   hypre_ParVector * bPar = (hypre_ParVector *)b;
   hypre_ParVector * xPar = (hypre_ParVector *)x;

   Vector bVec(bPar->local_vector->data, width);
   Vector xVec(xPar->local_vector->data, width);

   PC->Mult( bVec, xVec );

   return 0;
}

HYPRE_Int
HypreLOBPCG::PrecondSetup(void *solver,
                          void *A,
                          void *b,
                          void *x)
{
   return 0;
}
HypreAME::HypreAME(MPI_Comm comm)
{
   MPI_Comm_size(comm,&numProcs);
   MPI_Comm_rank(comm,&myid);

   HYPRE_AMECreate(&ame_solver);
   HYPRE_AMESetPrintLevel(ame_solver, 0);
}

HypreAME::~HypreAME()
{
   if ( multi_vec )
   {
      mfem_hypre_TFree_host(multi_vec);
   }

   if ( eigenvectors )
   {
      for (int i=0; i<nev; i++)
      {
         delete eigenvectors[i];
      }
   }
   delete [] eigenvectors;

   if ( eigenvalues )
   {
      mfem_hypre_TFree_host(eigenvalues);
   }

   HYPRE_AMEDestroy(ame_solver);
}

void HypreAME::SetNumModes(int num_eigs)
{
   nev = num_eigs;

   HYPRE_AMESetBlockSize(ame_solver, nev);
}

void HypreAME::SetTol(double tol)
{
   HYPRE_AMESetTol(ame_solver, tol);
}

void HypreAME::SetRelTol(double rel_tol)
{
#if MFEM_HYPRE_VERSION >= 21101
   HYPRE_AMESetRTol(ame_solver, rel_tol);
#else
   MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
#endif
}

void HypreAME::SetMaxIter(int max_iter)
{
   HYPRE_AMESetMaxIter(ame_solver, max_iter);
}

void HypreAME::SetPrintLevel(int logging)
{
   if (myid == 0)
   {
      HYPRE_AMESetPrintLevel(ame_solver, logging);
   }
}

void HypreAME::SetPreconditioner(HypreSolver & precond)
{
   ams_precond = &precond;
}

void HypreAME::SetOperator(const HypreParMatrix & A)
{
   HYPRE_Solver ams_precond_ptr = (HYPRE_Solver)*ams_precond;

   ams_precond->SetupFcn()(*ams_precond,A,NULL,NULL);

   HYPRE_AMESetAMSSolver(ame_solver, ams_precond_ptr);

   HYPRE_AMESetup(ame_solver);
}

void HypreAME::SetMassMatrix(const HypreParMatrix & M)
{
   HYPRE_ParCSRMatrix parcsr_M = M;
   HYPRE_AMESetMassMatrix(ame_solver,(HYPRE_ParCSRMatrix)parcsr_M);
}

void HypreAME::Solve()
{
   HYPRE_AMESolve(ame_solver);

   // Grab a pointer to the eigenvalues from AME
   HYPRE_AMEGetEigenvalues(ame_solver,&eigenvalues);

   // Grab a pointer to the eigenvectors from AME
   HYPRE_AMEGetEigenvectors(ame_solver,&multi_vec);
}

void HypreAME::GetEigenvalues(Array<double> & eigs) const
{
   // Initialize the eigenvalues array with marker values
   eigs.SetSize(nev); eigs = -1.0;

   // Copy the converged eigenvalues
   for (int i=0; i<nev; i++)
   {
      eigs[i] = eigenvalues[i];
   }
}

void
HypreAME::createDummyVectors() const
{
   eigenvectors = new HypreParVector*[nev];
   for (int i=0; i<nev; i++)
   {
      eigenvectors[i] = new HypreParVector(multi_vec[i]);
      eigenvectors[i]->SetOwnership(1);
   }
}

const HypreParVector &
HypreAME::GetEigenvector(unsigned int i) const
{
   if ( eigenvectors == NULL )
   {
      this->createDummyVectors();
   }

   return *eigenvectors[i];
}

HypreParVector **
HypreAME::StealEigenvectors()
{
   if ( eigenvectors == NULL )
   {
      this->createDummyVectors();
   }

   // Set the local pointer to NULL so the vectors won't be deleted later
   HypreParVector ** vecs = eigenvectors;
   eigenvectors = NULL;

   return vecs;
}
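// Usage sketch (illustrative, not part of the original source): the Maxwell
// eigensolver driven through the API above; A (curl-curl plus mass term),
// M (mass matrix) and edge_fespace are hypothetical.
static void ExampleAME(HypreParMatrix &A, HypreParMatrix &M,
                       ParFiniteElementSpace *edge_fespace)
{
   HypreAMS ams(A, edge_fespace);
   ams.SetPrintLevel(0);
   ams.SetSingularProblem(); // the Maxwell operator has a large nullspace

   HypreAME ame(A.GetComm());
   ame.SetNumModes(5);
   ame.SetPreconditioner(ams);
   ame.SetMaxIter(100);
   ame.SetTol(1e-8);
   ame.SetOperator(A);
   ame.SetMassMatrix(M);
   ame.Solve();

   Array<double> eigenvalues;
   ame.GetEigenvalues(eigenvalues);
}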