#include "../config/config.hpp"
#include "../fem/fem.hpp"
#include "../general/forall.hpp"

#if MFEM_HYPRE_VERSION >= 21900
// ...
#endif

void Hypre::Finalize()
{
   Hypre &hypre = Instance();
#if MFEM_HYPRE_VERSION >= 21900
   // ...
#endif
   hypre.finalized = true;
}

void Hypre::SetDefaultOptions()
{
#if MFEM_HYPRE_VERSION >= 22100
#ifdef HYPRE_USING_CUDA
   HYPRE_SetSpGemmUseCusparse(0);
#elif defined(HYPRE_USING_HIP)
   // ...
#endif
#endif
}

template<typename TargetT, typename SourceT>
static TargetT *DuplicateAs(const SourceT *array, int size,
                            bool cplusplus = true)
{
   TargetT *target_array = cplusplus ? (TargetT*) Memory<TargetT>(size)
                                     : mfem_hypre_TAlloc_host(TargetT, size);
   for (int i = 0; i < size; i++) { target_array[i] = array[i]; }
   return target_array;
}
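// Example (illustrative sketch, not part of the library): DuplicateAs is used
// below to deep-copy index/partitioning arrays while converting the element
// type, e.g. HYPRE_BigInt -> HYPRE_Int. The names "col_map" and "ncols" are
// placeholders assumed to be defined by the caller.
//
//   const HYPRE_BigInt *col_map = ...;   // ncols global column indices
//   HYPRE_Int *col_map_int =
//      DuplicateAs<HYPRE_Int>(col_map, ncols);           // new[] allocation
//   HYPRE_BigInt *col_map_h =
//      DuplicateAs<HYPRE_BigInt>(col_map, ncols, false); // hypre host alloc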
template <typename T>
// ...
   if (src_d_mt == MemoryType::DEFAULT)
   {
      src_d_mt = MemoryManager::GetDualMemoryType(src_h_mt);
   }
inline void HypreParVector::_SetDataAndSize_()
{
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
#if !defined(HYPRE_USING_GPU)
   SetDataAndSize(hypre_VectorData(x_loc),
                  internal::to_int(hypre_VectorSize(x_loc)));
#else
   MemoryType mt = (hypre_VectorMemoryLocation(x_loc) == HYPRE_MEMORY_HOST
                    ? MemoryType::HOST : GetHypreMemoryType());
   if (hypre_VectorData(x_loc) != NULL)
   {
      data.Wrap(hypre_VectorData(x_loc), size, mt, false);
   }
   // ...
#endif
}
HypreParVector::HypreParVector(MPI_Comm comm, HYPRE_BigInt glob_size,
                               HYPRE_BigInt *col) : Vector()
{
   x = hypre_ParVectorCreate(comm, glob_size, col);
   hypre_ParVectorInitialize(x);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParVectorSetPartitioningOwner(x, 0);
#endif
   hypre_ParVectorSetDataOwner(x, 1);
   hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x), 1);
   // ...
}

HypreParVector::HypreParVector(MPI_Comm comm, HYPRE_BigInt glob_size,
                               double *data_, HYPRE_BigInt *col,
                               bool is_device_ptr) : Vector()
{
   x = hypre_ParVectorCreate(comm, glob_size, col);
   hypre_ParVectorSetDataOwner(x, 1);
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
   hypre_SeqVectorSetDataOwner(x_loc, 0);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParVectorSetPartitioningOwner(x, 0);
#endif
   double tmp = 0.0;
   hypre_VectorData(x_loc) = &tmp;
#ifdef HYPRE_USING_GPU
   hypre_VectorMemoryLocation(x_loc) =
      is_device_ptr ? HYPRE_MEMORY_DEVICE : HYPRE_MEMORY_HOST;
#endif
   // With a non-NULL local data pointer, hypre_ParVectorInitialize() does not
   // allocate new memory; afterwards, point the local vector at data_.
   hypre_ParVectorInitialize(x);
   hypre_VectorData(x_loc) = data_;
   // ...
}
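// Example (illustrative sketch, not part of the library): wrapping an existing
// array in a HypreParVector. "comm", "glob_size", "col" (partitioning) and
// "local_data" are assumed to be provided by the caller.
//
//   HypreParVector v(comm, glob_size, local_data, col);
//   v.Randomize(0);       // fill with random values
//   v.Print("vec.out");   // one file per MPI rank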
HypreParVector::HypreParVector(const HypreParVector &y)
   : HypreParVector(y.CreateCompatibleVector())
{
   hypre_SeqVectorCopy(hypre_ParVectorLocalVector(y.x),
                       hypre_ParVectorLocalVector(x));
}

HypreParVector::HypreParVector(HypreParVector &&y)
{
   *this = std::move(y);
}

HypreParVector::HypreParVector(const HypreParMatrix &A, int transpose)
   : Vector()
{
   if (!transpose)
   {
      x = hypre_ParVectorInDomainOf(const_cast<HypreParMatrix&>(A));
   }
   else
   {
      x = hypre_ParVectorInRangeOf(const_cast<HypreParMatrix&>(A));
   }
   // ...
}

HypreParVector::HypreParVector(HYPRE_ParVector y) : Vector()
{
   x = (hypre_ParVector *) y;
   // ...
}

HypreParVector::HypreParVector(ParFiniteElementSpace *pfes)
{
   // ...
   hypre_ParVectorInitialize(x);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParVectorSetPartitioningOwner(x, 0);
#endif
   hypre_ParVectorSetDataOwner(x, 1);
   hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x), 1);
   // ...
}
HypreParVector HypreParVector::CreateCompatibleVector() const
{
   HypreParVector result;
   result.x = hypre_ParVectorCreate(x -> comm, x -> global_size,
                                    /* partitioning */ nullptr);
   hypre_ParVectorInitialize(result.x);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParVectorSetPartitioningOwner(result.x, 0);
#endif
   hypre_ParVectorSetDataOwner(result.x, 1);
   hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(result.x), 1);
   result._SetDataAndSize_();
   result.own_ParVector = 1;
   return result;
}

void HypreParVector::WrapHypreParVector(hypre_ParVector *y, bool owner)
{
   if (own_ParVector) { hypre_ParVectorDestroy(x); }
   x = y;
   // ...
   own_ParVector = owner;
}

Vector *HypreParVector::GlobalVector() const
{
   hypre_Vector *hv = hypre_ParVectorToVectorAll(*this);
   // ...
   hypre_SeqVectorSetDataOwner(hv, 0);
   hypre_SeqVectorDestroy(hv);
   // ...
}
HypreParVector& HypreParVector::operator=(HypreParVector &&y)
{
   // Move ownership and the wrapped hypre_ParVector from y.
   const auto own_tmp = y.own_ParVector;
   // ...
   own_ParVector = own_tmp;
   const auto x_tmp = y.x;
   // ...
}

void HypreParVector::SetData(double *data_)
{
   hypre_VectorData(hypre_ParVectorLocalVector(x)) = data_;
   // ...
}

void HypreParVector::HypreRead() const
{
   hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
   hypre_VectorData(x_loc) =
      const_cast<double*>(data.Read(GetHypreMemoryClass(), size));
#ifdef HYPRE_USING_GPU
   hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
#endif
}

// HypreReadWrite(), HypreWrite(), and the WrapMemoryRead/ReadWrite/Write()
// variants repeat the same pattern; only these fragments survive here:
//
//    hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
//    hypre_VectorData(x_loc) = ... // the corresponding alias of the Memory
// #ifdef HYPRE_USING_GPU
//    hypre_VectorMemoryLocation(x_loc) = HYPRE_MEMORY_DEVICE;
// #endif

HYPRE_Int HypreParVector::Randomize(HYPRE_Int seed)
{
   return hypre_ParVectorSetRandomValues(x, seed);
}

void HypreParVector::Print(const char *fname) const
{
   hypre_ParVectorPrint(x, fname);
}

void HypreParVector::Read(MPI_Comm comm, const char *fname)
{
   if (own_ParVector) { hypre_ParVectorDestroy(x); }
   // ...
   x = hypre_ParVectorRead(comm, fname);
   own_ParVector = true;
   // ...
}

HypreParVector::~HypreParVector()
{
   if (own_ParVector) { hypre_ParVectorDestroy(x); }
}

double InnerProd(HypreParVector *x, HypreParVector *y)
{
   return hypre_ParVectorInnerProd(*x, *y);
}

double InnerProd(HypreParVector &x, HypreParVector &y)
{
   return hypre_ParVectorInnerProd(x, y);
}

double ParNormlp(const Vector &vec, double p, MPI_Comm comm)
{
   double norm = 0.0;
   if (p == 1.0)
   {
      double loc_norm = vec.Norml1();
      MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
   }
   else if (p == 2.0)
   {
      double loc_norm = vec*vec;
      MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
      norm = sqrt(norm);
   }
   else if (p < infinity())
   {
      double sum = 0.0;
      for (int i = 0; i < vec.Size(); i++)
      {
         sum += pow(fabs(vec(i)), p);
      }
      MPI_Allreduce(&sum, &norm, 1, MPI_DOUBLE, MPI_SUM, comm);
      norm = pow(norm, 1.0/p);
   }
   else
   {
      double loc_norm = vec.Normlinf();
      MPI_Allreduce(&loc_norm, &norm, 1, MPI_DOUBLE, MPI_MAX, comm);
   }
   return norm;
}
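// Example (illustrative sketch): parallel reductions on a true-dof Vector.
//
//   double l2   = ParNormlp(v, 2.0, comm);        // global 2-norm
//   double linf = ParNormlp(v, infinity(), comm); // global max-norm
//   double dot  = InnerProd(x, y);                // HypreParVector dot product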
template <typename T>
// ...

template <typename SrcT, typename DstT>
// ...
   mfem::forall(capacity, [=] MFEM_HOST_DEVICE (int i) { dst_p[i] = src_p[i]; });

void HypreParMatrix::Init()
{
   // ...
   diagOwner = offdOwner = colMapOwner = -1;
   // ...
}

void HypreParMatrix::Read(MemoryClass mc) const
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   const int num_rows = NumRows();
   // ...
   diag->i = const_cast<HYPRE_Int*>(mem_diag.I.Read(mc, num_rows+1));
   diag->j = const_cast<HYPRE_Int*>(mem_diag.J.Read(mc, diag_nnz));
   diag->data = const_cast<double*>(mem_diag.data.Read(mc, diag_nnz));
   offd->i = const_cast<HYPRE_Int*>(mem_offd.I.Read(mc, num_rows+1));
   offd->j = const_cast<HYPRE_Int*>(mem_offd.J.Read(mc, offd_nnz));
   offd->data = const_cast<double*>(mem_offd.data.Read(mc, offd_nnz));
#if MFEM_HYPRE_VERSION >= 21800
   decltype(diag->memory_location) ml =
      (mc != GetHypreMemoryClass() ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE);
   diag->memory_location = ml;
   offd->memory_location = ml;
#endif
}
void HypreParMatrix::ReadWrite(MemoryClass mc)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   const int num_rows = NumRows();
   // ...
   diag->i = mem_diag.I.ReadWrite(mc, num_rows+1);
   // ...
   offd->i = mem_offd.I.ReadWrite(mc, num_rows+1);
   // ...
#if MFEM_HYPRE_VERSION >= 21800
   decltype(diag->memory_location) ml =
      (mc != GetHypreMemoryClass() ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE);
   diag->memory_location = ml;
   offd->memory_location = ml;
#endif
}

void HypreParMatrix::Write(MemoryClass mc, bool set_diag, bool set_offd)
{
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
   // ...
#if MFEM_HYPRE_VERSION >= 21800
   decltype(diag->memory_location) ml =
      (mc != GetHypreMemoryClass() ? HYPRE_MEMORY_HOST : HYPRE_MEMORY_DEVICE);
   if (set_diag) { diag->memory_location = ml; }
   if (set_offd) { offd->memory_location = ml; }
#endif
}

// (fragment from the constructor that wraps an existing hypre_ParCSRMatrix)
#if MFEM_HYPRE_VERSION >= 21800
   MemoryType diag_mt = (A->diag->memory_location == HYPRE_MEMORY_HOST
                         ? MemoryType::HOST : GetHypreMemoryType());
   MemoryType offd_mt = (A->offd->memory_location == HYPRE_MEMORY_HOST
                         ? MemoryType::HOST : GetHypreMemoryType());
#else
   // ...
#endif
   diagOwner = HypreCsrToMem(A->diag, diag_mt, false, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, offd_mt, false, mem_offd);

signed char HypreParMatrix::CopyCSR(SparseMatrix *csr, MemoryIJData &mem_csr,
                                    hypre_CSRMatrix *hypre_csr, bool mem_owner)
{
   // ...
   const int num_rows = csr->Height();
   // ...
   hypre_csr->i = const_cast<HYPRE_Int*>(mem_csr.I.Read(hypre_mc, num_rows+1));
   hypre_csr->j = const_cast<HYPRE_Int*>(mem_csr.J.Read(hypre_mc, nnz));
   hypre_csr->data = const_cast<double*>(mem_csr.data.Read(hypre_mc, nnz));
   MFEM_ASSERT(mem_csr.I.OwnsHostPtr() == mem_csr.J.OwnsHostPtr(),
               "invalid state: host ownership for I and J differ!");
   // ...
}

signed char HypreParMatrix::CopyBoolCSR(Table *bool_csr,
                                        MemoryIJData &mem_csr,
                                        hypre_CSRMatrix *hypre_csr)
{
   // ...
   CopyMemory(bool_csr->GetIMemory(), mem_csr.I, hypre_mc, false);
   CopyMemory(bool_csr->GetJMemory(), mem_csr.J, hypre_mc, false);
   // ...
   const int num_rows = bool_csr->Size();
   const int nnz = bool_csr->Size_of_connections();
   // ...
   for (int i = 0; i < nnz; i++)
   {
      // ... (set all entries of the boolean matrix to 1.0)
   }
   hypre_csr->i = const_cast<HYPRE_Int*>(mem_csr.I.Read(hypre_mc, num_rows+1));
   hypre_csr->j = const_cast<HYPRE_Int*>(mem_csr.J.Read(hypre_mc, nnz));
   hypre_csr->data = const_cast<double*>(mem_csr.data.Read(hypre_mc, nnz));
   MFEM_ASSERT(mem_csr.I.OwnsHostPtr() == mem_csr.J.OwnsHostPtr(),
               "invalid state: host ownership for I and J differ!");
   return (mem_csr.I.OwnsHostPtr() ? 1 : 0) +
          (mem_csr.data.OwnsHostPtr() ? 2 : 0);
}
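// Note (added for clarity): as the return statement above shows, the signed
// char ownership flags (diagOwner/offdOwner) are a small bit mask over host
// ownership of the CSR arrays. A minimal decoding sketch:
//
//   signed char own = diagOwner;
//   bool owns_ij   = own & 1;   // I and J arrays owned by mem_diag
//   bool owns_data = own & 2;   // data array owned by mem_diag
//   // own < 0 is used elsewhere in this file to mean "not set / hypre owns".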
static void CopyCSR_J(const int nnz, const MemoryIJData &mem_csr,
                      Memory<int> &dst_J)
{
   // ...
   mfem::forall(nnz, [=] MFEM_HOST_DEVICE (int i) { dst_p[i] = src_p[i]; });
}

signed char HypreParMatrix::HypreCsrToMem(hypre_CSRMatrix *h_mat,
                                          MemoryType h_mat_mt, bool own_ija,
                                          MemoryIJData &mem)
{
   // ...
   mem.I.Wrap(h_mat->i, nr1, h_mat_mt, own_ija);
   mem.J.Wrap(h_mat->j, nnz, h_mat_mt, own_ija);
   mem.data.Wrap(h_mat->data, nnz, h_mat_mt, own_ija);
   // ... (if the hypre arrays live in the wrong memory space, copy them)
      h_mem.I.New(nr1, hypre_mt);
      h_mem.I.CopyFrom(mem.I, nr1);
      h_mem.J.New(nnz, hypre_mt);
      h_mem.J.CopyFrom(mem.J, nnz);
      h_mem.data.New(nnz, hypre_mt);
      h_mem.data.CopyFrom(mem.data, nnz);
      // ...
#if MFEM_HYPRE_VERSION < 21400
      hypre_TFree(h_mat->i);
#elif MFEM_HYPRE_VERSION < 21800
      hypre_TFree(h_mat->i, HYPRE_MEMORY_SHARED);
#else
      hypre_TFree(h_mat->i, h_mat->memory_location);
#endif
      if (h_mat->owns_data)
      {
#if MFEM_HYPRE_VERSION < 21400
         hypre_TFree(h_mat->j);
         hypre_TFree(h_mat->data);
#elif MFEM_HYPRE_VERSION < 21800
         hypre_TFree(h_mat->j, HYPRE_MEMORY_SHARED);
         hypre_TFree(h_mat->data, HYPRE_MEMORY_SHARED);
#else
         hypre_TFree(h_mat->j, h_mat->memory_location);
         hypre_TFree(h_mat->data, h_mat->memory_location);
#endif
      }
      h_mat->i = mem.I.ReadWrite(hypre_mc, nr1);
      h_mat->j = mem.J.ReadWrite(hypre_mc, nnz);
      h_mat->data = mem.data.ReadWrite(hypre_mc, nnz);
      h_mat->owns_data = 0;
#if MFEM_HYPRE_VERSION >= 21800
      h_mat->memory_location = HYPRE_MEMORY_DEVICE;
#endif
   // ...
}

// Square parallel matrix from a local SparseMatrix 'diag':
HypreParMatrix::HypreParMatrix(MPI_Comm comm, HYPRE_BigInt glob_size,
                               HYPRE_BigInt *row_starts, SparseMatrix *diag)
   : Operator(diag->Height(), diag->Width())
{
   // ...
   A = hypre_ParCSRMatrixCreate(comm, glob_size, glob_size, row_starts,
                                row_starts, 0, diag->NumNonZeroElems(), 0);
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
#endif
   hypre_CSRMatrixSetDataOwner(A->diag,0);
   diagOwner = CopyCSR(diag, mem_diag, A->diag, false);
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetDataOwner(A->offd,1);
   hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Height()+1);
   // ...
   hypre_ParCSRMatrixSetNumNonzeros(A);
   // Make sure the first entry in each row is the diagonal one.
   hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   // ...
   CopyCSR_J(A->diag->num_nonzeros, mem_diag, diag->GetMemoryJ());
   // ...
   hypre_MatvecCommPkgCreate(A);
}
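// Example (illustrative sketch, not part of the library): wrapping a serial
// SparseMatrix as a square HypreParMatrix. "comm", "glob_size", "row_starts"
// and "local_csr" are placeholders assumed to be set up by the caller.
//
//   SparseMatrix local_csr = ...;
//   HypreParMatrix A_par(comm, glob_size, row_starts, &local_csr);
//   A_par.Print("A_par");   // debug output, one file per rank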
818 :
Operator(diag->Height(), diag->Width())
821 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
822 row_starts, col_starts,
824 hypre_ParCSRMatrixSetDataOwner(A,1);
825 #if MFEM_HYPRE_VERSION <= 22200 826 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
827 hypre_ParCSRMatrixSetColStartsOwner(A,0);
830 hypre_CSRMatrixSetDataOwner(A->diag,0);
831 diagOwner = CopyCSR(diag, mem_diag, A->diag,
false);
832 hypre_CSRMatrixSetRownnz(A->diag);
834 hypre_CSRMatrixSetDataOwner(A->offd,1);
835 hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->
Height()+1);
838 hypre_ParCSRMatrixSetNumNonzeros(A);
841 if (row_starts == col_starts)
844 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
848 CopyCSR_J(A->diag->num_nonzeros, mem_diag, diag->
GetMemoryJ());
853 hypre_MatvecCommPkgCreate(A);
866 :
Operator(diag->Height(), diag->Width())
869 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
870 row_starts, col_starts,
873 hypre_ParCSRMatrixSetDataOwner(A,1);
874 #if MFEM_HYPRE_VERSION <= 22200 875 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
876 hypre_ParCSRMatrixSetColStartsOwner(A,0);
879 hypre_CSRMatrixSetDataOwner(A->diag,0);
880 diagOwner = CopyCSR(diag, mem_diag, A->diag, own_diag_offd);
881 if (own_diag_offd) {
delete diag; }
882 hypre_CSRMatrixSetRownnz(A->diag);
884 hypre_CSRMatrixSetDataOwner(A->offd,0);
885 offdOwner = CopyCSR(offd, mem_offd, A->offd, own_diag_offd);
886 if (own_diag_offd) {
delete offd; }
887 hypre_CSRMatrixSetRownnz(A->offd);
889 hypre_ParCSRMatrixColMapOffd(A) = cmap;
893 hypre_ParCSRMatrixSetNumNonzeros(A);
896 if (row_starts == col_starts)
899 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
903 CopyCSR_J(A->diag->num_nonzeros, mem_diag, diag->
GetMemoryJ());
908 hypre_MatvecCommPkgCreate(A);
HypreParMatrix::HypreParMatrix(MPI_Comm comm, HYPRE_BigInt global_num_rows,
                               HYPRE_BigInt global_num_cols,
                               HYPRE_BigInt *row_starts,
                               HYPRE_BigInt *col_starts,
                               HYPRE_Int *diag_i, HYPRE_Int *diag_j,
                               double *diag_data,
                               HYPRE_Int *offd_i, HYPRE_Int *offd_j,
                               double *offd_data,
                               HYPRE_Int offd_num_cols,
                               HYPRE_BigInt *offd_col_map,
                               bool hypre_arrays)
{
   // ...
   A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                row_starts, col_starts, offd_num_cols, 0, 0);
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
#endif
   HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A->diag);

   hypre_CSRMatrixSetDataOwner(A->diag, hypre_arrays);
   hypre_CSRMatrixI(A->diag) = diag_i;
   hypre_CSRMatrixJ(A->diag) = diag_j;
   hypre_CSRMatrixData(A->diag) = diag_data;
   hypre_CSRMatrixNumNonzeros(A->diag) = diag_i[local_num_rows];
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
#endif

   hypre_CSRMatrixSetDataOwner(A->offd, hypre_arrays);
   hypre_CSRMatrixI(A->offd) = offd_i;
   hypre_CSRMatrixJ(A->offd) = offd_j;
   hypre_CSRMatrixData(A->offd) = offd_data;
   hypre_CSRMatrixNumNonzeros(A->offd) = offd_i[local_num_rows];
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
#endif

   hypre_ParCSRMatrixColMapOffd(A) = offd_col_map;
   colMapOwner = hypre_arrays ? -1 : 1;

   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (row_starts == col_starts)
   {
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   }
   hypre_MatvecCommPkgCreate(A);

   // ... (ownership of the I/J/data arrays is transferred to mem_diag/mem_offd
   //      or not, depending on 'hypre_arrays'; both call variants survive)
   diagOwner = HypreCsrToMem(A->diag, host_mt, true, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, host_mt, true, mem_offd);
   // ...
   diagOwner = HypreCsrToMem(A->diag, host_mt, false, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, host_mt, false, mem_offd);
   // ...
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetRownnz(A->offd);
}
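// Example (illustrative sketch, not part of the library): the constructor
// above lets an application hand pre-assembled CSR arrays directly to hypre.
// All names below are placeholders supplied by the caller.
//
//   HypreParMatrix A_par(comm, glob_rows, glob_cols, row_starts, col_starts,
//                        diag_i, diag_j, diag_data,
//                        offd_i, offd_j, offd_data,
//                        offd_num_cols, offd_col_map);
//   // With the default last argument the arrays stay owned by the caller.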
// Distribute a serial SparseMatrix 'sm_a' using the given partitionings:
HypreParMatrix::HypreParMatrix(MPI_Comm comm, HYPRE_BigInt *row_starts,
                               HYPRE_BigInt *col_starts, SparseMatrix *sm_a)
{
   MFEM_ASSERT(sm_a != NULL, "invalid input");
   MFEM_VERIFY(!HYPRE_AssumedPartitionCheck(),
               "this method can not be used with assumed partition");
   // ...
   hypre_CSRMatrix *csr_a;
   csr_a = hypre_CSRMatrixCreate(sm_a -> Height(), sm_a -> Width(),
                                 sm_a -> NumNonZeroElems());
   hypre_CSRMatrixSetDataOwner(csr_a,0);
   CopyCSR(sm_a, mem_a, csr_a, false);
   hypre_CSRMatrixSetRownnz(csr_a);
   // ...
   hypre_ParCSRMatrix *new_A =
      hypre_CSRMatrixToParCSRMatrix(comm, csr_a, row_starts, col_starts);
   // ...
   hypre_CSRMatrixI(csr_a) = NULL;
   hypre_CSRMatrixDestroy(csr_a);
   // ...
   if (row_starts == col_starts)
   {
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(new_A));
   }
   hypre_MatvecCommPkgCreate(A);
   // ...
}

// Boolean parallel matrix from a local Table 'diag':
HypreParMatrix::HypreParMatrix(MPI_Comm comm, HYPRE_BigInt global_num_rows,
                               HYPRE_BigInt global_num_cols,
                               HYPRE_BigInt *row_starts,
                               HYPRE_BigInt *col_starts, Table *diag)
{
   // ...
   A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
                                row_starts, col_starts, 0, nnz, 0);
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
#endif
   hypre_CSRMatrixSetDataOwner(A->diag,0);
   diagOwner = CopyBoolCSR(diag, mem_diag, A->diag);
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetDataOwner(A->offd,1);
   hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Size()+1);
   // ...
   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (row_starts == col_starts)
   {
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   }
   // ...
   hypre_MatvecCommPkgCreate(A);
}
// Boolean parallel matrix from raw CSR index arrays:
HypreParMatrix::HypreParMatrix(MPI_Comm comm, int id, int np,
                               HYPRE_BigInt *row, HYPRE_BigInt *col,
                               HYPRE_Int *i_diag, HYPRE_Int *j_diag,
                               HYPRE_Int *i_offd, HYPRE_Int *j_offd,
                               HYPRE_BigInt *cmap, HYPRE_Int cmap_size)
{
   HYPRE_Int diag_nnz, offd_nnz;
   // ...
   if (HYPRE_AssumedPartitionCheck())
   {
      diag_nnz = i_diag[row[1]-row[0]];
      offd_nnz = i_offd[row[1]-row[0]];
      A = hypre_ParCSRMatrixCreate(comm, row[2], col[2], row, col,
                                   cmap_size, diag_nnz, offd_nnz);
   }
   else
   {
      diag_nnz = i_diag[row[id+1]-row[id]];
      offd_nnz = i_offd[row[id+1]-row[id]];
      A = hypre_ParCSRMatrixCreate(comm, row[np], col[np], row, col,
                                   cmap_size, diag_nnz, offd_nnz);
   }
   hypre_ParCSRMatrixSetDataOwner(A,1);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(A,0);
   hypre_ParCSRMatrixSetColStartsOwner(A,0);
#endif
   // All entries of a boolean matrix are 1:
   for (HYPRE_Int i = 0; i < diag_nnz; i++) { mem_diag.data[i] = 1.0; }
   for (HYPRE_Int i = 0; i < offd_nnz; i++) { mem_offd.data[i] = 1.0; }

   hypre_CSRMatrixSetDataOwner(A->diag,0);
   hypre_CSRMatrixI(A->diag) = i_diag;
   hypre_CSRMatrixJ(A->diag) = j_diag;
   hypre_CSRMatrixData(A->diag) = mem_diag.data;
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
#endif
   hypre_CSRMatrixSetDataOwner(A->offd,0);
   hypre_CSRMatrixI(A->offd) = i_offd;
   hypre_CSRMatrixJ(A->offd) = j_offd;
   hypre_CSRMatrixData(A->offd) = mem_offd.data;
#ifdef HYPRE_USING_GPU
   hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
#endif
   hypre_ParCSRMatrixColMapOffd(A) = cmap;
   // ...
   hypre_ParCSRMatrixSetNumNonzeros(A);
   // ...
   hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   // ...
   hypre_MatvecCommPkgCreate(A);
   // ...
   diagOwner = HypreCsrToMem(A->diag, host_mt, true, mem_diag);
   offdOwner = HypreCsrToMem(A->offd, host_mt, true, mem_offd);
   // ...
   hypre_CSRMatrixSetRownnz(A->diag);
   hypre_CSRMatrixSetRownnz(A->offd);
}
// (from the constructor that builds the matrix from fully-distributed CSR data
//  I, J, data with global column indices and 'rows'/'cols' partitionings)
   if (HYPRE_AssumedPartitionCheck())
   {
      my_col_start = cols[0];
      my_col_end = cols[1];
   }
   else
   {
      MPI_Comm_rank(comm, &myid);
      MPI_Comm_size(comm, &part_size);
      my_col_start = cols[myid];
      my_col_end = cols[myid+1];
   }
   // ...
   if (rows == cols)
   {
      row_starts = col_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      for (int i = 0; i < part_size; i++) { row_starts[i] = rows[i]; }
   }
   else
   {
      row_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      col_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
      for (int i = 0; i < part_size; i++)
      {
         row_starts[i] = rows[i];
         col_starts[i] = cols[i];
      }
   }

   // Count diagonal/off-diagonal nonzeros and enumerate off-diagonal columns:
   HYPRE_Int diag_nnz = 0, offd_nnz = 0, offd_num_cols = 0;
   map<HYPRE_BigInt, HYPRE_Int> offd_map;
   for (HYPRE_Int j = 0, loc_nnz = I[nrows]; j < loc_nnz; j++)
   {
      HYPRE_BigInt glob_col = J[j];
      if (my_col_start <= glob_col && glob_col < my_col_end)
      {
         diag_nnz++;
      }
      else
      {
         offd_map.insert(pair<const HYPRE_BigInt, HYPRE_Int>(glob_col, -1));
         offd_nnz++;
      }
   }
   for (auto it = offd_map.begin(); it != offd_map.end(); ++it)
   {
      it->second = offd_num_cols++;
   }

   A = hypre_ParCSRMatrixCreate(comm, glob_nrows, glob_ncols,
                                row_starts, col_starts, offd_num_cols,
                                diag_nnz, offd_nnz);
   hypre_ParCSRMatrixInitialize(A);

   HYPRE_Int *diag_i, *diag_j, *offd_i, *offd_j;
   double *diag_data, *offd_data;
   diag_i = A->diag->i;
   diag_j = A->diag->j;
   diag_data = A->diag->data;
   offd_i = A->offd->i;
   offd_j = A->offd->j;
   offd_data = A->offd->data;
   offd_col_map = A->col_map_offd;

   diag_nnz = offd_nnz = 0;
   for (HYPRE_Int i = 0, j = 0; i < nrows; i++)
   {
      diag_i[i] = diag_nnz;
      offd_i[i] = offd_nnz;
      for (HYPRE_Int j_end = I[i+1]; j < j_end; j++)
      {
         HYPRE_BigInt glob_col = J[j];
         if (my_col_start <= glob_col && glob_col < my_col_end)
         {
            diag_j[diag_nnz] = glob_col - my_col_start;
            diag_data[diag_nnz] = data[j];
            diag_nnz++;
         }
         else
         {
            offd_j[offd_nnz] = offd_map[glob_col];
            offd_data[offd_nnz] = data[j];
            offd_nnz++;
         }
      }
   }
   diag_i[nrows] = diag_nnz;
   offd_i[nrows] = offd_nnz;
   for (auto it = offd_map.begin(); it != offd_map.end(); ++it)
   {
      offd_col_map[it->second] = it->first;
   }

   hypre_ParCSRMatrixSetNumNonzeros(A);
   if (row_starts == col_starts)
   {
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   }
#if MFEM_HYPRE_VERSION > 22200
   mfem_hypre_TFree_host(row_starts);
   if (row_starts != col_starts)
   {
      mfem_hypre_TFree_host(col_starts);
   }
#endif
   hypre_MatvecCommPkgCreate(A);
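// Example (illustrative sketch, not part of the library): this is the
// constructor MFEM uses for generic distributed CSR data; all names are
// placeholders supplied by the caller.
//
//   HypreParMatrix A_par(comm, nrows, glob_nrows, glob_ncols,
//                        I, J, data, rows, cols);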
HypreParMatrix::HypreParMatrix(const HypreParMatrix &P)
{
   hypre_ParCSRMatrix *Ph = static_cast<hypre_ParCSRMatrix *>(P);
   // ...
   // Clone the structure of P and copy its values:
   A = hypre_ParCSRMatrixCompleteClone(Ph);
   hypre_ParCSRMatrixCopy(Ph, A, 1);
   // ...
   hypre_ParCSRMatrixSetNumNonzeros(A);
   hypre_MatvecCommPkgCreate(A);
   // ...
}

hypre_ParCSRMatrix *HypreParMatrix::StealData()
{
   MFEM_ASSERT(diagOwner < 0 && offdOwner < 0 && colMapOwner == -1, "");
   MFEM_ASSERT(diagOwner == offdOwner, "");
   MFEM_ASSERT(ParCSROwner, "");
   hypre_ParCSRMatrix *R = A;
#ifdef HYPRE_USING_GPU
   // ...
#endif
   ParCSROwner = false;
   // ...
   return R;
}

void HypreParMatrix::SetOwnerFlags(signed char diag, signed char offd,
                                   signed char colmap)
{
   // ...
   colMapOwner = colmap;
}

void HypreParMatrix::CopyRowStarts()
{
#if MFEM_HYPRE_VERSION <= 22200
   if (!A || hypre_ParCSRMatrixOwnsRowStarts(A) ||
       (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
        hypre_ParCSRMatrixOwnsColStarts(A)))
   {
      return;
   }

   int row_starts_size;
   if (HYPRE_AssumedPartitionCheck())
   {
      row_starts_size = 2;
   }
   else
   {
      MPI_Comm_size(hypre_ParCSRMatrixComm(A), &row_starts_size);
      // ...
   }

   HYPRE_BigInt *old_row_starts = hypre_ParCSRMatrixRowStarts(A);
   // ...
   for (int i = 0; i < row_starts_size; i++)
   {
      new_row_starts[i] = old_row_starts[i];
   }

   hypre_ParCSRMatrixRowStarts(A) = new_row_starts;
   hypre_ParCSRMatrixOwnsRowStarts(A) = 1;

   if (hypre_ParCSRMatrixColStarts(A) == old_row_starts)
   {
      hypre_ParCSRMatrixColStarts(A) = new_row_starts;
      hypre_ParCSRMatrixOwnsColStarts(A) = 0;
   }
#endif
}

void HypreParMatrix::CopyColStarts()
{
#if MFEM_HYPRE_VERSION <= 22200
   if (!A || hypre_ParCSRMatrixOwnsColStarts(A) ||
       (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
        hypre_ParCSRMatrixOwnsRowStarts(A)))
   {
      return;
   }

   int col_starts_size;
   if (HYPRE_AssumedPartitionCheck())
   {
      col_starts_size = 2;
   }
   else
   {
      MPI_Comm_size(hypre_ParCSRMatrixComm(A), &col_starts_size);
      // ...
   }

   HYPRE_BigInt *old_col_starts = hypre_ParCSRMatrixColStarts(A);
   // ...
   for (int i = 0; i < col_starts_size; i++)
   {
      new_col_starts[i] = old_col_starts[i];
   }

   hypre_ParCSRMatrixColStarts(A) = new_col_starts;
   // ...
   if (hypre_ParCSRMatrixRowStarts(A) == old_col_starts)
   {
      hypre_ParCSRMatrixRowStarts(A) = new_col_starts;
      hypre_ParCSRMatrixOwnsRowStarts(A) = 1;
      hypre_ParCSRMatrixOwnsColStarts(A) = 0;
   }
   else
   {
      hypre_ParCSRMatrixOwnsColStarts(A) = 1;
   }
#endif
}

void HypreParMatrix::GetDiag(Vector &diag) const
{
   const int size = Height();
   diag.SetSize(size);
#ifdef HYPRE_USING_GPU
   // ...
   MFEM_ASSERT(A->diag->memory_location == HYPRE_MEMORY_DEVICE, "");
   double *d_diag = diag.Write();
   const HYPRE_Int *A_diag_i = A->diag->i;
   const double *A_diag_d = A->diag->data;
   mfem::forall(size, [=] MFEM_HOST_DEVICE (int i)
   {
      d_diag[i] = A_diag_d[A_diag_i[i]];
   });
#else
   // ...
   for (int j = 0; j < size; j++)
   {
      diag(j) = A->diag->data[A->diag->i[j]];
      MFEM_ASSERT(A->diag->j[A->diag->i[j]] == j,
                  "the first entry in each row must be the diagonal one");
   }
#endif
}
static void MakeSparseMatrixWrapper(int nrows, int ncols,
                                    HYPRE_Int *I, HYPRE_Int *J, double *data,
                                    SparseMatrix &wrapper)
{
#ifndef HYPRE_BIGINT
   SparseMatrix tmp(I, J, data, nrows, ncols, false, false, false);
#else
   // Convert the HYPRE_BigInt index arrays to int:
   int *mI = Memory<int>(nrows+1);
   for (int i = 0; i <= nrows; i++) { mI[i] = internal::to_int(I[i]); }
   const int nnz = mI[nrows];
   int *mJ = Memory<int>(nnz);
   for (int j = 0; j < nnz; j++) { mJ[j] = internal::to_int(J[j]); }
   SparseMatrix tmp(mI, mJ, data, nrows, ncols, true, false, false);
#endif
   wrapper.Swap(tmp);
}

static void MakeWrapper(const hypre_CSRMatrix *mat,
                        const MemoryIJData &mem,
                        SparseMatrix &wrapper)
{
   // ...
   MakeSparseMatrixWrapper(nrows, ncols,
                           const_cast<HYPRE_Int*>(I),
                           const_cast<HYPRE_Int*>(J),
                           const_cast<double*>(data),
                           wrapper);
}

void HypreParMatrix::GetDiag(SparseMatrix &diag) const
{
   MakeWrapper(A->diag, mem_diag, diag);
}

void HypreParMatrix::GetOffd(SparseMatrix &offd, HYPRE_BigInt* &cmap) const
{
   MakeWrapper(A->offd, mem_offd, offd);
   cmap = A->col_map_offd;
}

void HypreParMatrix::MergeDiagAndOffd(SparseMatrix &merged)
{
   hypre_CSRMatrix *hypre_merged = hypre_MergeDiagAndOffd(A);
   // ...
#if MFEM_HYPRE_VERSION >= 21600
   hypre_CSRMatrixBigJtoJ(hypre_merged);
#endif
   // ...
   MakeSparseMatrixWrapper( /* ... */ );
   // ...
   merged = merged_tmp;
   // ...
   hypre_CSRMatrixDestroy(hypre_merged);
}

void HypreParMatrix::GetBlocks(Array2D<HypreParMatrix*> &blocks,
                               bool interleaved_rows,
                               bool interleaved_cols) const
{
   // ...
   hypre_ParCSRMatrix **hypre_blocks = new hypre_ParCSRMatrix*[nr * nc];
   // ...
   internal::hypre_ParCSRMatrixSplit(A, nr, nc, hypre_blocks,
                                     interleaved_rows, interleaved_cols);
   // ...
   for (int i = 0; i < nr; i++)
   {
      for (int j = 0; j < nc; j++)
      {
         // ... (wrap each hypre block in a HypreParMatrix)
      }
   }
   delete [] hypre_blocks;
}

HypreParMatrix * HypreParMatrix::Transpose() const
{
   hypre_ParCSRMatrix * At;
   hypre_ParCSRMatrixTranspose(A, &At, 1);
   hypre_ParCSRMatrixSetNumNonzeros(At);

   if (!hypre_ParCSRMatrixCommPkg(At)) { hypre_MatvecCommPkgCreate(At); }
   // ...
   hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(At));
   // ...
   return new HypreParMatrix(At);
}
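// Example (illustrative sketch): forming R = P^T explicitly.
//
//   HypreParMatrix *P = ...;            // e.g. an interpolation operator
//   HypreParMatrix *R = P->Transpose();
//   // ... use R ...
//   delete R;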
#if MFEM_HYPRE_VERSION >= 21800
HypreParMatrix *HypreParMatrix::ExtractSubmatrix(const Array<int> &indices,
                                                 double threshold) const
{
   // ...
   hypre_MatvecCommPkgCreate(A);

   hypre_ParCSRMatrix *submat;

   // Number of rows stored on this processor:
   int local_num_vars = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));

   // Form a hypre CF-splitting array marking the submatrix indices with -1:
#ifdef hypre_IntArrayData
   hypre_IntArray *CF_marker;
   CF_marker = hypre_IntArrayCreate(local_num_vars);
   hypre_IntArrayInitialize_v2(CF_marker, HYPRE_MEMORY_HOST);
   hypre_IntArraySetConstantValues(CF_marker, 1);
#else
   // ...
#endif
   for (int j = 0; j < indices.Size(); j++)
   {
      if (indices[j] > local_num_vars)
      {
         MFEM_WARNING("WARNING : " << indices[j] << " > " << local_num_vars);
      }
#ifdef hypre_IntArrayData
      hypre_IntArrayData(CF_marker)[indices[j]] = -1;
#else
      CF_marker[indices[j]] = -1;
#endif
   }

#if (MFEM_HYPRE_VERSION > 22300) || (MFEM_HYPRE_VERSION == 22300 && HYPRE_DEVELOP_NUMBER >= 8)
   hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
                              CF_marker, NULL, cpts_global);
#else
   hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
                              CF_marker, NULL, &cpts_global);
#endif

#ifdef hypre_IntArrayData
   hypre_ParCSRMatrixExtractSubmatrixFC(A, hypre_IntArrayData(CF_marker),
                                        cpts_global, "FF", &submat,
                                        threshold);
#else
   hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_global,
                                        "FF", &submat, threshold);
#endif

#if (MFEM_HYPRE_VERSION <= 22300) && !(MFEM_HYPRE_VERSION == 22300 && HYPRE_DEVELOP_NUMBER >= 8)
   mfem_hypre_TFree(cpts_global);
#endif
#ifdef hypre_IntArrayData
   hypre_IntArrayDestroy(CF_marker);
#endif
   // ...
}
#endif

void HypreParMatrix::EnsureMultTranspose() const
{
#if (MFEM_HYPRE_VERSION == 22500 && HYPRE_DEVELOP_NUMBER >= 1) || \
    (MFEM_HYPRE_VERSION > 22500)
#ifdef HYPRE_USING_GPU
   hypre_ParCSRMatrixLocalTranspose(A);
#endif
#endif
}

void HypreParMatrix::ResetTranspose() const
{
#if (MFEM_HYPRE_VERSION == 22500 && HYPRE_DEVELOP_NUMBER >= 1) || \
    (MFEM_HYPRE_VERSION > 22500)
#ifdef HYPRE_USING_GPU
   // ...
   hypre_CSRMatrixDestroy(A->diagT);
   // ...
   hypre_CSRMatrixDestroy(A->offdT);
   // ...
#endif
#endif
}
HYPRE_Int HypreParMatrix::Mult(HypreParVector &x, HypreParVector &y,
                               double a, double b) const
{
   return hypre_ParCSRMatrixMatvec(a, A, x, b, y);
}

void HypreParMatrix::Mult(double a, const Vector &x, double b, Vector &y) const
{
   MFEM_ASSERT(x.Size() == Width(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Width());
   MFEM_ASSERT(y.Size() == Height(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Height());
   // ... (wrap x and y in the auxiliary HypreParVectors X and Y)
   hypre_ParCSRMatrixMatvec(a, A, *X, b, *Y);
   // ...
   if (!yshallow) { y = *Y; }  // copy back if y was not a shallow wrapper
}

void HypreParMatrix::MultTranspose(double a, const Vector &x,
                                   double b, Vector &y) const
{
   MFEM_ASSERT(x.Size() == Height(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Height());
   MFEM_ASSERT(y.Size() == Width(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Width());
   // ...
   hypre_ParCSRMatrixMatvecT(a, A, *Y, b, *X);
   // ...
   if (!yshallow) { y = *X; }
}
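// Example (illustrative sketch): y = A*x and z = A^T*y with plain mfem::Vector
// arguments; "A_par" is an assembled HypreParMatrix.
//
//   Vector x(A_par.Width()), y(A_par.Height()), z(A_par.Width());
//   x = 1.0;
//   A_par.Mult(x, y);            // y = A*x
//   A_par.MultTranspose(y, z);   // z = A^T*y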
HYPRE_Int HypreParMatrix::Mult(HYPRE_ParVector x, HYPRE_ParVector y,
                               double a, double b) const
{
   return hypre_ParCSRMatrixMatvec(a, A, (hypre_ParVector *) x, b,
                                   (hypre_ParVector *) y);
}

HYPRE_Int HypreParMatrix::MultTranspose(HypreParVector &x, HypreParVector &y,
                                        double a, double b) const
{
   // ...
   return hypre_ParCSRMatrixMatvecT(a, A, x, b, y);
}

void HypreParMatrix::AbsMult(double a, const Vector &x,
                             double b, Vector &y) const
{
   MFEM_ASSERT(x.Size() == Width(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Width());
   MFEM_ASSERT(y.Size() == Height(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Height());
   // ...
   internal::hypre_ParCSRMatrixAbsMatvec(A, a, const_cast<double*>(x_data),
                                         b, y_data);
   // ...
}

void HypreParMatrix::AbsMultTranspose(double a, const Vector &x,
                                      double b, Vector &y) const
{
   MFEM_ASSERT(x.Size() == Height(), "invalid x.Size() = " << x.Size()
               << ", expected size = " << Height());
   MFEM_ASSERT(y.Size() == Width(), "invalid y.Size() = " << y.Size()
               << ", expected size = " << Width());
   // ...
   internal::hypre_ParCSRMatrixAbsMatvecT(A, a, const_cast<double*>(x_data),
                                          b, y_data);
   // ...
}
HypreParMatrix* HypreParMatrix::LeftDiagMult(const SparseMatrix &D,
                                             HYPRE_BigInt *row_starts) const
{
   const bool assumed_partition = HYPRE_AssumedPartitionCheck();
   const bool row_starts_given = (row_starts != NULL);
   if (!row_starts_given)
   {
      row_starts = hypre_ParCSRMatrixRowStarts(A);
      MFEM_VERIFY(D.Height() == hypre_CSRMatrixNumRows(A->diag),
                  "the matrix D is NOT compatible with the row starts of"
                  " this HypreParMatrix, row_starts must be given.");
   }
   // ...
   if (assumed_partition)
   {
      // ...
      MPI_Comm_rank(GetComm(), &offset);
      // ...
   }
   int local_num_rows = row_starts[offset+1]-row_starts[offset];
   MFEM_VERIFY(local_num_rows == D.Height(), "the number of rows in D is"
               " not compatible with the given row_starts");
   // ...
   if (assumed_partition)
   {
      if (row_starts_given)
      {
         global_num_rows = row_starts[2];
      }
      else
      {
         global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
      }
   }
   else
   {
      MPI_Comm_size(GetComm(), &part_size);
      global_num_rows = row_starts[part_size];
   }

   HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
   // ...
   GetOffd(A_offd, col_map_offd);
   // ... (multiply the diag/offd blocks by D and duplicate the arrays)
   HYPRE_BigInt *new_row_starts =
      DuplicateAs<HYPRE_BigInt>(row_starts, part_size, false);
   HYPRE_BigInt *new_col_starts =
      (row_starts == col_starts ? new_row_starts :
       DuplicateAs<HYPRE_BigInt>(col_starts, part_size, false));
   HYPRE_BigInt *new_col_map_offd =
      DuplicateAs<HYPRE_BigInt>(col_map_offd, A_offd.Width());

   const bool own_diag_offd = true;
   HypreParMatrix *DA =
      new HypreParMatrix(GetComm(),
                         global_num_rows, hypre_ParCSRMatrixGlobalNumCols(A),
                         new_row_starts, new_col_starts,
                         DA_diag, DA_offd, new_col_map_offd,
                         own_diag_offd);

#if MFEM_HYPRE_VERSION <= 22200
   // Give the partitioning arrays to the new matrix:
   hypre_ParCSRMatrixSetRowStartsOwner(DA->A, 1);
   hypre_ParCSRMatrixSetColStartsOwner(DA->A, 1);
#else
   mfem_hypre_TFree_host(new_row_starts);
   mfem_hypre_TFree_host(new_col_starts);
#endif
   DA->colMapOwner = 1;
   return DA;
}
void HypreParMatrix::ScaleRows(const Vector &diag)
{
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   {
      // ...
   }
   if (hypre_CSRMatrixNumRows(A->diag) != diag.Size())
   {
      mfem_error("Note the Vector diag is not of compatible dimensions with A\n");
   }
   // ...
   double *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
   double *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   // ...
   for (int i(0); i < size; ++i)
   {
      double val = diag(i);
      HYPRE_Int jj;
      for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj) { Adiag_data[jj] *= val; }
      for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj) { Aoffd_data[jj] *= val; }
   }
}

void HypreParMatrix::InvScaleRows(const Vector &diag)
{
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   {
      // ...
   }
   if (hypre_CSRMatrixNumRows(A->diag) != diag.Size())
   {
      mfem_error("Note the Vector diag is not of compatible dimensions with A\n");
   }
   // ...
   double *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
   double *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   // ...
   for (int i(0); i < size; ++i)
   {
      if (diag(i) == 0.0)
      {
         mfem_error("HypreParMatrix::InvDiagScale : Division by 0");
      }
      double val = 1.0 / diag(i);
      HYPRE_Int jj;
      for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj) { Adiag_data[jj] *= val; }
      for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj) { Aoffd_data[jj] *= val; }
   }
}

void HypreParMatrix::operator*=(double s)
{
   if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
   {
      // ...
   }
   // ...
   HYPRE_Int size = hypre_CSRMatrixNumRows(A->diag);
   HYPRE_Int jj;

   double *Adiag_data = hypre_CSRMatrixData(A->diag);
   HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
   for (jj = 0; jj < Adiag_i[size]; ++jj) { Adiag_data[jj] *= s; }

   double *Aoffd_data = hypre_CSRMatrixData(A->offd);
   HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
   for (jj = 0; jj < Aoffd_i[size]; ++jj) { Aoffd_data[jj] *= s; }
}
static void get_sorted_rows_cols(const Array<int> &rows_cols,
                                 Array<HYPRE_Int> &hypre_sorted)
{
   hypre_sorted.SetSize(rows_cols.Size());
   bool sorted = true;
   for (int i = 0; i < rows_cols.Size(); i++)
   {
      hypre_sorted[i] = rows_cols[i];
      if (i && rows_cols[i-1] > rows_cols[i]) { sorted = false; }
   }
   if (!sorted) { hypre_sorted.Sort(); }
}

void HypreParMatrix::Threshold(double threshold)
{
   HYPRE_Int ierr = 0;
   hypre_CSRMatrix * csr_A;
   hypre_CSRMatrix * csr_A_wo_z;
   hypre_ParCSRMatrix * parcsr_A_ptr;
   // ...
   comm = hypre_ParCSRMatrixComm(A);
   // ...
   ierr += hypre_ParCSRMatrixGetLocalRange(A,
                                           &row_start, &row_end,
                                           &col_start, &col_end);
   // ...
   row_starts = hypre_ParCSRMatrixRowStarts(A);
   col_starts = hypre_ParCSRMatrixColStarts(A);

#if MFEM_HYPRE_VERSION <= 22200
   bool old_owns_row = hypre_ParCSRMatrixOwnsRowStarts(A);
   bool old_owns_col = hypre_ParCSRMatrixOwnsColStarts(A);
#endif
   HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
   HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
   parcsr_A_ptr = hypre_ParCSRMatrixCreate(comm, global_num_rows,
                                           global_num_cols,
                                           row_starts, col_starts,
                                           /* ... */ 0, 0, 0);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixOwnsRowStarts(parcsr_A_ptr) = old_owns_row;
   hypre_ParCSRMatrixOwnsColStarts(parcsr_A_ptr) = old_owns_col;
   hypre_ParCSRMatrixOwnsRowStarts(A) = 0;
   hypre_ParCSRMatrixOwnsColStarts(A) = 0;
#endif
   csr_A = hypre_MergeDiagAndOffd(A);
   // ...
   csr_A_wo_z = hypre_CSRMatrixDeleteZeros(csr_A, threshold);

   // hypre_CSRMatrixDeleteZeros returns NULL if it finds no entries to drop:
   if (csr_A_wo_z == NULL)
   {
      csr_A_wo_z = csr_A;
   }
   else
   {
      ierr += hypre_CSRMatrixDestroy(csr_A);
   }
   // ...
   ierr += GenerateDiagAndOffd(csr_A_wo_z, parcsr_A_ptr,
                               col_start, col_end);
   ierr += hypre_CSRMatrixDestroy(csr_A_wo_z);
   MFEM_VERIFY(ierr == 0, "");
   // ...
   hypre_ParCSRMatrixSetNumNonzeros(A);
#if MFEM_HYPRE_VERSION <= 22200
   if (row_starts == col_starts)
#else
   if ((row_starts[0] == col_starts[0]) &&
       (row_starts[1] == col_starts[1]))
#endif
   {
      hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
   }
   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
}
void HypreParMatrix::DropSmallEntries(double tol)
{
   HYPRE_Int old_err = hypre_error_flag;
   hypre_error_flag = 0;

#if MFEM_HYPRE_VERSION < 21400
   double threshold = 0.0;
   // ... (compute the threshold from the largest l2 row norm)
   HYPRE_Int *diag_I = A->diag->i, *offd_I = A->offd->i;
   double *diag_d = A->diag->data, *offd_d = A->offd->data;
   HYPRE_Int local_num_rows = A->diag->num_rows;
   double max_l2_row_norm = 0.0;
   Vector row;
   for (HYPRE_Int r = 0; r < local_num_rows; r++)
   {
      // ... (wrap the diag entries of row r in 'row')
      double l2_row_norm = row.Norml2();
      // ... (wrap the offd entries of row r in 'row')
      l2_row_norm = std::hypot(l2_row_norm, row.Norml2());
      max_l2_row_norm = std::max(max_l2_row_norm, l2_row_norm);
   }
   double loc_max_l2_row_norm = max_l2_row_norm;
   MPI_Allreduce(&loc_max_l2_row_norm, &max_l2_row_norm, 1, MPI_DOUBLE,
                 MPI_MAX, GetComm());
   threshold = tol * max_l2_row_norm;
   Threshold(threshold);
#elif MFEM_HYPRE_VERSION < 21800
   HYPRE_Int err_flag = hypre_ParCSRMatrixDropSmallEntries(A, tol);
   MFEM_VERIFY(!err_flag, "error encountered: error code = " << err_flag);
#else
   HYPRE_Int err_flag = hypre_ParCSRMatrixDropSmallEntries(A, tol, 2);
   MFEM_VERIFY(!err_flag, "error encountered: error code = " << err_flag);
#endif

   hypre_error_flag = old_err;
}

void HypreParMatrix::EliminateRowsCols(const Array<int> &rows_cols,
                                       const HypreParVector &x,
                                       HypreParVector &b)
{
   Array<HYPRE_Int> rc_sorted;
   get_sorted_rows_cols(rows_cols, rc_sorted);
   internal::hypre_ParCSRMatrixEliminateAXB(
      A, rc_sorted.Size(), rc_sorted.GetData(), x, b);
}

HypreParMatrix* HypreParMatrix::EliminateRowsCols(const Array<int> &rows_cols)
{
   Array<HYPRE_Int> rc_sorted;
   get_sorted_rows_cols(rows_cols, rc_sorted);

   hypre_ParCSRMatrix* Ae;
   // ...
   internal::hypre_ParCSRMatrixEliminateAAe(
      A, &Ae, rc_sorted.Size(), rc_sorted.GetData());
   // ...
   return new HypreParMatrix(Ae, true);
}

HypreParMatrix* HypreParMatrix::EliminateCols(const Array<int> &cols)
{
   Array<HYPRE_Int> rc_sorted;
   get_sorted_rows_cols(cols, rc_sorted);

   hypre_ParCSRMatrix* Ae;
   // ...
   internal::hypre_ParCSRMatrixEliminateAAe(
      A, &Ae, rc_sorted.Size(), rc_sorted.GetData(), 1);
   // ...
   return new HypreParMatrix(Ae, true);
}

void HypreParMatrix::EliminateRows(const Array<int> &rows)
{
   if (rows.Size() > 0)
   {
      Array<HYPRE_Int> r_sorted;
      get_sorted_rows_cols(rows, r_sorted);
      // ...
      internal::hypre_ParCSRMatrixEliminateRows(A, r_sorted.Size(),
                                                r_sorted.GetData());
      // ...
   }
}

void EliminateBC(const HypreParMatrix &A, const HypreParMatrix &Ae,
                 const Array<int> &ess_dof_list,
                 const Vector &x, Vector &b)
{
   // b -= Ae*x
   Ae.Mult(-1.0, x, 1.0, b);
   if (ess_dof_list.Size() == 0) { return; }
   // ...
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   double *data = hypre_CSRMatrixData(A_diag);
   HYPRE_Int *I = hypre_CSRMatrixI(A_diag);
#ifdef MFEM_DEBUG
   HYPRE_Int *J = hypre_CSRMatrixJ(A_diag);
   hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
   HYPRE_Int *I_offd = hypre_CSRMatrixI(A_offd);
   double *data_offd = hypre_CSRMatrixData(A_offd);
#endif
   // ...
   for (int i = 0; i < ess_dof_list.Size(); i++)
   {
      int r = ess_dof_list[i];
      b(r) = data[I[r]] * x(r);
#ifdef MFEM_DEBUG
      MFEM_ASSERT(I[r] < I[r+1], "empty row found!");
      // Check that the diagonal is the first entry in the eliminated row and
      // that all remaining entries are zero:
      if (J[I[r]] != r)
      {
         MFEM_ABORT("the diagonal entry must be the first entry in the row!");
      }
      for (int j = I[r]+1; j < I[r+1]; j++)
      {
         if (data[j] != 0.0)
         {
            MFEM_ABORT("all off-diagonal entries must be zero!");
         }
      }
      for (int j = I_offd[r]; j < I_offd[r+1]; j++)
      {
         if (data_offd[j] != 0.0)
         {
            MFEM_ABORT("all off-diagonal entries must be zero!");
         }
      }
#endif
   }
}
void HypreParMatrix::EliminateBC(const Array<int> &ess_dofs,
                                 DiagonalPolicy diag_policy)
{
   hypre_ParCSRMatrix *A_hypre = *this;
   // ...
   hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A_hypre);
   hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A_hypre);

   HYPRE_Int diag_nrows = hypre_CSRMatrixNumRows(diag);
   HYPRE_Int offd_ncols = hypre_CSRMatrixNumCols(offd);

   const int n_ess_dofs = ess_dofs.Size();
   // ...
   // Start communication to figure out which off-diagonal columns need to be
   // eliminated.
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Int *int_buf_data, *eliminate_row, *eliminate_col;
   {
      eliminate_row = mfem_hypre_CTAlloc(HYPRE_Int, diag_nrows);
      eliminate_col = mfem_hypre_CTAlloc(HYPRE_Int, offd_ncols);

      // Make sure A has a communication package:
      hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A_hypre);
      if (!comm_pkg)
      {
         hypre_MatvecCommPkgCreate(A_hypre);
         comm_pkg = hypre_ParCSRMatrixCommPkg(A_hypre);
      }

      // Which of the local rows are to be eliminated?
      MFEM_HYPRE_FORALL(i, diag_nrows, eliminate_row[i] = 0; );
      MFEM_HYPRE_FORALL(i, n_ess_dofs, eliminate_row[ess_dofs_d[i]] = 1; );

      // Use a matvec communication pattern to find which of the local offd
      // columns are to be eliminated:
      HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
      HYPRE_Int int_buf_sz = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
      int_buf_data = mfem_hypre_CTAlloc(HYPRE_Int, int_buf_sz);

      HYPRE_Int *send_map_elmts;
#if defined(HYPRE_USING_GPU)
      hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
      send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
#else
      send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
#endif
      MFEM_HYPRE_FORALL(i, int_buf_sz,
      {
         int k = send_map_elmts[i];
         int_buf_data[i] = eliminate_row[k];
      });

#if defined(HYPRE_USING_GPU)
      comm_handle = hypre_ParCSRCommHandleCreate_v2(
                       11, comm_pkg, HYPRE_MEMORY_DEVICE, int_buf_data,
                       HYPRE_MEMORY_DEVICE, eliminate_col);
#else
      comm_handle = hypre_ParCSRCommHandleCreate(
                       11, comm_pkg, int_buf_data, eliminate_col );
#endif
   }

   // Eliminate rows and columns in the diagonal block:
   {
      const auto I = diag->i;
      const auto J = diag->j;
      auto data = diag->data;
      MFEM_HYPRE_FORALL(i, n_ess_dofs,
      {
         const int idof = ess_dofs_d[i];
         for (int j = I[idof]; j < I[idof+1]; ++j)
         {
            const int jdof = J[j];
            if (jdof == idof)
            {
               if (diag_policy == DiagonalPolicy::DIAG_ONE)
               {
                  data[j] = 1.0;
               }
               else if (diag_policy == DiagonalPolicy::DIAG_ZERO)
               {
                  data[j] = 0.0;
               }
            }
            else
            {
               data[j] = 0.0;
               for (int k = I[jdof]; k < I[jdof+1]; ++k)
               {
                  if (J[k] == idof) { data[k] = 0.0; break; }
               }
            }
         }
      });
   }

   // Eliminate rows in the off-diagonal block:
   {
      const auto I = offd->i;
      auto data = offd->data;
      MFEM_HYPRE_FORALL(i, n_ess_dofs,
      {
         const int idof = ess_dofs_d[i];
         for (int j = I[idof]; j < I[idof+1]; ++j) { data[j] = 0.0; }
      });
   }

   // Wait for the communication to finish, then eliminate the columns:
   hypre_ParCSRCommHandleDestroy(comm_handle);
   mfem_hypre_TFree(int_buf_data);
   mfem_hypre_TFree(eliminate_row);

   {
      const int nrows_offd = hypre_CSRMatrixNumRows(offd);
      const auto I = offd->i;
      const auto J = offd->j;
      auto data = offd->data;
      MFEM_HYPRE_FORALL(i, nrows_offd,
      {
         for (int j = I[i]; j < I[i+1]; ++j)
         {
            data[j] *= 1 - eliminate_col[J[j]];
         }
      });
   }
   mfem_hypre_TFree(eliminate_col);
}
void HypreParMatrix::Print(const char *fname, HYPRE_Int offi,
                           HYPRE_Int offj) const
{
   // ...
   hypre_ParCSRMatrixPrintIJ(A, offi, offj, fname);
   // ...
}

void HypreParMatrix::Read(MPI_Comm comm, const char *fname)
{
   // ...
   HYPRE_Int base_i, base_j;
   hypre_ParCSRMatrixReadIJ(comm, fname, &base_i, &base_j, &A);
   hypre_ParCSRMatrixSetNumNonzeros(A);

   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
   // ...
}

void HypreParMatrix::Read_IJMatrix(MPI_Comm comm, const char *fname)
{
   // ...
   HYPRE_IJMatrix A_ij;
   HYPRE_IJMatrixRead(fname, comm, 5555, &A_ij); // HYPRE_PARCSR = 5555

   HYPRE_ParCSRMatrix A_parcsr;
   HYPRE_IJMatrixGetObject(A_ij, (void**) &A_parcsr);
   A = (hypre_ParCSRMatrix*)A_parcsr;

   hypre_ParCSRMatrixSetNumNonzeros(A);

   if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
   // ...
}

void HypreParMatrix::PrintCommPkg(std::ostream &os) const
{
   hypre_ParCSRCommPkg *comm_pkg = A->comm_pkg;
   MPI_Comm comm = A->comm;
   char c = '\0';
   const int tag = 46801;
   int myid, nproc;
   MPI_Comm_rank(comm, &myid);
   MPI_Comm_size(comm, &nproc);

   if (myid != 0)
   {
      MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag, comm, MPI_STATUS_IGNORE);
   }
   else
   {
      os << "\nHypreParMatrix: hypre_ParCSRCommPkg:\n";
   }
   os << "Rank " << myid << ":\n"
      "   number of sends = " << comm_pkg->num_sends <<
      " (" << sizeof(double)*comm_pkg->send_map_starts[comm_pkg->num_sends] <<
      " bytes)\n"
      "   number of recvs = " << comm_pkg->num_recvs <<
      " (" << sizeof(double)*comm_pkg->recv_vec_starts[comm_pkg->num_recvs] <<
      " bytes)\n";
   if (myid != nproc-1)
   {
      // ...
      MPI_Send(&c, 1, MPI_CHAR, myid+1, tag, comm);
   }
   // ...
}

void HypreParMatrix::PrintHash(std::ostream &os) const
{
   HashFunction hf;

   os << "global number of rows    : " << A->global_num_rows << '\n'
      << "global number of columns : " << A->global_num_cols << '\n'
      << "first row index : " << A->first_row_index << '\n'
      << " last row index : " << A->last_row_index << '\n'
      << "first col diag  : " << A->first_col_diag << '\n'
      << " last col diag  : " << A->last_col_diag << '\n'
      << "number of nonzeros : " << A->num_nonzeros << '\n';
   hypre_CSRMatrix *csr = A->diag;
   const char *csr_name = "diag";
   for (int m = 0; m < 2; m++)
   {
      auto csr_nnz = csr->i[csr->num_rows];
      os << csr_name << " num rows : " << csr->num_rows << '\n'
         << csr_name << " num cols : " << csr->num_cols << '\n'
         << csr_name << " num nnz  : " << csr->num_nonzeros << '\n'
         << csr_name << " i last   : " << csr_nnz
         << (csr_nnz == csr->num_nonzeros ?
             " [good]" : " [** BAD **]") << '\n';
      // ...
      os << csr_name << " i hash : " << hf.GetHash() << '\n';
      os << csr_name << " j hash : ";
      if (csr->j == nullptr)
      {
         // ...
      }
      // ...
#if MFEM_HYPRE_VERSION >= 21600
      os << csr_name << " big j hash : ";
      if (csr->big_j == nullptr)
      {
         // ...
      }
      // ...
#endif
      os << csr_name << " data hash : ";
      if (csr->data == nullptr)
      {
         // ...
      }
      // ...
      csr = A->offd;
      csr_name = "offd";
   }

   hf.AppendInts(A->col_map_offd, A->offd->num_cols);
   os << "col map offd hash : " << hf.GetHash() << '\n';
}

// ... (fragment from a routine accessing the off-diagonal column map)
   HYPRE_BigInt *A_col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
   int size = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
void HypreParMatrix::Destroy()
{
   if ( X != NULL ) { delete X; }
   if ( Y != NULL ) { delete Y; }
   // ...
   if (A == NULL) { return; }

#ifdef HYPRE_USING_GPU
   if (ParCSROwner && (diagOwner < 0 || offdOwner < 0))
   {
      // Make sure the arrays referenced by mem_diag/mem_offd are in the memory
      // space expected by hypre before it destroys them.
      MFEM_VERIFY(!(diagOwner < 0 && offdOwner < 0) || diagOwner == offdOwner,
                  /* ... */);
      MemoryClass mc = (diagOwner == -1 || offdOwner == -1) ?
                       MemoryClass::HOST : GetHypreMemoryClass();
      Write(mc, diagOwner < 0, offdOwner < 0);
   }
#endif
   // ...
   if (diagOwner >= 0)
   {
      hypre_CSRMatrixI(A->diag) = NULL;
      hypre_CSRMatrixJ(A->diag) = NULL;
      hypre_CSRMatrixData(A->diag) = NULL;
   }
   if (offdOwner >= 0)
   {
      hypre_CSRMatrixI(A->offd) = NULL;
      hypre_CSRMatrixJ(A->offd) = NULL;
      hypre_CSRMatrixData(A->offd) = NULL;
   }
   if (colMapOwner >= 0)
   {
      if (colMapOwner & 1)
      {
         // ...
      }
      hypre_ParCSRMatrixColMapOffd(A) = NULL;
   }
   // ...
   hypre_ParCSRMatrixDestroy(A);
}

#ifndef HYPRE_BIGINT
// ...
   MFEM_CONTRACT_VAR(own_j);
   MFEM_ASSERT(own_i == own_j, "Inconsistent ownership");
// ...
#endif

#if MFEM_HYPRE_VERSION >= 21800

void BlockInverseScale(const HypreParMatrix *A, HypreParMatrix *C,
                       const HypreParVector *b, HypreParVector *d,
                       int blocksize, BlockInverseScaleJob job)
{
   // ...
   hypre_ParCSRMatrix *C_hypre;
   hypre_ParcsrBdiagInvScal(*A, blocksize, &C_hypre);
   hypre_ParCSRMatrixDropSmallEntries(C_hypre, 1e-15, 1);
   // ...
   hypre_ParVector *d_hypre;
   hypre_ParvecBdiagInvScal(b_Hypre, blocksize, &d_hypre, *A);
   // ...
}
#endif

#if MFEM_HYPRE_VERSION < 21400
// (pre-2.14 addition path using MFEM's internal helper)
   hypre_ParCSRMatrix *C_hypre =
      internal::hypre_ParCSRMatrixAdd(const_cast<HypreParMatrix &>(A),
                                      const_cast<HypreParMatrix &>(B));
   MFEM_VERIFY(C_hypre, "error in hypre_ParCSRMatrixAdd");
   // ...
   if (!hypre_ParCSRMatrixCommPkg(C_hypre)) { hypre_MatvecCommPkgCreate(C_hypre); }
   // ...
   hypre_ParCSRMatrix * C = internal::hypre_ParCSRMatrixAdd(*A,*B);
   // ...
   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }
   // ...
#endif
HypreParMatrix * Add(double alpha, const HypreParMatrix &A,
                     double beta,  const HypreParMatrix &B)
{
   hypre_ParCSRMatrix *C;
#if MFEM_HYPRE_VERSION <= 22000
   hypre_ParcsrAdd(alpha, A, beta, B, &C);
#else
   hypre_ParCSRMatrixAdd(alpha, A, beta, B, &C);
#endif
   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }

   return new HypreParMatrix(C);
}

HypreParMatrix * ParAdd(const HypreParMatrix *A, const HypreParMatrix *B)
{
   hypre_ParCSRMatrix *C;
#if MFEM_HYPRE_VERSION <= 22000
   hypre_ParcsrAdd(1.0, *A, 1.0, *B, &C);
#else
   hypre_ParCSRMatrixAdd(1.0, *A, 1.0, *B, &C);
#endif
   if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }

   return new HypreParMatrix(C);
}
HypreParMatrix * ParMult(const HypreParMatrix *A, const HypreParMatrix *B,
                         bool own_matrix)
{
   hypre_ParCSRMatrix * ab;
#ifdef HYPRE_USING_GPU
   ab = hypre_ParCSRMatMat(*A, *B);
#else
   ab = hypre_ParMatmul(*A, *B);
#endif
   hypre_ParCSRMatrixSetNumNonzeros(ab);

   if (!hypre_ParCSRMatrixCommPkg(ab)) { hypre_MatvecCommPkgCreate(ab); }
   // ...
   return new HypreParMatrix(ab);
}

HypreParMatrix * RAP(const HypreParMatrix *A, const HypreParMatrix *P)
{
   hypre_ParCSRMatrix * rap;

#ifdef HYPRE_USING_GPU
   {
      hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A, *P);
      const bool keepTranspose = false;
      rap = hypre_ParCSRTMatMatKT(*P, Q, keepTranspose);
      hypre_ParCSRMatrixDestroy(Q);
   }
#else
#if MFEM_HYPRE_VERSION <= 22200
   HYPRE_Int P_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
#endif
   hypre_BoomerAMGBuildCoarseOperator(*P, *A, *P, &rap);
#if MFEM_HYPRE_VERSION <= 22200
   // hypre_BoomerAMGBuildCoarseOperator steals the col_starts from P:
   hypre_ParCSRMatrixSetRowStartsOwner(rap, 0);
   hypre_ParCSRMatrixSetColStartsOwner(rap, 0);
   if (P_owns_its_col_starts)
   {
      hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
   }
#endif
#endif
   hypre_ParCSRMatrixSetNumNonzeros(rap);
   // ...
   return new HypreParMatrix(rap);
}

HypreParMatrix * RAP(const HypreParMatrix *Rt, const HypreParMatrix *A,
                     const HypreParMatrix *P)
{
   hypre_ParCSRMatrix * rap;

#ifdef HYPRE_USING_GPU
   {
      hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A, *P);
      rap = hypre_ParCSRTMatMat(*Rt, Q);
      hypre_ParCSRMatrixDestroy(Q);
   }
#else
#if MFEM_HYPRE_VERSION <= 22200
   HYPRE_Int P_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
   HYPRE_Int Rt_owns_its_col_starts =
      hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*Rt));
#endif
   hypre_BoomerAMGBuildCoarseOperator(*Rt, *A, *P, &rap);
#if MFEM_HYPRE_VERSION <= 22200
   hypre_ParCSRMatrixSetRowStartsOwner(rap, 0);
   hypre_ParCSRMatrixSetColStartsOwner(rap, 0);
   if (P_owns_its_col_starts)
   {
      hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
   }
   if (Rt_owns_its_col_starts)
   {
      hypre_ParCSRMatrixSetColStartsOwner(*Rt, 1);
   }
#endif
#endif
   hypre_ParCSRMatrixSetNumNonzeros(rap);
   // ...
   return new HypreParMatrix(rap);
}
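// Example (illustrative sketch): Galerkin coarse operator and matrix products.
//
//   HypreParMatrix *AP  = ParMult(A, P);   // A*P
//   HypreParMatrix *Ac  = RAP(A, P);       // P^T*A*P
//   HypreParMatrix *Ac2 = RAP(Rt, A, P);   // Rt^T*A*P
//   delete AP; delete Ac; delete Ac2;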
static void GatherBlockOffsetData(MPI_Comm comm, const int rank,
                                  const int nprocs, const int num_loc,
                                  const Array<int> &offsets,
                                  std::vector<int> &all_num_loc,
                                  const int numBlocks,
                                  std::vector<std::vector<HYPRE_BigInt>> &blockProcOffsets,
                                  std::vector<HYPRE_BigInt> &procOffsets,
                                  std::vector<std::vector<int>> &procBlockOffsets,
                                  HYPRE_BigInt &firstLocal, HYPRE_BigInt &globalNum)
{
   std::vector<std::vector<int>> all_block_num_loc(numBlocks);

   MPI_Allgather(&num_loc, 1, MPI_INT, all_num_loc.data(), 1, MPI_INT, comm);

   for (int j = 0; j < numBlocks; ++j)
   {
      all_block_num_loc[j].resize(nprocs);
      blockProcOffsets[j].resize(nprocs);

      const int blockNumRows = offsets[j + 1] - offsets[j];
      MPI_Allgather(&blockNumRows, 1, MPI_INT, all_block_num_loc[j].data(), 1,
                    MPI_INT, comm);
      blockProcOffsets[j][0] = 0;
      for (int i = 0; i < nprocs - 1; ++i)
      {
         blockProcOffsets[j][i + 1] = blockProcOffsets[j][i]
                                      + all_block_num_loc[j][i];
      }
   }

   firstLocal = 0;
   globalNum = 0;
   procOffsets[0] = 0;
   for (int i = 0; i < nprocs; ++i)
   {
      globalNum += all_num_loc[i];
      MFEM_VERIFY(globalNum >= 0, "overflow in global size");
      if (i < rank)
      {
         firstLocal += all_num_loc[i];
      }
      if (i < nprocs - 1)
      {
         procOffsets[i + 1] = procOffsets[i] + all_num_loc[i];
      }
      procBlockOffsets[i].resize(numBlocks);
      procBlockOffsets[i][0] = 0;
      for (int j = 1; j < numBlocks; ++j)
      {
         procBlockOffsets[i][j] = procBlockOffsets[i][j - 1]
                                  + all_block_num_loc[j - 1][i];
      }
   }
}
HypreParMatrix * HypreParMatrixFromBlocks(Array2D<HypreParMatrix*> &blocks,
                                          Array2D<double> *blockCoeff)
{
   const int numBlockRows = blocks.NumRows();
   const int numBlockCols = blocks.NumCols();

   MFEM_VERIFY(numBlockRows > 0 &&
               numBlockCols > 0, "Invalid input to HypreParMatrixFromBlocks");

   if (blockCoeff != NULL)
   {
      MFEM_VERIFY(numBlockRows == blockCoeff->NumRows() &&
                  numBlockCols == blockCoeff->NumCols(),
                  "Invalid input to HypreParMatrixFromBlocks");
   }

   // Find the first nonzero block in block-row 0 to get the MPI communicator:
   int nonNullBlockRow0 = -1;
   for (int j = 0; j < numBlockCols; ++j)
   {
      if (blocks(0,j) != NULL)
      {
         nonNullBlockRow0 = j;
         break;
      }
   }
   MFEM_VERIFY(nonNullBlockRow0 >= 0, "Null row of blocks");
   MPI_Comm comm = blocks(0,nonNullBlockRow0)->GetComm();

   // Determine and check the block row/column offsets:
   for (int i = 0; i < numBlockRows; ++i)
   {
      for (int j = 0; j < numBlockCols; ++j)
      {
         if (blocks(i,j) != NULL)
         {
            const int nrows = blocks(i,j)->NumRows();
            const int ncols = blocks(i,j)->NumCols();
            if (rowOffsets[i+1] == 0)
            {
               rowOffsets[i+1] = nrows;
            }
            else
            {
               MFEM_VERIFY(rowOffsets[i+1] == nrows,
                           "Inconsistent blocks in HypreParMatrixFromBlocks");
            }
            if (colOffsets[j+1] == 0)
            {
               colOffsets[j+1] = ncols;
            }
            else
            {
               MFEM_VERIFY(colOffsets[j+1] == ncols,
                           "Inconsistent blocks in HypreParMatrixFromBlocks");
            }
         }
      }
      rowOffsets[i+1] += rowOffsets[i];
   }
   for (int j = 0; j < numBlockCols; ++j)
   {
      colOffsets[j+1] += colOffsets[j];
   }

   const int num_loc_rows = rowOffsets[numBlockRows];
   const int num_loc_cols = colOffsets[numBlockCols];

   int nprocs, rank;
   MPI_Comm_rank(comm, &rank);
   MPI_Comm_size(comm, &nprocs);

   std::vector<int> all_num_loc_rows(nprocs);
   std::vector<int> all_num_loc_cols(nprocs);
   std::vector<HYPRE_BigInt> procRowOffsets(nprocs);
   std::vector<HYPRE_BigInt> procColOffsets(nprocs);
   std::vector<std::vector<HYPRE_BigInt>> blockRowProcOffsets(numBlockRows);
   std::vector<std::vector<HYPRE_BigInt>> blockColProcOffsets(numBlockCols);
   std::vector<std::vector<int>> procBlockRowOffsets(nprocs);
   std::vector<std::vector<int>> procBlockColOffsets(nprocs);

   HYPRE_BigInt first_loc_row, glob_nrows, first_loc_col, glob_ncols;
   GatherBlockOffsetData(comm, rank, nprocs, num_loc_rows, rowOffsets,
                         all_num_loc_rows, numBlockRows, blockRowProcOffsets,
                         procRowOffsets, procBlockRowOffsets, first_loc_row,
                         glob_nrows);
   GatherBlockOffsetData(comm, rank, nprocs, num_loc_cols, colOffsets,
                         all_num_loc_cols, numBlockCols, blockColProcOffsets,
                         procColOffsets, procBlockColOffsets, first_loc_col,
                         glob_ncols);

   std::vector<int> opI(num_loc_rows + 1);
   std::vector<int> cnt(num_loc_rows);

   for (int i = 0; i < num_loc_rows; ++i)
   {
      opI[i] = 0;
      cnt[i] = 0;
   }
   opI[num_loc_rows] = 0;

   // Loop over all blocks to determine the nnz of each row:
   for (int i = 0; i < numBlockRows; ++i)
   {
      for (int j = 0; j < numBlockCols; ++j)
      {
         if (blocks(i, j) == NULL)
         {
            csr_blocks(i, j) = NULL;
         }
         else
         {
            blocks(i, j)->HostRead();
            csr_blocks(i, j) = hypre_MergeDiagAndOffd(*blocks(i, j));
            blocks(i, j)->HypreRead();

            for (int k = 0; k < csr_blocks(i, j)->num_rows; ++k)
            {
               opI[rowOffsets[i] + k + 1] +=
                  csr_blocks(i, j)->i[k + 1] - csr_blocks(i, j)->i[k];
            }
         }
      }
   }

   // Partial sum to turn row counts into offsets:
   for (int i = 0; i < num_loc_rows; ++i)
   {
      opI[i + 1] += opI[i];
   }

   const int nnz = opI[num_loc_rows];

   std::vector<HYPRE_BigInt> opJ(nnz);
   std::vector<double> data(nnz);

   // Loop over all blocks to set the matrix entries:
   for (int i = 0; i < numBlockRows; ++i)
   {
      for (int j = 0; j < numBlockCols; ++j)
      {
         if (csr_blocks(i, j) != NULL)
         {
            const int nrows = csr_blocks(i, j)->num_rows;
            const double cij = blockCoeff ? (*blockCoeff)(i, j) : 1.0;
#if MFEM_HYPRE_VERSION >= 21600
            const bool usingBigJ = (csr_blocks(i, j)->big_j != NULL);
#endif
            for (int k = 0; k < nrows; ++k)
            {
               const int rowg = rowOffsets[i] + k;
               const int nnz_k = csr_blocks(i,j)->i[k+1]-csr_blocks(i,j)->i[k];
               const int osk = csr_blocks(i, j)->i[k];

               for (int l = 0; l < nnz_k; ++l)
               {
#if MFEM_HYPRE_VERSION >= 21600
                  const HYPRE_Int bcol = usingBigJ ?
                                         csr_blocks(i, j)->big_j[osk + l] :
                                         csr_blocks(i, j)->j[osk + l];
#else
                  const HYPRE_Int bcol = csr_blocks(i, j)->j[osk + l];
#endif
                  // Find the processor that owns column bcol of block j:
                  const auto &offs = blockColProcOffsets[j];
                  const int bcolproc =
                     std::upper_bound(offs.begin() + 1, offs.end(), bcol)
                     - offs.begin() - 1;

                  opJ[opI[rowg] + cnt[rowg]] = procColOffsets[bcolproc] +
                                               procBlockColOffsets[bcolproc][j]
                                               + bcol
                                               - blockColProcOffsets[j][bcolproc];
                  data[opI[rowg] + cnt[rowg]] = cij * csr_blocks(i, j)->data[osk + l];
                  cnt[rowg]++;
               }
            }
         }
      }
   }

   for (int i = 0; i < numBlockRows; ++i)
   {
      for (int j = 0; j < numBlockCols; ++j)
      {
         if (csr_blocks(i, j) != NULL)
         {
            hypre_CSRMatrixDestroy(csr_blocks(i, j));
         }
      }
   }

   MFEM_VERIFY(HYPRE_AssumedPartitionCheck(),
               "only 'assumed partition' mode is supported");

   std::vector<HYPRE_BigInt> rowStarts2(2);
   rowStarts2[0] = first_loc_row;
   rowStarts2[1] = first_loc_row + all_num_loc_rows[rank];

   int square = std::equal(all_num_loc_rows.begin(), all_num_loc_rows.end(),
                           all_num_loc_cols.begin());
   if (square)
   {
      return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
                                opI.data(), opJ.data(),
                                data.data(),
                                rowStarts2.data(), rowStarts2.data());
   }
   else
   {
      std::vector<HYPRE_BigInt> colStarts2(2);
      colStarts2[0] = first_loc_col;
      colStarts2[1] = first_loc_col + all_num_loc_cols[rank];

      return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
                                opI.data(), opJ.data(),
                                data.data(),
                                rowStarts2.data(), colStarts2.data());
   }
}
// "lambda-mu" relaxation scheme of Taubin:
static void ParCSRRelax_Taubin(hypre_ParCSRMatrix *A, hypre_ParVector *f,
                               double lambda, double mu, int N,
                               double max_eig,
                               hypre_ParVector *u, hypre_ParVector *r)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);

   double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
   double *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));

   for (int i = 0; i < N; i++)
   {
      // get residual: r = f - A*u
      hypre_ParVectorCopy(f, r);
      hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);

      double coef;
      (0 == (i % 2)) ? coef = lambda : coef = mu;

      for (HYPRE_Int j = 0; j < num_rows; j++)
      {
         u_data[j] += coef*r_data[j] / max_eig;
      }
   }
}

// FIR relaxation scheme based on Chebyshev polynomials and a window function:
static void ParCSRRelax_FIR(hypre_ParCSRMatrix *A, hypre_ParVector *f,
                            double max_eig, int poly_order,
                            double *fir_coeffs,
                            hypre_ParVector *u,
                            hypre_ParVector *x0,
                            hypre_ParVector *x1,
                            hypre_ParVector *x2,
                            hypre_ParVector *x3)
{
   hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
   HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);

   double *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));

   double *x0_data = hypre_VectorData(hypre_ParVectorLocalVector(x0));
   double *x1_data = hypre_VectorData(hypre_ParVectorLocalVector(x1));
   double *x2_data = hypre_VectorData(hypre_ParVectorLocalVector(x2));
   double *x3_data = hypre_VectorData(hypre_ParVectorLocalVector(x3));

   hypre_ParVectorCopy(u, x0);

   // x1 = f - A*x0 / max_eig
   hypre_ParVectorCopy(f, x1);
   hypre_ParCSRMatrixMatvec(-1.0, A, x0, 1.0, x1);
   for (HYPRE_Int i = 0; i < num_rows; i++) { x1_data[i] /= -max_eig; }

   // x1 = x0 - x1
   for (HYPRE_Int i = 0; i < num_rows; i++)
   {
      x1_data[i] = x0_data[i] - x1_data[i];
   }

   // x3 = f0*x0 + f1*x1
   for (HYPRE_Int i = 0; i < num_rows; i++)
   {
      x3_data[i] = fir_coeffs[0]*x0_data[i] + fir_coeffs[1]*x1_data[i];
   }

   for (int n = 2; n <= poly_order; n++)
   {
      // x2 = f - A*x1 / max_eig
      hypre_ParVectorCopy(f, x2);
      hypre_ParCSRMatrixMatvec(-1.0, A, x1, 1.0, x2);
      for (HYPRE_Int i = 0; i < num_rows; i++) { x2_data[i] /= -max_eig; }

      // x2 = (x1-x0) + (x1-2*x2), accumulate into x3, shift x0/x1:
      for (HYPRE_Int i = 0; i < num_rows; i++)
      {
         x2_data[i] = (x1_data[i]-x0_data[i]) +(x1_data[i]-2*x2_data[i]);
         x3_data[i] += fir_coeffs[n]*x2_data[i];
         x0_data[i] = x1_data[i];
         x1_data[i] = x2_data[i];
      }
   }

   for (HYPRE_Int i = 0; i < num_rows; i++)
   {
      u_data[i] = x3_data[i];
   }
}
HypreSmoother::HypreSmoother() : Solver()
{
   // ... (default smoother parameters)
   B = X = V = Z = NULL;
   // ...
}

HypreSmoother::HypreSmoother(const HypreParMatrix &A_, int type_,
                             int relax_times_, double relax_weight_,
                             double omega_, int poly_order_,
                             double poly_fraction_, int eig_est_cg_iter_)
{
   // ...
   B = X = V = Z = NULL;
   // ...
   SetOperator(A_);
}

void HypreSmoother::SetType(HypreSmoother::Type type_, int relax_times_)
{
   type = static_cast<int>(type_);
   // ...
}

void HypreSmoother::SetPolyOptions(int poly_order_, double poly_fraction_,
                                   int eig_est_cg_iter_)
{
   // ...
}

void HypreSmoother::SetWindowByName(const char *name)
{
   double a = -1, b, c;
   if (!strcmp(name, "Rectangular")) { a = 1.0,  b = 0.0,  c = 0.0; }
   if (!strcmp(name, "Hanning"))     { a = 0.5,  b = 0.5,  c = 0.0; }
   if (!strcmp(name, "Hamming"))     { a = 0.54, b = 0.46, c = 0.0; }
   if (!strcmp(name, "Blackman"))    { a = 0.42, b = 0.50, c = 0.08; }
   if (a < 0)
   {
      mfem_error("HypreSmoother::SetWindowByName : name not recognized!");
   }
   // ...
}

void HypreSmoother::SetOperator(const Operator &op)
{
   A = const_cast<HypreParMatrix *>(dynamic_cast<const HypreParMatrix *>(&op));
   if (A == NULL)
   {
      mfem_error("HypreSmoother::SetOperator : not HypreParMatrix!");
   }
   // ...
   if (B) { delete B; }
   if (X) { delete X; }
   if (V) { delete V; }
   if (Z) { delete Z; }
   // ... (compute the l1 norms / lumped diagonal as needed by 'type')
      A->Mult(ones, diag);
   // ...
#if defined(HYPRE_USING_GPU)
   // ...
   MFEM_GPU_FORALL(i, height,
   {
      d_l1_norms[i] = std::abs(d_l1_norms[i]);
   });
#else
   for (int i = 0; i < height; i++)
   {
      l1_norms[i] = std::abs(l1_norms[i]);
   }
#endif
   // ...
#if MFEM_HYPRE_VERSION <= 22200
   // ...
#endif
   else if (type == 1001 || type == 1002)
   {
      // ...
#if MFEM_HYPRE_VERSION <= 22200
      // ...
#endif
   }
}

void HypreSmoother::SetFIRCoefficients(double max_eig)
{
   // ...
   double* window_coeffs = new double[poly_order+1];
   double* cheby_coeffs = new double[poly_order+1];

   // Window coefficients from the (a, b, c) window parameters:
   for (int i = 0; i <= poly_order; i++)
   {
      // ...
      window_coeffs[i] = a + b*cos(t) + c*cos(2*t);
   }

   // Chebyshev coefficients for the pass-band [0, k_pb]:
   double theta_pb = acos(1.0 - 0.5*k_pb);
   // ...
   cheby_coeffs[0] = (theta_pb + sigma)/M_PI;
   for (int i = 1; i <= poly_order; i++)
   {
      double t = i*(theta_pb + sigma);
      cheby_coeffs[i] = 2.0*sin(t)/(i*M_PI);
   }

   for (int i = 0; i <= poly_order; i++)
   {
      fir_coeffs[i] = window_coeffs[i]*cheby_coeffs[i];
   }

   delete[] window_coeffs;
   delete[] cheby_coeffs;
}
3648 HYPRE_ParCSRDiagScale(NULL, *
A,
b, x);
3655 hypre_ParVectorSetConstantValues(x, 0.0);
3676 else if (
type == 1002)
3690 int hypre_type =
type;
3692 if (
type == 5) { hypre_type = 1; }
3696 hypre_ParCSRRelax(*
A,
b, hypre_type,
3703 hypre_ParCSRRelax(*
A,
b, hypre_type,
3713 MFEM_ASSERT(
b.Size() ==
NumCols(),
"");
3718 mfem_error(
"HypreSmoother::Mult (...) : HypreParMatrix A is missing");
3725 A -> GetGlobalNumRows(),
3727 A -> GetRowStarts());
3729 A -> GetGlobalNumCols(),
3731 A -> GetColStarts());
3769 if (!xshallow) { x = *
X; }
3779 mfem_error(
"HypreSmoother::MultTranspose (...) : undefined!\n");
3785 if (
B) {
delete B; }
3786 if (
X) {
delete X; }
3787 if (
V) {
delete V; }
3788 if (
Z) {
delete Z; }
3797 if (
X0) {
delete X0; }
3798 if (
X1) {
delete X1; }
3813 :
Solver(A_->Height(), A_->Width())
3825 MFEM_ASSERT(
b.Size() ==
NumCols(),
"");
3828 MFEM_VERIFY(
A != NULL,
"HypreParMatrix A is missing");
3878 MFEM_VERIFY(
A != NULL,
"HypreParMatrix A is missing");
3880 HYPRE_Int err_flag =
SetupFcn()(*
this, *
A,
b, x);
3884 { MFEM_WARNING(
"Error during setup! Error code: " << err_flag); }
3888 MFEM_VERIFY(!err_flag,
"Error during setup! Error code: " << err_flag);
3890 hypre_error_flag = 0;
3898 if (!x_shallow) { x = *
X; }
3906 mfem_error(
"HypreSolver::Mult (...) : HypreParMatrix A is missing");
3913 hypre_ParVectorSetConstantValues(x, 0.0);
3925 { MFEM_WARNING(
"Error during solve! Error code: " << err_flag); }
3929 MFEM_VERIFY(!err_flag,
"Error during solve! Error code: " << err_flag);
3931 hypre_error_flag = 0;
3938 if (!x_shallow) { x = *
X; }
3943 if (
B) {
delete B; }
3944 if (
X) {
delete X; }
// HyprePCG constructors, SetOperator, and option setters (fragments)
HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
// ...
HYPRE_ParCSRMatrixGetComm(*A, &comm);
HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
// ...
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
// ...
HYPRE_PCGSetTol(pcg_solver, tol);
HYPRE_PCGSetAbsoluteTol(pcg_solver, atol);
HYPRE_PCGSetMaxIter(pcg_solver, max_iter);
HYPRE_PCGSetLogging(pcg_solver, logging);
HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_lvl);
precond = &precond_;
HYPRE_ParCSRPCGSetPrecond(pcg_solver, /* ... */);
HYPRE_PCGSetTwoNorm(pcg_solver, 1);
if (res_frequency > 0)
{
   HYPRE_PCGSetRecomputeResidualP(pcg_solver, res_frequency);
}
// ...
HYPRE_PCGSetResidualTol(pcg_solver, rtol);
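// Hedged usage sketch (not part of this listing): an AMG-preconditioned CG solve using
// the options wired above. A, B, X (HypreParMatrix / HypreParVectors) are assumed.
HypreBoomerAMG pcg_amg(A);
pcg_amg.SetPrintLevel(0);
HyprePCG pcg(A);
pcg.SetTol(1e-12);
pcg.SetMaxIter(200);
pcg.SetPrintLevel(2);
pcg.SetPreconditioner(pcg_amg);
pcg.SetResidualConvergenceOptions(/*res_frequency*/ 10, /*rtol*/ 0.0);
pcg.Mult(B, X);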
// HyprePCG::Mult (fragments) and destructor
HYPRE_Int time_index = 0;
HYPRE_Int num_iterations;
double final_res_norm;
HYPRE_Int print_level;

HYPRE_PCGGetPrintLevel(pcg_solver, &print_level);
HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level%3);
HYPRE_ParCSRMatrixGetComm(*A, &comm);
hypre_ParVectorSetConstantValues(x, 0.0);

if (print_level > 0 && print_level < 3)
{
   time_index = hypre_InitializeTiming("PCG Setup");
   hypre_BeginTiming(time_index);
}
HYPRE_ParCSRPCGSetup(pcg_solver, *A, b, x);
if (print_level > 0 && print_level < 3)
{
   hypre_EndTiming(time_index);
   hypre_PrintTiming("Setup phase times", comm);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();
}

if (print_level > 0 && print_level < 3)
{
   time_index = hypre_InitializeTiming("PCG Solve");
   hypre_BeginTiming(time_index);
}
HYPRE_ParCSRPCGSolve(pcg_solver, *A, b, x);
if (print_level > 0)
{
   if (print_level < 3)
   {
      hypre_EndTiming(time_index);
      hypre_PrintTiming("Solve phase times", comm);
      hypre_FinalizeTiming(time_index);
      hypre_ClearTiming();
   }
   HYPRE_ParCSRPCGGetNumIterations(pcg_solver, &num_iterations);
   HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(pcg_solver, &final_res_norm);
   MPI_Comm_rank(comm, &myid);
   mfem::out << "PCG Iterations = " << num_iterations << endl
             << "Final PCG Relative Residual Norm = " << final_res_norm << endl;
}
HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level);
// ~HyprePCG
HYPRE_ParCSRPCGDestroy(pcg_solver);
// HypreGMRES constructors
HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
SetDefaultOptions();
// ...
HYPRE_ParCSRMatrixGetComm(*A, &comm);
HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
SetDefaultOptions();

void HypreGMRES::SetDefaultOptions()
{
   HYPRE_ParCSRGMRESSetKDim(gmres_solver, k_dim);
   HYPRE_ParCSRGMRESSetMaxIter(gmres_solver, max_iter);
   HYPRE_ParCSRGMRESSetTol(gmres_solver, tol);
}

// HypreGMRES::SetOperator and option setters (fragments)
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
HYPRE_GMRESSetTol(gmres_solver, tol);
HYPRE_GMRESSetAbsoluteTol(gmres_solver, tol);
HYPRE_GMRESSetMaxIter(gmres_solver, max_iter);
HYPRE_GMRESSetKDim(gmres_solver, k_dim);
HYPRE_GMRESSetLogging(gmres_solver, logging);
HYPRE_GMRESSetPrintLevel(gmres_solver, print_lvl);
precond = &precond_;
HYPRE_ParCSRGMRESSetPrecond(gmres_solver, /* ... */);
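// Hedged usage sketch (not part of this listing): restarted GMRES with an ILU
// preconditioner. A, B, X are assumed as above.
HypreILU ilu;
ilu.SetLevelOfFill(1);
HypreGMRES gmres(A);
gmres.SetKDim(50);        // restart length
gmres.SetTol(1e-10);
gmres.SetMaxIter(500);
gmres.SetPreconditioner(ilu);
gmres.Mult(B, X);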
// HypreGMRES::Mult (fragments) and destructor
HYPRE_Int time_index = 0;
HYPRE_Int num_iterations;
double final_res_norm;
HYPRE_Int print_level;

HYPRE_GMRESGetPrintLevel(gmres_solver, &print_level);
HYPRE_ParCSRMatrixGetComm(*A, &comm);
hypre_ParVectorSetConstantValues(x, 0.0);

if (print_level > 0)
{
   time_index = hypre_InitializeTiming("GMRES Setup");
   hypre_BeginTiming(time_index);
}
HYPRE_ParCSRGMRESSetup(gmres_solver, *A, b, x);
if (print_level > 0)
{
   hypre_EndTiming(time_index);
   hypre_PrintTiming("Setup phase times", comm);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();
}

if (print_level > 0)
{
   time_index = hypre_InitializeTiming("GMRES Solve");
   hypre_BeginTiming(time_index);
}
HYPRE_ParCSRGMRESSolve(gmres_solver, *A, b, x);
if (print_level > 0)
{
   hypre_EndTiming(time_index);
   hypre_PrintTiming("Solve phase times", comm);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();

   HYPRE_ParCSRGMRESGetNumIterations(gmres_solver, &num_iterations);
   HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm(gmres_solver, &final_res_norm);
   MPI_Comm_rank(comm, &myid);
   mfem::out << "GMRES Iterations = " << num_iterations << endl
             << "Final GMRES Relative Residual Norm = " << final_res_norm << endl;
}
// ~HypreGMRES
HYPRE_ParCSRGMRESDestroy(gmres_solver);
// HypreFGMRES constructors
HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
SetDefaultOptions();
// ...
HYPRE_ParCSRMatrixGetComm(*A, &comm);
HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
SetDefaultOptions();

void HypreFGMRES::SetDefaultOptions()
{
   HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
   HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
   HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
}

// HypreFGMRES::SetOperator and option setters (fragments)
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
HYPRE_ParCSRFlexGMRESSetLogging(fgmres_solver, logging);
HYPRE_ParCSRFlexGMRESSetPrintLevel(fgmres_solver, print_lvl);
precond = &precond_;
HYPRE_ParCSRFlexGMRESSetPrecond(fgmres_solver, /* ... */);
// HypreFGMRES::Mult (fragments) and destructor
HYPRE_Int time_index = 0;
HYPRE_Int num_iterations;
double final_res_norm;
HYPRE_Int print_level;

HYPRE_FlexGMRESGetPrintLevel(fgmres_solver, &print_level);
HYPRE_ParCSRMatrixGetComm(*A, &comm);
hypre_ParVectorSetConstantValues(x, 0.0);

if (print_level > 0)
{
   time_index = hypre_InitializeTiming("FGMRES Setup");
   hypre_BeginTiming(time_index);
}
HYPRE_ParCSRFlexGMRESSetup(fgmres_solver, *A, b, x);
if (print_level > 0)
{
   hypre_EndTiming(time_index);
   hypre_PrintTiming("Setup phase times", comm);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();
}

if (print_level > 0)
{
   time_index = hypre_InitializeTiming("FGMRES Solve");
   hypre_BeginTiming(time_index);
}
HYPRE_ParCSRFlexGMRESSolve(fgmres_solver, *A, b, x);
if (print_level > 0)
{
   hypre_EndTiming(time_index);
   hypre_PrintTiming("Solve phase times", comm);
   hypre_FinalizeTiming(time_index);
   hypre_ClearTiming();

   HYPRE_ParCSRFlexGMRESGetNumIterations(fgmres_solver, &num_iterations);
   HYPRE_ParCSRFlexGMRESGetFinalRelativeResidualNorm(fgmres_solver, &final_res_norm);
   MPI_Comm_rank(comm, &myid);
   mfem::out << "FGMRES Iterations = " << num_iterations << endl
             << "Final FGMRES Relative Residual Norm = " << final_res_norm << endl;
}
// ~HypreFGMRES
HYPRE_ParCSRFlexGMRESDestroy(fgmres_solver);
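// Hedged usage sketch (not part of this listing): flexible GMRES, which tolerates a
// preconditioner that changes between iterations. 'amg' and A, B, X are assumed.
HypreFGMRES fgmres(A);
fgmres.SetKDim(30);
fgmres.SetTol(1e-8);
fgmres.SetMaxIter(300);
fgmres.SetPreconditioner(amg);
fgmres.Mult(B, X);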
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");

// HypreParaSails constructors
HYPRE_ParaSailsCreate(comm, &sai_precond);
SetDefaultOptions();
// ...
HYPRE_ParCSRMatrixGetComm(A, &comm);
HYPRE_ParaSailsCreate(comm, &sai_precond);
SetDefaultOptions();

void HypreParaSails::SetDefaultOptions()
{
   int sai_max_levels = 1;
   double sai_threshold = 0.1;
   double sai_filter = 0.1;
   double sai_loadbal = 0.0;
   int sai_logging = 1;

   HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
   HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
   HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
   HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
   HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
   HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
}

void HypreParaSails::ResetSAIPrecond(MPI_Comm comm)
{
   HYPRE_Int sai_max_levels;
   HYPRE_Real sai_threshold;
   HYPRE_Real sai_filter;
   HYPRE_Real sai_loadbal;
   HYPRE_Int sai_reuse;
   HYPRE_Int sai_logging;

   HYPRE_ParaSailsGetNlevels(sai_precond, &sai_max_levels);
   HYPRE_ParaSailsGetThresh(sai_precond, &sai_threshold);
   HYPRE_ParaSailsGetFilter(sai_precond, &sai_filter);
   HYPRE_ParaSailsGetSym(sai_precond, &sai_sym);
   HYPRE_ParaSailsGetLoadbal(sai_precond, &sai_loadbal);
   HYPRE_ParaSailsGetReuse(sai_precond, &sai_reuse);
   HYPRE_ParaSailsGetLogging(sai_precond, &sai_logging);

   HYPRE_ParaSailsDestroy(sai_precond);
   HYPRE_ParaSailsCreate(comm, &sai_precond);

   HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
   HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
   HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
   HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
   HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
   HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
}

// HypreParaSails::SetOperator, option setters (fragments), and destructor
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
HYPRE_ParCSRMatrixGetComm(*A, &comm);
ResetSAIPrecond(comm);
// ...
HYPRE_ParaSailsSetParams(sai_precond, threshold, max_levels);
HYPRE_ParaSailsSetFilter(sai_precond, filter);
HYPRE_ParaSailsSetLoadbal(sai_precond, loadbal);
HYPRE_ParaSailsSetReuse(sai_precond, reuse);
HYPRE_ParaSailsSetLogging(sai_precond, logging);
HYPRE_ParaSailsSetSym(sai_precond, sym);
// ...
HYPRE_ParaSailsDestroy(sai_precond);
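// Hedged usage sketch (not part of this listing): ParaSails as a sparse approximate
// inverse preconditioner for CG on an SPD system (symmetry flag 1 is an assumption).
HypreParaSails sai(A);
sai.SetSymmetry(1);
sai.SetParams(/*threshold*/ 0.1, /*max_levels*/ 1);
sai.SetFilter(0.1);
HyprePCG sai_pcg(A);
sai_pcg.SetPreconditioner(sai);
sai_pcg.Mult(B, X);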
// HypreEuclid constructors
HYPRE_EuclidCreate(comm, &euc_precond);
SetDefaultOptions();
// ...
HYPRE_ParCSRMatrixGetComm(A, &comm);
HYPRE_EuclidCreate(comm, &euc_precond);
SetDefaultOptions();

void HypreEuclid::SetDefaultOptions()
{
   HYPRE_EuclidSetLevel(euc_precond, euc_level);
   HYPRE_EuclidSetStats(euc_precond, euc_stats);
   HYPRE_EuclidSetMem(euc_precond, euc_mem);
   HYPRE_EuclidSetBJ(euc_precond, euc_bj);
   HYPRE_EuclidSetRowScale(euc_precond, euc_ro_sc);
}

// Option setters (fragments)
HYPRE_EuclidSetLevel(euc_precond, level);
HYPRE_EuclidSetStats(euc_precond, stats);
HYPRE_EuclidSetMem(euc_precond, mem);
HYPRE_EuclidSetBJ(euc_precond, bj);
HYPRE_EuclidSetRowScale(euc_precond, row_scale);

void HypreEuclid::ResetEuclidPrecond(MPI_Comm comm)
{
   HYPRE_EuclidDestroy(euc_precond);
   HYPRE_EuclidCreate(comm, &euc_precond);
   SetDefaultOptions();
}

// HypreEuclid::SetOperator (fragments) and destructor
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
HYPRE_ParCSRMatrixGetComm(*new_A, &comm);
ResetEuclidPrecond(comm);
// ...
HYPRE_EuclidDestroy(euc_precond);
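// Hedged usage sketch (not part of this listing): Euclid (parallel ILU) preconditioning
// for GMRES. A, B, X are assumed.
HypreEuclid euclid(A);
euclid.SetLevel(1);       // ILU(k) fill level
euclid.SetBJ(0);          // 0: parallel ILU, 1: block-Jacobi ILU
HypreGMRES euc_gmres(A);
euc_gmres.SetPreconditioner(euclid);
euc_gmres.Mult(B, X);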
#if MFEM_HYPRE_VERSION >= 21900

// HypreILU constructor
HYPRE_ILUCreate(&ilu_precond);
SetDefaultOptions();

void HypreILU::SetDefaultOptions()
{
   HYPRE_Int ilu_type = 0;
   HYPRE_ILUSetType(ilu_precond, ilu_type);

   HYPRE_Int max_iter = 1;
   HYPRE_ILUSetMaxIter(ilu_precond, max_iter);

   HYPRE_Real tol = 0.0;
   HYPRE_ILUSetTol(ilu_precond, tol);

   HYPRE_Int lev_fill = 1;
   HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);

   HYPRE_Int reorder_type = 1;
   HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);

   HYPRE_Int print_level = 0;
   HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
}

void HypreILU::ResetILUPrecond()
{
   HYPRE_ILUDestroy(ilu_precond);
   HYPRE_ILUCreate(&ilu_precond);
   SetDefaultOptions();
}

// Option setters (fragments)
HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);
HYPRE_ILUSetType(ilu_precond, ilu_type);
HYPRE_ILUSetMaxIter(ilu_precond, max_iter);
HYPRE_ILUSetTol(ilu_precond, tol);
HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);
HYPRE_ILUSetPrintLevel(ilu_precond, print_level);

// HypreILU::SetOperator (fragments) and destructor
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
if (A) { ResetILUPrecond(); }
// ...
HYPRE_ILUDestroy(ilu_precond);
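// Hedged usage sketch (not part of this listing): standalone HypreILU configuration,
// overriding the defaults set above (requires hypre >= 2.19.0 per the guard).
HypreILU ilu_prec;
ilu_prec.SetType(0);              // block-Jacobi ILU(k)
ilu_prec.SetLevelOfFill(1);
ilu_prec.SetLocalReordering(1);   // reverse Cuthill-McKee
ilu_prec.SetOperator(A);          // A: HypreParMatrix (assumed)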
// HypreBoomerAMG constructors
HYPRE_BoomerAMGCreate(&amg_precond);
SetDefaultOptions();
// ...
HYPRE_BoomerAMGCreate(&amg_precond);
SetDefaultOptions();

void HypreBoomerAMG::SetDefaultOptions()
{
#if !defined(HYPRE_USING_GPU)
   int coarsen_type = 10;
   double theta = 0.25;
   int interp_type = 6;
   int relax_sweeps = 1;
   int print_level = 1;
   int max_levels = 25;
#else
   int coarsen_type = 8;
   double theta = 0.25;
   int interp_type = 6;
   int relax_type = 18;
   int relax_sweeps = 1;
   int print_level = 1;
   int max_levels = 25;
#endif

   HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
   HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
   HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
   HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
   HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
   HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
   HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
   HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
   HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);

   HYPRE_BoomerAMGSetMaxIter(amg_precond, 1);
   HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
}
void HypreBoomerAMG::ResetAMGPrecond()
{
   HYPRE_Int coarsen_type;
   HYPRE_Int agg_levels;
   HYPRE_Int relax_type;
   HYPRE_Int relax_sweeps;
   HYPRE_Int interp_type;
   HYPRE_Int print_level;
   HYPRE_Int max_levels;
   HYPRE_Int nrbms = rbms.Size();
   HYPRE_Int nodal_diag;
   HYPRE_Int relax_coarse;
   HYPRE_Int interp_vec_variant;
   HYPRE_Int smooth_interp_vectors;
   HYPRE_Int interp_refine;

   hypre_ParAMGData *amg_data = (hypre_ParAMGData *)amg_precond;

   HYPRE_BoomerAMGGetCoarsenType(amg_precond, &coarsen_type);
   agg_levels = hypre_ParAMGDataAggNumLevels(amg_data);
   relax_type = hypre_ParAMGDataUserRelaxType(amg_data);
   relax_sweeps = hypre_ParAMGDataUserNumSweeps(amg_data);
   HYPRE_BoomerAMGGetStrongThreshold(amg_precond, &theta);
   hypre_BoomerAMGGetInterpType(amg_precond, &interp_type);
   HYPRE_BoomerAMGGetPMaxElmts(amg_precond, &Pmax);
   HYPRE_BoomerAMGGetPrintLevel(amg_precond, &print_level);
   HYPRE_BoomerAMGGetMaxLevels(amg_precond, &max_levels);
   HYPRE_BoomerAMGGetNumFunctions(amg_precond, &dim);
   // ...
   nodal = hypre_ParAMGDataNodal(amg_data);
   nodal_diag = hypre_ParAMGDataNodalDiag(amg_data);
   HYPRE_BoomerAMGGetCycleRelaxType(amg_precond, &relax_coarse, 3);
   interp_vec_variant = hypre_ParAMGInterpVecVariant(amg_data);
   q_max = hypre_ParAMGInterpVecQMax(amg_data);
   smooth_interp_vectors = hypre_ParAMGSmoothInterpVectors(amg_data);
   interp_refine = hypre_ParAMGInterpRefine(amg_data);

   HYPRE_BoomerAMGDestroy(amg_precond);
   HYPRE_BoomerAMGCreate(&amg_precond);

   HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
   HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
   HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
   HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
   HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);
   HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
   HYPRE_BoomerAMGSetMaxIter(amg_precond, 1);
   HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
   HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
   HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
   HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
   HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
   // ...
   HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
   HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
   HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
   HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
   HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
   HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
   HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);
   // ...
   HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
}

// HypreBoomerAMG::SetOperator (fragments)
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
if (A) { ResetAMGPrecond(); }
// HypreBoomerAMG::SetSystemsOptions (fragments)
HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
// ...
HYPRE_Int *h_mapping = mfem_hypre_CTAlloc_host(HYPRE_Int, height);
// ...
MFEM_VERIFY(height % dim == 0, "Ordering does not work as claimed!");
// ...
for (int i = 0; i < dim; ++i)
{
   for (int j = 0; j < h_nnodes; ++j)
   {
      // ...
   }
}
// ...
#if defined(hypre_IntArrayData) && defined(HYPRE_USING_GPU)
HYPRE_Int *mapping = mfem_hypre_CTAlloc(HYPRE_Int, height);
hypre_TMemcpy(mapping, h_mapping, HYPRE_Int, height,
              HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
mfem_hypre_TFree_host(h_mapping);
#else
HYPRE_Int *mapping = h_mapping;
#endif
// ...
HYPRE_BoomerAMGSetDofFunc(amg_precond, mapping);
// ...
HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
HYPRE_BoomerAMGSetStrongThreshold(amg_precond, 0.5);
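// Hedged usage sketch (not part of this listing): telling BoomerAMG about a
// vector-valued (systems) problem, e.g. dim-component elasticity with byNODES ordering.
HypreBoomerAMG sys_amg(A);
sys_amg.SetSystemsOptions(dim, /*order_bynodes*/ true);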
// Rotational rigid-body modes used by SetElasticityOptions()
   y = 0.0; y(0) = x(1); y(1) = -x(0);

static void func_ryz(const Vector &x, Vector &y)
{
   y = 0.0; y(1) = x(2); y(2) = -x(1);
}

static void func_rzx(const Vector &x, Vector &y)
{
   y = 0.0; y(2) = x(0); y(0) = -x(2);
}

void HypreBoomerAMG::RecomputeRBMs()
{
   Array<HypreParVector*> gf_rbms;
   // ...
   for (int i = 0; i < rbms.Size(); i++)
   {
      HYPRE_ParVectorDestroy(rbms[i]);
   }
   // ... 2D case: one rotational mode
   VectorFunctionCoefficient coeff_rxy(2, func_rxy);
   ParGridFunction rbms_rxy(fespace);
   rbms_rxy.ProjectCoefficient(coeff_rxy);
   gf_rbms.SetSize(nrbms);
   rbms_rxy.GetTrueDofs(*gf_rbms[0]);
   // ... 3D case: three rotational modes
   VectorFunctionCoefficient coeff_rxy(3, func_rxy);
   VectorFunctionCoefficient coeff_ryz(3, func_ryz);
   VectorFunctionCoefficient coeff_rzx(3, func_rzx);
   ParGridFunction rbms_rxy(fespace);
   ParGridFunction rbms_ryz(fespace);
   ParGridFunction rbms_rzx(fespace);
   rbms_rxy.ProjectCoefficient(coeff_rxy);
   rbms_ryz.ProjectCoefficient(coeff_ryz);
   rbms_rzx.ProjectCoefficient(coeff_rzx);
   gf_rbms.SetSize(nrbms);
   rbms_rxy.GetTrueDofs(*gf_rbms[0]);
   rbms_ryz.GetTrueDofs(*gf_rbms[1]);
   rbms_rzx.GetTrueDofs(*gf_rbms[2]);
   // ...
   for (int i = 0; i < nrbms; i++)
   {
      rbms[i] = gf_rbms[i]->StealParVector();
   }
}

// HypreBoomerAMG::SetElasticityOptions (fragments)
#ifdef HYPRE_USING_GPU
MFEM_ABORT("this method is not supported in hypre built with GPU support");
#endif
this->fespace = fespace_;
// ...
int relax_coarse = 8;
int interp_vec_variant = 2;
int smooth_interp_vectors = 1;
int interp_refine = 1;

HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);
// ...
HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
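// Hedged usage sketch (not part of this listing): AMG tuned for linear elasticity,
// using the rigid-body modes recomputed above ('fespace' is the displacement space).
HypreBoomerAMG elast_amg(A);
elast_amg.SetElasticityOptions(fespace);
HyprePCG elast_pcg(A);
elast_pcg.SetPreconditioner(elast_amg);
elast_pcg.Mult(B, X);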
#if MFEM_HYPRE_VERSION >= 21800
// HypreBoomerAMG::SetAdvectiveOptions(distanceR, prerelax, postrelax) (fragments)
int interp_type = 100;
int relax_type = 10;
int coarsen_type = 6;
double strength_tolC = 0.1;
double strength_tolR = 0.01;
double filter_tolR = 0.0;
double filterA_tol = 0.0;

int ns_down = 0, ns_up = 0, ns_coarse;
ns_down = prerelax.length();
ns_up = postrelax.length();

HYPRE_Int **grid_relax_points = mfem_hypre_TAlloc(HYPRE_Int*, 4);
grid_relax_points[0] = NULL;
grid_relax_points[1] = mfem_hypre_TAlloc(HYPRE_Int, ns_down);
grid_relax_points[2] = mfem_hypre_TAlloc(HYPRE_Int, ns_up);
grid_relax_points[3] = mfem_hypre_TAlloc(HYPRE_Int, 1);
grid_relax_points[3][0] = 0;

for (int i = 0; i < ns_down; i++)
{
   if (prerelax[i] == 'F')      { grid_relax_points[1][i] = -1; }
   else if (prerelax[i] == 'C') { grid_relax_points[1][i] = 1; }
   else if (prerelax[i] == 'A') { grid_relax_points[1][i] = 0; }
}
for (int i = 0; i < ns_up; i++)
{
   if (postrelax[i] == 'F')      { grid_relax_points[2][i] = -1; }
   else if (postrelax[i] == 'C') { grid_relax_points[2][i] = 1; }
   else if (postrelax[i] == 'A') { grid_relax_points[2][i] = 0; }
}

HYPRE_BoomerAMGSetRestriction(amg_precond, distanceR);
HYPRE_BoomerAMGSetGridRelaxPoints(amg_precond, grid_relax_points);
HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
// ...
HYPRE_BoomerAMGSetSabs(amg_precond, Sabs);
HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
HYPRE_BoomerAMGSetStrongThreshold(amg_precond, strength_tolC);
HYPRE_BoomerAMGSetStrongThresholdR(amg_precond, strength_tolR);
HYPRE_BoomerAMGSetFilterThresholdR(amg_precond, filter_tolR);

if (relax_type > -1)
{
   HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
}

HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_coarse, 3);
HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_down, 1);
HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_up, 2);
HYPRE_BoomerAMGSetADropTol(amg_precond, filterA_tol);
HYPRE_BoomerAMGSetADropType(amg_precond, -1);
#endif

// ~HypreBoomerAMG
for (int i = 0; i < rbms.Size(); i++)
{
   HYPRE_ParVectorDestroy(rbms[i]);
}
HYPRE_BoomerAMGDestroy(amg_precond);
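// Hedged usage sketch (not part of this listing): approximate ideal restriction (AIR)
// AMG for an advection-dominated system, using the options assembled above.
HypreBoomerAMG air_amg(A);
air_amg.SetAdvectiveOptions(/*distance*/ 15, /*prerelax*/ "", /*postrelax*/ "FFC");
HypreGMRES air_gmres(A);
air_gmres.SetPreconditioner(air_amg);
air_gmres.Mult(B, X);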
// HypreAMS constructor (from G, x, y, z) (fragments)
MFEM_ASSERT(G != NULL, "");
MFEM_ASSERT(x != NULL, "");
MFEM_ASSERT(y != NULL, "");
int sdim = (z == NULL) ? 2 : 3;
int cycle_type = 13;

MakeSolver(sdim, cycle_type);

HYPRE_ParVector pz = z ? static_cast<HYPRE_ParVector>(*z) : NULL;
HYPRE_AMSSetCoordinateVectors(ams, *x, *y, pz);
HYPRE_AMSSetDiscreteGradient(ams, *G);

void HypreAMS::MakeSolver(int sdim, int cycle_type)
{
   double rlx_weight = 1.0;
   double rlx_omega = 1.0;
#if !defined(HYPRE_USING_GPU)
   int amg_coarsen_type = 10;
   int amg_agg_levels = 1;
   int amg_rlx_type = 8;
   double theta = 0.25;
   int amg_interp_type = 6;
#else
   int amg_coarsen_type = 8;
   int amg_agg_levels = 0;
   int amg_rlx_type = 18;
   double theta = 0.25;
   int amg_interp_type = 6;
#endif

   ams_cycle_type = cycle_type;
   HYPRE_AMSCreate(&ams);

   HYPRE_AMSSetDimension(ams, sdim);
   HYPRE_AMSSetTol(ams, 0.0);
   HYPRE_AMSSetMaxIter(ams, 1);
   HYPRE_AMSSetCycleType(ams, cycle_type);
   HYPRE_AMSSetPrintLevel(ams, 1);

   HYPRE_AMSSetSmoothingOptions(ams, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
   HYPRE_AMSSetAlphaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
                               theta, amg_interp_type, amg_Pmax);
   HYPRE_AMSSetBetaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
                              theta, amg_interp_type, amg_Pmax);
   HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, amg_rlx_type);
   HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, amg_rlx_type);
}
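// Hedged usage sketch (not part of this listing): AMS for a Nedelec (curl-curl) system,
// constructed directly from the edge ParFiniteElementSpace ('edge_fespace' assumed).
HypreAMS ams_prec(A, edge_fespace);
ams_prec.SetSingularProblem();        // only if the mass (beta) term vanishes
HyprePCG ams_pcg(A);
ams_pcg.SetPreconditioner(ams_prec);
ams_pcg.Mult(B, X);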
void HypreAMS::MakeGradientAndInterpolation(
   ParFiniteElementSpace *edge_fespace, int cycle_type)
{
   int dim = edge_fespace->GetMesh()->Dimension();
   int sdim = edge_fespace->GetMesh()->SpaceDimension();
   const FiniteElementCollection *edge_fec = edge_fespace->FEColl();

   bool trace_space, rt_trace_space;
   ND_Trace_FECollection *nd_tr_fec = NULL;
   trace_space = dynamic_cast<const ND_Trace_FECollection*>(edge_fec);
   rt_trace_space = dynamic_cast<const RT_Trace_FECollection*>(edge_fec);
   trace_space = trace_space || rt_trace_space;

   MFEM_VERIFY(!edge_fespace->IsVariableOrder(), "");
   int p = edge_fec->GetOrder();

   ParMesh *pmesh = edge_fespace->GetParMesh();
   // ...
   nd_tr_fec = new ND_Trace_FECollection(p, dim);
   edge_fespace = new ParFiniteElementSpace(pmesh, nd_tr_fec);

   // Vertex (nodal) finite element space
   FiniteElementCollection *vert_fec;
   // ...
   vert_fec = new H1_Trace_FECollection(p, dim);
   // ...
   vert_fec = new H1_FECollection(p, dim);
   ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh, /* ... */);

   // Vertex coordinates (lowest order, straight mesh)
   if (p == 1 && pmesh->GetNodes() == NULL)
   {
      ParGridFunction x_coord(vert_fespace);
      ParGridFunction y_coord(vert_fespace);
      ParGridFunction z_coord(vert_fespace);
      for (int i = 0; i < pmesh->GetNV(); i++)
      {
         coord = pmesh->GetVertex(i);
         x_coord(i) = coord[0];
         if (sdim >= 2) { y_coord(i) = coord[1]; }
         if (sdim == 3) { z_coord(i) = coord[2]; }
      }
      x = x_coord.ParallelProject();
      // ...
      y = y_coord.ParallelProject();
      // ...
      z = z_coord.ParallelProject();
      // ...
      HYPRE_AMSSetCoordinateVectors(ams,
                                    x ? (HYPRE_ParVector)(*x) : NULL,
                                    y ? (HYPRE_ParVector)(*y) : NULL,
                                    z ? (HYPRE_ParVector)(*z) : NULL);
   }

   // Discrete gradient matrix G
   ParDiscreteLinearOperator *grad;
   grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
   // ...
   grad->AddTraceFaceInterpolator(new GradientInterpolator);
   // ...
   grad->AddDomainInterpolator(new GradientInterpolator);
   // ...
   G = grad->ParallelAssemble();
   HYPRE_AMSSetDiscreteGradient(ams, *G);

   // Nedelec interpolation matrices Pi, Pix, Piy, Piz
   Pi = Pix = Piy = Piz = NULL;
   if (p > 1 || pmesh->GetNodes() != NULL)
   {
      ParFiniteElementSpace *vert_fespace_d /* ... */;

      ParDiscreteLinearOperator *id_ND;
      id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
      // ...
      id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
      // ...
      id_ND->AddDomainInterpolator(new IdentityInterpolator);
      // ...
      if (cycle_type < 10)
      {
         Pi = id_ND->ParallelAssemble();
      }
      else
      {
         Array2D<HypreParMatrix *> Pi_blocks;
         id_ND->GetParBlocks(Pi_blocks);
         Pix = Pi_blocks(0,0);
         if (sdim >= 2) { Piy = Pi_blocks(0,1); }
         if (sdim == 3) { Piz = Pi_blocks(0,2); }
      }
      // ...
      HYPRE_ParCSRMatrix HY_Pi  = (Pi)  ? (HYPRE_ParCSRMatrix) *Pi  : NULL;
      HYPRE_ParCSRMatrix HY_Pix = (Pix) ? (HYPRE_ParCSRMatrix) *Pix : NULL;
      HYPRE_ParCSRMatrix HY_Piy = (Piy) ? (HYPRE_ParCSRMatrix) *Piy : NULL;
      HYPRE_ParCSRMatrix HY_Piz = (Piz) ? (HYPRE_ParCSRMatrix) *Piz : NULL;
      HYPRE_AMSSetInterpolations(ams, HY_Pi, HY_Pix, HY_Piy, HY_Piz);

      delete vert_fespace_d;
   }

   delete vert_fespace;
   // ...
   delete edge_fespace;
}
void HypreAMS::Init(ParFiniteElementSpace *edge_fespace)
{
   int cycle_type = 13;
   int sdim = edge_fespace->GetMesh()->SpaceDimension();
   MakeSolver(sdim, cycle_type);
   MakeGradientAndInterpolation(edge_fespace, cycle_type);
}

void HypreAMS::ResetAMSPrecond()
{
#if MFEM_HYPRE_VERSION >= 22600
   auto *ams_data = (hypre_AMSData *)ams;

   HYPRE_Int dim = hypre_AMSDataDimension(ams_data);

   hypre_ParCSRMatrix *hy_G = hypre_AMSDataDiscreteGradient(ams_data);
   HYPRE_Int beta_is_zero = hypre_AMSDataBetaIsZero(ams_data);

   hypre_ParCSRMatrix *hy_Pi = hypre_AMSDataPiInterpolation(ams_data); // '=' was missing in the listing
   hypre_ParCSRMatrix *hy_Pix = ams_data->Pix;
   hypre_ParCSRMatrix *hy_Piy = ams_data->Piy;
   hypre_ParCSRMatrix *hy_Piz = ams_data->Piz;
   HYPRE_Int owns_Pi = hypre_AMSDataOwnsPiInterpolation(ams_data);
   ams_data->owns_Pi = 0;

   hypre_ParVector *hy_x = hypre_AMSDataVertexCoordinateX(ams_data);
   hypre_ParVector *hy_y = hypre_AMSDataVertexCoordinateY(ams_data);
   hypre_ParVector *hy_z = hypre_AMSDataVertexCoordinateZ(ams_data);

   HYPRE_Int maxit = hypre_AMSDataMaxIter(ams_data);
   HYPRE_Real tol = hypre_AMSDataTol(ams_data);
   HYPRE_Int cycle_type = hypre_AMSDataCycleType(ams_data);
   HYPRE_Int ams_print_level = hypre_AMSDataPrintLevel(ams_data);

   HYPRE_Int A_relax_type = hypre_AMSDataARelaxType(ams_data);
   HYPRE_Int A_relax_times = hypre_AMSDataARelaxTimes(ams_data);
   HYPRE_Real A_relax_weight = hypre_AMSDataARelaxWeight(ams_data);
   HYPRE_Real A_omega = hypre_AMSDataAOmega(ams_data);
   HYPRE_Int A_cheby_order = hypre_AMSDataAChebyOrder(ams_data);
   HYPRE_Real A_cheby_fraction = hypre_AMSDataAChebyFraction(ams_data);

   HYPRE_Int B_Pi_coarsen_type = hypre_AMSDataPoissonAlphaAMGCoarsenType(ams_data);
   HYPRE_Int B_Pi_agg_levels = hypre_AMSDataPoissonAlphaAMGAggLevels(ams_data);
   HYPRE_Int B_Pi_relax_type = hypre_AMSDataPoissonAlphaAMGRelaxType(ams_data);
   HYPRE_Int B_Pi_coarse_relax_type = ams_data->B_Pi_coarse_relax_type;
   HYPRE_Real B_Pi_theta = hypre_AMSDataPoissonAlphaAMGStrengthThreshold(ams_data);
   HYPRE_Int B_Pi_interp_type = ams_data->B_Pi_interp_type;
   HYPRE_Int B_Pi_Pmax = ams_data->B_Pi_Pmax;

   HYPRE_Int B_G_coarsen_type = hypre_AMSDataPoissonBetaAMGCoarsenType(ams_data);
   HYPRE_Int B_G_agg_levels = hypre_AMSDataPoissonBetaAMGAggLevels(ams_data);
   HYPRE_Int B_G_relax_type = hypre_AMSDataPoissonBetaAMGRelaxType(ams_data);
   HYPRE_Int B_G_coarse_relax_type = ams_data->B_G_coarse_relax_type;
   HYPRE_Real B_G_theta = hypre_AMSDataPoissonBetaAMGStrengthThreshold(ams_data);
   HYPRE_Int B_G_interp_type = ams_data->B_G_interp_type;
   HYPRE_Int B_G_Pmax = ams_data->B_G_Pmax;

   HYPRE_AMSDestroy(ams);
   HYPRE_AMSCreate(&ams);
   ams_data = (hypre_AMSData *)ams;

   HYPRE_AMSSetDimension(ams, dim);
   HYPRE_AMSSetTol(ams, tol);
   HYPRE_AMSSetMaxIter(ams, maxit);
   HYPRE_AMSSetCycleType(ams, cycle_type);
   HYPRE_AMSSetPrintLevel(ams, ams_print_level);

   HYPRE_AMSSetCoordinateVectors(ams, hy_x, hy_y, hy_z);
   HYPRE_AMSSetDiscreteGradient(ams, hy_G);
   HYPRE_AMSSetCoordinateVectors(ams, hy_x, hy_y, hy_z);
   HYPRE_AMSSetInterpolations(ams, hy_Pi, hy_Pix, hy_Piy, hy_Piz);
   ams_data->owns_Pi = owns_Pi;

   HYPRE_AMSSetSmoothingOptions(ams, A_relax_type, A_relax_times, A_relax_weight,
                                A_omega);
   hypre_AMSDataAChebyOrder(ams_data) = A_cheby_order;
   hypre_AMSDataAChebyFraction(ams_data) = A_cheby_fraction;

   HYPRE_AMSSetAlphaAMGOptions(ams, B_Pi_coarsen_type, B_Pi_agg_levels,
                               B_Pi_relax_type,
                               B_Pi_theta, B_Pi_interp_type, B_Pi_Pmax);
   HYPRE_AMSSetBetaAMGOptions(ams, B_G_coarsen_type, B_G_agg_levels,
                              B_G_relax_type,
                              B_G_theta, B_G_interp_type, B_G_Pmax);

   HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, B_Pi_coarse_relax_type);
   HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, B_G_coarse_relax_type);

   ams_data->beta_is_zero = beta_is_zero;
#else
   HYPRE_AMSDestroy(ams);
   MakeSolver(space_dim, ams_cycle_type);

   HYPRE_AMSSetPrintLevel(ams, print_level);
   if (singular) { HYPRE_AMSSetBetaPoissonMatrix(ams, NULL); }

   HYPRE_AMSSetDiscreteGradient(ams, *G);
   // ...
   HYPRE_AMSSetCoordinateVectors(ams,
                                 x ? (HYPRE_ParVector)(*x) : nullptr,
                                 y ? (HYPRE_ParVector)(*y) : nullptr,
                                 z ? (HYPRE_ParVector)(*z) : nullptr);
   // ...
   HYPRE_AMSSetInterpolations(ams,
                              Pi ? (HYPRE_ParCSRMatrix) *Pi : nullptr,
                              Pix ? (HYPRE_ParCSRMatrix) *Pix : nullptr,
                              Piy ? (HYPRE_ParCSRMatrix) *Piy : nullptr,
                              Piz ? (HYPRE_ParCSRMatrix) *Piz : nullptr);
#endif
}

// HypreAMS::SetOperator (fragments), destructor, and SetPrintLevel
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
if (A) { ResetAMSPrecond(); }
// ...
HYPRE_AMSDestroy(ams);
// ...
HYPRE_AMSSetPrintLevel(ams, print_lvl);
print_level = print_lvl;
// HypreADS constructor (from C, G, x, y, z) (fragments)
   x(x_), y(y_), z(z_),
   ND_Pi(NULL), ND_Pix(NULL), ND_Piy(NULL), ND_Piz(NULL),
   RT_Pi(NULL), RT_Pix(NULL), RT_Piy(NULL), RT_Piz(NULL)
{
   MFEM_ASSERT(C != NULL, "");
   MFEM_ASSERT(G != NULL, "");
   MFEM_ASSERT(x != NULL, "");
   MFEM_ASSERT(y != NULL, "");
   MFEM_ASSERT(z != NULL, "");
   // ...
   HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
   HYPRE_ADSSetDiscreteCurl(ads, *C);
   HYPRE_ADSSetDiscreteGradient(ads, *G);
}
void HypreADS::MakeSolver()
{
   double rlx_weight = 1.0;
   double rlx_omega = 1.0;
#if !defined(HYPRE_USING_GPU)
   int amg_coarsen_type = 10;
   int amg_agg_levels = 1;
   int amg_rlx_type = 8;
   double theta = 0.25;
   int amg_interp_type = 6;
#else
   int amg_coarsen_type = 8;
   int amg_agg_levels = 0;
   int amg_rlx_type = 18;
   double theta = 0.25;
   int amg_interp_type = 6;
#endif

   HYPRE_ADSCreate(&ads);

   HYPRE_ADSSetTol(ads, 0.0);
   HYPRE_ADSSetMaxIter(ads, 1);
   HYPRE_ADSSetCycleType(ads, cycle_type);
   HYPRE_ADSSetPrintLevel(ads, 1);

   HYPRE_ADSSetSmoothingOptions(ads, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
   HYPRE_ADSSetAMGOptions(ads, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
                          theta, amg_interp_type, amg_Pmax);
   HYPRE_ADSSetAMSOptions(ads, ams_cycle_type, amg_coarsen_type, amg_agg_levels,
                          amg_rlx_type, theta, amg_interp_type, amg_Pmax);
}
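// Hedged usage sketch (not part of this listing): ADS for a Raviart-Thomas (div-div)
// system, constructed from the face ParFiniteElementSpace ('face_fespace' assumed).
HypreADS ads_prec(A, face_fespace);
HyprePCG ads_pcg(A);
ads_pcg.SetPreconditioner(ads_prec);
ads_pcg.Mult(B, X);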
void HypreADS::MakeDiscreteMatrices(ParFiniteElementSpace *face_fespace)
{
   const FiniteElementCollection *face_fec = face_fespace->FEColl();
   bool trace_space =
      (dynamic_cast<const RT_Trace_FECollection*>(face_fec) != NULL);

   MFEM_VERIFY(!face_fespace->IsVariableOrder(), "");
   int p = face_fec->GetOrder();

   // Vertex and edge finite element spaces
   ParMesh *pmesh = (ParMesh *) face_fespace->GetMesh();
   FiniteElementCollection *vert_fec, *edge_fec;
   // ... (trace spaces)
   vert_fec = new H1_Trace_FECollection(p, 3);
   edge_fec = new ND_Trace_FECollection(p, 3);
   // ... (regular spaces)
   vert_fec = new H1_FECollection(p, 3);
   edge_fec = new ND_FECollection(p, 3);

   ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh, /* ... */);
   ParFiniteElementSpace *edge_fespace = new ParFiniteElementSpace(pmesh, /* ... */);

   // Vertex coordinates (lowest order, straight mesh)
   if (p == 1 && pmesh->GetNodes() == NULL)
   {
      ParGridFunction x_coord(vert_fespace);
      ParGridFunction y_coord(vert_fespace);
      ParGridFunction z_coord(vert_fespace);
      for (int i = 0; i < pmesh->GetNV(); i++)
      {
         coord = pmesh->GetVertex(i);
         x_coord(i) = coord[0];
         y_coord(i) = coord[1];
         z_coord(i) = coord[2];
      }
      x = x_coord.ParallelProject();
      y = y_coord.ParallelProject();
      z = z_coord.ParallelProject();
      // ...
      HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
   }

   // Discrete curl matrix C
   ParDiscreteLinearOperator *curl;
   curl = new ParDiscreteLinearOperator(edge_fespace, face_fespace);
   // ...
   curl->AddTraceFaceInterpolator(new CurlInterpolator);
   // ...
   curl->AddDomainInterpolator(new CurlInterpolator);
   // ...
   C = curl->ParallelAssemble();
   // ...
   HYPRE_ADSSetDiscreteCurl(ads, *C);

   // Discrete gradient matrix G
   ParDiscreteLinearOperator *grad;
   grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
   // ...
   grad->AddTraceFaceInterpolator(new GradientInterpolator);
   // ...
   grad->AddDomainInterpolator(new GradientInterpolator);
   // ...
   G = grad->ParallelAssemble();
   // ...
   HYPRE_ADSSetDiscreteGradient(ads, *G);

   // Nedelec and Raviart-Thomas interpolation matrices
   RT_Pi = RT_Pix = RT_Piy = RT_Piz = NULL;
   ND_Pi = ND_Pix = ND_Piy = ND_Piz = NULL;
   if (p > 1 || pmesh->GetNodes() != NULL)
   {
      ParFiniteElementSpace *vert_fespace_d /* ... */;

      ParDiscreteLinearOperator *id_ND;
      id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
      // ...
      id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
      // ...
      id_ND->AddDomainInterpolator(new IdentityInterpolator);
      // ...
      if (ams_cycle_type < 10)
      {
         ND_Pi = id_ND->ParallelAssemble();
      }
      else
      {
         Array2D<HypreParMatrix *> ND_Pi_blocks;
         id_ND->GetParBlocks(ND_Pi_blocks);
         ND_Pix = ND_Pi_blocks(0,0);
         ND_Piy = ND_Pi_blocks(0,1);
         ND_Piz = ND_Pi_blocks(0,2);
      }

      ParDiscreteLinearOperator *id_RT;
      id_RT = new ParDiscreteLinearOperator(vert_fespace_d, face_fespace);
      // ...
      id_RT->AddTraceFaceInterpolator(new NormalInterpolator);
      // ...
      id_RT->AddDomainInterpolator(new IdentityInterpolator);
      // ...
      if (cycle_type < 10)
      {
         RT_Pi = id_RT->ParallelAssemble();
      }
      else
      {
         Array2D<HypreParMatrix *> RT_Pi_blocks;
         id_RT->GetParBlocks(RT_Pi_blocks);
         RT_Pix = RT_Pi_blocks(0,0);
         RT_Piy = RT_Pi_blocks(0,1);
         RT_Piz = RT_Pi_blocks(0,2);
      }
      // ...
      HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
      HY_RT_Pi  = (RT_Pi)  ? (HYPRE_ParCSRMatrix) *RT_Pi  : NULL;
      HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
      HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
      HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
      HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
      HY_ND_Pi  = (ND_Pi)  ? (HYPRE_ParCSRMatrix) *ND_Pi  : NULL;
      HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
      HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
      HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
      HYPRE_ADSSetInterpolations(ads,
                                 HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
                                 HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);

      delete vert_fespace_d;
   }

   delete vert_fespace;
   delete edge_fespace;
}
void HypreADS::Init(ParFiniteElementSpace *face_fespace)
{
   // ...
   MakeDiscreteMatrices(face_fespace);
}

void HypreADS::ResetADSPrecond()
{
   HYPRE_ADSDestroy(ads);
   // ...
   HYPRE_ADSSetPrintLevel(ads, print_level);
   // ...
   HYPRE_ADSSetDiscreteCurl(ads, *C);
   HYPRE_ADSSetDiscreteGradient(ads, *G);
   // ...
   MFEM_VERIFY(x && y && z, "");
   HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
   // ...
   HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
   HY_RT_Pi  = (RT_Pi)  ? (HYPRE_ParCSRMatrix) *RT_Pi  : NULL;
   HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
   HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
   HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
   HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
   HY_ND_Pi  = (ND_Pi)  ? (HYPRE_ParCSRMatrix) *ND_Pi  : NULL;
   HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
   HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
   HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
   HYPRE_ADSSetInterpolations(ads,
                              HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
                              HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);
}

// HypreADS::SetOperator (fragments), destructor, and SetPrintLevel
MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
if (A) { ResetADSPrecond(); }
// ...
HYPRE_ADSDestroy(ads);
// ...
HYPRE_ADSSetPrintLevel(ads, print_lvl);
print_level = print_lvl;
HypreLOBPCG::HypreMultiVector::HypreMultiVector(int n, HypreParVector & v,
                                                mv_InterfaceInterpreter & interpreter)
{
   // ...
   mv_ptr = mv_MultiVectorCreateFromSampleVector(&interpreter, nv,
                                                 (HYPRE_ParVector)v);

   HYPRE_ParVector* vecs = NULL;
   mv_TempMultiVector* tmp =
      (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);
   vecs = (HYPRE_ParVector*)(tmp->vector);

   hpv = new HypreParVector*[nv];
   for (int i=0; i<nv; i++)
   {
      hpv[i] = new HypreParVector(vecs[i]);
   }
}

HypreLOBPCG::HypreMultiVector::~HypreMultiVector()
{
   // ...
   for (int i=0; i<nv; i++)
   // ...
   mv_MultiVectorDestroy(mv_ptr);
}

HypreLOBPCG::HypreMultiVector::Randomize(HYPRE_Int seed_)
{
   mv_MultiVectorSetRandom(mv_ptr, seed_);
}

HypreLOBPCG::HypreMultiVector::GetVector(unsigned int i)
{
   MFEM_ASSERT((int)i < nv, "index out of range");
   // ...
}

HypreLOBPCG::HypreMultiVector::StealVectors()
{
   HypreParVector ** hpv_ret = hpv;
   // ...
   mv_TempMultiVector * mv_tmp =
      (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);
   mv_tmp->ownsVectors = 0;
   for (int i=0; i<nv; i++)
   {
      hpv_ret[i]->SetOwnership(1);
   }
   // ...
}
// HypreLOBPCG constructor, destructor, and option setters (fragments)
MPI_Comm_size(comm,&numProcs);
MPI_Comm_rank(comm,&myid);

HYPRE_ParCSRSetupInterpreter(&interpreter);
HYPRE_ParCSRSetupMatvec(&matvec_fn);
HYPRE_LOBPCGCreate(&interpreter, &matvec_fn, &lobpcg_solver);
// ...
HYPRE_LOBPCGDestroy(lobpcg_solver);
// ...
HYPRE_LOBPCGSetTol(lobpcg_solver, tol);
// ...
#if MFEM_HYPRE_VERSION >= 21101
HYPRE_LOBPCGSetRTol(lobpcg_solver, rel_tol);
#else
MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
#endif
// ...
HYPRE_LOBPCGSetMaxIter(lobpcg_solver, max_iter);
// ...
HYPRE_LOBPCGSetPrintLevel(lobpcg_solver, logging);
// ...
HYPRE_LOBPCGSetPrecondUsageMode(lobpcg_solver, pcg_mode);
// ...
HYPRE_LOBPCGSetPrecond(lobpcg_solver,
                       (HYPRE_PtrToSolverFcn)this->PrecondSolve,
                       (HYPRE_PtrToSolverFcn)this->PrecondSetup,
                       (HYPRE_Solver)&precond);

// Parallel partitioning of the eigenvector block (fragments)
if (HYPRE_AssumedPartitionCheck())
{
   MPI_Scan(&locSize, &part[1], 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
   part[0] = part[1] - locSize;
   MPI_Allreduce(&locSize, &glbSize, 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
}
else
{
   MPI_Allgather(&locSize, 1, HYPRE_MPI_BIG_INT,
                 &part[1], 1, HYPRE_MPI_BIG_INT, comm);
   for (int i=0; i<numProcs; i++)
   {
      part[i+1] += part[i];
   }
   glbSize = part[numProcs];
}
// HypreLOBPCG::SetOperator and SetMassMatrix (fragments)
const bool is_device_ptr = true;
// ...
matvec_fn.MatvecCreate  = this->OperatorMatvecCreate;
matvec_fn.Matvec        = this->OperatorMatvec;
matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;
HYPRE_LOBPCGSetup(lobpcg_solver, (HYPRE_Matrix)&A, NULL, NULL);
// ...
matvec_fn.MatvecCreate  = this->OperatorMatvecCreate;
matvec_fn.Matvec        = this->OperatorMatvec;
matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;
HYPRE_LOBPCGSetupB(lobpcg_solver, (HYPRE_Matrix)&M, NULL);
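// Hedged usage sketch (not part of this listing): generalized eigenproblem
// A x = lambda M x with LOBPCG, preconditioned by BoomerAMG. A, M are HypreParMatrix.
HypreBoomerAMG lobpcg_amg(A);
lobpcg_amg.SetPrintLevel(0);
HypreLOBPCG lobpcg(MPI_COMM_WORLD);
lobpcg.SetNumModes(5);
lobpcg.SetPreconditioner(lobpcg_amg);
lobpcg.SetMaxIter(100);
lobpcg.SetTol(1e-8);
lobpcg.SetPrecondUsageMode(1);
lobpcg.SetOperator(A);
lobpcg.SetMassMatrix(M);
lobpcg.Solve();
Array<double> lobpcg_eigs;
lobpcg.GetEigenvalues(lobpcg_eigs);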
// HypreLOBPCG::GetEigenvalues and GetEigenvector (fragments)
for (int i=0; i<nev; i++)
{
   eigs[i] = eigenvalues[i];
}
// ...
return multi_vec->GetVector(i);

// HypreLOBPCG::SetInitialVectors (fragments)
if ( multi_vec == NULL )
{
   MFEM_ASSERT(x != NULL, "In HypreLOBPCG::SetInitialVectors()");
   multi_vec = new HypreMultiVector(nev, *x, interpreter);
}
// ...
for (int i=0; i < min(num_vecs,nev); i++)
{
   multi_vec->GetVector(i) = *vecs[i];
}
// ...
for (int i=min(num_vecs,nev); i < nev; i++)
{
   multi_vec->GetVector(i).Randomize(seed);
}
// ...
if ( subSpaceProj != NULL )
{
   y = multi_vec->GetVector(0);
   for (int i=1; i<nev; i++)
   {
      subSpaceProj->Mult(multi_vec->GetVector(i),
                         multi_vec->GetVector(i-1));
   }
   subSpaceProj->Mult(y,
                      multi_vec->GetVector(nev-1));
}

// HypreLOBPCG::Solve (fragments)
if ( multi_vec == NULL )
{
   MFEM_ASSERT(x != NULL, "In HypreLOBPCG::Solve()");
   multi_vec = new HypreMultiVector(nev, *x, interpreter);
   multi_vec->Randomize(seed);

   if ( subSpaceProj != NULL )
   {
      y = multi_vec->GetVector(0);
      for (int i=1; i<nev; i++)
      {
         subSpaceProj->Mult(multi_vec->GetVector(i),
                            multi_vec->GetVector(i-1));
      }
      subSpaceProj->Mult(y, multi_vec->GetVector(nev-1));
   }
}
// ...
HYPRE_LOBPCGSolve(lobpcg_solver, NULL, *multi_vec, eigenvalues);
HypreLOBPCG::OperatorMatvecCreate(void *A, /* ... */)
{
   // ...
   return ( matvec_data );
}

HypreLOBPCG::OperatorMatvec(void *matvec_data,
                            HYPRE_Complex alpha, /* ... */)
{
   MFEM_VERIFY(alpha == 1.0 && beta == 0.0, "values not supported");

   Operator *Aop = (Operator*)A;

   hypre_ParVector * xPar = (hypre_ParVector *)x;
   hypre_ParVector * yPar = (hypre_ParVector *)y;

   HypreParVector xVec(xPar);
   HypreParVector yVec(yPar);

   Aop->Mult( xVec, yVec );
   // ...
   yVec.HypreReadWrite();
}

HypreLOBPCG::OperatorMatvecDestroy( void *matvec_data )

HypreLOBPCG::PrecondSolve(void *solver, /* ... */)
{
   Solver * PC = (Solver*)solver;

   hypre_ParVector * bPar = (hypre_ParVector *)b;
   hypre_ParVector * xPar = (hypre_ParVector *)x;

   HypreParVector bVec(bPar);
   HypreParVector xVec(xPar);

   PC->Mult( bVec, xVec );
   // ...
   xVec.HypreReadWrite();
}

HypreLOBPCG::PrecondSetup(void *solver, /* ... */)
// HypreAME constructor and destructor (fragments)
MPI_Comm_size(comm,&numProcs);
MPI_Comm_rank(comm,&myid);

HYPRE_AMECreate(&ame_solver);
HYPRE_AMESetPrintLevel(ame_solver, 0);
// ...
mfem_hypre_TFree_host(multi_vec);
// ...
for (int i=0; i<nev; i++)
{
   delete eigenvectors[i];
}
delete [] eigenvectors;
// ...
mfem_hypre_TFree_host(eigenvalues);
// ...
HYPRE_AMEDestroy(ame_solver);

// Option setters (fragments)
HYPRE_AMESetBlockSize(ame_solver, nev);
HYPRE_AMESetTol(ame_solver, tol);
#if MFEM_HYPRE_VERSION >= 21101
HYPRE_AMESetRTol(ame_solver, rel_tol);
#else
MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
#endif
HYPRE_AMESetMaxIter(ame_solver, max_iter);
HYPRE_AMESetPrintLevel(ame_solver, logging);

// HypreAME::SetPreconditioner, SetOperator, SetMassMatrix (fragments)
ams_precond = &precond;
// ...
HYPRE_Solver ams_precond_ptr = (HYPRE_Solver)*ams_precond;
ams_precond->SetupFcn()(*ams_precond, A, NULL, NULL);
HYPRE_AMESetAMSSolver(ame_solver, ams_precond_ptr);
// ...
HYPRE_AMESetup(ame_solver);
// ...
HYPRE_ParCSRMatrix parcsr_M = M;
HYPRE_AMESetMassMatrix(ame_solver, (HYPRE_ParCSRMatrix)parcsr_M);

// HypreAME::Solve and eigenpair extraction (fragments)
HYPRE_AMESolve(ame_solver);
// ...
HYPRE_AMEGetEigenvalues(ame_solver, &eigenvalues);
// ...
HYPRE_AMEGetEigenvectors(ame_solver, &multi_vec);
// ...
eigs.SetSize(nev); eigs = -1.0;
for (int i=0; i<nev; i++)
{
   eigs[i] = eigenvalues[i];
}

HypreAME::createDummyVectors() const
{
   for (int i=0; i<nev; i++)
   // ...
}

const HypreParVector &
// HypreAME::GetEigenvector (fragments)
if ( eigenvectors == NULL )
{
   this->createDummyVectors();
}
return *eigenvectors[i];

// HypreAME::StealEigenvectors (fragments)
if ( eigenvectors == NULL )
{
   this->createDummyVectors();
}
// ...
eigenvectors = NULL;
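// Hedged usage sketch (not part of this listing): Maxwell eigenproblem with AME, using
// an AMS preconditioner built from the curl-curl matrix A and 'edge_fespace' (assumed).
HypreAMS ame_ams(A, edge_fespace);
ame_ams.SetPrintLevel(0);
ame_ams.SetSingularProblem();
HypreAME ame(MPI_COMM_WORLD);
ame.SetNumModes(5);
ame.SetPreconditioner(ame_ams);
ame.SetMaxIter(100);
ame.SetTol(1e-8);
ame.SetOperator(A);
ame.SetMassMatrix(M);
ame.Solve();
Array<double> ame_eigs;
ame.GetEigenvalues(ame_eigs);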
MemoryType GetHypreMemoryType()
The MemoryType used by MFEM when allocating arrays for Hypre objects.
virtual ~HypreBoomerAMG()
void SetAbsTol(double atol)
void GetEigenvalues(Array< double > &eigenvalues) const
Collect the converged eigenvalues.
Hash function for data sequences.
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
void SetType(HYPRE_Int ilu_type)
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve Ax=b with hypre's GMRES.
void EliminateRowsCols(const Array< int > &rows_cols, const HypreParVector &X, HypreParVector &B)
HypreADS(ParFiniteElementSpace *face_fespace)
HypreParMatrix * ExtractSubmatrix(const Array< int > &indices, double threshold=0.0) const
const T * Read(MemoryClass mc, int size) const
Get read-only access to the memory with the given MemoryClass.
virtual int NumNonZeroElems() const
Returns the number of the nonzero elements in the matrix.
static constexpr Type default_type
HypreEuclid(MPI_Comm comm)
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
void EliminateBC(const HypreParMatrix &A, const HypreParMatrix &Ae, const Array< int > &ess_dof_list, const Vector &X, Vector &B)
Eliminate essential BC specified by ess_dof_list from the solution X to the r.h.s. B.
HypreParVector * NewTrueDofVector()
HypreParVector * X0
FIR Filter Temporary Vectors.
void Clear()
Clear the contents of the SparseMatrix.
double min_eig_est
Minimal eigenvalue estimate for polynomial smoothing.
Memory< T > & GetMemory()
Return a reference to the Memory object used by the Array.
static MemoryClass GetHostMemoryClass()
Get the current Host MemoryClass. This is the MemoryClass used by most MFEM host Memory objects...
void SetPrintLevel(int print_lvl)
void SetRowScale(int row_scale)
void SetMassMatrix(const HypreParMatrix &M)
void WrapMemoryWrite(Memory< double > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for write access...
int setup_called
Was hypre's Setup function called already?
void SetLogging(int logging)
int Dimension() const
Dimension of the reference space used within the elements.
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
void MakeRef(const HypreParMatrix &master)
Make this HypreParMatrix a reference to 'master'.
void Read_IJMatrix(MPI_Comm comm, const char *fname)
Read a matrix saved as a HYPRE_IJMatrix.
bool CanShallowCopy(const Memory< T > &src, MemoryClass mc)
Return true if the src Memory can be used with the MemoryClass mc.
void SetSize(int s)
Resize the vector to size s.
HypreParMatrix & Add(const double beta, const HypreParMatrix &B)
HypreParVector * B
Right-hand side and solution vector.
void Delete()
Delete the owned pointers and reset the Memory object.
double window_params[3]
Parameters for windowing function of FIR filter.
virtual const double * HostRead() const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), false).
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve Ax=b with hypre's PCG.
MemoryType GetHostMemoryType() const
Return the host MemoryType of the Memory object.
void Mult(const Table &A, const Table &B, Table &C)
C = A * B (as boolean matrices)
void GetEigenvalues(Array< double > &eigenvalues) const
Collect the converged eigenvalues.
void SetInitialVectors(int num_vecs, HypreParVector **vecs)
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
void SetWindowByName(const char *window_name)
Convenience function for setting canonical windowing parameters.
Biwise-OR of all HIP backends.
int Width() const
Get the width (size of input) of the Operator. Synonym with NumCols().
T * GetData()
Returns the data.
Issue warnings on hypre errors.
int Size() const
Returns the size of the vector.
T * Write(Memory< T > &mem, int size, bool on_dev=true)
Get a pointer for write access to mem with the mfem::Device's DeviceMemoryClass, if on_dev = true...
std::string GetHash() const
Return the hash string for the current sequence and reset (clear) the sequence.
void SetPreconditioner(HypreSolver &precond)
virtual double * HostWrite()
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), false).
HypreILU()
Constructor; sets the default options.
int GetNumCols() const
Returns the number of columns in the diagonal block of the ParCSRMatrix.
T * Write(MemoryClass mc, int size)
Get write-only access to the memory with the given MemoryClass.
HypreAMS(ParFiniteElementSpace *edge_fespace)
Construct the AMS solver on the given edge finite element space.
int NumRows() const
Get the number of rows (size of output) of the Operator. Synonym with Height().
void SetPrintLevel(int print_lvl)
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
void SetPreconditioner(Solver &precond)
void ResetTranspose() const
Reset (destroy) the internal transpose matrix that is created by EnsureMultTranspose() and MultTransp...
virtual void Mult(const Vector &x, Vector &y) const =0
Operator application: y=A(x).
Memory< HYPRE_Int > & GetDiagMemoryJ()
void SetMassMatrix(Operator &M)
Abstract parallel finite element space.
HypreParVector()
Default constructor, no underlying hypre_ParVector is created.
void SetPrintLevel(int logging)
bool iterative_mode
If true, use the second argument of Mult() as an initial guess.
void CopyFrom(const Memory &src, int size)
Copy size entries from src to *this.
void HostRead() const
Update the internal hypre_ParCSRMatrix object, A, to be on host.
void SetAdvectiveOptions(int distance=15, const std::string &prerelax="", const std::string &postrelax="FFC")
HYPRE_BigInt N() const
Returns the global number of columns.
void BlockInverseScale(const HypreParMatrix *A, HypreParMatrix *C, const Vector *b, HypreParVector *d, int blocksize, BlockInverseScaleJob job)
void HostWrite()
Update the internal hypre_ParCSRMatrix object, A, to be on host.
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Relax the linear system Ax=b.
bool pos_l1_norms
If set, take absolute values of the computed l1_norms.
HypreParMatrix * ParMult(const HypreParMatrix *A, const HypreParMatrix *B, bool own_matrix)
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve the linear system Ax=b.
void SetPrintLevel(int print_lvl)
void ScaleRows(const Vector &s)
Scale the local row i by s(i).
void SetParams(double threshold, int max_levels)
double poly_fraction
Fraction of spectrum to smooth for polynomial relaxation.
Memory< double > & GetMemoryData()
Memory< double > & GetMemory()
Return a reference to the Memory object used by the Vector.
void SetPrintLevel(int print_lvl)
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
Memory< int > & GetMemoryI()
int poly_scale
Apply the polynomial smoother to A or D^{-1/2} A D^{-1/2}.
void MergeDiagAndOffd(SparseMatrix &merged)
Get a single SparseMatrix containing all rows from this processor, merged from the diagonal and off-d...
HashFunction & AppendDoubles(const double *doubles, size_t num_doubles)
Add a sequence of doubles for hashing, given as a c-array.
void SetMaxIter(HYPRE_Int max_iter)
HypreFGMRES(MPI_Comm comm)
Memory< int > & GetMemoryJ()
void SetWindowParameters(double a, double b, double c)
Set parameters for windowing function for FIR smoother.
void SetLoadBal(double loadbal)
std::function< double(const Vector &)> f(double mass_coeff)
HypreGMRES(MPI_Comm comm)
Vector & operator=(const double *v)
Copy Size() entries from v.
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
bool MemoryClassContainsType(MemoryClass mc, MemoryType mt)
Return true iff the MemoryType mt is contained in the MemoryClass mc.
HypreLOBPCG(MPI_Comm comm)
void Add(const DenseMatrix &A, const DenseMatrix &B, double alpha, DenseMatrix &C)
C = A + alpha*B.
void EliminateBC(const HypreParMatrix &Ae, const Array< int > &ess_dof_list, const Vector &X, Vector &B) const
Eliminate essential BC specified by ess_dof_list from the solution X to the r.h.s. B.
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
void SetSymmetry(int sym)
void HypreStealOwnership(HypreParMatrix &A_hyp, SparseMatrix &A_diag)
Make A_hyp steal ownership of its diagonal part A_diag.
virtual ~HypreParaSails()
void SetDevicePtrOwner(bool own) const
Set/clear the ownership flag for the device pointer. Ownership indicates whether the pointer will be ...
void SetMaxIter(int max_iter)
bool Empty() const
Return true if the Memory object is empty, see Reset().
double * l1_norms
l1 norms of the rows of A
void SetOwnerFlags(signed char diag, signed char offd, signed char colmap)
Explicitly set the three ownership flags, see docs for diagOwner etc.
virtual void Setup(const HypreParVector &b, HypreParVector &x) const
Set up the solver (if not set up already, also called automatically by HypreSolver::Mult).
void SetLogging(int logging)
virtual HYPRE_PtrToParSolverFcn SolveFcn() const =0
hypre's internal Solve function
virtual double * Write(bool on_dev=true)
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), on_dev).
void SetRelTol(double rel_tol)
void mfem_error(const char *msg)
Function called when an error is encountered. Used by the macros MFEM_ABORT, MFEM_ASSERT, MFEM_VERIFY.
void SetResidualConvergenceOptions(int res_frequency=-1, double rtol=0.0)
int to_int(const std::string &str)
Convert a string to an int.
Abort on hypre errors (default in base class)
void SetLogging(int logging)
void Print(const char *fname, HYPRE_Int offi=0, HYPRE_Int offj=0) const
Prints the locally owned rows in parallel.
ParMesh * GetParMesh() const
HypreParaSails(MPI_Comm comm)
void HypreWrite()
Prepare the HypreParVector for write access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
void WrapHypreParVector(hypre_ParVector *y, bool owner=true)
Converts hypre's format to HypreParVector.
void AbsMultTranspose(double a, const Vector &x, double b, Vector &y) const
Computes y = a * |At| * x + b * y, using entry-wise absolute values of the transpose of the matrix A...
bool WrapVectors(const Vector &b, Vector &x) const
Makes the internal HypreParVectors B and X wrap the input vectors b and x.
void Read(MPI_Comm comm, const char *fname)
Reads a HypreParVector from files saved with HypreParVector::Print.
HypreParMatrix * LeftDiagMult(const SparseMatrix &D, HYPRE_BigInt *row_starts=NULL) const
Multiply the HypreParMatrix on the left by a block-diagonal parallel matrix D and return the result a...
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
HypreParVector ** StealEigenvectors()
Transfer ownership of the converged eigenvectors.
const HypreParMatrix * A
The linear system matrix.
HYPRE_Int Randomize(HYPRE_Int seed)
Set random values.
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve Ax=b with hypre's FGMRES.
virtual void MultTranspose(const Vector &b, Vector &x) const
Apply transpose of the smoother to relax the linear system Ax=b.
Ignore hypre errors (see e.g. HypreADS)
double relax_weight
Damping coefficient (usually <= 1)
HYPRE_BigInt * GetColStarts() const
Return the parallel column partitioning array.
void SetElasticityOptions(ParFiniteElementSpace *fespace)
void Sort()
Sorts the array in ascending order. This requires operator< to be defined for T.
int Capacity() const
Return the size of the allocated memory.
void GetDiag(Vector &diag) const
Get the local diagonal of the matrix.
void RAP(const DenseMatrix &A, const DenseMatrix &P, DenseMatrix &RAP)
const T * Read(const Memory< T > &mem, int size, bool on_dev=true)
Get a pointer for read access to mem with the mfem::Device's DeviceMemoryClass, if on_dev = true...
const T * HostRead() const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), false).
void SetLevelOfFill(HYPRE_Int lev_fill)
Set the fill level for ILU(k); the default is k=1.
void Reset()
Reset the memory to be empty, ensuring that Delete() will be a no-op.
HypreParMatrix * A
The linear system matrix.
HYPRE_BigInt GlobalTrueVSize() const
Biwise-OR of all CUDA backends.
Dynamic 2D array using row-major layout.
virtual void SetOperator(const Operator &op)
virtual HYPRE_PtrToParSolverFcn SetupFcn() const =0
hypre's internal Setup function
void SetLogging(int logging)
HypreParMatrix()
An empty matrix to be used as a reference to an existing matrix.
bool A_is_symmetric
A flag that indicates whether the linear system matrix A is symmetric.
void EnsureMultTranspose() const
Ensure the action of the transpose is performed fast.
void SetMaxIter(int max_iter)
int ParCSRRelax_FIR(hypre_ParCSRMatrix *A, hypre_ParVector *f, double max_eig, int poly_order, double *fir_coeffs, hypre_ParVector *u, hypre_ParVector *x0, hypre_ParVector *x1, hypre_ParVector *x2, hypre_ParVector *x3)
void SetMaxIter(int max_iter)
void SetPolyOptions(int poly_order, double poly_fraction, int eig_est_cg_iter=10)
Set parameters for polynomial smoothing.
void SetSOROptions(double relax_weight, double omega)
Set SOR-related parameters.
MPI_Comm GetComm() const
MPI communicator.
T * HostWrite(Memory< T > &mem, int size)
Shortcut to Write(const Memory<T> &mem, int size, false)
void CopyMemory(Memory< T > &src, Memory< T > &dst, MemoryClass dst_mc, bool dst_owner)
Shallow or deep copy src to dst with the goal to make the array src accessible through dst with the M...
Wrapper for hypre's parallel vector class.
HypreParMatrix * EliminateCols(const Array< int > &cols)
int ParCSRRelax_Taubin(hypre_ParCSRMatrix *A, hypre_ParVector *f, double lambda, double mu, int N, double max_eig, hypre_ParVector *u, hypre_ParVector *r)
double p(const Vector &x, double t)
static MemoryType GetHostMemoryType()
Get the current Host MemoryType. This is the MemoryType used by most MFEM classes when allocating mem...
void forall(int N, lambda &&body)
void PrintHash(std::ostream &out) const
Print sizes and hashes for all data arrays of the HypreParMatrix from the local MPI rank...
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as s...
MemoryType
Memory types supported by MFEM.
void HypreRead() const
Prepare the HypreParVector for read access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
void SetPrintLevel(int print_lvl)
void PrintCommPkg(std::ostream &out=mfem::out) const
Print information about the hypre_ParCSRCommPkg of the HypreParMatrix.
signed char OwnsColMap() const
Get colmap ownership flag.
void SetHostPtrOwner(bool own) const
Set/clear the ownership flag for the host pointer. Ownership indicates whether the pointer will be de...
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
void Solve()
Solve the eigenproblem.
void SetData(double *data_)
Sets the data of the Vector and the hypre_ParVector to data_.
void SetNumModes(int num_eigs)
int Height() const
Get the height (size of output) of the Operator. Synonym with NumRows().
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
HypreParMatrix * ParAdd(const HypreParMatrix *A, const HypreParMatrix *B)
Returns the matrix A + B.
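A short sketch of ParAdd, assuming A and B share the same parallel partitioning; the caller is assumed to own (and eventually delete) the result.
#include "mfem.hpp"
using namespace mfem;
// Form C = A + B as a new HypreParMatrix.
HypreParMatrix *AddMatrices(const HypreParMatrix &A, const HypreParMatrix &B)
{
   HypreParMatrix *C = ParAdd(&A, &B);
   return C; // delete when no longer needed
}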
double max_eig_est
Maximal eigenvalue estimate for polynomial smoothing.
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
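A tiny sketch of a run-time backend query, assuming Device::Configure() has already been called; Backend::DEVICE_MASK is the mask covering the device backends.
#include "mfem.hpp"
using namespace mfem;
// Return true when any device backend (CUDA, HIP, ...) is enabled.
bool UsingDeviceBackend()
{
   return Device::Allows(Backend::DEVICE_MASK);
}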
int NumCols() const
Get the number of columns (size of input) of the Operator. Synonym with Width().
A class to initialize the size of a Tensor.
void SetDataAndSize(double *d, int s)
Set the Vector data and size.
void SetRelTol(double rel_tol)
HashFunction & AppendInts(const int_type *ints, size_t num_ints)
Add a sequence of integers for hashing, given as a c-array.
double Norml1() const
Returns the l_1 norm of the vector.
void MakeAlias(const Memory &base, int offset, int size)
Create a memory object that points inside the memory object base.
HypreParMatrix * Transpose() const
Returns the transpose of *this.
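A minimal sketch of forming an explicit transpose; the returned matrix is a separate object that the caller is assumed to own.
#include "mfem.hpp"
using namespace mfem;
// Build A^T explicitly, e.g. when the transpose is applied many times.
HypreParMatrix *FormTranspose(const HypreParMatrix &A)
{
   HypreParMatrix *At = A.Transpose();
   return At; // delete when no longer needed
}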
void GetBlocks(Array2D< HypreParMatrix *> &blocks, bool interleaved_rows=false, bool interleaved_cols=false) const
int height
Dimension of the output / number of rows in the matrix.
void SetMaxIter(int max_iter)
void SetFIRCoefficients(double max_eig)
Compute window and Chebyshev coefficients for given polynomial order.
void EliminateRows(const Array< int > &rows)
Eliminate rows from the diagonal and off-diagonal blocks of the matrix.
double * fir_coeffs
Combined coefficients for windowing and Chebyshev polynomials.
void MakeDataOwner() const
Set the Vector data (host pointer) ownership flag.
void Threshold(double threshold=0.0)
Remove values smaller in absolute value than some threshold.
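A one-line usage sketch for Threshold; the tolerance is illustrative, and DropSmallEntries (listed below) is a related hypre-level alternative.
#include "mfem.hpp"
using namespace mfem;
// Remove near-zero entries produced by assembly round-off.
void CompressMatrix(HypreParMatrix &A)
{
   A.Threshold(1e-14); // drop entries with absolute value below 1e-14
}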
const T * HostRead(const Memory< T > &mem, int size)
Shortcut to Read(const Memory<T> &mem, int size, false)
double InnerProduct(HypreParVector *x, HypreParVector *y)
void SetPrintLevel(HYPRE_Int print_level)
Set the print level: 0 = none, 1 = setup, 2 = solve, 3 = setup+solve.
Host memory; using new[] and delete[].
constexpr MemoryClass GetHypreMemoryClass()
The MemoryClass used by Hypre objects.
void Print(const char *fname) const
Prints the locally owned rows in parallel.
void HypreReadWrite()
Prepare the HypreParVector for read and write access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
Memory< HYPRE_Int > & GetDiagMemoryI()
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
void New(int size)
Allocate host memory for size entries with the current host memory type returned by MemoryManager::Ge...
void InvScaleRows(const Vector &s)
Scale the local row i by 1./s(i)
int Size() const
Returns the number of TYPE I elements.
void SetTaubinOptions(double lambda, double mu, int iter)
Set parameters for Taubin's lambda-mu method.
HypreParVector * B
Right-hand side and solution vectors.
MemoryType GetDeviceMemoryType() const
Return the device MemoryType of the Memory object. If the device MemoryType is not set...
virtual void SetOperator(const Operator &op)
Set/update the solver for the given operator.
A simple singleton class for hypre's global settings, that 1) calls HYPRE_Init() and sets some GPU-re...
void ClearOwnerFlags() const
Clear the ownership flags for the host and device pointers, as well as any internal data allocated by...
T * ReadWrite(MemoryClass mc, int size)
Get read-write access to the memory with the given MemoryClass.
DiagonalPolicy
Defines operator diagonal policy upon elimination of rows and/or columns.
MemoryType GetMemoryType(MemoryClass mc)
Return a suitable MemoryType for a given MemoryClass.
void SetOperator(Operator &A)
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
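A compact sketch of the usual solver/preconditioner pairing behind SetPreconditioner, assuming an assembled HypreParMatrix A and compatible vectors b and x; tolerances and print levels are illustrative.
#include "mfem.hpp"
using namespace mfem;
// Solve A x = b with PCG preconditioned by BoomerAMG.
void SolveWithAMGPCG(const HypreParMatrix &A, const HypreParVector &b,
                     HypreParVector &x)
{
   HypreBoomerAMG amg(A); // AMG preconditioner built from A
   amg.SetPrintLevel(0);
   HyprePCG pcg(A);
   pcg.SetTol(1e-10);
   pcg.SetMaxIter(200);
   pcg.SetPrintLevel(2);
   pcg.SetPreconditioner(amg); // hypre solver used as a preconditioner
   pcg.Mult(b, x);
}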
void Destroy()
Destroy a vector.
void SetLocalReordering(HYPRE_Int reorder_type)
void HypreRead() const
Update the internal hypre_ParCSRMatrix object, A, to be in hypre memory space.
int relax_times
Number of relaxation sweeps.
Class used by MFEM to store pointers to host and/or device memory.
void operator*=(double s)
Scale all entries by s: A_scaled = s*A.
void SetPrintLevel(int logging)
double infinity()
Define a shortcut for std::numeric_limits<double>::infinity()
double ParNormlp(const Vector &vec, double p, MPI_Comm comm)
Compute the l_p norm of the Vector which is split without overlap across the given communicator...
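A short sketch of the parallel norm helper, assuming the Vector holds only this rank's non-overlapping share of the global entries.
#include "mfem.hpp"
using namespace mfem;
// l_2 norm of a vector distributed without overlap across comm.
double ParallelL2Norm(const Vector &local_part, MPI_Comm comm)
{
   return ParNormlp(local_part, 2.0, comm);
}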
int eig_est_cg_iter
Number of CG iterations to determine eigenvalue estimates.
void SetOwnership(int own)
Sets ownership of the internal hypre_ParVector.
double Norml2() const
Returns the l2 norm of the vector.
Abstract class for hypre's solvers and preconditioners.
int Size_of_connections() const
ErrorMode error_mode
How to treat hypre errors.
void AbsMult(double a, const Vector &x, double b, Vector &y) const
Computes y = a * |A| * x + b * y, using entry-wise absolute values of the matrix A.
bool OwnsHostPtr() const
Return true if the host pointer is owned. Ownership indicates whether the pointer will be deleted by ...
int Size() const
Return the logical size of the array.
HypreParVector & operator=(double d)
Set constant values.
void SetAbsTol(double tol)
void delete_hypre_ParCSRMatrixColMapOffd(hypre_ParCSRMatrix *A)
void CopyConvertMemory(Memory< SrcT > &src, MemoryClass dst_mc, Memory< DstT > &dst)
Deep copy and convert src to dst with the goal to make the array src accessible through dst with the ...
hypre_ParCSRMatrix * StealData()
Changes the ownership of the matrix.
void SetPrecondUsageMode(int pcg_mode)
void SetOperator(const HypreParMatrix &A)
void SetSystemsOptions(int dim, bool order_bynodes=false)
void SetFilter(double filter)
void DropSmallEntries(double tol)
Wrapper for hypre_ParCSRMatrixDropSmallEntries in different versions of hypre. Drop off-diagonal entr...
double sigma(const Vector &x)
void SetMaxIter(int max_iter)
HYPRE_BigInt GetGlobalNumCols() const
Return the global number of columns.
Vector * GlobalVector() const
Returns the global vector in each processor.
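A brief sketch of GlobalVector, mainly useful for small problems or debugging since every rank receives all entries; the returned Vector is assumed to be caller-owned.
#include "mfem.hpp"
using namespace mfem;
// Gather the full parallel vector on each processor and print it.
void PrintGlobal(const HypreParVector &v)
{
   Vector *g = v.GlobalVector();
   g->Print(mfem::out);
   delete g;
}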
void GetOffd(SparseMatrix &offd, HYPRE_BigInt *&cmap) const
Get the local off-diagonal block. NOTE: 'offd' will not own any data.
double omega
SOR parameter (usually in (0,2))
HYPRE_BigInt M() const
Returns the global number of rows.
signed char OwnsOffd() const
Get offd ownership flag.
double u(const Vector &xvec)
HYPRE_Int MultTranspose(HypreParVector &x, HypreParVector &y, double alpha=1.0, double beta=0.0) const
Computes y = alpha * A^t * x + beta * y.
void WrapHypreParCSRMatrix(hypre_ParCSRMatrix *a, bool owner=true)
Converts hypre's format to HypreParMatrix.
Wrapper for hypre's ParCSR matrix class.
void HostReadWrite()
Update the internal hypre_ParCSRMatrix object, A, to be on host.
Memory< double > auxB
Auxiliary buffers for the case when the input or output arrays in methods like Mult(const Vector &...
void Swap(SparseMatrix &other)
~HypreParVector()
Calls hypre's destroy function.
HypreParMatrix * HypreParMatrixFromBlocks(Array2D< HypreParMatrix *> &blocks, Array2D< double > *blockCoeff)
Returns a merged hypre matrix constructed from hypre matrix blocks.
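A sketch of assembling a 2x2 block system into one HypreParMatrix; the block layout, coefficients, and helper name are illustrative, and the merged matrix is assumed to be caller-owned.
#include "mfem.hpp"
using namespace mfem;
// Merge the blocks [A Bt; B C] with unit scaling into a single matrix.
HypreParMatrix *MergeBlocks(HypreParMatrix &A, HypreParMatrix &Bt,
                            HypreParMatrix &B, HypreParMatrix &C)
{
   Array2D<HypreParMatrix *> blocks(2, 2);
   blocks(0, 0) = &A; blocks(0, 1) = &Bt;
   blocks(1, 0) = &B; blocks(1, 1) = &C;
   Array2D<double> coeff(2, 2);
   coeff(0, 0) = 1.0; coeff(0, 1) = 1.0;
   coeff(1, 0) = 1.0; coeff(1, 1) = 1.0;
   return HypreParMatrixFromBlocks(blocks, &coeff); // delete when done
}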
Memory< double > & GetDiagMemoryData()
void Solve()
Solve the eigenproblem.
HYPRE_BigInt * GetTrueDofOffsets() const
HypreParVector * V
Temporary vectors.
double Normlinf() const
Returns the l_infinity norm of the vector.
void SetType(HypreSmoother::Type type, int relax_times=1)
Set the relaxation type and number of sweeps.
void GatherBlockOffsetData(MPI_Comm comm, const int rank, const int nprocs, const int num_loc, const Array< int > &offsets, std::vector< int > &all_num_loc, const int numBlocks, std::vector< std::vector< HYPRE_BigInt >> &blockProcOffsets, std::vector< HYPRE_BigInt > &procOffsets, std::vector< std::vector< int >> &procBlockOffsets, HYPRE_BigInt &firstLocal, HYPRE_BigInt &globalNum)
virtual double * HostReadWrite()
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), false).
MemoryClass
Memory classes identify sets of memory types.
HYPRE_BigInt GetGlobalNumRows() const
Return the global number of rows.
void WrapMemoryReadWrite(Memory< double > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for read and wri...
HYPRE_Int Mult(HypreParVector &x, HypreParVector &y, double alpha=1.0, double beta=0.0) const
Computes y = alpha * A * x + beta * y.
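A minimal sketch of the alpha/beta form of Mult shown above; the scaling values are illustrative and x, y are assumed compatible with A.
#include "mfem.hpp"
using namespace mfem;
// Apply A with explicit alpha/beta scaling.
void ScaledMult(const HypreParMatrix &A, HypreParVector &x, HypreParVector &y)
{
   A.Mult(x, y, 2.0, 1.0); // y <- 2*A*x + y (accumulate)
   A.Mult(x, y);           // y <- A*x (defaults: alpha = 1, beta = 0)
}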
int width
Dimension of the input / number of columns in the matrix.
HypreParVector CreateCompatibleVector() const
Constructs a HypreParVector compatible with the calling vector.
int poly_order
Order of the smoothing polynomial.
void SetTol(HYPRE_Real tol)
int GetNumRows() const
Returns the number of rows in the diagonal block of the ParCSRMatrix.
double lambda
Taubin's lambda-mu method parameters.
void WrapMemoryRead(const Memory< double > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for read access ...
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
void HypreReadWrite()
Update the internal hypre_ParCSRMatrix object, A, to be in hypre memory space.
HYPRE_BigInt * GetRowStarts() const
Return the parallel row partitioning array.