31Hypre::State Hypre::state = Hypre::State::UNINITIALIZED;
35 if (state != State::INITIALIZED)
37#if MFEM_HYPRE_VERSION >= 21900
47 state = State::INITIALIZED;
55#if defined(HYPRE_USING_GPU) && (MFEM_HYPRE_VERSION >= 23100)
58   MFEM_VERIFY(HYPRE_Initialized(), "HYPRE must be initialized before"
59               " calling Hypre::InitDevice()");
62 HYPRE_SetMemoryLocation(HYPRE_MEMORY_DEVICE);
63 HYPRE_SetExecutionPolicy(HYPRE_EXEC_DEVICE);
64 HYPRE_DeviceInitialize();
68 HYPRE_SetMemoryLocation(HYPRE_MEMORY_HOST);
69 HYPRE_SetExecutionPolicy(HYPRE_EXEC_HOST);
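// Illustrative sketch (not part of this file): typical driver-side use of the
// Hypre singleton, assuming an MPI-enabled MFEM build; the "cuda" device
// string is an assumption for the example only.
//
//    #include "mfem.hpp"
//    int main(int argc, char *argv[])
//    {
//       mfem::Mpi::Init(argc, argv);
//       mfem::Hypre::Init();          // initialize HYPRE exactly once
//       mfem::Device device("cuda");  // optional; configure the MFEM device first
//       mfem::Hypre::InitDevice();    // then set HYPRE's memory/exec policy
//       // ... assemble and solve ...
//       return 0;
//    }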
77 if (state != State::UNINITIALIZED)
79#if MFEM_HYPRE_VERSION >= 21900
83 state = State::UNINITIALIZED;
86void Hypre::SetDefaultOptions()
91#if MFEM_HYPRE_VERSION >= 22100
92#ifdef HYPRE_USING_CUDA
94 HYPRE_SetSpGemmUseCusparse(0);
95#elif defined(HYPRE_USING_HIP)
100 HYPRE_SetSpMVUseVendor(0);
126template <typename TargetT, typename SourceT>
127static TargetT *DuplicateAs(const SourceT *array, int size,
128                            bool cplusplus = true)
130   TargetT *target_array = cplusplus ? (TargetT*) Memory<TargetT>(size)
131                                     : mfem_hypre_TAlloc_host(TargetT, size);
132   for (int i = 0; i < size; i++)
134      target_array[i] = array[i];
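// Minimal sketch of how DuplicateAs is used elsewhere in this file: it makes a
// type-converted deep copy of an array, e.g. widening an int partitioning
// array to HYPRE_BigInt. The 'cplusplus' flag only selects the allocator
// (MFEM Memory vs. host hypre_TAlloc); values are copied element by element.
//
//    const int starts[3] = {0, 50, 100};   // hypothetical input
//    HYPRE_BigInt *big = DuplicateAs<HYPRE_BigInt>(starts, 3);
//    // ... use big ...; the deallocation routine depends on 'cplusplus'.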
157inline void HypreParVector::_SetDataAndSize_()
159 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
160#if !defined(HYPRE_USING_GPU)
162 internal::to_int(hypre_VectorSize(x_loc)));
164 size = internal::to_int(hypre_VectorSize(x_loc));
165 MemoryType mt = (hypre_VectorMemoryLocation(x_loc) == HYPRE_MEMORY_HOST
167 if (hypre_VectorData(x_loc) != NULL)
181 x = hypre_ParVectorCreate(comm,glob_size,col);
182 hypre_ParVectorInitialize(x);
183#if MFEM_HYPRE_VERSION <= 22200
184 hypre_ParVectorSetPartitioningOwner(x,0);
187 hypre_ParVectorSetDataOwner(x,1);
188 hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
198 x = hypre_ParVectorCreate(comm,glob_size,col);
199 hypre_ParVectorSetDataOwner(x,1);
200 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
201 hypre_SeqVectorSetDataOwner(x_loc,0);
202#if MFEM_HYPRE_VERSION <= 22200
203 hypre_ParVectorSetPartitioningOwner(x,0);
206 hypre_VectorData(x_loc) = &tmp;
207#ifdef HYPRE_USING_GPU
208 hypre_VectorMemoryLocation(x_loc) =
209 is_device_ptr ? HYPRE_MEMORY_DEVICE : HYPRE_MEMORY_HOST;
211 MFEM_CONTRACT_VAR(is_device_ptr);
215 hypre_ParVectorInitialize(x);
217 hypre_VectorData(x_loc) = data_;
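// Illustrative sketch (assumptions marked): wrapping an existing local array
// in a HypreParVector without copying, using the data-pointer constructor
// above. 'col' is the usual 2-entry partitioning array in assumed-partition
// mode; the elided values are placeholders.
//
//    MPI_Comm comm = MPI_COMM_WORLD;
//    HYPRE_BigInt glob_size = ...;      // global true-dof count (elided)
//    HYPRE_BigInt col[2]    = {...};    // [first_dof, last_dof+1] (elided)
//    mfem::real_t *local_data = ...;    // owned by the caller (elided)
//    mfem::HypreParVector v(comm, glob_size, local_data, col);
//    // v aliases local_data; the caller keeps ownership of the array.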
227 "the MemoryTypes of 'base' are incompatible with Hypre!");
228    MFEM_ASSERT(offset + size <= base.Size(),
229                "the size of 'base' is too small!");
233 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
235#ifdef HYPRE_USING_GPU
242 y.CreateCompatibleVector())
245 hypre_SeqVectorCopy(hypre_ParVectorLocalVector(y.x),
246 hypre_ParVectorLocalVector(x));
252    *this = std::move(y);
272 x = (hypre_ParVector *) y;
281 hypre_ParVectorInitialize(x);
282#if MFEM_HYPRE_VERSION <= 22200
283 hypre_ParVectorSetPartitioningOwner(x,0);
286 hypre_ParVectorSetDataOwner(x,1);
287 hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(x),1);
295 result.x = hypre_ParVectorCreate(x -> comm, x -> global_size,
297 hypre_ParVectorInitialize(result.x);
298#if MFEM_HYPRE_VERSION <= 22200
299 hypre_ParVectorSetPartitioningOwner(result.x,0);
301 hypre_ParVectorSetDataOwner(result.x,1);
302 hypre_SeqVectorSetDataOwner(hypre_ParVectorLocalVector(result.x),1);
303 result._SetDataAndSize_();
304 result.own_ParVector = 1;
311 if (own_ParVector) { hypre_ParVectorDestroy(x); }
315 own_ParVector = owner;
320    hypre_Vector *hv = hypre_ParVectorToVectorAll(*this);
321    Vector *v = new Vector(hv->data, internal::to_int(hv->size));
323 hypre_SeqVectorSetDataOwner(hv,0);
324 hypre_SeqVectorDestroy(hv);
351 const auto own_tmp = y.own_ParVector;
353 own_ParVector = own_tmp;
354 const auto x_tmp = y.x;
362 hypre_VectorData(hypre_ParVectorLocalVector(x)) = data_;
368 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
369 hypre_VectorData(x_loc) =
371#ifdef HYPRE_USING_GPU
378 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
380#ifdef HYPRE_USING_GPU
387 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
389#ifdef HYPRE_USING_GPU
400 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
401 hypre_VectorData(x_loc) =
403#ifdef HYPRE_USING_GPU
415 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
417#ifdef HYPRE_USING_GPU
429 hypre_Vector *x_loc = hypre_ParVectorLocalVector(x);
431#ifdef HYPRE_USING_GPU
439 return hypre_ParVectorSetRandomValues(x,seed);
444 hypre_ParVectorPrint(x, fname.c_str());
451 hypre_ParVectorDestroy(x);
454 x = hypre_ParVectorRead(comm, fname.c_str());
455    own_ParVector = true;
463 hypre_ParVectorDestroy(x);
470 return hypre_ParVectorInnerProd(*x, *y);
475 return hypre_ParVectorInnerProd(x, y);
489 real_t loc_norm = vec*vec;
496    for (int i = 0; i < vec.Size(); i++)
498       sum += pow(fabs(vec(i)), p);
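// Sketch of the corresponding user-facing calls (a sketch, not from this
// file): InnerProduct() performs the global reduction via hypre, and
// ParNormlp() implements the p-norm whose local accumulation is shown above.
//
//    mfem::HypreParVector x(...), y(...);          // construction elided
//    mfem::real_t ip = mfem::InnerProduct(x, y);   // global dot product
//    mfem::real_t lp = mfem::ParNormlp(x, 3.0, MPI_COMM_WORLD);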
563template <typename SrcT, typename DstT>
572   mfem::forall(capacity, [=] MFEM_HOST_DEVICE (int i) { dst_p[i] = src_p[i]; });
576void HypreParMatrix::Init()
581 diagOwner = offdOwner = colMapOwner = -1;
591#if MFEM_HYPRE_VERSION >= 21800
592inline decltype(hypre_CSRMatrix::memory_location)
602 decltype(hypre_CSRMatrix::memory_location) ml;
604#if !defined(HYPRE_USING_GPU)
606 ml = HYPRE_MEMORY_HOST;
621 hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
622 hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
623    const int num_rows = NumRows();
624    const int diag_nnz = internal::to_int(diag->num_nonzeros);
625    const int offd_nnz = internal::to_int(offd->num_nonzeros);
626    diag->i = const_cast<HYPRE_Int*>(mem_diag.I.Read(mc, num_rows+1));
627    diag->j = const_cast<HYPRE_Int*>(mem_diag.J.Read(mc, diag_nnz));
628    diag->data = const_cast<real_t*>(mem_diag.data.Read(mc, diag_nnz));
629    offd->i = const_cast<HYPRE_Int*>(mem_offd.I.Read(mc, num_rows+1));
630    offd->j = const_cast<HYPRE_Int*>(mem_offd.J.Read(mc, offd_nnz));
631    offd->data = const_cast<real_t*>(mem_offd.data.Read(mc, offd_nnz));
632#if MFEM_HYPRE_VERSION >= 21800
634 diag->memory_location = ml;
635 offd->memory_location = ml;
641 hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
642 hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
643    const int num_rows = NumRows();
644    const int diag_nnz = internal::to_int(diag->num_nonzeros);
645    const int offd_nnz = internal::to_int(offd->num_nonzeros);
646    diag->i = mem_diag.I.ReadWrite(mc, num_rows+1);
649    offd->i = mem_offd.I.ReadWrite(mc, num_rows+1);
652#if MFEM_HYPRE_VERSION >= 21800
654 diag->memory_location = ml;
655 offd->memory_location = ml;
659void HypreParMatrix::Write(MemoryClass mc, bool set_diag, bool set_offd)
661 hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A);
662 hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A);
675#if MFEM_HYPRE_VERSION >= 21800
677 if (set_diag) { diag->memory_location = ml; }
678 if (set_offd) { offd->memory_location = ml; }
696#if MFEM_HYPRE_VERSION >= 21800
697 MemoryType diag_mt = (A->diag->memory_location == HYPRE_MEMORY_HOST
699 MemoryType offd_mt = (A->offd->memory_location == HYPRE_MEMORY_HOST
705    diagOwner = HypreCsrToMem(A->diag, diag_mt, false, mem_diag);
706    offdOwner = HypreCsrToMem(A->offd, offd_mt, false, mem_offd);
712 hypre_CSRMatrix *hypre_csr,
727    const int num_rows = csr->Height();
729    hypre_csr->i = const_cast<HYPRE_Int*>(mem_csr.I.Read(hypre_mc, num_rows+1));
730    hypre_csr->j = const_cast<HYPRE_Int*>(mem_csr.J.Read(hypre_mc, nnz));
731    hypre_csr->data = const_cast<real_t*>(mem_csr.data.Read(hypre_mc, nnz));
734 "invalid state: host ownership for I and J differ!");
736 (mem_csr.data.OwnsHostPtr() ? 2 : 0);
739signed char HypreParMatrix::CopyBoolCSR(Table *bool_csr,
740 MemoryIJData &mem_csr,
741 hypre_CSRMatrix *hypre_csr)
746    CopyMemory(bool_csr->GetIMemory(), mem_csr.I, hypre_mc, false);
747    CopyMemory(bool_csr->GetJMemory(), mem_csr.J, hypre_mc, false);
753 const int num_rows = bool_csr->Size();
754 const int nnz = bool_csr->Size_of_connections();
757    for (int i = 0; i < nnz; i++)
761    hypre_csr->i = const_cast<HYPRE_Int*>(mem_csr.I.Read(hypre_mc, num_rows+1));
762    hypre_csr->j = const_cast<HYPRE_Int*>(mem_csr.J.Read(hypre_mc, nnz));
763    hypre_csr->data = const_cast<real_t*>(mem_csr.data.Read(hypre_mc, nnz));
765 MFEM_ASSERT(mem_csr.I.OwnsHostPtr() == mem_csr.J.OwnsHostPtr(),
766 "invalid state: host ownership for I and J differ!");
767 return (mem_csr.I.OwnsHostPtr() ? 1 : 0) +
768 (mem_csr.data.OwnsHostPtr() ? 2 : 0);
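// Note on the return value above (and of CopyCSR): it encodes host ownership
// of the wrapped CSR arrays as a 2-bit flag, which is how diagOwner/offdOwner
// are interpreted throughout this file:
//    0 : neither I/J nor data are owned by the MemoryIJData wrappers
//    1 : I and J are owned, data is not
//    2 : data is owned, I and J are not
//    3 : both I/J and data are owned
// A negative owner value indicates arrays managed through hypre's own
// allocation (see HypreCsrToMem() and Destroy()).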
774static void CopyCSR_J(const int nnz, const MemoryIJData &mem_csr,
780   mfem::forall(nnz, [=] MFEM_HOST_DEVICE (int i) { dst_p[i] = src_p[i]; });
785static void SyncBackCSR(SparseMatrix *csr, MemoryIJData &mem_csr)
788    const bool data_shallow = CanShallowCopy(csr->GetMemoryData(), hypre_mc);
790#if !defined(HYPRE_BIGINT) && defined(MFEM_DEBUG)
791    const bool J_shallow = CanShallowCopy(csr->GetMemoryJ(), hypre_mc);
792    MFEM_ASSERT(J_shallow == data_shallow, "unsupported state");
799 csr->GetMemoryJ().Sync(mem_csr.J);
803 CopyCSR_J(csr->GetMemoryJ().Capacity(), mem_csr, csr->GetMemoryJ());
805 csr->GetMemoryData().Sync(mem_csr.data);
810static void SyncBackBoolCSR(Table *bool_csr, MemoryIJData &mem_csr)
813    const bool J_shallow = CanShallowCopy(bool_csr->GetJMemory(), hypre_mc);
818 bool_csr->GetJMemory().Sync(mem_csr.J);
827static int GetPartitioningArraySize(MPI_Comm comm)
829 if (HYPRE_AssumedPartitionCheck())
836 MPI_Comm_size(comm, &comm_size);
837 return comm_size + 1;
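// In hypre's "assumed partition" mode (the default in modern hypre builds)
// each rank only stores its own 2-entry range, so the partitioning arrays
// used below have either size 2 or size comm_size+1, which is exactly what
// this helper returns.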
846static bool RowAndColStartsAreEqual(MPI_Comm comm, const HYPRE_BigInt *rows,
849    const int part_size = GetPartitioningArraySize(comm);
850    bool are_equal = true;
851    for (int i = 0; i < part_size; ++i)
853 if (rows[i] != cols[i])
859 MPI_Allreduce(MPI_IN_PLACE, &are_equal, 1, MFEM_MPI_CXX_BOOL, MPI_LAND, comm);
864signed char HypreParMatrix::HypreCsrToMem(hypre_CSRMatrix *h_mat,
869 const int nr1 = internal::to_int(h_mat->num_rows) + 1;
870 const int nnz = internal::to_int(h_mat->num_nonzeros);
871 mem.I.Wrap(h_mat->i, nr1, h_mat_mt, own_ija);
872 mem.J.Wrap(h_mat->j, nnz, h_mat_mt, own_ija);
873 mem.data.Wrap(h_mat->data, nnz, h_mat_mt, own_ija);
879 h_mem.I.New(nr1, hypre_mt);
880 h_mem.I.CopyFrom(mem.I, nr1);
882 h_mem.J.New(nnz, hypre_mt);
883 h_mem.J.CopyFrom(mem.J, nnz);
885 h_mem.data.New(nnz, hypre_mt);
886 h_mem.data.CopyFrom(mem.data, nnz);
895#if MFEM_HYPRE_VERSION < 21400
896 hypre_TFree(h_mat->i);
897#elif MFEM_HYPRE_VERSION < 21800
898 hypre_TFree(h_mat->i, HYPRE_MEMORY_SHARED);
900 hypre_TFree(h_mat->i, h_mat->memory_location);
902 if (h_mat->owns_data)
904#if MFEM_HYPRE_VERSION < 21400
905 hypre_TFree(h_mat->j);
906 hypre_TFree(h_mat->data);
907#elif MFEM_HYPRE_VERSION < 21800
908 hypre_TFree(h_mat->j, HYPRE_MEMORY_SHARED);
909 hypre_TFree(h_mat->data, HYPRE_MEMORY_SHARED);
911 hypre_TFree(h_mat->j, h_mat->memory_location);
912 hypre_TFree(h_mat->data, h_mat->memory_location);
916 h_mat->i = mem.I.ReadWrite(hypre_mc, nr1);
917 h_mat->j = mem.J.ReadWrite(hypre_mc, nnz);
918 h_mat->data = mem.data.ReadWrite(hypre_mc, nnz);
919 h_mat->owns_data = 0;
920#if MFEM_HYPRE_VERSION >= 21800
931 :
Operator(diag->Height(), diag->Width())
934 A = hypre_ParCSRMatrixCreate(comm, glob_size, glob_size, row_starts,
936 hypre_ParCSRMatrixSetDataOwner(A,1);
937#if MFEM_HYPRE_VERSION <= 22200
938 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
939 hypre_ParCSRMatrixSetColStartsOwner(A,0);
942 hypre_CSRMatrixSetDataOwner(A->diag,0);
943    diagOwner = CopyCSR(diag, mem_diag, A->diag, false);
944    hypre_CSRMatrixSetRownnz(A->diag);
946    hypre_CSRMatrixSetDataOwner(A->offd,1);
947    hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Height()+1);
955 hypre_ParCSRMatrixSetNumNonzeros(A);
959 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
960 SyncBackCSR(diag, mem_diag);
962 hypre_MatvecCommPkgCreate(A);
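// Illustrative sketch (not from this file): this constructor promotes a
// purely local SparseMatrix to a square HypreParMatrix with a given row
// partitioning. The elided values are placeholders.
//
//    MPI_Comm comm = MPI_COMM_WORLD;
//    HYPRE_BigInt glob_size = ...;     // global number of rows (elided)
//    HYPRE_BigInt *row_starts = ...;   // partitioning array (elided)
//    mfem::SparseMatrix diag = ...;    // local diagonal block (elided)
//    mfem::HypreParMatrix A(comm, glob_size, row_starts, &diag);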
972 :
Operator(diag->Height(), diag->Width())
975 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
976 row_starts, col_starts,
978 hypre_ParCSRMatrixSetDataOwner(A,1);
979#if MFEM_HYPRE_VERSION <= 22200
980 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
981 hypre_ParCSRMatrixSetColStartsOwner(A,0);
984 hypre_CSRMatrixSetDataOwner(A->diag,0);
985    diagOwner = CopyCSR(diag, mem_diag, A->diag, false);
986    hypre_CSRMatrixSetRownnz(A->diag);
988    hypre_CSRMatrixSetDataOwner(A->offd,1);
989    hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Height()+1);
992 hypre_ParCSRMatrixSetNumNonzeros(A);
995 if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
998 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
999 SyncBackCSR(diag, mem_diag);
1002 hypre_MatvecCommPkgCreate(A);
1015 :
Operator(diag->Height(), diag->Width())
1018 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
1019 row_starts, col_starts,
1022 hypre_ParCSRMatrixSetDataOwner(A,1);
1023#if MFEM_HYPRE_VERSION <= 22200
1024 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
1025 hypre_ParCSRMatrixSetColStartsOwner(A,0);
1028 hypre_CSRMatrixSetDataOwner(A->diag,0);
1029 diagOwner = CopyCSR(diag, mem_diag, A->diag, own_diag_offd);
1030    if (own_diag_offd) { delete diag; }
1031 hypre_CSRMatrixSetRownnz(A->diag);
1033 hypre_CSRMatrixSetDataOwner(A->offd,0);
1034 offdOwner = CopyCSR(offd, mem_offd, A->offd, own_diag_offd);
1035    if (own_diag_offd) { delete offd; }
1036 hypre_CSRMatrixSetRownnz(A->offd);
1038 hypre_ParCSRMatrixColMapOffd(A) = cmap;
1042 hypre_ParCSRMatrixSetNumNonzeros(A);
1045 if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
1048 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
1050 if (!own_diag_offd) { SyncBackCSR(diag, mem_diag); }
1053 hypre_MatvecCommPkgCreate(A);
1062    HYPRE_Int *diag_i, HYPRE_Int *diag_j, real_t *diag_data,
1063    HYPRE_Int *offd_i, HYPRE_Int *offd_j, real_t *offd_data,
1068 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
1069 row_starts, col_starts, offd_num_cols, 0, 0);
1070 hypre_ParCSRMatrixSetDataOwner(A,1);
1071#if MFEM_HYPRE_VERSION <= 22200
1072 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
1073 hypre_ParCSRMatrixSetColStartsOwner(A,0);
1076 HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A->diag);
1078 hypre_CSRMatrixSetDataOwner(A->diag, hypre_arrays);
1079 hypre_CSRMatrixI(A->diag) = diag_i;
1080 hypre_CSRMatrixJ(A->diag) = diag_j;
1081 hypre_CSRMatrixData(A->diag) = diag_data;
1082 hypre_CSRMatrixNumNonzeros(A->diag) = diag_i[local_num_rows];
1083#ifdef HYPRE_USING_GPU
1084 hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
1087 hypre_CSRMatrixSetDataOwner(A->offd, hypre_arrays);
1088 hypre_CSRMatrixI(A->offd) = offd_i;
1089 hypre_CSRMatrixJ(A->offd) = offd_j;
1090 hypre_CSRMatrixData(A->offd) = offd_data;
1091 hypre_CSRMatrixNumNonzeros(A->offd) = offd_i[local_num_rows];
1092#ifdef HYPRE_USING_GPU
1093 hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
1096 hypre_ParCSRMatrixColMapOffd(A) = offd_col_map;
1098 colMapOwner = hypre_arrays ? -1 : 1;
1100 hypre_ParCSRMatrixSetNumNonzeros(A);
1103 if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
1105 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
1108 hypre_MatvecCommPkgCreate(A);
1116       diagOwner = HypreCsrToMem(A->diag, host_mt, true, mem_diag);
1117       offdOwner = HypreCsrToMem(A->offd, host_mt, true, mem_offd);
1122       diagOwner = HypreCsrToMem(A->diag, host_mt, false, mem_diag);
1123       offdOwner = HypreCsrToMem(A->offd, host_mt, false, mem_offd);
1127 hypre_CSRMatrixSetRownnz(A->diag);
1128 hypre_CSRMatrixSetRownnz(A->offd);
1137    MFEM_ASSERT(sm_a != NULL, "invalid input");
1138 MFEM_VERIFY(!HYPRE_AssumedPartitionCheck(),
1139 "this method can not be used with assumed partition");
1143 hypre_CSRMatrix *csr_a;
1144    csr_a = hypre_CSRMatrixCreate(sm_a -> Height(), sm_a -> Width(),
1145 sm_a -> NumNonZeroElems());
1147 hypre_CSRMatrixSetDataOwner(csr_a,0);
1149    CopyCSR(const_cast<SparseMatrix*>(sm_a), mem_a, csr_a, false);
1150 hypre_CSRMatrixSetRownnz(csr_a);
1154 hypre_ParCSRMatrix *new_A =
1155 hypre_CSRMatrixToParCSRMatrix(comm, csr_a, row_starts, col_starts);
1161 hypre_CSRMatrixI(csr_a) = NULL;
1162 hypre_CSRMatrixDestroy(csr_a);
1165 if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
1167 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(new_A));
1170 hypre_MatvecCommPkgCreate(A);
1185 A = hypre_ParCSRMatrixCreate(comm, global_num_rows, global_num_cols,
1186 row_starts, col_starts, 0, nnz, 0);
1187 hypre_ParCSRMatrixSetDataOwner(A,1);
1188#if MFEM_HYPRE_VERSION <= 22200
1189 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
1190 hypre_ParCSRMatrixSetColStartsOwner(A,0);
1193 hypre_CSRMatrixSetDataOwner(A->diag,0);
1194 diagOwner = CopyBoolCSR(diag, mem_diag, A->diag);
1195 hypre_CSRMatrixSetRownnz(A->diag);
1197 hypre_CSRMatrixSetDataOwner(A->offd,1);
1198    hypre_CSRMatrixI(A->offd) = mfem_hypre_CTAlloc(HYPRE_Int, diag->Size()+1);
1201 hypre_ParCSRMatrixSetNumNonzeros(A);
1204 if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
1207 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
1208 SyncBackBoolCSR(diag, mem_diag);
1211 hypre_MatvecCommPkgCreate(A);
1221 HYPRE_Int *i_diag, HYPRE_Int *j_diag,
1222 HYPRE_Int *i_offd, HYPRE_Int *j_offd,
1225 HYPRE_Int diag_nnz, offd_nnz;
1228 if (HYPRE_AssumedPartitionCheck())
1230 diag_nnz = i_diag[row[1]-row[0]];
1231 offd_nnz = i_offd[row[1]-row[0]];
1233 A = hypre_ParCSRMatrixCreate(comm, row[2], col[2], row, col,
1234 cmap_size, diag_nnz, offd_nnz);
1238       diag_nnz = i_diag[row[id+1]-row[id]];
1239       offd_nnz = i_offd[row[id+1]-row[id]];
1241 A = hypre_ParCSRMatrixCreate(comm, row[np], col[np], row, col,
1242 cmap_size, diag_nnz, offd_nnz);
1245 hypre_ParCSRMatrixSetDataOwner(A,1);
1246#if MFEM_HYPRE_VERSION <= 22200
1247 hypre_ParCSRMatrixSetRowStartsOwner(A,0);
1248 hypre_ParCSRMatrixSetColStartsOwner(A,0);
1252 for (HYPRE_Int i = 0; i < diag_nnz; i++)
1254       mem_diag.data[i] = 1.0;
1258    for (HYPRE_Int i = 0; i < offd_nnz; i++)
1260       mem_offd.data[i] = 1.0;
1263 hypre_CSRMatrixSetDataOwner(A->diag,0);
1264 hypre_CSRMatrixI(A->diag) = i_diag;
1265 hypre_CSRMatrixJ(A->diag) = j_diag;
1266    hypre_CSRMatrixData(A->diag) = mem_diag.data;
1267#ifdef HYPRE_USING_GPU
1268 hypre_CSRMatrixMemoryLocation(A->diag) = HYPRE_MEMORY_HOST;
1271 hypre_CSRMatrixSetDataOwner(A->offd,0);
1272 hypre_CSRMatrixI(A->offd) = i_offd;
1273 hypre_CSRMatrixJ(A->offd) = j_offd;
1274    hypre_CSRMatrixData(A->offd) = mem_offd.data;
1275#ifdef HYPRE_USING_GPU
1276 hypre_CSRMatrixMemoryLocation(A->offd) = HYPRE_MEMORY_HOST;
1279 hypre_ParCSRMatrixColMapOffd(A) = cmap;
1283 hypre_ParCSRMatrixSetNumNonzeros(A);
1288 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
1291 hypre_MatvecCommPkgCreate(A);
1297    diagOwner = HypreCsrToMem(A->diag, host_mt, true, mem_diag);
1298    offdOwner = HypreCsrToMem(A->offd, host_mt, true, mem_offd);
1301 hypre_CSRMatrixSetRownnz(A->diag);
1302 hypre_CSRMatrixSetRownnz(A->offd);
1320 const int part_size = GetPartitioningArraySize(comm);
1322 if (HYPRE_AssumedPartitionCheck())
1324 my_col_start = cols[0];
1325 my_col_end = cols[1];
1330 MPI_Comm_rank(comm, &myid);
1331 my_col_start = cols[myid];
1332 my_col_end = cols[myid+1];
1336 const bool rows_eq_cols = RowAndColStartsAreEqual(comm, rows, cols);
1340       row_starts = col_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
1341       for (int i = 0; i < part_size; i++)
1343          row_starts[i] = rows[i];
1348       row_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
1349       col_starts = mfem_hypre_TAlloc_host(HYPRE_BigInt, part_size);
1350       for (int i = 0; i < part_size; i++)
1352          row_starts[i] = rows[i];
1353          col_starts[i] = cols[i];
1359 HYPRE_Int diag_nnz = 0, offd_nnz = 0, offd_num_cols = 0;
1360 map<HYPRE_BigInt, HYPRE_Int> offd_map;
1361 for (HYPRE_Int j = 0, loc_nnz = I[nrows]; j < loc_nnz; j++)
1364 if (my_col_start <= glob_col && glob_col < my_col_end)
1370 offd_map.insert(pair<const HYPRE_BigInt, HYPRE_Int>(glob_col, -1));
1375    for (auto it = offd_map.begin(); it != offd_map.end(); ++it)
1377 it->second = offd_num_cols++;
1381 A = hypre_ParCSRMatrixCreate(comm, glob_nrows, glob_ncols,
1382 row_starts, col_starts, offd_num_cols,
1383 diag_nnz, offd_nnz);
1384 hypre_ParCSRMatrixInitialize(A);
1390 HYPRE_Int *diag_i, *diag_j, *offd_i, *offd_j;
1392 real_t *diag_data, *offd_data;
1393 diag_i = A->diag->i;
1394 diag_j = A->diag->j;
1395 diag_data = A->diag->data;
1396 offd_i = A->offd->i;
1397 offd_j = A->offd->j;
1398 offd_data = A->offd->data;
1399 offd_col_map = A->col_map_offd;
1401 diag_nnz = offd_nnz = 0;
1402 for (HYPRE_Int i = 0, j = 0; i < nrows; i++)
1404 diag_i[i] = diag_nnz;
1405 offd_i[i] = offd_nnz;
1406 for (HYPRE_Int j_end = I[i+1]; j < j_end; j++)
1409 if (my_col_start <= glob_col && glob_col < my_col_end)
1411 diag_j[diag_nnz] = glob_col - my_col_start;
1412 diag_data[diag_nnz] = data[j];
1417 offd_j[offd_nnz] = offd_map[glob_col];
1418 offd_data[offd_nnz] = data[j];
1423 diag_i[nrows] = diag_nnz;
1424 offd_i[nrows] = offd_nnz;
1425    for (auto it = offd_map.begin(); it != offd_map.end(); ++it)
1427 offd_col_map[it->second] = it->first;
1430 hypre_ParCSRMatrixSetNumNonzeros(A);
1434 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
1436#if MFEM_HYPRE_VERSION > 22200
1437 mfem_hypre_TFree_host(row_starts);
1440 mfem_hypre_TFree_host(col_starts);
1443 hypre_MatvecCommPkgCreate(A);
1453    hypre_ParCSRMatrix *Ph = static_cast<hypre_ParCSRMatrix *>(P);
1458 A = hypre_ParCSRMatrixCompleteClone(Ph);
1460 hypre_ParCSRMatrixCopy(Ph, A, 1);
1468 hypre_ParCSRMatrixSetNumNonzeros(A);
1470 hypre_MatvecCommPkgCreate(A);
1499    MFEM_ASSERT(diagOwner < 0 && offdOwner < 0 && colMapOwner == -1, "");
1500    MFEM_ASSERT(diagOwner == offdOwner, "");
1501    MFEM_ASSERT(ParCSROwner, "");
1502 hypre_ParCSRMatrix *R = A;
1503#ifdef HYPRE_USING_GPU
1510    ParCSROwner = false;
1538 colMapOwner = colmap;
1543#if MFEM_HYPRE_VERSION <= 22200
1544 if (!A || hypre_ParCSRMatrixOwnsRowStarts(A) ||
1545 (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
1546 hypre_ParCSRMatrixOwnsColStarts(A)))
1551 const int row_starts_size = GetPartitioningArraySize(hypre_ParCSRMatrixComm(A));
1553 HYPRE_BigInt *old_row_starts = hypre_ParCSRMatrixRowStarts(A);
1556    for (int i = 0; i < row_starts_size; i++)
1558 new_row_starts[i] = old_row_starts[i];
1561 hypre_ParCSRMatrixRowStarts(A) = new_row_starts;
1562 hypre_ParCSRMatrixOwnsRowStarts(A) = 1;
1564 if (hypre_ParCSRMatrixColStarts(A) == old_row_starts)
1566 hypre_ParCSRMatrixColStarts(A) = new_row_starts;
1567 hypre_ParCSRMatrixOwnsColStarts(A) = 0;
1574#if MFEM_HYPRE_VERSION <= 22200
1575 if (!A || hypre_ParCSRMatrixOwnsColStarts(A) ||
1576 (hypre_ParCSRMatrixRowStarts(A) == hypre_ParCSRMatrixColStarts(A) &&
1577 hypre_ParCSRMatrixOwnsRowStarts(A)))
1582 const int col_starts_size = GetPartitioningArraySize(hypre_ParCSRMatrixComm(A));
1584 HYPRE_BigInt *old_col_starts = hypre_ParCSRMatrixColStarts(A);
1587    for (int i = 0; i < col_starts_size; i++)
1589 new_col_starts[i] = old_col_starts[i];
1592 hypre_ParCSRMatrixColStarts(A) = new_col_starts;
1594 if (hypre_ParCSRMatrixRowStarts(A) == old_col_starts)
1596 hypre_ParCSRMatrixRowStarts(A) = new_col_starts;
1597 hypre_ParCSRMatrixOwnsRowStarts(A) = 1;
1598 hypre_ParCSRMatrixOwnsColStarts(A) = 0;
1602 hypre_ParCSRMatrixOwnsColStarts(A) = 1;
1609    const int size = Height();
1615#if MFEM_HYPRE_VERSION >= 21800
1617 "unexpected HypreParMatrix memory location!");
1619 const HYPRE_Int *A_diag_i = A->diag->i;
1620 const real_t *A_diag_d = A->diag->data;
1622 const HYPRE_Int *A_diag_j = A->diag->j;
1626 diag_hd[i] = A_diag_d[A_diag_i[i]];
1628 A_diag_j[A_diag_i[i]] == i,
1629 "The first entry in each row must be the diagonal one!");
1633static void MakeSparseMatrixWrapper(int nrows, int ncols,
1634                                    HYPRE_Int *I, HYPRE_Int *J, real_t *data,
1638   SparseMatrix tmp(I, J, data, nrows, ncols, false, false, false);
1641   for (int i = 0; i <= nrows; i++)
1643 mI[i] = internal::to_int(I[i]);
1645 const int nnz = mI[nrows];
1646 int *mJ = Memory<int>(nnz);
1647   for (int j = 0; j < nnz; j++)
1649      mJ[j] = internal::to_int(J[j]);
1651   SparseMatrix tmp(mI, mJ, data, nrows, ncols, true, false, false);
1656static void MakeWrapper(const hypre_CSRMatrix *mat,
1657 const MemoryIJData &mem,
1658 SparseMatrix &wrapper)
1660 const int nrows = internal::to_int(hypre_CSRMatrixNumRows(mat));
1661 const int ncols = internal::to_int(hypre_CSRMatrixNumCols(mat));
1662 const int nnz = internal::to_int(mat->num_nonzeros);
1666    MakeSparseMatrixWrapper(nrows, ncols,
1667                            const_cast<HYPRE_Int*>(I),
1668                            const_cast<HYPRE_Int*>(J),
1669                            const_cast<real_t*>(data),
1675 MakeWrapper(A->diag, mem_diag, diag);
1680 MakeWrapper(A->offd, mem_offd, offd);
1681 cmap = A->col_map_offd;
1687 hypre_CSRMatrix *hypre_merged = hypre_MergeDiagAndOffd(A);
1691#if MFEM_HYPRE_VERSION >= 21600
1692 hypre_CSRMatrixBigJtoJ(hypre_merged);
1694 MakeSparseMatrixWrapper(
1695 internal::to_int(hypre_merged->num_rows),
1696 internal::to_int(hypre_merged->num_cols),
1703 merged = merged_tmp;
1705 hypre_CSRMatrixDestroy(hypre_merged);
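// Sketch of typical use of the accessors above (a sketch; 'A' stands for an
// existing HypreParMatrix): GetDiag()/GetOffd() wrap the hypre blocks without
// copying, while MergeDiagAndOffd() produces a standalone local copy of the
// full local rows.
//
//    mfem::SparseMatrix diag, merged;
//    A.GetDiag(diag);             // shallow wrapper of the diagonal block
//    A.MergeDiagAndOffd(merged);  // deep copy: diag + offd in one SparseMatrix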
1709 bool interleaved_rows,
1710                                 bool interleaved_cols) const
1715    hypre_ParCSRMatrix **hypre_blocks = new hypre_ParCSRMatrix*[nr * nc];
1717 internal::hypre_ParCSRMatrixSplit(A, nr, nc, hypre_blocks,
1718 interleaved_rows, interleaved_cols);
1721    for (int i = 0; i < nr; i++)
1723       for (int j = 0; j < nc; j++)
1729 delete [] hypre_blocks;
1734 hypre_ParCSRMatrix * At;
1735 hypre_ParCSRMatrixTranspose(A, &At, 1);
1736 hypre_ParCSRMatrixSetNumNonzeros(At);
1738 if (!hypre_ParCSRMatrixCommPkg(At)) { hypre_MatvecCommPkgCreate(At); }
1744 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(At));
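// Sketch (not from this file): Transpose() allocates a new matrix that the
// caller owns, so the usual pattern is
//
//    mfem::HypreParMatrix *At = A.Transpose();   // A is a HypreParMatrix
//    // ... use At, e.g. At->Mult(x, y) ...
//    delete At;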
1750#if MFEM_HYPRE_VERSION >= 21800
1760 hypre_MatvecCommPkgCreate(A);
1763 hypre_ParCSRMatrix *submat;
1766 int local_num_vars = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A));
1769#ifdef hypre_IntArrayData
1771 hypre_IntArray *CF_marker;
1773 CF_marker = hypre_IntArrayCreate(local_num_vars);
1774 hypre_IntArrayInitialize_v2(CF_marker, HYPRE_MEMORY_HOST);
1775 hypre_IntArraySetConstantValues(CF_marker, 1);
1780    for (int j=0; j<indices.Size(); j++)
1782       if (indices[j] > local_num_vars)
1784          MFEM_WARNING("WARNING : " << indices[j] << " > " << local_num_vars);
1786#ifdef hypre_IntArrayData
1787 hypre_IntArrayData(CF_marker)[indices[j]] = -1;
1789 CF_marker[indices[j]] = -1;
1794#if (MFEM_HYPRE_VERSION > 22300) || (MFEM_HYPRE_VERSION == 22300 && HYPRE_DEVELOP_NUMBER >=8)
1797 hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
1798 CF_marker, NULL, cpts_global);
1801 hypre_BoomerAMGCoarseParms(MPI_COMM_WORLD, local_num_vars, 1, NULL,
1802 CF_marker, NULL, &cpts_global);
1806#ifdef hypre_IntArrayData
1807 hypre_ParCSRMatrixExtractSubmatrixFC(A, hypre_IntArrayData(CF_marker),
1808                                         cpts_global, "FF", &submat,
1811 hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_global,
1812 "FF", &submat, threshold);
1815#if (MFEM_HYPRE_VERSION <= 22300) && !(MFEM_HYPRE_VERSION == 22300 && HYPRE_DEVELOP_NUMBER >=8)
1816 mfem_hypre_TFree(cpts_global);
1818#ifdef hypre_IntArrayData
1819 hypre_IntArrayDestroy(CF_marker);
1830#if (MFEM_HYPRE_VERSION == 22500 && HYPRE_DEVELOP_NUMBER >= 1) || \
1831 (MFEM_HYPRE_VERSION > 22500)
1832#ifdef HYPRE_USING_GPU
1835 hypre_ParCSRMatrixLocalTranspose(A);
1843#if (MFEM_HYPRE_VERSION == 22500 && HYPRE_DEVELOP_NUMBER >= 1) || \
1844 (MFEM_HYPRE_VERSION > 22500)
1845#ifdef HYPRE_USING_GPU
1850 hypre_CSRMatrixDestroy(A->diagT);
1855 hypre_CSRMatrixDestroy(A->offdT);
1868    return hypre_ParCSRMatrixMatvec(a, A, x, b, y);
1873    MFEM_ASSERT(x.Size() == Width(), "invalid x.Size() = " << x.Size()
1874                << ", expected size = " << Width());
1875    MFEM_ASSERT(y.Size() == Height(), "invalid y.Size() = " << y.Size()
1876                << ", expected size = " << Height());
1923    hypre_ParCSRMatrixMatvec(a, A, *X, b, *Y);
1925 if (!yshallow) { y = *Y; }
1931    MFEM_ASSERT(x.Size() == Height(), "invalid x.Size() = " << x.Size()
1932                << ", expected size = " << Height());
1933    MFEM_ASSERT(y.Size() == Width(), "invalid y.Size() = " << y.Size()
1934                << ", expected size = " << Width());
1987    hypre_ParCSRMatrixMatvecT(a, A, *Y, b, *X);
1989 if (!yshallow) { y = *X; }
1995    return hypre_ParCSRMatrixMatvec(a, A, (hypre_ParVector *) x, b,
1996                                    (hypre_ParVector *) y);
2005    return hypre_ParCSRMatrixMatvecT(a, A, x, b, y);
2011    MFEM_ASSERT(x.Size() == Width(), "invalid x.Size() = " << x.Size()
2012                << ", expected size = " << Width());
2013    MFEM_ASSERT(y.Size() == Height(), "invalid y.Size() = " << y.Size()
2014                << ", expected size = " << Height());
2020    internal::hypre_ParCSRMatrixAbsMatvec(A, a, const_cast<real_t*>(x_data),
2028    MFEM_ASSERT(x.Size() == Height(), "invalid x.Size() = " << x.Size()
2029                << ", expected size = " << Height());
2030    MFEM_ASSERT(y.Size() == Width(), "invalid y.Size() = " << y.Size()
2031                << ", expected size = " << Width());
2037    internal::hypre_ParCSRMatrixAbsMatvecT(A, a, const_cast<real_t*>(x_data),
2045 const bool assumed_partition = HYPRE_AssumedPartitionCheck();
2046 const bool row_starts_given = (row_starts != NULL);
2047 if (!row_starts_given)
2049 row_starts = hypre_ParCSRMatrixRowStarts(A);
2050       MFEM_VERIFY(D.Height() == hypre_CSRMatrixNumRows(A->diag),
2051                   "the matrix D is NOT compatible with the row starts of"
2052                   " this HypreParMatrix, row_starts must be given.");
2057    if (assumed_partition)
2063       MPI_Comm_rank(GetComm(), &offset);
2065       int local_num_rows = row_starts[offset+1]-row_starts[offset];
2066       MFEM_VERIFY(local_num_rows == D.Height(), "the number of rows in D is "
2067                   " not compatible with the given row_starts");
2074 if (assumed_partition)
2077 if (row_starts_given)
2079 global_num_rows = row_starts[2];
2086 global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
2091       MPI_Comm_size(GetComm(), &part_size);
2092 global_num_rows = row_starts[part_size];
2096 HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
2102 GetOffd(A_offd, col_map_offd);
2112       DuplicateAs<HYPRE_BigInt>(row_starts, part_size, false);
2114       (row_starts == col_starts ? new_row_starts :
2115        DuplicateAs<HYPRE_BigInt>(col_starts, part_size, false));
2117       DuplicateAs<HYPRE_BigInt>(col_map_offd, A_offd.Width());
2121    const bool own_diag_offd = true;
2126 global_num_rows, hypre_ParCSRMatrixGlobalNumCols(A),
2127 new_row_starts, new_col_starts,
2128 DA_diag, DA_offd, new_col_map_offd,
2131#if MFEM_HYPRE_VERSION <= 22200
2133 hypre_ParCSRMatrixSetRowStartsOwner(DA->A, 1);
2134 hypre_ParCSRMatrixSetColStartsOwner(DA->A, 1);
2136 mfem_hypre_TFree_host(new_row_starts);
2137 mfem_hypre_TFree_host(new_col_starts);
2139 DA->colMapOwner = 1;
2146 if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
2151    if (hypre_CSRMatrixNumRows(A->diag) != diag.Size())
2153       mfem_error("Note the Vector diag is not of compatible dimensions with A\n");
2160 real_t *Adiag_data = hypre_CSRMatrixData(A->diag);
2161 HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
2163 real_t *Aoffd_data = hypre_CSRMatrixData(A->offd);
2164 HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
2167    for (int i(0); i < size; ++i)
2170 for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj)
2172 Adiag_data[jj] *= val;
2174 for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj)
2176 Aoffd_data[jj] *= val;
2185 if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
2190    if (hypre_CSRMatrixNumRows(A->diag) != diag.Size())
2192       mfem_error("Note the Vector diag is not of compatible dimensions with A\n");
2199 real_t *Adiag_data = hypre_CSRMatrixData(A->diag);
2200 HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
2203 real_t *Aoffd_data = hypre_CSRMatrixData(A->offd);
2204 HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
2207    for (int i(0); i < size; ++i)
2212          mfem_error("HypreParMatrix::InvDiagScale : Division by 0");
2216 for (jj = Adiag_i[i]; jj < Adiag_i[i+1]; ++jj)
2218 Adiag_data[jj] *= val;
2220 for (jj = Aoffd_i[i]; jj < Aoffd_i[i+1]; ++jj)
2222 Aoffd_data[jj] *= val;
2231 if (hypre_CSRMatrixNumRows(A->diag) != hypre_CSRMatrixNumRows(A->offd))
2238 HYPRE_Int size=hypre_CSRMatrixNumRows(A->diag);
2241 real_t *Adiag_data = hypre_CSRMatrixData(A->diag);
2242 HYPRE_Int *Adiag_i = hypre_CSRMatrixI(A->diag);
2243 for (jj = 0; jj < Adiag_i[size]; ++jj)
2245 Adiag_data[jj] *= s;
2248 real_t *Aoffd_data = hypre_CSRMatrixData(A->offd);
2249 HYPRE_Int *Aoffd_i = hypre_CSRMatrixI(A->offd);
2250 for (jj = 0; jj < Aoffd_i[size]; ++jj)
2252 Aoffd_data[jj] *= s;
2258static void get_sorted_rows_cols(const Array<int> &rows_cols,
2264   for (int i = 0; i < rows_cols.Size(); i++)
2266      hypre_sorted[i] = rows_cols[i];
2267      if (i && rows_cols[i-1] > rows_cols[i]) { sorted = false; }
2269   if (!sorted) { hypre_sorted.Sort(); }
2277 hypre_CSRMatrix * csr_A;
2278 hypre_CSRMatrix * csr_A_wo_z;
2279 hypre_ParCSRMatrix * parcsr_A_ptr;
2284 comm = hypre_ParCSRMatrixComm(A);
2286 ierr += hypre_ParCSRMatrixGetLocalRange(A,
2287 &row_start,&row_end,
2288 &col_start,&col_end );
2290 row_starts = hypre_ParCSRMatrixRowStarts(A);
2291 col_starts = hypre_ParCSRMatrixColStarts(A);
2293#if MFEM_HYPRE_VERSION <= 22200
2294 bool old_owns_row = hypre_ParCSRMatrixOwnsRowStarts(A);
2295 bool old_owns_col = hypre_ParCSRMatrixOwnsColStarts(A);
2297 HYPRE_BigInt global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A);
2298 HYPRE_BigInt global_num_cols = hypre_ParCSRMatrixGlobalNumCols(A);
2299 parcsr_A_ptr = hypre_ParCSRMatrixCreate(comm, global_num_rows,
2301 row_starts, col_starts,
2303#if MFEM_HYPRE_VERSION <= 22200
2304 hypre_ParCSRMatrixOwnsRowStarts(parcsr_A_ptr) = old_owns_row;
2305 hypre_ParCSRMatrixOwnsColStarts(parcsr_A_ptr) = old_owns_col;
2306 hypre_ParCSRMatrixOwnsRowStarts(A) = 0;
2307 hypre_ParCSRMatrixOwnsColStarts(A) = 0;
2310 csr_A = hypre_MergeDiagAndOffd(A);
2316 csr_A_wo_z = hypre_CSRMatrixDeleteZeros(csr_A,threshold);
2320 if (csr_A_wo_z == NULL)
2326 ierr += hypre_CSRMatrixDestroy(csr_A);
2332 ierr += hypre_GenerateDiagAndOffd(csr_A_wo_z,parcsr_A_ptr,
2335 ierr += hypre_CSRMatrixDestroy(csr_A_wo_z);
2337    MFEM_VERIFY(ierr == 0, "");
2341 hypre_ParCSRMatrixSetNumNonzeros(A);
2343 if (RowAndColStartsAreEqual(comm, row_starts, col_starts))
2345 hypre_CSRMatrixReorder(hypre_ParCSRMatrixDiag(A));
2347 if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
2354 HYPRE_Int old_err = hypre_error_flag;
2355 hypre_error_flag = 0;
2357#if MFEM_HYPRE_VERSION < 21400
2362 HYPRE_Int *diag_I = A->diag->i, *offd_I = A->offd->i;
2363 real_t *diag_d = A->diag->data, *offd_d = A->offd->data;
2364 HYPRE_Int local_num_rows = A->diag->num_rows;
2365 real_t max_l2_row_norm = 0.0;
2367 for (HYPRE_Int r = 0; r < local_num_rows; r++)
2372          l2_row_norm = std::hypot(l2_row_norm, row.Norml2());
2373 max_l2_row_norm = std::max(max_l2_row_norm, l2_row_norm);
2375 real_t loc_max_l2_row_norm = max_l2_row_norm;
2376 MPI_Allreduce(&loc_max_l2_row_norm, &max_l2_row_norm, 1,
2379 threshold = tol * max_l2_row_norm;
2384#elif MFEM_HYPRE_VERSION < 21800
2386 HYPRE_Int err_flag = hypre_ParCSRMatrixDropSmallEntries(A, tol);
2387    MFEM_VERIFY(!err_flag, "error encountered: error code = " << err_flag);
2391    HYPRE_Int err_flag = hypre_ParCSRMatrixDropSmallEntries(A, tol, 2);
2392    MFEM_VERIFY(!err_flag, "error encountered: error code = " << err_flag);
2396 hypre_error_flag = old_err;
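// Sketch of the intended use (a sketch, not from this file): Threshold()
// drops stored entries whose magnitude falls below the tolerance (scaled by
// the largest row l2-norm in the legacy code path above), compressing the
// sparsity pattern in place.
//
//    A.Threshold(1e-12);   // A is a HypreParMatrix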
2404 get_sorted_rows_cols(rows_cols, rc_sorted);
2406 internal::hypre_ParCSRMatrixEliminateAXB(
2413 get_sorted_rows_cols(rows_cols, rc_sorted);
2415 hypre_ParCSRMatrix* Ae;
2417 internal::hypre_ParCSRMatrixEliminateAAe(
2427 get_sorted_rows_cols(cols, rc_sorted);
2429 hypre_ParCSRMatrix* Ae;
2431 internal::hypre_ParCSRMatrixEliminateAAe(
2432       A, &Ae, rc_sorted.Size(), rc_sorted.GetData(), 1);
2440    if (rows.Size() > 0)
2443       get_sorted_rows_cols(rows, r_sorted);
2445       internal::hypre_ParCSRMatrixEliminateRows(A, r_sorted.Size(),
2456    Ae.Mult(-1.0, x, 1.0, b);
2460    if (ess_dof_list.Size() == 0) { return; }
2463 hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
2464 real_t *data = hypre_CSRMatrixData(A_diag);
2465 HYPRE_Int *I = hypre_CSRMatrixI(A_diag);
2467 HYPRE_Int *J = hypre_CSRMatrixJ(A_diag);
2468 hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
2469 HYPRE_Int *I_offd = hypre_CSRMatrixI(A_offd);
2470 real_t *data_offd = hypre_CSRMatrixData(A_offd);
2477    for (int i = 0; i < ess_dof_list.Size(); i++)
2479       int r = ess_dof_list[i];
2480       b(r) = data[I[r]] * x(r);
2482       MFEM_ASSERT(I[r] < I[r+1], "empty row found!");
2488          MFEM_ABORT("the diagonal entry must be the first entry in the row!");
2490       for (int j = I[r]+1; j < I[r+1]; j++)
2494             MFEM_ABORT("all off-diagonal entries must be zero!");
2497       for (int j = I_offd[r]; j < I_offd[r+1]; j++)
2499          if (data_offd[j] != 0.0)
2501             MFEM_ABORT("all off-diagonal entries must be zero!");
2512    hypre_ParCSRMatrix *A_hypre = *this;
2515 hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(A_hypre);
2516 hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(A_hypre);
2518 HYPRE_Int diag_nrows = hypre_CSRMatrixNumRows(diag);
2519 HYPRE_Int offd_ncols = hypre_CSRMatrixNumCols(offd);
2521    const int n_ess_dofs = ess_dofs.Size();
2522    const auto ess_dofs_d = ess_dofs.GetMemory().Read(
2527 hypre_ParCSRCommHandle *comm_handle;
2528 HYPRE_Int *int_buf_data, *eliminate_row, *eliminate_col;
2530 eliminate_row = mfem_hypre_CTAlloc(HYPRE_Int, diag_nrows);
2531 eliminate_col = mfem_hypre_CTAlloc(HYPRE_Int, offd_ncols);
2534 hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A_hypre);
2537 hypre_MatvecCommPkgCreate(A_hypre);
2538 comm_pkg = hypre_ParCSRMatrixCommPkg(A_hypre);
2544 eliminate_row[i] = 0;
2548 eliminate_row[ess_dofs_d[i]] = 1;
2554 HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
2555 HYPRE_Int int_buf_sz = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
2556 int_buf_data = mfem_hypre_CTAlloc(HYPRE_Int, int_buf_sz);
2558 HYPRE_Int *send_map_elmts;
2559#if defined(HYPRE_USING_GPU)
2562 hypre_ParCSRCommPkgCopySendMapElmtsToDevice(comm_pkg);
2563 send_map_elmts = hypre_ParCSRCommPkgDeviceSendMapElmts(comm_pkg);
2568 send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
2572 int k = send_map_elmts[i];
2573 int_buf_data[i] = eliminate_row[k];
2576#if defined(HYPRE_USING_GPU)
2579#if defined(HYPRE_WITH_GPU_AWARE_MPI) || defined(HYPRE_USING_GPU_AWARE_MPI)
2583#if MFEM_HYPRE_VERSION >= 23300
2584 if (hypre_GetGpuAwareMPI())
2592 comm_handle = hypre_ParCSRCommHandleCreate_v2(
2593 11, comm_pkg, HYPRE_MEMORY_DEVICE, int_buf_data,
2594 HYPRE_MEMORY_DEVICE, eliminate_col);
2599 comm_handle = hypre_ParCSRCommHandleCreate(
2600 11, comm_pkg, int_buf_data, eliminate_col );
2606 const auto I = diag->i;
2607 const auto J = diag->j;
2608 auto data = diag->data;
2612 const int idof = ess_dofs_d[i];
2613       for (auto j=I[idof]; j<I[idof+1]; ++j)
2615          const auto jdof = J[j];
2631             for (auto k=I[jdof]; k<I[jdof+1]; ++k)
2646 const auto I = offd->i;
2647 auto data = offd->data;
2650 const int idof = ess_dofs_d[i];
2651       for (auto j=I[idof]; j<I[idof+1]; ++j)
2659 hypre_ParCSRCommHandleDestroy(comm_handle);
2660 mfem_hypre_TFree(int_buf_data);
2661 mfem_hypre_TFree(eliminate_row);
2665 const int nrows_offd = hypre_CSRMatrixNumRows(offd);
2666 const auto I = offd->i;
2667 const auto J = offd->j;
2668 auto data = offd->data;
2671       for (auto j=I[i]; j<I[i+1]; ++j)
2673 data[j] *= 1 - eliminate_col[J[j]];
2678 mfem_hypre_TFree(eliminate_col);
2682                                HYPRE_Int offj) const
2685 hypre_ParCSRMatrixPrintIJ(A, offi, offj, fname.c_str());
2689void HypreParMatrix::Read(MPI_Comm comm, const std::string &fname)
2691 HYPRE_ParCSRMatrix A_parcsr;
2692 HYPRE_Int base_i, base_j;
2693 hypre_ParCSRMatrixReadIJ(comm, fname.c_str(), &base_i, &base_j, &A_parcsr);
2697 hypre_ParCSRMatrixSetNumNonzeros(A);
2698 if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
2703 HYPRE_IJMatrix A_ij;
2704 HYPRE_IJMatrixRead(fname.c_str(), comm, 5555, &A_ij);
2706 HYPRE_ParCSRMatrix A_parcsr;
2707    HYPRE_IJMatrixGetObject(A_ij, (void**) &A_parcsr);
2711 hypre_ParCSRMatrixSetNumNonzeros(A);
2712 if (!hypre_ParCSRMatrixCommPkg(A)) { hypre_MatvecCommPkgCreate(A); }
2717 hypre_ParCSRCommPkg *comm_pkg = A->comm_pkg;
2718 MPI_Comm comm = A->comm;
2720 const int tag = 46801;
2722 MPI_Comm_rank(comm, &myid);
2723 MPI_Comm_size(comm, &nproc);
2727 MPI_Recv(&c, 1, MPI_CHAR, myid-1, tag, comm, MPI_STATUS_IGNORE);
2731       os << "\nHypreParMatrix: hypre_ParCSRCommPkg:\n";
2733    os << "Rank " << myid << ":\n"
2734       " number of sends = " << comm_pkg->num_sends <<
2735       " (" << sizeof(real_t)*comm_pkg->send_map_starts[comm_pkg->num_sends] <<
2737       " number of recvs = " << comm_pkg->num_recvs <<
2738       " (" << sizeof(real_t)*comm_pkg->recv_vec_starts[comm_pkg->num_recvs] <<
2740 if (myid != nproc-1)
2743 MPI_Send(&c, 1, MPI_CHAR, myid+1, tag, comm);
2756    os << "global number of rows : " << A->global_num_rows << '\n'
2757       << "global number of columns : " << A->global_num_cols << '\n'
2758       << "first row index : " << A->first_row_index << '\n'
2759       << " last row index : " << A->last_row_index << '\n'
2760       << "first col diag : " << A->first_col_diag << '\n'
2761       << " last col diag : " << A->last_col_diag << '\n'
2762       << "number of nonzeros : " << A->num_nonzeros << '\n';
2764    hypre_CSRMatrix *csr = A->diag;
2765    const char *csr_name = "diag";
2766    for (int m = 0; m < 2; m++)
2768 auto csr_nnz = csr->i[csr->num_rows];
2769       os << csr_name << " num rows : " << csr->num_rows << '\n'
2770          << csr_name << " num cols : " << csr->num_cols << '\n'
2771          << csr_name << " num nnz : " << csr->num_nonzeros << '\n'
2772          << csr_name << " i last : " << csr_nnz
2773          << (csr_nnz == csr->num_nonzeros ?
2774              " [good]" : " [** BAD **]") << '\n';
2776       os << csr_name << " i hash : " << hf.GetHash() << '\n';
2777       os << csr_name << " j hash : ";
2778       if (csr->j == nullptr)
2787#if MFEM_HYPRE_VERSION >= 21600
2788       os << csr_name << " big j hash : ";
2789       if (csr->big_j == nullptr)
2799       os << csr_name << " data hash : ";
2800       if (csr->data == nullptr)
2814    hf.AppendInts(A->col_map_offd, A->offd->num_cols);
2815    os << "col map offd hash : " << hf.GetHash() << '\n';
2822#if MFEM_HYPRE_VERSION >= 21900
2824 const int ierr = hypre_ParCSRMatrixNormFro(A, &norm_fro);
2825    MFEM_VERIFY(ierr == 0, "");
2832 Vector Avec_diag(A->diag->data, A->diag->num_nonzeros);
2834 Vector Avec_offd(A->offd->data, A->offd->num_nonzeros);
2837 MPI_SUM, hypre_ParCSRMatrixComm(A));
2838 norm_fro = sqrt(normsqr_fro);
2847 HYPRE_BigInt *A_col_map_offd = hypre_ParCSRMatrixColMapOffd(A);
2848 int size = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A));
2852void HypreParMatrix::Destroy()
2854    if ( X != NULL ) { delete X; }
2855    if ( Y != NULL ) { delete Y; }
2859    if (A == NULL) { return; }
2861#ifdef HYPRE_USING_GPU
2862    if (HypreUsingGPU() && ParCSROwner && (diagOwner < 0 || offdOwner < 0))
2870 MFEM_VERIFY(!(diagOwner < 0 && offdOwner < 0) || diagOwner == offdOwner,
2873 MemoryClass mc = (diagOwner == -1 || offdOwner == -1) ?
2875 Write(mc, diagOwner < 0, offdOwner <0);
2884 hypre_CSRMatrixI(A->diag) = NULL;
2885 hypre_CSRMatrixJ(A->diag) = NULL;
2886 hypre_CSRMatrixData(A->diag) = NULL;
2893 hypre_CSRMatrixI(A->offd) = NULL;
2894 hypre_CSRMatrixJ(A->offd) = NULL;
2895 hypre_CSRMatrixData(A->offd) = NULL;
2897 if (colMapOwner >= 0)
2899 if (colMapOwner & 1)
2903 hypre_ParCSRMatrixColMapOffd(A) = NULL;
2908 hypre_ParCSRMatrixDestroy(A);
2917 MFEM_CONTRACT_VAR(own_j);
2918    MFEM_ASSERT(own_i == own_j, "Inconsistent ownership");
2932#if MFEM_HYPRE_VERSION >= 21800
2941 hypre_ParCSRMatrix *C_hypre;
2942 hypre_ParcsrBdiagInvScal(*A, blocksize, &C_hypre);
2943 hypre_ParCSRMatrixDropSmallEntries(C_hypre, 1e-15, 1);
2953 hypre_ParVector *d_hypre;
2954 hypre_ParvecBdiagInvScal(b_Hypre, blocksize, &d_hypre, *A);
2962#if MFEM_HYPRE_VERSION < 21400
2967 hypre_ParCSRMatrix *C_hypre =
2970    MFEM_VERIFY(C_hypre, "error in hypre_ParCSRMatrixAdd");
2972 if (!hypre_ParCSRMatrixCommPkg(C_hypre)) { hypre_MatvecCommPkgCreate(C_hypre); }
2983 hypre_ParCSRMatrix * C = internal::hypre_ParCSRMatrixAdd(*A,*B);
2985 if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }
2993                     real_t beta, const HypreParMatrix &B)
2995 hypre_ParCSRMatrix *C;
2996#if MFEM_HYPRE_VERSION <= 22000
2997    hypre_ParcsrAdd(alpha, A, beta, B, &C);
2999    hypre_ParCSRMatrixAdd(alpha, A, beta, B, &C);
3001 if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }
3003 return new HypreParMatrix(C);
3006HypreParMatrix * ParAdd(const HypreParMatrix *A, const HypreParMatrix *B)
3008 hypre_ParCSRMatrix *C;
3009#if MFEM_HYPRE_VERSION <= 22000
3010 hypre_ParcsrAdd(1.0, *A, 1.0, *B, &C);
3012 hypre_ParCSRMatrixAdd(1.0, *A, 1.0, *B, &C);
3014 if (!hypre_ParCSRMatrixCommPkg(C)) { hypre_MatvecCommPkgCreate(C); }
3016 return new HypreParMatrix(C);
3024 hypre_ParCSRMatrix * ab;
3025#ifdef HYPRE_USING_GPU
3028 ab = hypre_ParCSRMatMat(*A, *B);
3033 ab = hypre_ParMatmul(*A,*B);
3035 hypre_ParCSRMatrixSetNumNonzeros(ab);
3037 if (!hypre_ParCSRMatrixCommPkg(ab)) { hypre_MatvecCommPkgCreate(ab); }
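// Sketch of the free-function interface implemented here (not from this
// file; A and B are HypreParMatrix pointers):
//
//    mfem::HypreParMatrix *AB = mfem::ParMult(A, B);          // A * B
//    mfem::HypreParMatrix *S  = mfem::ParAdd(A, B);           // A + B
//    mfem::HypreParMatrix *L  = mfem::Add(2.0, *A, 1.0, *B);  // 2A + B
//    // the caller owns and must delete the returned matrices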
3049 hypre_ParCSRMatrix * rap;
3051#ifdef HYPRE_USING_GPU
3060 hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A,*P);
3061    const bool keepTranspose = false;
3062 rap = hypre_ParCSRTMatMatKT(*P,Q,keepTranspose);
3063 hypre_ParCSRMatrixDestroy(Q);
3071#if MFEM_HYPRE_VERSION <= 22200
3072 HYPRE_Int P_owns_its_col_starts =
3073 hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
3076 hypre_BoomerAMGBuildCoarseOperator(*P,*A,*P,&rap);
3078#if MFEM_HYPRE_VERSION <= 22200
3081 hypre_ParCSRMatrixSetRowStartsOwner(rap,0);
3082 hypre_ParCSRMatrixSetColStartsOwner(rap,0);
3083 if (P_owns_its_col_starts)
3085 hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
3090 hypre_ParCSRMatrixSetNumNonzeros(rap);
3099 hypre_ParCSRMatrix * rap;
3101#ifdef HYPRE_USING_GPU
3104 hypre_ParCSRMatrix *Q = hypre_ParCSRMatMat(*A,*P);
3105 rap = hypre_ParCSRTMatMat(*Rt,Q);
3106 hypre_ParCSRMatrixDestroy(Q);
3111#if MFEM_HYPRE_VERSION <= 22200
3112 HYPRE_Int P_owns_its_col_starts =
3113 hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*P));
3114 HYPRE_Int Rt_owns_its_col_starts =
3115 hypre_ParCSRMatrixOwnsColStarts((hypre_ParCSRMatrix*)(*Rt));
3118 hypre_BoomerAMGBuildCoarseOperator(*Rt,*A,*P,&rap);
3120#if MFEM_HYPRE_VERSION <= 22200
3123 hypre_ParCSRMatrixSetRowStartsOwner(rap,0);
3124 hypre_ParCSRMatrixSetColStartsOwner(rap,0);
3125 if (P_owns_its_col_starts)
3127 hypre_ParCSRMatrixSetColStartsOwner(*P, 1);
3129 if (Rt_owns_its_col_starts)
3131 hypre_ParCSRMatrixSetColStartsOwner(*Rt, 1);
3136 hypre_ParCSRMatrixSetNumNonzeros(rap);
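// Sketch of typical coarse-operator assembly with the two RAP() overloads
// defined above (a sketch; A, P, Rt are HypreParMatrix pointers):
//
//    mfem::HypreParMatrix *Ac  = mfem::RAP(A, P);      // P^t A P
//    mfem::HypreParMatrix *Ac2 = mfem::RAP(Rt, A, P);  // Rt^t A P
//    // both results are owned by the caller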
3145                                     const int num_loc, const Array<int> &offsets,
3146                                     std::vector<int> &all_num_loc, const int numBlocks,
3147 std::vector<std::vector<HYPRE_BigInt>> &blockProcOffsets,
3148 std::vector<HYPRE_BigInt> &procOffsets,
3149 std::vector<std::vector<int>> &procBlockOffsets,
3152 std::vector<std::vector<int>> all_block_num_loc(numBlocks);
3154    MPI_Allgather(const_cast<int*>(&num_loc), 1, MPI_INT, all_num_loc.data(), 1,
3157    for (int j = 0; j < numBlocks; ++j)
3159 all_block_num_loc[j].resize(nprocs);
3160 blockProcOffsets[j].resize(nprocs);
3162 const int blockNumRows = offsets[j + 1] - offsets[j];
3163       MPI_Allgather(const_cast<int*>(&blockNumRows), 1, MPI_INT,
3164                     all_block_num_loc[j].data(), 1,
3166       blockProcOffsets[j][0] = 0;
3167       for (int i = 0; i < nprocs - 1; ++i)
3169 blockProcOffsets[j][i + 1] = blockProcOffsets[j][i]
3170 + all_block_num_loc[j][i];
3177    for (int i = 0; i < nprocs; ++i)
3179       globalNum += all_num_loc[i];
3180       MFEM_VERIFY(globalNum >= 0, "overflow in global size");
3183 firstLocal += all_num_loc[i];
3188 procOffsets[i + 1] = procOffsets[i] + all_num_loc[i];
3191 procBlockOffsets[i].resize(numBlocks);
3192 procBlockOffsets[i][0] = 0;
3193       for (int j = 1; j < numBlocks; ++j)
3195 procBlockOffsets[i][j] = procBlockOffsets[i][j - 1]
3196 + all_block_num_loc[j - 1][i];
3204    const int numBlockRows = blocks.NumRows();
3205    const int numBlockCols = blocks.NumCols();
3207    MFEM_VERIFY(numBlockRows > 0 &&
3208                numBlockCols > 0, "Invalid input to HypreParMatrixFromBlocks");
3210    if (blockCoeff != NULL)
3212       MFEM_VERIFY(numBlockRows == blockCoeff->NumRows() &&
3213                   numBlockCols == blockCoeff->NumCols(),
3214                   "Invalid input to HypreParMatrixFromBlocks");
3220 int nonNullBlockRow0 = -1;
3221    for (int j=0; j<numBlockCols; ++j)
3223       if (blocks(0,j) != NULL)
3225          nonNullBlockRow0 = j;
3230    MFEM_VERIFY(nonNullBlockRow0 >= 0, "Null row of blocks");
3231 MPI_Comm comm = blocks(0,nonNullBlockRow0)->GetComm();
3236    for (int i=0; i<numBlockRows; ++i)
3238       for (int j=0; j<numBlockCols; ++j)
3240          if (blocks(i,j) != NULL)
3242             const int nrows = blocks(i,j)->NumRows();
3243             const int ncols = blocks(i,j)->NumCols();
3245 if (rowOffsets[i+1] == 0)
3247 rowOffsets[i+1] = nrows;
3251 MFEM_VERIFY(rowOffsets[i+1] == nrows,
3252 "Inconsistent blocks in HypreParMatrixFromBlocks");
3255 if (colOffsets[j+1] == 0)
3257 colOffsets[j+1] = ncols;
3261 MFEM_VERIFY(colOffsets[j+1] == ncols,
3262 "Inconsistent blocks in HypreParMatrixFromBlocks");
3266 rowOffsets[i+1] += rowOffsets[i];
3269    for (int j=0; j<numBlockCols; ++j)
3271 colOffsets[j+1] += colOffsets[j];
3274 const int num_loc_rows = rowOffsets[numBlockRows];
3275 const int num_loc_cols = colOffsets[numBlockCols];
3278 MPI_Comm_rank(comm, &rank);
3279 MPI_Comm_size(comm, &nprocs);
3281 std::vector<int> all_num_loc_rows(nprocs);
3282 std::vector<int> all_num_loc_cols(nprocs);
3283 std::vector<HYPRE_BigInt> procRowOffsets(nprocs);
3284 std::vector<HYPRE_BigInt> procColOffsets(nprocs);
3285 std::vector<std::vector<HYPRE_BigInt>> blockRowProcOffsets(numBlockRows);
3286 std::vector<std::vector<HYPRE_BigInt>> blockColProcOffsets(numBlockCols);
3287 std::vector<std::vector<int>> procBlockRowOffsets(nprocs);
3288 std::vector<std::vector<int>> procBlockColOffsets(nprocs);
3290 HYPRE_BigInt first_loc_row, glob_nrows, first_loc_col, glob_ncols;
3292 all_num_loc_rows, numBlockRows, blockRowProcOffsets,
3293 procRowOffsets, procBlockRowOffsets, first_loc_row,
3297 all_num_loc_cols, numBlockCols, blockColProcOffsets,
3298 procColOffsets, procBlockColOffsets, first_loc_col,
3301 std::vector<int> opI(num_loc_rows + 1);
3302 std::vector<int> cnt(num_loc_rows);
3304    for (int i = 0; i < num_loc_rows; ++i)
3310 opI[num_loc_rows] = 0;
3315    for (int i = 0; i < numBlockRows; ++i)
3317       for (int j = 0; j < numBlockCols; ++j)
3319 if (blocks(i, j) == NULL)
3321 csr_blocks(i, j) = NULL;
3325 blocks(i, j)->HostRead();
3326 csr_blocks(i, j) = hypre_MergeDiagAndOffd(*blocks(i, j));
3327 blocks(i, j)->HypreRead();
3329          for (int k = 0; k < csr_blocks(i, j)->num_rows; ++k)
3331 opI[rowOffsets[i] + k + 1] +=
3332 csr_blocks(i, j)->i[k + 1] - csr_blocks(i, j)->i[k];
3339    for (int i = 0; i < num_loc_rows; ++i)
3341 opI[i + 1] += opI[i];
3344 const int nnz = opI[num_loc_rows];
3346 std::vector<HYPRE_BigInt> opJ(nnz);
3347 std::vector<real_t> data(nnz);
3350    for (int i = 0; i < numBlockRows; ++i)
3352       for (int j = 0; j < numBlockCols; ++j)
3354 if (csr_blocks(i, j) != NULL)
3356 const int nrows = csr_blocks(i, j)->num_rows;
3357 const real_t cij = blockCoeff ? (*blockCoeff)(i, j) : 1.0;
3358#if MFEM_HYPRE_VERSION >= 21600
3359 const bool usingBigJ = (csr_blocks(i, j)->big_j != NULL);
3362          for (int k = 0; k < nrows; ++k)
3364 const int rowg = rowOffsets[i] + k;
3365 const int nnz_k = csr_blocks(i,j)->i[k+1]-csr_blocks(i,j)->i[k];
3366 const int osk = csr_blocks(i, j)->i[k];
3368             for (int l = 0; l < nnz_k; ++l)
3371#if MFEM_HYPRE_VERSION >= 21600
3372 const HYPRE_Int bcol = usingBigJ ?
3373 csr_blocks(i, j)->big_j[osk + l] :
3374 csr_blocks(i, j)->j[osk + l];
3376 const HYPRE_Int bcol = csr_blocks(i, j)->j[osk + l];
3380 const auto &offs = blockColProcOffsets[j];
3381 const int bcolproc =
3382 std::upper_bound(offs.begin() + 1, offs.end(), bcol)
3385 opJ[opI[rowg] + cnt[rowg]] = procColOffsets[bcolproc] +
3386 procBlockColOffsets[bcolproc][j]
3388 - blockColProcOffsets[j][bcolproc];
3389 data[opI[rowg] + cnt[rowg]] = cij * csr_blocks(i, j)->data[osk + l];
3397    for (int i = 0; i < numBlockRows; ++i)
3399       for (int j = 0; j < numBlockCols; ++j)
3401 if (csr_blocks(i, j) != NULL)
3403 hypre_CSRMatrixDestroy(csr_blocks(i, j));
3408 MFEM_VERIFY(HYPRE_AssumedPartitionCheck(),
3409 "only 'assumed partition' mode is supported");
3411 std::vector<HYPRE_BigInt> rowStarts2(2);
3412 rowStarts2[0] = first_loc_row;
3413 rowStarts2[1] = first_loc_row + all_num_loc_rows[rank];
3415 int square = std::equal(all_num_loc_rows.begin(), all_num_loc_rows.end(),
3416 all_num_loc_cols.begin());
3419 return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
3420 opI.data(), opJ.data(),
3427 std::vector<HYPRE_BigInt> colStarts2(2);
3428 colStarts2[0] = first_loc_col;
3429 colStarts2[1] = first_loc_col + all_num_loc_cols[rank];
3431 return new HypreParMatrix(comm, num_loc_rows, glob_nrows, glob_ncols,
3432 opI.data(), opJ.data(),
3443    for (int i = 0; i < blocks.NumRows(); ++i)
3445       for (int j = 0; j < blocks.NumCols(); ++j)
3447 constBlocks(i, j) = blocks(i, j);
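// Illustrative sketch (assumptions marked): assembling a 2x2 block operator,
// e.g. a saddle-point system, with HypreParMatrixFromBlocks(). A NULL entry
// stands for a zero block; A, Bt, B are assumed to exist.
//
//    mfem::Array2D<mfem::HypreParMatrix*> blocks(2, 2);
//    blocks(0,0) = A;   blocks(0,1) = Bt;
//    blocks(1,0) = B;   blocks(1,1) = NULL;
//    mfem::HypreParMatrix *M = mfem::HypreParMatrixFromBlocks(blocks);
//    // ... use M ..., then delete M;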
3473 hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
3474 HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
3476    real_t *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
3477 real_t *r_data = hypre_VectorData(hypre_ParVectorLocalVector(r));
3479    for (int i = 0; i < N; i++)
3482       hypre_ParVectorCopy(f, r);
3483       hypre_ParCSRMatrixMatvec(-1.0, A, u, 1.0, r);
3486       (0 == (i % 2)) ? coef = lambda : coef = mu;
3488 for (HYPRE_Int j = 0; j < num_rows; j++)
3490 u_data[j] += coef*r_data[j] / max_eig;
3506 hypre_ParVector *x0,
3507 hypre_ParVector *x1,
3508 hypre_ParVector *x2,
3509 hypre_ParVector *x3)
3512 hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
3513 HYPRE_Int num_rows = hypre_CSRMatrixNumRows(A_diag);
3515    real_t *u_data = hypre_VectorData(hypre_ParVectorLocalVector(u));
3517 real_t *x0_data = hypre_VectorData(hypre_ParVectorLocalVector(x0));
3518 real_t *x1_data = hypre_VectorData(hypre_ParVectorLocalVector(x1));
3519 real_t *x2_data = hypre_VectorData(hypre_ParVectorLocalVector(x2));
3520 real_t *x3_data = hypre_VectorData(hypre_ParVectorLocalVector(x3));
3522    hypre_ParVectorCopy(u, x0);
3525    hypre_ParVectorCopy(f, x1);
3526 hypre_ParCSRMatrixMatvec(-1.0, A, x0, 1.0, x1);
3528 for (HYPRE_Int i = 0; i < num_rows; i++)
3530 x1_data[i] /= -max_eig;
3534 for (HYPRE_Int i = 0; i < num_rows; i++)
3536 x1_data[i] = x0_data[i] -x1_data[i];
3540 for (HYPRE_Int i = 0; i < num_rows; i++)
3542 x3_data[i] = fir_coeffs[0]*x0_data[i] +fir_coeffs[1]*x1_data[i];
3545    for (int n = 2; n <= poly_order; n++)
3548       hypre_ParVectorCopy(f, x2);
3549 hypre_ParCSRMatrixMatvec(-1.0, A, x1, 1.0, x2);
3551 for (HYPRE_Int i = 0; i < num_rows; i++)
3553 x2_data[i] /= -max_eig;
3561 for (HYPRE_Int i = 0; i < num_rows; i++)
3563 x2_data[i] = (x1_data[i]-x0_data[i]) +(x1_data[i]-2*x2_data[i]);
3564 x3_data[i] += fir_coeffs[n]*x2_data[i];
3565 x0_data[i] = x1_data[i];
3566 x1_data[i] = x2_data[i];
3570 for (HYPRE_Int i = 0; i < num_rows; i++)
3572 u_data[i] = x3_data[i];
3593    B = X = V = Z = NULL;
3601                             int relax_times_, real_t relax_weight_,
3602                             real_t omega_, int poly_order_,
3603                             real_t poly_fraction_, int eig_est_cg_iter_)
3615    B = X = V = Z = NULL;
3626    type = static_cast<int>(type_);
3637 int eig_est_cg_iter_)
3655    if (!strcmp(name, "Rectangular")) { a = 1.0, b = 0.0, c = 0.0; }
3656    if (!strcmp(name, "Hanning")) { a = 0.5, b = 0.5, c = 0.0; }
3657    if (!strcmp(name, "Hamming")) { a = 0.54, b = 0.46, c = 0.0; }
3658    if (!strcmp(name, "Blackman")) { a = 0.42, b = 0.50, c = 0.08; }
3661       mfem_error("HypreSmoother::SetWindowByName : name not recognized!");
3679       mfem_error("HypreSmoother::SetOperator : not HypreParMatrix!");
3686    if (B) { delete B; }
3687    if (X) { delete X; }
3688    if (V) { delete V; }
3689    if (Z) { delete Z; }
3711       A->Mult(ones, diag);
3722 d_l1_norms[i] = std::abs(d_l1_norms[i]);
3726#if MFEM_HYPRE_VERSION < 22100
3738#elif defined(HYPRE_USING_GPU)
3768#if MFEM_HYPRE_VERSION <= 22200
3777    else if (type == 1001 || type == 1002)
3787#if MFEM_HYPRE_VERSION <= 22200
3829       window_coeffs[i] = a + b*cos(t) +c*cos(2*t);
3833    real_t theta_pb = acos(1.0 -0.5*k_pb);
3835    cheby_coeffs[0] = (theta_pb + sigma)/M_PI;
3839 cheby_coeffs[i] = 2.0*sin(t)/(i*M_PI);
3844 fir_coeffs[i] = window_coeffs[i]*cheby_coeffs[i];
3847 delete[] window_coeffs;
3848 delete[] cheby_coeffs;
3855       mfem_error("HypreSmoother::Mult (...) : HypreParMatrix A is missing");
3868          HYPRE_ParCSRDiagScale(NULL, *A, b, x);
3875 hypre_ParVectorSetConstantValues(x, 0.0);
3896    else if (type == 1002)
3910       int hypre_type = type;
3912       if (type == 5) { hypre_type = 1; }
3916          hypre_ParCSRRelax(*A, b, hypre_type,
3923          hypre_ParCSRRelax(*A, b, hypre_type,
3933    MFEM_ASSERT(b.Size() == NumCols(), "");
3938       mfem_error("HypreSmoother::Mult (...) : HypreParMatrix A is missing");
3945 A -> GetGlobalNumRows(),
3947 A -> GetRowStarts());
3949 A -> GetGlobalNumCols(),
3951 A -> GetColStarts());
3989    if (!xshallow) { x = *X; }
3999       mfem_error("HypreSmoother::MultTranspose (...) : undefined!\n");
4005 if (
B) {
delete B; }
4006 if (
X) {
delete X; }
4007 if (
V) {
delete V; }
4008 if (
Z) {
delete Z; }
4017 if (
X0) {
delete X0; }
4018 if (
X1) {
delete X1; }
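// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// How the HypreSmoother implemented above is typically driven. 'A', 'B' and 'X'
// stand for an already assembled HypreParMatrix and compatible Vectors; the
// names are illustrative, not defined in this file.
//
//    HypreSmoother smoother;
//    smoother.SetType(HypreSmoother::Chebyshev, 2);  // two relaxation sweeps
//    smoother.SetPolyOptions(3, 0.3);                // order 3, smooth 30% of the spectrum
//    smoother.SetOperator(A);
//    smoother.Mult(B, X);                            // one application of the smoother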
4033 : Solver(A_->Height(), A_->Width())
4045 MFEM_ASSERT(b.Size() == NumCols(), "");
4048 MFEM_VERIFY(A != NULL, "HypreParMatrix A is missing");
4098 MFEM_VERIFY(A != NULL, "HypreParMatrix A is missing");
4100 HYPRE_Int err_flag = SetupFcn()(*this, *A, b, x);
4104 { MFEM_WARNING("Error during setup! Error code: " << err_flag); }
4108 MFEM_VERIFY(!err_flag, "Error during setup! Error code: " << err_flag);
4110 hypre_error_flag = 0;
4118 if (!x_shallow) { x = *X; }
4126 mfem_error("HypreSolver::Mult (...) : HypreParMatrix A is missing");
4133 hypre_ParVectorSetConstantValues(x, 0.0);
4145 { MFEM_WARNING("Error during solve! Error code: " << err_flag); }
4149 MFEM_VERIFY(!err_flag, "Error during solve! Error code: " << err_flag);
4151 hypre_error_flag = 0;
4158 if (!x_shallow) { x = *X; }
4163 if (B) { delete B; }
4164 if (X) { delete X; }
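// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// The Setup/Mult wrappers above consult error_mode before reacting to a nonzero
// hypre error flag. A derived solver can be told to warn instead of abort, e.g.
// ('A', 'B', 'X' are illustrative names):
//
//    HypreBoomerAMG amg(A);
//    amg.SetErrorMode(HypreSolver::WARN_HYPRE_ERRORS);
//    amg.Mult(B, X);   // issues MFEM_WARNING instead of aborting on hypre errors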
4174 HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
4183 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4185 HYPRE_ParCSRPCGCreate(comm, &pcg_solver);
4191 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4212 HYPRE_PCGSetTol(pcg_solver, tol);
4217 HYPRE_PCGSetAbsoluteTol(pcg_solver, atol);
4222 HYPRE_PCGSetMaxIter(pcg_solver, max_iter);
4227 HYPRE_PCGSetLogging(pcg_solver, logging);
4232 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_lvl);
4237 precond = &precond_;
4239 HYPRE_ParCSRPCGSetPrecond(pcg_solver,
4247 HYPRE_PCGSetTwoNorm(pcg_solver, 1);
4248 if (res_frequency > 0)
4250 HYPRE_PCGSetRecomputeResidualP(pcg_solver, res_frequency);
4254 HYPRE_PCGSetResidualTol(pcg_solver, rtol);
4261 HYPRE_Int time_index = 0;
4262 HYPRE_Int num_iterations;
4265 HYPRE_Int print_level;
4267 HYPRE_PCGGetPrintLevel(pcg_solver, &print_level);
4268 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level%3);
4270 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4275 hypre_ParVectorSetConstantValues(x, 0.0);
4283 if (print_level > 0 && print_level < 3)
4285 time_index = hypre_InitializeTiming("PCG Setup");
4286 hypre_BeginTiming(time_index);
4289 HYPRE_ParCSRPCGSetup(pcg_solver, *A, b, x);
4292 if (print_level > 0 && print_level < 3)
4294 hypre_EndTiming(time_index);
4295 hypre_PrintTiming("Setup phase times", comm);
4296 hypre_FinalizeTiming(time_index);
4297 hypre_ClearTiming();
4301 if (print_level > 0 && print_level < 3)
4303 time_index = hypre_InitializeTiming("PCG Solve");
4304 hypre_BeginTiming(time_index);
4307 HYPRE_ParCSRPCGSolve(pcg_solver, *A, b, x);
4309 if (print_level > 0)
4311 if (print_level < 3)
4313 hypre_EndTiming(time_index);
4314 hypre_PrintTiming("Solve phase times", comm);
4315 hypre_FinalizeTiming(time_index);
4316 hypre_ClearTiming();
4319 HYPRE_ParCSRPCGGetNumIterations(pcg_solver, &num_iterations);
4320 HYPRE_ParCSRPCGGetFinalRelativeResidualNorm(pcg_solver,
4323 MPI_Comm_rank(comm, &myid);
4327 mfem::out << "PCG Iterations = " << num_iterations << endl
4328           << "Final PCG Relative Residual Norm = " << final_res_norm
4332 HYPRE_ParCSRPCGSetPrintLevel(pcg_solver, print_level);
4337 HYPRE_ParCSRPCGDestroy(pcg_solver);
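// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// Driving the HyprePCG wrapper implemented above, with BoomerAMG as the
// preconditioner. 'A', 'B', 'X' denote an already assembled parallel system
// (illustrative names).
//
//    HyprePCG pcg(A);
//    HypreBoomerAMG amg(A);
//    amg.SetPrintLevel(0);
//    pcg.SetTol(1e-12);
//    pcg.SetMaxIter(200);
//    pcg.SetPrintLevel(2);
//    pcg.SetPreconditioner(amg);
//    pcg.Mult(B, X);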
4345 HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
4346 SetDefaultOptions();
4356 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4358 HYPRE_ParCSRGMRESCreate(comm, &gmres_solver);
4359 SetDefaultOptions();
4362void HypreGMRES::SetDefaultOptions()
4368 HYPRE_ParCSRGMRESSetKDim(gmres_solver, k_dim);
4369 HYPRE_ParCSRGMRESSetMaxIter(gmres_solver, max_iter);
4370 HYPRE_ParCSRGMRESSetTol(gmres_solver, tol);
4376 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4397 HYPRE_GMRESSetTol(gmres_solver, tol);
4402 HYPRE_GMRESSetAbsoluteTol(gmres_solver, tol);
4407 HYPRE_GMRESSetMaxIter(gmres_solver, max_iter);
4412 HYPRE_GMRESSetKDim(gmres_solver, k_dim);
4417 HYPRE_GMRESSetLogging(gmres_solver, logging);
4422 HYPRE_GMRESSetPrintLevel(gmres_solver, print_lvl);
4427 precond = &precond_;
4429 HYPRE_ParCSRGMRESSetPrecond(gmres_solver,
4438 HYPRE_Int time_index = 0;
4439 HYPRE_Int num_iterations;
4442 HYPRE_Int print_level;
4444 HYPRE_GMRESGetPrintLevel(gmres_solver, &print_level);
4446 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4451 hypre_ParVectorSetConstantValues(x, 0.0);
4459 if (print_level > 0)
4461 time_index = hypre_InitializeTiming("GMRES Setup");
4462 hypre_BeginTiming(time_index);
4465 HYPRE_ParCSRGMRESSetup(gmres_solver, *A, b, x);
4468 if (print_level > 0)
4470 hypre_EndTiming(time_index);
4471 hypre_PrintTiming("Setup phase times", comm);
4472 hypre_FinalizeTiming(time_index);
4473 hypre_ClearTiming();
4477 if (print_level > 0)
4479 time_index = hypre_InitializeTiming("GMRES Solve");
4480 hypre_BeginTiming(time_index);
4483 HYPRE_ParCSRGMRESSolve(gmres_solver, *A, b, x);
4485 if (print_level > 0)
4487 hypre_EndTiming(time_index);
4488 hypre_PrintTiming("Solve phase times", comm);
4489 hypre_FinalizeTiming(time_index);
4490 hypre_ClearTiming();
4492 HYPRE_ParCSRGMRESGetNumIterations(gmres_solver, &num_iterations);
4493 HYPRE_ParCSRGMRESGetFinalRelativeResidualNorm(gmres_solver,
4496 MPI_Comm_rank(comm, &myid);
4500 mfem::out << "GMRES Iterations = " << num_iterations << endl
4501           << "Final GMRES Relative Residual Norm = " << final_res_norm
4509 HYPRE_ParCSRGMRESDestroy(gmres_solver);
4517 HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
4518 SetDefaultOptions();
4528 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4530 HYPRE_ParCSRFlexGMRESCreate(comm, &fgmres_solver);
4531 SetDefaultOptions();
4534void HypreFGMRES::SetDefaultOptions()
4540 HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
4541 HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
4542 HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
4548 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4569 HYPRE_ParCSRFlexGMRESSetTol(fgmres_solver, tol);
4574 HYPRE_ParCSRFlexGMRESSetMaxIter(fgmres_solver, max_iter);
4579 HYPRE_ParCSRFlexGMRESSetKDim(fgmres_solver, k_dim);
4584 HYPRE_ParCSRFlexGMRESSetLogging(fgmres_solver, logging);
4589 HYPRE_ParCSRFlexGMRESSetPrintLevel(fgmres_solver, print_lvl);
4594 precond = &precond_;
4595 HYPRE_ParCSRFlexGMRESSetPrecond(fgmres_solver,
4604 HYPRE_Int time_index = 0;
4605 HYPRE_Int num_iterations;
4608 HYPRE_Int print_level;
4610 HYPRE_FlexGMRESGetPrintLevel(fgmres_solver, &print_level);
4612 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4617 hypre_ParVectorSetConstantValues(x, 0.0);
4625 if (print_level > 0)
4627 time_index = hypre_InitializeTiming("FGMRES Setup");
4628 hypre_BeginTiming(time_index);
4631 HYPRE_ParCSRFlexGMRESSetup(fgmres_solver, *A, b, x);
4634 if (print_level > 0)
4636 hypre_EndTiming(time_index);
4637 hypre_PrintTiming("Setup phase times", comm);
4638 hypre_FinalizeTiming(time_index);
4639 hypre_ClearTiming();
4643 if (print_level > 0)
4645 time_index = hypre_InitializeTiming("FGMRES Solve");
4646 hypre_BeginTiming(time_index);
4649 HYPRE_ParCSRFlexGMRESSolve(fgmres_solver, *A, b, x);
4651 if (print_level > 0)
4653 hypre_EndTiming(time_index);
4654 hypre_PrintTiming("Solve phase times", comm);
4655 hypre_FinalizeTiming(time_index);
4656 hypre_ClearTiming();
4658 HYPRE_ParCSRFlexGMRESGetNumIterations(fgmres_solver, &num_iterations);
4659 HYPRE_ParCSRFlexGMRESGetFinalRelativeResidualNorm(fgmres_solver,
4662 MPI_Comm_rank(comm, &myid);
4666 mfem::out << "FGMRES Iterations = " << num_iterations << endl
4667           << "Final FGMRES Relative Residual Norm = " << final_res_norm
4675 HYPRE_ParCSRFlexGMRESDestroy(fgmres_solver);
4682 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4699 HYPRE_ParaSailsCreate(comm, &sai_precond);
4700 SetDefaultOptions();
4707 HYPRE_ParCSRMatrixGetComm(A, &comm);
4709 HYPRE_ParaSailsCreate(comm, &sai_precond);
4710 SetDefaultOptions();
4713void HypreParaSails::SetDefaultOptions()
4715 int sai_max_levels = 1;
4716 real_t sai_threshold = 0.1;
4719 real_t sai_loadbal = 0.0;
4721 int sai_logging = 1;
4723 HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
4724 HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
4725 HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
4726 HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
4727 HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
4728 HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
4731void HypreParaSails::ResetSAIPrecond(MPI_Comm comm)
4733 HYPRE_Int sai_max_levels;
4734 HYPRE_Real sai_threshold;
4735 HYPRE_Real sai_filter;
4737 HYPRE_Real sai_loadbal;
4738 HYPRE_Int sai_reuse;
4739 HYPRE_Int sai_logging;
4742 HYPRE_ParaSailsGetNlevels(sai_precond, &sai_max_levels);
4743 HYPRE_ParaSailsGetThresh(sai_precond, &sai_threshold);
4744 HYPRE_ParaSailsGetFilter(sai_precond, &sai_filter);
4745 HYPRE_ParaSailsGetSym(sai_precond, &sai_sym);
4746 HYPRE_ParaSailsGetLoadbal(sai_precond, &sai_loadbal);
4747 HYPRE_ParaSailsGetReuse(sai_precond, &sai_reuse);
4748 HYPRE_ParaSailsGetLogging(sai_precond, &sai_logging);
4750 HYPRE_ParaSailsDestroy(sai_precond);
4751 HYPRE_ParaSailsCreate(comm, &sai_precond);
4753 HYPRE_ParaSailsSetParams(sai_precond, sai_threshold, sai_max_levels);
4754 HYPRE_ParaSailsSetFilter(sai_precond, sai_filter);
4755 HYPRE_ParaSailsSetSym(sai_precond, sai_sym);
4756 HYPRE_ParaSailsSetLoadbal(sai_precond, sai_loadbal);
4757 HYPRE_ParaSailsSetReuse(sai_precond, sai_reuse);
4758 HYPRE_ParaSailsSetLogging(sai_precond, sai_logging);
4764 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4769 HYPRE_ParCSRMatrixGetComm(*A, &comm);
4770 ResetSAIPrecond(comm);
4787 HYPRE_ParaSailsSetParams(sai_precond, threshold, max_levels);
4792 HYPRE_ParaSailsSetFilter(sai_precond, filter);
4797 HYPRE_ParaSailsSetSym(sai_precond, sym);
4802 HYPRE_ParaSailsSetLoadbal(sai_precond, loadbal);
4807 HYPRE_ParaSailsSetReuse(sai_precond, reuse);
4812 HYPRE_ParaSailsSetLogging(sai_precond, logging);
4817 HYPRE_ParaSailsDestroy(sai_precond);
4823 HYPRE_EuclidCreate(comm, &euc_precond);
4824 SetDefaultOptions();
4831 HYPRE_ParCSRMatrixGetComm(A, &comm);
4833 HYPRE_EuclidCreate(comm, &euc_precond);
4834 SetDefaultOptions();
4837void HypreEuclid::SetDefaultOptions()
4845 HYPRE_EuclidSetLevel(euc_precond, euc_level);
4846 HYPRE_EuclidSetStats(euc_precond, euc_stats);
4847 HYPRE_EuclidSetMem(euc_precond, euc_mem);
4848 HYPRE_EuclidSetBJ(euc_precond, euc_bj);
4849 HYPRE_EuclidSetRowScale(euc_precond, euc_ro_sc);
4854 HYPRE_EuclidSetLevel(euc_precond, level);
4859 HYPRE_EuclidSetStats(euc_precond, stats);
4864 HYPRE_EuclidSetMem(euc_precond, mem);
4869 HYPRE_EuclidSetBJ(euc_precond, bj);
4874 HYPRE_EuclidSetRowScale(euc_precond, row_scale);
4877void HypreEuclid::ResetEuclidPrecond(MPI_Comm comm)
4881 HYPRE_EuclidDestroy(euc_precond);
4882 HYPRE_EuclidCreate(comm, &euc_precond);
4884 SetDefaultOptions();
4890 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4895 HYPRE_ParCSRMatrixGetComm(*new_A, &comm);
4896 ResetEuclidPrecond(comm);
4913 HYPRE_EuclidDestroy(euc_precond);
4917#if MFEM_HYPRE_VERSION >= 21900
4920 HYPRE_ILUCreate(&ilu_precond);
4921 SetDefaultOptions();
4924void HypreILU::SetDefaultOptions()
4927 HYPRE_Int ilu_type = 0;
4928 HYPRE_ILUSetType(ilu_precond, ilu_type);
4931 HYPRE_Int max_iter = 1;
4932 HYPRE_ILUSetMaxIter(ilu_precond, max_iter);
4935 HYPRE_Real tol = 0.0;
4936 HYPRE_ILUSetTol(ilu_precond, tol);
4939 HYPRE_Int lev_fill = 1;
4940 HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);
4943 HYPRE_Int reorder_type = 1;
4944 HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);
4947 HYPRE_Int print_level = 0;
4948 HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
4951void HypreILU::ResetILUPrecond()
4955 HYPRE_ILUDestroy(ilu_precond);
4957 HYPRE_ILUCreate(&ilu_precond);
4958 SetDefaultOptions();
4963 HYPRE_ILUSetLevelOfFill(ilu_precond, lev_fill);
4968 HYPRE_ILUSetType(ilu_precond, ilu_type);
4973 HYPRE_ILUSetMaxIter(ilu_precond, max_iter);
4978 HYPRE_ILUSetTol(ilu_precond, tol);
4983 HYPRE_ILUSetLocalReordering(ilu_precond, reorder_type);
4988 HYPRE_ILUSetPrintLevel(ilu_precond, print_level);
4994 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
4996 if (A) { ResetILUPrecond(); }
5012 HYPRE_ILUDestroy(ilu_precond);
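// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// HypreILU (available for MFEM_HYPRE_VERSION >= 21900) used as a GMRES
// preconditioner; 'A', 'B', 'X' are illustrative names.
//
//    HypreILU ilu;
//    ilu.SetLevelOfFill(1);
//    HypreGMRES gmres(A);
//    gmres.SetTol(1e-10);
//    gmres.SetMaxIter(300);
//    gmres.SetKDim(50);
//    gmres.SetPreconditioner(ilu);
//    gmres.Mult(B, X);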
5019 HYPRE_BoomerAMGCreate(&amg_precond);
5020 SetDefaultOptions();
5025 HYPRE_BoomerAMGCreate(&amg_precond);
5026 SetDefaultOptions();
5029void HypreBoomerAMG::SetDefaultOptions()
5032 int coarsen_type, agg_levels, interp_type, Pmax, relax_type, relax_sweeps,
5033 print_level, max_levels;
5075 HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
5076 HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
5077 HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
5080 HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
5081 HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
5082 HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
5083 HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
5084 HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
5085 HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);
5088 HYPRE_BoomerAMGSetMaxIter(amg_precond, 1);
5089 HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
5092void HypreBoomerAMG::ResetAMGPrecond()
5094 HYPRE_Int coarsen_type;
5095 HYPRE_Int agg_levels;
5096 HYPRE_Int relax_type;
5097 HYPRE_Int relax_sweeps;
5099 HYPRE_Int interp_type;
5101 HYPRE_Int print_level;
5102 HYPRE_Int max_levels;
5104 HYPRE_Int nrbms = rbms.Size();
5106 HYPRE_Int nodal_diag;
5107 HYPRE_Int relax_coarse;
5108 HYPRE_Int interp_vec_variant;
5110 HYPRE_Int smooth_interp_vectors;
5111 HYPRE_Int interp_refine;
5113 hypre_ParAMGData *amg_data = (hypre_ParAMGData *)amg_precond;
5116 HYPRE_BoomerAMGGetCoarsenType(amg_precond, &coarsen_type);
5117 agg_levels = hypre_ParAMGDataAggNumLevels(amg_data);
5118 relax_type = hypre_ParAMGDataUserRelaxType(amg_data);
5119 relax_sweeps = hypre_ParAMGDataUserNumSweeps(amg_data);
5120 HYPRE_BoomerAMGGetStrongThreshold(amg_precond, &theta);
5121 hypre_BoomerAMGGetInterpType(amg_precond, &interp_type);
5122 HYPRE_BoomerAMGGetPMaxElmts(amg_precond, &Pmax);
5123 HYPRE_BoomerAMGGetPrintLevel(amg_precond, &print_level);
5124 HYPRE_BoomerAMGGetMaxLevels(amg_precond, &max_levels);
5125 HYPRE_BoomerAMGGetNumFunctions(amg_precond, &dim);
5128 nodal = hypre_ParAMGDataNodal(amg_data);
5129 nodal_diag = hypre_ParAMGDataNodalDiag(amg_data);
5130 HYPRE_BoomerAMGGetCycleRelaxType(amg_precond, &relax_coarse, 3);
5131 interp_vec_variant = hypre_ParAMGInterpVecVariant(amg_data);
5132 q_max = hypre_ParAMGInterpVecQMax(amg_data);
5133 smooth_interp_vectors = hypre_ParAMGSmoothInterpVectors(amg_data);
5134 interp_refine = hypre_ParAMGInterpRefine(amg_data);
5137 HYPRE_BoomerAMGDestroy(amg_precond);
5138 HYPRE_BoomerAMGCreate(&amg_precond);
5140 HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
5141 HYPRE_BoomerAMGSetAggNumLevels(amg_precond, agg_levels);
5142 HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
5143 HYPRE_BoomerAMGSetNumSweeps(amg_precond, relax_sweeps);
5144 HYPRE_BoomerAMGSetMaxLevels(amg_precond, max_levels);
5145 HYPRE_BoomerAMGSetTol(amg_precond, 0.0);
5146 HYPRE_BoomerAMGSetMaxIter(amg_precond, 1);
5147 HYPRE_BoomerAMGSetStrongThreshold(amg_precond, theta);
5148 HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
5149 HYPRE_BoomerAMGSetPMaxElmts(amg_precond, Pmax);
5150 HYPRE_BoomerAMGSetPrintLevel(amg_precond, print_level);
5151 HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
5154 HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
5155 HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
5156 HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
5157 HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
5158 HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
5159 HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
5160 HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);
5162 HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
5169 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
5171 if (A) { ResetAMGPrecond(); }
5187 HYPRE_BoomerAMGSetNumFunctions(amg_precond, dim);
5195 HYPRE_Int *h_mapping = mfem_hypre_CTAlloc_host(HYPRE_Int, height);
5197 MFEM_VERIFY(height % dim == 0, "Ordering does not work as claimed!");
5199 for (int i = 0; i < dim; ++i)
5201 for (int j = 0; j < h_nnodes; ++j)
5210 HYPRE_Int *mapping = nullptr;
5211#if defined(hypre_IntArrayData) && defined(HYPRE_USING_GPU)
5214 mapping = mfem_hypre_CTAlloc(HYPRE_Int, height);
5215 hypre_TMemcpy(mapping, h_mapping, HYPRE_Int, height,
5216               HYPRE_MEMORY_DEVICE, HYPRE_MEMORY_HOST);
5217 mfem_hypre_TFree_host(h_mapping);
5222 mapping = h_mapping;
5227 HYPRE_BoomerAMGSetDofFunc(amg_precond, mapping);
5231 HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
5232 HYPRE_BoomerAMGSetStrongThreshold(amg_precond, 0.5);
5238 y = 0.0; y(0) = x(1); y(1) = -x(0);
5240static void func_ryz(const Vector &x, Vector &y)
5242 y = 0.0; y(1) = x(2); y(2) = -x(1);
5244static void func_rzx(const Vector &x, Vector &y)
5246 y = 0.0; y(2) = x(0); y(0) = -x(2);
5249void HypreBoomerAMG::RecomputeRBMs()
5252 Array<HypreParVector*> gf_rbms;
5255 for (int i = 0; i < rbms.Size(); i++)
5257 HYPRE_ParVectorDestroy(rbms[i]);
5264 VectorFunctionCoefficient coeff_rxy(2, func_rxy);
5266 ParGridFunction rbms_rxy(fespace);
5267 rbms_rxy.ProjectCoefficient(coeff_rxy);
5270 gf_rbms.SetSize(nrbms);
5272 rbms_rxy.GetTrueDofs(*gf_rbms[0]);
5278 VectorFunctionCoefficient coeff_rxy(3, func_rxy);
5279 VectorFunctionCoefficient coeff_ryz(3, func_ryz);
5280 VectorFunctionCoefficient coeff_rzx(3, func_rzx);
5282 ParGridFunction rbms_rxy(fespace);
5283 ParGridFunction rbms_ryz(fespace);
5284 ParGridFunction rbms_rzx(fespace);
5285 rbms_rxy.ProjectCoefficient(coeff_rxy);
5286 rbms_ryz.ProjectCoefficient(coeff_ryz);
5287 rbms_rzx.ProjectCoefficient(coeff_rzx);
5290 gf_rbms.SetSize(nrbms);
5294 rbms_rxy.GetTrueDofs(*gf_rbms[0]);
5295 rbms_ryz.GetTrueDofs(*gf_rbms[1]);
5296 rbms_rzx.GetTrueDofs(*gf_rbms[2]);
5305 for (int i = 0; i < nrbms; i++)
5307 rbms[i] = gf_rbms[i]->StealParVector();
5313 bool interp_refine_)
5315#ifdef HYPRE_USING_GPU
5318 MFEM_ABORT("this method is not supported in hypre built with GPU support");
5323 this->fespace = fespace_;
5333 int relax_coarse = 8;
5336 int interp_vec_variant = 2;
5338 int smooth_interp_vectors = 1;
5342 int interp_refine = interp_refine_;
5344 HYPRE_BoomerAMGSetNodal(amg_precond, nodal);
5345 HYPRE_BoomerAMGSetNodalDiag(amg_precond, nodal_diag);
5346 HYPRE_BoomerAMGSetCycleRelaxType(amg_precond, relax_coarse, 3);
5347 HYPRE_BoomerAMGSetInterpVecVariant(amg_precond, interp_vec_variant);
5348 HYPRE_BoomerAMGSetInterpVecQMax(amg_precond, q_max);
5349 HYPRE_BoomerAMGSetSmoothInterpVectors(amg_precond, smooth_interp_vectors);
5350 HYPRE_BoomerAMGSetInterpRefine(amg_precond, interp_refine);
5353 HYPRE_BoomerAMGSetInterpVectors(amg_precond, rbms.Size(), rbms.GetData());
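// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// Typical call sequence for the elasticity options set up above; 'A' is the
// assembled elasticity operator, 'fespace' the vector-valued H1
// ParFiniteElementSpace and 'pcg' the solver from the HyprePCG sketch above
// (all illustrative names). Note the MFEM_ABORT above: this path is not
// available when hypre is built with GPU support.
//
//    HypreBoomerAMG amg(A);
//    amg.SetSystemsOptions(pmesh->Dimension(), /*order_bynodes=*/true);
//    amg.SetElasticityOptions(fespace);   // attaches the rigid body modes
//    pcg.SetPreconditioner(amg);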
5362#if MFEM_HYPRE_VERSION >= 21800
5365 const std::string &prerelax,
5366 const std::string &postrelax)
5370 int interp_type = 100;
5371 int relax_type = 10;
5372 int coarsen_type = 6;
5373 real_t strength_tolC = 0.1;
5374 real_t strength_tolR = 0.01;
5375 real_t filter_tolR = 0.0;
5376 real_t filterA_tol = 0.0;
5379 int ns_down = 0, ns_up = 0, ns_coarse;
5382 ns_down = static_cast<int>(prerelax.length());
5383 ns_up = static_cast<int>(postrelax.length());
5387 HYPRE_Int **grid_relax_points = mfem_hypre_TAlloc(HYPRE_Int*, 4);
5388 grid_relax_points[0] = NULL;
5389 grid_relax_points[1] = mfem_hypre_TAlloc(HYPRE_Int, ns_down);
5390 grid_relax_points[2] = mfem_hypre_TAlloc(HYPRE_Int, ns_up);
5391 grid_relax_points[3] = mfem_hypre_TAlloc(HYPRE_Int, 1);
5392 grid_relax_points[3][0] = 0;
5395 for (int i = 0; i < ns_down; i++)
5397 if (prerelax[i] == 'F')
5399 grid_relax_points[1][i] = -1;
5401 else if (prerelax[i] == 'C')
5403 grid_relax_points[1][i] = 1;
5405 else if (prerelax[i] == 'A')
5407 grid_relax_points[1][i] = 0;
5412 for (int i = 0; i < ns_up; i++)
5414 if (postrelax[i] == 'F')
5416 grid_relax_points[2][i] = -1;
5418 else if (postrelax[i] == 'C')
5420 grid_relax_points[2][i] = 1;
5422 else if (postrelax[i] == 'A')
5424 grid_relax_points[2][i] = 0;
5428 HYPRE_BoomerAMGSetRestriction(amg_precond, distanceR);
5430 HYPRE_BoomerAMGSetGridRelaxPoints(amg_precond, grid_relax_points);
5432 HYPRE_BoomerAMGSetInterpType(amg_precond, interp_type);
5437 HYPRE_BoomerAMGSetSabs(amg_precond, Sabs);
5440 HYPRE_BoomerAMGSetCoarsenType(amg_precond, coarsen_type);
5443 HYPRE_BoomerAMGSetAggNumLevels(amg_precond, 0);
5445 HYPRE_BoomerAMGSetStrongThreshold(amg_precond, strength_tolC);
5449 HYPRE_BoomerAMGSetStrongThresholdR(amg_precond, strength_tolR);
5450 HYPRE_BoomerAMGSetFilterThresholdR(amg_precond, filter_tolR);
5453 if (relax_type > -1)
5455 HYPRE_BoomerAMGSetRelaxType(amg_precond, relax_type);
5460 HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_coarse, 3);
5461 HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_down, 1);
5462 HYPRE_BoomerAMGSetCycleNumSweeps(amg_precond, ns_up, 2);
5464 HYPRE_BoomerAMGSetADropTol(amg_precond, filterA_tol);
5466 HYPRE_BoomerAMGSetADropType(amg_precond, -1);
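// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// The AIR settings above are exposed through SetAdvectiveOptions (restriction
// distance plus pre/post relaxation strings built from 'F'/'C'/'A' points).
// Illustrative use with the HypreGMRES wrapper ('gmres' as in the GMRES code
// above):
//
//    HypreBoomerAMG amg(A);
//    amg.SetAdvectiveOptions(1, "", "FFC");  // distance-1 AIR, FFC post-relaxation
//    gmres.SetPreconditioner(amg);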
5474 for (int i = 0; i < rbms.Size(); i++)
5476 HYPRE_ParVectorDestroy(rbms[i]);
5479 HYPRE_BoomerAMGDestroy(amg_precond);
5505 MFEM_ASSERT(G != NULL, "");
5506 MFEM_ASSERT(x != NULL, "");
5507 MFEM_ASSERT(y != NULL, "");
5508 int sdim = (z == NULL) ? 2 : 3;
5509 int cycle_type = 13;
5510 MakeSolver(sdim, cycle_type);
5512 HYPRE_ParVector pz = z ? static_cast<HYPRE_ParVector>(*z) : NULL;
5513 HYPRE_AMSSetCoordinateVectors(ams, *x, *y, pz);
5514 HYPRE_AMSSetDiscreteGradient(ams, *G);
5522 int cycle_type = 13;
5527 trace_space = trace_space || rt_trace_space;
5533 "HypreAMS does not support variable order spaces");
5540 MakeSolver(std::max(sdim, vdim), cycle_type);
5541 MakeGradientAndInterpolation(edge_fespace, cycle_type);
5545 delete edge_fespace;
5550void HypreAMS::MakeSolver(int sdim, int cycle_type)
5556 int amg_coarsen_type = hypre_gpu ? 8 : 10;
5557 int amg_agg_levels = hypre_gpu ? 0 : 1;
5558 int amg_rlx_type = hypre_gpu ? 18 : 8;
5559 int rlx_type = hypre_gpu ? 1: 2;
5561 int amg_interp_type = 6;
5565 ams_cycle_type = cycle_type;
5566 HYPRE_AMSCreate(&ams);
5568 HYPRE_AMSSetDimension(ams, sdim);
5569 HYPRE_AMSSetTol(ams, 0.0);
5570 HYPRE_AMSSetMaxIter(ams, 1);
5571 HYPRE_AMSSetCycleType(ams, cycle_type);
5572 HYPRE_AMSSetPrintLevel(ams, 1);
5575 HYPRE_AMSSetSmoothingOptions(ams, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
5576 HYPRE_AMSSetAlphaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5577 theta, amg_interp_type, amg_Pmax);
5578 HYPRE_AMSSetBetaAMGOptions(ams, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5579 theta, amg_interp_type, amg_Pmax);
5581 HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, amg_rlx_type);
5582 HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, amg_rlx_type);
5591void HypreAMS::MakeGradientAndInterpolation(
5592    ParFiniteElementSpace *edge_fespace, int cycle_type)
5594 const FiniteElementCollection *edge_fec = edge_fespace->FEColl();
5595 bool trace_space = dynamic_cast<const ND_Trace_FECollection *>(edge_fec);
5597 ParMesh *pmesh = edge_fespace->GetParMesh();
5599 int sdim = pmesh->SpaceDimension();
5600 int vdim = edge_fespace->FEColl()->GetRangeDim(dim - trace_space);
5603 MFEM_VERIFY(!edge_fespace->IsVariableOrder(),
5604             "HypreAMS does not support variable order spaces");
5605 int p = edge_fec->GetOrder() + (dim - trace_space == 1 ? 1 : 0);
5608 FiniteElementCollection *vert_fec;
5611 vert_fec = new H1_Trace_FECollection(p, dim);
5615 vert_fec = new H1_FECollection(p, dim);
5617 ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh,
5621 ParDiscreteLinearOperator *grad;
5622 grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
5625 grad->AddTraceFaceInterpolator(new GradientInterpolator);
5629 grad->AddDomainInterpolator(new GradientInterpolator);
5633 G = grad->ParallelAssemble();
5634 HYPRE_AMSSetDiscreteGradient(ams, *G);
5639 Pi = Pix = Piy = Piz = NULL;
5640 if (p == 1 && pmesh->GetNodes() == NULL && vdim <= sdim)
5642 ParGridFunction x_coord(vert_fespace);
5643 ParGridFunction y_coord(vert_fespace);
5644 ParGridFunction z_coord(vert_fespace);
5646 for (int i = 0; i < pmesh->GetNV(); i++)
5648 coord = pmesh->GetVertex(i);
5649 x_coord(i) = coord[0];
5650 if (sdim >= 2) { y_coord(i) = coord[1]; }
5651 if (sdim == 3) { z_coord(i) = coord[2]; }
5653 x = x_coord.ParallelProject();
5660 y = y_coord.ParallelProject();
5665 z = z_coord.ParallelProject();
5669 HYPRE_AMSSetCoordinateVectors(ams,
5670 x ? (HYPRE_ParVector)(*x) : NULL,
5671 y ? (HYPRE_ParVector)(*y) : NULL,
5672 z ? (HYPRE_ParVector)(*z) : NULL);
5676 ParFiniteElementSpace *vert_fespace_d =
5677 new ParFiniteElementSpace(pmesh, vert_fec, std::max(sdim, vdim),
5680 ParDiscreteLinearOperator *id_ND;
5681 id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
5684 id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
5688 id_ND->AddDomainInterpolator(new IdentityInterpolator);
5693 if (cycle_type < 10)
5695 Pi = id_ND->ParallelAssemble();
5699 Array2D<HypreParMatrix *> Pi_blocks;
5700 id_ND->GetParBlocks(Pi_blocks);
5701 Pix = Pi_blocks(0,0);
5702 if (std::max(sdim, vdim) >= 2) { Piy = Pi_blocks(0,1); }
5703 if (std::max(sdim, vdim) == 3) { Piz = Pi_blocks(0,2); }
5708 HYPRE_ParCSRMatrix HY_Pi = (Pi) ? (HYPRE_ParCSRMatrix) *Pi : NULL;
5709 HYPRE_ParCSRMatrix HY_Pix = (Pix) ? (HYPRE_ParCSRMatrix) *Pix : NULL;
5710 HYPRE_ParCSRMatrix HY_Piy = (Piy) ? (HYPRE_ParCSRMatrix) *Piy : NULL;
5711 HYPRE_ParCSRMatrix HY_Piz = (Piz) ? (HYPRE_ParCSRMatrix) *Piz : NULL;
5712 HYPRE_AMSSetInterpolations(ams, HY_Pi, HY_Pix, HY_Piy, HY_Piz);
5714 delete vert_fespace_d;
5717 delete vert_fespace;
5721void HypreAMS::ResetAMSPrecond()
5723#if MFEM_HYPRE_VERSION >= 22600
5725 auto *ams_data = (hypre_AMSData *)ams;
5728 HYPRE_Int dim = hypre_AMSDataDimension(ams_data);
5731 hypre_ParCSRMatrix *hy_G = hypre_AMSDataDiscreteGradient(ams_data);
5733 HYPRE_Int beta_is_zero = hypre_AMSDataBetaIsZero(ams_data);
5736 hypre_ParCSRMatrix *hy_Pi = hypre_AMSDataPiInterpolation(ams_data);
5737 hypre_ParCSRMatrix *hy_Pix = ams_data->Pix;
5738 hypre_ParCSRMatrix *hy_Piy = ams_data->Piy;
5739 hypre_ParCSRMatrix *hy_Piz = ams_data->Piz;
5740 HYPRE_Int owns_Pi = hypre_AMSDataOwnsPiInterpolation(ams_data);
5743 ams_data->owns_Pi = 0;
5747 hypre_ParVector *hy_x = hypre_AMSDataVertexCoordinateX(ams_data);
5748 hypre_ParVector *hy_y = hypre_AMSDataVertexCoordinateY(ams_data);
5749 hypre_ParVector *hy_z = hypre_AMSDataVertexCoordinateZ(ams_data);
5752 HYPRE_Int maxit = hypre_AMSDataMaxIter(ams_data);
5753 HYPRE_Real tol = hypre_AMSDataTol(ams_data);
5754 HYPRE_Int cycle_type = hypre_AMSDataCycleType(ams_data);
5755 HYPRE_Int ams_print_level = hypre_AMSDataPrintLevel(ams_data);
5758 HYPRE_Int A_relax_type = hypre_AMSDataARelaxType(ams_data);
5759 HYPRE_Int A_relax_times = hypre_AMSDataARelaxTimes(ams_data);
5760 HYPRE_Real A_relax_weight = hypre_AMSDataARelaxWeight(ams_data);
5761 HYPRE_Real A_omega = hypre_AMSDataAOmega(ams_data);
5762 HYPRE_Int A_cheby_order = hypre_AMSDataAChebyOrder(ams_data);
5763 HYPRE_Real A_cheby_fraction = hypre_AMSDataAChebyFraction(ams_data);
5765 HYPRE_Int B_Pi_coarsen_type = hypre_AMSDataPoissonAlphaAMGCoarsenType(ams_data);
5766 HYPRE_Int B_Pi_agg_levels = hypre_AMSDataPoissonAlphaAMGAggLevels(ams_data);
5767 HYPRE_Int B_Pi_relax_type = hypre_AMSDataPoissonAlphaAMGRelaxType(ams_data);
5768 HYPRE_Int B_Pi_coarse_relax_type = ams_data->B_Pi_coarse_relax_type;
5769 HYPRE_Real B_Pi_theta = hypre_AMSDataPoissonAlphaAMGStrengthThreshold(ams_data);
5770 HYPRE_Int B_Pi_interp_type = ams_data->B_Pi_interp_type;
5771 HYPRE_Int B_Pi_Pmax = ams_data->B_Pi_Pmax;
5773 HYPRE_Int B_G_coarsen_type = hypre_AMSDataPoissonBetaAMGCoarsenType(ams_data);
5774 HYPRE_Int B_G_agg_levels = hypre_AMSDataPoissonBetaAMGAggLevels(ams_data);
5775 HYPRE_Int B_G_relax_type = hypre_AMSDataPoissonBetaAMGRelaxType(ams_data);
5776 HYPRE_Int B_G_coarse_relax_type = ams_data->B_G_coarse_relax_type;
5777 HYPRE_Real B_G_theta = hypre_AMSDataPoissonBetaAMGStrengthThreshold(ams_data);
5778 HYPRE_Int B_G_interp_type = ams_data->B_G_interp_type;
5779 HYPRE_Int B_G_Pmax = ams_data->B_G_Pmax;
5781 HYPRE_AMSDestroy(ams);
5782 HYPRE_AMSCreate(&ams);
5783 ams_data = (hypre_AMSData *)ams;
5785 HYPRE_AMSSetDimension(ams, dim);
5786 HYPRE_AMSSetTol(ams, tol);
5787 HYPRE_AMSSetMaxIter(ams, maxit);
5788 HYPRE_AMSSetCycleType(ams, cycle_type);
5789 HYPRE_AMSSetPrintLevel(ams, ams_print_level);
5791 HYPRE_AMSSetCoordinateVectors(ams, hy_x, hy_y, hy_z);
5793 HYPRE_AMSSetDiscreteGradient(ams, hy_G);
5794 HYPRE_AMSSetCoordinateVectors(ams, hy_x, hy_y, hy_z);
5795 HYPRE_AMSSetInterpolations(ams, hy_Pi, hy_Pix, hy_Piy, hy_Piz);
5796 ams_data->owns_Pi = owns_Pi;
5799 HYPRE_AMSSetSmoothingOptions(ams, A_relax_type, A_relax_times, A_relax_weight,
5802 hypre_AMSDataAChebyOrder(ams_data) = A_cheby_order;
5803 hypre_AMSDataAChebyFraction(ams_data) = A_cheby_fraction;
5805 HYPRE_AMSSetAlphaAMGOptions(ams, B_Pi_coarsen_type, B_Pi_agg_levels,
5807 B_Pi_theta, B_Pi_interp_type, B_Pi_Pmax);
5808 HYPRE_AMSSetBetaAMGOptions(ams, B_G_coarsen_type, B_G_agg_levels,
5810 B_G_theta, B_G_interp_type, B_G_Pmax);
5812 HYPRE_AMSSetAlphaAMGCoarseRelaxType(ams, B_Pi_coarse_relax_type);
5813 HYPRE_AMSSetBetaAMGCoarseRelaxType(ams, B_G_coarse_relax_type);
5815 ams_data->beta_is_zero = beta_is_zero;
5818 HYPRE_AMSDestroy(ams);
5820 MakeSolver(space_dim, ams_cycle_type);
5822 HYPRE_AMSSetPrintLevel(ams, print_level);
5823 if (singular) { HYPRE_AMSSetBetaPoissonMatrix(ams, NULL); }
5825 HYPRE_AMSSetDiscreteGradient(ams, *G);
5828 HYPRE_AMSSetCoordinateVectors(ams,
5829 x ? (HYPRE_ParVector)(*x) : nullptr,
5830 y ? (HYPRE_ParVector)(*y) : nullptr,
5831 z ? (HYPRE_ParVector)(*z) : nullptr);
5835 HYPRE_AMSSetInterpolations(ams,
5836 Pi ? (HYPRE_ParCSRMatrix) *Pi : nullptr,
5837 Pix ? (HYPRE_ParCSRMatrix) *Pix : nullptr,
5838 Piy ? (HYPRE_ParCSRMatrix) *Piy : nullptr,
5839 Piz ? (HYPRE_ParCSRMatrix) *Piz : nullptr);
5847 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
5849 if (A) { ResetAMSPrecond(); }
5866 HYPRE_AMSDestroy(ams);
5881 HYPRE_AMSSetPrintLevel(ams, print_lvl);
5882 print_level = print_lvl;
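// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// HypreAMS applied to a definite Maxwell (curl-curl + mass) system assembled on
// a Nedelec space; 'A', 'B', 'X' and 'prec_fespace' are illustrative names.
//
//    HypreAMS ams(A, prec_fespace);   // prec_fespace: ND ParFiniteElementSpace
//    ams.SetPrintLevel(0);
//    // ams.SetSingularProblem();     // when the mass term vanishes
//    HyprePCG pcg(A);
//    pcg.SetTol(1e-12);
//    pcg.SetPreconditioner(ams);
//    pcg.Mult(B, X);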
5900 x(x_), y(y_), z(z_),
5902 ND_Pi(NULL), ND_Pix(NULL), ND_Piy(NULL), ND_Piz(NULL),
5903 RT_Pi(NULL), RT_Pix(NULL), RT_Piy(NULL), RT_Piz(NULL)
5905 MFEM_ASSERT(C != NULL, "");
5906 MFEM_ASSERT(G != NULL, "");
5907 MFEM_ASSERT(x != NULL, "");
5908 MFEM_ASSERT(y != NULL, "");
5909 MFEM_ASSERT(z != NULL, "");
5913 HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
5914 HYPRE_ADSSetDiscreteCurl(ads, *C);
5915 HYPRE_ADSSetDiscreteGradient(ads, *G);
5918void HypreADS::MakeSolver()
5924 int rlx_type = hypre_gpu ? 1 : 2;
5925 int amg_coarsen_type = hypre_gpu ? 8 : 10;
5926 int amg_agg_levels = hypre_gpu ? 0 : 1;
5927 int amg_rlx_type = hypre_gpu ? 18 : 8;
5929 int amg_interp_type = 6;
5932 HYPRE_ADSCreate(&ads);
5934 HYPRE_ADSSetTol(ads, 0.0);
5935 HYPRE_ADSSetMaxIter(ads, 1);
5936 HYPRE_ADSSetCycleType(ads, cycle_type);
5937 HYPRE_ADSSetPrintLevel(ads, 1);
5940 HYPRE_ADSSetSmoothingOptions(ads, rlx_type, rlx_sweeps, rlx_weight, rlx_omega);
5941 HYPRE_ADSSetAMGOptions(ads, amg_coarsen_type, amg_agg_levels, amg_rlx_type,
5942 theta, amg_interp_type, amg_Pmax);
5943 HYPRE_ADSSetAMSOptions(ads, ams_cycle_type, amg_coarsen_type, amg_agg_levels,
5944 amg_rlx_type, theta, amg_interp_type, amg_Pmax);
5953void HypreADS::MakeDiscreteMatrices(ParFiniteElementSpace *face_fespace)
5955 const FiniteElementCollection *face_fec = face_fespace->FEColl();
5957 (dynamic_cast<const RT_Trace_FECollection*>(face_fec) != NULL);
5959 MFEM_VERIFY(!face_fespace->IsVariableOrder(), "");
5960 int p = face_fec->GetOrder();
5963 ParMesh *pmesh = (ParMesh *) face_fespace->GetMesh();
5964 FiniteElementCollection *vert_fec, *edge_fec;
5967 vert_fec = new H1_Trace_FECollection(p, 3);
5968 edge_fec = new ND_Trace_FECollection(p, 3);
5972 vert_fec = new H1_FECollection(p, 3);
5973 edge_fec = new ND_FECollection(p, 3);
5976 ParFiniteElementSpace *vert_fespace = new ParFiniteElementSpace(pmesh,
5978 ParFiniteElementSpace *edge_fespace = new ParFiniteElementSpace(pmesh,
5982 if (p == 1 && pmesh->GetNodes() == NULL)
5984 ParGridFunction x_coord(vert_fespace);
5985 ParGridFunction y_coord(vert_fespace);
5986 ParGridFunction z_coord(vert_fespace);
5988 for (int i = 0; i < pmesh->GetNV(); i++)
5990 coord = pmesh->GetVertex(i);
5991 x_coord(i) = coord[0];
5992 y_coord(i) = coord[1];
5993 z_coord(i) = coord[2];
5995 x = x_coord.ParallelProject();
5996 y = y_coord.ParallelProject();
5997 z = z_coord.ParallelProject();
6001 HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
6011 ParDiscreteLinearOperator *curl;
6012 curl = new ParDiscreteLinearOperator(edge_fespace, face_fespace);
6015 curl->AddTraceFaceInterpolator(new CurlInterpolator);
6019 curl->AddDomainInterpolator(new CurlInterpolator);
6023 C = curl->ParallelAssemble();
6025 HYPRE_ADSSetDiscreteCurl(ads, *C);
6029 ParDiscreteLinearOperator *grad;
6030 grad = new ParDiscreteLinearOperator(vert_fespace, edge_fespace);
6033 grad->AddTraceFaceInterpolator(new GradientInterpolator);
6037 grad->AddDomainInterpolator(new GradientInterpolator);
6041 G = grad->ParallelAssemble();
6044 HYPRE_ADSSetDiscreteGradient(ads, *G);
6048 RT_Pi = RT_Pix = RT_Piy = RT_Piz = NULL;
6049 ND_Pi = ND_Pix = ND_Piy = ND_Piz = NULL;
6050 if (p > 1 || pmesh->GetNodes() != NULL)
6052 ParFiniteElementSpace *vert_fespace_d
6055 ParDiscreteLinearOperator *id_ND;
6056 id_ND = new ParDiscreteLinearOperator(vert_fespace_d, edge_fespace);
6059 id_ND->AddTraceFaceInterpolator(new IdentityInterpolator);
6063 id_ND->AddDomainInterpolator(new IdentityInterpolator);
6068 if (ams_cycle_type < 10)
6070 ND_Pi = id_ND->ParallelAssemble();
6076 Array2D<HypreParMatrix *> ND_Pi_blocks;
6077 id_ND->GetParBlocks(ND_Pi_blocks);
6078 ND_Pix = ND_Pi_blocks(0,0);
6079 ND_Piy = ND_Pi_blocks(0,1);
6080 ND_Piz = ND_Pi_blocks(0,2);
6085 ParDiscreteLinearOperator *id_RT;
6086 id_RT = new ParDiscreteLinearOperator(vert_fespace_d, face_fespace);
6089 id_RT->AddTraceFaceInterpolator(new NormalInterpolator);
6093 id_RT->AddDomainInterpolator(new IdentityInterpolator);
6098 if (cycle_type < 10)
6100 RT_Pi = id_RT->ParallelAssemble();
6105 Array2D<HypreParMatrix *> RT_Pi_blocks;
6106 id_RT->GetParBlocks(RT_Pi_blocks);
6107 RT_Pix = RT_Pi_blocks(0,0);
6108 RT_Piy = RT_Pi_blocks(0,1);
6109 RT_Piz = RT_Pi_blocks(0,2);
6114 HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
6115 HY_RT_Pi = (RT_Pi) ? (HYPRE_ParCSRMatrix) *RT_Pi : NULL;
6116 HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
6117 HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
6118 HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
6119 HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
6120 HY_ND_Pi = (ND_Pi) ? (HYPRE_ParCSRMatrix) *ND_Pi : NULL;
6121 HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
6122 HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
6123 HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
6124 HYPRE_ADSSetInterpolations(ads,
6125 HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
6126 HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);
6128 delete vert_fespace_d;
6132 delete vert_fespace;
6134 delete edge_fespace;
6137void HypreADS::Init(ParFiniteElementSpace *face_fespace)
6140 MakeDiscreteMatrices(face_fespace);
6143void HypreADS::ResetADSPrecond()
6145 HYPRE_ADSDestroy(ads);
6149 HYPRE_ADSSetPrintLevel(ads, print_level);
6151 HYPRE_ADSSetDiscreteCurl(ads, *C);
6152 HYPRE_ADSSetDiscreteGradient(ads, *G);
6155 MFEM_VERIFY(x && y && z, "");
6156 HYPRE_ADSSetCoordinateVectors(ads, *x, *y, *z);
6160 HYPRE_ParCSRMatrix HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz;
6161 HY_RT_Pi = (RT_Pi) ? (HYPRE_ParCSRMatrix) *RT_Pi : NULL;
6162 HY_RT_Pix = (RT_Pix) ? (HYPRE_ParCSRMatrix) *RT_Pix : NULL;
6163 HY_RT_Piy = (RT_Piy) ? (HYPRE_ParCSRMatrix) *RT_Piy : NULL;
6164 HY_RT_Piz = (RT_Piz) ? (HYPRE_ParCSRMatrix) *RT_Piz : NULL;
6165 HYPRE_ParCSRMatrix HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz;
6166 HY_ND_Pi = (ND_Pi) ? (HYPRE_ParCSRMatrix) *ND_Pi : NULL;
6167 HY_ND_Pix = (ND_Pix) ? (HYPRE_ParCSRMatrix) *ND_Pix : NULL;
6168 HY_ND_Piy = (ND_Piy) ? (HYPRE_ParCSRMatrix) *ND_Piy : NULL;
6169 HY_ND_Piz = (ND_Piz) ? (HYPRE_ParCSRMatrix) *ND_Piz : NULL;
6170 HYPRE_ADSSetInterpolations(ads,
6171 HY_RT_Pi, HY_RT_Pix, HY_RT_Piy, HY_RT_Piz,
6172 HY_ND_Pi, HY_ND_Pix, HY_ND_Piy, HY_ND_Piz);
6179 MFEM_VERIFY(new_A, "new Operator must be a HypreParMatrix!");
6181 if (A) { ResetADSPrecond(); }
6198 HYPRE_ADSDestroy(ads);
6220 HYPRE_ADSSetPrintLevel(ads, print_lvl);
6221 print_level = print_lvl;
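// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// HypreADS applied to an H(div) (Raviart-Thomas) system; names are illustrative.
//
//    HypreADS ads(A, face_fespace);   // face_fespace: RT ParFiniteElementSpace
//    ads.SetPrintLevel(0);
//    HyprePCG pcg(A);
//    pcg.SetTol(1e-12);
//    pcg.SetPreconditioner(ads);
//    pcg.Mult(B, X);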
6224HypreLOBPCG::HypreMultiVector::HypreMultiVector(int n, HypreParVector & v,
6225                                                 mv_InterfaceInterpreter & interpreter)
6229 mv_ptr = mv_MultiVectorCreateFromSampleVector(&interpreter, nv,
6230                                               (HYPRE_ParVector)v);
6232 HYPRE_ParVector* vecs = NULL;
6234 mv_TempMultiVector* tmp =
6235    (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);
6236 vecs = (HYPRE_ParVector*)(tmp->vector);
6239 hpv = new HypreParVector*[nv];
6240 for (int i=0; i<nv; i++)
6242 hpv[i] = new HypreParVector(vecs[i]);
6246HypreLOBPCG::HypreMultiVector::~HypreMultiVector()
6250 for (int i=0; i<nv; i++)
6257 mv_MultiVectorDestroy(mv_ptr);
6261HypreLOBPCG::HypreMultiVector::Randomize(HYPRE_Int seed_)
6263 mv_MultiVectorSetRandom(mv_ptr, seed_);
6267HypreLOBPCG::HypreMultiVector::GetVector(unsigned int i)
6269 MFEM_ASSERT((int)i < nv, "index out of range");
6275HypreLOBPCG::HypreMultiVector::StealVectors()
6277 HypreParVector ** hpv_ret = hpv;
6281 mv_TempMultiVector * mv_tmp =
6282 (mv_TempMultiVector*)mv_MultiVectorGetData(mv_ptr);
6284 mv_tmp->ownsVectors = 0;
6286 for (int i=0; i<nv; i++)
6288 hpv_ret[i]->SetOwnership(1);
6306 MPI_Comm_size(comm,&numProcs);
6307 MPI_Comm_rank(comm,&myid);
6309 HYPRE_ParCSRSetupInterpreter(&interpreter);
6310 HYPRE_ParCSRSetupMatvec(&matvec_fn);
6311 HYPRE_LOBPCGCreate(&interpreter, &matvec_fn, &lobpcg_solver);
6320 HYPRE_LOBPCGDestroy(lobpcg_solver);
6326 HYPRE_LOBPCGSetTol(lobpcg_solver, tol);
6332#if MFEM_HYPRE_VERSION >= 21101
6333 HYPRE_LOBPCGSetRTol(lobpcg_solver, rel_tol);
6335 MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
6342 HYPRE_LOBPCGSetMaxIter(lobpcg_solver, max_iter);
6350 HYPRE_LOBPCGSetPrintLevel(lobpcg_solver, logging);
6357 HYPRE_LOBPCGSetPrecondUsageMode(lobpcg_solver, pcg_mode);
6363 HYPRE_LOBPCGSetPrecond(lobpcg_solver,
6364 (HYPRE_PtrToSolverFcn)this->PrecondSolve,
6365 (HYPRE_PtrToSolverFcn)this->PrecondSetup,
6366 (HYPRE_Solver)&precond);
6374 if (HYPRE_AssumedPartitionCheck())
6378 MPI_Scan(&locSize, &part[1], 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
6380 part[0] = part[1] - locSize;
6382 MPI_Allreduce(&locSize, &glbSize, 1, HYPRE_MPI_BIG_INT, MPI_SUM, comm);
6388 MPI_Allgather(&locSize, 1, HYPRE_MPI_BIG_INT,
6389 &part[1], 1, HYPRE_MPI_BIG_INT, comm);
6392 for (int i=0; i<numProcs; i++)
6394 part[i+1] += part[i];
6397 glbSize = part[numProcs];
6409 matvec_fn.MatvecCreate = this->OperatorMatvecCreate;
6410 matvec_fn.Matvec = this->OperatorMatvec;
6411 matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;
6413 HYPRE_LOBPCGSetup(lobpcg_solver,(HYPRE_Matrix)&A,NULL,NULL);
6419 matvec_fn.MatvecCreate = this->OperatorMatvecCreate;
6420 matvec_fn.Matvec = this->OperatorMatvec;
6421 matvec_fn.MatvecDestroy = this->OperatorMatvecDestroy;
6423 HYPRE_LOBPCGSetupB(lobpcg_solver,(HYPRE_Matrix)&M,NULL);
6432 for (int i=0; i<nev; i++)
6434 eigs[i] = eigenvalues[i];
6441 return multi_vec->GetVector(i);
6448 if ( multi_vec == NULL )
6450 MFEM_ASSERT(x != NULL, "In HypreLOBPCG::SetInitialVectors()");
6452 multi_vec = new HypreMultiVector(nev, *x, interpreter);
6456 for (int i=0; i < min(num_vecs,nev); i++)
6458 multi_vec->GetVector(i) = *vecs[i];
6462 for (int i=min(num_vecs,nev); i < nev; i++)
6464 multi_vec->GetVector(i).Randomize(seed);
6468 if ( subSpaceProj != NULL )
6471 y = multi_vec->GetVector(0);
6473 for (int i=1; i<nev; i++)
6475 subSpaceProj->Mult(multi_vec->GetVector(i),
6476                    multi_vec->GetVector(i-1));
6478 subSpaceProj->Mult(y,
6479                    multi_vec->GetVector(nev-1));
6487 if ( multi_vec == NULL )
6489 MFEM_ASSERT(x != NULL, "In HypreLOBPCG::Solve()");
6491 multi_vec = new HypreMultiVector(nev, *x, interpreter);
6492 multi_vec->Randomize(seed);
6494 if ( subSpaceProj != NULL )
6497 y = multi_vec->GetVector(0);
6499 for (int i=1; i<nev; i++)
6501 subSpaceProj->Mult(multi_vec->GetVector(i),
6502                    multi_vec->GetVector(i-1));
6504 subSpaceProj->Mult(y, multi_vec->GetVector(nev-1));
6515 HYPRE_LOBPCGSolve(lobpcg_solver, NULL, *multi_vec, eigenvalues);
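// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// Driving the HypreLOBPCG eigensolver wired up above; 'A' (stiffness), 'M'
// (mass) and 'amg' (a preconditioner for A) are illustrative names.
//
//    HypreLOBPCG lobpcg(MPI_COMM_WORLD);
//    lobpcg.SetNumModes(5);
//    lobpcg.SetPreconditioner(amg);
//    lobpcg.SetMaxIter(200);
//    lobpcg.SetTol(1e-8);
//    lobpcg.SetPrecondUsageMode(1);
//    lobpcg.SetOperator(A);
//    lobpcg.SetMassMatrix(M);
//    lobpcg.Solve();
//    Array<real_t> eigenvalues;
//    lobpcg.GetEigenvalues(eigenvalues);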
6519HypreLOBPCG::OperatorMatvecCreate( void *A,
6526 return ( matvec_data );
6530HypreLOBPCG::OperatorMatvec( void *matvec_data,
6531                             HYPRE_Complex alpha,
6537 MFEM_VERIFY(alpha == 1.0 && beta == 0.0, "values not supported");
6539 Operator *Aop = (Operator*)A;
6541 hypre_ParVector * xPar = (hypre_ParVector *)x;
6542 hypre_ParVector * yPar = (hypre_ParVector *)y;
6544 HypreParVector xVec(xPar);
6545 HypreParVector yVec(yPar);
6547 Aop->Mult( xVec, yVec );
6551 yVec.HypreReadWrite();
6557HypreLOBPCG::OperatorMatvecDestroy( void *matvec_data )
6563HypreLOBPCG::PrecondSolve( void *solver,
6568 Solver * PC = (Solver*)solver;
6570 hypre_ParVector * bPar = (hypre_ParVector *)b;
6571 hypre_ParVector * xPar = (hypre_ParVector *)x;
6573 HypreParVector bVec(bPar);
6574 HypreParVector xVec(xPar);
6576 PC->Mult( bVec, xVec );
6580 xVec.HypreReadWrite();
6586HypreLOBPCG::PrecondSetup( void *solver,
6604 MPI_Comm_size(comm,&numProcs);
6605 MPI_Comm_rank(comm,&myid);
6607 HYPRE_AMECreate(&ame_solver);
6608 HYPRE_AMESetPrintLevel(ame_solver, 0);
6615 mfem_hypre_TFree_host(multi_vec);
6620 for (int i=0; i<nev; i++)
6622 delete eigenvectors[i];
6625 delete [] eigenvectors;
6629 mfem_hypre_TFree_host(eigenvalues);
6632 HYPRE_AMEDestroy(ame_solver);
6640 HYPRE_AMESetBlockSize(ame_solver, nev);
6646 HYPRE_AMESetTol(ame_solver, tol);
6652#if MFEM_HYPRE_VERSION >= 21101
6653 HYPRE_AMESetRTol(ame_solver, rel_tol);
6655 MFEM_ABORT("This method requires HYPRE version >= 2.11.1");
6662 HYPRE_AMESetMaxIter(ame_solver, max_iter);
6670 HYPRE_AMESetPrintLevel(ame_solver, logging);
6677 ams_precond = &precond;
6685 HYPRE_Solver ams_precond_ptr = (HYPRE_Solver)*ams_precond;
6687 ams_precond->SetupFcn()(*ams_precond,A,NULL,NULL);
6689 HYPRE_AMESetAMSSolver(ame_solver, ams_precond_ptr);
6692 HYPRE_AMESetup(ame_solver);
6698 HYPRE_ParCSRMatrix parcsr_M = M;
6699 HYPRE_AMESetMassMatrix(ame_solver,(HYPRE_ParCSRMatrix)parcsr_M);
6705 HYPRE_AMESolve(ame_solver);
6708 HYPRE_AMEGetEigenvalues(ame_solver,&eigenvalues);
6711 HYPRE_AMEGetEigenvectors(ame_solver,&multi_vec);
6718 eigs.SetSize(nev); eigs = -1.0;
6721 for (int i=0; i<nev; i++)
6723 eigs[i] = eigenvalues[i];
6728HypreAME::createDummyVectors() const
6731 for (int i=0; i<nev; i++)
6738const HypreParVector &
6741 if ( eigenvectors == NULL )
6743 this->createDummyVectors();
6746 return *eigenvectors[i];
6752 if ( eigenvectors == NULL )
6754 this->createDummyVectors();
6759 eigenvectors = NULL;
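// --- Usage sketch (editorial addition, not part of hypre.cpp) ----------------
// Driving the HypreAME Maxwell eigensolver implemented above; 'A', 'M' and
// 'fespace_nd' are illustrative (an assembled curl-curl operator, mass matrix
// and the ND space used to build the AMS preconditioner).
//
//    HypreAMS ams(A, fespace_nd);
//    ams.SetPrintLevel(0);
//    ams.SetSingularProblem();
//    HypreAME ame(MPI_COMM_WORLD);
//    ame.SetNumModes(5);
//    ame.SetPreconditioner(ams);
//    ame.SetMaxIter(100);
//    ame.SetTol(1e-8);
//    ame.SetOperator(A);
//    ame.SetMassMatrix(M);
//    ame.Solve();
//    Array<real_t> eigenvalues;
//    ame.GetEigenvalues(eigenvalues);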
Dynamic 2D array using row-major layout.
Memory< T > & GetMemory()
Return a reference to the Memory object used by the Array.
const T * HostRead() const
Shortcut for mfem::Read(a.GetMemory(), a.Size(), false).
void Sort()
Sorts the array in ascending order. This requires operator< to be defined for T.
void SetSize(int nsize)
Change the logical size of the array, keep existing entries.
int Size() const
Return the logical size of the array.
T * GetData()
Returns the data.
static MemoryType GetHostMemoryType()
Get the current Host MemoryType. This is the MemoryType used by most MFEM classes when allocating mem...
static MemoryClass GetHostMemoryClass()
Get the current Host MemoryClass. This is the MemoryClass used by most MFEM host Memory objects.
static bool Allows(unsigned long b_mask)
Return true if any of the backends in the backend mask, b_mask, are allowed.
Collection of finite elements from the same family in multiple dimensions. This class is used to matc...
int GetOrder() const
Return the order (polynomial degree) of the FE collection, corresponding to the order/degree returned...
int GetRangeDim(int dim) const
bool IsVariableOrder() const
Returns true if the space contains elements of varying polynomial orders.
Ordering::Type GetOrdering() const
Return the ordering method.
const FiniteElementCollection * FEColl() const
Hash function for data sequences.
std::string GetHash() const
Return the hash string for the current sequence and reset (clear) the sequence.
HashFunction & AppendDoubles(const real_t *doubles, size_t num_doubles)
Add a sequence of doubles for hashing, given as a c-array.
HashFunction & AppendInts(const int_type *ints, size_t num_ints)
Add a sequence of integers for hashing, given as a c-array.
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
HypreADS(ParFiniteElementSpace *face_fespace)
void SetPrintLevel(int print_lvl)
void SetNumModes(int num_eigs)
void SetPreconditioner(HypreSolver &precond)
void SetPrintLevel(int logging)
void SetMassMatrix(const HypreParMatrix &M)
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
void GetEigenvalues(Array< real_t > &eigenvalues) const
Collect the converged eigenvalues.
void SetOperator(const HypreParMatrix &A)
void Solve()
Solve the eigenproblem.
void SetMaxIter(int max_iter)
HypreParVector ** StealEigenvectors()
Transfer ownership of the converged eigenvectors.
void SetRelTol(real_t rel_tol)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
HypreAMS(ParFiniteElementSpace *edge_fespace)
Construct the AMS solver on the given edge finite element space.
void SetPrintLevel(int print_lvl)
void SetSystemsOptions(int dim, bool order_bynodes=false)
void SetElasticityOptions(ParFiniteElementSpace *fespace, bool interp_refine=true)
void SetAdvectiveOptions(int distance=15, const std::string &prerelax="", const std::string &postrelax="FFC")
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
virtual ~HypreBoomerAMG()
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
HypreEuclid(MPI_Comm comm)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetRowScale(int row_scale)
void SetMaxIter(int max_iter)
HypreFGMRES(MPI_Comm comm)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
void SetPrintLevel(int print_lvl)
void Mult(const HypreParVector &b, HypreParVector &x) const override
Solve Ax=b with hypre's FGMRES.
void SetLogging(int logging)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetLogging(int logging)
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
void SetMaxIter(int max_iter)
void SetPrintLevel(int print_lvl)
void Mult(const HypreParVector &b, HypreParVector &x) const override
Solve Ax=b with hypre's GMRES.
HypreGMRES(MPI_Comm comm)
void SetAbsTol(real_t tol)
HypreILU()
Constructor; sets the default options.
void SetType(HYPRE_Int ilu_type)
void SetLocalReordering(HYPRE_Int reorder_type)
void SetMaxIter(HYPRE_Int max_iter)
void SetLevelOfFill(HYPRE_Int lev_fill)
Set the fill level for ILU(k); the default is k=1.
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetTol(HYPRE_Real tol)
void SetPrintLevel(HYPRE_Int print_level)
Set the print level: 0 = none, 1 = setup, 2 = solve, 3 = setup+solve.
void SetMassMatrix(Operator &M)
HypreLOBPCG(MPI_Comm comm)
void SetPrintLevel(int logging)
void SetPreconditioner(Solver &precond)
void GetEigenvalues(Array< real_t > &eigenvalues) const
Collect the converged eigenvalues.
void SetOperator(Operator &A)
void Solve()
Solve the eigenproblem.
void SetPrecondUsageMode(int pcg_mode)
void SetInitialVectors(int num_vecs, HypreParVector **vecs)
void SetMaxIter(int max_iter)
void SetRelTol(real_t rel_tol)
const HypreParVector & GetEigenvector(unsigned int i) const
Extract a single eigenvector.
void Mult(const HypreParVector &b, HypreParVector &x) const override
Solve Ax=b with hypre's PCG.
void SetResidualConvergenceOptions(int res_frequency=-1, real_t rtol=0.0)
void SetPrintLevel(int print_lvl)
void SetLogging(int logging)
void SetPreconditioner(HypreSolver &precond)
Set the hypre solver to be used as a preconditioner.
void SetMaxIter(int max_iter)
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetAbsTol(real_t atol)
Wrapper for hypre's ParCSR matrix class.
void HypreReadWrite()
Update the internal hypre_ParCSRMatrix object, A, to be in hypre memory space.
void operator*=(real_t s)
Scale all entries by s: A_scaled = s*A.
signed char OwnsColMap() const
Get colmap ownership flag.
HYPRE_BigInt N() const
Returns the global number of columns.
void ScaleRows(const Vector &s)
Scale the local row i by s(i).
void AbsMultTranspose(real_t a, const Vector &x, real_t b, Vector &y) const
Computes y = a * |At| * x + b * y, using entry-wise absolute values of the transpose of the matrix A.
void HostReadWrite()
Update the internal hypre_ParCSRMatrix object, A, to be on host.
void Print(const std::string &fname, HYPRE_Int offi=0, HYPRE_Int offj=0) const
Prints the locally owned rows in parallel. The resulting files can be read with Read_IJMatrix().
void DropSmallEntries(real_t tol)
Wrapper for hypre_ParCSRMatrixDropSmallEntries in different versions of hypre. Drop off-diagonal entr...
void PrintCommPkg(std::ostream &out=mfem::out) const
Print information about the hypre_ParCSRCommPkg of the HypreParMatrix.
void GetDiag(Vector &diag) const
Get the local diagonal of the matrix.
HYPRE_Int MultTranspose(HypreParVector &x, HypreParVector &y, real_t alpha=1.0, real_t beta=0.0) const
Computes y = alpha * A^t * x + beta * y.
void Threshold(real_t threshold=0.0)
Remove values smaller in absolute value than some threshold.
Memory< HYPRE_Int > & GetDiagMemoryI()
HypreParMatrix * LeftDiagMult(const SparseMatrix &D, HYPRE_BigInt *row_starts=NULL) const
Multiply the HypreParMatrix on the left by a block-diagonal parallel matrix D and return the result a...
void EliminateRows(const Array< int > &rows)
Eliminate rows from the diagonal and off-diagonal blocks of the matrix.
void AbsMult(real_t a, const Vector &x, real_t b, Vector &y) const
Computes y = a * |A| * x + b * y, using entry-wise absolute values of the matrix A.
int GetNumCols() const
Returns the number of columns in the diagonal block of the ParCSRMatrix.
void SetOwnerFlags(signed char diag, signed char offd, signed char colmap)
Explicitly set the three ownership flags, see docs for diagOwner etc.
void InvScaleRows(const Vector &s)
Scale the local row i by 1./s(i)
void ResetTranspose() const
Reset (destroy) the internal transpose matrix that is created by EnsureMultTranspose() and MultTransp...
void WrapHypreParCSRMatrix(hypre_ParCSRMatrix *a, bool owner=true)
Converts hypre's format to HypreParMatrix.
int GetNumRows() const
Returns the number of rows in the diagonal block of the ParCSRMatrix.
Memory< HYPRE_Int > & GetDiagMemoryJ()
HYPRE_BigInt * GetRowStarts() const
Return the parallel row partitioning array.
void HypreRead() const
Update the internal hypre_ParCSRMatrix object, A, to be in hypre memory space.
HypreParMatrix()
An empty matrix to be used as a reference to an existing matrix.
signed char OwnsOffd() const
Get offd ownership flag.
HYPRE_BigInt GetGlobalNumRows() const
Return the global number of rows.
void PrintHash(std::ostream &out) const
Print sizes and hashes for all data arrays of the HypreParMatrix from the local MPI rank.
void HostWrite()
Update the internal hypre_ParCSRMatrix object, A, to be on host.
HYPRE_BigInt GetGlobalNumCols() const
Return the global number of columns.
void HostRead() const
Update the internal hypre_ParCSRMatrix object, A, to be on host.
void EliminateRowsCols(const Array< int > &rows_cols, const HypreParVector &X, HypreParVector &B)
HYPRE_Int Mult(HypreParVector &x, HypreParVector &y, real_t alpha=1.0, real_t beta=0.0) const
Computes y = alpha * A * x + beta * y.
HypreParMatrix * ExtractSubmatrix(const Array< int > &indices, real_t threshold=0.0) const
void EnsureMultTranspose() const
Ensure the action of the transpose is performed fast.
MPI_Comm GetComm() const
MPI communicator.
Memory< real_t > & GetDiagMemoryData()
HYPRE_BigInt * GetColStarts() const
Return the parallel column partitioning array.
void Read_IJMatrix(MPI_Comm comm, const std::string &fname)
Read a matrix saved as a HYPRE_IJMatrix.
HypreParMatrix & Add(const real_t beta, const HypreParMatrix &B)
void GetBlocks(Array2D< HypreParMatrix * > &blocks, bool interleaved_rows=false, bool interleaved_cols=false) const
void EliminateBC(const HypreParMatrix &Ae, const Array< int > &ess_dof_list, const Vector &X, Vector &B) const
Eliminate essential BC specified by ess_dof_list from the solution X to the r.h.s....
void MakeRef(const HypreParMatrix &master)
Make this HypreParMatrix a reference to 'master'.
real_t FNorm() const
Return the Frobenius norm of the matrix (or 0 if the underlying hypre matrix is NULL)
void GetOffd(SparseMatrix &offd, HYPRE_BigInt *&cmap) const
Get the local off-diagonal block. NOTE: 'offd' will not own any data.
HYPRE_BigInt M() const
Returns the global number of rows.
hypre_ParCSRMatrix * StealData()
Changes the ownership of the matrix.
HypreParMatrix * Transpose() const
Returns the transpose of *this.
void MergeDiagAndOffd(SparseMatrix &merged)
Get a single SparseMatrix containing all rows from this processor, merged from the diagonal and off-d...
HypreParMatrix * EliminateCols(const Array< int > &cols)
Wrapper for hypre's parallel vector class.
void WrapMemoryWrite(Memory< real_t > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for write access...
void HypreRead() const
Prepare the HypreParVector for read access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
void Read(MPI_Comm comm, const std::string &fname)
Reads a HypreParVector from files saved with HypreParVector::Print.
HypreParVector CreateCompatibleVector() const
Constructs a HypreParVector compatible with the calling vector.
void Print(const std::string &fname) const
Prints the locally owned rows in parallel.
void WrapMemoryReadWrite(Memory< real_t > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for read and wri...
~HypreParVector()
Calls hypre's destroy function.
void WrapHypreParVector(hypre_ParVector *y, bool owner=true)
Converts hypre's format to HypreParVector.
HYPRE_Int Randomize(HYPRE_Int seed)
Set random values.
void HypreReadWrite()
Prepare the HypreParVector for read and write access in hypre's device memory space,...
void HypreWrite()
Prepare the HypreParVector for write access in hypre's device memory space, HYPRE_MEMORY_DEVICE.
HypreParVector()
Default constructor, no underlying hypre_ParVector is created.
void WrapMemoryRead(const Memory< real_t > &mem)
Replace the HypreParVector's data with the given Memory, mem, and prepare the vector for read access ...
HypreParVector & operator=(real_t d)
Set constant values.
void SetData(real_t *data_)
Sets the data of the Vector and the hypre_ParVector to data_.
Vector * GlobalVector() const
Returns the global vector in each processor.
void SetOwnership(int own)
Sets ownership of the internal hypre_ParVector.
void SetReuse(int reuse)
Set the pattern reuse parameter.
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
void SetSymmetry(int sym)
Set symmetry parameter.
void SetParams(real_t thresh, int nlevels)
Set the threshold and levels parameters.
void SetLoadBal(real_t loadbal)
Set the load balance parameter.
HypreParaSails(MPI_Comm comm)
void SetFilter(real_t filter)
Set the filter parameter.
void SetLogging(int logging)
Set the logging parameter.
virtual ~HypreParaSails()
int eig_est_cg_iter
Number of CG iterations to determine eigenvalue estimates.
int poly_order
Order of the smoothing polynomial.
void SetPolyOptions(int poly_order, real_t poly_fraction, int eig_est_cg_iter=10)
Set parameters for polynomial smoothing.
void SetFIRCoefficients(real_t max_eig)
Compute window and Chebyshev coefficients for given polynomial order.
void SetWindowParameters(real_t a, real_t b, real_t c)
Set parameters for windowing function for FIR smoother.
real_t poly_fraction
Fraction of spectrum to smooth for polynomial relaxation.
void MultTranspose(const Vector &b, Vector &x) const override
Apply transpose of the smoother to relax the linear system Ax=b.
HypreParVector * V
Temporary vectors.
HypreParMatrix * A
The linear system matrix.
real_t * fir_coeffs
Combined coefficients for windowing and Chebyshev polynomials.
int relax_times
Number of relaxation sweeps.
void SetWindowByName(const char *window_name)
Convenience function for setting canonical windowing parameters.
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Relax the linear system Ax=b.
real_t * l1_norms
l1 norms of the rows of A
void SetOperator(const Operator &op) override
real_t max_eig_est
Maximal eigenvalue estimate for polynomial smoothing.
void SetTaubinOptions(real_t lambda, real_t mu, int iter)
Set parameters for Taubin's lambda-mu method.
bool pos_l1_norms
If set, take absolute values of the computed l1_norms.
real_t window_params[3]
Parameters for windowing function of FIR filter.
real_t omega
SOR parameter (usually in (0,2))
int poly_scale
Apply the polynomial smoother to A or D^{-1/2} A D^{-1/2}.
HypreParVector * B
Right-hand side and solution vectors.
real_t relax_weight
Damping coefficient (usually <= 1)
real_t min_eig_est
Minimal eigenvalue estimate for polynomial smoothing.
void SetSOROptions(real_t relax_weight, real_t omega)
Set SOR-related parameters.
void SetType(HypreSmoother::Type type, int relax_times=1)
Set the relaxation type and number of sweeps.
Type
HYPRE smoother types.
real_t lambda
Taubin's lambda-mu method parameters.
Memory< real_t > auxB
Auxiliary buffers for the case when the input or output arrays in methods like Mult(const Vector &, Vector &) need to be deep copied in order to be used by hypre.
bool A_is_symmetric
A flag that indicates whether the linear system matrix A is symmetric.
HypreParVector * X0
FIR Filter Temporary Vectors.
static Type DefaultType()
Default value for the smoother type used by the constructors: Type::l1GS when HYPRE is running on CPU and Type::l1Jacobi when HYPRE is running on GPU.
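A short sketch of configuring a HypreSmoother through the setters above; the matrix A and the vectors b and x are assumed to exist elsewhere.
// Sketch only: Chebyshev polynomial smoothing with two sweeps.
HypreSmoother M;
M.SetType(HypreSmoother::Chebyshev, 2); // relaxation type and number of sweeps
M.SetPolyOptions(4, 0.3);               // polynomial order, fraction of spectrum
M.SetOperator(A);                       // 'A' is an assembled HypreParMatrix
M.Mult(b, x);                           // relax the linear system Ax = b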
Abstract class for hypre's solvers and preconditioners.
const HypreParMatrix * A
The linear system matrix.
int setup_called
Was hypre's Setup function called already?
@ WARN_HYPRE_ERRORS
Issue warnings on hypre errors.
@ IGNORE_HYPRE_ERRORS
Ignore hypre errors (see e.g. HypreADS)
@ ABORT_HYPRE_ERRORS
Abort on hypre errors (default in base class)
virtual HYPRE_PtrToParSolverFcn SolveFcn() const =0
hypre's internal Solve function
HypreParVector * B
Right-hand side and solution vector.
bool WrapVectors(const Vector &b, Vector &x) const
Makes the internal HypreParVectors B and X wrap the input vectors b and x.
virtual void Mult(const HypreParVector &b, HypreParVector &x) const
Solve the linear system Ax=b.
ErrorMode error_mode
How to treat hypre errors.
void SetOperator(const Operator &op) override
Set/update the solver for the given operator.
virtual void Setup(const HypreParVector &b, HypreParVector &x) const
Set up the solver (if not set up already, also called automatically by HypreSolver::Mult).
virtual HYPRE_PtrToParSolverFcn SetupFcn() const =0
hypre's internal Setup function
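Concrete solvers derived from HypreSolver follow the same Setup/Mult pattern; the sketch below uses HypreBoomerAMG and SetErrorMode, which are assumptions not listed above, together with an assumed matrix A and vectors B, X.
// Sketch only: any HypreSolver-derived preconditioner/solver is used this way.
HypreBoomerAMG amg(A);
amg.SetErrorMode(HypreSolver::IGNORE_HYPRE_ERRORS); // see ErrorMode above
amg.Mult(B, X); // Setup() is called automatically on the first application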
static void InitDevice()
Configure HYPRE's compute and memory policy.
static void Init()
Initialize hypre by calling HYPRE_Init() and set default options. After calling Hypre::Init(), hypre will be finalized automatically at program exit.
static bool configure_runtime_policy_from_mfem
Use MFEM's device policy to configure HYPRE's device policy, true by default. This variable is used by Hypre::InitDevice().
static void Finalize()
Finalize hypre (called automatically at program exit if Hypre::Init() has been called).
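A sketch of the usual startup sequence; mfem::Mpi::Init is an assumption and is not part of the list above.
// Sketch only: typical program startup when hypre is used.
int main(int argc, char *argv[])
{
   mfem::Mpi::Init(argc, argv); // assumed MPI helper
   mfem::Hypre::Init();         // HYPRE_Init() plus default options
   // ... build meshes, spaces, and solvers ...
   return 0;                    // Hypre::Finalize() runs automatically at exit
}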
A class to initialize the size of a Tensor.
static MemoryType GetDualMemoryType(MemoryType mt)
Return the dual MemoryType of the given one, mt.
Class used by MFEM to store pointers to host and/or device memory.
void SetHostPtrOwner(bool own) const
Set/clear the ownership flag for the host pointer. Ownership indicates whether the pointer will be deleted when the method Delete() is called.
T * Write(MemoryClass mc, int size)
Get write-only access to the memory with the given MemoryClass.
MemoryType GetHostMemoryType() const
Return the host MemoryType of the Memory object.
void SetDevicePtrOwner(bool own) const
Set/clear the ownership flag for the device pointer. Ownership indicates whether the pointer will be deleted when the method Delete() is called.
int Capacity() const
Return the size of the allocated memory.
MemoryType GetDeviceMemoryType() const
Return the device MemoryType of the Memory object. If the device MemoryType is not set, return the host MemoryType.
T * ReadWrite(MemoryClass mc, int size)
Get read-write access to the memory with the given MemoryClass.
void MakeAlias(const Memory &base, int offset, int size)
Create a memory object that points inside the memory object base.
bool OwnsHostPtr() const
Return true if the host pointer is owned. Ownership indicates whether the pointer will be deleted when the method Delete() is called.
bool Empty() const
Return true if the Memory object is empty, see Reset().
const T * Read(MemoryClass mc, int size) const
Get read-only access to the memory with the given MemoryClass.
void CopyFrom(const Memory &src, int size)
Copy size entries from src to *this.
void Reset()
Reset the memory to be empty, ensuring that Delete() will be a no-op.
void Wrap(T *ptr, int size, bool own)
Wrap an externally allocated host pointer, ptr, with the current host memory type returned by MemoryManager::GetHostMemoryType().
void Delete()
Delete the owned pointers and reset the Memory object.
void ClearOwnerFlags() const
Clear the ownership flags for the host and device pointers, as well as any internal data allocated by the Memory object.
void New(int size)
Allocate host memory for size entries with the current host memory type returned by MemoryManager::GetHostMemoryType().
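A minimal sketch of host-side Memory<T> usage built only from the methods listed above.
// Sketch only: owned allocation plus a non-owning alias.
Memory<real_t> mem;
mem.New(100);                                        // owned host allocation
real_t *h = mem.Write(MemoryClass::HOST, 100);       // write-only host access
for (int i = 0; i < 100; i++) { h[i] = real_t(i); }
Memory<real_t> alias;
alias.MakeAlias(mem, 50, 50);                        // view of the second half
const real_t *a = alias.Read(MemoryClass::HOST, 50); // read-only access
real_t first = a[0];                                 // first aliased entry (= 50)
mem.Delete();                                        // frees the owned pointer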
int Dimension() const
Dimension of the reference space used within the elements.
int SpaceDimension() const
Dimension of the physical space containing the mesh.
Arbitrary order H(curl)-trace finite elements defined on the interface between mesh elements (faces, edges); these are the tangential trace FEs of the H(curl)-conforming FEs.
int width
Dimension of the input / number of columns in the matrix.
int Height() const
Get the height (size of output) of the Operator. Synonym with NumRows().
int height
Dimension of the output / number of rows in the matrix.
virtual void Mult(const Vector &x, Vector &y) const =0
Operator application: y=A(x).
int NumCols() const
Get the number of columns (size of input) of the Operator. Synonym with Width().
DiagonalPolicy
Defines operator diagonal policy upon elimination of rows and/or columns.
@ DIAG_ONE
Set the diagonal value to one.
@ DIAG_ZERO
Set the diagonal value to zero.
int Width() const
Get the width (size of input) of the Operator. Synonym with NumCols().
int NumRows() const
Get the number of rows (size of output) of the Operator. Synonym with Height().
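A small sketch of a user-defined Operator implementing the pure virtual Mult above; the class name is illustrative.
// Sketch only: an Operator that scales its input by 2.
class ScaleOp : public Operator
{
public:
   explicit ScaleOp(int n) : Operator(n) { } // square: height = width = n
   void Mult(const Vector &x, Vector &y) const override
   {
      y.SetSize(Height());
      for (int i = 0; i < Height(); i++) { y(i) = 2.0 * x(i); }
   }
};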
Abstract parallel finite element space.
HYPRE_BigInt * GetTrueDofOffsets() const
HYPRE_BigInt GlobalTrueVSize() const
HypreParVector * NewTrueDofVector()
ParMesh * GetParMesh() const
Class for parallel meshes.
Arbitrary order "H^{-1/2}-conforming" face finite elements defined on the interface between mesh elem...
bool iterative_mode
If true, use the second argument of Mult() as an initial guess.
int NumNonZeroElems() const override
Returns the number of the nonzero elements in the matrix.
Memory< int > & GetMemoryI()
void Swap(SparseMatrix &other)
void Clear()
Clear the contents of the SparseMatrix.
Memory< int > & GetMemoryJ()
Memory< real_t > & GetMemoryData()
Table stores the connectivity of elements of TYPE I to elements of TYPE II. For example, an element-to-face connectivity table.
int Size() const
Returns the number of TYPE I elements.
int Size_of_connections() const
Returns the number of connections in the table.
virtual const real_t * HostRead() const
Shortcut for mfem::Read(vec.GetMemory(), vec.Size(), false).
void SetDataAndSize(real_t *d, int s)
Set the Vector data and size.
real_t Normlinf() const
Returns the l_infinity norm of the vector.
void MakeDataOwner() const
Set the Vector data (host pointer) ownership flag.
real_t Norml1() const
Returns the l_1 norm of the vector.
Memory< real_t > & GetMemory()
Return a reference to the Memory object used by the Vector.
real_t Norml2() const
Returns the l2 norm of the vector.
void Destroy()
Destroy a vector.
int Size() const
Returns the size of the vector.
void SetSize(int s)
Resize the vector to size s.
virtual real_t * HostWrite()
Shortcut for mfem::Write(vec.GetMemory(), vec.Size(), false).
Vector & operator=(const real_t *v)
Copy Size() entries from v.
virtual real_t * HostReadWrite()
Shortcut for mfem::ReadWrite(vec.GetMemory(), vec.Size(), false).
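A quick sketch of basic Vector access using the methods above; all local names are illustrative.
// Sketch only: fill a Vector through host access and take norms.
Vector v;
v.SetSize(10);
real_t *d = v.HostWrite();                       // write-only host pointer
for (int i = 0; i < v.Size(); i++) { d[i] = 1.0; }
real_t n2 = v.Norml2();                          // l_2 norm: sqrt(10)
real_t ni = v.Normlinf();                        // l_infinity norm: 1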
real_t sigma(const Vector &x)
MemoryType GetHypreMemoryType()
The MemoryType used by MFEM when allocating arrays for Hypre objects.
void CopyMemory(Memory< T > &src, Memory< T > &dst, MemoryClass dst_mc, bool dst_owner)
Shallow or deep copy src to dst with the goal to make the array src accessible through dst with the MemoryClass dst_mc.
bool CanShallowCopy(const Memory< T > &src, MemoryClass mc)
Return true if the src Memory can be used with the MemoryClass mc.
const T * Read(const Memory< T > &mem, int size, bool on_dev=true)
Get a pointer for read access to mem with the mfem::Device's DeviceMemoryClass, if on_dev = true, or the mfem::Device's HostMemoryClass, otherwise.
MemoryClass GetHypreForallMemoryClass()
real_t infinity()
Define a shortcut for std::numeric_limits<double>::infinity()
real_t ParNormlp(const Vector &vec, real_t p, MPI_Comm comm)
Compute the l_p norm of the Vector which is split without overlap across the given communicator.
real_t u(const Vector &xvec)
void mfem_error(const char *msg)
void Mult(const Table &A, const Table &B, Table &C)
C = A * B (as boolean matrices)
decltype(hypre_CSRMatrix::memory_location) GetHypreParMatrixMemoryLocation(MemoryClass mc)
const T * HostRead(const Memory< T > &mem, int size)
Shortcut to Read(const Memory<T> &mem, int size, false)
T * Write(Memory< T > &mem, int size, bool on_dev=true)
Get a pointer for write access to mem with the mfem::Device's DeviceMemoryClass, if on_dev = true, or the mfem::Device's HostMemoryClass, otherwise.
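A sketch of the free-function access helpers applied to a Memory<real_t> object; whether the returned pointers are host or device pointers depends on the configured mfem::Device.
// Sketch only: device-aware access through the helper functions above.
Memory<real_t> m;
m.New(64);                             // allocation with the default host type
real_t *d_ptr = Write(m, 64);          // write access in the device memory class
// ... fill d_ptr in a kernel or host loop ...
const real_t *h_ptr = HostRead(m, 64); // read access back on the host
m.Delete();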
OutStream out(std::cout)
Global stream used by the library for standard output. Initially it uses the same std::streambuf as std::cout.
real_t InnerProduct(HypreParVector *x, HypreParVector *y)
MemoryClass
Memory classes identify sets of memory types.
MemoryClass GetHypreMemoryClass()
The MemoryClass used by Hypre objects.
void RAP(const DenseMatrix &A, const DenseMatrix &P, DenseMatrix &RAP)
MemoryType GetMemoryType(MemoryClass mc)
Return a suitable MemoryType for a given MemoryClass.
int ParCSRRelax_FIR(hypre_ParCSRMatrix *A, hypre_ParVector *f, real_t max_eig, int poly_order, real_t *fir_coeffs, hypre_ParVector *u, hypre_ParVector *x0, hypre_ParVector *x1, hypre_ParVector *x2, hypre_ParVector *x3)
void delete_hypre_ParCSRMatrixColMapOffd(hypre_ParCSRMatrix *A)
HypreParMatrix * HypreParMatrixFromBlocks(Array2D< const HypreParMatrix * > &blocks, Array2D< real_t > *blockCoeff)
Returns a merged hypre matrix constructed from hypre matrix blocks.
HypreParMatrix * ParAdd(const HypreParMatrix *A, const HypreParMatrix *B)
Returns the matrix A + B.
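For instance, a sketch combining the matrix helpers above; A, B and P are assumed HypreParMatrix objects with compatible dimensions.
// Sketch only: product and sum of parallel matrices.
HypreParMatrix *AP  = ParMult(&A, &P, false); // A * P
HypreParMatrix *sum = ParAdd(&A, &B);         // A + B
delete AP;
delete sum;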
T * HostWrite(Memory< T > &mem, int size)
Shortcut to Write(const Memory<T> &mem, int size, false)
int ParCSRRelax_Taubin(hypre_ParCSRMatrix *A, hypre_ParVector *f, real_t lambda, real_t mu, int N, real_t max_eig, hypre_ParVector *u, hypre_ParVector *r)
void HypreStealOwnership(HypreParMatrix &A_hyp, SparseMatrix &A_diag)
Make A_hyp steal ownership of its diagonal part A_diag.
void GatherBlockOffsetData(MPI_Comm comm, const int rank, const int nprocs, const int num_loc, const Array< int > &offsets, std::vector< int > &all_num_loc, const int numBlocks, std::vector< std::vector< HYPRE_BigInt > > &blockProcOffsets, std::vector< HYPRE_BigInt > &procOffsets, std::vector< std::vector< int > > &procBlockOffsets, HYPRE_BigInt &firstLocal, HYPRE_BigInt &globalNum)
HypreParMatrix * ParMult(const HypreParMatrix *A, const HypreParMatrix *B, bool own_matrix)
void CopyConvertMemory(const Memory< SrcT > &src, MemoryClass dst_mc, Memory< DstT > &dst)
Deep copy and convert src to dst with the goal to make the array src accessible through dst with the MemoryClass dst_mc.
void hypre_forall(int N, lambda &&body)
bool HypreUsingGPU()
Return true if HYPRE is configured to use GPU.
void BlockInverseScale(const HypreParMatrix *A, HypreParMatrix *C, const Vector *b, HypreParVector *d, int blocksize, BlockInverseScaleJob job)
MemoryType
Memory types supported by MFEM.
@ HOST
Host memory; using new[] and delete[].
void EliminateBC(const HypreParMatrix &A, const HypreParMatrix &Ae, const Array< int > &ess_dof_list, const Vector &X, Vector &B)
Eliminate essential BC specified by ess_dof_list from the solution X to the r.h.s. B.
HYPRE_MemoryLocation GetHypreMemoryLocation()
Return the configured HYPRE_MemoryLocation.
std::function< real_t(const Vector &)> f(real_t mass_coeff)
bool MemoryClassContainsType(MemoryClass mc, MemoryType mt)
Return true iff the MemoryType mt is contained in the MemoryClass mc.
void forall(int N, lambda &&body)
void Add(const DenseMatrix &A, const DenseMatrix &B, real_t alpha, DenseMatrix &C)
C = A + alpha*B.
real_t p(const Vector &x, real_t t)
MFEM_HOST_DEVICE real_t norm(const Complex &z)
@ DEBUG_DEVICE
[device] Debug backend: host memory is READ/WRITE protected while a device is in use. It allows testing the "device" code path without any GPU hardware.
@ DEVICE_MASK
Bitwise-OR of all device backends.
Helper struct to convert a C++ type to an MPI type.